From c7ed9c35ae3da8c1fe50b37afb678892fe124cbb Mon Sep 17 00:00:00 2001
From: zhangzhw8 <826035498@qq.com>
Date: Mon, 29 May 2023 15:14:30 +0800
Subject: [PATCH] init

---
 .ci/open_source_check.yml | 95 +
 .ci/python_code_format.yml | 37 +
 .ci/python_unit_test.yml | 56 +
 .ci/templates/open_source_gate.yml | 23 +
 .code.yml | 17 +
 .github/CODEOWNERS | 32 +
 .github/CODE_OF_CONDUCT.md | 50 +
 .github/CONTRIBUTING.md | 66 +
 .github/ISSUE_TEMPLATE/bug_report_zh.md | 29 +
 .github/ISSUE_TEMPLATE/enhancement.md | 13 +
 .github/gitflow.png | Bin 0 -> 45618 bytes
 .github/workflows/check_hard_code_ip.yml | 20 +
 .github/workflows/go_code_check.yml | 57 +
 .github/workflows/python_code_check.yml | 49 +
 .github/workflows/python_unit_test.yml | 50 +
 .gitignore | 7 +
 .gtmproject.yaml | 25 +
 .pre-commit-config.yaml | 31 +
 LICENSE | 21 +
 build.yml | 29 +
 .../db-tools/dbactuator/.ci/codecc.yml | 29 +
 .../dbactuator/.ci/open_source_check.yml | 84 +
 .../.ci/templates/open_source_gate.yml | 26 +
 .../bigdata/db-tools/dbactuator/.gitignore | 26 +
 .../bigdata/db-tools/dbactuator/.golangci.yml | 121 +
 .../bigdata/db-tools/dbactuator/LICENSE | 0
 .../bigdata/db-tools/dbactuator/Makefile | 22 +
 .../bigdata/db-tools/dbactuator/README.md | 134 +
 .../bigdata/db-tools/dbactuator/build.sh | 8 +
 .../bigdata/db-tools/dbactuator/build_doc.sh | 10 +
 .../bigdata/db-tools/dbactuator/cmd/cmd.go | 191 +
 .../bigdata/db-tools/dbactuator/docs/.gitkeep | 0
 .../db-tools/dbactuator/docs/dbactuator.md | 30 +
 .../bigdata/db-tools/dbactuator/docs/docs.go | 2 +
 .../db-tools/dbactuator/docs/embed_docs.go | 8 +
 .../db-tools/dbactuator/docs/swagger.json | 346 +
 .../db-tools/dbactuator/docs/swagger.yaml | 250 +
 .../dbactuator/example/install-hdfs.md | 31 +
 .../bigdata/db-tools/dbactuator/go.mod | 52 +
 .../bigdata/db-tools/dbactuator/go.sum | 157 +
 .../internal/subcmd/commoncmd/cmd.go | 48 +
 .../internal/subcmd/commoncmd/commoncmd.go | 2 +
 .../subcmd/commoncmd/download_http.go | 100 +
 .../internal/subcmd/commoncmd/download_scp.go | 92 +
 .../internal/subcmd/commoncmd/fileserver.go | 85 +
 .../subcmd/commoncmd/rm_large_file.go | 119 +
 .../subcmd/crontabcmd/clear_crontab.go | 64 +
 .../internal/subcmd/crontabcmd/crontabcmd.go | 2 +
 .../subcmd/escmd/check_connections.go | 103 +
 .../internal/subcmd/escmd/check_es.go | 103 +
 .../internal/subcmd/escmd/check_nodes.go | 103 +
 .../internal/subcmd/escmd/check_shards.go | 103 +
 .../internal/subcmd/escmd/clean_data.go | 97 +
 .../dbactuator/internal/subcmd/escmd/cmd.go | 48 +
 .../internal/subcmd/escmd/decompress_pkg.go | 103 +
 .../dbactuator/internal/subcmd/escmd/escmd.go | 2 +
 .../internal/subcmd/escmd/exclude_node.go | 103 +
 .../dbactuator/internal/subcmd/escmd/init.go | 103 +
 .../internal/subcmd/escmd/init_grant.go | 103 +
 .../internal/subcmd/escmd/install_client.go | 103 +
 .../internal/subcmd/escmd/install_cold.go | 103 +
 .../internal/subcmd/escmd/install_exporter.go | 103 +
 .../internal/subcmd/escmd/install_hot.go | 103 +
 .../internal/subcmd/escmd/install_kibana.go | 103 +
 .../internal/subcmd/escmd/install_master.go | 111 +
 .../subcmd/escmd/install_supervisor.go | 103 +
 .../internal/subcmd/escmd/install_telegraf.go | 103 +
 .../internal/subcmd/escmd/replace_master.go | 103 +
 .../internal/subcmd/escmd/restart_process.go | 97 +
 .../internal/subcmd/escmd/start_process.go | 97 +
 .../internal/subcmd/escmd/stop_process.go | 97 +
 .../internal/subcmd/hdfscmd/check_active.go | 100 +
 .../subcmd/hdfscmd/check_decommission.go | 100 +
 .../dbactuator/internal/subcmd/hdfscmd/cmd.go | 51 +
 .../internal/subcmd/hdfscmd/data_clean.go | 103 +
 .../internal/subcmd/hdfscmd/decompress_pkg.go | 105 +
 .../internal/subcmd/hdfscmd/generate_key.go | 100 +
 .../internal/subcmd/hdfscmd/hdfscmd.go | 2 +
 .../subcmd/hdfscmd/init_system_config.go | 98 +
 .../subcmd/hdfscmd/install_datanode.go | 104 +
 .../subcmd/hdfscmd/install_first_namenode.go | 104 +
 .../subcmd/hdfscmd/install_haproxy.go | 111 +
 .../subcmd/hdfscmd/install_journalnode.go | 104 +
 .../subcmd/hdfscmd/install_second_namenode.go | 104 +
 .../subcmd/hdfscmd/install_supervisor.go | 104 +
 .../subcmd/hdfscmd/install_telegraf.go | 111 +
 .../internal/subcmd/hdfscmd/install_zkfc.go | 104 +
 .../subcmd/hdfscmd/install_zookeeper.go | 103 +
 .../subcmd/hdfscmd/instance_operation.go | 98 +
 .../internal/subcmd/hdfscmd/refresh_nodes.go | 98 +
 .../subcmd/hdfscmd/render_hdfs_config.go | 104 +
 .../internal/subcmd/hdfscmd/scp_dir.go | 100 +
 .../subcmd/hdfscmd/start_component.go | 98 +
 .../internal/subcmd/hdfscmd/stop_process.go | 98 +
 .../subcmd/hdfscmd/update_dfs_host.go | 100 +
 .../subcmd/hdfscmd/update_host_mappng.go | 97 +
 .../subcmd/hdfscmd/update_zookeeper_config.go | 100 +
 .../internal/subcmd/hdfscmd/write_key.go | 100 +
 .../internal/subcmd/influxdbcmd/clean_data.go | 97 +
 .../internal/subcmd/influxdbcmd/cmd.go | 38 +
 .../influxdbcmd/decompress_influxdb_pkg.go | 103 +
 .../subcmd/influxdbcmd/influxdbcmd.go | 2 +
 .../internal/subcmd/influxdbcmd/init.go | 103 +
 .../internal/subcmd/influxdbcmd/init_user.go | 103 +
 .../subcmd/influxdbcmd/install_influxdb.go | 103 +
 .../subcmd/influxdbcmd/install_supervisor.go | 103 +
 .../subcmd/influxdbcmd/install_telegraf.go | 103 +
 .../subcmd/influxdbcmd/restart_process.go | 97 +
 .../subcmd/influxdbcmd/start_process.go | 97 +
 .../subcmd/influxdbcmd/stop_process.go | 97 +
 .../subcmd/kafkacmd/check_reassignment.go | 97 +
 .../internal/subcmd/kafkacmd/clean_data.go | 97 +
 .../internal/subcmd/kafkacmd/cmd.go | 45 +
 .../subcmd/kafkacmd/decompress_kafka_pkg.go | 103 +
 .../internal/subcmd/kafkacmd/init.go | 103 +
 .../subcmd/kafkacmd/init_kafkaUser.go | 103 +
 .../subcmd/kafkacmd/install_broker.go | 103 +
 .../subcmd/kafkacmd/install_manager.go | 103 +
 .../subcmd/kafkacmd/install_supervisor.go | 103 +
 .../subcmd/kafkacmd/install_zookeeper.go | 103 +
 .../internal/subcmd/kafkacmd/kafkacmd.go | 2 +
 .../internal/subcmd/kafkacmd/reconfig_add.go | 97 +
 .../subcmd/kafkacmd/reconfig_remove.go | 97 +
 .../internal/subcmd/kafkacmd/reduce_broker.go | 97 +
 .../subcmd/kafkacmd/replace_broker.go | 97 +
 .../subcmd/kafkacmd/restart_broker.go | 97 +
 .../subcmd/kafkacmd/restart_process.go | 97 +
 .../internal/subcmd/kafkacmd/start_process.go | 97 +
 .../internal/subcmd/kafkacmd/stop_process.go | 97 +
 .../internal/subcmd/pulsarcmd/add_hosts.go | 103 +
 .../subcmd/pulsarcmd/check_broker_config.go | 103 +
 .../subcmd/pulsarcmd/check_ledger_metadata.go | 101 +
 .../pulsarcmd/check_namespace_config.go | 103 +
 .../pulsarcmd/check_under_replicated.go | 103 +
 .../internal/subcmd/pulsarcmd/clean_data.go | 97 +
 .../internal/subcmd/pulsarcmd/cmd.go | 49 +
 .../subcmd/pulsarcmd/decommission_bookie.go | 103 +
 .../subcmd/pulsarcmd/decompress_pkg.go | 103 +
 .../internal/subcmd/pulsarcmd/init.go | 103 +
 .../internal/subcmd/pulsarcmd/init_cluster.go | 103 +
 .../subcmd/pulsarcmd/init_pulsar_manager.go | 103 +
 .../subcmd/pulsarcmd/install_bookkeeper.go | 103 +
 .../subcmd/pulsarcmd/install_broker.go | 103 +
 .../pulsarcmd/install_pulsar_manager.go | 103 +
 .../subcmd/pulsarcmd/install_supervisor.go | 103 +
 .../subcmd/pulsarcmd/install_zookeeper.go | 103 +
 .../internal/subcmd/pulsarcmd/modify_hosts.go | 103 +
 .../internal/subcmd/pulsarcmd/pulsarcmd.go | 2 +
 .../subcmd/pulsarcmd/restart_process.go | 97 +
 .../subcmd/pulsarcmd/set_bookie_readonly.go | 95 +
 .../internal/subcmd/pulsarcmd/start_broker.go | 103 +
 .../subcmd/pulsarcmd/start_process.go | 97 +
 .../internal/subcmd/pulsarcmd/stop_process.go | 97 +
 .../subcmd/pulsarcmd/unset_bookie_readonly.go | 95 +
 .../dbactuator/internal/subcmd/subcmd.go | 294 +
 .../internal/subcmd/subcmd_helper.go | 334 +
 .../dbactuator/internal/subcmd/subcmd_util.go | 12 +
 .../internal/subcmd/sysinitcmd/sysinit.go | 68 +
 .../internal/subcmd/sysinitcmd/sysinitcmd.go | 2 +
 .../pkg/components/backup_download/backup.go | 23 +
 .../backup_download/backup_download.go | 2 +
 .../pkg/components/backup_download/cos.go | 1 +
 .../pkg/components/backup_download/gse.go | 1 +
 .../pkg/components/backup_download/http.go | 172 +
 .../pkg/components/backup_download/scp.go | 193 +
 .../dbactuator/pkg/components/base.go | 17 +
 .../dbactuator/pkg/components/components.go | 2 +
 .../pkg/components/computil/computil.go | 2 +
 .../pkg/components/crontab/clear_crontab.go | 27 +
 .../pkg/components/crontab/crontab.go | 2 +
 .../pkg/components/dbconfig/dbconfig.go | 2 +
 .../pkg/components/dbconfig/query_change.go | 1 +
 .../components/elasticsearch/check_health.go | 70 +
 .../components/elasticsearch/check_nodes.go | 83 +
 .../components/elasticsearch/clean_data.go | 102 +
 .../components/elasticsearch/exclude_node.go | 127 +
 .../elasticsearch/install_elasticsearch.go | 798 +
 .../components/elasticsearch/replace_node.go | 60 +
 .../elasticsearch/startstop_process.go | 106 +
 .../pkg/components/fileserver/README.md | 9 +
 .../pkg/components/fileserver/acl.go | 67 +
 .../pkg/components/fileserver/fileserver.go | 342 +
 .../hdfs/config_tpl/check_nn_active | 10 +
 .../components/hdfs/config_tpl/config_tpl.go | 2 +
 .../hdfs/config_tpl/hadoop-daemon-wrapper.sh | 234 +
 .../components/hdfs/config_tpl/hadoop-env.sh | 28 +
 .../components/hdfs/config_tpl/haproxy.cfg | 60 +
 .../hdfs/config_tpl/log4j.properties | 59 +
 .../components/hdfs/config_tpl/rack-aware.sh | 7 +
 .../components/hdfs/config_tpl/template.go | 51 +
 .../dbactuator/pkg/components/hdfs/const.go | 59 +
 .../pkg/components/hdfs/decompress_pkg.go | 79 +
 .../dbactuator/pkg/components/hdfs/hdfs.go | 2 +
 .../pkg/components/hdfs/init_system_config.go | 59 +
 .../pkg/components/hdfs/install_haproxy.go | 102 +
 .../pkg/components/hdfs/install_hdfs.go | 465 +
 .../pkg/components/hdfs/install_zookeeper.go | 123 +
 .../pkg/components/hdfs/node_operation.go | 187 +
 .../pkg/components/hdfs/replace_hdfs.go | 177 +
 .../pkg/components/hdfs/shrink_hdfs.go | 172 +
 .../components/hdfs/update_host_mapping.go | 44 +
 .../pkg/components/hdfs/util/disk.go | 37 +
 .../pkg/components/hdfs/util/http.go | 27 +
 .../pkg/components/hdfs/util/util.go | 2 +
 .../pkg/components/hdfs/util/xml_util.go | 37 +
 .../pkg/components/influxdb/clean_data.go | 85 +
 .../pkg/components/influxdb/influxdb.go | 2 +
 .../components/influxdb/install_influxdb.go | 548 +
 .../components/influxdb/startstop_process.go | 89 +
 .../pkg/components/kafka/clean_data.go | 101 +
 .../pkg/components/kafka/decom_broker.go | 230 +
 .../pkg/components/kafka/install_kafka.go | 923 +
 .../dbactuator/pkg/components/kafka/kafka.go | 2 +
 .../pkg/components/kafka/reconfig.go | 70 +
 .../pkg/components/kafka/startstop_process.go | 146 +
 .../dbactuator/pkg/components/medium.go | 65 +
 .../dbactuator/pkg/components/output.go | 39 +
 .../pkg/components/pulsar/check_shrink.go | 78 +
 .../pkg/components/pulsar/clean_data.go | 92 +
 .../pkg/components/pulsar/install_pulsar.go | 818 +
 .../pkg/components/pulsar/pulsar.go | 2 +
 .../components/pulsar/startstop_process.go | 94 +
 .../pkg/components/sysinit/essysinit.go | 46 +
 .../pkg/components/sysinit/sysinit.go | 54 +
 .../dbactuator/pkg/core/codes/codes.go | 62 +
 .../dbactuator/pkg/core/config/base.go | 20 +
 .../dbactuator/pkg/core/config/init.go | 33 +
 .../db-tools/dbactuator/pkg/core/cst/const.go | 22 +
 .../db-tools/dbactuator/pkg/core/cst/cst.go | 2 +
 .../db-tools/dbactuator/pkg/core/cst/es.go | 55 +
 .../dbactuator/pkg/core/cst/influxdb.go | 16 +
 .../db-tools/dbactuator/pkg/core/cst/kafka.go | 36 +
 .../db-tools/dbactuator/pkg/core/cst/mysql.go | 73 +
 .../db-tools/dbactuator/pkg/core/cst/os.go | 94 +
 .../db-tools/dbactuator/pkg/core/cst/proxy.go | 18 +
 .../dbactuator/pkg/core/cst/pulsar.go | 39 +
 .../dbactuator/pkg/core/safego/graceful.go | 35 +
 .../dbactuator/pkg/core/safego/recover.go | 34 +
 .../dbactuator/pkg/core/safego/safego.go | 2 +
 .../core/staticembed/default_sys_schema.go | 11 +
 .../core/staticembed/default_sys_schema.sql | 51 +
 .../pkg/core/staticembed/staticembed.go | 2 +
 .../pkg/core/staticembed/sysinit_es.go | 11 +
 .../pkg/core/staticembed/sysinit_es.sh | 53 +
 .../pkg/core/staticembed/sysinit_hdfs.go | 11 +
 .../pkg/core/staticembed/sysinit_hdfs.sh | 49 +
 .../pkg/core/staticembed/sysinit_mysql.go | 11 +
 .../pkg/core/staticembed/sysinit_mysql.sh | 86 +
 .../db-tools/dbactuator/pkg/mock/mock.go | 110 +
 .../db-tools/dbactuator/pkg/mock/mock_test.go | 20 +
 .../dbactuator/pkg/rollback/rollback.go | 226 +
 .../dbactuator/pkg/rollback/rollback_test.go | 85 +
 .../dbactuator/pkg/util/esutil/es_helper.go | 270 +
 .../dbactuator/pkg/util/esutil/es_operate.go | 445 +
 .../dbactuator/pkg/util/esutil/esutil.go | 2 +
 .../db-tools/dbactuator/pkg/util/helpers.go | 91 +
 .../dbactuator/pkg/util/httpclient/client.go | 73 +
 .../pkg/util/httpclient/httpclient.go | 2 +
 .../pkg/util/kafkautil/kafkautil.go | 333 +
 .../db-tools/dbactuator/pkg/util/logger.go | 10 +
 .../dbactuator/pkg/util/osutil/cmdexec.go | 151 +
 .../dbactuator/pkg/util/osutil/crontab.go | 252 +
 .../pkg/util/osutil/crontab_test.go | 6 +
 .../dbactuator/pkg/util/osutil/mountpoint.go | 70 +
 .../dbactuator/pkg/util/osutil/netutil.go | 26 +
 .../dbactuator/pkg/util/osutil/osutil.go | 649 +
 .../dbactuator/pkg/util/osutil/osutil_test.go | 16 +
 .../dbactuator/pkg/util/osutil/sysctl.go | 59 +
 .../dbactuator/pkg/util/osutil/truncate.go | 89 +
 .../dbactuator/pkg/util/osutil/unix_only.go | 237 +
 .../pkg/util/osutil/windows_only.go | 16 +
 .../pkg/util/pulsarutil/pulsar_helper.go | 207 +
 .../pkg/util/pulsarutil/pulsar_operate.go | 202 +
 .../pkg/util/pulsarutil/pulsarutil.go | 2 +
 .../db-tools/dbactuator/pkg/util/sftp/init.go | 77 +
 .../db-tools/dbactuator/pkg/util/sftp/sftp.go | 203 +
 .../dbactuator/pkg/util/sftp/sftp_test.go | 18 +
 .../db-tools/dbactuator/pkg/util/slice.go | 207 +
 .../db-tools/dbactuator/pkg/util/str.go | 33 +
 .../pkg/util/templates/command_groups.go | 48 +
 .../dbactuator/pkg/util/templates/markdown.go | 190 +
 .../pkg/util/templates/normallizers.go | 83 +
 .../pkg/util/templates/templates.go | 2 +
 .../dbactuator/pkg/util/timeutil/duration.go | 67 +
 .../dbactuator/pkg/util/timeutil/timeutil.go | 2 +
 .../db-tools/dbactuator/pkg/util/util.go | 361 +
 .../dbactuator/pkg/util/validate/validate.go | 152 +
 .../dbactuator/pkg/util/xmlutil/xml.go | 60 +
 .../dbactuator/pkg/util/xmlutil/xmlutil.go | 2 +
 .../db-tools/dbactuator/scripts/upload.sh | 182 +
 dbm-services/common/db-config/.ci/codecc.yml | 29 +
 .../db-config/.ci/open_source_check.yml | 84 +
 .../.ci/templates/open_source_gate.yml | 26 +
 dbm-services/common/db-config/.gitignore | 7 +
 dbm-services/common/db-config/.golangci.yml | 121 +
 dbm-services/common/db-config/Dockerfile | 10 +
 dbm-services/common/db-config/Makefile | 52 +
 dbm-services/common/db-config/README.md | 204 +
 dbm-services/common/db-config/admin.sh | 69 +
 .../common/db-config/assets/assets.go | 2 +
 .../common/db-config/assets/migrate.go | 10 +
 .../common/db-config/assets/migrate.md | 38 +
 .../assets/migrations/000001_init.down.sql | 1 +
 .../assets/migrations/000001_init.up.sql | 3 +
 .../migrations/000002_create_table.down.sql | 13 +
 .../migrations/000002_create_table.up.sql | 285 +
 .../migrations/000003_init_sensitive.up.sql | 1 +
 .../migrations/000010_common_data.down.sql | 2 +
 .../migrations/000010_common_data.up.sql | 81 +
 .../assets/migrations/000011_es_data.down.sql | 2 +
 .../assets/migrations/000011_es_data.up.sql | 106 +
 .../migrations/000012_hdfs_data.down.sql | 2 +
 .../assets/migrations/000012_hdfs_data.up.sql | 166 +
 .../migrations/000013_kafka_data.down.sql | 2 +
 .../migrations/000013_kafka_data.up.sql | 86 +
 ...014_PredixyTendisplusCluster_data.down.sql | 2 +
 ...00014_PredixyTendisplusCluster_data.up.sql | 187 +
 .../migrations/000015_rediscomm_data.down.sql | 2 +
 .../migrations/000015_rediscomm_data.up.sql | 109 +
 .../000016_RedisInstance_data.down.sql | 2 +
 .../000016_RedisInstance_data.up.sql | 134 +
 .../migrations/000017_RedisMS_data.down.sql | 2 +
 .../migrations/000017_RedisMS_data.up.sql | 78 +
 .../migrations/000018_tendb_data.down.sql | 2 +
 .../migrations/000018_tendb_data.up.sql | 89 +
 .../000019_tendbcluster_data.down.sql | 2 +
 .../000019_tendbcluster_data.up.sql | 1626 ++
 .../migrations/000020_tendbha_data.down.sql | 2 +
 .../migrations/000020_tendbha_data.up.sql | 1044 ++
 .../000021_tendbsingle_data.down.sql | 2 +
 .../migrations/000021_tendbsingle_data.up.sql | 1017 +
 .../000022_TendisCache_data.down.sql | 2 +
 .../migrations/000022_TendisCache_data.up.sql | 99 +
 .../000023_TendisplusInstance_data.down.sql | 2 +
 .../000023_TendisplusInstance_data.up.sql | 119 +
 .../migrations/000024_TendisSSD_data.down.sql | 2 +
 .../migrations/000024_TendisSSD_data.up.sql | 77 +
 .../migrations/000025_TendisX_data.down.sql | 2 +
 .../migrations/000025_TendisX_data.up.sql | 77 +
 ...00026_TwemproxyRedisInstance_data.down.sql | 2 +
 .../000026_TwemproxyRedisInstance_data.up.sql | 167 +
 ..._TwemproxyTendisplusInstance_data.down.sql | 2 +
 ...27_TwemproxyTendisplusInstance_data.up.sql | 136 +
 ...8_TwemproxyTendisSSDInstance_data.down.sql | 2 +
 ...028_TwemproxyTendisSSDInstance_data.up.sql | 225 +
 .../migrations/000029_pulsar_data.down.sql | 2 +
 .../migrations/000029_pulsar_data.up.sql | 550 +
 .../migrations/000030_influxdb_data.down.sql | 2 +
 .../migrations/000030_influxdb_data.up.sql | 78 +
 dbm-services/common/db-config/build.sh | 18 +
 dbm-services/common/db-config/build_doc.sh | 8 +
 .../db-config/cmd/bkconfigcli/bkconfigcli.go | 2 +
 .../common/db-config/cmd/bkconfigcli/main.go | 1 +
 .../common/db-config/cmd/bkconfigsvr/main.go | 101 +
 .../common/db-config/cmd/encryptcli/main.go | 2 +
 .../common/db-config/conf/config.yaml | 26 +
 .../common/db-config/conf/logger.yaml | 18 +
 .../common/db-config/docs/design/encrypt.md | 38 +
 .../common/db-config/docs/design/examples.md | 204 +
 .../common/db-config/docs/design/readme.md | 0
 .../db-config/docs/design/value_validate.md | 58 +
 dbm-services/common/db-config/docs/docs.go | 1845 ++
 .../common/db-config/docs/embed_apidoc.go | 10 +
 .../common/db-config/docs/swagger.json | 1825 ++
 .../common/db-config/docs/swagger.yaml | 1402 ++
 dbm-services/common/db-config/go.mod | 80 +
 dbm-services/common/db-config/go.sum | 1931 ++
 .../common/db-config/internal/api/api.go | 2 +
 .../db-config/internal/api/apply_config.go | 212 +
 .../db-config/internal/api/baseResponse.go | 40 +
 .../db-config/internal/api/bkapigw_user.go | 30 +
 .../db-config/internal/api/config_base.go | 91 +
 .../db-config/internal/api/config_file.go | 24 +
 .../db-config/internal/api/config_item.go | 180 +
 .../db-config/internal/api/config_meta.go | 109 +
 .../db-config/internal/api/config_plat.go | 51 +
 .../db-config/internal/api/config_version.go | 62 +
 .../common/db-config/internal/api/dbha.go | 34 +
 .../db-config/internal/api/simple_config.go | 171 +
 .../db-config/internal/handler/handler.go | 44 +
 .../internal/handler/simple/batchget.go | 80 +
 .../internal/handler/simple/config_apply.go | 210 +
 .../internal/handler/simple/config_file.go | 43 +
 .../internal/handler/simple/config_item.go | 216 +
 .../internal/handler/simple/config_meta.go | 48 +
 .../internal/handler/simple/config_plat.go | 125 +
 .../internal/handler/simple/config_version.go | 263 +
 .../db-config/internal/handler/simple/dbha.go | 33 +
 .../internal/handler/simple/simple.go | 50 +
 .../internal/handler/simple/simple_item.go | 1 +
 .../db-config/internal/pkg/cst/const.go | 75 +
 .../common/db-config/internal/pkg/cst/cst.go | 2 +
 .../db-config/internal/pkg/cst/mysql.go | 138 +
 .../db-config/internal/pkg/errno/code.go | 77 +
 .../db-config/internal/pkg/errno/errno.go | 116 +
 .../db-config/internal/repository/migrate.go | 141 +
 .../repository/migratespec/sensitive.go | 133 +
 .../internal/repository/model/batchget.go | 27 +
 .../internal/repository/model/cache.go | 24 +
 .../repository/model/cache_config_file.go | 190 +
 .../repository/model/cache_config_name.go | 71 +
 .../internal/repository/model/cache_crond.go | 29 +
 .../repository/model/cache_file_node.go | 58 +
 .../internal/repository/model/config_apply.go | 142 +
 .../internal/repository/model/config_file.go | 117 +
 .../internal/repository/model/config_item.go | 494 +
 .../repository/model/config_item_check.go | 127 +
 .../repository/model/config_item_test.go | 27 +
 .../internal/repository/model/config_level.go | 86 +
 .../internal/repository/model/config_meta.go | 135 +
 .../internal/repository/model/config_plat.go | 134 +
 .../repository/model/config_version.go | 402 +
 .../db-config/internal/repository/model/db.go | 163 +
 .../internal/repository/model/dbmeta.go | 33 +
 .../repository/model/dbmeta_inside.go | 109 +
 .../internal/repository/model/dbtime.go | 52 +
 .../internal/repository/model/file_node.go | 115 +
 .../internal/repository/model/level_node.go | 88 +
 .../internal/repository/model/model.go | 367 +
 .../repository/model/model_config_node.go | 76 +
 .../internal/repository/model/node_task.go | 88 +
 .../internal/repository/repository.go | 2 +
 .../db-config/internal/router/router.go | 27 +
 .../internal/router/router_restapi.go | 55 +
 .../internal/service/configcheck/README.MD | 2 +
 .../service/configcheck/config_check.go | 2 +
 .../service/configcheck/config_file.go | 1 +
 .../internal/service/dbha/batchget.go | 35 +
 .../db-config/internal/service/dbha/dbha.go | 2 +
 .../service/simpleconfig/config_apply.go | 353 +
 .../service/simpleconfig/config_file.go | 189 +
 .../service/simpleconfig/config_item.go | 665 +
 .../service/simpleconfig/config_item_check.go | 201 +
 .../simpleconfig/config_item_format.go | 165 +
 .../service/simpleconfig/config_item_merge.go | 189 +
 .../service/simpleconfig/config_item_test.go | 123 +
 .../service/simpleconfig/config_meta.go | 192 +
 .../service/simpleconfig/config_plat.go | 181 +
 .../service/simpleconfig/config_version.go | 232 +
 .../service/simpleconfig/simple_config.go | 2 +
 .../common/db-config/pkg/constvar/const.go | 81 +
 .../common/db-config/pkg/constvar/constvar.go | 2 +
 .../common/db-config/pkg/constvar/mysql.go | 31 +
 .../common/db-config/pkg/constvar/os.go | 117 +
 .../common/db-config/pkg/core/config/base.go | 61 +
 .../db-config/pkg/core/config/config.go | 2 +
 .../db-config/pkg/core/config/logger.go | 47 +
 .../common/db-config/pkg/core/config/tls.go | 24 +
 .../db-config/pkg/core/logger/README.md | 41 +
 .../db-config/pkg/core/logger/base/base.go | 2 +
 .../pkg/core/logger/base/interface.go | 14 +
 .../db-config/pkg/core/logger/example_test.go | 20 +
 .../common/db-config/pkg/core/logger/init.go | 16 +
 .../common/db-config/pkg/core/logger/log.go | 87 +
 .../db-config/pkg/core/logger/logger.go | 2 +
 .../pkg/core/logger/logrus/fields.go | 61 +
 .../db-config/pkg/core/logger/logrus/log.go | 64 +
 .../pkg/core/logger/logrus/logrus.go | 2 +
 .../db-config/pkg/core/logger/logrus/new.go | 63 +
 .../pkg/core/logger/lumberjack/lumberjack.go | 535 +
 .../db-config/pkg/core/logger/zap/new.go | 137 +
 .../db-config/pkg/core/logger/zap/zap.go | 2 +
 .../pkg/core/logger/zap/zapfields.go | 67 +
 .../db-config/pkg/core/logger/zap/zaplog.go | 66 +
 .../db-config/pkg/core/safego/graceful.go | 35 +
 .../db-config/pkg/core/safego/recover.go | 34 +
 .../db-config/pkg/core/safego/safego.go | 2 +
 .../common/db-config/pkg/core/trace/file.go | 23 +
 .../common/db-config/pkg/core/trace/trace.go | 2 +
 .../common/db-config/pkg/httpclient/client.go | 85 +
 .../db-config/pkg/httpclient/httpclient.go | 2 +
 .../db-config/pkg/httpclient/response.go | 10 +
 .../common/db-config/pkg/httpclient/sign.go | 25 +
 .../common/db-config/pkg/middleware/cors.go | 16 +
 .../db-config/pkg/middleware/middleware.go | 2 +
 .../db-config/pkg/middleware/request_body.go | 41 +
 .../db-config/pkg/middleware/request_id.go | 22 +
 .../common/db-config/pkg/util/backoff.go | 67 +
 .../common/db-config/pkg/util/boolext.go | 65 +
 .../db-config/pkg/util/compress/compress.go | 40 +
 .../common/db-config/pkg/util/confvalue.go | 11 +
 .../common/db-config/pkg/util/crypt/auth.go | 14 +
 .../db-config/pkg/util/crypt/encrypt.go | 164 +
 .../common/db-config/pkg/util/datasize.go | 70 +
 .../db-config/pkg/util/dbutil/dbutil.go | 131 +
 .../common/db-config/pkg/util/dbutil/json.go | 64 +
 .../common/db-config/pkg/util/dbutil/time.go | 102 +
 .../common/db-config/pkg/util/durationext.go | 216 +
 dbm-services/common/db-config/pkg/util/map.go | 59 +
 .../db-config/pkg/util/serialize/serialize.go | 45 +
 dbm-services/common/db-config/pkg/util/set.go | 101 +
 .../common/db-config/pkg/util/slice.go | 255 +
 dbm-services/common/db-config/pkg/util/str.go | 86 +
 dbm-services/common/db-config/pkg/util/tls.go | 175 +
 .../common/db-config/pkg/util/trim.go | 90 +
 .../common/db-config/pkg/util/trim_test.go | 141 +
 .../common/db-config/pkg/util/util.go | 18 +
 .../db-config/pkg/validate/check_value.go | 387 +
 .../pkg/validate/check_value_test.go | 131 +
 .../common/db-config/pkg/validate/const.go | 61 +
 .../common/db-config/pkg/validate/validate.go | 140 +
 dbm-services/common/db-dns/dns-api/.gitignore | 4 +
 dbm-services/common/db-dns/dns-api/Dockerfile | 12 +
 dbm-services/common/db-dns/dns-api/Makefile | 21 +
 dbm-services/common/db-dns/dns-api/README.md | 19 +
 .../db-dns/dns-api/cmd/bk-dnsapi/main.go | 58 +
 .../common/db-dns/dns-api/docs/.gitkeep | 0
 dbm-services/common/db-dns/dns-api/go.mod | 50 +
 dbm-services/common/db-dns/dns-api/go.sum | 711 +
 .../common/db-dns/dns-api/internal/dao/dao.go | 64 +
 .../dns-api/internal/domain/entity/base.go | 106 +
 .../dns-api/internal/domain/entity/entity.go | 2 +
 .../dns-api/internal/domain/entity/error.go | 15 +
 .../internal/domain/repo/domain/base.go | 189 +
 .../internal/domain/repo/domain/domain.go | 2 +
 .../dns-api/internal/domain/service/.gitkeep | 0
 .../dns-api/internal/handler/domain/base.go | 51 +
 .../dns-api/internal/handler/domain/delete.go | 103 +
 .../dns-api/internal/handler/domain/domain.go | 2 +
 .../dns-api/internal/handler/domain/insert.go | 104 +
 .../dns-api/internal/handler/domain/query.go | 127 +
 .../dns-api/internal/handler/domain/update.go | 156 +
 .../common/db-dns/dns-api/pkg/README.md | 8 +
 .../common/db-dns/dns-api/pkg/errno/code.go | 32 +
 .../common/db-dns/dns-api/pkg/errno/errno.go | 67 +
 dbm-services/common/db-dns/dns-api/pkg/go.mod | 3 +
 dbm-services/common/db-dns/dns-api/pkg/go.sum | 0
 .../common/db-dns/dns-api/pkg/tools/tools.go | 41 +
 .../common/db-dns/dns-api/pkg/tools/util.go | 97 +
 .../db-dns/dns-api/scripts/ddl/init.sql | 18 +
 .../db-dns/dns-api/scripts/git/pre-commit | 31 +
 .../common/db-dns/dns-reload/Makefile | 21 +
 .../common/db-dns/dns-reload/api/api.go | 68 +
 .../db-dns/dns-reload/config/config.conf | 17 +
 .../common/db-dns/dns-reload/config/config.go | 2 +
 .../common/db-dns/dns-reload/config/init.go | 59 +
 .../common/db-dns/dns-reload/dao/dao.go | 2 +
 .../common/db-dns/dns-reload/dao/domain.go | 44 +
 .../db-dns/dns-reload/doc/named.conf_tpl | 78 +
 dbm-services/common/db-dns/dns-reload/go.mod | 3 +
 dbm-services/common/db-dns/dns-reload/go.sum | 0
 .../common/db-dns/dns-reload/logger/init.go | 42 +
 .../common/db-dns/dns-reload/logger/logger.go | 2 +
 .../common/db-dns/dns-reload/main/main.go | 57 +
 .../db-dns/dns-reload/service/dnsService.go | 194 +
 .../db-dns/dns-reload/service/service.go | 2 +
 .../common/db-dns/dns-reload/util/tools.go | 37 +
 .../common/db-dns/dns-reload/util/util.go | 2 +
 .../common/db-resource/.ci/codecc.yml | 29 +
 .../db-resource/.ci/open_source_check.yml | 84 +
 .../.ci/templates/open_source_gate.yml | 26 +
 dbm-services/common/db-resource/.gitignore | 13 +
 dbm-services/common/db-resource/.golangci.yml | 58 +
 dbm-services/common/db-resource/Dockerfile | 8 +
 dbm-services/common/db-resource/Makefile | 45 +
 dbm-services/common/db-resource/README.md | 1 +
 dbm-services/common/db-resource/go.mod | 58 +
 dbm-services/common/db-resource/go.sum | 597 +
 .../db-resource/internal/config/config.go | 78 +
 .../internal/controller/apply/apply.go | 162 +
 .../internal/controller/controller.go | 53 +
 .../internal/controller/manage/manage.go | 2 +
 .../internal/controller/manage/rs.go | 325 +
 .../internal/controller/manage/rs_import.go | 248 +
 .../internal/controller/manage/rs_lable.go | 51 +
 .../controller/manage/rs_operation_info.go | 70 +
 .../common/db-resource/internal/lock/lock.go | 2 +
 .../db-resource/internal/lock/redis_lock.go | 55 +
 .../internal/lock/redis_lock_test.go | 31 +
 .../db-resource/internal/lock/spinlock.go | 47 +
 .../internal/middleware/middleware.go | 80 +
 .../internal/model/TbDeviceSpec.go | 34 +
 .../internal/model/TbRequestLog.go | 39 +
 .../internal/model/TbRpApplyDetailLog.go | 37 +
 .../db-resource/internal/model/TbRpDetail.go | 231 +
 .../internal/model/TbRpDetailArchive.go | 90 +
 .../internal/model/TbRpOperatorInfo.go | 34 +
 .../internal/model/TbRpReturnDetail.go | 25 +
 .../db-resource/internal/model/model.go | 422 +
 .../db-resource/internal/routers/router.go | 18 +
 .../db-resource/internal/routers/routers.go | 2 +
 .../db-resource/internal/svr/apply/api.go | 201 +
 .../db-resource/internal/svr/apply/apply.go | 331 +
 .../db-resource/internal/svr/apply/core.go | 354 +
 .../internal/svr/apply/instance.go | 42 +
 .../common/db-resource/internal/svr/bk/bk.go | 2 +
 .../common/db-resource/internal/svr/bk/cc.go | 98 +
 .../db-resource/internal/svr/bk/cc_test.go | 73 +
 .../db-resource/internal/svr/bk/disk.go | 187 +
 .../internal/svr/bk/get_block_info.sh | 147 +
 .../db-resource/internal/svr/bk/job_v3.go | 217 +
 .../internal/svr/bk/job_v3_test.go | 87 +
 .../db-resource/internal/svr/cloud/cloud.go | 22 +
 .../internal/svr/cloud/tencent/tencent.go | 65 +
 .../svr/cloud/tencent/tencentcloud_test.go | 89 +
 .../db-resource/internal/svr/meta/meta.go | 77 +
 .../db-resource/internal/svr/task/task.go | 114 +
 dbm-services/common/db-resource/main.go | 53 +
 .../common/db-resource/pkg/errno/code.go | 76 +
 .../common/db-resource/pkg/errno/errno.go | 115 +
 .../common/db-resource/pkg/util/util.go | 2 +
 .../common/db-resource/scripts/.gitkeep | 0
 dbm-services/common/dbha/ha-module/.gitignore | 7 +
 dbm-services/common/dbha/ha-module/Dockerfile | 14 +
 dbm-services/common/dbha/ha-module/README.md | 176 +
 .../common/dbha/ha-module/agent/agent.go | 2 +
 .../common/dbha/ha-module/agent/connection.go | 71 +
 .../dbha/ha-module/agent/monitor_agent.go | 448 +
 .../common/dbha/ha-module/bk-dbha/.helmignore | 23 +
 .../common/dbha/ha-module/bk-dbha/Chart.yaml | 24 +
 .../ha-module/bk-dbha/templates/NOTES.txt | 22 +
 .../ha-module/bk-dbha/templates/_helpers.tpl | 62 +
 .../bk-dbha/templates/configmap.yaml | 76 +
 .../bk-dbha/templates/deployment.yaml | 75 +
 .../dbha/ha-module/bk-dbha/templates/hpa.yaml | 28 +
 .../ha-module/bk-dbha/templates/ingress.yaml | 61 +
 .../ha-module/bk-dbha/templates/service.yaml | 15 +
 .../bk-dbha/templates/serviceaccount.yaml | 12 +
 .../templates/tests/test-connection.yaml | 15 +
 .../common/dbha/ha-module/bk-dbha/values.yaml | 82 +
 dbm-services/common/dbha/ha-module/build.sh | 3 +
 .../common/dbha/ha-module/client/client.go | 338 +
 .../common/dbha/ha-module/client/cmdb.go | 208 +
 .../common/dbha/ha-module/client/hadb.go | 860 +
 .../dbha/ha-module/client/name_service.go | 266 +
 .../common/dbha/ha-module/client/nc.go | 83 +
 .../dbha/ha-module/client/redis_client.go | 175 +
 .../dbha/ha-module/client/remote_config.go | 105 +
 .../common/dbha/ha-module/config/config.go | 282 +
 .../dbha/ha-module/constvar/constant.go | 321 +
 .../dbha/ha-module/constvar/constvar.go | 2 +
 dbm-services/common/dbha/ha-module/dbha.go | 98 +
 .../dbha/ha-module/dbmodule/dbmodule.go | 2 +
 .../dbmodule/mysql/MySQLProxy_callback.go | 182 +
 .../dbmodule/mysql/MySQLProxy_detect.go | 37 +
 .../dbmodule/mysql/MySQLProxy_switch.go | 89 +
 .../dbmodule/mysql/MySQL_callback.go | 342 +
 .../ha-module/dbmodule/mysql/MySQL_detect.go | 217 +
 .../dbmodule/mysql/MySQL_proxy_handle.go | 86 +
 .../ha-module/dbmodule/mysql/MySQL_switch.go | 597 +
 .../dbha/ha-module/dbmodule/mysql/mysql.go | 2 +
 .../ha-module/dbmodule/redis/lru_cache.go | 186 +
 .../dbmodule/redis/predixy_callback.go | 93 +
 .../dbmodule/redis/predixy_detect.go | 130 +
 .../dbmodule/redis/predixy_switch.go | 69 +
 .../dbha/ha-module/dbmodule/redis/redis.go | 2 +
 .../ha-module/dbmodule/redis/redis_base.go | 832 +
 .../dbmodule/redis/redis_callback.go | 85 +
 .../ha-module/dbmodule/redis/redis_detect.go | 198 +
 .../ha-module/dbmodule/redis/redis_switch.go | 588 +
 .../ha-module/dbmodule/redis/svr_password.go | 377 +
 .../dbmodule/redis/tendisplus_callback.go | 85 +
 .../dbmodule/redis/tendisplus_detect.go | 203 +
 .../dbmodule/redis/tendisplus_switch.go | 137 +
 .../dbmodule/redis/twemproxy_callback.go | 91 +
 .../dbmodule/redis/twemproxy_detect.go | 134 +
 .../dbmodule/redis/twemproxy_switch.go | 68 +
 .../dbha/ha-module/dbmodule/register.go | 64 +
 .../common/dbha/ha-module/dbutil/db_detect.go | 168 +
 .../common/dbha/ha-module/dbutil/db_switch.go | 153 +
 .../common/dbha/ha-module/dbutil/dbutil.go | 2 +
 .../common/dbha/ha-module/errno/code.go | 47 +
 .../common/dbha/ha-module/errno/errno.go | 102 +
 .../common/dbha/ha-module/gm/connection.go | 236 +
 dbm-services/common/dbha/ha-module/gm/gcm.go | 261 +
 dbm-services/common/dbha/ha-module/gm/gdm.go | 174 +
 dbm-services/common/dbha/ha-module/gm/gm.go | 238 +
 dbm-services/common/dbha/ha-module/gm/gmm.go | 147 +
 dbm-services/common/dbha/ha-module/gm/gqa.go | 260 +
 dbm-services/common/dbha/ha-module/go.mod | 37 +
 dbm-services/common/dbha/ha-module/go.sum | 124 +
 dbm-services/common/dbha/ha-module/ha.yaml | 92 +
 dbm-services/common/dbha/ha-module/log/log.go | 143 +
 .../common/dbha/ha-module/monitor/monitor.go | 159 +
 .../dbha/ha-module/monitor/monitor_api.go | 175 +
 .../common/dbha/ha-module/test/MySQL_test.go | 62 +
 .../common/dbha/ha-module/test/agent_test.go | 77 +
 .../common/dbha/ha-module/test/client_test.go | 84 +
 .../common/dbha/ha-module/test/log_test.go | 17 +
 .../common/dbha/ha-module/test/util_test.go | 20 +
 .../common/dbha/ha-module/types/types.go | 8 +
 .../common/dbha/ha-module/util/file_lock.go | 59 +
 .../common/dbha/ha-module/util/timezone.go | 36 +
 .../common/dbha/ha-module/util/util.go | 136 +
 dbm-services/common/dbha/hadb-api/Dockerfile | 20 +
 dbm-services/common/dbha/hadb-api/LICENSE | 0
 dbm-services/common/dbha/hadb-api/README.md | 13 +
 dbm-services/common/dbha/hadb-api/cmd/add.go | 42 +
 dbm-services/common/dbha/hadb-api/cmd/root.go | 48 +
 dbm-services/common/dbha/hadb-api/cmd/run.go | 68 +
 .../common/dbha/hadb-api/conf/config.yaml | 20 +
 dbm-services/common/dbha/hadb-api/go.mod | 46 +
 dbm-services/common/dbha/hadb-api/go.sum | 547 +
 .../common/dbha/hadb-api/initc/initc.go | 2 +
 .../common/dbha/hadb-api/initc/initconfig.go | 48 +
 dbm-services/common/dbha/hadb-api/log/log.go | 121 +
 dbm-services/common/dbha/hadb-api/main.go | 40 +
 .../common/dbha/hadb-api/model/DBStatus.go | 22 +
 .../common/dbha/hadb-api/model/HALogs.go | 21 +
 .../common/dbha/hadb-api/model/HAStatus.go | 27 +
 .../common/dbha/hadb-api/model/SwitchLogs.go | 19 +
 .../dbha/hadb-api/model/TbMonSwitchQueue.go | 32 +
 .../common/dbha/hadb-api/model/init.go | 127 +
 .../common/dbha/hadb-api/pkg/api/api.go | 62 +
 .../dbha/hadb-api/pkg/handler/add_dbstatus.go | 12 +
 .../dbha/hadb-api/pkg/handler/add_halogs.go | 12 +
 .../dbha/hadb-api/pkg/handler/add_hastatus.go | 12 +
 .../hadb-api/pkg/handler/add_switchlogs.go | 10 +
 .../hadb-api/pkg/handler/add_switchqueue.go | 12 +
 .../hadb-api/pkg/handler/dbstatus/dbstatus.go | 2 +
 .../pkg/handler/dbstatus/dbstatus_handler.go | 202 +
 .../hadb-api/pkg/handler/halogs/halogs.go | 2 +
 .../pkg/handler/halogs/halogs_handler.go | 81 +
 .../dbha/hadb-api/pkg/handler/handler.go | 21 +
 .../hadb-api/pkg/handler/hastatus/hastatus.go | 2 +
 .../pkg/handler/hastatus/hastatus_handler.go | 404 +
 .../pkg/handler/switchlog/switchlog.go | 2 +
 .../handler/switchlog/switchlogs_handler.go | 166 +
 .../pkg/handler/switchqueue/switchqueue.go | 2 +
 .../switchqueue/switchqueue_handler.go | 442 +
 .../common/dbha/hadb-api/util/constants.go | 38 +
 .../common/dbha/hadb-api/util/timezone.go | 34 +
 .../common/dbha/hadb-api/util/util.go | 26 +
 dbm-services/common/go-pubpkg/.gitignore | 1 +
 dbm-services/common/go-pubpkg/README.md | 1 +
 dbm-services/common/go-pubpkg/cc.v3/README.md | 2 +
 .../go-pubpkg/cc.v3/add_host_from_cmpy.go | 27 +
 .../cc.v3/add_host_from_cmpy_test.go | 26 +
 .../go-pubpkg/cc.v3/biz_internal_module.go | 37 +
 .../cc.v3/biz_internal_module_test.go | 19 +
 .../common/go-pubpkg/cc.v3/biz_list.go | 47 +
 .../common/go-pubpkg/cc.v3/biz_list_test.go | 21 +
 .../common/go-pubpkg/cc.v3/biz_location.go | 38 +
 .../go-pubpkg/cc.v3/biz_location_test.go | 41 +
 .../common/go-pubpkg/cc.v3/biz_module.go | 25 +
 .../common/go-pubpkg/cc.v3/biz_module_list.go | 45 +
 .../go-pubpkg/cc.v3/biz_module_list_test.go | 21 +
 .../common/go-pubpkg/cc.v3/biz_sensitive.go | 44 +
 .../common/go-pubpkg/cc.v3/biz_set.go | 61 +
 .../common/go-pubpkg/cc.v3/biz_set_list.go | 44 +
 .../common/go-pubpkg/cc.v3/biz_set_test.go | 34 +
 .../common/go-pubpkg/cc.v3/biz_topo_tree.go | 38 +
 .../common/go-pubpkg/cc.v3/biz_watch.go | 99 +
 dbm-services/common/go-pubpkg/cc.v3/client.go | 128 +
 .../go-pubpkg/cc.v3/clone_host_property.go | 29 +
 .../cc.v3/clont_host_service_instance_proc.go | 29 +
 .../common/go-pubpkg/cc.v3/dept_list.go | 37 +
 .../common/go-pubpkg/cc.v3/host_base_info.go | 38 +
 .../go-pubpkg/cc.v3/host_base_info_test.go | 30 +
 .../go-pubpkg/cc.v3/host_biz_relations.go | 49 +
 .../common/go-pubpkg/cc.v3/host_id_query.go | 69 +
 .../common/go-pubpkg/cc.v3/host_location.go | 44 +
 .../go-pubpkg/cc.v3/host_location_test.go | 26 +
 .../cc.v3/host_relation_info_test.go | 26 +
 .../go-pubpkg/cc.v3/host_relation_list.go | 57 +
 .../go-pubpkg/cc.v3/host_relation_watch.go | 103 +
 .../common/go-pubpkg/cc.v3/host_watch.go | 100 +
 .../common/go-pubpkg/cc.v3/host_watch_test.go | 19 +
 .../go-pubpkg/cc.v3/host_without_biz_list.go | 39 +
 .../cc.v3/host_without_biz_list_test.go | 55 +
 .../common/go-pubpkg/cc.v3/list_biz_hosts.go | 37 +
 .../common/go-pubpkg/cc.v3/module_watch.go | 101 +
 dbm-services/common/go-pubpkg/cc.v3/schema.go | 729 +
 .../common/go-pubpkg/cc.v3/set_watch.go | 98 +
 .../cc.v3/sync_host_info_from_cmpy.go | 27 +
 .../cc.v3/sync_host_info_from_comy_test.go | 26 +
 .../common/go-pubpkg/cc.v3/test_config.go | 14 +
 .../common/go-pubpkg/cc.v3/transfer_host.go | 36 +
 .../go-pubpkg/cc.v3/transfer_host_module.go | 28 +
 .../go-pubpkg/cc.v3/transfer_host_test.go | 27 +
 .../common/go-pubpkg/cc.v3/update_host.go | 45 +
 .../go-pubpkg/cc.v3/update_host_test.go | 24 +
 .../common/go-pubpkg/cc.v3/utils/utils.go | 40 +
 .../go-pubpkg/cc.v3/utils/utils_test.go | 52 +
 dbm-services/common/go-pubpkg/cc.v3/watch.go | 86 +
 .../common/go-pubpkg/cmutil/command.go | 34 +
 dbm-services/common/go-pubpkg/cmutil/db.go | 72 +
 dbm-services/common/go-pubpkg/cmutil/file.go | 72 +
 dbm-services/common/go-pubpkg/cmutil/map.go | 22 +
 dbm-services/common/go-pubpkg/cmutil/mysql.go | 110 +
 .../common/go-pubpkg/cmutil/randstring.go | 27 +
 .../common/go-pubpkg/cmutil/ratelimit.go | 30 +
 .../go-pubpkg/cmutil/remove_file_limit.go | 94 +
 .../common/go-pubpkg/cmutil/sizebytes.go | 81 +
 dbm-services/common/go-pubpkg/cmutil/slice.go | 190 +
 dbm-services/common/go-pubpkg/cmutil/str.go | 49 +
 dbm-services/common/go-pubpkg/cmutil/util.go | 35 +
 dbm-services/common/go-pubpkg/go.mod | 48 +
 dbm-services/common/go-pubpkg/go.sum | 538 +
 dbm-services/common/go-pubpkg/logger/cst.go | 29 +
 .../go-pubpkg/logger/custom_field_test.go | 66 +
 .../common/go-pubpkg/logger/default.go | 63 +
 .../common/go-pubpkg/logger/default_test.go | 19 +
 .../common/go-pubpkg/logger/encoder.go | 46 +
 dbm-services/common/go-pubpkg/logger/field.go | 146 +
 dbm-services/common/go-pubpkg/logger/log.go | 69 +
 .../common/go-pubpkg/logger/logger.go | 2 +
 .../common/go-pubpkg/logger/rotate.go | 65 +
 .../common/go-pubpkg/logger/rotate_test.go | 48 +
 .../common/go-pubpkg/reportlog/report.go | 91 +
 .../common/go-pubpkg/reportlog/reportlog.go | 2 +
 .../common/go-pubpkg/timeutil/duration.go | 67 +
 .../common/go-pubpkg/timeutil/duration_ext.go | 216 +
 .../common/go-pubpkg/timeutil/timeutil.go | 2 +
 .../common/go-pubpkg/validate/validate.go | 189 +
 dbm-services/go.work | 23 +
 dbm-services/go.work.sum | 995 +
 .../mysql/db-partition/.ci/codecc.yml | 29 +
 .../db-partition/.ci/open_source_check.yml | 84 +
 .../.ci/templates/open_source_gate.yml | 26 +
 dbm-services/mysql/db-partition/.gitignore | 14 +
 dbm-services/mysql/db-partition/Dockerfile | 11 +
 dbm-services/mysql/db-partition/Makefile | 44 +
 dbm-services/mysql/db-partition/README.md | 109 +
 .../mysql/db-partition/assests/assests.go | 2 +
 .../mysql/db-partition/assests/migrate.go | 51 +
 .../migrations/000001_init.down.sql.sql | 1 +
 .../assests/migrations/000001_init.up.sql | 1 +
 .../migrations/000002_create_table.down.sql | 2 +
 .../migrations/000002_create_table.up.sql | 184 +
 dbm-services/mysql/db-partition/cron/cron.go | 71 +
 .../db-partition/cron/cron_basic_func.go | 79 +
 .../mysql/db-partition/cron/cron_object.go | 17 +
 dbm-services/mysql/db-partition/errno/code.go | 361 +
 .../mysql/db-partition/errno/errno.go | 133 +
 dbm-services/mysql/db-partition/go.mod | 64 +
 dbm-services/mysql/db-partition/go.sum | 1944 ++
 .../mysql/db-partition/handler/handler.go | 205 +
 dbm-services/mysql/db-partition/main.go | 56 +
 .../mysql/db-partition/model/init_db.go | 49 +
 .../mysql/db-partition/model/init_env.go | 60 +
 .../mysql/db-partition/model/init_logger.go | 38 +
 .../mysql/db-partition/model/init_redis.go | 46 +
 .../mysql/db-partition/model/model.go | 2 +
 .../mysql/db-partition/monitor/monitor.go | 110 +
 .../db-partition/monitor/monitor_object.go | 33 +
 .../mysql/db-partition/router/router.go | 21 +
 .../db-partition/service/check_partition.go | 210 +
 .../service/check_partition_base_func.go | 635 +
 .../service/check_partition_object.go | 81 +
 .../db-partition/service/db_meta_service.go | 144 +
 .../db-partition/service/db_remote_service.go | 118 +
 .../service/execute_partition_object.go | 187 +
 .../db-partition/service/manage_config.go | 450 +
 .../service/manage_config_object.go | 84 +
 .../mysql/db-partition/util/client.go | 226 +
 dbm-services/mysql/db-partition/util/time.go | 96 +
 dbm-services/mysql/db-partition/util/util.go | 84 +
 dbm-services/mysql/db-priv/.ci/codecc.yml | 29 +
 .../mysql/db-priv/.ci/open_source_check.yml | 84 +
 .../.ci/templates/open_source_gate.yml | 26 +
 dbm-services/mysql/db-priv/.gitignore | 16 +
 dbm-services/mysql/db-priv/.golangci.yml | 121 +
 dbm-services/mysql/db-priv/Dockerfile | 12 +
 dbm-services/mysql/db-priv/Makefile | 44 +
 dbm-services/mysql/db-priv/README.md | 1 +
 dbm-services/mysql/db-priv/admin.sh | 70 +
 dbm-services/mysql/db-priv/assests/assests.go | 2 +
 dbm-services/mysql/db-priv/assests/migrate.go | 50 +
 .../migrations/000001_init.down.sql.sql | 1 +
 .../assests/migrations/000001_init.up.sql | 1 +
 .../assests/migrations/000002_init.down.sql | 4 +
 .../assests/migrations/000002_init.up.sql | 123 +
 dbm-services/mysql/db-priv/errno/code.go | 403 +
 dbm-services/mysql/db-priv/errno/errno.go | 133 +
 dbm-services/mysql/db-priv/go.mod | 74 +
 dbm-services/mysql/db-priv/go.sum | 2063 +++
 dbm-services/mysql/db-priv/handler/account.go | 117 +
 .../mysql/db-priv/handler/account_rule.go | 117 +
 .../mysql/db-priv/handler/add_priv.go | 90 +
 .../db-priv/handler/clone_client_priv.go | 59 +
 .../db-priv/handler/clone_instance_priv.go | 59 +
 dbm-services/mysql/db-priv/handler/handler.go | 2 +
 .../mysql/db-priv/handler/public_key.go | 53 +
 .../mysql/db-priv/handler/register_routes.go | 59 +
 dbm-services/mysql/db-priv/main.go | 109 +
 dbm-services/mysql/db-priv/service/account.go | 214 +
 .../mysql/db-priv/service/account_object.go | 53 +
 .../mysql/db-priv/service/accout_rule.go | 298 +
 .../db-priv/service/accout_rule_object.go | 55 +
 .../mysql/db-priv/service/add_priv.go | 204 +
 .../db-priv/service/add_priv_base_func.go | 562 +
 .../mysql/db-priv/service/add_priv_object.go | 106 +
 .../db-priv/service/clone_client_priv.go | 144 +
 .../service/clone_client_priv_base_func.go | 162 +
 .../service/clone_client_priv_object.go | 38 +
 .../db-priv/service/clone_instance_priv.go | 106 +
 .../service/clone_instance_priv_base_func.go | 555 +
 .../service/clone_instance_priv_object.go | 80 +
 .../mysql/db-priv/service/db_meta_service.go | 99 +
 .../db-priv/service/db_remote_service.go | 125 +
 dbm-services/mysql/db-priv/service/init_db.go | 90 +
 dbm-services/mysql/db-priv/service/service.go | 2 +
 dbm-services/mysql/db-priv/util/base_func.go | 72 +
 dbm-services/mysql/db-priv/util/client.go | 210 +
 dbm-services/mysql/db-priv/util/db.go | 41 +
 dbm-services/mysql/db-priv/util/rsa.go | 213 +
 dbm-services/mysql/db-priv/util/time.go | 96 +
 dbm-services/mysql/db-priv/util/util.go | 2 +
 .../mysql/db-remote-service/.gitignore | 26 +
 .../mysql/db-remote-service/.golangci.yml | 57 +
 .../mysql/db-remote-service/Dockerfile | 6 +
 dbm-services/mysql/db-remote-service/LICENSE | 0
 dbm-services/mysql/db-remote-service/Makefile | 36 +
 .../db-remote-service/all_sql_commands.txt | 150 +
 .../mysql/db-remote-service/cmd/init.go | 54 +
 .../mysql/db-remote-service/cmd/root.go | 138 +
 .../mysql/db-remote-service/cmd/version.go | 42 +
 dbm-services/mysql/db-remote-service/go.mod | 59 +
 dbm-services/mysql/db-remote-service/go.sum | 575 +
 dbm-services/mysql/db-remote-service/main.go | 9 +
 .../db-remote-service/pkg/config/config.go | 67 +
 .../db-remote-service/pkg/mysql_rpc/embed.go | 78 +
 .../db-remote-service/pkg/mysql_rpc/init.go | 105 +
 .../pkg/mysql_rpc/mysql_rpc.go | 2 +
 .../db-remote-service/pkg/parser/parser.go | 80 +
 .../pkg/proxy_rpc/proxy_rpc.go | 96 +
 .../db-remote-service/pkg/redis_rpc/client.go | 137 +
 .../db-remote-service/pkg/redis_rpc/common.go | 467 +
 .../db-remote-service/pkg/redis_rpc/init.go | 532 +
 .../pkg/redis_rpc/redis_rpc.go | 123 +
 .../pkg/redis_rpc/twemproxy_rpc.go | 67 +
 .../pkg/rpc_core/execute_cmd.go | 60 +
 .../pkg/rpc_core/execute_cmds_on_addr.go | 107 +
 .../db-remote-service/pkg/rpc_core/init.go | 16 +
 .../pkg/rpc_core/interface.go | 22 +
 .../pkg/rpc_core/rpc_core.go | 2 +
 .../pkg/rpc_core/rpc_wrapper.go | 36 +
 .../db-remote-service/pkg/rpc_core/run.go | 46 +
 .../pkg/service/handler_parser.go | 46 +
 .../service/handler_rpc/general_handler.go | 71 +
 .../pkg/service/handler_rpc/handler_rpc.go | 2 +
 .../pkg/service/handler_rpc/init.go | 18 +
 .../pkg/service/handler_rpc/mysql.go | 6 +
 .../pkg/service/handler_rpc/proxy.go | 8 +
 .../pkg/service/handler_rpc/redis.go | 9 +
 .../pkg/service/handler_rpc/utils.go | 21 +
 .../db-remote-service/pkg/service/router.go | 26 +
 .../db-remote-service/pkg/service/service.go | 2 +
 .../mysql/db-remote-service/readme.md | 220 +
 dbm-services/mysql/db-simulation/.gitignore | 10 +
 .../mysql/db-simulation/.golangci.yml | 57 +
 dbm-services/mysql/db-simulation/Dockerfile | 11 +
 dbm-services/mysql/db-simulation/Makefile | 47 +
 .../mysql/db-simulation/all_sql_commands.txt | 150 +
 dbm-services/mysql/db-simulation/app/app.go | 14 +
 .../mysql/db-simulation/app/config/config.go | 185 +
 .../db-simulation/app/service/kubernets.go | 377 +
 .../app/service/kubernets_test.go | 24 +
 .../db-simulation/app/service/service.go | 2 +
 .../app/service/simulation_task.go | 317 +
 .../app/syntax/alter_table_rule.go | 46 +
 .../app/syntax/create_db_rule.go | 18 +
 .../app/syntax/create_table_rule.go | 227 +
 .../db-simulation/app/syntax/definer_rule.go | 17 +
 .../db-simulation/app/syntax/dml_rule.go | 15 +
 .../db-simulation/app/syntax/mysql_keyword.go | 435 +
 .../mysql/db-simulation/app/syntax/rule.go | 230 +
 .../db-simulation/app/syntax/rule_test.go | 28 +
 .../db-simulation/app/syntax/spider_rule.go | 65 +
 .../mysql/db-simulation/app/syntax/syntax.go | 490 +
 .../db-simulation/app/syntax/syntax_test.go | 43 +
 .../db-simulation/app/syntax/tmysqlpase.go | 227 +
 dbm-services/mysql/db-simulation/go.mod | 75 +
 dbm-services/mysql/db-simulation/go.sum | 716 +
 .../mysql/db-simulation/handler/handler.go | 202 +
 .../mysql/db-simulation/handler/rule.go | 45 +
 .../db-simulation/handler/syntax_check.go | 139 +
 .../mysql/db-simulation/handler/updaterule.go | 86 +
 dbm-services/mysql/db-simulation/main.go | 53 +
 .../mysql/db-simulation/model/model.go | 82 +
 .../db-simulation/model/tb_request_record.go | 33 +
 .../db-simulation/model/tb_simulation_task.go | 109 +
 .../db-simulation/model/tb_syntax_rule.go | 240 +
 .../mysql/db-simulation/pkg/bkrepo/bkrepo.go | 173 +
 .../db-simulation/pkg/bkrepo/bkrepo_test.go | 45 +
 .../mysql/db-simulation/pkg/util/spider.go | 150 +
 .../mysql/db-simulation/pkg/util/util.go | 2 +
 .../mysql/db-simulation/router/router.go | 42 +
 dbm-services/mysql/db-simulation/rule.yaml | 59 +
 .../mysql/db-simulation/spider_rule.yaml | 43 +
 .../mysql/db-tools/dbactuator/.ci/codecc.yml | 29 +
 .../dbactuator/.ci/open_source_check.yml | 84 +
 .../.ci/templates/open_source_gate.yml | 26 +
 .../mysql/db-tools/dbactuator/.gitignore | 27 +
 .../mysql/db-tools/dbactuator/.golangci.yml | 57 +
 .../mysql/db-tools/dbactuator/LICENSE | 0
 .../mysql/db-tools/dbactuator/Makefile | 35 +
 .../mysql/db-tools/dbactuator/README.md | 170 +
 .../mysql/db-tools/dbactuator/build.sh | 9 +
 .../mysql/db-tools/dbactuator/build_doc.sh | 10 +
 .../mysql/db-tools/dbactuator/cmd/cmd.go | 204 +
 .../mysql/db-tools/dbactuator/docs/.gitkeep | 0
 .../db-tools/dbactuator/docs/dbactuator.md | 30 +
 .../mysql/db-tools/dbactuator/docs/docs.go | 2 +
 .../db-tools/dbactuator/docs/embed_docs.go | 8 +
 .../db-tools/dbactuator/docs/swagger.json | 3170 ++++
 .../db-tools/dbactuator/docs/swagger.yaml | 2223 +++
 .../example/import-sqlfile.example.md | 43 +
 .../mysql-backup-database-table.example.md | 16 +
 .../example/mysql-backup-download.example.md | 41 +
 .../mysql-backup-importfull.example.md | 30 +
 .../example/mysql-change-master.example.md | 40 +
 .../example/mysql-deploy-dbbackup.example.md | 98 +
 .../example/mysql-deploy-monitor.example.md | 389 +
 .../example/mysql-deploy.example.md | 125 +
 .../example/mysql-semantic-check.example.md | 45 +
 .../example/mysql-uninstall.example.md | 30 +
 .../example/proxy-deploy-monitor.example.md | 125 +
 .../example/proxy-deploy.example.md | 40 +
 dbm-services/mysql/db-tools/dbactuator/go.mod | 58 +
 dbm-services/mysql/db-tools/dbactuator/go.sum | 160 +
 .../internal/subcmd/commoncmd/cmd.go | 50 +
 .../internal/subcmd/commoncmd/commoncmd.go | 2 +
 .../subcmd/commoncmd/download_http.go | 102 +
 .../subcmd/commoncmd/download_ibs_query.go | 88 +
 .../subcmd/commoncmd/download_ibs_recover.go | 103 +
 .../internal/subcmd/commoncmd/download_scp.go | 94 +
 .../internal/subcmd/commoncmd/fileserver.go | 86 +
 .../subcmd/commoncmd/rm_large_file.go | 121 +
 .../subcmd/crontabcmd/clear_crontab.go | 68 +
 .../internal/subcmd/crontabcmd/crontabcmd.go | 2 +
 .../subcmd/mysqlcmd/backup_database_table.go | 85 +
 .../mysqlcmd/backup_truncate_database.go | 97 +
 .../mysqlcmd/build_master_slave_relation.go | 98 +
 .../internal/subcmd/mysqlcmd/clean_mysql.go | 91 +
 .../subcmd/mysqlcmd/clear_instance_config.go | 78 +
 .../subcmd/mysqlcmd/clone_client_grant.go | 79 +
 .../internal/subcmd/mysqlcmd/cmd.go | 67 +
 .../subcmd/mysqlcmd/deploy_mysql_crond.go | 95 +
 .../subcmd/mysqlcmd/find_local_backup.go | 91 +
 .../internal/subcmd/mysqlcmd/flashback.go | 89 +
 .../internal/subcmd/mysqlcmd/full_backup.go | 92 +
 .../internal/subcmd/mysqlcmd/grant_repl.go | 91 +
 .../subcmd/mysqlcmd/import_partitionsql.go | 80 +
 .../internal/subcmd/mysqlcmd/import_sqlfie.go | 90 +
 .../subcmd/mysqlcmd/install_checkusm.go | 101 +
 .../subcmd/mysqlcmd/install_dba_toolkit.go | 91 +
 .../subcmd/mysqlcmd/install_monitor.go | 94 +
 .../internal/subcmd/mysqlcmd/install_mysql.go | 138 +
 .../subcmd/mysqlcmd/install_new_dbbackup.go | 112 +
 .../mysqlcmd/install_new_rotatebinlog.go | 93 +
 .../internal/subcmd/mysqlcmd/mycnf_change.go | 91 +
 .../internal/subcmd/mysqlcmd/mycnf_clone.go | 91 +
 .../internal/subcmd/mysqlcmd/mysqlcmd.go | 2 +
 .../subcmd/mysqlcmd/parse_binlog_time.go | 84 +
 .../subcmd/mysqlcmd/pt_table_checksum.go | 96 +
 .../internal/subcmd/mysqlcmd/pt_table_sync.go | 82 +
 .../subcmd/mysqlcmd/recover_binlog.go | 98 +
 .../internal/subcmd/mysqlcmd/restore_dr.go | 123 +
 .../subcmd/mysqlcmd/semantic_check.go | 102 +
 .../subcmd/mysqlcmd/semantic_dump_schema.go | 88 +
 .../internal/subcmd/mysqlcmd/start_mysql.go | 52 +
 .../mysqlcmd/switch_backend_to_slave.go | 118 +
 .../subcmd/mysqlcmd/uninstall_mysql.go | 76 +
 .../subcmd/proxycmd/clone_proxy_user.go | 92 +
 .../internal/subcmd/proxycmd/cmd.go | 35 +
 .../subcmd/proxycmd/install_mysql_proxy.go | 100 +
 .../subcmd/proxycmd/restart_mysql_proxy.go | 70 +
 .../internal/subcmd/proxycmd/set_backend.go | 90 +
 .../subcmd/proxycmd/uninstall_mysql_proxy.go | 67 +
 .../internal/subcmd/spidercmd/cmd.go | 33 +
 .../subcmd/spidercmd/install_spider.go | 131 +
 .../subcmd/spidercmd/restart_spider.go | 73 +
 .../subcmd/spidercmd/uninstall_spider.go | 76 +
 .../add_spider_slave_relationship.go | 75 +
 .../spiderctlcmd/add_temporary_spider.go | 73 +
 .../internal/subcmd/spiderctlcmd/cmd.go | 35 +
 .../init_cluster_routing_relationship.go | 77 +
 .../subcmd/spiderctlcmd/install_spider_ctl.go | 135 +
 .../spiderctlcmd/uninstall_spider_ctl.go | 76 +
 .../dbactuator/internal/subcmd/subcmd.go | 341 +
 .../internal/subcmd/subcmd_helper.go | 338 +
 .../dbactuator/internal/subcmd/subcmd_util.go | 69 +
 .../internal/subcmd/sysinitcmd/sysinit.go | 79 +
 .../internal/subcmd/sysinitcmd/sysinitcmd.go | 2 +
 .../pkg/components/backup_download/backup.go | 23 +
 .../backup_download/backup_download.go | 2 +
 .../backup_download/backupsys_query_comp.go | 67 +
 .../backup_download/backupsys_recover_comp.go | 288 +
 .../pkg/components/backup_download/cos.go | 1 +
 .../pkg/components/backup_download/gse.go | 1 +
 .../components/backup_download/http_client.go | 233 +
 .../components/backup_download/http_comp.go | 177 +
 .../backup_download/ibs_recover_wild_comp.go | 130 +
 .../backup_download/ieg_backupsys.go | 330 +
 .../components/backup_download/scp_comp.go | 193 +
 .../dbactuator/pkg/components/base.go | 39 +
 .../dbactuator/pkg/components/components.go | 2 +
 .../pkg/components/computil/computil.go | 2 +
 .../pkg/components/computil/mysql_operate.go | 324 +
 .../components/computil/mysql_operate_test.go | 42 +
 .../pkg/components/crontab/clear_crontab.go | 58 +
 .../pkg/components/crontab/crontab.go | 2 +
 .../pkg/components/db_base_account.go | 247 +
 .../pkg/components/dbconfig/dbconfig.go | 2 +
 .../pkg/components/dbconfig/query_change.go | 1 +
 .../pkg/components/fileserver/README.md | 9 +
 .../pkg/components/fileserver/acl.go | 67 +
 .../pkg/components/fileserver/fileserver.go | 346 +
 .../dbactuator/pkg/components/medium.go | 56 +
 .../components/mysql/backup_database_table.go | 352 +
 .../mysql/backup_truncate_database.go | 293 +
 .../pkg/components/mysql/change_master.go | 284 +
 .../components/mysql/check_instance_idle.go | 1 +
 .../pkg/components/mysql/clean_mysql.go | 152 +
 .../components/mysql/clear_instance_config.go | 238 +
 .../pkg/components/mysql/common/common.go | 2 +
 .../components/mysql/common/helper_example.go | 51 +
 .../pkg/components/mysql/common/types.go | 34 +
 .../pkg/components/mysql/cutover/base.go | 517 +
 .../pkg/components/mysql/cutover/cutover.go | 350 +
 .../components/mysql/dbbackup/backup_index.go | 203 +
 .../components/mysql/dbbackup/backup_info.go | 253 +
 .../pkg/components/mysql/dbbackup/cst.go | 32 +
 .../pkg/components/mysql/dbbackup/dbbackup.go | 2 +
 .../pkg/components/mysql/dbbackup/types.go | 104 +
 .../components/mysql/deploy_mysql_crond.go | 348 +
 .../pkg/components/mysql/drop_large_table.go | 69 +
 .../pkg/components/mysql/excute_sql_file.go | 283 +
 .../components/mysql/execute_partition_sql.go | 363 +
 .../pkg/components/mysql/find_backup_local.go | 248 +
 .../pkg/components/mysql/full_backup.go | 208 +
 .../pkg/components/mysql/grant/clone.go | 1 +
 .../mysql/grant/clone_client_grant.go | 183 +
 .../mysql/grant/clone_instance_priv.go | 36 +
 .../pkg/components/mysql/grant/repl.go | 117 +
 .../pkg/components/mysql/install_checksum.go | 253 +
 .../components/mysql/install_dba_toolkit.go | 70 +
 .../pkg/components/mysql/install_monitor.go | 365 +
 .../pkg/components/mysql/install_mysql.go | 991 +
 .../components/mysql/install_new_dbbackup.go | 372 +
 .../components/mysql/install_rotatebinlog.go | 218 +
 .../pkg/components/mysql/mycnf_change.go | 228 +
 .../pkg/components/mysql/mycnf_clone.go | 136 +
 .../pkg/components/mysql/mycnf_diff.go | 1 +
 .../pkg/components/mysql/parse_binlog_time.go | 61 +
 .../pkg/components/mysql/pt_table_checksum.go | 393 +
 .../pkg/components/mysql/pt_table_sync.go | 420 +
 .../pkg/components/mysql/restore/README.md | 21 +
 .../pkg/components/mysql/restore/backup.go | 106 +
 .../pkg/components/mysql/restore/common.go | 33 +
 .../mysql/restore/dbloader/dbloader.go | 9 +
 .../mysql/restore/dbloader/dbloader_util.go | 54 +
 .../mysql/restore/dbloader/logical_loader.go | 148 +
 .../mysql/restore/dbloader/physical_loader.go | 99 +
 .../mysql/restore/dbloader/xtrabackup.go | 188 +
 .../restore/dbloader/xtrabackup_repaire.go | 289 +
 .../mysql/restore/dbloader_restore.go | 224 +
 .../components/mysql/restore/mload_restore.go | 191 +
 .../components/mysql/restore/mload_util.go | 286 +
 .../mysql/restore/recover_binlog.go | 708 +
 .../pkg/components/mysql/restore/restore.go | 236 +
 .../mysql/restore/xload_repaire_util.go | 267 +
 .../components/mysql/restore/xload_restore.go | 332 +
 .../components/mysql/restore/xload_util.go | 150 +
 .../components/mysql/rollback/flashback.go | 162 +
 .../mysql/rollback/flashback_check.go | 223 +
 .../mysql/rollback/flashback_download.go | 1 +
 .../mysql/rollback/flashback_import.go | 6 +
 .../mysql/rollback/flashback_rows.go | 1 +
 .../pkg/components/mysql/rollback/rollback.go | 2 +
 .../components/mysql/semantic_check_run.go | 453 +
 .../components/mysql/semantic_dump_schema.go | 221 +
 .../pkg/components/mysql/uninstall_mysql.go | 299 +
 .../mysql_proxy/clone_proxy_user.go | 79 +
 .../mysql_proxy/install_mysql_proxy.go | 408 +
 .../mysql_proxy/restart_mysql_proxy.go | 94 +
 .../pkg/components/mysql_proxy/set_backend.go | 79 +
 .../mysql_proxy/uninstall_mysql_proxy.go | 129 +
 .../dbactuator/pkg/components/output.go | 39 +
 .../pkg/components/spider/restart_spider.go | 78 +
 .../add_slave_cluster_relationship.go | 200 +
 .../spiderctl/add_temporary_spider.go | 65 +
 .../init_cluster_routing_relationship.go | 151 +
 .../pkg/components/spiderctl/spiderctl.go | 2 +
 .../pkg/components/sysinit/sysinit.go | 87 +
 .../dbactuator/pkg/core/codes/codes.go | 62 +
 .../db-tools/dbactuator/pkg/core/cst/const.go | 31 +
 .../db-tools/dbactuator/pkg/core/cst/cst.go | 2 +
 .../dbactuator/pkg/core/cst/dbbackup.go | 14 +
 .../db-tools/dbactuator/pkg/core/cst/mysql.go | 112 +
 .../db-tools/dbactuator/pkg/core/cst/os.go | 8 +
 .../db-tools/dbactuator/pkg/core/cst/proxy.go | 18 +
 .../core/staticembed/default_sys_schema.go | 11 +
 .../core/staticembed/default_sys_schema.sql | 63 +
 .../pkg/core/staticembed/external.sh | 15 +
 .../pkg/core/staticembed/staticembed.go | 2 +
 .../pkg/core/staticembed/sysinit_mysql.go | 19 +
 .../pkg/core/staticembed/sysinit_mysql.sh | 84 +
 .../db-tools/dbactuator/pkg/native/db.go | 146 +
 .../pkg/native/db_benchmark_test.go | 18 +
 .../db-tools/dbactuator/pkg/native/db_test.go | 217 +
 .../dbactuator/pkg/native/dbworker.go | 861 +
 .../db-tools/dbactuator/pkg/native/proxy.go | 123 +
 .../dbactuator/pkg/native/proxy_test.go | 57 +
 .../db-tools/dbactuator/pkg/native/spider.go | 12 +
 .../db-tools/dbactuator/pkg/native/types.go | 165 +
 .../dbactuator/pkg/rollback/rollback.go | 232 +
 .../dbactuator/pkg/rollback/rollback_test.go | 85 +
 .../db-tools/dbactuator/pkg/tools/impls.go | 38 +
 .../db-tools/dbactuator/pkg/tools/init.go | 147 +
 .../db-tools/dbactuator/pkg/tools/tools.go | 2 +
 .../db-tools/dbactuator/pkg/util/auth/auth.go | 2 +
 .../dbactuator/pkg/util/auth/jwt_token.go | 21 +
 .../dbactuator/pkg/util/bkrepo/bkrepo.go | 296 +
 .../dbactuator/pkg/util/bkrepo/bkrepo_test.go | 20 +
 .../util/db_table_filter/db_table_filter.go | 263 +
 .../db_table_filter/db_table_filter_test.go | 21 +
 .../util/db_table_filter/mydumper_regex.go | 67 +
 .../pkg/util/db_table_filter/tools.go | 87 +
 .../db-tools/dbactuator/pkg/util/dbcnf.go | 614 +
 .../dbactuator/pkg/util/dbcnf_test.go | 24 +
 .../db-tools/dbactuator/pkg/util/filelock.go | 199 +
 .../db-tools/dbactuator/pkg/util/helpers.go | 52 +
 .../dbactuator/pkg/util/httpclient/client.go | 73 +
 .../pkg/util/httpclient/httpclient.go | 130 +
 .../db-tools/dbactuator/pkg/util/logger.go | 10 +
 .../pkg/util/mysqlutil/change_master.go | 100 +
 .../pkg/util/mysqlutil/change_master_test.go | 23 +
 .../pkg/util/mysqlutil/hide_passowrd.go | 76 +
 .../pkg/util/mysqlutil/mysql_cnf.go | 23 +
 .../pkg/util/mysqlutil/mysql_cnf_test.go | 84 +
 .../pkg/util/mysqlutil/mysql_dumper.go | 280 +
 .../pkg/util/mysqlutil/mysql_dumper_test.go | 61 +
 .../dbactuator/pkg/util/mysqlutil/mysql_os.go | 249 +
 .../pkg/util/mysqlutil/mysql_os_test.go | 20 +
 .../pkg/util/mysqlutil/mysqlclient_exec.go | 192 +
 .../util/mysqlutil/mysqlclient_exec_test.go | 66 +
 .../pkg/util/mysqlutil/mysqlutil.go | 2 +
 .../pkg/util/mysqlutil/sql_builder.go | 67 +
 .../dbactuator/pkg/util/osutil/cmdexec.go | 169 +
 .../pkg/util/osutil/cmdexec_test.go | 15 +
 .../dbactuator/pkg/util/osutil/crontab.go | 246 +
 .../pkg/util/osutil/crontab_test.go | 6 +
 .../dbactuator/pkg/util/osutil/mountpoint.go | 72 +
 .../dbactuator/pkg/util/osutil/netutil.go | 57 +
 .../dbactuator/pkg/util/osutil/osutil.go | 670 +
 .../dbactuator/pkg/util/osutil/osutil_test.go | 26 +
 .../dbactuator/pkg/util/osutil/sysctl.go | 59 +
 .../dbactuator/pkg/util/osutil/unix_only.go | 241 +
 .../pkg/util/osutil/windows_only.go | 16 +
 .../dbactuator/pkg/util/proxyutil/proxy.go | 160 +
 .../pkg/util/proxyutil/proxy_cnf.go | 52 +
 .../pkg/util/proxyutil/proxy_cnf_test.go | 35 +
 .../pkg/util/proxyutil/proxyutil.go | 2 +
 .../db-tools/dbactuator/pkg/util/sftp/init.go | 77 +
 .../db-tools/dbactuator/pkg/util/sftp/sftp.go | 203 +
 .../dbactuator/pkg/util/sftp/sftp_test.go | 18 +
 .../db-tools/dbactuator/pkg/util/slice.go | 273 +
 .../mysql/db-tools/dbactuator/pkg/util/str.go | 45 +
 .../pkg/util/templates/cmd_groups.go | 21 +
 .../pkg/util/templates/normallizers.go | 32 +
 .../pkg/util/templates/templates.go | 2 +
 .../db-tools/dbactuator/pkg/util/util.go | 335 +
 .../dbactuator/pkg/util/xmlutil/xml.go | 60 +
 .../dbactuator/pkg/util/xmlutil/xmlutil.go | 2 +
 .../mysql/db-tools/mysql-crond/.ci/codecc.yml | 29 +
 .../mysql-crond/.ci/open_source_check.yml | 84 +
 .../.ci/templates/open_source_gate.yml | 26 +
 .../mysql/db-tools/mysql-crond/.gitignore | 5 +
 .../mysql/db-tools/mysql-crond/.golangci.yml | 121 +
 .../mysql/db-tools/mysql-crond/Makefile | 25 +
 .../mysql/db-tools/mysql-crond/README.md | 364 +
 .../mysql/db-tools/mysql-crond/api/api.go | 20 +
 .../mysql-crond/api/create_or_replace.go | 43 +
 .../mysql/db-tools/mysql-crond/api/delete.go | 33 +
 .../mysql/db-tools/mysql-crond/api/disable.go | 33 +
 .../mysql/db-tools/mysql-crond/api/do.go | 53 +
 .../mysql/db-tools/mysql-crond/api/entries.go | 59 +
 .../db-tools/mysql-crond/api/jobs_config.go | 25 +
 .../mysql/db-tools/mysql-crond/api/pause.go | 34 +
 .../mysql/db-tools/mysql-crond/api/quit.go | 12 +
 .../mysql/db-tools/mysql-crond/api/reload.go | 12 +
 .../mysql/db-tools/mysql-crond/api/resume.go | 33 +
 .../db-tools/mysql-crond/api/send_event.go | 24 +
 .../db-tools/mysql-crond/api/send_metrics.go | 24 +
 .../mysql/db-tools/mysql-crond/cmd/cmd.go | 2 +
 .../mysql/db-tools/mysql-crond/cmd/init.go | 22 +
 .../mysql/db-tools/mysql-crond/cmd/root.go | 154 +
 .../db-tools/mysql-crond/cmd/subcmd_list.go | 66 +
 .../mysql-crond/cmd/subcmd_version.go | 42 +
 .../mysql/db-tools/mysql-crond/go.mod | 55 +
 .../mysql/db-tools/mysql-crond/go.sum | 559 +
 .../db-tools/mysql-crond/jobs-config.yaml | 14 +
 .../mysql/db-tools/mysql-crond/main.go | 7 +
 .../mysql-crond/mysql-crond.conf.go.tpl | 27 +
 .../db-tools/mysql-crond/mysql-crond.conf.tpl | 27 +
 .../pkg/config/bk_monitor_beat_config.go | 22 +
 .../db-tools/mysql-crond/pkg/config/config.go | 91 +
 .../mysql-crond/pkg/config/job_config.go | 166 +
 .../mysql-crond/pkg/config/log_config.go | 10 +
 .../mysql-crond/pkg/config/runtime_config.go | 12 +
 .../pkg/config/send_bk_monitor_beat.go | 186 +
 .../db-tools/mysql-crond/pkg/config/sync.go | 133 +
 .../db-tools/mysql-crond/pkg/crond/crond.go | 120 +
 .../db-tools/mysql-crond/pkg/crond/error.go | 9 +
 .../mysql-crond/pkg/crond/find_entry.go | 20 +
 .../db-tools/mysql-crond/pkg/crond/job_add.go | 71 +
 .../mysql-crond/pkg/crond/job_delete.go | 65 +
 .../mysql-crond/pkg/crond/job_disable.go | 49 +
 .../mysql-crond/pkg/crond/job_list.go | 29 +
 .../mysql-crond/pkg/crond/job_pause.go | 33 +
 .../mysql-crond/pkg/crond/job_replace.go | 26 +
 .../mysql-crond/pkg/crond/job_resume.go | 47 +
 .../mysql-crond/pkg/schedule/once_schedule.go | 31 +
 .../mysql-crond/pkg/schedule/schedule.go | 2 +
 .../mysql-crond/pkg/service/service.go | 291 +
 .../mysql/db-tools/mysql-crond/project.yaml | 25 +
 .../mysql/db-tools/mysql-crond/run_local.sh | 6 +
 .../db-tools/mysql-crond/runtime-local.yaml | 27 +
 .../mysql/db-tools/mysql-crond/runtime.yaml | 27 +
 .../mysql/db-tools/mysql-crond/start.sh | 2 +
 .../db-tools/mysql-monitor/.ci/codecc.yml | 29 +
 .../mysql-monitor/.ci/open_source_check.yml | 84 +
 .../.ci/templates/open_source_gate.yml | 26 +
 .../mysql/db-tools/mysql-monitor/.gitignore | 4 +
 .../mysql/db-tools/mysql-monitor/Makefile | 30 +
 .../mysql/db-tools/mysql-monitor/README.md | 186 +
 .../mysql/db-tools/mysql-monitor/cmd/cmd.go | 2 +
 .../mysql/db-tools/mysql-monitor/cmd/init.go | 75 +
 .../mysql/db-tools/mysql-monitor/cmd/root.go | 27 +
 .../mysql-monitor/cmd/subcmd_clean.go | 63 +
 .../mysql-monitor/cmd/subcmd_hardcode_run.go | 48 +
 .../db-tools/mysql-monitor/cmd/subcmd_list.go | 23 +
 .../mysql-monitor/cmd/subcmd_reschedule.go | 179 +
 .../db-tools/mysql-monitor/cmd/subcmd_run.go | 48 +
 .../mysql-monitor/cmd/subcmd_version.go | 42 +
 .../mysql/db-tools/mysql-monitor/config.yaml | 20 +
 .../db-tools/mysql-monitor/config.yaml.go.tpl | 20 +
 .../mysql/db-tools/mysql-monitor/go.mod | 43 +
 .../mysql/db-tools/mysql-monitor/go.sum | 525 +
 .../mysql-monitor/items-config.tpl.yaml | 92 +
 .../db-tools/mysql-monitor/items-config.yaml | 48 +
 .../mysql/db-tools/mysql-monitor/main.go | 7 +
 .../mysql-monitor/pkg/config/config.go | 2 +
 .../db-tools/mysql-monitor/pkg/config/init.go | 150 +
 .../mysql-monitor/pkg/config/items_config.go | 39 +
 .../mysql-monitor/pkg/config/log_config.go | 10 +
 .../pkg/config/monitor_config.go | 35 +
 .../mysql-monitor/pkg/internal/cst/const.go | 8 +
 .../mysql-monitor/pkg/internal/cst/cst.go | 2 +
 .../character_consistency.go | 2 +
 .../character_consistency/checker.go | 71 +
 .../items_collect/definer/check_definer.go | 42 +
 .../pkg/items_collect/definer/definer.go | 2 +
 .../pkg/items_collect/definer/init.go | 99 +
 .../pkg/items_collect/definer/routine.go | 49 +
 .../pkg/items_collect/definer/trigger.go | 48 +
 .../definer/user_list_snap_shot.go | 25 +
 .../pkg/items_collect/definer/view.go | 48 +
 .../pkg/items_collect/engine/engine.go | 2 +
 .../pkg/items_collect/engine/hyper_engine.go | 17 +
 .../pkg/items_collect/engine/init.go | 86 +
 .../pkg/items_collect/engine/myisam.go | 19 +
 .../items_collect/ext3_check/ext3_check.go | 2 +
 .../items_collect/ext3_check/filter_dir_fs.go | 54 +
 .../ext3_check/find_huge_file.go | 32 +
 .../pkg/items_collect/ext3_check/init.go | 77 +
 .../ext3_check/query_mysql_dirs.go | 64 +
 .../items_collect/ext3_check/unique_dirs.go | 44 +
 .../ibd_statistic/collect_result.go | 60 +
 .../ibd_statistic/ibd_statistic.go | 2 +
 .../pkg/items_collect/ibd_statistic/init.go | 84 +
 .../ibd_statistic/report_metrics.go | 66 +
 .../mysql-monitor/pkg/items_collect/init.go | 73 +
 .../pkg/items_collect/items_collect.go | 2 +
 .../master_slave_heartbeat/heartbeat.go | 152 +
 .../master_slave_heartbeat.go | 2 +
 .../mysql_config_diff/mysql_config_diff.go | 127 +
 .../mysql_connlog/connlog_report.go | 118 +
 .../mysql_connlog/connlog_rotate.go | 129 +
.../mysql_connlog/connlog_size.go | 71 + .../pkg/items_collect/mysql_connlog/init.go | 92 + .../mysql_connlog/mysql_connlog.go | 2 + .../mysql_errlog/errlog_snapshot.go | 172 + .../mysql_errlog/general_scan.go | 42 + .../pkg/items_collect/mysql_errlog/init.go | 191 + .../mysql_errlog/mysql_critical.go | 32 + .../mysql_errlog/mysql_errlog.go | 2 + .../mysql_errlog/mysql_notice.go | 26 + .../mysql_errlog/spider_critical.go | 5 + .../mysql_errlog/spider_notice.go | 5 + .../items_collect/mysql_errlog/spider_warn.go | 5 + .../items_collect/mysql_processlist/init.go | 67 + .../mysql_processlist/mysql_inject.go | 74 + .../mysql_processlist/mysql_lock.go | 114 + .../mysql_processlist/mysql_processlist.go | 2 + .../mysql_processlist/processlist_snapshot.go | 148 + .../proxy_backend/proxy_backend.go | 109 + .../proxy_user_list/proxy_user_list.go | 128 + .../rotate_slowlog/roate_slowlog.go | 142 + .../rotate_slowlog/rotate_slowlog.go | 2 + .../slave_status/ctl_replicate.go | 87 + .../slave_status/slave_status.go | 142 + .../mysql-monitor/pkg/main_loop/main_loop.go | 2 + .../mysql-monitor/pkg/main_loop/monitor.go | 82 + .../connection_collect.go | 152 + .../interface_define.go | 10 + .../monitor_item_interface.go | 2 + .../pkg/utils/send_monitor_event.go | 43 + .../pkg/utils/send_monitor_metrics.go | 48 + .../db-tools/mysql-monitor/pkg/utils/utils.go | 2 + .../db-tools/mysql-monitor/pt-config-diff | 6026 ++++++ .../mysql/db-tools/mysql-monitor/pt-summary | 2776 +++ .../db-tools/mysql-rotatebinlog/Makefile | 27 + .../db-tools/mysql-rotatebinlog/README.md | 99 + .../db-tools/mysql-rotatebinlog/cmd/cmd.go | 2 + .../db-tools/mysql-rotatebinlog/cmd/root.go | 64 + .../mysql-rotatebinlog/config.example.yaml | 60 + .../mysql/db-tools/mysql-rotatebinlog/go.mod | 65 + .../mysql/db-tools/mysql-rotatebinlog/go.sum | 1942 ++ .../mysql/db-tools/mysql-rotatebinlog/main.go | 7 + .../mysql-rotatebinlog/pkg/backup/backup.go | 9 + .../pkg/backup/backup_cos.go | 20 + .../pkg/backup/backup_ibs.go | 104 + .../mysql-rotatebinlog/pkg/backup/init.go | 41 + .../pkg/binlog-parser/parse_binlog_time.go | 289 + .../binlog-parser/parse_binlog_time_test.go | 30 + .../mysql-rotatebinlog/pkg/cst/cst.go | 21 + .../mysql-rotatebinlog/pkg/log/log.go | 2 + .../mysql-rotatebinlog/pkg/log/logger.go | 32 + .../mysql-rotatebinlog/pkg/log/reporter.go | 54 + .../mysql-rotatebinlog/pkg/models/dbmodel.go | 444 + .../mysql-rotatebinlog/pkg/models/migrate.go | 59 + .../migrations/000001_create_table.down.sql | 1 + .../migrations/000001_create_table.up.sql | 22 + .../000002_create_time_interval.down.sql | 1 + .../000002_create_time_interval.up.sql | 6 + .../mysql-rotatebinlog/pkg/models/models.go | 2 + .../mysql-rotatebinlog/pkg/rotate/config.go | 89 + .../mysql-rotatebinlog/pkg/rotate/main.go | 281 + .../mysql-rotatebinlog/pkg/rotate/rotate.go | 366 + .../pkg/rotate/rotate_binlog.go | 210 + .../mysql-rotatebinlog/pkg/util/balance.go | 48 + .../mysql-rotatebinlog/pkg/util/util.go | 149 + .../db-tools/mysql-table-checksum/.gitignore | 4 + .../mysql-table-checksum/.golangci.yml | 57 + .../db-tools/mysql-table-checksum/LICENSE | 0 .../db-tools/mysql-table-checksum/Makefile | 25 + .../db-tools/mysql-table-checksum/README.md | 2 + .../db-tools/mysql-table-checksum/cmd/init.go | 101 + .../db-tools/mysql-table-checksum/cmd/root.go | 26 + .../mysql-table-checksum/cmd/run_checksum.go | 92 + .../mysql-table-checksum/cmd/subcmd_clean.go | 61 + .../mysql-table-checksum/cmd/subcmd_demand.go | 29 + .../cmd/subcmd_general.go | 25 + .../cmd/subcmd_reschedule.go | 
101 + .../cmd/subcmd_version.go | 42 + .../db-tools/mysql-table-checksum/go.mod | 38 + .../db-tools/mysql-table-checksum/go.sum | 512 + .../db-tools/mysql-table-checksum/main.go | 11 + .../mysql-table-checksum.sh.tpl | 25 + .../pkg/checker/checker.go | 295 + .../pkg/checker/command_args.go | 125 + .../pkg/checker/define.go | 52 + .../mysql-table-checksum/pkg/checker/init.go | 146 + .../pkg/checker/move_result.go | 77 + .../pkg/checker/report.go | 116 + .../pkg/checker/run_command.go | 147 + .../pkg/checker/strategy.go | 99 + .../pkg/checker/summary.go | 77 + .../mysql-table-checksum/pkg/checker/utils.go | 15 + .../mysql-table-checksum/pkg/config/config.go | 96 + .../mysql-table-checksum/pkg/config/init.go | 20 + .../pkg/config/log_config.go | 10 + .../mysql-table-checksum/pkg/reporter/init.go | 32 + .../pkg/reporter/report.go | 36 + .../pkg/reporter/reporter.go | 30 + .../mysql-table-checksum/pt-table-checksum | 14202 ++++++++++++++ .../mysql-table-checksum/pt-table-sync | 13106 +++++++++++++ .../slow-query-parser-service/.gitignore | 3 + .../slow-query-parser-service/Dockerfile | 6 + .../mysql/slow-query-parser-service/Makefile | 26 + .../mysql/slow-query-parser-service/README.md | 2 + .../mysql/slow-query-parser-service/go.mod | 41 + .../mysql/slow-query-parser-service/go.sum | 108 + .../mysql/slow-query-parser-service/main.go | 61 + .../pkg/mysql/mysql.go | 2 + .../pkg/mysql/parse.go | 85 + .../pkg/mysql/request.go | 6 + .../pkg/mysql/response.go | 12 + .../pkg/mysql/router.go | 34 + .../pkg/service/service.go | 16 + .../redis/db-tools/dbactuator/.ci/codecc.yml | 32 + .../dbactuator/.ci/open_source_check.yml | 84 + .../.ci/templates/open_source_gate.yml | 26 + .../redis/db-tools/dbactuator/.gitignore | 31 + .../redis/db-tools/dbactuator/LICENSE | 0 .../redis/db-tools/dbactuator/Makefile | 9 + .../redis/db-tools/dbactuator/README.md | 90 + .../redis/db-tools/dbactuator/cmd/root.go | 147 + .../dbactuator/doc/twemproxyredisinstance.txt | 11 + .../example/add_shard_to_cluster.example.md | 24 + .../example/bkdbmon_install.example.md | 93 + .../example/cluster_balancer.example.md | 20 + .../clustermeet_slotsassign.example.md | 80 + .../example/initiate_replicaset.example.md | 35 + .../example/mongo_add_user.example.md | 52 + .../example/mongo_deinstall.example.md | 22 + .../example/mongo_del_user.example.md | 34 + .../example/mongo_execute_script.example.md | 30 + .../example/mongo_process_restart.example.md | 42 + .../example/mongod_install.example.md | 60 + .../example/mongod_replace.example.md | 29 + .../example/mongod_step_down.example.md | 18 + .../example/mongos_install.example.md | 30 + .../example/os_mongo_init.example.md | 16 + .../example/predixy_install.example.md | 41 + .../example/redis_backup.example.md | 25 + .../example/redis_dts_datacheck.example.md | 36 + .../example/redis_dts_datarepaire.example.md | 36 + .../example/redis_install.example.md | 89 + .../example/redis_keysdelete_files.example.md | 35 + .../example/redis_keyspattern.example.md | 35 + .../dbactuator/example/redis_keyspattern.json | 21 + .../redis_keyspattern_delete.example.md | 39 + .../example/redis_migrate_slots.example.json | 16 + .../example/redis_migrate_slots.example.md | 43 + .../example/redis_replicaof.example.md | 34 + .../example/redis_replicaof_batch.example.md | 27 + .../dbactuator/example/sysinit.example.md | 15 + .../example/tendisssd_dr_restore.examle.md | 62 + .../example/twemproxy_install.example.md | 42 + .../dbactuator/example/twemproxy_install.json | 14 + 
.../example/twemproxy_operate.example.md | 21 + dbm-services/redis/db-tools/dbactuator/go.mod | 43 + dbm-services/redis/db-tools/dbactuator/go.sum | 120 + .../imgs/bk-dbactuator-redis_structur.png | Bin 0 -> 162444 bytes .../dbactuator/imgs/tendissd_redo_dr.png | Bin 0 -> 33015 bytes .../redis/db-tools/dbactuator/main.go | 12 + .../dbactuator/models/myredis/client.go | 1681 ++ .../dbactuator/models/myredis/cluster_info.go | 103 + .../models/myredis/cluster_nodes.go | 441 + .../models/myredis/cluster_nodes_test.go | 89 + .../dbactuator/models/myredis/myredis.go | 107 + .../dbactuator/models/myredis/slot.go | 238 + .../myredis/tendisplus_cluster_setslotinfo.go | 202 + .../models/myredis/tendisplus_infoRepl.go | 412 + .../redis/db-tools/dbactuator/mylog/mylog.go | 28 + .../atommongodb/add_shard_to_cluster.go | 247 + .../pkg/atomjobs/atommongodb/add_user.go | 262 + .../pkg/atomjobs/atommongodb/atommongodb.go | 2 + .../atomjobs/atommongodb/cluster_balancer.go | 158 + .../atommongodb/cluster_install_test.go | 888 + .../pkg/atomjobs/atommongodb/del_user.go | 203 + .../atommongodb/initiate_replicaset.go | 278 + .../atomjobs/atommongodb/mongo_deinstall.go | 229 + .../atommongodb/mongo_execute_script.go | 330 + .../atommongodb/mongo_process_restart.go | 398 + .../atommongodb/mongo_set_profiler.go | 185 + .../atomjobs/atommongodb/mongod_install.go | 491 + .../atomjobs/atommongodb/mongod_replace.go | 355 + .../atomjobs/atommongodb/mongos_install.go | 439 + .../atommongodb/replicaset_install_test.go | 338 + .../atommongodb/replicaset_stepdown.go | 129 + .../pkg/atomjobs/atomproxy/atomproxy.go | 2 + .../pkg/atomjobs/atomproxy/predixy_install.go | 394 + .../pkg/atomjobs/atomproxy/predixy_operate.go | 176 + .../atomproxy/twemproxy_check_backends.go | 150 + .../atomjobs/atomproxy/twemproxy_install.go | 632 + .../atomproxy/twemproxy_install_test.go | 13 + .../atomjobs/atomproxy/twemproxy_operate.go | 182 + .../pkg/atomjobs/atomredis/atomredis.go | 2 + .../pkg/atomjobs/atomredis/bkdbmon_install.go | 366 + .../atomredis/clustermeet_slotsassign.go | 334 + .../pkg/atomjobs/atomredis/redis_backup.go | 730 + .../atomjobs/atomredis/redis_dts_datacheck.go | 636 + .../atomredis/redis_dts_datarepaire.go | 92 + .../atomjobs/atomredis/redis_flush_data.go | 387 + .../pkg/atomjobs/atomredis/redis_install.go | 545 + .../atomredis/redis_keysdelete_files.go | 441 + .../atomjobs/atomredis/redis_keyspattern.go | 1454 ++ .../atomredis/redis_keyspattern_delete.go | 22 + .../atomjobs/atomredis/redis_migrate_slots.go | 1210 ++ .../pkg/atomjobs/atomredis/redis_replicaof.go | 425 + .../atomredis/redis_replicaof_batch.go | 123 + .../atomredis/redis_scene_kill_conn.go | 137 + .../atomredis/redis_scene_param_sync.go | 126 + .../atomredis/redis_scene_sync_check.go | 136 + .../pkg/atomjobs/atomredis/redis_shutdown.go | 265 + .../pkg/atomjobs/atomredis/redis_switch.go | 774 + .../atomredis/tendisssd_dr_restore.go | 598 + .../pkg/atomjobs/atomsys/atomsys.go | 2 + .../pkg/atomjobs/atomsys/os_mongo_init.go | 122 + .../pkg/atomjobs/atomsys/redis_capturer.go | 209 + .../pkg/atomjobs/atomsys/sysinit.go | 101 + .../pkg/atomjobs/atomsys/sysinit_mysql.sh | 122 + .../dbactuator/pkg/backupsys/backupsys.go | 230 + .../db-tools/dbactuator/pkg/common/common.go | 42 + .../dbactuator/pkg/common/exporter_conf.go | 46 + .../dbactuator/pkg/common/filelock.go | 34 + .../pkg/common/initiate_replicaset_conf.go | 37 + .../dbactuator/pkg/common/media_pkg.go | 129 + .../dbactuator/pkg/common/mongo_common.go | 592 + .../dbactuator/pkg/common/mongo_init_shell.go | 
165 + .../dbactuator/pkg/common/mongo_user_conf.go | 35 + .../dbactuator/pkg/common/mongod_conf.go | 87 + .../dbactuator/pkg/common/mongos_conf.go | 46 + .../dbactuator/pkg/common/predixy_conf.go | 38 + .../pkg/common/repliccaset_member_conf.go | 24 + .../dbactuator/pkg/common/twemproxy_conf.go | 145 + .../db-tools/dbactuator/pkg/consts/consts.go | 277 + .../dbactuator/pkg/consts/data_dir.go | 325 + .../db-tools/dbactuator/pkg/consts/dts.go | 25 + .../db-tools/dbactuator/pkg/consts/test.go | 94 + .../db-tools/dbactuator/pkg/consts/user.go | 80 + .../dbactuator/pkg/customtime/customtime.go | 76 + .../dbactuator/pkg/jobmanager/jobmanager.go | 195 + .../dbactuator/pkg/jobruntime/jobrunner.go | 19 + .../dbactuator/pkg/jobruntime/jobruntime.go | 158 + .../dbactuator/pkg/report/filereport.go | 100 + .../dbactuator/pkg/report/reporter.go | 57 + .../db-tools/dbactuator/pkg/util/bkrepo.go | 101 + .../db-tools/dbactuator/pkg/util/compress.go | 205 + .../db-tools/dbactuator/pkg/util/file.go | 66 + .../redis/db-tools/dbactuator/pkg/util/net.go | 67 + .../db-tools/dbactuator/pkg/util/osCmd.go | 106 + .../dbactuator/pkg/util/proxy_tools.go | 103 + .../db-tools/dbactuator/pkg/util/redisutil.go | 102 + .../db-tools/dbactuator/pkg/util/reflect.go | 20 + .../db-tools/dbactuator/pkg/util/util.go | 269 + .../db-tools/dbactuator/pkg/util/version.go | 118 + .../db-tools/dbactuator/scripts/upload.sh | 182 + .../tests/clustertest/clustertest.go | 2 + .../tests/clustertest/predixy_cluster.go | 208 + .../tests/clustertest/predixy_switch.go | 67 + .../tests/clustertest/twemproxy_cluster.go | 240 + .../tests/clustertest/twemproxy_swtich.go | 255 + .../tests/proxytest/proxy_install.go | 442 + .../tests/proxytest/proxy_operate.go | 226 + .../dbactuator/tests/proxytest/proxytest.go | 55 + .../tests/redistest/bkdbmon_install.go | 265 + .../dbactuator/tests/redistest/commands.go | 263 + .../tests/redistest/redis_backup.go | 165 + .../tests/redistest/redis_cluster.go | 161 + .../tests/redistest/redis_dts_datacheck.go | 247 + .../tests/redistest/redis_dts_datarepaire.go | 78 + .../tests/redistest/redis_flushdata.go | 143 + .../tests/redistest/redis_install.go | 790 + .../tests/redistest/redis_keysdelete_files.go | 227 + .../tests/redistest/redis_keyspattern.go | 301 + .../tests/redistest/redis_migrate_slots.go | 196 + .../tests/redistest/redis_replicaof.go | 173 + .../dbactuator/tests/redistest/redis_scene.go | 130 + .../tests/redistest/redis_shutdown.go | 122 + .../tests/redistest/redis_switch.go | 127 + .../dbactuator/tests/redistest/redistest.go | 45 + .../tests/redistest/tendisssd_dr_restore.go | 185 + .../dbactuator/tests/systest/sysinit.go | 49 + .../redis/db-tools/dbactuator/tests/test.go | 318 + .../redis/db-tools/dbactuator/tests/test.sh | 244 + .../db-tools/dbactuator/tests/test_mongo.sh | 84 + .../redis/db-tools/dbmon/.ci/codecc.yml | 29 + .../db-tools/dbmon/.ci/open_source_check.yml | 84 + .../dbmon/.ci/templates/open_source_gate.yml | 26 + dbm-services/redis/db-tools/dbmon/.gitignore | 8 + dbm-services/redis/db-tools/dbmon/LICENSE | 0 dbm-services/redis/db-tools/dbmon/Makefile | 24 + dbm-services/redis/db-tools/dbmon/README.md | 47 + dbm-services/redis/db-tools/dbmon/cmd/root.go | 180 + .../redis/db-tools/dbmon/config/config.go | 145 + .../redis/db-tools/dbmon/config/instconfig.go | 40 + .../redis/db-tools/dbmon/config/keystat.go | 28 + .../redis/db-tools/dbmon/dbmon-config.yaml | 39 + .../db-tools/dbmon/embedfiles/embedfiles.go | 9 + .../db-tools/dbmon/embedfiles/js/login.js | 39 + 
dbm-services/redis/db-tools/dbmon/go.mod | 87 + dbm-services/redis/db-tools/dbmon/go.sum | 655 + .../dbmon/imgs/bk-dbmon-structurer.png | Bin 0 -> 81910 bytes dbm-services/redis/db-tools/dbmon/main.go | 12 + .../db-tools/dbmon/models/mymongo/mymongo.go | 46 + .../db-tools/dbmon/models/myredis/client.go | 1577 ++ .../dbmon/models/myredis/cluster_info.go | 103 + .../dbmon/models/myredis/cluster_nodes.go | 417 + .../models/myredis/cluster_nodes_test.go | 87 + .../db-tools/dbmon/models/myredis/myredis.go | 124 + .../myredis/tendisplus_cluster_setslotinfo.go | 202 + .../models/myredis/tendisplus_infoRepl.go | 445 + .../redis/db-tools/dbmon/mylog/gin.go | 86 + .../redis/db-tools/dbmon/mylog/mylog.go | 150 + dbm-services/redis/db-tools/dbmon/package.sh | 29 + .../db-tools/dbmon/pkg/backupsys/backupsys.go | 230 + .../redis/db-tools/dbmon/pkg/consts/consts.go | 249 + .../db-tools/dbmon/pkg/consts/data_dir.go | 181 + .../dbmon/pkg/consts/event_categories.go | 43 + .../redis/db-tools/dbmon/pkg/consts/mongo.go | 47 + .../dbmon/pkg/customtime/customtime.go | 76 + .../db-tools/dbmon/pkg/httpapi/httpapi.go | 40 + .../db-tools/dbmon/pkg/kafka/crypto_base.go | 43 + .../redis/db-tools/dbmon/pkg/kafka/kafka.go | 2 + .../db-tools/dbmon/pkg/kafka/kafka_client.go | 67 + .../db-tools/dbmon/pkg/keylifecycle/ctl.go | 122 + .../db-tools/dbmon/pkg/keylifecycle/job.go | 147 + .../db-tools/dbmon/pkg/keylifecycle/task.go | 380 + .../db-tools/dbmon/pkg/mongojob/backup_job.go | 126 + .../dbmon/pkg/mongojob/backup_task.go | 74 + .../dbmon/pkg/mongojob/check_service_job.go | 182 + .../redis/db-tools/dbmon/pkg/mongojob/cmd.go | 65 + .../db-tools/dbmon/pkg/mongojob/mongojob.go | 2 + .../redis/db-tools/dbmon/pkg/mongojob/msg.go | 39 + .../dbmon/pkg/redisbinlogbackup/job.go | 342 + .../redisbinlogbackup/redisbinlogbackup.go | 2 + .../dbmon/pkg/redisbinlogbackup/task.go | 418 + .../db-tools/dbmon/pkg/redisfullbackup/job.go | 385 + .../dbmon/pkg/redisfullbackup/task.go | 543 + .../db-tools/dbmon/pkg/redisheartbeat/job.go | 84 + .../db-tools/dbmon/pkg/redisheartbeat/task.go | 207 + .../dbmon/pkg/redismonitor/base_task.go | 39 + .../db-tools/dbmon/pkg/redismonitor/job.go | 99 + .../dbmon/pkg/redismonitor/predixy_task.go | 97 + .../dbmon/pkg/redismonitor/redis_task.go | 429 + .../dbmon/pkg/redismonitor/redismonitor.go | 2 + .../dbmon/pkg/redismonitor/twemproxy_task.go | 98 + .../dbmon/pkg/report/clear_history.go | 49 + .../db-tools/dbmon/pkg/report/filereport.go | 99 + .../redis/db-tools/dbmon/pkg/report/report.go | 2 + .../db-tools/dbmon/pkg/report/reporter.go | 56 + .../dbmon/pkg/sendwarning/bkmonitorbeat.go | 281 + .../dbmon/pkg/sendwarning/sendwarning.go | 2 + dbm-services/redis/db-tools/dbmon/start.sh | 60 + dbm-services/redis/db-tools/dbmon/stop.sh | 40 + .../redis/db-tools/dbmon/util/cmd_builder.go | 66 + .../redis/db-tools/dbmon/util/compress.go | 163 + .../redis/db-tools/dbmon/util/osCmd.go | 160 + .../redis/db-tools/dbmon/util/reflect.go | 20 + .../redis/db-tools/dbmon/util/util.go | 316 + .../redis/db-tools/dbmon/util/version.go | 118 + dbm-services/redis/redis-dts/.gitignore | 9 + dbm-services/redis/redis-dts/Makefile | 11 + dbm-services/redis/redis-dts/README.md | 16 + .../redis/redis-dts/bin/config-template.yaml | 38 + .../redis-dts/bin/redis-shake-template.conf | 227 + dbm-services/redis/redis-dts/bin/start.sh | 69 + dbm-services/redis/redis-dts/bin/stop.sh | 43 + .../bin/tendisplus-sync-template.conf | 22 + .../bin/tendisssd-sync-template.conf | 36 + dbm-services/redis/redis-dts/config/config.go | 28 + 
dbm-services/redis/redis-dts/go.mod | 54 + dbm-services/redis/redis-dts/go.sum | 585 + ...s\346\236\266\346\236\204\345\233\276.png" | Bin 0 -> 153281 bytes dbm-services/redis/redis-dts/main.go | 60 + .../redis/redis-dts/models/myredis/myredis.go | 403 + .../models/myredis/tendisplus_infoRepl.go | 302 + .../redis/redis-dts/models/mysql/init.go | 96 + .../redis/redis-dts/models/mysql/mysql.go | 2 + .../redis-dts/models/mysql/tendisdb/job.go | 88 + .../redis-dts/models/mysql/tendisdb/task.go | 591 + .../models/mysql/tendisdb/tendisdb.go | 2 + .../redis/redis-dts/pkg/constvar/constvar.go | 234 + .../redis/redis-dts/pkg/constvar/methods.go | 67 + .../redis-dts/pkg/customtime/customtime.go | 76 + .../redis/redis-dts/pkg/dtsJob/base.go | 312 + .../redis/redis-dts/pkg/dtsJob/dtsJob.go | 2 + .../redis-dts/pkg/dtsJob/redisCacheDtsJob.go | 220 + .../redis-dts/pkg/dtsJob/tendisSSDDtsJob.go | 239 + .../redis-dts/pkg/dtsJob/tendisplusDtsJob.go | 282 + .../redis/redis-dts/pkg/dtsTask/dtsTask.go | 2 + .../redis-dts/pkg/dtsTask/factory/factory.go | 46 + .../redis/redis-dts/pkg/dtsTask/init.go | 670 + .../pkg/dtsTask/rediscache/makeCacheSync.go | 746 + .../pkg/dtsTask/rediscache/rediscache.go | 2 + .../pkg/dtsTask/rediscache/watchCacheSync.go | 72 + .../redis-dts/pkg/dtsTask/saveSyncSeq.go | 196 + .../pkg/dtsTask/tendisplus/makeSync.go | 759 + .../pkg/dtsTask/tendisplus/tendisplus.go | 2 + .../pkg/dtsTask/tendisplus/watchSync.go | 60 + .../pkg/dtsTask/tendisssd/backupFileFetch.go | 118 + .../pkg/dtsTask/tendisssd/cmdsImporter.go | 810 + .../pkg/dtsTask/tendisssd/makeSync.go | 1015 + .../pkg/dtsTask/tendisssd/tendisBackup.go | 303 + .../pkg/dtsTask/tendisssd/tendisdump.go | 239 + .../pkg/dtsTask/tendisssd/tendisssd.go | 2 + .../pkg/dtsTask/tendisssd/watchOldSync.go | 76 + .../redis/redis-dts/pkg/osPerf/osPerf.go | 130 + .../redis-dts/pkg/remoteOperation/abs.go | 116 + .../redis-dts/pkg/remoteOperation/init.go | 7 + .../redis-dts/pkg/remoteOperation/ssh.go | 358 + .../redis-dts/pkg/scrdbclient/dtsRemote.go | 117 + .../redis-dts/pkg/scrdbclient/fileService.go | 21 + .../pkg/scrdbclient/jobapiRequest.go | 336 + .../redis-dts/pkg/scrdbclient/jobapiSchema.go | 203 + .../redis-dts/pkg/scrdbclient/scrdbclient.go | 273 + dbm-services/redis/redis-dts/tclog/tclog.go | 117 + .../redis/redis-dts/util/httpReqNew.go | 165 + dbm-services/redis/redis-dts/util/osCmd.go | 71 + .../redis/redis-dts/util/redis_util.go | 80 + dbm-services/redis/redis-dts/util/util.go | 289 + dbm-ui/.coveragerc | 2 + dbm-ui/.gitignore | 136 + dbm-ui/.pylintrc | 46 + dbm-ui/DBM_README.md | 224 + dbm-ui/Dockerfile | 85 + dbm-ui/backend/.flake8 | 19 + dbm-ui/backend/__init__.py | 18 + dbm-ui/backend/admin.py | 10 + dbm-ui/backend/asgi.py | 18 + dbm-ui/backend/bk_dataview/README.md | 256 + dbm-ui/backend/bk_dataview/__init__.py | 11 + dbm-ui/backend/bk_dataview/bkdbm.ini | 69 + .../backend/bk_dataview/dashboards/.gitkeep | 0 .../backend/bk_dataview/dashboards/dbm.yaml | 8 + .../bk_dataview/dashboards/json/es.json | 15373 ++++++++++++++++ .../bk_dataview/dashboards/json/hdfs.json | 8154 ++++++++ .../bk_dataview/dashboards/json/influxdb.json | 2299 +++ .../bk_dataview/dashboards/json/kafka.json | 5623 ++++++ .../bk_dataview/dashboards/json/pulsar.json | 11713 ++++++++++++ .../bk_dataview/dashboards/json/tendbha.json | 7822 ++++++++ .../dashboards/json/tendbsingle.json | 7387 ++++++++ .../dashboards/json/tendiscache.json | 9101 +++++++++ .../dashboards/json/tendisplus.json | 9421 ++++++++++ .../dashboards/json/tendisssd.json | 10145 ++++++++++ 
.../backend/bk_dataview/dashboards/readme.md | 19 + .../backend/bk_dataview/datasources/.gitkeep | 0 .../bk_dataview/datasources/__init__.py | 10 + .../datasources/bk_monitor_datasource.yaml | 37 + .../backend/bk_dataview/grafana/__init__.py | 13 + dbm-ui/backend/bk_dataview/grafana/apps.py | 19 + .../bk_dataview/grafana/authentication.py | 37 + .../bk_dataview/grafana/backends/__init__.py | 28 + .../bk_dataview/grafana/backends/api.py | 62 + .../bk_dataview/grafana/backends/db.py | 53 + dbm-ui/backend/bk_dataview/grafana/client.py | 138 + dbm-ui/backend/bk_dataview/grafana/models.py | 134 + .../bk_dataview/grafana/permissions.py | 57 + .../bk_dataview/grafana/provisioning.py | 106 + dbm-ui/backend/bk_dataview/grafana/router.py | 35 + .../backend/bk_dataview/grafana/settings.py | 73 + dbm-ui/backend/bk_dataview/grafana/urls.py | 20 + dbm-ui/backend/bk_dataview/grafana/utils.py | 75 + dbm-ui/backend/bk_dataview/grafana/views.py | 395 + dbm-ui/backend/bk_web/__init__.py | 10 + dbm-ui/backend/bk_web/constants.py | 46 + dbm-ui/backend/bk_web/handlers.py | 126 + dbm-ui/backend/bk_web/middleware.py | 67 + dbm-ui/backend/bk_web/models.py | 26 + dbm-ui/backend/bk_web/pagination.py | 27 + dbm-ui/backend/bk_web/renderers.py | 78 + dbm-ui/backend/bk_web/serializers.py | 44 + dbm-ui/backend/bk_web/swagger.py | 115 + dbm-ui/backend/bk_web/viewsets.py | 138 + dbm-ui/backend/components/__init__.py | 58 + dbm-ui/backend/components/base.py | 502 + dbm-ui/backend/components/bk.py | 101 + dbm-ui/backend/components/bklog/__init__.py | 10 + dbm-ui/backend/components/bklog/client.py | 61 + .../components/bkmonitorv3/__init__.py | 10 + .../backend/components/bkmonitorv3/client.py | 126 + dbm-ui/backend/components/cc/__init__.py | 10 + dbm-ui/backend/components/cc/client.py | 257 + dbm-ui/backend/components/cmsi/__init__.py | 10 + dbm-ui/backend/components/cmsi/client.py | 31 + dbm-ui/backend/components/constants.py | 23 + .../components/db_name_service/__init__.py | 10 + .../components/db_name_service/client.py | 101 + .../components/db_remote_service/__init__.py | 10 + .../components/db_remote_service/client.py | 68 + .../backend/components/dbconfig/__init__.py | 10 + dbm-ui/backend/components/dbconfig/client.py | 111 + .../backend/components/dbconfig/constants.py | 65 + .../backend/components/dbresource/__init__.py | 10 + .../backend/components/dbresource/client.py | 101 + dbm-ui/backend/components/domains.py | 45 + dbm-ui/backend/components/exception.py | 26 + dbm-ui/backend/components/gcs_dns/__init__.py | 10 + dbm-ui/backend/components/gcs_dns/client.py | 66 + dbm-ui/backend/components/gse/__init__.py | 10 + dbm-ui/backend/components/gse/client.py | 31 + dbm-ui/backend/components/hadb/__init__.py | 10 + dbm-ui/backend/components/hadb/client.py | 59 + dbm-ui/backend/components/itsm/__init__.py | 10 + dbm-ui/backend/components/itsm/client.py | 68 + dbm-ui/backend/components/itsm/constants.py | 21 + dbm-ui/backend/components/job/__init__.py | 10 + dbm-ui/backend/components/job/client.py | 80 + .../components/mysql_backup/__init__.py | 0 .../backend/components/mysql_backup/client.py | 46 + .../components/mysql_partition/__init__.py | 1 + .../components/mysql_partition/client.py | 86 + .../components/mysql_priv_manager/__init__.py | 10 + .../components/mysql_priv_manager/client.py | 150 + dbm-ui/backend/components/proxy_api.py | 41 + dbm-ui/backend/components/sops/__init__.py | 10 + dbm-ui/backend/components/sops/client.py | 65 + .../backend/components/sql_import/__init__.py | 10 + 
.../backend/components/sql_import/client.py | 63 + .../backend/components/usermanage/__init__.py | 10 + .../backend/components/usermanage/client.py | 39 + dbm-ui/backend/components/utils/__init__.py | 10 + dbm-ui/backend/components/utils/handlers.py | 24 + dbm-ui/backend/components/utils/params.py | 114 + dbm-ui/backend/configuration/__init__.py | 10 + dbm-ui/backend/configuration/admin.py | 42 + dbm-ui/backend/configuration/apps.py | 38 + dbm-ui/backend/configuration/constants.py | 100 + .../configuration/migrations/0001_initial.py | 119 + .../configuration/migrations/__init__.py | 10 + dbm-ui/backend/configuration/mock_data.py | 32 + .../backend/configuration/models/__init__.py | 14 + dbm-ui/backend/configuration/models/dba.py | 63 + .../configuration/models/ip_whitelist.py | 69 + .../configuration/models/password_policy.py | 41 + .../backend/configuration/models/profile.py | 24 + dbm-ui/backend/configuration/models/system.py | 94 + dbm-ui/backend/configuration/serializers.py | 94 + dbm-ui/backend/configuration/urls.py | 28 + .../backend/configuration/views/__init__.py | 10 + dbm-ui/backend/configuration/views/dba.py | 50 + .../configuration/views/ip_whitelist.py | 111 + .../configuration/views/password_policy.py | 52 + dbm-ui/backend/configuration/views/profile.py | 51 + dbm-ui/backend/configuration/views/system.py | 71 + dbm-ui/backend/constants.py | 59 + dbm-ui/backend/core/__init__.py | 10 + dbm-ui/backend/core/consts.py | 31 + dbm-ui/backend/core/encrypt/__init__.py | 10 + dbm-ui/backend/core/encrypt/aes.py | 55 + dbm-ui/backend/core/encrypt/apps.py | 17 + dbm-ui/backend/core/encrypt/constants.py | 56 + dbm-ui/backend/core/encrypt/exceptions.py | 23 + dbm-ui/backend/core/encrypt/handlers.py | 181 + .../core/encrypt/migrations/0001_initial.py | 45 + .../migrations/0002_auto_20220830_1635.py | 32 + .../migrations/0003_alter_rsakey_name.py | 30 + .../migrations/0004_alter_rsakey_name.py | 32 + .../core/encrypt/migrations/__init__.py | 10 + dbm-ui/backend/core/encrypt/models.py | 32 + dbm-ui/backend/core/encrypt/rsa.py | 194 + dbm-ui/backend/core/encrypt/serializers.py | 24 + dbm-ui/backend/core/encrypt/views.py | 34 + dbm-ui/backend/core/exceptions.py | 16 + dbm-ui/backend/core/storages/__init__.py | 10 + dbm-ui/backend/core/storages/admin.py | 27 + dbm-ui/backend/core/storages/apps.py | 17 + dbm-ui/backend/core/storages/base.py | 226 + dbm-ui/backend/core/storages/constants.py | 86 + dbm-ui/backend/core/storages/exceptions.py | 39 + dbm-ui/backend/core/storages/file_source.py | 257 + dbm-ui/backend/core/storages/handlers.py | 82 + .../core/storages/migrations/0001_initial.py | 62 + .../0002_alter_bkjobfilecredential_id.py | 28 + .../core/storages/migrations/__init__.py | 10 + dbm-ui/backend/core/storages/models.py | 63 + dbm-ui/backend/core/storages/serializers.py | 21 + dbm-ui/backend/core/storages/storage.py | 310 + dbm-ui/backend/core/storages/views.py | 42 + dbm-ui/backend/core/translation/__init__.py | 19 + dbm-ui/backend/core/translation/apps.py | 17 + dbm-ui/backend/core/translation/constants.py | 62 + dbm-ui/backend/core/translation/context.py | 93 + dbm-ui/backend/core/translation/exceptions.py | 27 + .../core/translation/language_finder.py | 418 + .../core/translation/management/__init__.py | 10 + .../management/commands/__init__.py | 10 + .../management/commands/language_finder.py | 76 + .../management/commands/translate.py | 33 + dbm-ui/backend/core/translation/translate.py | 124 + dbm-ui/backend/core/urls.py | 22 + dbm-ui/backend/db_event/__init__.py | 11 + 
dbm-ui/backend/db_event/apps.py | 28 + dbm-ui/backend/db_event/constants.py | 10 + .../backend/db_event/management/__init__.py | 10 + .../db_event/management/commands/__init__.py | 10 + .../management/commands/event_list.py | 27 + dbm-ui/backend/db_event/models.py | 10 + dbm-ui/backend/db_event/readme.md | 61 + dbm-ui/backend/db_event/serializers.py | 73 + dbm-ui/backend/db_event/urls.py | 20 + dbm-ui/backend/db_event/views/__init__.py | 10 + dbm-ui/backend/db_event/views/dbha.py | 96 + dbm-ui/backend/db_meta/__init__.py | 11 + dbm-ui/backend/db_meta/admin.py | 137 + dbm-ui/backend/db_meta/api/__init__.py | 24 + .../backend/db_meta/api/cluster/__init__.py | 28 + dbm-ui/backend/db_meta/api/cluster/apis.py | 51 + .../db_meta/api/cluster/base/__init__.py | 10 + .../backend/db_meta/api/cluster/base/graph.py | 268 + .../db_meta/api/cluster/base/handler.py | 55 + .../db_meta/api/cluster/es/__init__.py | 16 + .../backend/db_meta/api/cluster/es/create.py | 132 + .../backend/db_meta/api/cluster/es/destroy.py | 50 + .../backend/db_meta/api/cluster/es/detail.py | 53 + .../backend/db_meta/api/cluster/es/disable.py | 29 + .../backend/db_meta/api/cluster/es/enable.py | 29 + .../db_meta/api/cluster/es/scale_up.py | 94 + .../backend/db_meta/api/cluster/es/shrink.py | 93 + .../db_meta/api/cluster/hdfs/__init__.py | 17 + .../db_meta/api/cluster/hdfs/create.py | 94 + .../db_meta/api/cluster/hdfs/destroy.py | 52 + .../db_meta/api/cluster/hdfs/detail.py | 63 + .../db_meta/api/cluster/hdfs/disable.py | 29 + .../db_meta/api/cluster/hdfs/enable.py | 29 + .../db_meta/api/cluster/hdfs/replace.py | 63 + .../db_meta/api/cluster/hdfs/scale_up.py | 46 + .../db_meta/api/cluster/hdfs/shrink.py | 51 + .../db_meta/api/cluster/influxdb/__init__.py | 15 + .../db_meta/api/cluster/influxdb/create.py | 37 + .../db_meta/api/cluster/influxdb/destroy.py | 55 + .../db_meta/api/cluster/influxdb/disable.py | 31 + .../db_meta/api/cluster/influxdb/enable.py | 31 + .../db_meta/api/cluster/influxdb/replace.py | 67 + .../db_meta/api/cluster/kafka/__init__.py | 17 + .../db_meta/api/cluster/kafka/create.py | 105 + .../db_meta/api/cluster/kafka/destroy.py | 52 + .../db_meta/api/cluster/kafka/detail.py | 43 + .../db_meta/api/cluster/kafka/disable.py | 29 + .../db_meta/api/cluster/kafka/enable.py | 29 + .../db_meta/api/cluster/kafka/replace.py | 73 + .../db_meta/api/cluster/kafka/scale_up.py | 54 + .../db_meta/api/cluster/kafka/shrink.py | 54 + .../api/cluster/mongocluster/__init__.py | 13 + .../api/cluster/mongocluster/create.py | 201 + .../api/cluster/mongocluster/detail.py | 90 + .../api/cluster/mongocluster/handler.py | 84 + .../api/cluster/mongorepset/__init__.py | 12 + .../db_meta/api/cluster/mongorepset/create.py | 171 + .../db_meta/api/cluster/mongorepset/detail.py | 56 + .../api/cluster/mongorepset/handler.py | 75 + .../db_meta/api/cluster/nosqlcomm/__init__.py | 32 + .../db_meta/api/cluster/nosqlcomm/cc_ops.py | 169 + .../api/cluster/nosqlcomm/create_cluster.py | 284 + .../api/cluster/nosqlcomm/create_instances.py | 129 + .../api/cluster/nosqlcomm/decommission.py | 191 + .../api/cluster/nosqlcomm/detail_cluster.py | 97 + .../db_meta/api/cluster/nosqlcomm/other.py | 36 + .../db_meta/api/cluster/nosqlcomm/precheck.py | 86 + .../api/cluster/nosqlcomm/scale_proxy.py | 119 + .../api/cluster/nosqlcomm/scale_tendis.py | 237 + .../db_meta/api/cluster/pulsar/__init__.py | 17 + .../db_meta/api/cluster/pulsar/create.py | 116 + .../db_meta/api/cluster/pulsar/destroy.py | 50 + .../db_meta/api/cluster/pulsar/detail.py | 52 + 
.../db_meta/api/cluster/pulsar/disable.py | 29 + .../db_meta/api/cluster/pulsar/enable.py | 29 + .../db_meta/api/cluster/pulsar/replace.py | 72 + .../db_meta/api/cluster/pulsar/scale_up.py | 59 + .../db_meta/api/cluster/pulsar/shrink.py | 51 + .../api/cluster/tendbcluster/__init__.py | 12 + .../cluster/tendbcluster/create_cluster.py | 171 + .../tendbcluster/create_slave_cluster.py | 79 + .../api/cluster/tendbcluster/decommission.py | 97 + .../api/cluster/tendbcluster/handler.py | 218 + .../db_meta/api/cluster/tendbha/__init__.py | 18 + .../db_meta/api/cluster/tendbha/add_proxy.py | 59 + .../api/cluster/tendbha/create_cluster.py | 180 + .../api/cluster/tendbha/decommission.py | 82 + .../db_meta/api/cluster/tendbha/detail.py | 89 + .../db_meta/api/cluster/tendbha/handler.py | 145 + .../db_meta/api/cluster/tendbha/others.py | 74 + .../api/cluster/tendbha/status_flag.py | 28 + .../api/cluster/tendbha/storage_tuple.py | 37 + .../api/cluster/tendbha/switch_proxy.py | 78 + .../api/cluster/tendbha/switch_slave.py | 69 + .../api/cluster/tendbha/switch_storage.py | 58 + .../api/cluster/tendbsingle/__init__.py | 13 + .../api/cluster/tendbsingle/create_cluster.py | 90 + .../api/cluster/tendbsingle/decommission.py | 49 + .../db_meta/api/cluster/tendbsingle/detail.py | 23 + .../api/cluster/tendbsingle/handler.py | 103 + .../api/cluster/tendiscache/__init__.py | 11 + .../api/cluster/tendiscache/handler.py | 90 + .../api/cluster/tendispluscluster/__init__.py | 21 + .../api/cluster/tendispluscluster/create.py | 171 + .../api/cluster/tendispluscluster/detail.py | 90 + .../api/cluster/tendispluscluster/handler.py | 94 + .../api/cluster/tendissingle/__init__.py | 11 + .../api/cluster/tendissingle/handler.py | 80 + .../api/cluster/tendissingle/single.py | 342 + .../db_meta/api/cluster/tendisssd/__init__.py | 11 + .../db_meta/api/cluster/tendisssd/handler.py | 90 + dbm-ui/backend/db_meta/api/common/__init__.py | 11 + dbm-ui/backend/db_meta/api/common/common.py | 130 + .../backend/db_meta/api/db_module/__init__.py | 11 + dbm-ui/backend/db_meta/api/db_module/apis.py | 218 + dbm-ui/backend/db_meta/api/dbha/__init__.py | 11 + dbm-ui/backend/db_meta/api/dbha/apis.py | 298 + dbm-ui/backend/db_meta/api/entry/__init__.py | 11 + .../backend/db_meta/api/entry/clb/__init__.py | 11 + dbm-ui/backend/db_meta/api/entry/clb/apis.py | 49 + .../db_meta/api/entry/polaris/__init__.py | 11 + .../backend/db_meta/api/entry/polaris/apis.py | 50 + dbm-ui/backend/db_meta/api/fake/__init__.py | 12 + .../backend/db_meta/api/fake/fake_tendbha.py | 254 + .../db_meta/api/fake/fake_tendbsingle.py | 125 + .../backend/db_meta/api/machine/__init__.py | 11 + dbm-ui/backend/db_meta/api/machine/apis.py | 139 + dbm-ui/backend/db_meta/api/meta/__init__.py | 11 + dbm-ui/backend/db_meta/api/meta/apis.py | 103 + .../db_meta/api/priv_manager/__init__.py | 14 + .../db_meta/api/priv_manager/biz_clusters.py | 46 + .../api/priv_manager/cluster_instances.py | 96 + .../api/priv_manager/instance_detail.py | 39 + .../api/priv_manager/tendbcluster/__init__.py | 13 + .../priv_manager/tendbcluster/biz_clusters.py | 73 + .../tendbcluster/cluster_instances.py | 60 + .../tendbcluster/instance_detail.py | 62 + .../api/priv_manager/tendbha/__init__.py | 13 + .../api/priv_manager/tendbha/biz_clusters.py | 95 + .../priv_manager/tendbha/cluster_instances.py | 101 + .../priv_manager/tendbha/instance_detail.py | 62 + .../api/priv_manager/tendbsingle/__init__.py | 13 + .../priv_manager/tendbsingle/biz_clusters.py | 50 + .../tendbsingle/cluster_instances.py | 45 + 
.../tendbsingle/instance_detail.py | 50 + .../db_meta/api/proxy_instance/__init__.py | 11 + .../db_meta/api/proxy_instance/apis.py | 115 + .../db_meta/api/storage_instance/__init__.py | 11 + .../db_meta/api/storage_instance/apis.py | 192 + .../api/storage_instance_tuple/__init__.py | 11 + .../api/storage_instance_tuple/apis.py | 57 + dbm-ui/backend/db_meta/apps.py | 37 + dbm-ui/backend/db_meta/doc/app.md | 66 + dbm-ui/backend/db_meta/doc/cluster.md | 79 + dbm-ui/backend/db_meta/doc/domain.md | 40 + dbm-ui/backend/db_meta/doc/index.md | 99 + dbm-ui/backend/db_meta/doc/machine.md | 67 + .../backend/db_meta/doc/meta_cluster_type.md | 21 + dbm-ui/backend/db_meta/doc/meta_layer.md | 27 + dbm-ui/backend/db_meta/doc/meta_role.md | 34 + dbm-ui/backend/db_meta/doc/meta_type.md | 37 + dbm-ui/backend/db_meta/doc/proxyinstance.md | 78 + dbm-ui/backend/db_meta/doc/storageinstance.md | 104 + .../db_meta/doc/storageinstancetuple.md | 61 + dbm-ui/backend/db_meta/enums/__init__.py | 30 + dbm-ui/backend/db_meta/enums/access_layer.py | 18 + .../db_meta/enums/cluster_entry_role.py | 18 + .../db_meta/enums/cluster_entry_type.py | 18 + dbm-ui/backend/db_meta/enums/cluster_phase.py | 36 + .../backend/db_meta/enums/cluster_status.py | 38 + dbm-ui/backend/db_meta/enums/cluster_type.py | 39 + dbm-ui/backend/db_meta/enums/comm.py | 23 + .../db_meta/enums/instance_inner_role.py | 19 + .../backend/db_meta/enums/instance_phase.py | 36 + dbm-ui/backend/db_meta/enums/instance_role.py | 72 + .../backend/db_meta/enums/instance_status.py | 18 + dbm-ui/backend/db_meta/enums/machine_type.py | 46 + dbm-ui/backend/db_meta/enums/type_maps.py | 229 + dbm-ui/backend/db_meta/exceptions.py | 168 + dbm-ui/backend/db_meta/flatten/__init__.py | 15 + dbm-ui/backend/db_meta/flatten/cities.py | 23 + dbm-ui/backend/db_meta/flatten/machine.py | 87 + .../backend/db_meta/flatten/proxy_instance.py | 67 + .../db_meta/flatten/storage_instance.py | 94 + .../backend/db_meta/flatten/tendis_cluster.py | 137 + .../db_meta/migrations/0001_initial.py | 608 + ...napshotspec_spec_tendbclusterdeployplan.py | 58 + .../0003_clusterentry_forward_to.py | 27 + .../migrations/0004_auto_20230424_1920.py | 68 + .../migrations/0005_auto_20230426_1043.py | 94 + .../0006_alter_clusterentry_role.py | 22 + .../migrations/0006_auto_20230506_1148.py | 56 + .../migrations/0007_auto_20230510_1955.py | 52 + .../0008_storageinstance_is_stand_by.py | 18 + .../migrations/0009_auto_20230517_1047.py | 28 + .../migrations/0010_merge_20230523_2028.py | 13 + dbm-ui/backend/db_meta/migrations/__init__.py | 10 + dbm-ui/backend/db_meta/models/__init__.py | 25 + dbm-ui/backend/db_meta/models/app.py | 89 + dbm-ui/backend/db_meta/models/city_map.py | 40 + dbm-ui/backend/db_meta/models/cluster.py | 220 + .../backend/db_meta/models/cluster_entry.py | 115 + .../backend/db_meta/models/cluster_monitor.py | 235 + dbm-ui/backend/db_meta/models/db_module.py | 37 + dbm-ui/backend/db_meta/models/group.py | 39 + dbm-ui/backend/db_meta/models/instance.py | 201 + dbm-ui/backend/db_meta/models/machine.py | 146 + .../db_meta/models/proxy_instance_ext.py | 19 + dbm-ui/backend/db_meta/models/spec.py | 95 + .../db_meta/models/storage_instance_ext.py | 27 + .../db_meta/models/storage_instance_tuple.py | 36 + .../backend/db_meta/models/storage_set_dtl.py | 60 + dbm-ui/backend/db_meta/models/tag.py | 21 + dbm-ui/backend/db_meta/readme.md | 103 + .../db_meta/request_validator/__init__.py | 17 + .../backend/db_meta/request_validator/atom.py | 119 + .../db_meta/request_validator/common.py | 77 + 
.../backend/db_meta/request_validator/dbha.py | 59 + .../db_meta/request_validator/machine.py | 34 + .../request_validator/proxy_instance.py | 41 + .../db_meta/request_validator/serializers.py | 28 + .../request_validator/storage_instance.py | 83 + .../storage_instance_tuple.py | 31 + dbm-ui/backend/db_meta/tasks.py | 129 + dbm-ui/backend/db_meta/urls.py | 128 + dbm-ui/backend/db_meta/validators/__init__.py | 11 + dbm-ui/backend/db_meta/validators/impl.py | 25 + dbm-ui/backend/db_meta/views/__init__.py | 12 + dbm-ui/backend/db_meta/views/dbha/__init__.py | 11 + dbm-ui/backend/db_meta/views/dbha/views.py | 193 + dbm-ui/backend/db_meta/views/fake/__init__.py | 11 + dbm-ui/backend/db_meta/views/fake/views.py | 82 + .../backend/db_meta/views/helper/__init__.py | 11 + dbm-ui/backend/db_meta/views/helper/views.py | 96 + dbm-ui/backend/db_meta/views/meta/__init__.py | 11 + dbm-ui/backend/db_meta/views/meta/views.py | 44 + .../backend/db_meta/views/nosql/__init__.py | 11 + dbm-ui/backend/db_meta/views/nosql/views.py | 113 + .../db_meta/views/priv_manager/__init__.py | 11 + .../db_meta/views/priv_manager/views.py | 270 + dbm-ui/backend/db_monitor/__init__.py | 11 + dbm-ui/backend/db_monitor/apps.py | 28 + dbm-ui/backend/db_monitor/constants.py | 28 + .../backend/db_monitor/management/__init__.py | 10 + .../management/commands/__init__.py | 10 + .../management/commands/export_template.py | 68 + .../management/commands/extract_alarm.py | 141 + .../management/commands/extract_collect.py | 97 + .../db_monitor/migrations/0001_initial.py | 247 + .../backend/db_monitor/migrations/__init__.py | 10 + dbm-ui/backend/db_monitor/models.py | 130 + dbm-ui/backend/db_monitor/readme.md | 54 + dbm-ui/backend/db_monitor/serializers.py | 40 + dbm-ui/backend/db_monitor/tasks.py | 99 + .../db_monitor/tpls/alarm/0.es.5668.tpl64 | 1 + .../db_monitor/tpls/alarm/0.es.5669.tpl64 | 1 + .../db_monitor/tpls/alarm/0.es.5670.tpl64 | 1 + .../db_monitor/tpls/alarm/0.es.5671.tpl64 | 1 + .../db_monitor/tpls/alarm/0.es.5673.tpl64 | 1 + .../db_monitor/tpls/alarm/0.es.5674.tpl64 | 1 + .../db_monitor/tpls/alarm/0.es.5675.tpl64 | 1 + .../db_monitor/tpls/alarm/0.es.5757.tpl64 | 1 + .../db_monitor/tpls/alarm/0.hdfs.5891.tpl64 | 1 + .../db_monitor/tpls/alarm/0.hdfs.5894.tpl64 | 1 + .../db_monitor/tpls/alarm/0.hdfs.5895.tpl64 | 1 + .../db_monitor/tpls/alarm/0.hdfs.5898.tpl64 | 1 + .../db_monitor/tpls/alarm/0.hdfs.5899.tpl64 | 1 + .../tpls/alarm/0.influxdb.5946.tpl64 | 1 + .../tpls/alarm/0.influxdb.5947.tpl64 | 1 + .../tpls/alarm/0.influxdb.5948.tpl64 | 1 + .../tpls/alarm/0.influxdb.5949.tpl64 | 1 + .../tpls/alarm/0.influxdb.5950.tpl64 | 1 + .../tpls/alarm/0.influxdb.5951.tpl64 | 1 + .../tpls/alarm/0.influxdb.5952.tpl64 | 1 + .../db_monitor/tpls/alarm/0.kafka.5676.tpl64 | 1 + .../db_monitor/tpls/alarm/0.kafka.5677.tpl64 | 1 + .../db_monitor/tpls/alarm/0.kafka.5678.tpl64 | 1 + .../db_monitor/tpls/alarm/0.kafka.5679.tpl64 | 1 + .../db_monitor/tpls/alarm/0.kafka.5685.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.5614.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.5621.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.5623.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.5624.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.5625.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.5626.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.5627.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.5629.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.5630.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.5703.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.5704.tpl64 | 1 + 
.../db_monitor/tpls/alarm/0.mysql.5758.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.5762.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.5763.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73363.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73370.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73376.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73377.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73378.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73379.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73380.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73381.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73382.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73418.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73456.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73471.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73472.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73473.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73474.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73669.tpl64 | 1 + .../db_monitor/tpls/alarm/0.mysql.73670.tpl64 | 1 + .../db_monitor/tpls/alarm/0.pulsar.5680.tpl64 | 1 + .../db_monitor/tpls/alarm/0.pulsar.5681.tpl64 | 1 + .../db_monitor/tpls/alarm/0.pulsar.5682.tpl64 | 1 + .../db_monitor/tpls/alarm/0.pulsar.5683.tpl64 | 1 + .../db_monitor/tpls/alarm/0.redis.5765.tpl64 | 1 + .../db_monitor/tpls/alarm/0.redis.5779.tpl64 | 1 + .../db_monitor/tpls/alarm/0.redis.5780.tpl64 | 1 + .../db_monitor/tpls/alarm/0.redis.5781.tpl64 | 1 + .../db_monitor/tpls/alarm/0.redis.5782.tpl64 | 1 + .../db_monitor/tpls/alarm/0.redis.73741.tpl64 | 1 + .../db_monitor/tpls/alarm/0.redis.73742.tpl64 | 1 + .../db_monitor/tpls/alarm/0.redis.73756.tpl64 | 1 + .../db_monitor/tpls/alarm/0.redis.73757.tpl64 | 1 + .../db_monitor/tpls/alarm/0.redis.73759.tpl64 | 1 + .../db_monitor/tpls/alarm/0.redis.73760.tpl64 | 1 + .../db_monitor/tpls/alarm/0.redis.73761.tpl64 | 1 + .../db_monitor/tpls/alarm/0.redis.73762.tpl64 | 1 + .../0.es.dbm_elasticsearch_exporter.tpl64 | 1 + .../collect/0.hdfs.dbm_hdfs_exporter.tpl64 | 1 + .../0.influxdb.dbm_influxdb_bkpull.tpl64 | 1 + .../collect/0.kafka.dbm_kafka_bkpull.tpl64 | 1 + .../collect/0.kafka.dbm_kafka_exporter.tpl64 | 1 + .../collect/0.mysql.dbm_mysqld_exporter.tpl64 | 1 + .../0.mysql.dbm_mysqlproxy_exporter.tpl64 | 1 + ...0.pulsar.dbm_pulsarbookkeeper_bkpull.tpl64 | 1 + .../0.pulsar.dbm_pulsarbroker_bkpull.tpl64 | 1 + .../0.pulsar.dbm_pulsarzookeeper_bkpull.tpl64 | 1 + .../0.redis.dbm_predixy_exporter.tpl64 | 1 + .../collect/0.redis.dbm_redis_exporter.tpl64 | 1 + .../0.redis.dbm_twemproxy_exporter.tpl64 | 1 + dbm-ui/backend/db_monitor/urls.py | 20 + dbm-ui/backend/db_monitor/views/__init__.py | 10 + dbm-ui/backend/db_monitor/views/grafana.py | 74 + dbm-ui/backend/db_package/__init__.py | 10 + dbm-ui/backend/db_package/admin.py | 20 + dbm-ui/backend/db_package/apps.py | 18 + dbm-ui/backend/db_package/constants.py | 24 + dbm-ui/backend/db_package/exceptions.py | 24 + dbm-ui/backend/db_package/filters.py | 32 + .../db_package/migrations/0001_initial.py | 99 + .../backend/db_package/migrations/__init__.py | 10 + dbm-ui/backend/db_package/models.py | 64 + dbm-ui/backend/db_package/serializers.py | 31 + dbm-ui/backend/db_package/urls.py | 19 + dbm-ui/backend/db_package/views.py | 95 + dbm-ui/backend/db_proxy/__init__.py | 10 + dbm-ui/backend/db_proxy/admin.py | 34 + dbm-ui/backend/db_proxy/constants.py | 54 + dbm-ui/backend/db_proxy/exceptions.py | 7 + .../db_proxy/migrations/0001_initial.py | 94 + .../backend/db_proxy/migrations/__init__.py | 10 + dbm-ui/backend/db_proxy/models.py | 171 + 
dbm-ui/backend/db_proxy/nginxconf_tpl.py | 99 + dbm-ui/backend/db_proxy/tasks.py | 100 + dbm-ui/backend/db_proxy/urls.py | 37 + dbm-ui/backend/db_proxy/views/__init__.py | 10 + .../backend/db_proxy/views/bkrepo/__init__.py | 10 + .../db_proxy/views/bkrepo/serializers.py | 18 + dbm-ui/backend/db_proxy/views/bkrepo/urls.py | 10 + dbm-ui/backend/db_proxy/views/bkrepo/views.py | 46 + .../db_proxy/views/db_meta/__init__.py | 10 + .../db_proxy/views/db_meta/serializers.py | 107 + .../backend/db_proxy/views/db_meta/views.py | 180 + .../views/db_remote_service/serializers.py | 26 + .../db_proxy/views/db_remote_service/views.py | 54 + .../db_proxy/views/dbconfig/__init__.py | 10 + .../db_proxy/views/dbconfig/serializers.py | 57 + .../backend/db_proxy/views/dbconfig/views.py | 66 + .../db_proxy/views/gcs_dns/__init__.py | 10 + .../db_proxy/views/gcs_dns/serializers.py | 97 + .../backend/db_proxy/views/gcs_dns/views.py | 112 + .../backend/db_proxy/views/hadb/__init__.py | 10 + .../db_proxy/views/hadb/serializers.py | 21 + dbm-ui/backend/db_proxy/views/hadb/views.py | 77 + .../backend/db_proxy/views/jobapi/__init__.py | 10 + .../db_proxy/views/jobapi/serializers.py | 88 + dbm-ui/backend/db_proxy/views/jobapi/views.py | 149 + dbm-ui/backend/db_proxy/views/mock_data.py | 206 + .../db_proxy/views/nameservice/__init__.py | 10 + .../db_proxy/views/nameservice/serializers.py | 37 + .../db_proxy/views/nameservice/views.py | 95 + .../db_proxy/views/redis_dts/__init__.py | 10 + .../db_proxy/views/redis_dts/serializers.py | 116 + .../backend/db_proxy/views/redis_dts/views.py | 285 + dbm-ui/backend/db_proxy/views/serialiers.py | 71 + dbm-ui/backend/db_proxy/views/views.py | 114 + dbm-ui/backend/db_services/__init__.py | 10 + .../backend/db_services/bigdata/__init__.py | 10 + .../db_services/bigdata/es/__init__.py | 10 + .../db_services/bigdata/es/constants.py | 12 + .../backend/db_services/bigdata/es/query.py | 54 + dbm-ui/backend/db_services/bigdata/es/urls.py | 19 + .../backend/db_services/bigdata/es/views.py | 91 + .../db_services/bigdata/hdfs/__init_.py | 10 + .../db_services/bigdata/hdfs/constants.py | 16 + .../backend/db_services/bigdata/hdfs/query.py | 68 + .../backend/db_services/bigdata/hdfs/urls.py | 19 + .../backend/db_services/bigdata/hdfs/views.py | 189 + .../db_services/bigdata/influxdb/__init__.py | 10 + .../db_services/bigdata/influxdb/constants.py | 12 + .../db_services/bigdata/influxdb/query.py | 156 + .../bigdata/influxdb/serializers.py | 20 + .../db_services/bigdata/influxdb/urls.py | 19 + .../db_services/bigdata/influxdb/views.py | 59 + .../db_services/bigdata/kafka/__init__.py | 10 + .../db_services/bigdata/kafka/constants.py | 12 + .../db_services/bigdata/kafka/query.py | 35 + .../backend/db_services/bigdata/kafka/urls.py | 19 + .../db_services/bigdata/kafka/views.py | 75 + .../db_services/bigdata/pulsar/__init__.py | 10 + .../db_services/bigdata/pulsar/constants.py | 12 + .../db_services/bigdata/pulsar/query.py | 35 + .../db_services/bigdata/pulsar/urls.py | 19 + .../db_services/bigdata/pulsar/views.py | 75 + .../db_services/bigdata/resources/__init__.py | 10 + .../bigdata/resources/constants.py | 12 + .../db_services/bigdata/resources/query.py | 343 + .../db_services/bigdata/resources/urls.py | 25 + .../db_services/bigdata/resources/views.py | 165 + .../db_services/bigdata/resources/yasg_slz.py | 128 + dbm-ui/backend/db_services/bigdata/urls.py | 20 + dbm-ui/backend/db_services/cmdb/__init__.py | 10 + dbm-ui/backend/db_services/cmdb/biz.py | 72 + 
dbm-ui/backend/db_services/cmdb/exceptions.py | 23 + .../backend/db_services/cmdb/serializers.py | 51 + dbm-ui/backend/db_services/cmdb/urls.py | 25 + dbm-ui/backend/db_services/cmdb/views.py | 79 + dbm-ui/backend/db_services/dbbase/__init__.py | 10 + .../backend/db_services/dbbase/constants.py | 32 + .../db_services/dbbase/resources/__init__.py | 10 + .../db_services/dbbase/resources/constants.py | 20 + .../dbbase/resources/pagination.py | 50 + .../db_services/dbbase/resources/query.py | 68 + .../dbbase/resources/serializers.py | 83 + .../db_services/dbbase/resources/viewsets.py | 75 + .../db_services/dbbase/resources/yasg_slz.py | 26 + .../backend/db_services/dbconfig/__init__.py | 10 + .../db_services/dbconfig/config_item.py | 10 + .../backend/db_services/dbconfig/dataclass.py | 63 + .../db_services/dbconfig/exceptions.py | 28 + .../backend/db_services/dbconfig/handlers.py | 317 + .../backend/db_services/dbconfig/mock_data.py | 138 + .../db_services/dbconfig/serializers.py | 114 + dbm-ui/backend/db_services/dbconfig/urls.py | 19 + dbm-ui/backend/db_services/dbconfig/views.py | 175 + .../db_services/dbresource/__init__.py | 10 + .../db_services/dbresource/constants.py | 17 + .../db_services/dbresource/exceptions.py | 36 + .../backend/db_services/dbresource/filters.py | 35 + dbm-ui/backend/db_services/dbresource/mock.py | 61 + .../db_services/dbresource/serializers.py | 169 + dbm-ui/backend/db_services/dbresource/urls.py | 23 + .../dbresource/views/deploy_plan.py | 89 + .../db_services/dbresource/views/resource.py | 270 + .../db_services/dbresource/views/sepc.py | 128 + dbm-ui/backend/db_services/group/__init__.py | 10 + dbm-ui/backend/db_services/group/handlers.py | 28 + .../backend/db_services/group/serializers.py | 39 + dbm-ui/backend/db_services/group/urls.py | 19 + dbm-ui/backend/db_services/group/views.py | 114 + dbm-ui/backend/db_services/infras/__init__.py | 10 + .../backend/db_services/infras/constants.py | 19 + dbm-ui/backend/db_services/infras/host.py | 245 + .../backend/db_services/infras/serializers.py | 58 + dbm-ui/backend/db_services/infras/urls.py | 29 + dbm-ui/backend/db_services/infras/views.py | 124 + .../backend/db_services/ipchooser/__init__.py | 11 + dbm-ui/backend/db_services/ipchooser/apps.py | 15 + .../db_services/ipchooser/constants.py | 116 + .../db_services/ipchooser/exceptions.py | 29 + .../ipchooser/handlers/__init__.py | 10 + .../db_services/ipchooser/handlers/base.py | 110 + .../ipchooser/handlers/host_handler.py | 188 + .../ipchooser/handlers/topo_handler.py | 208 + .../ipchooser/migrations/__init__.py | 10 + .../db_services/ipchooser/mock_data.py | 195 + .../backend/db_services/ipchooser/models.py | 78 + .../db_services/ipchooser/query/__init__.py | 10 + .../db_services/ipchooser/query/resource.py | 467 + .../ipchooser/serializers/__init__.py | 10 + .../db_services/ipchooser/serializers/base.py | 97 + .../ipchooser/serializers/host_sers.py | 62 + .../ipchooser/serializers/topo_sers.py | 82 + dbm-ui/backend/db_services/ipchooser/tasks.py | 10 + .../db_services/ipchooser/tools/__init__.py | 10 + .../db_services/ipchooser/tools/topo_tool.py | 70 + dbm-ui/backend/db_services/ipchooser/types.py | 27 + dbm-ui/backend/db_services/ipchooser/urls.py | 21 + dbm-ui/backend/db_services/ipchooser/views.py | 208 + dbm-ui/backend/db_services/mysql/__init__.py | 10 + dbm-ui/backend/db_services/mysql/admin.py | 13 + dbm-ui/backend/db_services/mysql/apps.py | 16 + .../db_services/mysql/cluster/__init__.py | 10 + .../db_services/mysql/cluster/handlers.py | 212 + 
.../db_services/mysql/cluster/mock_data.py | 46 + .../db_services/mysql/cluster/serializers.py | 87 + .../backend/db_services/mysql/cluster/urls.py | 20 + .../db_services/mysql/cluster/views.py | 96 + dbm-ui/backend/db_services/mysql/constants.py | 18 + dbm-ui/backend/db_services/mysql/dataclass.py | 69 + .../mysql/excel_files/authorize_err_tpl.xlsx | Bin 0 -> 9489 bytes .../mysql/excel_files/authorize_tpl.xlsx | Bin 0 -> 9421 bytes .../mysql/fixpoint_rollback/__init__.py | 10 + .../mysql/fixpoint_rollback/constants.py | 16 + .../mysql/fixpoint_rollback/handlers.py | 333 + .../mysql/fixpoint_rollback/serializers.py | 31 + .../mysql/fixpoint_rollback/urls.py | 20 + .../mysql/fixpoint_rollback/views.py | 87 + .../db_services/mysql/instance/__init__.py | 10 + .../db_services/mysql/instance/handlers.py | 132 + .../db_services/mysql/instance/mock_data.py | 29 + .../db_services/mysql/instance/serializers.py | 50 + .../db_services/mysql/instance/urls.py | 20 + .../db_services/mysql/instance/views.py | 39 + .../db_services/mysql/permission/__init__.py | 10 + .../mysql/permission/authorize/__init__.py | 10 + .../mysql/permission/authorize/apps.py | 16 + .../mysql/permission/authorize/dataclass.py | 102 + .../mysql/permission/authorize/handlers.py | 199 + .../authorize/migrations/0001_initial.py | 43 + .../authorize/migrations/__init__.py | 10 + .../mysql/permission/authorize/mock_data.py | 72 + .../mysql/permission/authorize/models.py | 51 + .../mysql/permission/authorize/serializers.py | 120 + .../mysql/permission/authorize/views.py | 124 + .../mysql/permission/clone/__init__.py | 10 + .../mysql/permission/clone/apps.py | 16 + .../mysql/permission/clone/dataclass.py | 47 + .../mysql/permission/clone/handlers.py | 188 + .../clone/migrations/0001_initial.py | 47 + ..._mysqlpermissionclonerecord_bk_cloud_id.py | 19 + .../permission/clone/migrations/__init__.py | 10 + .../mysql/permission/clone/mock_data.py | 47 + .../mysql/permission/clone/models.py | 45 + .../mysql/permission/clone/serializers.py | 126 + .../mysql/permission/clone/views.py | 91 + .../db_services/mysql/permission/constants.py | 125 + .../mysql/permission/db_account/__init__.py | 10 + .../mysql/permission/db_account/dataclass.py | 54 + .../mysql/permission/db_account/handlers.py | 259 + .../mysql/permission/db_account/mock_data.py | 77 + .../mysql/permission/db_account/policy.py | 230 + .../permission/db_account/serializers.py | 161 + .../mysql/permission/db_account/views.py | 140 + .../mysql/permission/exceptions.py | 57 + .../db_services/mysql/permission/urls.py | 25 + .../mysql/remote_service/__init__.py | 10 + .../mysql/remote_service/handlers.py | 114 + .../mysql/remote_service/mock_data.py | 21 + .../mysql/remote_service/serializers.py | 45 + .../db_services/mysql/remote_service/urls.py | 20 + .../db_services/mysql/remote_service/views.py | 60 + .../db_services/mysql/resources/__init__.py | 10 + .../db_services/mysql/resources/constants.py | 11 + .../mysql/resources/tendbha/__init__.py | 10 + .../mysql/resources/tendbha/query.py | 279 + .../mysql/resources/tendbha/views.py | 68 + .../mysql/resources/tendbha/yasg_slz.py | 147 + .../mysql/resources/tendbsingle/__init__.py | 10 + .../mysql/resources/tendbsingle/query.py | 215 + .../mysql/resources/tendbsingle/views.py | 69 + .../mysql/resources/tendbsingle/yasg_slz.py | 75 + .../db_services/mysql/resources/urls.py | 29 + .../db_services/mysql/resources/views.py | 126 + .../db_services/mysql/sql_import/__init__.py | 10 + .../db_services/mysql/sql_import/constants.py | 49 + 
.../db_services/mysql/sql_import/dataclass.py | 77 + .../db_services/mysql/sql_import/handlers.py | 253 + .../db_services/mysql/sql_import/mock_data.py | 68 + .../mysql/sql_import/serializers.py | 189 + .../db_services/mysql/sql_import/urls.py | 20 + .../db_services/mysql/sql_import/views.py | 154 + dbm-ui/backend/db_services/mysql/urls.py | 21 + .../backend/db_services/partition/__init__.py | 0 .../db_services/partition/constants.py | 15 + .../db_services/partition/exceptions.py | 18 + .../backend/db_services/partition/handlers.py | 48 + .../db_services/partition/serializers.py | 101 + dbm-ui/backend/db_services/partition/urls.py | 18 + dbm-ui/backend/db_services/partition/views.py | 117 + dbm-ui/backend/db_services/plugin/__init__.py | 10 + .../plugin/nameservice/__init__.py | 10 + .../db_services/plugin/nameservice/clb.py | 99 + .../db_services/plugin/nameservice/polaris.py | 108 + dbm-ui/backend/db_services/redis/__init__.py | 10 + dbm-ui/backend/db_services/redis/apps.py | 16 + dbm-ui/backend/db_services/redis/constants.py | 19 + .../db_services/redis/resources/__init__.py | 10 + .../db_services/redis/resources/constants.py | 11 + .../redis/resources/redis_cluster/__init__.py | 10 + .../redis/resources/redis_cluster/query.py | 278 + .../resources/redis_cluster/serializers.py | 21 + .../redis/resources/redis_cluster/views.py | 123 + .../redis/resources/redis_cluster/yasg_slz.py | 115 + .../db_services/redis/resources/urls.py | 27 + .../db_services/redis/resources/views.py | 110 + .../db_services/redis/resources/yasg_slz.py | 10 + dbm-ui/backend/db_services/redis/urls.py | 15 + .../backend/db_services/taskflow/__init__.py | 10 + .../backend/db_services/taskflow/constants.py | 13 + .../db_services/taskflow/exceptions.py | 43 + .../backend/db_services/taskflow/handlers.py | 187 + .../db_services/taskflow/serializers.py | 67 + dbm-ui/backend/db_services/taskflow/task.py | 90 + dbm-ui/backend/db_services/taskflow/urls.py | 20 + .../db_services/taskflow/views/__init__.py | 10 + .../db_services/taskflow/views/flow.py | 166 + .../db_services/taskflow/views/redis.py | 141 + dbm-ui/backend/db_services/user/__init__.py | 10 + .../backend/db_services/user/serializers.py | 18 + dbm-ui/backend/db_services/user/urls.py | 19 + dbm-ui/backend/db_services/user/views.py | 39 + .../backend/db_services/version/__init__.py | 10 + .../backend/db_services/version/constants.py | 57 + .../db_services/version/serializers.py | 21 + dbm-ui/backend/db_services/version/urls.py | 19 + dbm-ui/backend/db_services/version/views.py | 88 + dbm-ui/backend/dbm_init/__init__.py | 10 + dbm-ui/backend/dbm_init/apps.py | 24 + dbm-ui/backend/dbm_init/constants.py | 13 + .../json_files/bklog/dbm_dbactuator.json | 32 + .../json_files/bklog/dbm_redis_record.json | 34 + .../json_files/bklog/mysql_backup_result.json | 34 + .../json_files/bklog/mysql_binlog_result.json | 273 + .../bklog/mysql_checksum_result.json | 34 + .../json_files/bklog/mysql_slowlog.json | 215 + dbm-ui/backend/dbm_init/json_files/format.py | 56 + .../dbm_init/json_files/itsm/itsm_dbm.json | 914 + .../backend/dbm_init/management/__init__.py | 10 + .../dbm_init/management/commands/__init__.py | 10 + .../management/commands/download_bkrepo.py | 64 + .../management/commands/services_init.py | 57 + .../management/commands/sync_from_bkrepo.py | 58 + .../management/commands/upload_bkrepo.py | 69 + .../backend/dbm_init/migrations/__init__.py | 10 + dbm-ui/backend/dbm_init/readme.md | 41 + dbm-ui/backend/dbm_init/services.py | 611 + dbm-ui/backend/env/__init__.py | 145 + 
dbm-ui/backend/env/apigw_domains.py | 39 + dbm-ui/backend/env/bkrepo.py | 30 + dbm-ui/backend/exceptions.py | 166 + dbm-ui/backend/flow/README.md | 13 + dbm-ui/backend/flow/__init__.py | 11 + dbm-ui/backend/flow/admin.py | 32 + dbm-ui/backend/flow/apps.py | 23 + dbm-ui/backend/flow/consts.py | 705 + dbm-ui/backend/flow/docs/README.md | 35 + dbm-ui/backend/flow/docs/bk-dbm-single.png | Bin 0 -> 604350 bytes dbm-ui/backend/flow/docs/bk-dbm-tendbha.png | Bin 0 -> 1486006 bytes dbm-ui/backend/flow/docs/flow_tree.json | 88 + dbm-ui/backend/flow/docs/redis_backup.png | Bin 0 -> 51440 bytes .../flow/docs/redis_cluster_open_close.png | Bin 0 -> 95658 bytes .../flow/docs/redis_cluster_shutdown.png | Bin 0 -> 192509 bytes dbm-ui/backend/flow/docs/redis_flush_data.png | Bin 0 -> 114386 bytes dbm-ui/backend/flow/docs/redis_key_delete.png | Bin 0 -> 70650 bytes .../flow/docs/tendis_cache_cluster_apply.png | Bin 0 -> 185422 bytes dbm-ui/backend/flow/engine/__init__.py | 11 + dbm-ui/backend/flow/engine/abstract.py | 41 + dbm-ui/backend/flow/engine/airflow.py | 10 + dbm-ui/backend/flow/engine/bamboo/__init__.py | 10 + dbm-ui/backend/flow/engine/bamboo/builder.py | 34 + dbm-ui/backend/flow/engine/bamboo/engine.py | 255 + .../flow/engine/bamboo/scene/__init__.py | 10 + .../engine/bamboo/scene/cloud/__init__.py | 10 + .../bamboo/scene/cloud/base_service_flow.py | 404 + .../bamboo/scene/cloud/dbha_service_flow.py | 211 + .../bamboo/scene/cloud/dns_service_flow.py | 175 + .../bamboo/scene/cloud/drs_service_flow.py | 189 + .../bamboo/scene/cloud/nginx_service_flow.py | 106 + .../engine/bamboo/scene/common/builder.py | 234 + .../bamboo/scene/common/get_file_list.py | 445 + .../bamboo/scene/common/get_real_version.py | 27 + .../bamboo/scene/common/machine_os_init.py | 74 + .../flow/engine/bamboo/scene/es/__init__.py | 10 + .../engine/bamboo/scene/es/es_apply_flow.py | 194 + .../engine/bamboo/scene/es/es_destroy_flow.py | 105 + .../engine/bamboo/scene/es/es_disable_flow.py | 81 + .../engine/bamboo/scene/es/es_enable_flow.py | 81 + .../flow/engine/bamboo/scene/es/es_flow.py | 201 + .../engine/bamboo/scene/es/es_reboot_flow.py | 89 + .../engine/bamboo/scene/es/es_replace_flow.py | 329 + .../bamboo/scene/es/es_scale_up_flow.py | 144 + .../engine/bamboo/scene/es/es_shrink_flow.py | 187 + .../flow/engine/bamboo/scene/hdfs/__init__.py | 10 + .../engine/bamboo/scene/hdfs/exceptions.py | 42 + .../bamboo/scene/hdfs/hdfs_apply_flow.py | 252 + .../bamboo/scene/hdfs/hdfs_destroy_flow.py | 129 + .../bamboo/scene/hdfs/hdfs_disable_flow.py | 106 + .../bamboo/scene/hdfs/hdfs_enable_flow.py | 177 + .../bamboo/scene/hdfs/hdfs_reboot_flow.py | 120 + .../bamboo/scene/hdfs/hdfs_replace_flow.py | 368 + .../bamboo/scene/hdfs/hdfs_scale_up_flow.py | 229 + .../bamboo/scene/hdfs/hdfs_shrink_flow.py | 248 + .../engine/bamboo/scene/influxdb/__init__.py | 10 + .../scene/influxdb/influxdb_apply_flow.py | 161 + .../scene/influxdb/influxdb_destroy_flow.py | 77 + .../scene/influxdb/influxdb_disable_flow.py | 77 + .../scene/influxdb/influxdb_enable_flow.py | 77 + .../scene/influxdb/influxdb_reboot_flow.py | 77 + .../scene/influxdb/influxdb_replace_flow.py | 205 + .../engine/bamboo/scene/kafka/__init__.py | 10 + .../bamboo/scene/kafka/kafka_apply_flow.py | 230 + .../bamboo/scene/kafka/kafka_destroy_flow.py | 106 + .../bamboo/scene/kafka/kafka_disable_flow.py | 81 + .../bamboo/scene/kafka/kafka_enable_flow.py | 81 + .../bamboo/scene/kafka/kafka_reboot_flow.py | 80 + .../bamboo/scene/kafka/kafka_replace_flow.py | 421 + 
.../bamboo/scene/kafka/kafka_scale_up_flow.py | 172 + .../bamboo/scene/kafka/kafka_shrink_flow.py | 217 + .../engine/bamboo/scene/mysql/__init__.py | 10 + .../bamboo/scene/mysql/common/__init__.py | 10 + .../scene/mysql/common/common_sub_flow.py | 411 + .../bamboo/scene/mysql/import_sqlfile_flow.py | 253 + .../bamboo/scene/mysql/install_mysql_ha.md | 3 + .../scene/mysql/install_mysql_single.md | 3 + .../scene/mysql/mysql_authorize_rules.py | 44 + .../bamboo/scene/mysql/mysql_checksum.py | 228 + .../bamboo/scene/mysql/mysql_clone_rules.py | 44 + .../scene/mysql/mysql_edit_config_flow.py | 89 + .../mysql/mysql_fake_sql_semantic_check.py | 67 + .../scene/mysql/mysql_flashback_flow.py | 84 + .../bamboo/scene/mysql/mysql_ha_apply_flow.py | 318 + .../scene/mysql/mysql_ha_db_table_backup.py | 162 + .../scene/mysql/mysql_ha_destroy_flow.py | 188 + .../scene/mysql/mysql_ha_disable_flow.py | 144 + .../scene/mysql/mysql_ha_enable_flow.py | 148 + .../scene/mysql/mysql_ha_full_backup_flow.py | 172 + .../scene/mysql/mysql_master_fail_over.py | 186 + .../scene/mysql/mysql_master_slave_switch.py | 318 + .../scene/mysql/mysql_migrate_cluster_flow.py | 582 + .../bamboo/scene/mysql/mysql_partition.py | 168 + .../scene/mysql/mysql_proxy_cluster_add.py | 268 + .../scene/mysql/mysql_proxy_cluster_switch.py | 373 + .../scene/mysql/mysql_rename_database_flow.py | 221 + .../scene/mysql/mysql_restore_slave_flow.py | 651 + .../scene/mysql/mysql_rollback_data_flow.py | 422 + .../scene/mysql/mysql_single_apply_flow.py | 188 + .../scene/mysql/mysql_single_destroy_flow.py | 164 + .../scene/mysql/mysql_single_disable_flow.py | 80 + .../scene/mysql/mysql_single_enable_flow.py | 108 + .../bamboo/scene/mysql/mysql_truncate_flow.py | 214 + .../bamboo/scene/mysql/pt_table_sync.py | 121 + .../bamboo/scene/name_service/__init__.py | 10 + .../bamboo/scene/name_service/name_service.py | 111 + .../engine/bamboo/scene/pulsar/exceptions.py | 30 + .../bamboo/scene/pulsar/pulsar_apply_flow.py | 238 + .../bamboo/scene/pulsar/pulsar_base_flow.py | 381 + .../scene/pulsar/pulsar_destroy_flow.py | 129 + .../scene/pulsar/pulsar_disable_flow.py | 92 + .../bamboo/scene/pulsar/pulsar_enable_flow.py | 89 + .../bamboo/scene/pulsar/pulsar_reboot_flow.py | 87 + .../scene/pulsar/pulsar_replace_flow.py | 299 + .../scene/pulsar/pulsar_scale_up_flow.py | 122 + .../bamboo/scene/pulsar/pulsar_shrink_flow.py | 111 + .../bamboo/scene/pulsar/pulsar_sub_flow.py | 288 + .../engine/bamboo/scene/redis/__init__.py | 10 + .../bamboo/scene/redis/atom_jobs/__init__.py | 17 + .../scene/redis/atom_jobs/redis_dbmon.py | 83 + .../scene/redis/atom_jobs/redis_install.py | 120 + .../scene/redis/atom_jobs/redis_makesync.py | 244 + .../scene/redis/atom_jobs/redis_repair.py | 111 + .../scene/redis/atom_jobs/redis_shutdown.py | 123 + .../scene/redis/atom_jobs/redis_switch.py | 140 + .../scene/redis/redis_cluster_apply_flow.py | 316 + .../scene/redis/redis_cluster_backup.py | 122 + .../bamboo/scene/redis/redis_cluster_dts.py | 188 + .../scene/redis/redis_cluster_open_close.py | 192 + .../scene/redis/redis_cluster_scene_master.py | 253 + .../scene/redis/redis_cluster_scene_slave.py | 242 + .../scene/redis/redis_cluster_shutdown.py | 258 + .../engine/bamboo/scene/redis/redis_dbmon.py | 106 + .../bamboo/scene/redis/redis_flush_data.py | 158 + .../bamboo/scene/redis/redis_keys_delete.py | 167 + .../bamboo/scene/redis/redis_keys_extract.py | 113 + .../bamboo/scene/redis/redis_proxy_scale.py | 261 + .../scene/redis/singele_redis_shutdown.py | 148 + 
.../scene/redis/single_proxy_shutdown.py | 59 + .../scene/redis/tendis_plus_apply_flow.py | 301 + .../scene/spider/import_sqlfile_flow.py | 238 + .../scene/spider/spider_add_tmp_node.py | 157 + .../bamboo/scene/spider/spider_checksum.py | 228 + .../scene/spider/spider_cluster_deploy.py | 413 + .../scene/spider/spider_cluster_destroy.py | 197 + .../spider/spider_cluster_disable_deploy.py | 134 + .../spider/spider_cluster_enable_deploy.py | 140 + .../spider_cluster_truncate_database.py | 312 + .../bamboo/scene/spider/spider_partition.py | 168 + .../spider/spider_rename_database_flow.py | 324 + .../spider/spider_slave_cluster_deploy.py | 283 + dbm-ui/backend/flow/engine/codes.py | 16 + dbm-ui/backend/flow/engine/consts.py | 17 + .../flow/engine/controller/__init__.py | 10 + dbm-ui/backend/flow/engine/controller/base.py | 34 + .../backend/flow/engine/controller/cloud.py | 115 + dbm-ui/backend/flow/engine/controller/es.py | 85 + dbm-ui/backend/flow/engine/controller/hdfs.py | 81 + .../flow/engine/controller/influxdb.py | 65 + .../backend/flow/engine/controller/kafka.py | 83 + .../backend/flow/engine/controller/mysql.py | 498 + .../flow/engine/controller/name_service.py | 46 + .../backend/flow/engine/controller/pulsar.py | 81 + .../backend/flow/engine/controller/redis.py | 137 + .../backend/flow/engine/controller/spider.py | 106 + dbm-ui/backend/flow/engine/exceptions.py | 28 + dbm-ui/backend/flow/engine/logger/__init__.py | 10 + dbm-ui/backend/flow/engine/logger/jsonfmt.py | 65 + .../backend/flow/migrations/0001_initial.py | 76 + dbm-ui/backend/flow/migrations/__init__.py | 10 + dbm-ui/backend/flow/models.py | 51 + dbm-ui/backend/flow/plugins/__init__.py | 10 + dbm-ui/backend/flow/plugins/apps.py | 16 + .../flow/plugins/components/__init__.py | 14 + .../components/collections/__init__.py | 14 + .../components/collections/cloud/__init__.py | 10 + .../collections/cloud/exec_service_script.py | 117 + .../collections/cloud/push_config_file.py | 63 + .../collections/cloud/service_proxy.py | 48 + .../collections/cloud/trans_files.py | 82 + .../components/collections/common/__init__.py | 10 + .../collections/common/base_service.py | 295 + .../common/bigdata_manager_service.py | 139 + .../collections/common/cc_service.py | 139 + .../collections/common/create_ticket.py | 55 + .../common/delete_cc_service_instance.py | 44 + .../collections/common/external_service.py | 49 + .../components/collections/common/pause.py | 89 + .../collections/common/sa_idle_check.py | 92 + .../components/collections/common/sa_init.py | 84 + .../collections/common/sleep_timer_service.py | 142 + .../components/collections/es/__init__.py | 10 + .../components/collections/es/es_db_meta.py | 45 + .../collections/es/es_dns_manage.py | 162 + .../collections/es/exec_es_actuator_script.py | 121 + .../collections/es/get_es_payload.py | 54 + .../collections/es/get_es_resource.py | 71 + .../collections/es/rewrite_es_config.py | 68 + .../components/collections/es/trans_files.py | 88 + .../components/collections/hdfs/__init__.py | 10 + .../collections/hdfs/check_cluster_status.py | 52 + .../collections/hdfs/exec_actuator_script.py | 126 + .../collections/hdfs/get_hdfs_payload.py | 55 + .../collections/hdfs/get_hdfs_resource.py | 75 + .../collections/hdfs/hdfs_db_meta.py | 46 + .../collections/hdfs/hdfs_dns_manage.py | 87 + .../collections/hdfs/rewrite_hdfs_config.py | 97 + .../collections/hdfs/trans_flies.py | 93 + .../collections/hdfs/update_hdfs_resource.py | 81 + .../influxdb/exec_actuator_script.py | 110 + 
.../collections/influxdb/influxdb_config.py | 90 + .../collections/influxdb/influxdb_db_meta.py | 48 + .../influxdb/influxdb_replace_config.py | 103 + .../collections/influxdb/trans_flies.py | 73 + .../collections/kafka/dns_manage.py | 107 + .../collections/kafka/exec_actuator_script.py | 110 + .../collections/kafka/get_kafka_resource.py | 64 + .../collections/kafka/kafka_config.py | 79 + .../collections/kafka/kafka_db_meta.py | 48 + .../collections/kafka/trans_flies.py | 73 + .../components/collections/mysql/__init__.py | 10 + .../mysql/add_user_for_cluster_switch.py | 56 + .../collections/mysql/authorize_rules.py | 121 + .../build_database_table_filter_regex.py | 59 + .../collections/mysql/clear_machine.py | 61 + .../collections/mysql/clone_rules.py | 125 + .../collections/mysql/clone_user.py | 80 + .../collections/mysql/create_user.py | 63 + .../collections/mysql/dns_manage.py | 100 + .../components/collections/mysql/drop_user.py | 59 + .../collections/mysql/exec_actuator_script.py | 171 + .../collections/mysql/fake_semantic_check.py | 73 + .../mysql/filter_database_table_from_regex.py | 105 + .../mysql/general_check_db_in_using.py | 138 + .../mysql/mysql_checksum_report.py | 147 + .../collections/mysql/mysql_db_meta.py | 55 + .../mysql/mysql_download_backupfile.py | 74 + .../mysql_ha_db_table_backup_response.py | 43 + .../mysql_master_slave_relationship_check.py | 87 + .../collections/mysql/mysql_os_init.py | 81 + .../mysql/mysql_partition_check.py | 53 + .../collections/mysql/pt_table_sync.py | 91 + .../rename_database_confirm_empty_from.py | 76 + .../mysql/rename_database_drop_from.py | 59 + .../mysql/rename_database_prepare_param.py | 37 + .../mysql/rollback_local_trans_flies.py | 60 + .../collections/mysql/rollback_trans_flies.py | 79 + .../collections/mysql/semantic_check.py | 130 + .../collections/mysql/slave_trans_flies.py | 88 + .../collections/mysql/trans_flies.py | 113 + .../truncate_data_create_stage_database.py | 64 + .../truncate_data_drop_stage_database.py | 57 + ...ncate_data_generate_stage_database_name.py | 44 + .../mysql/truncate_data_recreate_table.py | 70 + .../mysql/truncate_data_rename_table.py | 96 + .../collections/mysql/upload_file.py | 51 + .../collections/name_service/__init__.py | 10 + .../collections/name_service/name_service.py | 124 + .../components/collections/pulsar/__init__.py | 10 + .../pulsar/blank_schedule_service.py | 89 + .../pulsar/exec_actuator_script.py | 125 + .../collections/pulsar/get_pulsar_payload.py | 55 + .../collections/pulsar/get_pulsar_resource.py | 64 + .../collections/pulsar/pulsar_db_meta.py | 45 + .../collections/pulsar/pulsar_dns_manage.py | 77 + .../pulsar/pulsar_zk_dns_manage.py | 68 + .../pulsar/rewrite_pulsar_config.py | 139 + .../collections/pulsar/trans_files.py | 99 + .../components/collections/redis/EmptyAct.py | 32 + .../components/collections/redis/__init__.py | 10 + .../collections/redis/dns_manage.py | 103 + .../collections/redis/exec_actuator_script.py | 145 + .../collections/redis/exec_shell_script.py | 138 + .../collections/redis/get_redis_payload.py | 63 + .../collections/redis/get_redis_resource.py | 95 + .../collections/redis/redis_config.py | 56 + .../collections/redis/redis_db_meta.py | 63 + .../components/collections/redis/redis_dts.py | 989 + .../collections/redis/trans_flies.py | 109 + .../spider/add_system_user_in_cluster.py | 124 + .../spider/check_cluster_table_using_sub.py | 50 + .../clear_database_on_remote_service.py | 71 + .../spider/create_database_like_via_ctl.py | 119 + 
.../spider/drop_spider_table_via_ctl.py | 58 + .../collections/spider/spider_db_meta.py | 54 + ...truncate_database_drop_stage_db_via_ctl.py | 55 + ...te_database_old_new_map_adapter_service.py | 45 + .../truncate_database_on_spider_via_ctl.py | 153 + dbm-ui/backend/flow/signal/__init__.py | 10 + dbm-ui/backend/flow/signal/handlers.py | 93 + dbm-ui/backend/flow/tests.py | 10 + dbm-ui/backend/flow/urls.py | 256 + dbm-ui/backend/flow/utils/__init__.py | 10 + dbm-ui/backend/flow/utils/cc_manage.py | 140 + dbm-ui/backend/flow/utils/cloud/__init__.py | 10 + .../flow/utils/cloud/cloud_act_payload.py | 155 + .../utils/cloud/cloud_context_dataclass.py | 163 + .../flow/utils/cloud/cloud_db_proxy.py | 229 + .../flow/utils/cloud/cloud_module_operate.py | 196 + .../utils/cloud/script_template/__init__.py | 15 + .../cloud/script_template/dbha_template.py | 235 + .../cloud/script_template/dns_template.py | 147 + .../cloud/script_template/drs_template.py | 97 + .../cloud/script_template/nginx_template.py | 150 + .../backend/flow/utils/dict_to_dataclass.py | 33 + dbm-ui/backend/flow/utils/dns_manage.py | 174 + dbm-ui/backend/flow/utils/es/__init__.py | 10 + .../backend/flow/utils/es/es_act_payload.py | 387 + .../flow/utils/es/es_context_dataclass.py | 80 + dbm-ui/backend/flow/utils/es/es_db_meta.py | 157 + .../flow/utils/es/es_module_operate.py | 115 + .../flow/utils/es/es_script_template.py | 30 + dbm-ui/backend/flow/utils/extension_manage.py | 15 + dbm-ui/backend/flow/utils/filter_alias_ip.py | 40 + .../flow/utils/hdfs/bk_module_operate.py | 161 + dbm-ui/backend/flow/utils/hdfs/consts.py | 18 + .../flow/utils/hdfs/hdfs_act_playload.py | 682 + .../flow/utils/hdfs/hdfs_context_dataclass.py | 207 + .../backend/flow/utils/hdfs/hdfs_db_meta.py | 377 + .../flow/utils/hdfs/hdfs_script_template.py | 30 + .../flow/utils/influxdb/bk_module_operate.py | 98 + .../utils/influxdb/influxdb_act_playload.py | 121 + .../influxdb/influxdb_context_dataclass.py | 111 + .../flow/utils/influxdb/influxdb_db_meta.py | 194 + .../flow/utils/influxdb/script_template.py | 29 + .../flow/utils/kafka/bk_module_operate.py | 133 + .../flow/utils/kafka/kafka_act_playload.py | 214 + .../utils/kafka/kafka_context_dataclass.py | 112 + .../backend/flow/utils/kafka/kafka_db_meta.py | 289 + .../flow/utils/kafka/script_template.py | 29 + .../flow/utils/mysql/bk_module_operate.py | 153 + .../flow/utils/mysql/common/compare_time.py | 20 + .../utils/mysql/common/mysql_cluster_info.py | 100 + .../backend/flow/utils/mysql/db_resource.py | 109 + .../utils/mysql/db_table_filter/__init__.py | 11 + .../utils/mysql/db_table_filter/exception.py | 24 + .../utils/mysql/db_table_filter/filter.py | 96 + .../flow/utils/mysql/db_table_filter/tools.py | 53 + .../flow/utils/mysql/mysql_act_dataclass.py | 338 + .../mysql/mysql_act_dataclass_validator.py | 30 + .../flow/utils/mysql/mysql_act_playload.py | 1783 ++ .../utils/mysql/mysql_context_dataclass.py | 288 + .../backend/flow/utils/mysql/mysql_db_meta.py | 623 + dbm-ui/backend/flow/utils/pulsar/consts.py | 38 + .../flow/utils/pulsar/pulsar_act_payload.py | 474 + .../utils/pulsar/pulsar_context_dataclass.py | 97 + .../flow/utils/pulsar/pulsar_db_meta.py | 199 + .../utils/pulsar/pulsar_module_operate.py | 117 + .../utils/pulsar/pulsar_script_template.py | 30 + .../backend/flow/utils/redis/db_resource.py | 109 + .../flow/utils/redis/redis_act_playload.py | 894 + .../flow/utils/redis/redis_cluster_nodes.py | 422 + .../utils/redis/redis_context_dataclass.py | 237 + .../backend/flow/utils/redis/redis_db_meta.py | 390 +
.../flow/utils/redis/redis_proxy_util.py | 121 + .../flow/utils/redis/redis_script_template.py | 26 + dbm-ui/backend/flow/utils/redis/redis_util.py | 18 + dbm-ui/backend/flow/utils/script_template.py | 56 + .../flow/utils/spider/spider_act_dataclass.py | 22 + .../flow/utils/spider/spider_bk_config.py | 32 + .../flow/utils/spider/spider_db_meta.py | 87 + dbm-ui/backend/flow/views/__init__.py | 10 + dbm-ui/backend/flow/views/base.py | 39 + dbm-ui/backend/flow/views/cloud_dbha_apply.py | 44 + .../flow/views/cloud_dns_bind_apply.py | 44 + dbm-ui/backend/flow/views/cloud_drs_apply.py | 49 + .../backend/flow/views/cloud_nginx_apply.py | 59 + dbm-ui/backend/flow/views/es_apply.py | 68 + dbm-ui/backend/flow/views/es_destroy.py | 44 + dbm-ui/backend/flow/views/es_disable.py | 44 + dbm-ui/backend/flow/views/es_enable.py | 44 + dbm-ui/backend/flow/views/es_reboot.py | 52 + dbm-ui/backend/flow/views/es_replace.py | 68 + dbm-ui/backend/flow/views/es_scale_up.py | 57 + dbm-ui/backend/flow/views/es_shrink.py | 57 + dbm-ui/backend/flow/views/hdfs_apply.py | 66 + dbm-ui/backend/flow/views/hdfs_destroy.py | 44 + dbm-ui/backend/flow/views/hdfs_disable.py | 44 + dbm-ui/backend/flow/views/hdfs_enable.py | 44 + dbm-ui/backend/flow/views/hdfs_reboot.py | 60 + dbm-ui/backend/flow/views/hdfs_replace.py | 70 + dbm-ui/backend/flow/views/hdfs_scale_up.py | 47 + dbm-ui/backend/flow/views/hdfs_shrink.py | 47 + .../flow/views/import_resource_init.py | 29 + dbm-ui/backend/flow/views/import_sqlfile.py | 66 + dbm-ui/backend/flow/views/influxdb_apply.py | 61 + dbm-ui/backend/flow/views/influxdb_destroy.py | 43 + dbm-ui/backend/flow/views/influxdb_disable.py | 43 + dbm-ui/backend/flow/views/influxdb_enable.py | 43 + dbm-ui/backend/flow/views/influxdb_reboot.py | 52 + dbm-ui/backend/flow/views/influxdb_replace.py | 53 + dbm-ui/backend/flow/views/kafka_apply.py | 74 + dbm-ui/backend/flow/views/kafka_destroy.py | 43 + dbm-ui/backend/flow/views/kafka_disable.py | 43 + dbm-ui/backend/flow/views/kafka_enable.py | 43 + dbm-ui/backend/flow/views/kafka_reboot.py | 52 + dbm-ui/backend/flow/views/kafka_replace.py | 55 + dbm-ui/backend/flow/views/kafka_scale_up.py | 53 + dbm-ui/backend/flow/views/kafka_shrink.py | 48 + dbm-ui/backend/flow/views/mysql_add_slave.py | 49 + dbm-ui/backend/flow/views/mysql_checksum.py | 24 + .../backend/flow/views/mysql_edit_config.py | 65 + dbm-ui/backend/flow/views/mysql_flashback.py | 54 + dbm-ui/backend/flow/views/mysql_ha_apply.py | 63 + .../flow/views/mysql_ha_db_table_backup.py | 40 + dbm-ui/backend/flow/views/mysql_ha_destroy.py | 71 + .../flow/views/mysql_ha_full_backup.py | 39 + .../flow/views/mysql_ha_master_fail_over.py | 36 + .../flow/views/mysql_ha_rename_database.py | 38 + dbm-ui/backend/flow/views/mysql_ha_switch.py | 36 + .../flow/views/mysql_ha_truncate_data.py | 38 + .../flow/views/mysql_migrate_cluster.py | 51 + dbm-ui/backend/flow/views/mysql_partition.py | 24 + dbm-ui/backend/flow/views/mysql_proxy_add.py | 36 + .../backend/flow/views/mysql_proxy_reduce.py | 35 + .../backend/flow/views/mysql_proxy_switch.py | 36 + .../backend/flow/views/mysql_pt_table_sync.py | 36 + .../flow/views/mysql_restore_local_slave.py | 49 + .../backend/flow/views/mysql_restore_slave.py | 55 + .../backend/flow/views/mysql_rollback_data.py | 53 + .../backend/flow/views/mysql_single_apply.py | 67 + .../flow/views/mysql_single_destroy.py | 68 + .../views/mysql_single_rename_database.py | 38 + .../flow/views/mysql_single_truncate_data.py | 38 + dbm-ui/backend/flow/views/name_service.py | 79 + 
dbm-ui/backend/flow/views/pulsar_apply.py | 103 + dbm-ui/backend/flow/views/pulsar_destroy.py | 44 + dbm-ui/backend/flow/views/pulsar_disable.py | 44 + dbm-ui/backend/flow/views/pulsar_enable.py | 44 + dbm-ui/backend/flow/views/pulsar_reboot.py | 60 + dbm-ui/backend/flow/views/pulsar_replace.py | 33 + dbm-ui/backend/flow/views/pulsar_scale_up.py | 51 + dbm-ui/backend/flow/views/pulsar_shrink.py | 33 + dbm-ui/backend/flow/views/redis_cluster.py | 314 + dbm-ui/backend/flow/views/redis_keys.py | 102 + dbm-ui/backend/flow/views/redis_scene.py | 93 + .../backend/flow/views/rollback_pipeline.py | 184 + .../backend/flow/views/spider_add_tmp_node.py | 17 + dbm-ui/backend/flow/views/spider_checksum.py | 25 + .../flow/views/spider_cluster_apply.py | 24 + .../flow/views/spider_cluster_destroy.py | 74 + .../views/spider_cluster_rename_database.py | 37 + .../views/spider_cluster_truncate_database.py | 37 + dbm-ui/backend/flow/views/spider_partition.py | 25 + .../flow/views/spider_semantic_check.py | 24 + .../backend/flow/views/spider_slave_apply.py | 32 + .../backend/flow/views/spider_sql_import.py | 17 + .../backend/flow/views/sql_semantic_check.py | 63 + dbm-ui/backend/homepage/__init__.py | 10 + dbm-ui/backend/homepage/views.py | 70 + dbm-ui/backend/iam_app/__init__.py | 10 + dbm-ui/backend/iam_app/admin.py | 13 + dbm-ui/backend/iam_app/apps.py | 16 + dbm-ui/backend/iam_app/dataclass/__init__.py | 10 + dbm-ui/backend/iam_app/dataclass/actions.py | 82 + dbm-ui/backend/iam_app/dataclass/resources.py | 99 + dbm-ui/backend/iam_app/exceptions.py | 66 + dbm-ui/backend/iam_app/handlers/__init__.py | 10 + dbm-ui/backend/iam_app/handlers/drf_perm.py | 174 + dbm-ui/backend/iam_app/handlers/permission.py | 352 + .../iam_app/migration_json_files/initial.json | 60 + .../iam_app/migrations/0001_initial.py | 26 + .../migrations/0002_bk-dbm_202303301547.py | 22 + .../migrations/0003_bk-dbm_202304031530.py | 22 + dbm-ui/backend/iam_app/migrations/__init__.py | 10 + dbm-ui/backend/iam_app/serializers.py | 22 + dbm-ui/backend/iam_app/urls.py | 26 + dbm-ui/backend/iam_app/views/__init__.py | 10 + dbm-ui/backend/iam_app/views/iam_provider.py | 129 + dbm-ui/backend/iam_app/views/views.py | 60 + dbm-ui/backend/redis_dts/__init__.py | 10 + dbm-ui/backend/redis_dts/admin.py | 28 + dbm-ui/backend/redis_dts/apis.py | 536 + dbm-ui/backend/redis_dts/apps.py | 16 + dbm-ui/backend/redis_dts/constants.py | 169 + dbm-ui/backend/redis_dts/exceptions.py | 30 + .../redis_dts/migrations/0001_initial.py | 164 + .../backend/redis_dts/migrations/__init__.py | 0 dbm-ui/backend/redis_dts/models/__init__.py | 11 + .../models/tb_dts_distribute_lock.py | 26 + .../models/tb_dts_server_blacklist.py | 19 + .../redis_dts/models/tb_tendis_dts_job.py | 63 + .../redis_dts/models/tb_tendis_dts_task.py | 102 + dbm-ui/backend/redis_dts/serializers.py | 38 + dbm-ui/backend/redis_dts/urls.py | 19 + dbm-ui/backend/redis_dts/util.py | 101 + dbm-ui/backend/redis_dts/views.py | 84 + dbm-ui/backend/redis_dts/yasg_slz.py | 10 + dbm-ui/backend/tests/__init__.py | 10 + .../backend/tests/configuration/__init__.py | 10 + .../tests/configuration/views/__init__.py | 10 + .../tests/configuration/views/conftest.py | 25 + .../configuration/views/password_policy.py | 39 + .../tests/configuration/views/profile.py | 40 + dbm-ui/backend/tests/conftest.py | 73 + dbm-ui/backend/tests/constants.py | 12 + dbm-ui/backend/tests/db_meta/__init__.py | 10 + dbm-ui/backend/tests/db_meta/api/__init__.py | 10 + .../tests/db_meta/api/cluster/__init__.py | 10 + 
.../db_meta/api/cluster/tendbha/__init__.py | 10 + .../api/cluster/tendbha/test_handler.py | 73 + .../api/cluster/tendbha/test_tendbha.py | 225 + .../tests/db_meta/api/db_module/__init__.py | 10 + .../tests/db_meta/api/db_module/test_apis.py | 39 + .../tests/db_meta/api/dbha/__init__.py | 10 + .../tests/db_meta/api/dbha/test_apis.py | 291 + .../tests/db_meta/api/entry/__init__.py | 10 + .../tests/db_meta/api/machine/__init__.py | 10 + .../tests/db_meta/api/machine/test_apis.py | 125 + .../db_meta/api/proxy_instance/__init__.py | 10 + .../db_meta/api/proxy_instance/test_apis.py | 125 + .../db_meta/api/storage_instance/__init__.py | 10 + .../db_meta/api/storage_instance/test_apis.py | 145 + .../api/storage_instance_tuple/__init__.py | 10 + .../api/storage_instance_tuple/test_apis.py | 122 + dbm-ui/backend/tests/db_services/__init__.py | 10 + .../tests/db_services/cmdb/__init__.py | 10 + .../tests/db_services/cmdb/test_api.py | 39 + .../tests/db_services/cmdb/test_biz.py | 29 + .../tests/db_services/dbconfig/__init__.py | 10 + .../tests/db_services/dbconfig/test_api.py | 143 + .../tests/db_services/infras/__init__.py | 10 + .../tests/db_services/infras/test_api.py | 41 + .../tests/db_services/mysql/__init__.py | 10 + .../db_services/mysql/cluster/__init__.py | 10 + .../db_services/mysql/cluster/test_handler.py | 49 + .../tests/db_services/mysql/conftest.py | 197 + .../db_services/mysql/instance/__init__.py | 10 + .../mysql/instance/test_handler.py | 47 + .../db_services/mysql/permission/__init__.py | 10 + .../mysql/permission/test_account_handler.py | 85 + .../permission/test_authorize_handler.py | 60 + .../mysql/permission/test_clone_handler.py | 68 + .../mysql/remote_service/__init__.py | 10 + .../mysql/remote_service/test_handlers.py | 30 + .../db_services/mysql/resources/__init__.py | 10 + .../db_services/mysql/resources/test_dbha.py | 95 + .../mysql/resources/test_dbsingle.py | 80 + .../mysql/resources/test_list_resource.py | 54 + .../mysql/test_sql_inport_handler.py | 39 + .../tests/db_services/taskflow/__init__.py | 10 + .../tests/db_services/taskflow/test_api.py | 92 + .../dbm_init/test_auto_create_services.py | 29 + .../flow/components/collections/__init__.py | 10 + .../tests/flow/components/collections/base.py | 71 + .../collections/mysql/permission/__init__.py | 10 + .../mysql/permission/test_authorize_rules.py | 41 + .../mysql/permission/test_clone_rules.py | 41 + .../mysql/test_exec_actuator_script.py | 76 + .../collections/mysql/test_mysql_db_meta.py | 81 + .../mysql/test_mysql_dns_manage.py | 59 + .../collections/mysql/test_trans_file.py | 66 + .../components/collections/mysql/utils.py | 178 + dbm-ui/backend/tests/mock_data/__init__.py | 10 + .../tests/mock_data/components/__init__.py | 10 + .../mock_data/components/bamboo_engine.py | 36 + .../tests/mock_data/components/bklog.py | 118 + .../backend/tests/mock_data/components/cc.py | 187 + .../mock_data/components/db_remote_service.py | 45 + .../tests/mock_data/components/dbconfig.py | 179 + .../tests/mock_data/components/gcs_dns.py | 29 + .../backend/tests/mock_data/components/gse.py | 19 + .../tests/mock_data/components/itsm.py | 100 + .../backend/tests/mock_data/components/job.py | 58 + .../components/mysql_priv_manager.py | 87 + .../tests/mock_data/components/sql_import.py | 31 + .../tests/mock_data/components/storage.py | 26 + dbm-ui/backend/tests/mock_data/constant.py | 22 + .../tests/mock_data/db_services/__init__.py | 10 + .../db_services/mysql/permission/account.py | 38 + .../db_services/mysql/permission/authorize.py | 35 +
.../db_services/mysql/permission/clone.py | 25 + .../tests/mock_data/db_services/taskflow.py | 64 + .../flow/components/collections/mysql.py | 74 + .../tests/mock_data/iam_app/__init__.py | 10 + .../tests/mock_data/iam_app/permission.py | 47 + .../tests/mock_data/ticket/__init__.py | 10 + .../tests/mock_data/ticket/ticket_flow.py | 121 + .../mock_data/ticket/ticket_params_data.py | 46 + dbm-ui/backend/tests/mock_data/utils.py | 48 + dbm-ui/backend/tests/mysql/test_ticket.py | 76 + dbm-ui/backend/tests/ticket/__init__.py | 10 + .../backend/tests/ticket/test_ticket_flow.py | 149 + ...00\345\217\221\350\247\204\350\214\203.md" | 663 + dbm-ui/backend/ticket/__init__.py | 10 + dbm-ui/backend/ticket/admin.py | 30 + dbm-ui/backend/ticket/apps.py | 24 + dbm-ui/backend/ticket/builders/__init__.py | 416 + dbm-ui/backend/ticket/builders/cloud/base.py | 114 + .../backend/ticket/builders/cloud/dbha_add.py | 51 + .../ticket/builders/cloud/dbha_reduce.py | 65 + .../ticket/builders/cloud/dbha_reload.py | 50 + .../ticket/builders/cloud/dbha_replace.py | 66 + .../backend/ticket/builders/cloud/dns_add.py | 47 + .../ticket/builders/cloud/dns_reduce.py | 56 + .../ticket/builders/cloud/dns_reload.py | 47 + .../ticket/builders/cloud/dns_replace.py | 48 + .../backend/ticket/builders/cloud/drs_add.py | 47 + .../ticket/builders/cloud/drs_reduce.py | 56 + .../ticket/builders/cloud/drs_reload.py | 47 + .../ticket/builders/cloud/drs_replace.py | 48 + .../ticket/builders/cloud/nginx_reload.py | 53 + .../ticket/builders/cloud/nginx_replace.py | 57 + .../ticket/builders/cloud/service_apply.py | 138 + .../ticket/builders/common/__init__.py | 10 + dbm-ui/backend/ticket/builders/common/base.py | 280 + .../backend/ticket/builders/common/bigdata.py | 318 + .../ticket/builders/common/constants.py | 100 + dbm-ui/backend/ticket/builders/es/__init__.py | 10 + dbm-ui/backend/ticket/builders/es/es_apply.py | 130 + .../backend/ticket/builders/es/es_destroy.py | 36 + .../backend/ticket/builders/es/es_disable.py | 36 + .../backend/ticket/builders/es/es_enable.py | 36 + .../backend/ticket/builders/es/es_reboot.py | 38 + .../backend/ticket/builders/es/es_replace.py | 48 + .../backend/ticket/builders/es/es_scale_up.py | 99 + .../backend/ticket/builders/es/es_shrink.py | 78 + .../backend/ticket/builders/hdfs/__init__.py | 10 + .../ticket/builders/hdfs/hdfs_apply.py | 140 + .../ticket/builders/hdfs/hdfs_destroy.py | 36 + .../ticket/builders/hdfs/hdfs_disable.py | 36 + .../ticket/builders/hdfs/hdfs_enable.py | 36 + .../ticket/builders/hdfs/hdfs_reboot.py | 39 + .../ticket/builders/hdfs/hdfs_replace.py | 58 + .../ticket/builders/hdfs/hdfs_scale_up.py | 65 + .../ticket/builders/hdfs/hdfs_shrink.py | 75 + .../ticket/builders/influxdb/__init__.py | 10 + .../builders/influxdb/influxdb_apply.py | 76 + .../builders/influxdb/influxdb_destroy.py | 37 + .../builders/influxdb/influxdb_disable.py | 37 + .../builders/influxdb/influxdb_enable.py | 37 + .../builders/influxdb/influxdb_reboot.py | 39 + .../builders/influxdb/influxdb_replace.py | 47 + .../backend/ticket/builders/kafka/__init__.py | 10 + .../ticket/builders/kafka/kafka_apply.py | 134 + .../ticket/builders/kafka/kafka_destroy.py | 37 + .../ticket/builders/kafka/kafka_disable.py | 37 + .../ticket/builders/kafka/kafka_enable.py | 37 + .../ticket/builders/kafka/kafka_reboot.py | 39 + .../ticket/builders/kafka/kafka_replace.py | 49 + .../ticket/builders/kafka/kafka_scale_up.py | 66 + .../ticket/builders/kafka/kafka_shrink.py | 77 + .../backend/ticket/builders/mysql/__init__.py | 10 + 
dbm-ui/backend/ticket/builders/mysql/base.py | 194 + .../ticket/builders/mysql/mysql_add_slave.py | 88 + .../builders/mysql/mysql_authorize_rules.py | 83 + .../ticket/builders/mysql/mysql_checksum.py | 187 + .../builders/mysql/mysql_clone_rules.py | 71 + .../builders/mysql/mysql_data_repair.py | 51 + .../builders/mysql/mysql_fixpoint_rollback.py | 81 + .../ticket/builders/mysql/mysql_flashback.py | 68 + .../ticket/builders/mysql/mysql_ha_apply.py | 147 + .../ticket/builders/mysql/mysql_ha_backup.py | 58 + .../ticket/builders/mysql/mysql_ha_clear.py | 63 + .../ticket/builders/mysql/mysql_ha_destroy.py | 36 + .../ticket/builders/mysql/mysql_ha_disable.py | 36 + .../ticket/builders/mysql/mysql_ha_enable.py | 36 + .../builders/mysql/mysql_ha_full_backup.py | 47 + .../ticket/builders/mysql/mysql_ha_rename.py | 84 + .../builders/mysql/mysql_import_sqlfile.py | 208 + .../builders/mysql/mysql_master_fail_over.py | 44 + .../mysql/mysql_master_slave_switch.py | 67 + .../builders/mysql/mysql_migrate_cluster.py | 90 + .../ticket/builders/mysql/mysql_partition.py | 71 + .../ticket/builders/mysql/mysql_proxy_add.py | 85 + .../builders/mysql/mysql_proxy_switch.py | 100 + .../mysql/mysql_restore_local_slave.py | 67 + .../builders/mysql/mysql_restore_slave.py | 70 + .../builders/mysql/mysql_single_apply.py | 205 + .../builders/mysql/mysql_single_destroy.py | 32 + .../builders/mysql/mysql_single_disable.py | 32 + .../builders/mysql/mysql_single_enable.py | 32 + .../ticket/builders/pulsar/__init__.py | 10 + .../ticket/builders/pulsar/pulsar_apply.py | 94 + .../ticket/builders/pulsar/pulsar_destroy.py | 37 + .../ticket/builders/pulsar/pulsar_disable.py | 37 + .../ticket/builders/pulsar/pulsar_enable.py | 37 + .../ticket/builders/pulsar/pulsar_reboot.py | 39 + .../ticket/builders/pulsar/pulsar_replace.py | 48 + .../ticket/builders/pulsar/pulsar_scale_up.py | 48 + .../ticket/builders/pulsar/pulsar_shrink.py | 85 + .../backend/ticket/builders/redis/__init__.py | 10 + dbm-ui/backend/ticket/builders/redis/base.py | 102 + .../ticket/builders/redis/redis_backup.py | 88 + .../ticket/builders/redis/redis_close.py | 45 + .../builders/redis/redis_cluster_apply.py | 276 + .../ticket/builders/redis/redis_destroy.py | 51 + .../ticket/builders/redis/redis_key_delete.py | 108 + .../builders/redis/redis_key_extract.py | 87 + .../ticket/builders/redis/redis_open.py | 42 + .../ticket/builders/redis/redis_purge.py | 61 + .../builders/spider/spider_partition.py | 42 + dbm-ui/backend/ticket/constants.py | 345 + dbm-ui/backend/ticket/contexts.py | 31 + dbm-ui/backend/ticket/exceptions.py | 48 + dbm-ui/backend/ticket/exclusive_ticket.xlsx | Bin 0 -> 23799 bytes .../backend/ticket/flow_manager/__init__.py | 10 + dbm-ui/backend/ticket/flow_manager/base.py | 244 + .../backend/ticket/flow_manager/delivery.py | 95 + dbm-ui/backend/ticket/flow_manager/inner.py | 239 + dbm-ui/backend/ticket/flow_manager/itsm.py | 101 + dbm-ui/backend/ticket/flow_manager/manager.py | 96 + dbm-ui/backend/ticket/flow_manager/pause.py | 74 + .../backend/ticket/flow_manager/resource.py | 255 + dbm-ui/backend/ticket/flow_manager/timer.py | 110 + .../backend/ticket/migrations/0001_initial.py | 279 + dbm-ui/backend/ticket/migrations/__init__.py | 10 + dbm-ui/backend/ticket/mock_data.py | 183 + dbm-ui/backend/ticket/models/__init__.py | 13 + dbm-ui/backend/ticket/models/ticket.py | 311 + .../ticket/models/ticket_result_relation.py | 29 + dbm-ui/backend/ticket/models/todo.py | 86 + dbm-ui/backend/ticket/readme.md | 20 + dbm-ui/backend/ticket/serializers.py | 241 + 
dbm-ui/backend/ticket/tasks/ticket_tasks.py | 235 + dbm-ui/backend/ticket/todos/__init__.py | 110 + dbm-ui/backend/ticket/todos/pause_todo.py | 39 + dbm-ui/backend/ticket/todos/pipeline_todo.py | 62 + dbm-ui/backend/ticket/urls.py | 19 + dbm-ui/backend/ticket/views.py | 488 + dbm-ui/backend/ticket/yasg_slz.py | 13 + dbm-ui/backend/urls.py | 79 + dbm-ui/backend/utils/__init__.py | 10 + dbm-ui/backend/utils/basic.py | 181 + dbm-ui/backend/utils/batch_request.py | 173 + dbm-ui/backend/utils/cache.py | 93 + dbm-ui/backend/utils/enum.py | 66 + dbm-ui/backend/utils/env.py | 63 + dbm-ui/backend/utils/excel.py | 148 + dbm-ui/backend/utils/files.py | 151 + dbm-ui/backend/utils/http.py | 132 + dbm-ui/backend/utils/local.py | 98 + dbm-ui/backend/utils/log.py | 19 + dbm-ui/backend/utils/md5.py | 32 + dbm-ui/backend/utils/pytest.py | 38 + dbm-ui/backend/utils/redis.py | 56 + dbm-ui/backend/utils/string.py | 175 + dbm-ui/backend/utils/time.py | 124 + dbm-ui/backend/utils/validators/__init__.py | 10 + dbm-ui/backend/version_log/__init__.py | 12 + dbm-ui/backend/version_log/apps.py | 25 + dbm-ui/backend/version_log/config.py | 91 + dbm-ui/backend/version_log/decorators.py | 70 + dbm-ui/backend/version_log/middleware.py | 73 + .../version_log/migrations/0001_initial.py | 37 + .../version_log/migrations/__init__.py | 12 + dbm-ui/backend/version_log/models.py | 48 + dbm-ui/backend/version_log/urls.py | 27 + dbm-ui/backend/version_log/utils.py | 123 + dbm-ui/backend/version_log/views.py | 87 + dbm-ui/bin/build_frontend.sh | 10 + dbm-ui/bin/celery.sh | 7 + dbm-ui/bin/environ.sh | 13 + dbm-ui/bin/install_precommit.sh | 12 + dbm-ui/bin/makemigrations.sh | 4 + dbm-ui/bin/manage.sh | 7 + dbm-ui/bin/migrate.sh | 4 + dbm-ui/bin/pytest.sh | 7 + dbm-ui/blueking/component/__init__.py | 12 + dbm-ui/blueking/component/apis/__init__.py | 10 + dbm-ui/blueking/component/apis/bk_login.py | 44 + dbm-ui/blueking/component/apis/esb.py | 29 + dbm-ui/blueking/component/base.py | 111 + dbm-ui/blueking/component/client.py | 158 + dbm-ui/blueking/component/collections.py | 19 + dbm-ui/blueking/component/conf.py | 37 + dbm-ui/blueking/component/constants.py | 30 + dbm-ui/blueking/component/exceptions.py | 27 + dbm-ui/blueking/component/shortcuts.py | 66 + dbm-ui/blueking/component/utils.py | 58 + dbm-ui/config/__init__.py | 45 + dbm-ui/config/default.py | 437 + dbm-ui/config/dev.py | 76 + dbm-ui/config/prod.py | 15 + dbm-ui/config/stag.py | 39 + dbm-ui/etc/gunicorn.py | 42 + dbm-ui/frontend/.browserslistrc | 7 + dbm-ui/frontend/.editorconfig | 15 + dbm-ui/frontend/.env.production | 4 + dbm-ui/frontend/.eslintignore | 4 + dbm-ui/frontend/.eslintrc-auto-import.json | 53 + dbm-ui/frontend/.eslintrc.js | 127 + dbm-ui/frontend/.gitignore | 30 + dbm-ui/frontend/.husky/commit-msg | 5 + dbm-ui/frontend/.husky/pre-commit | 5 + dbm-ui/frontend/.npmrc | 2 + dbm-ui/frontend/.stylelintignore | 3 + dbm-ui/frontend/.stylelintrc.js | 202 + dbm-ui/frontend/README.md | 21 + dbm-ui/frontend/auto-copyright.js | 117 + dbm-ui/frontend/babel.config.js | 27 + dbm-ui/frontend/bkuiVueResolver.ts | 72 + dbm-ui/frontend/commitlint.config.js | 39 + dbm-ui/frontend/components.d.ts | 29 + dbm-ui/frontend/env.d.ts | 58 + dbm-ui/frontend/index.html | 33 + dbm-ui/frontend/package.json | 87 + dbm-ui/frontend/src/App.vue | 217 + dbm-ui/frontend/src/common/cache.ts | 53 + dbm-ui/frontend/src/common/const.ts | 312 + dbm-ui/frontend/src/common/importComps.ts | 49 + dbm-ui/frontend/src/common/regex.ts | 30 + dbm-ui/frontend/src/common/tippy.ts | 34 + 
.../src/components/app-selector/index.vue | 594 + .../src/components/app-selector/utils.ts | 25 + .../components/apply-items/BusinessItems.vue | 153 + .../src/components/apply-items/CloudItem.vue | 91 + .../components/apply-items/ClusterAlias.vue | 55 + .../components/apply-items/ClusterName.vue | 58 + .../src/components/apply-items/RegionItem.vue | 176 + .../src/components/auth/AuthComponent.tsx | 113 + .../frontend/src/components/auth/style.less | 23 + .../business-selector/BusinessSelector.vue | 154 + .../cluster-authorize/ClusterAuthorize.vue | 573 + .../cluster-common/OperationStatusTips.vue | 133 + .../cluster-common/RenderBaseInfo.vue | 99 + .../cluster-common/RenderInstanceStatus.vue | 50 + .../cluster-common/RenderNodeInstance.vue | 253 + .../cluster-common/RenderOperationTag.vue | 138 + .../cluster-common/RenderPassword.vue | 134 + .../components/cluster-common/RenderRole.vue | 56 + .../RenderSimpleClusterList.vue | 194 + .../cluster-common/RenderStatus.vue | 55 + .../big-data-host-table/HdfsHostTable.vue | 423 + .../big-data-host-table/RenderHostTable.vue | 329 + .../common/tableSetting.ts | 65 + .../es-host-table/components/EditHostNode.vue | 70 + .../es-host-table/index.vue | 444 + .../hook/useLocalPagination.ts | 83 + .../components/cluster-details/AsideList.vue | 297 + .../cluster-details/ClusterTopo.vue | 332 + .../cluster-details/common/graphData.ts | 478 + .../cluster-details/common/useRenderGraph.tsx | 349 + .../cluster-event-change/EventChange.vue | 158 + .../cluster-monitor/MonitorDashboard.vue | 131 + .../cluster-selector/ClusterSelector.vue | 612 + .../cluster-selector/CollapseMini.vue | 108 + .../src/components/cluster-selector/types.ts | 34 + .../cluster-selector/useClusterData.ts | 103 + .../src/components/cost-timer/CostTimer.vue | 45 + .../db-card-checkbox/CardCheckbox.vue | 168 + .../frontend/src/components/db-card/index.vue | 143 + .../db-collapse-table/DBCollapseTable.vue | 191 + .../frontend/src/components/db-diff/index.vue | 271 + .../src/components/db-empty/index.vue | 60 + .../frontend/src/components/db-form/index.vue | 98 + .../frontend/src/components/db-form/item.vue | 52 + .../frontend/src/components/db-icon/index.ts | 54 + .../frontend/src/components/db-icon/style.css | 6 + .../components/db-member-selector/index.vue | 135 + .../src/components/db-popconfirm/index.vue | 156 + .../src/components/db-search-select/index.vue | 146 + .../src/components/db-sideslider/index.vue | 146 + .../src/components/db-status/index.vue | 128 + .../src/components/db-table/OriginalTable.vue | 52 + .../src/components/db-table/index.vue | 314 + .../src/components/db-textarea/DbTextarea.vue | 265 + .../src/components/editable-info/index.vue | 282 + .../components/empty-status/EmptyStatus.vue | 74 + .../components/host-preview/HostPreview.vue | 249 + .../components/instance-selector/Index.vue | 169 + .../instance-selector/common/tableSettings.ts | 67 + .../instance-selector/common/types.ts | 26 + .../instance-selector/common/utils.ts | 20 + .../components/CollapseMini.vue | 103 + .../instance-selector/components/PanelTab.vue | 81 + .../components/PreviewResult.vue | 200 + .../components/RenderManualHost.vue | 256 + .../components/RenderManualInput.vue | 305 + .../components/RenderTopo.vue | 279 + .../components/RenderTopoHost.vue | 337 + .../src/components/ip-selector/IpSelector.vue | 650 + .../components/PreviewWhitelist.vue | 123 + .../src/components/layouts/Copyright.vue | 60 + .../src/components/layouts/LocaleSwitch.vue | 95 + .../frontend/src/components/layouts/Login.vue | 102 +
.../components/layouts/MainBreadcrumbs.vue | 115 + .../src/components/layouts/MainView.vue | 165 + .../components/layouts/ResourceDetection.vue | 110 + .../frontend/src/components/layouts/common.ts | 80 + .../src/components/minimap/Minimap.vue | 164 + .../components/mysql-toolbox/BatchEdit.vue | 184 + .../src/components/mysql-toolbox/Success.vue | 211 + .../components/mysql-toolbox/ToolboxTable.vue | 232 + .../components/mysql-toolbox/common/const.ts | 24 + .../src/components/permission/Dialog.vue | 106 + .../src/components/permission/Main.vue | 97 + .../components/render-host-status/Index.vue | 49 + .../render-instances/RenderInstances.vue | 289 + .../src/components/render-row/index.vue | 95 + .../src/components/smart-action/index.vue | 146 + .../src/components/vue2/bk-log/index.less | 269 + .../src/components/vue2/bk-log/index.vue | 122 + .../src/components/vue2/ip-selector/index.js | 101 + .../src/components/vue2/ip-selector/index.ts | 80 + .../components/vue2/search-select/index.vue | 221 + .../frontend/src/directives/cursor/index.less | 16 + .../frontend/src/directives/cursor/index.ts | 102 + .../frontend/src/directives/cursor/lock.svg | 43 + dbm-ui/frontend/src/directives/index.ts | 35 + .../frontend/src/directives/overflowTips.ts | 167 + dbm-ui/frontend/src/env.d.ts | 26 + .../frontend/src/helper/local-cache/index.ts | 14 + .../helper/local-cache/listColumnsCache.ts | 48 + dbm-ui/frontend/src/helper/validator/index.ts | 14 + dbm-ui/frontend/src/helper/validator/is-ip.ts | 17 + dbm-ui/frontend/src/hooks/index.ts | 31 + dbm-ui/frontend/src/hooks/useApplyBase.ts | 115 + dbm-ui/frontend/src/hooks/useBeforeClose.ts | 41 + dbm-ui/frontend/src/hooks/useCopy.ts | 53 + dbm-ui/frontend/src/hooks/useDebouncedRef.ts | 32 + .../src/hooks/useDefaultPagination.ts | 37 + dbm-ui/frontend/src/hooks/useFormItem.ts | 21 + dbm-ui/frontend/src/hooks/useInfo.tsx | 150 + dbm-ui/frontend/src/hooks/useInfoWithIcon.tsx | 56 + dbm-ui/frontend/src/hooks/useListeners.ts | 31 + .../frontend/src/hooks/useLocalPagination.ts | 82 + dbm-ui/frontend/src/hooks/useModelProvider.ts | 67 + .../frontend/src/hooks/useSQLTaskNotify.tsx | 73 + dbm-ui/frontend/src/hooks/useStickyFooter.ts | 68 + .../frontend/src/hooks/useTableMaxHeight.ts | 46 + dbm-ui/frontend/src/hooks/useTableSettings.ts | 106 + dbm-ui/frontend/src/hooks/useTicketMessage.ts | 42 + dbm-ui/frontend/src/hooks/useUrlSearach.ts | 59 + dbm-ui/frontend/src/images/403.png | Bin 0 -> 9728 bytes dbm-ui/frontend/src/images/404.png | Bin 0 -> 9961 bytes dbm-ui/frontend/src/images/500.png | Bin 0 -> 11143 bytes .../frontend/src/images/architecture-01.png | Bin 0 -> 43855 bytes .../frontend/src/images/architecture-02.png | Bin 0 -> 10781 bytes dbm-ui/frontend/src/images/building.png | Bin 0 -> 8029 bytes dbm-ui/frontend/src/images/empty.png | Bin 0 -> 17149 bytes dbm-ui/frontend/src/images/es.png | Bin 0 -> 2429 bytes dbm-ui/frontend/src/images/flow-loading.png | Bin 0 -> 5179 bytes dbm-ui/frontend/src/images/kafka.png | Bin 0 -> 2393 bytes dbm-ui/frontend/src/images/loading.svg | 1 + dbm-ui/frontend/src/images/logo.png | Bin 0 -> 6849 bytes dbm-ui/frontend/src/images/mongo-db.png | Bin 0 -> 2592 bytes dbm-ui/frontend/src/images/monitoring.png | Bin 0 -> 95 bytes dbm-ui/frontend/src/images/mysql.png | Bin 0 -> 2443 bytes dbm-ui/frontend/src/images/nav-log.svg | 17 + dbm-ui/frontend/src/images/redis.png | Bin 0 -> 3439 bytes dbm-ui/frontend/src/images/tendis-cache.png | Bin 0 -> 44218 bytes dbm-ui/frontend/src/images/tendis-ssd.png | Bin 0 -> 44264 bytes 
dbm-ui/frontend/src/images/tendisplus.png | Bin 0 -> 41738 bytes dbm-ui/frontend/src/locales/en.json | 1342 ++ dbm-ui/frontend/src/locales/index.ts | 47 + dbm-ui/frontend/src/locales/zh-cn.json | 1380 ++ dbm-ui/frontend/src/main.ts | 80 + dbm-ui/frontend/src/router/index.ts | 94 + .../frontend/src/router/routerInterceptor.ts | 79 + dbm-ui/frontend/src/services/clusters.ts | 122 + dbm-ui/frontend/src/services/common.ts | 106 + dbm-ui/frontend/src/services/configs.ts | 80 + dbm-ui/frontend/src/services/dbResource.ts | 37 + dbm-ui/frontend/src/services/es.ts | 81 + dbm-ui/frontend/src/services/eventSwitch.ts | 27 + .../frontend/src/services/fixpointRollback.ts | 39 + dbm-ui/frontend/src/services/hdfs.ts | 84 + dbm-ui/frontend/src/services/http/index.ts | 168 + dbm-ui/frontend/src/services/influxdb.ts | 32 + dbm-ui/frontend/src/services/influxdbGroup.ts | 49 + dbm-ui/frontend/src/services/ip.ts | 80 + dbm-ui/frontend/src/services/kafka.ts | 82 + .../src/services/model/es/es-instance.ts | 109 + .../frontend/src/services/model/es/es-node.ts | 61 + .../src/services/model/es/es-password.ts | 26 + dbm-ui/frontend/src/services/model/es/es.ts | 171 + .../model/fixpoint-rollback/backup-log.ts | 36 + .../src/services/model/hdfs/hdfs-instance.ts | 109 + .../src/services/model/hdfs/hdfs-node.ts | 59 + .../src/services/model/hdfs/hdfs-password.ts | 26 + .../frontend/src/services/model/hdfs/hdfs.ts | 169 + .../model/influxdb/influxdbInstance.ts | 144 + .../services/model/kafka/kafka-instance.ts | 109 + .../src/services/model/kafka/kafka-node.ts | 51 + .../services/model/kafka/kafka-password.ts | 26 + .../src/services/model/kafka/kafka.ts | 167 + .../services/model/pulsar/pulsar-instance.ts | 109 + .../src/services/model/pulsar/pulsar-node.ts | 66 + .../services/model/pulsar/pulsar-password.ts | 28 + .../src/services/model/pulsar/pulsar.ts | 180 + .../model/resource-spec/resourceSpec.ts | 57 + .../model/sql-import/grammar-check.ts | 78 + .../model/sql-import/semantic-data.ts | 56 + .../model/sql-import/user-semantic-task.ts | 45 + dbm-ui/frontend/src/services/mysqlCluster.ts | 64 + dbm-ui/frontend/src/services/permission.ts | 87 + dbm-ui/frontend/src/services/pulsar.ts | 82 + dbm-ui/frontend/src/services/resourceSpec.ts | 48 + dbm-ui/frontend/src/services/sqlImport.ts | 79 + dbm-ui/frontend/src/services/staffSetting.ts | 29 + dbm-ui/frontend/src/services/storage.ts | 20 + dbm-ui/frontend/src/services/taskflow.ts | 68 + dbm-ui/frontend/src/services/ticket.tsx | 200 + .../frontend/src/services/types/clusters.ts | 387 + dbm-ui/frontend/src/services/types/common.ts | 170 + dbm-ui/frontend/src/services/types/configs.ts | 244 + dbm-ui/frontend/src/services/types/es.ts | 61 + .../src/services/types/eventSwitch.ts | 46 + dbm-ui/frontend/src/services/types/hdfs.ts | 55 + .../src/services/types/influxdbGroup.ts | 23 + dbm-ui/frontend/src/services/types/ip.ts | 132 + dbm-ui/frontend/src/services/types/kafka.ts | 56 + .../frontend/src/services/types/permission.ts | 204 + .../src/services/types/staffSetting.ts | 23 + .../frontend/src/services/types/taskflow.ts | 177 + dbm-ui/frontend/src/services/types/ticket.ts | 758 + .../src/services/types/versionFiles.ts | 95 + .../frontend/src/services/types/whitelist.ts | 31 + dbm-ui/frontend/src/services/versionFiles.ts | 41 + dbm-ui/frontend/src/services/whitelist.ts | 37 + dbm-ui/frontend/src/stores/globalBizs.ts | 104 + dbm-ui/frontend/src/stores/index.ts | 19 + dbm-ui/frontend/src/stores/mainView.ts | 29 + dbm-ui/frontend/src/stores/useMenu.ts | 65 + 
.../frontend/src/stores/useRelatedSystem.ts | 42 + dbm-ui/frontend/src/stores/useSQLTaskCount.ts | 33 + dbm-ui/frontend/src/stores/useUserProfile.ts | 67 + .../src/stores/useUserSemanticTasks.ts | 62 + dbm-ui/frontend/src/styles/applyInstance.less | 67 + dbm-ui/frontend/src/styles/base.less | 119 + dbm-ui/frontend/src/styles/common.less | 627 + dbm-ui/frontend/src/styles/iconCool.less | 33 + dbm-ui/frontend/src/styles/mixins.less | 27 + dbm-ui/frontend/src/styles/reset.less | 328 + dbm-ui/frontend/src/styles/tippy.less | 161 + dbm-ui/frontend/src/styles/variables.less | 51 + dbm-ui/frontend/src/types/auto-imports.d.ts | 69 + dbm-ui/frontend/src/types/bkui-vue.ts | 42 + dbm-ui/frontend/src/types/index.d.ts | 40 + dbm-ui/frontend/src/types/router.d.ts | 35 + dbm-ui/frontend/src/types/vite-env.d.ts | 17 + dbm-ui/frontend/src/utils/bytePretty.ts | 28 + dbm-ui/frontend/src/utils/classes.ts | 22 + dbm-ui/frontend/src/utils/deepMerge.ts | 35 + dbm-ui/frontend/src/utils/dom.ts | 88 + dbm-ui/frontend/src/utils/encode.ts | 48 + dbm-ui/frontend/src/utils/execCopy.ts | 28 + dbm-ui/frontend/src/utils/generateId.ts | 29 + .../frontend/src/utils/getCostTimeDisplay.ts | 55 + .../frontend/src/utils/getMenuListSearch.ts | 91 + .../src/utils/getSearchSelectorParams.ts | 29 + dbm-ui/frontend/src/utils/index.ts | 31 + dbm-ui/frontend/src/utils/isObject.ts | 21 + dbm-ui/frontend/src/utils/leaveConfirm.ts | 38 + dbm-ui/frontend/src/utils/makeMap.ts | 20 + dbm-ui/frontend/src/utils/message.ts | 38 + dbm-ui/frontend/src/utils/random.ts | 16 + dbm-ui/frontend/src/utils/recentDays.ts | 29 + dbm-ui/frontend/src/utils/url.ts | 98 + dbm-ui/frontend/src/utils/vNodeToHtml.ts | 54 + .../db-configure/business/ConfigBind.vue | 201 + .../db-configure/business/ConfigEdit.vue | 389 + .../views/db-configure/business/Content.vue | 285 + .../business/biz/ConfigDatabase.vue | 228 + .../business/biz/ConfigDetails.vue | 193 + .../db-configure/business/biz/ConfigInfo.vue | 61 + .../business/cluster/ConfigDetails.vue | 156 + .../business/cluster/ConfigInfo.vue | 86 + .../db-configure/business/common/types.ts | 41 + .../business/components/ConfigEmpty.vue | 84 + .../business/hooks/useBaseDetails.ts | 138 + .../business/hooks/useTreeData.ts | 214 + .../src/views/db-configure/business/index.vue | 68 + .../business/module/ConfigDetails.vue | 110 + .../business/module/ConfigInfo.vue | 92 + .../src/views/db-configure/common/const.ts | 89 + .../src/views/db-configure/common/types.ts | 45 + .../db-configure/components/DetailsBase.vue | 209 + .../db-configure/components/DiffCompare.vue | 129 + .../db-configure/components/EditBase.vue | 249 + .../components/ParameterTable.vue | 718 + .../db-configure/components/PublishRecord.vue | 281 + .../db-configure/components/RangeInput.vue | 198 + .../db-configure/components/ReadonlyTable.vue | 179 + .../views/db-configure/components/TopTab.vue | 79 + .../src/views/db-configure/hooks/useDiff.ts | 102 + .../db-configure/hooks/useLevelParams.ts | 72 + .../platform/ConfigureDetails.vue | 213 + .../db-configure/platform/ConfigureEdit.vue | 292 + .../src/views/db-configure/platform/index.vue | 220 + .../frontend/src/views/db-configure/routes.ts | 91 + .../src/views/deployment-plan/Index.vue | 16 + .../src/views/deployment-plan/list/Index.vue | 146 + .../list/components/Operation.vue | 114 + .../src/views/deployment-plan/routes.ts | 47 + dbm-ui/frontend/src/views/es-manage/Index.vue | 16 + .../src/views/es-manage/apply/Index.vue | 528 + .../src/views/es-manage/common/Expansion.vue | 222 + 
.../es-manage/common/common/ListNode.vue | 327 +
.../views/es-manage/common/replace/Index.vue | 278 +
.../replace/components/RenderNodeHostList.vue | 249 +
.../views/es-manage/common/shrink/Index.vue | 242 +
.../src/views/es-manage/detail/Index.vue | 116 +
.../es-manage/detail/components/BaseInfo.vue | 68 +
.../detail/components/node-list/Index.vue | 605 +
.../node-list/components/InstanceDetail.vue | 292 +
.../es-manage/list/hooks/useTableSetting.ts | 100 +
.../src/views/es-manage/list/index.vue | 557 +
dbm-ui/frontend/src/views/es-manage/routes.ts | 57 +
.../components/SwtichEventDetatils.vue | 88 +
.../event-center/pages/DBHASwitchEvents.vue | 368 +
.../frontend/src/views/event-center/routes.ts | 33 +
dbm-ui/frontend/src/views/exception/404.vue | 32 +
.../src/views/exception/BizPermission.vue | 69 +
dbm-ui/frontend/src/views/exception/Error.vue | 23 +
.../frontend/src/views/hdfs-manage/Index.vue | 16 +
.../src/views/hdfs-manage/apply/Index.vue | 499 +
.../views/hdfs-manage/common/Expansion.vue | 161 +
.../hdfs-manage/common/common/ListNode.vue | 264 +
.../hdfs-manage/common/replace/Index.vue | 180 +
.../replace/components/RenderNodeHostList.vue | 214 +
.../views/hdfs-manage/common/shrink/Index.vue | 214 +
.../src/views/hdfs-manage/detail/Index.vue | 116 +
.../detail/components/BaseInfo.vue | 68 +
.../detail/components/node-list/Index.vue | 573 +
.../node-list/components/InstanceDetail.vue | 291 +
.../list/components/ClusterSettings.vue | 152 +
.../list/components/SettingsMonacoEditor.vue | 60 +
.../hdfs-manage/list/hooks/useTableSetting.ts | 101 +
.../src/views/hdfs-manage/list/index.vue | 606 +
.../frontend/src/views/hdfs-manage/routes.ts | 56 +
.../src/views/influxdb-manage/Index.vue | 59 +
.../influxdb-manage/apply/common/base.ts | 36 +
.../apply/components/GroupItem.vue | 255 +
.../src/views/influxdb-manage/apply/index.vue | 310 +
.../influxdb-manage/components/GroupInput.vue | 99 +
.../influxdb-manage/components/GroupList.vue | 319 +
.../components/InstanceList.vue | 846 +
.../components/replace/Index.vue | 188 +
.../replace/components/RenderNodeHostList.vue | 333 +
.../influxdb-manage/details/AsideList.vue | 225 +
.../views/influxdb-manage/details/Details.vue | 225 +
.../src/views/influxdb-manage/routes.ts | 54 +
.../frontend/src/views/kafka-manage/Index.vue | 16 +
.../src/views/kafka-manage/apply/Index.vue | 446 +
.../views/kafka-manage/common/Expansion.vue | 161 +
.../kafka-manage/common/common/ListNode.vue | 264 +
.../kafka-manage/common/replace/Index.vue | 211 +
.../replace/components/RenderNodeHostList.vue | 212 +
.../kafka-manage/common/shrink/Index.vue | 222 +
.../src/views/kafka-manage/detail/Index.vue | 114 +
.../detail/components/BaseInfo.vue | 68 +
.../detail/components/node-list/Index.vue | 577 +
.../node-list/components/InstanceDetail.vue | 291 +
.../src/views/kafka-manage/list/Index.vue | 546 +
.../list/hooks/useTableSetting.ts | 91 +
.../frontend/src/views/kafka-manage/routes.ts | 56 +
.../src/views/main-views/common/const.ts | 22 +
.../main-views/common/getRouteChildren.ts | 79 +
.../main-views/components/MenuToggleIcon.vue | 30 +
.../src/views/main-views/hooks/useMenuInfo.ts | 49 +
.../src/views/main-views/pages/Database.vue | 293 +
.../src/views/main-views/pages/Platform.vue | 148 +
.../src/views/main-views/pages/Services.vue | 81 +
.../frontend/src/views/main-views/routes.ts | 108 +
.../src/views/mission/common/const.ts | 21 +
.../src/views/mission/common/graphCanvas.ts | 158 +
.../src/views/mission/common/graphRender.tsx | 187 +
.../src/views/mission/common/types.ts | 34 +
.../src/views/mission/common/utils.ts | 487 +
.../src/views/mission/components/NodeLog.vue | 410 +
.../mission/components/RedisResultFiles.vue | 381 +
.../mission/components/RetrySelector.vue | 252 +
.../src/views/mission/hooks/useFetchData.ts | 101 +
.../src/views/mission/pages/Details.vue | 1126 ++
.../src/views/mission/pages/index.vue | 318 +
dbm-ui/frontend/src/views/mission/routes.ts | 42 +
.../src/views/mysql/apply/ApplyMySQL.vue | 774 +
.../src/views/mysql/apply/CreateModule.vue | 464 +
.../mysql/apply/components/BatchEdit.vue | 256 +
.../apply/components/MySQLDomainTable.vue | 223 +
.../mysql/apply/components/PreviewTable.vue | 279 +
.../views/mysql/apply/hooks/useMysqlData.ts | 210 +
.../src/views/mysql/checksum/Index.vue | 1053 ++
.../src/views/mysql/checksum/common/types.ts | 25 +
.../mysql/checksum/components/BatchInput.vue | 318 +
.../edit-field/ClusterWithRelateCluster.vue | 452 +
.../views/mysql/common/edit-field/DbName.vue | 160 +
.../mysql/common/edit-field/TableName.vue | 154 +
.../src/views/mysql/common/edit/DateTime.vue | 162 +
.../src/views/mysql/common/edit/Input.vue | 340 +
.../src/views/mysql/common/edit/Select.vue | 411 +
.../src/views/mysql/common/edit/Tag.vue | 145 +
.../mysql/common/edit/hooks/useValidtor.ts | 90 +
.../views/mysql/common/hooks/useTaskCount.ts | 138 +
.../mysql/common/render-table/HeadColumn.vue | 78 +
.../views/mysql/common/render-table/Index.vue | 127 +
.../render-table/hooks/useColumnResize.ts | 150 +
.../mysql/common/ticket-success/Index.vue | 157 +
.../src/views/mysql/db-backup/index.vue | 48 +
.../mysql/db-backup/pages/page1/Index.vue | 160 +
.../pages/page1/components/TargetCluster.vue | 194 +
.../mysql/db-backup/pages/page2/Index.vue | 91 +
.../src/views/mysql/db-clear/common/const.ts | 32 +
.../src/views/mysql/db-clear/common/types.ts | 23 +
.../mysql/db-clear/components/BatchInput.vue | 268 +
.../src/views/mysql/db-clear/index.vue | 825 +
.../mysql/db-rename/components/BatchInput.vue | 283 +
.../src/views/mysql/db-rename/index.vue | 564 +
.../src/views/mysql/db-table-backup/index.vue | 48 +
.../db-table-backup/pages/page1/Index.vue | 210 +
.../pages/page1/components/BatchEntry.vue | 385 +
.../page1/components/RenderData/Index.vue | 77 +
.../RenderData/RenderBackupSource.vue | 86 +
.../components/RenderData/RenderCluster.vue | 176 +
.../components/RenderData/RenderDbName.vue | 146 +
.../components/RenderData/RenderHost.vue | 344 +
.../components/RenderData/RenderTableName.vue | 140 +
.../pages/page1/components/RenderData/Row.vue | 225 +
.../db-table-backup/pages/page2/Index.vue | 92 +
.../views/mysql/details/HaInstanceDetails.vue | 414 +
.../src/views/mysql/details/MySQLDetails.vue | 305 +
.../mysql/details/hooks/useInstancesData.ts | 90 +
.../views/mysql/details/hooks/useListData.ts | 175 +
.../src/views/mysql/flashback/Index.vue | 48 +
.../mysql/flashback/pages/page1/Index.vue | 206 +
.../pages/page1/components/BatchEntry.vue | 404 +
.../page1/components/RenderData/Index.vue | 77 +
.../components/RenderData/RenderCluster.vue | 175 +
.../components/RenderData/RenderDbName.vue | 139 +
.../components/RenderData/RenderStartTime.vue | 88 +
.../components/RenderData/RenderTableName.vue | 134 +
.../pages/page1/components/RenderData/Row.vue | 224 +
.../mysql/flashback/pages/page2/Index.vue | 91 +
.../src/views/mysql/list/HaInstanceList.vue | 335 +
.../frontend/src/views/mysql/list/HaList.vue | 720 +
.../src/views/mysql/list/SingleList.vue | 683 +
.../list/components/MySQLExcelAuthorize.vue | 286 +
.../list/components/OperationStatusTips.vue | 138 +
.../list/components/RenderOperationTag.vue | 158 +
.../src/views/mysql/master-failover/index.vue | 48 +
.../master-failover/pages/page1/Index.vue | 241 +
.../pages/page1/components/BatchEntry.vue | 341 +
.../page1/components/RenderData/Index.vue | 67 +
.../components/RenderData/RenderCluster.vue | 103 +
.../components/RenderData/RenderMaster.vue | 152 +
.../components/RenderData/RenderSlave.vue | 131 +
.../pages/page1/components/RenderData/Row.vue | 183 +
.../master-failover/pages/page2/Index.vue | 92 +
.../views/mysql/master-slave-clone/index.vue | 48 +
.../master-slave-clone/pages/page1/Index.vue | 209 +
.../pages/page1/components/BatchEntry.vue | 350 +
.../page1/components/RenderData/Index.vue | 63 +
.../RenderData/RenderMasterSlaveHost.vue | 266 +
.../pages/page1/components/RenderData/Row.vue | 188 +
.../master-slave-clone/pages/page2/Index.vue | 92 +
.../views/mysql/master-slave-swap/index.vue | 48 +
.../master-slave-swap/pages/page1/Index.vue | 239 +
.../pages/page1/components/BatchEntry.vue | 341 +
.../page1/components/RenderData/Index.vue | 68 +
.../components/RenderData/RenderCluster.vue | 104 +
.../components/RenderData/RenderMaster.vue | 149 +
.../components/RenderData/RenderSlave.vue | 127 +
.../pages/page1/components/RenderData/Row.vue | 182 +
.../master-slave-swap/pages/page2/Index.vue | 92 +
.../views/mysql/permission/common/const.ts | 36 +
.../views/mysql/permission/common/types.ts | 33 +
.../permission/components/AccountDialog.vue | 412 +
.../permission/components/CreateRule.vue | 404 +
.../permission/hooks/usePermissionRules.ts | 52 +
.../src/views/mysql/permission/index.vue | 458 +
.../components/BatchInput.vue | 284 +
.../mysql/privilege-clone-client/index.vue | 514 +
.../components/BatchInput.vue | 283 +
.../mysql/privilege-clone-inst/index.vue | 463 +
.../src/views/mysql/proxy-add/index.vue | 48 +
.../mysql/proxy-add/pages/page1/Index.vue | 205 +
.../pages/page1/components/BatchEntry.vue | 353 +
.../page1/components/RenderData/Index.vue | 64 +
.../components/RenderData/RenderProxy.vue | 123 +
.../pages/page1/components/RenderData/Row.vue | 183 +
.../mysql/proxy-add/pages/page2/Index.vue | 92 +
.../src/views/mysql/proxy-replace/index.vue | 48 +
.../mysql/proxy-replace/pages/page1/Index.vue | 219 +
.../pages/page1/components/BatchEntry.vue | 346 +
.../page1/components/RenderData/Index.vue | 63 +
.../RenderData/RenderOriginalProxy.vue | 430 +
.../RenderData/RenderTargetProxyIp.vue | 147 +
.../pages/page1/components/RenderData/Row.vue | 173 +
.../mysql/proxy-replace/pages/page2/Index.vue | 92 +
.../src/views/mysql/rollback/Index.vue | 48 +
.../mysql/rollback/pages/page1/Index.vue | 213 +
.../pages/page1/components/BatchEntry.vue | 460 +
.../page1/components/RenderData/Index.vue | 88 +
.../components/RenderData/RenderBackup.vue | 85 +
.../components/RenderData/RenderCluster.vue | 244 +
.../components/RenderData/RenderDbName.vue | 139 +
.../components/RenderData/RenderHost.vue | 140 +
.../components/RenderData/RenderMode.vue | 255 +
.../components/RenderData/RenderTableName.vue | 134 +
.../pages/page1/components/RenderData/Row.vue | 269 +
.../mysql/rollback/pages/page2/Index.vue | 92 +
dbm-ui/frontend/src/views/mysql/routes.ts | 381 +
.../src/views/mysql/slave-add/common/types.ts | 29 +
.../mysql/slave-add/components/BatchInput.vue | 287 +
.../components/ClusterRelatedInput.vue | 220 +
.../src/views/mysql/slave-add/index.vue | 557 +
.../slave-rebuild/components/BatchInput.vue | 280 +
.../src/views/mysql/slave-rebuild/index.vue | 502 +
.../src/views/mysql/sql-execute/index.vue | 51 +
.../mysql/sql-execute/steps/step1/Index.vue | 243 +
.../steps/step1/components/ExecuteMode.vue | 177 +
.../steps/step1/components/TargetCluster.vue | 203 +
.../steps/step1/components/TaskTips.vue | 152 +
.../steps/step1/components/backup/Index.vue | 113 +
.../components/backup/RenderData/Index.vue | 38 +
.../backup/RenderData/RenderBackupSource.vue | 87 +
.../components/backup/RenderData/Row.vue | 182 +
.../steps/step1/components/sql-file/Index.vue | 107 +
.../components/sql-file/editor/Index.vue | 233 +
.../sql-file/editor/MessageList.vue | 184 +
.../components/sql-file/local-file/Index.vue | 389 +
.../sql-file/local-file/SqlFileList.vue | 158 +
.../local-file/components/CheckError.vue | 45 +
.../local-file/components/CheckSuccess.vue | 38 +
.../local-file/components/FileList.vue | 280 +
.../sql-file/manual-input/Index.vue | 224 +
.../components/SyntaxChecking.vue | 36 +
.../manual-input/components/SyntaxError.vue | 34 +
.../manual-input/components/SyntaxSuccess.vue | 34 +
.../steps/step1/components/sql-file/utils.ts | 15 +
.../step1/components/target-db/Index.vue | 95 +
.../components/target-db/RenderData/Index.vue | 39 +
.../target-db/RenderData/RenderDbName.vue | 141 +
.../components/target-db/RenderData/Row.vue | 164 +
.../mysql/sql-execute/steps/step2/Index.vue | 375 +
.../steps/step2/components/FailedTips.vue | 59 +
.../steps/step2/components/PendingTips.vue | 55 +
.../steps/step2/components/RenderLog.vue | 140 +
.../steps/step2/components/SuccessTips.vue | 80 +
.../components/render-file-list/FileItem.vue | 115 +
.../components/render-file-list/Index.vue | 100 +
.../step2/components/render-status/Failed.vue | 57 +
.../components/render-status/Pending.vue | 54 +
.../components/render-status/Success.vue | 78 +
.../steps/step2/hooks/useFlowStatus.ts | 64 +
.../sql-execute/steps/step2/hooks/useLog.ts | 129 +
.../mysql/sql-execute/steps/step3/Index.vue | 107 +
.../src/views/mysql/toolbox/common/menus.ts | 149 +
.../mysql/toolbox/components/TaskCount.vue | 141 +
.../toolbox/components/ToolboxContent.vue | 53 +
.../mysql/toolbox/components/ToolboxSide.vue | 448 +
.../src/views/mysql/toolbox/index.vue | 69 +
.../views/password-policy/PasswordPolicy.vue | 226 +
.../src/views/password-policy/routes.ts | 37 +
.../src/views/pulsar-manage/Index.vue | 16 +
.../views/pulsar-manage/apply/common/base.ts | 45 +
.../src/views/pulsar-manage/apply/index.vue | 474 +
.../pulsar-manage/common/expansion/Index.vue | 312 +
.../common/expansion/components/NodeList.vue | 134 +
.../expansion/components/RenderNode.vue | 319 +
.../pulsar-manage/common/replace/Index.vue | 303 +
.../replace/components/RenderNodeHostList.vue | 331 +
.../pulsar-manage/common/shrink/Index.vue | 350 +
.../common/shrink/components/NodeList.vue | 126 +
.../common/shrink/components/RenderNode.vue | 324 +
.../components/RenderOriginalHostList.vue | 210 +
.../src/views/pulsar-manage/detail/Index.vue | 117 +
.../detail/components/BaseInfo.vue | 63 +
.../detail/components/node-list/Index.vue | 595 +
.../node-list/components/InstanceDetail.vue | 282 +
.../src/views/pulsar-manage/list/Index.vue | 522 +
.../list/components/ManagerPassword.vue | 183 +
.../list/hooks/useTableSetting.ts | 82 +
.../src/views/pulsar-manage/routes.ts | 56 +
.../src/views/redis/apply/ApplyRedis.vue | 647 +
.../src/views/redis/apply/common/const.ts | 68 +
.../frontend/src/views/redis/common/types.ts | 31 +
.../src/views/redis/details/Details.vue | 317 +
.../src/views/redis/hooks/useRedisData.ts | 93 +
dbm-ui/frontend/src/views/redis/list/List.vue | 900 +
.../views/redis/list/components/Backup.vue | 364 +
.../views/redis/list/components/BatchEdit.vue | 175 +
.../redis/list/components/BatchEditKeys.vue | 177 +
.../redis/list/components/ClusterPassword.vue | 179 +
.../redis/list/components/DeleteKeys.vue | 500 +
.../redis/list/components/ExtractKeys.vue | 429 +
.../list/components/OperationStatusTips.vue | 134 +
.../src/views/redis/list/components/Purge.vue | 290 +
.../list/components/RenderOperationTag.vue | 152 +
dbm-ui/frontend/src/views/redis/routes.ts | 52 +
.../src/views/resource-pool/Index.vue | 16 +
.../src/views/resource-pool/list/Index.vue | 176 +
.../list/components/export-host/Index.vue | 37 +
.../list/components/search-box/Index.vue | 87 +
.../components/com-factory/AgentStatus.vue | 39 +
.../components/com-factory/City.vue | 52 +
.../search-box/components/com-factory/Cpu.vue | 33 +
.../components/com-factory/DeviceClass.vue | 28 +
.../components/com-factory/Disk.vue | 33 +
.../components/com-factory/DiskType.vue | 36 +
.../components/com-factory/ForBizs.vue | 26 +
.../components/com-factory/Hosts.vue | 24 +
.../components/com-factory/Index.vue | 72 +
.../search-box/components/com-factory/Mem.vue | 33 +
.../components/com-factory/MountPoint.vue | 36 +
.../components/com-factory/ResourceTypes.vue | 24 +
.../components/com-factory/Subzones.vue | 54 +
.../search-box/components/field-config.ts | 70 +
.../components/field-input/Index.vue | 161 +
.../field-input/components/SearchItem.vue | 78 +
.../search-box/components/field-tag/Index.vue | 72 +
.../components/field-tag/ValueTag.vue | 86 +
.../src/views/resource-pool/routes.ts | 47 +
.../resource-spec/components/SpecCreate.vue | 304 +
.../resource-spec/components/SpecList.vue | 346 +
.../components/spec-form-item/SpecCPU.vue | 109 +
.../components/spec-form-item/SpecDevice.vue | 78 +
.../components/spec-form-item/SpecMem.vue | 109 +
.../components/spec-form-item/SpecStorage.vue | 221 +
.../spec-form-item/specFormItem.less | 53 +
.../src/views/resource-spec/pages/Index.vue | 226 +
.../src/views/resource-spec/routes.ts | 33 +
.../components/ApplyCollapse.vue | 88 +
.../src/views/service-apply/index.vue | 298 +
.../src/views/service-apply/routes.ts | 33 +
.../src/views/staff-setting/index.vue | 178 +
.../src/views/staff-setting/routes.ts | 50 +
.../src/views/tickets/common/types.ts | 39 +
.../src/views/tickets/common/utils.ts | 101 +
.../components/DetailsClusterOperation.vue | 90 +
.../tickets/components/DetailsTable.less | 35 +
.../views/tickets/components/FlowContent.vue | 173 +
.../components/FlowContentInnerFlow.vue | 155 +
.../tickets/components/FlowContentTodo.vue | 71 +
.../src/views/tickets/components/FlowIcon.vue | 120 +
.../tickets/components/TicketDetails.vue | 414 +
.../bigdata/BigDataExpansionCapacity.vue | 118 +
.../components/bigdata/BigDataReboot.vue | 93 +
.../components/bigdata/BigDataReplace.vue | 158 +
.../tickets/components/bigdata/DetailsES.vue | 176 +
.../components/bigdata/DetailsHDFS.vue | 159 +
.../components/bigdata/DetailsInfluxDB.vue | 151 +
.../components/bigdata/DetailsKafka.vue | 146 +
.../components/bigdata/DetailsPulsar.vue | 207 +
.../influxdb/InfluxdbOperations.vue | 58 +
.../components/influxdb/InfluxdbReplace.vue | 66 +
.../tickets/components/mysql/DetailsMySQL.vue | 144 +
.../components/mysql/MySQLChecksum.vue | 176 +
.../tickets/components/mysql/MySQLClone.vue | 128 +
.../mysql/MySQLClusterOperation.vue | 93 +
.../components/mysql/MySQLFlashback.vue | 105 +
.../components/mysql/MySQLFullBackup.vue | 147 +
.../components/mysql/MySQLHATruncate.vue | 150 +
.../components/mysql/MySQLImportSQLFile.vue | 420 +
.../components/mysql/MySQLMasterFailOver.vue | 103 +
.../mysql/MySQLMasterSlaveSwitch.vue | 103 +
.../components/mysql/MySQLMigrateCluster.vue | 103 +
.../components/mysql/MySQLOperation.vue | 227 +
.../components/mysql/MySQLProxyAdd.vue | 97 +
.../components/mysql/MySQLProxySwitch.vue | 103 +
.../tickets/components/mysql/MySQLRename.vue | 86 +
.../components/mysql/MySQLRestoreSlave.vue | 106 +
.../components/mysql/MySQLRollbackCluster.vue | 112 +
.../tickets/components/mysql/MySQLSlave.vue | 152 +
.../components/mysql/MySQLTableBackup.vue | 146 +
.../components/mysql/SqlLogDetails.vue | 183 +
.../components/mysql/TargetClusterPreview.vue | 166 +
.../tickets/components/redis/DetailsRedis.vue | 208 +
.../components/redis/RedisOperation.vue | 228 +
.../tickets/components/ticketDetails.less | 72 +
.../src/views/tickets/hooks/logCounts.ts | 115 +
.../views/tickets/hooks/targetClusterData.ts | 120 +
.../views/tickets/my-tickets/MyTickets.vue | 60 +
.../views/tickets/my-tickets/TicketList.vue | 483 +
.../my-tickets/components/ListTabs.vue | 119 +
.../my-tickets/components/TicketFlows.vue | 170 +
.../my-tickets/components/flows/Common.vue | 56 +
.../components/flows/MySqlFlows.vue | 139 +
.../components/flows/RedisFlows.vue | 86 +
.../src/views/tickets/my-todos/MyTodos.vue | 62 +
.../src/views/tickets/my-todos/TicketList.vue | 489 +
.../my-todos/components/TicketFlows.vue | 187 +
.../my-todos/components/flows/Approve.vue | 207 +
.../my-todos/components/flows/MySqlFlows.vue | 305 +
.../my-todos/components/flows/RedisFlows.vue | 239 +
dbm-ui/frontend/src/views/tickets/routes.ts | 43 +
.../src/views/version-files/common/types.ts | 45 +
.../version-files/hooks/useVersionFiles.ts | 96 +
.../pages/VersionFileContent.vue | 445 +
.../version-files/pages/VersionFiles.vue | 162 +
.../src/views/version-files/routes.ts | 33 +
.../components/WhitelistOperation.vue | 364 +
.../src/views/whitelist/pages/Index.vue | 299 +
dbm-ui/frontend/src/views/whitelist/routes.ts | 44 +
dbm-ui/frontend/tsconfig.config.json | 8 +
dbm-ui/frontend/tsconfig.json | 36 +
dbm-ui/frontend/vite.config.ts | 107 +
dbm-ui/locale/en/LC_MESSAGES/django.mo | Bin 0 -> 174952 bytes
dbm-ui/locale/en/LC_MESSAGES/django.po | 12334 +++++++++++++
dbm-ui/locale/zh_hans/LC_MESSAGES/django.mo | Bin 0 -> 177805 bytes
dbm-ui/locale/zh_hans/LC_MESSAGES/django.po | 11604 ++++++++++++
dbm-ui/manage.py | 23 +
dbm-ui/poetry.lock | 2930 +++
dbm-ui/pyproject.toml | 95 +
dbm-ui/pytest.ini | 21 +
dbm-ui/release/README.md | 21 +
dbm-ui/release/V1.0.0_20230424.md | 13 +
dbm-ui/scripts/ci/bk_ci.sh | 27 +
dbm-ui/scripts/ci/code_quality.sh | 55 +
dbm-ui/scripts/ci/env.sh | 23 +
dbm-ui/scripts/ci/install.sh | 67 +
dbm-ui/scripts/ci/prepare_services.sh | 23 +
dbm-ui/scripts/ci/upgrade.sh | 119 +
dbm-ui/scripts/ci/upgrade_dbm.sh | 28 +
dbm-ui/scripts/license/README.md | 27 +
.../license/add_jscss_license_header.sh | 69 +
.../scripts/license/add_py_license_header.py | 129 +
.../license/headers/LICENSE_JSCSS_HEADER.txt | 7 +
.../license/headers/LICENSE_PY_HEADER.txt | 9 +
dbm-ui/scripts/make_ssl_pairs.sh | 19 +
dbm-ui/scripts/snippets/uninstall_es.sh | 12 +
dbm-ui/scripts/snippets/uninstall_hdfs.sh | 43 +
dbm-ui/scripts/snippets/uninstall_kafka.sh | 5 +
dbm-ui/scripts/snippets/uninstall_mysql.sh | 54 +
dbm-ui/scripts/snippets/uninstall_pulsar.sh | 11 +
dbm-ui/scripts/snippets/uninstall_redis.sh | 14 +
dbm-ui/version_logs_html/V1.0.0.html | 13 +
dbm-ui/wsgi.py | 17 +
docs/resource/img/logo.png | Bin 0 -> 96505 bytes
docs/resource/img/logo_zh.png | Bin 0 -> 127045 bytes
helm-charts/.gitignore | 4 +
helm-charts/README.md | 24 +
helm-charts/bk-dbm/.helmignore | 23 +
helm-charts/bk-dbm/Chart.lock | 39 +
helm-charts/bk-dbm/Chart.yaml | 58 +
.../bk-dbm/charts/db-dns-api/.helmignore | 22 +
.../bk-dbm/charts/db-dns-api/Chart.yaml | 5 +
.../charts/db-dns-api/templates/NOTES.txt | 21 +
.../charts/db-dns-api/templates/_helpers.tpl | 66 +
.../db-dns-api/templates/deployment.yaml | 76 +
.../charts/db-dns-api/templates/ingress.yaml | 61 +
.../charts/db-dns-api/templates/service.yaml | 16 +
.../db-dns-api/templates/serviceaccount.yaml | 8 +
.../templates/tests/test-connection.yaml | 15 +
.../bk-dbm/charts/db-dns-api/values.yaml | 73 +
.../charts/db-remote-service/.helmignore | 23 +
.../charts/db-remote-service/Chart.yaml | 24 +
.../db-remote-service/templates/NOTES.txt | 22 +
.../db-remote-service/templates/_helpers.tpl | 62 +
.../templates/deployment.yaml | 90 +
.../db-remote-service/templates/hpa.yaml | 28 +
.../db-remote-service/templates/ingress.yaml | 61 +
.../db-remote-service/templates/service.yaml | 15 +
.../templates/serviceaccount.yaml | 12 +
.../charts/db-remote-service/values.yaml | 99 +
.../bk-dbm/charts/db-resource/.helmignore | 23 +
.../bk-dbm/charts/db-resource/Chart.yaml | 6 +
.../charts/db-resource/templates/NOTES.txt | 22 +
.../charts/db-resource/templates/_helpers.tpl | 62 +
.../db-resource/templates/deployment.yaml | 67 +
.../charts/db-resource/templates/hpa.yaml | 28 +
.../charts/db-resource/templates/ingress.yaml | 61 +
.../charts/db-resource/templates/service.yaml | 15 +
.../db-resource/templates/serviceaccount.yaml | 12 +
.../templates/tests/test-connection.yaml | 15 +
.../bk-dbm/charts/db-resource/values.yaml | 83 +
.../bk-dbm/charts/db-simulation/.helmignore | 23 +
.../bk-dbm/charts/db-simulation/Chart.yaml | 6 +
.../charts/db-simulation/templates/NOTES.txt | 22 +
.../db-simulation/templates/_helpers.tpl | 62 +
.../db-simulation/templates/deployment.yaml | 75 +
.../charts/db-simulation/templates/hpa.yaml | 28 +
.../db-simulation/templates/ingress.yaml | 61 +
.../db-simulation/templates/service.yaml | 15 +
.../templates/serviceaccount.yaml | 12 +
.../templates/tests/test-connection.yaml | 15 +
.../bk-dbm/charts/db-simulation/values.yaml | 84 +
.../bk-dbm/charts/dbconfig/.helmignore | 23 +
helm-charts/bk-dbm/charts/dbconfig/Chart.yaml | 6 +
.../charts/dbconfig/templates/NOTES.txt | 22 +
.../charts/dbconfig/templates/_helpers.tpl | 62 +
.../charts/dbconfig/templates/deployment.yaml | 77 +
.../bk-dbm/charts/dbconfig/templates/hpa.yaml | 28 +
.../charts/dbconfig/templates/ingress.yaml | 61 +
.../charts/dbconfig/templates/service.yaml | 15 +
.../dbconfig/templates/serviceaccount.yaml | 12 +
.../bk-dbm/charts/dbconfig/values.yaml | 82 +
helm-charts/bk-dbm/charts/dbm/.helmignore | 23 +
helm-charts/bk-dbm/charts/dbm/Chart.yaml | 6 +
.../bk-dbm/charts/dbm/templates/_helpers.tpl | 115 +
.../templates/deployments/celery-beater.yaml | 77 +
.../templates/deployments/celery-worker.yaml | 77 +
.../deployments/pipeline-worker.yaml | 77 +
.../dbm/templates/deployments/saas-api.yaml | 81 +
.../bk-dbm/charts/dbm/templates/hpa.yaml | 28 +
.../bk-dbm/charts/dbm/templates/ingress.yaml | 61 +
.../charts/dbm/templates/migrate-job.yaml | 39 +
.../bk-dbm/charts/dbm/templates/service.yaml | 19 +
.../charts/dbm/templates/serviceaccount.yaml | 35 +
helm-charts/bk-dbm/charts/dbm/values.yaml | 1 +
.../bk-dbm/charts/dbpartition/.helmignore | 23 +
.../bk-dbm/charts/dbpartition/Chart.yaml | 7 +
.../charts/dbpartition/templates/NOTES.txt | 22 +
.../charts/dbpartition/templates/_helpers.tpl | 62 +
.../dbpartition/templates/deployment.yaml | 60 +
.../charts/dbpartition/templates/hpa.yaml | 28 +
.../charts/dbpartition/templates/ingress.yaml | 61 +
.../charts/dbpartition/templates/service.yaml | 15 +
.../dbpartition/templates/serviceaccount.yaml | 12 +
.../bk-dbm/charts/dbpartition/values.yaml | 84 +
helm-charts/bk-dbm/charts/dbpriv/.helmignore | 23 +
helm-charts/bk-dbm/charts/dbpriv/Chart.yaml | 25 +
.../bk-dbm/charts/dbpriv/templates/NOTES.txt | 22 +
.../charts/dbpriv/templates/_helpers.tpl | 62 +
.../charts/dbpriv/templates/deployment.yaml | 86 +
.../bk-dbm/charts/dbpriv/templates/hpa.yaml | 28 +
.../charts/dbpriv/templates/ingress.yaml | 61 +
.../charts/dbpriv/templates/service.yaml | 19 +
.../dbpriv/templates/serviceaccount.yaml | 12 +
helm-charts/bk-dbm/charts/dbpriv/values.yaml | 92 +
helm-charts/bk-dbm/charts/grafana/.helmignore | 21 +
helm-charts/bk-dbm/charts/grafana/Chart.lock | 6 +
helm-charts/bk-dbm/charts/grafana/Chart.yaml | 27 +
helm-charts/bk-dbm/charts/grafana/README.md | 684 +
.../charts/grafana/charts/common/.helmignore | 22 +
.../charts/grafana/charts/common/Chart.yaml | 23 +
.../charts/grafana/charts/common/README.md | 350 +
.../charts/common/templates/_affinities.tpl | 102 +
.../charts/common/templates/_capabilities.tpl | 154 +
.../charts/common/templates/_errors.tpl | 23 +
.../charts/common/templates/_images.tpl | 75 +
.../charts/common/templates/_ingress.tpl | 68 +
.../charts/common/templates/_labels.tpl | 18 +
.../charts/common/templates/_names.tpl | 70 +
.../charts/common/templates/_secrets.tpl | 140 +
.../charts/common/templates/_storage.tpl | 23 +
.../charts/common/templates/_tplvalues.tpl | 13 +
.../charts/common/templates/_utils.tpl | 62 +
.../charts/common/templates/_warnings.tpl | 14 +
.../templates/validations/_cassandra.tpl | 72 +
.../common/templates/validations/_mariadb.tpl | 103 +
.../common/templates/validations/_mongodb.tpl | 108 +
.../common/templates/validations/_mysql.tpl | 103 +
.../templates/validations/_postgresql.tpl | 129 +
.../common/templates/validations/_redis.tpl | 76 +
.../templates/validations/_validations.tpl | 46 +
.../charts/grafana/charts/common/values.yaml | 5 +
.../bk-dbm/charts/grafana/templates/NOTES.txt | 33 +
.../charts/grafana/templates/_helpers.tpl | 237 +
.../charts/grafana/templates/configmap.yaml | 34 +
.../grafana/templates/dashboard-provider.yaml | 41 +
.../charts/grafana/templates/deployment.yaml | 338 +
.../charts/grafana/templates/extra-list.yaml | 4 +
.../templates/image-renderer-deployment.yaml | 127 +
.../image-renderer-prometheusrules.yaml | 24 +
.../templates/image-renderer-service.yaml | 53 +
.../image-renderer-servicemonitor.yaml | 46 +
.../charts/grafana/templates/ingress.yaml | 66 +
.../charts/grafana/templates/ldap-secret.yaml | 23 +
.../grafana/templates/prometheusrules.yaml | 24 +
.../bk-dbm/charts/grafana/templates/pvc.yaml | 34 +
.../charts/grafana/templates/secret.yaml | 15 +
.../charts/grafana/templates/service.yaml | 57 +
.../grafana/templates/serviceaccount.yaml | 26 +
.../grafana/templates/servicemonitor.yaml | 50 +
.../charts/grafana/templates/smtp-secret.yaml | 16 +
.../charts/grafana/templates/tls-secret.yaml | 46 +
helm-charts/bk-dbm/charts/grafana/values.yaml | 1196 ++
helm-charts/bk-dbm/charts/hadb-api/Chart.yaml | 24 +
.../charts/hadb-api/templates/NOTES.txt | 22 +
.../charts/hadb-api/templates/_helpers.tpl | 62 +
.../charts/hadb-api/templates/deployment.yaml | 70 +
.../bk-dbm/charts/hadb-api/templates/hpa.yaml | 28 +
.../charts/hadb-api/templates/ingress.yaml | 61 +
.../charts/hadb-api/templates/service.yaml | 15 +
.../hadb-api/templates/serviceaccount.yaml | 12 +
.../templates/tests/test-connection.yaml | 15 +
.../bk-dbm/charts/hadb-api/values.yaml | 83 +
helm-charts/bk-dbm/templates/NOTES.txt | 12 +
helm-charts/bk-dbm/templates/_helpers.tpl | 89 +
helm-charts/bk-dbm/templates/bklogconfig.yaml | 42 +
.../templates/db-dns-api-configmap.yaml | 18 +
.../templates/db-resource-configmap.yaml | 37 +
.../bk-dbm/templates/dbconfig-configmap.yaml | 57 +
.../bk-dbm/templates/dbm-configmap.yaml | 70 +
.../templates/dbpartition-configmap.yaml | 43 +
.../bk-dbm/templates/dbpriv-configmap.yaml | 32 +
.../templates/dbsimulation-configmap.yaml | 42 +
.../templates/grafana-env-configmap.yaml | 24 +
.../templates/grafana-ini-configmap.yaml | 66 +
.../bk-dbm/templates/hadb-api-configmap.yaml | 25 +
.../bk-dbm/templates/init-sql-configmap.yaml | 24 +
helm-charts/bk-dbm/values.yaml | 369 +
readme.md | 53 +
readme_en.md | 45 +
4294 files changed, 615895 insertions(+)
create mode 100644 .ci/open_source_check.yml
create mode 100644 .ci/python_code_format.yml
create mode 100644 .ci/python_unit_test.yml
create mode 100644 .ci/templates/open_source_gate.yml
create mode 100644 .code.yml
create mode 100644 .github/CODEOWNERS
create mode 100644 .github/CODE_OF_CONDUCT.md
create mode 100644 .github/CONTRIBUTING.md
create mode 100644 .github/ISSUE_TEMPLATE/bug_report_zh.md
create mode 100644 .github/ISSUE_TEMPLATE/enhancement.md
create mode 100644 .github/gitflow.png
create mode 100644 .github/workflows/check_hard_code_ip.yml
create mode 100644 .github/workflows/go_code_check.yml
create mode 100644 .github/workflows/python_code_check.yml
create mode 100644 .github/workflows/python_unit_test.yml
create mode 100644 .gitignore
create mode 100644 .gtmproject.yaml
create mode 100644 .pre-commit-config.yaml
create mode 100644 LICENSE
create mode 100644 build.yml
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/.ci/codecc.yml
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/.ci/open_source_check.yml
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/.ci/templates/open_source_gate.yml
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/.gitignore
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/.golangci.yml
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/LICENSE
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/Makefile
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/README.md
create mode 100755 dbm-services/bigdata/db-tools/dbactuator/build.sh
create mode 100755 dbm-services/bigdata/db-tools/dbactuator/build_doc.sh
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/cmd/cmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/docs/.gitkeep
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/docs/dbactuator.md
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/docs/docs.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/docs/embed_docs.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/docs/swagger.json
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/docs/swagger.yaml
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/example/install-hdfs.md
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/go.mod
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/go.sum
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/cmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/commoncmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/download_http.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/download_scp.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/fileserver.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/rm_large_file.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/crontabcmd/clear_crontab.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/crontabcmd/crontabcmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_connections.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_es.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_nodes.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_shards.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/clean_data.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/cmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/decompress_pkg.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/escmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/exclude_node.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/init.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/init_grant.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_client.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_cold.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_exporter.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_hot.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_kibana.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_master.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_supervisor.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_telegraf.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/replace_master.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/restart_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/start_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/stop_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/check_active.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/check_decommission.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/cmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/data_clean.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/decompress_pkg.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/generate_key.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/hdfscmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/init_system_config.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_datanode.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_first_namenode.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_haproxy.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_journalnode.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_second_namenode.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_supervisor.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_telegraf.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_zkfc.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_zookeeper.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/instance_operation.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/refresh_nodes.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/render_hdfs_config.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/scp_dir.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/start_component.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/stop_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/update_dfs_host.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/update_host_mappng.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/update_zookeeper_config.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/write_key.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/clean_data.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/cmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/decompress_influxdb_pkg.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/influxdbcmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/init.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/init_user.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/install_influxdb.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/install_supervisor.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/install_telegraf.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/restart_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/start_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/stop_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/check_reassignment.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/clean_data.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/cmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/decompress_kafka_pkg.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/init.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/init_kafkaUser.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_broker.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_manager.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_supervisor.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_zookeeper.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/kafkacmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/reconfig_add.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/reconfig_remove.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/reduce_broker.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/replace_broker.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/restart_broker.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/restart_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/start_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/stop_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/add_hosts.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_broker_config.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_ledger_metadata.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_namespace_config.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_under_replicated.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/clean_data.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/cmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/decommission_bookie.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/decompress_pkg.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/init.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/init_cluster.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/init_pulsar_manager.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_bookkeeper.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_broker.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_pulsar_manager.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_supervisor.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_zookeeper.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/modify_hosts.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/pulsarcmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/restart_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/set_bookie_readonly.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/start_broker.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/start_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/stop_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/unset_bookie_readonly.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/subcmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/subcmd_helper.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/subcmd_util.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinit.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinitcmd.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/backup.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/backup_download.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/cos.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/gse.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/http.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/scp.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/base.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/components.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/computil/computil.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/crontab/clear_crontab.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/crontab/crontab.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/dbconfig/dbconfig.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/dbconfig/query_change.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/check_health.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/check_nodes.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/clean_data.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/exclude_node.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/install_elasticsearch.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/replace_node.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/startstop_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/fileserver/README.md
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/fileserver/acl.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/fileserver/fileserver.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/check_nn_active
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/config_tpl.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/hadoop-daemon-wrapper.sh
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/hadoop-env.sh
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/haproxy.cfg
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/log4j.properties
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/rack-aware.sh
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/template.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/const.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/decompress_pkg.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/hdfs.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/init_system_config.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/install_haproxy.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/install_hdfs.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/install_zookeeper.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/node_operation.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/replace_hdfs.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/shrink_hdfs.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/update_host_mapping.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/disk.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/http.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/util.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/xml_util.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/clean_data.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/influxdb.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/install_influxdb.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/startstop_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/clean_data.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/decom_broker.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/install_kafka.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/kafka.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/reconfig.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/startstop_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/medium.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/output.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/check_shrink.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/clean_data.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/install_pulsar.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/pulsar.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/startstop_process.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/sysinit/essysinit.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/components/sysinit/sysinit.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/codes/codes.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/config/base.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/config/init.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/const.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/cst.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/es.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/influxdb.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/kafka.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/mysql.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/os.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/proxy.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/pulsar.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/safego/graceful.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/safego/recover.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/safego/safego.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.sql
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/staticembed.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_es.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_es.sh
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_hdfs.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_hdfs.sh
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.sh
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/mock/mock.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/mock/mock_test.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/rollback/rollback.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/rollback/rollback_test.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil/es_helper.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil/es_operate.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil/esutil.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/helpers.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/httpclient/client.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/httpclient/httpclient.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/kafkautil/kafkautil.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/logger.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/cmdexec.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/crontab.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/crontab_test.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/mountpoint.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/netutil.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/osutil.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/osutil_test.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/sysctl.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/truncate.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/unix_only.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/windows_only.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/pulsarutil/pulsar_helper.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/pulsarutil/pulsar_operate.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/pulsarutil/pulsarutil.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/sftp/init.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/sftp/sftp.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/sftp/sftp_test.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/slice.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/str.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/command_groups.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/markdown.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/normallizers.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/templates.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/timeutil/duration.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/timeutil/timeutil.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/util.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/validate/validate.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/xmlutil/xml.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/pkg/util/xmlutil/xmlutil.go
create mode 100644 dbm-services/bigdata/db-tools/dbactuator/scripts/upload.sh
create mode 100644 dbm-services/common/db-config/.ci/codecc.yml
create mode 100644 dbm-services/common/db-config/.ci/open_source_check.yml
create mode 100644 dbm-services/common/db-config/.ci/templates/open_source_gate.yml
create mode 100644 dbm-services/common/db-config/.gitignore
create mode 100644 dbm-services/common/db-config/.golangci.yml
create mode 100644 dbm-services/common/db-config/Dockerfile
create mode 100644 dbm-services/common/db-config/Makefile
create mode 100644 dbm-services/common/db-config/README.md
create mode 100755 dbm-services/common/db-config/admin.sh
create mode 100644 dbm-services/common/db-config/assets/assets.go
create mode 100644 dbm-services/common/db-config/assets/migrate.go
create mode 100644 dbm-services/common/db-config/assets/migrate.md
create mode 100644 dbm-services/common/db-config/assets/migrations/000001_init.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000001_init.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000002_create_table.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000002_create_table.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000003_init_sensitive.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000010_common_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000010_common_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000011_es_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000011_es_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000012_hdfs_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000012_hdfs_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000013_kafka_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000013_kafka_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000014_PredixyTendisplusCluster_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000014_PredixyTendisplusCluster_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000015_rediscomm_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000015_rediscomm_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000016_RedisInstance_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000016_RedisInstance_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000017_RedisMS_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000017_RedisMS_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000018_tendb_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000018_tendb_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000019_tendbcluster_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000019_tendbcluster_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000020_tendbha_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000020_tendbha_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000021_tendbsingle_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000021_tendbsingle_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000022_TendisCache_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000022_TendisCache_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000023_TendisplusInstance_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000023_TendisplusInstance_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000024_TendisSSD_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000024_TendisSSD_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000025_TendisX_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000025_TendisX_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000026_TwemproxyRedisInstance_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000026_TwemproxyRedisInstance_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000027_TwemproxyTendisplusInstance_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000027_TwemproxyTendisplusInstance_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000028_TwemproxyTendisSSDInstance_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000028_TwemproxyTendisSSDInstance_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000029_pulsar_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000029_pulsar_data.up.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000030_influxdb_data.down.sql
create mode 100644 dbm-services/common/db-config/assets/migrations/000030_influxdb_data.up.sql
create mode 100644 dbm-services/common/db-config/build.sh
create mode 100755 dbm-services/common/db-config/build_doc.sh
create mode 100644 dbm-services/common/db-config/cmd/bkconfigcli/bkconfigcli.go
create mode 100644 dbm-services/common/db-config/cmd/bkconfigcli/main.go
create mode 100644 dbm-services/common/db-config/cmd/bkconfigsvr/main.go
create mode 100644 dbm-services/common/db-config/cmd/encryptcli/main.go
create mode 100644 dbm-services/common/db-config/conf/config.yaml
create mode 100644 dbm-services/common/db-config/conf/logger.yaml
create mode 100644 dbm-services/common/db-config/docs/design/encrypt.md
create mode 100644 dbm-services/common/db-config/docs/design/examples.md
create mode 100644 dbm-services/common/db-config/docs/design/readme.md
create mode 100644 dbm-services/common/db-config/docs/design/value_validate.md
create mode 100644 dbm-services/common/db-config/docs/docs.go
create mode 100644 dbm-services/common/db-config/docs/embed_apidoc.go
create mode 100644 dbm-services/common/db-config/docs/swagger.json
create mode 100644 dbm-services/common/db-config/docs/swagger.yaml
create mode 100644 dbm-services/common/db-config/go.mod
create mode 100644 dbm-services/common/db-config/go.sum
create mode 100644 dbm-services/common/db-config/internal/api/api.go
create mode 100644 dbm-services/common/db-config/internal/api/apply_config.go
create mode 100644 dbm-services/common/db-config/internal/api/baseResponse.go
create mode 100644 dbm-services/common/db-config/internal/api/bkapigw_user.go
create mode 100644 dbm-services/common/db-config/internal/api/config_base.go
create mode 100644 dbm-services/common/db-config/internal/api/config_file.go
create mode 100644 dbm-services/common/db-config/internal/api/config_item.go
create mode 100644 dbm-services/common/db-config/internal/api/config_meta.go
create mode 100644 dbm-services/common/db-config/internal/api/config_plat.go
create mode 100644 dbm-services/common/db-config/internal/api/config_version.go
create mode 100644 dbm-services/common/db-config/internal/api/dbha.go
create mode 100644 dbm-services/common/db-config/internal/api/simple_config.go
create mode 100644 dbm-services/common/db-config/internal/handler/handler.go
create mode 100644 dbm-services/common/db-config/internal/handler/simple/batchget.go
create mode 100644 dbm-services/common/db-config/internal/handler/simple/config_apply.go
create mode 100644 dbm-services/common/db-config/internal/handler/simple/config_file.go
create mode 100644 dbm-services/common/db-config/internal/handler/simple/config_item.go
create mode 100644 dbm-services/common/db-config/internal/handler/simple/config_meta.go
create mode 100644 dbm-services/common/db-config/internal/handler/simple/config_plat.go
create mode 100644 dbm-services/common/db-config/internal/handler/simple/config_version.go
create mode 100644 dbm-services/common/db-config/internal/handler/simple/dbha.go
create mode 100644 dbm-services/common/db-config/internal/handler/simple/simple.go
create mode 100644 dbm-services/common/db-config/internal/handler/simple/simple_item.go
create mode 100644 dbm-services/common/db-config/internal/pkg/cst/const.go
create mode 100644 dbm-services/common/db-config/internal/pkg/cst/cst.go
create mode 100644 dbm-services/common/db-config/internal/pkg/cst/mysql.go
create mode 100644 dbm-services/common/db-config/internal/pkg/errno/code.go
create mode 100644 dbm-services/common/db-config/internal/pkg/errno/errno.go
create mode 100644 dbm-services/common/db-config/internal/repository/migrate.go
create mode 100644 dbm-services/common/db-config/internal/repository/migratespec/sensitive.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/batchget.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/cache.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/cache_config_file.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/cache_config_name.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/cache_crond.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/cache_file_node.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/config_apply.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/config_file.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/config_item.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/config_item_check.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/config_item_test.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/config_level.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/config_meta.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/config_plat.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/config_version.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/db.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/dbmeta.go
create mode 100644 dbm-services/common/db-config/internal/repository/model/dbmeta_inside.go
create mode 100644
dbm-services/common/db-config/internal/repository/model/dbtime.go create mode 100644 dbm-services/common/db-config/internal/repository/model/file_node.go create mode 100644 dbm-services/common/db-config/internal/repository/model/level_node.go create mode 100644 dbm-services/common/db-config/internal/repository/model/model.go create mode 100644 dbm-services/common/db-config/internal/repository/model/model_config_node.go create mode 100644 dbm-services/common/db-config/internal/repository/model/node_task.go create mode 100644 dbm-services/common/db-config/internal/repository/repository.go create mode 100644 dbm-services/common/db-config/internal/router/router.go create mode 100644 dbm-services/common/db-config/internal/router/router_restapi.go create mode 100644 dbm-services/common/db-config/internal/service/configcheck/README.MD create mode 100644 dbm-services/common/db-config/internal/service/configcheck/config_check.go create mode 100644 dbm-services/common/db-config/internal/service/configcheck/config_file.go create mode 100644 dbm-services/common/db-config/internal/service/dbha/batchget.go create mode 100644 dbm-services/common/db-config/internal/service/dbha/dbha.go create mode 100644 dbm-services/common/db-config/internal/service/simpleconfig/config_apply.go create mode 100644 dbm-services/common/db-config/internal/service/simpleconfig/config_file.go create mode 100644 dbm-services/common/db-config/internal/service/simpleconfig/config_item.go create mode 100644 dbm-services/common/db-config/internal/service/simpleconfig/config_item_check.go create mode 100644 dbm-services/common/db-config/internal/service/simpleconfig/config_item_format.go create mode 100644 dbm-services/common/db-config/internal/service/simpleconfig/config_item_merge.go create mode 100644 dbm-services/common/db-config/internal/service/simpleconfig/config_item_test.go create mode 100644 dbm-services/common/db-config/internal/service/simpleconfig/config_meta.go create mode 100644 dbm-services/common/db-config/internal/service/simpleconfig/config_plat.go create mode 100644 dbm-services/common/db-config/internal/service/simpleconfig/config_version.go create mode 100644 dbm-services/common/db-config/internal/service/simpleconfig/simple_config.go create mode 100644 dbm-services/common/db-config/pkg/constvar/const.go create mode 100644 dbm-services/common/db-config/pkg/constvar/constvar.go create mode 100644 dbm-services/common/db-config/pkg/constvar/mysql.go create mode 100644 dbm-services/common/db-config/pkg/constvar/os.go create mode 100644 dbm-services/common/db-config/pkg/core/config/base.go create mode 100644 dbm-services/common/db-config/pkg/core/config/config.go create mode 100644 dbm-services/common/db-config/pkg/core/config/logger.go create mode 100644 dbm-services/common/db-config/pkg/core/config/tls.go create mode 100644 dbm-services/common/db-config/pkg/core/logger/README.md create mode 100644 dbm-services/common/db-config/pkg/core/logger/base/base.go create mode 100644 dbm-services/common/db-config/pkg/core/logger/base/interface.go create mode 100644 dbm-services/common/db-config/pkg/core/logger/example_test.go create mode 100644 dbm-services/common/db-config/pkg/core/logger/init.go create mode 100644 dbm-services/common/db-config/pkg/core/logger/log.go create mode 100644 dbm-services/common/db-config/pkg/core/logger/logger.go create mode 100644 dbm-services/common/db-config/pkg/core/logger/logrus/fields.go create mode 100644 dbm-services/common/db-config/pkg/core/logger/logrus/log.go create mode 100644 
dbm-services/common/db-config/pkg/core/logger/logrus/logrus.go create mode 100644 dbm-services/common/db-config/pkg/core/logger/logrus/new.go create mode 100644 dbm-services/common/db-config/pkg/core/logger/lumberjack/lumberjack.go create mode 100644 dbm-services/common/db-config/pkg/core/logger/zap/new.go create mode 100644 dbm-services/common/db-config/pkg/core/logger/zap/zap.go create mode 100644 dbm-services/common/db-config/pkg/core/logger/zap/zapfields.go create mode 100644 dbm-services/common/db-config/pkg/core/logger/zap/zaplog.go create mode 100644 dbm-services/common/db-config/pkg/core/safego/graceful.go create mode 100644 dbm-services/common/db-config/pkg/core/safego/recover.go create mode 100644 dbm-services/common/db-config/pkg/core/safego/safego.go create mode 100644 dbm-services/common/db-config/pkg/core/trace/file.go create mode 100644 dbm-services/common/db-config/pkg/core/trace/trace.go create mode 100644 dbm-services/common/db-config/pkg/httpclient/client.go create mode 100644 dbm-services/common/db-config/pkg/httpclient/httpclient.go create mode 100644 dbm-services/common/db-config/pkg/httpclient/response.go create mode 100644 dbm-services/common/db-config/pkg/httpclient/sign.go create mode 100644 dbm-services/common/db-config/pkg/middleware/cors.go create mode 100644 dbm-services/common/db-config/pkg/middleware/middleware.go create mode 100644 dbm-services/common/db-config/pkg/middleware/request_body.go create mode 100644 dbm-services/common/db-config/pkg/middleware/request_id.go create mode 100644 dbm-services/common/db-config/pkg/util/backoff.go create mode 100644 dbm-services/common/db-config/pkg/util/boolext.go create mode 100644 dbm-services/common/db-config/pkg/util/compress/compress.go create mode 100644 dbm-services/common/db-config/pkg/util/confvalue.go create mode 100644 dbm-services/common/db-config/pkg/util/crypt/auth.go create mode 100644 dbm-services/common/db-config/pkg/util/crypt/encrypt.go create mode 100644 dbm-services/common/db-config/pkg/util/datasize.go create mode 100644 dbm-services/common/db-config/pkg/util/dbutil/dbutil.go create mode 100644 dbm-services/common/db-config/pkg/util/dbutil/json.go create mode 100644 dbm-services/common/db-config/pkg/util/dbutil/time.go create mode 100644 dbm-services/common/db-config/pkg/util/durationext.go create mode 100644 dbm-services/common/db-config/pkg/util/map.go create mode 100644 dbm-services/common/db-config/pkg/util/serialize/serialize.go create mode 100644 dbm-services/common/db-config/pkg/util/set.go create mode 100644 dbm-services/common/db-config/pkg/util/slice.go create mode 100644 dbm-services/common/db-config/pkg/util/str.go create mode 100644 dbm-services/common/db-config/pkg/util/tls.go create mode 100644 dbm-services/common/db-config/pkg/util/trim.go create mode 100644 dbm-services/common/db-config/pkg/util/trim_test.go create mode 100644 dbm-services/common/db-config/pkg/util/util.go create mode 100644 dbm-services/common/db-config/pkg/validate/check_value.go create mode 100644 dbm-services/common/db-config/pkg/validate/check_value_test.go create mode 100644 dbm-services/common/db-config/pkg/validate/const.go create mode 100644 dbm-services/common/db-config/pkg/validate/validate.go create mode 100644 dbm-services/common/db-dns/dns-api/.gitignore create mode 100644 dbm-services/common/db-dns/dns-api/Dockerfile create mode 100644 dbm-services/common/db-dns/dns-api/Makefile create mode 100644 dbm-services/common/db-dns/dns-api/README.md create mode 100644 
dbm-services/common/db-dns/dns-api/cmd/bk-dnsapi/main.go create mode 100644 dbm-services/common/db-dns/dns-api/docs/.gitkeep create mode 100644 dbm-services/common/db-dns/dns-api/go.mod create mode 100644 dbm-services/common/db-dns/dns-api/go.sum create mode 100644 dbm-services/common/db-dns/dns-api/internal/dao/dao.go create mode 100644 dbm-services/common/db-dns/dns-api/internal/domain/entity/base.go create mode 100644 dbm-services/common/db-dns/dns-api/internal/domain/entity/entity.go create mode 100644 dbm-services/common/db-dns/dns-api/internal/domain/entity/error.go create mode 100644 dbm-services/common/db-dns/dns-api/internal/domain/repo/domain/base.go create mode 100644 dbm-services/common/db-dns/dns-api/internal/domain/repo/domain/domain.go create mode 100644 dbm-services/common/db-dns/dns-api/internal/domain/service/.gitkeep create mode 100644 dbm-services/common/db-dns/dns-api/internal/handler/domain/base.go create mode 100644 dbm-services/common/db-dns/dns-api/internal/handler/domain/delete.go create mode 100644 dbm-services/common/db-dns/dns-api/internal/handler/domain/domain.go create mode 100644 dbm-services/common/db-dns/dns-api/internal/handler/domain/insert.go create mode 100644 dbm-services/common/db-dns/dns-api/internal/handler/domain/query.go create mode 100644 dbm-services/common/db-dns/dns-api/internal/handler/domain/update.go create mode 100644 dbm-services/common/db-dns/dns-api/pkg/README.md create mode 100755 dbm-services/common/db-dns/dns-api/pkg/errno/code.go create mode 100755 dbm-services/common/db-dns/dns-api/pkg/errno/errno.go create mode 100644 dbm-services/common/db-dns/dns-api/pkg/go.mod create mode 100644 dbm-services/common/db-dns/dns-api/pkg/go.sum create mode 100644 dbm-services/common/db-dns/dns-api/pkg/tools/tools.go create mode 100644 dbm-services/common/db-dns/dns-api/pkg/tools/util.go create mode 100644 dbm-services/common/db-dns/dns-api/scripts/ddl/init.sql create mode 100644 dbm-services/common/db-dns/dns-api/scripts/git/pre-commit create mode 100644 dbm-services/common/db-dns/dns-reload/Makefile create mode 100644 dbm-services/common/db-dns/dns-reload/api/api.go create mode 100644 dbm-services/common/db-dns/dns-reload/config/config.conf create mode 100644 dbm-services/common/db-dns/dns-reload/config/config.go create mode 100644 dbm-services/common/db-dns/dns-reload/config/init.go create mode 100644 dbm-services/common/db-dns/dns-reload/dao/dao.go create mode 100644 dbm-services/common/db-dns/dns-reload/dao/domain.go create mode 100644 dbm-services/common/db-dns/dns-reload/doc/named.conf_tpl create mode 100644 dbm-services/common/db-dns/dns-reload/go.mod create mode 100644 dbm-services/common/db-dns/dns-reload/go.sum create mode 100644 dbm-services/common/db-dns/dns-reload/logger/init.go create mode 100644 dbm-services/common/db-dns/dns-reload/logger/logger.go create mode 100644 dbm-services/common/db-dns/dns-reload/main/main.go create mode 100644 dbm-services/common/db-dns/dns-reload/service/dnsService.go create mode 100644 dbm-services/common/db-dns/dns-reload/service/service.go create mode 100644 dbm-services/common/db-dns/dns-reload/util/tools.go create mode 100644 dbm-services/common/db-dns/dns-reload/util/util.go create mode 100644 dbm-services/common/db-resource/.ci/codecc.yml create mode 100644 dbm-services/common/db-resource/.ci/open_source_check.yml create mode 100644 dbm-services/common/db-resource/.ci/templates/open_source_gate.yml create mode 100644 dbm-services/common/db-resource/.gitignore create mode 100644 
dbm-services/common/db-resource/.golangci.yml create mode 100644 dbm-services/common/db-resource/Dockerfile create mode 100644 dbm-services/common/db-resource/Makefile create mode 100644 dbm-services/common/db-resource/README.md create mode 100644 dbm-services/common/db-resource/go.mod create mode 100644 dbm-services/common/db-resource/go.sum create mode 100644 dbm-services/common/db-resource/internal/config/config.go create mode 100644 dbm-services/common/db-resource/internal/controller/apply/apply.go create mode 100644 dbm-services/common/db-resource/internal/controller/controller.go create mode 100644 dbm-services/common/db-resource/internal/controller/manage/manage.go create mode 100644 dbm-services/common/db-resource/internal/controller/manage/rs.go create mode 100644 dbm-services/common/db-resource/internal/controller/manage/rs_import.go create mode 100644 dbm-services/common/db-resource/internal/controller/manage/rs_lable.go create mode 100644 dbm-services/common/db-resource/internal/controller/manage/rs_operation_info.go create mode 100644 dbm-services/common/db-resource/internal/lock/lock.go create mode 100644 dbm-services/common/db-resource/internal/lock/redis_lock.go create mode 100644 dbm-services/common/db-resource/internal/lock/redis_lock_test.go create mode 100644 dbm-services/common/db-resource/internal/lock/spinlock.go create mode 100644 dbm-services/common/db-resource/internal/middleware/middleware.go create mode 100644 dbm-services/common/db-resource/internal/model/TbDeviceSpec.go create mode 100644 dbm-services/common/db-resource/internal/model/TbRequestLog.go create mode 100644 dbm-services/common/db-resource/internal/model/TbRpApplyDetailLog.go create mode 100644 dbm-services/common/db-resource/internal/model/TbRpDetail.go create mode 100644 dbm-services/common/db-resource/internal/model/TbRpDetailArchive.go create mode 100644 dbm-services/common/db-resource/internal/model/TbRpOperatorInfo.go create mode 100644 dbm-services/common/db-resource/internal/model/TbRpReturnDetail.go create mode 100644 dbm-services/common/db-resource/internal/model/model.go create mode 100644 dbm-services/common/db-resource/internal/routers/router.go create mode 100644 dbm-services/common/db-resource/internal/routers/routers.go create mode 100644 dbm-services/common/db-resource/internal/svr/apply/api.go create mode 100644 dbm-services/common/db-resource/internal/svr/apply/apply.go create mode 100644 dbm-services/common/db-resource/internal/svr/apply/core.go create mode 100644 dbm-services/common/db-resource/internal/svr/apply/instance.go create mode 100644 dbm-services/common/db-resource/internal/svr/bk/bk.go create mode 100644 dbm-services/common/db-resource/internal/svr/bk/cc.go create mode 100644 dbm-services/common/db-resource/internal/svr/bk/cc_test.go create mode 100644 dbm-services/common/db-resource/internal/svr/bk/disk.go create mode 100644 dbm-services/common/db-resource/internal/svr/bk/get_block_info.sh create mode 100644 dbm-services/common/db-resource/internal/svr/bk/job_v3.go create mode 100644 dbm-services/common/db-resource/internal/svr/bk/job_v3_test.go create mode 100644 dbm-services/common/db-resource/internal/svr/cloud/cloud.go create mode 100644 dbm-services/common/db-resource/internal/svr/cloud/tencent/tencent.go create mode 100644 dbm-services/common/db-resource/internal/svr/cloud/tencent/tencentcloud_test.go create mode 100644 dbm-services/common/db-resource/internal/svr/meta/meta.go create mode 100644 dbm-services/common/db-resource/internal/svr/task/task.go create 
mode 100644 dbm-services/common/db-resource/main.go create mode 100644 dbm-services/common/db-resource/pkg/errno/code.go create mode 100644 dbm-services/common/db-resource/pkg/errno/errno.go create mode 100644 dbm-services/common/db-resource/pkg/util/util.go create mode 100644 dbm-services/common/db-resource/scripts/.gitkeep create mode 100644 dbm-services/common/dbha/ha-module/.gitignore create mode 100644 dbm-services/common/dbha/ha-module/Dockerfile create mode 100644 dbm-services/common/dbha/ha-module/README.md create mode 100644 dbm-services/common/dbha/ha-module/agent/agent.go create mode 100644 dbm-services/common/dbha/ha-module/agent/connection.go create mode 100644 dbm-services/common/dbha/ha-module/agent/monitor_agent.go create mode 100644 dbm-services/common/dbha/ha-module/bk-dbha/.helmignore create mode 100644 dbm-services/common/dbha/ha-module/bk-dbha/Chart.yaml create mode 100644 dbm-services/common/dbha/ha-module/bk-dbha/templates/NOTES.txt create mode 100644 dbm-services/common/dbha/ha-module/bk-dbha/templates/_helpers.tpl create mode 100644 dbm-services/common/dbha/ha-module/bk-dbha/templates/configmap.yaml create mode 100644 dbm-services/common/dbha/ha-module/bk-dbha/templates/deployment.yaml create mode 100644 dbm-services/common/dbha/ha-module/bk-dbha/templates/hpa.yaml create mode 100644 dbm-services/common/dbha/ha-module/bk-dbha/templates/ingress.yaml create mode 100644 dbm-services/common/dbha/ha-module/bk-dbha/templates/service.yaml create mode 100644 dbm-services/common/dbha/ha-module/bk-dbha/templates/serviceaccount.yaml create mode 100644 dbm-services/common/dbha/ha-module/bk-dbha/templates/tests/test-connection.yaml create mode 100644 dbm-services/common/dbha/ha-module/bk-dbha/values.yaml create mode 100755 dbm-services/common/dbha/ha-module/build.sh create mode 100644 dbm-services/common/dbha/ha-module/client/client.go create mode 100644 dbm-services/common/dbha/ha-module/client/cmdb.go create mode 100644 dbm-services/common/dbha/ha-module/client/hadb.go create mode 100644 dbm-services/common/dbha/ha-module/client/name_service.go create mode 100644 dbm-services/common/dbha/ha-module/client/nc.go create mode 100644 dbm-services/common/dbha/ha-module/client/redis_client.go create mode 100644 dbm-services/common/dbha/ha-module/client/remote_config.go create mode 100644 dbm-services/common/dbha/ha-module/config/config.go create mode 100644 dbm-services/common/dbha/ha-module/constvar/constant.go create mode 100644 dbm-services/common/dbha/ha-module/constvar/constvar.go create mode 100644 dbm-services/common/dbha/ha-module/dbha.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/dbmodule.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQLProxy_callback.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQLProxy_detect.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQLProxy_switch.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_callback.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_detect.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_proxy_handle.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_switch.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/mysql/mysql.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/lru_cache.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/predixy_callback.go create 
mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/predixy_detect.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/predixy_switch.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/redis.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/redis_base.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/redis_callback.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/redis_detect.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/redis_switch.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/svr_password.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/tendisplus_callback.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/tendisplus_detect.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/tendisplus_switch.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/twemproxy_callback.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/twemproxy_detect.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/redis/twemproxy_switch.go create mode 100644 dbm-services/common/dbha/ha-module/dbmodule/register.go create mode 100644 dbm-services/common/dbha/ha-module/dbutil/db_detect.go create mode 100644 dbm-services/common/dbha/ha-module/dbutil/db_switch.go create mode 100644 dbm-services/common/dbha/ha-module/dbutil/dbutil.go create mode 100644 dbm-services/common/dbha/ha-module/errno/code.go create mode 100644 dbm-services/common/dbha/ha-module/errno/errno.go create mode 100644 dbm-services/common/dbha/ha-module/gm/connection.go create mode 100644 dbm-services/common/dbha/ha-module/gm/gcm.go create mode 100644 dbm-services/common/dbha/ha-module/gm/gdm.go create mode 100644 dbm-services/common/dbha/ha-module/gm/gm.go create mode 100644 dbm-services/common/dbha/ha-module/gm/gmm.go create mode 100644 dbm-services/common/dbha/ha-module/gm/gqa.go create mode 100644 dbm-services/common/dbha/ha-module/go.mod create mode 100644 dbm-services/common/dbha/ha-module/go.sum create mode 100644 dbm-services/common/dbha/ha-module/ha.yaml create mode 100644 dbm-services/common/dbha/ha-module/log/log.go create mode 100644 dbm-services/common/dbha/ha-module/monitor/monitor.go create mode 100644 dbm-services/common/dbha/ha-module/monitor/monitor_api.go create mode 100644 dbm-services/common/dbha/ha-module/test/MySQL_test.go create mode 100644 dbm-services/common/dbha/ha-module/test/agent_test.go create mode 100644 dbm-services/common/dbha/ha-module/test/client_test.go create mode 100644 dbm-services/common/dbha/ha-module/test/log_test.go create mode 100644 dbm-services/common/dbha/ha-module/test/util_test.go create mode 100644 dbm-services/common/dbha/ha-module/types/types.go create mode 100644 dbm-services/common/dbha/ha-module/util/file_lock.go create mode 100644 dbm-services/common/dbha/ha-module/util/timezone.go create mode 100644 dbm-services/common/dbha/ha-module/util/util.go create mode 100644 dbm-services/common/dbha/hadb-api/Dockerfile create mode 100644 dbm-services/common/dbha/hadb-api/LICENSE create mode 100644 dbm-services/common/dbha/hadb-api/README.md create mode 100644 dbm-services/common/dbha/hadb-api/cmd/add.go create mode 100644 dbm-services/common/dbha/hadb-api/cmd/root.go create mode 100644 dbm-services/common/dbha/hadb-api/cmd/run.go create mode 100644 dbm-services/common/dbha/hadb-api/conf/config.yaml create mode 100644 
dbm-services/common/dbha/hadb-api/go.mod create mode 100644 dbm-services/common/dbha/hadb-api/go.sum create mode 100644 dbm-services/common/dbha/hadb-api/initc/initc.go create mode 100644 dbm-services/common/dbha/hadb-api/initc/initconfig.go create mode 100644 dbm-services/common/dbha/hadb-api/log/log.go create mode 100644 dbm-services/common/dbha/hadb-api/main.go create mode 100644 dbm-services/common/dbha/hadb-api/model/DBStatus.go create mode 100644 dbm-services/common/dbha/hadb-api/model/HALogs.go create mode 100644 dbm-services/common/dbha/hadb-api/model/HAStatus.go create mode 100644 dbm-services/common/dbha/hadb-api/model/SwitchLogs.go create mode 100644 dbm-services/common/dbha/hadb-api/model/TbMonSwitchQueue.go create mode 100644 dbm-services/common/dbha/hadb-api/model/init.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/api/api.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/add_dbstatus.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/add_halogs.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/add_hastatus.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/add_switchlogs.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/add_switchqueue.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/dbstatus/dbstatus.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/dbstatus/dbstatus_handler.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/halogs/halogs.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/halogs/halogs_handler.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/handler.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/hastatus/hastatus.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/hastatus/hastatus_handler.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/switchlog/switchlog.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/switchlog/switchlogs_handler.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/switchqueue/switchqueue.go create mode 100644 dbm-services/common/dbha/hadb-api/pkg/handler/switchqueue/switchqueue_handler.go create mode 100644 dbm-services/common/dbha/hadb-api/util/constants.go create mode 100644 dbm-services/common/dbha/hadb-api/util/timezone.go create mode 100644 dbm-services/common/dbha/hadb-api/util/util.go create mode 100644 dbm-services/common/go-pubpkg/.gitignore create mode 100644 dbm-services/common/go-pubpkg/README.md create mode 100644 dbm-services/common/go-pubpkg/cc.v3/README.md create mode 100644 dbm-services/common/go-pubpkg/cc.v3/add_host_from_cmpy.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/add_host_from_cmpy_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_internal_module.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_internal_module_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_list.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_list_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_location.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_location_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_module.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_module_list.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_module_list_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_sensitive.go 
create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_set.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_set_list.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_set_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_topo_tree.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/biz_watch.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/client.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/clone_host_property.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/clont_host_service_instance_proc.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/dept_list.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/host_base_info.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/host_base_info_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/host_biz_relations.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/host_id_query.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/host_location.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/host_location_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/host_relation_info_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/host_relation_list.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/host_relation_watch.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/host_watch.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/host_watch_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/host_without_biz_list.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/host_without_biz_list_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/list_biz_hosts.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/module_watch.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/schema.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/set_watch.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/sync_host_info_from_cmpy.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/sync_host_info_from_comy_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/test_config.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/transfer_host.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/transfer_host_module.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/transfer_host_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/update_host.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/update_host_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/utils/utils.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/utils/utils_test.go create mode 100644 dbm-services/common/go-pubpkg/cc.v3/watch.go create mode 100644 dbm-services/common/go-pubpkg/cmutil/command.go create mode 100644 dbm-services/common/go-pubpkg/cmutil/db.go create mode 100644 dbm-services/common/go-pubpkg/cmutil/file.go create mode 100644 dbm-services/common/go-pubpkg/cmutil/map.go create mode 100644 dbm-services/common/go-pubpkg/cmutil/mysql.go create mode 100644 dbm-services/common/go-pubpkg/cmutil/randstring.go create mode 100644 dbm-services/common/go-pubpkg/cmutil/ratelimit.go create mode 100644 dbm-services/common/go-pubpkg/cmutil/remove_file_limit.go create mode 100644 dbm-services/common/go-pubpkg/cmutil/sizebytes.go create mode 100644 dbm-services/common/go-pubpkg/cmutil/slice.go create mode 100644 dbm-services/common/go-pubpkg/cmutil/str.go create mode 100644 dbm-services/common/go-pubpkg/cmutil/util.go create mode 
100644 dbm-services/common/go-pubpkg/go.mod create mode 100644 dbm-services/common/go-pubpkg/go.sum create mode 100644 dbm-services/common/go-pubpkg/logger/cst.go create mode 100644 dbm-services/common/go-pubpkg/logger/custom_field_test.go create mode 100644 dbm-services/common/go-pubpkg/logger/default.go create mode 100644 dbm-services/common/go-pubpkg/logger/default_test.go create mode 100644 dbm-services/common/go-pubpkg/logger/encoder.go create mode 100644 dbm-services/common/go-pubpkg/logger/field.go create mode 100644 dbm-services/common/go-pubpkg/logger/log.go create mode 100644 dbm-services/common/go-pubpkg/logger/logger.go create mode 100644 dbm-services/common/go-pubpkg/logger/rotate.go create mode 100644 dbm-services/common/go-pubpkg/logger/rotate_test.go create mode 100644 dbm-services/common/go-pubpkg/reportlog/report.go create mode 100644 dbm-services/common/go-pubpkg/reportlog/reportlog.go create mode 100644 dbm-services/common/go-pubpkg/timeutil/duration.go create mode 100644 dbm-services/common/go-pubpkg/timeutil/duration_ext.go create mode 100644 dbm-services/common/go-pubpkg/timeutil/timeutil.go create mode 100644 dbm-services/common/go-pubpkg/validate/validate.go create mode 100644 dbm-services/go.work create mode 100644 dbm-services/go.work.sum create mode 100644 dbm-services/mysql/db-partition/.ci/codecc.yml create mode 100644 dbm-services/mysql/db-partition/.ci/open_source_check.yml create mode 100644 dbm-services/mysql/db-partition/.ci/templates/open_source_gate.yml create mode 100644 dbm-services/mysql/db-partition/.gitignore create mode 100644 dbm-services/mysql/db-partition/Dockerfile create mode 100644 dbm-services/mysql/db-partition/Makefile create mode 100644 dbm-services/mysql/db-partition/README.md create mode 100644 dbm-services/mysql/db-partition/assests/assests.go create mode 100644 dbm-services/mysql/db-partition/assests/migrate.go create mode 100644 dbm-services/mysql/db-partition/assests/migrations/000001_init.down.sql.sql create mode 100644 dbm-services/mysql/db-partition/assests/migrations/000001_init.up.sql create mode 100644 dbm-services/mysql/db-partition/assests/migrations/000002_create_table.down.sql create mode 100644 dbm-services/mysql/db-partition/assests/migrations/000002_create_table.up.sql create mode 100644 dbm-services/mysql/db-partition/cron/cron.go create mode 100644 dbm-services/mysql/db-partition/cron/cron_basic_func.go create mode 100644 dbm-services/mysql/db-partition/cron/cron_object.go create mode 100644 dbm-services/mysql/db-partition/errno/code.go create mode 100644 dbm-services/mysql/db-partition/errno/errno.go create mode 100644 dbm-services/mysql/db-partition/go.mod create mode 100644 dbm-services/mysql/db-partition/go.sum create mode 100644 dbm-services/mysql/db-partition/handler/handler.go create mode 100644 dbm-services/mysql/db-partition/main.go create mode 100644 dbm-services/mysql/db-partition/model/init_db.go create mode 100644 dbm-services/mysql/db-partition/model/init_env.go create mode 100644 dbm-services/mysql/db-partition/model/init_logger.go create mode 100644 dbm-services/mysql/db-partition/model/init_redis.go create mode 100644 dbm-services/mysql/db-partition/model/model.go create mode 100644 dbm-services/mysql/db-partition/monitor/monitor.go create mode 100644 dbm-services/mysql/db-partition/monitor/monitor_object.go create mode 100644 dbm-services/mysql/db-partition/router/router.go create mode 100644 dbm-services/mysql/db-partition/service/check_partition.go create mode 100644 
dbm-services/mysql/db-partition/service/check_partition_base_func.go create mode 100644 dbm-services/mysql/db-partition/service/check_partition_object.go create mode 100644 dbm-services/mysql/db-partition/service/db_meta_service.go create mode 100644 dbm-services/mysql/db-partition/service/db_remote_service.go create mode 100644 dbm-services/mysql/db-partition/service/execute_partition_object.go create mode 100644 dbm-services/mysql/db-partition/service/manage_config.go create mode 100644 dbm-services/mysql/db-partition/service/manage_config_object.go create mode 100644 dbm-services/mysql/db-partition/util/client.go create mode 100644 dbm-services/mysql/db-partition/util/time.go create mode 100644 dbm-services/mysql/db-partition/util/util.go create mode 100644 dbm-services/mysql/db-priv/.ci/codecc.yml create mode 100644 dbm-services/mysql/db-priv/.ci/open_source_check.yml create mode 100644 dbm-services/mysql/db-priv/.ci/templates/open_source_gate.yml create mode 100644 dbm-services/mysql/db-priv/.gitignore create mode 100644 dbm-services/mysql/db-priv/.golangci.yml create mode 100644 dbm-services/mysql/db-priv/Dockerfile create mode 100644 dbm-services/mysql/db-priv/Makefile create mode 100644 dbm-services/mysql/db-priv/README.md create mode 100644 dbm-services/mysql/db-priv/admin.sh create mode 100644 dbm-services/mysql/db-priv/assests/assests.go create mode 100644 dbm-services/mysql/db-priv/assests/migrate.go create mode 100644 dbm-services/mysql/db-priv/assests/migrations/000001_init.down.sql.sql create mode 100644 dbm-services/mysql/db-priv/assests/migrations/000001_init.up.sql create mode 100644 dbm-services/mysql/db-priv/assests/migrations/000002_init.down.sql create mode 100644 dbm-services/mysql/db-priv/assests/migrations/000002_init.up.sql create mode 100644 dbm-services/mysql/db-priv/errno/code.go create mode 100644 dbm-services/mysql/db-priv/errno/errno.go create mode 100644 dbm-services/mysql/db-priv/go.mod create mode 100644 dbm-services/mysql/db-priv/go.sum create mode 100644 dbm-services/mysql/db-priv/handler/account.go create mode 100644 dbm-services/mysql/db-priv/handler/account_rule.go create mode 100644 dbm-services/mysql/db-priv/handler/add_priv.go create mode 100644 dbm-services/mysql/db-priv/handler/clone_client_priv.go create mode 100644 dbm-services/mysql/db-priv/handler/clone_instance_priv.go create mode 100644 dbm-services/mysql/db-priv/handler/handler.go create mode 100644 dbm-services/mysql/db-priv/handler/public_key.go create mode 100644 dbm-services/mysql/db-priv/handler/register_routes.go create mode 100644 dbm-services/mysql/db-priv/main.go create mode 100644 dbm-services/mysql/db-priv/service/account.go create mode 100644 dbm-services/mysql/db-priv/service/account_object.go create mode 100644 dbm-services/mysql/db-priv/service/accout_rule.go create mode 100644 dbm-services/mysql/db-priv/service/accout_rule_object.go create mode 100644 dbm-services/mysql/db-priv/service/add_priv.go create mode 100644 dbm-services/mysql/db-priv/service/add_priv_base_func.go create mode 100644 dbm-services/mysql/db-priv/service/add_priv_object.go create mode 100644 dbm-services/mysql/db-priv/service/clone_client_priv.go create mode 100644 dbm-services/mysql/db-priv/service/clone_client_priv_base_func.go create mode 100644 dbm-services/mysql/db-priv/service/clone_client_priv_object.go create mode 100644 dbm-services/mysql/db-priv/service/clone_instance_priv.go create mode 100644 dbm-services/mysql/db-priv/service/clone_instance_priv_base_func.go create mode 100644 
dbm-services/mysql/db-priv/service/clone_instance_priv_object.go create mode 100644 dbm-services/mysql/db-priv/service/db_meta_service.go create mode 100644 dbm-services/mysql/db-priv/service/db_remote_service.go create mode 100644 dbm-services/mysql/db-priv/service/init_db.go create mode 100644 dbm-services/mysql/db-priv/service/service.go create mode 100644 dbm-services/mysql/db-priv/util/base_func.go create mode 100644 dbm-services/mysql/db-priv/util/client.go create mode 100644 dbm-services/mysql/db-priv/util/db.go create mode 100644 dbm-services/mysql/db-priv/util/rsa.go create mode 100644 dbm-services/mysql/db-priv/util/time.go create mode 100644 dbm-services/mysql/db-priv/util/util.go create mode 100644 dbm-services/mysql/db-remote-service/.gitignore create mode 100644 dbm-services/mysql/db-remote-service/.golangci.yml create mode 100644 dbm-services/mysql/db-remote-service/Dockerfile create mode 100644 dbm-services/mysql/db-remote-service/LICENSE create mode 100644 dbm-services/mysql/db-remote-service/Makefile create mode 100644 dbm-services/mysql/db-remote-service/all_sql_commands.txt create mode 100644 dbm-services/mysql/db-remote-service/cmd/init.go create mode 100644 dbm-services/mysql/db-remote-service/cmd/root.go create mode 100644 dbm-services/mysql/db-remote-service/cmd/version.go create mode 100644 dbm-services/mysql/db-remote-service/go.mod create mode 100644 dbm-services/mysql/db-remote-service/go.sum create mode 100644 dbm-services/mysql/db-remote-service/main.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/config/config.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/mysql_rpc/embed.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/mysql_rpc/init.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/mysql_rpc/mysql_rpc.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/parser/parser.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/proxy_rpc/proxy_rpc.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/redis_rpc/client.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/redis_rpc/common.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/redis_rpc/init.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/redis_rpc/redis_rpc.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/redis_rpc/twemproxy_rpc.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/rpc_core/execute_cmd.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/rpc_core/execute_cmds_on_addr.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/rpc_core/init.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/rpc_core/interface.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/rpc_core/rpc_core.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/rpc_core/rpc_wrapper.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/rpc_core/run.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/service/handler_parser.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/general_handler.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/handler_rpc.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/init.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/mysql.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/proxy.go create mode 100644 
dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/redis.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/utils.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/service/router.go create mode 100644 dbm-services/mysql/db-remote-service/pkg/service/service.go create mode 100644 dbm-services/mysql/db-remote-service/readme.md create mode 100644 dbm-services/mysql/db-simulation/.gitignore create mode 100644 dbm-services/mysql/db-simulation/.golangci.yml create mode 100644 dbm-services/mysql/db-simulation/Dockerfile create mode 100644 dbm-services/mysql/db-simulation/Makefile create mode 100644 dbm-services/mysql/db-simulation/all_sql_commands.txt create mode 100644 dbm-services/mysql/db-simulation/app/app.go create mode 100644 dbm-services/mysql/db-simulation/app/config/config.go create mode 100644 dbm-services/mysql/db-simulation/app/service/kubernets.go create mode 100644 dbm-services/mysql/db-simulation/app/service/kubernets_test.go create mode 100644 dbm-services/mysql/db-simulation/app/service/service.go create mode 100644 dbm-services/mysql/db-simulation/app/service/simulation_task.go create mode 100644 dbm-services/mysql/db-simulation/app/syntax/alter_table_rule.go create mode 100644 dbm-services/mysql/db-simulation/app/syntax/create_db_rule.go create mode 100644 dbm-services/mysql/db-simulation/app/syntax/create_table_rule.go create mode 100644 dbm-services/mysql/db-simulation/app/syntax/definer_rule.go create mode 100644 dbm-services/mysql/db-simulation/app/syntax/dml_rule.go create mode 100644 dbm-services/mysql/db-simulation/app/syntax/mysql_keyword.go create mode 100644 dbm-services/mysql/db-simulation/app/syntax/rule.go create mode 100644 dbm-services/mysql/db-simulation/app/syntax/rule_test.go create mode 100644 dbm-services/mysql/db-simulation/app/syntax/spider_rule.go create mode 100644 dbm-services/mysql/db-simulation/app/syntax/syntax.go create mode 100644 dbm-services/mysql/db-simulation/app/syntax/syntax_test.go create mode 100644 dbm-services/mysql/db-simulation/app/syntax/tmysqlpase.go create mode 100644 dbm-services/mysql/db-simulation/go.mod create mode 100644 dbm-services/mysql/db-simulation/go.sum create mode 100644 dbm-services/mysql/db-simulation/handler/handler.go create mode 100644 dbm-services/mysql/db-simulation/handler/rule.go create mode 100644 dbm-services/mysql/db-simulation/handler/syntax_check.go create mode 100644 dbm-services/mysql/db-simulation/handler/updaterule.go create mode 100644 dbm-services/mysql/db-simulation/main.go create mode 100644 dbm-services/mysql/db-simulation/model/model.go create mode 100644 dbm-services/mysql/db-simulation/model/tb_request_record.go create mode 100644 dbm-services/mysql/db-simulation/model/tb_simulation_task.go create mode 100644 dbm-services/mysql/db-simulation/model/tb_syntax_rule.go create mode 100644 dbm-services/mysql/db-simulation/pkg/bkrepo/bkrepo.go create mode 100644 dbm-services/mysql/db-simulation/pkg/bkrepo/bkrepo_test.go create mode 100644 dbm-services/mysql/db-simulation/pkg/util/spider.go create mode 100644 dbm-services/mysql/db-simulation/pkg/util/util.go create mode 100644 dbm-services/mysql/db-simulation/router/router.go create mode 100644 dbm-services/mysql/db-simulation/rule.yaml create mode 100644 dbm-services/mysql/db-simulation/spider_rule.yaml create mode 100644 dbm-services/mysql/db-tools/dbactuator/.ci/codecc.yml create mode 100644 dbm-services/mysql/db-tools/dbactuator/.ci/open_source_check.yml create mode 100644 
dbm-services/mysql/db-tools/dbactuator/.ci/templates/open_source_gate.yml create mode 100644 dbm-services/mysql/db-tools/dbactuator/.gitignore create mode 100644 dbm-services/mysql/db-tools/dbactuator/.golangci.yml create mode 100644 dbm-services/mysql/db-tools/dbactuator/LICENSE create mode 100644 dbm-services/mysql/db-tools/dbactuator/Makefile create mode 100644 dbm-services/mysql/db-tools/dbactuator/README.md create mode 100755 dbm-services/mysql/db-tools/dbactuator/build.sh create mode 100755 dbm-services/mysql/db-tools/dbactuator/build_doc.sh create mode 100644 dbm-services/mysql/db-tools/dbactuator/cmd/cmd.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/docs/.gitkeep create mode 100644 dbm-services/mysql/db-tools/dbactuator/docs/dbactuator.md create mode 100644 dbm-services/mysql/db-tools/dbactuator/docs/docs.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/docs/embed_docs.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/docs/swagger.json create mode 100644 dbm-services/mysql/db-tools/dbactuator/docs/swagger.yaml create mode 100644 dbm-services/mysql/db-tools/dbactuator/example/import-sqlfile.example.md create mode 100644 dbm-services/mysql/db-tools/dbactuator/example/mysql-backup-database-table.example.md create mode 100644 dbm-services/mysql/db-tools/dbactuator/example/mysql-backup-download.example.md create mode 100644 dbm-services/mysql/db-tools/dbactuator/example/mysql-backup-importfull.example.md create mode 100644 dbm-services/mysql/db-tools/dbactuator/example/mysql-change-master.example.md create mode 100644 dbm-services/mysql/db-tools/dbactuator/example/mysql-deploy-dbbackup.example.md create mode 100644 dbm-services/mysql/db-tools/dbactuator/example/mysql-deploy-monitor.example.md create mode 100644 dbm-services/mysql/db-tools/dbactuator/example/mysql-deploy.example.md create mode 100644 dbm-services/mysql/db-tools/dbactuator/example/mysql-semantic-check.example.md create mode 100644 dbm-services/mysql/db-tools/dbactuator/example/mysql-uninstall.example.md create mode 100644 dbm-services/mysql/db-tools/dbactuator/example/proxy-deploy-monitor.example.md create mode 100644 dbm-services/mysql/db-tools/dbactuator/example/proxy-deploy.example.md create mode 100644 dbm-services/mysql/db-tools/dbactuator/go.mod create mode 100644 dbm-services/mysql/db-tools/dbactuator/go.sum create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/cmd.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/commoncmd.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_http.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_ibs_query.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_ibs_recover.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_scp.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/fileserver.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/rm_large_file.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/crontabcmd/clear_crontab.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/crontabcmd/crontabcmd.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/backup_database_table.go create mode 100644 
dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/backup_truncate_database.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/build_master_slave_relation.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/clean_mysql.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/clear_instance_config.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/clone_client_grant.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/cmd.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/deploy_mysql_crond.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/find_local_backup.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/flashback.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/full_backup.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/grant_repl.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/import_partitionsql.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/import_sqlfie.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_checkusm.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_dba_toolkit.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_monitor.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_mysql.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_dbbackup.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_rotatebinlog.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mycnf_change.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mycnf_clone.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/parse_binlog_time.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/pt_table_checksum.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/pt_table_sync.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/recover_binlog.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/restore_dr.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/semantic_check.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/semantic_dump_schema.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/start_mysql.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/switch_backend_to_slave.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/uninstall_mysql.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/clone_proxy_user.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/cmd.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/install_mysql_proxy.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/restart_mysql_proxy.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/set_backend.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/uninstall_mysql_proxy.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/cmd.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/install_spider.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/restart_spider.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/uninstall_spider.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/add_spider_slave_relationship.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/add_temporary_spider.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/cmd.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/init_cluster_routing_relationship.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/install_spider_ctl.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/uninstall_spider_ctl.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/subcmd.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/subcmd_helper.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/subcmd_util.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinit.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinitcmd.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backup.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backup_download.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backupsys_query_comp.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backupsys_recover_comp.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/cos.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/gse.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/http_client.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/http_comp.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/ibs_recover_wild_comp.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/ieg_backupsys.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/scp_comp.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/base.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/components.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/computil/computil.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/computil/mysql_operate.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/computil/mysql_operate_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/crontab/clear_crontab.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/crontab/crontab.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/db_base_account.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/dbconfig/dbconfig.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/dbconfig/query_change.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/fileserver/README.md
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/fileserver/acl.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/fileserver/fileserver.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/medium.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/backup_database_table.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/backup_truncate_database.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/change_master.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/check_instance_idle.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/clean_mysql.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/clear_instance_config.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common/common.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common/helper_example.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common/types.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/cutover/base.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/cutover/cutover.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/backup_index.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/backup_info.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/cst.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/dbbackup.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/types.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/deploy_mysql_crond.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/drop_large_table.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/excute_sql_file.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/execute_partition_sql.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/find_backup_local.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/full_backup.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/clone.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/clone_client_grant.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/clone_instance_priv.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/repl.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_checksum.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_dba_toolkit.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_monitor.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_mysql.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_new_dbbackup.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_rotatebinlog.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/mycnf_change.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/mycnf_clone.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/mycnf_diff.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/parse_binlog_time.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/pt_table_checksum.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/pt_table_sync.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/README.md
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/backup.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/common.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/dbloader.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/dbloader_util.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/logical_loader.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/physical_loader.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/xtrabackup.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/xtrabackup_repaire.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader_restore.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/mload_restore.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/mload_util.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/recover_binlog.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/restore.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/xload_repaire_util.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/xload_restore.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/xload_util.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_check.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_download.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_import.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_rows.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/rollback.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/semantic_check_run.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/semantic_dump_schema.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/uninstall_mysql.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/clone_proxy_user.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/install_mysql_proxy.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/restart_mysql_proxy.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/set_backend.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/uninstall_mysql_proxy.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/output.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/spider/restart_spider.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/add_slave_cluster_relationship.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/add_temporary_spider.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/init_cluster_routing_relationship.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/spiderctl.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/sysinit/sysinit.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/core/codes/codes.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/const.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/cst.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/dbbackup.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/mysql.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/os.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/proxy.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.sql
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/external.sh
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/staticembed.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.sh
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/native/db.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/native/db_benchmark_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/native/db_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/native/dbworker.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/native/proxy.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/native/proxy_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/native/spider.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/native/types.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/rollback/rollback.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/rollback/rollback_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/tools/impls.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/tools/init.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/tools/tools.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/auth/auth.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/auth/jwt_token.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/bkrepo/bkrepo.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/bkrepo/bkrepo_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/db_table_filter.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/db_table_filter_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/mydumper_regex.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/tools.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/dbcnf.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/dbcnf_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/filelock.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/helpers.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/httpclient/client.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/httpclient/httpclient.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/logger.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/change_master.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/change_master_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/hide_passowrd.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_cnf.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_cnf_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_dumper.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_dumper_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_os.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_os_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysqlclient_exec.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysqlclient_exec_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysqlutil.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/sql_builder.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/cmdexec.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/cmdexec_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/crontab.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/crontab_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/mountpoint.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/netutil.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/osutil.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/osutil_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/sysctl.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/unix_only.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/windows_only.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxy.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxy_cnf.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxy_cnf_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxyutil.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/sftp/init.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/sftp/sftp.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/sftp/sftp_test.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/slice.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/str.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/templates/cmd_groups.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/templates/normallizers.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/templates/templates.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/util.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/xmlutil/xml.go
 create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/util/xmlutil/xmlutil.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/.ci/codecc.yml
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/.ci/open_source_check.yml
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/.ci/templates/open_source_gate.yml
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/.gitignore
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/.golangci.yml
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/Makefile
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/README.md
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/api/api.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/api/create_or_replace.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/api/delete.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/api/disable.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/api/do.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/api/entries.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/api/jobs_config.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/api/pause.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/api/quit.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/api/reload.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/api/resume.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/api/send_event.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/api/send_metrics.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/cmd/cmd.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/cmd/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/cmd/root.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/cmd/subcmd_list.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/cmd/subcmd_version.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/go.mod
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/go.sum
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/jobs-config.yaml
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/main.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/mysql-crond.conf.go.tpl
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/mysql-crond.conf.tpl
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/config/bk_monitor_beat_config.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/config/config.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/config/job_config.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/config/log_config.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/config/runtime_config.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/config/send_bk_monitor_beat.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/config/sync.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/crond/crond.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/crond/error.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/crond/find_entry.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_add.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_delete.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_disable.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_list.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_pause.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_replace.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_resume.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/schedule/once_schedule.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/schedule/schedule.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/pkg/service/service.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/project.yaml
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/run_local.sh
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/runtime-local.yaml
 create mode 100644 dbm-services/mysql/db-tools/mysql-crond/runtime.yaml
 create mode 100755 dbm-services/mysql/db-tools/mysql-crond/start.sh
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/.ci/codecc.yml
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/.ci/open_source_check.yml
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/.ci/templates/open_source_gate.yml
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/.gitignore
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/Makefile
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/README.md
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/cmd/cmd.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/cmd/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/cmd/root.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_clean.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_hardcode_run.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_list.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_reschedule.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_run.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_version.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/config.yaml
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/config.yaml.go.tpl
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/go.mod
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/go.sum
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/items-config.tpl.yaml
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/items-config.yaml
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/main.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/config/config.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/config/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/config/items_config.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/config/log_config.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/config/monitor_config.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/internal/cst/const.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/internal/cst/cst.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/character_consistency/character_consistency.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/character_consistency/checker.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/check_definer.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/definer.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/routine.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/trigger.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/user_list_snap_shot.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/view.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/engine.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/hyper_engine.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/myisam.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/ext3_check.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/filter_dir_fs.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/find_huge_file.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/query_mysql_dirs.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/unique_dirs.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/collect_result.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/ibd_statistic.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/report_metrics.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/items_collect.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/master_slave_heartbeat/heartbeat.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/master_slave_heartbeat/master_slave_heartbeat.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_config_diff/mysql_config_diff.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/connlog_report.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/connlog_rotate.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/connlog_size.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/mysql_connlog.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/errlog_snapshot.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/general_scan.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/mysql_critical.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/mysql_errlog.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/mysql_notice.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/spider_critical.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/spider_notice.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/spider_warn.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/mysql_inject.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/mysql_lock.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/mysql_processlist.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/processlist_snapshot.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/proxy_backend/proxy_backend.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/proxy_user_list/proxy_user_list.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/rotate_slowlog/roate_slowlog.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/rotate_slowlog/rotate_slowlog.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/slave_status/ctl_replicate.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/slave_status/slave_status.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/main_loop/main_loop.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/main_loop/monitor.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/connection_collect.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/interface_define.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/monitor_item_interface.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/utils/send_monitor_event.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/utils/send_monitor_metrics.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-monitor/pkg/utils/utils.go
 create mode 100755 dbm-services/mysql/db-tools/mysql-monitor/pt-config-diff
 create mode 100755 dbm-services/mysql/db-tools/mysql-monitor/pt-summary
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/Makefile
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/README.md
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/cmd/cmd.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/cmd/root.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/config.example.yaml
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/go.mod
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/go.sum
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/main.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/backup.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/backup_cos.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/backup_ibs.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/binlog-parser/parse_binlog_time.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/binlog-parser/parse_binlog_time_test.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/cst/cst.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/log/log.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/log/logger.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/log/reporter.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/dbmodel.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrate.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000001_create_table.down.sql
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000001_create_table.up.sql
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000002_create_time_interval.down.sql
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000002_create_time_interval.up.sql
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/models.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/config.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/main.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/rotate.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/rotate_binlog.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/util/balance.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/util/util.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/.gitignore
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/.golangci.yml
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/LICENSE
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/Makefile
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/README.md
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/cmd/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/cmd/root.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/cmd/run_checksum.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_clean.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_demand.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_general.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_reschedule.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_version.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/go.mod
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/go.sum
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/main.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/mysql-table-checksum.sh.tpl
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/checker.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/command_args.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/define.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/move_result.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/report.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/run_command.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/strategy.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/summary.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/utils.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config/config.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config/log_config.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter/init.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter/report.go
 create mode 100644 dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter/reporter.go
 create mode 100755 dbm-services/mysql/db-tools/mysql-table-checksum/pt-table-checksum
 create mode 100755 dbm-services/mysql/db-tools/mysql-table-checksum/pt-table-sync
 create mode 100644 dbm-services/mysql/slow-query-parser-service/.gitignore
 create mode 100644 dbm-services/mysql/slow-query-parser-service/Dockerfile
 create mode 100644 dbm-services/mysql/slow-query-parser-service/Makefile
 create mode 100644 dbm-services/mysql/slow-query-parser-service/README.md
 create mode 100644 dbm-services/mysql/slow-query-parser-service/go.mod
 create mode 100644 dbm-services/mysql/slow-query-parser-service/go.sum
 create mode 100644 dbm-services/mysql/slow-query-parser-service/main.go
 create mode 100644 dbm-services/mysql/slow-query-parser-service/pkg/mysql/mysql.go
 create mode 100644 dbm-services/mysql/slow-query-parser-service/pkg/mysql/parse.go
 create mode 100644 dbm-services/mysql/slow-query-parser-service/pkg/mysql/request.go
 create mode 100644 dbm-services/mysql/slow-query-parser-service/pkg/mysql/response.go
 create mode 100644 dbm-services/mysql/slow-query-parser-service/pkg/mysql/router.go
 create mode 100644 dbm-services/mysql/slow-query-parser-service/pkg/service/service.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/.ci/codecc.yml
 create mode 100644 dbm-services/redis/db-tools/dbactuator/.ci/open_source_check.yml
 create mode 100644 dbm-services/redis/db-tools/dbactuator/.ci/templates/open_source_gate.yml
 create mode 100644 dbm-services/redis/db-tools/dbactuator/.gitignore
 create mode 100644 dbm-services/redis/db-tools/dbactuator/LICENSE
 create mode 100644 dbm-services/redis/db-tools/dbactuator/Makefile
 create mode 100644 dbm-services/redis/db-tools/dbactuator/README.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/cmd/root.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/doc/twemproxyredisinstance.txt
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/add_shard_to_cluster.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/bkdbmon_install.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/cluster_balancer.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/clustermeet_slotsassign.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/initiate_replicaset.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/mongo_add_user.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/mongo_deinstall.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/mongo_del_user.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/mongo_execute_script.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/mongo_process_restart.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/mongod_install.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/mongod_replace.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/mongod_step_down.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/mongos_install.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/os_mongo_init.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/predixy_install.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/redis_backup.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/redis_dts_datacheck.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/redis_dts_datarepaire.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/redis_install.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/redis_keysdelete_files.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/redis_keyspattern.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/redis_keyspattern.json
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/redis_keyspattern_delete.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/redis_migrate_slots.example.json
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/redis_migrate_slots.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/redis_replicaof.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/redis_replicaof_batch.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/sysinit.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/tendisssd_dr_restore.examle.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/twemproxy_install.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/twemproxy_install.json
 create mode 100644 dbm-services/redis/db-tools/dbactuator/example/twemproxy_operate.example.md
 create mode 100644 dbm-services/redis/db-tools/dbactuator/go.mod
 create mode 100644 dbm-services/redis/db-tools/dbactuator/go.sum
 create mode 100644 dbm-services/redis/db-tools/dbactuator/imgs/bk-dbactuator-redis_structur.png
 create mode 100644 dbm-services/redis/db-tools/dbactuator/imgs/tendissd_redo_dr.png
 create mode 100644 dbm-services/redis/db-tools/dbactuator/main.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/models/myredis/client.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/models/myredis/cluster_info.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/models/myredis/cluster_nodes.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/models/myredis/cluster_nodes_test.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/models/myredis/myredis.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/models/myredis/slot.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/models/myredis/tendisplus_cluster_setslotinfo.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/models/myredis/tendisplus_infoRepl.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/mylog/mylog.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/add_shard_to_cluster.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/add_user.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/atommongodb.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/cluster_balancer.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/cluster_install_test.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/del_user.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/initiate_replicaset.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_deinstall.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_execute_script.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_process_restart.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_set_profiler.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongod_install.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongod_replace.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongos_install.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/replicaset_install_test.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/replicaset_stepdown.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/atomproxy.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/predixy_install.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/predixy_operate.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_check_backends.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_install.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_install_test.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_operate.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/atomredis.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/bkdbmon_install.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/clustermeet_slotsassign.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_backup.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_dts_datacheck.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_dts_datarepaire.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_flush_data.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_install.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_keysdelete_files.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_keyspattern.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_keyspattern_delete.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_migrate_slots.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_replicaof.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_replicaof_batch.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_scene_kill_conn.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_scene_param_sync.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_scene_sync_check.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_shutdown.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_switch.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/tendisssd_dr_restore.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/atomsys.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/os_mongo_init.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/redis_capturer.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/sysinit.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/sysinit_mysql.sh
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/backupsys/backupsys.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/common/common.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/common/exporter_conf.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/common/filelock.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/common/initiate_replicaset_conf.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/common/media_pkg.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/common/mongo_common.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/common/mongo_init_shell.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/common/mongo_user_conf.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/common/mongod_conf.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/common/mongos_conf.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/common/predixy_conf.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/common/repliccaset_member_conf.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/common/twemproxy_conf.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/consts/consts.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/consts/data_dir.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/consts/dts.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/consts/test.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/consts/user.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/customtime/customtime.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/jobmanager/jobmanager.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/jobruntime/jobrunner.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/jobruntime/jobruntime.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/report/filereport.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/report/reporter.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/util/bkrepo.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/util/compress.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/util/file.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/util/net.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/util/osCmd.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/util/proxy_tools.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/util/redisutil.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/util/reflect.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/util/util.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/pkg/util/version.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/scripts/upload.sh
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/clustertest/clustertest.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/clustertest/predixy_cluster.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/clustertest/predixy_switch.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/clustertest/twemproxy_cluster.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/clustertest/twemproxy_swtich.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/proxytest/proxy_install.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/proxytest/proxy_operate.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/proxytest/proxytest.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/bkdbmon_install.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/commands.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_backup.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_cluster.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_dts_datacheck.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_dts_datarepaire.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_flushdata.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_install.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_keysdelete_files.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_keyspattern.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_migrate_slots.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_replicaof.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_scene.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_shutdown.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_switch.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/redistest.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/redistest/tendisssd_dr_restore.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/systest/sysinit.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/test.go
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/test.sh
 create mode 100644 dbm-services/redis/db-tools/dbactuator/tests/test_mongo.sh
 create mode 100644 dbm-services/redis/db-tools/dbmon/.ci/codecc.yml
 create mode 100644 dbm-services/redis/db-tools/dbmon/.ci/open_source_check.yml
 create mode 100644 dbm-services/redis/db-tools/dbmon/.ci/templates/open_source_gate.yml
 create mode 100644 dbm-services/redis/db-tools/dbmon/.gitignore
 create mode 100644 dbm-services/redis/db-tools/dbmon/LICENSE
 create mode 100644 dbm-services/redis/db-tools/dbmon/Makefile
 create mode 100644 dbm-services/redis/db-tools/dbmon/README.md
 create mode 100644 dbm-services/redis/db-tools/dbmon/cmd/root.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/config/config.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/config/instconfig.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/config/keystat.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/dbmon-config.yaml
 create mode 100644 dbm-services/redis/db-tools/dbmon/embedfiles/embedfiles.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/embedfiles/js/login.js
 create mode 100644 dbm-services/redis/db-tools/dbmon/go.mod
 create mode 100644 dbm-services/redis/db-tools/dbmon/go.sum
 create mode 100644 dbm-services/redis/db-tools/dbmon/imgs/bk-dbmon-structurer.png
 create mode 100644 dbm-services/redis/db-tools/dbmon/main.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/models/mymongo/mymongo.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/models/myredis/client.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/models/myredis/cluster_info.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/models/myredis/cluster_nodes.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/models/myredis/cluster_nodes_test.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/models/myredis/myredis.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/models/myredis/tendisplus_cluster_setslotinfo.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/models/myredis/tendisplus_infoRepl.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/mylog/gin.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/mylog/mylog.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/package.sh
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/backupsys/backupsys.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/consts/consts.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/consts/data_dir.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/consts/event_categories.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/consts/mongo.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/customtime/customtime.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/httpapi/httpapi.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/kafka/crypto_base.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/kafka/kafka.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/kafka/kafka_client.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/keylifecycle/ctl.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/keylifecycle/job.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/keylifecycle/task.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/mongojob/backup_job.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/mongojob/backup_task.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/mongojob/check_service_job.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/mongojob/cmd.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/mongojob/mongojob.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/mongojob/msg.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/redisbinlogbackup/job.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/redisbinlogbackup/redisbinlogbackup.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/redisbinlogbackup/task.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/redisfullbackup/job.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/redisfullbackup/task.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/redisheartbeat/job.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/redisheartbeat/task.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/redismonitor/base_task.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/redismonitor/job.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/redismonitor/predixy_task.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/redismonitor/redis_task.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/redismonitor/redismonitor.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/redismonitor/twemproxy_task.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/report/clear_history.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/report/filereport.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/report/report.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/report/reporter.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/sendwarning/bkmonitorbeat.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/pkg/sendwarning/sendwarning.go
 create mode 100755 dbm-services/redis/db-tools/dbmon/start.sh
 create mode 100755 dbm-services/redis/db-tools/dbmon/stop.sh
 create mode 100644 dbm-services/redis/db-tools/dbmon/util/cmd_builder.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/util/compress.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/util/osCmd.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/util/reflect.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/util/util.go
 create mode 100644 dbm-services/redis/db-tools/dbmon/util/version.go
 create mode 100644 dbm-services/redis/redis-dts/.gitignore
 create mode 100644 dbm-services/redis/redis-dts/Makefile
 create mode 100644 dbm-services/redis/redis-dts/README.md
 create mode 100644 dbm-services/redis/redis-dts/bin/config-template.yaml
 create mode 100644 dbm-services/redis/redis-dts/bin/redis-shake-template.conf
 create mode 100755 dbm-services/redis/redis-dts/bin/start.sh
 create mode 100755 dbm-services/redis/redis-dts/bin/stop.sh
 create mode 100644 dbm-services/redis/redis-dts/bin/tendisplus-sync-template.conf
 create mode 100755 dbm-services/redis/redis-dts/bin/tendisssd-sync-template.conf
 create mode 100644 dbm-services/redis/redis-dts/config/config.go
 create mode 100644 dbm-services/redis/redis-dts/go.mod
 create mode 100644 dbm-services/redis/redis-dts/go.sum
 create mode 100644 "dbm-services/redis/redis-dts/images/redis-dts\346\236\266\346\236\204\345\233\276.png"
 create mode 100644 dbm-services/redis/redis-dts/main.go
 create mode 100644 dbm-services/redis/redis-dts/models/myredis/myredis.go
 create mode 100644 dbm-services/redis/redis-dts/models/myredis/tendisplus_infoRepl.go
 create mode 100644 dbm-services/redis/redis-dts/models/mysql/init.go
 create mode 100644 dbm-services/redis/redis-dts/models/mysql/mysql.go
 create mode 100644 dbm-services/redis/redis-dts/models/mysql/tendisdb/job.go
 create mode 100644 dbm-services/redis/redis-dts/models/mysql/tendisdb/task.go
 create mode 100644 dbm-services/redis/redis-dts/models/mysql/tendisdb/tendisdb.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/constvar/constvar.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/constvar/methods.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/customtime/customtime.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsJob/base.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsJob/dtsJob.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsJob/redisCacheDtsJob.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsJob/tendisSSDDtsJob.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsJob/tendisplusDtsJob.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/dtsTask.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/factory/factory.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/init.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/rediscache/makeCacheSync.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/rediscache/rediscache.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/rediscache/watchCacheSync.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/saveSyncSeq.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/tendisplus/makeSync.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/tendisplus/tendisplus.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/tendisplus/watchSync.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/backupFileFetch.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/cmdsImporter.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/makeSync.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/tendisBackup.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/tendisdump.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/tendisssd.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/watchOldSync.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/osPerf/osPerf.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/remoteOperation/abs.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/remoteOperation/init.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/remoteOperation/ssh.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/scrdbclient/dtsRemote.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/scrdbclient/fileService.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/scrdbclient/jobapiRequest.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/scrdbclient/jobapiSchema.go
 create mode 100644 dbm-services/redis/redis-dts/pkg/scrdbclient/scrdbclient.go
 create mode 100644 dbm-services/redis/redis-dts/tclog/tclog.go
 create mode 100644 dbm-services/redis/redis-dts/util/httpReqNew.go
 create mode 100644 dbm-services/redis/redis-dts/util/osCmd.go
 create mode 100644 dbm-services/redis/redis-dts/util/redis_util.go
 create mode 100644 dbm-services/redis/redis-dts/util/util.go
 create mode 100644 dbm-ui/.coveragerc
 create mode 100644 dbm-ui/.gitignore
 create mode 100644 dbm-ui/.pylintrc
 create mode 100644 dbm-ui/DBM_README.md
 create mode 100644 dbm-ui/Dockerfile
 create mode 100644 dbm-ui/backend/.flake8
 create mode 100644 dbm-ui/backend/__init__.py
 create mode 100644 dbm-ui/backend/admin.py
 create mode 100644 dbm-ui/backend/asgi.py
 create mode 100644 dbm-ui/backend/bk_dataview/README.md
 create mode 100644 dbm-ui/backend/bk_dataview/__init__.py
 create mode 100644 dbm-ui/backend/bk_dataview/bkdbm.ini
 create mode 100644 dbm-ui/backend/bk_dataview/dashboards/.gitkeep
 create mode 100644 dbm-ui/backend/bk_dataview/dashboards/dbm.yaml
 create mode 100644 dbm-ui/backend/bk_dataview/dashboards/json/es.json
 create mode 100644 dbm-ui/backend/bk_dataview/dashboards/json/hdfs.json
 create mode 100644 dbm-ui/backend/bk_dataview/dashboards/json/influxdb.json
 create mode 100644 dbm-ui/backend/bk_dataview/dashboards/json/kafka.json
 create mode 100644 dbm-ui/backend/bk_dataview/dashboards/json/pulsar.json
 create mode 100644 dbm-ui/backend/bk_dataview/dashboards/json/tendbha.json
 create mode 100644 dbm-ui/backend/bk_dataview/dashboards/json/tendbsingle.json
 create mode 100644 dbm-ui/backend/bk_dataview/dashboards/json/tendiscache.json
 create mode 100644 dbm-ui/backend/bk_dataview/dashboards/json/tendisplus.json
 create mode 100644 dbm-ui/backend/bk_dataview/dashboards/json/tendisssd.json
 create mode 100644 dbm-ui/backend/bk_dataview/dashboards/readme.md
 create mode 100644 dbm-ui/backend/bk_dataview/datasources/.gitkeep
 create mode 100644 dbm-ui/backend/bk_dataview/datasources/__init__.py
 create mode 100644 dbm-ui/backend/bk_dataview/datasources/bk_monitor_datasource.yaml
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/__init__.py
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/apps.py
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/authentication.py
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/backends/__init__.py
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/backends/api.py
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/backends/db.py
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/client.py
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/models.py
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/permissions.py
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/provisioning.py
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/router.py
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/settings.py
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/urls.py
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/utils.py
 create mode 100644 dbm-ui/backend/bk_dataview/grafana/views.py
 create mode 100644 dbm-ui/backend/bk_web/__init__.py
 create mode 100644 dbm-ui/backend/bk_web/constants.py
 create mode 100644 dbm-ui/backend/bk_web/handlers.py
 create mode 100644 dbm-ui/backend/bk_web/middleware.py
 create mode 100644 dbm-ui/backend/bk_web/models.py
 create mode 100644 dbm-ui/backend/bk_web/pagination.py
 create mode 100644 dbm-ui/backend/bk_web/renderers.py
 create mode 100644 dbm-ui/backend/bk_web/serializers.py
 create mode 100644 dbm-ui/backend/bk_web/swagger.py
 create mode 100644 dbm-ui/backend/bk_web/viewsets.py
 create mode 100644 dbm-ui/backend/components/__init__.py
 create mode 100644 dbm-ui/backend/components/base.py
 create mode 100644 dbm-ui/backend/components/bk.py
 create mode 100644 dbm-ui/backend/components/bklog/__init__.py
 create mode 100644 dbm-ui/backend/components/bklog/client.py
 create mode 100644 dbm-ui/backend/components/bkmonitorv3/__init__.py
 create mode 100644 dbm-ui/backend/components/bkmonitorv3/client.py
 create mode 100644 dbm-ui/backend/components/cc/__init__.py
 create mode 100644 dbm-ui/backend/components/cc/client.py
 create mode 100644 dbm-ui/backend/components/cmsi/__init__.py
 create mode 100644 dbm-ui/backend/components/cmsi/client.py
 create mode 100644 dbm-ui/backend/components/constants.py
 create mode 100644 dbm-ui/backend/components/db_name_service/__init__.py
 create mode 100644 dbm-ui/backend/components/db_name_service/client.py
 create mode 100644 dbm-ui/backend/components/db_remote_service/__init__.py
 create mode 100644 dbm-ui/backend/components/db_remote_service/client.py
 create mode 100644 dbm-ui/backend/components/dbconfig/__init__.py
 create mode 100644 dbm-ui/backend/components/dbconfig/client.py
 create mode 100644 dbm-ui/backend/components/dbconfig/constants.py
 create mode 100644 dbm-ui/backend/components/dbresource/__init__.py
 create mode 100644 dbm-ui/backend/components/dbresource/client.py
 create mode 100644 dbm-ui/backend/components/domains.py
 create mode 100644 dbm-ui/backend/components/exception.py
 create mode 100644 dbm-ui/backend/components/gcs_dns/__init__.py
 create mode 100644 dbm-ui/backend/components/gcs_dns/client.py
 create mode 100644 dbm-ui/backend/components/gse/__init__.py
 create mode 100644 dbm-ui/backend/components/gse/client.py
 create mode 100644 dbm-ui/backend/components/hadb/__init__.py
 create mode 100644 dbm-ui/backend/components/hadb/client.py
 create mode 100644 dbm-ui/backend/components/itsm/__init__.py
 create mode 100644 dbm-ui/backend/components/itsm/client.py
 create mode 100644 dbm-ui/backend/components/itsm/constants.py
 create mode 100644 dbm-ui/backend/components/job/__init__.py
 create mode 100644 dbm-ui/backend/components/job/client.py
 create mode 100644 dbm-ui/backend/components/mysql_backup/__init__.py
 create mode 100644 dbm-ui/backend/components/mysql_backup/client.py
 create mode 100644 dbm-ui/backend/components/mysql_partition/__init__.py
 create mode 100644 dbm-ui/backend/components/mysql_partition/client.py
 create mode 100644 dbm-ui/backend/components/mysql_priv_manager/__init__.py
 create mode 100644
dbm-ui/backend/components/mysql_priv_manager/client.py create mode 100644 dbm-ui/backend/components/proxy_api.py create mode 100644 dbm-ui/backend/components/sops/__init__.py create mode 100644 dbm-ui/backend/components/sops/client.py create mode 100644 dbm-ui/backend/components/sql_import/__init__.py create mode 100644 dbm-ui/backend/components/sql_import/client.py create mode 100644 dbm-ui/backend/components/usermanage/__init__.py create mode 100644 dbm-ui/backend/components/usermanage/client.py create mode 100644 dbm-ui/backend/components/utils/__init__.py create mode 100644 dbm-ui/backend/components/utils/handlers.py create mode 100644 dbm-ui/backend/components/utils/params.py create mode 100644 dbm-ui/backend/configuration/__init__.py create mode 100644 dbm-ui/backend/configuration/admin.py create mode 100644 dbm-ui/backend/configuration/apps.py create mode 100644 dbm-ui/backend/configuration/constants.py create mode 100644 dbm-ui/backend/configuration/migrations/0001_initial.py create mode 100644 dbm-ui/backend/configuration/migrations/__init__.py create mode 100644 dbm-ui/backend/configuration/mock_data.py create mode 100644 dbm-ui/backend/configuration/models/__init__.py create mode 100644 dbm-ui/backend/configuration/models/dba.py create mode 100644 dbm-ui/backend/configuration/models/ip_whitelist.py create mode 100644 dbm-ui/backend/configuration/models/password_policy.py create mode 100644 dbm-ui/backend/configuration/models/profile.py create mode 100644 dbm-ui/backend/configuration/models/system.py create mode 100644 dbm-ui/backend/configuration/serializers.py create mode 100644 dbm-ui/backend/configuration/urls.py create mode 100644 dbm-ui/backend/configuration/views/__init__.py create mode 100644 dbm-ui/backend/configuration/views/dba.py create mode 100644 dbm-ui/backend/configuration/views/ip_whitelist.py create mode 100644 dbm-ui/backend/configuration/views/password_policy.py create mode 100644 dbm-ui/backend/configuration/views/profile.py create mode 100644 dbm-ui/backend/configuration/views/system.py create mode 100644 dbm-ui/backend/constants.py create mode 100644 dbm-ui/backend/core/__init__.py create mode 100644 dbm-ui/backend/core/consts.py create mode 100644 dbm-ui/backend/core/encrypt/__init__.py create mode 100644 dbm-ui/backend/core/encrypt/aes.py create mode 100644 dbm-ui/backend/core/encrypt/apps.py create mode 100644 dbm-ui/backend/core/encrypt/constants.py create mode 100644 dbm-ui/backend/core/encrypt/exceptions.py create mode 100644 dbm-ui/backend/core/encrypt/handlers.py create mode 100644 dbm-ui/backend/core/encrypt/migrations/0001_initial.py create mode 100644 dbm-ui/backend/core/encrypt/migrations/0002_auto_20220830_1635.py create mode 100644 dbm-ui/backend/core/encrypt/migrations/0003_alter_rsakey_name.py create mode 100644 dbm-ui/backend/core/encrypt/migrations/0004_alter_rsakey_name.py create mode 100644 dbm-ui/backend/core/encrypt/migrations/__init__.py create mode 100644 dbm-ui/backend/core/encrypt/models.py create mode 100644 dbm-ui/backend/core/encrypt/rsa.py create mode 100644 dbm-ui/backend/core/encrypt/serializers.py create mode 100644 dbm-ui/backend/core/encrypt/views.py create mode 100644 dbm-ui/backend/core/exceptions.py create mode 100644 dbm-ui/backend/core/storages/__init__.py create mode 100644 dbm-ui/backend/core/storages/admin.py create mode 100644 dbm-ui/backend/core/storages/apps.py create mode 100644 dbm-ui/backend/core/storages/base.py create mode 100644 dbm-ui/backend/core/storages/constants.py create mode 100644 
dbm-ui/backend/core/storages/exceptions.py create mode 100644 dbm-ui/backend/core/storages/file_source.py create mode 100644 dbm-ui/backend/core/storages/handlers.py create mode 100644 dbm-ui/backend/core/storages/migrations/0001_initial.py create mode 100644 dbm-ui/backend/core/storages/migrations/0002_alter_bkjobfilecredential_id.py create mode 100644 dbm-ui/backend/core/storages/migrations/__init__.py create mode 100644 dbm-ui/backend/core/storages/models.py create mode 100644 dbm-ui/backend/core/storages/serializers.py create mode 100644 dbm-ui/backend/core/storages/storage.py create mode 100644 dbm-ui/backend/core/storages/views.py create mode 100644 dbm-ui/backend/core/translation/__init__.py create mode 100644 dbm-ui/backend/core/translation/apps.py create mode 100644 dbm-ui/backend/core/translation/constants.py create mode 100644 dbm-ui/backend/core/translation/context.py create mode 100644 dbm-ui/backend/core/translation/exceptions.py create mode 100644 dbm-ui/backend/core/translation/language_finder.py create mode 100644 dbm-ui/backend/core/translation/management/__init__.py create mode 100644 dbm-ui/backend/core/translation/management/commands/__init__.py create mode 100644 dbm-ui/backend/core/translation/management/commands/language_finder.py create mode 100644 dbm-ui/backend/core/translation/management/commands/translate.py create mode 100644 dbm-ui/backend/core/translation/translate.py create mode 100644 dbm-ui/backend/core/urls.py create mode 100644 dbm-ui/backend/db_event/__init__.py create mode 100644 dbm-ui/backend/db_event/apps.py create mode 100644 dbm-ui/backend/db_event/constants.py create mode 100644 dbm-ui/backend/db_event/management/__init__.py create mode 100644 dbm-ui/backend/db_event/management/commands/__init__.py create mode 100644 dbm-ui/backend/db_event/management/commands/event_list.py create mode 100644 dbm-ui/backend/db_event/models.py create mode 100644 dbm-ui/backend/db_event/readme.md create mode 100644 dbm-ui/backend/db_event/serializers.py create mode 100644 dbm-ui/backend/db_event/urls.py create mode 100644 dbm-ui/backend/db_event/views/__init__.py create mode 100644 dbm-ui/backend/db_event/views/dbha.py create mode 100644 dbm-ui/backend/db_meta/__init__.py create mode 100644 dbm-ui/backend/db_meta/admin.py create mode 100644 dbm-ui/backend/db_meta/api/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/apis.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/base/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/base/graph.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/base/handler.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/es/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/es/create.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/es/destroy.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/es/detail.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/es/disable.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/es/enable.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/es/scale_up.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/es/shrink.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/hdfs/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/hdfs/create.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/hdfs/destroy.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/hdfs/detail.py create mode 100644 
dbm-ui/backend/db_meta/api/cluster/hdfs/disable.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/hdfs/enable.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/hdfs/replace.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/hdfs/scale_up.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/hdfs/shrink.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/influxdb/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/influxdb/create.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/influxdb/destroy.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/influxdb/disable.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/influxdb/enable.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/influxdb/replace.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/kafka/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/kafka/create.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/kafka/destroy.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/kafka/detail.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/kafka/disable.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/kafka/enable.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/kafka/replace.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/kafka/scale_up.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/kafka/shrink.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/mongocluster/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/mongocluster/create.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/mongocluster/detail.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/mongocluster/handler.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/mongorepset/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/mongorepset/create.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/mongorepset/detail.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/mongorepset/handler.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/nosqlcomm/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/nosqlcomm/cc_ops.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/nosqlcomm/create_cluster.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/nosqlcomm/create_instances.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/nosqlcomm/decommission.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/nosqlcomm/detail_cluster.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/nosqlcomm/other.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/nosqlcomm/precheck.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/nosqlcomm/scale_proxy.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/nosqlcomm/scale_tendis.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/pulsar/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/pulsar/create.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/pulsar/destroy.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/pulsar/detail.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/pulsar/disable.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/pulsar/enable.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/pulsar/replace.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/pulsar/scale_up.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/pulsar/shrink.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbcluster/__init__.py create mode 
100644 dbm-ui/backend/db_meta/api/cluster/tendbcluster/create_cluster.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbcluster/create_slave_cluster.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbcluster/decommission.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbcluster/handler.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbha/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbha/add_proxy.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbha/create_cluster.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbha/decommission.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbha/detail.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbha/handler.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbha/others.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbha/status_flag.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbha/storage_tuple.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbha/switch_proxy.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbha/switch_slave.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbha/switch_storage.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbsingle/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbsingle/create_cluster.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbsingle/decommission.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbsingle/detail.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendbsingle/handler.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendiscache/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendiscache/handler.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendispluscluster/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendispluscluster/create.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendispluscluster/detail.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendispluscluster/handler.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendissingle/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendissingle/handler.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendissingle/single.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendisssd/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/cluster/tendisssd/handler.py create mode 100644 dbm-ui/backend/db_meta/api/common/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/common/common.py create mode 100644 dbm-ui/backend/db_meta/api/db_module/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/db_module/apis.py create mode 100644 dbm-ui/backend/db_meta/api/dbha/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/dbha/apis.py create mode 100644 dbm-ui/backend/db_meta/api/entry/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/entry/clb/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/entry/clb/apis.py create mode 100644 dbm-ui/backend/db_meta/api/entry/polaris/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/entry/polaris/apis.py create mode 100644 dbm-ui/backend/db_meta/api/fake/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/fake/fake_tendbha.py create mode 100644 dbm-ui/backend/db_meta/api/fake/fake_tendbsingle.py create mode 100644 dbm-ui/backend/db_meta/api/machine/__init__.py create mode 100644 
dbm-ui/backend/db_meta/api/machine/apis.py create mode 100644 dbm-ui/backend/db_meta/api/meta/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/meta/apis.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/biz_clusters.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/cluster_instances.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/instance_detail.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/tendbcluster/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/tendbcluster/biz_clusters.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/tendbcluster/cluster_instances.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/tendbcluster/instance_detail.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/tendbha/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/tendbha/biz_clusters.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/tendbha/cluster_instances.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/tendbha/instance_detail.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/tendbsingle/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/tendbsingle/biz_clusters.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/tendbsingle/cluster_instances.py create mode 100644 dbm-ui/backend/db_meta/api/priv_manager/tendbsingle/instance_detail.py create mode 100644 dbm-ui/backend/db_meta/api/proxy_instance/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/proxy_instance/apis.py create mode 100644 dbm-ui/backend/db_meta/api/storage_instance/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/storage_instance/apis.py create mode 100644 dbm-ui/backend/db_meta/api/storage_instance_tuple/__init__.py create mode 100644 dbm-ui/backend/db_meta/api/storage_instance_tuple/apis.py create mode 100644 dbm-ui/backend/db_meta/apps.py create mode 100644 dbm-ui/backend/db_meta/doc/app.md create mode 100644 dbm-ui/backend/db_meta/doc/cluster.md create mode 100644 dbm-ui/backend/db_meta/doc/domain.md create mode 100644 dbm-ui/backend/db_meta/doc/index.md create mode 100644 dbm-ui/backend/db_meta/doc/machine.md create mode 100644 dbm-ui/backend/db_meta/doc/meta_cluster_type.md create mode 100644 dbm-ui/backend/db_meta/doc/meta_layer.md create mode 100644 dbm-ui/backend/db_meta/doc/meta_role.md create mode 100644 dbm-ui/backend/db_meta/doc/meta_type.md create mode 100644 dbm-ui/backend/db_meta/doc/proxyinstance.md create mode 100644 dbm-ui/backend/db_meta/doc/storageinstance.md create mode 100644 dbm-ui/backend/db_meta/doc/storageinstancetuple.md create mode 100644 dbm-ui/backend/db_meta/enums/__init__.py create mode 100644 dbm-ui/backend/db_meta/enums/access_layer.py create mode 100644 dbm-ui/backend/db_meta/enums/cluster_entry_role.py create mode 100644 dbm-ui/backend/db_meta/enums/cluster_entry_type.py create mode 100644 dbm-ui/backend/db_meta/enums/cluster_phase.py create mode 100644 dbm-ui/backend/db_meta/enums/cluster_status.py create mode 100644 dbm-ui/backend/db_meta/enums/cluster_type.py create mode 100644 dbm-ui/backend/db_meta/enums/comm.py create mode 100644 dbm-ui/backend/db_meta/enums/instance_inner_role.py create mode 100644 dbm-ui/backend/db_meta/enums/instance_phase.py create mode 100644 dbm-ui/backend/db_meta/enums/instance_role.py create mode 100644 dbm-ui/backend/db_meta/enums/instance_status.py create mode 100644 
dbm-ui/backend/db_meta/enums/machine_type.py create mode 100644 dbm-ui/backend/db_meta/enums/type_maps.py create mode 100644 dbm-ui/backend/db_meta/exceptions.py create mode 100644 dbm-ui/backend/db_meta/flatten/__init__.py create mode 100644 dbm-ui/backend/db_meta/flatten/cities.py create mode 100644 dbm-ui/backend/db_meta/flatten/machine.py create mode 100644 dbm-ui/backend/db_meta/flatten/proxy_instance.py create mode 100644 dbm-ui/backend/db_meta/flatten/storage_instance.py create mode 100644 dbm-ui/backend/db_meta/flatten/tendis_cluster.py create mode 100644 dbm-ui/backend/db_meta/migrations/0001_initial.py create mode 100644 dbm-ui/backend/db_meta/migrations/0002_snapshotspec_spec_tendbclusterdeployplan.py create mode 100644 dbm-ui/backend/db_meta/migrations/0003_clusterentry_forward_to.py create mode 100644 dbm-ui/backend/db_meta/migrations/0004_auto_20230424_1920.py create mode 100644 dbm-ui/backend/db_meta/migrations/0005_auto_20230426_1043.py create mode 100644 dbm-ui/backend/db_meta/migrations/0006_alter_clusterentry_role.py create mode 100644 dbm-ui/backend/db_meta/migrations/0006_auto_20230506_1148.py create mode 100644 dbm-ui/backend/db_meta/migrations/0007_auto_20230510_1955.py create mode 100644 dbm-ui/backend/db_meta/migrations/0008_storageinstance_is_stand_by.py create mode 100644 dbm-ui/backend/db_meta/migrations/0009_auto_20230517_1047.py create mode 100644 dbm-ui/backend/db_meta/migrations/0010_merge_20230523_2028.py create mode 100644 dbm-ui/backend/db_meta/migrations/__init__.py create mode 100644 dbm-ui/backend/db_meta/models/__init__.py create mode 100644 dbm-ui/backend/db_meta/models/app.py create mode 100644 dbm-ui/backend/db_meta/models/city_map.py create mode 100644 dbm-ui/backend/db_meta/models/cluster.py create mode 100644 dbm-ui/backend/db_meta/models/cluster_entry.py create mode 100644 dbm-ui/backend/db_meta/models/cluster_monitor.py create mode 100644 dbm-ui/backend/db_meta/models/db_module.py create mode 100644 dbm-ui/backend/db_meta/models/group.py create mode 100644 dbm-ui/backend/db_meta/models/instance.py create mode 100644 dbm-ui/backend/db_meta/models/machine.py create mode 100644 dbm-ui/backend/db_meta/models/proxy_instance_ext.py create mode 100644 dbm-ui/backend/db_meta/models/spec.py create mode 100644 dbm-ui/backend/db_meta/models/storage_instance_ext.py create mode 100644 dbm-ui/backend/db_meta/models/storage_instance_tuple.py create mode 100644 dbm-ui/backend/db_meta/models/storage_set_dtl.py create mode 100644 dbm-ui/backend/db_meta/models/tag.py create mode 100644 dbm-ui/backend/db_meta/readme.md create mode 100644 dbm-ui/backend/db_meta/request_validator/__init__.py create mode 100644 dbm-ui/backend/db_meta/request_validator/atom.py create mode 100644 dbm-ui/backend/db_meta/request_validator/common.py create mode 100644 dbm-ui/backend/db_meta/request_validator/dbha.py create mode 100644 dbm-ui/backend/db_meta/request_validator/machine.py create mode 100644 dbm-ui/backend/db_meta/request_validator/proxy_instance.py create mode 100644 dbm-ui/backend/db_meta/request_validator/serializers.py create mode 100644 dbm-ui/backend/db_meta/request_validator/storage_instance.py create mode 100644 dbm-ui/backend/db_meta/request_validator/storage_instance_tuple.py create mode 100644 dbm-ui/backend/db_meta/tasks.py create mode 100644 dbm-ui/backend/db_meta/urls.py create mode 100644 dbm-ui/backend/db_meta/validators/__init__.py create mode 100644 dbm-ui/backend/db_meta/validators/impl.py create mode 100644 dbm-ui/backend/db_meta/views/__init__.py create 
mode 100644 dbm-ui/backend/db_meta/views/dbha/__init__.py create mode 100644 dbm-ui/backend/db_meta/views/dbha/views.py create mode 100644 dbm-ui/backend/db_meta/views/fake/__init__.py create mode 100644 dbm-ui/backend/db_meta/views/fake/views.py create mode 100644 dbm-ui/backend/db_meta/views/helper/__init__.py create mode 100644 dbm-ui/backend/db_meta/views/helper/views.py create mode 100644 dbm-ui/backend/db_meta/views/meta/__init__.py create mode 100644 dbm-ui/backend/db_meta/views/meta/views.py create mode 100644 dbm-ui/backend/db_meta/views/nosql/__init__.py create mode 100644 dbm-ui/backend/db_meta/views/nosql/views.py create mode 100644 dbm-ui/backend/db_meta/views/priv_manager/__init__.py create mode 100644 dbm-ui/backend/db_meta/views/priv_manager/views.py create mode 100644 dbm-ui/backend/db_monitor/__init__.py create mode 100644 dbm-ui/backend/db_monitor/apps.py create mode 100644 dbm-ui/backend/db_monitor/constants.py create mode 100644 dbm-ui/backend/db_monitor/management/__init__.py create mode 100644 dbm-ui/backend/db_monitor/management/commands/__init__.py create mode 100644 dbm-ui/backend/db_monitor/management/commands/export_template.py create mode 100644 dbm-ui/backend/db_monitor/management/commands/extract_alarm.py create mode 100644 dbm-ui/backend/db_monitor/management/commands/extract_collect.py create mode 100644 dbm-ui/backend/db_monitor/migrations/0001_initial.py create mode 100644 dbm-ui/backend/db_monitor/migrations/__init__.py create mode 100644 dbm-ui/backend/db_monitor/models.py create mode 100644 dbm-ui/backend/db_monitor/readme.md create mode 100644 dbm-ui/backend/db_monitor/serializers.py create mode 100644 dbm-ui/backend/db_monitor/tasks.py create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.es.5668.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.es.5669.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.es.5670.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.es.5671.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.es.5673.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.es.5674.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.es.5675.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.es.5757.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.hdfs.5891.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.hdfs.5894.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.hdfs.5895.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.hdfs.5898.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.hdfs.5899.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.influxdb.5946.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.influxdb.5947.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.influxdb.5948.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.influxdb.5949.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.influxdb.5950.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.influxdb.5951.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.influxdb.5952.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.kafka.5676.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.kafka.5677.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.kafka.5678.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.kafka.5679.tpl64 create mode 100644 
dbm-ui/backend/db_monitor/tpls/alarm/0.kafka.5685.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.5614.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.5621.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.5623.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.5624.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.5625.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.5626.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.5627.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.5629.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.5630.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.5703.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.5704.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.5758.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.5762.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.5763.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73363.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73370.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73376.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73377.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73378.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73379.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73380.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73381.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73382.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73418.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73456.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73471.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73472.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73473.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73474.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73669.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.mysql.73670.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.pulsar.5680.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.pulsar.5681.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.pulsar.5682.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.pulsar.5683.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.redis.5765.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.redis.5779.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.redis.5780.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.redis.5781.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.redis.5782.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.redis.73741.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.redis.73742.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.redis.73756.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.redis.73757.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.redis.73759.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.redis.73760.tpl64 create mode 100644 
dbm-ui/backend/db_monitor/tpls/alarm/0.redis.73761.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/alarm/0.redis.73762.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/collect/0.es.dbm_elasticsearch_exporter.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/collect/0.hdfs.dbm_hdfs_exporter.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/collect/0.influxdb.dbm_influxdb_bkpull.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/collect/0.kafka.dbm_kafka_bkpull.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/collect/0.kafka.dbm_kafka_exporter.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/collect/0.mysql.dbm_mysqld_exporter.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/collect/0.mysql.dbm_mysqlproxy_exporter.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/collect/0.pulsar.dbm_pulsarbookkeeper_bkpull.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/collect/0.pulsar.dbm_pulsarbroker_bkpull.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/collect/0.pulsar.dbm_pulsarzookeeper_bkpull.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/collect/0.redis.dbm_predixy_exporter.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/collect/0.redis.dbm_redis_exporter.tpl64 create mode 100644 dbm-ui/backend/db_monitor/tpls/collect/0.redis.dbm_twemproxy_exporter.tpl64 create mode 100644 dbm-ui/backend/db_monitor/urls.py create mode 100644 dbm-ui/backend/db_monitor/views/__init__.py create mode 100644 dbm-ui/backend/db_monitor/views/grafana.py create mode 100644 dbm-ui/backend/db_package/__init__.py create mode 100644 dbm-ui/backend/db_package/admin.py create mode 100644 dbm-ui/backend/db_package/apps.py create mode 100644 dbm-ui/backend/db_package/constants.py create mode 100644 dbm-ui/backend/db_package/exceptions.py create mode 100644 dbm-ui/backend/db_package/filters.py create mode 100644 dbm-ui/backend/db_package/migrations/0001_initial.py create mode 100644 dbm-ui/backend/db_package/migrations/__init__.py create mode 100644 dbm-ui/backend/db_package/models.py create mode 100644 dbm-ui/backend/db_package/serializers.py create mode 100644 dbm-ui/backend/db_package/urls.py create mode 100644 dbm-ui/backend/db_package/views.py create mode 100644 dbm-ui/backend/db_proxy/__init__.py create mode 100644 dbm-ui/backend/db_proxy/admin.py create mode 100644 dbm-ui/backend/db_proxy/constants.py create mode 100644 dbm-ui/backend/db_proxy/exceptions.py create mode 100644 dbm-ui/backend/db_proxy/migrations/0001_initial.py create mode 100644 dbm-ui/backend/db_proxy/migrations/__init__.py create mode 100644 dbm-ui/backend/db_proxy/models.py create mode 100644 dbm-ui/backend/db_proxy/nginxconf_tpl.py create mode 100644 dbm-ui/backend/db_proxy/tasks.py create mode 100644 dbm-ui/backend/db_proxy/urls.py create mode 100644 dbm-ui/backend/db_proxy/views/__init__.py create mode 100644 dbm-ui/backend/db_proxy/views/bkrepo/__init__.py create mode 100644 dbm-ui/backend/db_proxy/views/bkrepo/serializers.py create mode 100644 dbm-ui/backend/db_proxy/views/bkrepo/urls.py create mode 100644 dbm-ui/backend/db_proxy/views/bkrepo/views.py create mode 100644 dbm-ui/backend/db_proxy/views/db_meta/__init__.py create mode 100644 dbm-ui/backend/db_proxy/views/db_meta/serializers.py create mode 100644 dbm-ui/backend/db_proxy/views/db_meta/views.py create mode 100644 dbm-ui/backend/db_proxy/views/db_remote_service/serializers.py create mode 100644 dbm-ui/backend/db_proxy/views/db_remote_service/views.py create mode 100644 
dbm-ui/backend/db_proxy/views/dbconfig/__init__.py create mode 100644 dbm-ui/backend/db_proxy/views/dbconfig/serializers.py create mode 100644 dbm-ui/backend/db_proxy/views/dbconfig/views.py create mode 100644 dbm-ui/backend/db_proxy/views/gcs_dns/__init__.py create mode 100644 dbm-ui/backend/db_proxy/views/gcs_dns/serializers.py create mode 100644 dbm-ui/backend/db_proxy/views/gcs_dns/views.py create mode 100644 dbm-ui/backend/db_proxy/views/hadb/__init__.py create mode 100644 dbm-ui/backend/db_proxy/views/hadb/serializers.py create mode 100644 dbm-ui/backend/db_proxy/views/hadb/views.py create mode 100644 dbm-ui/backend/db_proxy/views/jobapi/__init__.py create mode 100644 dbm-ui/backend/db_proxy/views/jobapi/serializers.py create mode 100644 dbm-ui/backend/db_proxy/views/jobapi/views.py create mode 100644 dbm-ui/backend/db_proxy/views/mock_data.py create mode 100644 dbm-ui/backend/db_proxy/views/nameservice/__init__.py create mode 100644 dbm-ui/backend/db_proxy/views/nameservice/serializers.py create mode 100644 dbm-ui/backend/db_proxy/views/nameservice/views.py create mode 100644 dbm-ui/backend/db_proxy/views/redis_dts/__init__.py create mode 100644 dbm-ui/backend/db_proxy/views/redis_dts/serializers.py create mode 100644 dbm-ui/backend/db_proxy/views/redis_dts/views.py create mode 100644 dbm-ui/backend/db_proxy/views/serialiers.py create mode 100644 dbm-ui/backend/db_proxy/views/views.py create mode 100644 dbm-ui/backend/db_services/__init__.py create mode 100644 dbm-ui/backend/db_services/bigdata/__init__.py create mode 100644 dbm-ui/backend/db_services/bigdata/es/__init__.py create mode 100644 dbm-ui/backend/db_services/bigdata/es/constants.py create mode 100644 dbm-ui/backend/db_services/bigdata/es/query.py create mode 100644 dbm-ui/backend/db_services/bigdata/es/urls.py create mode 100644 dbm-ui/backend/db_services/bigdata/es/views.py create mode 100644 dbm-ui/backend/db_services/bigdata/hdfs/__init_.py create mode 100644 dbm-ui/backend/db_services/bigdata/hdfs/constants.py create mode 100644 dbm-ui/backend/db_services/bigdata/hdfs/query.py create mode 100644 dbm-ui/backend/db_services/bigdata/hdfs/urls.py create mode 100644 dbm-ui/backend/db_services/bigdata/hdfs/views.py create mode 100644 dbm-ui/backend/db_services/bigdata/influxdb/__init__.py create mode 100644 dbm-ui/backend/db_services/bigdata/influxdb/constants.py create mode 100644 dbm-ui/backend/db_services/bigdata/influxdb/query.py create mode 100644 dbm-ui/backend/db_services/bigdata/influxdb/serializers.py create mode 100644 dbm-ui/backend/db_services/bigdata/influxdb/urls.py create mode 100644 dbm-ui/backend/db_services/bigdata/influxdb/views.py create mode 100644 dbm-ui/backend/db_services/bigdata/kafka/__init__.py create mode 100644 dbm-ui/backend/db_services/bigdata/kafka/constants.py create mode 100644 dbm-ui/backend/db_services/bigdata/kafka/query.py create mode 100644 dbm-ui/backend/db_services/bigdata/kafka/urls.py create mode 100644 dbm-ui/backend/db_services/bigdata/kafka/views.py create mode 100644 dbm-ui/backend/db_services/bigdata/pulsar/__init__.py create mode 100644 dbm-ui/backend/db_services/bigdata/pulsar/constants.py create mode 100644 dbm-ui/backend/db_services/bigdata/pulsar/query.py create mode 100644 dbm-ui/backend/db_services/bigdata/pulsar/urls.py create mode 100644 dbm-ui/backend/db_services/bigdata/pulsar/views.py create mode 100644 dbm-ui/backend/db_services/bigdata/resources/__init__.py create mode 100644 dbm-ui/backend/db_services/bigdata/resources/constants.py create mode 100644 
dbm-ui/backend/db_services/bigdata/resources/query.py create mode 100644 dbm-ui/backend/db_services/bigdata/resources/urls.py create mode 100644 dbm-ui/backend/db_services/bigdata/resources/views.py create mode 100644 dbm-ui/backend/db_services/bigdata/resources/yasg_slz.py create mode 100644 dbm-ui/backend/db_services/bigdata/urls.py create mode 100644 dbm-ui/backend/db_services/cmdb/__init__.py create mode 100644 dbm-ui/backend/db_services/cmdb/biz.py create mode 100644 dbm-ui/backend/db_services/cmdb/exceptions.py create mode 100644 dbm-ui/backend/db_services/cmdb/serializers.py create mode 100644 dbm-ui/backend/db_services/cmdb/urls.py create mode 100644 dbm-ui/backend/db_services/cmdb/views.py create mode 100644 dbm-ui/backend/db_services/dbbase/__init__.py create mode 100644 dbm-ui/backend/db_services/dbbase/constants.py create mode 100644 dbm-ui/backend/db_services/dbbase/resources/__init__.py create mode 100644 dbm-ui/backend/db_services/dbbase/resources/constants.py create mode 100644 dbm-ui/backend/db_services/dbbase/resources/pagination.py create mode 100644 dbm-ui/backend/db_services/dbbase/resources/query.py create mode 100644 dbm-ui/backend/db_services/dbbase/resources/serializers.py create mode 100644 dbm-ui/backend/db_services/dbbase/resources/viewsets.py create mode 100644 dbm-ui/backend/db_services/dbbase/resources/yasg_slz.py create mode 100644 dbm-ui/backend/db_services/dbconfig/__init__.py create mode 100644 dbm-ui/backend/db_services/dbconfig/config_item.py create mode 100644 dbm-ui/backend/db_services/dbconfig/dataclass.py create mode 100644 dbm-ui/backend/db_services/dbconfig/exceptions.py create mode 100644 dbm-ui/backend/db_services/dbconfig/handlers.py create mode 100644 dbm-ui/backend/db_services/dbconfig/mock_data.py create mode 100644 dbm-ui/backend/db_services/dbconfig/serializers.py create mode 100644 dbm-ui/backend/db_services/dbconfig/urls.py create mode 100644 dbm-ui/backend/db_services/dbconfig/views.py create mode 100644 dbm-ui/backend/db_services/dbresource/__init__.py create mode 100644 dbm-ui/backend/db_services/dbresource/constants.py create mode 100644 dbm-ui/backend/db_services/dbresource/exceptions.py create mode 100644 dbm-ui/backend/db_services/dbresource/filters.py create mode 100644 dbm-ui/backend/db_services/dbresource/mock.py create mode 100644 dbm-ui/backend/db_services/dbresource/serializers.py create mode 100644 dbm-ui/backend/db_services/dbresource/urls.py create mode 100644 dbm-ui/backend/db_services/dbresource/views/deploy_plan.py create mode 100644 dbm-ui/backend/db_services/dbresource/views/resource.py create mode 100644 dbm-ui/backend/db_services/dbresource/views/sepc.py create mode 100644 dbm-ui/backend/db_services/group/__init__.py create mode 100644 dbm-ui/backend/db_services/group/handlers.py create mode 100644 dbm-ui/backend/db_services/group/serializers.py create mode 100644 dbm-ui/backend/db_services/group/urls.py create mode 100644 dbm-ui/backend/db_services/group/views.py create mode 100644 dbm-ui/backend/db_services/infras/__init__.py create mode 100644 dbm-ui/backend/db_services/infras/constants.py create mode 100644 dbm-ui/backend/db_services/infras/host.py create mode 100644 dbm-ui/backend/db_services/infras/serializers.py create mode 100644 dbm-ui/backend/db_services/infras/urls.py create mode 100644 dbm-ui/backend/db_services/infras/views.py create mode 100644 dbm-ui/backend/db_services/ipchooser/__init__.py create mode 100644 dbm-ui/backend/db_services/ipchooser/apps.py create mode 100644 
dbm-ui/backend/db_services/ipchooser/constants.py create mode 100644 dbm-ui/backend/db_services/ipchooser/exceptions.py create mode 100644 dbm-ui/backend/db_services/ipchooser/handlers/__init__.py create mode 100644 dbm-ui/backend/db_services/ipchooser/handlers/base.py create mode 100644 dbm-ui/backend/db_services/ipchooser/handlers/host_handler.py create mode 100644 dbm-ui/backend/db_services/ipchooser/handlers/topo_handler.py create mode 100644 dbm-ui/backend/db_services/ipchooser/migrations/__init__.py create mode 100644 dbm-ui/backend/db_services/ipchooser/mock_data.py create mode 100644 dbm-ui/backend/db_services/ipchooser/models.py create mode 100644 dbm-ui/backend/db_services/ipchooser/query/__init__.py create mode 100644 dbm-ui/backend/db_services/ipchooser/query/resource.py create mode 100644 dbm-ui/backend/db_services/ipchooser/serializers/__init__.py create mode 100644 dbm-ui/backend/db_services/ipchooser/serializers/base.py create mode 100644 dbm-ui/backend/db_services/ipchooser/serializers/host_sers.py create mode 100644 dbm-ui/backend/db_services/ipchooser/serializers/topo_sers.py create mode 100644 dbm-ui/backend/db_services/ipchooser/tasks.py create mode 100644 dbm-ui/backend/db_services/ipchooser/tools/__init__.py create mode 100644 dbm-ui/backend/db_services/ipchooser/tools/topo_tool.py create mode 100644 dbm-ui/backend/db_services/ipchooser/types.py create mode 100644 dbm-ui/backend/db_services/ipchooser/urls.py create mode 100644 dbm-ui/backend/db_services/ipchooser/views.py create mode 100644 dbm-ui/backend/db_services/mysql/__init__.py create mode 100644 dbm-ui/backend/db_services/mysql/admin.py create mode 100644 dbm-ui/backend/db_services/mysql/apps.py create mode 100644 dbm-ui/backend/db_services/mysql/cluster/__init__.py create mode 100644 dbm-ui/backend/db_services/mysql/cluster/handlers.py create mode 100644 dbm-ui/backend/db_services/mysql/cluster/mock_data.py create mode 100644 dbm-ui/backend/db_services/mysql/cluster/serializers.py create mode 100644 dbm-ui/backend/db_services/mysql/cluster/urls.py create mode 100644 dbm-ui/backend/db_services/mysql/cluster/views.py create mode 100644 dbm-ui/backend/db_services/mysql/constants.py create mode 100644 dbm-ui/backend/db_services/mysql/dataclass.py create mode 100644 dbm-ui/backend/db_services/mysql/excel_files/authorize_err_tpl.xlsx create mode 100644 dbm-ui/backend/db_services/mysql/excel_files/authorize_tpl.xlsx create mode 100644 dbm-ui/backend/db_services/mysql/fixpoint_rollback/__init__.py create mode 100644 dbm-ui/backend/db_services/mysql/fixpoint_rollback/constants.py create mode 100644 dbm-ui/backend/db_services/mysql/fixpoint_rollback/handlers.py create mode 100644 dbm-ui/backend/db_services/mysql/fixpoint_rollback/serializers.py create mode 100644 dbm-ui/backend/db_services/mysql/fixpoint_rollback/urls.py create mode 100644 dbm-ui/backend/db_services/mysql/fixpoint_rollback/views.py create mode 100644 dbm-ui/backend/db_services/mysql/instance/__init__.py create mode 100644 dbm-ui/backend/db_services/mysql/instance/handlers.py create mode 100644 dbm-ui/backend/db_services/mysql/instance/mock_data.py create mode 100644 dbm-ui/backend/db_services/mysql/instance/serializers.py create mode 100644 dbm-ui/backend/db_services/mysql/instance/urls.py create mode 100644 dbm-ui/backend/db_services/mysql/instance/views.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/__init__.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/authorize/__init__.py create mode 100644 
dbm-ui/backend/db_services/mysql/permission/authorize/apps.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/authorize/dataclass.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/authorize/handlers.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/authorize/migrations/0001_initial.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/authorize/migrations/__init__.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/authorize/mock_data.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/authorize/models.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/authorize/serializers.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/authorize/views.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/clone/__init__.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/clone/apps.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/clone/dataclass.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/clone/handlers.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/clone/migrations/0001_initial.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/clone/migrations/0002_mysqlpermissionclonerecord_bk_cloud_id.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/clone/migrations/__init__.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/clone/mock_data.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/clone/models.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/clone/serializers.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/clone/views.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/constants.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/db_account/__init__.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/db_account/dataclass.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/db_account/handlers.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/db_account/mock_data.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/db_account/policy.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/db_account/serializers.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/db_account/views.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/exceptions.py create mode 100644 dbm-ui/backend/db_services/mysql/permission/urls.py create mode 100644 dbm-ui/backend/db_services/mysql/remote_service/__init__.py create mode 100644 dbm-ui/backend/db_services/mysql/remote_service/handlers.py create mode 100644 dbm-ui/backend/db_services/mysql/remote_service/mock_data.py create mode 100644 dbm-ui/backend/db_services/mysql/remote_service/serializers.py create mode 100644 dbm-ui/backend/db_services/mysql/remote_service/urls.py create mode 100644 dbm-ui/backend/db_services/mysql/remote_service/views.py create mode 100644 dbm-ui/backend/db_services/mysql/resources/__init__.py create mode 100644 dbm-ui/backend/db_services/mysql/resources/constants.py create mode 100644 dbm-ui/backend/db_services/mysql/resources/tendbha/__init__.py create mode 100644 dbm-ui/backend/db_services/mysql/resources/tendbha/query.py create mode 100644 dbm-ui/backend/db_services/mysql/resources/tendbha/views.py create mode 100644 dbm-ui/backend/db_services/mysql/resources/tendbha/yasg_slz.py create mode 100644 
dbm-ui/backend/db_services/mysql/resources/tendbsingle/__init__.py
create mode 100644 dbm-ui/backend/db_services/mysql/resources/tendbsingle/query.py
create mode 100644 dbm-ui/backend/db_services/mysql/resources/tendbsingle/views.py
create mode 100644 dbm-ui/backend/db_services/mysql/resources/tendbsingle/yasg_slz.py
create mode 100644 dbm-ui/backend/db_services/mysql/resources/urls.py
create mode 100644 dbm-ui/backend/db_services/mysql/resources/views.py
create mode 100644 dbm-ui/backend/db_services/mysql/sql_import/__init__.py
create mode 100644 dbm-ui/backend/db_services/mysql/sql_import/constants.py
create mode 100644 dbm-ui/backend/db_services/mysql/sql_import/dataclass.py
create mode 100644 dbm-ui/backend/db_services/mysql/sql_import/handlers.py
create mode 100644 dbm-ui/backend/db_services/mysql/sql_import/mock_data.py
create mode 100644 dbm-ui/backend/db_services/mysql/sql_import/serializers.py
create mode 100644 dbm-ui/backend/db_services/mysql/sql_import/urls.py
create mode 100644 dbm-ui/backend/db_services/mysql/sql_import/views.py
create mode 100644 dbm-ui/backend/db_services/mysql/urls.py
create mode 100644 dbm-ui/backend/db_services/partition/__init__.py
create mode 100644 dbm-ui/backend/db_services/partition/constants.py
create mode 100644 dbm-ui/backend/db_services/partition/exceptions.py
create mode 100644 dbm-ui/backend/db_services/partition/handlers.py
create mode 100644 dbm-ui/backend/db_services/partition/serializers.py
create mode 100644 dbm-ui/backend/db_services/partition/urls.py
create mode 100644 dbm-ui/backend/db_services/partition/views.py
create mode 100644 dbm-ui/backend/db_services/plugin/__init__.py
create mode 100644 dbm-ui/backend/db_services/plugin/nameservice/__init__.py
create mode 100644 dbm-ui/backend/db_services/plugin/nameservice/clb.py
create mode 100644 dbm-ui/backend/db_services/plugin/nameservice/polaris.py
create mode 100644 dbm-ui/backend/db_services/redis/__init__.py
create mode 100644 dbm-ui/backend/db_services/redis/apps.py
create mode 100644 dbm-ui/backend/db_services/redis/constants.py
create mode 100644 dbm-ui/backend/db_services/redis/resources/__init__.py
create mode 100644 dbm-ui/backend/db_services/redis/resources/constants.py
create mode 100644 dbm-ui/backend/db_services/redis/resources/redis_cluster/__init__.py
create mode 100644 dbm-ui/backend/db_services/redis/resources/redis_cluster/query.py
create mode 100644 dbm-ui/backend/db_services/redis/resources/redis_cluster/serializers.py
create mode 100644 dbm-ui/backend/db_services/redis/resources/redis_cluster/views.py
create mode 100644 dbm-ui/backend/db_services/redis/resources/redis_cluster/yasg_slz.py
create mode 100644 dbm-ui/backend/db_services/redis/resources/urls.py
create mode 100644 dbm-ui/backend/db_services/redis/resources/views.py
create mode 100644 dbm-ui/backend/db_services/redis/resources/yasg_slz.py
create mode 100644 dbm-ui/backend/db_services/redis/urls.py
create mode 100644 dbm-ui/backend/db_services/taskflow/__init__.py
create mode 100644 dbm-ui/backend/db_services/taskflow/constants.py
create mode 100644 dbm-ui/backend/db_services/taskflow/exceptions.py
create mode 100644 dbm-ui/backend/db_services/taskflow/handlers.py
create mode 100644 dbm-ui/backend/db_services/taskflow/serializers.py
create mode 100644 dbm-ui/backend/db_services/taskflow/task.py
create mode 100644 dbm-ui/backend/db_services/taskflow/urls.py
create mode 100644 dbm-ui/backend/db_services/taskflow/views/__init__.py
create mode 100644 dbm-ui/backend/db_services/taskflow/views/flow.py
create mode 100644 dbm-ui/backend/db_services/taskflow/views/redis.py
create mode 100644 dbm-ui/backend/db_services/user/__init__.py
create mode 100644 dbm-ui/backend/db_services/user/serializers.py
create mode 100644 dbm-ui/backend/db_services/user/urls.py
create mode 100644 dbm-ui/backend/db_services/user/views.py
create mode 100644 dbm-ui/backend/db_services/version/__init__.py
create mode 100644 dbm-ui/backend/db_services/version/constants.py
create mode 100644 dbm-ui/backend/db_services/version/serializers.py
create mode 100644 dbm-ui/backend/db_services/version/urls.py
create mode 100644 dbm-ui/backend/db_services/version/views.py
create mode 100644 dbm-ui/backend/dbm_init/__init__.py
create mode 100644 dbm-ui/backend/dbm_init/apps.py
create mode 100644 dbm-ui/backend/dbm_init/constants.py
create mode 100644 dbm-ui/backend/dbm_init/json_files/bklog/dbm_dbactuator.json
create mode 100644 dbm-ui/backend/dbm_init/json_files/bklog/dbm_redis_record.json
create mode 100644 dbm-ui/backend/dbm_init/json_files/bklog/mysql_backup_result.json
create mode 100644 dbm-ui/backend/dbm_init/json_files/bklog/mysql_binlog_result.json
create mode 100644 dbm-ui/backend/dbm_init/json_files/bklog/mysql_checksum_result.json
create mode 100644 dbm-ui/backend/dbm_init/json_files/bklog/mysql_slowlog.json
create mode 100644 dbm-ui/backend/dbm_init/json_files/format.py
create mode 100644 dbm-ui/backend/dbm_init/json_files/itsm/itsm_dbm.json
create mode 100644 dbm-ui/backend/dbm_init/management/__init__.py
create mode 100644 dbm-ui/backend/dbm_init/management/commands/__init__.py
create mode 100644 dbm-ui/backend/dbm_init/management/commands/download_bkrepo.py
create mode 100644 dbm-ui/backend/dbm_init/management/commands/services_init.py
create mode 100644 dbm-ui/backend/dbm_init/management/commands/sync_from_bkrepo.py
create mode 100644 dbm-ui/backend/dbm_init/management/commands/upload_bkrepo.py
create mode 100644 dbm-ui/backend/dbm_init/migrations/__init__.py
create mode 100644 dbm-ui/backend/dbm_init/readme.md
create mode 100644 dbm-ui/backend/dbm_init/services.py
create mode 100644 dbm-ui/backend/env/__init__.py
create mode 100644 dbm-ui/backend/env/apigw_domains.py
create mode 100644 dbm-ui/backend/env/bkrepo.py
create mode 100644 dbm-ui/backend/exceptions.py
create mode 100644 dbm-ui/backend/flow/README.md
create mode 100644 dbm-ui/backend/flow/__init__.py
create mode 100644 dbm-ui/backend/flow/admin.py
create mode 100644 dbm-ui/backend/flow/apps.py
create mode 100644 dbm-ui/backend/flow/consts.py
create mode 100644 dbm-ui/backend/flow/docs/README.md
create mode 100644 dbm-ui/backend/flow/docs/bk-dbm-single.png
create mode 100644 dbm-ui/backend/flow/docs/bk-dbm-tendbha.png
create mode 100644 dbm-ui/backend/flow/docs/flow_tree.json
create mode 100644 dbm-ui/backend/flow/docs/redis_backup.png
create mode 100644 dbm-ui/backend/flow/docs/redis_cluster_open_close.png
create mode 100644 dbm-ui/backend/flow/docs/redis_cluster_shutdown.png
create mode 100644 dbm-ui/backend/flow/docs/redis_flush_data.png
create mode 100644 dbm-ui/backend/flow/docs/redis_key_delete.png
create mode 100644 dbm-ui/backend/flow/docs/tendis_cache_cluster_apply.png
create mode 100644 dbm-ui/backend/flow/engine/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/abstract.py
create mode 100644 dbm-ui/backend/flow/engine/airflow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/builder.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/engine.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/cloud/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/cloud/base_service_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/cloud/dbha_service_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/cloud/dns_service_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/cloud/drs_service_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/cloud/nginx_service_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/common/builder.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/common/get_file_list.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/common/get_real_version.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/common/machine_os_init.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/es/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/es/es_apply_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/es/es_destroy_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/es/es_disable_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/es/es_enable_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/es/es_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/es/es_reboot_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/es/es_replace_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/es/es_scale_up_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/es/es_shrink_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/hdfs/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/hdfs/exceptions.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/hdfs/hdfs_apply_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/hdfs/hdfs_destroy_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/hdfs/hdfs_disable_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/hdfs/hdfs_enable_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/hdfs/hdfs_reboot_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/hdfs/hdfs_replace_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/hdfs/hdfs_scale_up_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/hdfs/hdfs_shrink_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/influxdb/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/influxdb/influxdb_apply_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/influxdb/influxdb_destroy_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/influxdb/influxdb_disable_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/influxdb/influxdb_enable_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/influxdb/influxdb_reboot_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/influxdb/influxdb_replace_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/kafka/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/kafka/kafka_apply_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/kafka/kafka_destroy_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/kafka/kafka_disable_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/kafka/kafka_enable_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/kafka/kafka_reboot_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/kafka/kafka_replace_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/kafka/kafka_scale_up_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/kafka/kafka_shrink_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/common/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/common/common_sub_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/import_sqlfile_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/install_mysql_ha.md
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/install_mysql_single.md
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_authorize_rules.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_checksum.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_clone_rules.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_edit_config_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_fake_sql_semantic_check.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_flashback_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_ha_apply_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_ha_db_table_backup.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_ha_destroy_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_ha_disable_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_ha_enable_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_ha_full_backup_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_master_fail_over.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_master_slave_switch.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_partition.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_proxy_cluster_add.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_proxy_cluster_switch.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_rename_database_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_restore_slave_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_rollback_data_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_single_apply_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_single_destroy_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_single_disable_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_single_enable_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_truncate_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/pt_table_sync.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/name_service/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/name_service/name_service.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/pulsar/exceptions.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/pulsar/pulsar_apply_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/pulsar/pulsar_base_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/pulsar/pulsar_destroy_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/pulsar/pulsar_disable_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/pulsar/pulsar_enable_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/pulsar/pulsar_reboot_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/pulsar/pulsar_replace_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/pulsar/pulsar_scale_up_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/pulsar/pulsar_shrink_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/pulsar/pulsar_sub_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/atom_jobs/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/atom_jobs/redis_dbmon.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/atom_jobs/redis_install.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/atom_jobs/redis_makesync.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/atom_jobs/redis_repair.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/atom_jobs/redis_shutdown.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/atom_jobs/redis_switch.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_cluster_apply_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_cluster_backup.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_cluster_dts.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_cluster_open_close.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_cluster_scene_master.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_cluster_scene_slave.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_cluster_shutdown.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_dbmon.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_flush_data.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_keys_delete.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_keys_extract.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/redis_proxy_scale.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/singele_redis_shutdown.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/single_proxy_shutdown.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/redis/tendis_plus_apply_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/spider/import_sqlfile_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_add_tmp_node.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_checksum.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_cluster_deploy.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_cluster_destroy.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_cluster_disable_deploy.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_cluster_enable_deploy.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_cluster_truncate_database.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_partition.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_rename_database_flow.py
create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_slave_cluster_deploy.py
create mode 100644 dbm-ui/backend/flow/engine/codes.py
create mode 100644 dbm-ui/backend/flow/engine/consts.py
create mode 100644 dbm-ui/backend/flow/engine/controller/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/controller/base.py
create mode 100644 dbm-ui/backend/flow/engine/controller/cloud.py
create mode 100644 dbm-ui/backend/flow/engine/controller/es.py
create mode 100644 dbm-ui/backend/flow/engine/controller/hdfs.py
create mode 100644 dbm-ui/backend/flow/engine/controller/influxdb.py
create mode 100644 dbm-ui/backend/flow/engine/controller/kafka.py
create mode 100644 dbm-ui/backend/flow/engine/controller/mysql.py
create mode 100644 dbm-ui/backend/flow/engine/controller/name_service.py
create mode 100644 dbm-ui/backend/flow/engine/controller/pulsar.py
create mode 100644 dbm-ui/backend/flow/engine/controller/redis.py
create mode 100644 dbm-ui/backend/flow/engine/controller/spider.py
create mode 100644 dbm-ui/backend/flow/engine/exceptions.py
create mode 100644 dbm-ui/backend/flow/engine/logger/__init__.py
create mode 100644 dbm-ui/backend/flow/engine/logger/jsonfmt.py
create mode 100644 dbm-ui/backend/flow/migrations/0001_initial.py
create mode 100644 dbm-ui/backend/flow/migrations/__init__.py
create mode 100644 dbm-ui/backend/flow/models.py
create mode 100644 dbm-ui/backend/flow/plugins/__init__.py
create mode 100644 dbm-ui/backend/flow/plugins/apps.py
create mode 100644 dbm-ui/backend/flow/plugins/components/__init__.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/__init__.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/cloud/__init__.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/cloud/exec_service_script.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/cloud/push_config_file.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/cloud/service_proxy.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/cloud/trans_files.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/common/__init__.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/common/base_service.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/common/bigdata_manager_service.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/common/cc_service.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/common/create_ticket.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/common/delete_cc_service_instance.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/common/external_service.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/common/pause.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/common/sa_idle_check.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/common/sa_init.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/common/sleep_timer_service.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/es/__init__.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/es/es_db_meta.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/es/es_dns_manage.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/es/exec_es_actuator_script.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/es/get_es_payload.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/es/get_es_resource.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/es/rewrite_es_config.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/es/trans_files.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/hdfs/__init__.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/hdfs/check_cluster_status.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/hdfs/exec_actuator_script.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/hdfs/get_hdfs_payload.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/hdfs/get_hdfs_resource.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/hdfs/hdfs_db_meta.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/hdfs/hdfs_dns_manage.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/hdfs/rewrite_hdfs_config.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/hdfs/trans_flies.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/hdfs/update_hdfs_resource.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/influxdb/exec_actuator_script.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/influxdb/influxdb_config.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/influxdb/influxdb_db_meta.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/influxdb/influxdb_replace_config.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/influxdb/trans_flies.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/kafka/dns_manage.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/kafka/exec_actuator_script.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/kafka/get_kafka_resource.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/kafka/kafka_config.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/kafka/kafka_db_meta.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/kafka/trans_flies.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/__init__.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/add_user_for_cluster_switch.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/authorize_rules.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/build_database_table_filter_regex.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/clear_machine.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/clone_rules.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/clone_user.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/create_user.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/dns_manage.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/drop_user.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/exec_actuator_script.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/fake_semantic_check.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/filter_database_table_from_regex.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/general_check_db_in_using.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_checksum_report.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_db_meta.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_download_backupfile.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_ha_db_table_backup_response.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_master_slave_relationship_check.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_os_init.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_partition_check.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/pt_table_sync.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/rename_database_confirm_empty_from.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/rename_database_drop_from.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/rename_database_prepare_param.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/rollback_local_trans_flies.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/rollback_trans_flies.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/semantic_check.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/slave_trans_flies.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/trans_flies.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/truncate_data_create_stage_database.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/truncate_data_drop_stage_database.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/truncate_data_generate_stage_database_name.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/truncate_data_recreate_table.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/truncate_data_rename_table.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/upload_file.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/name_service/__init__.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/name_service/name_service.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/pulsar/__init__.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/pulsar/blank_schedule_service.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/pulsar/exec_actuator_script.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/pulsar/get_pulsar_payload.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/pulsar/get_pulsar_resource.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/pulsar/pulsar_db_meta.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/pulsar/pulsar_dns_manage.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/pulsar/pulsar_zk_dns_manage.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/pulsar/rewrite_pulsar_config.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/pulsar/trans_files.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/redis/EmptyAct.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/redis/__init__.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/redis/dns_manage.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/redis/exec_actuator_script.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/redis/exec_shell_script.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/redis/get_redis_payload.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/redis/get_redis_resource.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/redis/redis_config.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/redis/redis_db_meta.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/redis/redis_dts.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/redis/trans_flies.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/spider/add_system_user_in_cluster.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/spider/check_cluster_table_using_sub.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/spider/clear_database_on_remote_service.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/spider/create_database_like_via_ctl.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/spider/drop_spider_table_via_ctl.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/spider/spider_db_meta.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/spider/truncate_database_drop_stage_db_via_ctl.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/spider/truncate_database_old_new_map_adapter_service.py
create mode 100644 dbm-ui/backend/flow/plugins/components/collections/spider/truncate_database_on_spider_via_ctl.py
create mode 100644 dbm-ui/backend/flow/signal/__init__.py
create mode 100644 dbm-ui/backend/flow/signal/handlers.py
create mode 100644 dbm-ui/backend/flow/tests.py
create mode 100644 dbm-ui/backend/flow/urls.py
create mode 100644 dbm-ui/backend/flow/utils/__init__.py
create mode 100644 dbm-ui/backend/flow/utils/cc_manage.py
create mode 100644 dbm-ui/backend/flow/utils/cloud/__init__.py
create mode 100644 dbm-ui/backend/flow/utils/cloud/cloud_act_payload.py
create mode 100644 dbm-ui/backend/flow/utils/cloud/cloud_context_dataclass.py
create mode 100644 dbm-ui/backend/flow/utils/cloud/cloud_db_proxy.py
create mode 100644 dbm-ui/backend/flow/utils/cloud/cloud_module_operate.py
create mode 100644 dbm-ui/backend/flow/utils/cloud/script_template/__init__.py
create mode 100644 dbm-ui/backend/flow/utils/cloud/script_template/dbha_template.py
create mode 100644 dbm-ui/backend/flow/utils/cloud/script_template/dns_template.py
create mode 100644 dbm-ui/backend/flow/utils/cloud/script_template/drs_template.py
create mode 100644 dbm-ui/backend/flow/utils/cloud/script_template/nginx_template.py
create mode 100644 dbm-ui/backend/flow/utils/dict_to_dataclass.py
create mode 100644 dbm-ui/backend/flow/utils/dns_manage.py
create mode 100644 dbm-ui/backend/flow/utils/es/__init__.py
create mode 100644 dbm-ui/backend/flow/utils/es/es_act_payload.py
create mode 100644 dbm-ui/backend/flow/utils/es/es_context_dataclass.py
create mode 100644 dbm-ui/backend/flow/utils/es/es_db_meta.py
create mode 100644 dbm-ui/backend/flow/utils/es/es_module_operate.py
create mode 100644 dbm-ui/backend/flow/utils/es/es_script_template.py
create mode 100644 dbm-ui/backend/flow/utils/extension_manage.py
create mode 100644 dbm-ui/backend/flow/utils/filter_alias_ip.py
create mode 100644 dbm-ui/backend/flow/utils/hdfs/bk_module_operate.py
create mode 100644 dbm-ui/backend/flow/utils/hdfs/consts.py
create mode 100644 dbm-ui/backend/flow/utils/hdfs/hdfs_act_playload.py
create mode 100644 dbm-ui/backend/flow/utils/hdfs/hdfs_context_dataclass.py
create mode 100644 dbm-ui/backend/flow/utils/hdfs/hdfs_db_meta.py
create mode 100644 dbm-ui/backend/flow/utils/hdfs/hdfs_script_template.py
create mode 100644 dbm-ui/backend/flow/utils/influxdb/bk_module_operate.py
create mode 100644 dbm-ui/backend/flow/utils/influxdb/influxdb_act_playload.py
create mode 100644 dbm-ui/backend/flow/utils/influxdb/influxdb_context_dataclass.py
create mode 100644 dbm-ui/backend/flow/utils/influxdb/influxdb_db_meta.py
create mode 100644 dbm-ui/backend/flow/utils/influxdb/script_template.py
create mode 100644 dbm-ui/backend/flow/utils/kafka/bk_module_operate.py
create mode 100644 dbm-ui/backend/flow/utils/kafka/kafka_act_playload.py
create mode 100644 dbm-ui/backend/flow/utils/kafka/kafka_context_dataclass.py
create mode 100644 dbm-ui/backend/flow/utils/kafka/kafka_db_meta.py
create mode 100644 dbm-ui/backend/flow/utils/kafka/script_template.py
create mode 100644 dbm-ui/backend/flow/utils/mysql/bk_module_operate.py
create mode 100644 dbm-ui/backend/flow/utils/mysql/common/compare_time.py
create mode 100644 dbm-ui/backend/flow/utils/mysql/common/mysql_cluster_info.py
create mode 100644 dbm-ui/backend/flow/utils/mysql/db_resource.py
create mode 100644 dbm-ui/backend/flow/utils/mysql/db_table_filter/__init__.py
create mode 100644 dbm-ui/backend/flow/utils/mysql/db_table_filter/exception.py
create mode 100644 dbm-ui/backend/flow/utils/mysql/db_table_filter/filter.py
create mode 100644 dbm-ui/backend/flow/utils/mysql/db_table_filter/tools.py
create mode 100644 dbm-ui/backend/flow/utils/mysql/mysql_act_dataclass.py
create mode 100644 dbm-ui/backend/flow/utils/mysql/mysql_act_dataclass_validator.py
create mode 100644 dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py
create mode 100644 dbm-ui/backend/flow/utils/mysql/mysql_context_dataclass.py
create mode 100644 dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py
create mode 100644 dbm-ui/backend/flow/utils/pulsar/consts.py
create mode 100644 dbm-ui/backend/flow/utils/pulsar/pulsar_act_payload.py
create mode 100644 dbm-ui/backend/flow/utils/pulsar/pulsar_context_dataclass.py
create mode 100644 dbm-ui/backend/flow/utils/pulsar/pulsar_db_meta.py
create mode 100644 dbm-ui/backend/flow/utils/pulsar/pulsar_module_operate.py
create mode 100644 dbm-ui/backend/flow/utils/pulsar/pulsar_script_template.py
create mode 100644 dbm-ui/backend/flow/utils/redis/db_resource.py
create mode 100644 dbm-ui/backend/flow/utils/redis/redis_act_playload.py
create mode 100644 dbm-ui/backend/flow/utils/redis/redis_cluster_nodes.py
create mode 100644 dbm-ui/backend/flow/utils/redis/redis_context_dataclass.py
create mode 100644 dbm-ui/backend/flow/utils/redis/redis_db_meta.py
create mode 100644 dbm-ui/backend/flow/utils/redis/redis_proxy_util.py
create mode 100644 dbm-ui/backend/flow/utils/redis/redis_script_template.py
create mode 100644 dbm-ui/backend/flow/utils/redis/redis_util.py
create mode 100644 dbm-ui/backend/flow/utils/script_template.py
create mode 100644 dbm-ui/backend/flow/utils/spider/spider_act_dataclass.py
create mode 100644 dbm-ui/backend/flow/utils/spider/spider_bk_config.py
create mode 100644 dbm-ui/backend/flow/utils/spider/spider_db_meta.py
create mode 100644 dbm-ui/backend/flow/views/__init__.py
create mode 100644 dbm-ui/backend/flow/views/base.py
create mode 100644 dbm-ui/backend/flow/views/cloud_dbha_apply.py
create mode 100644 dbm-ui/backend/flow/views/cloud_dns_bind_apply.py
create mode 100644 dbm-ui/backend/flow/views/cloud_drs_apply.py
create mode 100644 dbm-ui/backend/flow/views/cloud_nginx_apply.py
create mode 100644 dbm-ui/backend/flow/views/es_apply.py
create mode 100644 dbm-ui/backend/flow/views/es_destroy.py
create mode 100644 dbm-ui/backend/flow/views/es_disable.py
create mode 100644 dbm-ui/backend/flow/views/es_enable.py
create mode 100644 dbm-ui/backend/flow/views/es_reboot.py
create mode 100644 dbm-ui/backend/flow/views/es_replace.py
create mode 100644 dbm-ui/backend/flow/views/es_scale_up.py
create mode 100644 dbm-ui/backend/flow/views/es_shrink.py
create mode 100644 dbm-ui/backend/flow/views/hdfs_apply.py
create mode 100644 dbm-ui/backend/flow/views/hdfs_destroy.py
create mode 100644 dbm-ui/backend/flow/views/hdfs_disable.py
create mode 100644 dbm-ui/backend/flow/views/hdfs_enable.py
create mode 100644 dbm-ui/backend/flow/views/hdfs_reboot.py
create mode 100644 dbm-ui/backend/flow/views/hdfs_replace.py
create mode 100644 dbm-ui/backend/flow/views/hdfs_scale_up.py
create mode 100644 dbm-ui/backend/flow/views/hdfs_shrink.py
create mode 100644 dbm-ui/backend/flow/views/import_resource_init.py
create mode 100644 dbm-ui/backend/flow/views/import_sqlfile.py
create mode 100644 dbm-ui/backend/flow/views/influxdb_apply.py
create mode 100644 dbm-ui/backend/flow/views/influxdb_destroy.py
create mode 100644 dbm-ui/backend/flow/views/influxdb_disable.py
create mode 100644 dbm-ui/backend/flow/views/influxdb_enable.py
create mode 100644 dbm-ui/backend/flow/views/influxdb_reboot.py
create mode 100644 dbm-ui/backend/flow/views/influxdb_replace.py
create mode 100644 dbm-ui/backend/flow/views/kafka_apply.py
create mode 100644 dbm-ui/backend/flow/views/kafka_destroy.py
create mode 100644 dbm-ui/backend/flow/views/kafka_disable.py
create mode 100644 dbm-ui/backend/flow/views/kafka_enable.py
create mode 100644 dbm-ui/backend/flow/views/kafka_reboot.py
create mode 100644 dbm-ui/backend/flow/views/kafka_replace.py
create mode 100644 dbm-ui/backend/flow/views/kafka_scale_up.py
create mode 100644 dbm-ui/backend/flow/views/kafka_shrink.py
create mode 100644 dbm-ui/backend/flow/views/mysql_add_slave.py
create mode 100644 dbm-ui/backend/flow/views/mysql_checksum.py
create mode 100644 dbm-ui/backend/flow/views/mysql_edit_config.py
create mode 100644 dbm-ui/backend/flow/views/mysql_flashback.py
create mode 100644 dbm-ui/backend/flow/views/mysql_ha_apply.py
create mode 100644 dbm-ui/backend/flow/views/mysql_ha_db_table_backup.py
create mode 100644 dbm-ui/backend/flow/views/mysql_ha_destroy.py
create mode 100644 dbm-ui/backend/flow/views/mysql_ha_full_backup.py
create mode 100644 dbm-ui/backend/flow/views/mysql_ha_master_fail_over.py
create mode 100644 dbm-ui/backend/flow/views/mysql_ha_rename_database.py
create mode 100644 dbm-ui/backend/flow/views/mysql_ha_switch.py
create mode 100644 dbm-ui/backend/flow/views/mysql_ha_truncate_data.py
create mode 100644 dbm-ui/backend/flow/views/mysql_migrate_cluster.py
create mode 100644 dbm-ui/backend/flow/views/mysql_partition.py
create mode 100644 dbm-ui/backend/flow/views/mysql_proxy_add.py
create mode 100644 dbm-ui/backend/flow/views/mysql_proxy_reduce.py
create mode 100644 dbm-ui/backend/flow/views/mysql_proxy_switch.py
create mode 100644 dbm-ui/backend/flow/views/mysql_pt_table_sync.py
create mode 100644 dbm-ui/backend/flow/views/mysql_restore_local_slave.py
create mode 100644 dbm-ui/backend/flow/views/mysql_restore_slave.py
create mode 100644 dbm-ui/backend/flow/views/mysql_rollback_data.py
create mode 100644 dbm-ui/backend/flow/views/mysql_single_apply.py
create mode 100644 dbm-ui/backend/flow/views/mysql_single_destroy.py
create mode 100644 dbm-ui/backend/flow/views/mysql_single_rename_database.py
create mode 100644 dbm-ui/backend/flow/views/mysql_single_truncate_data.py
create mode 100644 dbm-ui/backend/flow/views/name_service.py
create mode 100644 dbm-ui/backend/flow/views/pulsar_apply.py
create mode 100644 dbm-ui/backend/flow/views/pulsar_destroy.py
create mode 100644 dbm-ui/backend/flow/views/pulsar_disable.py
create mode 100644 dbm-ui/backend/flow/views/pulsar_enable.py
create mode 100644 dbm-ui/backend/flow/views/pulsar_reboot.py
create mode 100644 dbm-ui/backend/flow/views/pulsar_replace.py
create mode 100644 dbm-ui/backend/flow/views/pulsar_scale_up.py
create mode 100644 dbm-ui/backend/flow/views/pulsar_shrink.py
create mode 100644 dbm-ui/backend/flow/views/redis_cluster.py
create mode 100644 dbm-ui/backend/flow/views/redis_keys.py
create mode 100644 dbm-ui/backend/flow/views/redis_scene.py
create mode 100644 dbm-ui/backend/flow/views/rollback_pipeline.py
create mode 100644 dbm-ui/backend/flow/views/spider_add_tmp_node.py
create mode 100644 dbm-ui/backend/flow/views/spider_checksum.py
create mode 100644 dbm-ui/backend/flow/views/spider_cluster_apply.py
create mode 100644 dbm-ui/backend/flow/views/spider_cluster_destroy.py
create mode 100644 dbm-ui/backend/flow/views/spider_cluster_rename_database.py
create mode 100644 dbm-ui/backend/flow/views/spider_cluster_truncate_database.py
create mode 100644 dbm-ui/backend/flow/views/spider_partition.py
create mode 100644 dbm-ui/backend/flow/views/spider_semantic_check.py
create mode 100644 dbm-ui/backend/flow/views/spider_slave_apply.py
create mode 100644 dbm-ui/backend/flow/views/spider_sql_import.py
create mode 100644 dbm-ui/backend/flow/views/sql_semantic_check.py
create mode 100644 dbm-ui/backend/homepage/__init__.py
create mode 100644 dbm-ui/backend/homepage/views.py
create mode 100644 dbm-ui/backend/iam_app/__init__.py
create mode 100644 dbm-ui/backend/iam_app/admin.py
create mode 100644 dbm-ui/backend/iam_app/apps.py
create mode 100644 dbm-ui/backend/iam_app/dataclass/__init__.py
create mode 100644 dbm-ui/backend/iam_app/dataclass/actions.py
create mode 100644 dbm-ui/backend/iam_app/dataclass/resources.py
create mode 100644 dbm-ui/backend/iam_app/exceptions.py
create mode 100644 dbm-ui/backend/iam_app/handlers/__init__.py
create mode 100644 dbm-ui/backend/iam_app/handlers/drf_perm.py
create mode 100644 dbm-ui/backend/iam_app/handlers/permission.py
create mode 100644 dbm-ui/backend/iam_app/migration_json_files/initial.json
create mode 100644 dbm-ui/backend/iam_app/migrations/0001_initial.py
create mode 100644 dbm-ui/backend/iam_app/migrations/0002_bk-dbm_202303301547.py
create mode 100644 dbm-ui/backend/iam_app/migrations/0003_bk-dbm_202304031530.py
create mode 100644 dbm-ui/backend/iam_app/migrations/__init__.py
create mode 100644 dbm-ui/backend/iam_app/serializers.py
create mode 100644 dbm-ui/backend/iam_app/urls.py
create mode 100644 dbm-ui/backend/iam_app/views/__init__.py
create mode 100644 dbm-ui/backend/iam_app/views/iam_provider.py
create mode 100644 dbm-ui/backend/iam_app/views/views.py
create mode 100644 dbm-ui/backend/redis_dts/__init__.py
create mode 100644 dbm-ui/backend/redis_dts/admin.py
create mode 100644 dbm-ui/backend/redis_dts/apis.py
create mode 100644 dbm-ui/backend/redis_dts/apps.py
create mode 100644 dbm-ui/backend/redis_dts/constants.py
create mode 100644 dbm-ui/backend/redis_dts/exceptions.py
create mode 100644 dbm-ui/backend/redis_dts/migrations/0001_initial.py
create mode 100644 dbm-ui/backend/redis_dts/migrations/__init__.py
create mode 100644 dbm-ui/backend/redis_dts/models/__init__.py
create mode 100644 dbm-ui/backend/redis_dts/models/tb_dts_distribute_lock.py
create mode 100644 dbm-ui/backend/redis_dts/models/tb_dts_server_blacklist.py
create mode 100644 dbm-ui/backend/redis_dts/models/tb_tendis_dts_job.py
create mode 100644 dbm-ui/backend/redis_dts/models/tb_tendis_dts_task.py
create mode 100644 dbm-ui/backend/redis_dts/serializers.py
create mode 100644 dbm-ui/backend/redis_dts/urls.py
create mode 100644 dbm-ui/backend/redis_dts/util.py
create mode 100644 dbm-ui/backend/redis_dts/views.py
create mode 100644 dbm-ui/backend/redis_dts/yasg_slz.py
create mode 100644 dbm-ui/backend/tests/__init__.py
create mode 100644 dbm-ui/backend/tests/configuration/__init__.py
create mode 100644 dbm-ui/backend/tests/configuration/views/__init__.py
create mode 100644 dbm-ui/backend/tests/configuration/views/conftest.py
create mode 100644 dbm-ui/backend/tests/configuration/views/password_policy.py
create mode 100644 dbm-ui/backend/tests/configuration/views/profile.py
create mode 100644 dbm-ui/backend/tests/conftest.py
create mode 100644 dbm-ui/backend/tests/constants.py
create mode 100644 dbm-ui/backend/tests/db_meta/__init__.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/__init__.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/cluster/__init__.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/cluster/tendbha/__init__.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/cluster/tendbha/test_handler.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/cluster/tendbha/test_tendbha.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/db_module/__init__.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/db_module/test_apis.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/dbha/__init__.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/dbha/test_apis.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/entry/__init__.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/machine/__init__.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/machine/test_apis.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/proxy_instance/__init__.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/proxy_instance/test_apis.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/storage_instance/__init__.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/storage_instance/test_apis.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/storage_instance_tuple/__init__.py
create mode 100644 dbm-ui/backend/tests/db_meta/api/storage_instance_tuple/test_apis.py
create mode 100644 dbm-ui/backend/tests/db_services/__init__.py
create mode 100644 dbm-ui/backend/tests/db_services/cmdb/__init__.py
create mode 100644 dbm-ui/backend/tests/db_services/cmdb/test_api.py
create mode 100644 dbm-ui/backend/tests/db_services/cmdb/test_biz.py
create mode 100644 dbm-ui/backend/tests/db_services/dbconfig/__init__.py
create mode 100644 dbm-ui/backend/tests/db_services/dbconfig/test_api.py
create mode 100644 dbm-ui/backend/tests/db_services/infras/__init__.py
create mode 100644 dbm-ui/backend/tests/db_services/infras/test_api.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/__init__.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/cluster/__init__.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/cluster/test_handler.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/conftest.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/instance/__init__.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/instance/test_handler.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/permission/__init__.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/permission/test_account_handler.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/permission/test_authorize_handler.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/permission/test_clone_handler.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/remote_service/__init__.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/remote_service/test_handlers.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/resources/__init__.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/resources/test_dbha.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/resources/test_dbsingle.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/resources/test_list_resource.py
create mode 100644 dbm-ui/backend/tests/db_services/mysql/test_sql_inport_handler.py
create mode 100644 dbm-ui/backend/tests/db_services/taskflow/__init__.py
create mode 100644 dbm-ui/backend/tests/db_services/taskflow/test_api.py
create mode 100644 dbm-ui/backend/tests/dbm_init/test_auto_create_services.py
create mode 100644 dbm-ui/backend/tests/flow/components/collections/__init__.py
create mode 100644 dbm-ui/backend/tests/flow/components/collections/base.py
create mode 100644 dbm-ui/backend/tests/flow/components/collections/mysql/permission/__init__.py
create mode 100644 dbm-ui/backend/tests/flow/components/collections/mysql/permission/test_authorize_rules.py
create mode 100644 dbm-ui/backend/tests/flow/components/collections/mysql/permission/test_clone_rules.py
create mode 100644 dbm-ui/backend/tests/flow/components/collections/mysql/test_exec_actuator_script.py
create mode 100644 dbm-ui/backend/tests/flow/components/collections/mysql/test_mysql_db_meta.py
create mode 100644 dbm-ui/backend/tests/flow/components/collections/mysql/test_mysql_dns_manage.py
create mode 100644 dbm-ui/backend/tests/flow/components/collections/mysql/test_trans_file.py
create mode 100644 dbm-ui/backend/tests/flow/components/collections/mysql/utils.py
create mode 100644 dbm-ui/backend/tests/mock_data/__init__.py
create mode 100644 dbm-ui/backend/tests/mock_data/components/__init__.py
create mode 100644 dbm-ui/backend/tests/mock_data/components/bamboo_engine.py
create mode 100644 dbm-ui/backend/tests/mock_data/components/bklog.py
create mode 100644 dbm-ui/backend/tests/mock_data/components/cc.py
create mode 100644 dbm-ui/backend/tests/mock_data/components/db_remote_service.py
create mode 100644 dbm-ui/backend/tests/mock_data/components/dbconfig.py
create mode 100644 dbm-ui/backend/tests/mock_data/components/gcs_dns.py
create mode 100644 dbm-ui/backend/tests/mock_data/components/gse.py
create mode 100644 dbm-ui/backend/tests/mock_data/components/itsm.py
create mode 100644 dbm-ui/backend/tests/mock_data/components/job.py
create mode 100644 dbm-ui/backend/tests/mock_data/components/mysql_priv_manager.py
create mode 100644 dbm-ui/backend/tests/mock_data/components/sql_import.py
create mode 100644 dbm-ui/backend/tests/mock_data/components/storage.py
create mode 100644 dbm-ui/backend/tests/mock_data/constant.py
create mode 100644 dbm-ui/backend/tests/mock_data/db_services/__init__.py
create mode 100644 dbm-ui/backend/tests/mock_data/db_services/mysql/permission/account.py
create mode 100644 dbm-ui/backend/tests/mock_data/db_services/mysql/permission/authorize.py
create mode 100644 dbm-ui/backend/tests/mock_data/db_services/mysql/permission/clone.py
create mode 100644 dbm-ui/backend/tests/mock_data/db_services/taskflow.py
create mode 100644 dbm-ui/backend/tests/mock_data/flow/components/collections/mysql.py
create mode 100644 dbm-ui/backend/tests/mock_data/iam_app/__init__.py
create mode 100644 dbm-ui/backend/tests/mock_data/iam_app/permission.py
create mode 100644 dbm-ui/backend/tests/mock_data/ticket/__init__.py
create mode 100644 dbm-ui/backend/tests/mock_data/ticket/ticket_flow.py
create mode 100644 dbm-ui/backend/tests/mock_data/ticket/ticket_params_data.py
create mode 100644 dbm-ui/backend/tests/mock_data/utils.py
create mode 100644 dbm-ui/backend/tests/mysql/test_ticket.py
create mode 100644 dbm-ui/backend/tests/ticket/__init__.py
create mode 100644 dbm-ui/backend/tests/ticket/test_ticket_flow.py
create mode 100644 "dbm-ui/backend/tests/\345\215\225\345\205\203\346\265\213\350\257\225\345\274\200\345\217\221\350\247\204\350\214\203.md"
create mode 100644 dbm-ui/backend/ticket/__init__.py
create mode 100644 dbm-ui/backend/ticket/admin.py
create mode 100644 dbm-ui/backend/ticket/apps.py
create mode 100644 dbm-ui/backend/ticket/builders/__init__.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/base.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/dbha_add.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/dbha_reduce.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/dbha_reload.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/dbha_replace.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/dns_add.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/dns_reduce.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/dns_reload.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/dns_replace.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/drs_add.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/drs_reduce.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/drs_reload.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/drs_replace.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/nginx_reload.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/nginx_replace.py
create mode 100644 dbm-ui/backend/ticket/builders/cloud/service_apply.py
create mode 100644 dbm-ui/backend/ticket/builders/common/__init__.py
create mode 100644 dbm-ui/backend/ticket/builders/common/base.py
create mode 100644 dbm-ui/backend/ticket/builders/common/bigdata.py
create mode 100644 dbm-ui/backend/ticket/builders/common/constants.py
create mode 100644 dbm-ui/backend/ticket/builders/es/__init__.py
create mode 100644 dbm-ui/backend/ticket/builders/es/es_apply.py
create mode 100644 dbm-ui/backend/ticket/builders/es/es_destroy.py
create mode 100644 dbm-ui/backend/ticket/builders/es/es_disable.py
create mode 100644 dbm-ui/backend/ticket/builders/es/es_enable.py
create mode 100644 dbm-ui/backend/ticket/builders/es/es_reboot.py
create mode 100644 dbm-ui/backend/ticket/builders/es/es_replace.py
create mode 100644 dbm-ui/backend/ticket/builders/es/es_scale_up.py
create mode 100644 dbm-ui/backend/ticket/builders/es/es_shrink.py
create mode 100644 dbm-ui/backend/ticket/builders/hdfs/__init__.py
create mode 100644 dbm-ui/backend/ticket/builders/hdfs/hdfs_apply.py
create mode 100644 dbm-ui/backend/ticket/builders/hdfs/hdfs_destroy.py
create mode 100644 dbm-ui/backend/ticket/builders/hdfs/hdfs_disable.py
create mode 100644 dbm-ui/backend/ticket/builders/hdfs/hdfs_enable.py
create mode 100644 dbm-ui/backend/ticket/builders/hdfs/hdfs_reboot.py
create mode 100644 dbm-ui/backend/ticket/builders/hdfs/hdfs_replace.py
create mode 100644 dbm-ui/backend/ticket/builders/hdfs/hdfs_scale_up.py
create mode 100644 dbm-ui/backend/ticket/builders/hdfs/hdfs_shrink.py
create mode 100644 dbm-ui/backend/ticket/builders/influxdb/__init__.py
create mode 100644 dbm-ui/backend/ticket/builders/influxdb/influxdb_apply.py
create mode 100644 dbm-ui/backend/ticket/builders/influxdb/influxdb_destroy.py
create mode 100644 dbm-ui/backend/ticket/builders/influxdb/influxdb_disable.py
create mode 100644 dbm-ui/backend/ticket/builders/influxdb/influxdb_enable.py
create mode 100644 dbm-ui/backend/ticket/builders/influxdb/influxdb_reboot.py
create mode 100644 dbm-ui/backend/ticket/builders/influxdb/influxdb_replace.py
create mode 100644 dbm-ui/backend/ticket/builders/kafka/__init__.py
create mode 100644 dbm-ui/backend/ticket/builders/kafka/kafka_apply.py
create mode 100644 dbm-ui/backend/ticket/builders/kafka/kafka_destroy.py
create mode 100644 dbm-ui/backend/ticket/builders/kafka/kafka_disable.py
create mode 100644 dbm-ui/backend/ticket/builders/kafka/kafka_enable.py
create mode 100644 dbm-ui/backend/ticket/builders/kafka/kafka_reboot.py
create mode 100644 dbm-ui/backend/ticket/builders/kafka/kafka_replace.py
create mode 100644 dbm-ui/backend/ticket/builders/kafka/kafka_scale_up.py
create mode 100644 dbm-ui/backend/ticket/builders/kafka/kafka_shrink.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/__init__.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/base.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_add_slave.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_authorize_rules.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_checksum.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_clone_rules.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_data_repair.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_fixpoint_rollback.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_flashback.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_ha_apply.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_ha_backup.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_ha_clear.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_ha_destroy.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_ha_disable.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_ha_enable.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_ha_full_backup.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_ha_rename.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_import_sqlfile.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_master_fail_over.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_master_slave_switch.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_migrate_cluster.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_partition.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_proxy_add.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_proxy_switch.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_restore_local_slave.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_restore_slave.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_single_apply.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_single_destroy.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_single_disable.py
create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_single_enable.py
create mode 100644 dbm-ui/backend/ticket/builders/pulsar/__init__.py
create mode 100644 dbm-ui/backend/ticket/builders/pulsar/pulsar_apply.py
create mode 100644 dbm-ui/backend/ticket/builders/pulsar/pulsar_destroy.py
create mode 100644 dbm-ui/backend/ticket/builders/pulsar/pulsar_disable.py
create mode 100644 dbm-ui/backend/ticket/builders/pulsar/pulsar_enable.py
create mode 100644 dbm-ui/backend/ticket/builders/pulsar/pulsar_reboot.py
create mode 100644 dbm-ui/backend/ticket/builders/pulsar/pulsar_replace.py
create mode 100644 dbm-ui/backend/ticket/builders/pulsar/pulsar_scale_up.py
create mode 100644 dbm-ui/backend/ticket/builders/pulsar/pulsar_shrink.py
create mode 100644 dbm-ui/backend/ticket/builders/redis/__init__.py
create mode 100644 dbm-ui/backend/ticket/builders/redis/base.py
create mode 100644 dbm-ui/backend/ticket/builders/redis/redis_backup.py
create mode 100644 dbm-ui/backend/ticket/builders/redis/redis_close.py
create mode 100644 dbm-ui/backend/ticket/builders/redis/redis_cluster_apply.py
create mode 100644 dbm-ui/backend/ticket/builders/redis/redis_destroy.py
create mode 100644 dbm-ui/backend/ticket/builders/redis/redis_key_delete.py
create mode 100644 dbm-ui/backend/ticket/builders/redis/redis_key_extract.py
create mode 100644 dbm-ui/backend/ticket/builders/redis/redis_open.py
create mode 100644 dbm-ui/backend/ticket/builders/redis/redis_purge.py
create mode 100644 dbm-ui/backend/ticket/builders/spider/spider_partition.py
create mode 100644 dbm-ui/backend/ticket/constants.py
create mode 100644 dbm-ui/backend/ticket/contexts.py
create mode 100644 dbm-ui/backend/ticket/exceptions.py
create mode 100644 dbm-ui/backend/ticket/exclusive_ticket.xlsx
create mode 100644 dbm-ui/backend/ticket/flow_manager/__init__.py
create mode 100644 dbm-ui/backend/ticket/flow_manager/base.py
create mode 100644 dbm-ui/backend/ticket/flow_manager/delivery.py
create mode 100644 dbm-ui/backend/ticket/flow_manager/inner.py
create mode 100644 dbm-ui/backend/ticket/flow_manager/itsm.py
create mode 100644 dbm-ui/backend/ticket/flow_manager/manager.py
create mode 100644 dbm-ui/backend/ticket/flow_manager/pause.py
create mode 100644 dbm-ui/backend/ticket/flow_manager/resource.py
create mode 100644 dbm-ui/backend/ticket/flow_manager/timer.py
create mode 100644 dbm-ui/backend/ticket/migrations/0001_initial.py
create mode 100644 dbm-ui/backend/ticket/migrations/__init__.py
create mode 100644 dbm-ui/backend/ticket/mock_data.py
create mode 100644 dbm-ui/backend/ticket/models/__init__.py
create mode 100644 dbm-ui/backend/ticket/models/ticket.py
create mode 100644 dbm-ui/backend/ticket/models/ticket_result_relation.py
create mode 100644 dbm-ui/backend/ticket/models/todo.py
create mode 100644 dbm-ui/backend/ticket/readme.md
create mode 100644 dbm-ui/backend/ticket/serializers.py
create mode 100644 dbm-ui/backend/ticket/tasks/ticket_tasks.py
create mode 100644 dbm-ui/backend/ticket/todos/__init__.py
create mode 100644 dbm-ui/backend/ticket/todos/pause_todo.py
create mode 100644 dbm-ui/backend/ticket/todos/pipeline_todo.py
create mode 100644 dbm-ui/backend/ticket/urls.py
create mode 100644 dbm-ui/backend/ticket/views.py
create mode 100644 dbm-ui/backend/ticket/yasg_slz.py
create mode 100644 dbm-ui/backend/urls.py
create mode 100644 dbm-ui/backend/utils/__init__.py
create mode 100644 dbm-ui/backend/utils/basic.py
create mode 100644 dbm-ui/backend/utils/batch_request.py
create mode 100644 dbm-ui/backend/utils/cache.py
create mode 100644 dbm-ui/backend/utils/enum.py
create mode 100644 dbm-ui/backend/utils/env.py
create mode 100644 dbm-ui/backend/utils/excel.py
create mode 100644 dbm-ui/backend/utils/files.py
create mode 100644 dbm-ui/backend/utils/http.py
create mode 100644 dbm-ui/backend/utils/local.py
create mode 100644 dbm-ui/backend/utils/log.py
create mode 100644 dbm-ui/backend/utils/md5.py
create mode 100644 dbm-ui/backend/utils/pytest.py
create mode 100644 dbm-ui/backend/utils/redis.py
create mode 100644 dbm-ui/backend/utils/string.py
create mode 100644 dbm-ui/backend/utils/time.py
create mode 100644 dbm-ui/backend/utils/validators/__init__.py
create mode 100644 dbm-ui/backend/version_log/__init__.py
create mode 100644 dbm-ui/backend/version_log/apps.py
create mode 100644 dbm-ui/backend/version_log/config.py
create mode 100644 dbm-ui/backend/version_log/decorators.py
create mode 100644 dbm-ui/backend/version_log/middleware.py
create mode 100644 dbm-ui/backend/version_log/migrations/0001_initial.py
create mode 100644 dbm-ui/backend/version_log/migrations/__init__.py
create mode 100644 dbm-ui/backend/version_log/models.py
create mode 100644 dbm-ui/backend/version_log/urls.py
create mode 100644 dbm-ui/backend/version_log/utils.py
create mode 100644 dbm-ui/backend/version_log/views.py
create mode 100755 dbm-ui/bin/build_frontend.sh
create mode 100755 dbm-ui/bin/celery.sh
create mode 100755 dbm-ui/bin/environ.sh
create mode 100755 dbm-ui/bin/install_precommit.sh
create mode 100755 dbm-ui/bin/makemigrations.sh
create mode 100755 dbm-ui/bin/manage.sh
create mode 100755 dbm-ui/bin/migrate.sh
create mode 100755 dbm-ui/bin/pytest.sh
create mode 100644 dbm-ui/blueking/component/__init__.py
create mode 100644 dbm-ui/blueking/component/apis/__init__.py
create mode 100644 dbm-ui/blueking/component/apis/bk_login.py
create mode 100644 dbm-ui/blueking/component/apis/esb.py
create mode 100644 dbm-ui/blueking/component/base.py
create mode 100644 dbm-ui/blueking/component/client.py
create mode 100644 dbm-ui/blueking/component/collections.py
create mode 100644 dbm-ui/blueking/component/conf.py
create mode 100644 dbm-ui/blueking/component/constants.py
create mode 100644 dbm-ui/blueking/component/exceptions.py
create mode 100644 dbm-ui/blueking/component/shortcuts.py
create mode 100644 dbm-ui/blueking/component/utils.py
create mode 100644 dbm-ui/config/__init__.py
create mode 100644 dbm-ui/config/default.py
create mode 100644 dbm-ui/config/dev.py
create mode 100644 dbm-ui/config/prod.py
create mode 100644 dbm-ui/config/stag.py
create mode 100644 dbm-ui/etc/gunicorn.py
create mode 100644 dbm-ui/frontend/.browserslistrc
create mode 100644 dbm-ui/frontend/.editorconfig
create mode 100644 dbm-ui/frontend/.env.production
create mode 100644 dbm-ui/frontend/.eslintignore
create mode 100644 dbm-ui/frontend/.eslintrc-auto-import.json
create mode 100644 dbm-ui/frontend/.eslintrc.js
create mode 100644 dbm-ui/frontend/.gitignore
create mode 100755 dbm-ui/frontend/.husky/commit-msg
create mode 100755 dbm-ui/frontend/.husky/pre-commit
create mode 100644 dbm-ui/frontend/.npmrc
create mode 100644 dbm-ui/frontend/.stylelintignore
create mode 100644 dbm-ui/frontend/.stylelintrc.js
create mode 100644 dbm-ui/frontend/README.md
create mode 100644 dbm-ui/frontend/auto-copyright.js
create mode 100644 dbm-ui/frontend/babel.config.js
create mode 100644 dbm-ui/frontend/bkuiVueResolver.ts
create mode 100644 dbm-ui/frontend/commitlint.config.js
create mode 100644 dbm-ui/frontend/components.d.ts
create mode 100644 dbm-ui/frontend/env.d.ts
create mode 100644 dbm-ui/frontend/index.html
create mode 100644 dbm-ui/frontend/package.json
create mode 100644 dbm-ui/frontend/src/App.vue
create mode 100644 dbm-ui/frontend/src/common/cache.ts
create mode 100644 dbm-ui/frontend/src/common/const.ts
create mode 100644 dbm-ui/frontend/src/common/importComps.ts
create mode 100644 dbm-ui/frontend/src/common/regex.ts
create mode 100644 dbm-ui/frontend/src/common/tippy.ts
create mode 100644 dbm-ui/frontend/src/components/app-selector/index.vue
create mode 100644 dbm-ui/frontend/src/components/app-selector/utils.ts
create mode 100644 dbm-ui/frontend/src/components/apply-items/BusinessItems.vue
create mode 100644 dbm-ui/frontend/src/components/apply-items/CloudItem.vue
create mode 100644 dbm-ui/frontend/src/components/apply-items/ClusterAlias.vue
create mode 100644 dbm-ui/frontend/src/components/apply-items/ClusterName.vue
create mode 100644 dbm-ui/frontend/src/components/apply-items/RegionItem.vue
create mode 100644 dbm-ui/frontend/src/components/auth/AuthComponent.tsx
create mode 100644 dbm-ui/frontend/src/components/auth/style.less
create mode 100644 dbm-ui/frontend/src/components/business-selector/BusinessSelector.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-authorize/ClusterAuthorize.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-common/OperationStatusTips.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-common/RenderBaseInfo.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-common/RenderInstanceStatus.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-common/RenderNodeInstance.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-common/RenderOperationTag.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-common/RenderPassword.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-common/RenderRole.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-common/RenderSimpleClusterList.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-common/RenderStatus.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-common/big-data-host-table/HdfsHostTable.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-common/big-data-host-table/RenderHostTable.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-common/big-data-host-table/common/tableSetting.ts
create mode 100644 dbm-ui/frontend/src/components/cluster-common/big-data-host-table/es-host-table/components/EditHostNode.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-common/big-data-host-table/es-host-table/index.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-common/big-data-host-table/hook/useLocalPagination.ts
create mode 100644 dbm-ui/frontend/src/components/cluster-details/AsideList.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-details/ClusterTopo.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-details/common/graphData.ts
create mode 100644 dbm-ui/frontend/src/components/cluster-details/common/useRenderGraph.tsx
create mode 100644 dbm-ui/frontend/src/components/cluster-event-change/EventChange.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-monitor/MonitorDashboard.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-selector/ClusterSelector.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-selector/CollapseMini.vue
create mode 100644 dbm-ui/frontend/src/components/cluster-selector/types.ts
create mode 100644 dbm-ui/frontend/src/components/cluster-selector/useClusterData.ts
create mode 100644 dbm-ui/frontend/src/components/cost-timer/CostTimer.vue
create mode 100644 dbm-ui/frontend/src/components/db-card-checkbox/CardCheckbox.vue
create mode 100644 dbm-ui/frontend/src/components/db-card/index.vue
create mode 100644 dbm-ui/frontend/src/components/db-collapse-table/DBCollapseTable.vue
create mode 100644 dbm-ui/frontend/src/components/db-diff/index.vue
create mode 100644 dbm-ui/frontend/src/components/db-empty/index.vue
create mode 100644 dbm-ui/frontend/src/components/db-form/index.vue
create mode 100644 dbm-ui/frontend/src/components/db-form/item.vue
create mode 100644 dbm-ui/frontend/src/components/db-icon/index.ts
create mode 100644 dbm-ui/frontend/src/components/db-icon/style.css
create mode 100644 dbm-ui/frontend/src/components/db-member-selector/index.vue
create mode 100644 dbm-ui/frontend/src/components/db-popconfirm/index.vue
create mode 100644 dbm-ui/frontend/src/components/db-search-select/index.vue
create mode 100644 dbm-ui/frontend/src/components/db-sideslider/index.vue
create mode 100644 dbm-ui/frontend/src/components/db-status/index.vue
create mode 100644 dbm-ui/frontend/src/components/db-table/OriginalTable.vue
create mode 100644 dbm-ui/frontend/src/components/db-table/index.vue
create mode 100644 dbm-ui/frontend/src/components/db-textarea/DbTextarea.vue
create mode 100644 dbm-ui/frontend/src/components/editable-info/index.vue
create mode 100644 dbm-ui/frontend/src/components/empty-status/EmptyStatus.vue
create mode 100644 dbm-ui/frontend/src/components/host-preview/HostPreview.vue
create mode 100644 dbm-ui/frontend/src/components/instance-selector/Index.vue
create mode 100644 dbm-ui/frontend/src/components/instance-selector/common/tableSettings.ts
create mode 100644 dbm-ui/frontend/src/components/instance-selector/common/types.ts
create mode 100644 dbm-ui/frontend/src/components/instance-selector/common/utils.ts
create mode 100644 dbm-ui/frontend/src/components/instance-selector/components/CollapseMini.vue
create mode 100644 dbm-ui/frontend/src/components/instance-selector/components/PanelTab.vue
create mode 100644 dbm-ui/frontend/src/components/instance-selector/components/PreviewResult.vue
create mode 100644 dbm-ui/frontend/src/components/instance-selector/components/RenderManualHost.vue
create mode 100644 dbm-ui/frontend/src/components/instance-selector/components/RenderManualInput.vue
create mode 100644 dbm-ui/frontend/src/components/instance-selector/components/RenderTopo.vue
create mode 100644 dbm-ui/frontend/src/components/instance-selector/components/RenderTopoHost.vue
create mode 100644 dbm-ui/frontend/src/components/ip-selector/IpSelector.vue
create mode 100644 dbm-ui/frontend/src/components/ip-selector/components/PreviewWhitelist.vue
create mode 100644 dbm-ui/frontend/src/components/layouts/Copyright.vue
create mode 100644 dbm-ui/frontend/src/components/layouts/LocaleSwitch.vue
create mode 100644 dbm-ui/frontend/src/components/layouts/Login.vue
create mode 100644 dbm-ui/frontend/src/components/layouts/MainBreadcrumbs.vue
create mode 100644 dbm-ui/frontend/src/components/layouts/MainView.vue
create mode 100644 dbm-ui/frontend/src/components/layouts/ResourceDetection.vue
create mode 100644 dbm-ui/frontend/src/components/layouts/common.ts
create mode 100644 dbm-ui/frontend/src/components/minimap/Minimap.vue
create mode 100644 dbm-ui/frontend/src/components/mysql-toolbox/BatchEdit.vue
create mode 100644 dbm-ui/frontend/src/components/mysql-toolbox/Success.vue
create mode 100644 dbm-ui/frontend/src/components/mysql-toolbox/ToolboxTable.vue
create mode 100644 dbm-ui/frontend/src/components/mysql-toolbox/common/const.ts
create mode 100644 dbm-ui/frontend/src/components/permission/Dialog.vue
create mode 100644 dbm-ui/frontend/src/components/permission/Main.vue
create mode 100644 dbm-ui/frontend/src/components/render-host-status/Index.vue
create mode 100644 dbm-ui/frontend/src/components/render-instances/RenderInstances.vue
create mode 100644 dbm-ui/frontend/src/components/render-row/index.vue
create mode 100644 dbm-ui/frontend/src/components/smart-action/index.vue
create mode 100644 dbm-ui/frontend/src/components/vue2/bk-log/index.less
create mode 100644 dbm-ui/frontend/src/components/vue2/bk-log/index.vue
create mode 100644 dbm-ui/frontend/src/components/vue2/ip-selector/index.js
create mode 100644 dbm-ui/frontend/src/components/vue2/ip-selector/index.ts
create mode 100644 dbm-ui/frontend/src/components/vue2/search-select/index.vue
create mode 100644 dbm-ui/frontend/src/directives/cursor/index.less
create mode 100644 dbm-ui/frontend/src/directives/cursor/index.ts
create mode 100644 dbm-ui/frontend/src/directives/cursor/lock.svg
create mode 100644 dbm-ui/frontend/src/directives/index.ts
create mode 100644 dbm-ui/frontend/src/directives/overflowTips.ts
create mode 100644 dbm-ui/frontend/src/env.d.ts
create mode 100644 dbm-ui/frontend/src/helper/local-cache/index.ts
create mode 100644 dbm-ui/frontend/src/helper/local-cache/listColumnsCache.ts
create mode 100644 dbm-ui/frontend/src/helper/validator/index.ts
create mode 100644 dbm-ui/frontend/src/helper/validator/is-ip.ts
create mode 100644 dbm-ui/frontend/src/hooks/index.ts
create mode 100644 dbm-ui/frontend/src/hooks/useApplyBase.ts
create mode 100644 dbm-ui/frontend/src/hooks/useBeforeClose.ts
create mode 100644 dbm-ui/frontend/src/hooks/useCopy.ts
create mode 100644 dbm-ui/frontend/src/hooks/useDebouncedRef.ts
create mode 100644 dbm-ui/frontend/src/hooks/useDefaultPagination.ts
create mode 100644 dbm-ui/frontend/src/hooks/useFormItem.ts
create mode 100644 dbm-ui/frontend/src/hooks/useInfo.tsx
create mode 100644
dbm-ui/frontend/src/hooks/useInfoWithIcon.tsx create mode 100644 dbm-ui/frontend/src/hooks/useListeners.ts create mode 100644 dbm-ui/frontend/src/hooks/useLocalPagination.ts create mode 100644 dbm-ui/frontend/src/hooks/useModelProvider.ts create mode 100644 dbm-ui/frontend/src/hooks/useSQLTaskNotify.tsx create mode 100644 dbm-ui/frontend/src/hooks/useStickyFooter.ts create mode 100644 dbm-ui/frontend/src/hooks/useTableMaxHeight.ts create mode 100644 dbm-ui/frontend/src/hooks/useTableSettings.ts create mode 100644 dbm-ui/frontend/src/hooks/useTicketMessage.ts create mode 100644 dbm-ui/frontend/src/hooks/useUrlSearach.ts create mode 100644 dbm-ui/frontend/src/images/403.png create mode 100644 dbm-ui/frontend/src/images/404.png create mode 100644 dbm-ui/frontend/src/images/500.png create mode 100644 dbm-ui/frontend/src/images/architecture-01.png create mode 100644 dbm-ui/frontend/src/images/architecture-02.png create mode 100644 dbm-ui/frontend/src/images/building.png create mode 100644 dbm-ui/frontend/src/images/empty.png create mode 100644 dbm-ui/frontend/src/images/es.png create mode 100644 dbm-ui/frontend/src/images/flow-loading.png create mode 100644 dbm-ui/frontend/src/images/kafka.png create mode 100644 dbm-ui/frontend/src/images/loading.svg create mode 100644 dbm-ui/frontend/src/images/logo.png create mode 100644 dbm-ui/frontend/src/images/mongo-db.png create mode 100644 dbm-ui/frontend/src/images/monitoring.png create mode 100644 dbm-ui/frontend/src/images/mysql.png create mode 100644 dbm-ui/frontend/src/images/nav-log.svg create mode 100644 dbm-ui/frontend/src/images/redis.png create mode 100644 dbm-ui/frontend/src/images/tendis-cache.png create mode 100644 dbm-ui/frontend/src/images/tendis-ssd.png create mode 100644 dbm-ui/frontend/src/images/tendisplus.png create mode 100644 dbm-ui/frontend/src/locales/en.json create mode 100644 dbm-ui/frontend/src/locales/index.ts create mode 100644 dbm-ui/frontend/src/locales/zh-cn.json create mode 100644 dbm-ui/frontend/src/main.ts create mode 100644 dbm-ui/frontend/src/router/index.ts create mode 100644 dbm-ui/frontend/src/router/routerInterceptor.ts create mode 100644 dbm-ui/frontend/src/services/clusters.ts create mode 100644 dbm-ui/frontend/src/services/common.ts create mode 100644 dbm-ui/frontend/src/services/configs.ts create mode 100644 dbm-ui/frontend/src/services/dbResource.ts create mode 100644 dbm-ui/frontend/src/services/es.ts create mode 100644 dbm-ui/frontend/src/services/eventSwitch.ts create mode 100644 dbm-ui/frontend/src/services/fixpointRollback.ts create mode 100644 dbm-ui/frontend/src/services/hdfs.ts create mode 100644 dbm-ui/frontend/src/services/http/index.ts create mode 100644 dbm-ui/frontend/src/services/influxdb.ts create mode 100644 dbm-ui/frontend/src/services/influxdbGroup.ts create mode 100644 dbm-ui/frontend/src/services/ip.ts create mode 100644 dbm-ui/frontend/src/services/kafka.ts create mode 100644 dbm-ui/frontend/src/services/model/es/es-instance.ts create mode 100644 dbm-ui/frontend/src/services/model/es/es-node.ts create mode 100644 dbm-ui/frontend/src/services/model/es/es-password.ts create mode 100644 dbm-ui/frontend/src/services/model/es/es.ts create mode 100644 dbm-ui/frontend/src/services/model/fixpoint-rollback/backup-log.ts create mode 100644 dbm-ui/frontend/src/services/model/hdfs/hdfs-instance.ts create mode 100644 dbm-ui/frontend/src/services/model/hdfs/hdfs-node.ts create mode 100644 dbm-ui/frontend/src/services/model/hdfs/hdfs-password.ts create mode 100644 
dbm-ui/frontend/src/services/model/hdfs/hdfs.ts create mode 100644 dbm-ui/frontend/src/services/model/influxdb/influxdbInstance.ts create mode 100644 dbm-ui/frontend/src/services/model/kafka/kafka-instance.ts create mode 100644 dbm-ui/frontend/src/services/model/kafka/kafka-node.ts create mode 100644 dbm-ui/frontend/src/services/model/kafka/kafka-password.ts create mode 100644 dbm-ui/frontend/src/services/model/kafka/kafka.ts create mode 100644 dbm-ui/frontend/src/services/model/pulsar/pulsar-instance.ts create mode 100644 dbm-ui/frontend/src/services/model/pulsar/pulsar-node.ts create mode 100644 dbm-ui/frontend/src/services/model/pulsar/pulsar-password.ts create mode 100644 dbm-ui/frontend/src/services/model/pulsar/pulsar.ts create mode 100644 dbm-ui/frontend/src/services/model/resource-spec/resourceSpec.ts create mode 100644 dbm-ui/frontend/src/services/model/sql-import/grammar-check.ts create mode 100644 dbm-ui/frontend/src/services/model/sql-import/semantic-data.ts create mode 100644 dbm-ui/frontend/src/services/model/sql-import/user-semantic-task.ts create mode 100644 dbm-ui/frontend/src/services/mysqlCluster.ts create mode 100644 dbm-ui/frontend/src/services/permission.ts create mode 100644 dbm-ui/frontend/src/services/pulsar.ts create mode 100644 dbm-ui/frontend/src/services/resourceSpec.ts create mode 100644 dbm-ui/frontend/src/services/sqlImport.ts create mode 100644 dbm-ui/frontend/src/services/staffSetting.ts create mode 100644 dbm-ui/frontend/src/services/storage.ts create mode 100644 dbm-ui/frontend/src/services/taskflow.ts create mode 100644 dbm-ui/frontend/src/services/ticket.tsx create mode 100644 dbm-ui/frontend/src/services/types/clusters.ts create mode 100644 dbm-ui/frontend/src/services/types/common.ts create mode 100644 dbm-ui/frontend/src/services/types/configs.ts create mode 100644 dbm-ui/frontend/src/services/types/es.ts create mode 100644 dbm-ui/frontend/src/services/types/eventSwitch.ts create mode 100644 dbm-ui/frontend/src/services/types/hdfs.ts create mode 100644 dbm-ui/frontend/src/services/types/influxdbGroup.ts create mode 100644 dbm-ui/frontend/src/services/types/ip.ts create mode 100644 dbm-ui/frontend/src/services/types/kafka.ts create mode 100644 dbm-ui/frontend/src/services/types/permission.ts create mode 100644 dbm-ui/frontend/src/services/types/staffSetting.ts create mode 100644 dbm-ui/frontend/src/services/types/taskflow.ts create mode 100644 dbm-ui/frontend/src/services/types/ticket.ts create mode 100644 dbm-ui/frontend/src/services/types/versionFiles.ts create mode 100644 dbm-ui/frontend/src/services/types/whitelist.ts create mode 100644 dbm-ui/frontend/src/services/versionFiles.ts create mode 100644 dbm-ui/frontend/src/services/whitelist.ts create mode 100644 dbm-ui/frontend/src/stores/globalBizs.ts create mode 100644 dbm-ui/frontend/src/stores/index.ts create mode 100644 dbm-ui/frontend/src/stores/mainView.ts create mode 100644 dbm-ui/frontend/src/stores/useMenu.ts create mode 100644 dbm-ui/frontend/src/stores/useRelatedSystem.ts create mode 100644 dbm-ui/frontend/src/stores/useSQLTaskCount.ts create mode 100644 dbm-ui/frontend/src/stores/useUserProfile.ts create mode 100644 dbm-ui/frontend/src/stores/useUserSemanticTasks.ts create mode 100644 dbm-ui/frontend/src/styles/applyInstance.less create mode 100644 dbm-ui/frontend/src/styles/base.less create mode 100644 dbm-ui/frontend/src/styles/common.less create mode 100644 dbm-ui/frontend/src/styles/iconCool.less create mode 100644 dbm-ui/frontend/src/styles/mixins.less create mode 100644 
dbm-ui/frontend/src/styles/reset.less create mode 100644 dbm-ui/frontend/src/styles/tippy.less create mode 100644 dbm-ui/frontend/src/styles/variables.less create mode 100644 dbm-ui/frontend/src/types/auto-imports.d.ts create mode 100644 dbm-ui/frontend/src/types/bkui-vue.ts create mode 100644 dbm-ui/frontend/src/types/index.d.ts create mode 100644 dbm-ui/frontend/src/types/router.d.ts create mode 100644 dbm-ui/frontend/src/types/vite-env.d.ts create mode 100644 dbm-ui/frontend/src/utils/bytePretty.ts create mode 100644 dbm-ui/frontend/src/utils/classes.ts create mode 100644 dbm-ui/frontend/src/utils/deepMerge.ts create mode 100644 dbm-ui/frontend/src/utils/dom.ts create mode 100644 dbm-ui/frontend/src/utils/encode.ts create mode 100644 dbm-ui/frontend/src/utils/execCopy.ts create mode 100644 dbm-ui/frontend/src/utils/generateId.ts create mode 100644 dbm-ui/frontend/src/utils/getCostTimeDisplay.ts create mode 100644 dbm-ui/frontend/src/utils/getMenuListSearch.ts create mode 100644 dbm-ui/frontend/src/utils/getSearchSelectorParams.ts create mode 100644 dbm-ui/frontend/src/utils/index.ts create mode 100644 dbm-ui/frontend/src/utils/isObject.ts create mode 100644 dbm-ui/frontend/src/utils/leaveConfirm.ts create mode 100644 dbm-ui/frontend/src/utils/makeMap.ts create mode 100644 dbm-ui/frontend/src/utils/message.ts create mode 100644 dbm-ui/frontend/src/utils/random.ts create mode 100644 dbm-ui/frontend/src/utils/recentDays.ts create mode 100644 dbm-ui/frontend/src/utils/url.ts create mode 100644 dbm-ui/frontend/src/utils/vNodeToHtml.ts create mode 100644 dbm-ui/frontend/src/views/db-configure/business/ConfigBind.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/business/ConfigEdit.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/business/Content.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/business/biz/ConfigDatabase.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/business/biz/ConfigDetails.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/business/biz/ConfigInfo.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/business/cluster/ConfigDetails.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/business/cluster/ConfigInfo.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/business/common/types.ts create mode 100644 dbm-ui/frontend/src/views/db-configure/business/components/ConfigEmpty.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/business/hooks/useBaseDetails.ts create mode 100644 dbm-ui/frontend/src/views/db-configure/business/hooks/useTreeData.ts create mode 100644 dbm-ui/frontend/src/views/db-configure/business/index.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/business/module/ConfigDetails.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/business/module/ConfigInfo.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/common/const.ts create mode 100644 dbm-ui/frontend/src/views/db-configure/common/types.ts create mode 100644 dbm-ui/frontend/src/views/db-configure/components/DetailsBase.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/components/DiffCompare.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/components/EditBase.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/components/ParameterTable.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/components/PublishRecord.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/components/RangeInput.vue create mode 100644 
dbm-ui/frontend/src/views/db-configure/components/ReadonlyTable.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/components/TopTab.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/hooks/useDiff.ts create mode 100644 dbm-ui/frontend/src/views/db-configure/hooks/useLevelParams.ts create mode 100644 dbm-ui/frontend/src/views/db-configure/platform/ConfigureDetails.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/platform/ConfigureEdit.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/platform/index.vue create mode 100644 dbm-ui/frontend/src/views/db-configure/routes.ts create mode 100644 dbm-ui/frontend/src/views/deployment-plan/Index.vue create mode 100644 dbm-ui/frontend/src/views/deployment-plan/list/Index.vue create mode 100644 dbm-ui/frontend/src/views/deployment-plan/list/components/Operation.vue create mode 100644 dbm-ui/frontend/src/views/deployment-plan/routes.ts create mode 100644 dbm-ui/frontend/src/views/es-manage/Index.vue create mode 100644 dbm-ui/frontend/src/views/es-manage/apply/Index.vue create mode 100644 dbm-ui/frontend/src/views/es-manage/common/Expansion.vue create mode 100644 dbm-ui/frontend/src/views/es-manage/common/common/ListNode.vue create mode 100644 dbm-ui/frontend/src/views/es-manage/common/replace/Index.vue create mode 100644 dbm-ui/frontend/src/views/es-manage/common/replace/components/RenderNodeHostList.vue create mode 100644 dbm-ui/frontend/src/views/es-manage/common/shrink/Index.vue create mode 100644 dbm-ui/frontend/src/views/es-manage/detail/Index.vue create mode 100644 dbm-ui/frontend/src/views/es-manage/detail/components/BaseInfo.vue create mode 100644 dbm-ui/frontend/src/views/es-manage/detail/components/node-list/Index.vue create mode 100644 dbm-ui/frontend/src/views/es-manage/detail/components/node-list/components/InstanceDetail.vue create mode 100644 dbm-ui/frontend/src/views/es-manage/list/hooks/useTableSetting.ts create mode 100644 dbm-ui/frontend/src/views/es-manage/list/index.vue create mode 100644 dbm-ui/frontend/src/views/es-manage/routes.ts create mode 100644 dbm-ui/frontend/src/views/event-center/components/SwtichEventDetatils.vue create mode 100644 dbm-ui/frontend/src/views/event-center/pages/DBHASwitchEvents.vue create mode 100644 dbm-ui/frontend/src/views/event-center/routes.ts create mode 100644 dbm-ui/frontend/src/views/exception/404.vue create mode 100644 dbm-ui/frontend/src/views/exception/BizPermission.vue create mode 100644 dbm-ui/frontend/src/views/exception/Error.vue create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/Index.vue create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/apply/Index.vue create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/common/Expansion.vue create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/common/common/ListNode.vue create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/common/replace/Index.vue create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/common/replace/components/RenderNodeHostList.vue create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/common/shrink/Index.vue create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/detail/Index.vue create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/detail/components/BaseInfo.vue create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/detail/components/node-list/Index.vue create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/detail/components/node-list/components/InstanceDetail.vue create mode 100644 
dbm-ui/frontend/src/views/hdfs-manage/list/components/ClusterSettings.vue create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/list/components/SettingsMonacoEditor.vue create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/list/hooks/useTableSetting.ts create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/list/index.vue create mode 100644 dbm-ui/frontend/src/views/hdfs-manage/routes.ts create mode 100644 dbm-ui/frontend/src/views/influxdb-manage/Index.vue create mode 100644 dbm-ui/frontend/src/views/influxdb-manage/apply/common/base.ts create mode 100644 dbm-ui/frontend/src/views/influxdb-manage/apply/components/GroupItem.vue create mode 100644 dbm-ui/frontend/src/views/influxdb-manage/apply/index.vue create mode 100644 dbm-ui/frontend/src/views/influxdb-manage/components/GroupInput.vue create mode 100644 dbm-ui/frontend/src/views/influxdb-manage/components/GroupList.vue create mode 100644 dbm-ui/frontend/src/views/influxdb-manage/components/InstanceList.vue create mode 100644 dbm-ui/frontend/src/views/influxdb-manage/components/replace/Index.vue create mode 100644 dbm-ui/frontend/src/views/influxdb-manage/components/replace/components/RenderNodeHostList.vue create mode 100644 dbm-ui/frontend/src/views/influxdb-manage/details/AsideList.vue create mode 100644 dbm-ui/frontend/src/views/influxdb-manage/details/Details.vue create mode 100644 dbm-ui/frontend/src/views/influxdb-manage/routes.ts create mode 100644 dbm-ui/frontend/src/views/kafka-manage/Index.vue create mode 100644 dbm-ui/frontend/src/views/kafka-manage/apply/Index.vue create mode 100644 dbm-ui/frontend/src/views/kafka-manage/common/Expansion.vue create mode 100644 dbm-ui/frontend/src/views/kafka-manage/common/common/ListNode.vue create mode 100644 dbm-ui/frontend/src/views/kafka-manage/common/replace/Index.vue create mode 100644 dbm-ui/frontend/src/views/kafka-manage/common/replace/components/RenderNodeHostList.vue create mode 100644 dbm-ui/frontend/src/views/kafka-manage/common/shrink/Index.vue create mode 100644 dbm-ui/frontend/src/views/kafka-manage/detail/Index.vue create mode 100644 dbm-ui/frontend/src/views/kafka-manage/detail/components/BaseInfo.vue create mode 100644 dbm-ui/frontend/src/views/kafka-manage/detail/components/node-list/Index.vue create mode 100644 dbm-ui/frontend/src/views/kafka-manage/detail/components/node-list/components/InstanceDetail.vue create mode 100644 dbm-ui/frontend/src/views/kafka-manage/list/Index.vue create mode 100644 dbm-ui/frontend/src/views/kafka-manage/list/hooks/useTableSetting.ts create mode 100644 dbm-ui/frontend/src/views/kafka-manage/routes.ts create mode 100644 dbm-ui/frontend/src/views/main-views/common/const.ts create mode 100644 dbm-ui/frontend/src/views/main-views/common/getRouteChildren.ts create mode 100644 dbm-ui/frontend/src/views/main-views/components/MenuToggleIcon.vue create mode 100644 dbm-ui/frontend/src/views/main-views/hooks/useMenuInfo.ts create mode 100644 dbm-ui/frontend/src/views/main-views/pages/Database.vue create mode 100644 dbm-ui/frontend/src/views/main-views/pages/Platform.vue create mode 100644 dbm-ui/frontend/src/views/main-views/pages/Services.vue create mode 100644 dbm-ui/frontend/src/views/main-views/routes.ts create mode 100644 dbm-ui/frontend/src/views/mission/common/const.ts create mode 100644 dbm-ui/frontend/src/views/mission/common/graphCanvas.ts create mode 100644 dbm-ui/frontend/src/views/mission/common/graphRender.tsx create mode 100644 dbm-ui/frontend/src/views/mission/common/types.ts create mode 100644 
dbm-ui/frontend/src/views/mission/common/utils.ts create mode 100644 dbm-ui/frontend/src/views/mission/components/NodeLog.vue create mode 100644 dbm-ui/frontend/src/views/mission/components/RedisResultFiles.vue create mode 100644 dbm-ui/frontend/src/views/mission/components/RetrySelector.vue create mode 100644 dbm-ui/frontend/src/views/mission/hooks/useFetchData.ts create mode 100644 dbm-ui/frontend/src/views/mission/pages/Details.vue create mode 100644 dbm-ui/frontend/src/views/mission/pages/index.vue create mode 100644 dbm-ui/frontend/src/views/mission/routes.ts create mode 100644 dbm-ui/frontend/src/views/mysql/apply/ApplyMySQL.vue create mode 100644 dbm-ui/frontend/src/views/mysql/apply/CreateModule.vue create mode 100644 dbm-ui/frontend/src/views/mysql/apply/components/BatchEdit.vue create mode 100644 dbm-ui/frontend/src/views/mysql/apply/components/MySQLDomainTable.vue create mode 100644 dbm-ui/frontend/src/views/mysql/apply/components/PreviewTable.vue create mode 100644 dbm-ui/frontend/src/views/mysql/apply/hooks/useMysqlData.ts create mode 100644 dbm-ui/frontend/src/views/mysql/checksum/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/checksum/common/types.ts create mode 100644 dbm-ui/frontend/src/views/mysql/checksum/components/BatchInput.vue create mode 100644 dbm-ui/frontend/src/views/mysql/common/edit-field/ClusterWithRelateCluster.vue create mode 100644 dbm-ui/frontend/src/views/mysql/common/edit-field/DbName.vue create mode 100644 dbm-ui/frontend/src/views/mysql/common/edit-field/TableName.vue create mode 100644 dbm-ui/frontend/src/views/mysql/common/edit/DateTime.vue create mode 100644 dbm-ui/frontend/src/views/mysql/common/edit/Input.vue create mode 100644 dbm-ui/frontend/src/views/mysql/common/edit/Select.vue create mode 100644 dbm-ui/frontend/src/views/mysql/common/edit/Tag.vue create mode 100644 dbm-ui/frontend/src/views/mysql/common/edit/hooks/useValidtor.ts create mode 100644 dbm-ui/frontend/src/views/mysql/common/hooks/useTaskCount.ts create mode 100644 dbm-ui/frontend/src/views/mysql/common/render-table/HeadColumn.vue create mode 100644 dbm-ui/frontend/src/views/mysql/common/render-table/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/common/render-table/hooks/useColumnResize.ts create mode 100644 dbm-ui/frontend/src/views/mysql/common/ticket-success/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-backup/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-backup/pages/page1/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-backup/pages/page1/components/TargetCluster.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-backup/pages/page2/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-clear/common/const.ts create mode 100644 dbm-ui/frontend/src/views/mysql/db-clear/common/types.ts create mode 100644 dbm-ui/frontend/src/views/mysql/db-clear/components/BatchInput.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-clear/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-rename/components/BatchInput.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-rename/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-table-backup/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-table-backup/pages/page1/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-table-backup/pages/page1/components/BatchEntry.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-table-backup/pages/page1/components/RenderData/Index.vue create mode 100644 
dbm-ui/frontend/src/views/mysql/db-table-backup/pages/page1/components/RenderData/RenderBackupSource.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-table-backup/pages/page1/components/RenderData/RenderCluster.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-table-backup/pages/page1/components/RenderData/RenderDbName.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-table-backup/pages/page1/components/RenderData/RenderHost.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-table-backup/pages/page1/components/RenderData/RenderTableName.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-table-backup/pages/page1/components/RenderData/Row.vue create mode 100644 dbm-ui/frontend/src/views/mysql/db-table-backup/pages/page2/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/details/HaInstanceDetails.vue create mode 100644 dbm-ui/frontend/src/views/mysql/details/MySQLDetails.vue create mode 100644 dbm-ui/frontend/src/views/mysql/details/hooks/useInstancesData.ts create mode 100644 dbm-ui/frontend/src/views/mysql/details/hooks/useListData.ts create mode 100644 dbm-ui/frontend/src/views/mysql/flashback/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/flashback/pages/page1/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/flashback/pages/page1/components/BatchEntry.vue create mode 100644 dbm-ui/frontend/src/views/mysql/flashback/pages/page1/components/RenderData/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/flashback/pages/page1/components/RenderData/RenderCluster.vue create mode 100644 dbm-ui/frontend/src/views/mysql/flashback/pages/page1/components/RenderData/RenderDbName.vue create mode 100644 dbm-ui/frontend/src/views/mysql/flashback/pages/page1/components/RenderData/RenderStartTime.vue create mode 100644 dbm-ui/frontend/src/views/mysql/flashback/pages/page1/components/RenderData/RenderTableName.vue create mode 100644 dbm-ui/frontend/src/views/mysql/flashback/pages/page1/components/RenderData/Row.vue create mode 100644 dbm-ui/frontend/src/views/mysql/flashback/pages/page2/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/list/HaInstanceList.vue create mode 100644 dbm-ui/frontend/src/views/mysql/list/HaList.vue create mode 100644 dbm-ui/frontend/src/views/mysql/list/SingleList.vue create mode 100644 dbm-ui/frontend/src/views/mysql/list/components/MySQLExcelAuthorize.vue create mode 100644 dbm-ui/frontend/src/views/mysql/list/components/OperationStatusTips.vue create mode 100644 dbm-ui/frontend/src/views/mysql/list/components/RenderOperationTag.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-failover/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-failover/pages/page1/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-failover/pages/page1/components/BatchEntry.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-failover/pages/page1/components/RenderData/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-failover/pages/page1/components/RenderData/RenderCluster.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-failover/pages/page1/components/RenderData/RenderMaster.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-failover/pages/page1/components/RenderData/RenderSlave.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-failover/pages/page1/components/RenderData/Row.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-failover/pages/page2/Index.vue create mode 100644 
dbm-ui/frontend/src/views/mysql/master-slave-clone/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-clone/pages/page1/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-clone/pages/page1/components/BatchEntry.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-clone/pages/page1/components/RenderData/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-clone/pages/page1/components/RenderData/RenderMasterSlaveHost.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-clone/pages/page1/components/RenderData/Row.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-clone/pages/page2/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-swap/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-swap/pages/page1/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-swap/pages/page1/components/BatchEntry.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-swap/pages/page1/components/RenderData/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-swap/pages/page1/components/RenderData/RenderCluster.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-swap/pages/page1/components/RenderData/RenderMaster.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-swap/pages/page1/components/RenderData/RenderSlave.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-swap/pages/page1/components/RenderData/Row.vue create mode 100644 dbm-ui/frontend/src/views/mysql/master-slave-swap/pages/page2/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/permission/common/const.ts create mode 100644 dbm-ui/frontend/src/views/mysql/permission/common/types.ts create mode 100644 dbm-ui/frontend/src/views/mysql/permission/components/AccountDialog.vue create mode 100644 dbm-ui/frontend/src/views/mysql/permission/components/CreateRule.vue create mode 100644 dbm-ui/frontend/src/views/mysql/permission/hooks/usePermissionRules.ts create mode 100644 dbm-ui/frontend/src/views/mysql/permission/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/privilege-clone-client/components/BatchInput.vue create mode 100644 dbm-ui/frontend/src/views/mysql/privilege-clone-client/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/privilege-clone-inst/components/BatchInput.vue create mode 100644 dbm-ui/frontend/src/views/mysql/privilege-clone-inst/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-add/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-add/pages/page1/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-add/pages/page1/components/BatchEntry.vue create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-add/pages/page1/components/RenderData/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-add/pages/page1/components/RenderData/RenderProxy.vue create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-add/pages/page1/components/RenderData/Row.vue create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-add/pages/page2/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-replace/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-replace/pages/page1/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-replace/pages/page1/components/BatchEntry.vue create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-replace/pages/page1/components/RenderData/Index.vue 
create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-replace/pages/page1/components/RenderData/RenderOriginalProxy.vue create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-replace/pages/page1/components/RenderData/RenderTargetProxyIp.vue create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-replace/pages/page1/components/RenderData/Row.vue create mode 100644 dbm-ui/frontend/src/views/mysql/proxy-replace/pages/page2/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/rollback/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/rollback/pages/page1/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/rollback/pages/page1/components/BatchEntry.vue create mode 100644 dbm-ui/frontend/src/views/mysql/rollback/pages/page1/components/RenderData/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/rollback/pages/page1/components/RenderData/RenderBackup.vue create mode 100644 dbm-ui/frontend/src/views/mysql/rollback/pages/page1/components/RenderData/RenderCluster.vue create mode 100644 dbm-ui/frontend/src/views/mysql/rollback/pages/page1/components/RenderData/RenderDbName.vue create mode 100644 dbm-ui/frontend/src/views/mysql/rollback/pages/page1/components/RenderData/RenderHost.vue create mode 100644 dbm-ui/frontend/src/views/mysql/rollback/pages/page1/components/RenderData/RenderMode.vue create mode 100644 dbm-ui/frontend/src/views/mysql/rollback/pages/page1/components/RenderData/RenderTableName.vue create mode 100644 dbm-ui/frontend/src/views/mysql/rollback/pages/page1/components/RenderData/Row.vue create mode 100644 dbm-ui/frontend/src/views/mysql/rollback/pages/page2/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/routes.ts create mode 100644 dbm-ui/frontend/src/views/mysql/slave-add/common/types.ts create mode 100644 dbm-ui/frontend/src/views/mysql/slave-add/components/BatchInput.vue create mode 100644 dbm-ui/frontend/src/views/mysql/slave-add/components/ClusterRelatedInput.vue create mode 100644 dbm-ui/frontend/src/views/mysql/slave-add/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/slave-rebuild/components/BatchInput.vue create mode 100644 dbm-ui/frontend/src/views/mysql/slave-rebuild/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/ExecuteMode.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/TargetCluster.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/TaskTips.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/backup/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/backup/RenderData/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/backup/RenderData/RenderBackupSource.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/backup/RenderData/Row.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/editor/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/editor/MessageList.vue create mode 100644 
dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/local-file/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/local-file/SqlFileList.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/local-file/components/CheckError.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/local-file/components/CheckSuccess.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/local-file/components/FileList.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/manual-input/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/manual-input/components/SyntaxChecking.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/manual-input/components/SyntaxError.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/manual-input/components/SyntaxSuccess.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/sql-file/utils.ts create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/target-db/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/target-db/RenderData/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/target-db/RenderData/RenderDbName.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step1/components/target-db/RenderData/Row.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step2/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step2/components/FailedTips.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step2/components/PendingTips.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step2/components/RenderLog.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step2/components/SuccessTips.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step2/components/render-file-list/FileItem.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step2/components/render-file-list/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step2/components/render-status/Failed.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step2/components/render-status/Pending.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step2/components/render-status/Success.vue create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step2/hooks/useFlowStatus.ts create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step2/hooks/useLog.ts create mode 100644 dbm-ui/frontend/src/views/mysql/sql-execute/steps/step3/Index.vue create mode 100644 dbm-ui/frontend/src/views/mysql/toolbox/common/menus.ts create mode 100644 dbm-ui/frontend/src/views/mysql/toolbox/components/TaskCount.vue create mode 100644 dbm-ui/frontend/src/views/mysql/toolbox/components/ToolboxContent.vue create mode 100644 dbm-ui/frontend/src/views/mysql/toolbox/components/ToolboxSide.vue create mode 100644 dbm-ui/frontend/src/views/mysql/toolbox/index.vue create mode 100644 dbm-ui/frontend/src/views/password-policy/PasswordPolicy.vue create mode 100644 
dbm-ui/frontend/src/views/password-policy/routes.ts create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/Index.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/apply/common/base.ts create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/apply/index.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/common/expansion/Index.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/common/expansion/components/NodeList.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/common/expansion/components/RenderNode.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/common/replace/Index.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/common/replace/components/RenderNodeHostList.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/common/shrink/Index.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/common/shrink/components/NodeList.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/common/shrink/components/RenderNode.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/common/shrink/components/RenderOriginalHostList.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/detail/Index.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/detail/components/BaseInfo.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/detail/components/node-list/Index.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/detail/components/node-list/components/InstanceDetail.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/list/Index.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/list/components/ManagerPassword.vue create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/list/hooks/useTableSetting.ts create mode 100644 dbm-ui/frontend/src/views/pulsar-manage/routes.ts create mode 100644 dbm-ui/frontend/src/views/redis/apply/ApplyRedis.vue create mode 100644 dbm-ui/frontend/src/views/redis/apply/common/const.ts create mode 100644 dbm-ui/frontend/src/views/redis/common/types.ts create mode 100644 dbm-ui/frontend/src/views/redis/details/Details.vue create mode 100644 dbm-ui/frontend/src/views/redis/hooks/useRedisData.ts create mode 100644 dbm-ui/frontend/src/views/redis/list/List.vue create mode 100644 dbm-ui/frontend/src/views/redis/list/components/Backup.vue create mode 100644 dbm-ui/frontend/src/views/redis/list/components/BatchEdit.vue create mode 100644 dbm-ui/frontend/src/views/redis/list/components/BatchEditKeys.vue create mode 100644 dbm-ui/frontend/src/views/redis/list/components/ClusterPassword.vue create mode 100644 dbm-ui/frontend/src/views/redis/list/components/DeleteKeys.vue create mode 100644 dbm-ui/frontend/src/views/redis/list/components/ExtractKeys.vue create mode 100644 dbm-ui/frontend/src/views/redis/list/components/OperationStatusTips.vue create mode 100644 dbm-ui/frontend/src/views/redis/list/components/Purge.vue create mode 100644 dbm-ui/frontend/src/views/redis/list/components/RenderOperationTag.vue create mode 100644 dbm-ui/frontend/src/views/redis/routes.ts create mode 100644 dbm-ui/frontend/src/views/resource-pool/Index.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/Index.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/export-host/Index.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/Index.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/com-factory/AgentStatus.vue 
create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/com-factory/City.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/com-factory/Cpu.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/com-factory/DeviceClass.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/com-factory/Disk.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/com-factory/DiskType.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/com-factory/ForBizs.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/com-factory/Hosts.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/com-factory/Index.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/com-factory/Mem.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/com-factory/MountPoint.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/com-factory/ResourceTypes.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/com-factory/Subzones.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/field-config.ts create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/field-input/Index.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/field-input/components/SearchItem.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/field-tag/Index.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/list/components/search-box/components/field-tag/ValueTag.vue create mode 100644 dbm-ui/frontend/src/views/resource-pool/routes.ts create mode 100644 dbm-ui/frontend/src/views/resource-spec/components/SpecCreate.vue create mode 100644 dbm-ui/frontend/src/views/resource-spec/components/SpecList.vue create mode 100644 dbm-ui/frontend/src/views/resource-spec/components/spec-form-item/SpecCPU.vue create mode 100644 dbm-ui/frontend/src/views/resource-spec/components/spec-form-item/SpecDevice.vue create mode 100644 dbm-ui/frontend/src/views/resource-spec/components/spec-form-item/SpecMem.vue create mode 100644 dbm-ui/frontend/src/views/resource-spec/components/spec-form-item/SpecStorage.vue create mode 100644 dbm-ui/frontend/src/views/resource-spec/components/spec-form-item/specFormItem.less create mode 100644 dbm-ui/frontend/src/views/resource-spec/pages/Index.vue create mode 100644 dbm-ui/frontend/src/views/resource-spec/routes.ts create mode 100644 dbm-ui/frontend/src/views/service-apply/components/ApplyCollapse.vue create mode 100644 dbm-ui/frontend/src/views/service-apply/index.vue create mode 100644 dbm-ui/frontend/src/views/service-apply/routes.ts create mode 100644 dbm-ui/frontend/src/views/staff-setting/index.vue create mode 100644 dbm-ui/frontend/src/views/staff-setting/routes.ts create mode 100644 dbm-ui/frontend/src/views/tickets/common/types.ts create mode 100644 dbm-ui/frontend/src/views/tickets/common/utils.ts create mode 100644 dbm-ui/frontend/src/views/tickets/components/DetailsClusterOperation.vue create mode 100644 
dbm-ui/frontend/src/views/tickets/components/DetailsTable.less create mode 100644 dbm-ui/frontend/src/views/tickets/components/FlowContent.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/FlowContentInnerFlow.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/FlowContentTodo.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/FlowIcon.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/TicketDetails.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/bigdata/BigDataExpansionCapacity.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/bigdata/BigDataReboot.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/bigdata/BigDataReplace.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/bigdata/DetailsES.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/bigdata/DetailsHDFS.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/bigdata/DetailsInfluxDB.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/bigdata/DetailsKafka.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/bigdata/DetailsPulsar.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/influxdb/InfluxdbOperations.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/influxdb/InfluxdbReplace.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/DetailsMySQL.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLChecksum.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLClone.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLClusterOperation.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLFlashback.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLFullBackup.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLHATruncate.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLImportSQLFile.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLMasterFailOver.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLMasterSlaveSwitch.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLMigrateCluster.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLOperation.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLProxyAdd.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLProxySwitch.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLRename.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLRestoreSlave.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLRollbackCluster.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLSlave.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/MySQLTableBackup.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/SqlLogDetails.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/mysql/TargetClusterPreview.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/redis/DetailsRedis.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/redis/RedisOperation.vue create mode 100644 dbm-ui/frontend/src/views/tickets/components/ticketDetails.less 
create mode 100644 dbm-ui/frontend/src/views/tickets/hooks/logCounts.ts create mode 100644 dbm-ui/frontend/src/views/tickets/hooks/targetClusterData.ts create mode 100644 dbm-ui/frontend/src/views/tickets/my-tickets/MyTickets.vue create mode 100644 dbm-ui/frontend/src/views/tickets/my-tickets/TicketList.vue create mode 100644 dbm-ui/frontend/src/views/tickets/my-tickets/components/ListTabs.vue create mode 100644 dbm-ui/frontend/src/views/tickets/my-tickets/components/TicketFlows.vue create mode 100644 dbm-ui/frontend/src/views/tickets/my-tickets/components/flows/Common.vue create mode 100644 dbm-ui/frontend/src/views/tickets/my-tickets/components/flows/MySqlFlows.vue create mode 100644 dbm-ui/frontend/src/views/tickets/my-tickets/components/flows/RedisFlows.vue create mode 100644 dbm-ui/frontend/src/views/tickets/my-todos/MyTodos.vue create mode 100644 dbm-ui/frontend/src/views/tickets/my-todos/TicketList.vue create mode 100644 dbm-ui/frontend/src/views/tickets/my-todos/components/TicketFlows.vue create mode 100644 dbm-ui/frontend/src/views/tickets/my-todos/components/flows/Approve.vue create mode 100644 dbm-ui/frontend/src/views/tickets/my-todos/components/flows/MySqlFlows.vue create mode 100644 dbm-ui/frontend/src/views/tickets/my-todos/components/flows/RedisFlows.vue create mode 100644 dbm-ui/frontend/src/views/tickets/routes.ts create mode 100644 dbm-ui/frontend/src/views/version-files/common/types.ts create mode 100644 dbm-ui/frontend/src/views/version-files/hooks/useVersionFiles.ts create mode 100644 dbm-ui/frontend/src/views/version-files/pages/VersionFileContent.vue create mode 100644 dbm-ui/frontend/src/views/version-files/pages/VersionFiles.vue create mode 100644 dbm-ui/frontend/src/views/version-files/routes.ts create mode 100644 dbm-ui/frontend/src/views/whitelist/components/WhitelistOperation.vue create mode 100644 dbm-ui/frontend/src/views/whitelist/pages/Index.vue create mode 100644 dbm-ui/frontend/src/views/whitelist/routes.ts create mode 100644 dbm-ui/frontend/tsconfig.config.json create mode 100644 dbm-ui/frontend/tsconfig.json create mode 100644 dbm-ui/frontend/vite.config.ts create mode 100644 dbm-ui/locale/en/LC_MESSAGES/django.mo create mode 100644 dbm-ui/locale/en/LC_MESSAGES/django.po create mode 100644 dbm-ui/locale/zh_hans/LC_MESSAGES/django.mo create mode 100644 dbm-ui/locale/zh_hans/LC_MESSAGES/django.po create mode 100755 dbm-ui/manage.py create mode 100644 dbm-ui/poetry.lock create mode 100644 dbm-ui/pyproject.toml create mode 100644 dbm-ui/pytest.ini create mode 100644 dbm-ui/release/README.md create mode 100644 dbm-ui/release/V1.0.0_20230424.md create mode 100755 dbm-ui/scripts/ci/bk_ci.sh create mode 100755 dbm-ui/scripts/ci/code_quality.sh create mode 100755 dbm-ui/scripts/ci/env.sh create mode 100755 dbm-ui/scripts/ci/install.sh create mode 100755 dbm-ui/scripts/ci/prepare_services.sh create mode 100755 dbm-ui/scripts/ci/upgrade.sh create mode 100644 dbm-ui/scripts/ci/upgrade_dbm.sh create mode 100644 dbm-ui/scripts/license/README.md create mode 100644 dbm-ui/scripts/license/add_jscss_license_header.sh create mode 100644 dbm-ui/scripts/license/add_py_license_header.py create mode 100644 dbm-ui/scripts/license/headers/LICENSE_JSCSS_HEADER.txt create mode 100644 dbm-ui/scripts/license/headers/LICENSE_PY_HEADER.txt create mode 100755 dbm-ui/scripts/make_ssl_pairs.sh create mode 100644 dbm-ui/scripts/snippets/uninstall_es.sh create mode 100644 dbm-ui/scripts/snippets/uninstall_hdfs.sh create mode 100644 dbm-ui/scripts/snippets/uninstall_kafka.sh create mode 
100644 dbm-ui/scripts/snippets/uninstall_mysql.sh create mode 100644 dbm-ui/scripts/snippets/uninstall_pulsar.sh create mode 100644 dbm-ui/scripts/snippets/uninstall_redis.sh create mode 100644 dbm-ui/version_logs_html/V1.0.0.html create mode 100644 dbm-ui/wsgi.py create mode 100644 docs/resource/img/logo.png create mode 100644 docs/resource/img/logo_zh.png create mode 100644 helm-charts/.gitignore create mode 100644 helm-charts/README.md create mode 100644 helm-charts/bk-dbm/.helmignore create mode 100644 helm-charts/bk-dbm/Chart.lock create mode 100644 helm-charts/bk-dbm/Chart.yaml create mode 100644 helm-charts/bk-dbm/charts/db-dns-api/.helmignore create mode 100644 helm-charts/bk-dbm/charts/db-dns-api/Chart.yaml create mode 100644 helm-charts/bk-dbm/charts/db-dns-api/templates/NOTES.txt create mode 100644 helm-charts/bk-dbm/charts/db-dns-api/templates/_helpers.tpl create mode 100644 helm-charts/bk-dbm/charts/db-dns-api/templates/deployment.yaml create mode 100644 helm-charts/bk-dbm/charts/db-dns-api/templates/ingress.yaml create mode 100644 helm-charts/bk-dbm/charts/db-dns-api/templates/service.yaml create mode 100644 helm-charts/bk-dbm/charts/db-dns-api/templates/serviceaccount.yaml create mode 100644 helm-charts/bk-dbm/charts/db-dns-api/templates/tests/test-connection.yaml create mode 100644 helm-charts/bk-dbm/charts/db-dns-api/values.yaml create mode 100644 helm-charts/bk-dbm/charts/db-remote-service/.helmignore create mode 100644 helm-charts/bk-dbm/charts/db-remote-service/Chart.yaml create mode 100644 helm-charts/bk-dbm/charts/db-remote-service/templates/NOTES.txt create mode 100644 helm-charts/bk-dbm/charts/db-remote-service/templates/_helpers.tpl create mode 100644 helm-charts/bk-dbm/charts/db-remote-service/templates/deployment.yaml create mode 100644 helm-charts/bk-dbm/charts/db-remote-service/templates/hpa.yaml create mode 100644 helm-charts/bk-dbm/charts/db-remote-service/templates/ingress.yaml create mode 100644 helm-charts/bk-dbm/charts/db-remote-service/templates/service.yaml create mode 100644 helm-charts/bk-dbm/charts/db-remote-service/templates/serviceaccount.yaml create mode 100644 helm-charts/bk-dbm/charts/db-remote-service/values.yaml create mode 100644 helm-charts/bk-dbm/charts/db-resource/.helmignore create mode 100644 helm-charts/bk-dbm/charts/db-resource/Chart.yaml create mode 100644 helm-charts/bk-dbm/charts/db-resource/templates/NOTES.txt create mode 100644 helm-charts/bk-dbm/charts/db-resource/templates/_helpers.tpl create mode 100644 helm-charts/bk-dbm/charts/db-resource/templates/deployment.yaml create mode 100644 helm-charts/bk-dbm/charts/db-resource/templates/hpa.yaml create mode 100644 helm-charts/bk-dbm/charts/db-resource/templates/ingress.yaml create mode 100644 helm-charts/bk-dbm/charts/db-resource/templates/service.yaml create mode 100644 helm-charts/bk-dbm/charts/db-resource/templates/serviceaccount.yaml create mode 100644 helm-charts/bk-dbm/charts/db-resource/templates/tests/test-connection.yaml create mode 100644 helm-charts/bk-dbm/charts/db-resource/values.yaml create mode 100644 helm-charts/bk-dbm/charts/db-simulation/.helmignore create mode 100644 helm-charts/bk-dbm/charts/db-simulation/Chart.yaml create mode 100644 helm-charts/bk-dbm/charts/db-simulation/templates/NOTES.txt create mode 100644 helm-charts/bk-dbm/charts/db-simulation/templates/_helpers.tpl create mode 100644 helm-charts/bk-dbm/charts/db-simulation/templates/deployment.yaml create mode 100644 helm-charts/bk-dbm/charts/db-simulation/templates/hpa.yaml create mode 100644 
helm-charts/bk-dbm/charts/db-simulation/templates/ingress.yaml create mode 100644 helm-charts/bk-dbm/charts/db-simulation/templates/service.yaml create mode 100644 helm-charts/bk-dbm/charts/db-simulation/templates/serviceaccount.yaml create mode 100644 helm-charts/bk-dbm/charts/db-simulation/templates/tests/test-connection.yaml create mode 100644 helm-charts/bk-dbm/charts/db-simulation/values.yaml create mode 100644 helm-charts/bk-dbm/charts/dbconfig/.helmignore create mode 100644 helm-charts/bk-dbm/charts/dbconfig/Chart.yaml create mode 100644 helm-charts/bk-dbm/charts/dbconfig/templates/NOTES.txt create mode 100644 helm-charts/bk-dbm/charts/dbconfig/templates/_helpers.tpl create mode 100644 helm-charts/bk-dbm/charts/dbconfig/templates/deployment.yaml create mode 100644 helm-charts/bk-dbm/charts/dbconfig/templates/hpa.yaml create mode 100644 helm-charts/bk-dbm/charts/dbconfig/templates/ingress.yaml create mode 100644 helm-charts/bk-dbm/charts/dbconfig/templates/service.yaml create mode 100644 helm-charts/bk-dbm/charts/dbconfig/templates/serviceaccount.yaml create mode 100644 helm-charts/bk-dbm/charts/dbconfig/values.yaml create mode 100644 helm-charts/bk-dbm/charts/dbm/.helmignore create mode 100644 helm-charts/bk-dbm/charts/dbm/Chart.yaml create mode 100644 helm-charts/bk-dbm/charts/dbm/templates/_helpers.tpl create mode 100644 helm-charts/bk-dbm/charts/dbm/templates/deployments/celery-beater.yaml create mode 100644 helm-charts/bk-dbm/charts/dbm/templates/deployments/celery-worker.yaml create mode 100644 helm-charts/bk-dbm/charts/dbm/templates/deployments/pipeline-worker.yaml create mode 100644 helm-charts/bk-dbm/charts/dbm/templates/deployments/saas-api.yaml create mode 100644 helm-charts/bk-dbm/charts/dbm/templates/hpa.yaml create mode 100644 helm-charts/bk-dbm/charts/dbm/templates/ingress.yaml create mode 100644 helm-charts/bk-dbm/charts/dbm/templates/migrate-job.yaml create mode 100644 helm-charts/bk-dbm/charts/dbm/templates/service.yaml create mode 100644 helm-charts/bk-dbm/charts/dbm/templates/serviceaccount.yaml create mode 100644 helm-charts/bk-dbm/charts/dbm/values.yaml create mode 100644 helm-charts/bk-dbm/charts/dbpartition/.helmignore create mode 100644 helm-charts/bk-dbm/charts/dbpartition/Chart.yaml create mode 100644 helm-charts/bk-dbm/charts/dbpartition/templates/NOTES.txt create mode 100644 helm-charts/bk-dbm/charts/dbpartition/templates/_helpers.tpl create mode 100644 helm-charts/bk-dbm/charts/dbpartition/templates/deployment.yaml create mode 100644 helm-charts/bk-dbm/charts/dbpartition/templates/hpa.yaml create mode 100644 helm-charts/bk-dbm/charts/dbpartition/templates/ingress.yaml create mode 100644 helm-charts/bk-dbm/charts/dbpartition/templates/service.yaml create mode 100644 helm-charts/bk-dbm/charts/dbpartition/templates/serviceaccount.yaml create mode 100644 helm-charts/bk-dbm/charts/dbpartition/values.yaml create mode 100644 helm-charts/bk-dbm/charts/dbpriv/.helmignore create mode 100644 helm-charts/bk-dbm/charts/dbpriv/Chart.yaml create mode 100644 helm-charts/bk-dbm/charts/dbpriv/templates/NOTES.txt create mode 100644 helm-charts/bk-dbm/charts/dbpriv/templates/_helpers.tpl create mode 100644 helm-charts/bk-dbm/charts/dbpriv/templates/deployment.yaml create mode 100644 helm-charts/bk-dbm/charts/dbpriv/templates/hpa.yaml create mode 100644 helm-charts/bk-dbm/charts/dbpriv/templates/ingress.yaml create mode 100644 helm-charts/bk-dbm/charts/dbpriv/templates/service.yaml create mode 100644 helm-charts/bk-dbm/charts/dbpriv/templates/serviceaccount.yaml create mode 
100644 helm-charts/bk-dbm/charts/dbpriv/values.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/.helmignore create mode 100644 helm-charts/bk-dbm/charts/grafana/Chart.lock create mode 100644 helm-charts/bk-dbm/charts/grafana/Chart.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/README.md create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/.helmignore create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/Chart.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/README.md create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/_affinities.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/_capabilities.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/_errors.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/_images.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/_ingress.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/_labels.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/_names.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/_secrets.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/_storage.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/_tplvalues.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/_utils.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/_warnings.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/validations/_cassandra.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/validations/_mariadb.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/validations/_mongodb.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/validations/_mysql.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/validations/_postgresql.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/validations/_redis.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/templates/validations/_validations.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/charts/common/values.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/NOTES.txt create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/_helpers.tpl create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/configmap.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/dashboard-provider.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/deployment.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/extra-list.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/image-renderer-deployment.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/image-renderer-prometheusrules.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/image-renderer-service.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/image-renderer-servicemonitor.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/ingress.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/ldap-secret.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/prometheusrules.yaml create mode 100644 
helm-charts/bk-dbm/charts/grafana/templates/pvc.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/secret.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/service.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/serviceaccount.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/servicemonitor.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/smtp-secret.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/templates/tls-secret.yaml create mode 100644 helm-charts/bk-dbm/charts/grafana/values.yaml create mode 100644 helm-charts/bk-dbm/charts/hadb-api/Chart.yaml create mode 100644 helm-charts/bk-dbm/charts/hadb-api/templates/NOTES.txt create mode 100644 helm-charts/bk-dbm/charts/hadb-api/templates/_helpers.tpl create mode 100644 helm-charts/bk-dbm/charts/hadb-api/templates/deployment.yaml create mode 100644 helm-charts/bk-dbm/charts/hadb-api/templates/hpa.yaml create mode 100644 helm-charts/bk-dbm/charts/hadb-api/templates/ingress.yaml create mode 100644 helm-charts/bk-dbm/charts/hadb-api/templates/service.yaml create mode 100644 helm-charts/bk-dbm/charts/hadb-api/templates/serviceaccount.yaml create mode 100644 helm-charts/bk-dbm/charts/hadb-api/templates/tests/test-connection.yaml create mode 100644 helm-charts/bk-dbm/charts/hadb-api/values.yaml create mode 100644 helm-charts/bk-dbm/templates/NOTES.txt create mode 100644 helm-charts/bk-dbm/templates/_helpers.tpl create mode 100644 helm-charts/bk-dbm/templates/bklogconfig.yaml create mode 100644 helm-charts/bk-dbm/templates/db-dns-api-configmap.yaml create mode 100644 helm-charts/bk-dbm/templates/db-resource-configmap.yaml create mode 100644 helm-charts/bk-dbm/templates/dbconfig-configmap.yaml create mode 100644 helm-charts/bk-dbm/templates/dbm-configmap.yaml create mode 100644 helm-charts/bk-dbm/templates/dbpartition-configmap.yaml create mode 100644 helm-charts/bk-dbm/templates/dbpriv-configmap.yaml create mode 100644 helm-charts/bk-dbm/templates/dbsimulation-configmap.yaml create mode 100644 helm-charts/bk-dbm/templates/grafana-env-configmap.yaml create mode 100644 helm-charts/bk-dbm/templates/grafana-ini-configmap.yaml create mode 100644 helm-charts/bk-dbm/templates/hadb-api-configmap.yaml create mode 100644 helm-charts/bk-dbm/templates/init-sql-configmap.yaml create mode 100644 helm-charts/bk-dbm/values.yaml create mode 100644 readme.md create mode 100644 readme_en.md
diff --git a/.ci/open_source_check.yml b/.ci/open_source_check.yml
new file mode 100644
index 0000000000..632732fcff
--- /dev/null
+++ b/.ci/open_source_check.yml
@@ -0,0 +1,95 @@
+
+version: "v2.0"
+name: "Open source check"
+label: []
+variables: {}
+on:
+  mr:
+    target-branches: [ "*" ]
+stages:
+- name: "Open source check"
+  label:
+  - "Build"
+  jobs:
+    job_AfK:
+      name: "Build environment - LINUX"
+      runs-on:
+        pool-name: "docker"
+      container:
+        image: "mirrors.tencent.com/ci/tlinux3_ci:2.3.0"
+      needs: {}
+      steps:
+      - checkout: self
+      - name: "Sensitive information check - departmental RTX"
+        uses: "SensitiveRtxChecker@3.*"
+      - name: "Tencent code analysis (official - code analysis working group)"
+        uses: "CodeccCheckAtomDebug@4.*"
+        with:
+          beAutoLang: false
+          languages:
+          - "JS"
+          - "PYTHON"
+          - "TYPESCRIPT"
+          - "CSS"
+          - "JSON"
+          - "GO"
+          checkerSetType: "communityOpenScan"
+          tools:
+          - "PYLINT"
+          - "WOODPECKER_COMMITSCAN"
+          - "ESLINT"
+          - "SCC"
+          - "PECKER_SECURITY"
+          - "SENSITIVE"
+          - "DUPC"
+          - "IP_CHECK"
+          - "WOODPECKER_SENSITIVE"
+          - "HORUSPY"
+          - "XCHECK"
+          - "CCN"
+          asyncTask: false
+          asyncTaskId: ""
+          scriptType: "SHELL"
+          script: |-
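(The file list above is the tail of the patch's diffstat. Saved to a file, a mailbox-format patch like this can be inspected and applied with stock git; a quick sketch, with an illustrative file name:)

```
# reprint the diffstat without touching the working tree (file name illustrative)
git apply --stat 0001-init.patch
# apply the patch as a proper commit, keeping the original author and date
git am 0001-init.patch
```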
+            # Coverity/Klocwork compiles your code by invoking a build script, to trace deep defects
+            # Write a build script build.sh with your dependent build tool (e.g. maven/cmake)
+            # Make sure build.sh can actually compile the code
+            # cd path/to/build.sh
+            # sh build.sh
+          languageRuleSetMap: {}
+          checkerSetEnvType: "prod"
+          multiPipelineMark: ""
+          rtxReceiverType: "1"
+          botWebhookUrl: ""
+          botRemindRange: "2"
+          botRemindSeverity: "7"
+          botRemaindTools: []
+          emailReceiverType: "1"
+          emailCCReceiverList: []
+          instantReportStatus: "2"
+          reportDate: []
+          reportTime: ""
+          reportTools: []
+          toolScanType: "1" # Scan mode: fast full scan [1] | full scan [0] | diff scan [6] | MR/PR scan [2]; default is 1
+          diffBranch: ""
+          byFile: false
+          mrCommentEnable: true
+          prohibitIgnore: false
+          newDefectJudgeFromDate: ""
+          transferAuthorList: []
+          path: []
+          customPath: []
+          scanTestSource: false
+          openScanPrj: false
+          openScanFilterEnable: false
+          issueSystem: "TAPD"
+          issueSubSystem: ""
+          issueResolvers: []
+          issueReceivers: []
+          issueFindByVersion: ""
+          maxIssue: 1000
+          issueAutoCommit: false
+  check-out:
+    gates:
+    - template: open_source_gate.yml
+    timeout-hours: 10
\ No newline at end of file
diff --git a/.ci/python_code_format.yml b/.ci/python_code_format.yml
new file mode 100644
index 0000000000..14365fd4b3
--- /dev/null
+++ b/.ci/python_code_format.yml
@@ -0,0 +1,37 @@
+version: v2.0
+
+stages:
+  - name: "Python code format check"
+    jobs:
+      code_format:
+        name: "Python code format check"
+        runs-on:
+          pool-name: docker # docker-on-devcloud, docker, local, agentless
+        container:
+          image: mirrors.tencent.com/ci/tlinux3_ci:2.0.0
+        steps:
+          - checkout: self
+          - name: "Install environment"
+            run: |-
+              pip install flake8 black=="20.8b1"
+          - name: "flake8"
+            run: |-
+              FLAKE8_RESULT=$(flake8 dbm-ui/backend --config=dbm-ui/backend/.flake8)
+              if [[ -z "$FLAKE8_RESULT" ]]; then
+                echo "flake8 test passed"
+              else
+                echo "${FLAKE8_RESULT}"
+                echo "flake8 test failed, please check if you have installed pre-commit"
+                exit 1
+              fi
+
+          - name: "black"
+            run: |-
+              BLACK_RESULT=$(black dbm-ui/backend 2>&1)
+              if [[ $BLACK_RESULT =~ "reformatted" ]]; then
+                echo "${BLACK_RESULT}"
+                echo "black test failed, please check if you have installed pre-commit"
+                exit 1
+              else
+                echo "black test passed"
+              fi
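(The flake8 and black steps above are the whole format gate, and the same checks can be run locally before pushing. A minimal sketch, assuming the pinned versions and config paths used in the CI file above:)

```
# local equivalent of the CI format gate (a sketch, not a repo script)
pip install flake8 black=="20.8b1"
flake8 dbm-ui/backend --config=dbm-ui/backend/.flake8  # exits non-zero on any finding
black --check dbm-ui/backend                           # --check only reports, never rewrites
```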
diff --git a/.ci/python_unit_test.yml b/.ci/python_unit_test.yml
new file mode 100644
index 0000000000..31b42f290d
--- /dev/null
+++ b/.ci/python_unit_test.yml
@@ -0,0 +1,56 @@
+version: v2.0
+
+on:
+  mr:
+    target-branches: [ "*" ]
+    paths:
+    - dbm-ui/backend/*
+
+stages:
+  - name: "Python unit tests"
+    jobs:
+      unit_test:
+        name: "Unit tests"
+        runs-on:
+          pool-name: docker # docker-on-devcloud, docker, local, agentless
+        container:
+          image: mirrors.tencent.com/ci/tlinux3_ci:2.0.0
+        steps:
+          - checkout: self
+          - uses: "calculatehash@1.*"
+            id: "calculate"
+            with:
+              file_path: "dbm-ui/poetry.lock"
+              calculate_func: "md5"
+          - uses: cache@2.*
+            id: cache
+            with:
+              cacheKey: "poetry_pip_${{ steps.calculate.outputs.hash_value }}"
+              cachePaths: "/tmp/ci_py_venv"
+              restoreKeys: "poetry_pip_"
+              skipSaveWhenCacheHit: false
+          - name: "MySQL service"
+            id: "mysql"
+            uses: "mysqlservice@1.*"
+            with:
+              imageName: "mysql:5.7"
+              port: "3306"
+              mysqlPw: "db_for_unit_test"
+              initCmd: "set names utf8mb4;"
+          - name: "Redis service"
+            id: "redis"
+            uses: "redisservice@1.*"
+            with:
+              imageName: "redis:5.0.9"
+              redisPw: "redis_for_unit_test"
+          - name: "Run unit tests"
+            run: |-
+              source ./dbm-ui/scripts/ci/env.sh
+              export DB_HOST=${{ jobs.unit_test.steps.mysql.outputs.MYSQL_IP }}
+              export DBA_APP_BK_BIZ_ID=0
+              export DB_PASSWORD="db_for_unit_test"
+              export REDIS_HOST=${{ jobs.unit_test.steps.redis.outputs.REDIS_IP }}
+              export REDIS_PORT=${{ jobs.unit_test.steps.redis.outputs.REDIS_PORT }}
+              export REDIS_PASSWORD="redis_for_unit_test"
+              export BROKER_URL="redis://:${REDIS_PASSWORD}@${REDIS_HOST}:${REDIS_PORT}/1"
+              ./dbm-ui/scripts/ci/bk_ci.sh
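(The job wires the test run to throwaway MySQL and Redis side containers through environment variables. A rough local equivalent with plain Docker; the container names and host ports here are assumptions, not repo values:)

```
# throwaway services mirroring the CI job (sketch; names and ports are illustrative)
docker run -d --name dbm-test-mysql -p 3306:3306 \
  -e MYSQL_ROOT_PASSWORD=db_for_unit_test mysql:5.7 --character-set-server=utf8mb4
docker run -d --name dbm-test-redis -p 6379:6379 \
  redis:5.0.9 --requirepass redis_for_unit_test
export DB_HOST=127.0.0.1 DB_PASSWORD=db_for_unit_test
export REDIS_HOST=127.0.0.1 REDIS_PORT=6379 REDIS_PASSWORD=redis_for_unit_test
export BROKER_URL="redis://:${REDIS_PASSWORD}@${REDIS_HOST}:${REDIS_PORT}/1"
```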
diff --git a/.ci/templates/open_source_gate.yml b/.ci/templates/open_source_gate.yml
new file mode 100644
index 0000000000..9869ca05a6
--- /dev/null
+++ b/.ci/templates/open_source_gate.yml
@@ -0,0 +1,23 @@
+parameters:
+- name: receivers
+  type: array
+  default: [ "${{ ci.actor }}" ]
+
+gates:
+- name: open-source-gate
+  rule:
+    - "CodeccCheckAtomDebug.all_risk <= 0"
+    - "CodeccCheckAtomDebug.high_med_new_issue <= 0"
+    - "CodeccCheckAtomDebug.ccn_new_max_value <= 55"
+    - "CodeccCheckAtomDebug.sensitive_defect <= 0"
+    - "CodeccCheckAtomDebug.dupc_average <= 15"
+    - "CodeccCheckAtomDebug.ccn_average <= 3.5"
+    - "CodeccCheckAtomDebug.ccn_funcmax <= 55"
+    - "CodeccCheckAtomDebug.pyLint_after_defect <= 0"
+    - "CodeccCheckAtomDebug.woodpecker_all_defect <= 0"
+    - "CodeccCheckAtomDebug.horuspy_all_defect <= 0"
+    - "CodeccCheckAtomDebug.go_serious_defect <= 0"
+    - "CodeccCheckAtomDebug.go_all_defect <= 100"
+  notify-on-fail:
+  - type: wework-message
+    receivers: ${{ parameters.receivers }}
diff --git a/.code.yml b/.code.yml
new file mode 100644
index 0000000000..b53d93970a
--- /dev/null
+++ b/.code.yml
@@ -0,0 +1,17 @@
+source:
+  # Files and directories use absolute paths, computed from the repository root and starting with /.
+  # List the directories or filename patterns of test code written in this repo, so the later code-statistics stage can exclude them.
+  # Do not mask all code with regexes like .*/: that slashes the department's code count and score, and leaves Coverity nothing to scan, which fails the job.
+  # Do not use tab characters; they break the YAML parser.
+  test_source:
+    # Used to match files; matching is by regular expression.
+    filepath_regex: [".*/test.py"]
+  # Code auto-generated by tools or frameworks and kept in the repo, so the code-statistics stage can exclude it; may be empty if none.
+  auto_generate_source:
+    # Regex for auto-generated files; if there is no uniform pattern, specific directories can be listed instead (see test_source).
+    filepath_regex: [".*/migrations/.*"]
+  # Regex for third-party code that lives in the repo as source.
+  # It is excluded from the later code-statistics stage; leave empty if there is no third-party code to exclude.
+  third_party_source:
+    # Regex for third-party files; if there is no uniform pattern, specific directories can be listed instead (see test_source).
+    filepath_regex:
\ No newline at end of file
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000000..baf6f8aad6
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,32 @@
+# public
+/.github @zhangzhw8 @gaohongsong @iSecloud
+/docs @zhangzhw8 @gaohongsong @iSecloud
+/*.md @zhangzhw8 @gaohongsong @iSecloud
+/helm-charts @zhangzhw8 @gaohongsong @iSecloud
+
+# dbm-ui
+/dbm-ui @zhangzhw8 @gaohongsong @iSecloud
+/dbm-ui/frontend @Daryl0819 @hLinx
+
+# dbm-services common
+/dbm-services/common @seanlook @xfwduke @lukemakeit @xiepaup @ymakedaq
+/dbm-services/common/db-config @seanlook @xfwduke
+/dbm-services/common/db-resource @ymakedaq @seanlook @xfwduke
+/dbm-services/common/db-dns @omg-by @xiepaup @lukemakeit
+/dbm-services/common/dbha @zyqlzr @xjxia
+
+# bigdata
+/dbm-services/bigdata @zhangrq5 @zvictorino
+/dbm-services/es @zhangrq5 @zvictorino
+/dbm-services/hdfs @zhangrq5 @zvictorino
+/dbm-services/kafka @zhangrq5 @zvictorino
+
+# mysql
+/dbm-services/mysql @seanlook @xfwduke @yksitu @ymakedaq
+/dbm-services/mysql/db-partition @fanfanyangyang @xfwduke @seanlook
+/dbm-services/mysql/db-priv @fanfanyangyang @xfwduke @seanlook
+/dbm-services/mysql/db-remote-service @xfwduke @seanlook
+/dbm-services/mysql/db-simulation @seanlook @xfwduke @ymakedaq
+
+# redis
+/dbm-services/redis @lukemakeit @xiepaup @omg-by @mikluo @yyhenryyy @cuiguobin
diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..632e673c50
--- /dev/null
+++ b/.github/CODE_OF_CONDUCT.md
@@ -0,0 +1,50 @@
+# Code of Conduct
+## Our Pledge
+In the interest of building an open and friendly environment, we as contributors and maintainers pledge that participation in our project and community
+will be free of harassment for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual orientation.
+
+## Our Standards
+
+Behaviors that help create a positive environment include, but are not limited to:
+
+- Using welcoming and inclusive language
+- Being respectful of differing viewpoints and experiences
+- Gracefully accepting constructive criticism
+- Focusing on what is best for the community
+- Being friendly towards other community members
+
+Behaviors participants should not engage in include, but are not limited to:
+
+- Posting sexualized language or imagery, or unwelcome sexual attention
+- Trolling, provocation, rumor-mongering, insulting or derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or email address, without explicit permission
+- Other conduct which could reasonably be considered a breach of professional ethics
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying what "acceptable behavior" means, and for taking fair and appropriate corrective action when unacceptable behavior occurs.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that violate this code of conduct;
+maintainers may temporarily or permanently ban any participant whose behavior they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This code of conduct applies within this project. It also applies to public spaces when someone is representing this project or its community.
+
+Representing the project or community includes, but is not limited to: using an official project email address, posting via an official media account, or acting as an appointed representative at an online or offline event.
+
+What counts as representing the project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Violations of the code of conduct may be reported to the OWNERs of this repository, to whom abuse,
+harassment, and other unacceptable behavior can be raised.
+
+The maintainer team will review and investigate all complaints and respond as necessary and appropriate. The project team is obligated to keep the reporter's identity confidential. Specific enforcement policies may be published separately.
+
+Project maintainers who do not follow or enforce this code of conduct in good faith may, by decision of the project lead or other members, be temporarily or permanently removed from the project.
+
+## Attribution
+
+This code of conduct is adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/zh-cn/version/1/4/code-of-conduct.html
\ No newline at end of file
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
new file mode 100644
index 0000000000..909401f96f
--- /dev/null
+++ b/.github/CONTRIBUTING.md
@@ -0,0 +1,66 @@
+## Development workflow
+
+We recommend using Git along the flow shown below (the gtm tool automates most of it)
+
+![](gitflow.png)
+
+## All development must be based on an Issue
+Always branch a new local development branch off the latest main branch or the target feature branch
+
+### Please install pre-commit so your commits meet the basic development conventions before they are pushed
+
+### Ticket types
+
+Tickets fall into the following types; pick the one that matches the actual work when creating a ticket.
+
+The ticket type does not affect the development flow, but it later feeds into R&D-efficiency metrics, so please fill it in accurately. If you really cannot decide, follow one simple rule: use `fix` for bug fixes and `feat` for everything else
+
+| Type | Chinese | Emoji | Description |
+| -------- | ---------- | ----- | ------------------------------------------------------------ |
+| feat | 特性 | ✨ | A new feature. Correlates with MINOR in SemVer |
+| fix | 修复 | 🐛 | A bug fix. Correlates with PATCH in SemVer |
+| docs | 文档 | 📚 | Documentation only changes |
+| style | (代码)样式 | 💎 | Changes that do not affect the meaning of the code (white-space, formatting, etc) |
+| refactor | 重构 | 📦 | A code change that neither fixes a bug nor adds a feature |
+| perf | 性能优化 | 🚀 | A code change that improves performance |
+| test | 测试 | 🚨 | Adding missing or correcting existing tests |
+| chore | 琐事 | ♻️ | Changes to the build process or auxiliary tools and libraries such as documentation generation |
+
+
+
+### Before you start
+- Use gtm create to create or link an Issue; once it completes, a matching Branch and Pull Request are created in the upstream repo
+  ```
+  gtm c
+  ```
+- Follow the prompts and run the commands below to start development
+  - Sync the upstream repository branches
+    ```
+    git fetch upstream
+    ```
+  - Switch to the feature branch, e.g. `feat/ipv6`
+    ```
+    git checkout feat/ipv6
+    ```
+  - Push the branch to your personal repo
+    ```
+    git push --set-upstream origin feat/ipv6
+    ```
+
+### Now you can start coding
+- When committing, write the commit message according to the convention
+
+### After development is done
+- If your work produced multiple commits, use `rebase` to tidy them up; as a rule, one Pull Request corresponds to exactly one commit
+  ```
+  # use git log --oneline to find the target commit-id to rebase onto
+  git rebase -i [commit-id]
+  ```
+- Push the local branch to the origin repo
+  ```
+  git push
+  ```
+- Use `gtm pr` to create the origin -> upstream Pull Request
+  ```
+  gtm pr
+  ```
\ No newline at end of file
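(Combining the type table with the one-commit-per-PR rule, a tidied branch typically ends in a single typed commit. The message text and issue number below are made up purely for illustration:)

```
# squash work-in-progress commits, then keep one typed commit (illustrative values)
git rebase -i upstream/master
git commit --amend -m "feat: support IPv6 address pools (#123)"
git push --force-with-lease origin feat/ipv6
```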
diff --git a/.github/ISSUE_TEMPLATE/bug_report_zh.md b/.github/ISSUE_TEMPLATE/bug_report_zh.md
new file mode 100644
index 0000000000..27bcc54d0f
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report_zh.md
@@ -0,0 +1,29 @@
+---
+name: Bug report
+about: Bug report
+title: "[BUG]"
+labels: bug
+assignees: zhangzhw8
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is
+
+**How to reproduce**
+Steps or operations to reproduce the behavior
+1. Go to '....'
+2. Click on '....'
+3. Observed error '....'
+
+**Expected behavior**
+A clear and concise description of what you expected to happen
+
+**Screenshots**
+Please add screenshots to explain your problem; they also help us understand it better.
+
+**Please provide the following information**
+
+ - [x] blueking-dbm version (release number or git tag):
+ - [ ] BlueKing PaaS version:
+ - [ ] blueking-dbm error logs:
diff --git a/.github/ISSUE_TEMPLATE/enhancement.md b/.github/ISSUE_TEMPLATE/enhancement.md
new file mode 100644
index 0000000000..2bda70115b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/enhancement.md
@@ -0,0 +1,13 @@
+---
+name: Feature request / optimization / suggestion
+about: Feature request / optimization / suggestion
+title: "[FEATURE]"
+labels: feature
+assignees: zhangzhw8
+
+---
+**What feature do you want**
+
+**Why is it needed**
+
+**How you suggest implementing it**
\ No newline at end of file
diff --git a/.github/gitflow.png b/.github/gitflow.png
new file mode 100644
index 0000000000000000000000000000000000000000..4a0252ae2c19a8edf93b0082082ba1ed95bdf7ec
GIT binary patch
literal 45618
[45618 bytes of base85-encoded PNG data omitted]
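(gitflow.png arrives above as a git-specific binary literal, which patch(1) cannot apply. If the image ever needs to travel as a standalone patch, the diff must be regenerated in binary mode; the revision names here are illustrative:)

```
# binary-capable round trip for the image (revisions are illustrative)
git diff --binary HEAD~1 HEAD -- .github/gitflow.png > gitflow.patch
git apply gitflow.patch  # "GIT binary patch" hunks apply only via git
```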
z7YKaN|0T8mH5&gfX7!&}*Rfc;@f|}SF}SsG&(|f-(WE1345!%%rbbwF-x$Wd6F>WW zJkH+6HP0tQ>a)mMYvqD+@A?3M|4XfRteS_Lx63k1?`s7w%9Hm2T1DF!H)sDlk7M#4 tC^VKn1?zgz;MJ!`KluOsMo#?n2NuF7QT5LB(&1) + if [[ $BLACK_RESULT =~ "reformatted" ]]; then + echo ${BLACK_RESULT} + echo "black test failed, please check if you have install pre-commit" + exit 1 + else + echo "black test passed" + fi diff --git a/.github/workflows/python_unit_test.yml b/.github/workflows/python_unit_test.yml new file mode 100644 index 0000000000..f85ff0f86e --- /dev/null +++ b/.github/workflows/python_unit_test.yml @@ -0,0 +1,50 @@ +name: Unit Test + +on: + workflow_dispatch: + push: + paths: + - dbm-ui/backend/* + pull_request: + paths: + - dbm-ui/backend/* + +jobs: + run: + runs-on: ubuntu-20.04 + env: + OS: ubuntu-latest + PYTHON: "3.6" + services: + # Label used to access the service container + redis: + # Docker Hub image + image: redis + # Set health checks to wait until redis has started + options: >- + --health-cmd "redis-cli ping" + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + # Maps port 6379 on service container to the host + - 6379:6379 + steps: + - name: Checkout Repository + uses: actions/checkout@v2 + - name: Setup Python + uses: actions/setup-python@v2 + with: + python-version: 3.6 + - name: Setup Mysql + run: | + sudo systemctl start mysql.service + - name: "执行单元测试" + run: |- + source ./dbm-ui/scripts/ci/env.sh + export DBA_APP_BK_BIZ_ID=0 + export DB_PASSWORD=root + export REDIS_HOST="localhost" + export REDIS_PORT=6379 + export BROKER_URL="redis://localhost:6379/0" + ./dbm-ui/scripts/ci/bk_ci.sh \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000..ef16ad1ee8 --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +# Rope project settings +.ropeproject +.DS_Store +.idea +.codecc +.vscode +.gtm.yaml \ No newline at end of file diff --git a/.gtmproject.yaml b/.gtmproject.yaml new file mode 100644 index 0000000000..1215fd453c --- /dev/null +++ b/.gtmproject.yaml @@ -0,0 +1,25 @@ +github: + # 上游项目仓库的组织 + repo_org: "TencentBlueKing" + + # 上游项目仓库的名称 + repo_name: "blueking-dbm" + + # 指定里程碑ID, + milestone_id: "1" + +project: + # 默认目标分支 + default_target_branch: "master" + # 范围,根据项目的模块或者分工设定,可为空 + scopes: + - backend + - frontend + - mysql + - redis + - kafka + - es + - hdfs + - influxdb + - pulsar + - dbm-services diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..192599b36c --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,31 @@ +files: ^dbm-ui/backend/ +fail_fast: true +repos: +- repo: https://github.com/timothycrosley/isort + rev: 5.7.0 + hooks: + - id: isort + exclude: > + (?x)^( + backend/packages/.* + )$ + additional_dependencies: [toml] +- repo: https://github.com/psf/black + rev: 20.8b1 + hooks: + - id: black + args: [--config=./dbm-ui/pyproject.toml] + language_version: "3.6" + exclude: > + (?x)^( + backend/packages/.* + )$ +- repo: https://github.com/PyCQA/flake8 + rev: 3.8.0 + hooks: + - id: flake8 + args: [--config=./dbm-ui/backend/.flake8] + exclude: > + (?x)^( + backend/packages/.* + )$ diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..801a04aae9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 腾讯蓝鲸 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, 
copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/build.yml b/build.yml new file mode 100644 index 0000000000..e6eade9756 --- /dev/null +++ b/build.yml @@ -0,0 +1,29 @@ +version: v2.0 + +stages: + - name: "stage1" + jobs: + job1: + runs-on: + pool-name: docker-on-devcloud #docker-on-devcloud、docker、local、agentless + container: + image: mirrors.tencent.com/ci/tlinux3_ci:2.1.0 + needs: + jdk: "1.8.0_161" + steps: + - uses: syncLocalCode@latest + name: 同步文件到云端插件 + with: + syncGitRepository: true + - uses: CodeccCheckAtomDebug@4.* + name: 腾讯代码分析 + with: + languages: + - "JS" + - "PYTHON" + - "TYPESCRIPT" + - "CSS" + - "JSON" + - "GO" + checkerSetType: "openScan" #openScan对应按开源治理要求配置规则集,epcScan对应按PCG EPC要求配置,normal对应自主配置规则集 + toolScanType: "0" # 扫描方式。0是全量扫描,1是增量扫描。 \ No newline at end of file diff --git a/dbm-services/bigdata/db-tools/dbactuator/.ci/codecc.yml b/dbm-services/bigdata/db-tools/dbactuator/.ci/codecc.yml new file mode 100644 index 0000000000..c824dddd90 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/.ci/codecc.yml @@ -0,0 +1,29 @@ +version: v2.0 +resources: + repositories: + - repository: ci_templates/public/codecc + name: codecc +on: + mr: + target-branches: [ "*" ] +stages: + - name: "代码检查" + check-out: + gates: + - template: commonGate.yml@codecc + timeout-hours: 10 + jobs: + codecc: + name: "CodeCC代码检查" + runs-on: + pool-name: docker #docker-on-devcloud、docker、local、agentless + container: + image: mirrors.tencent.com/ci/tlinux3_ci:2.0.0 + steps: + - checkout: self + - uses: CodeccCheckAtomDebug@4.* + name: 腾讯代码分析 + with: + beAutoLang: true # 自动检测项目语言 + checkerSetType: "openScan" # 规则集类型,normal对应自主配置规则集,openScan对应按开源治理要求配置 + toolScanType: "2" # 扫描方式。快速全量扫描[1] | 全量扫描[0] | 差异扫描[6] | MR/PR扫描[2],默认为1 diff --git a/dbm-services/bigdata/db-tools/dbactuator/.ci/open_source_check.yml b/dbm-services/bigdata/db-tools/dbactuator/.ci/open_source_check.yml new file mode 100644 index 0000000000..f421f315f3 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/.ci/open_source_check.yml @@ -0,0 +1,84 @@ +version: "v2.0" +name: "开源检查" +label: [] +variables: {} +stages: +- name: "开源检查" + label: + - "Build" + jobs: + job_AfK: + name: "构建环境-LINUX" + runs-on: + pool-name: "docker" + container: + image: "mirrors.tencent.com/ci/tlinux3_ci:2.3.0" + needs: {} + steps: + - checkout: self + - name: "敏感信息检查-部门RTX" + uses: "SensitiveRtxChecker@3.*" + - name: "腾讯代码分析(官方-代码分析工作组)" + uses: "CodeccCheckAtomDebug@4.*" + with: + beAutoLang: true + languages: + - "GOLANG" + checkerSetType: "communityOpenScan" + tools: + - "WOODPECKER_COMMITSCAN" + - "SCC" + - "PECKER_SECURITY" + - "SENSITIVE" + - "DUPC" + - "IP_CHECK" + - "WOODPECKER_SENSITIVE" + - "HORUSPY" + - "XCHECK" + - "CCN" + asyncTask: false + asyncTaskId: "" + scriptType: "SHELL" + script: 
|- + # Coverity/Klocwork将通过调用编译脚本来编译您的代码,以追踪深层次的缺陷 + # 请使用依赖的构建工具如maven/cmake等写一个编译脚本build.sh + # 确保build.sh能够编译代码 + # cd path/to/build.sh + # sh build.sh + languageRuleSetMap: {} + checkerSetEnvType: "prod" + multiPipelineMark: "" + rtxReceiverType: "1" + botWebhookUrl: "" + botRemindRange: "2" + botRemindSeverity: "7" + botRemaindTools: [] + emailReceiverType: "1" + emailCCReceiverList: [] + instantReportStatus: "2" + reportDate: [] + reportTime: "" + reportTools: [] + toolScanType: "1" + diffBranch: "" + byFile: false + mrCommentEnable: true + prohibitIgnore: false + newDefectJudgeFromDate: "" + transferAuthorList: [] + path: [] + customPath: [] + scanTestSource: false + openScanPrj: false + openScanFilterEnable: false + issueSystem: "TAPD" + issueSubSystem: "" + issueResolvers: [] + issueReceivers: [] + issueFindByVersion: "" + maxIssue: 1000 + issueAutoCommit: false + check-out: + gates: + - template: open_source_gate.yml + timeout-hours: 10 \ No newline at end of file diff --git a/dbm-services/bigdata/db-tools/dbactuator/.ci/templates/open_source_gate.yml b/dbm-services/bigdata/db-tools/dbactuator/.ci/templates/open_source_gate.yml new file mode 100644 index 0000000000..d14127e08c --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/.ci/templates/open_source_gate.yml @@ -0,0 +1,26 @@ +parameters: + - name: receivers + type: array + default: [ "${{ ci.actor }}" ] + +gates: + - name: open-source-gate + rule: + - "CodeccCheckAtomDebug.all_risk <= 0" + - "CodeccCheckAtomDebug.high_med_new_issue <= 0" + - "CodeccCheckAtomDebug.ccn_new_max_value <= 40" + - "CodeccCheckAtomDebug.sensitive_defect <= 0" + - "CodeccCheckAtomDebug.dupc_average <= 15" + - "CodeccCheckAtomDebug.ccn_average <= 3" + - "CodeccCheckAtomDebug.ccn_new_defect <= 0" + - "CodeccCheckAtomDebug.ccn_funcmax <= 20" + - "CodeccCheckAtomDebug.woodpecker_all_defect <= 0" + - "CodeccCheckAtomDebug.horuspy_all_defect <= 0" + - "CodeccCheckAtomDebug.go_serious_defect <= 0" + - "CodeccCheckAtomDebug.go_all_defect <= 100" + notify-on-fail: + - type: wework-message + receivers: ${{ parameters.receivers }} + continue-on-fail: + gatekeepers: + - "${{ ci.actor }}" \ No newline at end of file diff --git a/dbm-services/bigdata/db-tools/dbactuator/.gitignore b/dbm-services/bigdata/db-tools/dbactuator/.gitignore new file mode 100644 index 0000000000..407e536b9e --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/.gitignore @@ -0,0 +1,26 @@ +!.gitkeep +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +# Test binary, built with `go test -c` +*.test +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +# Dependency directories (remove the comment below to include it) +vendor/ +# Go workspace file +go.work +configs/* +log/ +build/ +conf/ +*exe +*.log +.idea/ +.DS_Store +sync_test.sh +.vscode/ +scripts/upload_media.sh \ No newline at end of file diff --git a/dbm-services/bigdata/db-tools/dbactuator/.golangci.yml b/dbm-services/bigdata/db-tools/dbactuator/.golangci.yml new file mode 100644 index 0000000000..b165022e4c --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/.golangci.yml @@ -0,0 +1,121 @@ +# 完整版本在 https://golangci-lint.run/usage/configuration/ +linters-settings: + funlen: + lines: 80 + statements: 80 + govet: + check-shadowing: true + lll: + line-length: 120 + errcheck: + check-type-assertions: true + goconst: + min-len: 2 + min-occurrences: 2 + gocyclo: + min-complexity: 20 + goimports: + revive: + confidence: 0 + rules: + - name: var-declaration + - name: 
package-comments + - name: dot-imports + - name: blank-imports + - name: exported + - name: var-naming + - name: indent-error-flow + - name: range + - name: errorf + - name: error-naming + - name: error-strings + - name: receiver-naming + - name: increment-decrement + - name: error-return + #- name: unexported-return + - name: time-naming + - name: context-keys-type + - name: context-as-argument + - name: argument-limit + severity: warning + disabled: false + arguments: [ 5 ] + gocritic: + enabled-checks: + - nestingReduce + - commentFormatting + settings: + nestingReduce: + bodyWidth: 5 + +linters: + disable-all: true + enable: + - deadcode + - funlen + - goconst + - gocyclo + - gofmt + - ineffassign + - staticcheck + - structcheck # 当非导出结构嵌入另一个结构, 前一个结构被使用就不会监测到, 这个需要每个业务自己屏蔽 + - typecheck + - goimports + - revive + - gosimple + - govet + - lll + - rowserrcheck + - errcheck + - unused + - varcheck + - sqlclosecheck + - gocritic + # - bodyclose https://github.com/timakin/bodyclose/issues 问题太多了,屏蔽都屏蔽不过来,显式不使用它 + +run: + # default concurrency is a available CPU number + concurrency: 4 + # timeout for analysis, e.g. 30s, 5m, default is 1m + timeout: 2m + # exit code when at least one issue was found, default is 1 + issues-exit-code: 1 + # include test files or not, default is true + tests: false + # default is true. Enables skipping of directories: + # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ + skip-dirs-use-default: true + skip-files: + - ".*/mock/.*.go" + - ".*testing.go" + - "docs/*.go" + +issues: + exclude-use-default: true + # The list of ids of default excludes to include or disable. By default it's empty. + # 下面的规则,golangci-lint认为应该屏蔽,但是我们选择不屏蔽。所以,`exclude-use-default: true`屏蔽一部分,把下面的再捞出来。 + # golanglint-ci维护的忽略列表里有一些是我们不想屏蔽的,捞出来。这里说一下,使用白名单是好于黑名单的。名单随着golanglint-ci引入更多工具,我们跟进享受好处。我们搞黑名单,就变成自己维护,不如golanglint-ci去维护,更好。 + include: + - EXC0004 # govet (possible misuse of unsafe.Pointer|should have signature) + - EXC0005 # staticcheck ineffective break statement. Did you mean to break out of the outer loop + - EXC0012 # revive exported (method|function|type|const) (.+) should have comment or be unexported + - EXC0013 # revive package comment should be of the form "(.+)... + - EXC0014 # revive comment on exported (.+) should be of the form "(.+)..." 
+  - EXC0015 # revive should have a package comment, unless it's in another file for this package
+  exclude-rules:
+    - path: _test\.go
+      linters:
+        - funlen # 规范说单测函数,单个函数可以到160行,但是工具不好做区分处理,这里就直接不检查单测的函数长度
+    - linters:
+        - staticcheck
+      text: "SA6002: argument should be pointer-like to avoid allocations" # sync.Pool.Put(buf), slice `var buf []byte` will trigger this
+    - linters:
+        - lll
+      source: "^//go:generate " # Exclude lll issues for long lines with go:generate
+  max-same-issues: 0
+  new: false
+  max-issues-per-linter: 0
+output:
+  sort-results: true
+service:
+  golangci-lint-version: 1.28.x
diff --git a/dbm-services/bigdata/db-tools/dbactuator/LICENSE b/dbm-services/bigdata/db-tools/dbactuator/LICENSE
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/bigdata/db-tools/dbactuator/Makefile b/dbm-services/bigdata/db-tools/dbactuator/Makefile
new file mode 100644
index 0000000000..0597f378bb
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/Makefile
@@ -0,0 +1,22 @@
+SHELL := /bin/bash
+BASE_DIR = $(shell pwd)
+VERSION = 0.0.1
+APPNAME = dbactuator
+GOOS ?= linux
+BUILD_FLAG = "-X main.version=${VERSION} -X main.buildstamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X main.githash=`git rev-parse HEAD` "
+
+.PHONY: all build clean
+
+build:
+	cd ${BASE_DIR}/cmd && CGO_ENABLED=0 GOOS=${GOOS} GOARCH=amd64 go build -gcflags=-trimpath=${PWD} -asmflags=-trimpath=${PWD} -ldflags ${BUILD_FLAG} -o $(BASE_DIR)/build/$(APPNAME) -v .
+
+clean:
+	cd ${BASE_DIR}/build && rm -rf ${APPNAME}
+
+gotool:
+	@-gofmt -w .
+
+help:
+	@echo "make - compile go source"
+	@echo "make gotool - run gofmt"
+	@echo "make clean - do some clean job"
diff --git a/dbm-services/bigdata/db-tools/dbactuator/README.md b/dbm-services/bigdata/db-tools/dbactuator/README.md
new file mode 100644
index 0000000000..a1fffab742
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/README.md
@@ -0,0 +1,134 @@
+# git.tencent.com/dbs/bk-dbactuator
+
+## dbactuator
+
+A collection of database operation commands. It implements atomic tasks such as deploying MySQL, Proxy, monitoring and backup, and applying MySQL/Proxy changes; an upstream pipeline orchestrates these commands into different scenario-specific jobs.
+```
+Db Operation Command Line Interface
+Version: 0.0.1
+Githash: 212617a717c3a3a968eb0c7d3a2c4ea2bc21abc2
+Buildstamp:2022-05-27_06:42:56AM
+
+Usage:
+  dbactuator [flags]
+  dbactuator [command]
+
+Available Commands:
+  completion  Generate the autocompletion script for the specified shell
+  help        Help about any command
+  mysql       MySQL Operation Command Line Interface
+  proxy       MySQL Proxy Operation Command Line Interface
+  sysinit     Exec sysinit_mysql.sh,Init mysql default os user,password
+
+Flags:
+  -h, --help             help for dbactuator
+  -n, --node_id string   节点id
+  -p, --payload string   command payload
+  -r, --rollback         回滚任务
+  -x, --show-payload     show payload for man
+  -u, --uid string       单据id
+
+Use "dbactuator [command] --help" for more information about a command.
+```
+
+## Documentation
+
+### Developing a subcommand
+
+#### Adding a description and example to the payload (swagger)
+##### **Viewing the annotations**
+```
+./dbactuator mysql find-local-backup --helper
+```
+
+##### **How to add annotations to --helper**
+Add annotations on the subcommand definition, for example:
+```
+// FindLocalBackupCommand godoc
+//
+// @Summary 查找本地备份
+// @Description 查找本地备份
+// @Tags mysql
+// @Accept json
+// @Param body body mysql.FindLocalBackupParam true "short description"
+// @Success 200 {object} mysql.FindLocalBackupResp
+// @Router /mysql/find-local-backup [post]
+func FindLocalBackupCommand() *cobra.Command {
+...
+```
+
+- For `@Param`, replace the `mysql.FindLocalBackupParam` in the middle with the subcommand's parameter struct definition; the struct must be referenceable from the current package.
+  swagger uses `@param` to parse parameters, so do not let it clash with other function comments, otherwise building the docs may fail; see the output example below.
+- `@Router` has the format `/cmd/subcmd [post]`; keep the trailing `[post]`.
+- If the command produces no output, drop the `@Success` line; see the output example below.
+
+**Example of field comments on a param struct:**
+```
+// Enum description for a field. A field comment may go on the line above the field, or after it on the same line
+Field1 int `json:"field1" enums:"0,1,2"` // enum type
+Field2 string `json:"field2" validate:"required" example:"test"` // required, with example content
+Field3 int `json:"field3" validate:"gte=0,lte=999" default:"2"` // min/max bounds and a default value
+```
+
+##### **How to add an example**
+Add an `Example() interface{}` method to the component's struct, for example:
+```
+func (f *FindLocalBackupComp) Example() interface{} {
+	comp := FindLocalBackupComp{
+		Params: FindLocalBackupParam{
+			BackupDirs:  []string{"/data/dbbak", "/data1/dbbak"},
+			TgtInstance: &common.InstanceExample,
+			FileServer:  false,
+		},
+	}
+	return comp
+}
+```
+Fill in whatever example fields you need; they just have to serialize to JSON.
+
+Then fill in the `Example` field in the subcommand definition, for example:
+```
+cmd := &cobra.Command{
+	Use: "find-local-backup",
+	Example: fmt.Sprintf(`dbactuator mysql find-local-backup %s %s`,
+		subcmd.CmdBaseExampleStr, common.ToPrettyJson(act.Service.Example())),
+	...
+}
+```
+
+If you need an output example as well, you can follow `mysql restore-dr` and write an `ExampleOutput()`.
+
+##### **Generating the annotations**
+First download the `swag` command from https://github.com/swaggo/swag (v1.8.12 is recommended; older versions may not work with go1.19).
+```
+# First let swagger generate the annotations into docs/swagger.json
+# Watch whether the annotations build successfully
+./build_doc.sh
+
+# Then compile and package them into the binary
+make
+```
+Or run `./build.sh` to do both in one step.
+
+For now, to avoid merge conflicts, .gitignore ignores docs/swagger.json and docs/swagger.yaml.
+
+### Go coding conventions
+[https://github.com/golang/go/wiki/CodeReviewComments](https://github.com/golang/go/wiki/CodeReviewComments)
+
+
+#### Formatting
+- All code must be formatted with `gofmt`. (If you are not using an IDE that does this, adjust your setup accordingly.)
+
+#### Imports
+- Use `goimports` to format imported package names automatically; in principle, the `goimports` rules are authoritative for imports.
+
+#### Package naming
+- Keep the `package` name identical to its directory name.
+- Package names should be lower-case words; do not use underscores or mixed case. Use nested directories to express hierarchy.
+- Do not use meaningless package names such as `util`, `common`, `misc`, `global`. A `package` name should stay clear and increasingly focused, following the single-responsibility principle, rather than becoming a `common`-style catch-all that keeps swelling, complicates the dependency graph, and hurts readability, reuse, and refactoring. Note that a package name like `xx/util/encryption` is allowed.
+
+#### File naming
+- File names should be lower case, with words separated by underscores.
+
+#### Variable naming
+- Variable names must use camelCase; the first letter is upper- or lower-case depending on the intended visibility (see the illustrative sketch below).
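As an illustration of the naming conventions above, here is a small, hypothetical package (the `encryption` package and everything in it are invented for illustration; they are not part of this repository):

```go
// Package encryption provides helpers for encoding small payloads.
// The package name matches its directory (e.g. xx/util/encryption),
// is a single lower-case word, and keeps a single responsibility.
package encryption

import "encoding/base64"

// This file would be named something like base64_codec.go:
// lower case, words separated by underscores.

// defaultEncoding is unexported, so it starts with a lower-case letter.
var defaultEncoding = base64.StdEncoding

// EncodeToString is exported, so it starts with an upper-case letter
// and carries a doc comment, as required for exported identifiers.
func EncodeToString(plain []byte) string {
	return defaultEncoding.EncodeToString(plain)
}
```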
diff --git a/dbm-services/bigdata/db-tools/dbactuator/build.sh b/dbm-services/bigdata/db-tools/dbactuator/build.sh
new file mode 100755
index 0000000000..3672cc3574
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/build.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+workDir=`pwd`
+
+# unit test
+cd $workDir
+./build_doc.sh
+make
\ No newline at end of file
diff --git a/dbm-services/bigdata/db-tools/dbactuator/build_doc.sh b/dbm-services/bigdata/db-tools/dbactuator/build_doc.sh
new file mode 100755
index 0000000000..c41f7a2ca5
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/build_doc.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# https://github.com/swaggo/swag
+# --parseDependency to avoid: ParseComment ... cannot find type definition: json.RawMessage
+swag init -g cmd/cmd.go --o docs/ --ot json,yaml --parseDependency
+if [ $? -gt 0 ]; then
+    echo "generate swagger api docs failed"
+    exit 1
+fi
+tree docs/
\ No newline at end of file
diff --git a/dbm-services/bigdata/db-tools/dbactuator/cmd/cmd.go b/dbm-services/bigdata/db-tools/dbactuator/cmd/cmd.go
new file mode 100644
index 0000000000..979b201128
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/cmd/cmd.go
@@ -0,0 +1,191 @@
+// Package main is the entry point of dbactuator.
+/*
+ * @Description: dbactuator entry point; implements the collection of data-side operation commands, such as installing mysql and so on
+ * @Usage: dbactuator --help
+ */
+package main
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd"
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/crontabcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd"
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd"
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd"
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/sysinitcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"os"
+	"runtime/debug"
+	"time"
+
+	"github.com/spf13/cobra"
+)
+
+const (
+	// CMD is the name of the command-line binary
+	CMD = "dbactuator"
+)
+
+var (
+	buildstamp = ""
+	githash    = ""
+	version    = "0.0.1"
+)
+
+// @title dbactuator API
+// @version 0.0.1
+// @description This is a dbactuator command collection.
+// @termsOfService http://swagger.io/terms/
+// @Schemes http
+// @contact.name API Support
+// @contact.url http://www.swagger.io/support
+// @contact.email support@swagger.io

+// @license.name Apache 2.0
+// @license.url http://www.apache.org/licenses/LICENSE-2.0.html

+// @host ./dbactuator
+// @BasePath /

+// main godoc
+func main() {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Println(err)
+			logger.Error("panic goroutine inner error!%v;%s", err, string(debug.Stack()))
+			os.Exit(1)
+			return
+		}
+	}()
+	if err := NewDbActuatorCommand().Execute(); err != nil {
+		fmt.Fprint(os.Stderr, err.Error())
+		logger.Error("NewDbActuatorCommand run failed:%s", err.Error())
+		os.Exit(1)
+	}
+}
+
+// NewDbActuatorCommand builds the root cobra command with all subcommand groups registered
+func NewDbActuatorCommand() *cobra.Command {
+	cmds := &cobra.Command{
+		Use: CMD,
+		Short: fmt.Sprintf(`Db Operation Command Line Interface
+	Version: %s
+	Githash: %s
+	Buildstamp:%s`, version, githash, buildstamp),
+		Args: cobra.OnlyValidArgs,
+		PersistentPreRun: func(cmd *cobra.Command, args []string) {
+			if !cmd.IsAvailableCommand() {
+				runHelp(cmd, args)
+				return
+			}
+			subcmd.SetLogger(subcmd.GBaseOptions)
+			if subcmd.PrintSubCommandHelper(cmd, subcmd.GBaseOptions) {
+				runHelp(cmd, args)
+			}
+			// periodically emit a standard heartbeat to stdout
+			startHeartbeat(10 * time.Second)
+		},
+		Run:        runHelp,
+		SuggestFor: []string{CMD},
+	}
+	groups := templates.CommandGroups{
+		{
+			Message: "sysinit operation sets",
+			Commands: []*cobra.Command{
+				sysinitcmd.NewSysInitCommand(),
+			},
+		},
+		{
+			Message: "crontab operation sets",
+			Commands: []*cobra.Command{
+				crontabcmd.ClearCrontabCommand(),
+			},
+		},
+		{
+			Message: "common operation sets",
+			Commands: []*cobra.Command{
+				commoncmd.NewCommonCommand(),
+			},
+		},
+		{
+			Message: "download operation sets",
+			Commands: []*cobra.Command{
+				commoncmd.NewDownloadCommand(),
+			},
+		},
+		{
+			Message: "es operation sets",
+			Commands: []*cobra.Command{
+				escmd.NewEsCommand(),
+			},
+		},
+		{
+			Message: "kafka operation sets",
+			Commands: []*cobra.Command{
+				kafkacmd.NewKafkaCommand(),
+			},
+		},
+		{
+			Message: "pulsar operation sets",
+			Commands: []*cobra.Command{
+				pulsarcmd.NewPulsarCommand(),
+			},
+		},
+		{
+			Message: "influxdb operation sets",
+			Commands: []*cobra.Command{
+				influxdbcmd.NewInfluxdbCommand(),
+			},
+		},
+		{
+			Message: "hdfs operation sets",
+			Commands: []*cobra.Command{
+				hdfscmd.HdfsCommand(),
+			},
+		},
+	}
+	groups.Add(cmds)
+	// A flag can be "persistent", meaning it is available to the command it is assigned to and to every command beneath it. For global flags, register the flag as persistent on the root command.
+	// Every subcommand therefore carries these parameters by default.
+	cmds.PersistentFlags().StringVarP(&subcmd.GBaseOptions.Payload, "payload", "p", subcmd.GBaseOptions.Payload,
+		"command payload")
+	cmds.PersistentFlags().StringVarP(&subcmd.GBaseOptions.PayloadFormat, "payload-format", "m",
+		subcmd.GBaseOptions.PayloadFormat, "command payload format, default base64, value_allowed: base64|raw")
+	cmds.PersistentFlags().StringVarP(&subcmd.GBaseOptions.Uid, "uid", "U", subcmd.GBaseOptions.Uid, "单据id")
+	cmds.PersistentFlags().StringVarP(&subcmd.GBaseOptions.RootId, "root_id", "R", subcmd.GBaseOptions.RootId, "流程id")
+	cmds.PersistentFlags().StringVarP(&subcmd.GBaseOptions.NodeId, "node_id", "N", subcmd.GBaseOptions.NodeId, "节点id")
+	cmds.PersistentFlags().StringVarP(&subcmd.GBaseOptions.VersionId, "version_id", "V", subcmd.GBaseOptions.VersionId,
+		"运行版本id")
+	cmds.PersistentFlags().BoolVarP(&subcmd.GBaseOptions.ShowPayload, "show-payload", "x", subcmd.GBaseOptions.ShowPayload,
+		"show payload for man")
+	cmds.PersistentFlags().BoolVarP(&subcmd.GBaseOptions.RollBack, "rollback", "r", subcmd.GBaseOptions.RollBack, "回滚任务")
+	cmds.PersistentFlags().BoolVarP(&subcmd.GBaseOptions.Helper, "helper", "E", subcmd.GBaseOptions.Helper, "payload参数说明")
+	// @todo add --daemon mode to serve http to call subcmd/components
+	return cmds
+}
+
+func runHelp(cmd *cobra.Command, args []string) {
+	cmd.Help()
+	os.Exit(1)
+}
+
+// startHeartbeat periodically writes a heartbeat line to stdout
+func startHeartbeat(period time.Duration) {
+	go func() {
+		ticker := time.NewTicker(period)
+		defer ticker.Stop()
+		var heartbeatTime string
+		for {
+			select {
+			case <-ticker.C:
+				heartbeatTime = time.Now().Local().Format(cst.TIMELAYOUT)
+				fmt.Fprintf(os.Stdout, "["+heartbeatTime+"]heartbeating ...\n")
+			}
+		}
+	}()
+}
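For reference, the persistent `--payload` flag registered above carries a component's parameters as a JSON document, base64-encoded by default (`--payload-format base64|raw`). Below is a minimal sketch of that decoding flow, assuming a hypothetical parameter struct; the real code deserializes into each component's own param struct inside the `subcmd` package:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// demoParam is a hypothetical payload shape; the json keys mirror the
// file-server example used elsewhere in this patch.
type demoParam struct {
	Extend struct {
		BindAddress string `json:"bind_address"`
		MountPath   string `json:"mount_path"`
	} `json:"extend"`
}

func main() {
	// What a caller would pass, before base64 encoding:
	raw := `{"extend":{"bind_address":":8082","mount_path":"/data/dbbak"}}`
	payload := base64.StdEncoding.EncodeToString([]byte(raw))

	// What the actuator then does with --payload-format base64 (sketch):
	decoded, err := base64.StdEncoding.DecodeString(payload)
	if err != nil {
		panic(err)
	}
	var p demoParam
	if err := json.Unmarshal(decoded, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.Extend.BindAddress, p.Extend.MountPath)
}
```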
diff --git a/dbm-services/bigdata/db-tools/dbactuator/docs/.gitkeep b/dbm-services/bigdata/db-tools/dbactuator/docs/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/bigdata/db-tools/dbactuator/docs/dbactuator.md b/dbm-services/bigdata/db-tools/dbactuator/docs/dbactuator.md
new file mode 100644
index 0000000000..3fa75ae070
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/docs/dbactuator.md
@@ -0,0 +1,30 @@
+# dbactuator
+
+A collection of database operation commands. It implements atomic tasks such as deploying MySQL, Proxy, monitoring and backup, and applying MySQL/Proxy changes; an upstream pipeline orchestrates these commands into different scenario-specific jobs.
+```
+Db Operation Command Line Interface
+Version: 0.0.1
+Githash: 212617a717c3a3a968eb0c7d3a2c4ea2bc21abc2
+Buildstamp:2022-05-27_06:42:56AM
+
+Usage:
+  dbactuator [flags]
+  dbactuator [command]
+
+Available Commands:
+  completion  Generate the autocompletion script for the specified shell
+  help        Help about any command
+  mysql       MySQL Operation Command Line Interface
+  proxy       MySQL Proxy Operation Command Line Interface
+  sysinit     Exec sysinit_mysql.sh,Init mysql default os user,password
+
+Flags:
+  -h, --help             help for dbactuator
+  -n, --node_id string   节点id
+  -p, --payload string   command payload
+  -r, --rollback         回滚任务
+  -x, --show-payload     show payload for man
+  -u, --uid string       单据id
+
+Use "dbactuator
[command] --help" for more information about a command. +``` \ No newline at end of file diff --git a/dbm-services/bigdata/db-tools/dbactuator/docs/docs.go b/dbm-services/bigdata/db-tools/dbactuator/docs/docs.go new file mode 100644 index 0000000000..67384e3939 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/docs/docs.go @@ -0,0 +1,2 @@ +// Package docs TODO +package docs diff --git a/dbm-services/bigdata/db-tools/dbactuator/docs/embed_docs.go b/dbm-services/bigdata/db-tools/dbactuator/docs/embed_docs.go new file mode 100644 index 0000000000..f7c1683e24 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/docs/embed_docs.go @@ -0,0 +1,8 @@ +package docs + +import "embed" + +// SwaggerDocs TODO +// +//go:embed swagger.json +var SwaggerDocs embed.FS diff --git a/dbm-services/bigdata/db-tools/dbactuator/docs/swagger.json b/dbm-services/bigdata/db-tools/dbactuator/docs/swagger.json new file mode 100644 index 0000000000..0335f91806 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/docs/swagger.json @@ -0,0 +1,346 @@ +{ + "schemes": [ + "http" + ], + "swagger": "2.0", + "info": { + "description": "This is a dbactuator command collection.", + "title": "dbactuator API", + "termsOfService": "http://swagger.io/terms/", + "contact": { + "name": "API Support", + "url": "http://www.swagger.io/support", + "email": "support@swagger.io" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + }, + "version": "0.0.1" + }, + "host": "./dbactuator", + "basePath": "/", + "paths": { + "/common/file-server": { + "post": { + "description": "通过 http 暴露指定目录可用于下载,可用于在重建备库时,从其它机器下载备份\n在 OS 不允许 ssh 登录(scp/sftp)时,可以临时启动该服务来获取备份文件", + "consumes": [ + "application/json" + ], + "tags": [ + "common" + ], + "summary": "简单文件服务", + "parameters": [ + { + "description": "short description", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/dbm-services_bigdata_db-tools_dbactuator_pkg_components_fileserver.FileServerComp" + } + } + ], + "responses": {} + } + }, + "/common/rm-file": { + "post": { + "consumes": [ + "application/json" + ], + "tags": [ + "common" + ], + "summary": "限速删除大文件", + "parameters": [ + { + "description": "short description", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/internal_subcmd_commoncmd.RMLargeFileParam" + } + } + ], + "responses": {} + } + }, + "/download/http": { + "post": { + "description": "支持限速、basicAuth 认证. 
一般配合 common fileserver 使用\n# server1\n./dbactuator common file-server \\\n--payload-format raw \\\n--payload '{\"extend\":{\"bind_address\":\":8082\",\"mount_path\":\"/data/dbbak\",\"user\":\"xiaog\",\"password\":\"xxxx\",\"proc_maxidle_duration\":\"60s\"}}'\n\n# server2\ncurl -u 'xiaog:xxxx' 'http://server1:8082/datadbbak8082/dbactuator' -o dbactuator.bin --limit-rate 10k", + "consumes": [ + "application/json" + ], + "tags": [ + "common" + ], + "summary": "http下载文件", + "parameters": [ + { + "description": "short description", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.DFHttpParam" + } + } + ], + "responses": {} + } + }, + "/download/scp": { + "post": { + "description": "支持限速", + "consumes": [ + "application/json" + ], + "tags": [ + "common" + ], + "summary": "scp下载文件", + "parameters": [ + { + "description": "short description", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.DFScpParam" + } + } + ], + "responses": {} + } + } + }, + "definitions": { + "dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.DFHttpParam": { + "type": "object", + "required": [ + "file_list", + "path_tgt", + "server" + ], + "properties": { + "auth_pass": { + "description": "http url basic auth pass", + "type": "string" + }, + "auth_user": { + "description": "http url basic auth user", + "type": "string" + }, + "bk_biz_id": { + "type": "integer" + }, + "bwlimit_mb": { + "description": "单文件下载限速,单位 MB/s", + "type": "integer" + }, + "curl_options": { + "type": "array", + "items": { + "type": "string" + } + }, + "curl_path": { + "description": "curl 命令路径,默认留空. 
目前只用于测试 url", + "type": "string" + }, + "file_list": { + "description": "下载哪些文件", + "type": "array", + "items": { + "type": "string" + } + }, + "max_concurrency": { + "description": "并发下载数", + "type": "integer" + }, + "path_tgt": { + "description": "文件存放到本机哪个目录", + "type": "string" + }, + "server": { + "description": "下载 url", + "type": "string" + } + } + }, + "dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.DFScpParam": { + "type": "object", + "required": [ + "file_src", + "file_tgt" + ], + "properties": { + "bk_biz_id": { + "type": "integer" + }, + "bwlimit_mb": { + "description": "单文件下载限速,单位 MB/s", + "type": "integer" + }, + "file_src": { + "description": "下载源", + "allOf": [ + { + "$ref": "#/definitions/dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.FileSrc" + } + ] + }, + "file_tgt": { + "description": "下载目标", + "allOf": [ + { + "$ref": "#/definitions/dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.FileTgt" + } + ] + }, + "max_concurrency": { + "description": "并发下载数", + "type": "integer" + } + } + }, + "dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.FileSrc": { + "type": "object", + "required": [ + "file_list", + "path", + "ssh_host", + "ssh_port", + "ssh_user" + ], + "properties": { + "file_list": { + "description": "源文件名列表,相对上面的 path", + "type": "array", + "items": { + "type": "string" + } + }, + "match": { + "type": "string" + }, + "path": { + "description": "源文件所在目录", + "type": "string" + }, + "ssh_host": { + "type": "string" + }, + "ssh_pass": { + "type": "string" + }, + "ssh_port": { + "type": "string" + }, + "ssh_user": { + "type": "string" + } + } + }, + "dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.FileTgt": { + "type": "object", + "required": [ + "path" + ], + "properties": { + "path": { + "description": "文件下载目标目录", + "type": "string" + } + } + }, + "dbm-services_bigdata_db-tools_dbactuator_pkg_components_fileserver.FileServer": { + "type": "object", + "required": [ + "auth_user", + "bind_address", + "mount_path" + ], + "properties": { + "acls": { + "description": "访问来源限制,从前往后匹配。格式 `[\"allow 127.0.0.1/32\", \"deny all\"]`", + "type": "array", + "items": { + "type": "string" + }, + "example": [ + "allow all" + ] + }, + "auth_pass": { + "description": "http basic auth pass,为空时会随机生成密码", + "type": "string" + }, + "auth_user": { + "description": "http basic auth user", + "type": "string" + }, + "bind_address": { + "description": "http file-server 监听地址. 不提供端口,会在 12000-19999 之间随机选择一个端口,不提供 ip 时默认 localhost", + "type": "string" + }, + "enable_tls": { + "description": "暂不支持", + "type": "boolean" + }, + "max_connections": { + "description": "限制最大连接数,超过需要等待. 为 0 时表示不限制", + "type": "integer" + }, + "mount_path": { + "description": "将本地哪个目录通过 http 分享", + "type": "string" + }, + "path_prefix": { + "description": "path_prefix 用在生成 url 时的路径前缀. 可留空", + "type": "string" + }, + "print_download": { + "description": "输出 download http 的信息,方便使用", + "type": "boolean" + }, + "proc_maxidle_duration": { + "description": "超过最大空闲时间,自动退出. 
示例 3600s, 60m, 1h", + "type": "string", + "example": "1h" + } + } + }, + "dbm-services_bigdata_db-tools_dbactuator_pkg_components_fileserver.FileServerComp": { + "type": "object", + "properties": { + "extend": { + "$ref": "#/definitions/dbm-services_bigdata_db-tools_dbactuator_pkg_components_fileserver.FileServer" + } + } + }, + "internal_subcmd_commoncmd.RMLargeFileParam": { + "type": "object", + "required": [ + "bw_limit_mb", + "filename" + ], + "properties": { + "bw_limit_mb": { + "description": "删除速度,MB/s,默认 30", + "type": "integer", + "default": 30, + "maximum": 1000, + "minimum": 1 + }, + "filename": { + "type": "string" + } + } + } + } +} \ No newline at end of file diff --git a/dbm-services/bigdata/db-tools/dbactuator/docs/swagger.yaml b/dbm-services/bigdata/db-tools/dbactuator/docs/swagger.yaml new file mode 100644 index 0000000000..be478f303f --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/docs/swagger.yaml @@ -0,0 +1,250 @@ +basePath: / +definitions: + dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.DFHttpParam: + properties: + auth_pass: + description: http url basic auth pass + type: string + auth_user: + description: http url basic auth user + type: string + bk_biz_id: + type: integer + bwlimit_mb: + description: 单文件下载限速,单位 MB/s + type: integer + curl_options: + items: + type: string + type: array + curl_path: + description: curl 命令路径,默认留空. 目前只用于测试 url + type: string + file_list: + description: 下载哪些文件 + items: + type: string + type: array + max_concurrency: + description: 并发下载数 + type: integer + path_tgt: + description: 文件存放到本机哪个目录 + type: string + server: + description: 下载 url + type: string + required: + - file_list + - path_tgt + - server + type: object + dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.DFScpParam: + properties: + bk_biz_id: + type: integer + bwlimit_mb: + description: 单文件下载限速,单位 MB/s + type: integer + file_src: + allOf: + - $ref: '#/definitions/dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.FileSrc' + description: 下载源 + file_tgt: + allOf: + - $ref: '#/definitions/dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.FileTgt' + description: 下载目标 + max_concurrency: + description: 并发下载数 + type: integer + required: + - file_src + - file_tgt + type: object + dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.FileSrc: + properties: + file_list: + description: 源文件名列表,相对上面的 path + items: + type: string + type: array + match: + type: string + path: + description: 源文件所在目录 + type: string + ssh_host: + type: string + ssh_pass: + type: string + ssh_port: + type: string + ssh_user: + type: string + required: + - file_list + - path + - ssh_host + - ssh_port + - ssh_user + type: object + dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.FileTgt: + properties: + path: + description: 文件下载目标目录 + type: string + required: + - path + type: object + dbm-services_bigdata_db-tools_dbactuator_pkg_components_fileserver.FileServer: + properties: + acls: + description: 访问来源限制,从前往后匹配。格式 `["allow 127.0.0.1/32", "deny all"]` + example: + - allow all + items: + type: string + type: array + auth_pass: + description: http basic auth pass,为空时会随机生成密码 + type: string + auth_user: + description: http basic auth user + type: string + bind_address: + description: http file-server 监听地址. 不提供端口,会在 12000-19999 之间随机选择一个端口,不提供 ip + 时默认 localhost + type: string + enable_tls: + description: 暂不支持 + type: boolean + max_connections: + description: 限制最大连接数,超过需要等待. 
为 0 时表示不限制 + type: integer + mount_path: + description: 将本地哪个目录通过 http 分享 + type: string + path_prefix: + description: path_prefix 用在生成 url 时的路径前缀. 可留空 + type: string + print_download: + description: 输出 download http 的信息,方便使用 + type: boolean + proc_maxidle_duration: + description: 超过最大空闲时间,自动退出. 示例 3600s, 60m, 1h + example: 1h + type: string + required: + - auth_user + - bind_address + - mount_path + type: object + dbm-services_bigdata_db-tools_dbactuator_pkg_components_fileserver.FileServerComp: + properties: + extend: + $ref: '#/definitions/dbm-services_bigdata_db-tools_dbactuator_pkg_components_fileserver.FileServer' + type: object + internal_subcmd_commoncmd.RMLargeFileParam: + properties: + bw_limit_mb: + default: 30 + description: 删除速度,MB/s,默认 30 + maximum: 1000 + minimum: 1 + type: integer + filename: + type: string + required: + - bw_limit_mb + - filename + type: object +host: ./dbactuator +info: + contact: + email: support@swagger.io + name: API Support + url: http://www.swagger.io/support + description: This is a dbactuator command collection. + license: + name: Apache 2.0 + url: http://www.apache.org/licenses/LICENSE-2.0.html + termsOfService: http://swagger.io/terms/ + title: dbactuator API + version: 0.0.1 +paths: + /common/file-server: + post: + consumes: + - application/json + description: |- + 通过 http 暴露指定目录可用于下载,可用于在重建备库时,从其它机器下载备份 + 在 OS 不允许 ssh 登录(scp/sftp)时,可以临时启动该服务来获取备份文件 + parameters: + - description: short description + in: body + name: body + required: true + schema: + $ref: '#/definitions/dbm-services_bigdata_db-tools_dbactuator_pkg_components_fileserver.FileServerComp' + responses: {} + summary: 简单文件服务 + tags: + - common + /common/rm-file: + post: + consumes: + - application/json + parameters: + - description: short description + in: body + name: body + required: true + schema: + $ref: '#/definitions/internal_subcmd_commoncmd.RMLargeFileParam' + responses: {} + summary: 限速删除大文件 + tags: + - common + /download/http: + post: + consumes: + - application/json + description: |- + 支持限速、basicAuth 认证. 
一般配合 common fileserver 使用 + # server1 + ./dbactuator common file-server \ + --payload-format raw \ + --payload '{"extend":{"bind_address":":8082","mount_path":"/data/dbbak","user":"xiaog","password":"xxxx","proc_maxidle_duration":"60s"}}' + + # server2 + curl -u 'xiaog:xxxx' 'http://server1:8082/datadbbak8082/dbactuator' -o dbactuator.bin --limit-rate 10k + parameters: + - description: short description + in: body + name: body + required: true + schema: + $ref: '#/definitions/dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.DFHttpParam' + responses: {} + summary: http下载文件 + tags: + - common + /download/scp: + post: + consumes: + - application/json + description: 支持限速 + parameters: + - description: short description + in: body + name: body + required: true + schema: + $ref: '#/definitions/dbm-services_bigdata_db-tools_dbactuator_pkg_components_backup_download.DFScpParam' + responses: {} + summary: scp下载文件 + tags: + - common +schemes: +- http +swagger: "2.0" diff --git a/dbm-services/bigdata/db-tools/dbactuator/example/install-hdfs.md b/dbm-services/bigdata/db-tools/dbactuator/example/install-hdfs.md new file mode 100644 index 0000000000..4d323c6fa1 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/example/install-hdfs.md @@ -0,0 +1,31 @@ +# dbactuator hdfs xxx + +## hdfs集群部署 + + +### 原始payload +``` +{ + "general": {}, + "extend": { + "host": "127.x.x.x", --本机IP + -- 集群配置 + + "core-site": { + "fs.defaultFS": "hdfs://{{cluster_name}}", + "fs.trash.interval": "1440", + "io.file.buffer.size": "131072", + "net.topology.script.file.name": "/data/hadoopenv/hadoop/etc/hadoop/rack-aware.sh" + }, + "hdfs-site": {...} + "zoo.cfg": {...} + "install": {...} + "cluster_name": "richie-hdfs" + "http_port": 50070, + "rpc_port": 9000, + + + } +} + +``` \ No newline at end of file diff --git a/dbm-services/bigdata/db-tools/dbactuator/go.mod b/dbm-services/bigdata/db-tools/dbactuator/go.mod new file mode 100644 index 0000000000..0d21e151c7 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/go.mod @@ -0,0 +1,52 @@ +module dbm-services/bigdata/db-tools/dbactuator + +go 1.19 + +require ( + github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2 + github.com/go-ini/ini v1.67.0 + github.com/go-playground/validator/v10 v10.12.0 + github.com/golang/glog v1.1.1 + github.com/pkg/errors v0.9.1 + github.com/robfig/cron/v3 v3.0.1 + github.com/spf13/cobra v1.7.0 + go.uber.org/zap v1.24.0 + gopkg.in/ini.v1 v1.67.0 +) + +require ( + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/kr/fs v0.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/yusufpapurcu/wmi v1.2.2 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect +) + +require ( + github.com/MakeNowJust/heredoc v1.0.0 + github.com/dustin/go-humanize v1.0.1 + github.com/elastic/go-elasticsearch/v7 v7.17.1 + github.com/fatih/color v1.13.0 // indirect + github.com/go-playground/locales v0.14.1 + github.com/go-playground/universal-translator v0.18.1 + github.com/hashicorp/go-version v1.6.0 + github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f // indirect + github.com/juju/ratelimit v1.0.2 + github.com/kr/pretty v0.3.0 // indirect + github.com/leodido/go-urn v1.2.3 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mattn/go-isatty v0.0.18 // indirect + github.com/pkg/sftp v1.13.5 + github.com/rogpeppe/go-internal v1.8.0 // indirect + 
github.com/russross/blackfriday v1.6.0 + github.com/shirou/gopsutil v3.21.11+incompatible + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/goleak v1.1.12 // indirect + go.uber.org/multierr v1.8.0 // indirect + golang.org/x/crypto v0.8.0 + golang.org/x/net v0.9.0 + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/yaml.v2 v2.4.0 +) diff --git a/dbm-services/bigdata/db-tools/dbactuator/go.sum b/dbm-services/bigdata/db-tools/dbactuator/go.sum new file mode 100644 index 0000000000..5f551cb68f --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/go.sum @@ -0,0 +1,157 @@ +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= +github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2 h1:ZBbLwSJqkHBuFDA6DUhhse0IGJ7T5bemHyNILUjvOq4= +github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2/go.mod h1:VSw57q4QFiWDbRnjdX8Cb3Ow0SFncRw+bA/ofY6Q83w= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/elastic/go-elasticsearch/v7 v7.17.1 h1:49mHcHx7lpCL8cW1aioEwSEVKQF3s+Igi4Ye/QTWwmk= +github.com/elastic/go-elasticsearch/v7 v7.17.1/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI= +github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA= +github.com/golang/glog v1.1.1 h1:jxpi2eWoU84wbX9iIEyAeeoac3FLuifZpY9tcNUD9kw= +github.com/golang/glog v1.1.1/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f 
h1:7LYC+Yfkj3CTRcShK0KOL/w6iTiKyqqBA9a41Wnggw8= +github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI= +github.com/juju/ratelimit v1.0.2/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA= +github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go= +github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shirou/gopsutil v3.21.11+incompatible 
h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= +github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= 
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/cmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/cmd.go new file mode 100644 index 0000000000..212fb26ec0 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/cmd.go @@ -0,0 +1,48 @@ +package commoncmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates" + + "github.com/spf13/cobra" +) + +// NewCommonCommand TODO +// @todo 将来可以把 download 单独作为一个子命令 +func NewCommonCommand() *cobra.Command { + cmds := &cobra.Command{ + Use: "common [common operation]", + Short: "Common components Operation Command Line Interface", + RunE: subcmd.ValidateSubCommand(), + } + groups := templates.CommandGroups{ + { + Message: "common operation sets", + Commands: []*cobra.Command{ + CommandFileServer(), + RMLargeFileCommand(), + }, + }, + } + groups.Add(cmds) + return cmds +} + +// NewDownloadCommand TODO +func NewDownloadCommand() *cobra.Command { + cmds := &cobra.Command{ + Use: "download [download operation]", + Short: "download components Operation Command Line Interface", + } + groups := templates.CommandGroups{ + { + Message: "download operation sets", + Commands: []*cobra.Command{ + CommandDownloadScp(), + CommandDownloadHttp(), + }, + }, + } + groups.Add(cmds) + return cmds +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/commoncmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/commoncmd.go new file mode 100644 index 0000000000..b9608079a6 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/commoncmd.go @@ -0,0 +1,2 @@ +// Package commoncmd TODO +package commoncmd diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/download_http.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/download_http.go new file mode 100644 index 0000000000..8c23687df1 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/download_http.go @@ -0,0 +1,100 @@ +package commoncmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "fmt" + + "github.com/spf13/cobra" +) + +// DownloadHttpAct TODO +type DownloadHttpAct struct { + *subcmd.BaseOptions + Payload backup_download.DFHttpComp +} + +// CommandDownloadHttp godoc +// +// @Summary http下载文件 +// 
@Description 支持限速、basicAuth 认证. 一般配合 common fileserver 使用 +// @Description # server1 +// @Description ./dbactuator common file-server \ +// @Description --payload-format raw \ +// @Description --payload '{"extend":{"bind_address":":8082","mount_path":"/data/dbbak","user":"xiaog","password":"xxxx","proc_maxidle_duration":"60s"}}' +// @Description +// @Description # server2 +// @Description curl -u 'xiaog:xxxx' 'http://server1:8082/datadbbak8082/dbactuator' -o dbactuator.bin --limit-rate 10k +// @Tags common +// @Accept json +// @Param body body backup_download.DFHttpParam true "short description" +// @Router /download/http [post] +func CommandDownloadHttp() *cobra.Command { + act := DownloadHttpAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "http", + Short: "http下载文件", + Example: fmt.Sprintf(`dbactuator download http %s %s`, + subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example())), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Init TODO +func (d *DownloadHttpAct) Init() (err error) { + if err = d.BaseOptions.Validate(); err != nil { // @todo 应该在一开始就validate + return err + } + if err = d.Deserialize(&d.Payload.Params); err != nil { + logger.Error("DeserializeAndValidate err %s", err.Error()) + return err + } + return +} + +// Validate TODO +func (d *DownloadHttpAct) Validate() error { + return nil +} + +// Run TODO +func (d *DownloadHttpAct) Run() error { + steps := subcmd.Steps{ + { + FunName: "测试目标连接性", + Func: d.Payload.Init, + }, + { + FunName: "下载预检查", + Func: d.Payload.PreCheck, + }, + { + FunName: "开始下载", + Func: d.Payload.Start, + }, + { + FunName: "等待下载完成", + Func: d.Payload.WaitDone, + }, + { + FunName: "完成校验", + Func: d.Payload.PostCheck, + }, + } + + if err := steps.Run(); err != nil { + return err + } + + logger.Info("download files successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/download_scp.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/download_scp.go new file mode 100644 index 0000000000..86af52efa2 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/download_scp.go @@ -0,0 +1,92 @@ +package commoncmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "fmt" + + "github.com/spf13/cobra" +) + +// DownloadScpAct TODO +type DownloadScpAct struct { + *subcmd.BaseOptions + Payload backup_download.DFScpComp +} + +// CommandDownloadScp godoc +// +// @Summary scp下载文件 +// @Description 支持限速 +// @Tags common +// @Accept json +// @Param body body backup_download.DFScpParam true "short description" +// @Router /download/scp [post] +func CommandDownloadScp() *cobra.Command { + act := DownloadScpAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "scp", + Short: "scp下载文件", + Example: fmt.Sprintf(`dbactuator download scp %s %s`, + subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example())), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Init TODO +func (d *DownloadScpAct) Init() (err error) { + if err = d.BaseOptions.Validate(); err != nil { // @todo 
validation should happen up front
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("Deserialize err %s", err.Error())
+		return err
+	}
+	return
+}
+
+// Validate is a no-op placeholder
+func (d *DownloadScpAct) Validate() error {
+	return nil
+}
+
+// Run executes the scp download pipeline step by step
+func (d *DownloadScpAct) Run() error {
+	steps := subcmd.Steps{
+		{
+			FunName: "test target connectivity",
+			Func:    d.Payload.Init,
+		},
+		{
+			FunName: "download precheck",
+			Func:    d.Payload.PreCheck,
+		},
+		{
+			FunName: "start download",
+			Func:    d.Payload.Start,
+		},
+		{
+			FunName: "wait for download to finish",
+			Func:    d.Payload.WaitDone,
+		},
+		{
+			FunName: "post-download check",
+			Func:    d.Payload.PostCheck,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("download files successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/fileserver.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/fileserver.go
new file mode 100644
index 0000000000..0084549dda
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/fileserver.go
@@ -0,0 +1,85 @@
+package commoncmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/fileserver"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// FileServerAct holds the base options and the file server component
+type FileServerAct struct {
+	*subcmd.BaseOptions
+	Payload fileserver.FileServerComp
+}
+
+// CommandFileServer godoc
+//
+// @Summary  a simple file server
+// @Description exposes a directory over http for download, e.g. to fetch a backup from another machine when rebuilding a standby
+// @Description when the OS forbids ssh logins (scp/sftp), this service can be started temporarily to fetch backup files
+// @Tags common
+// @Accept json
+// @Param body body fileserver.FileServerComp true "short description"
+// @Router /common/file-server [post]
func CommandFileServer() *cobra.Command {
+	act := FileServerAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "file-server",
+		Short: "start the file server",
+		Example: fmt.Sprintf(`dbactuator common file-server %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example())),
+		Run: func(cmd *cobra.Command, args []string) {
+			// util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init validates the base options and deserializes the payload
+func (d *FileServerAct) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil { // @todo validation should happen up front
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("Deserialize err %s", err.Error())
+		return err
+	}
+	// d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Run starts the file server and blocks until it finishes
+func (d *FileServerAct) Run() error {
+	steps := subcmd.Steps{
+		{
+			FunName: "initialize parameters",
+			Func:    d.Payload.Params.New,
+		},
+		{
+			FunName: "start fileserver",
+			Func:    d.Payload.Params.Start,
+		},
+		{
+			FunName: "wait until done",
+			Func:    d.Payload.Params.WaitDone,
+		},
+		{
+			FunName: "print download info if configured",
+			Func:    d.Payload.Params.OutputCtx,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	// logger.Info("fileserver start successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/rm_large_file.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/rm_large_file.go
new file mode 100644
index 0000000000..f8ed184758
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/commoncmd/rm_large_file.go
@@ -0,0 +1,119 @@
+package commoncmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+)
+
+// RMLargeFileCmd holds the base options and payload for the rm-file subcommand
+type RMLargeFileCmd struct {
+	*subcmd.BaseOptions
+	Payload RMLargeFileParam
+}
+
+// RMLargeFileParam is the payload of the rm-file subcommand
+type RMLargeFileParam struct {
+	Filename string `json:"filename" validate:"required"`
+	// deletion rate in MB/s, defaults to 30
+	BWLimitMB int `json:"bw_limit_mb" validate:"required,gte=1,lte=1000" default:"30"`
+}
+
+// Example returns a sample payload
+func (p RMLargeFileParam) Example() interface{} {
+	comp := RMLargeFileParam{
+		Filename:  "xxx",
+		BWLimitMB: 30,
+	}
+	return comp
+}
+
+// PreCheck validates the target file; it uses a pointer receiver so the
+// default bandwidth limit it applies is still visible to the Start step
+func (p *RMLargeFileParam) PreCheck() error {
+	if !util.FileExists(p.Filename) {
+		return errors.Errorf("file not exists %s", p.Filename)
+	} else if util.IsDirectory(p.Filename) {
+		return errors.Errorf("path is directory %s", p.Filename)
+	}
+	if p.BWLimitMB == 0 {
+		p.BWLimitMB = 30
+	}
+	// writable?
+	return nil
+}
+
+// Start truncates the file at the configured rate
+func (p RMLargeFileParam) Start() error {
+	if err := osutil.TruncateFile(p.Filename, p.BWLimitMB); err != nil {
+		logger.Error(errors.WithStack(err).Error())
+		return err
+	}
+	return nil
+}
+
+// RMLargeFileCommand godoc
+//
+// @Summary  remove a large file with bandwidth limit
+// @Tags common
+// @Accept json
+// @Param body body RMLargeFileParam true "short description"
+// @Router /common/rm-file [post]
func RMLargeFileCommand() *cobra.Command {
+	act := RMLargeFileCmd{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "rm-file",
+		Short: "remove a large file with bandwidth limit",
+		Example: fmt.Sprintf(`dbactuator common rm-file %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example())),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init validates the base options and deserializes the payload
+func (d *RMLargeFileCmd) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil { // @todo validation should happen up front
+		return err
+	}
+	if err = d.DeserializeSimple(&d.Payload); err != nil {
+		logger.Error("DeserializeSimple err %s", err.Error())
+		return err
+	}
+	return
+}
+
+// Validate is a no-op placeholder
+func (d *RMLargeFileCmd) Validate() error {
+	return nil
+}
+
+// Run executes the precheck and delete steps
+func (d *RMLargeFileCmd) Run() error {
+	steps := subcmd.Steps{
+		{
+			FunName: "precheck",
+			Func:    d.Payload.PreCheck,
+		},
+		{
+			FunName: "delete",
+			Func:    d.Payload.Start,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("rm file %s successfully", d.Payload.Filename)
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/crontabcmd/clear_crontab.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/crontabcmd/clear_crontab.go
new file mode 100644
index 0000000000..7d3e04e54d
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/crontabcmd/clear_crontab.go
@@ -0,0 +1,64 @@
+package crontabcmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/crontab"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// ClearCrontabAct holds the base options and the crontab cleanup service
+type ClearCrontabAct struct {
+	*subcmd.BaseOptions
+	Service crontab.ClearCrontabParam
+}
+
+// ClearCrontabCommand builds the clear-crontab subcommand
+func ClearCrontabCommand() *cobra.Command {
+	act := ClearCrontabAct{
+
BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "clear-crontab", + Short: "清理crontab", + Example: fmt.Sprintf(`dbactuator clear-crontab %s`, subcmd.CmdBaseExampleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Init TODO +func (d *ClearCrontabAct) Init() (err error) { + if err = d.DeserializeAndValidate(&d.Service); err != nil { + logger.Error("DeserializeAndValidate err %s", err.Error()) + return err + } + return +} + +// Run TODO +func (s *ClearCrontabAct) Run() (err error) { + steps := []subcmd.StepFunc{ + { + FunName: "清理机器的crontab", + Func: s.Service.CleanCrontab, + }, + } + logger.Info("start clean crontab ...") + for idx, f := range steps { + if err = f.Func(); err != nil { + logger.Error("step <%d>, run [%s] occur %v", idx, f.FunName, err) + return err + } + logger.Info("step <%d>, run [%s] successfully", idx, f.FunName) + } + logger.Info("clean crontab successfully") + return +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/crontabcmd/crontabcmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/crontabcmd/crontabcmd.go new file mode 100644 index 0000000000..cce8a4a6ca --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/crontabcmd/crontabcmd.go @@ -0,0 +1,2 @@ +// Package crontabcmd TODO +package crontabcmd diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_connections.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_connections.go new file mode 100644 index 0000000000..546ad42812 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_connections.go @@ -0,0 +1,103 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CheckConnectionsAct TODO +type CheckConnectionsAct struct { + *subcmd.BaseOptions + Service elasticsearch.ExcludeEsNodeComp +} + +// CheckConnectionsCommand TODO +func CheckConnectionsCommand() *cobra.Command { + act := CheckConnectionsAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "check_connections ", + Short: "检查活动连接", + Example: fmt.Sprintf(`dbactuator es check_connections %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *CheckConnectionsAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *CheckConnectionsAct) Init() (err error) { + logger.Info("CheckConnectionsAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *CheckConnectionsAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } 
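+	// Every act in this file set shares this rollback contract: a forward run
+	// that fails prints its rollback context as JSON, and a later invocation
+	// deserializes that JSON back into rollback.RollBackObjects and replays
+	// it, as below. A minimal sketch of the idea with illustrative names
+	// (the real rollback package may define this differently):
+	//
+	//	type RollBackObject struct {
+	//		FileName string `json:"file_name"` // hypothetical field
+	//	}
+	//
+	//	type RollBackObjects []RollBackObject
+	//
+	//	func (rs RollBackObjects) RollBack() error {
+	//		for _, obj := range rs {
+	//			if err := os.RemoveAll(obj.FileName); err != nil {
+	//				return err
+	//			}
+	//		}
+	//		return nil
+	//	}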
+ err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *CheckConnectionsAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "检查活动连接", + Func: d.Service.CheckConnections, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("check_connections successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_es.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_es.go new file mode 100644 index 0000000000..612fa025c2 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_es.go @@ -0,0 +1,103 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CheckEsHealthAct TODO +type CheckEsHealthAct struct { + *subcmd.BaseOptions + Service elasticsearch.CheckEsHealthComp +} + +// CheckEsHealthCommand TODO +func CheckEsHealthCommand() *cobra.Command { + act := CheckEsHealthAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "check_es", + Short: "检查es节点状态", + Example: fmt.Sprintf(`dbactuator es check_es %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *CheckEsHealthAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *CheckEsHealthAct) Init() (err error) { + logger.Info("CheckEsHealthAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *CheckEsHealthAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *CheckEsHealthAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "es节点健康检查", + Func: d.Service.CheckEsHealth, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("check_es successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_nodes.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_nodes.go new file mode 
100644 index 0000000000..07ca0ae236 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_nodes.go @@ -0,0 +1,103 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CheckEsNodesAct TODO +type CheckEsNodesAct struct { + *subcmd.BaseOptions + Service elasticsearch.CheckEsNodeComp +} + +// CheckNodesCommand TODO +func CheckNodesCommand() *cobra.Command { + act := CheckEsNodesAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "check_nodes", + Short: "检查扩容的节点数", + Example: fmt.Sprintf(`dbactuator es check_nodes %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *CheckEsNodesAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *CheckEsNodesAct) Init() (err error) { + logger.Info("CheckNodesAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *CheckEsNodesAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *CheckEsNodesAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "检查扩容节点数", + Func: d.Service.CheckEsNodes, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("check_nodes successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_shards.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_shards.go new file mode 100644 index 0000000000..b7f6bae658 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/check_shards.go @@ -0,0 +1,103 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CheckShardsAct TODO +type CheckShardsAct struct { + *subcmd.BaseOptions + Service elasticsearch.ExcludeEsNodeComp +} + +// CheckShardsCommand TODO +func CheckShardsCommand() *cobra.Command { + act := CheckShardsAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "check_shards", + Short: "检查分片数", + 
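+		// The Example below renders the shared payload skeleton. All es acts
+		// are driven the same way: parameters arrive as a JSON document on
+		// --payload. A hedged invocation sketch (the extend fields are
+		// illustrative, not the real ExcludeEsNodeComp parameters):
+		//
+		//	./dbactuator es check_shards \
+		//	    --payload-format raw \
+		//	    --payload '{"extend":{"http_port":9200}}'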
Example: fmt.Sprintf(`dbactuator es check_shards %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *CheckShardsAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *CheckShardsAct) Init() (err error) { + logger.Info("CheckShardsAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *CheckShardsAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *CheckShardsAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "检查分片", + Func: d.Service.CheckShards, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("check_shards successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/clean_data.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/clean_data.go new file mode 100644 index 0000000000..fa7342731f --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/clean_data.go @@ -0,0 +1,97 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CleanDataAct TODO +type CleanDataAct struct { + *subcmd.BaseOptions + Service elasticsearch.CleanDataComp +} + +// CleanDataCommand TODO +func CleanDataCommand() *cobra.Command { + act := CleanDataAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "clean_data", + Short: "清理es目录", + Example: fmt.Sprintf(`dbactuator es clean_data %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *CleanDataAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *CleanDataAct) Init() (err error) { + logger.Info("CleanDataAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *CleanDataAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = 
d.DeserializeAndValidate(&r); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	err = r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run executes the clean_data step and prints the rollback context on failure
+func (d *CleanDataAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "clean data directory",
+			Func:    d.Service.CleanData,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("clean_data successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/cmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/cmd.go
new file mode 100644
index 0000000000..46eacdc7fc
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/cmd.go
@@ -0,0 +1,48 @@
+package escmd
+
+import (
+	. "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates"
+
+	"github.com/spf13/cobra"
+)
+
+// NewEsCommand builds the es command tree and registers all es subcommands
+func NewEsCommand() *cobra.Command {
+	cmds := &cobra.Command{
+		Use:   "es [es operation]",
+		Short: "ES Operation Command Line Interface",
+		RunE:  ValidateSubCommand(),
+	}
+	groups := templates.CommandGroups{
+		{
+			Message: "es operation sets",
+			Commands: []*cobra.Command{
+				InstallEsClientCommand(),
+				InstallEsColdCommand(),
+				InstallEsHotCommand(),
+				InstallEsMasterCommand(),
+				InstallKibanaCommand(),
+				InstallSupervisorCommand(),
+				InitCommand(),
+				DecompressEsPkgCommand(),
+				InitGrantCommand(),
+				InstallTelegrafCommand(),
+				InstallExporterCommand(),
+				ExcludeNodeCommand(),
+				CleanDataCommand(),
+				StartProcessCommand(),
+				StopProcessCommand(),
+				RestartProcessCommand(),
+				ReplaceMasterCommand(),
+				CheckShardsCommand(),
+				CheckConnectionsCommand(),
+				CheckNodesCommand(),
+			},
+		},
+	}
+	groups.Add(cmds)
+	return cmds
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/decompress_pkg.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/decompress_pkg.go
new file mode 100644
index 0000000000..8e7c6fc203
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/decompress_pkg.go
@@ -0,0 +1,103 @@
+package escmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// DecompressEsPkgAct holds the base options and the es install component
+type DecompressEsPkgAct struct {
+	*subcmd.BaseOptions
+	Service elasticsearch.InstallEsComp
+}
+
+// DecompressEsPkgCommand builds the decompress_pkg subcommand
+func DecompressEsPkgCommand() *cobra.Command {
+	act := DecompressEsPkgAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "decompress_pkg",
+		Short:   "decompress the es package",
+		Example: fmt.Sprintf(`dbactuator es decompress_pkg %s`, subcmd.CmdBaseExapmleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate checks the base options
+func (d *DecompressEsPkgAct) Validate() (err error) {
+	return 
d.BaseOptions.Validate() +} + +// Init TODO +func (d *DecompressEsPkgAct) Init() (err error) { + logger.Info("DecompressEsPkgAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *DecompressEsPkgAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *DecompressEsPkgAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "解压缩", + Func: d.Service.DecompressEsPkg, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("decompress_pkg successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/escmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/escmd.go new file mode 100644 index 0000000000..fa39059513 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/escmd.go @@ -0,0 +1,2 @@ +// Package escmd TODO +package escmd diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/exclude_node.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/exclude_node.go new file mode 100644 index 0000000000..47d79daf43 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/exclude_node.go @@ -0,0 +1,103 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// ExcludeNodeAct TODO +type ExcludeNodeAct struct { + *subcmd.BaseOptions + Service elasticsearch.ExcludeEsNodeComp +} + +// ExcludeNodeCommand TODO +func ExcludeNodeCommand() *cobra.Command { + act := ExcludeNodeAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "exclude_node", + Short: "剔除node", + Example: fmt.Sprintf(`dbactuator es exclude_node %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *ExcludeNodeAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *ExcludeNodeAct) Init() (err error) { + logger.Info("ExcludeNodeAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *ExcludeNodeAct) Rollback() (err error) { + var r 
rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *ExcludeNodeAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "剔除节点", + Func: d.Service.ExcludeEsNode, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("exclude_node successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/init.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/init.go new file mode 100644 index 0000000000..9f5afe55c5 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/init.go @@ -0,0 +1,103 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InitAct TODO +type InitAct struct { + *subcmd.BaseOptions + Service elasticsearch.InstallEsComp +} + +// InitCommand TODO +func InitCommand() *cobra.Command { + act := InitAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "init", + Short: "es初始化s", + Example: fmt.Sprintf(`dbactuator es init %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InitAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InitAct) Init() (err error) { + logger.Info("InitAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InitAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InitAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "初始化", + Func: d.Service.InitEsDirs, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("init successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/init_grant.go 
b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/init_grant.go new file mode 100644 index 0000000000..ee6a663f58 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/init_grant.go @@ -0,0 +1,103 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InitGrantAct TODO +type InitGrantAct struct { + *subcmd.BaseOptions + Service elasticsearch.InstallEsComp +} + +// InitGrantCommand TODO +func InitGrantCommand() *cobra.Command { + act := InitGrantAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "init_grant", + Short: "账号权限初始化", + Example: fmt.Sprintf(`dbactuator es init_grant %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InitGrantAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InitGrantAct) Init() (err error) { + logger.Info("InitGrantAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InitGrantAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InitGrantAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "账号权限初始化", + Func: d.Service.InitGrant, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("init_grant successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_client.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_client.go new file mode 100644 index 0000000000..2cd1a584ec --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_client.go @@ -0,0 +1,103 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallEsClientAct TODO +type InstallEsClientAct struct { + *subcmd.BaseOptions + Service elasticsearch.InstallEsComp +} + +// InstallEsClientCommand TODO +func InstallEsClientCommand() *cobra.Command { + act := InstallEsClientAct{ + BaseOptions: 
subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "install_client",
+		Short:   "deploy es client instance",
+		Example: fmt.Sprintf(`dbactuator es install_client %s`, subcmd.CmdBaseExapmleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate checks the base options
+func (d *InstallEsClientAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init deserializes the payload into the es install component
+func (d *InstallEsClientAct) Init() (err error) {
+	logger.Info("InstallEsClientAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("Deserialize failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return d.Service.InitDefaultParam()
+}
+
+// Rollback replays the saved rollback objects
+//
+//	@receiver d
+//	@return err
+func (d *InstallEsClientAct) Rollback() (err error) {
+	var r rollback.RollBackObjects
+	if err = d.DeserializeAndValidate(&r); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	err = r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run executes the install_client step
+func (d *InstallEsClientAct) Run() (err error) {
+	steps := subcmd.Steps{
+		/* Todo
+		{
+			FunName: "precheck",
+			Func:    d.Service.PreCheck,
+		},
+		*/
+		{
+			FunName: "deploy client",
+			Func:    d.Service.InstallClient,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("install_client successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_cold.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_cold.go
new file mode 100644
index 0000000000..d1ceb64ff4
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_cold.go
@@ -0,0 +1,103 @@
+package escmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// InstallEsColdAct holds the base options and the es install component
+type InstallEsColdAct struct {
+	*subcmd.BaseOptions
+	Service elasticsearch.InstallEsComp
+}
+
+// InstallEsColdCommand builds the install_cold subcommand
+func InstallEsColdCommand() *cobra.Command {
+	act := InstallEsColdAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "install_cold",
+		Short:   "deploy es cold instance",
+		Example: fmt.Sprintf(`dbactuator es install_cold %s`, subcmd.CmdBaseExapmleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate checks the base options
+func (d *InstallEsColdAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init deserializes the payload into the es install component
+func (d *InstallEsColdAct) Init() (err error) {
+	logger.Info("InstallEsColdAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("Deserialize failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
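+	// d.Deserialize above fills only this component's Params; cluster-wide
+	// runtime settings travel separately in subcmd.GeneralRuntimeParam. Going
+	// by the `extend` key used in the file-server example earlier in this
+	// patch, the payload plumbing is presumably something like this sketch
+	// (names illustrative, not the real BaseOptions internals):
+	//
+	//	var wrap struct {
+	//		Extend json.RawMessage `json:"extend"`
+	//	}
+	//	if err := json.Unmarshal(rawPayload, &wrap); err != nil {
+	//		return err
+	//	}
+	//	return json.Unmarshal(wrap.Extend, &d.Service.Params)
+	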
return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallEsColdAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallEsColdAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "部署cold", + Func: d.Service.InstallCold, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install_cold successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_exporter.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_exporter.go new file mode 100644 index 0000000000..55bcd68ddf --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_exporter.go @@ -0,0 +1,103 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallExporterAct TODO +type InstallExporterAct struct { + *subcmd.BaseOptions + Service elasticsearch.InstallEsComp +} + +// InstallExporterCommand TODO +func InstallExporterCommand() *cobra.Command { + act := InstallExporterAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_exporter", + Short: "部署Exporter", + Example: fmt.Sprintf(`dbactuator es install_exporter %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallExporterAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallExporterAct) Init() (err error) { + logger.Info("InstallExporterAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallExporterAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallExporterAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "部署Exporter", + Func: d.Service.InstallNodeExporter, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + 
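+			// On step failure the act prints the marshaled rollback context
+			// to stdout; a controller can capture it and replay it through
+			// the rollback branch of the same subcommand. Consumer-side
+			// sketch (the --rollback flag name is an assumption inferred
+			// from the act.RollBack option):
+			//
+			//	ctx := stdoutOfFailedRun // captured JSON rollback context
+			//	exec.Command("./dbactuator", "es", "install_exporter",
+			//		"--rollback", "--payload-format", "raw",
+			//		"--payload", ctx)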
logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install_exporter successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_hot.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_hot.go new file mode 100644 index 0000000000..f590abee17 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_hot.go @@ -0,0 +1,103 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallEsHotAct TODO +type InstallEsHotAct struct { + *subcmd.BaseOptions + Service elasticsearch.InstallEsComp +} + +// InstallEsHotCommand TODO +func InstallEsHotCommand() *cobra.Command { + act := InstallEsHotAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_hot", + Short: "部署hot实例", + Example: fmt.Sprintf(`dbactuator es install_hot %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallEsHotAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallEsHotAct) Init() (err error) { + logger.Info("DeployEsHotAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallEsHotAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallEsHotAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "部署Hot", + Func: d.Service.InstallHot, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install_Hot successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_kibana.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_kibana.go new file mode 100644 index 0000000000..4596d435c2 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_kibana.go @@ -0,0 +1,103 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + 
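+	// Worth noting: a single elasticsearch.InstallEsComp backs more than ten
+	// escmd subcommands; each act wires a different method of the component
+	// (InstallKibana here, InstallHot, InitGrant, ...) as its only step. A
+	// hedged sketch of the component shape implied by this file set (field
+	// types are assumptions, not the real definitions):
+	//
+	//	type InstallEsComp struct {
+	//		GeneralParam    *components.GeneralParam
+	//		Params          *InstallEsParams
+	//		RollBackContext rollback.RollBackObjects
+	//	}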
"dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallKibanaAct TODO +type InstallKibanaAct struct { + *subcmd.BaseOptions + Service elasticsearch.InstallEsComp +} + +// InstallKibanaCommand TODO +func InstallKibanaCommand() *cobra.Command { + act := InstallKibanaAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_kibana", + Short: "部署kibana", + Example: fmt.Sprintf(`dbactuator es install_kibana %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallKibanaAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallKibanaAct) Init() (err error) { + logger.Info("InstallKibanaAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallKibanaAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallKibanaAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "部署kibana", + Func: d.Service.InstallKibana, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install_kibana successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_master.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_master.go new file mode 100644 index 0000000000..593f7a8177 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_master.go @@ -0,0 +1,111 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallEsMasterAct TODO +type InstallEsMasterAct struct { + *subcmd.BaseOptions + Service elasticsearch.InstallEsComp +} + +// InstallEsMasterCommand TODO +func InstallEsMasterCommand() *cobra.Command { + act := InstallEsMasterAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_master", + Short: "部署master实例", + Example: fmt.Sprintf(`dbactuator es install_master %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallEsMasterAct) 
Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallEsMasterAct) Init() (err error) { + logger.Info("DeployMySQLAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallEsMasterAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallEsMasterAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "初始化", + Func: d.Service.InitEsDirs, + }, + { + FunName: "下载并且解压安装包", + Func: d.Service.DecompressEsPkg, + }, + { + FunName: "部署master", + Func: d.Service.InstallMaster, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install_master successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_supervisor.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_supervisor.go new file mode 100644 index 0000000000..5b46450d57 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_supervisor.go @@ -0,0 +1,103 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallSupervisorAct TODO +type InstallSupervisorAct struct { + *subcmd.BaseOptions + Service elasticsearch.InstallEsComp +} + +// InstallSupervisorCommand TODO +func InstallSupervisorCommand() *cobra.Command { + act := InstallSupervisorAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_supervisor", + Short: "部署supervisor", + Example: fmt.Sprintf(`dbactuator es install_supervisor %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallSupervisorAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallSupervisorAct) Init() (err error) { + logger.Info("InstallSupervisorAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallSupervisorAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + 
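+		// Note the asymmetry: Init uses the looser Deserialize, while
+		// Rollback uses DeserializeAndValidate, which presumably also runs
+		// the struct-tag validation seen on params such as RMLargeFileParam
+		// (`validate:"required"`). Sketch with go-playground/validator (an
+		// assumption; go-pubpkg may use a different validator):
+		//
+		//	var p RMLargeFileParam
+		//	if err := json.Unmarshal(raw, &p); err != nil {
+		//		return err
+		//	}
+		//	return validator.New().Struct(p)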
logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallSupervisorAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "部署Supervisor", + Func: d.Service.InstallSupervisor, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install_supervisor successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_telegraf.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_telegraf.go new file mode 100644 index 0000000000..6ace725bab --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/install_telegraf.go @@ -0,0 +1,103 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallTelegrafAct TODO +type InstallTelegrafAct struct { + *subcmd.BaseOptions + Service elasticsearch.InstallEsComp +} + +// InstallTelegrafCommand TODO +func InstallTelegrafCommand() *cobra.Command { + act := InstallTelegrafAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_telegraf", + Short: "部署Telegraf", + Example: fmt.Sprintf(`dbactuator es install_telegraf %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallTelegrafAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallTelegrafAct) Init() (err error) { + logger.Info("InstallTelegrafAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallTelegrafAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallTelegrafAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "部署Telegraf", + Func: d.Service.InstallTelegraf, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install_telegraf successfully") + return nil +} diff --git 
a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/replace_master.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/replace_master.go new file mode 100644 index 0000000000..1caf9d9989 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/replace_master.go @@ -0,0 +1,103 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// ReplaceMasterAct TODO +type ReplaceMasterAct struct { + *subcmd.BaseOptions + Service elasticsearch.ReplaceEsNodeComp +} + +// ReplaceMasterCommand TODO +func ReplaceMasterCommand() *cobra.Command { + act := ReplaceMasterAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "replace_master", + Short: "修改master指向", + Example: fmt.Sprintf(`dbactuator es replace_master %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *ReplaceMasterAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *ReplaceMasterAct) Init() (err error) { + logger.Info("ReplaceMasterAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *ReplaceMasterAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *ReplaceMasterAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "替换master", + Func: d.Service.ReplaceMasterNode, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("replace_master successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/restart_process.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/restart_process.go new file mode 100644 index 0000000000..175f499248 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/restart_process.go @@ -0,0 +1,97 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// RestartProcessAct TODO +type RestartProcessAct struct { + *subcmd.BaseOptions + Service 
elasticsearch.StartStopProcessComp +} + +// RestartProcessCommand TODO +func RestartProcessCommand() *cobra.Command { + act := RestartProcessAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "restart_process", + Short: "重启es进程", + Example: fmt.Sprintf(`dbactuator es restart_process %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *RestartProcessAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *RestartProcessAct) Init() (err error) { + logger.Info("RestartProcessAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *RestartProcessAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *RestartProcessAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "重启es进程", + Func: d.Service.RestartProcess, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("restart_process successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/start_process.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/start_process.go new file mode 100644 index 0000000000..9604d5c04c --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/start_process.go @@ -0,0 +1,97 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// StartProcessAct TODO +type StartProcessAct struct { + *subcmd.BaseOptions + Service elasticsearch.StartStopProcessComp +} + +// StartProcessCommand TODO +func StartProcessCommand() *cobra.Command { + act := StartProcessAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "start_process", + Short: "启动es进程", + Example: fmt.Sprintf(`dbactuator es start_process %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *StartProcessAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *StartProcessAct) Init() (err error) { + logger.Info("StartProcessAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) 
+ return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *StartProcessAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *StartProcessAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "启动es进程", + Func: d.Service.StartProcess, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("start_process successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/stop_process.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/stop_process.go new file mode 100644 index 0000000000..01f20667f7 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/escmd/stop_process.go @@ -0,0 +1,97 @@ +package escmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// StopProcessAct TODO +type StopProcessAct struct { + *subcmd.BaseOptions + Service elasticsearch.StartStopProcessComp +} + +// StopProcessCommand TODO +func StopProcessCommand() *cobra.Command { + act := StopProcessAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "stop_process", + Short: "停止es进程", + Example: fmt.Sprintf(`dbactuator es stop_process %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *StopProcessAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *StopProcessAct) Init() (err error) { + logger.Info("StopProcessAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *StopProcessAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *StopProcessAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "停止es进程", + Func: d.Service.StopProcess, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("stop_process 
successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/check_active.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/check_active.go new file mode 100644 index 0000000000..444e2032e0 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/check_active.go @@ -0,0 +1,100 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CheckActiveAct TODO +type CheckActiveAct struct { + *subcmd.BaseOptions + Service hdfs.CheckActiveService +} + +// CheckActiveCommand TODO +func CheckActiveCommand() *cobra.Command { + act := CheckActiveAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "check-active", + Short: "检查主备节点", + Example: fmt.Sprintf(`dbactuator hdfs check-active %s`, subcmd.CmdBaseExampleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *CheckActiveAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *CheckActiveAct) Init() (err error) { + logger.Info("CheckActiveAct Init") + // 获取db-flow 传进来的extend 参数 + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + // 获取HDFS集群 安装配置,e.g. 
安装目录,安装JDK版本,安装haproxy文件等 + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *CheckActiveAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *CheckActiveAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "检查主备节点操作", + Func: d.Service.CheckActive, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("Check Active successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/check_decommission.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/check_decommission.go new file mode 100644 index 0000000000..e1d2c9905a --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/check_decommission.go @@ -0,0 +1,100 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CheckDecommissionAct TODO +type CheckDecommissionAct struct { + *subcmd.BaseOptions + Service hdfs.CheckDecommissionService +} + +// CheckDecommissionCommand TODO +func CheckDecommissionCommand() *cobra.Command { + act := CheckDecommissionAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "check-decommission", + Short: "检查退役节点", + Example: fmt.Sprintf(`dbactuator hdfs check-decommission %s`, subcmd.CmdBaseExampleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *CheckDecommissionAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *CheckDecommissionAct) Init() (err error) { + logger.Info("CheckDecommissionAct Init") + // 获取db-flow 传进来的extend 参数 + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + // 获取HDFS集群 安装配置,e.g. 
安装目录,安装JDK版本,安装haproxy文件等 + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *CheckDecommissionAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *CheckDecommissionAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "检查DN节点是否退役", + Func: d.Service.CheckDatanodeDecommission, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("Check Decommission successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/cmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/cmd.go new file mode 100644 index 0000000000..5130e5b6b3 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/cmd.go @@ -0,0 +1,51 @@ +package hdfscmd + +import ( + . "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates" + + "github.com/spf13/cobra" +) + +// HdfsCommand TODO +func HdfsCommand() *cobra.Command { + commands := &cobra.Command{ + Use: "hdfs [hdfs operation]", + Short: "HDFS Operation Command Line Interface", + RunE: ValidateSubCommand(), + } + groups := templates.CommandGroups{ + { + Message: "hdfs operation sets", + Commands: []*cobra.Command{ + InitSystemConfigCommand(), + DecompressPkgCommand(), + RenderHdfsConfigCommand(), + InstallSupervisorCommand(), + InstallZookeeperCommand(), + InstallJournalNodeCommand(), + InstallNn1Command(), + InstallNn2Command(), + InstallZKFCCommand(), + InstallDataNodeCommand(), + InstallTelegrafCommand(), + InstallHaProxyCommand(), + UpdateHostMappingCommand(), + StopProcessCommand(), + DataCleanCommand(), + StartComponentCommand(), + UpdateDfsHostCommand(), + RefreshNodesCommand(), + CheckDecommissionCommand(), + GenerateKeyCommand(), + WriteKeyCommand(), + ScpDirCommand(), + InstanceOperationCommand(), + CheckActiveCommand(), + UpdateZooKeeperConfigCommand(), + }, + }, + } + groups.Add(commands) + return commands +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/data_clean.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/data_clean.go new file mode 100644 index 0000000000..8e675efd04 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/data_clean.go @@ -0,0 +1,103 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// DataCleanAct TODO +type DataCleanAct struct { + *subcmd.BaseOptions + Service hdfs.NodeOperationService +} + +// DataCleanCommand TODO +func DataCleanCommand() *cobra.Command { + act := DataCleanAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "clean-data", + Short: "清理HDFS数据", + 
Example: fmt.Sprintf(`dbactuator hdfs clean-data %s`, subcmd.CmdBaseExapmleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate TODO
+func (d *DataCleanAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init TODO
+func (d *DataCleanAct) Init() (err error) {
+	logger.Info("DataCleanAct Init")
+	// parse the extend params passed in from db-flow
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	d.Service.InstallParams = hdfs.InitDefaultInstallParam()
+	return nil
+}
+
+// Rollback TODO
+//
+// @receiver d
+// @return err
+func (d *DataCleanAct) Rollback() (err error) {
+	var r rollback.RollBackObjects
+	if err = d.DeserializeAndValidate(&r); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	err = r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run TODO
+func (d *DataCleanAct) Run() (err error) {
+	steps := subcmd.Steps{
+
+		{
+			FunName: "停止Haproxy",
+			Func:    d.Service.StopHaProxy,
+		},
+		{
+			FunName: "清理HDFS目录及进程",
+			Func:    d.Service.CleanData,
+		},
+	}
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("clean-data successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/decompress_pkg.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/decompress_pkg.go
new file mode 100644
index 0000000000..22930c4f91
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/decompress_pkg.go
@@ -0,0 +1,105 @@
+package hdfscmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// DecompressPkgAct TODO
+type DecompressPkgAct struct {
+	*subcmd.BaseOptions
+	// Service hdfs.InstallHdfsService
+	Service hdfs.DecompressPkgService
+}
+
+// DecompressPkgCommand TODO
+func DecompressPkgCommand() *cobra.Command {
+	act := DecompressPkgAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "decompress_pkg",
+		Short:   "解压缩",
+		Example: fmt.Sprintf(`dbactuator hdfs decompress_pkg %s`, subcmd.CmdBaseExampleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate TODO
+func (d *DecompressPkgAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init TODO
+func (d *DecompressPkgAct) Init() (err error) {
+	logger.Info("DecompressPkgAct Init")
+	// parse the extend params passed in from db-flow
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	// 
获取HDFS集群 安装配置,e.g. 安装目录,安装JDK版本,安装haproxy文件等 + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *DecompressPkgAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *DecompressPkgAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + { + FunName: "解压缩", + Func: d.Service.DecompressPkg, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("decompress_pkg successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/generate_key.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/generate_key.go new file mode 100644 index 0000000000..c5101e4f69 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/generate_key.go @@ -0,0 +1,100 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// GenerateKeyAct TODO +type GenerateKeyAct struct { + *subcmd.BaseOptions + Service hdfs.GenerateKeyService +} + +// GenerateKeyCommand TODO +func GenerateKeyCommand() *cobra.Command { + act := GenerateKeyAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "generate-key", + Short: "生成免密key", + Example: fmt.Sprintf(`dbactuator hdfs generate-key %s`, subcmd.CmdBaseExampleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *GenerateKeyAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *GenerateKeyAct) Init() (err error) { + logger.Info("GenerateKeyAct Init") + // 获取db-flow 传进来的extend 参数 + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + // 获取HDFS集群 安装配置,e.g. 
安装目录,安装JDK版本,安装haproxy文件等 + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *GenerateKeyAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *GenerateKeyAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "生成Key", + Func: d.Service.GenerateKey, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("Generate Key successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/hdfscmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/hdfscmd.go new file mode 100644 index 0000000000..c2671c691b --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/hdfscmd.go @@ -0,0 +1,2 @@ +// Package hdfscmd TODO +package hdfscmd diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/init_system_config.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/init_system_config.go new file mode 100644 index 0000000000..2963b1e324 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/init_system_config.go @@ -0,0 +1,98 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InitSystemConfigAct TODO +type InitSystemConfigAct struct { + *subcmd.BaseOptions + Service hdfs.InitSystemConfigService +} + +// InitSystemConfigCommand TODO +func InitSystemConfigCommand() *cobra.Command { + act := InitSystemConfigAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "init", + Short: "hdfs 初始化系统配置", + Example: fmt.Sprintf(`dbactuator hdfs init %s`, subcmd.CmdBaseExampleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InitSystemConfigAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InitSystemConfigAct) Init() (err error) { + logger.Info("InitSystemConfigAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// @receiver d +// +// @return err +func (d *InitSystemConfigAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + 
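+		// The rollback path here is the inverse of every Run() in this
+		// package: a failed Run() prints its Service.RollBackContext as
+		// JSON, and that same JSON is fed back in as the payload and
+		// deserialized into rollback.RollBackObjects above. A minimal
+		// sketch of the round trip (the internals of RollBackObjects live
+		// in pkg/rollback and may differ from this):
+		//
+		//	ctxb, _ := json.Marshal(d.Service.RollBackContext) // printed by a failed Run()
+		//	var r rollback.RollBackObjects
+		//	_ = json.Unmarshal(ctxb, &r) // what DeserializeAndValidate does with the payload
+		//	_ = r.RollBack()             // undo the recorded directories/processes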
}
+	return
+}
+
+// Run TODO
+func (d *InitSystemConfigAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "初始化系统配置",
+			Func:    d.Service.InitSystemConfig,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("init successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_datanode.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_datanode.go
new file mode 100644
index 0000000000..ad62e11779
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_datanode.go
@@ -0,0 +1,104 @@
+package hdfscmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// InstallDataNodeAct TODO
+type InstallDataNodeAct struct {
+	*subcmd.BaseOptions
+	Service hdfs.InstallHdfsService
+}
+
+// InstallDataNodeCommand TODO
+func InstallDataNodeCommand() *cobra.Command {
+	act := InstallDataNodeAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "install-dn",
+		Short:   "hdfs 安装datanode",
+		Example: fmt.Sprintf(`dbactuator hdfs install-dn %s`, subcmd.CmdBaseExapmleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate TODO
+func (d *InstallDataNodeAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init TODO
+func (d *InstallDataNodeAct) Init() (err error) {
+	logger.Info("InstallDataNodeAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	d.Service.InstallParams = hdfs.InitDefaultInstallParam()
+	return nil
+}
+
+// Rollback TODO
+// @receiver d
+//
+// @return err
+func (d *InstallDataNodeAct) Rollback() (err error) {
+	var r rollback.RollBackObjects
+	if err = d.DeserializeAndValidate(&r); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	err = r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run TODO
+func (d *InstallDataNodeAct) Run() (err error) {
+	steps := subcmd.Steps{
+
+		// {
+		//	FunName: "预检查",
+		//	Func:    d.Service.PreCheck,
+		// },
+
+		{
+			FunName: "安装DataNode",
+			Func:    d.Service.InstallDataNode,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("install-dn successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_first_namenode.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_first_namenode.go
new file mode 100644
index 0000000000..7732a9453a
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_first_namenode.go @@ -0,0 +1,104 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallNn1Act TODO +type InstallNn1Act struct { + *subcmd.BaseOptions + Service hdfs.InstallHdfsService +} + +// InstallNn1Command TODO +func InstallNn1Command() *cobra.Command { + act := InstallNn1Act{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install-nn1", + Short: "hdfs 安装nn1", + Example: fmt.Sprintf(`dbactuator hdfs install-nn1 %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallNn1Act) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallNn1Act) Init() (err error) { + logger.Info("InstallNn1Act Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// @receiver d +// +// @return err +func (d *InstallNn1Act) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallNn1Act) Run() (err error) { + steps := subcmd.Steps{ + + // { + // FunName: "预检查", + // Func: d.Service.PreCheck, + // }, + + { + FunName: "安装NN1", + Func: d.Service.InstallNn1, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("init successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_haproxy.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_haproxy.go new file mode 100644 index 0000000000..60d9eab195 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_haproxy.go @@ -0,0 +1,111 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallHaProxyAct TODO +type InstallHaProxyAct struct { + *subcmd.BaseOptions + Service hdfs.InstallHaproxyService +} + +// InstallHaProxyCommand TODO +func InstallHaProxyCommand() *cobra.Command { + act := InstallHaProxyAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install-haproxy", + Short: "hdfs 
安装HAProxy",
+		Example: fmt.Sprintf(`dbactuator hdfs install-haproxy %s`, subcmd.CmdBaseExapmleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate TODO
+func (d *InstallHaProxyAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init TODO
+func (d *InstallHaProxyAct) Init() (err error) {
+	logger.Info("InstallHaProxyAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	d.Service.InstallParams = hdfs.InitDefaultInstallParam()
+	return nil
+}
+
+// Rollback TODO
+// @receiver d
+//
+// @return err
+func (d *InstallHaProxyAct) Rollback() (err error) {
+	var r rollback.RollBackObjects
+	if err = d.DeserializeAndValidate(&r); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	err = r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run TODO
+func (d *InstallHaProxyAct) Run() (err error) {
+	steps := subcmd.Steps{
+
+		// {
+		//	FunName: "预检查",
+		//	Func:    d.Service.PreCheck,
+		// },
+		{
+			FunName: "安装HaProxy",
+			Func:    d.Service.InstallHaProxy,
+		},
+		{
+			FunName: "渲染HaProxy配置",
+			Func:    d.Service.RenderHaProxyConfig,
+		},
+		{
+			FunName: "启动HaProxy",
+			Func:    d.Service.StartHaProxy,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("install-haproxy successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_journalnode.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_journalnode.go
new file mode 100644
index 0000000000..f0a1565a89
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_journalnode.go
@@ -0,0 +1,104 @@
+package hdfscmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// InstallJournalNodeAct TODO
+type InstallJournalNodeAct struct {
+	*subcmd.BaseOptions
+	Service hdfs.InstallHdfsService
+}
+
+// InstallJournalNodeCommand TODO
+func InstallJournalNodeCommand() *cobra.Command {
+	act := InstallJournalNodeAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "install-journalnode",
+		Short:   "hdfs 安装journalnode",
+		Example: fmt.Sprintf(`dbactuator hdfs install-journalnode %s`, subcmd.CmdBaseExapmleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate TODO
+func (d *InstallJournalNodeAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init TODO
+func (d *InstallJournalNodeAct) Init() (err error) {
+	logger.Info("InstallJournalNodeAct Init")
+	if err = 
d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// @receiver d +// +// @return err +func (d *InstallJournalNodeAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallJournalNodeAct) Run() (err error) { + steps := subcmd.Steps{ + + // { + // FunName: "预检查", + // Func: d.Service.PreCheck, + // }, + + { + FunName: "启动JournalNode", + Func: d.Service.InstallJournalNode, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("init successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_second_namenode.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_second_namenode.go new file mode 100644 index 0000000000..6152ea9912 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_second_namenode.go @@ -0,0 +1,104 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallNn2Act TODO +type InstallNn2Act struct { + *subcmd.BaseOptions + Service hdfs.InstallHdfsService +} + +// InstallNn2Command TODO +func InstallNn2Command() *cobra.Command { + act := InstallNn2Act{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install-nn2", + Short: "hdfs 安装nn2", + Example: fmt.Sprintf(`dbactuator hdfs install-nn2 %s`, subcmd.CmdBaseExampleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallNn2Act) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallNn2Act) Init() (err error) { + logger.Info("InstallNn2Act Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// @receiver d +// +// @return err +func (d *InstallNn2Act) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallNn2Act) Run() (err error) { + steps := subcmd.Steps{ + + // { + // FunName: "预检查", + // Func: d.Service.PreCheck, + 
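+		// Each step below runs in order and the action aborts on the first
+		// error. A minimal equivalent of the subcmd.Steps runner these
+		// commands rely on (the real one lives in internal/subcmd; type
+		// names here are illustrative):
+		//
+		//	type StepFunc struct {
+		//		FunName string
+		//		Func    func() error
+		//	}
+		//
+		//	type Steps []StepFunc
+		//
+		//	func (s Steps) Run() (err error) {
+		//		for _, step := range s {
+		//			logger.Info("start %s", step.FunName)
+		//			if err = step.Func(); err != nil {
+		//				logger.Error("%s failed: %s", step.FunName, err.Error())
+		//				return err
+		//			}
+		//		}
+		//		return nil
+		//	}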
// },
+
+		{
+			FunName: "安装NN2",
+			Func:    d.Service.InstallNn2,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("install-nn2 successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_supervisor.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_supervisor.go
new file mode 100644
index 0000000000..83a2e98986
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_supervisor.go
@@ -0,0 +1,104 @@
+package hdfscmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// InstallSupervisorAct TODO
+type InstallSupervisorAct struct {
+	*subcmd.BaseOptions
+	Service hdfs.InstallHdfsService
+}
+
+// InstallSupervisorCommand TODO
+func InstallSupervisorCommand() *cobra.Command {
+	act := InstallSupervisorAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "install-supervisor",
+		Short:   "部署supervisor",
+		Example: fmt.Sprintf(`dbactuator hdfs install-supervisor %s`, subcmd.CmdBaseExapmleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate TODO
+func (d *InstallSupervisorAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init TODO
+func (d *InstallSupervisorAct) Init() (err error) {
+	logger.Info("InstallSupervisorAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	d.Service.InstallParams = hdfs.InitDefaultInstallParam()
+	return nil
+}
+
+// Rollback TODO
+//
+// @receiver d
+// @return err
+func (d *InstallSupervisorAct) Rollback() (err error) {
+	var r rollback.RollBackObjects
+	if err = d.DeserializeAndValidate(&r); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	err = r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run TODO
+func (d *InstallSupervisorAct) Run() (err error) {
+	steps := subcmd.Steps{
+		/* Todo
+		{
+			FunName: "预检查",
+			Func:    d.Service.PreCheck,
+		},
+		*/
+		{
+			FunName: "部署Supervisor",
+			Func:    d.Service.InstallSupervisor,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("install_supervisor successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_telegraf.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_telegraf.go
new file mode 100644
index 0000000000..7d7965e637
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_telegraf.go
@@ -0,0 +1,104 @@
+package hdfscmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// InstallTelegrafAct TODO
+type InstallTelegrafAct struct {
+	*subcmd.BaseOptions
+	Service hdfs.InstallHdfsService
+}
+
+// InstallTelegrafCommand TODO
+func InstallTelegrafCommand() *cobra.Command {
+	act := InstallTelegrafAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "install-telegraf",
+		Short:   "hdfs 安装telegraf",
+		Example: fmt.Sprintf(`dbactuator hdfs install-telegraf %s`, subcmd.CmdBaseExapmleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate TODO
+func (d *InstallTelegrafAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init TODO
+func (d *InstallTelegrafAct) Init() (err error) {
+	logger.Info("InstallTelegrafAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	d.Service.InstallParams = hdfs.InitDefaultInstallParam()
+	return nil
+}
+
+// Rollback TODO
+// @receiver d
+//
+// @return err
+func (d *InstallTelegrafAct) Rollback() (err error) {
+	var r rollback.RollBackObjects
+	if err = d.DeserializeAndValidate(&r); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	err = r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run TODO
+func (d *InstallTelegrafAct) Run() (err error) {
+	steps := subcmd.Steps{
+
+		// {
+		//	FunName: "预检查",
+		//	Func:    d.Service.PreCheck,
+		// },
+		{
+			FunName: "安装Telegraf",
+			// assumes InstallHdfsService exposes InstallTelegraf, mirroring the escmd component
+			Func: d.Service.InstallTelegraf,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("install-telegraf successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_zkfc.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_zkfc.go
new file mode 100644
index 0000000000..a1922a09ea
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_zkfc.go
@@ -0,0 +1,104 @@
+package hdfscmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// InstallZKFCAct TODO
+type InstallZKFCAct struct {
+	*subcmd.BaseOptions
+	Service hdfs.InstallHdfsService
+}
+
+// InstallZKFCCommand TODO
+func InstallZKFCCommand() *cobra.Command {
+	act := InstallZKFCAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "install-zkfc",
+		Short:   "hdfs 安装ZKFC",
+		Example: fmt.Sprintf(`dbactuator hdfs install-zkfc %s`, subcmd.CmdBaseExapmleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate TODO
+func (d *InstallZKFCAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init TODO
+func (d *InstallZKFCAct) Init() (err error) {
+	logger.Info("InstallZKFCAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	d.Service.InstallParams = hdfs.InitDefaultInstallParam()
+	return nil
+}
+
+// Rollback TODO
+// @receiver d
+//
+// @return err
+func (d *InstallZKFCAct) Rollback() (err error) {
+	var r rollback.RollBackObjects
+	if err = d.DeserializeAndValidate(&r); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	err = r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run TODO
+func (d *InstallZKFCAct) Run() (err error) {
+	steps := subcmd.Steps{
+
+		// {
+		//	FunName: "预检查",
+		//	Func:    d.Service.PreCheck,
+		// },
+
+		{
+			FunName: "启动ZKFC",
+			Func:    d.Service.InstallZKFC,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("install-zkfc successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_zookeeper.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_zookeeper.go
new file mode 100644
index 0000000000..979ec3cf93
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/install_zookeeper.go
@@ -0,0 +1,103 @@
+package hdfscmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// InstallZookeeperAct TODO
+type InstallZookeeperAct struct {
+	*subcmd.BaseOptions
+	Service hdfs.InstallZookeeperService
+}
+
+// InstallZookeeperCommand TODO
+func InstallZookeeperCommand() *cobra.Command {
+	act := InstallZookeeperAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "install-zookeeper",
+		Short:   "hdfs 安装zookeeper",
+		Example: fmt.Sprintf(`dbactuator hdfs install-zookeeper %s`, subcmd.CmdBaseExapmleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate TODO
+func (d *InstallZookeeperAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init TODO
+func (d *InstallZookeeperAct) Init() (err error) {
+	logger.Info("InstallZookeeperAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// @receiver d +// +// @return err +func (d *InstallZookeeperAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallZookeeperAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "渲染zookeeper 配置", + Func: d.Service.RenderZookeeperConfig, + }, + + { + FunName: "安装ZooKeeper", + Func: d.Service.InstallZookeeper, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("init successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/instance_operation.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/instance_operation.go new file mode 100644 index 0000000000..c7e4c49dc2 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/instance_operation.go @@ -0,0 +1,98 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstanceOperationAct TODO +type InstanceOperationAct struct { + *subcmd.BaseOptions + Service hdfs.NodeOperationService +} + +// InstanceOperationCommand TODO +func InstanceOperationCommand() *cobra.Command { + act := InstanceOperationAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "instance-operation", + Short: "启停HDFS集群实例", + Example: fmt.Sprintf(`dbactuator hdfs instance-operation %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstanceOperationAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstanceOperationAct) Init() (err error) { + logger.Info("InstanceOperationAct Init") + // 获取db-flow 传进来的extend 参数 + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstanceOperationAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstanceOperationAct) Run() (err error) { + steps := subcmd.Steps{ + { + 
FunName: "启停HDFS组件", + Func: d.Service.StartComponent, + }, + } + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("instance-operation successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/refresh_nodes.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/refresh_nodes.go new file mode 100644 index 0000000000..917b45d978 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/refresh_nodes.go @@ -0,0 +1,98 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// RefreshNodesAct TODO +type RefreshNodesAct struct { + *subcmd.BaseOptions + Service hdfs.RefreshNodesService +} + +// RefreshNodesCommand TODO +func RefreshNodesCommand() *cobra.Command { + act := RefreshNodesAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "refresh-nodes", + Short: "hdfs 更新节点", + Example: fmt.Sprintf(`dbactuator hdfs refresh-nodes %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *RefreshNodesAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *RefreshNodesAct) Init() (err error) { + logger.Info("RefreshNodesAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// @receiver d +// +// @return err +func (d *RefreshNodesAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *RefreshNodesAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "更新节点", + Func: d.Service.RefreshNodes, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("init successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/render_hdfs_config.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/render_hdfs_config.go new file mode 100644 index 0000000000..236b890dda --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/render_hdfs_config.go @@ -0,0 +1,104 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + 
"dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// RenderHdfsConfigAct TODO +type RenderHdfsConfigAct struct { + *subcmd.BaseOptions + Service hdfs.InstallHdfsService +} + +// RenderHdfsConfigCommand TODO +func RenderHdfsConfigCommand() *cobra.Command { + act := RenderHdfsConfigAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "render-config", + Short: "hdfs 渲染集群配置", + Example: fmt.Sprintf(`dbactuator hdfs render-config %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *RenderHdfsConfigAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *RenderHdfsConfigAct) Init() (err error) { + logger.Info("RenderHdfsConfigAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// @receiver d +// +// @return err +func (d *RenderHdfsConfigAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *RenderHdfsConfigAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "渲染HDFS配置", + Func: d.Service.RenderHdfsConfig, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("init successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/scp_dir.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/scp_dir.go new file mode 100644 index 0000000000..f36a9383e2 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/scp_dir.go @@ -0,0 +1,100 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// ScpDirAct TODO +type ScpDirAct struct { + *subcmd.BaseOptions + Service hdfs.ScpDirService +} + +// ScpDirCommand TODO +func ScpDirCommand() *cobra.Command { + act := ScpDirAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "scp-dir", + Short: "传输元数据目录", + Example: fmt.Sprintf(`dbactuator hdfs scp-dir %s`, subcmd.CmdBaseExampleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + 
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate TODO
+func (d *ScpDirAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init TODO
+func (d *ScpDirAct) Init() (err error) {
+	logger.Info("ScpDirAct Init")
+	// parse the extend params passed in from db-flow
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("Deserialize failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	// load the HDFS cluster install defaults, e.g. install dir, JDK version, haproxy files
+	d.Service.InstallParams = hdfs.InitDefaultInstallParam()
+	return nil
+}
+
+// Rollback TODO
+//
+// @receiver d
+// @return err
+func (d *ScpDirAct) Rollback() (err error) {
+	var r rollback.RollBackObjects
+	if err = d.DeserializeAndValidate(&r); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	err = r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run TODO
+func (d *ScpDirAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "传输元数据目录",
+			Func:    d.Service.ScpDir,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("Scp Dir successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/start_component.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/start_component.go
new file mode 100644
index 0000000000..fc60f6df60
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/start_component.go
@@ -0,0 +1,98 @@
+package hdfscmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// StartComponentAct TODO
+type StartComponentAct struct {
+	*subcmd.BaseOptions
+	Service hdfs.NodeOperationService
+}
+
+// StartComponentCommand TODO
+func StartComponentCommand() *cobra.Command {
+	act := StartComponentAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "start-component",
+		Short:   "启动HDFS集群组件",
+		Example: fmt.Sprintf(`dbactuator hdfs start-component %s`, subcmd.CmdBaseExapmleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate TODO
+func (d *StartComponentAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init TODO
+func (d *StartComponentAct) Init() (err error) {
+	logger.Info("StartComponentAct Init")
+	// parse the extend params passed in from db-flow
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("Deserialize failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	d.Service.InstallParams = hdfs.InitDefaultInstallParam()
+	return nil
+}
+
+// Rollback TODO
+//
+// @receiver d
+// @return err
+func (d *StartComponentAct) Rollback() (err error) {
+	var r rollback.RollBackObjects
+	if err = d.DeserializeAndValidate(&r); err != nil {
logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *StartComponentAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "启动HDFS组件", + Func: d.Service.StartComponent, + }, + } + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("stop-process successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/stop_process.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/stop_process.go new file mode 100644 index 0000000000..c019ebf51d --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/stop_process.go @@ -0,0 +1,98 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// StopProcessAct TODO +type StopProcessAct struct { + *subcmd.BaseOptions + Service hdfs.NodeOperationService +} + +// StopProcessCommand TODO +func StopProcessCommand() *cobra.Command { + act := StopProcessAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "stop-process", + Short: "停止所有进程", + Example: fmt.Sprintf(`dbactuator hdfs stop-process %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *StopProcessAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *StopProcessAct) Init() (err error) { + logger.Info("StopProcessAct Init") + // 获取db-flow 传进来的extend 参数 + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + + return nil +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *StopProcessAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *StopProcessAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "停止进程", + Func: d.Service.StopAllProcess, + }, + } + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("stop-process successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/update_dfs_host.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/update_dfs_host.go new file mode 100644 index 0000000000..e1b2c9182a --- /dev/null +++ 
b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/update_dfs_host.go @@ -0,0 +1,100 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// UpdateDfsHostAct TODO +type UpdateDfsHostAct struct { + *subcmd.BaseOptions + Service hdfs.UpdateDfsHostService +} + +// UpdateDfsHostCommand TODO +func UpdateDfsHostCommand() *cobra.Command { + act := UpdateDfsHostAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "dfs-host", + Short: "更新dfs hosts", + Example: fmt.Sprintf(`dbactuator hdfs dfs-host %s`, subcmd.CmdBaseExampleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *UpdateDfsHostAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *UpdateDfsHostAct) Init() (err error) { + logger.Info("UpdateDfsHostAct Init") + // 获取db-flow 传进来的extend 参数 + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + // 获取HDFS集群 安装配置,e.g. 安装目录,安装JDK版本,安装haproxy文件等 + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *UpdateDfsHostAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *UpdateDfsHostAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "更新dfs hosts", + Func: d.Service.UpdateDfsHost, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("decompress_pkg successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/update_host_mappng.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/update_host_mappng.go new file mode 100644 index 0000000000..669357a03a --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/update_host_mappng.go @@ -0,0 +1,97 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// UpdateHostMappingAct TODO +type UpdateHostMappingAct struct { + *subcmd.BaseOptions + Service hdfs.UpdateHostMappingService +} + +// UpdateHostMappingCommand TODO +func UpdateHostMappingCommand() *cobra.Command { + act := UpdateHostMappingAct{ + BaseOptions: subcmd.GBaseOptions, + } + 
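+	// Standard Act lifecycle, wired the same way in every other hdfs subcommand:
+	// Validate -> (optional) Rollback -> Init -> Run.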
cmd := &cobra.Command{ + Use: "update-hosts", + Short: "hdfs 更新主机映射", + Example: fmt.Sprintf(`dbactuator hdfs update-hosts %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *UpdateHostMappingAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *UpdateHostMappingAct) Init() (err error) { + logger.Info("UpdateHostMappingAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return nil +} + +// Rollback TODO +// @receiver d +// +// @return err +func (d *UpdateHostMappingAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *UpdateHostMappingAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "更新主机映射", + Func: d.Service.UpdateHostMapping, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("update host mapping successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/update_zookeeper_config.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/update_zookeeper_config.go new file mode 100644 index 0000000000..3c586532d9 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/update_zookeeper_config.go @@ -0,0 +1,100 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// UpdateZooKeeperConfigAct TODO +type UpdateZooKeeperConfigAct struct { + *subcmd.BaseOptions + Service hdfs.UpdateZooKeeperConfigService +} + +// UpdateZooKeeperConfigCommand TODO +func UpdateZooKeeperConfigCommand() *cobra.Command { + act := UpdateZooKeeperConfigAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "update-zk-conf", + Short: "更新ZK配置", + Example: fmt.Sprintf(`dbactuator hdfs update-zk-conf %s`, subcmd.CmdBaseExampleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *UpdateZooKeeperConfigAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *UpdateZooKeeperConfigAct) Init() (err error) { + logger.Info("UpdateZooKeeperConfigAct Init") + // 获取db-flow 传进来的extend 参数 + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = 
subcmd.GeneralRuntimeParam + // 获取HDFS集群 安装配置,e.g. 安装目录,安装JDK版本,安装haproxy文件等 + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *UpdateZooKeeperConfigAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *UpdateZooKeeperConfigAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "更新ZK配置子步骤", + Func: d.Service.UpdateZooKeeperConfig, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("Update ZooKeeper Config successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/write_key.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/write_key.go new file mode 100644 index 0000000000..a89df995e2 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/hdfscmd/write_key.go @@ -0,0 +1,100 @@ +package hdfscmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// WriteKeyAct TODO +type WriteKeyAct struct { + *subcmd.BaseOptions + Service hdfs.WriteKeyService +} + +// WriteKeyCommand TODO +func WriteKeyCommand() *cobra.Command { + act := WriteKeyAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "write-key", + Short: "写入key", + Example: fmt.Sprintf(`dbactuator hdfs write-key %s`, subcmd.CmdBaseExampleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *WriteKeyAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *WriteKeyAct) Init() (err error) { + logger.Info("WriteKeyAct Init") + // 获取db-flow 传进来的extend 参数 + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + // 获取HDFS集群 安装配置,e.g. 
安装目录,安装JDK版本,安装haproxy文件等 + d.Service.InstallParams = hdfs.InitDefaultInstallParam() + return nil +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *WriteKeyAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *WriteKeyAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "写入Key", + Func: d.Service.WriteKey, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("Write Key successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/clean_data.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/clean_data.go new file mode 100644 index 0000000000..39ecdfe78f --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/clean_data.go @@ -0,0 +1,97 @@ +package influxdbcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CleanDataAct TODO +type CleanDataAct struct { + *subcmd.BaseOptions + Service influxdb.CleanDataComp +} + +// CleanDataCommand TODO +func CleanDataCommand() *cobra.Command { + act := CleanDataAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "clean_data", + Short: "清理influxdb目录", + Example: fmt.Sprintf(`dbactuator influxdb clean_data %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *CleanDataAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *CleanDataAct) Init() (err error) { + logger.Info("CleanDataAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *CleanDataAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *CleanDataAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "清理数据目录", + Func: d.Service.CleanData, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("clean_data successfully") + return nil +} 
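+
+// NOTE: as with the other Act commands, a failed step prints the marshaled
+// RollBackContext to stdout so the caller can re-invoke this command in
+// rollback mode.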
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/cmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/cmd.go new file mode 100644 index 0000000000..080a6f556e --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/cmd.go @@ -0,0 +1,38 @@ +package influxdbcmd + +// Todo +import ( + . "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates" + + "github.com/spf13/cobra" +) + +// NewInfluxdbCommand TODO +// Todo +func NewInfluxdbCommand() *cobra.Command { + cmds := &cobra.Command{ + Use: "influxdb [influxdb operation]", + Short: "Influxdb Operation Command Line Interface", + RunE: ValidateSubCommand(), + } + groups := templates.CommandGroups{ + { + Message: "influxdb operation sets", + Commands: []*cobra.Command{ + InitCommand(), + DecompressInfluxdbPkgCommand(), + InstallSupervisorCommand(), + InstallInfluxdbCommand(), + InitUserCommand(), + InstallTelegrafCommand(), + CleanDataCommand(), + StartProcessCommand(), + StopProcessCommand(), + RestartProcessCommand(), + }, + }, + } + groups.Add(cmds) + return cmds +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/decompress_influxdb_pkg.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/decompress_influxdb_pkg.go new file mode 100644 index 0000000000..2e50df2555 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/decompress_influxdb_pkg.go @@ -0,0 +1,103 @@ +package influxdbcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// DecompressInfluxdbPkgAct TODO +type DecompressInfluxdbPkgAct struct { + *subcmd.BaseOptions + Service influxdb.InstallInfluxdbComp +} + +// DecompressInfluxdbPkgCommand TODO +func DecompressInfluxdbPkgCommand() *cobra.Command { + act := DecompressInfluxdbPkgAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "decompress_pkg", + Short: "解压缩", + Example: fmt.Sprintf(`dbactuator influxdb decompress_pkg %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *DecompressInfluxdbPkgAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *DecompressInfluxdbPkgAct) Init() (err error) { + logger.Info("DecompressInfluxdbPkgAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *DecompressInfluxdbPkgAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *DecompressInfluxdbPkgAct) Run() (err 
error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "解压缩", + Func: d.Service.DecompressInfluxdbPkg, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("decompress_pkg successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/influxdbcmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/influxdbcmd.go new file mode 100644 index 0000000000..88bd1dc283 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/influxdbcmd.go @@ -0,0 +1,2 @@ +// Package influxdbcmd TODO +package influxdbcmd diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/init.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/init.go new file mode 100644 index 0000000000..f9c66f1c2a --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/init.go @@ -0,0 +1,103 @@ +package influxdbcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InitAct TODO +type InitAct struct { + *subcmd.BaseOptions + Service influxdb.InstallInfluxdbComp +} + +// InitCommand TODO +func InitCommand() *cobra.Command { + act := InitAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "init", + Short: "influxdb初始化", + Example: fmt.Sprintf(`dbactuator influxdb init %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InitAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InitAct) Init() (err error) { + logger.Info("InitAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InitAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InitAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "初始化", + Func: d.Service.InitInfluxdbNode, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("init successfully") + return nil +} diff --git 
a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/init_user.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/init_user.go new file mode 100644 index 0000000000..5a272b69d8 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/init_user.go @@ -0,0 +1,103 @@ +package influxdbcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InitUserAct TODO +type InitUserAct struct { + *subcmd.BaseOptions + Service influxdb.InstallInfluxdbComp +} + +// InitUserCommand TODO +func InitUserCommand() *cobra.Command { + act := InitUserAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "init_user", + Short: "influxdb user初始化", + Example: fmt.Sprintf(`dbactuator influxdb init_user %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InitUserAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InitUserAct) Init() (err error) { + logger.Info("InitUserAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InitUserAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InitUserAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "初始化influxdb user", + Func: d.Service.InitUser, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("init_user successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/install_influxdb.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/install_influxdb.go new file mode 100644 index 0000000000..19bccc5466 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/install_influxdb.go @@ -0,0 +1,103 @@ +package influxdbcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallInfluxdbAct TODO +type InstallInfluxdbAct struct { + *subcmd.BaseOptions + Service 
influxdb.InstallInfluxdbComp +} + +// InstallInfluxdbCommand TODO +func InstallInfluxdbCommand() *cobra.Command { + act := InstallInfluxdbAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_influxdb", + Short: "部署influxdb实例", + Example: fmt.Sprintf(`dbactuator influxdb install_influxdb %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallInfluxdbAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallInfluxdbAct) Init() (err error) { + logger.Info("InstallInfluxdbAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallInfluxdbAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallInfluxdbAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "部署Influxdb", + Func: d.Service.InstallInfluxdb, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install_Influxdb successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/install_supervisor.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/install_supervisor.go new file mode 100644 index 0000000000..1e762dfa34 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/install_supervisor.go @@ -0,0 +1,103 @@ +package influxdbcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallSupervisorAct TODO +type InstallSupervisorAct struct { + *subcmd.BaseOptions + Service influxdb.InstallInfluxdbComp +} + +// InstallSupervisorCommand TODO +func InstallSupervisorCommand() *cobra.Command { + act := InstallSupervisorAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_supervisor", + Short: "部署supervisor", + Example: fmt.Sprintf(`dbactuator influxdb install_supervisor %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallSupervisorAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d 
*InstallSupervisorAct) Init() (err error) { + logger.Info("InstallSupervisorAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallSupervisorAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallSupervisorAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "部署Supervisor", + Func: d.Service.InstallSupervisor, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install_supervisor successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/install_telegraf.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/install_telegraf.go new file mode 100644 index 0000000000..b24c14ae9b --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/install_telegraf.go @@ -0,0 +1,103 @@ +package influxdbcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallTelegrafAct TODO +type InstallTelegrafAct struct { + *subcmd.BaseOptions + Service influxdb.InstallInfluxdbComp +} + +// InstallTelegrafCommand TODO +func InstallTelegrafCommand() *cobra.Command { + act := InstallTelegrafAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_telegraf", + Short: "部署Telegraf", + Example: fmt.Sprintf(`dbactuator influxdb install_telegraf %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallTelegrafAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallTelegrafAct) Init() (err error) { + logger.Info("InstallTelegrafAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallTelegrafAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallTelegrafAct) 
Run() (err error) {
+	steps := subcmd.Steps{
+		/* Todo
+		{
+			FunName: "预检查",
+			Func:    d.Service.PreCheck,
+		},
+		*/
+		{
+			FunName: "部署Telegraf",
+			Func:    d.Service.InstallTelegraf,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("install_telegraf successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/restart_process.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/restart_process.go
new file mode 100644
index 0000000000..94d527dc2a
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/restart_process.go
@@ -0,0 +1,97 @@
+package influxdbcmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// RestartProcessAct TODO
+type RestartProcessAct struct {
+	*subcmd.BaseOptions
+	Service kafka.StartStopProcessComp
+}
+
+// RestartProcessCommand TODO
+func RestartProcessCommand() *cobra.Command {
+	act := RestartProcessAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "restart_process",
+		Short:   "重启influxdb进程",
+		Example: fmt.Sprintf(`dbactuator influxdb restart_process %s`, subcmd.CmdBaseExapmleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate TODO
+func (d *RestartProcessAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init TODO
+func (d *RestartProcessAct) Init() (err error) {
+	logger.Info("RestartProcessAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("Deserialize failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return d.Service.Init()
+}
+
+// Rollback TODO
+//
+// @receiver d
+// @return err
+func (d *RestartProcessAct) Rollback() (err error) {
+	var r rollback.RollBackObjects
+	if err = d.DeserializeAndValidate(&r); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	err = r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run TODO
+func (d *RestartProcessAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "重启influxdb进程",
+			Func:    d.Service.RestartProcess,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("restart_process successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/start_process.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/start_process.go
new file mode 100644
index 0000000000..aea24ca45d
--- /dev/null
+++ 
b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/start_process.go @@ -0,0 +1,97 @@ +package influxdbcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// StartProcessAct TODO +type StartProcessAct struct { + *subcmd.BaseOptions + Service influxdb.StartStopProcessComp +} + +// StartProcessCommand TODO +func StartProcessCommand() *cobra.Command { + act := StartProcessAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "start_process", + Short: "启动influxdb进程", + Example: fmt.Sprintf(`dbactuator influxdb start_process %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *StartProcessAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *StartProcessAct) Init() (err error) { + logger.Info("StartProcessAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *StartProcessAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *StartProcessAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "启动influxdb进程", + Func: d.Service.StartProcess, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("start_process successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/stop_process.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/stop_process.go new file mode 100644 index 0000000000..7d4fc883be --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/influxdbcmd/stop_process.go @@ -0,0 +1,97 @@ +package influxdbcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// StopProcessAct TODO +type StopProcessAct struct { + *subcmd.BaseOptions + Service kafka.StartStopProcessComp +} + +// StopProcessCommand TODO +func StopProcessCommand() *cobra.Command { + act := StopProcessAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "stop_process", + Short: "停止influxdb进程", + Example: fmt.Sprintf(`dbactuator influxdb stop_process %s`, 
subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *StopProcessAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *StopProcessAct) Init() (err error) { + logger.Info("StopProcessAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *StopProcessAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *StopProcessAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "停止influxdb进程", + Func: d.Service.StopProcess, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("stop_process successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/check_reassignment.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/check_reassignment.go new file mode 100644 index 0000000000..35967c51b8 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/check_reassignment.go @@ -0,0 +1,97 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CheckReassignmentAct TODO +type CheckReassignmentAct struct { + *subcmd.BaseOptions + Service kafka.DecomBrokerComp +} + +// CheckReassignmentCommand TODO +func CheckReassignmentCommand() *cobra.Command { + act := CheckReassignmentAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "check_reassign", + Short: "检查搬迁进度", + Example: fmt.Sprintf(`dbactuator kafka check_reassign %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *CheckReassignmentAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *CheckReassignmentAct) Init() (err error) { + logger.Info("CheckReassignmentAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *CheckReassignmentAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = 
d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *CheckReassignmentAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "Broker缩容", + Func: d.Service.DoPartitionCheck, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("check_reassign successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/clean_data.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/clean_data.go new file mode 100644 index 0000000000..eea09c5a39 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/clean_data.go @@ -0,0 +1,97 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CleanDataAct TODO +type CleanDataAct struct { + *subcmd.BaseOptions + Service kafka.CleanDataComp +} + +// CleanDataCommand TODO +func CleanDataCommand() *cobra.Command { + act := CleanDataAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "clean_data", + Short: "清理kafka目录", + Example: fmt.Sprintf(`dbactuator kafka clean_data %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *CleanDataAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *CleanDataAct) Init() (err error) { + logger.Info("CleanDataAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *CleanDataAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *CleanDataAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "清理数据目录", + Func: d.Service.CleanData, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("clean_data successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/cmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/cmd.go new file mode 100644 index 0000000000..e05491e318 --- /dev/null +++ 
b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/cmd.go @@ -0,0 +1,45 @@ +package kafkacmd + +// Todo +import ( + . "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates" + + "github.com/spf13/cobra" +) + +// NewKafkaCommand TODO +// Todo +func NewKafkaCommand() *cobra.Command { + cmds := &cobra.Command{ + Use: "kafka [kafka operation]", + Short: "Kafka Operation Command Line Interface", + RunE: ValidateSubCommand(), + } + groups := templates.CommandGroups{ + { + Message: "kafka operation sets", + Commands: []*cobra.Command{ + InitCommand(), + DecompressKafkaPkgCommand(), + InstallSupervisorCommand(), + InstallZookeeperCommand(), + InitKafkaUserCommand(), + InstallBrokerCommand(), + InstallManagerCommand(), + CleanDataCommand(), + StartProcessCommand(), + StopProcessCommand(), + RestartProcessCommand(), + CheckReassignmentCommand(), + ReduceBrokerCommand(), + ReconfigAddCommand(), + ReconfigRemoveCommand(), + RestartBrokerCommand(), + ReplaceBrokerCommand(), + }, + }, + } + groups.Add(cmds) + return cmds +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/decompress_kafka_pkg.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/decompress_kafka_pkg.go new file mode 100644 index 0000000000..0073080b07 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/decompress_kafka_pkg.go @@ -0,0 +1,103 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// DecompressKafkaPkgAct TODO +type DecompressKafkaPkgAct struct { + *subcmd.BaseOptions + Service kafka.InstallKafkaComp +} + +// DecompressKafkaPkgCommand TODO +func DecompressKafkaPkgCommand() *cobra.Command { + act := DecompressKafkaPkgAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "decompress_pkg", + Short: "解压缩", + Example: fmt.Sprintf(`dbactuator kafka decompress_pkg %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *DecompressKafkaPkgAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *DecompressKafkaPkgAct) Init() (err error) { + logger.Info("DecompressKafkaPkgAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *DecompressKafkaPkgAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *DecompressKafkaPkgAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "解压缩", + Func: 
d.Service.DecompressKafkaPkg, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("decompress_pkg successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/init.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/init.go new file mode 100644 index 0000000000..01171bde78 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/init.go @@ -0,0 +1,103 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InitAct TODO +type InitAct struct { + *subcmd.BaseOptions + Service kafka.InstallKafkaComp +} + +// InitCommand TODO +func InitCommand() *cobra.Command { + act := InitAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "init", + Short: "kafka初始化", + Example: fmt.Sprintf(`dbactuator kafka init %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InitAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InitAct) Init() (err error) { + logger.Info("InitAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InitAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InitAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "初始化", + Func: d.Service.InitKafkaNode, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("init successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/init_kafkaUser.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/init_kafkaUser.go new file mode 100644 index 0000000000..d79ac51c26 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/init_kafkaUser.go @@ -0,0 +1,103 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + 
"dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InitKafkaUserAct TODO +type InitKafkaUserAct struct { + *subcmd.BaseOptions + Service kafka.InstallKafkaComp +} + +// InitKafkaUserCommand TODO +func InitKafkaUserCommand() *cobra.Command { + act := InitKafkaUserAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "init_kafkaUser", + Short: "kafkaUser初始化", + Example: fmt.Sprintf(`dbactuator kafka init_kafkaUser %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InitKafkaUserAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InitKafkaUserAct) Init() (err error) { + logger.Info("InitKafkaUserAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InitKafkaUserAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InitKafkaUserAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "初始化kafkaUser", + Func: d.Service.InitKafkaUser, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("init successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_broker.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_broker.go new file mode 100644 index 0000000000..8af99a3dfc --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_broker.go @@ -0,0 +1,103 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallKafkaBrokerAct TODO +type InstallKafkaBrokerAct struct { + *subcmd.BaseOptions + Service kafka.InstallKafkaComp +} + +// InstallBrokerCommand TODO +func InstallBrokerCommand() *cobra.Command { + act := InstallKafkaBrokerAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_broker", + Short: "部署broker实例", + Example: fmt.Sprintf(`dbactuator kafka install_broker %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return 
cmd +} + +// Validate TODO +func (d *InstallKafkaBrokerAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallKafkaBrokerAct) Init() (err error) { + logger.Info("InstallKafkaBrokerAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallKafkaBrokerAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallKafkaBrokerAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "部署Broker", + Func: d.Service.InstallBroker, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install_Broker successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_manager.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_manager.go new file mode 100644 index 0000000000..989b05a04f --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_manager.go @@ -0,0 +1,103 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallKafkaManagerAct TODO +type InstallKafkaManagerAct struct { + *subcmd.BaseOptions + Service kafka.InstallKafkaComp +} + +// InstallManagerCommand TODO +func InstallManagerCommand() *cobra.Command { + act := InstallKafkaManagerAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_manager", + Short: "部署kafka manager", + Example: fmt.Sprintf(`dbactuator kafka install_manager %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallKafkaManagerAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallKafkaManagerAct) Init() (err error) { + logger.Info("InstallKafkaManagerAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallKafkaManagerAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = 
r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run TODO
+func (d *InstallKafkaManagerAct) Run() (err error) {
+	steps := subcmd.Steps{
+		/* Todo
+		{
+			FunName: "预检查",
+			Func:    d.Service.PreCheck,
+		},
+		*/
+		{
+			FunName: "部署Manager",
+			Func:    d.Service.InstallManager,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("install_manager successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_supervisor.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_supervisor.go
new file mode 100644
index 0000000000..3c0853f478
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_supervisor.go
@@ -0,0 +1,103 @@
+package kafkacmd
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// InstallSupervisorAct TODO
+type InstallSupervisorAct struct {
+	*subcmd.BaseOptions
+	Service kafka.InstallKafkaComp
+}
+
+// InstallSupervisorCommand TODO
+func InstallSupervisorCommand() *cobra.Command {
+	act := InstallSupervisorAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "install_supervisor",
+		Short:   "部署supervisor",
+		Example: fmt.Sprintf(`dbactuator kafka install_supervisor %s`, subcmd.CmdBaseExapmleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate TODO
+func (d *InstallSupervisorAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init TODO
+func (d *InstallSupervisorAct) Init() (err error) {
+	logger.Info("InstallSupervisorAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return d.Service.InitDefaultParam()
+}
+
+// Rollback TODO
+//
+//	@receiver d
+//	@return err
+func (d *InstallSupervisorAct) Rollback() (err error) {
+	var r rollback.RollBackObjects
+	if err = d.DeserializeAndValidate(&r); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	err = r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run TODO
+func (d *InstallSupervisorAct) Run() (err error) {
+	steps := subcmd.Steps{
+		/* Todo
+		{
+			FunName: "预检查",
+			Func:    d.Service.PreCheck,
+		},
+		*/
+		{
+			FunName: "部署Supervisor",
+			Func:    d.Service.InstallSupervisor,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		}
+		fmt.Printf("%s\n", string(rollbackCtxb))
+		return err
+	}
+
+	logger.Info("install_supervisor successfully")
+	return nil
+}
diff --git 
a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_zookeeper.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_zookeeper.go new file mode 100644 index 0000000000..8ed70d7e98 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/install_zookeeper.go @@ -0,0 +1,103 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallKafkaZookeeperAct TODO +type InstallKafkaZookeeperAct struct { + *subcmd.BaseOptions + Service kafka.InstallKafkaComp +} + +// InstallZookeeperCommand TODO +func InstallZookeeperCommand() *cobra.Command { + act := InstallKafkaZookeeperAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_zookeeper", + Short: "部署zookeeper实例", + Example: fmt.Sprintf(`dbactuator kafka install_zookeeper %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallKafkaZookeeperAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallKafkaZookeeperAct) Init() (err error) { + logger.Info("InstallKafkaZookeeperAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallKafkaZookeeperAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallKafkaZookeeperAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "部署zookeeper", + Func: d.Service.InstallZookeeper, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install_zookeeper successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/kafkacmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/kafkacmd.go new file mode 100644 index 0000000000..deebd8350d --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/kafkacmd.go @@ -0,0 +1,2 @@ +// Package kafkacmd TODO +package kafkacmd diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/reconfig_add.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/reconfig_add.go new file mode 100644 index 0000000000..2ae81e8f81 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/reconfig_add.go @@ -0,0 
+1,97 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// ReconfigAddAct TODO +type ReconfigAddAct struct { + *subcmd.BaseOptions + Service kafka.ReconfigComp +} + +// ReconfigAddCommand TODO +func ReconfigAddCommand() *cobra.Command { + act := ReconfigAddAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "reconfig_add", + Short: "增加zookeeper节点", + Example: fmt.Sprintf(`dbactuator kafka reconfig_add %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *ReconfigAddAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *ReconfigAddAct) Init() (err error) { + logger.Info("ReconfigAddAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *ReconfigAddAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *ReconfigAddAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "增加zookeeper节点", + Func: d.Service.ReconfigAdd, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("reconfig_add successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/reconfig_remove.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/reconfig_remove.go new file mode 100644 index 0000000000..9b0c38888b --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/reconfig_remove.go @@ -0,0 +1,97 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// ReconfigRemoveAct TODO +type ReconfigRemoveAct struct { + *subcmd.BaseOptions + Service kafka.ReconfigComp +} + +// ReconfigRemoveCommand TODO +func ReconfigRemoveCommand() *cobra.Command { + act := ReconfigRemoveAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "reconfig_remove", + Short: "减少zookeeper节点", + Example: fmt.Sprintf(`dbactuator kafka reconfig_remove %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + 
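+			// Shared Act pattern note (a sketch of the assumed control flow; the
+			// subcmd.BaseOptions internals are not shown in this patch): when
+			// RollBack is set, presumably from a rollback flag parsed into
+			// BaseOptions, Init and Run are skipped and only the rollback
+			// objects recorded by a previous, failed execution are replayed.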
util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *ReconfigRemoveAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *ReconfigRemoveAct) Init() (err error) { + logger.Info("ReconfigRemoveAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *ReconfigRemoveAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *ReconfigRemoveAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "减少zookeeper节点", + Func: d.Service.ReconfigRemove, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("reconfig_remove successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/reduce_broker.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/reduce_broker.go new file mode 100644 index 0000000000..5c00805581 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/reduce_broker.go @@ -0,0 +1,97 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// ReduceBrokerAct TODO +type ReduceBrokerAct struct { + *subcmd.BaseOptions + Service kafka.DecomBrokerComp +} + +// ReduceBrokerCommand TODO +func ReduceBrokerCommand() *cobra.Command { + act := ReduceBrokerAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "reduce_broker", + Short: "broker缩容", + Example: fmt.Sprintf(`dbactuator kafka reduce_broker %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *ReduceBrokerAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *ReduceBrokerAct) Init() (err error) { + logger.Info("ReduceBrokerAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *ReduceBrokerAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed 
%s", err.Error()) + } + return +} + +// Run TODO +func (d *ReduceBrokerAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "Broker缩容", + Func: d.Service.DoDecomBrokers, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("reduce_broker successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/replace_broker.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/replace_broker.go new file mode 100644 index 0000000000..e86d7a155d --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/replace_broker.go @@ -0,0 +1,97 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// ReplaceBrokerAct TODO +type ReplaceBrokerAct struct { + *subcmd.BaseOptions + Service kafka.DecomBrokerComp +} + +// ReplaceBrokerCommand TODO +func ReplaceBrokerCommand() *cobra.Command { + act := ReplaceBrokerAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "replace_broker", + Short: "broker替换", + Example: fmt.Sprintf(`dbactuator kafka replace_broker %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *ReplaceBrokerAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *ReplaceBrokerAct) Init() (err error) { + logger.Info("ReplaceBrokerAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *ReplaceBrokerAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *ReplaceBrokerAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "Broker替换", + Func: d.Service.DoReplaceBrokers, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("replace_broker successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/restart_broker.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/restart_broker.go new file mode 100644 index 0000000000..02ec642f16 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/restart_broker.go @@ -0,0 +1,97 @@ +package 
kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// RestartBrokerAct TODO +type RestartBrokerAct struct { + *subcmd.BaseOptions + Service kafka.StartStopProcessComp +} + +// RestartBrokerCommand TODO +func RestartBrokerCommand() *cobra.Command { + act := RestartBrokerAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "restart_broker", + Short: "重启broker进程", + Example: fmt.Sprintf(`dbactuator kafka restart_broker %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *RestartBrokerAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *RestartBrokerAct) Init() (err error) { + logger.Info("RestartBrokerAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *RestartBrokerAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *RestartBrokerAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "重启broker进程", + Func: d.Service.RestartBroker, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("restart_broker successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/restart_process.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/restart_process.go new file mode 100644 index 0000000000..2857f5de87 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/restart_process.go @@ -0,0 +1,97 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// RestartProcessAct TODO +type RestartProcessAct struct { + *subcmd.BaseOptions + Service kafka.StartStopProcessComp +} + +// RestartProcessCommand TODO +func RestartProcessCommand() *cobra.Command { + act := RestartProcessAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "restart_process", + Short: "重启kafka进程", + Example: fmt.Sprintf(`dbactuator kafka restart_process %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if 
act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *RestartProcessAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *RestartProcessAct) Init() (err error) { + logger.Info("RestartProcessAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *RestartProcessAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *RestartProcessAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "重启kafka进程", + Func: d.Service.RestartProcess, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("restart_process successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/start_process.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/start_process.go new file mode 100644 index 0000000000..e8abd40fcf --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/start_process.go @@ -0,0 +1,97 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// StartProcessAct TODO +type StartProcessAct struct { + *subcmd.BaseOptions + Service kafka.StartStopProcessComp +} + +// StartProcessCommand TODO +func StartProcessCommand() *cobra.Command { + act := StartProcessAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "start_process", + Short: "启动kafka进程", + Example: fmt.Sprintf(`dbactuator kafka start_process %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *StartProcessAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *StartProcessAct) Init() (err error) { + logger.Info("StartProcessAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *StartProcessAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + 
logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *StartProcessAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "启动kafka进程", + Func: d.Service.StartProcess, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("start_process successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/stop_process.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/stop_process.go new file mode 100644 index 0000000000..a6b671f057 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/kafkacmd/stop_process.go @@ -0,0 +1,97 @@ +package kafkacmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// StopProcessAct TODO +type StopProcessAct struct { + *subcmd.BaseOptions + Service kafka.StartStopProcessComp +} + +// StopProcessCommand TODO +func StopProcessCommand() *cobra.Command { + act := StopProcessAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "stop_process", + Short: "停止kafka进程", + Example: fmt.Sprintf(`dbactuator kafka stop_process %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *StopProcessAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *StopProcessAct) Init() (err error) { + logger.Info("StopProcessAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *StopProcessAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *StopProcessAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "停止kafka进程", + Func: d.Service.StopProcess, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("stop_process successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/add_hosts.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/add_hosts.go new file mode 100644 index 0000000000..41a375f28f --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/add_hosts.go @@ -0,0 +1,103 @@ +package pulsarcmd + 
+import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// AddHostsAct TODO +type AddHostsAct struct { + *subcmd.BaseOptions + Service pulsar.InstallPulsarComp +} + +// AddHostsCommand TODO +func AddHostsCommand() *cobra.Command { + act := AddHostsAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "add_hosts", + Short: "增加hosts文件配置", + Example: fmt.Sprintf(`dbactuator pulsar add_hosts %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *AddHostsAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *AddHostsAct) Init() (err error) { + logger.Info("AddHostsAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *AddHostsAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *AddHostsAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "增加hosts文件配置", + Func: d.Service.AddHostsFile, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("add hosts successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_broker_config.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_broker_config.go new file mode 100644 index 0000000000..48bf17c822 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_broker_config.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CheckBrokerConfigAct TODO +type CheckBrokerConfigAct struct { + *subcmd.BaseOptions + Service pulsar.CheckPulsarShrinkComp +} + +// CheckBrokerConfigCommand TODO +func CheckBrokerConfigCommand() *cobra.Command { + act := CheckBrokerConfigAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "check_broker_config", + Short: "检查pulsar broker配置", + Example: fmt.Sprintf(`dbactuator pulsar check_broker_config %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd 
*cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *CheckBrokerConfigAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *CheckBrokerConfigAct) Init() (err error) { + logger.Info("CheckBrokerConfigAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *CheckBrokerConfigAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *CheckBrokerConfigAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "pulsar broker配置检查", + Func: d.Service.CheckBrokerConf, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("check_broker_config successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_ledger_metadata.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_ledger_metadata.go new file mode 100644 index 0000000000..592601b74a --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_ledger_metadata.go @@ -0,0 +1,101 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CheckLedgerMetadataAct 检查ledger的metadata +type CheckLedgerMetadataAct struct { + *subcmd.BaseOptions + Service pulsar.CheckPulsarShrinkComp +} + +// CheckLedgerMetadataCommand 检查ledger metadata的命令 +func CheckLedgerMetadataCommand() *cobra.Command { + // dbactuator pulsar check_ledger_metadata --payload xxxxx + act := CheckLedgerMetadataAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "check_ledger_metadata", + Short: "检查ledger的元数据", + Example: fmt.Sprintf(`dbactuator pulsar check_ledger_metadata %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate 校验函数 +func (d *CheckLedgerMetadataAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init 初始化函数 +func (d *CheckLedgerMetadataAct) Init() (err error) { + logger.Info("CheckLedgerMetadataAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = 
subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback 回滚函数 +func (d *CheckLedgerMetadataAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run 运行函数 +func (d *CheckLedgerMetadataAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "pulsar 检查Ledger的元数据", + Func: d.Service.CheckLedgerMetadata, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("check_ledger_metadata successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_namespace_config.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_namespace_config.go new file mode 100644 index 0000000000..2fe9508f75 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_namespace_config.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CheckNamespaceConfigAct TODO +type CheckNamespaceConfigAct struct { + *subcmd.BaseOptions + Service pulsar.CheckPulsarShrinkComp +} + +// CheckNamespaceConfigCommand TODO +func CheckNamespaceConfigCommand() *cobra.Command { + act := CheckNamespaceConfigAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "check_namespace_config", + Short: "检查pulsar namespace配置", + Example: fmt.Sprintf(`dbactuator pulsar check_namespace_config %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *CheckNamespaceConfigAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *CheckNamespaceConfigAct) Init() (err error) { + logger.Info("CheckNamespaceConfigAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *CheckNamespaceConfigAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *CheckNamespaceConfigAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "pulsar namespace配置检查", + Func: d.Service.CheckNamespaceEnsembleSize, + }, + } + + 
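+	// Assumed subcmd.Steps contract (inferred from how every Act in this patch
+	// uses it; the type itself is defined elsewhere): Run executes each Func in
+	// order and returns the first non-nil error, at which point the rollback
+	// context gathered so far is dumped as JSON below for manual rollback.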
if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("check_namespace_config successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_under_replicated.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_under_replicated.go new file mode 100644 index 0000000000..245b54c40a --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/check_under_replicated.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CheckUnderReplicatedAct TODO +type CheckUnderReplicatedAct struct { + *subcmd.BaseOptions + Service pulsar.CheckPulsarShrinkComp +} + +// CheckUnderReplicatedCommand TODO +func CheckUnderReplicatedCommand() *cobra.Command { + act := CheckUnderReplicatedAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "check_under_replicated", + Short: "检查未复制的ledger", + Example: fmt.Sprintf(`dbactuator pulsar check_under_replicated %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *CheckUnderReplicatedAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *CheckUnderReplicatedAct) Init() (err error) { + logger.Info("CheckUnderReplicatedAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *CheckUnderReplicatedAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *CheckUnderReplicatedAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "pulsar 检查未复制的Ledger", + Func: d.Service.CheckUnderReplicated, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("check_under_replicated successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/clean_data.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/clean_data.go new file mode 100644 index 0000000000..f2b2a58966 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/clean_data.go @@ -0,0 
+1,97 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// CleanDataAct TODO +type CleanDataAct struct { + *subcmd.BaseOptions + Service pulsar.CleanDataComp +} + +// CleanDataCommand TODO +func CleanDataCommand() *cobra.Command { + act := CleanDataAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "clean_data", + Short: "清理pulsar目录", + Example: fmt.Sprintf(`dbactuator pulsar clean_data %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *CleanDataAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *CleanDataAct) Init() (err error) { + logger.Info("CleanDataAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *CleanDataAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *CleanDataAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "清理数据目录", + Func: d.Service.CleanData, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("clean_data successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/cmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/cmd.go new file mode 100644 index 0000000000..4fbb55a512 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/cmd.go @@ -0,0 +1,49 @@ +package pulsarcmd + +import ( + . 
"dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates" + + "github.com/spf13/cobra" +) + +// NewPulsarCommand TODO +func NewPulsarCommand() *cobra.Command { + cmds := &cobra.Command{ + Use: "pulsar [pulsar operation]", + Short: "Pulsar Operation Command Line Interface", + RunE: ValidateSubCommand(), + } + groups := templates.CommandGroups{ + { + Message: "pulsar operation sets", + Commands: []*cobra.Command{ + InitCommand(), + DecompressPulsarPkgCommand(), + InitPulsarClusterCommand(), + InstallPulsarZookeeperCommand(), + InstallPulsarBookkeeperCommand(), + InstallPulsarBrokerCommand(), + InstallSupervisorCommand(), + // StartPulsarBrokerCommand(), + CleanDataCommand(), + CheckNamespaceConfigCommand(), + CheckBrokerConfigCommand(), + CheckUnderReplicatedCommand(), + DecommissionBookieCommand(), + StopProcessCommand(), + StartProcessCommand(), + RestartProcessCommand(), + InstallPulsarManagerCommand(), + AddHostsCommand(), + ModifyHostsCommand(), + InitPulsarManagerCommand(), + SetBookieReadOnlyCommand(), + UnsetBookieReadOnlyCommand(), + CheckLedgerMetadataCommand(), + }, + }, + } + groups.Add(cmds) + return cmds +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/decommission_bookie.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/decommission_bookie.go new file mode 100644 index 0000000000..68635e1815 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/decommission_bookie.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// DecommissionBookieAct TODO +type DecommissionBookieAct struct { + *subcmd.BaseOptions + Service pulsar.CheckPulsarShrinkComp +} + +// DecommissionBookieCommand TODO +func DecommissionBookieCommand() *cobra.Command { + act := DecommissionBookieAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "decommission_bookie", + Short: "缩容bookie", + Example: fmt.Sprintf(`dbactuator pulsar docommission_bookie %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *DecommissionBookieAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *DecommissionBookieAct) Init() (err error) { + logger.Info("DecommissionBookieAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *DecommissionBookieAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *DecommissionBookieAct) Run() (err error) { + steps := subcmd.Steps{ + 
/* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "pulsar 缩容bookie", + Func: d.Service.DecommissionBookie, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("decommission_bookie successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/decompress_pkg.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/decompress_pkg.go new file mode 100644 index 0000000000..b32fe9e453 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/decompress_pkg.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// DecompressPulsarPkgAct TODO +type DecompressPulsarPkgAct struct { + *subcmd.BaseOptions + Service pulsar.InstallPulsarComp +} + +// DecompressPulsarPkgCommand TODO +func DecompressPulsarPkgCommand() *cobra.Command { + act := DecompressPulsarPkgAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "decompress_pkg", + Short: "解压缩", + Example: fmt.Sprintf(`dbactuator pulsar decompress_pkg %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *DecompressPulsarPkgAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *DecompressPulsarPkgAct) Init() (err error) { + logger.Info("DecompressPulsarPkgAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *DecompressPulsarPkgAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *DecompressPulsarPkgAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "解压缩", + Func: d.Service.DecompressPulsarPkg, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("decompress_pkg successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/init.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/init.go new file mode 100644 index 0000000000..604656eefa --- /dev/null +++ 
b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/init.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InitAct TODO +type InitAct struct { + *subcmd.BaseOptions + Service pulsar.InstallPulsarComp +} + +// InitCommand TODO +func InitCommand() *cobra.Command { + act := InitAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "init", + Short: "pulsar初始化", + Example: fmt.Sprintf(`dbactuator pulsar init %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InitAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InitAct) Init() (err error) { + logger.Info("InitAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InitAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InitAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "初始化", + Func: d.Service.InitPulsarDirs, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("init successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/init_cluster.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/init_cluster.go new file mode 100644 index 0000000000..ab516c70e4 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/init_cluster.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InitPulsarClusterAct TODO +type InitPulsarClusterAct struct { + *subcmd.BaseOptions + Service pulsar.InstallPulsarComp +} + +// InitPulsarClusterCommand TODO +func InitPulsarClusterCommand() *cobra.Command { + act := InitPulsarClusterAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "init_cluster", + Short: "pulsar集群初始化", + Example: fmt.Sprintf(`dbactuator pulsar init_cluster %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd 
*cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InitPulsarClusterAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InitPulsarClusterAct) Init() (err error) { + logger.Info("InitPulsarClusterAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InitPulsarClusterAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InitPulsarClusterAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "初始化", + Func: d.Service.InitCluster, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("init cluster successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/init_pulsar_manager.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/init_pulsar_manager.go new file mode 100644 index 0000000000..64f3bb9d4c --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/init_pulsar_manager.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InitPulsarManagerAct TODO +type InitPulsarManagerAct struct { + *subcmd.BaseOptions + Service pulsar.InstallPulsarComp +} + +// InitPulsarManagerCommand TODO +func InitPulsarManagerCommand() *cobra.Command { + act := InitPulsarManagerAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "init_pulsar_manager", + Short: "pulsar manager初始化", + Example: fmt.Sprintf(`dbactuator pulsar init_pulsar_manager %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InitPulsarManagerAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InitPulsarManagerAct) Init() (err error) { + logger.Info("InitPulsarManagerAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d 
*InitPulsarManagerAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InitPulsarManagerAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "初始化", + Func: d.Service.InitPulsarManager, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", rerr.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("init pulsar manager successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_bookkeeper.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_bookkeeper.go new file mode 100644 index 0000000000..f158a0b7c0 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_bookkeeper.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallPulsarBookkeeperAct TODO +type InstallPulsarBookkeeperAct struct { + *subcmd.BaseOptions + Service pulsar.InstallPulsarComp +} + +// InstallPulsarBookkeeperCommand TODO +func InstallPulsarBookkeeperCommand() *cobra.Command { + act := InstallPulsarBookkeeperAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_bookkeeper", + Short: "pulsar安装bookkeeper", + Example: fmt.Sprintf(`dbactuator pulsar install_bookkeeper %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallPulsarBookkeeperAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallPulsarBookkeeperAct) Init() (err error) { + logger.Info("InstallPulsarBookkeeperAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallPulsarBookkeeperAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallPulsarBookkeeperAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "Pulsar安装bookkeeper", + Func: d.Service.InstallBookkeeper, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) +
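+ // NOTE: the rollback context marshaled here is printed to stdout below so the caller can keep it and later hand it back to Rollback(), which deserializes it into rollback.RollBackObjects and replays it. A hedged sketch of that round trip (illustrative only, not part of this patch): + // var r rollback.RollBackObjects + // _ = json.Unmarshal(rollbackCtxb, &r) + // _ = r.RollBack()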
if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install bookkeeper successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_broker.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_broker.go new file mode 100644 index 0000000000..69c56ab47d --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_broker.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallPulsarBrokerAct TODO +type InstallPulsarBrokerAct struct { + *subcmd.BaseOptions + Service pulsar.InstallPulsarComp +} + +// InstallPulsarBrokerCommand TODO +func InstallPulsarBrokerCommand() *cobra.Command { + act := InstallPulsarBrokerAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_broker", + Short: "安装pulsar broker", + Example: fmt.Sprintf(`dbactuator pulsar install_broker %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallPulsarBrokerAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallPulsarBrokerAct) Init() (err error) { + logger.Info("InstallPulsarBrokerAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallPulsarBrokerAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallPulsarBrokerAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "安装Pulsar Broker", + Func: d.Service.InstallBroker, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install broker successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_pulsar_manager.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_pulsar_manager.go new file mode 100644 index 0000000000..d5d675e165 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_pulsar_manager.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + 
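+ // Every subcommand in this package follows the same pattern: a thin cobra wrapper whose Service field (a component from pkg/components/pulsar, here pulsar.InstallPulsarComp) carries the actual operation logic.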
"dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallPulsarManagerAct TODO +type InstallPulsarManagerAct struct { + *subcmd.BaseOptions + Service pulsar.InstallPulsarComp +} + +// InstallPulsarManagerCommand TODO +func InstallPulsarManagerCommand() *cobra.Command { + act := InstallPulsarManagerAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_pulsar_manager", + Short: "安装pulsar manager", + Example: fmt.Sprintf(`dbactuator pulsar install_pulsar_manager %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallPulsarManagerAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallPulsarManagerAct) Init() (err error) { + logger.Info("InstallPulsarManagerAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallPulsarManagerAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallPulsarManagerAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "安装pulsar manager", + Func: d.Service.InstallPulsarManager, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install pulsar manager successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_supervisor.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_supervisor.go new file mode 100644 index 0000000000..7a0d49d793 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_supervisor.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallSupervisorAct TODO +type InstallSupervisorAct struct { + *subcmd.BaseOptions + Service pulsar.InstallPulsarComp +} + +// InstallSupervisorCommand TODO +func InstallSupervisorCommand() *cobra.Command { + act := InstallSupervisorAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_supervisor", + Short: "部署supervisor", + Example: 
fmt.Sprintf(`dbactuator pulsar install_supervisor %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallSupervisorAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallSupervisorAct) Init() (err error) { + logger.Info("InstallSupervisorAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallSupervisorAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallSupervisorAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "部署Supervisor", + Func: d.Service.InstallSupervisor, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install_supervisor successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_zookeeper.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_zookeeper.go new file mode 100644 index 0000000000..9e44cb078e --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/install_zookeeper.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// InstallPulsarZookeeperAct TODO +type InstallPulsarZookeeperAct struct { + *subcmd.BaseOptions + Service pulsar.InstallPulsarComp +} + +// InstallPulsarZookeeperCommand TODO +func InstallPulsarZookeeperCommand() *cobra.Command { + act := InstallPulsarZookeeperAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "install_zookeeper", + Short: "安装pulsar zookeeper", + Example: fmt.Sprintf(`dbactuator pulsar install_zookeeper %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *InstallPulsarZookeeperAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *InstallPulsarZookeeperAct) Init() (err error) { + logger.Info("InstallPulsarZookeeperAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + 
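+ // Deserialize (defined on subcmd.BaseOptions) expects the two-part payload {"general":{...},"extend":{...}} and unmarshals "extend" into d.Service.Params. A hedged example of a raw payload, with the "extend" fields invented purely for illustration: + // {"general":{},"extend":{"host":"127.0.0.1","pulsar_version":"2.10.1"}}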
d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *InstallPulsarZookeeperAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *InstallPulsarZookeeperAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "安装pulsar zookeeper", + Func: d.Service.InstallZookeeper, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("install zookeeper successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/modify_hosts.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/modify_hosts.go new file mode 100644 index 0000000000..346410a6e6 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/modify_hosts.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// ModifyHostsAct TODO +type ModifyHostsAct struct { + *subcmd.BaseOptions + Service pulsar.InstallPulsarComp +} + +// ModifyHostsCommand TODO +func ModifyHostsCommand() *cobra.Command { + act := ModifyHostsAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "modify_hosts", + Short: "修改hosts文件配置", + Example: fmt.Sprintf(`dbactuator pulsar modify_hosts %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *ModifyHostsAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *ModifyHostsAct) Init() (err error) { + logger.Info("ModifyHostsAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *ModifyHostsAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *ModifyHostsAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "修改hosts文件配置", + Func: d.Service.ModifyHostsFile, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := 
json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("modify hosts successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/pulsarcmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/pulsarcmd.go new file mode 100644 index 0000000000..53ce85a4ce --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/pulsarcmd.go @@ -0,0 +1,2 @@ +// Package pulsarcmd TODO +package pulsarcmd diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/restart_process.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/restart_process.go new file mode 100644 index 0000000000..7ce1238966 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/restart_process.go @@ -0,0 +1,97 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// RestartProcessAct TODO +type RestartProcessAct struct { + *subcmd.BaseOptions + Service pulsar.StartStopProcessComp +} + +// RestartProcessCommand TODO +func RestartProcessCommand() *cobra.Command { + act := RestartProcessAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "restart_process", + Short: "重启pulsar进程", + Example: fmt.Sprintf(`dbactuator pulsar restart_process %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *RestartProcessAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *RestartProcessAct) Init() (err error) { + logger.Info("RestartProcessAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *RestartProcessAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *RestartProcessAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "重启pulsar进程", + Func: d.Service.RestartProcess, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("restart_process successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/set_bookie_readonly.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/set_bookie_readonly.go new file mode 
100644 index 0000000000..e5e6478824 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/set_bookie_readonly.go @@ -0,0 +1,95 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// SetBookieReadOnlyAct 设置bookie只读结构体 +type SetBookieReadOnlyAct struct { + *subcmd.BaseOptions + Service pulsar.CheckPulsarShrinkComp +} + +// SetBookieReadOnlyCommand 设置bookie只读命令 +func SetBookieReadOnlyCommand() *cobra.Command { + // dbactuator pulsar set_bookie_readonly --payload xxxxx + act := SetBookieReadOnlyAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "set_bookie_readonly", + Short: "设置bookie只读状态", + Example: fmt.Sprintf(`dbactuator pulsar set_bookie_readonly %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate 校验函数 +func (d *SetBookieReadOnlyAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init 初始化函数 +func (d *SetBookieReadOnlyAct) Init() (err error) { + logger.Info("SetBookieReadOnlyAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback 回滚函数 +func (d *SetBookieReadOnlyAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run 运行函数 +func (d *SetBookieReadOnlyAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "设置bookie只读状态", + Func: d.Service.SetBookieReadonly, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", rerr.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("set_bookie_readonly successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/start_broker.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/start_broker.go new file mode 100644 index 0000000000..25cc63c0d5 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/start_broker.go @@ -0,0 +1,103 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// StartPulsarBrokerAct TODO +type StartPulsarBrokerAct struct { + *subcmd.BaseOptions + Service pulsar.InstallPulsarComp +} + +// StartPulsarBrokerCommand TODO +func StartPulsarBrokerCommand() *cobra.Command { + act :=
StartPulsarBrokerAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "start_broker", + Short: "启动pulsar broker", + Example: fmt.Sprintf(`dbactuator pulsar start_broker %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *StartPulsarBrokerAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *StartPulsarBrokerAct) Init() (err error) { + logger.Info("StartPulsarBrokerAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.InitDefaultParam() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *StartPulsarBrokerAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *StartPulsarBrokerAct) Run() (err error) { + steps := subcmd.Steps{ + /* Todo + { + FunName: "预检查", + Func: d.Service.PreCheck, + }, + */ + { + FunName: "启动broker", + Func: d.Service.StartBroker, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("start broker successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/start_process.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/start_process.go new file mode 100644 index 0000000000..f713f08410 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/start_process.go @@ -0,0 +1,97 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// StartProcessAct TODO +type StartProcessAct struct { + *subcmd.BaseOptions + Service pulsar.StartStopProcessComp +} + +// StartProcessCommand TODO +func StartProcessCommand() *cobra.Command { + act := StartProcessAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "start_process", + Short: "启动pulsar进程", + Example: fmt.Sprintf(`dbactuator pulsar start_process %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *StartProcessAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *StartProcessAct) Init() (err error) { + logger.Info("StartProcessAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + 
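+ // For manual testing, the payload passed via -p is base64-encoded JSON unless PayloadFormat is "raw" (see BaseOptions.Deserialize below); in Go terms one could build it as (illustrative): + // payload := base64.StdEncoding.EncodeToString([]byte(`{"general":{},"extend":{}}`))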
} + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *StartProcessAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *StartProcessAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "启动pulsar进程", + Func: d.Service.StartProcess, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("start_process successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/stop_process.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/stop_process.go new file mode 100644 index 0000000000..1daf3d4476 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/stop_process.go @@ -0,0 +1,97 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// StopProcessAct TODO +type StopProcessAct struct { + *subcmd.BaseOptions + Service pulsar.StartStopProcessComp +} + +// StopProcessCommand TODO +func StopProcessCommand() *cobra.Command { + act := StopProcessAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "stop_process", + Short: "停止pulsar进程", + Example: fmt.Sprintf(`dbactuator pulsar stop_process %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *StopProcessAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *StopProcessAct) Init() (err error) { + logger.Info("StopProcessAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback TODO +// +// @receiver d +// @return err +func (d *StopProcessAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run TODO +func (d *StopProcessAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "停止pulsar进程", + Func: d.Service.StopProcess, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + 
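+ // steps.Run (see subcmd.Steps below) executes each step in order and returns on the first failure, so reaching this line means every step succeeded.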
logger.Info("stop_process successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/unset_bookie_readonly.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/unset_bookie_readonly.go new file mode 100644 index 0000000000..850173c399 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/pulsarcmd/unset_bookie_readonly.go @@ -0,0 +1,95 @@ +package pulsarcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar" + "dbm-services/bigdata/db-tools/dbactuator/pkg/rollback" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" +) + +// UnsetBookieReadOnlyAct 取消bookie只读结构体 +type UnsetBookieReadOnlyAct struct { + *subcmd.BaseOptions + Service pulsar.CheckPulsarShrinkComp +} + +// UnsetBookieReadOnlyCommand 取消bookie只读命令 +func UnsetBookieReadOnlyCommand() *cobra.Command { + // dbactuator pulsar unset_bookie_readonly --payload xxxxx + act := UnsetBookieReadOnlyAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "unset_bookie_readonly", + Short: "取消bookie只读状态", + Example: fmt.Sprintf(`dbactuator pulsar unset_bookie_readonly %s`, subcmd.CmdBaseExapmleStr), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + if act.RollBack { + util.CheckErr(act.Rollback()) + return + } + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate 校验函数 +func (d *UnsetBookieReadOnlyAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init 初始化函数 +func (d *UnsetBookieReadOnlyAct) Init() (err error) { + logger.Info("UnsetBookieReadOnlyAct Init") + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return d.Service.Init() +} + +// Rollback 回滚函数 +func (d *UnsetBookieReadOnlyAct) Rollback() (err error) { + var r rollback.RollBackObjects + if err = d.DeserializeAndValidate(&r); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + err = r.RollBack() + if err != nil { + logger.Error("roll back failed %s", err.Error()) + } + return +} + +// Run 运行函数 +func (d *UnsetBookieReadOnlyAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "取消bookie只读状态", + Func: d.Service.UnsetBookieReadonly, + }, + } + + if err := steps.Run(); err != nil { + rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext) + if rerr != nil { + logger.Error("json Marshal %s", err.Error()) + fmt.Printf("Can't RollBack\n") + } + fmt.Printf("%s\n", string(rollbackCtxb)) + return err + } + + logger.Info("unset_bookie_readonly successfully") + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/subcmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/subcmd.go new file mode 100644 index 0000000000..c4ecfbaa3c --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/subcmd.go @@ -0,0 +1,294 @@ +// Package subcmd TODO +package subcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/pkg/components" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util/validate" + "dbm-services/common/go-pubpkg/logger" + "encoding/base64" + 
"encoding/json" + "fmt" + "os" + + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +const ( + // CmdBaseExampleStr TODO + CmdBaseExampleStr = "-u {uid} -n {node_id} -p {base64}" + // CmdBaseExapmleStr TODO + CmdBaseExapmleStr = "-u {uid} -n {node_id} -p {base64}" + // PayloadFormatRaw TODO + PayloadFormatRaw = "raw" +) + +// GBaseOptions TODO +var GBaseOptions *BaseOptions + +// GeneralRuntimeParam TODO +var GeneralRuntimeParam *components.GeneralParam + +func init() { + GBaseOptions = &BaseOptions{} + GeneralRuntimeParam = &components.GeneralParam{} +} + +// BaseOptions TODO +/* + 此参数是json字符串的base64编码之后的字符串 +*/ +type BaseOptions struct { + Uid string + RootId string + NodeId string + VersionId string + Payload string + PayloadFormat string + ShowPayload bool + RollBack bool + Helper bool +} + +const ( + // StepStateDefault TODO + StepStateDefault = "default" + // StepStateRunning TODO + StepStateRunning = "running" + // StepStateSucc TODO + StepStateSucc = "success" + // StepStateSkip TODO + StepStateSkip = "skipped" // 用户主动跳过该 step + // StepStateStop TODO + StepStateStop = "stopped" // 用户主动暂停,特殊形式的 failed + // StepStateFail TODO + StepStateFail = "failed" +) + +// StepFunc TODO +type StepFunc struct { + FunName string + Func func() error + State string + FuncRetry func() error + FuncRollback func() error + FuncStop func() error + Retries int +} + +// Steps TODO +type Steps []StepFunc + +// Run TODO +func (s Steps) Run() (err error) { + for idx, step := range s { + logMessage := fmt.Sprintf("step <%d>, ready start run [%s]", idx, step.FunName) + logger.Info(logMessage) + if err = step.Func(); err != nil { + logger.Error("step<%d>: %s失败 , 错误: %s", idx, step.FunName, err) + // @todo + // 顺便输出接下来还有哪些 step 未允许 + return err + } + logger.Info("step <%d>, start run [%s] successfully", idx, step.FunName) + } + return nil +} + +// DeserializeAndValidate TODO +/* + 反序列化payload,并校验参数 + ps: 参数校验 from golang validate v10 +*/ +func (b *BaseOptions) DeserializeAndValidate(s interface{}) (err error) { + var bp []byte + if b.PayloadFormat == PayloadFormatRaw { + bp = []byte(b.Payload) + } else { + bp, err = base64.StdEncoding.DecodeString(b.Payload) + if err != nil { + return err + } + } + logger.Info("payload received: %s", bp) + defer logger.Info("payload parsed: %+v", s) + if err = json.Unmarshal(bp, s); err != nil { + logger.Error("json.Unmarshal failed, %v", s, err) + return + } + if err = validate.GoValidateStruct(s, false, true); err != nil { + logger.Error("validate struct failed, %v", s, err) + return + } + return nil +} + +// Deserialize TODO +/* + { + "general":{} // + "extend":{} // 实际参数 + } + 反序列化payload,并校验参数 + ps: 参数校验 from golang validate v10 +*/ +func (b *BaseOptions) Deserialize(s interface{}) (err error) { + var bp []byte + if b.PayloadFormat == PayloadFormatRaw { + bp = []byte(b.Payload) + } else { + bp, err = base64.StdEncoding.DecodeString(b.Payload) + if err != nil { + return err + } + } + bip := components.BaseInputParam{ + ExtendParam: s, + } + logger.Info("payload received: %s", bp) + defer logger.Info("payload parsed: %+v", bip) + if err = json.Unmarshal(bp, &bip); err != nil { + logger.Error("json.Unmarshal failed, %v", s, err) + err = errors.WithMessage(err, "参数解析错误") + return + } + if err = validate.GoValidateStruct(bip, false, true); err != nil { + logger.Error("validate struct failed, %v", s, err) + err = errors.WithMessage(err, "参数输入错误") + return + } + GeneralRuntimeParam = bip.GeneralParam + return nil +} + +// DeserializeSimple 简单 payload 不需要 
{"extend":{body}},直接传入 body +func (b *BaseOptions) DeserializeSimple(s interface{}) (err error) { + var body []byte + if b.PayloadFormat == PayloadFormatRaw { + body = []byte(b.Payload) + } else { + body, err = base64.StdEncoding.DecodeString(b.Payload) + if err != nil { + return err + } + } + logger.Info("payload received: %s", body) + defer logger.Info("payload parsed: %+v", s) + if err = json.Unmarshal(body, &s); err != nil { + logger.Error("json.Unmarshal failed, %v", s, err) + err = errors.WithMessage(err, "参数解析错误") + return + } + if err = validate.GoValidateStruct(s, false, true); err != nil { + logger.Error("validate struct failed, %v", s, err) + err = errors.WithMessage(err, "参数输入错误") + return + } + return nil +} + +// Validate TODO +func (b BaseOptions) Validate() (err error) { + if len(b.Payload) == 0 { + return fmt.Errorf("payload need input") + } + return nil +} + +// OutputCtx TODO +// +// @receiver b +func (b BaseOptions) OutputCtx(ctx string) { + fmt.Printf("%s", ctx) +} + +// SetLogger TODO +func SetLogger(opt *BaseOptions) { + var file *os.File + var err error + var format = true + mode := os.Getenv("MODE") + + switch mode { + case "dev": + file = os.Stdout + format = false + default: + os.MkdirAll("logs/", 0755) + fileName := fmt.Sprintf("logs/actuator_%s_%s.log", opt.Uid, opt.NodeId) + file, err = os.OpenFile(fileName, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm) + if err != nil { + panic(err) + } + } + + extMap := map[string]string{ + "uid": opt.Uid, + "node_id": opt.NodeId, + "root_id": opt.RootId, + "version_id": opt.VersionId, + } + l := logger.New(file, format, logger.InfoLevel, extMap) + logger.ResetDefault(l) + defer logger.Sync() +} + +// ValidateSubCommand TODO +func ValidateSubCommand() func(cmd *cobra.Command, args []string) error { + return func(cmd *cobra.Command, args []string) error { + if len(args) <= 0 { + return fmt.Errorf( + "You must specify the type of Operation Describe. 
%s\n", + SuggestAPIResources(cmd.Parent().Name()), + ) + } + curName := args[0] + var subCommands []string + for _, c := range cmd.Commands() { + subCommands = append(subCommands, c.Name()) + } + if len(subCommands) <= 0 { + return nil + } + if !util.StringsHas(subCommands, curName) { + return fmt.Errorf("Unknown subcommand %s\n", curName) + } + return nil + } +} + +// PrintSubCommandHelper 返回是否成功打印 helper +// 如果打印,同时运行下 runHelp +func PrintSubCommandHelper(cmd *cobra.Command, opt *BaseOptions) bool { + if opt.Helper { + if cmd.Parent().Name() == "dbactuator" { + fmt.Println("--helper need sub-command to show payload parameter") + os.Exit(1) + } + if cmd.Name() != "" { + subcmdPath := fmt.Sprintf("%s %s", cmd.Parent().Name(), cmd.Name()) + if err := GetPathDefinitionHelper(subcmdPath); err != nil { + fmt.Println(err) + os.Exit(1) + } else { + return true + } + } else { + fmt.Println("--example need sub-command") + } + } + return false +} + +// SuggestAPIResources returns a suggestion to use the "api-resources" command +// to retrieve a supported list of resources +func SuggestAPIResources(parent string) string { + return templates.LongDesc( + fmt.Sprintf( + "Use \"%s {Operation Type}\" for a complete list of supported resources.", + parent, + ), + ) +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/subcmd_helper.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/subcmd_helper.go new file mode 100644 index 0000000000..f9d5f56156 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/subcmd_helper.go @@ -0,0 +1,334 @@ +package subcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/docs" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "encoding/json" + "fmt" + "log" + "strings" +) + +const ( + // DTString TODO + DTString = "string" + // DTInteger TODO + DTInteger = "integer" + // DTNumber TODO + DTNumber = "number" + // DTObject TODO + DTObject = "object" + // DTArray TODO + DTArray = "array" + // DTArrayObject TODO + DTArrayObject = "array object" + // DTBOOLEAN TODO + DTBOOLEAN = "boolean" + // DTUndefined TODO + DTUndefined = "undefined ref" + // RefMaxDepth TODO + RefMaxDepth = 9 +) + +const ( + // DefinitionPrefix TODO + DefinitionPrefix = "#/definitions/" + // RefKey TODO + RefKey = "$ref" + // IndentStep TODO + IndentStep = " " + // DefinitionKey TODO + DefinitionKey = "post" +) + +// PostPath TODO +type PostPath map[string]*Path // "post": {} +// Path TODO +type Path struct { + Description string `json:"description"` + Summary string `json:"summary"` + Parameters []Param `json:"parameters"` // parameters[0].schema.$ref + Responses map[string]Param `json:"responses"` +} + +// PrintDescription TODO +func (p *Path) PrintDescription() { + fmt.Printf("# Summary: %s\n", p.Summary) + if p.Description != "" { + fmt.Printf("# Description: %s\n", p.Description) + } +} + +// Param TODO +type Param struct { + Schema RefMap `json:"schema"` // {"$ref":""} + Name string `json:"name"` + Description string `json:"description"` +} + +// RefMap TODO +type RefMap map[string]string // "$ref":"#/definitions/xx" + +// RefMapObj TODO +type RefMapObj struct { + Ref string `json:"$ref"` +} + +// Parameter TODO +type Parameter struct { + Type string `json:"type"` + // Properties components.BaseInputParam `json:"properties"` + GeneralParam components.GeneralParam `json:"generalParam"` // generalParam.$ref + Params Definition `json:"params"` // 
params.$ref +} + +// Definition TODO +type Definition struct { + Type string `json:"type"` + Required []string `json:"required"` + Properties map[string]*Property `json:"properties"` + description string + depth int // 禁止无限套娃 + name string + expanded bool +} + +// PrintProperties TODO +func (d *Definition) PrintProperties(indent string, header string) { + if indent == "" { + fmt.Printf("%s: %s\n", header, d.description) + } + indent = IndentStep + indent + for _, prop := range d.Properties { + prop.Print(indent) + } +} + +// NestedRef TODO +type NestedRef struct { + Type string `json:"type"` + RefMapObj + Items *NestedRef `json:"items"` +} + +// Property TODO +type Property struct { + Type string `json:"type"` + Description string `json:"description"` + Example interface{} `json:"example"` + Default interface{} `json:"default"` + Enum []interface{} `json:"enum"` + AdditionalProperties *NestedRef `json:"additionalProperties"` // additionalProperties.$ref + Ref string `json:"$ref"` // $ref, RefKey + Items *NestedRef `json:"items"` // array: items.$ref + + additionalProperties map[string]*Definition + ref *Definition + required bool + name string + depth int // 禁止无限套娃 +} + +func wrapperBoolean(flag bool) string { + if flag { + return " Required," + } else { + return " " // Optional + } +} + +func wrapperType(t string) string { + if t == DTObject { + return "dict" + } else if t == DTNumber { + return "float" + } + return t +} + +func wrapperEnum(v []interface{}) string { + var enumStr = "" + if v != nil && len(v) > 0 { + enumStr = fmt.Sprintf(` Enum oneof%v,`, v) + } + return enumStr +} + +// Print TODO +func (p *Property) Print(indent string) { + leftMaxPad := "20" + left := fmt.Sprintf("%s%s:", indent, p.name) + + leftWithPad := fmt.Sprintf("%-"+leftMaxPad+"s", left) + ss := fmt.Sprintf("%s\t%s,%s%s %s", + leftWithPad, p.Type, wrapperBoolean(p.required), wrapperEnum(p.Enum), p.Description) + if p.Example != nil { + ss += fmt.Sprintf(". 
例: %v", p.Example) + } + if p.Default != nil { + ss += fmt.Sprintf(", 默认值: %v", p.Default) + } + if p.ref != nil { + fmt.Println(ss) + p.ref.PrintProperties(indent, p.ref.description) + } else { + fmt.Println(ss) + } +} + +// Definitions TODO +type Definitions map[string]*Definition + +// JsonSpec TODO +type JsonSpec struct { + Paths map[string]PostPath `json:"paths"` + Definitions Definitions `json:"definitions"` +} + +// GetOneDefinition TODO +func (ds *Definitions) GetOneDefinition(name string) *Definition { + name = strings.TrimPrefix(name, DefinitionPrefix) + if obj, ok := (*ds)[name]; ok { + return obj + } else { + // 未定义的 definition name + } + return nil +} + +// expandProperties 将 ref definition 展开 +func (ds *Definitions) expandProperties() { + for defName, d := range *ds { + d.name = defName + if !d.expanded { // 因为展开时,一直在操作同一个引用,不要重复展开 + d.ExpandProperties(ds) + } + } +} + +// ExpandProperties 展开 definition 的 property +// 因为 property 可能引用其它 definition +func (d *Definition) ExpandProperties(defs *Definitions) { + d.expanded = true + if d.Type != DTObject { + logger.Info("helper definition is no object %v", d) + return + } + for pname, prop := range d.Properties { + prop.depth = d.depth + prop.name = pname + if util.StringsHas(d.Required, pname) { + prop.required = true + } + + refObjName := prop.getRef() + if refObjName != "" { + prop.ref = defs.GetOneDefinition(refObjName) + if prop.ref == nil { + prop.Type = DTUndefined // 未知 definition, 置空 + prop.Ref = "" + continue + } + prop.ref.depth = prop.depth + 1 + d.depth = prop.ref.depth + if d.depth > RefMaxDepth { + fmt.Printf("ref max depth exceed, definition name:%v, depth:%d, depth def:%v\n", + d.name, d.depth, prop.ref) + continue + } + prop.ref.ExpandProperties(defs) // 递归 + prop.ref.description = prop.Description + if prop.Type == "" { + prop.Type = DTObject + } + } + } +} + +// getRef 判断该 property 是否有下级嵌套 +// 如果有则存到 ref 中,且修改 Type 加上 嵌套类型 +func (p *Property) getRef() string { + if p.Ref != "" { + p.Type += " " + DTObject + return p.Ref + } else if p.AdditionalProperties != nil { + p.Type += ":map[string]" + " " + p.AdditionalProperties.Type // DTString + return p.getItemsNestedRef(p.AdditionalProperties) + } else if p.Items != nil { + p.Type += " " + p.Items.Type + return p.getItemsNestedRef(p.Items) + } + return "" +} + +func (p *Property) getItemsNestedRef(subRef *NestedRef) string { + if ref := subRef.RefMapObj.Ref; ref != "" { + p.Ref = ref + p.Type += " " + DTObject // DTArrayObject + return ref + } else if subRef.Items != nil { + if ref = subRef.Items.RefMapObj.Ref; ref != "" { + p.Ref = ref + p.Type += " " + DTObject // DTArrayObject + return ref + } + p.Type += " " + subRef.Items.Type + } + return "" +} + +// GetPathDefinitionHelper 结束命令字符串,打印描述 +// mysql mycnf-change +// /mysql/mycnf-change +func GetPathDefinitionHelper(subcmd string) error { + defer func() { + if r := recover(); r != nil { + // logger.Error("get helper failed %s: %s", subcmd, r, string(debug.Stack())) + } + }() + if strings.Contains(subcmd, " ") { + tmp := strings.Split(strings.TrimSpace(subcmd), " ") + subcmd = "/" + strings.Join(tmp, "/") + } + f := docs.SwaggerDocs + doc := "swagger.json" + b, err := f.ReadFile(doc) + if err != nil { + return err + } + jsonSpec := JsonSpec{} + if err := json.Unmarshal(b, &jsonSpec); err != nil { + fmt.Println(err) + log.Fatalln("docs/swagger.json 解析失败") + } + if pathObj, ok := jsonSpec.Paths[subcmd]; !ok { + fmt.Printf("未找到参数定义 %s\n", subcmd) + } else { + if params, ok := pathObj[DefinitionKey]; !ok { + 
fmt.Printf("未找到参数定义post %s\n", subcmd) + } else if len(params.Parameters) == 0 { + fmt.Printf("未找到参数定义param %s\n", subcmd) + } + } + // jsonSpec.Definitions.ExpandProperties() + pathDefinition := jsonSpec.Paths[subcmd][DefinitionKey] + pathDefinition.PrintDescription() + // parameters + reqSchema := pathDefinition.Parameters[0].Schema + schemaName := strings.TrimPrefix(reqSchema[RefKey], DefinitionPrefix) + thisDef := jsonSpec.Definitions[schemaName] + thisDef.ExpandProperties(&jsonSpec.Definitions) + thisDef.PrintProperties("", "\n# Param") + + // responses + for code, resp := range pathDefinition.Responses { + respSchema := resp.Schema + schemaName = strings.TrimPrefix(respSchema[RefKey], DefinitionPrefix) + thisDef = jsonSpec.Definitions[schemaName] + thisDef.ExpandProperties(&jsonSpec.Definitions) // 如果 param 对象里面包含了 resp 的对象,这里可能重复展开。暂不处理 + thisDef.PrintProperties("", "\n# Response "+code) + } + return nil +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/subcmd_util.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/subcmd_util.go new file mode 100644 index 0000000000..379b69605c --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/subcmd_util.go @@ -0,0 +1,12 @@ +package subcmd + +import "encoding/json" + +// ToPrettyJson TODO +func ToPrettyJson(v interface{}) string { + if data, err := json.MarshalIndent(v, "", " "); err == nil { + // ss := "\n# use --helper to show explanations. example for payload:\n --payload-format raw --payload '%s'" + return string(data) + } + return "未找到合法的 example " +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinit.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinit.go new file mode 100644 index 0000000000..923ccb7ca9 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinit.go @@ -0,0 +1,68 @@ +package sysinitcmd + +import ( + "dbm-services/bigdata/db-tools/dbactuator/internal/subcmd" + "dbm-services/bigdata/db-tools/dbactuator/pkg/components/sysinit" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "fmt" + + "github.com/spf13/cobra" +) + +// SysInitAct TODO +type SysInitAct struct { + *subcmd.BaseOptions + Service sysinit.SysInitParam +} + +// NewSysInitCommand TODO +func NewSysInitCommand() *cobra.Command { + act := SysInitAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "sysinit", + Short: "Exec sysinit_mysql.sh,Init mysql default os user,password", + Example: `dbactuator sysinit -p eyJ1c2VyIjoiIiwicHdkIjoiIn0=`, + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Init TODO +func (d *SysInitAct) Init() (err error) { + if err = d.DeserializeAndValidate(&d.Service); err != nil { + logger.Error("DeserializeAndValidate err %s", err.Error()) + return err + } + return +} + +// Run TODO +func (s *SysInitAct) Run() (err error) { + steps := []subcmd.StepFunc{ + { + FunName: "执行sysInit脚本", + Func: s.Service.SysInitMachine, + }, + { + FunName: fmt.Sprintf("重置%sOS密码", s.Service.OsMysqlUser), + Func: s.Service.SetOsPassWordForMysql, + }, + } + logger.Info("start sysinit ...") + for idx, f := range steps { + if err = f.Func(); err != nil { + logger.Error("step <%d>, run [%s] occur %v", idx, f.FunName, err) + return err + } + logger.Info("step <%d>, run [%s] successfully", idx, f.FunName) + } + logger.Info("sysinit 
successfully") + return +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinitcmd.go b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinitcmd.go new file mode 100644 index 0000000000..19eb9248ea --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinitcmd.go @@ -0,0 +1,2 @@ +// Package sysinitcmd TODO +package sysinitcmd diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/backup.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/backup.go new file mode 100644 index 0000000000..9366278415 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/backup.go @@ -0,0 +1,23 @@ +package backup_download + +// DownloadFile TODO +type DownloadFile interface { + Init() error + PreCheck() error + Start() error + Pause() error + Stop() error + Resume() error + Rollback() error + GetStatus() error + GetAction() error +} + +// DFBase TODO +type DFBase struct { + BKBizID int `json:"bk_biz_id"` + // 单文件下载限速,单位 MB/s + BWLimitMB int64 `json:"bwlimit_mb"` + // 并发下载数 + Concurrency int `json:"max_concurrency"` // @todo 同时下载最大并发 +} diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/backup_download.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/backup_download.go new file mode 100644 index 0000000000..db87845d71 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/backup_download.go @@ -0,0 +1,2 @@ +// Package backup_download TODO +package backup_download diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/cos.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/cos.go new file mode 100644 index 0000000000..3c52241046 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/cos.go @@ -0,0 +1 @@ +package backup_download diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/gse.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/gse.go new file mode 100644 index 0000000000..3c52241046 --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/gse.go @@ -0,0 +1 @@ +package backup_download diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/http.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/http.go new file mode 100644 index 0000000000..c2a754839d --- /dev/null +++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/http.go @@ -0,0 +1,172 @@ +package backup_download + +import ( + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util/httpclient" + "dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil" + "dbm-services/common/go-pubpkg/logger" + "fmt" + "strings" + "time" +) + +// DFHttpComp 允许在目标机器上 +type DFHttpComp struct { + Params DFHttpParam `json:"extend"` + progress *progress +} + +// DFHttpParam TODO +type DFHttpParam struct { + DFBase + HttpGet +} + +// HttpGet TODO +type HttpGet struct { + // 下载 url + Server string `json:"server" validate:"required,url"` + // 下载哪些文件 + FileList []string `json:"file_list" validate:"required"` + // 文件存放到本机哪个目录 + PathTgt string `json:"path_tgt" validate:"required"` + // http url basic auth user + AuthUser string `json:"auth_user"` + // http url basic auth pass + AuthPass 
string `json:"auth_pass"` + // curl 命令路径,默认留空. 目前只用于测试 url + CurlPath string `json:"curl_path"` + CurlOptions []string `json:"curl_options"` + + curlCmd string +} + +// Example TODO +func (d *DFHttpComp) Example() interface{} { + comp := DFHttpComp{ + Params: DFHttpParam{ + DFBase: DFBase{ + BWLimitMB: 30, + Concurrency: 1, + }, + HttpGet: HttpGet{ + Server: "http://server1:8082/datadbbak8082/", + PathTgt: "/data/dbbak", + FileList: []string{"xx.info", "xx"}, + AuthUser: "xx", + AuthPass: "yy", + }, + }, + } + return comp +} + +// Init TODO +func (d *DFHttpComp) Init() error { + if d.Params.CurlPath == "" { + d.Params.CurlPath = "curl" + } + if d.Params.BWLimitMB == 0 { + d.Params.BWLimitMB = 20 + } + if !util.StringsHas(d.Params.CurlOptions, "--limit-rate") { + // d.Params.CurlOptions = append(d.Params.CurlOptions, fmt.Sprintf(" --limit-rate %dm", d.Params.BWLimitMB)) + d.Params.CurlOptions = append(d.Params.CurlOptions, + "--limit-rate", fmt.Sprintf("%dm", d.Params.BWLimitMB)) + } + if !util.StringsHas(d.Params.CurlOptions, " -s ") { + d.Params.CurlOptions = append(d.Params.CurlOptions, "-s") + } + // -XGET + if d.Params.AuthUser != "" { + d.Params.CurlOptions = append(d.Params.CurlOptions, + fmt.Sprintf(`-u "%s:%s"`, d.Params.AuthUser, d.Params.AuthPass)) + /* + authPassBase64 := base64.StdEncoding.EncodeToString([]byte(d.Params.AuthPass)) + d.Params.CurlOptions = append(d.Params.CurlOptions, + "-H", fmt.Sprintf(`"Authorization: Basic %s"`, authPassBase64)) + */ + } + d.Params.curlCmd = fmt.Sprintf("%s %s", d.Params.CurlPath, strings.Join(d.Params.CurlOptions, " ")) + return nil +} + +// PreCheck TODO +func (d *DFHttpComp) PreCheck() error { + testCurl := fmt.Sprintf("%s '%s'", d.Params.curlCmd, d.Params.Server) + logger.Info("test command: %s", testCurl) + + if out, err := osutil.ExecShellCommand(false, testCurl); err != nil { + return err + } else { + if !strings.Contains(out, "
") {
+			return fmt.Errorf("no file list returned")
+		}
+	}
+	return nil
+
+}
+
+// PostCheck TODO
+func (d *DFHttpComp) PostCheck() error {
+	return nil
+}
+
+// Start TODO
+func (d *DFHttpComp) Start() error {
+	if d.progress == nil {
+		d.progress = &progress{
+			Success: []string{},
+			Failed:  []string{},
+			Todo:    []string{},
+			Doing:   []string{},
+		}
+	}
+
+	fileList := d.Params.FileList
+	p := d.Params
+	for _, f := range fileList {
+		if util.HasElem(f, d.progress.Success) {
+			continue
+		}
+		/*
+			shellDownload := fmt.Sprintf("%s '%s%s' -o '%s/%s'",
+				p.curlCmd, p.Server, f, p.PathTgt, f)
+			logger.Info("download command: %s", shellDownload)
+			out, err := osutil.ExecShellCommand(false, shellDownload)
+			// the concatenated curl command is open to shell injection, e.g. bash -c "curl --limit-rate 20m -s -u \"xx:yy\" http://server1:8082/datadbbak8082/ls  -o /data1/dbbak/ls ;cd .. ; ls"
+		*/
+
+		err := httpclient.Download(p.Server, p.PathTgt, f, p.AuthUser, p.AuthPass, p.BWLimitMB)
+		if err != nil {
+			logger.Error("download %s got error %s", f, err.Error())
+			d.progress.Failed = append(d.progress.Failed, f)
+			return err
+		}
+		/*
+			else if strings.TrimSpace(out) != "" {
+				d.progress.Failed = append(d.progress.Failed, f)
+				return fmt.Errorf("download %s expect stdout is empty, got %s", f, out)
+			}
+		*/
+		d.progress.Success = append(d.progress.Success, f)
+	}
+	return nil
+}
+
+// WaitDone TODO
+func (d *DFHttpComp) WaitDone() error {
+	totalList := d.Params.FileList
+	for len(totalList) > 0 && len(d.progress.Success)+len(d.progress.Failed) < len(totalList) {
+		time.Sleep(5 * time.Second)
+	}
+	logger.Info("files download %+v", d.progress)
+	if len(d.progress.Failed) > 0 {
+		return fmt.Errorf("files download failed %d", len(d.progress.Failed))
+	}
+	return nil
+}
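+
+// Minimal usage sketch (illustrative only; in practice the subcmd layer
+// deserializes the --payload JSON into Params before driving the lifecycle):
+//
+//	comp := DFHttpComp{}                          // Params filled from the "extend" payload
+//	if err := comp.Init(); err != nil { ... }     // build curl options, defaults
+//	if err := comp.PreCheck(); err != nil { ... } // probe the file server URL
+//	if err := comp.Start(); err != nil { ... }    // download every file in file_list
+//	_ = comp.WaitDone()                           // summarize progress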
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/scp.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/scp.go
new file mode 100644
index 0000000000..ba1e24024a
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download/scp.go
@@ -0,0 +1,193 @@
+package backup_download
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/sftp"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"log"
+	"time"
+)
+
+// DFScpComp runs on the target machine and pulls files from the source host over sftp
+type DFScpComp struct {
+	Params DFScpParam `json:"extend"`
+
+	scpConfig sftp.Config
+	scpClient sftp.Client
+	progress  *progress
+}
+
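+// progress tracks per-file transfer state. Start appends to Success/Failed and
+// skips files already in Success, which is what makes Resume (= Start again)
+// safe to call; Todo/Doing are reserved and not updated yet.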
+type progress struct {
+	Success []string
+	Failed  []string
+	Todo    []string
+	Doing   []string
+}
+
+// DFScpParam TODO
+type DFScpParam struct {
+	DFBase
+	// download source
+	FileSrc FileSrc `json:"file_src" validate:"required"`
+	// download target
+	FileTgt FileTgt `json:"file_tgt" validate:"required"`
+}
+
+// Example TODO
+func (d *DFScpComp) Example() interface{} {
+	comp := DFScpComp{
+		Params: DFScpParam{
+			DFBase: DFBase{
+				BWLimitMB:   30,
+				Concurrency: 1,
+			},
+			FileSrc: FileSrc{
+				Path:     "/data/dbbak",
+				FileList: []string{"xx.info", "xx"},
+				SSHConfig: SSHConfig{
+					SshHost: "source_host",
+					SshPort: "22",
+					SshUser: "mysql",
+					SshPass: "xx",
+				},
+			},
+			FileTgt: FileTgt{
+				Path: "/data/dbbak",
+			},
+		},
+	}
+	return comp
+}
+
+// Init TODO
+func (d *DFScpComp) Init() error {
+	src := d.Params.FileSrc.SSHConfig
+	scpConfig := sftp.Config{
+		Username: src.SshUser,
+		Password: src.SshPass,
+		Server:   fmt.Sprintf("%s:%s", src.SshHost, src.SshPort),
+		Timeout:  time.Second * 10,
+	}
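+	// dial once and close immediately: this only validates connectivity and credentials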
+	if scpClient, err := sftp.New(scpConfig); err != nil {
+		return err
+	} else {
+		scpClient.Close()
+		// d.sshClient = sshClient
+	}
+	d.scpConfig = scpConfig
+
+	if d.Params.BWLimitMB == 0 {
+		d.Params.BWLimitMB = 20 // 20 MB/s by default
+	}
+	return nil
+}
+
+// PreCheck TODO
+func (d *DFScpComp) PreCheck() error {
+	// create the local target directory
+	return nil
+}
+
+// PostCheck TODO
+func (d *DFScpComp) PostCheck() error {
+	// verify file count, md5 checksums and file continuity
+	return nil
+}
+
+// Start TODO
+func (d *DFScpComp) Start() error {
+	if d.progress == nil {
+		d.progress = &progress{
+			Success: []string{},
+			Failed:  []string{},
+			Todo:    []string{},
+			Doing:   []string{},
+		}
+	}
+
+	fileList := d.Params.FileSrc.FileList
+	p := d.Params
+	for _, f := range fileList {
+		if util.HasElem(f, d.progress.Success) {
+			continue
+		}
+		err := sftp.Download(d.scpConfig, p.FileSrc.Path, p.FileTgt.Path, f, p.BWLimitMB) // @todo download timeout: 2h
+		if err != nil {
+			log.Println(err)
+			d.progress.Failed = append(d.progress.Failed, f)
+			return err
+		}
+		d.progress.Success = append(d.progress.Success, f)
+	}
+
+	return nil
+}
+
+// Pause TODO
+func (d *DFScpComp) Pause() error {
+	return nil
+}
+
+// Stop TODO
+func (d *DFScpComp) Stop() error {
+	return nil
+}
+
+// Resume TODO
+func (d *DFScpComp) Resume() error {
+	return d.Start()
+}
+
+// Rollback TODO
+func (d *DFScpComp) Rollback() error {
+	return nil
+}
+
+// GetStatus TODO
+func (d *DFScpComp) GetStatus() error {
+	return nil
+}
+
+// WaitDone TODO
+func (d *DFScpComp) WaitDone() error {
+	totalList := d.Params.FileSrc.FileList
+	for len(totalList) > 0 && len(d.progress.Success)+len(d.progress.Failed) < len(totalList) {
+		time.Sleep(5 * time.Second)
+	}
+	logger.Info("files download %+v", d.progress)
+
+	if len(d.progress.Failed) > 0 {
+		return fmt.Errorf("files download failed %d", len(d.progress.Failed))
+	}
+	return nil
+}
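+
+// Note: Start downloads synchronously, so by the time WaitDone runs every file
+// is already in Success or Failed and the loop exits on its first check; the
+// polling only matters if Start ever becomes asynchronous.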
+
+// SSHConfig ssh connection info
+type SSHConfig struct {
+	SshHost string `json:"ssh_host" validate:"required"`
+	SshPort string `json:"ssh_port" validate:"required"`
+	SshUser string `json:"ssh_user" validate:"required"`
+	SshPass string `json:"ssh_pass"`
+}
+
+// FileSrc TODO
+type FileSrc struct {
+	// scp source host
+	SSHConfig
+	// directory containing the source files
+	Path  string `json:"path" validate:"required"`
+	Match string `json:"match"`
+	// source file names, relative to the path above
+	FileList []string `json:"file_list" validate:"required"`
+}
+
+// FileTgt TODO
+type FileTgt struct {
+	// target directory for downloaded files
+	Path string `json:"path" validate:"required"`
+}
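+
+// For reference, an "extend" payload matching DFScpParam (illustrative values):
+//
+//	{
+//	  "bwlimit_mb": 30,
+//	  "max_concurrency": 1,
+//	  "file_src": {
+//	    "ssh_host": "source_host", "ssh_port": "22",
+//	    "ssh_user": "mysql", "ssh_pass": "xx",
+//	    "path": "/data/dbbak",
+//	    "file_list": ["xx.info", "xx"]
+//	  },
+//	  "file_tgt": {"path": "/data/dbbak"}
+//	}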
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/base.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/base.go
new file mode 100644
index 0000000000..09380d888f
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/base.go
@@ -0,0 +1,17 @@
+package components
+
+// BaseInputParam TODO
+type BaseInputParam struct {
+	GeneralParam *GeneralParam `json:"general"`
+	ExtendParam  interface{}   `json:"extend"`
+}
+
+// GeneralParam TODO
+type GeneralParam struct {
+	RuntimeAccountParam RuntimeAccountParam `json:"runtime_account"`
+	// more Runtime Struct
+}
+
+// RuntimeAccountParam TODO
+type RuntimeAccountParam struct {
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/components.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/components.go
new file mode 100644
index 0000000000..7fb03b9cf0
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/components.go
@@ -0,0 +1,2 @@
+// Package components TODO
+package components
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/computil/computil.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/computil/computil.go
new file mode 100644
index 0000000000..527a1153d2
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/computil/computil.go
@@ -0,0 +1,2 @@
+// Package computil TODO
+package computil
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/crontab/clear_crontab.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/crontab/clear_crontab.go
new file mode 100644
index 0000000000..ff49c3d01e
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/crontab/clear_crontab.go
@@ -0,0 +1,27 @@
+package crontab
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+)
+
+// ClearCrontabParam TODO
+type ClearCrontabParam struct {
+}
+
+/*
+	run the system init script (formerly sysinit.sh):
+	create the mysql account, etc.
+*/
+
+// CleanCrontab comments out the crontab entries on this host
+//
+//	@receiver u
+//	@return err
+func (u *ClearCrontabParam) CleanCrontab() (err error) {
+	logger.Info("start cleaning crontab on this host")
+	if err = osutil.CleanLocalCrontab(); err != nil {
+		return err
+	}
+	return
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/crontab/crontab.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/crontab/crontab.go
new file mode 100644
index 0000000000..0061fab3ab
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/crontab/crontab.go
@@ -0,0 +1,2 @@
+// Package crontab TODO
+package crontab
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/dbconfig/dbconfig.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/dbconfig/dbconfig.go
new file mode 100644
index 0000000000..6a418f6e16
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/dbconfig/dbconfig.go
@@ -0,0 +1,2 @@
+// Package dbconfig TODO
+package dbconfig
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/dbconfig/query_change.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/dbconfig/query_change.go
new file mode 100644
index 0000000000..041f57e44e
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/dbconfig/query_change.go
@@ -0,0 +1 @@
+package dbconfig
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/check_health.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/check_health.go
new file mode 100644
index 0000000000..30dd65220e
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/check_health.go
@@ -0,0 +1,70 @@
+package elasticsearch
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"os"
+	"strconv"
+)
+
+// CheckEsHealthComp TODO
+type CheckEsHealthComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *CheckEsHealthParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// CheckEsHealthParams TODO
+type CheckEsHealthParams struct {
+}
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *CheckEsHealthComp) Init() (err error) {
+	logger.Info("CheckEsHealth fake init")
+	return nil
+}
+
+// CheckEsHealth TODO
+func (d *CheckEsHealthComp) CheckEsHealth() (err error) {
+	username := os.Getenv("ES_USERNAME")
+	password := os.Getenv("ES_PASSWORD")
+	localIp, err := esutil.GetEsLocalIp()
+	if err != nil {
+		logger.Error("get local ip failed, %s", err)
+		return err
+	}
+	ports, err := esutil.GetEsLocalPorts()
+	if err != nil {
+		logger.Error("get ports failed, %s", err)
+		return err
+	}
+
+	// check each es port on this host
+	var errors string
+	for _, port := range ports {
+		iPort, _ := strconv.Atoi(port)
+		e := esutil.EsInsObject{
+			Host:     localIp,
+			HttpPort: iPort,
+			UserName: username,
+			Password: password,
+		}
+		if err := e.CheckEsHealth(); err != nil {
+			errors += err.Error()
+		}
+	}
+	if len(errors) != 0 {
+		logger.Error("some es nodes may be down, %s", errors)
+		return fmt.Errorf("unhealthy es nodes, please check [%s]", errors)
+	}
+
+	logger.Info("all es nodes are healthy")
+
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/check_nodes.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/check_nodes.go
new file mode 100644
index 0000000000..dc267a1f38
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/check_nodes.go
@@ -0,0 +1,83 @@
+package elasticsearch
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"time"
+)
+
+// CheckEsNodeComp TODO
+type CheckEsNodeComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *CheckEsNodeParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// CheckEsNodeParams TODO
+type CheckEsNodeParams struct {
+	HttpPort    int           `json:"http_port" ` // http port
+	Host        string        `json:"host" validate:"required,ip" `
+	ClusterName string        `json:"cluster_name"` // cluster name
+	Username    string        `json:"username"`
+	Password    string        `json:"password"`
+	Nodes       []esutil.Node `json:"nodes"` // nodes to check
+}
+
+// Nodes TODO
+type Nodes struct {
+	Ip          string `json:"Ip" validate:"required"`
+	InstanceNum int    `json:"instance_num"  validate:"required"`
+}
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *CheckEsNodeComp) Init() (err error) {
+	logger.Info("CheckEsNode fake init")
+	return nil
+}
+
+// CheckEsNodes TODO
+/**
+ *  @description: check that the newly added es nodes are up
+ *  @return
+ */
+func (d *CheckEsNodeComp) CheckEsNodes() (err error) {
+	const MaxRetry = 5
+	count := 0
+	// wait 60s first
+	time.Sleep(60 * time.Second)
+
+	e := esutil.EsInsObject{
+		Host:     d.Params.Host,
+		HttpPort: d.Params.HttpPort,
+		UserName: d.Params.Username,
+		Password: d.Params.Password,
+	}
+
+	nodes := d.Params.Nodes
+
+	logger.Info("start checking scale-out status")
+	for {
+		count++
+		logger.Info("check attempt [%d]", count)
+		ok, err := e.CheckNodes(nodes)
+		if ok {
+			logger.Info("all nodes started successfully, %v", err)
+			break
+		}
+		if count == MaxRetry {
+			logger.Error("checking scale-out status timed out, %v", err)
+			return err
+		}
+		time.Sleep(60 * time.Second)
+	}
+	logger.Info("scale-out status check finished")
+
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/clean_data.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/clean_data.go
new file mode 100644
index 0000000000..c5b740ce9d
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/clean_data.go
@@ -0,0 +1,102 @@
+package elasticsearch
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+)
+
+// CleanDataComp TODO
+type CleanDataComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *CleanDataParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// CleanDataParams TODO
+type CleanDataParams struct{}
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *CleanDataComp) Init() (err error) {
+	logger.Info("Clean data fake init")
+	return nil
+}
+
+// CleanData TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *CleanDataComp) CleanData() (err error) {
+	// clear crontab
+	logger.Info("list crontab entries")
+	out, err := osutil.ListCrontb(cst.DefaultExecUser)
+	if err != nil {
+		logger.Error("list crontab failed, %v", err)
+		return err
+	}
+	logger.Debug("crontab: %s", out)
+	if len(out) > 0 {
+		extraCmd := "crontab -u mysql -r"
+		logger.Info("clear crontab, [%s]", extraCmd)
+		if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("[%s] execute failed, %v", extraCmd, err)
+			return err
+		}
+	}
+
+	// force kill related processes
+	extraCmd :=
+		`ps -ef | egrep 'supervisord|node_exporter|telegraf|x-pack-ml'|grep -v grep |awk {'print "kill -9 " $2'}|sh`
+	logger.Info("force kill processes, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = `rm -f /etc/supervisord.conf /usr/local/bin/supervisorctl /usr/local/bin/supervisord /usr/bin/java`
+	logger.Info("remove symlinks, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// clean profile
+	extraCmd = `sed -i '/esprofile/d' /etc/profile`
+	logger.Info("clean profile, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	// remove esenv
+	extraCmd = `rm -rf /data/esenv*`
+	logger.Info("remove esenv, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// remove data directories
+	extraCmd = `rm -rf /data*/esdata*`
+	logger.Info("remove esdata, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// remove log directories
+	extraCmd = `rm -rf /data*/eslog*`
+	logger.Info("remove eslog, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/exclude_node.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/exclude_node.go
new file mode 100644
index 0000000000..20e901393e
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/exclude_node.go
@@ -0,0 +1,127 @@
+package elasticsearch
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"errors"
+	"fmt"
+	"strings"
+)
+
+// ExcludeEsNodeComp TODO
+type ExcludeEsNodeComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *ExcludeEsNodeParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// ExcludeEsNodeParams TODO
+type ExcludeEsNodeParams struct {
+	HttpPort     int      `json:"http_port" ` // http port
+	Host         string   `json:"host" validate:"required,ip" `
+	ClusterName  string   `json:"cluster_name"` // cluster name
+	Username     string   `json:"username"`
+	Password     string   `json:"password"`
+	ExcludeNodes []string `json:"exclude_nodes"` // nodes to exclude
+}
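+
+// An "extend" payload for exclusion would look like (illustrative values):
+//
+//	{"http_port": 9200, "host": "1.1.1.1", "cluster_name": "es1",
+//	 "username": "xx", "password": "xx", "exclude_nodes": ["2.2.2.2", "3.3.3.3"]}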
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *ExcludeEsNodeComp) Init() (err error) {
+	logger.Info("Reduce es node fake init")
+	return nil
+}
+
+// ExcludeEsNode TODO
+/**
+ *  @description: exclude (drain) the given nodes from the cluster
+ *  @return
+ */
+func (d *ExcludeEsNodeComp) ExcludeEsNode() (err error) {
+	e := esutil.EsInsObject{
+		Host:     d.Params.Host,
+		HttpPort: d.Params.HttpPort,
+		UserName: d.Params.Username,
+		Password: d.Params.Password,
+	}
+
+	nodes := d.Params.ExcludeNodes
+
+	logger.Info("run exclude for nodes %v", nodes)
+	if err := e.DoExclude(nodes); err != nil {
+		logger.Error("exclude failed, %v", err)
+		return err
+	}
+
+	/* deferred to a later step
+	logger.Info("check whether the nodes are empty")
+	if err := e.CheckEmpty(nodes); err != nil {
+		logger.Error("check empty nodes failed, %v", err)
+		return err
+	}
+	*/
+
+	logger.Info("exclude executed successfully")
+
+	return nil
+}
+
+// CheckShards TODO
+func (d *ExcludeEsNodeComp) CheckShards() (err error) {
+	e := esutil.EsInsObject{
+		Host:     d.Params.Host,
+		HttpPort: d.Params.HttpPort,
+		UserName: d.Params.Username,
+		Password: d.Params.Password,
+	}
+
+	nodes := d.Params.ExcludeNodes
+	logger.Info("check whether the nodes are empty")
+	shards, ok, err := e.CheckEmptyOnetime(nodes)
+	if err != nil {
+		logger.Error("check empty nodes failed, %v", err)
+		return err
+	}
+
+	if ok {
+		logger.Info("shard migration finished")
+		err = nil
+	} else {
+		errMsg := fmt.Sprintf("shard migration not finished, %d shards remaining", shards)
+		logger.Error(errMsg)
+		err = errors.New(errMsg)
+	}
+
+	return err
+}
+
+// CheckConnections TODO
+func (d *ExcludeEsNodeComp) CheckConnections() (err error) {
+
+	host := d.Params.Host
+	httpPort := d.Params.HttpPort
+
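+	// count ESTABLISHED connections to the es http port from peers other than
+	// this host itself and loopback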
+	extraCmd := fmt.Sprintf(`ss -tn|grep ESTAB|grep -w %d|awk '{if($5 !~ "%s" && $5 !~ "127.0.0.1")  {print}}'`, httpPort,
+		host)
+	logger.Info("check active connections, [%s]", extraCmd)
+	output, err := osutil.ExecShellCommand(false, extraCmd)
+	if err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+	}
+	if len(strings.TrimSuffix(output, "\n")) == 0 {
+		logger.Info("no active connections")
+		err = nil
+	} else {
+		errMsg := fmt.Sprintf("there are still active connections, %s", output)
+		logger.Error(errMsg)
+		err = errors.New(errMsg)
+	}
+
+	return err
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/install_elasticsearch.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/install_elasticsearch.go
new file mode 100644
index 0000000000..df8db01332
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/install_elasticsearch.go
@@ -0,0 +1,798 @@
+// Package elasticsearch TODO
+package elasticsearch
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/user"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-version"
+	"gopkg.in/yaml.v2"
+)
+
+// InstallEsComp TODO
+type InstallEsComp struct {
+	GeneralParam *components.GeneralParam
+	Params       *InstallEsParams
+	ElasticsearchYaml
+	ElasticsearchConfig
+	KibanaYaml
+	RollBackContext rollback.RollBackObjects
+}
+
+// InstallEsParams TODO
+type InstallEsParams struct {
+	EsConfigs      json.RawMessage `json:"es_configs"`                      // elasticsearch.yml
+	EsVersion      string          `json:"es_version"  validate:"required"` // version, eg: 7.10.2
+	HttpPort       int             `json:"http_port" `                      // http port
+	MasterIp       string          `json:"master_ip"`                       // master ips, eg: ip1,ip2,ip3
+	MasterNodename string          `json:"master_nodename" `                // master node names, eg: name1,name2,name3
+	JvmMem         string          `json:"jvm_mem"`                         //  eg: 10g
+	Host           string          `json:"host" validate:"required,ip" `
+	HotInstances   int             `json:"hot_instances"`   // number of hot node instances
+	ColdInstances  int             `json:"cold_instances" ` // number of cold node instances
+	Instances      int             `json:"instances"`
+	ClusterName    string          `json:"cluster_name"` // cluster name
+	Role           string          `json:"role"`         //  eg: master, hot, cold, client
+	Username       string          `json:"username" `
+	Password       string          `json:"password" `
+	BkBizId        int             `json:"bk_biz_id"`
+	DbType         string          `json:"db_type"`
+	ServiceType    string          `json:"service_type"`
+}
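+
+// An install "extend" payload would look like (illustrative values; the
+// *_pemkey_password keys are read out of es_configs by InstallEsBase):
+//
+//	{
+//	  "es_version": "7.10.2",
+//	  "http_port": 9200,
+//	  "host": "1.1.1.1",
+//	  "cluster_name": "es-cluster",
+//	  "role": "master",
+//	  "master_ip": "1.1.1.1,2.2.2.2,3.3.3.3",
+//	  "master_nodename": "master-1.1.1.1_1,master-2.2.2.2_1,master-3.3.3.3_1",
+//	  "instances": 1,
+//	  "es_configs": {
+//	    "path_data": "/data/esdata", "path_log": "/data/eslog",
+//	    "transport_pemkey_password": "xx", "http_pemkey_password": "xx"
+//	  }
+//	}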
+
+// InitDirs TODO
+type InitDirs = []string
+
+// Port TODO
+type Port = int
+type socket = string
+
+// ElasticsearchConfig directory layout definitions
+type ElasticsearchConfig struct {
+	InstallDir string `json:"install_dir"` // /data
+	EsenvDir   string `json:"esenv_dir"`   //  /data/esenv
+	PkgDir     string `json:"pkg_dir"`     // /data/install/
+	EsDir      string
+}
+
+// ElasticsearchYaml TODO
+// elasticsearch.yml
+type ElasticsearchYaml struct {
+	ClusterName                           string   `yaml:"cluster.name"` // cluster.name
+	NodeName                              string   `yaml:"node.name"`    // node.name
+	NodeAttrTag                           string   `yaml:"node.attr.tag"`
+	NetworkHost                           string   `yaml:"network.host"`         // network.host
+	NetworkPublishhost                    string   `yaml:"network.publish_host"` // network.publish_host
+	Nodedata                              bool     `yaml:"node.data"`            //  node.data
+	NodeIngest                            bool     `yaml:"node.ingest"`          // node.Ingest
+	NodeMaster                            bool     `yaml:"node.master"`          //  node.master
+	NodeMl                                bool     `yaml:"node.ml"`
+	HttpPort                              int      `yaml:"http.port"`                         //  http.port
+	PathData                              string   `yaml:"path.data"`                         // path.data
+	PathLogs                              string   `yaml:"path.logs"`                         // path.logs
+	XpackSecurityEnabled                  bool     `yaml:"xpack.security.enabled"`            // xpack.security.enabled
+	DiscoverySeedHosts                    []string `yaml:"discovery.seed_hosts,flow"`         // discovery.seed_hosts
+	ClusterInitialMasterNodes             []string `yaml:"cluster.initial_master_nodes,flow"` // 	cluster.initial_master_nodes
+	Processors                            int      `yaml:"processors"`                        // processors
+	BootstrapMemoryLock                   bool     `yaml:"bootstrap.memory_lock"`
+	BootstrapSystemCallFilter             bool     `yaml:"bootstrap.system_call_filter"`
+	XpackMonitoringCollectionEnabled      bool     `yaml:"xpack.monitoring.collection.enabled"`
+	ClusterRoutingAllocationSameShardHost bool     `yaml:"cluster.routing.allocation.same_shard.host"`
+}
+
+// KibanaYaml TODO
+// kibana.yml for kibana 7
+type KibanaYaml struct {
+	ServerName                                     string   `yaml:"server.name"`
+	ServerHost                                     string   `yaml:"server.host"`
+	ServerBasePath                                 string   `yaml:"server.basePath"`
+	ServerRewriteBasePath                          bool     `yaml:"server.rewriteBasePath"`
+	ElasticsearchHosts                             string   `yaml:"elasticsearch.hosts"`
+	ElasticsearchSslVerificationMode               string   `yaml:"elasticsearch.ssl.verificationMode"`
+	ElasticsearchUsername                          string   `yaml:"elasticsearch.username"`
+	ElasticsearchPassword                          string   `yaml:"elasticsearch.password"`
+	ElasticsearchRequestHeadersWhitelist           []string `yaml:"elasticsearch.requestHeadersWhitelist,flow"`
+	OpendistroSecurityMultitenancyEnabled          bool     `yaml:"opendistro_security.multitenancy.enabled"`
+	OpendistroSecurityMultitenancyTenantsPreferred []string `yaml:"opendistro_security.multitenancy.tenants.preferred,flow"`
+	OpendistroSecurityReadonlyModeRoles            []string `yaml:"opendistro_security.readonly_mode.roles,flow"`
+	OpendistroSecuritySessionKeepalive             bool     `yaml:"opendistro_security.session.keepalive"`
+	KXpackSecurityEnabled                          bool     `yaml:"xpack.security.enabled"`
+	XpackSpacesEnabled                             bool     `yaml:"xpack.spaces.enabled"`
+}
+
+// RenderConfig config values to be rendered. Todo
+type RenderConfig struct {
+	ClusterName          string
+	NodeName             string
+	HttpPort             int
+	CharacterSetServer   string
+	InnodbBufferPoolSize string
+	Logdir               string
+	ServerId             uint64
+}
+
+// InitDefaultParam TODO
+func (i *InstallEsComp) InitDefaultParam() (err error) {
+	logger.Info("start InitDefaultParam")
+	// var mountpoint string
+	i.InstallDir = cst.DefaultInstallDir
+	i.EsenvDir = cst.DefaulEsEnv
+	i.PkgDir = cst.DefaultPkgDir
+	i.EsDir = cst.DefaultEsDir
+	i.PathLogs = cst.DefaulEsLogDir
+	i.HttpPort = cst.DefaultHttpPort
+	i.NodeIngest = cst.IsNodeIngest
+	i.NodeMl = cst.IsNodeMl
+	i.BootstrapMemoryLock = cst.IsBootstrapMemoryLock
+	i.BootstrapSystemCallFilter = cst.IsBootstrapSystemCall
+	i.XpackSecurityEnabled = cst.IsXpackSecurityEnabled
+	i.XpackMonitoringCollectionEnabled = cst.IsXpackMoinitorEnabled
+	return nil
+}
+
+// InitEsDirs TODO
+/*
+create per-instance data/log directories and fix their ownership
+*/
+func (i *InstallEsComp) InitEsDirs() (err error) {
+
+	instances := i.Params.Instances
+
+	username := i.Params.Username
+	password := i.Params.Password
+
+	execUser := cst.DefaultExecUser
+	logger.Info("check whether user [%s] exists", execUser)
+	if _, err := user.Lookup(execUser); err != nil {
+		logger.Info("user [%s] does not exist, creating", execUser)
+		if output, err := osutil.ExecShellCommand(false, fmt.Sprintf("useradd %s -g root -s /bin/bash -d /home/mysql",
+			execUser)); err != nil {
+			logger.Error("create os user [%s] failed, %s, %v", execUser, output, err.Error())
+			return err
+		}
+		logger.Info("user [%s] created", execUser)
+	} else {
+		logger.Info("user [%s] exists, skip creating", execUser)
+	}
+
+	// mkdir
+	extraCmd := fmt.Sprintf("mkdir -p %s ;mkdir -p %s ; mkdir -p %s ; mkdir -p %s ; chown -R mysql %s",
+		cst.DefaultInstallDir, cst.DefaulEsEnv, cst.DefaulEsDataDir, cst.DefaulEsLogDir, "/data/es*")
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("init instance dirs failed: %s", err.Error())
+		return err
+	}
+
+	logger.Info("tune system parameters")
+	memlock := []byte(`* soft memlock unlimited
+* hard memlock unlimited
+mysql soft memlock unlimited
+mysql hard memlock unlimited`)
+
+	limitFile := "/etc/security/limits.d/es-nolock.conf"
+	if err = ioutil.WriteFile(limitFile, memlock, 0644); err != nil {
+		logger.Error("write %s failed, %v", limitFile, err)
+	}
+
+	extraCmd =
+		`sed -i -e "/vm.max_map_count/d" -e "/vm.swappiness/d" /etc/sysctl.conf ;echo -e "vm.max_map_count=262144\nvm.swappiness=1" >> /etc/sysctl.conf ;sysctl -p`
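+	// es bootstrap checks require vm.max_map_count >= 262144; swappiness=1 keeps
+	// the heap from being swapped out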
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("tune system parameters failed, %s", err.Error())
+	}
+
+	logger.Info("write es profile and hook it into /etc/profile")
+	scripts := []byte(fmt.Sprintf(`cat << 'EOF' > /data/esenv/esprofile
+export JAVA_HOME=/data/esenv/es_1/jdk
+export CLASSPATH=".:$JAVA_HOME/lib:$JRE/lib:$CLASSPATH"
+export ES_HOME=/data/esenv/es_1
+export ES_CONF_DIR=$ES_HOME/config
+export PATH=${JAVA_HOME}/bin:${ES_HOME}/bin:${ES_HOME}/sbin:$PATH
+export ES_USERNAME=%s
+export ES_PASSWORD=%s
+EOF
+
+chown mysql  /data/esenv/esprofile
+
+sed -i '/esprofile/d' /etc/profile
+echo "source /data/esenv/esprofile" >>/etc/profile`, username, password))
+
+	scriptFile := "/data/esenv/init.sh"
+	if err = ioutil.WriteFile(scriptFile, scripts, 0644); err != nil {
+		logger.Error("write %s failed, %v", scriptFile, err)
+	}
+
+	extraCmd = fmt.Sprintf("bash %s", scriptFile)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("run init script failed: %s", err.Error())
+	}
+
+	for ins := 1; ins <= instances; ins++ {
+		logDir := fmt.Sprintf("%s%d", cst.DefaulEsLogDir, ins)
+		dataDir := fmt.Sprintf("%s%d", cst.DefaulEsDataDir, ins)
+		extraCmd := fmt.Sprintf(`mkdir -p %s ;
+		chown -R mysql  %s ;
+		mkdir -p %s ;
+		chown -R mysql %s`, logDir, logDir, dataDir, dataDir)
+		if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("init instance dirs failed: %s", err.Error())
+			return err
+		}
+	}
+
+	return nil
+}
+
+// InstallMaster TODO
+/**
+ * @description: install master
+ * @return {*}
+ */
+func (i *InstallEsComp) InstallMaster() (err error) {
+
+	logger.Info("start deploying es master...")
+	if err := i.InstallEsBase(i.Params.Role, 1); err != nil {
+		logger.Error("deploy es master failed. %v", err)
+		return err
+	}
+	logger.Info("deploy es master finished")
+
+	return nil
+}
+
+// InstallClient TODO
+/**
+ * @description: install client
+ * @return {*}
+ */
+func (i *InstallEsComp) InstallClient() (err error) {

+	logger.Info("start deploying es client...")
+	if err := i.InstallEsBase(i.Params.Role, 1); err != nil {
+		logger.Error("deploy es client failed. %v", err)
+		return err
+	}
+	logger.Info("deploy es client finished")
+
+	return nil
+}
+
+// InstallHot TODO
+/**
+ * @description: install hot nodes
+ * @return {*}
+ */
+func (i *InstallEsComp) InstallHot() (err error) {

+	logger.Info("start deploying es hot nodes...")
+	if err := i.InstallEsBase(i.Params.Role, i.Params.Instances); err != nil {
+		logger.Error("deploy es hot nodes failed. %v", err)
+		return err
+	}
+	logger.Info("deploy es hot nodes finished")
+
+	return nil
+}
+
+// InstallCold TODO
+/**
+ * @description: install cold nodes
+ * @return {*}
+ */
+func (i *InstallEsComp) InstallCold() (err error) {
+	logger.Info("start deploying es cold nodes...")
+	if err := i.InstallEsBase(i.Params.Role, i.Params.Instances); err != nil {
+		logger.Error("deploy es cold nodes failed. %v", err)
+		return err
+	}
+	logger.Info("deploy es cold nodes finished")
+	return nil
+}
+
+// InstallEsBase common es install routine shared by all roles
+/**
+ * @description: common es install routine
+ * @return {*}
+ */
+func (i *InstallEsComp) InstallEsBase(role string, instances int) error {
+	var (
+		nodeIP         string          = i.Params.Host
+		nodeName       string          = fmt.Sprintf("%s-%s_1", role, nodeIP)
+		version        string          = i.Params.EsVersion
+		processors     int             = runtime.NumCPU() / instances
+		clusterName    string          = i.Params.ClusterName
+		masterIp       []string        = strings.Split(i.Params.MasterIp, ",")
+		masterNodename []string        = strings.Split(i.Params.MasterNodename, ",")
+		port           int             = i.Params.HttpPort
+		esBaseDir      string          = fmt.Sprintf("%s/elasticsearch-%s", cst.DefaulEsEnv, version)
+		esConfig       json.RawMessage = i.Params.EsConfigs
+	)
+	isMaster, isData := esutil.GetTfByRole(role)
+
+	esLink := fmt.Sprintf("%s/es", cst.DefaulEsEnv)
+	extraCmd := fmt.Sprintf("ln -s %s %s ", esBaseDir, esLink)
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("link failed, %s, %s", output, err.Error())
+		return err
+	}
+	cfgMap := make(map[string]interface{})
+
+	if err := json.Unmarshal(esConfig, &cfgMap); err != nil {
+		logger.Error("parse esconfig json failed, %s", err)
+		return err
+	}
+
+	pathData, ok := cfgMap["path_data"].(string)
+	if !ok || pathData == "" {
+		pathData = cst.DefaulEsDataDir
+	}
+
+	pathLog, ok := cfgMap["path_log"].(string)
+	if !ok || pathLog == "" {
+		pathLog = cst.DefaulEsLogDir
+	}
+
+	transportPass, ok := cfgMap["transport_pemkey_password"].(string)
+	if !ok || transportPass == "" {
+		return errors.New("transport_pemkey_password is empty, please check dbconfig")
+	}
+
+	httpPass, ok := cfgMap["http_pemkey_password"].(string)
+	if !ok || httpPass == "" {
+		return errors.New("http_pemkey_password is empty, please check dbconfig")
+	}
+
+	// deal with multi-disk
+	localDisks := esutil.GetPath()
+	diskCount := len(localDisks)
+	seed := diskCount / instances
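+	// e.g. 4 data disks and 2 instances => seed=2: each instance is assigned 2
+	// of the disks (exact mapping is up to esutil.GenPath)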
+	var esdataDir string
+	for ins := 1; ins <= instances; ins++ {
+
+		if diskCount != 0 {
+			// Generate path, eg: /data1/esdata1, /data2/esdata1 ...
+			tPaths := esutil.GenPath(ins, seed, localDisks)
+			for k, v := range tPaths {
+				// /data -> /data/esdata1
+				tPaths[k] = fmt.Sprintf("%s/esdata%d", v, ins)
+
+				// create data dir
+				extraCmd := fmt.Sprintf(`mkdir -p %s; chown -R mysql %s`, tPaths[k], tPaths[k])
+				logger.Info("Doing create dir [%s]", extraCmd)
+				if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+					logger.Error("Command [%s] failed, message: [%s]", extraCmd, err)
+					return err
+				}
+			}
+			esdataDir = strings.Join(tPaths, ",")
+		} else {
+			esdataDir = fmt.Sprintf("%s%d", pathData, ins)
+			extraCmd := fmt.Sprintf(`mkdir -p %s ;chown -R mysql  %s`, esdataDir, esdataDir)
+			if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+				logger.Error("Command [%s] failed: %s", extraCmd, err)
+				return err
+			}
+		}
+
+		logger.Info("Instance [%d] path.data: [%s]", ins, esdataDir)
+
+		// create log dir
+		eslogDir := fmt.Sprintf("%s%d", pathLog, ins)
+		extraCmd := fmt.Sprintf(`mkdir -p %s ;chown -R mysql  %s`, eslogDir, eslogDir)
+		if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("Create logdir failed, command [%s], %v", extraCmd, err)
+			return err
+		}
+
+		nodeName = fmt.Sprintf("%s-%s_%d", role, nodeIP, ins)
+		// cp /data/esenv/elasticsearch-$version /data/esenv/elasticsearch-$version_1
+		esBaseDirIns := fmt.Sprintf("%s_%d", esBaseDir, ins)
+		extraCmd = fmt.Sprintf("cp -a %s %s", esBaseDir, esBaseDirIns)
+		if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("copy basedir failed, %s, %s", output, err.Error())
+			return err
+		}
+
+		// ln -s /data/esenv/elasticsearch-$version_1 /data/esenv/es_1
+		esLink := fmt.Sprintf("%s/es_%d", cst.DefaulEsEnv, ins)
+		extraCmd = fmt.Sprintf("ln -s %s %s ", esBaseDirIns, esLink)
+		if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("create es symlink failed, %s, %s", output, err.Error())
+			return err
+		}
+
+		logger.Info("start rendering elasticsearch.yml")
+		i.ElasticsearchYaml = ElasticsearchYaml{
+			ClusterName:                           clusterName,
+			NodeName:                              nodeName,
+			NodeAttrTag:                           role,
+			NetworkHost:                           nodeIP,
+			NetworkPublishhost:                    nodeIP,
+			Nodedata:                              isData,
+			NodeMaster:                            isMaster,
+			NodeMl:                                i.NodeMl,
+			NodeIngest:                            i.NodeIngest,
+			HttpPort:                              port,
+			PathData:                              esdataDir,
+			PathLogs:                              eslogDir,
+			XpackSecurityEnabled:                  i.XpackSecurityEnabled,
+			DiscoverySeedHosts:                    masterIp,
+			ClusterInitialMasterNodes:             masterNodename,
+			Processors:                            processors,
+			BootstrapMemoryLock:                   i.BootstrapMemoryLock,
+			BootstrapSystemCallFilter:             i.BootstrapSystemCallFilter,
+			XpackMonitoringCollectionEnabled:      i.XpackMonitoringCollectionEnabled,
+			ClusterRoutingAllocationSameShardHost: true,
+		}
+
+		data, err := yaml.Marshal(&i.ElasticsearchYaml)
+		if err != nil {
+			logger.Error("marshal elasticsearch.yml failed, %v", err)
+		}
+
+		esYamlFile := fmt.Sprintf("%s/config/elasticsearch.yml", esLink)
+		if err = ioutil.WriteFile(esYamlFile, data, 0644); err != nil {
+			logger.Error("write %s failed, %v", esYamlFile, err)
+		}
+		if err = esutil.WriteCerToYaml(esYamlFile, transportPass, httpPass); err != nil {
+			logger.Error("write opendistro_security config failed, %v", err)
+		}
+		logger.Info("generate jvm options")
+		heapSize, err := esutil.GetInstHeapByIP(uint64(instances))
+		if err != nil {
+			logger.Error("calculate heap size failed, %v", err)
+		}
+
+		jvmOp := esutil.GenerateHeapOption(heapSize)
+		heapSizeFile := fmt.Sprintf("%s/heap.options", cst.DefaultJvmOptionD)
+		if err = ioutil.WriteFile(heapSizeFile, jvmOp, 0644); err != nil {
+			logger.Error("write %s failed, %v", heapSizeFile, err)
+		}
+
+		logger.Info("generate elasticsearch.ini")
+		esini := esutil.GenEsini(uint64(ins))
+		esiniFile := fmt.Sprintf("%s/elasticsearch%d.ini", cst.DefaultSupervisorConf, ins)
+		if err = ioutil.WriteFile(esiniFile, esini, 0644); err != nil {
+			logger.Error("write %s failed, %v", esiniFile, err)
+		}
+		port++
+	}
+
+	if err := esutil.SupervisorctlUpdate(); err != nil {
+		logger.Error("supervisorctl update failed %v", err)
+		return err
+	}
+
+	// sleep 60s to wait for es to come up
+	time.Sleep(60 * time.Second)
+
+	return nil
+}
+
+// DecompressEsPkg TODO
+/**
+ * @description: verify and unpack the es package
+ * @return {*}
+ */
+func (i *InstallEsComp) DecompressEsPkg() (err error) {
+	if err = os.Chdir(i.EsenvDir); err != nil {
+		return fmt.Errorf("cd to dir %s failed, err:%w", i.EsenvDir, err)
+	}
+	// if /data/esenv/es already exists, remove it
+	if util.FileExists(i.EsDir) {
+		if _, err = osutil.ExecShellCommand(false, "rm -rf "+i.EsDir); err != nil {
+			logger.Error("rm -rf %s error: %v", i.EsDir, err)
+			return err
+		}
+	}
+	pkgAbPath := fmt.Sprintf("%s/espack-%s.tar.gz", i.PkgDir, i.Params.EsVersion)
+	if output, err := osutil.ExecShellCommand(false, fmt.Sprintf("tar zxf %s", pkgAbPath)); err != nil {
+		logger.Error("tar zxf %s error:%s,%s", pkgAbPath, output, err.Error())
+		return err
+	}
+
+	logger.Info("es binary directory: %s", i.EsenvDir)
+	if _, err := os.Stat(i.EsenvDir); err != nil {
+		logger.Error("%s check failed, %v", i.EsenvDir, err)
+		return err
+	}
+	logger.Info("decompress es pkg successfully")
+	return nil
+}
+
+// InstallSupervisor TODO
+/**
+ * @description: install supervisor
+ * @return {*}
+ */
+func (i *InstallEsComp) InstallSupervisor() (err error) {
+	// Todo: check supervisor exist
+	// supervisor
+
+	if !util.FileExists(cst.DefaultSupervisorConf) {
+		logger.Error("supervisor conf %s not found", cst.DefaultSupervisorConf)
+		return fmt.Errorf("supervisor conf %s not found", cst.DefaultSupervisorConf)
+	}
+
+	extraCmd := fmt.Sprintf("ln -sf %s %s", i.EsenvDir+"/"+"supervisor/conf/supervisord.conf", "/etc/supervisord.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.EsenvDir+"/"+"supervisor/bin/supervisorctl", "/usr/local/bin/supervisorctl")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.EsenvDir+"/"+"python/bin/supervisord", "/usr/local/bin/supervisord")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chown -R mysql %s ", i.EsenvDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// crontab
+	extraCmd = `crontab  -l -u mysql >/home/mysql/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+	}
+
+	extraCmd = `cp /home/mysql/crontab.bak /home/mysql/crontab.tmp`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = `sed -i '/check_supervisord.sh/d' /home/mysql/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd =
+		`echo '*/1 * * * *  /data/esenv/supervisor/check_supervisord.sh >> /data/esenv/supervisor/check_supervisord.err 2>&1' >>/home/mysql/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = `crontab -u mysql /home/mysql/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	startCmd := `su - mysql -c "/usr/local/bin/supervisord -c /data/esenv/supervisor/conf/supervisord.conf"`
+	logger.Info(fmt.Sprintf("execute supervisor [%s] begin", startCmd))
+	pid, err := osutil.RunInBG(false, startCmd)
+	logger.Info(fmt.Sprintf("execute supervisor [%s] end, pid: %d", startCmd, pid))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// InstallKibana TODO
+/**
+ *  @description: install kibana
+ *  @return
+ */
+func (i *InstallEsComp) InstallKibana() error {
+	// check package
+
+	ver := i.Params.EsVersion
+	v1, err := version.NewVersion(ver)
+	if err != nil {
+		logger.Error("invalid es version %s, %v", ver, err)
+		return err
+	}
+	v2, _ := version.NewVersion("7.0") // the kibana config layout differs for 7.x+
+	kibanaPkgDir := fmt.Sprintf("%s/kibana-%s-linux-x86_64", cst.DefaulEsEnv, ver)
+	kibanaLink := fmt.Sprintf("%s/kibana", cst.DefaulEsEnv)
+	extraCmd := fmt.Sprintf("ln -sf %s %s", kibanaPkgDir, kibanaLink)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	if v1.GreaterThan(v2) {
+		i.KibanaYaml = KibanaYaml{
+
+			ServerName: "kibana",
+			ServerHost: "0",
+			ServerBasePath: fmt.Sprintf("/%d/%s/%s/%s", i.Params.BkBizId, i.Params.DbType,
+				i.Params.ClusterName, i.Params.ServiceType), // {bk_biz_id}/{db_type}/{cluster_name}/{service_type}
+			ServerRewriteBasePath:                          false,
+			ElasticsearchHosts:                             fmt.Sprintf("http://%s:%d", i.Params.Host, i.Params.HttpPort),
+			ElasticsearchSslVerificationMode:               "none",
+			ElasticsearchUsername:                          i.Params.Username,
+			ElasticsearchPassword:                          i.Params.Password,
+			ElasticsearchRequestHeadersWhitelist:           cst.KibanaWhiteList,
+			OpendistroSecurityMultitenancyEnabled:          false,
+			OpendistroSecurityMultitenancyTenantsPreferred: cst.Kibanatenancy,
+			OpendistroSecurityReadonlyModeRoles:            cst.KibanaRole,
+			OpendistroSecuritySessionKeepalive:             true,
+			KXpackSecurityEnabled:                          false,
+			XpackSpacesEnabled:                             false,
+		}
+		// render kibana.yml
+		data, err := yaml.Marshal(&i.KibanaYaml)
+		if err != nil {
+			logger.Error("marshal kibana yaml failed, %v", err)
+			return err
+		}
+		kyaml := "/data/esenv/kibana/config/kibana.yml"
+		if err = ioutil.WriteFile(kyaml, data, 0644); err != nil {
+			logger.Error("write %s failed, %v", kyaml, err)
+			return err
+		}
+
+	}
+
+	// kibana.ini
+	data := esutil.GenKibanaini()
+	kini := "/data/esenv/supervisor/conf/kibana.ini"
+	if err := ioutil.WriteFile(kini, data, 0644); err != nil {
+		logger.Error("write %s failed, %v", kini, err)
+		return err
+	}
+	if err := esutil.SupervisorctlUpdate(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// InitGrant TODO
+/**
+ *  @description: initialize grants
+ *  @return
+ */
+func (i *InstallEsComp) InitGrant() (err error) {
+	username := i.Params.Username
+	password := i.Params.Password
+	host := i.Params.Host
+	version := i.Params.EsVersion
+
+	scripts := []byte(`
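+# usage: boost.sh <user> <password> <local_ip> <es_version>
+# registers an admin account in the security plugin's internal users file, then pushes it to the cluster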
+creater_user=$1
+passwd=$2
+local_ip=$3
+version=$4
+echo $local_ip
+	
+cd /data/esenv/
+esdirs=$(ls -F|grep 'es.*@'|awk -F @ '{print $1}')
+	
+	
+if [[ $version > "7.0.0" ]]
+then
+userpasswd=$(sh /data/esenv/es_1/plugins/opendistro_security/tools/hash.sh -p "$passwd")
+cd /data/esenv/es_1
+
+[[ ! -e ./plugins/opendistro_security/securityconfig/internal_users.yml.tml ]] && cp ./plugins/opendistro_security/securityconfig/internal_users.yml ./plugins/opendistro_security/securityconfig/internal_users.yml.tml
+
+cp ./plugins/opendistro_security/securityconfig/internal_users.yml.tml  ./plugins/opendistro_security/securityconfig/internal_users.yml
+
+		echo "
+$creater_user:
+  hash: \"$userpasswd\"
+  reserved: true
+  backend_roles:
+  - \"admin\"
+  description: \"admin user\"
+" >>  ./plugins/opendistro_security/securityconfig/internal_users.yml
+	
+	
+		cd /data/esenv/es_1/plugins/opendistro_security/tools
+
+		JAVA_OPTS="-Xms128m -Xmx128m" sh /data/esenv/es_1/plugins/opendistro_security/tools/securityadmin.sh -h "$local_ip"  -p 9300  -cacert /data/esenv/es_1/config/root-ca.pem  -cert /data/esenv/es_1/config/kirk.pem  -key /data/esenv/es_1/config/kirk.key  -keypass ba9H4Q6esq0x  -dg -arc -nhnv -icl -ff -cd /data/esenv/es_1/plugins/opendistro_security/securityconfig
+	
+	else 
+		if [[ $version > "6.0.0" ]]
+		then
+			sgdir=/data/esenv/es_1/plugins/search-guard-6
+		else
+			sgdir=/data/esenv/es_1/plugins/search-guard-5
+		fi
+		userpasswd=$(sh $sgdir/tools/hash.sh -p $passwd)
+	
+		cd /data/esenv/es_1
+		[[ ! -e $sgdir/sgconfig/sg_internal_users.yml.tml ]] && cp $sgdir/sgconfig/sg_internal_users.yml $sgdir/sgconfig/sg_internal_users.yml.tml
+		cp $sgdir/sgconfig/sg_internal_users.yml.tml $sgdir/sgconfig/sg_internal_users.yml
+
+		echo "$creater_user:
+  hash: $userpasswd
+  roles:
+  - admin" >> $sgdir/sgconfig/sg_internal_users.yml
+	
+	
+		# once
+		cd $sgdir/tools/
+		sh sgadmin.sh  -h $local_ip -p 9300  -cacert /data/esenv/es_1/config/root-ca.pem  -cert /data/esenv/es_1/config/kirk.pem  -key /data/esenv/es_1/config/kirk.key  -keypass ba9H4Q6esq0x  -nhnv -icl -cd ../sgconfig
+	fi`)
+
+	scriptFile := "/data/esenv/boost.sh"
+	if err = ioutil.WriteFile(scriptFile, scripts, 0644); err != nil {
+		logger.Error("write %s failed, %v", scriptFile, err)
+		return err
+	}
+
+	extraCmd := fmt.Sprintf("bash %s %s %s %s %s", scriptFile, username, password, host, version)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	return nil
+}
+
+// InstallTelegraf TODO
+/**
+ *  @description: deploy telegraf
+ *  @return
+ */
+func (i *InstallEsComp) InstallTelegraf() (err error) {
+	clusterName := i.Params.ClusterName
+	host := i.Params.Host
+	esHost := fmt.Sprintf("%s:%d", host, i.Params.HttpPort)
+	teConfFile := "/data/esenv/telegraf/etc/telegraf/telegraf.conf"
+	extraCmd := fmt.Sprintf(`sed -i -e "s/CLUSTER_NAME/%s/" -e "s/HOSTNAME/%s/"  -e "s/ESHOST/%s/"  %s`, clusterName, host,
+		esHost, teConfFile)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = "cp -a  /data/esenv/telegraf/telegraf.ini /data/esenv/supervisor/conf/"
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	if err = esutil.SupervisorctlUpdate(); err != nil {
+		logger.Error("supervisorctl update failed, %v", err)
+		return err
+	}
+
+	return nil
+}
+
+// InstallNodeExporter TODO
+/**
+ *  @description: deploy node exporter
+ *  @return
+ */
+func (i *InstallEsComp) InstallNodeExporter() (err error) {
+	data := []byte(`[program:node_exporter]
+command=/data/esenv/node_exporter/node_exporter --web.listen-address=":9100" ; the program (relative uses PATH, can take args)
+numprocs=1 ; number of processes copies to start (def 1)
+autostart=true ; start at supervisord start (default: true)
+startsecs=3 ; # of secs prog must stay up to be running (def. 1)
+startretries=99 ; max # of serial start failures when starting (default 3)
+autorestart=true ; when to restart if exited after running (def: unexpected)
+exitcodes=0 ; 'expected' exit codes used with autorestart (default 0,2)
+user=mysql ;
+redirect_stderr=true ; redirect proc stderr to stdout (default false)
+stdout_logfile=/data/esenv/node_exporter/node_exporter_startup.log ; stdout log path, NONE for none; default AUTO
+stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)`)
+
+	exporterIni := "/data/esenv/supervisor/conf/node_exporter.ini"
+
+	if err = ioutil.WriteFile(exporterIni, data, 0644); err != nil {
+		logger.Error("write %s failed, %v", exporterIni, err)
+		return err
+	}
+
+	if err = esutil.SupervisorctlUpdate(); err != nil {
+		logger.Error("supervisorctl update failed, %v", err)
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/replace_node.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/replace_node.go
new file mode 100644
index 0000000000..0e65a3ae6e
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/replace_node.go
@@ -0,0 +1,60 @@
+package elasticsearch
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"strings"
+)
+
+// ReplaceEsNodeComp TODO
+type ReplaceEsNodeComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *ReplaceEsNodeParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// ReplaceEsNodeParams TODO
+type ReplaceEsNodeParams struct {
+	Masters []string `json:"masters"`
+}
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (r *ReplaceEsNodeComp) Init() (err error) {
+	logger.Info("Replace es node fake init")
+	return nil
+}
+
+// ReplaceMasterNode TODO
+/**
+ *  @description: update the master node list
+ *  @return
+ */
+func (r *ReplaceEsNodeComp) ReplaceMasterNode() (err error) {
+	masters := r.Params.Masters
+	ipStr := strings.Join(masters, ",")
+	masterStr := esutil.ToMasterStr(masters)
+	seedHosts := fmt.Sprintf("[%s]", ipStr)
+	initMaster := fmt.Sprintf("[%s]", masterStr)
+	esenv := cst.DefaulEsEnv
+	yamlPaths := fmt.Sprintf(`%s/es_*/config/elasticsearch.yml`, esenv)
+
+	extraCmd := fmt.Sprintf(
+		`sed -i -e '/discovery.seed_hosts/s/\[.*\]/%s/' -e '/cluster.initial_master_nodes/s/\[.*\]/%s/' %s`, seedHosts,
+		initMaster, yamlPaths)
+	logger.Info("updating master nodes, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/startstop_process.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/startstop_process.go
new file mode 100644
index 0000000000..9dfef43b27
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/elasticsearch/startstop_process.go
@@ -0,0 +1,106 @@
+package elasticsearch
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+)
+
+// StartStopProcessComp TODO
+type StartStopProcessComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *ProcessParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// ProcessParams TODO
+type ProcessParams struct {
+	Node string `json:"node"`
+}
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) Init() (err error) {
+	logger.Info("Start/stop process fake init")
+	return nil
+}
+
+// StopProcess TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) StopProcess() (err error) {
+
+	// stop the process
+	node := d.Params.Node
+	processId, err := esutil.GetNumByNode(node)
+	if err != nil {
+		return err
+	}
+	extraCmd := fmt.Sprintf("supervisorctl stop %s", processId)
+	logger.Info("stopping process, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
+
+// StartProcess TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) StartProcess() (err error) {
+
+	// start the process
+	node := d.Params.Node
+	processId, err := esutil.GetNumByNode(node)
+	if err != nil {
+		return err
+	}
+	extraCmd := fmt.Sprintf("supervisorctl start %s", processId)
+	logger.Info("starting process, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
+
+// RestartProcess TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) RestartProcess() (err error) {
+
+	// stop the process
+	node := d.Params.Node
+	processId, err := esutil.GetNumByNode(node)
+	if err != nil {
+		return err
+	}
+	extraCmd := fmt.Sprintf("supervisorctl stop %s", processId)
+	logger.Info("stopping process, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// start the process
+	extraCmd = fmt.Sprintf("supervisorctl start %s", processId)
+	logger.Info("starting process, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/fileserver/README.md b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/fileserver/README.md
new file mode 100644
index 0000000000..e453dd114a
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/fileserver/README.md
@@ -0,0 +1,9 @@
+### Component: simple file server
+
+Exposes a given local directory over HTTP for download, e.g. to pull backups from another machine when rebuilding a standby. When the OS does not allow ssh logins (scp/sftp), this service can be started temporarily to fetch the backup files.
+
+Features:
+1. basic-auth access with a randomly generated password
+2. source-IP restrictions, with allowed IPs addable at runtime
+3. a cap on concurrent connections; requests beyond it wait
+4. automatic exit once the max idle time is exceeded
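+
+A minimal `extend` payload, mirroring the component's `Example()` (values here are illustrative):
+
+```json
+{
+    "extend": {
+        "bind_address": "127.0.0.1:18081",
+        "mount_path": "/data/dbbak",
+        "auth_user": "test_bk_biz_id",
+        "auth_pass": "",
+        "acls": ["allow 127.0.0.1/32", "deny all"],
+        "max_connections": 10,
+        "proc_maxidle_duration": "1h"
+    }
+}
+```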
\ No newline at end of file
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/fileserver/acl.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/fileserver/acl.go
new file mode 100644
index 0000000000..d440508b57
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/fileserver/acl.go
@@ -0,0 +1,67 @@
+package fileserver
+
+import (
+	"fmt"
+	"net"
+	"strings"
+)
+
+// ACL TODO
+type ACL struct {
+	Action string `json:"action"`
+	Who    string `json:"who"`
+	Rule   string `json:"rule"`
+}
+
+// checkACL godoc
+// TODO accept both 1.1.1.1 and 1.1.1.1/32 as valid
+func checkACL(acls []string, remoteAddr net.Addr, clientAddr string) error {
+	if len(acls) == 0 {
+		return nil
+	}
+	var remoteIP net.IP
+	if clientAddr == "" {
+		clientAddr = remoteAddr.String()
+	}
+	host, _, err := net.SplitHostPort(clientAddr)
+	if err != nil {
+		return fmt.Errorf("BUG: invalid remote address %q", clientAddr)
+	}
+	remoteIP = net.ParseIP(host)
+	if remoteIP == nil {
+		return fmt.Errorf("BUG: invalid remote host %s", host)
+	}
+	for _, acl := range acls {
+		// TODO(performance): move ACL parsing to config-time to make ACL checks
+		// less expensive
+		i := strings.Index(acl, " ")
+		if i < 0 {
+			return fmt.Errorf("invalid acl: %q (no space found)", acl)
+		}
+		action, who := acl[:i], acl[i+len(" "):]
+		if action != "allow" && action != "deny" {
+			return fmt.Errorf("invalid acl: %q (syntax: allow|deny <all|CIDR>)", acl)
+		}
+		if who == "all" {
+			// The all keyword matches any remote IP address
+		} else {
+			_, ipNet, err := net.ParseCIDR(who)
+			if err != nil {
+				return fmt.Errorf("invalid acl: %q (syntax: allow|deny <all|CIDR>)", acl)
+			}
+			if !ipNet.Contains(remoteIP) {
+				// skip this rule; the remote IP does not match
+				continue
+			}
+		}
+		switch action {
+		case "allow":
+			return nil
+		case "deny":
+			return fmt.Errorf("access denied (acl %q)", acl)
+		default:
+			return fmt.Errorf("invalid acl: %q (syntax: allow|deny <all|CIDR>)", acl)
+		}
+	}
+	return nil
+}
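+
+// Example: rules are evaluated first-match-wins, so with
+// acls = []string{"allow 10.0.0.0/8", "deny all"} a request from 10.1.2.3
+// matches the first rule and is allowed, while one from 192.168.0.1 falls
+// through to "deny all" and is rejected.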
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/fileserver/fileserver.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/fileserver/fileserver.go
new file mode 100644
index 0000000000..9d5e16dbbd
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/fileserver/fileserver.go
@@ -0,0 +1,342 @@
+// Package fileserver TODO
+package fileserver
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"math/rand"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/backup_download"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+
+	"github.com/pkg/errors"
+	"golang.org/x/net/netutil"
+)
+
+// FileServerComp TODO
+type FileServerComp struct {
+	Params FileServer `json:"extend"`
+}
+
+// FileServer TODO
+type FileServer struct {
+	// http file-server 监听地址. 不提供端口,会在 12000-19999 之间随机选择一个端口,不提供 ip 时默认 localhost
+	BindAddress string `json:"bind_address" validate:"required"`
+	// 将本地哪个目录通过 http 分享
+	MountPath string `json:"mount_path" validate:"required"`
+	// path_prefix 用在生成 url 时的路径前缀. 可留空
+	PathPrefix string `json:"path_prefix"`
+	// http basic auth user
+	AuthUser string `json:"auth_user" validate:"required"`
+	// http basic auth pass,为空时会随机生成密码
+	AuthPass string `json:"auth_pass"`
+	// 访问来源限制,从前往后匹配。格式 `["allow 1.1.1.1/32", "deny all"]`
+	ACLs []string `json:"acls" example:"allow all"`
+	// 暂不支持
+	EnableTls bool `json:"enable_tls"`
+	// 输出 download http 的信息,方便使用
+	PrintDownload bool `json:"print_download"`
+
+	bindHost       string
+	bindPort       string
+	procName       string
+	procStartTime  time.Time
+	lastActiveTime time.Time
+
+	// 限制最大连接数,超过需要等待. 为 0 时表示不限制
+	MaxConnections int `json:"max_connections"`
+	// 超过最大空闲时间,自动退出. 示例 3600s, 60m, 1h
+	ProcMaxIdleDuration string `json:"proc_maxidle_duration" example:"1h"`
+
+	procMaxIdleDuration time.Duration
+	server              *http.Server
+	cw                  *ConnectionWatcher
+}
+
+// Example TODO
+func (s *FileServerComp) Example() interface{} {
+	comp := FileServerComp{
+		Params: FileServer{
+			BindAddress:         "1.1.1.1:18081",
+			MountPath:           "/data/dbbak",
+			PathPrefix:          "",
+			AuthUser:            "test_bk_biz_id",
+			AuthPass:            "",
+			ACLs:                []string{"allow 127.0.0.1/32", "deny all"},
+			MaxConnections:      10,
+			ProcMaxIdleDuration: "1h",
+		},
+	}
+	return comp
+}
+
+// New TODO
+func (s *FileServer) New() error {
+	var err error
+	if s.BindAddress, err = s.getBindAddress(); err != nil {
+		return err
+	}
+	if err = s.Validate(); err != nil {
+		return err
+	}
+	if s.AuthUser == "" {
+		return fmt.Errorf("no access user provided")
+	}
+	if s.AuthPass == "" {
+		s.AuthPass = cmutil.RandomString(12)
+	}
+	if s.MaxConnections == 0 {
+		s.MaxConnections = 9999
+	}
+	if s.ProcMaxIdleDuration == "" {
+		s.procMaxIdleDuration = 3600 * time.Second
+	} else {
+		s.procMaxIdleDuration, err = time.ParseDuration(s.ProcMaxIdleDuration)
+		if err != nil {
+			return errors.Wrap(err, s.ProcMaxIdleDuration)
+		}
+	}
+	if s.PathPrefix == "" {
+		s.PathPrefix = fmt.Sprintf("/%s/", s.procName)
+	}
+	if len(s.ACLs) == 0 {
+		s.ACLs = []string{fmt.Sprintf("allow %s/32", s.bindHost)}
+	}
+	// always "deny all"
+	s.ACLs = append(s.ACLs, "deny all")
+	// logger.Info("FileServer %+v", s)
+	// print dbactuator params format
+	fmt.Println(s)
+	return nil
+}
+
+// String renders the config as JSON for printing
+func (s *FileServer) String() string {
+	str, _ := json.Marshal(s)
+	return string(str)
+}
+
+func (s *FileServer) getBindAddress() (string, error) {
+	var host, port string
+	var err error
+	if s.BindAddress == "" {
+		host = hostDefault
+		port = getRandPort()
+	} else {
+		if host, port, err = net.SplitHostPort(s.BindAddress); err != nil {
+			if strings.Contains(err.Error(), "missing port") {
+				host = s.BindAddress
+				port = getRandPort()
+			} else {
+				return "", err
+			}
+		} else {
+			if host == "" {
+				host = hostDefault
+			}
+			if port == "" {
+				port = getRandPort()
+			}
+		}
+	}
+	s.bindHost = host
+	s.bindPort = port
+	s.BindAddress = fmt.Sprintf("%s:%s", host, port)
+	return s.BindAddress, nil
+}
+
+// Validate TODO
+func (s *FileServer) Validate() error {
+	if s.MountPath == "" || s.MountPath == "/" || !strings.HasPrefix(s.MountPath, "/data") {
+		return fmt.Errorf("path should start with /data")
+	}
+	// @todo should check mount_path exists or not
+
+	pathID := util.RegexReplaceSubString(s.MountPath, `%|/| `, "")
+	if pathID == "" {
+		return fmt.Errorf("invalid path %s", s.MountPath)
+	}
+	s.procName = fmt.Sprintf("%s%s", pathID, s.bindPort)
+	return nil
+}
+
+func (s *FileServer) handleFileServer(prefix string, handler http.Handler) http.HandlerFunc {
+	// realHandler := http.StripPrefix(prefix, handler)
+	// h := http.StripPrefix(prefix, handler)
+
+	return func(w http.ResponseWriter, req *http.Request) {
+		s.lastActiveTime = time.Now()
+		handler.ServeHTTP(w, req)
+	}
+}
+
+// Start TODO
+func (s *FileServer) Start() error {
+	if err := s.Validate(); err != nil {
+		log.Fatalln(err)
+	}
+
+	handler := http.StripPrefix(s.PathPrefix, http.FileServer(http.Dir(s.MountPath)))
+	hFunc := aclHandler(s.ACLs, s.handleBasicAuth(s.handleFileServer(s.PathPrefix, handler)))
+	http.HandleFunc(s.PathPrefix, hFunc)
+
+	s.cw = &ConnectionWatcher{}
+	server := &http.Server{
+		Addr:      s.BindAddress,
+		Handler:   nil,
+		ConnState: s.cw.OnStateChange,
+	}
+	s.server = server
+
+	// http.Handle(s.Prefix, http.StripPrefix(s.Prefix, http.FileServer(http.Dir(s.Path))))
+	s.procStartTime = time.Now()
+	s.lastActiveTime = time.Now()
+	li, err := net.Listen("tcp", s.BindAddress)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	li = netutil.LimitListener(li, s.MaxConnections) // cap concurrent connections
+
+	go func() {
+		if err := server.Serve(li); err != nil {
+			log.Fatalln(err)
+		}
+	}()
+	// s.WaitDone()
+	return nil
+}
+
+// WaitDone TODO
+func (s *FileServer) WaitDone() error {
+	for {
+		time.Sleep(5 * time.Second)
+		idleDura := time.Since(s.lastActiveTime)
+		if s.cw.Count() > 0 {
+			logger.Info("server connections %d", s.cw.Count())
+			s.lastActiveTime = time.Now()
+		} else if idleDura > s.procMaxIdleDuration && s.cw.Count() == 0 && s.procMaxIdleDuration > 0 {
+			logger.Info("server idle %s exceed max_idle_duration %s", idleDura, s.ProcMaxIdleDuration)
+			s.server.Close()
+			break
+		} else {
+			logger.Debug("server idle %v", idleDura)
+		}
+	}
+	return nil
+}
+
+// OutputCtx TODO
+func (s *FileServer) OutputCtx() error {
+	if !s.PrintDownload {
+		return nil
+	}
+	httpGet := backup_download.DFHttpComp{
+		Params: backup_download.DFHttpParam{
+			DFBase: backup_download.DFBase{
+				BWLimitMB:   50,
+				Concurrency: 1,
+			},
+			HttpGet: backup_download.HttpGet{
+				Server:   fmt.Sprintf("http://%s%s", s.BindAddress, s.PathPrefix),
+				PathTgt:  "/data/dbbak",
+				FileList: []string{"xx", "yy"},
+				AuthUser: s.AuthUser,
+				AuthPass: s.AuthPass,
+			},
+		},
+	}
+	components.PrintOutputCtx(components.ToPrettyJson(httpGet))
+	return nil
+}
+
+func (s *FileServer) handleBasicAuth(next http.HandlerFunc) http.HandlerFunc {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if r.Method != http.MethodGet {
+			w.WriteHeader(http.StatusMethodNotAllowed)
+			return
+		}
+		// basicAuthPrefix := "Basic "
+		// auth := r.Header.Get("Authorization")
+		w.Header().Set("Content-Type", r.Header.Get("Content-Type"))
+		u, p, ok := r.BasicAuth()
+		if ok {
+			if u == s.AuthUser && p == s.AuthPass {
+				logger.Info("requested %s", r.URL)
+				// w.WriteHeader(200)
+				s.lastActiveTime = time.Now()
+				if next != nil {
+					next.ServeHTTP(w, r)
+				}
+				return
+			}
+		}
+		w.Header().Set("WWW-Authenticate", `Basic realm="restricted", charset="UTF-8"`)
+		// w.WriteHeader(http.StatusUnauthorized)
+		http.Error(w, "Unauthorized BA", http.StatusUnauthorized)
+	})
+}
+
+func aclHandler(acls []string, next http.HandlerFunc) http.HandlerFunc {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if err := checkACL(acls, nil, r.RemoteAddr); err != nil {
+			http.Error(w, "Unauthorized IP", http.StatusUnauthorized)
+			return
+		}
+		if next != nil {
+			next.ServeHTTP(w, r)
+		}
+	})
+}
+
+func (s *FileServer) addAcl(acl string) {
+	// is acl valid?
+	s.ACLs = append([]string{acl}, s.ACLs...)
+}
+
+var portRange []int = []int{12000, 19999}
+var hostDefault = "localhost"
+
+func getRandPort() string {
+	diff := portRange[1] - portRange[0]
+	port := rand.Intn(diff) + portRange[0]
+	return strconv.Itoa(port)
+}
+
+// ConnectionWatcher TODO
+type ConnectionWatcher struct {
+	n int64
+}
+
+// OnStateChange records open connections in response to connection
+// state changes. Set net/http Server.ConnState to this method
+// as value.
+func (cw *ConnectionWatcher) OnStateChange(conn net.Conn, state http.ConnState) {
+	switch state {
+	case http.StateNew:
+		cw.Add(1)
+	case http.StateHijacked, http.StateClosed:
+		cw.Add(-1)
+	}
+}
+
+// Count returns the number of connections at the time of
+// the call.
+func (cw *ConnectionWatcher) Count() int {
+	return int(atomic.LoadInt64(&cw.n))
+}
+
+// Add adds c to the number of active connections.
+func (cw *ConnectionWatcher) Add(c int64) {
+	atomic.AddInt64(&cw.n, c)
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/check_nn_active b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/check_nn_active
new file mode 100644
index 0000000000..81d7a7458b
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/check_nn_active
@@ -0,0 +1,10 @@
+#!/bin/sh
+if [ -z "$HAPROXY_SERVER_NAME" ]; then
+  namenodeId="nn1"
+else 
+  namenodeId="$HAPROXY_SERVER_NAME"
+fi
+
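+# haproxy invokes this via external-check: exit 0 (the NameNode reports "active") marks the server up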
+export HADOOP_USER_NAME=hadoop
+export JAVA_HOME=/data/hadoopenv/java
+hdfs haadmin -getServiceState $namenodeId | grep active > /dev/null 
\ No newline at end of file
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/config_tpl.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/config_tpl.go
new file mode 100644
index 0000000000..1a12c5b263
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/config_tpl.go
@@ -0,0 +1,2 @@
+// Package config_tpl TODO
+package config_tpl
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/hadoop-daemon-wrapper.sh b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/hadoop-daemon-wrapper.sh
new file mode 100644
index 0000000000..9d7809fbc0
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/hadoop-daemon-wrapper.sh
@@ -0,0 +1,234 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Runs a Hadoop command as a daemon.
+#
+# Environment Variables
+#
+#   HADOOP_CONF_DIR  Alternate conf dir. Default is ${HADOOP_PREFIX}/conf.
+#   HADOOP_LOG_DIR   Where log files are stored.  PWD by default.
+#   HADOOP_MASTER    host:path where hadoop code should be rsync'd from
+#   HADOOP_PID_DIR   The pid files are stored. /tmp by default.
+#   HADOOP_IDENT_STRING   A string representing this instance of hadoop. $USER by default
+#   HADOOP_NICENESS The scheduling priority for daemons. Defaults to 0.
+##
+
+usage="Usage: hadoop-daemon.sh [--config <conf-dir>] [--hosts hostlistfile] [--script script] (start|stop) <hadoop-command> <args...>"
+
+# if no args specified, show usage
+if [ $# -le 1 ]; then
+  echo $usage
+  exit 1
+fi
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+. $HADOOP_LIBEXEC_DIR/hadoop-config.sh
+
+# get arguments
+
+#default value
+hadoopScript="$HADOOP_PREFIX"/bin/hadoop
+if [ "--script" = "$1" ]
+  then
+    shift
+    hadoopScript=$1
+    shift
+fi
+startStop=$1
+shift
+command=$1
+shift
+
+hadoop_rotate_log ()
+{
+    log=$1;
+    num=5;
+    if [ -n "$2" ]; then
+        num=$2
+    fi
+    if [ -f "$log" ]; then # rotate logs
+        while [ $num -gt 1 ]; do
+            prev=`expr $num - 1`
+            [ -f "$log.$prev" ] && mv "$log.$prev" "$log.$num"
+            num=$prev
+        done
+        mv "$log" "$log.$num";
+    fi
+}
+
+hadoop_kill_pid ()
+{
+    tpid=$1
+    if kill -0 $tpid > /dev/null 2>&1; then
+      echo stopping $command
+      kill $tpid
+      sleep $HADOOP_STOP_TIMEOUT
+      if kill -0 $tpid > /dev/null 2>&1; then
+        echo "$command did not stop gracefully after $HADOOP_STOP_TIMEOUT seconds: killing with kill -9"
+        kill -9 $tpid
+      fi
+    else
+      echo no $command pid=$tpid to stop
+    fi
+}
+
+if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
+  . "${HADOOP_CONF_DIR}/hadoop-env.sh"
+fi
+
+# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
+if [ "$command" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
+  export HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
+  export HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
+  export HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
+  starting_secure_dn="true"
+fi
+
+#Determine if we're starting a privileged NFS, if so, redefine the appropriate variables
+if [ "$command" == "nfs3" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_PRIVILEGED_NFS_USER" ]; then
+    export HADOOP_PID_DIR=$HADOOP_PRIVILEGED_NFS_PID_DIR
+    export HADOOP_LOG_DIR=$HADOOP_PRIVILEGED_NFS_LOG_DIR
+    export HADOOP_IDENT_STRING=$HADOOP_PRIVILEGED_NFS_USER
+    starting_privileged_nfs="true"
+fi
+
+if [ "$HADOOP_IDENT_STRING" = "" ]; then
+  export HADOOP_IDENT_STRING="$USER"
+fi
+
+
+# get log directory
+if [ "$HADOOP_LOG_DIR" = "" ]; then
+  export HADOOP_LOG_DIR="$HADOOP_PREFIX/logs"
+fi
+
+if [ ! -w "$HADOOP_LOG_DIR" ] ; then
+  mkdir -p "$HADOOP_LOG_DIR"
+  chown $HADOOP_IDENT_STRING $HADOOP_LOG_DIR
+fi
+
+if [ "$HADOOP_PID_DIR" = "" ]; then
+  HADOOP_PID_DIR=/tmp
+fi
+
+# some variables
+export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
+export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"}
+export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"}
+export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-"INFO,NullAppender"}
+log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
+echo "$HADOOP_PID_DIR"
+pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
+HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
+
+# Set default scheduling priority
+if [ "$HADOOP_NICENESS" = "" ]; then
+    export HADOOP_NICENESS=0
+fi
+
+case $startStop in
+
+  (start|start-foreground)
+
+    [ -w "$HADOOP_PID_DIR" ] ||  mkdir -p "$HADOOP_PID_DIR"
+
+    if [ -f $pid ]; then
+      if kill -0 `cat $pid` > /dev/null 2>&1; then
+        echo $command running as process `cat $pid`.  Stop it first.
+        exit 1
+      fi
+    fi
+
+    if [ "$HADOOP_MASTER" != "" ]; then
+      echo rsync from $HADOOP_MASTER
+      rsync -a -e ssh --delete --exclude=.svn --exclude='logs/*' --exclude='contrib/hod/logs/*' $HADOOP_MASTER/ "$HADOOP_PREFIX"
+    fi
+
+    hadoop_rotate_log $log
+    echo starting $command, logging to $log
+    cd "$HADOOP_PREFIX"
+    case $command in
+      namenode|secondarynamenode|datanode|journalnode|dfs|dfsadmin|fsck|balancer|zkfc)
+        if [ -z "$HADOOP_HDFS_HOME" ]; then
+          hdfsScript="$HADOOP_PREFIX"/bin/hdfs
+        else
+          hdfsScript="$HADOOP_HDFS_HOME"/bin/hdfs
+        fi
+        if [ "$startStop" = "start-foreground" ];then
+        nice -n $HADOOP_NICENESS $hdfsScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1
+        if [ $? -gt 0 ];then
+          echo "start $command foreground error" >> $log
+          exit 1
+        fi
+        else
+        nohup nice -n $HADOOP_NICENESS $hdfsScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+        fi
+      ;;
+      (*)
+        nohup nice -n $HADOOP_NICENESS $hadoopScript --config $HADOOP_CONF_DIR $command "$@" > "$log" 2>&1 < /dev/null &
+      ;;
+    esac
+    echo $! > $pid
+    sleep 1
+    head "$log"
+    # capture the ulimit output
+    if [ "true" = "$starting_secure_dn" ]; then
+      echo "ulimit -a for secure datanode user $HADOOP_SECURE_DN_USER" >> $log
+      # capture the ulimit info for the appropriate user
+      su --shell=/bin/bash $HADOOP_SECURE_DN_USER -c 'ulimit -a' >> $log 2>&1
+    elif [ "true" = "$starting_privileged_nfs" ]; then
+        echo "ulimit -a for privileged nfs user $HADOOP_PRIVILEGED_NFS_USER" >> $log
+        su --shell=/bin/bash $HADOOP_PRIVILEGED_NFS_USER -c 'ulimit -a' >> $log 2>&1
+    else
+      echo "ulimit -a for user $USER" >> $log
+      ulimit -a >> $log 2>&1
+    fi
+    sleep 3;
+    if ! ps -p $! > /dev/null ; then
+      exit 1
+    fi
+    ;;
+          
+  (stop)
+
+    if [ -f $pid ]; then
+      TARGET_PID=`cat $pid`
+      hadoop_kill_pid $TARGET_PID
+      rm -f $pid
+    else
+      ps_pid=`ps -ef|grep hadoop|grep proc_${command}|grep -v grep|awk '{print $2}'`
+      if [ -n "$ps_pid" ];then
+        TARGET_PID=$ps_pid
+        hadoop_kill_pid $TARGET_PID
+      else
+        echo no $command to stop
+      fi
+    fi
+    ;;
+
+  (*)
+    echo $usage
+    exit 1
+    ;;
+
+esac
+
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/hadoop-env.sh b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/hadoop-env.sh
new file mode 100644
index 0000000000..8891cf215b
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/hadoop-env.sh
@@ -0,0 +1,28 @@
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
+for f in $HADOOP_HOME/contrib/capacity-scheduler/*.jar; do
+  if [ "$HADOOP_CLASSPATH" ]; then
+    export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$f
+  else
+    export HADOOP_CLASSPATH=$f
+  fi
+done
+export HADOOP_HEAPSIZE="1024"
+export HADOOP_NAMENODE_INIT_HEAPSIZE="2048"
+export HADOOP_NAMENODE_OPTS="{{NN_JVM_MEM}}"
+export HADOOP_DATANODE_OPTS="{{DN_JVM_MEM}}"
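+# {{NN_JVM_MEM}} and {{DN_JVM_MEM}} are template placeholders; the actuator is expected to substitute concrete JVM options when rendering this file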
+export JVM_GC_OPTS="-verbose:gc -Xloggc:$HADOOP_HOME/logs/node-jvm-gc.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=1M"
+export HDFS_AUDIT_LOGGER="INFO,RFAAUDIT"
+export HADOOP_OPTS="-XX:-UseGCOverheadLimit -XX:+UseParNewGC -XX:+UseConcMarkSweepGC -XX:CMSInitiatingOccupancyFraction=70 -XX:+CMSParallelRemarkEnabled"
+export HADOOP_OPTS="$HADOOP_OPTS -Djava.net.preferIPv4Stack=true"
+export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS $JVM_GC_OPTS"
+export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS $JVM_GC_OPTS"
+export HADOOP_NFS3_OPTS="$HADOOP_NFS3_OPTS"
+export HADOOP_PORTMAP_OPTS="-Xmx512m $HADOOP_PORTMAP_OPTS"
+export HADOOP_CLIENT_OPTS="-Xmx512m $HADOOP_CLIENT_OPTS"
+export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
+export HADOOP_LOG_DIR="/data/hadoopenv/hadoop/logs"
+export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
+export HADOOP_PID_DIR=/data/hadoopenv/hadoop/pids
+export HADOOP_SECURE_DN_PID_DIR=${HADOOP_PID_DIR}
+export HADOOP_IDENT_STRING=$USER
\ No newline at end of file
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/haproxy.cfg b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/haproxy.cfg
new file mode 100644
index 0000000000..7f12e9ddd5
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/haproxy.cfg
@@ -0,0 +1,60 @@
+global
+    log         127.0.0.1 local2
+    external-check
+    pidfile     /var/run/haproxy.pid
+    maxconn     100000
+    user        hadoop
+    group       root
+    stats socket /var/lib/haproxy/stats
+
+defaults
+    mode                    tcp
+    log                     global
+    option                  tcplog
+    option                  dontlognull
+    option http-server-close
+    option forwardfor       except 127.0.0.0/8
+    option                  redispatch
+    retries                 3
+    timeout http-request    5s
+    timeout queue           1m
+    timeout connect         5s
+    timeout client          1m
+    timeout server          1m
+    timeout http-keep-alive 5s
+    timeout check           5s
+    maxconn                 100000
+    default_backend active_nn
+
+userlist webhdfs_users
+    user root insecure-password {{haproxy_passwd}}
+
+frontend  rpc_{{cluster_name}}
+    bind :{{rpc_port}}
+    default_backend active_rpc_nn
+
+frontend web_{{cluster_name}}
+    bind :{{http_port}}
+    default_backend active_nn
+
+backend active_nn
+    balance first
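+    # "first" keeps traffic on nn1 while its check passes; nn2 is marked backup and only takes over on failure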
+    acl auth_webhdfs http_auth(webhdfs_users)
+    http-request auth realm webhdfs_users if !auth_webhdfs
+    http-request del-header Authorization
+
+    #reqirep ^([^\ :]*)\ /dbproxy/stm/(.*)  \1\ /\2
+    http-request set-path %[path,regsub(/dbproxy/stm/webhdfs/v1/,/webhdfs/v1/,g)]
+    option httpchk GET /jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus
+    http-check expect rstring \"State\"\s*:\s*\"active\"
+    server nn1_{{nn1_host}} {{nn1_host}}:{{http_port}} check inter 10000 rise 2 fall 2
+    server nn2_{{nn2_host}} {{nn2_host}}:{{http_port}} check inter 10000 rise 2 fall 2 backup
+
+backend active_rpc_nn
+    mode tcp
+    balance first
+    option external-check
+    external-check command /usr/bin/check_nn_active
+    external-check path "/usr/java/TencentKona-8.0.9-322/bin:/usr/java/TencentKona-8.0.9-322/jre/bin:/usr/bin:/bin:/data/hadoopenv/hadoop/bin:/data/hadoopenv/hadoop/sbin"
+    server nn1 {{nn1_host}}:{{rpc_port}} check inter 10000 rise 2 fall 2 
+    server nn2 {{nn2_host}}:{{rpc_port}} check inter 10000 rise 2 fall 2 backup
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/log4j.properties b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/log4j.properties
new file mode 100644
index 0000000000..c928639249
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/log4j.properties
@@ -0,0 +1,59 @@
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+log4j.threshold=ALL
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+hadoop.log.maxfilesize=256MB
+hadoop.log.maxbackupindex=20
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.console=org.apache.log4j.varia.NullAppender
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+hadoop.security.logger=INFO,NullAppender
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth-${user.name}.audit
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+hdfs.audit.logger=INFO,RFAAUDIT
+hdfs.audit.log.maxfilesize=256MB
+hdfs.audit.log.maxbackupindex=20
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
\ No newline at end of file
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/rack-aware.sh b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/rack-aware.sh
new file mode 100644
index 0000000000..35025098d9
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/rack-aware.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Hadoop rack-awareness topology script: map every host passed in to /default-rack
+
+for i in `echo $@`
+do
+  echo "/default-rack"
+done
\ No newline at end of file
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/template.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/template.go
new file mode 100644
index 0000000000..8782d2b2c6
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl/template.go
@@ -0,0 +1,51 @@
+package config_tpl
+
+import "embed"
+
+// Log4jPropertiesFileName TODO
+var Log4jPropertiesFileName = "log4j.properties"
+
+// Log4jPropertiesFile TODO
+//
+//go:embed log4j.properties
+var Log4jPropertiesFile embed.FS
+
+// RackAwareFileName TODO
+var RackAwareFileName = "rack-aware.sh"
+
+// RackAwareFile TODO
+//
+//go:embed rack-aware.sh
+var RackAwareFile embed.FS
+
+// HadoopEnvFileName TODO
+var HadoopEnvFileName = "hadoop-env.sh"
+
+// HadoopEnvFile TODO
+//
+//go:embed hadoop-env.sh
+var HadoopEnvFile embed.FS
+
+// HaproxyCfgFileName TODO
+var HaproxyCfgFileName = "haproxy.cfg"
+
+// HaproxyCfgFile TODO
+//
+//go:embed haproxy.cfg
+var HaproxyCfgFile embed.FS
+
+// HadoopDaemonWrapperFileName TODO
+var HadoopDaemonWrapperFileName = "hadoop-daemon-wrapper.sh"
+
+// HadoopDaemonWrapper TODO
+//
+//go:embed hadoop-daemon-wrapper.sh
+var HadoopDaemonWrapper embed.FS
+
+// ExternalCheckFileName TODO
+var ExternalCheckFileName = "check_nn_active"
+
+// ExternalCheckFile TODO
+//
+//go:embed check_nn_active
+var ExternalCheckFile embed.FS
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/const.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/const.go
new file mode 100644
index 0000000000..c0cc02cac0
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/const.go
@@ -0,0 +1,59 @@
+package hdfs
+
+const (
+	// DefaultPkgDir TODO
+	DefaultPkgDir = "/data/install"
+	// DefaultInstallDir TODO
+	DefaultInstallDir = "/data/hadoopenv"
+	// DefaultJdkDir TODO
+	DefaultJdkDir = DefaultInstallDir + "/java"
+	// DefaultHttpPort TODO
+	DefaultHttpPort = 50070
+	// DefaultRpcPort TODO
+	DefaultRpcPort = 9000
+	// DefaultMetaDataDir TODO
+	DefaultMetaDataDir = "/data/hadoopdata"
+	// DefaultHdfsHomeDir TODO
+	DefaultHdfsHomeDir = DefaultInstallDir + "/hadoop"
+	// DefaultSupervisorConfDir TODO
+	DefaultSupervisorConfDir = DefaultInstallDir + "/supervisor/conf"
+	// DefaultExecuteUser TODO
+	DefaultExecuteUser = "hadoop"
+	// DefaultHdfsConfDir TODO
+	DefaultHdfsConfDir = DefaultHdfsHomeDir + "/etc/hadoop"
+	// DefaultJdkVersion TODO
+	DefaultJdkVersion = "TencentKona-8.0.9-322"
+	// DefaultZkVersion TODO
+	DefaultZkVersion = "3.4.5-cdh5.4.11"
+)
+
+const (
+	// All TODO
+	All = "all"
+	// ZooKeeper TODO
+	ZooKeeper = "zookeeper"
+	// JournalNode TODO
+	JournalNode = "journalnode"
+	// NameNode TODO
+	NameNode = "namenode"
+	// ZKFC TODO
+	ZKFC = "zkfc"
+	// DataNode TODO
+	DataNode = "datanode"
+)
+
+const (
+	// Stop TODO
+	Stop = "stop"
+	// Start TODO
+	Start = "start"
+	// Restart TODO
+	Restart = "restart"
+)
+
+const (
+	// Add TODO
+	Add = "add"
+	// Remove TODO
+	Remove = "remove"
+)
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/decompress_pkg.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/decompress_pkg.go
new file mode 100644
index 0000000000..69b3afbaf2
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/decompress_pkg.go
@@ -0,0 +1,79 @@
+package hdfs
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"os"
+)
+
+// DecompressPkgParams TODO
+type DecompressPkgParams struct {
+	HdfsSite      map[string]string `json:"hdfs-site"`
+	CoreSite      map[string]string `json:"core-site"`
+	ZooCfg        map[string]string `json:"zoo.cfg"`
+	InstallConfig `json:"install"`
+	Version       string `json:"version"  validate:"required"` // version, e.g. 2.6.0-cdh-5.4.11
+
+}
+
+// DecompressPkgService TODO
+type DecompressPkgService struct {
+	GeneralParam *components.GeneralParam
+	InstallParams
+	Params *DecompressPkgParams
+
+	RollBackContext rollback.RollBackObjects
+}
+
+// PreCheck TODO
+func (i *DecompressPkgService) PreCheck() (err error) {
+
+	// check that the extraction target directory exists
+	if err = os.Chdir(i.InstallDir); err != nil {
+		return fmt.Errorf("cd to dir %s failed, err:%w", i.InstallDir, err)
+	}
+	// if the Hadoop home directory already exists, remove it first
+	if util.FileExists(i.HdfsHomeDir) {
+		if _, err = osutil.ExecShellCommand(false, "rm -rf "+i.HdfsHomeDir); err != nil {
+			logger.Error("rm -rf %s error: %v", i.HdfsHomeDir, err)
+			return err
+		}
+	}
+	return nil
+}
+
+// DecompressPkg TODO
+func (i *DecompressPkgService) DecompressPkg() (err error) {
+	// the package bundles jdk, hadoop, supervisor, telegraf and haproxy
+	pkgAbPath := i.PkgDir + "/hdfspack-" + i.Params.Version + ".tar.gz"
+	if output, err := osutil.ExecShellCommand(false, fmt.Sprintf("tar zxf %s -C %s", pkgAbPath,
+		i.InstallDir)); err != nil {
+		logger.Error("tar zxf %s error:%s,%s", pkgAbPath, output, err.Error())
+		return err
+	}
+
+	// create the hadoop symlink
+	extraCmd := fmt.Sprintf("ln -sf %s %s", i.InstallDir+"/hadoop-"+i.Params.Version, i.HdfsHomeDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	// create the JDK symlink
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.InstallDir+"/"+i.JdkVersion, i.JdkDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	// create the ZK symlink (use an absolute target so this does not depend on the current working directory)
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.InstallDir+"/zookeeper-"+i.ZkVersion, i.InstallDir+"/zookeeper")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	logger.Info("decompress hdfs pkg successfully")
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/hdfs.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/hdfs.go
new file mode 100644
index 0000000000..f2678949fc
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/hdfs.go
@@ -0,0 +1,2 @@
+// Package hdfs TODO
+package hdfs
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/init_system_config.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/init_system_config.go
new file mode 100644
index 0000000000..ac9f143037
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/init_system_config.go
@@ -0,0 +1,59 @@
+package hdfs
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"io/ioutil"
+)
+
+// InitSystemConfigParams TODO
+type InitSystemConfigParams struct {
+	InstallConfig `json:"install"`
+	HostMap       map[string]string `json:"host_map"`
+}
+
+// InitSystemConfigService TODO
+type InitSystemConfigService struct {
+	GeneralParam *components.GeneralParam
+	InstallParams
+	Params *InitSystemConfigParams
+
+	RollBackContext rollback.RollBackObjects
+}
+
+// InitSystemConfig TODO
+func (i *InitSystemConfigService) InitSystemConfig() (err error) {
+
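+	// refresh /etc/hosts idempotently: drop any stale line ending with this hostname, then append "ip hostname"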
+	for k, v := range i.Params.HostMap {
+		deleteCommand := fmt.Sprintf("sed -i '/%s$/d' /etc/hosts", v)
+		if _, err := osutil.ExecShellCommand(false, deleteCommand); err != nil {
+			logger.Error("exec delete hostname failed %s", err.Error())
+		}
+		echoCommand := fmt.Sprintf("echo \"%s %s\" >> /etc/hosts", k, v)
+		_, err = osutil.ExecShellCommand(false, echoCommand)
+		if err != nil {
+			logger.Error("exec update host failed %s", err.Error())
+		}
+	}
+	data, err := staticembed.SysInitHdfsScript.ReadFile(staticembed.SysInitHdfsScriptFileName)
+	if err != nil {
+		logger.Error("read sysinit script failed %s", err.Error())
+		return err
+	}
+	tmpScriptName := "/tmp/sysinit.sh"
+	if err = ioutil.WriteFile(tmpScriptName, data, 07555); err != nil {
+		logger.Error("write tmp script failed %s", err.Error())
+		return err
+	}
+	command := fmt.Sprintf("/bin/bash -c \"%s\"", tmpScriptName)
+	_, err = osutil.ExecShellCommand(false, command)
+	if err != nil {
+		logger.Error("exec sysinit script failed %s", err.Error())
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/install_haproxy.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/install_haproxy.go
new file mode 100644
index 0000000000..df7000693e
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/install_haproxy.go
@@ -0,0 +1,102 @@
+package hdfs
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"io/ioutil"
+	"strings"
+)
+
+// InstallHaproxyService TODO
+type InstallHaproxyService struct {
+	GeneralParam *components.GeneralParam
+	Params       *InstallHaproxyParams
+	InstallParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// InstallHaproxyParams from db_flow
+type InstallHaproxyParams struct {
+	Host          string `json:"host" validate:"required,ip"`
+	InstallConfig `json:"install"`
+	HttpPort      int               `json:"http_port" validate:"required"`
+	RpcPort       int               `json:"rpc_port" validate:"required"`
+	ClusterName   string            `json:"cluster_name" validate:"required"` // cluster name
+	HaproxyPasswd string            `json:"haproxy_passwd"`                   // haproxy password
+	HostMap       map[string]string `json:"host_map"`
+	Nn1Ip         string            `json:"nn1_ip" validate:"required"` // nn1 ip, eg: ip1
+	Nn2Ip         string            `json:"nn2_ip" validate:"required"` // nn2 ip, eg: ip1
+}
+
+// InstallHaProxy TODO
+func (i *InstallHaproxyService) InstallHaProxy() (err error) {
+	osVersion := "7"
+	linuxVersion, _ := osutil.ExecShellCommand(false, "cat /etc/redhat-release | awk '{print $4}'")
+	if strings.HasPrefix(linuxVersion, "6") {
+		osVersion = "6"
+	}
+
+	rpmPackages := strings.Split(i.Params.InstallConfig.HaProxyRpm, ",")
+	for _, value := range rpmPackages {
+		if strings.Contains(value, "el"+osVersion) {
+			extraCmd := fmt.Sprintf("rpm -ivh --nodeps %s/%s > /dev/null", i.InstallDir, value)
+			if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+				logger.Error("%s execute failed, %v", extraCmd, err)
+			}
+		}
+	}
+	return nil
+}
+
+// RenderHaProxyConfig TODO
+func (i *InstallHaproxyService) RenderHaProxyConfig() (err error) {
+
+	// write the health-check script
+	shellContent, err := config_tpl.ExternalCheckFile.ReadFile(config_tpl.ExternalCheckFileName)
+	if err != nil {
+		logger.Error("read external check template failed %s", err.Error())
+		return err
+	}
+	if err = ioutil.WriteFile("/usr/bin/"+config_tpl.ExternalCheckFileName, shellContent, 07555); err != nil {
+		logger.Error("write haproxy external check failed %s", err.Error())
+		return err
+	}
+
+	// write haproxy.cfg
+	data, err := config_tpl.HaproxyCfgFile.ReadFile(config_tpl.HaproxyCfgFileName)
+	if err != nil {
+		logger.Error("read config template failed %s", err.Error())
+		return err
+	}
+	if err = ioutil.WriteFile("/etc/haproxy/haproxy.cfg", data, 07555); err != nil {
+		logger.Error("write haproxy config failed %s", err.Error())
+		return err
+	}
+	// substitute template parameters via sed
+	nn1Host := i.Params.HostMap[i.Params.Nn1Ip]
+	nn2Host := i.Params.HostMap[i.Params.Nn2Ip]
+	extraCmd := fmt.Sprintf(`sed -i -e "s/{{cluster_name}}/%s/g" -e "s/{{nn1_host}}/%s/g"  -e "s/{{nn2_host}}/%s/g"  %s`,
+		i.Params.ClusterName,
+		nn1Host, nn2Host, "/etc/haproxy/haproxy.cfg")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf(`sed -i -e "s/{{rpc_port}}/%d/g" -e "s/{{http_port}}/%d/" -e "s/{{haproxy_passwd}}/%s/g" %s`,
+		i.Params.RpcPort, i.Params.HttpPort, i.Params.HaproxyPasswd, "/etc/haproxy/haproxy.cfg")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
+
+// StartHaProxy TODO
+func (i *InstallHaproxyService) StartHaProxy() (err error) {
+	return ServiceCommand("haproxy", Start)
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/install_hdfs.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/install_hdfs.go
new file mode 100644
index 0000000000..4c97619857
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/install_hdfs.go
@@ -0,0 +1,465 @@
+package hdfs
+
+import (
+	"bytes"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/config_tpl"
+	util2 "dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"strings"
+)
+
+// InstallHdfsService TODO
+type InstallHdfsService struct {
+	GeneralParam *components.GeneralParam
+	Params       *InstallHdfsParams
+
+	InstallParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// InstallHdfsParams from db_flow
+type InstallHdfsParams struct {
+	Host          string          `json:"host" validate:"required,ip"`
+	HdfsSite      util2.ConfigMap `json:"hdfs-site"`
+	CoreSite      util2.ConfigMap `json:"core-site"`
+	ZooCfg        util2.ConfigMap `json:"zoo.cfg"`
+	InstallConfig `json:"install"`
+
+	HttpPort      int               `json:"http_port" validate:"required"`
+	RpcPort       int               `json:"rpc_port" validate:"required"`
+	HttpPort      int               `json:"http_port" validate:"required"`
+	RpcPort       int               `json:"rpc_port" validate:"required"`
+	Version       string            `json:"version"  validate:"required"`     // version, e.g. 2.6.0-cdh-5.4.11
+	ClusterName   string            `json:"cluster_name" validate:"required"` // cluster name
+	HaproxyPasswd string            `json:"haproxy_passwd"`                   // haproxy password
+	HostMap       map[string]string `json:"host_map"`
+	Nn1Ip         string            `json:"nn1_ip" validate:"required"` // nn1 ip, eg: ip1
+	Nn2Ip         string            `json:"nn2_ip" validate:"required"` // nn2 ip, eg: ip1
+	ZkIps         string            `json:"zk_ips" validate:"required"` // zookeeper ips, eg: ip1,ip2,ip3
+	JnIps         string            `json:"jn_ips" validate:"required"` // journalnode ips, eg: ip1,ip2,ip3
+	DnIps         string            `json:"dn_ips" validate:"required"` // datanode ips, eg: ip1,ip2,ip3
+
+}
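+
+// A hypothetical db_flow payload, for illustration only (IPs, ports and host
+// names below are made-up values, not taken from any real environment):
+//
+//	{
+//	  "host": "127.0.0.1",
+//	  "hdfs-site": {"dfs.replication": "2"},
+//	  "core-site": {"fs.trash.interval": "1440"},
+//	  "http_port": 50070,
+//	  "rpc_port": 9000,
+//	  "version": "2.6.0-cdh-5.4.11",
+//	  "cluster_name": "hdfs-demo",
+//	  "host_map": {"127.0.0.1": "demo-nn1"},
+//	  "nn1_ip": "127.0.0.1",
+//	  "nn2_ip": "127.0.0.2",
+//	  "zk_ips": "127.0.0.1,127.0.0.2,127.0.0.3",
+//	  "jn_ips": "127.0.0.1,127.0.0.2,127.0.0.3",
+//	  "dn_ips": "127.0.0.4,127.0.0.5"
+//	}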
+
+// InstallParams HDFS install configuration, currently filled with defaults
+type InstallParams struct {
+	InstallDir        string `json:"install_dir"`
+	JdkDir            string `json:"jdk_dir"`
+	PkgDir            string `json:"pkg_dir"`
+	MetaDataDir       string `json:"meta_data_dir"`
+	HdfsHomeDir       string `json:"hdfs_home_dir"`
+	HdfsConfDir       string `json:"hdfs_conf_dir"`
+	SupervisorConfDir string `json:"supervisor_conf_dir"`
+	ExecuteUser       string `json:"exec_user"`
+	JdkVersion        string `json:"jdk_version"`
+	ZkVersion         string `json:"zk_version"`
+}
+
+// InstallConfig TODO
+type InstallConfig struct {
+	JdkVersion string `json:"jdkVersion"` // JDK version
+	HaProxyRpm string `json:"haproxy_rpm"`
+}
+
+// HaProxyConfig TODO
+type HaProxyConfig struct {
+	clusterName string
+}
+
+// InitDefaultInstallParam TODO
+func InitDefaultInstallParam() (params InstallParams) {
+	logger.Info("start InitDefaultInstallParam")
+
+	return InstallParams{
+		PkgDir:            DefaultPkgDir,
+		InstallDir:        DefaultInstallDir,
+		JdkDir:            DefaultJdkDir,
+		MetaDataDir:       DefaultMetaDataDir,
+		HdfsHomeDir:       DefaultHdfsHomeDir,
+		SupervisorConfDir: DefaultSupervisorConfDir,
+		ExecuteUser:       DefaultExecuteUser,
+		JdkVersion:        DefaultJdkVersion,
+		HdfsConfDir:       DefaultHdfsConfDir,
+		ZkVersion:         DefaultZkVersion,
+	}
+
+}
+
+// InstallSupervisor TODO
+func (i *InstallHdfsService) InstallSupervisor() (err error) {
+
+	// the supervisor directory is expected to have been unpacked into the install dir already
+	if !util.FileExists(DefaultSupervisorConfDir) {
+		logger.Error("supervisor conf dir %s not exist", DefaultSupervisorConfDir)
+		return fmt.Errorf("supervisor conf dir %s not exist", DefaultSupervisorConfDir)
+	}
+
+	extraCmd := fmt.Sprintf("ln -sf %s %s", i.InstallDir+"/"+"supervisor/conf/supervisord.conf", "/etc/supervisord.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.InstallDir+"/"+"supervisor/bin/supervisorctl", "/usr/local/bin/supervisorctl")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.InstallDir+"/"+"python/bin/supervisord", "/usr/local/bin/supervisord")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// write hadoop-daemon-wrapper.sh
+	data, err := config_tpl.HadoopDaemonWrapper.ReadFile(config_tpl.HadoopDaemonWrapperFileName)
+	if err != nil {
+		logger.Error("read shell template failed %s", err.Error())
+		return err
+	}
+	if err = ioutil.WriteFile(i.HdfsHomeDir+"/sbin/"+config_tpl.HadoopDaemonWrapperFileName, data, 0755); err != nil {
+		logger.Error("write hadoop-daemon shell failed %s", err.Error())
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chown -R %s %s ", i.ExecuteUser, i.InstallDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
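+	// start supervisord in the background; only the launch is attempted here,
+	// the returned pid is logged and the process is not waited on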
+	startCmd := "/usr/local/bin/supervisord -c /data/hadoopenv/supervisor/conf/supervisord.conf"
+	logger.Info(fmt.Sprintf("execute supervisor [%s] begin", startCmd))
+	pid, err := osutil.RunInBG(false, startCmd)
+	logger.Info(fmt.Sprintf("execute supervisor [%s] end, pid: %d", startCmd, pid))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// InstallJournalNode TODO
+func (i *InstallHdfsService) InstallJournalNode() (err error) {
+	return i.SupervisorUpdateConfig(JournalNode)
+}
+
+// InstallNn1 TODO
+func (i *InstallHdfsService) InstallNn1() (err error) {
+
+	formatCommand := fmt.Sprintf("su - %s -c \"source /etc/profile ; hdfs namenode -format -force 2> /dev/null\"",
+		i.ExecuteUser)
+	if _, err = osutil.ExecShellCommand(false, formatCommand); err != nil {
+		// do not use the exit code to decide whether formatting completed
+		logger.Error("%s execute failed, %v", formatCommand, err)
+	}
+	// check whether an fsimage snapshot file was generated
+	nameDir, err := osutil.ExecShellCommand(false, "hdfs getconf -confKey dfs.namenode.name.dir | xargs echo -n")
+	if err != nil {
+		logger.Error("get metadata dir execute failed, %v", err)
+		return err
+	}
+	logger.Info("richie-test: %s", nameDir)
+	if strings.HasPrefix(nameDir, "file://") {
+		nameDir = strings.TrimPrefix(nameDir, "file://")
+	}
+	logger.Info("richie-test after Trim Prefix: %s", nameDir)
+
+	checkCmd := fmt.Sprintf("ls -ltr %s/current/ | grep fsimage", nameDir)
+	if _, err = osutil.ExecShellCommand(false, checkCmd); err != nil {
+		logger.Error("%s execute failed, %v", checkCmd, err)
+		return err
+	}
+
+	// assumed to succeed by default; err is not captured and no validation is done
+	// (validating would have to run on the JN hosts, which is hard when they are not co-located)
+	initSharedEditsCommand := fmt.Sprintf(
+		"su - %s -c \"source /etc/profile ; hdfs namenode -initializeSharedEdits -force 2> /dev/null\"", i.ExecuteUser)
+	if _, err = osutil.ExecShellCommand(false, initSharedEditsCommand); err != nil {
+		logger.Error("%s execute failed, %v", initSharedEditsCommand, err)
+	}
+	return i.SupervisorUpdateConfig(NameNode)
+}
+
+// InstallNn2 TODO
+func (i *InstallHdfsService) InstallNn2() (err error) {
+
+	standbyCommand := fmt.Sprintf("su - %s -c \"hdfs namenode -bootstrapStandby -force 2> /dev/null\"", i.ExecuteUser)
+	if _, err = osutil.ExecShellCommand(false, standbyCommand); err != nil {
+		logger.Error("%s execute failed, %v", standbyCommand, err)
+	}
+	// check whether an fsimage snapshot file was generated
+	nameDir, err := osutil.ExecShellCommand(false, "hdfs getconf -confKey dfs.namenode.name.dir | xargs echo -n")
+	if err != nil {
+		logger.Error("get metadata dir execute failed, %v", err)
+		return err
+	}
+	if strings.HasPrefix(nameDir, "file://") {
+		nameDir = strings.TrimPrefix(nameDir, "file://")
+	}
+	checkCmd := fmt.Sprintf("ls -ltr %s/current/ | grep fsimage", nameDir)
+	if _, err = osutil.ExecShellCommand(false, checkCmd); err != nil {
+		logger.Error("%s execute failed, %v", checkCmd, err)
+		return err
+	}
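+	// presumably gives the bootstrapped standby time to settle before it is
+	// registered with supervisor (the 30s value is the author's choice)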
+	sleepCommand := fmt.Sprintf("su - %s -c \"sleep 30 \"", i.ExecuteUser)
+	if _, err = osutil.ExecShellCommand(false, sleepCommand); err != nil {
+		logger.Error("%s execute failed, %v", sleepCommand, err)
+	}
+
+	if err = i.SupervisorUpdateConfig(NameNode); err != nil {
+		logger.Error("SupervisorUpdateConfig failed, %v", err)
+		return err
+	}
+
+	formatZKCommand := fmt.Sprintf("su - %s -c \"source /etc/profile; hdfs zkfc -formatZK -force 2> /dev/null\"",
+		i.ExecuteUser)
+	if _, err = osutil.ExecShellCommand(false, formatZKCommand); err != nil {
+		logger.Error("%s execute failed, %v", formatZKCommand, err)
+	}
+
+	return nil
+}
+
+// InstallZKFC TODO
+func (i *InstallHdfsService) InstallZKFC() (err error) {
+	return i.SupervisorUpdateConfig(ZKFC)
+}
+
+// InstallDataNode TODO
+func (i *InstallHdfsService) InstallDataNode() (err error) {
+	if _, err = osutil.ExecShellCommand(false, "rm -rf /data/hadoopdata/data"); err != nil {
+		logger.Error("delete data dir failed, %v", err)
+	}
+	extraCmd := fmt.Sprintf(`sed -i -e "s/{{dn_host}}/%s/g" %s`, i.Params.HostMap[i.Params.Host],
+		i.HdfsConfDir+"/hdfs-site.xml")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+	}
+	// update the DataNode data dir config and initialize the dirs
+	hdfsDirs := util2.GetHdfsDataMountDir()
+	hdfsDirStr := ""
+	for _, hdfsDir := range hdfsDirs {
+		mkdirCmd := fmt.Sprintf("mkdir -p %s/hadoopdata", hdfsDir)
+		if _, err = osutil.ExecShellCommand(false, mkdirCmd); err != nil {
+			logger.Error("%s execute failed, %v", mkdirCmd, err)
+		}
+		chownCmd := fmt.Sprintf("chown -R %s %s ", i.ExecuteUser, hdfsDir)
+		if _, err = osutil.ExecShellCommand(false, chownCmd); err != nil {
+			logger.Error("%s execute failed, %v", chownCmd, err)
+		}
+		hdfsDirStr = fmt.Sprintf("%s%s/hadoopdata/data,", hdfsDirStr, hdfsDir)
+	}
+	hdfsDirStr = strings.TrimSuffix(hdfsDirStr, ",")
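+	// replace the default single data dir in hdfs-site.xml with the
+	// comma-separated per-disk dir list, escaping "/" for the sed pattern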
+	replaceConfCmd := fmt.Sprintf(`sed -i -e "s/file:\/\/\/data\/hadoopdata\/data/%s/g" %s`,
+		strings.ReplaceAll(hdfsDirStr, "/", "\\/"),
+		i.HdfsConfDir+"/hdfs-site.xml")
+	if _, err = osutil.ExecShellCommand(false, replaceConfCmd); err != nil {
+		logger.Error("%s execute failed, %v", replaceConfCmd, err)
+	}
+	return i.SupervisorUpdateConfig(DataNode)
+}
+
+// ServiceCommand TODO
+func ServiceCommand(service string, command string) error {
+	execCommand := fmt.Sprintf("service %s %s", service, command)
+	_, err := osutil.RunInBG(false, execCommand)
+	return err
+}
+
+// InstallHaProxy TODO
+func (i *InstallHdfsService) InstallHaProxy() (err error) {
+	osVersion := "7"
+	linuxVersion, _ := osutil.ExecShellCommand(false, "cat /etc/redhat-release | awk '{print $4}'")
+	if strings.HasPrefix(linuxVersion, "6") {
+		osVersion = "6"
+	}
+
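+	// pick the rpm matching the detected major release by its elN suffix
+	// (e.g. a *.el7.* package on a 7.x host; file names are illustrative)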
+	rpmPackages := strings.Split(i.Params.InstallConfig.HaProxyRpm, ",")
+	for _, value := range rpmPackages {
+		if strings.Contains(value, "el"+osVersion) {
+			extraCmd := fmt.Sprintf("rpm -ivh --nodeps %s/%s > /dev/null", i.InstallDir, value)
+			if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+				logger.Error("%s execute failed, %v", extraCmd, err)
+			}
+		}
+	}
+	return nil
+}
+
+// RenderHaProxyConfig TODO
+func (i *InstallHdfsService) RenderHaProxyConfig() (err error) {
+
+	// write haproxy.cfg
+	data, err := config_tpl.HaproxyCfgFile.ReadFile(config_tpl.HaproxyCfgFileName)
+	if err != nil {
+		logger.Error("read config template failed %s", err.Error())
+		return err
+	}
+	if err = ioutil.WriteFile("/etc/haproxy/haproxy.cfg", data, 07555); err != nil {
+		logger.Error("write haproxy config failed %s", err.Error())
+		return err
+	}
+	// substitute template parameters via sed
+	nn1Host := i.Params.HostMap[i.Params.Nn1Ip]
+	nn2Host := i.Params.HostMap[i.Params.Nn2Ip]
+	extraCmd := fmt.Sprintf(`sed -i -e "s/{{cluster_name}}/%s/g" -e "s/{{nn1_host}}/%s/g"  -e "s/{{nn2_host}}/%s/g"  %s`,
+		i.Params.ClusterName,
+		nn1Host, nn2Host, "/etc/haproxy/haproxy.cfg")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf(
+		`sed -i -e "s/{{nn1_ip}}/%s/g" -e "s/{{nn2_ip}}/%s/g"  -e "s/{{http_port}}/%d/" -e "s/{{haproxy_passwd}}/%s/g" %s`,
+		i.Params.Nn1Ip,
+		i.Params.Nn2Ip, i.Params.HttpPort, i.Params.HaproxyPasswd, "/etc/haproxy/haproxy.cfg")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+
+}
+
+// StartHaProxy TODO
+func (i *InstallHdfsService) StartHaProxy() (err error) {
+	return ServiceCommand("haproxy", Start)
+}
+
+// HadoopDaemonCommand TODO
+func HadoopDaemonCommand(component string, options string) (err error) {
+	execCommand := fmt.Sprintf("su - hadoop -c \"hadoop-daemon.sh %s %s\"", options, component)
+	if _, err = osutil.ExecShellCommand(false, execCommand); err != nil {
+		logger.Error("%s execute failed, %v", execCommand, err)
+		return err
+	}
+	return nil
+}
+
+// SupervisorUpdateConfig TODO
+func (i *InstallHdfsService) SupervisorUpdateConfig(component string) (err error) {
+	return SupervisorUpdateHdfsConfig(i.SupervisorConfDir, component)
+}
+
+// RenderHdfsConfig TODO
+func (i *InstallHdfsService) RenderHdfsConfig() (err error) {
+
+	logger.Info("now hdfs conf dir is %s", i.HdfsConfDir)
+	// render hdfs-site.xml
+	hdfsSiteData, _ := util2.TransMap2Xml(i.Params.HdfsSite)
+	if err = ioutil.WriteFile(i.HdfsConfDir+"/"+"hdfs-site.xml", hdfsSiteData, 0644); err != nil {
+		logger.Error("write config failed %s", err.Error())
+		return err
+	}
+	// render core-site.xml
+	coreSiteData, _ := util2.TransMap2Xml(i.Params.CoreSite)
+	if err = ioutil.WriteFile(i.HdfsConfDir+"/"+"core-site.xml", coreSiteData, 0644); err != nil {
+		logger.Error("write config failed %s", err.Error())
+		return err
+	}
+
+	dnIps := strings.Split(i.Params.DnIps, ",")
+	buf := bytes.NewBufferString("")
+	for _, dnIp := range dnIps {
+		dnHost := i.Params.HostMap[dnIp]
+		buf.WriteString(fmt.Sprintln(dnHost))
+	}
+	if err = ioutil.WriteFile(i.HdfsConfDir+"/"+"dfs.include", buf.Bytes(), 0644); err != nil {
+		logger.Error("write config failed %s", err.Error())
+		return err
+	}
+
+	nn1Host := i.Params.HostMap[i.Params.Nn1Ip]
+	nn2Host := i.Params.HostMap[i.Params.Nn2Ip]
+	extraCmd := fmt.Sprintf(`sed -i -e "s/{{cluster_name}}/%s/" -e "s/{{nn1_host}}/%s/"  -e "s/{{nn2_host}}/%s/"  %s`,
+		i.Params.ClusterName,
+		nn1Host, nn2Host, i.HdfsConfDir+"/*")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf(`sed -i -e "s/{{http_port}}/%d/" -e "s/{{rpc_port}}/%d/" %s`, i.Params.HttpPort,
+		i.Params.RpcPort, i.HdfsConfDir+"/*")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	zkArr := strings.Split(i.Params.ZkIps, ",")
+	extraCmd = fmt.Sprintf(`sed -i -e "s/{{zk0_ip}}/%s/" -e "s/{{zk1_ip}}/%s/" -e "s/{{zk2_ip}}/%s/" %s`, zkArr[0],
+		zkArr[1], zkArr[2], i.HdfsConfDir+"/*")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	jnArr := strings.Split(i.Params.JnIps, ",")
+	extraCmd = fmt.Sprintf(`sed -i -e "s/{{jn0_host}}/%s/" -e "s/{{jn1_host}}/%s/" -e "s/{{jn2_host}}/%s/" %s`,
+		jnArr[0], jnArr[1], jnArr[2], i.HdfsConfDir+"/*")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// configure JVM heap parameters
+	var instMem uint64
+
+	if instMem, err = esutil.GetInstMem(); err != nil {
+		logger.Error("get instance memory failed, err: %v", err)
+		return fmt.Errorf("get instance memory failed, err: %w", err)
+	}
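+	// heap sizing: the NameNode gets ~80% and the DataNode ~60% of host memory,
+	// rounded down to whole GB (assuming GetInstMem returns megabytes)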
+	jvmNameNodeSize := int(math.Floor(0.8 * float64(instMem) / 1024))
+	jvmDataNodeSize := int(math.Floor(0.6 * float64(instMem) / 1024))
+
+	// Todo: adjust the DataNode data dir config, mkdir
+	extraCmd = fmt.Sprintf(`sed -i -e "s/{{NN_JVM_MEM}}/%s/" -e "s/{{DN_JVM_MEM}}/%s/" %s`,
+		fmt.Sprintf("-Xms%dG -Xmx%dG", jvmNameNodeSize, jvmNameNodeSize),
+		fmt.Sprintf("-Xms%dG -Xmx%dG", jvmDataNodeSize, jvmDataNodeSize),
+		i.HdfsConfDir+"/hadoop-env.sh")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	return nil
+
+}
+
+// RenderHdfsConfigWithoutParams TODO
+func (i *InstallHdfsService) RenderHdfsConfigWithoutParams() (err error) {
+	// write log4j.properties
+	data, err := config_tpl.Log4jPropertiesFile.ReadFile(config_tpl.Log4jPropertiesFileName)
+	if err != nil {
+		logger.Error("read config template failed %s", err.Error())
+		return err
+	}
+	if err = ioutil.WriteFile(i.HdfsConfDir+"/"+config_tpl.Log4jPropertiesFileName, data, 0755); err != nil {
+		logger.Error("write tmp config failed %s", err.Error())
+		return err
+	}
+
+	// write rack-aware.sh
+	data, err = config_tpl.RackAwareFile.ReadFile(config_tpl.RackAwareFileName)
+	if err != nil {
+		logger.Error("read config template script failed %s", err.Error())
+		return err
+	}
+	if err = ioutil.WriteFile(i.HdfsConfDir+"/"+config_tpl.RackAwareFileName, data, 0755); err != nil {
+		logger.Error("write tmp config failed %s", err.Error())
+		return err
+	}
+
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/install_zookeeper.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/install_zookeeper.go
new file mode 100644
index 0000000000..5aa9ad8153
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/install_zookeeper.go
@@ -0,0 +1,123 @@
+package hdfs
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// InstallZookeeperService TODO
+type InstallZookeeperService struct {
+	GeneralParam *components.GeneralParam
+	Params       *InstallHdfsParams
+	ZookeeperConfig
+	InstallParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// ZookeeperConfig TODO
+type ZookeeperConfig struct {
+	TickTime   int
+	InitLimit  int
+	SyncLimit  int
+	DataDir    string
+	DataLogDir string
+	ClientPort int
+	MyId       int
+}
+
+// RenderZookeeperConfig TODO
+func (i *InstallZookeeperService) RenderZookeeperConfig() (err error) {
+
+	extraCmd := fmt.Sprintf("mkdir -p %s %s; chown -R hadoop:root %s",
+		i.InstallDir+"/zookeeper/data", i.InstallDir+"/zookeeper/logs", i.InstallDir+"/zookeeper/")
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("初始化实例目录失败:%s", err.Error())
+		return err
+	}
+
+	nodeIp := i.Params.Host
+	zookeeperIpList := strings.Split(i.Params.ZkIps, ",")
+
+	logger.Info("zoo.cfg")
+	extraCmd = fmt.Sprintf(`echo "tickTime=2000
+initLimit=10
+syncLimit=5
+dataDir=/data/hadoopenv/zookeeper/data
+dataLogDir=/data/hadoopenv/zookeeper/logs
+clientPort=2181
+autopurge.snapRetainCount=3
+autopurge.purgeInterval=1
+server.0=%s:2888:3888
+server.1=%s:2888:3888
+server.2=%s:2888:3888" > %s`, zookeeperIpList[0], zookeeperIpList[1], zookeeperIpList[2], "/data/hadoopenv/zookeeper/conf/zoo.cfg")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	logger.Info("myid")
+	myidNum := 0
+	for i := 0; i < len(zookeeperIpList); i++ {
+		if nodeIp == zookeeperIpList[i] {
+			myidNum = i
+			break
+		}
+	}
+	extraCmd = fmt.Sprintf(`echo %d > %s`, myidNum, i.InstallDir+"/zookeeper/data/myid")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	return nil
+}
+
+// InstallZookeeper TODO
+func (i *InstallZookeeperService) InstallZookeeper() (err error) {
+
+	if err := i.RenderZookeeperConfig(); err != nil {
+		logger.Error("RenderZookeeperConfig failed, %v", err)
+		return err
+	}
+	return SupervisorUpdateZooKeeperConfig(i.SupervisorConfDir)
+}
+
+// UpdateZooKeeperConfigParams TODO
+type UpdateZooKeeperConfigParams struct {
+	Host   string `json:"host" validate:"required,ip"`
+	OldIps string `json:"old_zk_ips" validate:"required"`
+	NewIps string `json:"new_zk_ips" validate:"required"`
+}
+
+// UpdateZooKeeperConfigService TODO
+type UpdateZooKeeperConfigService struct {
+	GeneralParam *components.GeneralParam
+	InstallParams
+	Params          *UpdateZooKeeperConfigParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// UpdateZooKeeperConfig TODO
+func (i *UpdateZooKeeperConfigService) UpdateZooKeeperConfig() (err error) {
+
+	configName := "/data/hadoopenv/zookeeper/conf/zoo.cfg"
+	oldZkList := strings.Split(i.Params.OldIps, ",")
+	newZkList := strings.Split(i.Params.NewIps, ",")
+	if len(oldZkList) != len(newZkList) {
+		return errors.New("替换ZK IP数量不一致")
+	}
+	for i := range oldZkList {
+		replaceCommand := fmt.Sprintf("sed -i 's/\\<%s\\>/%s/g' %s",
+			oldZkList[i], newZkList[i], configName)
+		if _, err = osutil.ExecShellCommand(false, replaceCommand); err != nil {
+			logger.Error("%s execute failed, %v", replaceCommand, err)
+		}
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/node_operation.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/node_operation.go
new file mode 100644
index 0000000000..da0fd2bb32
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/node_operation.go
@@ -0,0 +1,187 @@
+package hdfs
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"io/ioutil"
+)
+
+// NodeOperationParams TODO
+type NodeOperationParams struct {
+	Host      string `json:"host" validate:"required,ip"`
+	Component string `json:"component"`
+	Operation string `json:"operation"`
+}
+
+// NodeOperationService TODO
+type NodeOperationService struct {
+	GeneralParam    *components.GeneralParam
+	Params          *NodeOperationParams
+	RollBackContext rollback.RollBackObjects
+	InstallParams
+}
+
+// StopAllProcess TODO
+func (i *NodeOperationService) StopAllProcess() (err error) {
+	// stop the processes
+	if err := SupervisorCommand(Stop, All); err != nil {
+		logger.Error("shell execute failed, %v", err)
+		return err
+	}
+	logger.Info("Stop hdfs all process successfully")
+	return nil
+}
+
+// StopHaProxy TODO
+func (i *NodeOperationService) StopHaProxy() (err error) {
+	// stop the haproxy service; note the argument order is "service <name> <action>"
+	extraCmd := "service haproxy stop"
+	logger.Info("stopping haproxy, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+	}
+	logger.Info("Stop haproxy successfully")
+	return nil
+}
+
+// CleanData TODO
+func (i *NodeOperationService) CleanData() (err error) {
+	// clear crontab entries
+	logger.Info("list crontab")
+	out, err := osutil.ListCrontb(i.ExecuteUser)
+	if err != nil {
+		logger.Error("list crontab failed, %v", err)
+		return err
+	}
+	logger.Debug("crontab: %v", out)
+	if len(out) > 0 {
+		extraCmd := fmt.Sprintf("crontab -u %s -r", i.ExecuteUser)
+		logger.Info("清除crontab, [%s]", extraCmd)
+		if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("[%s] execute failed, %v", extraCmd, err)
+			return err
+		}
+	}
+
+	// force-kill leftover processes
+	extraCmd := `ps -ef | egrep 'supervisord|telegraf|consul'|grep -v grep |awk {'print "kill -9 " $2'}|sh`
+	logger.Info("force-kill processes, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = `rm -f /etc/supervisord.conf /usr/local/bin/supervisorctl /usr/local/bin/supervisord /usr/bin/java`
+	logger.Info("删除软链, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// clean profile
+	extraCmd = `sed -i '/hdfsProfile/d' /etc/profile`
+	logger.Info("clean provile, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	// remove hadoopenv
+	extraCmd = `rm -rf /data/hadoopenv`
+	logger.Info("remove hadoopenv, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// remove data directories
+	extraCmd = `df |grep data|grep -vw '/data'|awk '{print $NF}'|while read line;do rm  -rf $line/hadoopdata*;done`
+	logger.Info("remove hadoopdata, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
+
+// StartComponent TODO
+func (i *NodeOperationService) StartComponent() (err error) {
+
+	// on start, check whether the supervisor config exists; if missing, render it and run update
+	if i.Params.Operation == Start {
+		judgeCommand := fmt.Sprintf("ls -l %s/%s.ini", i.SupervisorConfDir, i.Params.Component)
+		if _, err = osutil.ExecShellCommand(false, judgeCommand); err != nil {
+			logger.Error("shell execute failed, need init supervisor ini")
+			if i.Params.Component == ZooKeeper {
+				return SupervisorUpdateZooKeeperConfig(i.SupervisorConfDir)
+			} else {
+				return SupervisorUpdateHdfsConfig(i.SupervisorConfDir, i.Params.Component)
+			}
+		}
+	}
+	if err := SupervisorCommand(i.Params.Operation, i.Params.Component); err != nil {
+		logger.Error("shell execute failed, %v", err)
+		return err
+	}
+	logger.Info("%s hdfs process %s successfully", i.Params.Operation, i.Params.Component)
+	return nil
+}
+
+// SupervisorCommand TODO
+func SupervisorCommand(command string, component string) error {
+	execCommand := fmt.Sprintf("supervisorctl %s %s", command, component)
+	if _, err := osutil.ExecShellCommand(false, execCommand); err != nil {
+		logger.Error("[%s] execute failed, %v", execCommand, err)
+		return err
+	}
+	return nil
+}
+
+// SupervisorUpdateHdfsConfig TODO
+func SupervisorUpdateHdfsConfig(supervisorConfDir string, component string) error {
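+	// render a minimal supervisord [program:<component>] section wrapping
+	// hadoop-daemon-wrapper.sh, then ask supervisord to reread it via "update"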
+	data := []byte(`[program:` + component + `]
+command=hadoop-daemon-wrapper.sh start-foreground ` + component + ` ; the program (relative uses PATH, can take args)
+numprocs=1 ; number of processes copies to start (def 1)
+stopsignal=TERM ;
+stopasgroup=true ;
+killasgroup=true ;
+autostart=true ; start at supervisord start (default: true)
+user=hadoop ;
+redirect_stdout=false ; 
+redirect_stderr=false ; redirect proc stderr to stdout (default false)`)
+
+	componentIni := supervisorConfDir + "/" + component + ".ini"
+
+	if err := ioutil.WriteFile(componentIni, data, 0644); err != nil {
+		logger.Error("write %s failed, %v", componentIni, err)
+		return err
+	}
+
+	return SupervisorCommand("update", "")
+}
+
+// SupervisorUpdateZooKeeperConfig TODO
+func SupervisorUpdateZooKeeperConfig(supervisorConfDir string) error {
+	data := []byte(`[program:zookeeper]
+command=/data/hadoopenv/zookeeper/bin/zkServer.sh start-foreground ; the program (relative uses PATH, can take args)
+numprocs=1 ; number of processes copies to start (def 1)
+autostart=true ; start at supervisord start (default: true)
+startsecs=3 ; # of secs prog must stay up to be running (def. 1)
+startretries=99 ; max # of serial start failures when starting (default 3)
+autorestart=true ; when to restart if exited after running (def: unexpected)
+exitcodes=0 ; 'expected' exit codes used with autorestart (default 0,2)
+user=hadoop ;
+stopsignal=KILL ;
+redirect_stderr=true ; redirect proc stderr to stdout (default false)
+stdout_logfile=/data/hadoopenv/zookeeper/zk_startup.log ; stdout log path, NONE for none; default AUTO
+stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)`)
+
+	componentIni := supervisorConfDir + "/zookeeper.ini"
+
+	if err := ioutil.WriteFile(componentIni, data, 0644); err != nil {
+		logger.Error("write %s failed, %v", componentIni, err)
+		return err
+	}
+	return SupervisorCommand("update", "")
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/replace_hdfs.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/replace_hdfs.go
new file mode 100644
index 0000000000..e466bdd87c
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/replace_hdfs.go
@@ -0,0 +1,177 @@
+package hdfs
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// GenerateKeyParams TODO
+type GenerateKeyParams struct {
+	Host string `json:"host" validate:"required,ip"`
+}
+
+// GenerateKeyService TODO
+type GenerateKeyService struct {
+	GeneralParam *components.GeneralParam
+	InstallParams
+	Params *GenerateKeyParams
+
+	RollBackContext rollback.RollBackObjects
+}
+
+// GenerateKeyResult TODO
+type GenerateKeyResult struct {
+	Key string `json:"key"`
+}
+
+// GenerateKey TODO
+func (i *GenerateKeyService) GenerateKey() (err error) {
+
+	executeCmd := "if [ ! -f ~/.ssh/id_rsa.pub ]; then ssh-keygen -t rsa -N '' -f ~/.ssh/id_rsa -q; fi;"
+	if _, err := osutil.ExecShellCommand(false, executeCmd); err != nil {
+		logger.Error("%s execute failed, %v", executeCmd, err)
+	}
+	catKeyCmd := " cat ~/.ssh/id_rsa.pub | xargs echo -n"
+	if result, err := osutil.ExecShellCommand(false, catKeyCmd); err != nil {
+		logger.Error("%s execute failed, %v", catKeyCmd, err)
+		return err
+	} else {
+		resultStruct := GenerateKeyResult{
+			Key: result,
+		}
+		jsonBytes, err := json.Marshal(resultStruct)
+		if err != nil {
+			logger.Error("transfer resultStruct to json failed", err.Error())
+			return err
+		}
+		// print the json result to stdout
+		fmt.Printf("%s", string(jsonBytes))
+		return nil
+	}
+}
+
+// WriteKeyParams TODO
+type WriteKeyParams struct {
+	Host string `json:"host" validate:"required,ip"`
+	Key  string `json:"key" validate:"required"`
+}
+
+// WriteKeyService TODO
+type WriteKeyService struct {
+	GeneralParam *components.GeneralParam
+	InstallParams
+	Params          *WriteKeyParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// WriteKey TODO
+func (i *WriteKeyService) WriteKey() (err error) {
+	executeCmd := fmt.Sprintf("su - %s -c \"mkdir -p ~/.ssh/; echo '%s' >> ~/.ssh/authorized_keys\"",
+		i.ExecuteUser, i.Params.Key)
+	if _, err := osutil.ExecShellCommand(false, executeCmd); err != nil {
+		logger.Error("%s execute failed, %v", executeCmd, err)
+	}
+	return nil
+}
+
+// ScpDirParams TODO
+type ScpDirParams struct {
+	Host      string `json:"host" validate:"required,ip"`
+	Dest      string `json:"dest" validate:"required,ip"`
+	Component string `json:"component"`
+	// Dir 		   string `json:"dir" validate:"required"`
+	// DestDir        string `json:"dest_dir"`
+}
+
+// ScpDirService TODO
+type ScpDirService struct {
+	GeneralParam *components.GeneralParam
+	InstallParams
+	Params          *ScpDirParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// ScpDir TODO
+func (i *ScpDirService) ScpDir() (err error) {
+
+	metaDataDirs := GetMetaDataDirByRole(i.Params.Component)
+	for _, metaDataDir := range metaDataDirs {
+		executeCmd := fmt.Sprintf("scp -o \"StrictHostKeyChecking no\" -r %s %s@%s:%s 2>&1",
+			metaDataDir, i.ExecuteUser, i.Params.Dest, metaDataDir)
+		if _, err := osutil.ExecShellCommand(false, executeCmd); err != nil {
+			logger.Error("%s execute failed, %v", executeCmd, err)
+			return err
+		}
+	}
+	return nil
+}
+
+// GetMetaDataDirByRole TODO
+func GetMetaDataDirByRole(component string) []string {
+	metaDataDirs := make([]string, 0)
+	switch component {
+	case NameNode:
+		metaDataDirs = []string{"/data/hadoopdata/name"}
+	case JournalNode:
+		metaDataDirs = []string{"/data/hadoopdata/jn"}
+	case ZooKeeper:
+		metaDataDirs = []string{"/data/hadoopenv/zookeeper/conf/zoo.cfg",
+			"/data/hadoopenv/zookeeper/data", "/data/hadoopenv/zookeeper/logs"}
+	}
+	return metaDataDirs
+}
+
+// CheckActiveParams TODO
+type CheckActiveParams struct {
+	Host  string `json:"host" validate:"required,ip"`
+	Nn1Ip string `json:"nn1_ip" validate:"required"`
+	Nn2Ip string `json:"nn2_ip" validate:"required"`
+}
+
+// CheckActiveService TODO
+type CheckActiveService struct {
+	GeneralParam *components.GeneralParam
+	InstallParams
+	Params          *CheckActiveParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// CheckActive TODO
+func (i *CheckActiveService) CheckActive() (err error) {
+	checkActiveResult := make(map[string]string, 2)
+	checkNn1Cmd := fmt.Sprintf("su - %s -c \"hdfs haadmin -getServiceState nn1 | xargs echo -n\"", i.ExecuteUser)
+	checkNn2Cmd := fmt.Sprintf("su - %s -c \"hdfs haadmin -getServiceState nn2 | xargs echo -n\"", i.ExecuteUser)
+
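+	// the result map is keyed by the state reported by `hdfs haadmin`
+	// ("active"/"standby"), with the owning NameNode IP as the value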
+	maxRetryCount := 3
+	for retryCount := 0; retryCount < maxRetryCount; retryCount++ {
+		time.Sleep(5 * time.Second)
+		if result, err := osutil.ExecShellCommand(false, checkNn1Cmd); err != nil {
+			logger.Error("%s execute failed, %v", checkNn1Cmd, err)
+			continue
+		} else {
+			checkActiveResult[result] = i.Params.Nn1Ip
+		}
+		if result, err := osutil.ExecShellCommand(false, checkNn2Cmd); err != nil {
+			logger.Error("%s execute failed, %v", checkNn2Cmd, err)
+			continue
+		} else {
+			checkActiveResult[result] = i.Params.Nn2Ip
+			break
+		}
+	}
+
+	jsonBytes, err := json.Marshal(checkActiveResult)
+	if err != nil {
+		logger.Error("transfer checkActiveResult to json failed", err.Error())
+		return err
+	}
+	// print the json result to stdout
+	fmt.Printf("%s", string(jsonBytes))
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/shrink_hdfs.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/shrink_hdfs.go
new file mode 100644
index 0000000000..5756c93309
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/shrink_hdfs.go
@@ -0,0 +1,172 @@
+package hdfs
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// UpdateDfsHostParams TODO
+type UpdateDfsHostParams struct {
+	DataNodeHosts string `json:"data_node_hosts"  validate:"required"`
+	ConfFile      string `json:"conf_file"  validate:"required"`
+	Operation     string `json:"operation"  validate:"required"`
+}
+
+// UpdateDfsHostService TODO
+type UpdateDfsHostService struct {
+	GeneralParam *components.GeneralParam
+	InstallParams
+	Params *UpdateDfsHostParams
+
+	RollBackContext rollback.RollBackObjects
+}
+
+// UpdateDfsHost TODO
+func (i *UpdateDfsHostService) UpdateDfsHost() (err error) {
+
+	dnHostArr := strings.Split(i.Params.DataNodeHosts, ",")
+	if i.Params.Operation == Add {
+		for _, dnHost := range dnHostArr {
+			executeCmd := fmt.Sprintf("echo \"%s\" >> %s", dnHost, i.Params.ConfFile)
+			if _, err = osutil.ExecShellCommand(false, executeCmd); err != nil {
+				logger.Error("%s execute failed, %v", executeCmd, err)
+			}
+		}
+	} else if i.Params.Operation == Remove {
+		for _, dnHost := range dnHostArr {
+			executeCmd := fmt.Sprintf("sed -i '/^%s$/d' %s", dnHost, i.Params.ConfFile)
+			if _, err = osutil.ExecShellCommand(false, executeCmd); err != nil {
+				logger.Error("%s execute failed, %v", executeCmd, err)
+			}
+		}
+	}
+	logger.Info("update dfs hosts successfully")
+	return nil
+}
+
+// CheckDecommissionParams TODO
+type CheckDecommissionParams struct {
+	Host          string `json:"host" validate:"required,ip"`
+	DataNodeHosts string `json:"data_node_hosts"  validate:"required"`
+	DataNodePort  int    `json:"data_node_port"`
+	HttpPort      int    `json:"http_port"  validate:"required"`
+	Version       string `json:"version"  validate:"required"`
+	HaproxyPasswd string `json:"haproxy_passwd"`
+}
+
+// CheckDecommissionService TODO
+type CheckDecommissionService struct {
+	GeneralParam *components.GeneralParam
+	InstallParams
+	Params          *CheckDecommissionParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// CheckDatanodeDecommission TODO
+func (c *CheckDecommissionService) CheckDatanodeDecommission() (err error) {
+
+	urlFormat := "http://root:%s@%s:%d/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo"
+	responseBody, err := util.HttpGet(fmt.Sprintf(urlFormat, c.Params.HaproxyPasswd, c.Params.Host, c.Params.HttpPort))
+	if err != nil {
+		return err
+	}
+	var beans map[string][]NameNodeInfoBean
+	if err = json.Unmarshal(responseBody, &beans); err != nil {
+		logger.Error("transfer response to json failed %s", err.Error())
+		return err
+	}
+	if len(beans["beans"]) == 0 {
+		return errors.New("no NameNodeInfo bean found in jmx response")
+	}
+	nameNodeInfoBean := beans["beans"][0]
+	logger.Debug("LiveNodesStr is [%s]", nameNodeInfoBean.LiveNodesStr)
+	logger.Debug("DeadNodesStr is [%s]", nameNodeInfoBean.DeadNodesStr)
+
+	var liveNodeMap DataNodeMap
+	var deadNodeMap DataNodeMap
+	if err = json.Unmarshal([]byte(nameNodeInfoBean.LiveNodesStr), &liveNodeMap); err != nil {
+		logger.Error("transfer LiveNodesStr to json failed", err.Error())
+		return err
+	}
+	if err = json.Unmarshal([]byte(nameNodeInfoBean.DeadNodesStr), &deadNodeMap); err != nil {
+		logger.Error("transfer DeadNodesStr to json failed", err.Error())
+		return err
+	}
+	dnHostArr := strings.Split(c.Params.DataNodeHosts, ",")
+	logger.Debug("len dnHostArr is %d", len(dnHostArr))
+	datanodeDetail := make(DataNodeMap, len(dnHostArr))
+	result := true
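+	// result stays true only if every requested DataNode is either fully
+	// Decommissioned among the live nodes or already listed as dead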
+	for _, dnHost := range dnHostArr {
+		if value, ok := liveNodeMap[dnHost]; ok {
+			logger.Debug("node %s is in liveNodes")
+			if value.AdminState == "Decommissioned" {
+				value.Decommissioned = true
+			} else {
+				result = false
+			}
+			datanodeDetail[dnHost] = value
+		} else if value, ok := deadNodeMap[dnHost]; ok {
+			logger.Debug("node %s is in deadNodes")
+			datanodeDetail[dnHost] = value
+		}
+	}
+	if result {
+		logger.Info("Datanode Decommission completed")
+		return nil
+	} else {
+		logger.Error("Datanode Decommissioning")
+		return errors.New("Datanode Decommissioning")
+	}
+}
+
+// DataNodeMap TODO
+type DataNodeMap map[string]DataNodeStruct
+
+// DataNodeStruct TODO
+type DataNodeStruct struct {
+	InfoAddr       string `json:"infoAddr"`
+	AdminState     string `json:"adminState"`
+	TransferAddr   string `json:"xferaddr"`
+	Decommissioned bool   `json:"decommissioned"`
+}
+
+// NameNodeInfoBean TODO
+type NameNodeInfoBean struct {
+	LiveNodesStr            string `json:"LiveNodes"`
+	DeadNodesStr            string `json:"DeadNodes"`
+	DecommissioningNodesStr string `json:"DecomNodes"`
+}
+
+// CheckDecommissionResult TODO
+type CheckDecommissionResult struct {
+	Result bool        `json:"result"`
+	Detail DataNodeMap `json:"detail"`
+}
+
+// RefreshNodesParams TODO
+type RefreshNodesParams struct {
+	Host string `json:"host" validate:"required,ip"`
+}
+
+// RefreshNodesService TODO
+type RefreshNodesService struct {
+	GeneralParam *components.GeneralParam
+	InstallParams
+	Params          *RefreshNodesParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// RefreshNodes TODO
+func (r *RefreshNodesService) RefreshNodes() (err error) {
+	execCommand := fmt.Sprintf("su - %s -c \"hdfs dfsadmin -refreshNodes\"", r.ExecuteUser)
+	// success of the command is deliberately not checked
+	if _, err := osutil.ExecShellCommand(false, execCommand); err != nil {
+		logger.Error("[%s] execute failed, %v", execCommand, err)
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/update_host_mapping.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/update_host_mapping.go
new file mode 100644
index 0000000000..ca52ac6268
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/update_host_mapping.go
@@ -0,0 +1,44 @@
+package hdfs
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+)
+
+// UpdateHostMappingParams TODO
+type UpdateHostMappingParams struct {
+	HostMap map[string]string `json:"host_map"`
+}
+
+// UpdateHostMappingService TODO
+type UpdateHostMappingService struct {
+	GeneralParam *components.GeneralParam
+	Params       *UpdateHostMappingParams
+
+	RollBackContext rollback.RollBackObjects
+}
+
+// UpdateHostMapping TODO
+func (i *UpdateHostMappingService) UpdateHostMapping() (err error) {
+
+	for k, v := range i.Params.HostMap {
+		deleteCommand := fmt.Sprintf("sed -i '/%s$/d' /etc/hosts", v)
+		if _, err := osutil.ExecShellCommand(false, deleteCommand); err != nil {
+			logger.Error("exec delete hostname failed %s", err.Error())
+		}
+		echoCommand := fmt.Sprintf("echo \"%s %s\" >> /etc/hosts", k, v)
+		_, err = osutil.ExecShellCommand(false, echoCommand)
+		if err != nil {
+			logger.Error("exec update host failed %s", err.Error())
+		}
+	}
+	updateCommand := "nscd -i hosts"
+	_, err = osutil.ExecShellCommand(false, updateCommand)
+	if err != nil {
+		logger.Error("nscd host failed %s", err.Error())
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/disk.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/disk.go
new file mode 100644
index 0000000000..0d30f262d5
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/disk.go
@@ -0,0 +1,37 @@
+package util
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"strings"
+)
+
+// GetMaxSize TODO
+func GetMaxSize() int64 {
+	mountPaths := osutil.GetMountPathInfo()
+	var maxSize int64 = 0
+	for k, v := range mountPaths {
+		// only consider mounted /data* paths
+		if strings.HasPrefix(k, "/data") && v.TotalSizeMB > maxSize {
+			maxSize = v.TotalSizeMB
+		}
+	}
+	return maxSize
+}
+
+// GetHdfsDataMountDir TODO
+func GetHdfsDataMountDir() []string {
+	var dirs []string
+	dirMaxSize := GetMaxSize()
+	mountPaths := osutil.GetMountPathInfo()
+	for k, v := range mountPaths {
+		// only consider mounted /data* paths
+		if strings.HasPrefix(k, "/data") && v.TotalSizeMB == dirMaxSize {
+			dirs = append(dirs, k)
+		}
+	}
+	if dirs == nil {
+		// default hdfs data dir
+		dirs = append(dirs, "/data")
+	}
+	return dirs
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/http.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/http.go
new file mode 100644
index 0000000000..8c7352a4cf
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/http.go
@@ -0,0 +1,27 @@
+package util
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"io/ioutil"
+	"net/http"
+)
+
+// HttpGet TODO
+func HttpGet(url string) ([]byte, error) {
+	var responseBody []byte
+	request, _ := http.NewRequest("GET", url, nil)
+	response, err := http.DefaultClient.Do(request)
+	if err != nil {
+		logger.Error("http get request failed %s", err.Error())
+		return responseBody, err
+	}
+	defer response.Body.Close()
+	if response.StatusCode == 200 {
+		logger.Debug("http get response code is 200")
+	} else {
+		logger.Error("http get failed, status code is %d", response.StatusCode)
+	}
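+	// note: a non-200 status is only logged; the body is still read and
+	// returned to the caller without an error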
+
+	return ioutil.ReadAll(response.Body)
+
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/util.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/util.go
new file mode 100644
index 0000000000..80d62b1ad1
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/util.go
@@ -0,0 +1,2 @@
+// Package util TODO
+package util
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/xml_util.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/xml_util.go
new file mode 100644
index 0000000000..dce398ca81
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/hdfs/util/xml_util.go
@@ -0,0 +1,37 @@
+package util
+
+import (
+	"encoding/xml"
+)
+
+// ConfigMap TODO
+type ConfigMap map[string]string
+
+// Configuration TODO
+type Configuration struct {
+	XMLName  xml.Name      `xml:"configuration"`
+	Property []PropertyXml `xml:"property"`
+}
+
+// PropertyXml TODO
+type PropertyXml struct {
+	Name  string `xml:"name"`
+	Value string `xml:"value"`
+}
+
+// TransMap2Xml TODO
+func TransMap2Xml(configMap ConfigMap) ([]byte, error) {
+
+	var properties []PropertyXml
+	for k, v := range configMap {
+		properties = append(properties, PropertyXml{
+			Name:  k,
+			Value: v,
+		})
+	}
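+	// marshal with two-space prefix/indent; a map like {"dfs.replication": "2"}
+	// renders roughly as (illustrative output):
+	//   <configuration>
+	//     <property>
+	//       <name>dfs.replication</name>
+	//       <value>2</value>
+	//     </property>
+	//   </configuration>
+	// prepended with the standard xml.Header declaration below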
+	byteData, err := xml.MarshalIndent(Configuration{
+		Property: properties,
+	}, "  ", "  ")
+
+	return append([]byte(xml.Header), byteData...), err
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/clean_data.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/clean_data.go
new file mode 100644
index 0000000000..0114c5497d
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/clean_data.go
@@ -0,0 +1,85 @@
+package influxdb
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+)
+
+// CleanDataComp TODO
+type CleanDataComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *CleanDataParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// CleanDataParams TODO
+type CleanDataParams struct{}
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *CleanDataComp) Init() (err error) {
+	logger.Info("Clean data fake init")
+	return nil
+}
+
+// CleanData TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *CleanDataComp) CleanData() (err error) {
+	// clear crontab entries
+	logger.Info("list crontab")
+	out, err := osutil.ListCrontb(cst.DefaultInfluxdbExecUser)
+	if err != nil {
+		logger.Error("list crontab failed, %v", err)
+		return err
+	}
+	logger.Debug("crontab: %v", out)
+	if len(out) > 0 {
+		extraCmd := "crontab -u influxdb -r"
+		logger.Info("clear crontab, [%s]", extraCmd)
+		if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("[%s] execute failed, %v", extraCmd, err)
+			return err
+		}
+	}
+
+	// force-kill leftover processes
+	extraCmd := `ps -ef | egrep 'supervisord|telegraf'|grep -v grep |awk {'print "kill -9 " $2'}|sh`
+	logger.Info("force-kill processes, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = `rm -f /etc/supervisord.conf /usr/local/bin/supervisorctl /usr/local/bin/supervisord /usr/bin/java`
+	logger.Info("删除软链, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// remove influxdbenv
+	extraCmd = `rm -rf /data/influxdbenv*`
+	logger.Info("remove influxdbenv, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// remove data directories
+	extraCmd = `df |grep data|grep -vw '/data'|awk '{print $NF}'|while read line;do rm  -rf $line/influxdbdata*;done`
+	logger.Info("remove influxdbdata, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/influxdb.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/influxdb.go
new file mode 100644
index 0000000000..70d9f719a2
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/influxdb.go
@@ -0,0 +1,2 @@
+// Package influxdb TODO
+package influxdb
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/install_influxdb.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/install_influxdb.go
new file mode 100644
index 0000000000..17a47450f8
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/install_influxdb.go
@@ -0,0 +1,548 @@
+package influxdb
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/user"
+)
+
+// InstallInfluxdbComp TODO
+type InstallInfluxdbComp struct {
+	GeneralParam *components.GeneralParam
+	Params       *InstallInfluxdbParams
+	KafkaConfig
+	RollBackContext rollback.RollBackObjects
+}
+
+// InstallInfluxdbParams TODO
+type InstallInfluxdbParams struct {
+	Version   string `json:"version" ` // version, e.g. 7.10.2
+	Port      int    `json:"port" `    // service port
+	Host      string `json:"host" `
+	GroupName string `json:"group_name" ` // group name
+	GroupId   int    `json:"group_id" `   // group id
+	Username  string `json:"username" `
+	Password  string `json:"password" `
+}
+
+// InitDirs TODO
+type InitDirs = []string
+
+// Port TODO
+type Port = int
+type socket = string
+
+// KafkaConfig directory layout definitions
+type KafkaConfig struct {
+	InstallDir     string `json:"install_dir"`     // /data
+	InfluxdbEnvDir string `json:"influxdbenv_dir"` //  /data/influxdbenv
+}
+
+// RenderConfig config values to be substituted. Todo
+type RenderConfig struct {
+	ClusterName          string
+	NodeName             string
+	HttpPort             int
+	CharacterSetServer   string
+	InnodbBufferPoolSize string
+	Logdir               string
+	ServerId             uint64
+}
+
+// InitDefaultParam TODO
+func (i *InstallInfluxdbComp) InitDefaultParam() (err error) {
+	logger.Info("start InitDefaultParam")
+	// var mountpoint string
+	i.InstallDir = cst.DefaultPkgDir
+	i.InfluxdbEnvDir = cst.DefaultInfluxdbEnv
+
+	return nil
+}
+
+// InitInfluxdbNode TODO
+/*
+create the instance data and log directories and fix their ownership
+*/
+func (i *InstallInfluxdbComp) InitInfluxdbNode() (err error) {
+
+	execUser := cst.DefaultInfluxdbExecUser
+	logger.Info("检查用户[%s]是否存在", execUser)
+	if _, err := user.Lookup(execUser); err != nil {
+		extraCmd := `groupadd influxdb && useradd influxdb -g influxdb -s /bin/bash -d /home/influxdb -m`
+		if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("创建系统用户[%s]失败,%v", "influxdb", err.Error())
+			return err
+		}
+		logger.Info("用户[%s]创建成功", execUser)
+	} else {
+		logger.Info("用户[%s]存在, 跳过创建", execUser)
+	}
+
+	// mkdir
+	extraCmd := fmt.Sprintf("rm -rf %s", i.InfluxdbEnvDir)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("初始化实例目录失败:%s", err.Error())
+		return err
+	}
+	extraCmd = fmt.Sprintf("mkdir -p %s ; chown -R influxdb:influxdb %s", i.InfluxdbEnvDir, "/data/influxdb*")
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("初始化实例目录失败:%s", err.Error())
+		return err
+	}
+
+	scripts := []byte(`
+echo "
+* - nofile 200000
+* soft nofile 200000
+* hard nofile 200000
+" >> /etc/security/limits.conf
+echo "
+vm.overcommit_memory=1
+vm.swappiness=1
+net.ipv4.ip_local_port_range=25000 50000
+net.ipv4.tcp_tw_reuse=1
+net.ipv4.tcp_tw_recycle=1
+" >> /etc/sysctl.conf`)
+
+	scriptFile := "/data/influxdbenv/init.sh"
+	if err = ioutil.WriteFile(scriptFile, scripts, 0644); err != nil {
+		logger.Error("write %s failed, %v", scriptFile, err)
+	}
+
+	extraCmd = fmt.Sprintf("bash %s", scriptFile)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("修改系统参数失败:%s", err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// DecompressInfluxdbPkg TODO
+/**
+ * @description:  validate and unpack the influxdb package
+ * @return {*}
+ */
+func (i *InstallInfluxdbComp) DecompressInfluxdbPkg() (err error) {
+
+	pkgAbPath := "influxdbpack-" + i.Params.Version + ".tar.gz"
+	extraCmd := fmt.Sprintf("cp %s %s", i.InstallDir+"/"+pkgAbPath, i.InfluxdbEnvDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	if err = os.Chdir(i.InfluxdbEnvDir); err != nil {
+		return fmt.Errorf("cd to dir %s failed, err:%w", i.InfluxdbEnvDir, err)
+	}
+	if output, err := osutil.ExecShellCommand(false, fmt.Sprintf("tar zxf %s", pkgAbPath)); err != nil {
+		logger.Error("tar zxf %s error:%s,%s", pkgAbPath, output, err.Error())
+		return err
+	}
+
+	logger.Info("influxdb binary directory: %s", i.InfluxdbEnvDir)
+	if _, err := os.Stat(i.InfluxdbEnvDir); err != nil {
+		logger.Error("%s check failed, %v", i.InfluxdbEnvDir, err)
+		return err
+	}
+	logger.Info("decompress influxdb pkg successfully")
+	return nil
+}
+
+// InstallSupervisor TODO
+/**
+ * @description:  install supervisor
+ * @return {*}
+ */
+func (i *InstallInfluxdbComp) InstallSupervisor() (err error) {
+	// Todo: check supervisor exist
+	// supervisor
+
+	if !util.FileExists(cst.DefaultInfluxdbSupervisorConf) {
+		logger.Error("supervisor conf dir %s not exist", cst.DefaultInfluxdbSupervisorConf)
+		return fmt.Errorf("supervisor conf dir %s not exist", cst.DefaultInfluxdbSupervisorConf)
+	}
+
+	extraCmd := fmt.Sprintf("rm -rf %s", i.InfluxdbEnvDir+"/"+"python")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.InfluxdbEnvDir+"/"+"pypy-5.9.0", i.InfluxdbEnvDir+"/"+"python")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("rm -rf %s", "/etc/supervisord.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.InfluxdbEnvDir+"/"+"supervisor/conf/supervisord.conf",
+		"/etc/supervisord.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("rm -rf %s", "/usr/local/bin/supervisorctl")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.InfluxdbEnvDir+"/"+"supervisor/bin/supervisorctl",
+		"/usr/local/bin/supervisorctl")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("rm -rf %s", "/usr/local/bin/supervisord")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.InfluxdbEnvDir+"/"+"python/bin/supervisord", "/usr/local/bin/supervisord")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sed -i 's/esenv/influxdbenv/g' %s", i.InfluxdbEnvDir+"/supervisor/check_supervisord.sh")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sed -i 's/esenv/influxdbenv/g' %s", i.InfluxdbEnvDir+"/supervisor/conf/supervisord.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sed -i 's/esenv/influxdbenv/g' %s", i.InfluxdbEnvDir+"/supervisor/bin/supervisorctl")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sed -i 's/esenv/influxdbenv/g' %s", i.InfluxdbEnvDir+"/pypy-5.9.0/bin/supervisord")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sed -i 's/esenv/influxdbenv/g' %s", i.InfluxdbEnvDir+"/python/bin/supervisorctl")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("rm %s ", i.InfluxdbEnvDir+"/supervisor/conf/elasticsearch.ini")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chown -R influxdb:influxdb %s ", i.InfluxdbEnvDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = "ps -ef | grep supervisord | grep -v grep | awk {'print \"kill -9 \" $2'} | sh"
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// crontab
+	extraCmd = `crontab  -l -u influxdb >/home/influxdb/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+	}
+
+	extraCmd = `cp /home/influxdb/crontab.bak /home/influxdb/crontab.tmp`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = `sed -i '/check_supervisord.sh/d' /home/influxdb/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd =
+		`echo '*/1 * * * *  /data/influxdbenv/supervisor/check_supervisord.sh >> /data/influxdbenv/supervisor/check_supervisord.err 2>&1' >>/home/influxdb/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = `crontab -u influxdb /home/influxdb/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	startCmd := `su - influxdb -c "/usr/local/bin/supervisord -c /data/influxdbenv/supervisor/conf/supervisord.conf"`
+
+	logger.Info(fmt.Sprintf("execute supervisor [%s] begin", startCmd))
+	pid, err := osutil.RunInBG(false, startCmd)
+	logger.Info(fmt.Sprintf("execute supervisor [%s] end, pid: %d", startCmd, pid))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// InstallInfluxdb TODO
+/**
+ * @description: install influxdb
+ * @return {*}
+ */
+func (i *InstallInfluxdbComp) InstallInfluxdb() (err error) {
+	var (
+		version         string = i.Params.Version
+		port            int    = i.Params.Port
+		influxdbBaseDir string = fmt.Sprintf("%s/influxdb-%s-1", cst.DefaultInfluxdbEnv, version)
+	)
+
+	influxdbLink := fmt.Sprintf("%s/influxdb", cst.DefaultInfluxdbEnv)
+	extraCmd := fmt.Sprintf("rm -rf %s", influxdbLink)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("ln -s %s %s ", influxdbBaseDir, influxdbLink)
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("link influxdb basedir failed, %s, %s", output, err.Error())
+		return err
+	}
+
+	// mkdir
+	extraCmd = fmt.Sprintf("rm -rf %s", cst.DefaultInfluxdbLogDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("rm -rf %s", cst.DefaultInfluxdbDataDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("mkdir -p %s ; mkdir -p %s ; chown -R influxdb:influxdb %s", cst.DefaultInfluxdbDataDir,
+		cst.DefaultInfluxdbLogDir, "/data/influxdb*")
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("failed to initialize instance directories: %s", err.Error())
+		return err
+	}
+
+	logger.Info("start rendering influxdb.conf")
+	extraCmd = fmt.Sprintf(`echo 'reporting-disabled = true
+[meta]
+  dir = "%s/meta"
+
+[data]
+  dir = "%s/data"
+  wal-dir = "%s/wal"
+  index-version = "tsi1"
+  cache-snapshot-memory-size = "25m"
+  max-series-per-database = 0
+  max-values-per-tag = 1000000
+  query-log-enabled = true
+  cache-max-memory-size = "8g"
+  flux-enabled = true
+
+[coordinator]
+  query-timeout = "60s"
+  log-queries-after = "10s"
+
+[http]
+  bind-address = ":%d"
+  auth-enabled = true
+  max-row-limit = 50000
+  log-enabled = false
+  write-tracing = false
+  access-log-path = "%s/var/log/access.log"
+
+[ifql]
+
+[continuous_queries]
+
+[logging]' > %s`, cst.DefaultInfluxdbDataDir, cst.DefaultInfluxdbDataDir, cst.DefaultInfluxdbDataDir, port, influxdbLink, influxdbLink+"/etc/influxdb/influxdb.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	logger.Info("generating influxdb.ini")
+	influxdbini := esutil.GenInfluxdbini()
+	influxdbiniFile := fmt.Sprintf("%s/influxdb.ini", cst.DefaultInfluxdbSupervisorConf)
+	if err = ioutil.WriteFile(influxdbiniFile, influxdbini, 0); err != nil {
+		logger.Error("write %s failed, %v", influxdbiniFile, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chmod 777 %s/influxdb.ini ", cst.DefaultInfluxdbSupervisorConf)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chown -R influxdb:influxdb %s ", i.InfluxdbEnvDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	if err = esutil.SupervisorctlUpdate(); err != nil {
+		logger.Error("supervisorctl update failed %v", err)
+	}
+
+	extraCmd = "sleep 60"
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	return nil
+}
+
+// InitUser TODO
+func (i *InstallInfluxdbComp) InitUser() (err error) {
+
+	var (
+		username        string = i.Params.Username
+		password        string = i.Params.Password
+		port            int    = i.Params.Port
+		influxdbBaseDir string = fmt.Sprintf("%s/influxdb", cst.DefaultInfluxdbEnv)
+	)
+	extraCmd := fmt.Sprintf(
+		`%s/usr/bin/influx -host localhost -port %d -execute "create user '%s' with password '%s' with all privileges"`,
+		influxdbBaseDir, port, username, password)
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("create influxdb user failed, %s, %s", output, err.Error())
+		return err
+	}
+
+	return nil
+}
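+
+// Illustrative only (not part of the original change): once InitUser has run,
+// the account can be checked with the InfluxDB 1.x CLI, using the same binary
+// path and port this component configures:
+//
+//   /data/influxdbenv/influxdb/usr/bin/influx -host localhost -port <port> \
+//     -username <user> -password <pass> -execute "SHOW USERS"
+//
+// "SHOW USERS" is standard InfluxQL and lists each user with its admin flag.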
+
+// InstallTelegraf TODO
+func (i *InstallInfluxdbComp) InstallTelegraf() (err error) {
+
+	var (
+		host      string = i.Params.Host
+		port      int    = i.Params.Port
+		groupName string = i.Params.GroupName
+		groupId   int    = i.Params.GroupId
+	)
+
+	logger.Info("start rendering telegraf.conf")
+	extraCmd := fmt.Sprintf(`echo '[global_tags]
+    cluster = "%s"
+    dbrole = "influxdb"
+    dbgroup = "%s"
+    dbgroup_id = "%d"
+    influx_host = "%s"
+    influx_port = "%d"
+
+# Configuration for telegraf agent
+[agent]
+    interval = "30s"
+    debug = true
+    hostname = "%s"
+    round_interval = true
+    flush_interval = "10s"
+    flush_jitter = "0s"
+    collection_jitter = "0s"
+    metric_batch_size = 1000
+    metric_buffer_limit = 500000
+    quiet = false
+    logfile = ""
+    omit_hostname = false
+
+###############################################################################
+#                                  OUTPUTS                                    #
+###############################################################################
+
+[[outputs.prometheus_client]]
+    listen = ":9274"
+    path = "/metrics"
+    expiration_interval = "340s"
+    collectors_exclude = ["gocollector", "process"]
+
+###############################################################################
+#                                  INPUTS                                     #
+###############################################################################
+### os
+[[inputs.mem]]
+[[inputs.cpu]]
+    percpu = false
+    totalcpu = true
+[[inputs.diskio]]
+[[inputs.disk]]
+
+#### influxdb
+[[inputs.influxdb]]
+  urls = [
+    "http://localhost:%d/debug/vars"
+  ]
+  timeout = "30s"
+  interval = "1m"
+  namedrop = ["influxdb_memstats*", "influxdb_runtime*"]
+
+#### procstat
+[[inputs.procstat]]
+  exe = "influxd"
+  pid_finder = "pgrep"
+  pid_tag = true
+  process_name = "influxd"
+  interval = "30s"
+
+#### http_response
+[[inputs.http_response]]
+   address = "http://%s:%d/ping"
+   response_timeout = "2s"
+   method = "GET"' > %s`, groupName, groupName, groupId, host, port, host, port, host, port, cst.DefaultInfluxdbEnv+"/telegraf/etc/telegraf/telegraf.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	logger.Info("generating telegraf.ini")
+	telegrafIni := esutil.GenTelegrafini()
+	telegrafIniFile := fmt.Sprintf("%s/telegraf.ini", cst.DefaultInfluxdbSupervisorConf)
+	if err = ioutil.WriteFile(telegrafIniFile, telegrafIni, 0); err != nil {
+		logger.Error("write %s failed, %v", telegrafIniFile, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chmod 777 %s/telegraf.ini ", cst.DefaultInfluxdbSupervisorConf)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chown -R influxdb:influxdb %s ", i.InfluxdbEnvDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	if err = esutil.SupervisorctlUpdate(); err != nil {
+		logger.Error("supervisorctl update failed %v", err)
+	}
+
+	return nil
+}
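+
+// A hedged smoke test for the telegraf instance configured above: the
+// prometheus_client output rendered into telegraf.conf listens on :9274 with
+// path /metrics, so once supervisor has started it,
+//
+//   curl -s http://localhost:9274/metrics | head
+//
+// should print prometheus-formatted samples.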
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/startstop_process.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/startstop_process.go
new file mode 100644
index 0000000000..c0df6d1dcd
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/influxdb/startstop_process.go
@@ -0,0 +1,89 @@
+package influxdb
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+)
+
+// StartStopProcessComp TODO
+type StartStopProcessComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *ProcessParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// ProcessParams TODO
+type ProcessParams struct {
+}
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) Init() (err error) {
+	logger.Info("start stop cluster init")
+	return nil
+}
+
+// StopProcess TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) StopProcess() (err error) {
+
+	// stop processes
+	extraCmd := "supervisorctl stop all"
+	logger.Info("stopping all processes, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	return nil
+}
+
+// StartProcess TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) StartProcess() (err error) {
+
+	// start processes
+	extraCmd := "supervisorctl start all"
+	logger.Info("starting all processes, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
+
+// RestartProcess TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) RestartProcess() (err error) {
+
+	// stop processes
+	extraCmd := "supervisorctl stop all"
+	logger.Info("stopping all processes, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// start processes
+	extraCmd = "supervisorctl start all"
+	logger.Info("starting all processes, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/clean_data.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/clean_data.go
new file mode 100644
index 0000000000..4b0da243c2
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/clean_data.go
@@ -0,0 +1,101 @@
+package kafka
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+)
+
+// CleanDataComp TODO
+type CleanDataComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *CleanDataParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// CleanDataParams TODO
+type CleanDataParams struct{}
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *CleanDataComp) Init() (err error) {
+	logger.Info("Clean data fake init")
+	return nil
+}
+
+// CleanData TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *CleanDataComp) CleanData() (err error) {
+	// clear crontab
+	logger.Info("fetching crontab")
+	out, err := osutil.ListCrontb(cst.DefaultExecUser)
+	if err != nil {
+		logger.Error("failed to fetch crontab, %v", err)
+		return err
+	}
+	logger.Debug("crontab: %v", out)
+	if len(out) > 0 {
+		extraCmd := "crontab -u mysql -r"
+		logger.Info("clearing crontab, [%s]", extraCmd)
+		if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("[%s] execute failed, %v", extraCmd, err)
+			return err
+		}
+	}
+
+	// force-kill processes
+	extraCmd := `ps -ef | egrep 'supervisord|burrow|telegraf|java'|grep -v grep |awk {'print "kill -9 " $2'}|sh`
+	logger.Info("force-killing processes, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = `rm -f /etc/supervisord.conf /usr/local/bin/supervisorctl /usr/local/bin/supervisord /usr/bin/java`
+	logger.Info("removing symlinks, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// delete kafkaenv
+	extraCmd = `rm -rf /data/kafkaenv*`
+	logger.Info("deleting kafkaenv, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// delete data directories
+	extraCmd = `rm -rf /data*/kafkadata*`
+	logger.Info("deleting kafkadata, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// delete log directories
+	extraCmd = `rm -rf /data*/kafkalog*`
+	logger.Info("deleting kafkalog, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// delete zookeeper log directories
+	extraCmd = `rm -rf /data*/zklog*`
+	logger.Info("deleting zklog, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/decom_broker.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/decom_broker.go
new file mode 100644
index 0000000000..a7bee3cd8b
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/decom_broker.go
@@ -0,0 +1,230 @@
+package kafka
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/kafkautil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"io/ioutil"
+	"strings"
+	"time"
+)
+
+// DecomBrokerComp TODO
+type DecomBrokerComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *DecomBrokerParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// DecomBrokerParams TODO
+type DecomBrokerParams struct {
+	ZookeeperIp    string   `json:"zookeeper_ip" validate:"required"`    // zookeeper address to connect to
+	Username       string   `json:"username"`                            // admin username
+	Password       string   `json:"password"`                            // admin password
+	ExcludeBrokers []string `json:"exclude_brokers" validate:"required"` // brokers to decommission
+	NewBrokers     []string `json:"new_brokers" `                        // brokers to add
+}
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *DecomBrokerComp) Init() (err error) {
+	logger.Info("decom broker fake init")
+	return nil
+}
+
+// DoReplaceBrokers TODO
+func (d *DecomBrokerComp) DoReplaceBrokers() (err error) {
+
+	const SleepInterval = 300 * time.Second
+
+	zkHost := d.Params.ZookeeperIp + ":2181"
+	oldBrokers := d.Params.ExcludeBrokers
+	newBrokers := d.Params.NewBrokers
+
+	var newBrokerIds []string
+	for _, broker := range newBrokers {
+		id, err := kafkautil.GetBrokerIdByHost(broker, zkHost)
+		if err != nil {
+			logger.Error("can't get %s broker id, %v", broker, err)
+			return err
+		}
+		newBrokerIds = append(newBrokerIds, id)
+	}
+	logger.Info("newBrokerIds: %v", newBrokerIds)
+
+	for i, broker := range oldBrokers {
+		oldBrokerId, err := kafkautil.GetBrokerIdByHost(broker, zkHost)
+		logger.Info("oldBrokerId: [%s]", oldBrokerId)
+		if err != nil {
+			logger.Error("can't get %s broker id, %v", broker, err)
+			return err
+		}
+		topicJson, err := kafkautil.GenReplaceReassignmentJson(oldBrokerId, newBrokerIds[i], zkHost)
+		if err != nil {
+			logger.Error("GenReplaceReassignmentJson failed, %v", err)
+			return err
+		}
+		logger.Info("topicJson, %s", topicJson)
+		// /data/kafkaenv/host.json
+		jsonFile := fmt.Sprintf("%s/%s.json", cst.DefaultKafkaEnv, broker)
+		logger.Info("jsonfile: %s", jsonFile)
+		if err = ioutil.WriteFile(jsonFile, []byte(topicJson), 0644); err != nil {
+			logger.Error("write %s failed, %v", jsonFile, err)
+			return err
+		}
+		if !strings.Contains(topicJson, "topic") {
+			logger.Info("no data to migrate")
+			continue
+		}
+		// do
+		if err = kafkautil.DoReassignPartitions(zkHost, jsonFile); err != nil {
+			logger.Error("DoReassignPartitions failed, %v", err)
+			return err
+		}
+		for {
+
+			out, err := kafkautil.CheckReassignPartitions(zkHost, jsonFile)
+			if err != nil {
+				logger.Error("CheckReassignPartitions failed %v", err)
+				return err
+			}
+
+			if len(out) == 0 {
+				logger.Info("data migration finished")
+				break
+			}
+
+			time.Sleep(SleepInterval)
+		}
+		logger.Info("broker [%s] migration finished", broker)
+
+	}
+
+	return nil
+}
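+
+// For reference, the JSON written to /data/kafkaenv/<host>.json is assumed to
+// follow the stock kafka-reassign-partitions format, roughly:
+//
+//   {"version":1,"partitions":[
+//     {"topic":"demo","partition":0,"replicas":[1001,1002]}
+//   ]}
+//
+// where the topic name and broker ids here are placeholders.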
+
+// DoDecomBrokers TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *DecomBrokerComp) DoDecomBrokers() (err error) {
+
+	const SleepInterval = 300 * time.Second
+
+	zkHost := d.Params.ZookeeperIp + ":2181"
+	brokers := d.Params.ExcludeBrokers
+
+	/*
+		allIds, err := kafkautil.GetBrokerIds(zkHost)
+		if err != nil {
+			logger.Error("can't get broker ids", err)
+			return err
+		}
+	*/
+	var excludeIds []string
+	for _, broker := range brokers {
+
+		id, err := kafkautil.GetBrokerIdByHost(broker, zkHost)
+		if err != nil {
+			logger.Error("can't get %s broker id, %v", broker, err)
+			return err
+		}
+		excludeIds = append(excludeIds, id)
+	}
+	logger.Info("excludeIds: %v", excludeIds)
+
+	for _, broker := range brokers {
+		brokerId, err := kafkautil.GetBrokerIdByHost(broker, zkHost)
+		logger.Info("brokerId: [%s]", brokerId)
+		if err != nil {
+			logger.Error("can't get %s broker id, %v", broker, err)
+			return err
+		}
+		topicJson, err := kafkautil.GenReassignmentJson(brokerId, zkHost, excludeIds)
+		if err != nil {
+			logger.Error("GenReassignmentJson failed, %v", err)
+			return err
+		}
+		logger.Info("topicJson, %s", topicJson)
+		// /data/kafkaenv/host.json
+		jsonFile := fmt.Sprintf("%s/%s.json", cst.DefaultKafkaEnv, broker)
+		logger.Info("jsonfile: %s", jsonFile)
+		if err = ioutil.WriteFile(jsonFile, []byte(topicJson), 0644); err != nil {
+			logger.Error("write %s failed, %v", jsonFile, err)
+			return err
+		}
+		if !strings.Contains(topicJson, "topic") {
+			logger.Info("no data to migrate")
+			continue
+		}
+		// do
+		if err = kafkautil.DoReassignPartitions(zkHost, jsonFile); err != nil {
+			logger.Error("DoReassignPartitions failed, %v", err)
+			return err
+		}
+		for {
+
+			out, err := kafkautil.CheckReassignPartitions(zkHost, jsonFile)
+			if err != nil {
+				logger.Error("CheckReassignPartitions failed %v", err)
+				return err
+			}
+
+			if len(out) == 0 {
+				logger.Info("data migration finished")
+				break
+			}
+
+			time.Sleep(SleepInterval)
+		}
+		logger.Info("broker [%s] migration finished", broker)
+
+	}
+
+	return nil
+}
+
+// DoPartitionCheck TODO
+func (d *DecomBrokerComp) DoPartitionCheck() (err error) {
+	const MaxRetry = 5
+	count := 0
+	zkHost := d.Params.ZookeeperIp + ":2181"
+	brokers := d.Params.ExcludeBrokers
+	for {
+		count++
+		logger.Info("checking migration status, attempt [%d]", count)
+		sum := 0
+		for _, broker := range brokers {
+			jsonFile := fmt.Sprintf("%s/%s.json", cst.DefaultKafkaEnv, broker)
+
+			out, err := kafkautil.CheckReassignPartitions(zkHost, jsonFile)
+			if err != nil {
+				logger.Error("failed to check partition reassignment progress, %v", err)
+				return err
+			}
+			sum += len(out)
+		}
+
+		if sum == 0 {
+			logger.Info("data migration finished")
+			break
+		}
+
+		if count == MaxRetry {
+			logger.Error("data migration check timed out, retry is possible")
+			return fmt.Errorf("data migration check timed out, retry is possible")
+		}
+		time.Sleep(60 * time.Second)
+	}
+
+	logger.Info("data migration finished")
+
+	return nil
+}
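+
+// CheckReassignPartitions presumably wraps the stock verify mode; the
+// equivalent CLI call (shown only as an illustration) is:
+//
+//   kafka-reassign-partitions.sh --zookeeper <zk>:2181 \
+//     --reassignment-json-file /data/kafkaenv/<host>.json --verify
+//
+// An empty "still in progress" set is what the loops above treat as done.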
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/install_kafka.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/install_kafka.go
new file mode 100644
index 0000000000..4c8aee8377
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/install_kafka.go
@@ -0,0 +1,923 @@
+package kafka
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"os/user"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// InstallKafkaComp TODO
+type InstallKafkaComp struct {
+	GeneralParam *components.GeneralParam
+	Params       *InstallKafkaParams
+	KafkaConfig
+	RollBackContext rollback.RollBackObjects
+}
+
+// InstallKafkaParams TODO
+type InstallKafkaParams struct {
+	KafkaConfigs  map[string]string `json:"kafka_configs" `  // kafka config overrides for server.properties
+	Version       string            `json:"version" `        // kafka version number
+	Port          int               `json:"port" `           // listener port
+	JmxPort       int               `json:"jmx_port" `       // jmx port
+	Retention     int               `json:"retention" `      // log retention hours
+	Replication   int               `json:"replication" `    // default replication factor
+	Partition     int               `json:"partition" `      // default partition count
+	Factor        int               `json:"factor" `         // __consumer_offsets replication factor
+	ZookeeperIp   string            `json:"zookeeper_ip" `   // zookeeper ips, eg: ip1,ip2,ip3
+	ZookeeperConf string            `json:"zookeeper_conf" ` // zookeeper dynamic config file content
+	MyId          int               `json:"my_id" `          // zookeeper myid
+	JvmMem        string            `json:"jvm_mem"`         //  eg: 10g
+	Host          string            `json:"host" `
+	ClusterName   string            `json:"cluster_name" ` // cluster name
+	Username      string            `json:"username" `
+	Password      string            `json:"password" `
+	BkBizId       int               `json:"bk_biz_id"`
+	DbType        string            `json:"db_type"`
+	ServiceType   string            `json:"service_type"`
+}
+
+// InitDirs TODO
+type InitDirs = []string
+
+// Port TODO
+type Port = int
+type socket = string
+
+// KafkaConfig directory layout definitions
+type KafkaConfig struct {
+	InstallDir   string `json:"install_dir"`  // /data
+	KafkaEnvDir  string `json:"kafkaenv_dir"` //  /data/kafkaenv
+	KafkaDir     string
+	ZookeeperDir string
+}
+
+// RenderConfig config values to be substituted during rendering
+type RenderConfig struct {
+	ClusterName          string
+	NodeName             string
+	HttpPort             int
+	CharacterSetServer   string
+	InnodbBufferPoolSize string
+	Logdir               string
+	ServerId             uint64
+}
+
+// InitDefaultParam TODO
+func (i *InstallKafkaComp) InitDefaultParam() (err error) {
+	logger.Info("start InitDefaultParam")
+	// var mountpoint string
+	i.InstallDir = cst.DefaultPkgDir
+	i.KafkaEnvDir = cst.DefaultKafkaEnv
+	i.KafkaDir = cst.DefaultKafkaDir
+	i.ZookeeperDir = cst.DefaultZookeeperDir
+
+	return nil
+}
+
+// InitKafkaNode TODO
+/*
+Create the instance data and log directories and fix their ownership.
+*/
+func (i *InstallKafkaComp) InitKafkaNode() (err error) {
+
+	execUser := cst.DefaultExecUser
+	logger.Info("checking whether user [%s] exists", execUser)
+	if _, err := user.Lookup(execUser); err != nil {
+		logger.Info("user [%s] does not exist, creating it", execUser)
+		if output, err := osutil.ExecShellCommand(false, fmt.Sprintf("useradd %s -g root -s /bin/bash -d /home/mysql",
+			execUser)); err != nil {
+			logger.Error("failed to create system user [%s], %s, %v", execUser, output, err.Error())
+			return err
+		}
+		logger.Info("user [%s] created", execUser)
+	} else {
+		logger.Info("user [%s] exists, skipping creation", execUser)
+	}
+
+	// mkdir
+	extraCmd := fmt.Sprintf("rm -rf %s", i.KafkaEnvDir)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("failed to initialize instance directories: %s", err.Error())
+		return err
+	}
+	extraCmd = fmt.Sprintf("mkdir -p %s ; chown -R mysql %s", i.KafkaEnvDir, "/data/kafka*")
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("failed to initialize instance directories: %s", err.Error())
+		return err
+	}
+
+	logger.Info("writing /etc/profile")
+	scripts := []byte(`sed -i '/500000/d' /etc/profile
+sed -i '/JAVA_HOME/d' /etc/profile
+sed -i '/LC_ALL/d' /etc/profile
+sed -i '/mysql/d' /etc/profile
+sed -i '/USERNAME/d' /etc/profile
+sed -i '/PASSWORD/d' /etc/profile
+echo 'ulimit -n 500000
+export JAVA_HOME=/data/kafkaenv/jdk
+export JRE_HOME=$JAVA_HOME/jre
+export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH
+export CLASSPATH=".:$JAVA_HOME/lib:$JRE_HOME/lib:$CLASSPATH"
+export LC_ALL=en_US
+export PATH=/usr/local/mysql/bin/:$PATH'>> /etc/profile
+
+source /etc/profile`)
+
+	scriptFile := "/data/kafkaenv/init.sh"
+	if err = ioutil.WriteFile(scriptFile, scripts, 0644); err != nil {
+		logger.Error("write %s failed, %v", scriptFile, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("bash %s", scriptFile)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("failed to apply system parameters: %s", err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// DecompressKafkaPkg TODO
+/**
+ * @description: verify and decompress the kafka package
+ * @return {*}
+ */
+func (i *InstallKafkaComp) DecompressKafkaPkg() (err error) {
+
+	pkgAbPath := "kafkapack-" + i.Params.Version + ".tar.gz"
+	extraCmd := fmt.Sprintf("cp %s %s", i.InstallDir+"/"+pkgAbPath, i.KafkaEnvDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	if err = os.Chdir(i.KafkaEnvDir); err != nil {
+		return fmt.Errorf("cd to dir %s failed, err:%w", i.KafkaEnvDir, err)
+	}
+	if output, err := osutil.ExecShellCommand(false, fmt.Sprintf("tar zxf %s", pkgAbPath)); err != nil {
+		logger.Error("tar zxf %s error:%s,%s", pkgAbPath, output, err.Error())
+		return err
+	}
+
+	logger.Info("kafka binary directory: %s", i.KafkaEnvDir)
+	if _, err := os.Stat(i.KafkaEnvDir); err != nil {
+		logger.Error("%s check failed, %v", i.KafkaEnvDir, err)
+		return err
+	}
+	logger.Info("decompress kafka pkg successfully")
+	return nil
+}
+
+// InstallSupervisor TODO
+/**
+ * @description: install supervisor
+ * @return {*}
+ */
+func (i *InstallKafkaComp) InstallSupervisor() (err error) {
+	// Todo: check supervisor exist
+	// supervisor
+
+	if !util.FileExists(cst.DefaultKafkaSupervisorConf) {
+		logger.Error("supervisor conf dir %s does not exist", cst.DefaultKafkaSupervisorConf)
+		return errors.New("supervisor conf dir not found")
+	}
+
+	extraCmd := fmt.Sprintf("rm -rf %s", i.KafkaEnvDir+"/"+"python")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.KafkaEnvDir+"/"+"pypy-5.9.0", i.KafkaEnvDir+"/"+"python")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("rm -rf %s", "/etc/supervisord.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.KafkaEnvDir+"/"+"supervisor/conf/supervisord.conf", "/etc/supervisord.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("rm -rf %s", "/usr/local/bin/supervisorctl")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.KafkaEnvDir+"/"+"supervisor/bin/supervisorctl",
+		"/usr/local/bin/supervisorctl")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("rm -rf %s", "/usr/local/bin/supervisord")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.KafkaEnvDir+"/"+"python/bin/supervisord", "/usr/local/bin/supervisord")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sed -i 's/esenv/kafkaenv/g' %s", i.KafkaEnvDir+"/supervisor/check_supervisord.sh")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sed -i 's/esenv/kafkaenv/g' %s", i.KafkaEnvDir+"/supervisor/conf/supervisord.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sed -i 's/esenv/kafkaenv/g' %s", i.KafkaEnvDir+"/supervisor/bin/supervisorctl")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sed -i 's/esenv/kafkaenv/g' %s", i.KafkaEnvDir+"/pypy-5.9.0/bin/supervisord")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sed -i 's/esenv/kafkaenv/g' %s", i.KafkaEnvDir+"/python/bin/supervisorctl")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("rm %s ", i.KafkaEnvDir+"/supervisor/conf/elasticsearch.ini")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chown -R mysql %s ", i.KafkaEnvDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = "ps -ef | grep supervisord | grep -v grep | awk {'print \"kill -9 \" $2'} | sh"
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// crontab
+	extraCmd = `crontab  -l -u mysql >/home/mysql/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+	}
+
+	extraCmd = `cp /home/mysql/crontab.bak /home/mysql/crontab.tmp`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = `sed -i '/check_supervisord.sh/d' /home/mysql/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd =
+		`echo '*/1 * * * *  /data/kafkaenv/supervisor/check_supervisord.sh >> /data/kafkaenv/supervisor/check_supervisord.err 2>&1' >>/home/mysql/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = `crontab -u mysql /home/mysql/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	startCmd := `su - mysql -c "/usr/local/bin/supervisord -c /data/kafkaenv/supervisor/conf/supervisord.conf"`
+	logger.Info(fmt.Sprintf("execute supervisor [%s] begin", startCmd))
+	pid, err := osutil.RunInBG(false, startCmd)
+	logger.Info(fmt.Sprintf("execute supervisor [%s] end, pid: %d", startCmd, pid))
+	if err != nil {
+		return err
+	}
+	return nil
+}
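+
+// A minimal sanity check after this step (illustrative only): supervisord
+// should now be reachable through the symlinked config, e.g.
+//
+//   supervisorctl -c /etc/supervisord.conf status
+//
+// which lists every program registered under supervisor/conf/*.ini.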
+
+// InstallZookeeper TODO
+/**
+ * @description: install zookeeper
+ * @return {*}
+ */
+func (i *InstallKafkaComp) InstallZookeeper() (err error) {
+
+	var (
+		nodeIp           string = i.Params.Host
+		myId             int    = i.Params.MyId
+		zookeeperConf    string = i.Params.ZookeeperConf
+		username         string = i.Params.Username
+		password         string = i.Params.Password
+		ZookeeperBaseDir string = fmt.Sprintf("%s/zookeeper-%s", cst.DefaultKafkaEnv, cst.DefaultZookeeperVersion)
+	)
+
+	if _, err := net.Dial("tcp", fmt.Sprintf("%s:%d", nodeIp, 2181)); err == nil {
+		logger.Error("zookeeper process exist")
+		return errors.New("zookeeper process exist")
+	}
+
+	zookeeperLink := fmt.Sprintf("%s/zk", cst.DefaultKafkaEnv)
+	extraCmd := fmt.Sprintf("rm -rf %s", zookeeperLink)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("ln -s %s %s ", ZookeeperBaseDir, zookeeperLink)
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("link zookeeperLink failed, %s, %s", output, err.Error())
+		return err
+	}
+
+	extraCmd = fmt.Sprintf(`echo 'export USERNAME=%s
+export PASSWORD=%s'>> /etc/profile`, username, password)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// mkdir
+	extraCmd = fmt.Sprintf("rm -rf %s", cst.DefaultZookeeperLogDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("mkdir -p %s ; mkdir -p %s ; mkdir -p %s ; mkdir -p %s ; chown -R mysql %s",
+		cst.DefaultZookeeperLogsDir, cst.DefaultZookeeperDataDir, cst.DefaultZookeeperConfDir, cst.DefaultZookeeperLogDir,
+		"/data/kafka*")
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("failed to initialize instance directories: %s", err.Error())
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chown -R mysql %s", cst.DefaultZookeeperLogDir)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("failed to initialize instance directories: %s", err.Error())
+		return err
+	}
+
+	logger.Info("rendering zoo.cfg")
+	extraCmd = fmt.Sprintf(`echo "tickTime=2000
+initLimit=10
+syncLimit=5
+dataDir=%s
+dataLogDir=%s
+autopurge.snapRetainCount=3
+autopurge.purgeInterval=1
+reconfigEnabled=true
+skipACL=yes
+dynamicConfigFile=%s" > %s`, cst.DefaultZookeeperDataDir, cst.DefaultZookeeperLogsDir, cst.DefaultZookeeperDynamicConf, zookeeperLink+"/conf/zoo.cfg")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf(`echo "%s" > %s`, zookeeperConf, cst.DefaultZookeeperDynamicConf)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf(`echo %d > %s`, myId, cst.DefaultZookeeperDataDir+"/myid")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	logger.Info("configuring JVM parameters")
+	extraCmd = fmt.Sprintf(`echo "export JVMFLAGS=\"-Xms1G -Xmx4G \$JVMFLAGS\"" > %s`, zookeeperLink+"/conf/java.env")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	logger.Info("generating zookeeper.ini")
+	zookeeperini := esutil.GenZookeeperini()
+	zookeeperiniFile := fmt.Sprintf("%s/zookeeper.ini", cst.DefaultKafkaSupervisorConf)
+	if err = ioutil.WriteFile(zookeeperiniFile, zookeeperini, 0); err != nil {
+		logger.Error("write %s failed, %v", zookeeperiniFile, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chmod 777 %s/zookeeper.ini ", cst.DefaultKafkaSupervisorConf)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chown -R mysql %s ", i.KafkaEnvDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	if err = esutil.SupervisorctlUpdate(); err != nil {
+		logger.Error("supervisorctl update failed %v", err)
+	}
+
+	extraCmd = "sleep 10"
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	if _, err := net.Dial("tcp", fmt.Sprintf("%s:%d", nodeIp, 2181)); err != nil {
+		logger.Error("zookeeper start failed %v", err)
+		return err
+	}
+
+	return nil
+}
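+
+// Hedged health checks for the zookeeper started above (assuming the
+// four-letter-word commands are whitelisted in this build):
+//
+//   echo ruok | nc localhost 2181   # expects "imok"
+//   echo stat | nc localhost 2181   # shows the leader/follower role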
+
+// InitKafkaUser TODO
+func (i *InstallKafkaComp) InitKafkaUser() (err error) {
+
+	var (
+		zookeeperIp  string = i.Params.ZookeeperIp
+		version      string = i.Params.Version
+		username     string = i.Params.Username
+		password     string = i.Params.Password
+		kafkaBaseDir string = fmt.Sprintf("%s/kafka-%s", cst.DefaultKafkaEnv, version)
+	)
+	zookeeperIpList := strings.Split(zookeeperIp, ",")
+	extraCmd := fmt.Sprintf(
+		"%s/bin/kafka-configs.sh --zookeeper %s:2181,%s:2181,%s:2181/ --alter --add-config \"SCRAM-SHA-256=[iterations=8192,password=%s],SCRAM-SHA-512=[password=%s]\" --entity-type users --entity-name %s",
+		kafkaBaseDir, zookeeperIpList[0], zookeeperIpList[1], zookeeperIpList[2], password, password, username)
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("init kafka user failed, %s, %s", output, err.Error())
+		return err
+	}
+
+	return nil
+}
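+
+// The SCRAM credentials written above can be inspected with the same tool
+// (a sketch; the zookeeper address and user name are placeholders):
+//
+//   kafka-configs.sh --zookeeper <zk>:2181/ --describe \
+//     --entity-type users --entity-name <username>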
+
+// InstallBroker TODO
+/**
+ * @description: install broker
+ * @return {*}
+ */
+func (i *InstallKafkaComp) InstallBroker() (err error) {
+	var (
+		retentionHours int               = i.Params.Retention
+		replicationNum int               = i.Params.Replication
+		partitionNum   int               = i.Params.Partition
+		factor         int               = i.Params.Factor
+		nodeIp         string            = i.Params.Host
+		port           int               = i.Params.Port
+		jmxPort        int               = i.Params.JmxPort
+		listeners      string            = fmt.Sprintf("%s:%d", nodeIp, port)
+		version        string            = i.Params.Version
+		processors     int               = runtime.NumCPU()
+		zookeeperIp    string            = i.Params.ZookeeperIp
+		kafkaConfigs   map[string]string = i.Params.KafkaConfigs
+		kafkaBaseDir   string            = fmt.Sprintf("%s/kafka-%s", cst.DefaultKafkaEnv, version)
+		username       string            = i.Params.Username
+		password       string            = i.Params.Password
+	)
+
+	// ln -s /data/kafkaenv/kafka-$version /data/kafkaenv/kafka
+	kafkaLink := fmt.Sprintf("%s/kafka", cst.DefaultKafkaEnv)
+	extraCmd := fmt.Sprintf("rm -rf %s", kafkaLink)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("ln -s %s %s ", kafkaBaseDir, kafkaLink)
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("link kafka basedir failed, %s, %s", output, err.Error())
+		return err
+	}
+
+	// mkdir
+	extraCmd = fmt.Sprintf("rm -rf %s", cst.DefaultKafkaLogDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("rm -rf %s", cst.DefaultKafkaDataDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("mkdir -p %s ; mkdir -p %s ; chown -R mysql %s", cst.DefaultKafkaDataDir,
+		cst.DefaultKafkaLogDir, "/data/kafka*")
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("failed to initialize instance directories: %s", err.Error())
+		return err
+	}
+
+	logger.Info("start rendering server.properties")
+	zookeeperIpList := strings.Split(zookeeperIp, ",")
+	for k, v := range kafkaConfigs {
+		config := k + "=" + v
+		logger.Info("config=%s", config)
+	}
+	extraCmd = fmt.Sprintf(`echo "log.retention.hours=%d
+default.replication.factor=%d
+num.partitions=%d
+num.network.threads=%d
+num.recovery.threads.per.data.dir=2
+offsets.topic.replication.factor=%d
+transaction.state.log.replication.factor=3
+transaction.state.log.min.isr=3
+group.initial.rebalance.delay.ms=3000
+num.io.threads=%d
+num.replica.fetchers=%d
+unclean.leader.election.enable=true
+delete.topic.enable=true
+auto.leader.rebalance.enable=true
+auto.create.topics.enable=true
+socket.send.buffer.bytes=102400
+socket.receive.buffer.bytes=102400
+socket.request.max.bytes=104857600
+log.flush.interval.messages=10000
+log.flush.interval.ms=1000
+log.cleanup.policy=delete
+log.segment.bytes=1073741824
+log.retention.check.interval.ms=300000
+zookeeper.connection.timeout.ms=6000
+log.dirs=%s
+listeners=SASL_PLAINTEXT://%s
+advertised.listeners=SASL_PLAINTEXT://%s
+zookeeper.connect=%s:2181,%s:2181,%s:2181/
+# List of enabled mechanisms, can be more than one
+sasl.enabled.mechanisms=SCRAM-SHA-512
+
+# Specify one of of the SASL mechanisms
+sasl.mechanism.inter.broker.protocol=SCRAM-SHA-512
+security.inter.broker.protocol=SASL_PLAINTEXT" > %s`, retentionHours, replicationNum, partitionNum, processors, factor, processors, processors, cst.DefaultKafkaDataDir, listeners, listeners, zookeeperIpList[0], zookeeperIpList[1], zookeeperIpList[2], kafkaLink+"/config/server.properties")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	logger.Info("configuring jaas")
+	extraCmd = fmt.Sprintf(`echo 'KafkaServer {
+  org.apache.kafka.common.security.scram.ScramLoginModule required
+  username="%s"
+  password="%s";
+};' > %s`, username, password, kafkaLink+"/config/kafka_server_scram_jaas.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	logger.Info("configuring kafka-run-class.sh")
+	extraCmd = fmt.Sprintf("sed -i 's/esenv/kafkaenv/g' %s", kafkaLink+"/bin/kafka-run-class.sh")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	logger.Info("configuring kafka-server-start.sh")
+	extraCmd = fmt.Sprintf("sed -i '/export KAFKA_HEAP_OPTS=\"-Xmx1G -Xms1G\"/a\\    export JMX_PORT=\"%d\"' %s", jmxPort,
+		kafkaLink+"/bin/kafka-server-start.sh")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	// configure JVM parameters
+	var instMem uint64
+
+	if instMem, err = esutil.GetInstMem(); err != nil {
+		logger.Error("failed to get instance memory, err: %v", err)
+		return fmt.Errorf("failed to get instance memory, err: %w", err)
+	}
+	jvmSize := instMem / 1024
+	if jvmSize > 30 {
+		jvmSize = 30
+	} else {
+		jvmSize = jvmSize / 2
+	}
+	extraCmd = fmt.Sprintf("sed -i 's/-Xmx1G -Xms1G/-Xmx%dG -Xms%dG/g' %s", jvmSize, jvmSize,
+		kafkaLink+"/bin/kafka-server-start.sh")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = "rm -rf insert.txt"
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf(
+		"echo \"export KAFKA_OPTS=\\\"\\${KAFKA_OPTS} -javaagent:%s/libs/jmx_prometheus_javaagent-0.17.2.jar=7071:%s/config/kafka-2_0_0.yml\\\"\" >> insert.txt", kafkaLink, kafkaLink)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sed -i '23 r insert.txt' %s", kafkaLink+"/bin/kafka-server-start.sh")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("cp %s %s", kafkaLink+"/bin/kafka-server-start.sh", kafkaLink+
+		"/bin/kafka-server-scram-start.sh")
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("copy start.sh failed, %s, %s", output, err.Error())
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sed -i '$d' %s", kafkaLink+"/bin/kafka-server-scram-start.sh")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf(
+		"echo 'exec $base_dir/kafka-run-class.sh $EXTRA_ARGS -Djava.security.auth.login.config=$base_dir/../config/kafka_server_scram_jaas.conf  kafka.Kafka \"$@\"' >> %s", kafkaLink+"/bin/kafka-server-scram-start.sh")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	logger.Info("generating kafka.ini")
+	kafkaini := esutil.GenKafkaini()
+	kafkainiFile := fmt.Sprintf("%s/kafka.ini", cst.DefaultKafkaSupervisorConf)
+	if err = ioutil.WriteFile(kafkainiFile, kafkaini, 0); err != nil {
+		logger.Error("write %s failed, %v", kafkainiFile, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chmod 777 %s/kafka.ini ", cst.DefaultKafkaSupervisorConf)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chown -R mysql %s ", i.KafkaEnvDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	if err = esutil.SupervisorctlUpdate(); err != nil {
+		logger.Error("supervisorctl update failed %v", err)
+		return err
+	}
+
+	// sleep 30s waiting for the broker to come up
+	time.Sleep(30 * time.Second)
+
+	if _, err := net.Dial("tcp", fmt.Sprintf("%s:%d", nodeIp, port)); err != nil {
+		logger.Error("broker start failed %v", err)
+		return err
+	}
+	return nil
+}
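+
+// To exercise the SASL/SCRAM listener from a client, a client.properties
+// along these lines can be fed to the console tools (a sketch, not part of
+// this change):
+//
+//   security.protocol=SASL_PLAINTEXT
+//   sasl.mechanism=SCRAM-SHA-512
+//   sasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule \
+//       required username="<user>" password="<pass>";
+//
+// e.g. kafka-console-producer.sh --broker-list <host>:<port> --topic test \
+//        --producer.config client.properties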
+
+// InstallManager TODO
+/**
+ * @description: install kafka manager
+ * @return {*}
+ */
+func (i *InstallKafkaComp) InstallManager() (err error) {
+
+	var (
+		nodeIp           string = i.Params.Host
+		port             int    = i.Params.Port
+		clusterName      string = i.Params.ClusterName
+		zookeeperIp      string = i.Params.ZookeeperIp
+		version          string = i.Params.Version
+		username         string = i.Params.Username
+		password         string = i.Params.Password
+		ZookeeperBaseDir string = fmt.Sprintf("%s/zookeeper-%s", cst.DefaultKafkaEnv, cst.DefaultZookeeperVersion)
+		bkBizId          int    = i.Params.BkBizId
+		dbType           string = i.Params.DbType
+		serviceType      string = i.Params.ServiceType
+	)
+
+	zookeeperLink := fmt.Sprintf("%s/zk", cst.DefaultKafkaEnv)
+	extraCmd := fmt.Sprintf("rm -rf %s", zookeeperLink)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("ln -s %s %s ", ZookeeperBaseDir, zookeeperLink)
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("link zookeeperLink failed, %s, %s", output, err.Error())
+		return err
+	}
+
+	// mkdir
+	extraCmd = fmt.Sprintf("rm -rf %s", cst.DefaultZookeeperLogDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	extraCmd = fmt.Sprintf("mkdir -p %s ; mkdir -p %s ; mkdir -p %s ; chown -R mysql %s",
+		cst.DefaultZookeeperLogsDir, cst.DefaultZookeeperDataDir, cst.DefaultZookeeperLogDir, "/data/kafka*")
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("failed to initialize instance directories: %s", err.Error())
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chown -R mysql %s", cst.DefaultZookeeperLogDir)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("failed to initialize instance directories: %s", err.Error())
+		return err
+	}
+
+	logger.Info("rendering zoo.cfg")
+	extraCmd = fmt.Sprintf(`cp %s %s`, zookeeperLink+"/conf/zoo_sample.cfg", zookeeperLink+"/conf/zoo.cfg")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sed -i 's#dataDir=/tmp/zookeeper#dataDir=%s/zookeeper/data#g' %s", cst.DefaultKafkaEnv,
+		zookeeperLink+"/conf/zoo.cfg")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	logger.Info("generating zookeeper.ini")
+	zookeeperini := esutil.GenZookeeperini()
+	zookeeperiniFile := fmt.Sprintf("%s/zookeeper.ini", cst.DefaultKafkaSupervisorConf)
+	if err = ioutil.WriteFile(zookeeperiniFile, zookeeperini, 0); err != nil {
+		logger.Error("write %s failed, %v", zookeeperiniFile, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chmod 777 %s/zookeeper.ini ", cst.DefaultKafkaSupervisorConf)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chown -R mysql %s ", i.KafkaEnvDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	logger.Info("starting zookeeper")
+	if err = esutil.SupervisorctlUpdate(); err != nil {
+		logger.Error("supervisorctl update failed %v", err)
+	}
+
+	extraCmd = "sleep 5"
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sed -i 's/kafka-manager-zookeeper/%s/g' %s", nodeIp,
+		i.KafkaEnvDir+"/cmak-3.0.0.5/conf/application.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// set play.http.context="/{bkid}/{dbtype}/{cluster}/kafka_manager"
+	httpPath := fmt.Sprintf("/%d/%s/%s/%s", bkBizId, dbType, clusterName, serviceType)
+
+	extraCmd = fmt.Sprintf("sed -i '/play.http.context/s#/#%s#' %s", httpPath,
+		i.KafkaEnvDir+"/cmak-3.0.0.5/conf/application.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	/* configure the basic-auth username and password
+	basicAuthentication.enabled=true
+	basicAuthentication.username=""
+	basicAuthentication.password=""
+
+	extraCmd = fmt.Sprintf(
+		"sed -i -e '/basicAuthentication.enabled/s/false/true/g'  -e '/basicAuthentication.username/s/admin/%s/g' -e '/basicAuthentication.password/s/password/%s/g' %s", username, password,
+		i.KafkaEnvDir+"/cmak-3.0.0.5/conf/application.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	*/
+
+	logger.Info("generating manager.ini")
+	managerini := esutil.GenManagerini()
+	manageriniFile := fmt.Sprintf("%s/manager.ini", cst.DefaultKafkaSupervisorConf)
+	if err = ioutil.WriteFile(manageriniFile, managerini, 0); err != nil {
+		logger.Error("write %s failed, %v", manageriniFile, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chmod 777 %s/manager.ini ", cst.DefaultKafkaSupervisorConf)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chown -R mysql %s ", i.KafkaEnvDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	logger.Info("starting manager")
+	if err = esutil.SupervisorctlUpdate(); err != nil {
+		logger.Error("supervisorctl update failed %v", err)
+	}
+
+	for i := 0; i < 30; i++ {
+		extraCmd = "sleep 10"
+		if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("%s execute failed, %v", extraCmd, err)
+			return err
+		}
+		if _, err := net.Dial("tcp", fmt.Sprintf("%s:%d", nodeIp, port)); err == nil {
+			break
+		}
+	}
+
+	extraCmd = fmt.Sprintf(`%s/zk/bin/zkCli.sh create /kafka-manager/mutex ""`, cst.DefaultKafkaEnv)
+	osutil.ExecShellCommand(false, extraCmd)
+	extraCmd = fmt.Sprintf(`%s/zk/bin/zkCli.sh create /kafka-manager/mutex/locks ""`, cst.DefaultKafkaEnv)
+	osutil.ExecShellCommand(false, extraCmd)
+	extraCmd = fmt.Sprintf(`%s/zk/bin/zkCli.sh create /kafka-manager/mutex/leases ""`, cst.DefaultKafkaEnv)
+	osutil.ExecShellCommand(false, extraCmd)
+
+	zookeeperIpList := strings.Split(zookeeperIp, ",")
+	zkHosts := fmt.Sprintf("%s:2181,%s:2181,%s:2181/", zookeeperIpList[0], zookeeperIpList[1], zookeeperIpList[2])
+	jaasConfig := fmt.Sprintf(
+		"org.apache.kafka.common.security.scram.ScramLoginModule required username=%s  password=%s ;", username, password)
+	postData := url.Values{}
+	postData.Add("name", clusterName)
+	postData.Add("zkHosts", zkHosts)
+	postData.Add("kafkaVersion", version)
+	postData.Add("jmxEnabled", "true")
+	postData.Add("jmxUser", "")
+	postData.Add("jmxPass", "")
+	postData.Add("logkafkaEnabled", "true")
+	postData.Add("pollConsumers", "true")
+	postData.Add("filterConsumers", "true")
+	postData.Add("activeOffsetCacheEnabled", "true")
+	postData.Add("displaySizeEnabled", "true")
+	postData.Add("tuning.brokerViewUpdatePeriodSeconds", "30")
+	postData.Add("tuning.clusterManagerThreadPoolSize", "2")
+	postData.Add("tuning.clusterManagerThreadPoolQueueSize", "100")
+	postData.Add("tuning.kafkaCommandThreadPoolSize", "2")
+	postData.Add("tuning.kafkaCommandThreadPoolQueueSize", "100")
+	postData.Add("tuning.logkafkaCommandThreadPoolSize", "2")
+	postData.Add("tuning.logkafkaCommandThreadPoolQueueSize", "100")
+	postData.Add("tuning.logkafkaUpdatePeriodSeconds", "30")
+	postData.Add("tuning.partitionOffsetCacheTimeoutSecs", "5")
+	postData.Add("tuning.brokerViewThreadPoolSize", "17")
+	postData.Add("tuning.brokerViewThreadPoolQueueSize", "1000")
+	postData.Add("tuning.offsetCacheThreadPoolSize", "17")
+	postData.Add("tuning.offsetCacheThreadPoolQueueSize", "1000")
+	postData.Add("tuning.kafkaAdminClientThreadPoolSize", "17")
+	postData.Add("tuning.kafkaAdminClientThreadPoolQueueSize", "1000")
+	postData.Add("tuning.kafkaManagedOffsetMetadataCheckMillis", "30000")
+	postData.Add("tuning.kafkaManagedOffsetGroupCacheSize", "1000000")
+	postData.Add("tuning.kafkaManagedOffsetGroupExpireDays", "7")
+	postData.Add("securityProtocol", "SASL_PLAINTEXT")
+	postData.Add("saslMechanism", "SCRAM-SHA-512")
+	postData.Add("jaasConfig", jaasConfig)
+	// http://localhost:9000/{prefix}/clusters
+	clusterURL := "http://" + nodeIp + ":9000" + httpPath + "/clusters"
+	contentType := "application/x-www-form-urlencoded"
+	if _, err = http.Post(clusterURL, contentType, strings.NewReader(postData.Encode())); err != nil {
+		logger.Error("post manager failed, %v", err)
+		return err
+	}
+
+	return nil
+}
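+
+// The POST above is equivalent to submitting CMAK's "Add Cluster" form; a
+// hedged curl version, useful for manual retries (field values elided):
+//
+//   curl -X POST \
+//     -d 'name=<cluster>&zkHosts=<ip1>:2181,<ip2>:2181,<ip3>:2181/&kafkaVersion=<version>' \
+//     http://<host>:9000/<bk_biz_id>/<db_type>/<cluster>/<service_type>/clusters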
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/kafka.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/kafka.go
new file mode 100644
index 0000000000..74cdb52afe
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/kafka.go
@@ -0,0 +1,2 @@
+// Package kafka TODO
+package kafka
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/reconfig.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/reconfig.go
new file mode 100644
index 0000000000..7948be56c4
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/reconfig.go
@@ -0,0 +1,70 @@
+package kafka
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+)
+
+// ReconfigComp TODO
+type ReconfigComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *ReconfigParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// ReconfigParams TODO
+type ReconfigParams struct {
+	Host string `json:"host" `
+}
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (r *ReconfigComp) Init() (err error) {
+	logger.Info("reconfig init")
+	return nil
+}
+
+// ReconfigAdd TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (r *ReconfigComp) ReconfigAdd() (err error) {
+	// add zookeeper nodes via dynamic reconfiguration
+	extraCmd := fmt.Sprintf(`%s/zk/bin/zkCli.sh reconfig -file %s`, cst.DefaultKafkaEnv, cst.DefaultZookeeperDynamicConf)
+	osutil.ExecShellCommand(false, extraCmd)
+
+	extraCmd = "sleep 5m"
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	return nil
+}
+
+// ReconfigRemove TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (r *ReconfigComp) ReconfigRemove() (err error) {
+	// remove a zookeeper node via dynamic reconfiguration
+	extraCmd := fmt.Sprintf(`%s/zk/bin/zkCli.sh reconfig -remove %s`, cst.DefaultKafkaEnv, r.Params.Host)
+	osutil.ExecShellCommand(false, extraCmd)
+
+	extraCmd = "sleep 5m"
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	return nil
+}
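+
+// Editorial sketch (not part of the original patch): how a caller might drive
+// ReconfigComp from a JSON payload. The payload shape below is inferred from
+// ReconfigParams; the values are hypothetical.
+//
+//	var comp ReconfigComp
+//	payload := []byte(`{"host": "10.0.0.4:2181"}`)
+//	if err := json.Unmarshal(payload, &comp.Params); err == nil {
+//		_ = comp.Init()
+//		_ = comp.ReconfigRemove() // drops the host from the dynamic membership
+//	}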
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/startstop_process.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/startstop_process.go
new file mode 100644
index 0000000000..2359bc3eab
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/kafka/startstop_process.go
@@ -0,0 +1,146 @@
+package kafka
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"strings"
+)
+
+// StartStopProcessComp TODO
+type StartStopProcessComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *ProcessParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// ProcessParams TODO
+type ProcessParams struct {
+	ZookeeperIp string `json:"zookeeper_ip" ` // zookeeper ip, eg: ip1,ip2,ip3
+}
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) Init() (err error) {
+	logger.Info("start stop cluster init")
+	return nil
+}
+
+// StopProcess TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) StopProcess() (err error) {
+
+	// stop all supervised processes
+	extraCmd := "supervisorctl stop all"
+	logger.Info("停止所有进程, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = "ps -ef | egrep 'cmak' | grep -v grep |awk {'print \"kill -9 \" $2'}|sh"
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("rm -rf %s", cst.DefaultKafkaEnv+"/cmak-3.0.0.5/RUNNING_PID")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	return nil
+}
+
+// StartProcess TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) StartProcess() (err error) {
+
+	// start all supervised processes
+	extraCmd := "supervisorctl start all"
+	logger.Info("启动所有进程, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
+
+// RestartProcess TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) RestartProcess() (err error) {
+
+	// stop all supervised processes
+	extraCmd := "supervisorctl stop all"
+	logger.Info("停止所有进程, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = "ps -ef | egrep 'cmak' | grep -v grep |awk {'print \"kill -9 \" $2'}|sh"
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// start all supervised processes
+	extraCmd = "supervisorctl start all"
+	logger.Info("启动所有进程, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
+
+// RestartBroker TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) RestartBroker() (err error) {
+
+	zookeeperIpList := strings.Split(d.Params.ZookeeperIp, ",")
+	// extraCmd := fmt.Sprintf("line=`sed -n -e '/zookeeper.connect=/=' %s`", cst.DefaultKafkaEnv+"/kafka/config/server.properties")
+	// if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+	//	logger.Error("%s execute failed, %v", extraCmd, err)
+	//	return err
+	// }
+	extraCmd := fmt.Sprintf("sed -i '29c zookeeper.connect=%s:2181,%s:2181,%s:2181/' %s", zookeeperIpList[0],
+		zookeeperIpList[1], zookeeperIpList[2], cst.DefaultKafkaEnv+"/kafka/config/server.properties")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	// restart the broker process
+	extraCmd = "supervisorctl restart kafka"
+	logger.Info("重启broker进程, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("sleep 5m")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
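+
+// Editorial note (assumption): the "sed -i '29c ...'" above rewrites
+// server.properties by line number, which breaks if the file is ever
+// reordered. A key-anchored replacement would be more robust, e.g.:
+//
+//	extraCmd := fmt.Sprintf("sed -i 's|^zookeeper.connect=.*|zookeeper.connect=%s:2181,%s:2181,%s:2181/|' %s",
+//		zookeeperIpList[0], zookeeperIpList[1], zookeeperIpList[2],
+//		cst.DefaultKafkaEnv+"/kafka/config/server.properties")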
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/medium.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/medium.go
new file mode 100644
index 0000000000..4662400dfe
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/medium.go
@@ -0,0 +1,65 @@
+package components
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"fmt"
+	"path"
+	"path/filepath"
+	"regexp"
+)
+
+// Medium is the generic handler for install packages (media)
+type Medium struct {
+	Pkg    string `json:"pkg" validate:"required"`          // package file name
+	PkgMd5 string `json:"pkg_md5"  validate:"required,md5"` // package MD5
+}
+
+// Check verifies that the package exists and that its MD5 matches
+func (m *Medium) Check() (err error) {
+	var fileMd5 string
+	// check that the package file exists
+	pkgAbPath := m.GetAbsolutePath()
+	if !util.FileExists(pkgAbPath) {
+		return fmt.Errorf("%s不存在", pkgAbPath)
+	}
+	if fileMd5, err = util.GetFileMd5(pkgAbPath); err != nil {
+		return fmt.Errorf("获取[%s]md5失败, %v", m.Pkg, err.Error())
+	}
+	// verify the md5
+	if fileMd5 != m.PkgMd5 {
+		return fmt.Errorf("安装包的md5不匹配,[%s]文件的md5[%s]不正确", m.Pkg, fileMd5)
+	}
+	return
+}
+
+// GetAbsolutePath returns the absolute path where the medium is stored
+func (m *Medium) GetAbsolutePath() string {
+	return path.Join(cst.BK_PKG_INSTALL_PATH, m.Pkg)
+}
+
+// GePkgBaseName strips the archive suffix, e.g. it turns
+// mysql-5.7.20-linux-x86_64-tmysql-3.1.5-gcs.tar.gz into
+// mysql-5.7.20-linux-x86_64-tmysql-3.1.5-gcs,
+// which is then used as the symlink name
+func (m *Medium) GePkgBaseName() string {
+	pkgFullName := filepath.Base(m.GetAbsolutePath())
+	return regexp.MustCompile("(.tar.gz|.tgz)$").ReplaceAllString(pkgFullName, "")
+}
+
+// GePkgEsBaseName strips the archive suffix from the ES package name
+// TODO: refine ES package parsing
+func (m *Medium) GePkgEsBaseName() string {
+	return regexp.MustCompile("(.tar.gz|.tgz)$").ReplaceAllString(m.Pkg, "")
+}
+
+// GePkgKafkaBaseName parses the Kafka package base name
+// TODO: not yet implemented
+func (m *Medium) GePkgKafkaBaseName() string {
+	return ""
+}
+
+// GePkgHdfsBaseName parses the HDFS package base name
+// TODO: not yet implemented
+func (m *Medium) GePkgHdfsBaseName() string {
+	return ""
+}
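+
+// Editorial sketch (not part of the original patch): typical use of Medium
+// when validating a downloaded package before decompressing it. The file name
+// and md5 are hypothetical.
+//
+//	m := Medium{Pkg: "kafka-2.4.0.tgz", PkgMd5: "d41d8cd98f00b204e9800998ecf8427e"}
+//	if err := m.Check(); err != nil {
+//		return err // package missing or md5 mismatch
+//	}
+//	base := m.GePkgBaseName() // "kafka-2.4.0", used as the symlink name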
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/output.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/output.go
new file mode 100644
index 0000000000..ff46190324
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/output.go
@@ -0,0 +1,39 @@
+package components
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// WrapperOutputString wraps a raw string result for output
+func WrapperOutputString(output string) string {
+	return output
+}
+
+// WrapperOutput marshals v into a JSON string for output
+func WrapperOutput(v interface{}) (string, error) {
+	b, e := json.Marshal(v)
+	if e != nil {
+		return "", e
+	}
+	return string(b), nil
+}
+
+// PrintOutputCtx TODO
+func PrintOutputCtx(v interface{}) error {
+	if ss, err := WrapperOutput(v); err != nil {
+		return err
+	} else {
+		fmt.Println(ss)
+	}
+	return nil
+}
+
+// ToPrettyJson TODO
+func ToPrettyJson(v interface{}) string {
+	if data, err := json.MarshalIndent(v, "", "    "); err == nil {
+		// ss := "\n# use --helper to show explanations. example for payload:\n --payload-format raw --payload '%s'"
+		return string(data)
+	}
+	return "未找到合法的 example "
+}
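+
+// Editorial sketch (not part of the original patch): components print their
+// result to stdout as JSON so the calling pipeline can parse it.
+//
+//	type result struct {
+//		Token string `json:"token"`
+//	}
+//	_ = PrintOutputCtx(result{Token: "abc"}) // prints {"token":"abc"}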
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/check_shrink.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/check_shrink.go
new file mode 100644
index 0000000000..5dec97863a
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/check_shrink.go
@@ -0,0 +1,78 @@
+package pulsar
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/pulsarutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+)
+
+// CheckPulsarShrinkComp TODO
+type CheckPulsarShrinkComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *CheckPulsarShrinkParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// CheckPulsarShrinkParams TODO
+type CheckPulsarShrinkParams struct {
+	HttpPort      int      `json:"http_port" ` // HTTP port
+	Host          string   `json:"host" validate:"required,ip" `
+	BookkeeperIp  []string `json:"bookkeeper_ip"`  // IPs of the bookkeepers being decommissioned
+	BookkeeperNum int      `json:"bookkeeper_num"` // original number of bookkeepers
+}
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *CheckPulsarShrinkComp) Init() (err error) {
+	logger.Info("Reduce pulsar node fake init")
+	return nil
+}
+
+// CheckBrokerConf TODO
+func (d *CheckPulsarShrinkComp) CheckBrokerConf() (err error) {
+	return pulsarutil.CheckBrokerConf(d.Params.BookkeeperNum - len(d.Params.BookkeeperIp))
+}
+
+// CheckNamespaceEnsembleSize TODO
+func (d *CheckPulsarShrinkComp) CheckNamespaceEnsembleSize() (err error) {
+	return pulsarutil.CheckNamespaceEnsembleSize(d.Params.BookkeeperNum - len(d.Params.BookkeeperIp))
+}
+
+// CheckUnderReplicated TODO
+func (d *CheckPulsarShrinkComp) CheckUnderReplicated() (err error) {
+	return pulsarutil.CheckUnderReplicated()
+}
+
+// CheckLedgerMetadata checks the ledger metadata
+func (d *CheckPulsarShrinkComp) CheckLedgerMetadata() (err error) {
+	return pulsarutil.CheckLedgerMetadata(d.Params.BookkeeperNum - len(d.Params.BookkeeperIp))
+}
+
+// DecommissionBookie TODO
+func (d *CheckPulsarShrinkComp) DecommissionBookie() (err error) {
+	extraCmd := fmt.Sprintf("%s/bin/bookkeeper shell decommissionbookie", cst.DefaultPulsarBkDir)
+	logger.Info("下架bookkeeper, [%s]", extraCmd)
+	_, err = osutil.ExecShellCommand(false, extraCmd)
+	if err != nil {
+		logger.Error("[%s] execute failed, %v", err)
+		return err
+	}
+	return nil
+}
+
+// SetBookieReadonly puts the bookie into read-only state
+func (d *CheckPulsarShrinkComp) SetBookieReadonly() (err error) {
+	return pulsarutil.SetBookieReadOnly()
+}
+
+// UnsetBookieReadonly clears the bookie read-only state
+func (d *CheckPulsarShrinkComp) UnsetBookieReadonly() (err error) {
+	return pulsarutil.UnsetBookieReadOnly()
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/clean_data.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/clean_data.go
new file mode 100644
index 0000000000..95a6477641
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/clean_data.go
@@ -0,0 +1,92 @@
+package pulsar
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+)
+
+// CleanDataComp TODO
+type CleanDataComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *CleanDataParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// CleanDataParams TODO
+type CleanDataParams struct{}
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *CleanDataComp) Init() (err error) {
+	logger.Info("Clean data fake init")
+	return nil
+}
+
+// CleanData TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *CleanDataComp) CleanData() (err error) {
+	// remove the crontab entries
+	logger.Info("获取crontab")
+	out, err := osutil.ListCrontb(cst.DefaultExecUser)
+	if err != nil {
+		logger.Error("获取crontab失败", err)
+		return err
+	}
+	logger.Debug("crontab: ", out)
+	if len(out) > 0 {
+		extraCmd := "crontab -u mysql -r"
+		logger.Info("清除crontab, [%s]", extraCmd)
+		if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("[%s] execute failed, %v", extraCmd, err)
+			return err
+		}
+	}
+
+	// force-kill the supervisord process
+	extraCmd := `ps -ef | egrep 'supervisord'|grep -v grep |awk {'print "kill -9 " $2'}|sh`
+	logger.Info("强杀进程, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = `rm -f /etc/supervisord.conf /usr/local/bin/supervisorctl /usr/local/bin/supervisord /usr/bin/java`
+	logger.Info("删除软链, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// clean profile
+	extraCmd = `sed -i '/pulsarprofile/d' /etc/profile`
+	logger.Info("clean profile, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	// remove the pulsarenv directories
+	extraCmd = `rm -rf /data/pulsarenv*`
+	logger.Info("删除pulsarenv, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// remove the data directories
+	extraCmd = `rm -rf /data*/pulsar*`
+	logger.Info("删除pulsar, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/install_pulsar.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/install_pulsar.go
new file mode 100644
index 0000000000..68905e39be
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/install_pulsar.go
@@ -0,0 +1,818 @@
+package pulsar
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/pulsarutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/user"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// InstallPulsarComp TODO
+type InstallPulsarComp struct {
+	GeneralParam *components.GeneralParam
+	Params       *InstallPulsarParams
+	PulsarConfig
+	RollBackContext rollback.RollBackObjects
+}
+
+// InstallPulsarParams TODO
+type InstallPulsarParams struct {
+	ZkHost               string          `json:"zk_host"`                             // zookeeper list, comma separated, domains/IPs
+	Domain               string          `json:"domain"`                              // domain name
+	ClusterName          string          `json:"cluster_name"`                        // cluster name
+	ZkId                 int             `json:"zk_id" `                              // zookeeper id
+	ZkConfigs            json.RawMessage `json:"zk_configs"`                          // zookeeper config fetched from dbconfig
+	BkConfigs            json.RawMessage `json:"bk_configs"`                          // bookkeeper config fetched from dbconfig
+	BrokerConfigs        json.RawMessage `json:"broker_configs"`                      // broker config fetched from dbconfig
+	PulsarVersion        string          `json:"pulsar_version"  validate:"required"` // version, e.g. 2.10.1
+	Host                 string          `json:"host" validate:"required,ip" `        // local IP
+	Partitions           int             `json:"partitions"`                          // default number of partitions
+	RetentionTime        int             `json:"retention_time"`                      // default retention time
+	EnsembleSize         int             `json:"ensemble_size"`                       // default ensemble size
+	WriteQuorum          int             `json:"write_quorum"`                        // default write quorum
+	AckQuorum            int             `json:"ack_quorum"`                          // default ack quorum
+	Token                string          `json:"token"`                               // token generated on the zk node
+	Role                 string          `json:"role"`                                // pulsar role: zookeeper, bookkeeper or broker
+	BrokerWebServicePort int             `json:"broker_web_service_port"`             // broker HTTP service port
+	Username             string          `json:"username"`                            // username
+	Password             string          `json:"password"`                            // password
+	HostMap              json.RawMessage `json:"host_map"`                            // mappings written to /etc/hosts
+	NginxSubPath         string          `json:"nginx_sub_path"`                      // sub-path for the reverse proxy in the UI
+}
+
+// InitDirs TODO
+type InitDirs = []string
+
+// Port TODO
+type Port = int
+
+// PulsarConfig holds directory definitions and the like
+type PulsarConfig struct {
+	InstallDir   string `json:"install_dir"`   // /data
+	PulsarenvDir string `json:"pulsarenv_dir"` //  /data/pulsarenv
+	PkgDir       string `json:"pkg_idr"`       // /data/install/
+	PulsarDir    string
+}
+
+// GenerateTokenResult TODO
+type GenerateTokenResult struct {
+	Token string `json:"token"`
+}
+
+// InitDefaultParam TODO
+func (i *InstallPulsarComp) InitDefaultParam() (err error) {
+	logger.Info("start InitDefaultParam")
+	// var mountpoint string
+	i.InstallDir = cst.DefaultInstallDir
+	i.PulsarenvDir = cst.DefaultPulsarEnvDir
+	i.PkgDir = cst.DefaultPkgDir
+
+	return nil
+}
+
+// InitPulsarDirs TODO
+/*
+Create the instance data and log directories and adjust their ownership
+*/
+func (i *InstallPulsarComp) InitPulsarDirs() (err error) {
+	execUser := cst.DefaultExecUser
+	logger.Info("检查用户[%s]是否存在", execUser)
+	if _, err := user.Lookup(execUser); err != nil {
+		logger.Info("用户[%s]不存在,开始创建", execUser)
+		if output, err := osutil.ExecShellCommand(false, fmt.Sprintf("useradd %s -g root -s /bin/bash -d /home/mysql",
+			execUser)); err != nil {
+			logger.Error("创建系统用户[%s]失败,%s, %v", execUser, output, err.Error())
+			return err
+		}
+		logger.Info("用户[%s]创建成功", execUser)
+	} else {
+		logger.Info("用户[%s]存在, 跳过创建", execUser)
+	}
+
+	// mkdir
+	extraCmd := fmt.Sprintf("mkdir -p %s ;mkdir -p %s ; mkdir -p %s ; chown -R mysql %s",
+		cst.DefaultInstallDir, cst.DefaultPulsarEnvDir, cst.DefaultPulsarLogDir, "/data*/pulsar*")
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("初始化实例目录失败:%s", err.Error())
+		return err
+	}
+
+	// mkdir /data*/pulsardata
+	dataDir, err := pulsarutil.GetAllDataDir()
+	if err != nil {
+		logger.Error("获取数据目录失败: %s", err.Error())
+		return err
+	}
+	for _, dir := range dataDir {
+		extraCmd := fmt.Sprintf("mkdir -p /%s/pulsardata", dir)
+		if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("创建目录/%s/pulsardata失败: %s", dir, err.Error())
+			return err
+		}
+	}
+
+	// chown
+	extraCmd = fmt.Sprintf("chown -R mysql %s", "/data*/pulsar*")
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("修改owner失败:%s", err.Error())
+		return err
+	}
+
+	logger.Info("写入/etc/profile")
+	scripts := []byte(fmt.Sprintf(`cat << 'EOF' > /data/pulsarenv/pulsarprofile
+ulimit -n 500000
+export JAVA_HOME=/data/pulsarenv/java/jdk
+export JRE=$JAVA_HOME/jre
+export PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH
+export CLASSPATH=".:$JAVA_HOME/lib:$JRE/lib:$CLASSPATH"
+export LC_ALL=en_US
+EOF
+
+chown mysql  /data/pulsarenv/pulsarprofile
+
+sed -i '/pulsarprofile/d' /etc/profile
+echo "source /data/pulsarenv/pulsarprofile" >>/etc/profile`))
+
+	scriptFile := fmt.Sprintf("%s/init.sh", cst.DefaultPulsarEnvDir)
+	if err = ioutil.WriteFile(scriptFile, scripts, 0644); err != nil {
+		logger.Error("write %s failed, %v", scriptFile, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("bash %s", scriptFile)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("修改系统参数失败:%s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// InstallZookeeper TODO
+/**
+ * @description: install zookeeper
+ * @return {*}
+ */
+func (i *InstallPulsarComp) InstallZookeeper() (err error) {
+	var (
+		// version   string   = i.Params.PulsarVersion
+		zkHost []string = strings.Split(i.Params.ZkHost, ",")
+		// zkBaseDir string   = fmt.Sprintf("%s/apache-pulsar-%s", cst.DefaultPulsarEnvDir, version)
+	)
+	logger.Info("部署zookeeper开始...")
+
+	/*
+		extraCmd := fmt.Sprintf("ln -s %s %s ", zkBaseDir, cst.DefaultPulsarZkDir)
+		if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("link failed, %s, %s", output, err.Error())
+			return err
+		}
+	*/
+
+	// create the data and log directories
+	pulsarlogDir := cst.DefaultPulsarLogDir
+	pulsardataDir := cst.DefaultPulsarDataDir
+	extraCmd := fmt.Sprintf(`mkdir -p %s ;
+		chown -R mysql  %s ;
+		mkdir -p %s ;
+		chown -R mysql %s`, pulsarlogDir, pulsarlogDir, pulsardataDir, pulsardataDir)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("初始化实例目录失败:%s", err.Error())
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("echo %d > %s/myid", i.Params.ZkId, cst.DefaultPulsarDataDir)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("写入myid失败:%s", err.Error())
+		return err
+	}
+
+	logger.Info("zookeeper.cfg")
+	// 生成zookeeper.conf
+	zkCfg, err := i.GenConfig(i.Params.ZkConfigs)
+	if err != nil {
+		logger.Error("解析dbconfig失败: %s\n%v", err.Error(), i.Params.ZkConfigs)
+		return err
+	}
+
+	// substitute variables in zookeeper.conf
+	zkCfg = strings.ReplaceAll(zkCfg, "{{data_dir}}", cst.DefaultPulsarDataDir)
+	zkCfg = strings.ReplaceAll(zkCfg, "{{data_log_dir}}", cst.DefaultPulsarLogDir)
+	zkCfg = strings.ReplaceAll(zkCfg, "{{zk_host_list[0]}}", zkHost[0])
+	zkCfg = strings.ReplaceAll(zkCfg, "{{zk_host_list[1]}}", zkHost[1])
+	zkCfg = strings.ReplaceAll(zkCfg, "{{zk_host_list[2]}}", zkHost[2])
+
+	logger.Info("zookeeper.conf:\n%s", zkCfg)
+
+	if err = ioutil.WriteFile(cst.DefaultPulsarZkConf, []byte(zkCfg), 0644); err != nil {
+		logger.Error("write %s failed, %v", cst.DefaultPulsarZkConf, err)
+		return err
+	}
+
+	logger.Info("生成zookeeper.ini文件")
+	zkini := pulsarutil.GenZookeeperIni()
+	zkiniFile := fmt.Sprintf("%s/zookeeper.ini", cst.DefaultPulsarSupervisorConfDir)
+	if err = ioutil.WriteFile(zkiniFile, zkini, 0644); err != nil {
+		logger.Error("write %s failed, %v", zkiniFile, err)
+		return err
+	}
+
+	if err = pulsarutil.SupervisorctlUpdate(); err != nil {
+		logger.Error("supervisor update failed %v", err)
+		return err
+	}
+
+	// sleep 30s for waiting zk up
+	time.Sleep(30 * time.Second)
+	logger.Info("部署zookeeper结束...")
+	return nil
+}
+
+// InitCluster  TODO
+/**
+ * @description: initialize the cluster metadata and create the secret key and token
+ * @return {*}
+ */
+func (i *InstallPulsarComp) InitCluster() (err error) {
+	logger.Info("初始化集群信息开始...")
+	extraCmd := fmt.Sprintf("%s/bin/pulsar initialize-cluster-metadata "+
+		"--cluster %s "+
+		"--zookeeper %s "+
+		"--configuration-store %s "+
+		"--web-service-url http://%s:8080 "+
+		"--web-service-url-tls https://%s:8443 "+
+		"--broker-service-url pulsar://%s:6650 "+
+		"--broker-service-url-tls pulsar+ssl://%s:6651",
+		cst.DefaultPulsarZkDir, i.Params.ClusterName,
+		i.Params.ZkHost, i.Params.ZkHost,
+		i.Params.Domain, i.Params.Domain, i.Params.Domain, i.Params.Domain)
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("cluster init failed, %s, %s", output, err.Error())
+		return err
+	}
+
+	logger.Info("生成secret")
+	extraCmd = fmt.Sprintf("%s/bin/pulsar tokens create-secret-key --output %s/my-secret.key", cst.DefaultPulsarZkDir,
+		cst.DefaultPulsarZkDir)
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("secret generation failed, %s, %s", output, err.Error())
+		return err
+	}
+	logger.Info("生成token")
+	extraCmd = fmt.Sprintf(
+		"%s/bin/pulsar tokens create --secret-key file:///%s/my-secret.key --subject super-user > %s/token.txt", cst.DefaultPulsarZkDir, cst.DefaultPulsarZkDir, cst.DefaultPulsarZkDir)
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("token generation failed, %s, %s", output, err.Error())
+		return err
+	}
+	extraCmd = fmt.Sprintf("cat %s/token.txt | xargs echo -n", cst.DefaultPulsarZkDir)
+	output, err := osutil.ExecShellCommand(false, extraCmd)
+	if err != nil {
+		logger.Error("get token failed, %s, %s", output, err.Error())
+		return err
+	}
+	resultStruct := GenerateTokenResult{
+		Token: output,
+	}
+	jsonBytes, err := json.Marshal(resultStruct)
+	if err != nil {
+		logger.Error("transfer resultStruct to json failed", err.Error())
+		return err
+	}
+	fmt.Printf("%s", string(jsonBytes))
+	logger.Info("初始化集群信息结束...")
+	return nil
+}
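+
+// Editorial note (not part of the original patch): InitCluster prints the
+// generated token to stdout as JSON, e.g. {"token":"eyJhbGciOi..."}, so the
+// calling pipeline can capture it and pass it back in as the "token"
+// parameter when installing brokers.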
+
+// InstallBookkeeper TODO
+/**
+ * @description: install bookkeeper
+ * @return {*}
+ */
+func (i *InstallPulsarComp) InstallBookkeeper() (err error) {
+
+	logger.Info("部署Pulsar Bookkeeper开始...")
+	var (
+		zkHost []string = strings.Split(i.Params.ZkHost, ",")
+	)
+
+	dataDir, err := pulsarutil.GetAllDataDir()
+	if err != nil {
+		logger.Error("获取数据目录失败: %s", err.Error())
+		return err
+	}
+	for _, dir := range dataDir {
+		extraCmd := fmt.Sprintf("mkdir -p /%s/pulsardata", dir)
+		if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("创建目录/%s/pulsardata失败: %s", dir, err.Error())
+			return err
+		}
+	}
+
+	// generate bookkeeper.conf
+	bkCfg, err := i.GenConfig(i.Params.BkConfigs)
+	if err != nil {
+		logger.Error("解析dbconfig失败: %s\n%v", err.Error(), i.Params.BkConfigs)
+		return err
+	}
+
+	// substitute variables in bookkeeper.conf
+	bkCfg = strings.ReplaceAll(bkCfg, "{{local_ip}}", i.Params.Host)
+	bkCfg = strings.ReplaceAll(bkCfg, "{{zk_host_list[0]}}", zkHost[0])
+	bkCfg = strings.ReplaceAll(bkCfg, "{{zk_host_list[1]}}", zkHost[1])
+	bkCfg = strings.ReplaceAll(bkCfg, "{{zk_host_list[2]}}", zkHost[2])
+
+	var pulsarDataDir []string
+	for _, dir := range dataDir {
+		pulsarDataDir = append(pulsarDataDir, dir+"/pulsardata")
+	}
+
+	bkCfg = strings.ReplaceAll(bkCfg, "{{pulsar_data_dir}}", strings.Join(pulsarDataDir, ","))
+
+	logger.Info("bookkeerper.conf:\n%s", bkCfg)
+
+	if err = ioutil.WriteFile(cst.DefaultPulsarBkConf, []byte(bkCfg), 0644); err != nil {
+		logger.Error("write %s failed, %v", cst.DefaultPulsarBkConf, err)
+		return err
+	}
+
+	// adjust the heap and direct memory sizes
+	heapSize, directMemSize, err := pulsarutil.GetHeapAndDirectMemInMi()
+	if err != nil {
+		logger.Error("获取Heap Size和DirectMemSize失败: %s", err.Error())
+		return err
+	}
+	extraCmd := fmt.Sprintf(
+		"sed -i \"s/-Xms2g -Xmx2g -XX:MaxDirectMemorySize=2g/-Xms%s -Xmx%s -XX:MaxDirectMemorySize=%s/g\" "+
+			"%s/conf/bkenv.sh", heapSize, heapSize, directMemSize, cst.DefaultPulsarBkDir)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("替换Heap Size和DirectMemSize失败:%s, command: %s", err.Error(), extraCmd)
+		return err
+	}
+
+	logger.Info("生成bookkeeper.ini文件")
+	bkini := pulsarutil.GenBookkeeperIni()
+	bkiniFile := fmt.Sprintf("%s/bookkeeper.ini", cst.DefaultPulsarSupervisorConfDir)
+	if err = ioutil.WriteFile(bkiniFile, bkini, 0644); err != nil {
+		logger.Error("write %s failed, %v", bkiniFile, err)
+		return err
+	}
+
+	if err = pulsarutil.SupervisorctlUpdate(); err != nil {
+		logger.Error("supervisor update failed %v", err)
+		return err
+	}
+
+	// sleep 10s for waiting bk up
+	time.Sleep(10 * time.Second)
+
+	logger.Info("部署Pulsar Bookkeeper结束...")
+
+	return nil
+}
+
+// InstallBroker TODO
+/**
+ * @description: install broker
+ * @return {*}
+ */
+func (i *InstallPulsarComp) InstallBroker() (err error) {
+
+	logger.Info("部署Pulsar Broker开始...")
+	var (
+		zkHost []string = strings.Split(i.Params.ZkHost, ",")
+	)
+
+	// generate broker.conf
+	brokerCfg, err := i.GenConfig(i.Params.BrokerConfigs)
+	if err != nil {
+		logger.Error("解析dbconfig失败: %s\n%v", err.Error(), i.Params.BrokerConfigs)
+		return err
+	}
+
+	// substitute variables in broker.conf
+	secretKeyDir := fmt.Sprintf("file://%s/my-secret.key", cst.DefaultPulsarBrokerDir)
+	brokerCfg = strings.ReplaceAll(brokerCfg, "{{secret_key_dir}}", secretKeyDir)
+	brokerCfg = strings.ReplaceAll(brokerCfg, "{{cluster_name}}", i.Params.ClusterName)
+	brokerCfg = strings.ReplaceAll(brokerCfg, "{{zk_host_list[0]}}", zkHost[0])
+	brokerCfg = strings.ReplaceAll(brokerCfg, "{{zk_host_list[1]}}", zkHost[1])
+	brokerCfg = strings.ReplaceAll(brokerCfg, "{{zk_host_list[2]}}", zkHost[2])
+	brokerCfg = strings.ReplaceAll(brokerCfg, "{{partitions}}", strconv.Itoa(i.Params.Partitions))
+	brokerCfg = strings.ReplaceAll(brokerCfg, "{{retention_time}}", strconv.Itoa(i.Params.RetentionTime))
+	brokerCfg = strings.ReplaceAll(brokerCfg, "{{ensemble_size}}", strconv.Itoa(i.Params.EnsembleSize))
+	brokerCfg = strings.ReplaceAll(brokerCfg, "{{write_quorum}}", strconv.Itoa(i.Params.WriteQuorum))
+	brokerCfg = strings.ReplaceAll(brokerCfg, "{{ack_quorum}}", strconv.Itoa(i.Params.AckQuorum))
+	brokerCfg = strings.ReplaceAll(brokerCfg, "{{local_ip}}", i.Params.Host)
+	brokerCfg = strings.ReplaceAll(brokerCfg, "{{token}}", i.Params.Token)
+
+	logger.Info("broker.conf:\n%s", brokerCfg)
+
+	if err = ioutil.WriteFile(cst.DefaultPulsarBrokerConf, []byte(brokerCfg), 0644); err != nil {
+		logger.Error("write %s failed, %v", cst.DefaultPulsarBrokerConf, err)
+		return err
+	}
+
+	// adjust the heap and direct memory sizes
+	heapSize, directMemSize, err := pulsarutil.GetHeapAndDirectMemInMi()
+	if err != nil {
+		logger.Error("获取Heap Size和DirectMemSize失败: %s", err.Error())
+		return err
+	}
+	extraCmd := fmt.Sprintf(
+		"sed -i \"s/-Xms2g -Xmx2g -XX:MaxDirectMemorySize=2g/-Xms%s -Xmx%s -XX:MaxDirectMemorySize=%s/g\" %s/conf/pulsar_env.sh", heapSize, heapSize, directMemSize, cst.DefaultPulsarBrokerDir)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("替换Heap Size和DirectMemSize失败:%s, command: %s", err.Error(), extraCmd)
+		return err
+	}
+
+	logger.Info("生成broker.ini文件")
+	brokerini := pulsarutil.GenBrokerIni()
+	brokeriniFile := fmt.Sprintf("%s/broker.ini", cst.DefaultPulsarSupervisorConfDir)
+	if err = ioutil.WriteFile(brokeriniFile, brokerini, 0644); err != nil {
+		logger.Error("write %s failed, %v", brokeriniFile, err)
+		return err
+	}
+
+	if err = pulsarutil.SupervisorctlUpdate(); err != nil {
+		logger.Error("supervisor update failed %v", err)
+		return err
+	}
+
+	// update client.conf
+	logger.Info("开始更新client.conf")
+	extraCmd = fmt.Sprintf("sed -i '/^authParams=/s/authParams=/authParams=%s/g' %s/conf/client.conf",
+		i.Params.Token, cst.DefaultPulsarBrokerDir)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("修改client.conf token失败:%s, command: %s", err.Error(), extraCmd)
+		return err
+	}
+
+	logger.Info("部署Pulsar Broker结束...")
+
+	return nil
+}
+
+// StartBroker TODO
+/**
+ * @description: start broker
+ * @return {*}
+ */
+func (i *InstallPulsarComp) StartBroker() (err error) {
+	logger.Info("生成broker.ini文件")
+	brokerini := pulsarutil.GenBrokerIni()
+	brokeriniFile := fmt.Sprintf("%s/broker.ini", cst.DefaultPulsarSupervisorConfDir)
+	if err = ioutil.WriteFile(brokeriniFile, brokerini, 0644); err != nil {
+		logger.Error("write %s failed, %v", brokeriniFile, err)
+		return err
+	}
+
+	if err = pulsarutil.SupervisorctlUpdate(); err != nil {
+		logger.Error("supervisor update failed %v", err)
+		return err
+	}
+
+	// sleep 10s for waiting broker up
+	time.Sleep(10 * time.Second)
+
+	return nil
+}
+
+// DecompressPulsarPkg TODO
+/**
+ * @description: verify and decompress the pulsar package
+ * @return {*}
+ */
+func (i *InstallPulsarComp) DecompressPulsarPkg() (err error) {
+	if err = os.Chdir(i.PulsarenvDir); err != nil {
+		return fmt.Errorf("cd to dir %s failed, err:%w", i.PulsarenvDir, err)
+	}
+	// if the pulsar dir under /data/pulsarenv already exists, remove it first
+	if util.FileExists(i.PulsarDir) {
+		if _, err = osutil.ExecShellCommand(false, "rm -rf "+i.PulsarDir); err != nil {
+			logger.Error("rm -rf %s error: %v", i.PulsarDir, err)
+			return err
+		}
+	}
+	pkgAbPath := fmt.Sprintf("%s/pulsarpack-%s.tar.gz", i.PkgDir, i.Params.PulsarVersion)
+	if output, err := osutil.ExecShellCommand(false, fmt.Sprintf("tar zxf %s", pkgAbPath)); err != nil {
+		logger.Error("tar zxf %s error:%s,%s", pkgAbPath, output, err.Error())
+		return err
+	}
+
+	logger.Info("pulsar binary directory: %s", i.PulsarenvDir)
+	if _, err := os.Stat(i.PulsarenvDir); err != nil {
+		logger.Error("%s check failed, %v", i.PulsarenvDir, err)
+		return err
+	}
+	logger.Info("decompress pulsar pkg successfully")
+
+	if i.Params.Role == "zookeeper" {
+		zkBaseDir := fmt.Sprintf("%s/apache-pulsar-%s", cst.DefaultPulsarEnvDir, i.Params.PulsarVersion)
+		extraCmd := fmt.Sprintf("ln -sf %s %s ", zkBaseDir, cst.DefaultPulsarZkDir)
+		if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("link failed, %s, %s", output, err.Error())
+			return err
+		}
+	} else if i.Params.Role == "bookkeeper" {
+		bkBaseDir := fmt.Sprintf("%s/apache-pulsar-%s", cst.DefaultPulsarEnvDir, i.Params.PulsarVersion)
+		extraCmd := fmt.Sprintf("ln -sf %s %s ", bkBaseDir, cst.DefaultPulsarBkDir)
+		if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("link failed, %s, %s", output, err.Error())
+			return err
+		}
+	} else if i.Params.Role == "broker" {
+		brokerBaseDir := fmt.Sprintf("%s/apache-pulsar-%s", cst.DefaultPulsarEnvDir, i.Params.PulsarVersion)
+		extraCmd := fmt.Sprintf("ln -sf %s %s ", brokerBaseDir, cst.DefaultPulsarBrokerDir)
+		if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("link failed, %s, %s", output, err.Error())
+			return err
+		}
+	}
+	logger.Info("link successfully")
+
+	return nil
+}
+
+// InstallSupervisor TODO
+/**
+ * @description: install supervisor
+ * @return {*}
+ */
+func (i *InstallPulsarComp) InstallSupervisor() (err error) {
+
+	if !util.FileExists(cst.DefaultPulsarSupervisorConfDir) {
+		err = fmt.Errorf("supervisor conf dir %s does not exist", cst.DefaultPulsarSupervisorConfDir)
+		logger.Error("%v", err)
+		return err
+	}
+
+	extraCmd := fmt.Sprintf("ln -sf %s %s", i.PulsarenvDir+"/"+"supervisor/conf/supervisord.conf", "/etc/supervisord.conf")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.PulsarenvDir+"/"+"supervisor/bin/supervisorctl",
+		"/usr/local/bin/supervisorctl")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("ln -sf %s %s", i.PulsarenvDir+"/"+"python/bin/supervisord", "/usr/local/bin/supervisord")
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = fmt.Sprintf("chown -R mysql:mysql %s ", i.PulsarenvDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// crontab
+	extraCmd = `crontab  -l -u mysql >/home/mysql/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+	}
+
+	extraCmd = `cp /home/mysql/crontab.bak /home/mysql/crontab.tmp`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = `sed -i '/check_supervisord.sh/d' /home/mysql/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd =
+		`echo '*/1 * * * *  /data/pulsarenv/supervisor/check_supervisord.sh >> /data/pulsarenv/supervisor/check_supervisord.err 2>&1' >>/home/mysql/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	extraCmd = `crontab -u mysql /home/mysql/crontab.bak`
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	startCmd := `su - mysql -c "/usr/local/bin/supervisord -c /data/pulsarenv/supervisor/conf/supervisord.conf"`
+	logger.Info(fmt.Sprintf("execute supervisor [%s] begin", startCmd))
+	pid, err := osutil.RunInBG(false, startCmd)
+	logger.Info(fmt.Sprintf("execute supervisor [%s] end, pid: %d", startCmd, pid))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// GenConfig renders a raw JSON config object into key=value lines
+func (i *InstallPulsarComp) GenConfig(message json.RawMessage) (string, error) {
+	cfgMap := make(map[string]interface{})
+	strResult := ""
+	err := json.Unmarshal(message, &cfgMap)
+	if err != nil {
+		logger.Error("%s cannot resolve to map", string(message))
+		return strResult, err
+	}
+	for k, v := range cfgMap {
+		strResult = fmt.Sprintf("%s\n%s=%v", strResult, k, v)
+	}
+
+	return strResult, err
+}
+
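+// Editorial sketch (not part of the original patch): GenConfig flattens the
+// raw JSON object from dbconfig into "key=value" lines. For example, the raw
+// message {"clientPort": 2181, "tickTime": 2000} renders as:
+//
+//	clientPort=2181
+//	tickTime=2000
+//
+// Map iteration order is not stable, so the emitted line order may vary.
+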
+// InstallPulsarManager TODO
+func (i *InstallPulsarComp) InstallPulsarManager() (err error) {
+
+	if !util.FileExists(cst.DefaultPulsarManagerDir) {
+		err = fmt.Errorf("pulsar-manager dir %s does not exist", cst.DefaultPulsarManagerDir)
+		logger.Error("%v", err)
+		return err
+	}
+
+	logger.Info("部署Pulsar Manager开始...")
+	// update application.properties
+	extraCmd := fmt.Sprintf(
+		"sed -i \"s/backend.broker.pulsarAdmin.authParams=/backend.broker.pulsarAdmin.authParams=%s/g\" %s", i.Params.Token, cst.DefaultPulsarManagerConf)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("修改backend.broker.pulsarAdmin.authParams失败: %s, command: %s", err.Error(), extraCmd)
+		return err
+	}
+	// set the default environment
+	extraCmd = fmt.Sprintf("sed -i \"s/default.environment.name=/default.environment.name=%s/g\" %s", i.Params.ClusterName,
+		cst.DefaultPulsarManagerConf)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("修改default.environment.name失败: %s, command: %s", err.Error(), extraCmd)
+		return err
+	}
+	extraCmd = fmt.Sprintf(
+		"sed -i \"s/default.environment.service_url=/default.environment.service_url=http:\\/\\/%s:%d/g\" %s", i.Params.Domain, i.Params.BrokerWebServicePort, cst.DefaultPulsarManagerConf)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("修改default.environment.name失败: %s, command: %s", err.Error(), extraCmd)
+		return err
+	}
+
+	// replace the reverse-proxy sub-path in the UI
+	extraCmd = fmt.Sprintf("sed -i \"s#{{nginx_sub_path}}#%s#g\" `grep \"{{nginx_sub_path}}\" -rl %s`",
+		i.Params.NginxSubPath, cst.DefaultPulsarManagerDir)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("修改nginx子路径失败: %s, command: %s", err.Error(), extraCmd)
+		return err
+	}
+
+	logger.Info("生成pulsar-manager.ini文件")
+	pulsarManagerIni := pulsarutil.GenPulsarManagerIni()
+	pulsarManagerIniFile := fmt.Sprintf("%s/pulsar-manager.ini", cst.DefaultPulsarSupervisorConfDir)
+	if err = ioutil.WriteFile(pulsarManagerIniFile, pulsarManagerIni, 0644); err != nil {
+		logger.Error("write %s failed, %v", pulsarManagerIniFile, err)
+		return err
+	}
+
+	if err = pulsarutil.SupervisorctlUpdate(); err != nil {
+		logger.Error("supervisor update failed %v", err)
+		return err
+	}
+
+	logger.Info("等待60秒....")
+	if _, err := osutil.ExecShellCommand(false, "sleep 60"); err != nil {
+		logger.Error("等待60s失败, %s", err.Error())
+		return err
+	}
+
+	logger.Info("部署Pulsar Manager结束...")
+
+	return nil
+}
+
+// InitPulsarManager  TODO
+/**
+ * @description: initialize Pulsar Manager
+ * @return {*}
+ */
+func (i *InstallPulsarComp) InitPulsarManager() (err error) {
+	logger.Info("初始化Pulsar Manager开始...")
+	logger.Info("生成csrf token")
+	extraCmd := fmt.Sprintf("curl http://localhost:7750/pulsar-manager/csrf-token -s")
+	csrfToken := ""
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("get csrf token failed, %s, %s", output, err.Error())
+		return err
+	} else {
+		csrfToken = output
+	}
+
+	logger.Info("设置pulsar manager 用户名、密码")
+	extraCmd = fmt.Sprintf(
+		`curl -H "X-XSRF-TOKEN: %s" -H "Cookie: XSRF-TOKEN=%s;" -H "Content-Type: application/json" -X PUT "http://localhost:7750/pulsar-manager/users/superuser" -s -d '{"name": "%s", "password": "%s", "description": "admin", "email": "username@test.org"}'`, csrfToken, csrfToken, i.Params.Username, i.Params.Password)
+
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("set username/password failed, %s, %s", output, err.Error())
+		return err
+	}
+
+	logger.Info("初始化Pulsar Manager结束...")
+	return nil
+}
+
+// AddHostsFile  TODO
+/**
+ * @description: add hosts file entries; for internal testing only
+ * @return {*}
+ */
+func (i *InstallPulsarComp) AddHostsFile() (err error) {
+	logger.Info("增加hosts文件配置开始....")
+	logger.Info("原始hosts文件: ")
+	extraCmd := fmt.Sprintf("cat /etc/hosts")
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("cat /etc/hosts failed, %s, %s", output, err.Error())
+		return err
+	} else {
+		logger.Info("%s", output)
+	}
+
+	logger.Info("备份hosts文件")
+	extraCmd = fmt.Sprintf("cp /etc/hosts /etc/hosts.backup")
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s exec failed, %s", extraCmd, err.Error())
+		return err
+	}
+
+	logger.Info("清理历史测试信息")
+	extraCmd = fmt.Sprintf("sed -i \"/# dba test for dbm/d\" /etc/hosts")
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s exec failed, %s", extraCmd, err.Error())
+		return err
+	}
+
+	logger.Info("写入host文件")
+	hostMap := make(map[string]string)
+
+	err = json.Unmarshal(i.Params.HostMap, &hostMap)
+	if err != nil {
+		logger.Error("%s cannot resolve to map", string(i.Params.HostMap))
+		return err
+	}
+	strResult := ""
+	for k, v := range hostMap {
+		strResult = fmt.Sprintf("%s\n%s %s # dba test for dbm", strResult, k, v)
+	}
+	logger.Info("写入host内容:%s", strResult)
+	extraCmd = fmt.Sprintf("echo \"%s\" >> /etc/hosts", strResult)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s exec failed, %s", extraCmd, err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// ModifyHostsFile  TODO
+/**
+ * @description: modify hosts file entries; for internal testing only
+ * @return {*}
+ */
+func (i *InstallPulsarComp) ModifyHostsFile() (err error) {
+	logger.Info("修改hosts文件配置开始....")
+	logger.Info("原始hosts文件: ")
+	extraCmd := fmt.Sprintf("cat /etc/hosts")
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("cat /etc/hosts failed, %s, %s", output, err.Error())
+		return err
+	} else {
+		logger.Info("%s", output)
+	}
+
+	logger.Info("修改host文件")
+	hostMap := make(map[string]string)
+
+	err = json.Unmarshal(i.Params.HostMap, &hostMap)
+	if err != nil {
+		logger.Error("%s cannot resolve to map", string(i.Params.HostMap))
+		return err
+	}
+	for _, v := range hostMap {
+		extraCmd = fmt.Sprintf("sed -i \"/%s/d\" /etc/hosts", v)
+		if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("%s exec failed, %s", extraCmd, err.Error())
+			return err
+		}
+	}
+
+	strResult := ""
+	for k, v := range hostMap {
+		strResult = fmt.Sprintf("%s\n%s %s # dba test for dbm", strResult, k, v)
+	}
+	logger.Info("写入host内容:%s", strResult)
+	extraCmd = fmt.Sprintf("echo \"%s\" >> /etc/hosts", strResult)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s exec failed, %s", extraCmd, err.Error())
+		return err
+	}
+
+	return nil
+}
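+
+// Editorial sketch (not part of the original patch): the host_map payload
+// consumed by AddHostsFile/ModifyHostsFile, assuming keys are IPs and values
+// are domains (hypothetical values):
+//
+//	{"host_map": {"10.0.0.4": "pulsar.example.db"}}
+//
+// which appends "10.0.0.4 pulsar.example.db # dba test for dbm" to /etc/hosts.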
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/pulsar.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/pulsar.go
new file mode 100644
index 0000000000..1fcb7e6ad2
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/pulsar.go
@@ -0,0 +1,2 @@
+// Package pulsar TODO
+package pulsar
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/startstop_process.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/startstop_process.go
new file mode 100644
index 0000000000..6e94562cc2
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/pulsar/startstop_process.go
@@ -0,0 +1,94 @@
+package pulsar
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/components"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+)
+
+// StartStopProcessComp TODO
+type StartStopProcessComp struct {
+	GeneralParam    *components.GeneralParam
+	Params          *ProcessParams
+	RollBackContext rollback.RollBackObjects
+}
+
+// ProcessParams TODO
+type ProcessParams struct {
+	Role string `json:"role"`
+}
+
+// Init TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) Init() (err error) {
+	logger.Info("Destory cluster fake init")
+	return nil
+}
+
+// StopProcess TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) StopProcess() (err error) {
+
+	// stop the process for the given role
+	role := d.Params.Role
+
+	extraCmd := fmt.Sprintf("supervisorctl stop %s", role)
+	logger.Info("停止进程, [%s]", extraCmd)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
+
+// StartProcess TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) StartProcess() (err error) {
+
+	// start the process for the given role
+	role := d.Params.Role
+	extraCmd := fmt.Sprintf("supervisorctl start %s", role)
+	logger.Info("启动进程, [%s]", extraCmd)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
+
+// RestartProcess TODO
+/**
+ *  @description:
+ *  @return
+ */
+func (d *StartStopProcessComp) RestartProcess() (err error) {
+
+	// stop the process for the given role
+	role := d.Params.Role
+	extraCmd := fmt.Sprintf("supervisorctl stop %s", role)
+	logger.Info("停止进程, [%s]", extraCmd)
+	if _, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+
+	// start the process for the given role
+	extraCmd = fmt.Sprintf("supervisorctl start %s", role)
+	logger.Info("启动进程, [%s]", extraCmd)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/sysinit/essysinit.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/sysinit/essysinit.go
new file mode 100644
index 0000000000..6db87beb97
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/sysinit/essysinit.go
@@ -0,0 +1,46 @@
+package sysinit
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"io/ioutil"
+)
+
+// EsSysInitParam TODO
+// Todo
+type EsSysInitParam struct {
+}
+
+// EsSysInitMachine TODO
+/*
+	Run the system init script (the node-init script used by job),
+	e.g. creating the mysql account
+*/
+func (s *EsSysInitParam) EsSysInitMachine() error {
+	logger.Info("start exec sysinit ...")
+	return ExecEsSysInitScript()
+}
+
+// ExecEsSysInitScript TODO
+// Todo
+func ExecEsSysInitScript() (err error) {
+	data, err := staticembed.SysInitEsScript.ReadFile(staticembed.SysInitEsScriptFileName)
+	if err != nil {
+		logger.Error("read es sysinit script failed %s", err.Error())
+		return err
+	}
+	tmpScriptName := "/tmp/essysinit.sh"
+	if err = ioutil.WriteFile(tmpScriptName, data, 0755); err != nil {
+		logger.Error("write tmp script failed %s", err.Error())
+		return err
+	}
+	command := fmt.Sprintf("/bin/bash -c \"%s\"", tmpScriptName)
+	_, err = osutil.ExecShellCommand(false, command)
+	if err != nil {
+		logger.Error("exec es sysinit script failed %s", err.Error())
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/components/sysinit/sysinit.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/sysinit/sysinit.go
new file mode 100644
index 0000000000..7c3e07953f
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/components/sysinit/sysinit.go
@@ -0,0 +1,54 @@
+// Package sysinit TODO
+package sysinit
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"io/ioutil"
+)
+
+// SysInitParam TODO
+type SysInitParam struct {
+	OsMysqlUser string `json:"user"`
+	OsMysqlPwd  string `json:"pwd"`
+}
+
+/*
+	Run the system init script (the original sysinit.sh),
+	e.g. creating the mysql account
+*/
+
+// SysInitMachine TODO
+func (s *SysInitParam) SysInitMachine() error {
+	logger.Info("start exec sysinit ...")
+	return ExecSysInitScript()
+}
+
+// SetOsPassWordForMysql TODO
+func (s *SysInitParam) SetOsPassWordForMysql() error {
+	logger.Info("start set os pwd ...")
+	return osutil.SetOSUserPassword(s.OsMysqlUser, s.OsMysqlPwd)
+}
+
+// ExecSysInitScript TODO
+func ExecSysInitScript() (err error) {
+	data, err := staticembed.SysInitMySQLScript.ReadFile(staticembed.SysInitMySQLScriptFileName)
+	if err != nil {
+		logger.Error("read sysinit script failed %s", err.Error())
+		return err
+	}
+	tmpScriptName := "/tmp/sysinit.sh"
+	if err = ioutil.WriteFile(tmpScriptName, data, 0755); err != nil {
+		logger.Error("write tmp script failed %s", err.Error())
+		return err
+	}
+	command := fmt.Sprintf("/bin/bash -c \"%s\"", tmpScriptName)
+	_, err = osutil.ExecShellCommand(false, command)
+	if err != nil {
+		logger.Error("exec sysinit script failed %s", err.Error())
+		return err
+	}
+	return nil
+}
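+
+// Editorial sketch (not part of the original patch): the expected call
+// sequence for machine initialization; parameter values are hypothetical.
+//
+//	p := SysInitParam{OsMysqlUser: "mysql", OsMysqlPwd: "***"}
+//	if err := p.SysInitMachine(); err != nil {
+//		return err
+//	}
+//	return p.SetOsPassWordForMysql()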
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/codes/codes.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/codes/codes.go
new file mode 100644
index 0000000000..478a7aee7e
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/codes/codes.go
@@ -0,0 +1,62 @@
+// Package codes TODO
+package codes
+
+/*
+@description: error codes and their corresponding error types
+@rules:
+1. initialization error codes use               30000-39999
+2. operating system error codes use             40000-49999
+3. MySQL/Redis/Mongo instance operation codes   50000-59999
+*/
+
+const (
+	// Unauthorized TODO
+	Unauthorized = 10001
+	// UnmarshalFailed TODO
+	UnmarshalFailed = 10002
+	// NotExistMountPoint TODO
+	NotExistMountPoint = 20001
+	// NotExistUser TODO
+	NotExistUser = 20002
+	// PermissionDeny TODO
+	PermissionDeny = 20003
+
+	// RenderConfigFailed TODO
+	RenderConfigFailed = 30001
+	// InitParamFailed TODO
+	InitParamFailed = 30002
+	// InitMySQLDirFailed TODO
+	InitMySQLDirFailed = 30003
+
+	// InstallMySQLFailed TODO
+	InstallMySQLFailed = 40001
+	// ExecuteShellFailed TODO
+	ExecuteShellFailed = 40002
+	// DecompressPkgFailed TODO
+	DecompressPkgFailed = 40003
+	// StartMySQLFailed TODO
+	StartMySQLFailed = 40004
+	// NotAvailableMem TODO
+	NotAvailableMem = 40005
+
+	// ImportPrivAndSchemaFailed TODO
+	ImportPrivAndSchemaFailed = 50001
+)
+
+// ErrorCodes TODO
+var ErrorCodes = map[int]string{
+	Unauthorized:              "没有进行用户认证",
+	UnmarshalFailed:           "反序列化失败",
+	NotExistMountPoint:        "没有可用的挂载点",
+	NotExistUser:              "用户不存在",
+	PermissionDeny:            "权限不足",
+	RenderConfigFailed:        "初始化配置失败",
+	InitParamFailed:           "初始化参数失败",
+	InitMySQLDirFailed:        "初始化MySQL目录失败",
+	InstallMySQLFailed:        "安装实例失败",
+	ExecuteShellFailed:        "执行Shell脚本失败",
+	DecompressPkgFailed:       "解压文件失败",
+	StartMySQLFailed:          "启动MySQL失败",
+	NotAvailableMem:           "内存不可用",
+	ImportPrivAndSchemaFailed: "导入权限和库失败",
+}
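+
+// Editorial sketch (not part of the original patch): mapping a code to its
+// message when building an error response.
+//
+//	msg, ok := ErrorCodes[DecompressPkgFailed]
+//	if !ok {
+//		msg = "unknown error"
+//	}
+//	err := fmt.Errorf("code=%d, msg=%s", DecompressPkgFailed, msg)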
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/config/base.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/config/base.go
new file mode 100644
index 0000000000..dddbe77d0b
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/config/base.go
@@ -0,0 +1,20 @@
+package config
+
+// GRPC TODO
+var GRPC struct {
+	Addr string `mapstructure:"addr"`
+	Port int    `mapstructure:"port"`
+}
+
+// HTTP TODO
+var HTTP struct {
+	Addr string `mapstructure:"addr"`
+	Port int    `mapstructure:"port"`
+}
+
+// BKCONFIG TODO
+var BKCONFIG struct {
+	Addr   string `mapstructure:"addr"`
+	Path   string `mapstructure:"path"`
+	Method string `mapstructure:"method"`
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/config/init.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/config/init.go
new file mode 100644
index 0000000000..a2f178a925
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/config/init.go
@@ -0,0 +1,33 @@
+// Package config TODO
+/*
+ * @Date: 2022-04-21 14:50:43
+ * @LastEditTime: 2022-04-21 14:50:43
+ * @Description:
+ * @FilePath: /bk-dbactuator/pkg/core/config/init.go
+ */
+package config
+
+import (
+	"fmt"
+	"os"
+	"reflect"
+	"sync"
+)
+
+var (
+	_CONFIGS sync.Map
+)
+
+// Register registers a config item to be parsed into a struct.
+// * key: the config item path.
+// * ptrStruct: a pointer to the config struct.
+func Register(key string, ptrStruct interface{}) {
+	if reflect.TypeOf(ptrStruct).Kind() != reflect.Ptr {
+		fmt.Fprintf(os.Stderr, "config.Register need pointer of struct.\n")
+		os.Exit(1)
+	}
+
+	_CONFIGS.Store(key, ptrStruct)
+}
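+
+// Editorial sketch (not part of the original patch): registering one of the
+// package-level config structs from base.go so a loader can later unmarshal
+// the "http" section into it. The key name is hypothetical.
+//
+//	func init() {
+//		Register("http", &HTTP)
+//	}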
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/const.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/const.go
new file mode 100644
index 0000000000..b79a263c6d
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/const.go
@@ -0,0 +1,22 @@
+package cst
+
+const (
+	// Environment TODO
+	Environment = "enviroment"
+	// Test TODO
+	Test = "test"
+)
+
+const (
+	// TIMELAYOUT TODO
+	TIMELAYOUT = "2006-01-02 15:04:05"
+	// TIMELAYOUTSEQ TODO
+	TIMELAYOUTSEQ = "2006-01-02_15:04:05"
+	// TimeLayoutDir TODO
+	TimeLayoutDir = "20060102150405"
+)
+
+const (
+	// BK_PKG_INSTALL_PATH 默认文件下发路径
+	BK_PKG_INSTALL_PATH = "/data/install"
+)
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/cst.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/cst.go
new file mode 100644
index 0000000000..f558488b06
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/cst.go
@@ -0,0 +1,2 @@
+// Package cst TODO
+package cst
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/es.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/es.go
new file mode 100644
index 0000000000..0cf9bd8eab
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/es.go
@@ -0,0 +1,55 @@
+package cst
+
+const (
+	// DefaulEsDataDir TODO
+	DefaulEsDataDir = "/data/esdata"
+	// DefaultInstallDir TODO
+	DefaultInstallDir = "/data"
+	// DefaultHttpPort TODO
+	DefaultHttpPort = 9200 // default port
+	// DefaulEsLogDir TODO
+	DefaulEsLogDir = "/data/eslog"
+	// DefaulEsEnv TODO
+	DefaulEsEnv = "/data/esenv" // es安装包存放目录
+	// DefaultEsDir TODO
+	DefaultEsDir = DefaulEsEnv + "/es_1"
+	// DefaultSupervisorConf TODO
+	DefaultSupervisorConf = DefaulEsEnv + "/supervisor/conf"
+	// DefaultJvmOptionD TODO
+	DefaultJvmOptionD = DefaultEsDir + "/config/jvm.options.d"
+	// DefaultEsConfigFile TODO
+	DefaultEsConfigFile = DefaultEsDir + "/config/elasticsearch.yml"
+	// DefaultExecUser TODO
+	DefaultExecUser = "mysql"
+	// DefaultInfluxdbExecUser TODO
+	DefaultInfluxdbExecUser = "influxdb"
+	// DefaultPkgDir TODO
+	DefaultPkgDir = "/data/install" // 介质存放目录
+	// EsHot TODO
+	EsHot = "hot"
+	// EsCold TODO
+	EsCold = "cold"
+	// EsMaster TODO
+	EsMaster = "master"
+	// EsClient TODO
+	EsClient = "client"
+	// IsXpackMoinitorEnabled TODO
+	IsXpackMoinitorEnabled = false
+	// IsXpackSecurityEnabled TODO
+	IsXpackSecurityEnabled = false
+	// IsNodeIngest TODO
+	IsNodeIngest = true
+	// IsNodeMl TODO
+	IsNodeMl = false
+	// IsBootstrapMemoryLock TODO
+	IsBootstrapMemoryLock = false
+	// IsBootstrapSystemCall TODO
+	IsBootstrapSystemCall = false
+)
+
+// KibanaWhiteList TODO
+var (
+	KibanaWhiteList = []string{"securitytenant", "Authorization"}
+	Kibanatenancy   = []string{"Private", "Global"}
+	KibanaRole      = []string{"kibana_read_only"}
+)
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/influxdb.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/influxdb.go
new file mode 100644
index 0000000000..ed9603075e
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/influxdb.go
@@ -0,0 +1,16 @@
+package cst
+
+const (
+	// DefaultInfluxdbDataDir TODO
+	DefaultInfluxdbDataDir = "/data/influxdbdata"
+	// DefaultInfluxdbPort TODO
+	DefaultInfluxdbPort = 9092 // default port
+	// DefaultInfluxdbEnv TODO
+	DefaultInfluxdbEnv = "/data/influxdbenv" // directory for influxdb packages
+	// DefaultInfluxdbLogDir TODO
+	DefaultInfluxdbLogDir = "/data/influxdblog"
+	// DefaultInfluxdbDir TODO
+	DefaultInfluxdbDir = DefaultInfluxdbEnv + "/influxdb"
+	// DefaultInfluxdbSupervisorConf TODO
+	DefaultInfluxdbSupervisorConf = DefaultInfluxdbEnv + "/supervisor/conf"
+)
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/kafka.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/kafka.go
new file mode 100644
index 0000000000..bceed3f106
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/kafka.go
@@ -0,0 +1,36 @@
+package cst
+
+const (
+	// DefaultKafkaDataDir default kafka data directory
+	DefaultKafkaDataDir = "/data/kafkadata"
+	// DefaultKafkaPort default kafka port
+	DefaultKafkaPort = 9092
+	// DefaultKafkaEnv directory that holds the kafka packages
+	DefaultKafkaEnv = "/data/kafkaenv"
+	// DefaultKafkaLogDir default kafka log directory
+	DefaultKafkaLogDir = "/data/kafkalog"
+	// DefaultZookeeperLogDir default zookeeper log directory
+	DefaultZookeeperLogDir = "/data/zklog"
+	// DefaultKafkaDir kafka install directory
+	DefaultKafkaDir = DefaultKafkaEnv + "/kafka"
+	// DefaultZookeeperDir zookeeper install directory
+	DefaultZookeeperDir = DefaultKafkaEnv + "/zk"
+	// DefaultZookeeperLogsDir zookeeper logs directory
+	DefaultZookeeperLogsDir = DefaultKafkaEnv + "/zookeeper/logs"
+	// DefaultZookeeperDataDir zookeeper data directory
+	DefaultZookeeperDataDir = DefaultKafkaEnv + "/zookeeper/data"
+	// DefaultZookeeperConfDir zookeeper config directory
+	DefaultZookeeperConfDir = DefaultKafkaEnv + "/zookeeper/conf"
+	// DefaultZookeeperDynamicConf zookeeper dynamic membership config file
+	DefaultZookeeperDynamicConf = DefaultZookeeperConfDir + "/zoo.cfg.dynamic"
+	// DefaultKafkaSupervisorConf supervisor config directory
+	DefaultKafkaSupervisorConf = DefaultKafkaEnv + "/supervisor/conf"
+	// DefaultZookeeperVersion default zookeeper version
+	DefaultZookeeperVersion = "3.6.3"
+	// DefaultZookeeperShell zookeeper-shell.sh path
+	DefaultZookeeperShell = DefaultKafkaDir + "/bin/zookeeper-shell.sh"
+	// DefaultTopicBin kafka-topics.sh path
+	DefaultTopicBin = DefaultKafkaDir + "/bin/kafka-topics.sh"
+	// DefaultReassignPartitionsBin kafka-reassign-partitions.sh path
+	DefaultReassignPartitionsBin = DefaultKafkaDir + "/bin/kafka-reassign-partitions.sh"
+)
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/mysql.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/mysql.go
new file mode 100644
index 0000000000..b8e5b15e18
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/mysql.go
@@ -0,0 +1,73 @@
+package cst
+
+const (
+	// UsrLocal /usr/local
+	UsrLocal = "/usr/local"
+	// MysqldInstallPath mysqld install directory
+	MysqldInstallPath = "/usr/local/mysql"
+	// DefaultMysqlLogRootPath default root path for mysql logs
+	DefaultMysqlLogRootPath = "/data"
+	// AlterNativeMysqlLogRootPath alternative root path for mysql logs
+	AlterNativeMysqlLogRootPath = "/data1"
+	// DefaultMysqlLogBasePath base directory name for mysql logs
+	DefaultMysqlLogBasePath = "mysqllog"
+	// DefaultMysqlDataRootPath default root path for mysql data
+	DefaultMysqlDataRootPath = "/data1"
+	// AlterNativeMysqlDataRootPath alternative root path for mysql data
+	AlterNativeMysqlDataRootPath = "/data"
+	// DefaultMysqlDataBasePath base directory name for mysql data
+	DefaultMysqlDataBasePath = "mysqldata"
+	// DefaultBackupBasePath base directory name for backups
+	DefaultBackupBasePath = "dbbak"
+	// DefaultMycnfRootPath default directory for my.cnf files
+	DefaultMycnfRootPath = "/etc"
+	// DefaultMyCnfName default my.cnf path
+	DefaultMyCnfName = "/etc/my.cnf"
+	// DefaultSocketName default mysql socket file name
+	DefaultSocketName = "mysql.sock"
+	// DefaultMySQLPort default mysql port
+	DefaultMySQLPort = 3306
+	// RelayLogFileMatch regexp matching relay log paths
+	RelayLogFileMatch = `(.*)/relay-log.bin`
+	// BinLogFileMatch regexp matching binlog paths
+	BinLogFileMatch = `(.*)/binlog\d*.bin`
+	// DatadirMatch regexp matching mysql datadir paths
+	DatadirMatch = `(.*)/mysqldata/\d+$`
+	// MysqlOsUserName os user that runs mysqld
+	MysqlOsUserName = "mysql"
+	// MysqlOsUserGroup os group of the mysql user
+	MysqlOsUserGroup = "mysql"
+	// MySQLClientPath mysql client binary path
+	MySQLClientPath = "/usr/local/mysql/bin/mysql"
+)
+
+const (
+	// MIR_MASTER meta inner role: master
+	MIR_MASTER = "master"
+	// MIR_SLAVE meta inner role: slave
+	MIR_SLAVE = "slave"
+	// MIR_REPEATER meta inner role: repeater
+	MIR_REPEATER = "repeater"
+	// MIR_ORPHAN meta inner role: instance in a single-node cluster
+	MIR_ORPHAN = "orphan"
+)
+
+// BackupRole values recorded in the backup .info file
+const (
+	BackupRoleMaster = "MASTER"
+	BackupRoleSlave  = "SLAVE"
+)
+
+// Canonical backup type names
+const (
+	TypeGZTAB = "gztab"
+	TypeXTRA  = "xtra"
+)
+
+// LooseBackupTypes maps loose (case-insensitive, non-canonical) backup type
+// names, as they appear in dbbackup.conf backup_type, to the canonical names
+var LooseBackupTypes = map[string][]string{
+	TypeGZTAB: {"GZTAB"},
+	TypeXTRA:  {"XTRA", "xtrabackup"},
+}
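A minimal sketch of normalizing a loose backup type from dbbackup.conf against `LooseBackupTypes`; the helper `normalizeBackupType` is hypothetical, not part of this patch, and the constants are re-declared locally so the sketch is self-contained:

```go
package main

import (
	"fmt"
	"strings"
)

const (
	TypeGZTAB = "gztab"
	TypeXTRA  = "xtra"
)

var LooseBackupTypes = map[string][]string{
	TypeGZTAB: {"GZTAB"},
	TypeXTRA:  {"XTRA", "xtrabackup"},
}

// normalizeBackupType maps a loose, case-insensitive backup_type value to its
// canonical name; the second result reports whether a match was found.
func normalizeBackupType(loose string) (string, bool) {
	for canonical, aliases := range LooseBackupTypes {
		if strings.EqualFold(loose, canonical) {
			return canonical, true
		}
		for _, a := range aliases {
			if strings.EqualFold(loose, a) {
				return canonical, true
			}
		}
	}
	return "", false
}

func main() {
	fmt.Println(normalizeBackupType("Xtrabackup")) // xtra true
}
```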
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/os.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/os.go
new file mode 100644
index 0000000000..2690cce5b3
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/os.go
@@ -0,0 +1,94 @@
+package cst
+
+// status
+const (
+	Unknown      = "unknown"
+	RUNNING      = "RUNNING"
+	UNAVAILABLE  = "UNAVAILABLE"
+	AVAIL        = "AVAIL"
+	LOCKED       = "LOCKED"
+	ALONE        = "ALONE"
+	UNIQ_LOCK    = "UNIQ_LOCK"
+	INITIALIZING = "INITIALIZING"
+	NULL         = "NULL"
+)
+
+const (
+	// Default generic default marker value
+	Default = "default"
+)
+
+// db role
+const (
+	MySQLMaster        = "mysql_master"
+	MySQLLogDB         = "mysql_logdb"
+	MySQLSlave         = "mysql_slave"
+	MySQLMasterSlave   = "mysql_master&mysql_slave"
+	MySQLMasterOrSlave = "mysql_master/mysql_slave"
+	ProxyMaster        = "proxy_master"
+	ProxySlave         = "proxy_slave"
+	ProxyMasterSlave   = "proxy_master&proxy_slave"
+)
+
+// db Category (dbtype); note this is not the same thing as the job-side gamedb/gamedr
+const (
+	Logdb  = "logdb"
+	MySQL  = "MySQL"
+	Proxy  = "Proxy"
+	Spider = "Spider"
+	Dumper = "Dumper"
+)
+
+// switch type
+const (
+	AutoSwitch = "AutoSwitch"
+	HandSwitch = "HandSwitch"
+	NotSwitch  = "NotSwitch"
+)
+
+// switch weight
+const (
+	SwitchWeight0   = "0"
+	SwitchWeight1   = "1"
+	SwitchWeight100 = "100"
+)
+
+// os type
+const (
+	RedHat    = "redhat"
+	Suse      = "suse"
+	Slackware = "slackware"
+)
+
+// bits
+const (
+	Bit64  = "64"
+	Bit32  = "32"
+	OSBits = 32 << uintptr(^uintptr(0)>>63)
+)
+
+// switch
+const (
+	ON  = "ON"
+	OFF = "OFF"
+)
+
+// dbmstype
+const (
+	MySQLCluster = "mysql_cluster"
+	MySQLSingle  = "mysql_single"
+)
+
+// disasterLevel
+const (
+	IDC        = "IDC"
+	City       = "CITY"
+	DiffCampus = "DiffCampus"
+	SameCampus = "SameCampus"
+)
+
+// etcd finished key value
+const (
+	Success = "success"
+	Failed  = "failed"
+)
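The `OSBits` expression above is a common Go compile-time trick: `^uintptr(0)` is a value with all bits set, shifting it right by 63 yields 1 on a 64-bit platform and 0 on a 32-bit one, so the constant evaluates to 64 or 32 at build time. A minimal sketch demonstrating it:

```go
package main

import "fmt"

// Same trick as cst.OSBits: 32 << 1 == 64 on 64-bit platforms,
// 32 << 0 == 32 on 32-bit platforms.
const osBits = 32 << uintptr(^uintptr(0)>>63)

func main() {
	fmt.Printf("this binary was built for a %d-bit platform\n", osBits)
}
```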
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/proxy.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/proxy.go
new file mode 100644
index 0000000000..6af6770dcb
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/proxy.go
@@ -0,0 +1,18 @@
+package cst
+
+// proxy related
+const (
+	// ProxyAdminPortInc increment added to the service port to derive the proxy admin port
+	ProxyAdminPortInc = 1000
+	// ProxyUserMonitorAccessAll monitor account (a comma-separated string array)
+	// added when mysql is initialized
+	ProxyUserMonitorAccessAll = "MONITOR@%"
+	// Proxy install paths and defaults
+	ProxyInstallPath             = "/usr/local/mysql-proxy"
+	DefaultProxyDataRootPath     = "/data"
+	AlterNativeProxyDataRootPath = "/data1"
+	DefaultProxyCnfName          = "/etc/proxy.cnf"
+	DefaultProxyUserCnfName      = "/etc/proxy_user.cnf"
+	DefaultAdminScripyLua        = "/usr/local/mysql-proxy/lib/mysql-proxy/lua/admin.lua"
+	DefaultBackend               = "1.1.1.1:3306"
+	DefaultProxyLogBasePath      = "mysql-proxy"
+)
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/pulsar.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/pulsar.go
new file mode 100644
index 0000000000..2b1b04e4b6
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst/pulsar.go
@@ -0,0 +1,39 @@
+package cst
+
+const (
+	// DefaultPulsarDataDir default pulsar data directory
+	DefaultPulsarDataDir = "/data/pulsardata"
+
+	// DefaultPulsarEnvDir directory that holds the pulsar packages
+	DefaultPulsarEnvDir = "/data/pulsarenv"
+
+	// DefaultPulsarLogDir default pulsar log directory
+	DefaultPulsarLogDir = "/data/pulsarlog"
+
+	// DefaultPulsarSupervisorConfDir supervisor config directory
+	DefaultPulsarSupervisorConfDir = DefaultPulsarEnvDir + "/supervisor/conf"
+
+	// DefaultPulsarZkDir zookeeper install directory
+	DefaultPulsarZkDir = DefaultPulsarEnvDir + "/zookeeper"
+
+	// DefaultPulsarZkConf zookeeper config file
+	DefaultPulsarZkConf = DefaultPulsarZkDir + "/conf/zookeeper.conf"
+
+	// DefaultPulsarBkDir bookkeeper install directory
+	DefaultPulsarBkDir = DefaultPulsarEnvDir + "/bookkeeper"
+
+	// DefaultPulsarBkConf bookkeeper config file
+	DefaultPulsarBkConf = DefaultPulsarBkDir + "/conf/bookkeeper.conf"
+
+	// DefaultPulsarBrokerDir broker install directory
+	DefaultPulsarBrokerDir = DefaultPulsarEnvDir + "/broker"
+
+	// DefaultPulsarBrokerConf broker config file
+	DefaultPulsarBrokerConf = DefaultPulsarBrokerDir + "/conf/broker.conf"
+
+	// DefaultPulsarManagerDir pulsar-manager install directory
+	DefaultPulsarManagerDir = DefaultPulsarEnvDir + "/pulsar-manager/pulsar-manager"
+
+	// DefaultPulsarManagerConf pulsar-manager application.properties
+	DefaultPulsarManagerConf = DefaultPulsarManagerDir + "/application.properties"
+)
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/safego/graceful.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/safego/graceful.go
new file mode 100644
index 0000000000..e25367949c
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/safego/graceful.go
@@ -0,0 +1,35 @@
+package safego
+
+import (
+	"context"
+	"log"
+	"os"
+	"os/signal"
+	"syscall"
+)
+
+// Shutdowner is anything that can be shut down gracefully with a context,
+// e.g. *http.Server
+type Shutdowner interface {
+	Shutdown(context.Context) error
+}
+
+// Graceful blocks until SIGINT or SIGTERM is received, then shuts s down
+// with the given context
+func Graceful(ctx context.Context, s Shutdowner) error {
+	// Wait for an interrupt signal to gracefully shut down the server.
+	quit := make(chan os.Signal, 1)
+
+	// kill (no param) sends syscall.SIGTERM by default
+	// kill -2 sends syscall.SIGINT
+	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
+
+	<-quit
+	log.Printf("Shutting down all...")
+
+	err := s.Shutdown(ctx)
+	if err != nil {
+		// report the failure but let the caller decide how to terminate
+		log.Printf("Forced to shutdown: %v", err)
+	}
+
+	return err
+}
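A minimal usage sketch: `*http.Server` satisfies `Shutdowner` via its `Shutdown(context.Context) error` method, so it can be handed to `Graceful` directly (the listen address is illustrative):

```go
package main

import (
	"context"
	"log"
	"net/http"

	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/safego"
)

func main() {
	srv := &http.Server{Addr: ":8080"}
	go func() {
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Printf("listen: %v", err)
		}
	}()

	// Blocks until SIGINT/SIGTERM, then calls srv.Shutdown(ctx).
	if err := safego.Graceful(context.Background(), srv); err != nil {
		log.Printf("shutdown: %v", err)
	}
}
```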
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/safego/recover.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/safego/recover.go
new file mode 100644
index 0000000000..3be9a60218
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/safego/recover.go
@@ -0,0 +1,34 @@
+package safego
+
+import (
+	"fmt"
+	"runtime/debug"
+
+	"go.uber.org/zap"
+)
+
+// Go runs f in a goroutine and logs (instead of crashing on) any panic
+func Go(f func()) {
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				zap.L().Error(fmt.Sprintf("Panic recovered: %s, stack: %s", r, string(debug.Stack())))
+			}
+		}()
+
+		f()
+	}()
+}
+
+// GoArgs is rarely needed: it runs a function with arbitrary arguments in a
+// panic-safe goroutine; the arguments can only be passed as interface{} and
+// must be type-asserted inside f.
+func GoArgs(f func(...interface{}), args ...interface{}) {
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				zap.L().Error(fmt.Sprintf("Panic recovered: %s, stack: %s", r, string(debug.Stack())))
+			}
+		}()
+
+		f(args...)
+	}()
+}
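A minimal usage sketch; it assumes the global `zap` logger has been initialized (e.g. via `zap.ReplaceGlobals`) so that `zap.L()` inside the recover handler logs somewhere visible:

```go
package main

import (
	"time"

	"go.uber.org/zap"

	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/safego"
)

func main() {
	logger, _ := zap.NewDevelopment()
	zap.ReplaceGlobals(logger)

	// The panic below is recovered and logged; the process keeps running.
	safego.Go(func() {
		panic("boom")
	})

	time.Sleep(100 * time.Millisecond) // give the goroutine time to run
}
```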
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/safego/safego.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/safego/safego.go
new file mode 100644
index 0000000000..5db69ca98b
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/safego/safego.go
@@ -0,0 +1,2 @@
+// Package safego provides panic-safe goroutines and graceful-shutdown helpers
+package safego
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.go
new file mode 100644
index 0000000000..22c7b3e586
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.go
@@ -0,0 +1,11 @@
+package staticembed
+
+import "embed"
+
+// DefaultSysSchemaSQLFileName name of the embedded default sys schema SQL file
+const DefaultSysSchemaSQLFileName = "default_sys_schema.sql"
+
+// DefaultSysSchemaSQL embedded FS holding the default sys schema SQL
+//
+//go:embed default_sys_schema.sql
+var DefaultSysSchemaSQL embed.FS
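A minimal sketch of reading the embedded SQL back out; this is standard `embed.FS` usage, not an API added by this patch, and the statement handling is illustrative:

```go
package main

import (
	"fmt"
	"strings"

	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed"
)

func main() {
	data, err := staticembed.DefaultSysSchemaSQL.ReadFile(staticembed.DefaultSysSchemaSQLFileName)
	if err != nil {
		panic(err)
	}
	// Statements are ';'-separated; a real caller would feed them to mysql.
	stmts := strings.Split(string(data), ";")
	fmt.Printf("embedded schema contains %d statements\n", len(stmts))
}
```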
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.sql b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.sql
new file mode 100644
index 0000000000..382754064a
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.sql
@@ -0,0 +1,51 @@
+CREATE DATABASE if not exists test;
+create table IF NOT EXISTS test.free_space(a int) engine = InnoDB;
+CREATE TABLE if not exists test.conn_log(
+    conn_id bigint default NULL,
+    conn_time datetime default NULL,
+    user_name varchar(128) default NULL,
+    cur_user_name varchar(128) default NULL,
+    ip varchar(15) default NULL,
+    key conn_time(conn_time)
+);
+create database if not exists db_infobase;
+create table if not exists db_infobase.checksum(
+    db char(64) NOT NULL,
+    tbl char(64) NOT NULL,
+    chunk int(11) NOT NULL,
+    boundaries text NOT NULL,
+    this_crc char(40) NOT NULL,
+    this_cnt int(11) NOT NULL,
+    master_crc char(40) default NULL,
+    master_cnt int(11) default NULL,
+    ts timestamp NOT NULL,
+    PRIMARY KEY (db, tbl, chunk)
+);
+replace into db_infobase.checksum values('test', 'test', 0, '1=1', '0', 0, '0', 0, now());
+CREATE TABLE if not exists db_infobase.spes_status(
+    ip varchar(15) default '',
+    spes_id smallint default 0,
+    report_day int default 0,
+    PRIMARY KEY ip_id_day (ip, spes_id, report_day)
+);
+CREATE TABLE IF NOT EXISTS db_infobase.master_slave_check (
+    check_item VARCHAR(64) NOT NULL PRIMARY KEY comment 'check_item to check',
+    master VARCHAR(64) comment 'the check_item status on master',
+    slave VARCHAR(64) comment 'the check_item status on slave',
+    check_result VARCHAR(64) comment 'the different value of master and slave'
+) ENGINE = InnoDB;
+CREATE TABLE IF NOT EXISTS db_infobase.check_heartbeat (
+    uid INT NOT NULL PRIMARY KEY,
+    ck_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
+) ENGINE = InnoDB;
+REPLACE INTO db_infobase.check_heartbeat(uid) value(1);
+INSERT INTO db_infobase.master_slave_check
+values('slave_delay_sec', now(), now(), 0);
+CREATE TABLE IF NOT EXISTS db_infobase.query_response_time(
+    time_min INT(11) NOT NULL DEFAULT '0',
+    time VARCHAR(14) NOT NULL DEFAULT '',
+    total VARCHAR(100) NOT NULL DEFAULT '',
+    update_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    PRIMARY KEY (time_min, time)
+);
+flush logs;
\ No newline at end of file
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/staticembed.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/staticembed.go
new file mode 100644
index 0000000000..9b14946fdb
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/staticembed.go
@@ -0,0 +1,2 @@
+// Package staticembed embeds static init scripts and schema files
+package staticembed
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_es.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_es.go
new file mode 100644
index 0000000000..dd35198905
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_es.go
@@ -0,0 +1,11 @@
+package staticembed
+
+import "embed"
+
+// SysInitEsScriptFileName name of the embedded ES sysinit script
+var SysInitEsScriptFileName = "sysinit_es.sh"
+
+// SysInitEsScript embedded FS holding the ES sysinit script
+//
+//go:embed sysinit_es.sh
+var SysInitEsScript embed.FS
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_es.sh b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_es.sh
new file mode 100644
index 0000000000..de8d77a0fd
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_es.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+anynowtime="date +'%Y-%m-%d %H:%M:%S'"
+NOW="echo [\`$anynowtime\`][PID:$$]"
+
+##### Call at script start to print the current timestamp and PID.
+function job_start
+{
+    echo "`eval $NOW` job_start"
+}
+
+##### Call on the success branch to print the current timestamp and PID.
+function job_success
+{
+    MSG="$*"
+    echo "`eval $NOW` job_success:[$MSG]"
+    exit 0
+}
+
+##### Call on the failure branch to print the current timestamp and PID.
+function job_fail
+{
+    MSG="$*"
+    echo "`eval $NOW` job_fail:[$MSG]"
+    exit 1
+}
+
+job_start
+
+
+# Initialization
+useradd mysql -g root -s /bin/bash -d /home/mysql
+echo  -e "mysql soft memlock unlimited\nmysql hard memlock unlimited" >> /etc/security/limits.conf
+echo -e "vm.max_map_count=262144\nvm.swappiness=1" >> /etc/sysctl.conf ;sysctl -p
+mkdir -p /data/esenv 
+chown -R mysql /data/esenv
+mkdir -p /data/eslog 
+chown -R mysql /data/eslog
+
+cat << 'EOF' > /data/esenv/esprofile
+export JAVA_HOME=/data/esenv/es/jdk
+export CLASSPATH=".:$JAVA_HOME/lib:$JRE/lib:$CLASSPATH"
+export ES_HOME=/data/esenv/es
+export ES_CONF_DIR=$ES_HOME/config
+export PATH=${JAVA_HOME}/bin:${ES_HOME}/bin:${ES_HOME}/sbin:$PATH
+EOF
+
+chown mysql  /data/esenv/esprofile
+
+sed -i '/esprofile/d' /etc/profile
+echo "source /data/esenv/esprofile" >>/etc/profile
+
+job_success "初始化完成"
\ No newline at end of file
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_hdfs.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_hdfs.go
new file mode 100644
index 0000000000..a467721b02
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_hdfs.go
@@ -0,0 +1,11 @@
+package staticembed
+
+import "embed"
+
+// SysInitHdfsScriptFileName name of the embedded HDFS sysinit script
+var SysInitHdfsScriptFileName = "sysinit_hdfs.sh"
+
+// SysInitHdfsScript embedded FS holding the HDFS sysinit script
+//
+//go:embed sysinit_hdfs.sh
+var SysInitHdfsScript embed.FS
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_hdfs.sh b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_hdfs.sh
new file mode 100644
index 0000000000..d9d7af1150
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_hdfs.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+
+echo "
+* - nofile 200000
+* soft nofile 200000
+* hard nofile 200000
+" >> /etc/security/limits.conf
+
+
+echo never >> /sys/kernel/mm/transparent_hugepage/enabled && echo never >>  /sys/kernel/mm/transparent_hugepage/defrag
+echo "
+never >> /sys/kernel/mm/transparent_hugepage/enabled
+never >> /sys/kernel/mm/transparent_hugepage/defrag
+" >> /etc/rc.local
+
+# Set vm.overcommit_memory to 1 and vm.swappiness to 1
+
+echo "
+vm.overcommit_memory=1
+vm.swappiness=1
+net.ipv4.ip_local_port_range=25000 50000
+net.ipv4.tcp_tw_reuse=1
+net.ipv4.tcp_tw_recycle=1
+" >> /etc/sysctl.conf
+
+id hadoop >& /dev/null
+if [ $? -ne 0 ]
+then
+   useradd hadoop -g root -s /bin/bash -d /home/hadoop
+fi
+
+mkdir -p /data/hadoopenv
+chown -R hadoop:root /data/hadoopenv
+mkdir -p /data/hadoopdata
+chown -R hadoop:root /data/hadoopdata
+
+
+cat << 'EOF' > /data/hadoopenv/hdfsProfile
+export JAVA_HOME="/data/hadoopenv/java"
+export CLASSPATH=".:$JAVA_HOME/lib:$JRE/lib:$CLASSPATH"
+export HADOOP_HOME="/data/hadoopenv/hadoop"
+export HADOOP_CONF_DIR="$HADOOP_HOME/etc/hadoop"
+export PATH="${JAVA_HOME}/bin:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH"
+EOF
+
+chown hadoop:root /data/hadoopenv/hdfsProfile
+
+sed -i '/hdfsProfile/d' /etc/profile
+echo "source /data/hadoopenv/hdfsProfile" >>/etc/profile
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.go
new file mode 100644
index 0000000000..1d48cb5e22
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.go
@@ -0,0 +1,11 @@
+package staticembed
+
+import "embed"
+
+// SysInitMySQLScriptFileName name of the embedded mysql sysinit script
+var SysInitMySQLScriptFileName = "sysinit_mysql.sh"
+
+// SysInitMySQLScript embedded FS holding the mysql sysinit script
+//
+//go:embed sysinit_mysql.sh
+var SysInitMySQLScript embed.FS
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.sh b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.sh
new file mode 100644
index 0000000000..77d11db051
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.sh
@@ -0,0 +1,86 @@
+#!/bin/sh
+# Create the mysql user and group
+##
+# mysql scripts 
+##
+# 2007-07-09    created
+##
+# depends: ~/abs/ssh.exp  ~/abs/scp.exp
+function _exit() {
+        rm $0
+        exit
+}
+#chmod o+rx /usr/local/ieod-public/sysinfo  -R
+#chmod o+rx /usr/local/agenttools/agent
+#chmod o+rx /usr/local/agenttools/agent/agentRep* 
+#handler nscd restart
+# Create the mysql group (gid 202) if it does not exist yet
+egrep "^mysql" /etc/group >& /dev/null
+if [ $? -ne 0 ]
+then
+groupadd mysql -g 202
+fi
+# userdel would be risky on machines that may already be in service, so an
+# existing mysql user is kept as-is (its uid may not be 30019 and its home
+# directory may be missing); create the user only if it does not exist
+id mysql >& /dev/null
+if [ $? -ne 0 ]
+then
+        useradd -m -d /home/mysql -g 202 -G users -u 30019 mysql
+        chage -M 99999 mysql
+        if [ ! -d /home/mysql ]; 
+        then
+                mkdir -p /home/mysql
+        fi
+        chmod 755 /home/mysql
+        usermod -d /home/mysql mysql
+fi
+# If the mysql user already existed, the useradd above fails without creating
+# /home/mysql; make sure the data directories and /home/mysql exist below
+if [ ! -d /data ];
+then
+	mkdir -p /data1/data/
+	ln -s /data1/data/ /data
+fi
+if [ ! -d /data1 ];
+then
+	mkdir -p /data/data1/
+	ln -s /data/data1 /data1
+fi
+mkdir -p /data1/dbha
+chown -R mysql /data1/dbha
+mkdir -p /data/dbha
+chown -R mysql /data/dbha
+#mkdir -p /home/mysql/install
+#chown -R mysql /home/mysql
+#chmod -R a+rwx /home/mysql/install
+mkdir -p /data/install
+chown -R mysql /home/mysql
+chown -R mysql /data/install
+chmod -R a+rwx /data/install
+rm -rf /home/mysql/install
+ln -s /data/install /home/mysql/install
+chown -R mysql /home/mysql/install
+password="$2"
+#password=$(echo "$2" | /home/mysql/install/lib/tools/base64 -d)
+echo "mysql:$password" | chpasswd
+FOUND=$(grep 'ulimit -n 204800' /etc/profile)
+if [ -z "$FOUND" ]; then
+        echo 'ulimit -n 204800' >> /etc/profile
+fi
+FOUND=$(grep 'export LC_ALL=en_US' /etc/profile)
+if [ -z "$FOUND" ]; then
+        echo 'export LC_ALL=en_US' >> /etc/profile
+fi
+FOUND=$(grep 'export PATH=/usr/local/mysql/bin/:$PATH' /etc/profile)
+if [ -z "$FOUND" ]; then
+        echo 'export PATH=/usr/local/mysql/bin/:$PATH' >> /etc/profile
+fi
+FOUND_umask=$(grep '^umask 022' /etc/profile)
+if [ -z "$FOUND_umask" ]; then
+        echo 'umask 022' >> /etc/profile
+fi
+FOUND=$(grep 'fs.aio-max-nr' /etc/sysctl.conf)
+if [ -z "$FOUND" ];then
+echo "fs.aio-max-nr=1024000" >> /etc/sysctl.conf
+/sbin/sysctl -p
+fi
+_exit
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/mock/mock.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/mock/mock.go
new file mode 100644
index 0000000000..07ab9de539
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/mock/mock.go
@@ -0,0 +1,110 @@
+// Package mock fetches mocked configuration parameters for dbactuator runs
+package mock
+
+import (
+	"bytes"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+const (
+	// MOCK_URL address of the local bk-config service used for mocks
+	MOCK_URL = "http://127.0.0.1:8080/bkconfig/v1/confitem/query"
+)
+
+// ApiRsp generic response envelope returned by the config service
+type ApiRsp struct {
+	Code    int             `json:"code"`
+	Message string          `json:"message"`
+	Data    json.RawMessage `json:"data"`
+}
+
+// MockMysqlRotateConfigs fetches a mocked rotate-binlog config
+func MockMysqlRotateConfigs() string {
+	pbstr := []byte(`{
+		"bk_biz_id": "0",
+		"level_name": "plat",
+		"level_value": "0",
+		"conf_file": "main.conf",
+		"conf_type": "MysqlRotate",
+		"namespace": "tendbha",
+		"format": "map."
+	}`)
+	s := query(pbstr)
+	logger.Info(s)
+	return s
+}
+
+// MockMysqlDbBackupConfigs fetches a mocked backup config from the test environment
+func MockMysqlDbBackupConfigs() string {
+	pbstr := []byte(`{
+		"bk_biz_id":"0",
+		"level_name":"plat",
+		"level_value":"0",
+		"conf_file":"dbbackup.conf,local_backup_config_not_upload",
+		"conf_type":"MysqlBackup",
+		"namespace":"tendbha",
+		"format":"map"
+	}`)
+	return query(pbstr)
+}
+
+// MockMysqlMonitorData fetches mocked mysql monitor configs
+func MockMysqlMonitorData() string {
+	pbstr := []byte(`{
+		"bk_biz_id":"0",
+		"level_name":"plat",
+		"level_value":"0",
+		"conf_file":"db_monitor,global_status",
+		"conf_type":"MysqlMasterMonitor",
+		"namespace":"tendbha",
+		"format":"map."
+	}`)
+	return query(pbstr)
+}
+
+// MockProxyMonitorData fetches mocked proxy monitor configs
+func MockProxyMonitorData() string {
+	pbstr := []byte(`{
+	"bk_biz_id":"0",
+    "level_name":"plat",
+    "level_value":"0",
+    "conf_file":"proxy_monitor,warn_receiver,xml_server",
+    "conf_type":"MysqlProxyMonitor",
+    "namespace":"tendbha",
+    "format":"map"
+	}`)
+	return query(pbstr)
+}
+
+func query(pbstr []byte) string {
+	req, err := http.NewRequest(http.MethodPost, MOCK_URL, bytes.NewBuffer(pbstr))
+	if err != nil {
+		fmt.Println("new request failed", err.Error())
+		return ""
+	}
+	client := &http.Client{}
+	resp, err := client.Do(req)
+	if err != nil {
+		fmt.Println(err)
+		return ""
+	}
+	defer resp.Body.Close()
+	pb, _ := io.ReadAll(resp.Body)
+	var d ApiRsp
+	if err = json.Unmarshal(pb, &d); err != nil {
+		fmt.Println("unmarshal", err)
+		return ""
+	}
+	return string(d.Data)
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/mock/mock_test.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/mock/mock_test.go
new file mode 100644
index 0000000000..c3bc2589e0
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/mock/mock_test.go
@@ -0,0 +1,20 @@
+/*
+ * @Description: Mock test
+ */
+package mock
+
+import "testing"
+
+func TestMockDbbackupConfig(t *testing.T) {
+	t.Log("start ...")
+	d := MockMysqlDbBackupConfigs()
+	t.Log(d)
+	t.Log("end ...")
+}
+
+func TestMockRotateBinlogConfig(t *testing.T) {
+	t.Log("start ...")
+	MockMysqlRotateConfigs()
+	// t.Log(d)
+	t.Log("end ...")
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/rollback/rollback.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/rollback/rollback.go
new file mode 100644
index 0000000000..ef33f994b2
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/rollback/rollback.go
@@ -0,0 +1,226 @@
+// Package rollback records file and process operations so a failed job can undo them
+package rollback
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"os"
+	"path"
+)
+
+const (
+	// OP_DEL the original operation created the file, so rollback deletes it
+	OP_DEL = "DEL"
+	// OP_MOVE the original operation renamed the file, so rollback moves it back
+	OP_MOVE = "MOVE"
+)
+
+// RollBackObjects everything recorded for rollback
+type RollBackObjects struct {
+	RollBackProcessList []RollBackProcess `json:"rollback_processlist"`
+	RollBackFiles       []RollBackFile    `json:"rollback_files"`
+}
+
+// These directories must never be deleted outright; the atomic jobs mainly
+// operate on paths beneath them.
+var safeDirs = map[string]struct{}{"/": {}, "/etc": {}, "/usr": {}, "/usr/local": {}, "/data": {}, "/data1": {}}
+
+// RollBackFile a file-level rollback action; "file" covers regular files,
+// directories and symlinks.
+// Deletions are not recorded: once the original file is gone there is
+// nothing to restore it from.
+type RollBackFile struct {
+	// file names must be absolute paths
+	FileName       string `json:"file_name"`        // file name after DEL/MOVE
+	OriginFileName string `json:"origin_file_name"` // file name before DEL/MOVE
+	OriginOpera    string `json:"origin_opera"`     // original operation; DEL: file was created, MOVE: file was renamed
+}
+
+// RollBackProcess for now we only roll back (kill) processes newly started
+// by the job; processes that were already killed are not restored
+type RollBackProcess struct {
+	StartOsUser string `json:"start_os_user"` // os user that started the process
+	ProcessId   int    `json:"process_id"`
+}
+
+// AddDelFile records a newly created file so rollback can delete it
+func (r *RollBackObjects) AddDelFile(fileName string) {
+	r.RollBackFiles = append(r.RollBackFiles, RollBackFile{
+		FileName:    fileName,
+		OriginOpera: OP_DEL,
+	})
+}
+
+// AddMoveFile records a rename so rollback can move the file back
+func (r *RollBackObjects) AddMoveFile(originFileName, fileName string) {
+	r.RollBackFiles = append(r.RollBackFiles, RollBackFile{
+		FileName:       fileName,
+		OriginFileName: originFileName,
+		OriginOpera:    OP_MOVE,
+	})
+}
+
+// AddKillProcess records a started process so rollback can kill it
+func (r *RollBackObjects) AddKillProcess(pid int) {
+	r.RollBackProcessList = append(r.RollBackProcessList, RollBackProcess{
+		ProcessId: pid,
+	})
+}
+
+// RollBack undoes all recorded process and file operations
+func (r *RollBackObjects) RollBack() (err error) {
+	if r.RollBackProcessList != nil {
+		if err = r.RollBack_Processlists(); err != nil {
+			return err
+		}
+	}
+	if r.RollBackFiles != nil {
+		err = r.RollBack_Files()
+	}
+	return err
+}
+
+// RollBack_Processlists kills every recorded process
+func (r *RollBackObjects) RollBack_Processlists() (err error) {
+	if len(r.RollBackProcessList) <= 0 {
+		return nil
+	}
+	for _, rp := range r.RollBackProcessList {
+		if err = rp.Rollback(); err != nil {
+			return
+		}
+	}
+	return err
+}
+
+// RollBack_Files undoes every recorded file operation
+func (r *RollBackObjects) RollBack_Files() (err error) {
+	if len(r.RollBackFiles) <= 0 {
+		return nil
+	}
+	for _, rfile := range r.RollBackFiles {
+		if err = rfile.RollBack(); err != nil {
+			return
+		}
+	}
+	return err
+}
+
+// RollBack undoes a single file operation.
+// os.Lstat is used instead of os.Stat because os.Stat follows symlinks and
+// returns the target's FileInfo; os.Lstat describes the link itself, which is
+// what we need to pick the right rollback action.
+func (r *RollBackFile) RollBack() (err error) {
+	f, err := os.Lstat(r.FileName)
+	if err != nil {
+		// for a delete, a missing file is fine: there is nothing to remove
+		if os.IsNotExist(err) && r.OriginOpera == OP_DEL {
+			return nil
+		}
+		return err
+	}
+
+	switch mode := f.Mode().Type(); {
+	case mode.IsDir():
+		return r.rollbackDir()
+	case mode.IsRegular():
+		return r.rollbackRegularFile()
+	case mode&os.ModeSymlink != 0:
+		return r.rollbackLink()
+	default:
+		logger.Error("Not Define mode.String(): %v\n", mode.String())
+	}
+	return nil
+}
+
+func (r *RollBackFile) rollbackRegularFile() (err error) {
+	switch r.OriginOpera {
+	case OP_DEL:
+		return SafeRm(r.FileName)
+	case OP_MOVE:
+		return SafeMove(r.FileName, r.OriginFileName)
+	}
+	return fmt.Errorf("no define Operate %s", r.OriginOpera)
+}
+
+func (r *RollBackFile) rollbackDir() (err error) {
+	switch r.OriginOpera {
+	case OP_DEL:
+		return SafeRmDir(r.FileName)
+	case OP_MOVE:
+		return SafeMove(r.FileName, r.OriginFileName)
+	}
+	return fmt.Errorf("no define Operate %s", r.OriginOpera)
+}
+
+func (r *RollBackFile) rollbackLink() (err error) {
+	switch r.OriginOpera {
+	case OP_DEL:
+		return SafeUnlink(r.FileName)
+	case OP_MOVE:
+		return SafeRelink(r.FileName, r.OriginFileName)
+	}
+	return fmt.Errorf("no define Operate %s", r.OriginOpera)
+}
+
+// SafeMove moves file to destfile via mv
+func SafeMove(file, destfile string) (err error) {
+	_, err = osutil.ExecShellCommand(false, fmt.Sprintf("mv %s %s", file, destfile))
+	return
+}
+
+// SafeRelink re-points linkfile at destfile (unlink + ln -s)
+func SafeRelink(linkfile, destfile string) (err error) {
+	_, err = osutil.ExecShellCommand(false, fmt.Sprintf(" unlink %s && ln -s %s %s", linkfile, destfile, linkfile))
+	return
+}
+
+// SafeUnlink removes a symlink after the safety check
+func SafeUnlink(file string) (err error) {
+	if IsSafe(file) {
+		_, err = osutil.ExecShellCommand(false, fmt.Sprintf("unlink %s", file))
+		return
+	}
+	return fmt.Errorf("refusing to delete %s", file)
+}
+
+// SafeRm removes a regular file after the safety check
+func SafeRm(file string) (err error) {
+	if IsSafe(file) {
+		_, err = osutil.ExecShellCommand(false, fmt.Sprintf("rm %s", file))
+		return
+	}
+	return fmt.Errorf("refusing to delete %s", file)
+}
+
+// SafeRmDir recursively removes a directory after the safety check
+func SafeRmDir(file string) (err error) {
+	if IsSafe(file) {
+		_, err = osutil.ExecShellCommand(false, fmt.Sprintf("rm -rf %s", file))
+		return
+	}
+	return fmt.Errorf("refusing to delete %s", file)
+}
+
+// IsSafe reports whether file may be deleted: it must be non-empty and must
+// not be one of the protected directories
+func IsSafe(file string) bool {
+	// refuse if file itself is a protected directory
+	if _, ok := safeDirs[file]; ok {
+		return !ok
+	}
+	// refuse if the cleaned path (e.g. "/data/" -> "/data") is protected
+	if _, ok := safeDirs[path.Clean(file)]; ok {
+		return !ok
+	}
+	return !util.StrIsEmpty(file)
+}
+
+// Rollback kills the recorded process if it is still running
+func (r *RollBackProcess) Rollback() (err error) {
+	if r.ProcessId <= 0 {
+		return nil
+	}
+	p, err := os.FindProcess(r.ProcessId)
+	if err != nil {
+		// the process no longer exists; nothing to roll back
+		return nil
+	}
+	}
+	return p.Kill()
+}
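A minimal usage sketch, assuming a job that creates a file and wants it cleaned up when a later step fails; `doRestOfJob` and the flag path are hypothetical stand-ins:

```go
package main

import (
	"log"
	"os"

	"dbm-services/bigdata/db-tools/dbactuator/pkg/rollback"
)

// doRestOfJob is a hypothetical later step that may fail.
func doRestOfJob() error { return os.ErrInvalid }

func main() {
	var rb rollback.RollBackObjects

	// Step 1: create a file and record it for rollback.
	if err := os.WriteFile("/data/install/step1.flag", []byte("ok"), 0644); err != nil {
		log.Fatal(err)
	}
	rb.AddDelFile("/data/install/step1.flag")

	// Step 2 fails, so undo everything recorded so far.
	if err := doRestOfJob(); err != nil {
		if rerr := rb.RollBack(); rerr != nil {
			log.Printf("rollback failed: %v", rerr)
		}
	}
}
```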
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/rollback/rollback_test.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/rollback/rollback_test.go
new file mode 100644
index 0000000000..232c2b2bd9
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/rollback/rollback_test.go
@@ -0,0 +1,85 @@
+package rollback
+
+import (
+	"testing"
+)
+
+// deletes /data1/11.txt
+func TestRollBackFile(t *testing.T) {
+	t.Logf("start testing TestRollBackFile...")
+	rf := RollBackFile{
+		FileName:    "/data1/11.txt",
+		OriginOpera: OP_DEL,
+	}
+	if err := rf.RollBack(); err != nil {
+		t.Error("rollback", err)
+	}
+}
+
+// will mv /data1/1.txt to /data1/2.txt
+func TestMoveFile(t *testing.T) {
+	t.Logf("start testing TestRollBackFile...")
+	rf := RollBackFile{
+		FileName:       "/data1/1.txt",
+		OriginFileName: "/data1/2.txt",
+		OriginOpera:    OP_MOVE,
+	}
+	if err := rf.RollBack(); err != nil {
+		t.Error("rollback", err)
+	}
+}
+
+// deletes the directory /data1/d1
+func TestDelDir(t *testing.T) {
+	t.Logf("start testing TestRollBackFile...")
+	rf := RollBackFile{
+		FileName:    "/data1/d1/",
+		OriginOpera: OP_DEL,
+	}
+	if err := rf.RollBack(); err != nil {
+		t.Errorf("rollback %s", err.Error())
+	}
+}
+
+// moves /data1/d1 back to /data1/d
+func TestMoveDir(t *testing.T) {
+	t.Logf("start testing TestRollBackFile...")
+	rf := RollBackFile{
+		FileName:       "/data1/d1",
+		OriginFileName: "/data1/d",
+		OriginOpera:    OP_MOVE,
+	}
+	if err := rf.RollBack(); err != nil {
+		t.Errorf("rollback %s", err.Error())
+	}
+}
+
+// removes the symlink /data1/f
+func TestRmLink(t *testing.T) {
+	t.Logf("start testing TestRollBackFile...")
+	rf := RollBackFile{
+		FileName:    "/data1/f",
+		OriginOpera: OP_DEL,
+	}
+	if err := rf.RollBack(); err != nil {
+		t.Errorf("rollback %s", err.Error())
+	}
+}
+
+// re-points the symlink /data1/f at /data1/c
+func TestMoveLink(t *testing.T) {
+	t.Logf("start testing TestRollBackFile...")
+	rf := RollBackFile{
+		FileName:       "/data1/f",
+		OriginFileName: "/data1/c",
+		OriginOpera:    OP_MOVE,
+	}
+	if err := rf.RollBack(); err != nil {
+		t.Errorf("rollback %s", err.Error())
+	}
+}
+
+func TestIsSafeDir(t *testing.T) {
+	t.Logf("start testing ...")
+	t.Log(IsSafe("/usr/local"))
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil/es_helper.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil/es_helper.go
new file mode 100644
index 0000000000..a3e0c514cf
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil/es_helper.go
@@ -0,0 +1,270 @@
+package esutil
+
+import (
+	"context"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/elastic/go-elasticsearch/v7"
+	"github.com/elastic/go-elasticsearch/v7/esapi"
+)
+
+// EsInsObject connection info of an ES instance
+type EsInsObject struct {
+	Host     string `json:"host"`      // instance ip
+	HttpPort int    `json:"http_port"` // instance http port
+	UserName string `json:"username"`  // instance username
+	Password string `json:"password"`  // instance password
+}
+
+// Node an ip and the number of ES instances expected on it
+type Node struct {
+	Ip          string `json:"ip"`
+	InstanceNum int    `json:"instance_num"`
+}
+
+// Allocation one row of the _cat/allocation output
+type Allocation struct {
+	Node   string `json:"node"`
+	Ip     string `json:"ip"`
+	Shards string `json:"shards"`
+}
+
+// Conn builds an ES client for this instance
+func (o EsInsObject) Conn() (*elasticsearch.Client, error) {
+	return elasticsearch.NewClient(
+		elasticsearch.Config{
+			Addresses: []string{fmt.Sprintf("http://%s:%d", o.Host, o.HttpPort)},
+			Username:  o.UserName,
+			Password:  o.Password,
+		})
+}
+
+// DoExclude excludes the given node ips from shard allocation so they drain
+func (o EsInsObject) DoExclude(nodes []string) error {
+	esclient, err := o.Conn()
+
+	if err != nil {
+		logger.Error("failed to connect to es", err)
+		return err
+	}
+
+	ips := strings.Join(nodes[:], ",")
+	var b strings.Builder
+	b.WriteString(fmt.Sprintf(`{
+        "transient": {
+            "cluster.routing.allocation.exclude._ip": "%s"
+        }
+}`, ips))
+
+	req := esapi.ClusterPutSettingsRequest{
+		Body: strings.NewReader(b.String()),
+	}
+
+	res, err := req.Do(context.Background(), esclient)
+	if err != nil {
+		logger.Error("exclude request failed", err)
+		return err
+	}
+
+	if res.StatusCode != 200 {
+		logger.Error("exclude request returned non-200,", res)
+		return errors.New("exclude request returned non-200")
+	}
+
+	logger.Info("res", res)
+
+	return nil
+}
+
+// CheckEmpty polls _cat/allocation until the given nodes hold no shards
+func (o EsInsObject) CheckEmpty(nodes []string) error {
+	const SleepInterval = 60 * time.Second
+
+	esclient, err := o.Conn()
+
+	if err != nil {
+		logger.Error("failed to connect to es", err)
+		return err
+	}
+
+	for {
+		req := esapi.CatAllocationRequest{
+			NodeID: nodes,  // only the given nodes
+			Format: "json", // json output
+		}
+		res, err := req.Do(context.Background(), esclient)
+		if err != nil {
+			// don't touch res on error; retry after the sleep interval
+			logger.Info("cat api failed", err)
+			time.Sleep(SleepInterval)
+			continue
+		}
+
+		resBody := res.String()
+		logger.Info("allocations", resBody)
+
+		var allocations []Allocation
+		if err := json.NewDecoder(res.Body).Decode(&allocations); err != nil {
+			logger.Error("Error parsing the response body: %s", err)
+		}
+		// close inside the loop; a defer here would pile up across iterations
+		res.Body.Close()
+
+		sum := 0
+		for _, allocation := range allocations {
+			logger.Info("allocations: %v", allocation)
+			if allocation.Node == "UNASSIGNED" {
+				continue
+			}
+			shards, _ := strconv.Atoi(allocation.Shards)
+			sum += shards
+		}
+		// sum == 0 means the data migration has finished
+		if sum == 0 {
+			logger.Info("shard migration finished")
+			break
+		}
+
+		time.Sleep(SleepInterval)
+	}
+
+	return nil
+}
+
+// CheckEmptyOnetime checks once whether the given nodes still hold shards
+func (o EsInsObject) CheckEmptyOnetime(nodes []string) (sum int, ok bool, err error) {
+	esclient, err := o.Conn()
+
+	ok = false
+	if err != nil {
+		logger.Error("failed to connect to es", err)
+		return sum, ok, err
+	}
+
+	req := esapi.CatAllocationRequest{
+		NodeID: nodes,  // only the given nodes
+		Format: "json", // json output, required for the decode below
+	}
+	res, err := req.Do(context.Background(), esclient)
+	if err != nil {
+		logger.Info("cat api failed", err)
+		return sum, ok, err
+	}
+
+	defer res.Body.Close()
+
+	resBody := res.String()
+	logger.Info("allocations", resBody)
+
+	var allocations []Allocation
+	if err := json.NewDecoder(res.Body).Decode(&allocations); err != nil {
+		logger.Error("Error parsing the response body: %s", err)
+	}
+
+	sum = 0
+	for _, allocation := range allocations {
+		logger.Info("allocations: %v", allocation)
+		if allocation.Node == "UNASSIGNED" {
+			continue
+		}
+		shards, _ := strconv.Atoi(allocation.Shards)
+		sum += shards
+	}
+
+	// sum == 0 means the data migration has finished
+	if sum == 0 {
+		logger.Info("Shards migration finished.")
+		ok = true
+		err = nil
+	}
+
+	return sum, ok, err
+}
+
+// CheckNodes checks whether the expected number of ES instances joined the cluster
+func (o EsInsObject) CheckNodes(nodes []Node) (ok bool, err error) {
+	ok = true
+	err = nil
+	// expected total number of instances
+	totalIns := 0
+	// ip list
+	ips := make([]string, 0)
+	for _, n := range nodes {
+		ips = append(ips, n.Ip)
+		totalIns += n.InstanceNum
+	}
+	logger.Info("hosts being scaled out: %v", ips)
+	logger.Info("expected instance count: %d", totalIns)
+
+	esclient, err := o.Conn()
+
+	if err != nil {
+		logger.Error("failed to connect to es", err)
+		return false, err
+	}
+
+	req := esapi.CatNodesRequest{}
+	res, err := req.Do(context.Background(), esclient)
+	if err != nil {
+		logger.Info("cat api failed", err)
+		return false, err
+	}
+
+	resBody := res.String()
+	logger.Info("raw _cat/nodes output %v", resBody)
+
+	// remove http code
+	catResults := strings.Replace(strings.TrimSuffix(resBody, "\n"), "[200 OK] ", "", -1)
+	catList := strings.Split(catResults, "\n")
+
+	// instances seen per ip
+	nodeCounters := make(map[string]int)
+	// number of instances actually found
+	sum := 0
+	for _, r := range catList {
+		fields := strings.Fields(r)
+		if len(fields) == 0 {
+			continue
+		}
+		// the node ip is the first column
+		ip := fields[0]
+		if containStr(ips, ip) {
+			nodeCounters[ip]++
+			sum++
+		}
+	}
+	logger.Info("actual scale-out result: %v", nodeCounters)
+	logger.Info("actual number of new instances: [%d]", sum)
+	if sum != totalIns {
+		ok = false
+		err = fmt.Errorf("map: %v", nodeCounters)
+	}
+
+	return ok, err
+}
+
+func containStr(s []string, e string) bool {
+	for _, a := range s {
+		if a == e {
+			return true
+		}
+	}
+	return false
+}
+
+// CheckEsHealth checks that the cluster responds to a basic info request
+func (o EsInsObject) CheckEsHealth() (err error) {
+	esclient, err := o.Conn()
+
+	if err != nil {
+		return fmt.Errorf("error creating the client: %s", err)
+	}
+
+	// 1. Get cluster info
+	//
+	res, err := esclient.Info()
+	if err != nil {
+		return fmt.Errorf("error getting response: %s", err)
+	}
+	// Check response status
+	if res.IsError() {
+		return fmt.Errorf("error: %s", res.String())
+	}
+	return nil
+}
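A minimal usage sketch chaining these helpers to drain a node; the host, port, credentials and node ip are made-up values:

```go
package main

import (
	"log"

	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil"
)

func main() {
	es := esutil.EsInsObject{
		Host:     "127.0.0.1", // hypothetical instance
		HttpPort: 9200,
		UserName: "elastic",
		Password: "changeme",
	}
	if err := es.CheckEsHealth(); err != nil {
		log.Fatalf("cluster unhealthy: %v", err)
	}
	nodes := []string{"1.1.1.2"}
	// Exclude the node from allocation, then poll once for remaining shards.
	if err := es.DoExclude(nodes); err != nil {
		log.Fatal(err)
	}
	if sum, ok, err := es.CheckEmptyOnetime(nodes); err == nil && !ok {
		log.Printf("still %d shards to migrate", sum)
	}
}
```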
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil/es_operate.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil/es_operate.go
new file mode 100644
index 0000000000..87d4c3762f
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil/es_operate.go
@@ -0,0 +1,445 @@
+package esutil
+
+import (
+	"bufio"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"errors"
+	"fmt"
+	"os"
+	"regexp"
+	"strings"
+
+	"github.com/shirou/gopsutil/mem"
+)
+
+// DiskTypePath one data disk: its type, size and mount point
+type DiskTypePath struct {
+	DiskType string // disk type: sd, vd, nvme
+	DiskSize string // disk size in 1K blocks, as reported by df
+	DiskPath string // mount point, e.g. /data1, /data
+}
+
+// GenerateHeapOption renders the JVM -Xms/-Xmx options; heapsize is in MB
+func GenerateHeapOption(heapsize uint64) []byte {
+	rawdata := []byte(fmt.Sprintf(`-Xms%dm
+-Xmx%dm`, heapsize, heapsize))
+	return rawdata
+}
+
+// GetInstHeapByIP computes the heap size per instance in MB, given the instance count
+func GetInstHeapByIP(instCount uint64) (uint64, error) {
+	vMem, err := mem.VirtualMemory()
+	if err != nil {
+		return 0, err
+	}
+	kilo := uint64(1024)
+	totalMemInMi := vMem.Total / kilo / kilo
+	EsTotalMem := float64(totalMemInMi) * ratio()
+	instHeap := uint64(EsTotalMem) / instCount
+	return insMaxHeap(instHeap), nil
+}
+
+// ratio fraction of total memory reserved for ES heaps
+func ratio() float64 {
+	return 0.6
+}
+
+// SupervisorctlUpdate runs "supervisorctl update" in the background
+func SupervisorctlUpdate() error {
+	startCmd := "supervisorctl update"
+	logger.Info(fmt.Sprintf("exec %s", startCmd))
+	_, err := osutil.RunInBG(false, startCmd)
+	return err
+}
+
+// insMaxHeap caps a single instance heap at 30GB; unit MB
+func insMaxHeap(heapSize uint64) uint64 {
+	maxHeap := 30720
+	if heapSize > uint64(maxHeap) {
+		return uint64(maxHeap)
+	}
+	return heapSize
+}
+
+// GetTfByRole maps an ES role to its (isMaster, isData) flags
+func GetTfByRole(role string) (isMaster bool, isData bool) {
+	switch role {
+	case cst.EsHot:
+		isMaster = false
+		isData = true
+	case cst.EsCold:
+		isMaster = false
+		isData = true
+	case cst.EsMaster:
+		isMaster = true
+		isData = false
+	case cst.EsClient:
+		isMaster = false
+		isData = false
+	default:
+		isMaster = false
+		isData = false
+	}
+	return isMaster, isData
+}
+
+// WriteCerToYaml appends the opendistro security certificate settings to the
+// instance's elasticsearch.yml (/data/esenv/es*/config/elasticsearch.yml)
+func WriteCerToYaml(filePath, transportPass, httpPass string) error {
+	file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND, 0666)
+	if err != nil {
+		return err
+	}
+	// close the file handle promptly
+	defer file.Close()
+	write := bufio.NewWriter(file)
+	data := fmt.Sprintf(`opendistro_security.ssl.transport.pemcert_filepath: node1.pem
+opendistro_security.ssl.transport.pemkey_filepath: node1.key
+opendistro_security.ssl.transport.pemkey_password: %s
+opendistro_security.ssl.transport.pemtrustedcas_filepath: root-ca.pem
+opendistro_security.ssl.transport.enforce_hostname_verification: false
+opendistro_security.ssl.transport.resolve_hostname: false
+opendistro_security.ssl.http.enabled: false
+opendistro_security.ssl.http.pemcert_filepath: node1_http.pem
+opendistro_security.ssl.http.pemkey_filepath: node1_http.key
+opendistro_security.ssl.http.pemkey_password: %s
+opendistro_security.ssl.http.pemtrustedcas_filepath: root-ca.pem
+opendistro_security.nodes_dn:
+- CN=node1.bk.com,OU=Ops,O=Bk Com\, Inc.,DC=bk,DC=com
+opendistro_security.authcz.admin_dn:
+- CN=kirk.bk.com,OU=Ops,O=Bk Com\, Inc.,DC=bk,DC=com
+opendistro_security.ssl.http.clientauth_mode: OPTIONAL
+opendistro_security.allow_unsafe_democertificates: true
+opendistro_security.enable_snapshot_restore_privilege: true
+opendistro_security.check_snapshot_restore_write_privileges: true
+opendistro_security.restapi.roles_enabled: ["all_access", "security_rest_api_access"]`, transportPass, httpPass)
+
+	if _, err := write.WriteString(data); err != nil {
+		return err
+	}
+	return write.Flush()
+}
+
+// GenEsini renders the supervisor ini for ES instance seqNum
+func GenEsini(seqNum uint64) []byte {
+	iniRaw := []byte(fmt.Sprintf(`[program:elasticsearch%d]
+command=/data/esenv/es_%d/bin/elasticsearch ; the program (relative uses PATH, can take args)
+numprocs=1 ; number of processes copies to start (def 1)
+autostart=true ; start at supervisord start (default: true)
+startsecs=3 ; # of secs prog must stay up to be running (def. 1)
+startretries=99 ; max # of serial start failures when starting (default 3)
+autorestart=true ; when to restart if exited after running (def: unexpected)
+exitcodes=0 ; 'expected' exit codes used with autorestart (default 0,2)
+user=mysql ;
+redirect_stderr=true ; redirect proc stderr to stdout (default false)
+stdout_logfile=/data/eslog%d/es_startup.log ; stdout log path, NONE for none; default AUTO
+stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)`, seqNum, seqNum, seqNum))
+	return iniRaw
+}
+
+// GenKibanaini renders the supervisor ini for kibana
+func GenKibanaini() []byte {
+	iniRaw := []byte(`[program:kibana]
+command=/data/esenv/kibana/bin/kibana -c /data/esenv/kibana/config/kibana.yml ; the program (relative uses PATH, can take args)
+numprocs=1 ; number of processes copies to start (def 1)
+autostart=true ; start at supervisord start (default: true)
+startsecs=10 ; # of secs prog must stay up to be running (def. 1)
+startretries=99 ; max # of serial start failures when starting (default 3)
+autorestart=true ; when to restart if exited after running (def: unexpected)
+exitcodes=0 ; 'expected' exit codes used with autorestart (default 0,2)
+user=mysql ;
+redirect_stderr=true ; redirect proc stderr to stdout (default false)
+stdout_logfile=/data/esenv/kibana/kibana_startup.log ; stdout log path, NONE for none; default AUTO
+stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)`)
+	return iniRaw
+}
+
+// GenInfluxdbini renders the supervisor ini for influxdb
+func GenInfluxdbini() []byte {
+	iniRaw := []byte(`[program:influxdb]
+command=/data/influxdbenv/influxdb/usr/bin/influxd -config /data/influxdbenv/influxdb/etc/influxdb/influxdb.conf ; the program (relative uses PATH, can take args)
+numprocs=1 ; number of processes copies to start (def 1)
+autostart=true ; start at supervisord start (default: true)
+startsecs=3 ; # of secs prog must stay up to be running (def. 1)
+startretries=99 ; max # of serial start failures when starting (default 3)
+autorestart=true ; when to restart if exited after running (def: unexpected)
+exitcodes=0 ; 'expected' exit codes used with autorestart (default 0,2)
+user=influxdb ;
+redirect_stderr=true ; redirect proc stderr to stdout (default false)
+stdout_logfile=/data/influxdblog/influxdb_startup.log ; stdout log path, NONE for none; default AUTO
+stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)`)
+	return iniRaw
+}
+
+// GenTelegrafini renders the supervisor ini for telegraf
+func GenTelegrafini() []byte {
+	iniRaw := []byte(`[program:telegraf]
+command=/data/influxdbenv/telegraf/usr/bin/telegraf --config /data/influxdbenv/telegraf/etc/telegraf/telegraf.conf ; the program (relative uses PATH, can take args)
+numprocs=1 ; number of processes copies to start (def 1)
+autostart=true ; start at supervisord start (default: true)
+startsecs=3 ; # of secs prog must stay up to be running (def. 1)
+startretries=99 ; max # of serial start failures when starting (default 3)
+autorestart=true ; when to restart if exited after running (def: unexpected)
+exitcodes=0 ; 'expected' exit codes used with autorestart (default 0,2)
+user=influxdb ;
+redirect_stderr=true ; redirect proc stderr to stdout (default false)
+stdout_logfile=/data/influxdbenv/telegraf/telegraf_startup.log ; stdout log path, NONE for none; default AUTO
+stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)`)
+	return iniRaw
+}
+
+// GenKafkaini renders the supervisor ini for kafka
+func GenKafkaini() []byte {
+	iniRaw := []byte(`[program:kafka]
+command=/data/kafkaenv/kafka/bin/kafka-server-scram-start.sh /data/kafkaenv/kafka/config/server.properties ; the program (relative uses PATH, can take args)
+numprocs=1 ; number of processes copies to start (def 1)
+autostart=true ; start at supervisord start (default: true)
+startsecs=3 ; # of secs prog must stay up to be running (def. 1)
+startretries=99 ; max # of serial start failures when starting (default 3)
+autorestart=true ; when to restart if exited after running (def: unexpected)
+exitcodes=0 ; 'expected' exit codes used with autorestart (default 0,2)
+user=mysql ;
+redirect_stderr=true ; redirect proc stderr to stdout (default false)
+stdout_logfile=/data/kafkalog/kafka_startup.log ; stdout log path, NONE for none; default AUTO
+stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)`)
+	return iniRaw
+}
+
+// GenZookeeperini renders the supervisor ini for zookeeper
+func GenZookeeperini() []byte {
+	iniRaw := []byte(`[program:zookeeper]
+command=/data/kafkaenv/zk/bin/zkServer.sh start-foreground ; the program (relative uses PATH, can take args)
+numprocs=1 ; number of processes copies to start (def 1)
+autostart=true ; start at supervisord start (default: true)
+startsecs=3 ; # of secs prog must stay up to be running (def. 1)
+startretries=99 ; max # of serial start failures when starting (default 3)
+autorestart=true ; when to restart if exited after running (def: unexpected)
+exitcodes=0 ; 'expected' exit codes used with autorestart (default 0,2)
+user=mysql ;
+stopsignal=KILL ;
+redirect_stderr=true ; redirect proc stderr to stdout (default false)
+stdout_logfile=/data/zklog/zk_startup.log ; stdout log path, NONE for none; default AUTO
+stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)`)
+	return iniRaw
+}
+
+// GenManagerini renders the supervisor ini for kafka-manager (cmak)
+func GenManagerini() []byte {
+	iniRaw := []byte(`[program:manager]
+command=/data/kafkaenv/cmak-3.0.0.5/bin/cmak -java-home /data/kafkaenv/jdk11 ; the program (relative uses PATH, can take args)
+numprocs=1 ; number of processes copies to start (def 1)
+autostart=true ; start at supervisord start (default: true)
+startsecs=3 ; # of secs prog must stay up to be running (def. 1)
+startretries=99 ; max # of serial start failures when starting (default 3)
+autorestart=true ; when to restart if exited after running (def: unexpected)
+exitcodes=0 ; 'expected' exit codes used with autorestart (default 0,2)
+user=mysql ;
+stopsignal=KILL ;
+redirect_stderr=true ; redirect proc stderr to stdout (default false)
+stdout_logfile=/data/kafkaenv/cmak-3.0.0.5/manager_startup.log ; stdout log path, NONE for none; default AUTO
+stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)`)
+	return iniRaw
+}
+
+// GetInstMem returns the total memory in MB
+func GetInstMem() (uint64, error) {
+	vMem, err := mem.VirtualMemory()
+	if err != nil {
+		return 0, err
+	}
+	kilo := uint64(1024)
+	totalMemInMi := vMem.Total / kilo / kilo
+	return totalMemInMi, nil
+}
+
+// ToMasterStr joins ips into "master-<ip>_1" node names, comma separated
+func ToMasterStr(ips []string) string {
+	for a, ip := range ips {
+		ips[a] = fmt.Sprintf("master-%s_1", ip)
+	}
+	return strings.Join(ips[:], ",")
+}
+
+// NodeToProcess maps a node name (e.g. "master-1.1.1.1_1") to the supervisor
+// process name; "all" and unparseable names map to "all"
+func NodeToProcess(node string) (process string) {
+	process, err := GetNumByNode(node)
+	if err != nil {
+		return "all"
+	}
+	return process
+}
+
+// GetNumByNode maps a node name like "master-1.1.1.1_2" to the supervisor
+// process name ("elasticsearch2"); "all" or an empty name maps to "all"
+func GetNumByNode(node string) (string, error) {
+	if node == "all" || len(node) == 0 {
+		return "all", nil
+	}
+	reg := regexp.MustCompile(`\w.*-\w.*_(\d)`)
+	if reg == nil {
+		return "", errors.New("regexp compile error")
+	}
+	result := reg.FindAllStringSubmatch(node, -1)
+	if len(result) == 0 {
+		return "", errors.New("no instance id found in node name " + node)
+	}
+	id := result[0][1]
+	return fmt.Sprintf("elasticsearch%s", id), nil
+}
+
+// GetEsLocalIp reads network.host from elasticsearch.yml
+func GetEsLocalIp() (ip string, err error) {
+	// the ip is the second field of a "network.host: <ip>" line
+	extraCmd := fmt.Sprintf(`grep -w 'network.host'  %s|awk '{print $2}'`, cst.DefaultEsConfigFile)
+	logger.Info("cmd, [%s]", extraCmd)
+	outputs, err := osutil.ExecShellCommand(false, extraCmd)
+	if err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return "", err
+	}
+	logger.Info("local ip %s", outputs)
+	ip = strings.TrimSuffix(outputs, "\n")
+	return ip, nil
+}
+
+// GetEsLocalPorts reads the http.port of every local ES instance
+func GetEsLocalPorts() (ports []string, err error) {
+	extraCmd := fmt.Sprintf(`grep -w 'http.port' %s/es_*/config/elasticsearch.yml|awk '{print $2}'`, cst.DefaulEsEnv)
+	logger.Info("cmd, [%s]", extraCmd)
+	outputs, err := osutil.ExecShellCommand(false, extraCmd)
+	if err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return nil, err
+	}
+	trimRes := strings.TrimSuffix(outputs, "\n")
+	ports = strings.Fields(trimRes)
+	return ports, nil
+}
+
+// GetPath returns the data paths derived from the df output
+func GetPath() []string {
+	var paths []string
+	var disks []DiskTypePath
+	typeCount := make(map[string]int)
+	/*
+		Filesystem     1K-blocks      Used Available Use% Mounted on
+		devtmpfs         8047192         0   8047192   0% /dev
+		tmpfs            8062512         0   8062512   0% /dev/shm
+		tmpfs            8062512    280236   7782276   4% /run
+		tmpfs            8062512         0   8062512   0% /sys/fs/cgroup
+		/dev/vda1      103079844  22737932  76047608  24% /
+		/dev/vdb1      412715432 165966836 225760744  43% /data
+		tmpfs            1612500         0   1612500   0% /run/user/0
+	*/
+	extraCmd := `df | grep ^/dev |egrep -vw '/|/usr|/boot'|awk '{print $1":"$2":"$NF}'`
+	logger.Info("Command [%s]", extraCmd)
+	output, err := osutil.ExecShellCommand(false, extraCmd)
+	if err != nil {
+		logger.Info("[%s] execute failed, %s", extraCmd, err.Error())
+		return paths
+	}
+	logger.Info("Commnad output, %s", output)
+	op := strings.TrimSuffix(output, "\n")
+	if len(op) == 0 {
+		logger.Info("No independent disk found")
+		return paths
+	}
+
+	var dd DiskTypePath
+	diskList := strings.Split(op, "\n")
+	logger.Info("DiskList %+v", diskList)
+	for _, d := range diskList {
+		diskType := GetDiskType(strings.Split(d, ":")[0])
+		diskSize := strings.Split(d, ":")[1]
+		diskPath := strings.Split(d, ":")[2]
+		typeCount[diskType]++
+		dd = DiskTypePath{
+			DiskType: diskType,
+			DiskSize: diskSize,
+			DiskPath: diskPath,
+		}
+		disks = append(disks, dd)
+	}
+
+	logger.Info("disks %+v", disks)
+	if len(typeCount) == 1 {
+		logger.Info("May be all disks has same size and type")
+		for _, x := range disks {
+			paths = append(paths, x.DiskPath)
+		}
+	}
+
+	if len(typeCount) == 2 {
+		logger.Info("There are 2 different type of disks")
+		biggerDisk := MaxCountDisk(typeCount)
+		for _, x := range disks {
+
+			if x.DiskType == biggerDisk {
+				paths = append(paths, x.DiskPath)
+			}
+		}
+
+	}
+
+	if len(typeCount) > 2 {
+		logger.Info("More than 2 type of diks,can't handel it")
+		for _, x := range disks {
+			paths = append(paths, x.DiskPath)
+		}
+	}
+
+	return paths
+
+}
+
+// GetDiskType returns the disk type (vd, sd, nvme) for a device name
+func GetDiskType(disk string) string {
+	var dtype string
+	switch {
+	case strings.Contains(disk, "vd"):
+		dtype = "vd"
+	case strings.Contains(disk, "sd"):
+		dtype = "sd"
+	case strings.Contains(disk, "nvme"):
+		dtype = "nvme"
+	default:
+		dtype = ""
+	}
+	return dtype
+}
+
+// MaxCountDisk returns the disk type that occurs most often
+func MaxCountDisk(m map[string]int) string {
+	var maxKey string
+	var maxVal int
+
+	for maxKey, maxVal = range m {
+		break
+	}
+	for x, y := range m {
+		if y > maxVal {
+			maxKey = x
+			maxVal = y
+		}
+	}
+	return maxKey
+}
+
+// GenPath returns the seed consecutive entries of diskPath assigned to instance number seq
+func GenPath(seq int, seed int, diskPath []string) []string {
+	var paths []string
+	start := (seq - 1) * seed
+	end := start + seed - 1
+	for s := start; s <= end; s++ {
+		paths = append(paths, diskPath[s])
+	}
+	return paths
+}
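A minimal sketch tying the heap helpers together for a host that runs two ES instances; the scenario and output handling are illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil"
)

func main() {
	// 60% of total RAM, split across 2 instances, capped at 30720 MB each.
	heapMB, err := esutil.GetInstHeapByIP(2)
	if err != nil {
		log.Fatal(err)
	}
	// Rendered -Xms/-Xmx content, e.g. for config/jvm.options.d/heap.options.
	fmt.Printf("%s\n", esutil.GenerateHeapOption(heapMB))
}
```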
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil/esutil.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil/esutil.go
new file mode 100644
index 0000000000..bd0ece518b
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/esutil/esutil.go
@@ -0,0 +1,2 @@
+// Package esutil provides Elasticsearch operation helpers
+package esutil
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/helpers.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/helpers.go
new file mode 100644
index 0000000000..f4b44be4e5
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/helpers.go
@@ -0,0 +1,91 @@
+package util
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+const (
+	// DefaultErrorExitCode exit code used when a command fails with an unrecognized error
+	DefaultErrorExitCode = 1
+)
+
+// CheckErr prints a user friendly error to STDERR and exits with a non-zero
+// exit code. Unrecognized errors will be printed with an "error: " prefix.
+//
+// Adapted from kubectl's cmdutil; it is generic and not tied to any
+// particular command.
+func CheckErr(err error) {
+	checkErr(err, fatalErrHandler)
+}
+
+var fatalErrHandler = fatal
+
+// checkErr formats a given error as a string and calls the passed handleErr
+// func with that string and an exit code.
+func checkErr(err error, handleErr func(string, int)) {
+	if err == nil {
+		return
+	}
+	switch {
+	case errors.Is(err, ErrExit):
+		handleErr("", DefaultErrorExitCode)
+	default:
+		switch err := err.(type) {
+		default: // for any other error type
+			msg, ok := StandardErrorMessage(err)
+			if !ok {
+				msg = err.Error()
+				if !strings.HasPrefix(msg, "error: ") {
+					msg = fmt.Sprintf("error: %s", msg)
+				}
+			}
+			handleErr(msg, DefaultErrorExitCode)
+		}
+	}
+}
+
+// fatal prints the message (if provided) to STDERR and then exits with the
+// given code.
+func fatal(msg string, code int) {
+
+	if len(msg) > 0 {
+		// add newline if needed
+		if !strings.HasSuffix(msg, "\n") {
+			msg += "\n"
+		}
+		fmt.Fprint(os.Stderr, msg)
+	}
+	os.Exit(code)
+}
+
+// ErrExit may be passed to CheckErr to instruct it to output nothing but exit with
+// status code 1.
+var ErrExit = fmt.Errorf("exit")
+
+type debugError interface {
+	DebugError() (msg string, args []interface{})
+}
+
+// StandardErrorMessage translates common errors into a human readable message, or returns
+// false if the error is not one of the recognized types. It may also log extended
+// information to klog.
+//
+// This method is generic to the command in use and may be used by non-Kubectl
+// commands.
+func StandardErrorMessage(err error) (string, bool) {
+	if debugErr, ok := err.(debugError); ok {
+		logger.Info(debugErr.DebugError())
+	}
+	return "", false
+}
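+
+// A minimal usage sketch (hypothetical caller): wrap a command entry point so
+// any returned error is printed with an "error: " prefix and the process
+// exits with DefaultErrorExitCode:
+//
+//	func main() {
+//		util.CheckErr(run()) // exits with code 1 when run() fails
+//	}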
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/httpclient/client.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/httpclient/client.go
new file mode 100644
index 0000000000..b356693135
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/httpclient/client.go
@@ -0,0 +1,73 @@
+package httpclient
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+)
+
+// Download fetches server+fileName over HTTP with basic auth into dstDir,
+// limiting write bandwidth to bwlimitMB MB/s.
+func Download(server, dstDir string, fileName string, authUser, authPass string, bwlimitMB int64) error {
+	if fileName == "" {
+		return fmt.Errorf("fileName to download cannot be empty")
+	}
+	srcFile := fmt.Sprintf("%s%s", server, fileName)
+	tgtFile := fmt.Sprintf("%s/%s", dstDir, fileName)
+	logger.Info("start download to %s", tgtFile)
+	f, err := os.Create(tgtFile)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	resp, err := DoWithBA(http.MethodGet, srcFile, nil, authUser, authPass)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return fmt.Errorf("bad status: %s", resp.Status)
+	}
+	done := make(chan int, 1)
+	defer close(done)
+	go func(chan int) {
+		osutil.PrintFileSizeIncr(tgtFile, 1, 10, logger.Info, done)
+	}(done)
+	_, err = util.IOLimitRate(f, resp.Body, bwlimitMB)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// redirectPolicyFunc re-applies the Authorization header from the original
+// request when following a redirect, since net/http may drop it on
+// cross-host redirects.
+func redirectPolicyFunc(req *http.Request, via []*http.Request) error {
+	if len(via) > 0 {
+		req.Header.Set("Authorization", via[0].Header.Get("Authorization"))
+	}
+	return nil
+}
+
+// DoWithBA performs an HTTP request with basic auth and returns the response.
+func DoWithBA(method string, url string, payload io.Reader, username, password string) (*http.Response, error) {
+	req, err := http.NewRequest(method, url, payload)
+	if err != nil {
+		return nil, err
+	}
+	// Set the auth for the request.
+	req.SetBasicAuth(username, password)
+
+	client := &http.Client{
+		CheckRedirect: redirectPolicyFunc,
+	}
+	return client.Do(req)
+}
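+
+// A minimal usage sketch (hypothetical endpoint and credentials):
+//
+//	resp, err := httpclient.DoWithBA(http.MethodGet, "http://fileserver:8080/pkg.tgz", nil, "user", "pass")
+//	if err == nil {
+//		defer resp.Body.Close()
+//	}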
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/httpclient/httpclient.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/httpclient/httpclient.go
new file mode 100644
index 0000000000..826525400c
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/httpclient/httpclient.go
@@ -0,0 +1,2 @@
+// Package httpclient provides a basic-auth HTTP client and download helpers.
+package httpclient
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/kafkautil/kafkautil.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/kafkautil/kafkautil.go
new file mode 100644
index 0000000000..7665e1012d
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/kafkautil/kafkautil.go
@@ -0,0 +1,333 @@
+// Package kafkautil provides helpers for Kafka broker, topic and partition management.
+package kafkautil
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"io/ioutil"
+	"math/rand"
+	"strings"
+	"time"
+)
+
+// GetBrokerIds returns the broker ids registered in zookeeper under /brokers/ids.
+func GetBrokerIds(zk string) (ids []string, err error) {
+	var output string
+	extraCmd := fmt.Sprintf(`
+	export BROKERIDS=$(%s %s <<< 'ls /brokers/ids' | tail -1)
+	export BROKERIDS=${BROKERIDS//[!0-9 ]/}
+	echo $BROKERIDS
+	`, cst.DefaultZookeeperShell, zk)
+	logger.Info("extraCmd: %s", extraCmd)
+	if output, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("Get zk ids failed, %s, %s", output, err.Error())
+		return nil, err
+	}
+	logger.Info("output, %s", output)
+	trimRes := strings.TrimSuffix(output, "\n")
+	ids = strings.Fields(trimRes)
+	return ids, nil
+}
+
+// GetBrokerIdByHost returns the broker id whose PLAINTEXT listener host matches host.
+func GetBrokerIdByHost(host string, zk string) (id string, err error) {
+	brokerIds, err := GetBrokerIds(zk)
+	logger.Info("brokerIds, %v", brokerIds)
+	if err != nil {
+		logger.Error("Get broker id failed, %v", err)
+		return "", err
+	}
+
+	for _, kfid := range brokerIds {
+		var output string
+		extraCmd := fmt.Sprintf(`
+		DETAIL=$(%s %s <<< "get /brokers/ids/%s")
+		[[ $DETAIL =~ PLAINTEXT:\/\/(.*?)\"\] ]]
+		BROKERS=${BASH_REMATCH[1]}
+		echo $BROKERS
+		`, cst.DefaultZookeeperShell, zk, kfid)
+		logger.Info("extraCmd: %s", extraCmd)
+		if output, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+			logger.Error("Get zk ids failed, %s, %s", output, err.Error())
+			return "", err
+		}
+		logger.Info("output", output)
+		kfHost := strings.Split(strings.TrimSuffix(output, "\n"), ":")[0]
+		if kfHost == host {
+			id = kfid
+			break
+		}
+	}
+
+	return id, nil
+}
+
+// PickRandom returns a random element of arr.
+func PickRandom(arr []string) string {
+	rand.Seed(time.Now().Unix())
+	return arr[rand.Intn(len(arr))]
+}
+
+// GenReassignmentJson generates a partition-reassignment JSON plan that moves partitions led by brokerId onto the remaining brokers.
+func GenReassignmentJson(brokerId string, zk string, xBrokerIds []string) (output string, err error) {
+	idsArr, err := GetBrokerIds(zk)
+	if err != nil {
+		return "", err
+	}
+	logger.Info("idsArr %v", idsArr)
+	tempArr := make([]string, len(idsArr))
+	copy(tempArr, idsArr)
+	// drop the broker ids being scaled in
+	for _, id := range xBrokerIds {
+		tempArr = findAndDelete(tempArr, id)
+	}
+	logger.Info("tempArr %v", tempArr)
+	logger.Info("idsArr %v", idsArr)
+
+	// join broker ids, e.g. "1,2,3"
+	allIds := strings.Join(idsArr[:], ",")
+	tempIds := strings.Join(tempArr[:], ",")
+
+	extraCmd := fmt.Sprintf(`
+	function random_broker {
+			IFS=$',' read -a brokers <<< %s
+			selectedexpression=${brokers[ $RANDOM %% ${#brokers[@]} ]}
+			echo $selectedexpression
+	}
+	function array_contains {
+			local array="$1[@]"
+			local seeking=$2
+			local in=1
+			for element in "${!array}"; do
+			  if [[ $element == $seeking ]]; then
+					in=0
+					break
+			  fi
+			done
+			return $in
+	}
+
+	function other_broker {
+			local brokers_string=$1
+			local all_brokers_string=%s
+			if [ ${#brokers_string} -ge ${#all_brokers_string} ]; then
+			  local no_other_broker_available=""
+			  echo $no_other_broker_available
+			else
+			  IFS=$',' read -a brokers <<< "$brokers_string"
+			  local new_broker=$(random_broker)
+			  while array_contains brokers $new_broker; do
+					new_broker=$(random_broker)
+			  done
+			  echo $new_broker
+			fi
+	}
+
+	function all_but_broker {
+			local brokers_string=$1
+			local broker=$2
+			IFS=$',' read -a brokers <<< "$brokers_string"
+			local new_brokers=""
+			for curr_broker in "${brokers[@]}"; do
+			  if [ "$curr_broker" != "$broker" ]; then
+					new_brokers="$new_brokers,$curr_broker"
+			  fi
+			done
+			# Remove leading comma, if any.
+			new_brokers=${new_brokers#","}
+			echo $new_brokers
+	  }
+
+	function replace_broker {
+			local brokers_string=$1
+			local broker=$2
+			local remaining_brokers=$(all_but_broker $brokers_string $broker)
+			local replacement_broker=$(other_broker $brokers_string)
+			new_brokers="$remaining_brokers,$replacement_broker"
+			# Remove leading comma, if any.
+			new_brokers=${new_brokers#","}
+			# Remove trailing comma, if any.
+			new_brokers=${new_brokers%%","}
+			echo $new_brokers
+	  }
+
+	json="{\n"
+	json="$json  \"partitions\": [\n"
+
+	# Actual partition reassignments
+	for topicPartitionReplicas in $(%s --zookeeper %s --describe | grep -w "Leader: %s" | awk '{ print $2"#"$4"#"$6"#"$8 }'); do
+	 #echo "topicPartitionReplicas: $topicPartitionReplicas"
+	  # Note: We use '#' as field separator in awk (see above) and here
+	  # because it is not a valid character for a Kafka topic name.
+	  IFS=$'#' read -a array <<< "$topicPartitionReplicas"
+	  topic="${array[0]}"     # e.g. "zerg.hydra"
+	  partition="${array[1]}" # e.g. "4"
+	  leaders="${array[2]}"
+	  replicas="${array[3]}"  # e.g. "0,8"  (= comma-separated list of broker IDs)
+	  if [ $leaders == $replicas ];then 
+	  	new_replicas=$(replace_broker $replicas %s)
+	  	if [ -z "$new_replicas" ]; then
+			echo "ERROR: Cannot find any replacement broker.  Maybe you have only a single broker in your cluster?"
+			exit 60
+	  	fi
+	  	json="$json    {\"topic\": \"${topic}\", \"partition\": ${partition}, \"replicas\": [${new_replicas}] },\n"
+	  fi
+	done
+
+	# Remove tailing comma, if any.
+	json=${json%%",\n"}
+	json="${json}\n"
+
+	# "Footer" of JSON file
+	json="$json  ],\n"
+	json="$json  \"version\": 1\n"
+	json="${json}}\n"
+
+	# Print JSON to STDOUT
+	echo -e $json
+	`, tempIds, allIds, cst.DefaultTopicBin, zk, brokerId, brokerId)
+	logger.Info("extraCmd, %s", extraCmd)
+	if output, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("gen json failed, %s, %s", output, err.Error())
+		return "", err
+	}
+	logger.Info("output %s", output)
+
+	return output, nil
+}
+
+// GenReplaceReassignmentJson generates a reassignment JSON plan that replaces oldBrokerId with newBrokerId in every replica list.
+func GenReplaceReassignmentJson(oldBrokerId string, newBrokerId string, zk string) (output string, err error) {
+	extraCmd := fmt.Sprintf(`
+	json="{\n"
+	json="$json  \"partitions\": [\n"
+
+	for topicPartitionReplicas in $(%s --zookeeper %s --describe | awk '{ print $2"#"$4"#"$6"#"$8 }'); do
+	  IFS=$'#' read -a array <<< "$topicPartitionReplicas"
+	  topic="${array[0]}"     # e.g. "zerg.hydra"
+	  partition="${array[1]}" # e.g. "4"
+	  leaders="${array[2]}"
+	  replicas="${array[3]}"  # e.g. "0,8"  (= comma-separated list of broker IDs)
+	  if [[ $replicas =~ %s ]];then 
+	  	new_replicas=${replicas/%s/%s}
+	  	if [ -z "$new_replicas" ]; then
+			echo "ERROR: Cannot find any replacement broker.  Maybe you have only a single broker in your cluster?"
+			exit 60
+	  	fi
+	  	json="$json    {\"topic\": \"${topic}\", \"partition\": ${partition}, \"replicas\": [${new_replicas}] },\n"
+	  fi
+	done
+
+	# Remove tailing comma, if any.
+	json=${json%%",\n"}
+	json="${json}\n"
+
+	# "Footer" of JSON file
+	json="$json  ],\n"
+	json="$json  \"version\": 1\n"
+	json="${json}}\n"
+
+	# Print JSON to STDOUT
+	echo -e $json
+	`, cst.DefaultTopicBin, zk, oldBrokerId, oldBrokerId, newBrokerId)
+	logger.Info("extraCmd, %s", extraCmd)
+	if output, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("gen json failed, %s, %s", output, err.Error())
+		return "", err
+	}
+	logger.Info("output %s", output)
+
+	return output, nil
+}
+
+// DoReassignPartitions executes a partition reassignment from a JSON plan file.
+func DoReassignPartitions(zk string, jsonFile string) error {
+
+	extraCmd := fmt.Sprintf(`%s --zookeeper %s --reassignment-json-file %s --execute`, cst.DefaultReassignPartitionsBin,
+		zk, jsonFile)
+	logger.Info("extraCmd: %s", extraCmd)
+	if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("exec reassignparttions failed, [%s], [%s]", output, err.Error())
+		return err
+	}
+	return nil
+}
+
+// CheckReassignPartitions verifies a reassignment and returns the lines that are still in progress or failed.
+func CheckReassignPartitions(zk string, jsonFile string) (output string, err error) {
+	extraCmd := fmt.Sprintf(`%s --zookeeper %s --reassignment-json-file %s --verify|egrep -v 'Status|successfully'`,
+		cst.DefaultReassignPartitionsBin,
+		zk, jsonFile)
+	logger.Info("cmd: [%s]", extraCmd)
+	// the status line itself is intentionally not checked here
+	output, _ = osutil.ExecShellCommand(false, extraCmd)
+	logger.Info("output %s", output)
+	return strings.TrimSuffix(output, "\n"), nil
+}
+
+// GetTopics returns all topics registered in zookeeper.
+func GetTopics(zk string) (topicList []string, err error) {
+	extraCmd := fmt.Sprintf(`%s --zookeeper %s --list`, cst.DefaultTopicBin, zk)
+	logger.Info("cmd: [%s]", extraCmd)
+	output, err := osutil.ExecShellCommand(false, extraCmd)
+	if err != nil {
+		logger.Error("获取kafka topic列表失败 %v", err)
+		return topicList, err
+	}
+	topicList = strings.Split(strings.TrimSuffix(output, "\n"), "\n")
+	return topicList, nil
+}
+
+// GenerateReassginFile generates a reassignment plan moving topic onto the brokers in idStrs and writes it to <DefaultKafkaEnv>/<host>/<topic>.json.
+func GenerateReassginFile(zk, topic, idStrs, host string) error {
+	topicJson := fmt.Sprintf(`
+	{
+		"version": 1,
+		"topics": [
+			{ "topic": "%s"}
+		]
+	}`, topic)
+	topicFile := "/tmp/topic.json"
+	if err := ioutil.WriteFile(topicFile, []byte(topicJson), 0644); err != nil {
+		logger.Error("write %s failed, %v", topicFile, err)
+		return err
+	}
+	extraCmd := fmt.Sprintf(
+		`%s  --zookeeper %s  --topics-to-move-json-file %s --broker-list %s --generate | egrep -A1 ^Proposed|egrep -v ^Proposed`,
+		cst.DefaultReassignPartitionsBin,
+		zk, topicFile, idStrs)
+	logger.Info("cmd: [%s]", extraCmd)
+	output, err := osutil.ExecShellCommand(false, extraCmd)
+	if err != nil {
+		logger.Error("生成迁移计划失败, %v", err)
+		return err
+	}
+	logger.Info("迁移计划json: [%s]", output)
+	// /data/kafkaenv/{host}/topic1.json
+	jsonDir := fmt.Sprintf("%s/%s", cst.DefaultKafkaEnv, host)
+	// mkdir
+	extraCmd = fmt.Sprintf("mkdir -p %s", jsonDir)
+	logger.Info("cmd: [%s]", extraCmd)
+	_, err = osutil.ExecShellCommand(false, extraCmd)
+	if err != nil {
+		logger.Error("创建目录失败, %v", err)
+		return err
+	}
+
+	planJsonFile := fmt.Sprintf("%s/%s.json", jsonDir, topic)
+	if err := ioutil.WriteFile(planJsonFile, []byte(output), 0644); err != nil {
+		logger.Error("write %s failed, %v", planJsonFile, err)
+		return err
+	}
+	return nil
+}
+
+// findAndDelete removes every occurrence of item from s in place.
+func findAndDelete(s []string, item string) []string {
+	index := 0
+	for _, i := range s {
+		if i != item {
+			s[index] = i
+			index++
+		}
+	}
+	return s[:index]
+}
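+
+// For example, findAndDelete([]string{"1", "2", "3", "2"}, "2") returns
+// []string{"1", "3"}; note that the backing array of s is modified in place.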
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/logger.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/logger.go
new file mode 100644
index 0000000000..b76aad8a27
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/logger.go
@@ -0,0 +1,10 @@
+package util
+
+// LoggerErrorStack logs the stack trace when an error reaches the outermost layer;
+// nothing is logged when err == nil.
+// output is a printf-style logger, passed in so util does not import the
+// logger package and create a circular import.
+func LoggerErrorStack(output func(format string, args ...interface{}), err error) {
+	if err != nil {
+		output("%+v", err)
+	}
+}
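+
+// A minimal usage sketch: pass any printf-style logger so the %+v verb can
+// expand the stack trace attached by github.com/pkg/errors:
+//
+//	util.LoggerErrorStack(logger.Error, err)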
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/cmdexec.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/cmdexec.go
new file mode 100644
index 0000000000..c34ba87aa5
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/cmdexec.go
@@ -0,0 +1,151 @@
+package osutil
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"os/exec"
+	"strings"
+
+	"github.com/golang/glog"
+	"github.com/pkg/errors"
+)
+
+// FileOutputCmd wraps exec.Cmd for commands whose output goes to files; the
+// output files may later be uploaded to a file server (upload is best-effort:
+// on failure the local files are kept).
+type FileOutputCmd struct {
+	exec.Cmd
+	StdOutFile string
+	StdErrFile string
+
+	stdOutFile         *os.File
+	stdErrFile         *os.File
+	stdOutDownloadLink string
+	stdErrDownloadLink string
+}
+
+// GetStdOutDownloadLink returns the download link of the stdout file.
+func (c *FileOutputCmd) GetStdOutDownloadLink() string {
+	return c.stdOutDownloadLink
+}
+
+// GetStdErrDownloadLink returns the download link of the stderr file.
+func (c *FileOutputCmd) GetStdErrDownloadLink() string {
+	return c.stdErrDownloadLink
+}
+
+func (c *FileOutputCmd) initOutputFile() error {
+	if c.StdErrFile == "" {
+		c.StdErrFile = c.StdOutFile
+	}
+	if c.StdOutFile != "" {
+		stdOutFile, err := os.OpenFile(c.StdOutFile, os.O_CREATE|os.O_WRONLY, os.ModePerm)
+		if err != nil {
+			return errors.Wrapf(err, "open std out log file %s failed", c.StdOutFile)
+		}
+		c.stdOutFile = stdOutFile
+		c.Cmd.Stdout = stdOutFile
+	}
+
+	if c.StdOutFile == c.StdErrFile {
+		c.stdErrFile = nil
+		c.Cmd.Stderr = c.stdOutFile
+		return nil
+	}
+
+	if c.StdErrFile != "" {
+		stdErrFile, err := os.OpenFile(c.StdErrFile, os.O_CREATE|os.O_WRONLY, os.ModePerm)
+		if err != nil {
+			return errors.Wrapf(err, "open std err log file %s failed", c.StdErrFile)
+		}
+		c.stdErrFile = stdErrFile
+		c.Cmd.Stderr = stdErrFile
+	}
+	return nil
+}
+
+func (c *FileOutputCmd) closeOutputFile() {
+	if c.stdOutFile != nil {
+		if err := c.stdOutFile.Close(); err != nil {
+			glog.Warningf("close %s failed, err:%s", c.StdOutFile, err.Error())
+		}
+	}
+	if c.stdErrFile != nil {
+		if err := c.stdErrFile.Close(); err != nil {
+			glog.Warningf("close %s failed, err:%s", c.StdErrFile, err.Error())
+		}
+	}
+}
+
+// Run initializes the output files and runs the command to completion.
+func (c *FileOutputCmd) Run() error {
+	if err := c.initOutputFile(); err != nil {
+		return err
+	}
+
+	defer func() {
+		c.closeOutputFile()
+	}()
+
+	return c.Cmd.Run()
+}
+
+// Start initializes the output files and starts the command without waiting.
+func (c *FileOutputCmd) Start() error {
+	if err := c.initOutputFile(); err != nil {
+		return err
+	}
+
+	return c.Cmd.Start()
+}
+
+// Wait waits for the command to exit and closes the output files.
+func (c *FileOutputCmd) Wait() error {
+	defer func() {
+		c.closeOutputFile()
+	}()
+
+	return c.Cmd.Wait()
+}
+
+// RunInBG starts a shell command in the background and returns its pid.
+func RunInBG(isSudo bool, param string) (pid int, err error) {
+	if isSudo {
+		param = "sudo " + param
+	}
+	cmd := exec.Command("bash", "-c", param)
+	err = cmd.Start()
+	if err != nil {
+		return -1, err
+	}
+	return cmd.Process.Pid, nil
+}
+
+// ExecShellCommand runs a shell command.
+// On error it returns stderr; on success it returns stdout.
+func ExecShellCommand(isSudo bool, param string) (stdoutStr string, err error) {
+	if isSudo {
+		param = "sudo " + param
+	}
+	cmd := exec.Command("bash", "-c", param)
+
+	var stdout, stderr bytes.Buffer
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+	err = cmd.Run()
+	if err != nil {
+		// return stderr.String(), err
+		return stderr.String(), errors.WithMessage(err, stderr.String())
+	}
+	if len(stderr.String()) > 0 {
+		err = fmt.Errorf("execute shell command(%s) error:%s", param, stderr.String())
+		return stderr.String(), err
+	}
+	return stdout.String(), nil
+}
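+
+// A minimal usage sketch (hypothetical command):
+//
+//	out, err := osutil.ExecShellCommand(false, "hostname -I")
+//	if err != nil {
+//		// out holds stderr here, not stdout
+//	}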
+
+// CleanExecShellOutput trims the output and removes embedded newlines.
+func CleanExecShellOutput(s string) string {
+	return strings.ReplaceAll(strings.TrimSpace(s), "\n", "")
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/crontab.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/crontab.go
new file mode 100644
index 0000000000..13a5e39808
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/crontab.go
@@ -0,0 +1,252 @@
+/*
+ * @Description: host crontab operations
+ */
+
+package osutil
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"math/rand"
+	"os"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/robfig/cron/v3"
+)
+
+// CrontabLockFile is the flock path used to serialize crontab edits.
+const CrontabLockFile = "/home/mysql/.crontab_lock"
+
+// RemoveUserCrontab removes the whole crontab of the given user.
+func RemoveUserCrontab(user string) error {
+	cmd := fmt.Sprintf("crontab -u %s -r ", user)
+	output, err := ExecShellCommand(false, cmd)
+	if err != nil {
+		logger.Info("%s. %s", output, err.Error())
+		return err
+	}
+	return nil
+}
+
+// GetStatusCrontab returns the get_status.pl monitor crontab entry.
+func GetStatusCrontab(user string) string {
+	newCrontab := make([]string, 0)
+	newCrontab = append(newCrontab, fmt.Sprintf(
+		"#get_status.pl: mysql monitor and report status to tnm, distribute at %s by %s", time.Now().Format(cst.TIMELAYOUT),
+		user))
+	newCrontab = append(newCrontab, "*/5 * * * * /home/mysql/monitor/get_status.pl 1>/dev/null 2>&1 \n")
+	return strings.Join(newCrontab, "\n")
+}
+
+// RemoveSystemCrontab removes the crontab entries of the mysql user that
+// match removeKey, keeping all other entries.
+func RemoveSystemCrontab(removeKey string) (err error) {
+	var (
+		crontabs = make([]string, 0)
+		output   string
+	)
+
+	output, err = ListCrontb("mysql")
+	if err != nil {
+		return err
+	}
+
+	formerCrontab := strings.Split(output, "\n")
+	logger.Info("formerCrontab:%#v \n len(formerCrontab):%d", formerCrontab, len(formerCrontab))
+	for _, crontab := range formerCrontab {
+		if regexp.MustCompile(`^\s*$`).MatchString(crontab) || strings.Contains(crontab, "DO NOT EDIT THIS FILE") ||
+			strings.Contains(crontab, "cron installed") || strings.Contains(crontab, "Cron version") ||
+			strings.Contains(crontab, "installed on") || strings.Contains(crontab, removeKey) {
+			continue
+		}
+		crontabs = append(crontabs, crontab)
+	}
+	crontabStr := strings.Join(crontabs, "\n")
+	err = ExecCrontab(crontabStr)
+	if err != nil {
+		return err
+	}
+	result, err := IsCrontabKeyExist(removeKey)
+	if err != nil {
+		return err
+	}
+	if result {
+		err = fmt.Errorf("remove %s failed ,pls execute %s to check it mannually", removeKey, crontabList)
+		return err
+	}
+	return nil
+}
+
+// ListCrontb lists the crontab of the given user; a missing crontab is not
+// treated as an error.
+func ListCrontb(user string) (output string, err error) {
+	crontabList := fmt.Sprintf("crontab -u %s -l|egrep -v ^$ || true", user)
+	// "crontab -u " + user + " -l"
+	output, err = ExecShellCommand(false, crontabList)
+	if err != nil {
+		err = fmt.Errorf("execute [%s] get an error:%w,%s", crontabList, err, output)
+		if strings.Contains(output, "no crontab for") {
+			return "", nil
+		} else {
+			return "", err
+		}
+	}
+	return output, err
+}
+
+// AddCrontab appends the given entries to the mysql user's existing crontab.
+func AddCrontab(crontab string) error {
+	output, err := ListCrontb("mysql")
+	if err != nil {
+		return err
+	}
+
+	crontab = output + "\n" + crontab
+	err = ExecCrontab(crontab)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// ExecCrontab installs the given crontab content for the mysql user.
+func ExecCrontab(crontab string) error {
+	cmd := fmt.Sprintf("echo -e '%s' | crontab - -u mysql", crontab)
+	output, err := ExecShellCommand(false, cmd)
+	if err != nil {
+		logger.Info("%s. %s", output, err.Error())
+		return err
+	}
+	return nil
+}
+
+// IsCrontabKeyExist reports whether the mysql user's crontab contains key.
+func IsCrontabKeyExist(key string) (bool, error) {
+	var (
+		output string
+		err    error
+	)
+	output, err = ListCrontb("mysql")
+	if err != nil {
+		return false, err
+	}
+	if strings.Contains(output, key) {
+		return true, nil
+	}
+	return false, nil
+}
+
+// CrontabsExist returns which of the given crontab keys are present in the
+// mysql user's crontab.
+func CrontabsExist(crontabKeys []string) (existCrontabs []string, err error) {
+	output, err := ListCrontb("mysql")
+	if err != nil {
+		return nil, err
+	}
+	for _, key := range crontabKeys {
+		if strings.Contains(output, key) {
+			existCrontabs = append(existCrontabs, key)
+		}
+	}
+	return
+}
+
+// ValidateCronExpr checks a 5-field crontab expression (e.g. "* * * 3 5");
+// a non-nil error means the expression is invalid.
+func ValidateCronExpr(cronstr string) (err error) {
+	specParser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow)
+	_, err = specParser.Parse(cronstr)
+	return
+}
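+
+// For example, ValidateCronExpr("*/5 * * * *") returns nil, while
+// ValidateCronExpr("61 * * * *") returns a parse error (61 is not a valid minute).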
+
+// CleanLocalCrontab cleans crontab entries by exporting them, filtering with grep -v and re-importing the result.
+func CleanLocalCrontab() error {
+	var (
+		randnum            = rand.Intn(10000)
+		tmpCronFile        = fmt.Sprintf("/tmp/cron_%s_%d.crd", time.Now().Format(cst.TIMELAYOUTSEQ), randnum)
+		getStatusPL        = "/home/mysql/monitor/get_status.pl"
+		dbBackupOld        = "/home/mysql/dbbackup/dbbackup.sh"
+		dbBackupNew        = "/home/mysql/dbbackup/dbbackup.pl"
+		dbBackupMulti      = "/home/mysql/dbbackup/dbbackup_main.sh"
+		dbBackupXtrabackup = "/home/mysql/dbbackup/xtrabackup/xtrabackup_main.sh"
+		rotateLog          = "/home/mysql/rotate_logbin/rotate_logbin.pl"
+		proxyStatus        = "/home/mysql/proxy_monitor/get_proxy_status.pl"
+		slaveSync          = "/home/mysql/monitor/master_slave_sync_check.pl"
+		tbinlogdumperStatus = "tbinlogdumper_status.pl"
+		prometheus         = "prometheus"
+	)
+	cleanCrontabs := []string{getStatusPL, dbBackupOld, dbBackupNew, dbBackupMulti, dbBackupXtrabackup, rotateLog,
+		proxyStatus, slaveSync, tbinlogdumperStatus, prometheus}
+
+	existCrontabs, err := CrontabsExist(cleanCrontabs)
+	if err != nil {
+		return err
+	}
+	// nothing needs cleaning; return success directly
+	if len(existCrontabs) <= 0 {
+		return nil
+	}
+	logger.Info("还存在的Crontabs %v", existCrontabs)
+	// export the mysql user's crontab and filter out the entries to clean
+	shellCMD := "/usr/bin/crontab -u mysql -l"
+	for _, v := range existCrontabs {
+		shellCMD += fmt.Sprintf("|grep -v %s", v)
+	}
+	shellCMD += fmt.Sprintf(">%s 2>&1", tmpCronFile)
+	output, err := ExecShellCommand(false, shellCMD)
+	if err != nil {
+		err = fmt.Errorf("execute [%s] get an error:%s,%s", shellCMD, output, err.Error())
+		logger.Warn(err.Error())
+		// grep 没有找到结果也认为是失败的,这个地方不能当做错误返回。
+	}
+	// re-import the filtered crontab file
+	shellCMD = fmt.Sprintf("/usr/bin/crontab -u mysql %s 2>&1", tmpCronFile)
+	output, err = ExecShellCommand(false, shellCMD)
+	if err != nil {
+		err = fmt.Errorf("execute [%s] get an error:%s", shellCMD, output)
+		logger.Error(err.Error())
+		return err
+	}
+	// crontab may still fire within the next minute, so delete the monitor script too; if it cannot run, it cannot alert
+	if err := os.RemoveAll(getStatusPL); err != nil {
+		err = fmt.Errorf("rm %s failed, err:%s", getStatusPL, err.Error())
+		logger.Error(err.Error())
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/crontab_test.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/crontab_test.go
new file mode 100644
index 0000000000..5bd2eea7da
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/crontab_test.go
@@ -0,0 +1,6 @@
+package osutil
+
+import "testing"
+
+func TestAddCrontab(t *testing.T) {
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/mountpoint.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/mountpoint.go
new file mode 100644
index 0000000000..088648b68f
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/mountpoint.go
@@ -0,0 +1,70 @@
+package osutil
+
+import (
+	"strconv"
+	"strings"
+)
+
+// IsDataDirOk reports whether the mount point of filepath has enough free space.
+func IsDataDirOk(filepath string) bool {
+	mountPaths := GetMountPathInfo()
+	if m, ok := mountPaths[filepath]; ok {
+		// pass only when the mount point has more than 6144 MB available
+		return m.AvailSizeMB > 6144
+	}
+	// no mount point found
+	return false
+}
+
+// MountPath holds one row of df output for a mount point.
+type MountPath struct {
+	Filesystem  string
+	TotalSizeMB int64
+	UsedSizeMB  int64
+	AvailSizeMB int64
+	UsePct      int
+	Path        string
+}
+
+// ParseDfOutput parses df -hm output into MountPath entries keyed by mount point.
+func ParseDfOutput(rawOutput string) map[string]*MountPath {
+	mountPaths := make(map[string]*MountPath)
+	lines := strings.Split(rawOutput, "\n")
+	for i, line := range lines {
+		// skip headers
+		if i == 0 {
+			continue
+		}
+
+		fields := strings.Fields(line)
+		if len(fields) != 6 {
+			continue
+		}
+		mountPath := &MountPath{
+			Path:       fields[5],
+			Filesystem: fields[0],
+		}
+		mountPath.TotalSizeMB, _ = strconv.ParseInt(fields[1], 10, 64)
+		mountPath.UsedSizeMB, _ = strconv.ParseInt(fields[2], 10, 64)
+		mountPath.AvailSizeMB, _ = strconv.ParseInt(fields[3], 10, 64)
+		mountPath.UsePct, _ = strconv.Atoi(strings.TrimSuffix(fields[4], "%"))
+
+		mountPaths[fields[5]] = mountPath
+	}
+	return mountPaths
+}
+
+// GetMountPathInfo runs df -hm and returns the parsed mount point info.
+func GetMountPathInfo() map[string]*MountPath {
+	cmdDfm, err := ExecShellCommand(false, "df -hm")
+	if err != nil {
+		return nil
+	}
+	return ParseDfOutput(cmdDfm)
+}
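+
+// A minimal usage sketch: look up the mount point of /data and read its free
+// space (df -m reports sizes in 1MB blocks):
+//
+//	if mp, ok := osutil.GetMountPathInfo()["/data"]; ok {
+//		logger.Info("/data has %d MB available", mp.AvailSizeMB)
+//	}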
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/netutil.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/netutil.go
new file mode 100644
index 0000000000..afb78b336e
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/netutil.go
@@ -0,0 +1,26 @@
+package osutil
+
+import (
+	"net"
+	"strconv"
+	"time"
+)
+
+// IsPortUp reports whether every given port on host is accepting TCP connections.
+func IsPortUp(host string, ports ...int) bool {
+	for _, port := range ports {
+		timeout := time.Second
+		hostPort := net.JoinHostPort(host, strconv.Itoa(port))
+		conn, err := net.DialTimeout("tcp", hostPort, timeout)
+		if err != nil {
+			return false
+		}
+		conn.Close()
+	}
+	return len(ports) > 0
+}
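+
+// For example, IsPortUp("127.0.0.1", 9092) reports whether a local Kafka
+// broker is accepting TCP connections on its default port.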
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/osutil.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/osutil.go
new file mode 100644
index 0000000000..0bd450112d
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/osutil.go
@@ -0,0 +1,649 @@
+// Package osutil provides OS-level helper utilities.
+package osutil
+
+import (
+	"bufio"
+	"bytes"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"io"
+	"math"
+	"math/rand"
+	"net"
+	"os"
+	"os/exec"
+	"os/user"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+	"unicode"
+
+	"github.com/dustin/go-humanize"
+	"github.com/pkg/errors"
+)
+
+// Ucfirst upper-cases the first rune of str.
+func Ucfirst(str string) string {
+	for i, v := range str {
+		return string(unicode.ToUpper(v)) + str[i+1:]
+	}
+	return ""
+}
+
+// HasElem reports whether slice (a slice or array) contains elem.
+func HasElem(elem interface{}, slice interface{}) bool {
+	defer func() {
+		if err := recover(); err != nil {
+			logger.Error("HasElem error %s", err)
+		}
+	}()
+	arrV := reflect.ValueOf(slice)
+	if arrV.Kind() == reflect.Slice || arrV.Kind() == reflect.Array {
+		for i := 0; i < arrV.Len(); i++ {
+			// XXX - panics if slice element points to an unexported struct field
+			// see https://golang.org/pkg/reflect/#Value.Interface
+			if reflect.DeepEqual(arrV.Index(i).Interface(), elem) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// ChangeValueToString converts a value to its string form; supported types
+// are string, int, int64, float64 and bool.
+func ChangeValueToString(value interface{}) (string, error) {
+	var result string
+	if item, ok := value.(string); ok {
+		result = item
+	} else if item1, ok := value.(int); ok {
+		result = strconv.Itoa(item1)
+	} else if item2, ok := value.(int64); ok {
+		result = strconv.FormatInt(item2, 10)
+	} else if item3, ok := value.(float64); ok {
+		result = strconv.FormatFloat(item3, 'f', -1, 64)
+	} else if item4, ok := value.(bool); ok {
+		result = strconv.FormatBool(item4)
+	} else {
+		return result, errors.New("[ChangeValueToString]value type unknow,not in (string,int,int64,float64,bool)")
+	}
+	return result, nil
+}
+
+// GetLocalIP returns the local non-loopback IPv4 address.
+func GetLocalIP() (string, error) {
+	var localIP string
+	var err error
+	addrs, err := net.InterfaceAddrs()
+	if err != nil {
+		return localIP, err
+	}
+	for _, addr := range addrs {
+		if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
+			if ipnet.IP.To4() != nil {
+				localIP = ipnet.IP.String()
+				return localIP, nil
+			}
+		}
+	}
+	err = fmt.Errorf("can't find local ip")
+	return localIP, err
+}
+
+// StringToMap splits srcStr by the separator regexp seq into a set,
+// deduplicating entries, e.g. "db1,,db2,db3,db2" becomes {db1, db2, db3}.
+func StringToMap(srcStr string, seq string) map[string]struct{} {
+	splitReg := regexp.MustCompile(seq)
+	strList := splitReg.Split(srcStr, -1)
+	strMap := make(map[string]struct{})
+	for _, str := range strList {
+		if len(strings.TrimSpace(str)) == 0 {
+			continue
+		}
+		strMap[strings.TrimSpace(str)] = struct{}{}
+	}
+	return strMap
+}
+
+// StrSliceToMap converts a string slice into a set, deduplicating entries.
+func StrSliceToMap(srcStrSlice []string) map[string]struct{} {
+	strMap := make(map[string]struct{})
+	for _, str := range srcStrSlice {
+		if len(strings.TrimSpace(str)) == 0 {
+			continue
+		}
+		strMap[strings.TrimSpace(str)] = struct{}{}
+	}
+	return strMap
+}
+
+// MapKeysToSlice returns the keys of mapObj as a slice.
+func MapKeysToSlice(mapObj map[string]struct{}) []string {
+	keys := make([]string, len(mapObj))
+
+	i := 0
+	for k := range mapObj {
+		keys[i] = k
+		i++
+	}
+	return keys
+}
+
+// IntnRange returns a random int in [min, max).
+func IntnRange(min, max int) int {
+	rand.Seed(time.Now().Unix())
+	return rand.Intn(max-min) + min
+}
+
+// GetFileModifyTime reports whether filename exists and, if so, its mtime as a unix timestamp.
+func GetFileModifyTime(filename string) (bool, int64) {
+	if _, err := os.Stat(filename); !os.IsNotExist(err) {
+		f, err1 := os.Open(filename)
+		if err1 != nil {
+			return true, 0
+		}
+		fi, err2 := f.Stat()
+		if err2 != nil {
+			return true, 0
+		}
+		return true, fi.ModTime().Unix()
+	}
+	return false, 0
+}
+
+// GetMySQLBaseDir extracts --basedir from the mysqld process matching grepstr.
+func GetMySQLBaseDir(grepstr string) (string, error) {
+	strCmd := fmt.Sprintf(`ps -ef | grep 'mysqld '|grep basedir | grep %s| grep -v grep`, grepstr)
+	data, err := ExecShellCommand(false, strCmd)
+	reg := regexp.MustCompile(`--basedir=[/A-Za-z_]*`)
+	tmparr := reg.FindAllString(data, -1)
+	if len(tmparr) != 1 {
+		return "", errors.New("get basedir unexpected")
+	}
+	basedir := strings.Split(strings.TrimSpace(tmparr[0]), "=")
+	if len(basedir) != 2 || strings.TrimSpace(basedir[1]) == "" {
+		return "", fmt.Errorf("get base dir error:%v", basedir)
+	}
+	return strings.TrimSpace(basedir[1]), err
+}
+
+// GetMySQLBinDir returns the bin directory under the mysqld basedir.
+func GetMySQLBinDir(getstr string) (string, error) {
+	basedir, err := GetMySQLBaseDir(getstr)
+	if err != nil {
+		return "", err
+	}
+	if !strings.HasPrefix(basedir, "/") {
+		return "", fmt.Errorf("basedir must start at /")
+	}
+	return strings.TrimRight(basedir, "/") + "/bin", nil
+}
+
+// MakeSoftLink src and dest are absolute path with filename
+func MakeSoftLink(src string, dest string, force bool) error {
+	if !FileExist(src) {
+		return errors.New("src file does not exists")
+	}
+	if src == dest {
+		return nil
+	}
+	if FileExist(dest) {
+		if !force {
+			return errors.New("dest file exists")
+		}
+		if err := os.Remove(dest); err != nil {
+			logger.Warn("remove file %s failed, err:%s", dest, err.Error())
+		}
+	}
+	cmd := exec.Command("ln", "-s", src, dest)
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		logger.Error("ln -s failed, output:%s, err:%s", string(out), err.Error())
+	}
+	return err
+}
+
+// MakeHardLink creates a hard link from src to dest.
+func MakeHardLink(src string, dest string) error {
+	if !FileExist(src) {
+		return errors.New("src file does not exists")
+	} else if FileExist(dest) {
+		return errors.New("dest file already exists")
+	}
+	if err := os.Link(src, dest); err != nil {
+		return err
+	}
+	return nil
+}
+
+// CheckFileExistWithPath reports whether filename exists, resolving relative names against dirname.
+func CheckFileExistWithPath(filename, dirname string) bool {
+	var destFile string
+	if strings.HasPrefix(filename, "/") {
+		destFile = filename
+	} else {
+		destFile = fmt.Sprintf(`%s/%s`, dirname, filename) // app_149/ulog/xxxx.ulog
+	}
+
+	if _, err := os.Stat(destFile); err != nil {
+		if os.IsNotExist(err) {
+			return false
+		}
+		return false
+	}
+	return true
+}
+
+// CheckAndMkdir mkdir ppathname/pathname
+func CheckAndMkdir(pathname, ppathname string) error {
+	if !CheckFileExistWithPath(pathname, ppathname) {
+		return os.MkdirAll(ppathname+"/"+pathname, 0755)
+	}
+	return nil
+}
+
+// ParsePsOutput extracts the first field of each ps output line, skipping the
+// header line, which is the cmd string itself (caught by ps after bash -c).
+func ParsePsOutput(rawOutput string) string {
+	var output []string
+	lines := strings.Split(rawOutput, "\n")
+	for i, line := range lines {
+		// skip headers
+		if i == 0 {
+			continue
+		}
+
+		fields := strings.Fields(line)
+		if len(fields) == 0 {
+			continue
+		}
+		output = append(output, fields[0])
+	}
+	return strings.Join(output, "\n")
+}
+
+// FileExist reports whether fileName exists.
+func FileExist(fileName string) bool {
+	_, err := os.Stat(fileName)
+	if err != nil {
+		if os.IsExist(err) {
+			return true
+		}
+		return false
+	}
+	return true
+}
+
+// GetFileMd5 returns the md5 checksum of file via md5sum.
+func GetFileMd5(file string) (string, error) {
+	cmd := "md5sum " + file
+	data, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput()
+	if err != nil {
+		return "", err
+	}
+	reg, err := regexp.Compile(`\s+`)
+	if err != nil {
+		return "", err
+	}
+	array := reg.Split(string(data), -1)
+	if len(array) != 3 {
+		return "", errors.New("data result len wrong ,not 3,is " + strconv.Itoa(len(array)))
+	}
+	return array[0], nil
+}
+
+// GetLinuxDisksInfo returns df information for all local non-overlay, non-tmpfs filesystems.
+func GetLinuxDisksInfo() ([]DiskInfo, error) {
+	var res []DiskInfo
+	cmd := "df -l|grep -vE 'Filesystem|overlay|tmpfs'"
+	data, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput()
+	if err != nil {
+		return res, err
+	}
+	reg, err := regexp.Compile(`\n+`)
+	if err != nil {
+		return res, err
+	}
+	strs := reg.Split(string(data), -1)
+
+	for _, row := range strs {
+		if strings.TrimSpace(row) == "" {
+			continue
+		}
+		result := DiskInfo{}
+		reg, err := regexp.Compile(`\s+`)
+		if err != nil {
+			return res, err
+		}
+		array := reg.Split(row, -1)
+		if len(array) == 6 {
+			result.Filesystem = array[0]
+			result.Blocks_1K = array[1]
+			result.Used, err = strconv.Atoi(array[2])
+			if err != nil {
+				return res, err
+			}
+			result.Available, err = strconv.ParseInt(array[3], 10, 64)
+			if err != nil {
+				return res, err
+			}
+			result.UsedRate = array[4]
+			result.MountedOn = array[5]
+
+			res = append(res, result)
+		} else {
+			return res, errors.New("data result len wrong ,not 6,is " + strconv.Itoa(len(array)))
+		}
+	}
+
+	return res, nil
+}
+
+// GetCurrentUser returns the current os user via whoami.
+func GetCurrentUser() (string, error) {
+	var currentUser = ""
+	cmd := `whoami`
+	data, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput()
+	if err != nil {
+		return currentUser, fmt.Errorf(err.Error() + ",cmd:" + cmd)
+	}
+	reg, err := regexp.Compile(`\n+`)
+	if err != nil {
+		return currentUser, err
+	}
+	array := reg.Split(string(data), -1)
+	if len(array) == 2 {
+		currentUser = array[0]
+	} else {
+		return currentUser, fmt.Errorf("get currentUser fail,len not 2,array:%s", strings.Join(array, ";"))
+	}
+
+	return currentUser, nil
+}
+
+// GetLinuxDirDiskInfo returns df information for the filesystem containing dir.
+func GetLinuxDirDiskInfo(dir string) (DiskInfo, error) {
+	result := DiskInfo{}
+	cmd := fmt.Sprintf("df -l %s|grep -v Filesystem", dir)
+	data, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput()
+	if err != nil {
+		return result, err
+	}
+	reg, err := regexp.Compile(`\s+`)
+	if err != nil {
+		return result, err
+	}
+	array := reg.Split(string(data), -1)
+	if len(array) == 7 {
+		result.Filesystem = array[0]
+		result.Blocks_1K = array[1]
+		result.Used, err = strconv.Atoi(array[2])
+		if err != nil {
+			return result, err
+		}
+		result.Available, err = strconv.ParseInt(array[3], 10, 64)
+		if err != nil {
+			return result, err
+		}
+		result.UsedRate = array[4]
+		result.MountedOn = array[5]
+	} else {
+		return result, errors.New("data result len wrong ,not 7,is " + strconv.Itoa(len(array)))
+	}
+
+	return result, nil
+}
+
+// DiskInfo is one row of df output.
+type DiskInfo struct {
+	Filesystem string `json:"filesystem"`
+	Blocks_1K  string `json:"blocks_1K"`
+	Used       int    `json:"used"`
+	Available  int64  `json:"available"`
+	UsedRate   string `json:"usedRate"`
+	MountedOn  string `json:"MountedOn"`
+}
+
+// SplitName splits a user-supplied list (e.g. of IPs) into a string slice,
+// treating runs of \r, \s, ;, \n and , as separators.
+func SplitName(input string) ([]string, error) {
+	if reg, err := regexp.Compile(`\r+|\s+|;+|\n+`); err != nil {
+		return nil, err
+	} else {
+		input = reg.ReplaceAllString(input, ",")
+	}
+	if reg, err := regexp.Compile(`^,+|,+$`); err != nil {
+		return nil, err
+	} else {
+		input = reg.ReplaceAllString(input, "")
+	}
+	if reg, err := regexp.Compile(`,+`); err != nil {
+		return nil, err
+	} else {
+		input = reg.ReplaceAllString(input, ",")
+	}
+	result := strings.Split(input, ",")
+	return result, nil
+}
+
+// Uniq deduplicates a string slice.
+func Uniq(input []string) []string {
+	var newData []string
+	if len(input) > 0 {
+		temp := map[string]bool{}
+		for _, value := range input {
+			temp[value] = true
+		}
+		for k := range temp {
+			newData = append(newData, k)
+		}
+	}
+	return newData
+}
+
+// GetUidGid returns the uid and gid of the given os user.
+func GetUidGid(osuser string) (int, int, error) {
+	group, err := user.Lookup(osuser)
+	if err != nil {
+		logger.Info("Failed to lookup user %s", osuser)
+		return 0, 0, err
+	}
+
+	uid, err := strconv.Atoi(group.Uid)
+	if err != nil {
+		logger.Info("Convert Uid for %s : `%s` failed", osuser, group.Uid)
+		return 0, 0, err
+	}
+
+	gid, err := strconv.Atoi(group.Gid)
+	if err != nil {
+		logger.Info("Convert Gid for %s : `%s` failed", osuser, group.Gid)
+		return 0, 0, err
+	}
+
+	return uid, gid, err
+}
+
+// FileLineCounter counts the lines in a file.
+// Reference: https://stackoverflow.com/questions/24562942/golang-how-do-i-determine-the-number-of-lines-in-a-file-efficiently
+func FileLineCounter(filename string) (lineCnt uint64, err error) {
+	_, err = os.Stat(filename)
+	if err != nil && os.IsNotExist(err) {
+		return 0, fmt.Errorf("file:%s not exists", filename)
+	}
+	file, err := os.Open(filename)
+	if err != nil {
+		return 0, fmt.Errorf("file:%s open fail,err:%w", filename, err)
+	}
+	defer func() {
+		if err := file.Close(); err != nil {
+			logger.Warn("close file %s failed, err:%s", filename, err.Error())
+		}
+	}()
+	reader01 := bufio.NewReader(file)
+	buf := make([]byte, 32*1024)
+	lineCnt = 0
+	lineSep := []byte{'\n'}
+
+	for {
+		c, err := reader01.Read(buf)
+		lineCnt += uint64(bytes.Count(buf[:c], lineSep))
+
+		switch {
+		case err == io.EOF:
+			return lineCnt, nil
+
+		case err != nil:
+			return lineCnt, fmt.Errorf("file:%s read fail,err:%w", filename, err)
+		}
+	}
+}
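+
+// A minimal usage sketch:
+//
+//	n, err := osutil.FileLineCounter("/etc/hosts")
+//	if err == nil {
+//		logger.Info("/etc/hosts has %d lines", n)
+//	}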
+
+// WrapFileLink wraps a file link into an HTML anchor whose text is the file
+// name (the original format string dropped its arguments; this reconstruction
+// assumes an HTML anchor was intended).
+func WrapFileLink(link string) string {
+	name := filepath.Base(link)
+	return fmt.Sprintf(`<a target="_blank" href="%s">%s</a>`, link, name)
+}
+
+// SetOSUserPassword run set user password by chpasswd
+func SetOSUserPassword(user, password string) error {
+	exec.Command("/bin/bash", "-c", "")
+	cmd := exec.Command("chpasswd")
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		return fmt.Errorf("new pipe failed, err:%w", err)
+	}
+	go func() {
+		_, err := io.WriteString(stdin, fmt.Sprintf("%s:%s", user, password))
+		if err != nil {
+			logger.Warn("write into pipe failed, err:%s", err.Error())
+		}
+		if err := stdin.Close(); err != nil {
+			logger.Warn("colse stdin failed, err:%s", err.Error())
+		}
+	}()
+	if output, err := cmd.CombinedOutput(); err != nil {
+		return fmt.Errorf("run chpasswd failed, output:%s, err:%w", string(output), err)
+	}
+	return nil
+}
+
+// GetNumaStr returns the numactl interleave prefix when a NUMA policy is available.
+func GetNumaStr() string {
+	numaCmd := "numactl --show | grep policy"
+	output, err := ExecShellCommand(false, numaCmd)
+	if err != nil {
+		logger.Error(err.Error())
+		return ""
+	}
+	if len(output) > 0 {
+		return "numactl --interleave=all "
+	}
+	return ""
+}
+
+// SafeRmDir removes dir recursively, refusing to delete the filesystem root.
+func SafeRmDir(dir string) (err error) {
+	if strings.TrimSpace(dir) == "/" {
+		return fmt.Errorf("禁止删除系统根目录")
+	}
+	return os.RemoveAll(dir)
+}
+
+func getFileSize(f string) (int64, error) {
+	fd, err := os.Stat(f)
+	if err != nil {
+		return 0, err
+	}
+	return fd.Size(), nil
+}
+
+// CalcFileSizeIncr returns the growth rate of file f in bytes per second, sampled over secs seconds.
+func CalcFileSizeIncr(f string, secs uint64) string {
+	var err error
+	var t1Size, t2Size int64
+	if t1Size, err = getFileSize(f); err != nil {
+		return "0"
+	}
+	time.Sleep(time.Duration(secs) * time.Second)
+	if t2Size, err = getFileSize(f); err != nil {
+		return "0"
+	}
+
+	bytesIncr := uint64(math.Abs(float64(t2Size-t1Size))) / secs
+	return humanize.Bytes(bytesIncr)
+}
+
+// PrintFileSizeIncr reports file size changes in the background.
+// ch signals exit; the caller should close(ch).
+// It also times out after 2 hours.
+func PrintFileSizeIncr(f string, secs uint64, printInterval uint64,
+	output func(format string, args ...interface{}), ch chan int) {
+	for {
+		speed := CalcFileSizeIncr(f, secs)
+		if speed != "0" {
+			output("file %s change speed %s", f, speed)
+		} else {
+			break
+		}
+		select {
+		case _, beforeClosed := <-ch:
+			if !beforeClosed {
+				return
+			}
+		case <-time.After(2 * time.Hour):
+			return
+		default:
+			time.Sleep(time.Duration(printInterval) * time.Second)
+		}
+	}
+}
+
+// CapturingPassThroughWriter is a writer that remembers
+// data written to it and passes it to w
+type CapturingPassThroughWriter struct {
+	buf bytes.Buffer
+	w   io.Writer
+}
+
+// NewCapturingPassThroughWriter creates new CapturingPassThroughWriter
+func NewCapturingPassThroughWriter(w io.Writer) *CapturingPassThroughWriter {
+	return &CapturingPassThroughWriter{
+		w: w,
+	}
+}
+
+// Write implements io.Writer, capturing d while passing it through to w.
+func (w *CapturingPassThroughWriter) Write(d []byte) (int, error) {
+	w.buf.Write(d)
+	return w.w.Write(d)
+}
+
+// Bytes returns bytes written to the writer
+func (w *CapturingPassThroughWriter) Bytes() []byte {
+	return w.buf.Bytes()
+}
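+
+// A minimal usage sketch: capture a command's stdout while still streaming it
+// (cmd is a hypothetical *exec.Cmd):
+//
+//	w := osutil.NewCapturingPassThroughWriter(os.Stdout)
+//	cmd.Stdout = w
+//	_ = cmd.Run()
+//	captured := w.Bytes()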
+
+// ReadFileString reads the whole file and returns it as a string.
+func ReadFileString(filename string) (string, error) {
+	if body, err := os.ReadFile(filename); err != nil {
+		return "", err
+	} else {
+		return string(body), nil
+	}
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/osutil_test.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/osutil_test.go
new file mode 100644
index 0000000000..d64b392c76
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/osutil_test.go
@@ -0,0 +1,16 @@
+package osutil_test
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"testing"
+)
+
+func TestIsFileExist(t *testing.T) {
+	f := "/tmp/1.txt"
+	d := "/tmp/asdad/"
+	existF := osutil.FileExist(f)
+	existD := osutil.FileExist(d)
+	t.Log("f exist", existF)
+	t.Log("d exist", existD)
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/sysctl.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/sysctl.go
new file mode 100644
index 0000000000..67a5aef9fc
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/sysctl.go
@@ -0,0 +1,59 @@
+package osutil
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+)
+
+// ClearTcpRecycle disables net.ipv4.tcp_tw_recycle and net.ipv4.tcp_tw_reuse
+// in /etc/sysctl.conf and reloads sysctl; failures are logged as warnings.
+func ClearTcpRecycle() error {
+	twRecycleCmd := "grep 'net.ipv4.tcp_tw_recycle=1' /etc/sysctl.conf"
+	result, err := ExecShellCommand(false, twRecycleCmd)
+	if err != nil {
+		err = fmt.Errorf("execute [%s] get an error:%w", twRecycleCmd, err)
+		logger.Warn(err.Error())
+	}
+	if len(result) > 0 {
+		insertTwRecycle := "sed -i -e 's/net.ipv4.tcp_tw_recycle=1/net.ipv4.tcp_tw_recycle=0/g' /etc/sysctl.conf"
+		_, err := ExecShellCommand(false, insertTwRecycle)
+		if err != nil {
+			err = fmt.Errorf("execute [%s] get an error:%w", insertTwRecycle, err)
+			logger.Info(err.Error())
+			return err
+		}
+	}
+	twReuseCmd := "grep 'net.ipv4.tcp_tw_reuse=1' /etc/sysctl.conf"
+	result, err = ExecShellCommand(false, twReuseCmd)
+	if err != nil {
+		err = fmt.Errorf("execute [%s] get an error:%w", twReuseCmd, err)
+		logger.Warn(err.Error())
+	}
+	if len(result) > 0 {
+		insertTwReuse := "sed -i -e 's/net.ipv4.tcp_tw_reuse=1/net.ipv4.tcp_tw_reuse=0/g' /etc/sysctl.conf"
+		_, err := ExecShellCommand(false, insertTwReuse)
+		if err != nil {
+			err = fmt.Errorf("execute [%s] get an error:%w", insertTwReuse, err)
+			logger.Info(err.Error())
+			return err
+		}
+	}
+
+	// Only newer kernels (t-linux2-0044 onwards) support the two parameters above; if the reload fails, just emit a warning.
+	_, err = ExecShellCommand(false, "/sbin/sysctl -p")
+	if err != nil {
+		err = fmt.Errorf("execute [/sbin/sysctl -p] get an error:%w", err)
+		logger.Warn(err.Error())
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/truncate.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/truncate.go
new file mode 100644
index 0000000000..169475663e
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/truncate.go
@@ -0,0 +1,89 @@
+package osutil
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"time"
+)
+
+// TruncateFile shrinks a file step by step at roughly bwlimitMB MB/s, then removes it.
+func TruncateFile(file string, bwlimitMB int) error {
+	f, err := os.OpenFile(file, os.O_RDWR, 0666)
+	if err != nil {
+		return err
+	}
+
+	defer f.Close()
+
+	fi, err := os.Stat(file)
+	if err != nil {
+		return err
+	}
+	totalSize := fi.Size()
+	chunkSizeEverySec := bwlimitMB * 1024 * 1024
+
+	// number of truncate steps per second, >= 1, <= 1000
+	batchEverySec := 10
+
+	// bytes truncated per step
+	chunkSize := chunkSizeEverySec / batchEverySec
+	// interval between steps, in milliseconds
+	chunkInterval := 1000 / batchEverySec
+	logger.Info("bwlimitMB: %d, chunkSize: %d bytes, chunkInterval: %d ms", bwlimitMB, chunkSize, chunkInterval)
+
+	done := make(chan int, 1)
+	defer close(done)
+	go func(chan int) {
+		PrintFileSizeIncr(file, 1, 10, logger.Info, done)
+	}(done)
+	var endOffset int64 = totalSize
+	for {
+		endOffset -= int64(chunkSize)
+		if endOffset <= 0 {
+			break
+		}
+		if err := f.Truncate(endOffset); err != nil {
+			return err
+		}
+		time.Sleep(time.Duration(chunkInterval) * time.Millisecond)
+	}
+	f.Seek(0, 0)
+	f.Sync()
+	if err := os.Remove(file); err != nil {
+		return err
+	}
+	return nil
+}
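+
+// A minimal usage sketch (hypothetical file): shrink a large log at roughly
+// 200 MB/s and remove it once fully truncated:
+//
+//	if err := osutil.TruncateFile("/data/big.log", 200); err != nil {
+//		logger.Error("truncate failed: %v", err)
+//	}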
+
+// TruncateDir truncates and removes every large file directly under path, rate-limited to bwlimitMB MB/s.
+func TruncateDir(path string, bwlimitMB int) error {
+	LargeFile := int64(500 * 1024 * 1024) // files over 500MB are considered large and are deleted via truncate
+	fs, _ := ioutil.ReadDir(path)
+	for _, file := range fs {
+		fullFile := filepath.Join(path, file.Name())
+		if file.IsDir() {
+			fmt.Printf("path %s is dir, ignore\n", fullFile)
+			continue
+		} else {
+			f, e := os.Stat(filepath.Join(path, file.Name()))
+			if e != nil {
+				return e
+			}
+			if f.Size() > LargeFile {
+				// TruncateFile already removes the file once it is fully
+				// truncated, so no second os.Remove is needed here
+				if err := TruncateFile(fullFile, bwlimitMB); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/unix_only.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/unix_only.go
new file mode 100644
index 0000000000..8ae670368a
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/unix_only.go
@@ -0,0 +1,237 @@
+//go:build !windows
+// +build !windows
+
+package osutil
+
+import (
+	"bytes"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"math/rand"
+	"os"
+	"os/exec"
+	"strings"
+	"syscall"
+	"time"
+)
+
+// IsMountPoint determines if a directory is a mountpoint by comparing the
+// device for the directory with the device for its parent. If they are the
+// same it's not a mountpoint; if they differ, it is.
+// reference: https://github.com/cnaize/kubernetes/blob/master/pkg/util/mount/mountpoint_unix.go#L29
+func IsMountPoint(file string) (bool, error) {
+	stat, err := os.Stat(file)
+	if err != nil {
+		return false, err
+	}
+	rootStat, err := os.Lstat(file + "/..")
+	if err != nil {
+		return false, err
+	}
+	// If the directory has the same device as parent, then it's not a mountpoint.
+	return stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev, nil
+}
+
+// FindFirstMountPoint returns the first path that is a mount point, in order of preference.
+func FindFirstMountPoint(paths ...string) (string, error) {
+	for _, path := range paths {
+		if _, err := os.Stat(path); err != nil {
+			if os.IsNotExist(err) {
+				continue
+			}
+		}
+		isMountPoint, err := IsMountPoint(path)
+		if err != nil {
+			return "", fmt.Errorf("check whether mountpoint failed, path: %s, err: %w", path, err)
+		}
+		if isMountPoint {
+			return path, nil
+		}
+	}
+	return "", fmt.Errorf("no available mountpoint found, choices: %#v", paths)
+}
+
+// FindFirstMountPointProxy returns the first path that is a mount point, or whose parent mount point has enough free space.
+func FindFirstMountPointProxy(paths ...string) (string, error) {
+	for _, path := range paths {
+		if _, err := os.Stat(path); err != nil {
+			if os.IsNotExist(err) {
+				continue
+			}
+		}
+		isMountPoint, err := IsMountPoint(path)
+		if err != nil {
+			return "", fmt.Errorf("check whether mountpoint failed, path: %s, err: %w", path, err)
+		}
+		if isMountPoint {
+			return path, nil
+		} else {
+			// if the directory is not a mount point itself, check whether the
+			// mount point holding its parent directory has enough free space
+			// (see IsDataDirOk), e.g. /data1 -> parent /
+			path = strings.TrimSuffix(path, "/")
+			pathSlice := strings.Split(path, "/")
+			if len(pathSlice) < 2 {
+				return "", fmt.Errorf("wrong path %s", path)
+			}
+			pathSlice = pathSlice[:len(pathSlice)-1]
+			parentPath := ""
+			if len(pathSlice) == 1 {
+				parentPath = "/"
+			} else {
+				parentPath = strings.Join(pathSlice, "/")
+			}
+			if IsDataDirOk(parentPath) {
+				return path, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("no available mountpoint found, choices: %#v", paths)
+}
+
+// RunShellCmdAsUser a simple wrapper of Cmd
+// NOTE(wangqingping) len(strings.Join(args, " ")) cannot
+// exceed MAX_ARG_STRLEN, checkout:
+// https://www.linuxjournal.com/article/6060
+func RunShellCmdAsUser(args []string, osuser string) (string, error) {
+	cmd := exec.Command("bash", "-c", strings.Join(args, " "))
+	var outbuff, errbuff bytes.Buffer
+	cmd.Stdout = &outbuff
+	cmd.Stderr = &errbuff
+	uid, gid, err := GetUidGid(osuser)
+	if err != nil {
+		return "", err
+	}
+	cmd.SysProcAttr = &syscall.SysProcAttr{}
+	cmd.SysProcAttr.Credential = &syscall.Credential{Uid: uint32(uid), Gid: uint32(gid)}
+	if err := cmd.Run(); err != nil {
+		logger.Info("Run command failed, cmd `%s` error %s, %s",
+			strings.Join(args, " "), errbuff.String(), err)
+		return "", err
+	} else {
+		logger.Info("Run command `%s` successfully", strings.Join(args, " "))
+	}
+	return outbuff.String(), nil
+}
+
+// RunShellCmdNoWaitAsUser TODO
+// starts the specified command but does not wait for it to complete.
+func RunShellCmdNoWaitAsUser(args []string, osuser string) (string, error) {
+	cmd := exec.Command("bash", "-c", strings.Join(args, " "))
+	var outbuff, errbuff bytes.Buffer
+	cmd.Stdout = &outbuff
+	cmd.Stderr = &errbuff
+	uid, gid, err := GetUidGid(osuser)
+	if err != nil {
+		return "", err
+	}
+	cmd.SysProcAttr = &syscall.SysProcAttr{}
+	cmd.SysProcAttr.Credential = &syscall.Credential{Uid: uint32(uid), Gid: uint32(gid)}
+	if err := cmd.Start(); err != nil {
+		logger.Info("Run command failed, cmd `%s` error %s, %s",
+			strings.Join(args, " "), errbuff.String(), err)
+		return "", err
+	} else {
+		logger.Info("Run command `%s` successfully", strings.Join(args, " "))
+	}
+
+	return outbuff.String(), nil
+}
+
+// Lock acquires an exclusive, non-blocking flock on the directory.
+func (l *DirLock) Lock() error {
+	f, err := os.Open(l.dir)
+	if err != nil {
+		return err
+	}
+	l.f = f
+	err = syscall.Flock(int(l.f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB)
+	if err != nil {
+		return fmt.Errorf("cannot flock directory %s - %w", l.dir, err)
+	}
+	return nil
+}
+
+// Unlock releases the flock and closes the lock file.
+func (l *DirLock) Unlock() error {
+	defer func() {
+		if err := l.f.Close(); err != nil {
+			logger.Warn("close lock file failed, err:%s", err.Error())
+		}
+	}()
+	return syscall.Flock(int(l.f.Fd()), syscall.LOCK_UN)
+}
+
+// GetDirLock acquires the directory lock, retrying until waitTime elapses.
+// Set waitTime = 0 to try exactly once without waiting.
+func GetDirLock(waitTime time.Duration, l *DirLock) error {
+	var (
+		flockErr    = make(chan error, 1)
+		timeoutChan = make(chan struct{})
+		err         error
+	)
+
+	if waitTime == 0 {
+		return l.Lock()
+	}
+
+	go func() {
+		var deadline = time.Now().Add(waitTime)
+		for {
+			err := l.Lock()
+			if err == nil {
+				flockErr <- err
+				return
+			}
+			logger.Error("get file lock error:%s,continue to wait", err)
+			if time.Until(deadline) < 0 {
+				timeoutChan <- struct{}{}
+				return
+			}
+			time.Sleep(time.Duration(7+rand.Intn(7)) * time.Second)
+		}
+	}()
+
+	select {
+	case err := <-flockErr:
+		return err
+	case <-timeoutChan:
+		err = fmt.Errorf("lock file(%s) timeout", l.GetDirName())
+		return err
+	}
+}
+
+// ReleaseDirLock releases the given directory lock.
+func ReleaseDirLock(l *DirLock) error {
+	return l.Unlock()
+}
+
+// DirLock is a flock-based directory lock,
+// from https://github.com/nsqio/nsq/blob/master/internal/dirlock/dirlock.go
+type DirLock struct {
+	dir string
+	f   *os.File
+}
+
+// NewDirLock creates a DirLock on dir, creating the path first if it does not exist.
+func NewDirLock(dir string) *DirLock {
+	isExist := FileExist(dir)
+	if !isExist {
+		f, err := os.OpenFile(dir, os.O_RDWR|os.O_CREATE, 0755)
+		if err != nil {
+			logger.Warn("openFile(%s) error:%s", dir, err)
+		} else {
+			f.Close()
+		}
+	}
+	return &DirLock{
+		dir: dir,
+	}
+}
+
+// GetDirName returns the locked directory path.
+func (l *DirLock) GetDirName() string {
+	return l.dir
+}
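+
+// A minimal usage sketch: serialize crontab edits across processes by
+// flocking a shared path, waiting up to one minute for the lock:
+//
+//	lock := osutil.NewDirLock(osutil.CrontabLockFile)
+//	if err := osutil.GetDirLock(time.Minute, lock); err == nil {
+//		defer osutil.ReleaseDirLock(lock)
+//		// ... edit the crontab ...
+//	}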
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/windows_only.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/windows_only.go
new file mode 100644
index 0000000000..3a8de4e525
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil/windows_only.go
@@ -0,0 +1,16 @@
+//go:build windows
+// +build windows
+
+package osutil
+
+// These stubs exist only so the package compiles on Windows; they are not intended for actual use.
+
+// FindFirstMountPoint find first mountpoint in prefer order
+func FindFirstMountPoint(paths ...string) (string, error) {
+	return "/data", nil
+}
+
+// FindFirstMountPointProxy is a Windows stub for FindFirstMountPoint.
+func FindFirstMountPointProxy(paths ...string) (string, error) {
+	return "/data", nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/pulsarutil/pulsar_helper.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/pulsarutil/pulsar_helper.go
new file mode 100644
index 0000000000..4f9c9d49b3
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/pulsarutil/pulsar_helper.go
@@ -0,0 +1,207 @@
+package pulsarutil
+
+import (
+	"bufio"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+
+	"gopkg.in/ini.v1"
+)
+
+// CheckBrokerConf verifies that the remaining bookie count satisfies the
+// EnsembleSize/Qw/Qa constraints configured in broker.conf.
+func CheckBrokerConf(bkNum int) (err error) {
+	// read EnsembleSize, Qw and Qa from broker.conf
+	brokerCfg, err := ini.Load(cst.DefaultPulsarBrokerConf)
+	if err != nil {
+		logger.Error("Failed to read config file %s, reason: %v", cst.DefaultPulsarBrokerConf, err)
+		return err
+	}
+	ensembleSize := brokerCfg.Section("").Key("managedLedgerDefaultEnsembleSize").MustInt()
+	writeQuorumSize := brokerCfg.Section("").Key("managedLedgerDefaultWriteQuorum").MustInt()
+	ackQuorumSize := brokerCfg.Section("").Key("managedLedgerDefaultAckQuorum").MustInt()
+	if bkNum >= ensembleSize && ensembleSize >= writeQuorumSize && writeQuorumSize >= ackQuorumSize {
+		return nil
+	}
+	logger.Error("Bookie can be decommissioned only when Num(RemainBookie) >= EnsembleSize >= Qw >= Qa, "+
+		"however, Num(RemainBookie)=%v, E=%v, Qw=%v, Qa=%v",
+		bkNum, ensembleSize, writeQuorumSize, ackQuorumSize)
+	return errors.New("num(RemainBookie) >= EnsembleSize >= Qw >= Qa is not satisfied")
+}
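+
+// For example, with managedLedgerDefaultEnsembleSize=3, WriteQuorum=2 and
+// AckQuorum=2, CheckBrokerConf(3) passes while CheckBrokerConf(2) fails, so a
+// bookie may only be decommissioned while at least 3 bookies would remain.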
+
+// GetAllTenant returns the names of all tenants.
+func GetAllTenant() ([]string, error) {
+	// list all tenants via pulsar-admin
+	extraCmd := fmt.Sprintf("%s/bin/pulsar-admin tenants list", cst.DefaultPulsarBrokerDir)
+	logger.Info("list all tenants, [%s]", extraCmd)
+	tenantListStr, err := osutil.ExecShellCommand(false, extraCmd)
+	if err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return nil, err
+	}
+	br := bufio.NewReader(strings.NewReader(tenantListStr))
+	var tenantList []string
+	for {
+		line, _, err := br.ReadLine()
+		if err != nil && len(line) == 0 {
+			break
+		}
+		tenantList = append(tenantList, string(line))
+	}
+	return tenantList, nil
+}
+
+// GetAllNamespace returns all namespaces under the given tenant.
+func GetAllNamespace(tenant string) ([]string, error) {
+	// list all namespaces of the tenant via pulsar-admin
+	extraCmd := fmt.Sprintf("%s/bin/pulsar-admin namespaces list %s", cst.DefaultPulsarBrokerDir, tenant)
+	logger.Info("list all namespaces of tenant [%s], [%s]", tenant, extraCmd)
+	namespaceListStr, err := osutil.ExecShellCommand(false, extraCmd)
+	if err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return nil, err
+	}
+	br := bufio.NewReader(strings.NewReader(namespaceListStr))
+	var namespaceList []string
+	for {
+		line, _, err := br.ReadLine()
+		if err != nil && len(line) == 0 {
+			break
+		}
+		namespaceList = append(namespaceList, string(line))
+	}
+	return namespaceList, nil
+}
+
+// CheckNamespaceEnsembleSize verifies each namespace's persistence policy against the remaining bookie count.
+func CheckNamespaceEnsembleSize(bkNum int) (err error) {
+	tenantsList, err := GetAllTenant()
+	if err != nil {
+		logger.Error("get all tenant error, %v", err)
+		return err
+	}
+	for _, tenant := range tenantsList {
+		namespaceList, err := GetAllNamespace(tenant)
+		if err != nil {
+			logger.Error("get all namespace failed, tenant=%s, error: %v", tenant, err)
+			return err
+		}
+		for _, namespace := range namespaceList {
+			extraCmd := fmt.Sprintf("%s/bin/pulsar-admin namespaces get-persistence %s",
+				cst.DefaultPulsarBrokerDir, namespace)
+			logger.Info("获取namespace[%s]持久化策略, [%s]", namespace, extraCmd)
+			res, err := osutil.ExecShellCommand(false, extraCmd)
+			if err != nil {
+				logger.Error("[%s] execute failed, %v", err)
+				return err
+			}
+			if res != "null\n" {
+				persistence := make(map[string]interface{}, 0)
+				if err := json.Unmarshal([]byte(res), &persistence); err == nil {
+					ensembleSize := persistence["bookkeeperEnsemble"].(int)
+					writeQuorumSize := persistence["bookkeeperWriteQuorum"].(int)
+					ackQuorumSize := persistence["bookkeeperAckQuorum"].(int)
+					if !(bkNum >= ensembleSize && ensembleSize >= writeQuorumSize && writeQuorumSize >= ackQuorumSize) {
+						logger.Error("Bookie can be decommissioned only when Num(RemainBookie)>=EnsembleSize>="+
+							"Qw>=Qa, however, Num(RemainBookie)=%v, E=%v, Qw=%v, Qa=%v",
+							bkNum, ensembleSize, writeQuorumSize, ackQuorumSize)
+						return errors.New("num(RemainBookie) > EnsembleSize >= Qw >= Qa is not satisfied")
+					}
+				} else {
+					logger.Error("json unmarshal failed [%s], %v", res, err)
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// CheckUnderReplicated returns an error when any under-replicated ledgers exist.
+func CheckUnderReplicated() error {
+	// list under-replicated ledgers
+	extraCmd := fmt.Sprintf("%s/bin/bookkeeper shell listunderreplicated | grep ListUnderReplicatedCommand",
+		cst.DefaultPulsarBkDir)
+	logger.Info("获取复制中的ledgers, [%s]", extraCmd)
+	res, err := osutil.ExecShellCommand(false, extraCmd)
+	if err != nil && len(res) > 0 {
+		logger.Error("[%s] execute failed, %v", err)
+		return err
+	}
+	if len(res) > 0 {
+		logger.Error("under replicated: [%s]", res)
+		return errors.New("bookie is under replicated")
+	}
+	return nil
+}
+
+// GetAllDataDir returns all top-level /data* directories.
+func GetAllDataDir() ([]string, error) {
+	files, err := os.ReadDir("/")
+	if err != nil {
+		logger.Error("[%s] execute failed, %v", err)
+		return nil, err
+	}
+
+	var dataDir []string
+	for _, file := range files {
+		if file.IsDir() && strings.HasPrefix(file.Name(), "data") {
+			dataDir = append(dataDir, "/"+file.Name())
+		}
+	}
+	return dataDir, nil
+}
+
+// CheckLedgerMetadata checks the metadata of all open ledgers against the bookie count.
+func CheckLedgerMetadata(bookieNum int) (err error) {
+	// fetch the metadata of all ledgers in OPEN state
+	extraCmd := fmt.Sprintf("%s/bin/bookkeeper shell listledgers -m | grep \"ListLedgersCommand\" "+
+		"| grep \"state=OPEN\" | cut -d \",\" -f 3,4,5", cst.DefaultPulsarBkDir)
+	logger.Info("获取open状态的ledger, [%s]", extraCmd)
+	ledgerMetadata, err := osutil.ExecShellCommand(false, extraCmd)
+	if err != nil {
+		logger.Error("[%s] execute failed, %v", extraCmd, err)
+		return err
+	}
+	br := bufio.NewReader(strings.NewReader(ledgerMetadata))
+
+	for {
+		line, _, err := br.ReadLine()
+		if err != nil && len(line) == 0 {
+			break
+		}
+		metadata := strings.Split(string(line), ",")
+		ensembleStr := metadata[0]
+		writeQuorumStr := metadata[1]
+		ensembleSize, convErr := strconv.Atoi(strings.Split(ensembleStr, "=")[1])
+		if convErr != nil {
+			logger.Error("get ensemble size failed, str: %s, err: %v", ensembleStr, convErr)
+			return convErr
+		}
+		writeQuorumSize, convErr := strconv.Atoi(strings.Split(writeQuorumStr, "=")[1])
+		if convErr != nil {
+			logger.Error("get write quorum size failed, str: %s, err: %v", writeQuorumStr, convErr)
+			return convErr
+		}
+		if ensembleSize > bookieNum {
+			logger.Error("ensembleSize(%d) can't be bigger than bookieNum(%d)", ensembleSize, bookieNum)
+			return errors.New("ensembleSize > Num(bookie)")
+		}
+		if writeQuorumSize > bookieNum {
+			logger.Error("writeQuorumSize(%d) can't be bigger than bookieNum(%d)", writeQuorumSize, bookieNum)
+			return errors.New("writeQuorumSize > Num(bookie)")
+		}
+		if writeQuorumSize == 1 {
+			logger.Error("some ledger's write quorum is 1, decommission may lead to data lost")
+			return errors.New("writeQuorumSize = 1")
+		}
+	}
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/pulsarutil/pulsar_operate.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/pulsarutil/pulsar_operate.go
new file mode 100644
index 0000000000..b907d7f70a
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/pulsarutil/pulsar_operate.go
@@ -0,0 +1,202 @@
+package pulsarutil
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+
+	"github.com/go-ini/ini"
+	"github.com/shirou/gopsutil/mem"
+)
+
+// GetMemSizeInMi returns the total system memory in MiB.
+func GetMemSizeInMi() (uint64, error) {
+	vMem, err := mem.VirtualMemory()
+	if err != nil {
+		return 0, err
+	}
+	kilo := uint64(1024)
+	totalMemInMi := vMem.Total / kilo / kilo
+	return totalMemInMi, nil
+}
+
+// GetHeapAndDirectMemInMi returns the JVM heap and direct-memory sizes derived from total system memory.
+func GetHeapAndDirectMemInMi() (string, string, error) {
+	heapSize := "0g"
+	directMem := "0g"
+	systemMem, err := GetMemSizeInMi()
+	if err != nil {
+		return heapSize, directMem, err
+	}
+
+	if systemMem > 128*uint64(1024) {
+		heapSize = "30g"
+		directMem = "30g"
+	} else {
+		// heap + direct memory = 50% memory
+		// heap : direct memory = 1 : 2
+		heapSize = fmt.Sprintf("%vm", systemMem/6)
+		directMem = fmt.Sprintf("%vm", systemMem/3)
+	}
+
+	return heapSize, directMem, nil
+}
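+
+// For example, a 64 GiB host (65536 MiB) is below the 128 GiB cutoff, so the
+// split above yields heap = 65536/6 = 10922m and directMem = 65536/3 = 21845m:
+// half of RAM in a 1:2 heap-to-direct ratio. Larger hosts are capped at 30g/30g.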
+
+// SupervisorctlUpdate runs "supervisorctl update" in the background.
+func SupervisorctlUpdate() error {
+	startCmd := "supervisorctl update"
+	logger.Info(fmt.Sprintf("exec %s", startCmd))
+	_, err := osutil.RunInBG(false, startCmd)
+	return err
+}
+
+// GenZookeeperIni renders the supervisor ini for the zookeeper program.
+func GenZookeeperIni() []byte {
+	iniRaw := []byte(fmt.Sprintf(`[program:zookeeper]
+command=%s/zookeeper/bin/pulsar zookeeper ; the program (relative uses PATH, can take args)
+numprocs=1 ; number of processes copies to start (def 1)
+autostart=true ; start at supervisord start (default: true)
+startsecs=3 ; # of secs prog must stay up to be running (def. 1)
+startretries=99 ; max # of serial start failures when starting (default 3)
+autorestart=true ; when to restart if exited after running (def: unexpected)
+exitcodes=0 ; 'expected' exit codes used with autorestart (default 0,2)
+user=mysql ;
+stopsignal=KILL ;
+redirect_stderr=true ; redirect proc stderr to stdout (default false)
+stdout_logfile=/data/pulsarenv/zookeeper/zookeeper_startup.log ; stdout log path, NONE for none; default AUTO
+stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)`, cst.DefaultPulsarEnvDir))
+	return iniRaw
+}
+
+// GenBookkeeperIni renders the supervisor ini for the bookkeeper program.
+func GenBookkeeperIni() []byte {
+	iniRaw := []byte(fmt.Sprintf(`[program:bookkeeper]
+command=%s/bookkeeper/bin/pulsar bookie ; the program (relative uses PATH, can take args)
+numprocs=1 ; number of processes copies to start (def 1)
+autostart=true ; start at supervisord start (default: true)
+startsecs=3 ; # of secs prog must stay up to be running (def. 1)
+startretries=99 ; max # of serial start failures when starting (default 3)
+autorestart=true ; when to restart if exited after running (def: unexpected)
+exitcodes=0 ; 'expected' exit codes used with autorestart (default 0,2)
+user=mysql ;
+redirect_stderr=true ; redirect proc stderr to stdout (default false)
+stdout_logfile=/data/pulsarenv/bookkeeper/bookkeeper_startup.log ; stdout log path, NONE for none; default AUTO
+stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)`, cst.DefaultPulsarEnvDir))
+	return iniRaw
+}
+
+// GenBrokerIni renders the supervisor ini for the broker program.
+func GenBrokerIni() []byte {
+	iniRaw := []byte(fmt.Sprintf(`[program:broker]
+command=%s/broker/bin/pulsar broker ; the program (relative uses PATH, can take args)
+numprocs=1 ; number of processes copies to start (def 1)
+autostart=true ; start at supervisord start (default: true)
+startsecs=3 ; # of secs prog must stay up to be running (def. 1)
+startretries=99 ; max # of serial start failures when starting (default 3)
+autorestart=true ; when to restart if exited after running (def: unexpected)
+exitcodes=0 ; 'expected' exit codes used with autorestart (default 0,2)
+user=mysql ;
+redirect_stderr=true ; redirect proc stderr to stdout (default false)
+stdout_logfile=/data/pulsarenv/broker/broker_startup.log ; stdout log path, NONE for none; default AUTO
+stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)`, cst.DefaultPulsarEnvDir))
+	return iniRaw
+}
+
+// GenPulsarManagerIni renders the supervisor ini for pulsar-manager.
+func GenPulsarManagerIni() []byte {
+	iniRaw := []byte(fmt.Sprintf(`[program:pulsar-manager]
+directory=%s ;
+command=%s/bin/pulsar-manager ; the program (relative uses PATH, can take args)
+numprocs=1 ; number of processes copies to start (def 1)
+autostart=true ; start at supervisord start (default: true)
+startsecs=3 ; # of secs prog must stay up to be running (def. 1)
+startretries=99 ; max # of serial start failures when starting (default 3)
+autorestart=true ; when to restart if exited after running (def: unexpected)
+exitcodes=0 ; 'expected' exit codes used with autorestart (default 0,2)
+user=mysql ;
+redirect_stderr=true ; redirect proc stderr to stdout (default false)
+stdout_logfile=%s/pulsar-manager_startup.log ; stdout log path, NONE for none; default AUTO
+stdout_logfile_maxbytes=50MB ; max # logfile bytes b4 rotation (default 50MB)
+stdout_logfile_backups=10 ; # of stdout logfile backups (default 10)`, cst.DefaultPulsarManagerDir,
+		cst.DefaultPulsarManagerDir, cst.DefaultPulsarManagerDir))
+	return iniRaw
+}
+
+// SetBookieReadOnly marks the bookie read-only by updating its config file.
+func SetBookieReadOnly() (err error) {
+	bookieConfig, iniErr := ini.Load(cst.DefaultPulsarBkConf)
+	if iniErr != nil {
+		logger.Error("Failed to read file %s: %v", cst.DefaultPulsarBkConf, iniErr)
+		return iniErr
+	}
+
+	section, secErr := bookieConfig.GetSection("")
+	if secErr != nil {
+		logger.Error("Failed to get section: %v", secErr)
+		return secErr
+	}
+
+	if section.Haskey("readOnlyModeEnabled") {
+		section.Key("readOnlyModeEnabled").SetValue("true")
+	} else {
+		_, keyErr := section.NewKey("readOnlyModeEnabled", "true")
+		if keyErr != nil {
+			logger.Error("Failed to add readOnlyModeEnabled : %v", keyErr)
+			return keyErr
+		}
+	}
+	if section.Haskey("forceReadOnlyBookie") {
+		section.Key("forceReadOnlyBookie").SetValue("true")
+	} else {
+		_, keyErr := section.NewKey("forceReadOnlyBookie", "true")
+		if keyErr != nil {
+			logger.Error("Failed to add forceReadOnlyBookie : %v", keyErr)
+			return keyErr
+		}
+	}
+
+	iniErr = bookieConfig.SaveTo(cst.DefaultPulsarBkConf)
+	if iniErr != nil {
+		logger.Error("Failed to save file %s: %v", cst.DefaultPulsarBkConf, iniErr)
+		return iniErr
+	}
+
+	return nil
+}
+
+// UnsetBookieReadOnly clears the bookie's forced read-only state.
+func UnsetBookieReadOnly() (err error) {
+	bookieConfig, iniErr := ini.Load(cst.DefaultPulsarBkConf)
+	if iniErr != nil {
+		logger.Error("Failed to read file %s: %v", cst.DefaultPulsarBkConf, iniErr)
+		return iniErr
+	}
+
+	section, secErr := bookieConfig.GetSection("")
+	if secErr != nil {
+		logger.Error("Failed to get section: %v", secErr)
+		return secErr
+	}
+
+	if section.Haskey("forceReadOnlyBookie") {
+		section.Key("forceReadOnlyBookie").SetValue("false")
+	} else {
+		_, keyErr := section.NewKey("forceReadOnlyBookie", "false")
+		if keyErr != nil {
+			logger.Error("Failed to add forceReadOnlyBookie : %v", keyErr)
+			return keyErr
+		}
+	}
+
+	iniErr = bookieConfig.SaveTo(cst.DefaultPulsarBkConf)
+	if iniErr != nil {
+		logger.Error("Failed to save file %s: %v", cst.DefaultPulsarBkConf, iniErr)
+		return iniErr
+	}
+
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/pulsarutil/pulsarutil.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/pulsarutil/pulsarutil.go
new file mode 100644
index 0000000000..54e9c79090
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/pulsarutil/pulsarutil.go
@@ -0,0 +1,2 @@
+// Package pulsarutil TODO
+package pulsarutil
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/sftp/init.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/sftp/init.go
new file mode 100644
index 0000000000..c0c6ac71d0
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/sftp/init.go
@@ -0,0 +1,77 @@
+package sftp
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"log"
+	"os"
+)
+
+// The remote source is addressed as user:pass@host:port with a remote path
+// such as /data/dbbak; the destination is a local directory.
+
+// Download fetches srcDir/fileName from the remote host into dstDir, limiting
+// bandwidth to bwlimitMB MB/s. When fileName is empty, srcDir and dstDir are
+// treated as full file paths.
+func Download(src Config, srcDir, dstDir string, fileName string, bwlimitMB int64) error {
+	remote, err := New(src)
+	if err != nil {
+		return err
+	}
+	defer remote.Close()
+
+	srcFile := fmt.Sprintf(`%s/%s`, srcDir, fileName)
+	dstFile := fmt.Sprintf(`%s/%s`, dstDir, fileName)
+	if fileName == "" {
+		srcFile = srcDir
+		dstFile = dstDir
+	}
+	logger.Info("start download to %s", dstFile)
+	// Get remote file stats.
+	info, err := remote.Info(srcFile)
+	if err != nil {
+		return err
+	}
+	fmt.Printf("%+v\n", info)
+
+	// Download remote file.
+	r, err := remote.Download(srcFile)
+	if err != nil {
+		return err
+	}
+	defer r.Close()
+
+	// create the local file
+	f, err := os.Create(dstFile)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	done := make(chan int, 1)
+	defer close(done)
+	go func(chan int) {
+		// report progress by polling the growth of the local file size
+		osutil.PrintFileSizeIncr(dstFile, 1, 10, logger.Info, done)
+	}(done)
+
+	// copy the remote stream into the local file with bandwidth limiting
+	_, err = util.IOLimitRate(f, r, bwlimitMB)
+	if err != nil {
+		return err
+	}
+	return nil
+}
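+
+// A usage sketch (all values are placeholders): fetch a backup file at up to 10 MB/s:
+//
+//	cfg := Config{Username: "root", Password: "secret", Server: "10.0.0.1:22", Timeout: 10 * time.Second}
+//	err := Download(cfg, "/data/dbbak", "/data/dbbak", "backup.tar.gz", 10)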
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/sftp/sftp.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/sftp/sftp.go
new file mode 100644
index 0000000000..7053da19f6
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/sftp/sftp.go
@@ -0,0 +1,203 @@
+// Package sftp TODO
+package sftp
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"regexp"
+	"time"
+
+	"github.com/pkg/sftp"
+	"golang.org/x/crypto/ssh"
+)
+
+// Config represents SSH connection parameters.
+type Config struct {
+	Username     string
+	Password     string
+	PrivateKey   string
+	Server       string
+	KeyExchanges []string
+
+	Timeout time.Duration
+}
+
+// Client provides basic functionality to interact with a SFTP server.
+type Client struct {
+	config     Config
+	sshClient  *ssh.Client
+	sftpClient *sftp.Client
+}
+
+// New initialises SSH and SFTP clients and returns Client type to use.
+func New(config Config) (*Client, error) {
+	c := &Client{
+		config: config,
+	}
+
+	if err := c.connect(); err != nil {
+		return nil, err
+	}
+
+	return c, nil
+}
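+
+// A minimal usage sketch (all values are placeholders):
+//
+//	client, err := New(Config{
+//		Username: "root",
+//		Password: "secret",
+//		Server:   "10.0.0.1:22",
+//		Timeout:  10 * time.Second,
+//	})
+//	if err != nil {
+//		// handle error
+//	}
+//	defer client.Close()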
+
+// Create creates a remote/destination file for I/O.
+func (c *Client) Create(filePath string) (io.ReadWriteCloser, error) {
+	if err := c.connect(); err != nil {
+		return nil, fmt.Errorf("connect: %w", err)
+	}
+
+	return c.sftpClient.Create(filePath)
+}
+
+// Upload writes local/source file data streams to remote/destination file.
+func (c *Client) Upload(source io.Reader, destination io.Writer, size int) error {
+	if err := c.connect(); err != nil {
+		return fmt.Errorf("connect: %w", err)
+	}
+
+	chunk := make([]byte, size)
+
+	for {
+		num, err := source.Read(chunk)
+		if err == io.EOF {
+			tot, err := destination.Write(chunk[:num])
+			if err != nil {
+				return err
+			}
+
+			if tot != len(chunk[:num]) {
+				return fmt.Errorf("failed to write stream")
+			}
+
+			return nil
+		}
+
+		if err != nil {
+			return err
+		}
+
+		tot, err := destination.Write(chunk[:num])
+		if err != nil {
+			return err
+		}
+
+		if tot != len(chunk[:num]) {
+			return fmt.Errorf("failed to write stream")
+		}
+	}
+}
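+
+// Upload streams in fixed-size chunks, so arbitrarily large files can be
+// transferred with bounded memory; size is the chunk size in bytes.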
+
+// Download returns remote/destination file for reading.
+func (c *Client) Download(filePath string) (io.ReadCloser, error) {
+	if err := c.connect(); err != nil {
+		return nil, fmt.Errorf("connect: %w", err)
+	}
+
+	return c.sftpClient.Open(filePath)
+}
+
+// Info gets the details of a file. If the file was not found, an error is returned.
+func (c *Client) Info(filePath string) (os.FileInfo, error) {
+	if err := c.connect(); err != nil {
+		return nil, fmt.Errorf("connect: %w", err)
+	}
+
+	info, err := c.sftpClient.Lstat(filePath)
+	if err != nil {
+		return nil, fmt.Errorf("file stats: %w", err)
+	}
+
+	return info, nil
+}
+
+// Close closes open connections.
+func (c *Client) Close() {
+	if c.sftpClient != nil {
+		c.sftpClient.Close()
+	}
+	if c.sshClient != nil {
+		c.sshClient.Close()
+	}
+}
+
+// GetAuthMethods builds the SSH auth methods to try: keyboard-interactive
+// first, then plain password.
+// NOTE: Config.PrivateKey is currently unused; only password auth is wired up.
+func (c *Config) GetAuthMethods(password string) []ssh.AuthMethod {
+	auth := ssh.Password(password)
+	keyboardInteractiveChallenge := func(
+		user,
+		instruction string,
+		questions []string,
+		echos []bool,
+	) (answers []string, err error) {
+		if len(questions) == 0 {
+			return []string{}, nil
+		}
+		answers = make([]string, len(questions))
+		for i := range questions {
+			// answer "yes" to yes/no style prompts, otherwise send the password;
+			// note "*yes*" is not a valid regexp, so match the substring instead
+			yes, _ := regexp.MatchString("yes", questions[i])
+			if yes {
+				answers[i] = "yes"
+			} else {
+				answers[i] = password
+			}
+		}
+		return answers, nil
+	}
+	auth2 := ssh.KeyboardInteractive(keyboardInteractiveChallenge)
+
+	methods := []ssh.AuthMethod{auth2, auth}
+	return methods
+}
+
+// connect initialises new SSH and SFTP clients if they were never
+// initialised, or if they were but the SSH connection has since been lost.
+func (c *Client) connect() error {
+	if c.sshClient != nil {
+		_, _, err := c.sshClient.SendRequest("keepalive", false, nil)
+		if err == nil {
+			return nil
+		}
+	}
+
+	cfg := &ssh.ClientConfig{
+		User: c.config.Username,
+		Auth: c.config.GetAuthMethods(c.config.Password),
+		// HostKeyCallback: func(string, net.Addr, ssh.PublicKey) error { return nil },
+		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+		// HostKeyCallback: ssh.FixedHostKey(hostKey),
+		Timeout: c.config.Timeout,
+	}
+
+	sshClient, err := ssh.Dial("tcp", c.config.Server, cfg)
+	if err != nil {
+		return fmt.Errorf("ssh dial: %w", err)
+	}
+	c.sshClient = sshClient
+
+	sftpClient, err := sftp.NewClient(sshClient)
+	if err != nil {
+		return fmt.Errorf("sftp new client: %w", err)
+	}
+	c.sftpClient = sftpClient
+
+	return nil
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/sftp/sftp_test.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/sftp/sftp_test.go
new file mode 100644
index 0000000000..aeb9c33984
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/sftp/sftp_test.go
@@ -0,0 +1,18 @@
+package sftp
+
+import (
+	"testing"
+	"time"
+)
+
+func TestDownloadFile(t *testing.T) {
+	config := Config{
+		Username: "",
+		Password: "", // required only if password authentication is to be used
+		Server:   "127.0.0.1:36000",
+		// KeyExchanges: []string{"diffie-hellman-group-exchange-sha256", "diffie-hellman-group14-sha256"}, // optional
+		Timeout: time.Second * 10, // 0 for not timeout
+	}
+
+	Download(config, "/data/dbbak/", "/data/dbbak", "vip_VM-224-30-centos_127.0.0.1_20000_20220719_035743.info", 1)
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/slice.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/slice.go
new file mode 100644
index 0000000000..4ebac95bb4
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/slice.go
@@ -0,0 +1,207 @@
+package util
+
+import (
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// IntsHas checks whether the []int contains the given value
+func IntsHas(ints []int, val int) bool {
+	for _, ele := range ints {
+		if ele == val {
+			return true
+		}
+	}
+	return false
+}
+
+// Int64sHas checks whether the []int64 contains the given value
+func Int64sHas(ints []int64, val int64) bool {
+	for _, ele := range ints {
+		if ele == val {
+			return true
+		}
+	}
+	return false
+}
+
+// StringsHas checks whether the []string contains the given element
+func StringsHas(ss []string, val string) bool {
+	for _, ele := range ss {
+		if ele == val {
+			return true
+		}
+	}
+	return false
+}
+
+// StringsHasICase checks whether the []string contains the given element, case-insensitively
+func StringsHasICase(ss []string, val string) bool {
+	val = strings.ToLower(val)
+	for _, ele := range ss {
+		if strings.ToLower(ele) == val {
+			return true
+		}
+	}
+	return false
+}
+
+// UniqueStrings returns the unique items in a slice
+func UniqueStrings(slice []string) []string {
+	// create a map with all the values as key
+	uniqMap := make(map[string]struct{})
+	for _, v := range slice {
+		uniqMap[v] = struct{}{}
+	}
+
+	// turn the map keys into a slice
+	uniqSlice := make([]string, 0, len(uniqMap))
+	for v := range uniqMap {
+		uniqSlice = append(uniqSlice, v)
+	}
+	return uniqSlice
+}
+
+// UniqueInts returns the unique items in a slice
+func UniqueInts(slice []int) []int {
+	// create a map with all the values as key
+	uniqMap := make(map[int]struct{})
+	for _, v := range slice {
+		uniqMap[v] = struct{}{}
+	}
+
+	// turn the map keys into a slice
+	uniqSlice := make([]int, 0, len(uniqMap))
+	for v := range uniqMap {
+		uniqSlice = append(uniqSlice, v)
+	}
+	return uniqSlice
+}
+
+// IsConsecutiveStrings reports whether strList forms a consecutive sequence.
+// With isNumber=false, each string is first mapped to the ASCII codes of its
+// characters. Returns an error on empty or non-numeric elements.
+func IsConsecutiveStrings(strList []string, isNumber bool) error {
+	err := errors.New("not consecutive numbers")
+	intList := make([]int, len(strList))
+	if !isNumber {
+		// string to ascii
+		// .aa .ab .ac => 469797 469798 469799
+		for i, s := range strList {
+			ss := ""
+			for _, si := range []rune(s) {
+				ss += strconv.FormatInt(int64(si), 10)
+			}
+			// TODO: ss must not exceed 20 digits, or Atoi below will overflow
+			strList[i] = ss
+		}
+	}
+	for i, s := range strList {
+		d, e := strconv.Atoi(s)
+		if e != nil {
+			return errors.Errorf("illegal number %s", s)
+		}
+		intList[i] = d
+	}
+	intList = UniqueInts(intList)
+	sort.Ints(intList)
+	count := len(intList)
+	if (intList[count-1] - intList[0] + 1) != count {
+		return err
+	}
+	return nil
+}
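+
+// Example: {"2","1","3"} is consecutive; {"1","3"} is not. With
+// isNumber=false, {".aa",".ab",".ac"} maps to 469797 469798 469799 and is
+// therefore consecutive.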
+
+// RemoveEmpty filters out empty strings
+func RemoveEmpty(input []string) []string {
+	var result []string
+	for _, item := range input {
+		if item != "" {
+			result = append(result, item)
+		}
+	}
+	return result
+}
+
+// StringSliceToInterfaceSlice converts a []string to an []interface{}
+func StringSliceToInterfaceSlice(ids []string) []interface{} {
+	result := make([]interface{}, 0, len(ids))
+	for _, id := range ids {
+		result = append(result, id)
+	}
+	return result
+}
+
+// StringsRemove removes a value from a string slice
+func StringsRemove(ss []string, s string) []string {
+	var ns []string
+	for _, v := range ss {
+		if v != s {
+			ns = append(ns, v)
+		}
+	}
+
+	return ns
+}
+
+// StringsInsertAfter inserts new after the first occurrence of old in the slice.
+// If old is not found, the original elements are returned unchanged.
+func StringsInsertAfter(ss []string, old string, new string) []string {
+	var ssNew = make([]string, len(ss)+1)
+	var found bool
+	for i, v := range ss {
+		if found {
+			ssNew[i+1] = v
+		} else if v == old {
+			ssNew[i] = v
+			ssNew[i+1] = new
+			found = true
+		} else {
+			ssNew[i] = v
+		}
+	}
+	if !found {
+		return ssNew[:len(ss)]
+	}
+	return ssNew
+}
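+
+// Example: StringsInsertAfter([]string{"a", "b", "c"}, "b", "x") returns
+// ["a", "b", "x", "c"]; with old="z" the original elements come back unchanged.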
+
+// StringsInsertIndex inserts a new element at the given index of the slice.
+// If index is out of range, the slice is returned unchanged.
+func StringsInsertIndex(ss []string, index int, new string) []string {
+	if index < 0 || index > len(ss)-1 {
+		return ss
+	}
+	var ssNew = make([]string, len(ss)+1)
+	for i, v := range ss {
+		if i > index {
+			ssNew[i+1] = v
+		} else if i < index {
+			ssNew[i] = v
+		} else {
+			ssNew[i] = new
+			ssNew[i+1] = v
+		}
+	}
+	return ssNew
+}
+
+// FilterOutStringSlice returns the elements of src not contained in filters
+//
+//	@receiver src
+//	@receiver filters
+//	@return dst
+func FilterOutStringSlice(src []string, filters []string) (dst []string) {
+	for _, v := range src {
+		if !StringsHas(filters, v) {
+			dst = append(dst, v)
+		}
+	}
+	return
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/str.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/str.go
new file mode 100644
index 0000000000..2e957d4e76
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/str.go
@@ -0,0 +1,33 @@
+package util
+
+import (
+	"regexp"
+	"strings"
+)
+
+// SplitAny splits s by the given regexp of delimiters and drops
+// empty/whitespace-only fields.
+// Example: util.SplitAny("ab##cd$$ef", "(##|\$\$)")
+func SplitAny(s string, delimiters string) []string {
+	// e.g. delimiters = `[;,\n\t ]+`
+	splitRegex := regexp.MustCompile(delimiters)
+	splitResults := splitRegex.Split(s, -1)
+	results := make([]string, 0)
+	for _, s := range splitResults {
+		if strings.TrimSpace(s) != "" {
+			results = append(results, strings.TrimSpace(s))
+		}
+	}
+	return results
+}
+
+// SplitAnyRune splits s on any rune contained in seps.
+// Example: util.SplitAnyRune("a,b c", ", ")
+// If s is empty, it returns [], not [""].
+func SplitAnyRune(s string, seps string) []string {
+	splitter := func(r rune) bool {
+		return strings.ContainsRune(seps, r)
+	}
+	return strings.FieldsFunc(s, splitter)
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/command_groups.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/command_groups.go
new file mode 100644
index 0000000000..34245eba47
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/command_groups.go
@@ -0,0 +1,48 @@
+package templates
+
+import (
+	"github.com/spf13/cobra"
+)
+
+// CommandGroup groups related commands under a message.
+type CommandGroup struct {
+	Message  string
+	Commands []*cobra.Command
+}
+
+// CommandGroups is a list of CommandGroup.
+type CommandGroups []CommandGroup
+
+// Add registers all grouped commands with the given parent command.
+func (g CommandGroups) Add(c *cobra.Command) {
+	for _, group := range g {
+		c.AddCommand(group.Commands...)
+	}
+}
+
+// Has reports whether c belongs to any group.
+func (g CommandGroups) Has(c *cobra.Command) bool {
+	for _, group := range g {
+		for _, command := range group.Commands {
+			if command == c {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// AddAdditionalCommands appends, as a new group, any commands not already grouped.
+func AddAdditionalCommands(g CommandGroups, message string, cmds []*cobra.Command) CommandGroups {
+	group := CommandGroup{Message: message}
+	for _, c := range cmds {
+		// Don't show commands that have no short description
+		if !g.Has(c) && len(c.Short) != 0 {
+			group.Commands = append(group.Commands, c)
+		}
+	}
+	if len(group.Commands) == 0 {
+		return g
+	}
+	return append(g, group)
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/markdown.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/markdown.go
new file mode 100644
index 0000000000..7e1e518c7c
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/markdown.go
@@ -0,0 +1,190 @@
+package templates
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+
+	"github.com/russross/blackfriday"
+)
+
+const linebreak = "\n"
+
+// ASCIIRenderer implements blackfriday.Renderer
+var _ blackfriday.Renderer = &ASCIIRenderer{}
+
+// ASCIIRenderer is a blackfriday.Renderer intended for rendering markdown
+// documents as plain text, well suited for human reading on terminals.
+type ASCIIRenderer struct {
+	Indentation string
+
+	listItemCount uint
+	listLevel     uint
+}
+
+// NormalText gets a text chunk *after* the markdown syntax was already
+// processed and does a final cleanup on things we don't expect here, like
+// removing linebreaks on things that are not a paragraph break (auto unwrap).
+func (r *ASCIIRenderer) NormalText(out *bytes.Buffer, text []byte) {
+	raw := string(text)
+	lines := strings.Split(raw, linebreak)
+	for _, line := range lines {
+		trimmed := strings.Trim(line, " \n\t")
+		if len(trimmed) > 0 && trimmed[0] != '_' {
+			out.WriteString(" ")
+		}
+		out.WriteString(trimmed)
+	}
+}
+
+// List renders the start and end of a list.
+func (r *ASCIIRenderer) List(out *bytes.Buffer, text func() bool, flags int) {
+	r.listLevel++
+	out.WriteString(linebreak)
+	text()
+	r.listLevel--
+}
+
+// ListItem renders list items and supports both ordered and unordered lists.
+func (r *ASCIIRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) {
+	if flags&blackfriday.LIST_ITEM_BEGINNING_OF_LIST != 0 {
+		r.listItemCount = 1
+	} else {
+		r.listItemCount++
+	}
+	indent := strings.Repeat(r.Indentation, int(r.listLevel))
+	var bullet string
+	if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
+		bullet += fmt.Sprintf("%d.", r.listItemCount)
+	} else {
+		bullet += "*"
+	}
+	out.WriteString(indent + bullet + " ")
+	r.fw(out, text)
+	out.WriteString(linebreak)
+}
+
+// Paragraph renders the start and end of a paragraph.
+func (r *ASCIIRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
+	out.WriteString(linebreak)
+	text()
+	out.WriteString(linebreak)
+}
+
+// BlockCode renders a chunk of text that represents source code.
+func (r *ASCIIRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) {
+	out.WriteString(linebreak)
+	lines := []string{}
+	for _, line := range strings.Split(string(text), linebreak) {
+		indented := r.Indentation + line
+		lines = append(lines, indented)
+	}
+	out.WriteString(strings.Join(lines, linebreak))
+}
+
+// GetFlags TODO
+func (r *ASCIIRenderer) GetFlags() int { return 0 }
+
+// HRule TODO
+func (r *ASCIIRenderer) HRule(out *bytes.Buffer) {
+	out.WriteString(linebreak + "----------" + linebreak)
+}
+
+// LineBreak TODO
+func (r *ASCIIRenderer) LineBreak(out *bytes.Buffer) { out.WriteString(linebreak) }
+
+// TitleBlock TODO
+func (r *ASCIIRenderer) TitleBlock(out *bytes.Buffer, text []byte) { r.fw(out, text) }
+
+// Header TODO
+func (r *ASCIIRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) { text() }
+
+// BlockHtml TODO
+func (r *ASCIIRenderer) BlockHtml(out *bytes.Buffer, text []byte) { r.fw(out, text) }
+
+// BlockQuote TODO
+func (r *ASCIIRenderer) BlockQuote(out *bytes.Buffer, text []byte) { r.fw(out, text) }
+
+// TableRow TODO
+func (r *ASCIIRenderer) TableRow(out *bytes.Buffer, text []byte) { r.fw(out, text) }
+
+// TableHeaderCell TODO
+func (r *ASCIIRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) { r.fw(out, text) }
+
+// TableCell TODO
+func (r *ASCIIRenderer) TableCell(out *bytes.Buffer, text []byte, align int) { r.fw(out, text) }
+
+// Footnotes TODO
+func (r *ASCIIRenderer) Footnotes(out *bytes.Buffer, text func() bool) { text() }
+
+// FootnoteItem TODO
+func (r *ASCIIRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
+	r.fw(out, text)
+}
+
+// AutoLink TODO
+func (r *ASCIIRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) { r.fw(out, link) }
+
+// CodeSpan TODO
+func (r *ASCIIRenderer) CodeSpan(out *bytes.Buffer, text []byte) { r.fw(out, text) }
+
+// DoubleEmphasis TODO
+func (r *ASCIIRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) { r.fw(out, text) }
+
+// Emphasis TODO
+func (r *ASCIIRenderer) Emphasis(out *bytes.Buffer, text []byte) { r.fw(out, text) }
+
+// RawHtmlTag TODO
+func (r *ASCIIRenderer) RawHtmlTag(out *bytes.Buffer, text []byte) { r.fw(out, text) }
+
+// TripleEmphasis TODO
+func (r *ASCIIRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) { r.fw(out, text) }
+
+// StrikeThrough TODO
+func (r *ASCIIRenderer) StrikeThrough(out *bytes.Buffer, text []byte) { r.fw(out, text) }
+
+// FootnoteRef TODO
+func (r *ASCIIRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) { r.fw(out, ref) }
+
+// Entity TODO
+func (r *ASCIIRenderer) Entity(out *bytes.Buffer, entity []byte) { r.fw(out, entity) }
+
+// Smartypants TODO
+func (r *ASCIIRenderer) Smartypants(out *bytes.Buffer, text []byte) { r.fw(out, text) }
+
+// DocumentHeader TODO
+func (r *ASCIIRenderer) DocumentHeader(out *bytes.Buffer) {}
+
+// DocumentFooter TODO
+func (r *ASCIIRenderer) DocumentFooter(out *bytes.Buffer) {}
+
+// TocHeaderWithAnchor TODO
+func (r *ASCIIRenderer) TocHeaderWithAnchor(text []byte, level int, anchor string) {}
+
+// TocHeader TODO
+func (r *ASCIIRenderer) TocHeader(text []byte, level int) {}
+
+// TocFinalize TODO
+func (r *ASCIIRenderer) TocFinalize() {}
+
+// Table TODO
+func (r *ASCIIRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
+	r.fw(out, header, body)
+}
+
+// Link TODO
+func (r *ASCIIRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
+	out.WriteString(" ")
+	r.fw(out, link)
+}
+
+// Image TODO
+func (r *ASCIIRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
+	r.fw(out, link)
+}
+
+func (r *ASCIIRenderer) fw(out *bytes.Buffer, text ...[]byte) {
+	for _, t := range text {
+		out.Write(t)
+	}
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/normallizers.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/normallizers.go
new file mode 100644
index 0000000000..f974771b54
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/normallizers.go
@@ -0,0 +1,83 @@
+package templates
+
+import (
+	"strings"
+
+	"github.com/MakeNowJust/heredoc"
+	"github.com/russross/blackfriday"
+	"github.com/spf13/cobra"
+)
+
+// Indentation is the indent unit used when normalizing text.
+const Indentation = `  `
+
+// LongDesc normalizes a command's long description to follow the conventions.
+func LongDesc(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+	return normalizer{s}.heredoc().markdown().trim().string
+}
+
+// Examples normalizes a command's examples to follow the conventions.
+func Examples(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+	return normalizer{s}.trim().indent().string
+}
+
+// Normalize performs all required normalizations on a given command.
+func Normalize(cmd *cobra.Command) *cobra.Command {
+	if len(cmd.Long) > 0 {
+		cmd.Long = LongDesc(cmd.Long)
+	}
+	if len(cmd.Example) > 0 {
+		cmd.Example = Examples(cmd.Example)
+	}
+	return cmd
+}
+
+// NormalizeAll performs all required normalizations on the entire command tree.
+func NormalizeAll(cmd *cobra.Command) *cobra.Command {
+	if cmd.HasSubCommands() {
+		for _, subCmd := range cmd.Commands() {
+			NormalizeAll(subCmd)
+		}
+	}
+	Normalize(cmd)
+	return cmd
+}
+
+type normalizer struct {
+	string
+}
+
+func (s normalizer) markdown() normalizer {
+	bytes := []byte(s.string)
+	formatted := blackfriday.Markdown(bytes, &ASCIIRenderer{Indentation: Indentation},
+		blackfriday.EXTENSION_NO_INTRA_EMPHASIS)
+	s.string = string(formatted)
+	return s
+}
+
+func (s normalizer) heredoc() normalizer {
+	s.string = heredoc.Doc(s.string)
+	return s
+}
+
+func (s normalizer) trim() normalizer {
+	s.string = strings.TrimSpace(s.string)
+	return s
+}
+
+func (s normalizer) indent() normalizer {
+	indentedLines := []string{}
+	for _, line := range strings.Split(s.string, "\n") {
+		trimmed := strings.TrimSpace(line)
+		indented := Indentation + trimmed
+		indentedLines = append(indentedLines, indented)
+	}
+	s.string = strings.Join(indentedLines, "\n")
+	return s
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/templates.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/templates.go
new file mode 100644
index 0000000000..08870591a3
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/templates/templates.go
@@ -0,0 +1,2 @@
+// Package templates TODO
+package templates
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/timeutil/duration.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/timeutil/duration.go
new file mode 100644
index 0000000000..0652a21eca
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/timeutil/duration.go
@@ -0,0 +1,67 @@
+package timeutil
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// Duration wraps time.Duration with JSON support for both numbers and strings.
+type Duration struct {
+	time.Duration
+}
+
+// UnmarshalJSON accepts a JSON number (nanoseconds) or a time.ParseDuration string.
+func (d *Duration) UnmarshalJSON(b []byte) error {
+	var unmarshalledJson interface{}
+
+	err := json.Unmarshal(b, &unmarshalledJson)
+	if err != nil {
+		return err
+	}
+
+	switch value := unmarshalledJson.(type) {
+	case float64:
+		d.Duration = time.Duration(value)
+	case string:
+		d.Duration, err = time.ParseDuration(value)
+		if err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("invalid duration: %#v", unmarshalledJson)
+	}
+
+	return nil
+}
+
+// String implements fmt.Stringer.
+func (d *Duration) String() string {
+	return d.Duration.String()
+}
+
+// IsZeroDuration reports whether the duration is zero.
+func (d *Duration) IsZeroDuration() bool {
+	return d.Duration == 0
+}
+
+// Return unwraps the underlying time.Duration.
+func (d *Duration) Return() time.Duration {
+	return d.Duration
+}
+
+// NewDuration wraps t in a Duration.
+func NewDuration(t time.Duration) Duration {
+	return Duration{t}
+}
+
+// CompareDuration 1: t1>t2, -1: t1 t2.Duration {
+		return 1
+	} else if t1.Duration < t2.Duration {
+		return -1
+	} else {
+		return 0
+	}
+}
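+
+// Example: CompareDuration(NewDuration(2*time.Second), NewDuration(time.Second)) == 1.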
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/timeutil/timeutil.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/timeutil/timeutil.go
new file mode 100644
index 0000000000..27407bf41e
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/timeutil/timeutil.go
@@ -0,0 +1,2 @@
+// Package timeutil TODO
+package timeutil
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/util.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/util.go
new file mode 100644
index 0000000000..b7dec732c1
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/util.go
@@ -0,0 +1,361 @@
+// Package util TODO
+package util
+
+import (
+	"crypto/md5"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/TylerBrock/colorjson"
+	"github.com/golang/glog"
+	"github.com/juju/ratelimit"
+	"github.com/pkg/errors"
+)
+
+// RetryConfig controls retry behaviour.
+type RetryConfig struct {
+	Times     int           // maximum number of attempts
+	DelayTime time.Duration // delay before each attempt
+}
+
+// Retry runs f up to r.Times times, sleeping r.DelayTime before every
+// attempt, including the first one.
+func Retry(r RetryConfig, f func() error) (err error) {
+	for i := 0; i < r.Times; i++ {
+		time.Sleep(r.DelayTime)
+		if err = f(); err == nil {
+			return nil
+		}
+		logger.Warn("第%d次重试,函数错误:%s", i, err.Error(), err.Error())
+	}
+	return
+}
+
+// AtWhere returns the file:line of the caller, for logging.
+func AtWhere() string {
+	pc, _, _, ok := runtime.Caller(1)
+	if ok {
+		fileName, line := runtime.FuncForPC(pc).FileLine(pc)
+		result := strings.Index(fileName, "/bk-dbactuator/")
+		if result > 1 {
+			preStr := fileName[0:result]
+			fileName = strings.Replace(fileName, preStr, "", 1)
+		}
+		return fmt.Sprintf("%s:%d", fileName, line)
+	} else {
+		return "Method not Found!"
+	}
+}
+
+// HasElem reports whether slice (a slice or array) contains elem, using reflect.DeepEqual.
+func HasElem(elem interface{}, slice interface{}) bool {
+	defer func() {
+		if err := recover(); err != nil {
+			logger.Error("HasElem error %s ", err)
+		}
+	}()
+	arrV := reflect.ValueOf(slice)
+	if arrV.Kind() == reflect.Slice || arrV.Kind() == reflect.Array {
+		for i := 0; i < arrV.Len(); i++ {
+			// XXX - panics if slice element points to an unexported struct field
+			// see https://golang.org/pkg/reflect/#Value.Interface
+			if reflect.DeepEqual(arrV.Index(i).Interface(), elem) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+const (
+	tcpDialTimeout = 3 * time.Second
+)
+
+// HostCheck reports whether host ("addr:port") is reachable over TCP.
+func HostCheck(host string) bool {
+	conn, err := net.DialTimeout("tcp", host, tcpDialTimeout)
+	if err != nil {
+		glog.Infof(err.Error())
+		return false
+	}
+	_ = conn.Close()
+	return true
+}
+
+// GetFileMd5 returns the hex md5 checksum of the file at fileAbPath.
+func GetFileMd5(fileAbPath string) (md5sum string, err error) {
+	rFile, err := os.Open(fileAbPath)
+	if err != nil {
+		return "", err
+	}
+	defer rFile.Close()
+	h := md5.New()
+	if _, err := io.Copy(h, rFile); err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%x", h.Sum(nil)), nil
+}
+
+// Struct2Map converts a struct (or pointer to struct) into a map keyed by the given tag.
+func Struct2Map(s interface{}, tag string) (map[string]interface{}, error) {
+	out := make(map[string]interface{})
+	v := reflect.ValueOf(s)
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+	if v.Kind() != reflect.Struct {
+		return nil, fmt.Errorf("only accept struct or pointer, got %T", v)
+	}
+	t := v.Type()
+	for i := 0; i < v.NumField(); i++ {
+		f := t.Field(i)
+		if tagValue := f.Tag.Get(tag); tagValue != "" {
+			out[tagValue] = v.Field(i).Interface()
+		}
+	}
+	return out, nil
+}
+
+// SetField sets the named field of obj (a pointer to struct) to value.
+func SetField(obj interface{}, name string, value interface{}) error {
+	structValue := reflect.ValueOf(obj).Elem()
+	structFieldValue := structValue.FieldByName(name)
+
+	if !structFieldValue.IsValid() {
+		return fmt.Errorf("no such field: %s in obj", name)
+	}
+
+	if !structFieldValue.CanSet() {
+		return fmt.Errorf("cannot set %s field value", name)
+	}
+
+	structFieldType := structFieldValue.Type()
+	val := reflect.ValueOf(value)
+	if structFieldType != val.Type() {
+		return errors.New("provided value type didn't match obj field type")
+	}
+
+	structFieldValue.Set(val)
+	return nil
+}
+
+// Convert2Map flattens a struct into a map keyed by json tags; nested structs
+// and pointers are flattened recursively.
+func Convert2Map(m interface{}) map[string]string {
+	ret := make(map[string]string)
+	v := reflect.ValueOf(m)
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+	for i := 0; i < v.NumField(); i++ {
+		f := v.Field(i)
+		switch f.Kind() {
+		case reflect.Struct, reflect.Ptr:
+			// merge nested fields into the result instead of discarding them
+			for k, nv := range Convert2Map(f.Interface()) {
+				ret[k] = nv
+			}
+		default:
+			ret[v.Type().Field(i).Tag.Get("json")] = f.String()
+		}
+	}
+	return ret
+}
+
+// StrIsEmpty reports whether str is empty after trimming whitespace.
+func StrIsEmpty(str string) bool {
+	return strings.TrimSpace(str) == ""
+}
+
+// FileExists checks whether the path already exists
+func FileExists(path string) bool {
+	_, err := os.Stat(path)
+	if err != nil {
+		return os.IsExist(err)
+	}
+	return true
+}
+
+// IsDirectory checks whether the path is a directory
+func IsDirectory(path string) bool {
+	fileInfo, err := os.Stat(path)
+	if err != nil {
+		return false
+	}
+	return fileInfo.IsDir()
+}
+
+// FileExistsErr returns an error when the path does not exist (or cannot be stat'ed).
+func FileExistsErr(path string) error {
+	_, err := os.Stat(path)
+	if err != nil {
+		err = errors.WithMessage(err, path)
+	}
+	return err
+}
+
+// GetFileSize returns the file size in bytes, or -1 (not exist), -2 (no permission), -3 (other stat error).
+func GetFileSize(path string) int64 {
+	f, err := os.Stat(path)
+	if err != nil {
+		// the file may not exist, or we may lack permission to stat it
+		if os.IsNotExist(err) {
+			return -1
+		} else if os.IsPermission(err) {
+			return -2
+		} else {
+			return -3
+		}
+	}
+	return f.Size()
+}
+
+// OutputPrettyJson prints a colorized example payload for the given struct (typically a zero value).
+func OutputPrettyJson(p interface{}) {
+	var inInterface map[string]interface{}
+	inrec, _ := json.Marshal(p)
+	json.Unmarshal(inrec, &inInterface)
+	// Make a custom formatter with indent set
+	f := colorjson.NewFormatter()
+	f.Indent = 4
+	pp, err := f.Marshal(inInterface)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Println("Payload Example: ")
+	fmt.Println("")
+	fmt.Println(string(pp))
+	fmt.Println("")
+}
+
+// IntSlice2String joins ints with sep: []int{1,2,3,4} -> "1,2,3,4"
+func IntSlice2String(elements []int, sep string) string {
+	elemStr := ""
+	if len(elements) > 0 {
+		for i, elem := range elements {
+			if i == (len(elements) - 1) {
+				elemStr += fmt.Sprintf("%d", elem)
+				break
+			}
+			elemStr += fmt.Sprintf("%d%s", elem, sep)
+		}
+	}
+	return elemStr
+}
+
+// ConverMapInterface2MapString converts map[string]interface{} to map[string]string; it fails when a value is not a string.
+func ConverMapInterface2MapString(mi map[string]interface{}) (ms map[string]string, err error) {
+	ms = make(map[string]string)
+	for key, v := range mi {
+		dv, ok := v.(string)
+		if !ok {
+			return nil, fmt.Errorf("key:%s 断言string 失败", key)
+		}
+		ms[key] = dv
+	}
+	return
+}
+
+// RegexReplaceSubString replaces every regexp match of old in str with new.
+func RegexReplaceSubString(str, old, new string) string {
+	re := regexp.MustCompile(fmt.Sprintf(`(%s)`, old))
+	return re.ReplaceAllString(str, new)
+}
+
+// IOLimitRate is an io.Copy that limits bandwidth to bwlimitMB MB/s.
+func IOLimitRate(dst io.Writer, src io.Reader, bwlimitMB int64) (written int64, err error) {
+	bwlimit := bwlimitMB * 1024 * 1024
+	srcBucket := ratelimit.NewBucketWithRate(float64(bwlimit), bwlimit)
+	return io.Copy(dst, ratelimit.Reader(src, srcBucket))
+}
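+
+// Usage sketch: IOLimitRate(dst, src, 50) copies src to dst at no more than
+// 50 MB/s, by reading src through a 50 MB/s token bucket.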
+
+// GetSuffixWithLenAndSep truncates each string to its last maxlen bytes, then
+// returns the suffix after the final separator.
+func GetSuffixWithLenAndSep(strList []string, separator string, maxlen int) []string {
+	if maxlen > 0 {
+		for i, s := range strList {
+			l := len(s)
+			if l-maxlen > 0 {
+				strList[i] = s[l-maxlen:]
+			}
+		}
+	}
+	seqList := make([]string, len(strList))
+	for i, s := range strList {
+		seqList[i] = LastElement(strings.Split(s, separator))
+	}
+	return seqList
+}
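+
+// Example: GetSuffixWithLenAndSep([]string{"db_node_1"}, "_", 6) first keeps
+// the last 6 bytes ("node_1"), then returns the part after the final "_": "1".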
+
+// LastElement returns the last element of arr; arr must be non-empty.
+func LastElement(arr []string) string {
+	return arr[len(arr)-1]
+}
+
+// ReverseRead reads a file backwards, like `tail -n 10`
+//
+//	@receiver name    file path
+//	@receiver lineNum how many trailing lines to read
+//	@return []string  the lines read, last line first
+//	@return error
+func ReverseRead(name string, lineNum uint) ([]string, error) {
+	// open the file
+	file, err := os.Open(name)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	// get the file size
+	fs, err := file.Stat()
+	if err != nil {
+		return nil, err
+	}
+	fileSize := fs.Size()
+
+	var offset int64 = -1   // offset from EOF; starting at 0 would read EOF immediately
+	char := make([]byte, 1) // buffer for reading a single byte
+	lineStr := ""           // accumulates the current line
+	buff := make([]string, 0, 100)
+	for (-offset) <= fileSize {
+		// seek backwards from the end of the file and read one byte at a time
+		file.Seek(offset, io.SeekEnd)
+		_, err := file.Read(char)
+		if err != nil {
+			return buff, err
+		}
+		if char[0] == '\n' {
+			offset--  // skip the '\r' of Windows line endings
+			lineNum-- // one full line has been read
+			buff = append(buff, lineStr)
+			lineStr = ""
+			if lineNum == 0 {
+				return buff, nil
+			}
+		} else {
+			lineStr = string(char) + lineStr
+		}
+		offset--
+	}
+	buff = append(buff, lineStr)
+	return buff, nil
+}
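+
+// Example: ReverseRead("/var/log/app.log", 10) returns up to the last 10
+// lines of the file, last line first (the path is illustrative).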
+
+// SliceErrorsToError joins multiple errors into one, newline-separated.
+func SliceErrorsToError(errs []error) error {
+	var errStrs []string
+	for _, e := range errs {
+		errStrs = append(errStrs, e.Error())
+	}
+	errString := strings.Join(errStrs, "\n")
+	return errors.New(errString)
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/validate/validate.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/validate/validate.go
new file mode 100644
index 0000000000..ec86058a06
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/validate/validate.go
@@ -0,0 +1,152 @@
+// Package validate TODO
+package validate
+
+import (
+	"dbm-services/bigdata/db-tools/dbactuator/pkg/util"
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+
+	"github.com/go-playground/locales/en"
+	ut "github.com/go-playground/universal-translator"
+	"github.com/go-playground/validator/v10"
+	en_translations "github.com/go-playground/validator/v10/translations/en"
+	"github.com/pkg/errors"
+)
+
+// ValidateEnums makes the validate tag work with the enums tag, avoiding
+// writing the same list twice for validate oneof and swagger enums.
+// example: Method string `validate:"required,enums" enums:"post,get" json:"method"`
+func ValidateEnums(f validator.FieldLevel) bool {
+	fieldValue := f.Field().String()
+	fieldName := f.StructFieldName()
+	// get StructField
+	sf, _ := f.Parent().Type().FieldByName(fieldName)
+	// get tag value from tag_field enums
+	tagValue := sf.Tag.Get(TagEnum)
+	enumsValues := strings.Split(tagValue, ",")
+	return util.StringsHas(enumsValues, fieldValue)
+}
+
+// GoValidateStructSimple performs a simple struct validation without extra logic.
+// If the struct carries a validate:"enums" tag, enum must be set to true.
+func GoValidateStructSimple(v interface{}, enum bool) error {
+	validate := validator.New()
+	if enum {
+		_ = validate.RegisterValidation("enums", ValidateEnums)
+	}
+	if err := validate.Struct(v); err != nil {
+		return err
+	}
+	return nil
+}
+
+// TagEnum is the struct tag that lists allowed enum values.
+const TagEnum = "enums"
+
+// GoValidateStruct validates v; v must not be a pointer.
+func GoValidateStruct(v interface{}, enum bool, charset bool) error {
+	validate := validator.New()
+	uni := ut.New(en.New())
+	trans, _ := uni.GetTranslator("en")
+	// report the json field name in validation error messages
+	validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
+		name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0]
+		if name == "-" {
+			return ""
+		}
+		return name
+	})
+	if err := en_translations.RegisterDefaultTranslations(validate, trans); err != nil {
+		return err
+	}
+
+	if enum {
+		_ = validate.RegisterValidation(TagEnum, ValidateEnums)
+	}
+	if charset {
+		_ = validate.RegisterValidation("checkCharset", validCharSet)
+	}
+	if err := validate.Struct(v); err != nil {
+		return translateErr2Msg(v, trans, err)
+	}
+	return nil
+}
+
+// translateErr2Msg translates validation errors into readable messages; v must not be a pointer.
+func translateErr2Msg(v interface{}, trans ut.Translator, err error) error {
+	var errStr []string
+	_, ok := err.(*validator.InvalidValidationError)
+	if ok {
+		return fmt.Errorf("param error:%s", err.Error())
+	}
+	for _, vErr := range err.(validator.ValidationErrors) {
+		if vErr.Tag() == TagEnum {
+			errmsg := ""
+			// errmsg := customEnumTransFunc(vErr, v)
+			if vErr.Param() == "" {
+				sf, _ := reflect.TypeOf(v).FieldByName(vErr.StructField())
+				tagValue := sf.Tag.Get(TagEnum)
+				errmsg = fmt.Sprintf("%s must be one of [%s]", vErr.Field(), tagValue)
+			} else {
+				errmsg = vErr.Param()
+			}
+			errStr = append(errStr, errmsg)
+			continue
+		}
+		errStr = append(errStr, vErr.Translate(trans))
+	}
+	return errors.New(strings.Join(errStr, " || "))
+}
+
+func customEnumTransFunc(fe validator.FieldError, v interface{}) string {
+	if fe.Param() == "" {
+		sf, _ := reflect.TypeOf(v).FieldByName(fe.StructField())
+		tagValue := sf.Tag.Get(TagEnum)
+		errmsg := fmt.Sprintf("%s must be one of [%s]", fe.Field(), tagValue)
+		return errmsg
+	} else {
+		return fe.Param()
+	}
+}
+
+// registerTranslator adds translation support for a custom tag.
+func registerTranslator(tag string, msg string) validator.RegisterTranslationsFunc {
+	return func(trans ut.Translator) error {
+		if err := trans.Add(tag, msg, false); err != nil {
+			return err
+		}
+		return nil
+	}
+}
+
+// customTransFunc translates a custom field error using the given translator.
+func customTransFunc(trans ut.Translator, fe validator.FieldError) string {
+	msg, err := trans.T(fe.Tag(), fe.Field())
+	if err != nil {
+		panic(fe.(error).Error())
+	}
+	return msg
+}
+
+func translate(ut ut.Translator, fe validator.FieldError) string {
+	s, err := ut.T(fe.Tag(), fe.Field(), "fe.Param()")
+	if err != nil {
+		log.Printf("warning: error translating FieldError: %#v", fe)
+		return fe.(error).Error()
+	}
+	return s
+}
+
+func validCharSet(f validator.FieldLevel) bool {
+	v := f.Field().String()
+	return util.HasElem(v, []string{"default", "utf8mb4", "utf8", "latin1", "gb2312", "gbk", "binary", "gb18030"})
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/xmlutil/xml.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/xmlutil/xml.go
new file mode 100644
index 0000000000..d83fee7966
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/xmlutil/xml.go
@@ -0,0 +1,60 @@
+package xmlutil
+
+import "encoding/xml"
+
+// GenericMap is a generic string-keyed map that can marshal itself to XML.
+type GenericMap map[string]interface{}
+
+// MarshalXML implements xml.Marshaler, emitting a performance_status root element.
+func (g GenericMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
+	start.Name.Local = "performance_status"
+	tokens := []xml.Token{start}
+	tokens = ScanXMLNode(g, tokens)
+	tokens = append(tokens, xml.EndElement{Name: start.Name})
+
+	for _, t := range tokens {
+		err := e.EncodeToken(t)
+		if err != nil {
+			return err
+		}
+	}
+	// flush to ensure tokens are written
+	err := e.Flush()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// ScanXMLNode walks the map recursively, appending start/attr/end tokens for each node.
+func ScanXMLNode(g map[string]interface{}, tokens []xml.Token) []xml.Token {
+	for key, value := range g {
+		t := xml.StartElement{Name: xml.Name{Space: "", Local: key}}
+		if mapInterface, ok := value.(map[string]interface{}); ok {
+			haveAttr := false
+			for k, v := range mapInterface {
+				if str, innerOk := v.(string); innerOk {
+					t.Attr = append(t.Attr, xml.Attr{Name: xml.Name{Space: "", Local: k}, Value: str})
+					haveAttr = true
+				}
+				// mixed children (map[string]string alongside map[string]interface{}) are not handled for now
+			}
+			if haveAttr {
+				tokens = append(tokens, t)
+			} else {
+				tokens = append(tokens, t)
+				tokens = ScanXMLNode(mapInterface, tokens)
+			}
+		} else if mapString, ok := value.(map[string]string); ok {
+			for k, v := range mapString {
+				t.Attr = append(t.Attr, xml.Attr{Name: xml.Name{Space: "", Local: k}, Value: v})
+			}
+			tokens = append(tokens, t)
+		} else {
+			return nil
+		}
+		// fmt.Println("key end:", key)
+		tokens = append(tokens, xml.EndElement{Name: xml.Name{Space: "", Local: key}})
+	}
+	return tokens
+}
diff --git a/dbm-services/bigdata/db-tools/dbactuator/pkg/util/xmlutil/xmlutil.go b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/xmlutil/xmlutil.go
new file mode 100644
index 0000000000..b71255c936
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/pkg/util/xmlutil/xmlutil.go
@@ -0,0 +1,2 @@
+// Package xmlutil TODO
+package xmlutil
diff --git a/dbm-services/bigdata/db-tools/dbactuator/scripts/upload.sh b/dbm-services/bigdata/db-tools/dbactuator/scripts/upload.sh
new file mode 100644
index 0000000000..f1ecd74f7e
--- /dev/null
+++ b/dbm-services/bigdata/db-tools/dbactuator/scripts/upload.sh
@@ -0,0 +1,182 @@
+#!/usr/bin/env bash
+
+# strict mode
+set -euo pipefail
+
+# reset PATH
+PATH=/usr/local/sbin:/usr/sbin:/usr/bin:/sbin:/bin
+export PATH
+
+# common script-framework variables
+PROGRAM=$(basename "$0")
+EXITCODE=0
+
+BKREPO_USER=
+BKREPO_PASSWORD=
+BKREPO_API=http://127.0.0.1:8080
+BKREPO_PROJECT=generic # project code
+BKREPO_NAME=bk-dbm     # repository name; defaults to the custom repo
+DOWNLOAD_DIR=/tmp       # default directory for downloaded files: /tmp
+BKREPO_METHOD=GET       # defaults to download
+BKREPO_PUT_OVERWRITE=false  # whether uploads overwrite existing files (enable with -O)
+REMOTE_PATH=
+declare -a REMOTE_FILE=()   # list of files to download
+declare -a UPLOAD_FILE=()   # list of files to upload
+
+trap 'rm -f /tmp/bkrepo_tool.*.log' EXIT
+usage () {
+    cat <<EOF
+Usage:
+    $PROGRAM -u <user> -p <password> [ -d <download_dir> ] -r /devops/path1 -r /devops/path2 ...
+    $PROGRAM -u <user> -p <password> -X PUT -T local_file_path1 -T local_file_path2 -R remote_path
+            [ -u, --user        [required] "bkrepo api username" ]
+            [ -p, --password    [required] "bkrepo api password" ]
+            [ -i, --url         [optional] "bkrepo api url, defaults to $BKREPO_API" ]
+            [ -r, --remote-file [required] "remote file path to download" ]
+            [ -n, --name        [optional] "repository name, defaults to $BKREPO_NAME" ]
+            [ -P, --project     [optional] "project name, defaults to $BKREPO_PROJECT" ]
+            [ -d, --dir         [optional] "directory for downloaded files, defaults to /tmp" ]
+            [ -X, --method      [optional] "GET (download, default) or PUT (upload)" ]
+
+            with -X PUT, the following options apply:
+                [ -T, --upload-file [required] "local file path to upload" ]
+                [ -R, --remote-path [required] "target directory in the repository" ]
+                [ -O, --override    [optional] "overwrite an existing file with the same name" ]
+            [ -h --help -?      show this help ]
+EOF
+}
+
+usage_and_exit () {
+    usage
+    exit "$1"
+}
+
+log () {
+    echo "$@"
+}
+
+error () {
+    echo "$@" 1>&2
+    usage_and_exit 1
+}
+
+warning () {
+    echo "$@" 1>&2
+    EXITCODE=$((EXITCODE + 1))
+}
+
+# parse command-line arguments (mixed long/short options)
+(( $# == 0 )) && usage_and_exit 1
+while (( $# > 0 )); do 
+    case "$1" in
+        -u | --user )
+            shift
+            BKREPO_USER=$1
+            ;;
+        -p | --password)
+            shift
+            BKREPO_PASSWORD=$1
+            ;;
+        -i | --url)
+            shift
+            BKREPO_API=$1
+            ;;
+        -d | --dir )
+            shift
+            DOWNLOAD_DIR=$1
+            ;;
+        -n | --name )
+            shift
+            BKREPO_NAME=$1
+            ;;
+        -P | --project )
+            shift
+            BKREPO_PROJECT=$1
+            ;;
+        -r | --remote-file )
+            shift
+            REMOTE_FILE+=("$1")
+            ;;
+        -T | --upload-file )
+            shift
+            UPLOAD_FILE+=("$1")
+            ;;
+        -O | --override)
+            BKREPO_PUT_OVERWRITE=true
+            ;;
+        -R | --remote-path )
+            shift
+            REMOTE_PATH=$1
+            ;;
+        -X | --method )
+            shift
+            BKREPO_METHOD=$1
+            ;;
+        --help | -h | '-?' )
+            usage_and_exit 0
+            ;;
+        -*)
+            error "unknown option: $1"
+            ;;
+        *) 
+            break
+            ;;
+    esac
+    shift 
+done 
+
+if [[ -z "$BKREPO_USER" || -z "$BKREPO_PASSWORD" ]]; then
+    warning "-u, -p must not be empty"
+fi
+
+if (( EXITCODE > 0 )); then
+    usage_and_exit "$EXITCODE"
+fi
+
+case $BKREPO_METHOD in
+    GET ) 
+        if ! [[ -d "$DOWNLOAD_DIR" ]]; then
+            mkdir -p "$DOWNLOAD_DIR"
+        fi
+
+        cd "$DOWNLOAD_DIR" || { echo "can't change into $DOWNLOAD_DIR"; exit 1; }
+
+        for remote_file in "${REMOTE_FILE[@]}"; do
+            echo "start downloading $remote_file ..."
+            # run curl inside the if so a failure doesn't abort the script under set -e
+            if curl -X "$BKREPO_METHOD" -sLO -u "$BKREPO_USER:$BKREPO_PASSWORD" "${BKREPO_API}/${BKREPO_PROJECT}/$BKREPO_NAME/$remote_file"; then
+                echo "download $remote_file finished in $DOWNLOAD_DIR/${remote_file##*/}"
+            else
+                echo "download $remote_file failed with error code: <$?>"
+            fi
+        done
+        ;;
+    PUT )
+        for local_file in "${UPLOAD_FILE[@]}"; do
+            if [[ -r "$local_file" ]]; then
+                local_file_md5=$(md5sum "$local_file" | awk '{print $1}')
+                local_file_name=$(basename "$local_file")
+                http_code=$(curl -s -o /tmp/bkrepo_tool.$$.log -w "%{http_code}" \
+                    -u "$BKREPO_USER:$BKREPO_PASSWORD" "${BKREPO_API}/${BKREPO_PROJECT}/${BKREPO_NAME}/$REMOTE_PATH/$local_file_name" \
+                    -T "$local_file" \
+                    -H "X-BKREPO-OVERWRITE: $BKREPO_PUT_OVERWRITE" \
+                    -H "X-BKREPO-MD5: $local_file_md5"
+                ) || true  # don't abort under set -e; failures are reported via http_code below
+                if [[ $http_code -eq 200 ]]; then
+                    echo "upload $local_file to $REMOTE_PATH succeed"
+                else
+                    echo "upload $local_file to $REMOTE_PATH failed"
+                    echo "http response is: $(</tmp/bkrepo_tool.$$.log)"
+                fi
+            fi
+        done
+        ;;
+esac
+
+exit $EXITCODE
diff --git a/dbm-services/common/db-config/Dockerfile b/dbm-services/common/db-config/Dockerfile
new file mode 100644
--- /dev/null
+++ b/dbm-services/common/db-config/Dockerfile
+RUN echo "Asia/Shanghai" > /etc/timezone
+
+ADD conf /conf/
+ADD bkconfigsvr /
+
+WORKDIR /
+CMD /bkconfigsvr
\ No newline at end of file
diff --git a/dbm-services/common/db-config/Makefile b/dbm-services/common/db-config/Makefile
new file mode 100644
index 0000000000..dc932ad1b7
--- /dev/null
+++ b/dbm-services/common/db-config/Makefile
@@ -0,0 +1,52 @@
+SHELL := /bin/bash
+BASEDIR = $(shell pwd)
+
+SRV_NAME = dbconfig
+CLI_NAME = bkconfigcli
+SRV_NAME_CMD = bkconfigsvr
+CLI_NAME_CMD = bkconfigcli
+VER = 0.0.1
+ALPHA_VERSION = $(VER)-alpha.3
+CURRENT_VERSION = release-$(VER)
+TEST_VERSION = test-$(VER)
+NAMESPACE = blueking
+DH_URL = mirrors.tencent.com
+export GOOS ?= linux
+CMD_HOME = bk-dbconfig/cmd/
+
+all: build
+api:
+	go build -o ./${SRV_NAME_CMD} -v ${CMD_HOME}/${SRV_NAME_CMD}
+
+build:clean
+	CGO_ENABLED=0 GOOS=${GOOS} GOARCH=amd64 go build -o ./$(SRV_NAME_CMD) -v ${CMD_HOME}/${SRV_NAME_CMD}
+
+publish:build
+	docker build --build-arg SRV_NAME=$(SRV_NAME) --rm -t $(SRV_NAME):$(CURRENT_VERSION) .
+	docker tag $(SRV_NAME):$(CURRENT_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(CURRENT_VERSION)
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(CURRENT_VERSION)
+
+test:build
+	docker build --build-arg SRV_NAME=$(SRV_NAME) --rm -t $(SRV_NAME):$(TEST_VERSION) .
+	docker tag $(SRV_NAME):$(TEST_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(TEST_VERSION)
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(TEST_VERSION)
+
+alpha:build
+	docker build --build-arg SRV_NAME=$(SRV_NAME) --rm -t $(SRV_NAME):$(ALPHA_VERSION) .
+	docker tag $(SRV_NAME):$(ALPHA_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(ALPHA_VERSION)
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(ALPHA_VERSION)
+
+
+gotool:
+	@-gofmt -w .
+	@-go vet ./... 2>&1 | grep -v vendor;true
+clean:
+	rm -f ${SRV_NAME_CMD} ${CLI_NAME_CMD}
+curl:
+	@echo curl -XGET -H "Content-Type: application/json" http://127.0.0.1:8080/user
+help:
+	@echo "make - compile go source"
+	@echo "make gotool - run gofmt & go too vet"
+	@echo "make clean - do some clean job"
+
+.PHONY: all gotool clean help api curl
\ No newline at end of file
diff --git a/dbm-services/common/db-config/README.md b/dbm-services/common/db-config/README.md
new file mode 100644
index 0000000000..2fefa051ad
--- /dev/null
+++ b/dbm-services/common/db-config/README.md
@@ -0,0 +1,204 @@
+# DB Custom Configuration Management
+
+develop build:
+
+```
+./build.sh
+```
+
+api docs:
+Link: http://localhost:8080/swagger/
+
+# 1. Terminology
+
+## Configuration level (level_name)
+
+The field that expresses the inheritance relationship between configuration levels. Currently allowed level_name values:
+
+- `plat` platform level
+- `app` business level
+- `module` module level
+- `cluster` cluster level
+
+Configuration requests (query/update) must carry level_name and level_value to identify the configuration level node being operated on (a merge sketch follows).
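+
+As a rough sketch of these inheritance semantics (the level names come from this document; the merge helper itself is illustrative, not the service's actual code):
+
+```
+package main
+
+import "fmt"
+
+// levels ordered from lowest to highest priority; later levels override earlier ones
+var levelOrder = []string{"plat", "app", "module", "cluster"}
+
+// mergeLevels overlays each level's items on top of the lower levels
+func mergeLevels(perLevel map[string]map[string]string) map[string]string {
+	merged := map[string]string{}
+	for _, level := range levelOrder {
+		for name, value := range perLevel[level] {
+			merged[name] = value
+		}
+	}
+	return merged
+}
+
+func main() {
+	merged := mergeLevels(map[string]map[string]string{
+		"plat":    {"max_connections": "1000", "charset": "utf8"},
+		"cluster": {"max_connections": "3000"}, // cluster overrides plat
+	})
+	fmt.Println(merged) // map[charset:utf8 max_connections:3000]
+}
+```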
+
+## Configuration type (conf_type)
+
+A class of configuration under a namespace, e.g. database configuration or backup configuration, usually tied to one application or service.
+
+## Configuration file (conf_file)
+
+An independent subclass under a conf_type, distinguished by conf_file. It can be purely logical: no physical configuration file of that name has to exist, and a single conf_type request can return items from several conf_file. In most cases a conf_type has just one conf_file; for databases whose major versions have incompatible options, conf_file can also encode the major version.
+
+- e.g. with `conf_type = 'dbconf'`, `conf_file` can be `MySQL-5.6` or `MySQL-5.7`, for different version configurations
+- e.g. with `conf_type = 'init_user'`, `conf_file` can be `mysql#user` or `proxy#user`, for different configuration subtypes
+
+A configuration file has several attributes:
+
+- whether it is versioned
+- whether conf_name is validated
+- whether conf_value is validated
+
+Queries cannot span multiple `conf_type`, but one query can cover several conf_file under the same conf_type.
+
+### Versioning (versioned)
+
+A configuration file can opt into versioning. Once enabled, changes to its items take effect only after publishing; publishing creates a new version, and historical versions stay visible. A file without versioning is called an un-versioned conf_file.
+
+- to modify items of a versioned conf_file, use the `confitem/upsert` endpoint
+- to modify items of an un-versioned conf_file, use the `confitem/save` endpoint
+- `version/generate` creates a new configuration file version; it both publishes the version and returns the latest version's items
+- `confitem/query` queries items regardless of whether the file is versioned (see the sketch below)
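+
+For illustration only, a client call to `confitem/query` might look like the sketch below; the endpoint name comes from this document, while the `/bkconfig/v1` prefix, the HTTP method and the JSON request shape are assumptions based on the field list in section 2:
+
+```
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// hypothetical query payload; field names follow section 2 of this README
+	body, _ := json.Marshal(map[string]string{
+		"bk_biz_id":   "3",
+		"namespace":   "es",
+		"conf_type":   "dbconf",
+		"conf_file":   "7.10.2",
+		"level_name":  "cluster",
+		"level_value": "cluster1",
+	})
+	resp, err := http.Post("http://localhost:8080/bkconfig/v1/confitem/query",
+		"application/json", bytes.NewReader(body))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+	fmt.Println("status:", resp.Status)
+}
+```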
+
+### Validating configuration names
+
+With conf_name_validate=1, writes check that the conf_name is predefined for this conf_file; names missing from the predefined list are rejected.
+
+### Validating configuration values
+
+With conf_value_validate=1, writes are checked against the conf_name's value_type, value_type_sub and value_allowed.
+
+- data type value_type, currently `STRING`, `INT`, `FLOAT` or `NUMBER`
+- for stricter checks, value_type_sub names a concrete subtype (see the sketch after this list), currently one of
+    - `RANGE`: a range, e.g. `(0,100]`
+    - `ENUM`: enumerated values, e.g. `ON|OFF`, `0|1|2|`; a trailing empty member means the empty value is allowed
+    - `ENUMS`: enumerated values where the value may be several comma-separated members
+    - `JSON`: a special STRING, validated to be a legal JSON string
+    - `REGEX`: a special STRING, validated against the value_allowed regular expression
+    - `BYTES`: a special STRING such as 64m or 128k, converted to bytes and compared with the value_allowed range
+    - `BOOL`: a special ENUM whose value may be empty, with value_allowed like `ON | OFF |`; an empty value means the option is a bare enable flag (i.e. `--skip-name-resolve`, not `--skip-name-resolve=ON`)
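+
+A minimal sketch of the ENUM and BYTES checks described above (helper names and unit handling are illustrative, not the service's implementation):
+
+```
+package main
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// checkEnum reports whether v is one of the allowed members, e.g. allowed = "ON | OFF |".
+// An empty member in allowed means the empty value is accepted.
+func checkEnum(v, allowed string) bool {
+	for _, m := range strings.Split(allowed, "|") {
+		if strings.TrimSpace(m) == v {
+			return true
+		}
+	}
+	return false
+}
+
+// checkBytes converts lowercase sizes like "64m" or "128k" to bytes for a later range check.
+func checkBytes(v string) (int64, error) {
+	units := map[byte]int64{'k': 1 << 10, 'm': 1 << 20, 'g': 1 << 30}
+	if n := len(v); n > 0 {
+		if mul, ok := units[v[n-1]]; ok {
+			base, err := strconv.ParseInt(v[:n-1], 10, 64)
+			return base * mul, err
+		}
+	}
+	return strconv.ParseInt(v, 10, 64)
+}
+
+func main() {
+	fmt.Println(checkEnum("OFF", "ON | OFF |")) // true
+	fmt.Println(checkBytes("64m"))              // 67108864 <nil>
+}
+```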
+
+
+value_default sets an item's default value. When it contains a placeholder there are two set-ups:
+- flag_status=2 in tb_config_name_def marks the item read-only, and the frontend must reject edits to read-only fields (needs frontend cooperation).
+  That is a non-editable placeholder: conf_value can only be computed automatically.
+- putting the placeholder into the allowed values, e.g. `on | yes | {{cluster-enabled}}`, means the value can be generated by the program but may also be forced to one fixed value.
+  That is an editable placeholder: the user can freely choose between setting a value and automatic computation.
+
+## Configuration item (conf_item)
+
+A configuration file holds multiple items; an item has several key attributes:
+
+- name conf_name
+- value conf_value
+- level level_name, level_value
+- locked: whether the item is locked at this level
+- needs restart: whether changing the value requires a restart
+
+## Configuration item response formats
+
+Item queries (always merged with upper-level configuration) mainly return two formats: list and map.
+
+- `list` returns richer item information, including flag_locked, description, etc.
+- `map` returns conf_name:conf_value as key:value pairs
+- `map.`, `map#`, `map|` are special map formats that split conf_name on `.`, `#` or `|` respectively (see the sketch below)
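+
+Illustrative only: a `map.`-style response might split names as in this one-level sketch (the helper name is hypothetical, and the real service may nest differently):
+
+```
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// splitConfNames mimics the "map." format: each conf_name is split on sep and
+// nested one level, e.g. "mysqld.max_connections" -> out["mysqld"]["max_connections"].
+func splitConfNames(items map[string]string, sep string) map[string]map[string]string {
+	out := map[string]map[string]string{}
+	for name, value := range items {
+		section, key, found := strings.Cut(name, sep)
+		if !found {
+			section, key = "", name
+		}
+		if out[section] == nil {
+			out[section] = map[string]string{}
+		}
+		out[section][key] = value
+	}
+	return out
+}
+
+func main() {
+	fmt.Println(splitConfNames(map[string]string{
+		"mysqld.max_connections": "3000",
+		"mysqld.charset":         "utf8",
+	}, "."))
+	// map[mysqld:map[charset:utf8 max_connections:3000]]
+}
+```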
+
+## Configuration inheritance
+
+The inheritance chain currently has the levels `plat`, `app`, `module`, `cluster`.
+By level we distinguish:
+- plat_config  
+ Global public configuration, including configuration item definitions (value data type, default value, whether a restart is needed, etc.)
+- level_config  
+ level_config is an intermediate node in the inheritance chain; it never materializes on a physical cluster or instance and stores only incremental configuration.
+ The full configuration viewed at an intermediate node, merged with its upper levels, is called merged_config.
+- versioned_config  
+ versioned_config is a configuration already generated for a target physical cluster or instance, managed through versions; a special kind of merged_config.
+ Un-versioned configuration never produces a versioned_config.
+- runtime_config  
+ The configuration actually running on the target cluster or machine.
+
+## Configuration item locking
+
+The flag_locked field of a conf_name records its lock state at a given level_name. Once locked, lower levels cannot override the item and can only inherit it. If lower-level overrides already exist when the item is locked, the user is prompted to delete them.
+Changing the conf_value of a locked item prompts an update of the lower-level configuration files.
+
+## Configuration publishing (publish)
+
+- querying items implicitly merges upper-level configuration before returning
+- editing items allows save-only or save-and-publish (currently only save-and-publish is used)
+- querying a configuration file returns its description together with its items
+- querying configuration versions lists versions or shows one historical version in detail, including changed row counts and diffs
+
+## Configuration application (apply)
+- applying a level_config pushes the current level's changes to its direct children, also called configuration synchronization.  
+ Depending on whether the changed item is locked, application is either forced or ordinary; changes to unlocked items currently need no downstream sync.
+ `Apply to children` on a level_config publishes a version to each child (without applying it).
+ Applying a locked item is forced onto all direct children.
+- applying a versioned_config applies a published configuration `to the target instances`
+
+## Configuration value encryption
+The `flag_encrypt` field in tb_config_name_def controls whether the value is stored encrypted.
+`encrypt.keyPrefix` in `conf/config.yaml` sets the prefix of the encryption key. This value must stay unchanged within an environment; otherwise already-encrypted fields can no longer be decrypted.
+
+# 2. Field definitions
+
+**Configuration file**
+
+- `bk_biz_id` bkcc business ID
+- `namespace` namespace
+- `namespace_info` namespace information
+- `conf_type` configuration type
+- `conf_file` configuration file
+
+**Configuration items**
+
+- `conf_item` configuration entry, i.e. configuration item
+- `conf_name` configuration item name
+- `conf_value` configuration item value
+- `level_name` configuration level name
+- `level_value` configuration level value
+
+**Configuration definitions**
+
+- `value_default` item default value
+- `value_allowed` item allowed values
+- `flag_locked` whether the item is locked
+- `flag_encrypt` whether the value is stored with transparent encryption
+
+# 3. How writable data is defined
+
+1. Only registered namespace and conf_type pairs accept writes, and conf_name_validate, conf_value_validate and level_versioned must be set for them.
+2. Only platform items with flag_status >= 1 appear in the public configuration.
+ - flag_status = -1 marks predefined, referenceable item names that do not appear in the rendered public configuration
+ - flag_status = 1 marks public items whose default and allowed values are readable and modifiable; they appear in the rendered result
+ - flag_status = 2 marks read-only items that cannot be modified anywhere but do appear in the rendered result
+3. Editing any platform item, even one with flag_status = -1, also renders it into the public configuration automatically.
+
+## Cache
+
+Contents held in freecache (see the sketch below):
+
+1. conf_file_def
+2. conf_level_def
+
+Several level_name values are built in; if they are not enough, extra rows must be inserted into tb_config_level_def manually.
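+
+A minimal sketch of the kind of freecache usage implied here (cache size, key layout and TTL are assumptions):
+
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/coocood/freecache"
+)
+
+func main() {
+	cache := freecache.NewCache(10 * 1024 * 1024) // 10 MB in-memory cache
+
+	// hypothetical key layout for a conf_file_def entry
+	key := []byte("conf_file_def:es:dbconf:7.10.2")
+	if err := cache.Set(key, []byte(`{"level_names":"plat,app,cluster"}`), 60); err != nil {
+		panic(err)
+	}
+
+	val, err := cache.Get(key)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(val))
+}
+```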
+
+# migrate
+The database used by migrate must be created in advance; otherwise the service cannot start.
+There are two ways to run migrations.
+## go migrate
+```
+migrate -source file://assets/migrations \
+ -database mysql://user:pass@tcp(localhost:3306)/bk_dbconfig?charset=utf8 up
+```
+The migrate binary can be downloaded from https://github.com/golang-migrate/migrate
+
+## ./bkconfigsvr --migrate
+```
+Usage of ./bkconfigsvr:
+      --migrate                 run migrate to databases and exit. set migrate.enable to config.yaml will run migrate and continue 
+      --migrate.force int       force the version to be clean if it's dirty
+      --migrate.source string   migrate source path
+```
+- after running with `--migrate`, bkconfigsvr exits.
+ To migrate automatically on every start of bkconfigsvr, set `migrate.enable` in conf/config.yaml
+- `--migrate` also reads the force and source settings from config.yaml; both can be given on the command line as well
+ source follows the address formats listed at https://github.com/golang-migrate/migrate#migration-sources
+
+## DB table overview
+tb_config_file_def: configuration type and configuration file definitions
+tb_config_name_def: platform configuration item definitions
+tb_config_node: configuration items for business, module, cluster, etc.
+tb_config_versioned: published configuration file versions
\ No newline at end of file
diff --git a/dbm-services/common/db-config/admin.sh b/dbm-services/common/db-config/admin.sh
new file mode 100755
index 0000000000..f019287730
--- /dev/null
+++ b/dbm-services/common/db-config/admin.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+export PATH="/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin:/root/bin:"
+
+SERVER="bkconfigsvr"
+BASE_DIR=$PWD
+INTERVAL=2
+
+# command-line arguments; specify manually if needed
+ARGS=""
+
+function start()
+{
+	if [ "`pgrep $SERVER -u $UID`" != "" ];then
+		echo "$SERVER already running"
+		exit 1
+	fi
+
+	nohup $BASE_DIR/$SERVER $ARGS &>/dev/null &
+
+	echo "sleeping..." &&  sleep $INTERVAL
+
+	# check status
+	if [ "`pgrep $SERVER -u $UID`" == "" ];then
+		echo "$SERVER start failed"
+		exit 1
+	fi
+}
+
+function status() 
+{
+	if [ "`pgrep $SERVER -u $UID`" != "" ];then
+		echo $SERVER is running
+	else
+		echo $SERVER is not running
+	fi
+}
+
+function stop() 
+{
+	if [ "`pgrep $SERVER -u $UID`" != "" ];then
+		kill -9 `pgrep $SERVER -u $UID`
+	fi
+
+	echo "sleeping..." &&  sleep $INTERVAL
+
+	if [ "`pgrep $SERVER -u $UID`" != "" ];then
+		echo "$SERVER stop failed"
+		exit 1
+	fi
+}
+
+case "$1" in
+	'start')
+	start
+	;;  
+	'stop')
+	stop
+	;;  
+	'status')
+	status
+	;;  
+	'restart')
+	stop && start
+	;;  
+	*)  
+	echo "usage: $0 {start|stop|restart|status}"
+	exit 1
+	;;  
+esac
diff --git a/dbm-services/common/db-config/assets/assets.go b/dbm-services/common/db-config/assets/assets.go
new file mode 100644
index 0000000000..66b8f75fb4
--- /dev/null
+++ b/dbm-services/common/db-config/assets/assets.go
@@ -0,0 +1,2 @@
+// Package assets TODO
+package assets
diff --git a/dbm-services/common/db-config/assets/migrate.go b/dbm-services/common/db-config/assets/migrate.go
new file mode 100644
index 0000000000..8d2b964cff
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrate.go
@@ -0,0 +1,10 @@
+package assets
+
+import (
+	"embed"
+)
+
+// Migrations TODO
+//
+//go:embed migrations/*.sql
+var Migrations embed.FS
diff --git a/dbm-services/common/db-config/assets/migrate.md b/dbm-services/common/db-config/assets/migrate.md
new file mode 100644
index 0000000000..72848ed50e
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrate.md
@@ -0,0 +1,38 @@
+
+## dump schema:
+```
+/usr/local/mysql/bin/mysqldump --default-character-set=utf8 \
+ --skip-opt --quick --create-options --no-create-db --no-data \
+ --socket /data1/mysqldata/20000/mysql.sock --port=20000 \
+ bk_dbconfig > 000002_create_table.up.sql
+
+sed -i '/DEFINER=/d' 000002_create_table.up.sql
+sed -i 's/CREATE TABLE /CREATE TABLE IF NOT EXISTS /g' 000002_create_table.up.sql
+```
+
+## dump data:
+Generate the initial migrate data per namespace, e.g. for tendbha:
+```
+dbuser=xx
+dbpass=xxx
+seqno=10
+namespaces="common es hdfs kafka PredixyTendisplusCluster rediscomm RedisInstance RedisMS tendb tendbcluster tendbha tendbsingle TendisCache TendisplusInstance TendisSSD TendisX TwemproxyRedisInstance TwemproxyTendisplusInstance TwemproxyTendisSSDInstance pulsar influxdb"
+dbname=dbconfig_release
+exclude_sensitive="(flag_encrypt!=1 or value_default like '{{%')"
+
+for namespace in $namespaces
+do
+  mig_id="0000${seqno}"
+  echo "${mig_id}_${namespace}_data"
+  dumpcmd="/usr/local/mysql/bin/mysqldump --default-character-set=utf8  --skip-opt --quick --no-create-db --no-create-info --complete-insert  --socket /data1/mysqldata/20000/mysql.sock --port=20000 -u$dbuser -p$dbpass"
+  $dumpcmd $dbname tb_config_file_def --where="namespace='${namespace}'" > ${mig_id}_${namespace}_data.up.sql
+  $dumpcmd $dbname tb_config_name_def --where="namespace='${namespace}' AND ${exclude_sensitive}" >> ${mig_id}_${namespace}_data.up.sql
+ 
+  echo "DELETE FROM tb_config_file_def WHERE namespace='${namespace}';
+DELETE FROM tb_config_name_def WHERE namespace='${namespace}' AND ${exclude_sensitive};" > ${mig_id}_${namespace}_data.down.sql
+ 
+ let seqno+=1
+done
+```
+
+Migration file name prefixes must keep increasing monotonically, one step at a time.
\ No newline at end of file
diff --git a/dbm-services/common/db-config/assets/migrations/000001_init.down.sql b/dbm-services/common/db-config/assets/migrations/000001_init.down.sql
new file mode 100644
index 0000000000..a4f40086f9
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000001_init.down.sql
@@ -0,0 +1 @@
+SET NAMES utf8;
diff --git a/dbm-services/common/db-config/assets/migrations/000001_init.up.sql b/dbm-services/common/db-config/assets/migrations/000001_init.up.sql
new file mode 100644
index 0000000000..dd346774f2
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000001_init.up.sql
@@ -0,0 +1,3 @@
+SET NAMES utf8;
+-- CREATE DATABASE IF NOT EXISTS `bk_dbconfig` DEFAULT CHARACTER SET utf8;
+-- USE `bk_dbconfig`;
\ No newline at end of file
diff --git a/dbm-services/common/db-config/assets/migrations/000002_create_table.down.sql b/dbm-services/common/db-config/assets/migrations/000002_create_table.down.sql
new file mode 100644
index 0000000000..c4df79dd67
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000002_create_table.down.sql
@@ -0,0 +1,13 @@
+DROP TABLE IF EXISTS `schema_migrations`;
+DROP TABLE IF EXISTS `tb_config_file_def`;
+DROP TABLE IF EXISTS `tb_config_name_def`;
+DROP TABLE IF EXISTS `tb_config_file_node`;
+DROP TABLE IF EXISTS `tb_config_level_def`;
+DROP TABLE IF EXISTS `tb_config_level_node`;
+DROP TABLE IF EXISTS `tb_config_node`;
+DROP TABLE IF EXISTS `tb_config_node_task`;
+DROP TABLE IF EXISTS `tb_config_versioned`;
+DROP TABLE IF EXISTS `v_tb_config_node`;
+DROP TABLE IF EXISTS `v_tb_config_node_plat`;
+/*!50001 DROP VIEW IF EXISTS `v_tb_config_node`*/;
+/*!50001 DROP VIEW IF EXISTS `v_tb_config_node_plat`*/;
\ No newline at end of file
diff --git a/dbm-services/common/db-config/assets/migrations/000002_create_table.up.sql b/dbm-services/common/db-config/assets/migrations/000002_create_table.up.sql
new file mode 100644
index 0000000000..195293f8a5
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000002_create_table.up.sql
@@ -0,0 +1,285 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Table structure for table `schema_migrations`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `schema_migrations` (
+  `version` bigint(20) NOT NULL,
+  `dirty` tinyint(1) NOT NULL,
+  PRIMARY KEY (`version`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `tb_config_file_def`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `tb_config_file_def` (
+  `id` int(10) unsigned NOT NULL AUTO_INCREMENT,
+  `namespace` varchar(100) DEFAULT NULL,
+  `conf_type` varchar(100) NOT NULL,
+  `conf_file` varchar(100) DEFAULT NULL,
+  `conf_type_lc` varchar(100) DEFAULT NULL,
+  `conf_file_lc` varchar(100) DEFAULT NULL,
+  `level_names` varchar(100) DEFAULT NULL,
+  `level_versioned` varchar(100) DEFAULT NULL,
+  `conf_name_validate` tinyint(4) NOT NULL DEFAULT '1',
+  `conf_value_validate` tinyint(4) NOT NULL DEFAULT '1',
+  `value_type_strict` tinyint(4) DEFAULT '0' COMMENT 'convert value to value_type for resp',
+  `namespace_info` varchar(100) DEFAULT NULL,
+  `version_keep_limit` int(11) DEFAULT '5',
+  `version_keep_days` int(11) DEFAULT '365',
+  `conf_name_order` tinyint(4) DEFAULT '0' COMMENT '-1,0: no order',
+  `description` varchar(255) DEFAULT NULL,
+  `created_at` datetime DEFAULT CURRENT_TIMESTAMP,
+  `updated_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+  `updated_by` varchar(100) DEFAULT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `tb_config_type_def_UN` (`namespace`,`conf_type`,`conf_file`)
+) ENGINE=InnoDB AUTO_INCREMENT=202 DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `tb_config_file_node`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `tb_config_file_node` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `namespace` varchar(100) NOT NULL,
+  `bk_biz_id` varchar(100) NOT NULL,
+  `conf_type` varchar(100) NOT NULL,
+  `conf_file` varchar(100) NOT NULL DEFAULT '',
+  `level_name` varchar(100) NOT NULL,
+  `level_value` varchar(120) NOT NULL,
+  `conf_type_lc` varchar(100) DEFAULT NULL,
+  `conf_file_lc` varchar(100) DEFAULT NULL,
+  `description` varchar(255) DEFAULT NULL,
+  `created_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
+  `updated_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+  `updated_by` varchar(100) DEFAULT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `uniq_level_node` (`namespace`,`bk_biz_id`,`conf_file`,`conf_type`,`level_name`,`level_value`)
+) ENGINE=InnoDB AUTO_INCREMENT=490 DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `tb_config_level_def`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `tb_config_level_def` (
+  `level_name` varchar(60) NOT NULL,
+  `level_priority` int(11) NOT NULL,
+  `level_name_cn` varchar(100) DEFAULT NULL,
+  `description` varchar(255) DEFAULT '',
+  `flag_disable` tinyint(4) DEFAULT '0',
+  PRIMARY KEY (`level_priority`),
+  UNIQUE KEY `un_level_name` (`level_name`),
+  KEY `idx_level` (`level_priority`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `tb_config_name_def`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `tb_config_name_def` (
+  `id` int(10) unsigned NOT NULL AUTO_INCREMENT,
+  `namespace` varchar(100) DEFAULT NULL,
+  `conf_type` varchar(100) DEFAULT NULL,
+  `conf_file` varchar(100) DEFAULT NULL,
+  `conf_name` varchar(100) NOT NULL,
+  `value_type` varchar(100) NOT NULL DEFAULT 'STRING' COMMENT 'STRING,INT,FLOAT,NUMBER',
+  `value_default` text,
+  `value_allowed` text,
+  `value_type_sub` varchar(100) NOT NULL DEFAULT '' COMMENT 'STRING,ENUM,RANGE,REGEX,JSON,COMPLEX',
+  `flag_status` tinyint(4) NOT NULL DEFAULT '0' COMMENT '1: public item shown explicitly, 0: not shown in the rendered full config, 2: public and read-only',
+  `flag_disable` tinyint(4) NOT NULL DEFAULT '0' COMMENT '2:readonly, 1:disable, 0:enable, -2: not_allowed_given, -3:must_given',
+  `flag_locked` tinyint(4) NOT NULL DEFAULT '0',
+  `flag_encrypt` tinyint(4) NOT NULL DEFAULT '0',
+  `need_restart` tinyint(4) NOT NULL DEFAULT '1',
+  `value_formula` varchar(200) DEFAULT NULL,
+  `extra_info` varchar(200) DEFAULT NULL,
+  `conf_name_lc` varchar(100) DEFAULT NULL,
+  `order_index` int(11) DEFAULT '-1' COMMENT '-1: unordered',
+  `since_version` varchar(100) DEFAULT NULL COMMENT 'conf_name allowed since version xxx',
+  `description` varchar(255) DEFAULT NULL,
+  `created_at` datetime DEFAULT CURRENT_TIMESTAMP,
+  `updated_at` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+  `stage` tinyint(4) NOT NULL DEFAULT '0',
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `uni_ns_type_file_name` (`namespace`,`conf_type`,`conf_file`,`conf_name`)
+) ENGINE=InnoDB AUTO_INCREMENT=15919 DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `tb_config_node`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `tb_config_node` (
+  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+  `node_id` int(11) DEFAULT NULL,
+  `bk_biz_id` varchar(100) NOT NULL,
+  `namespace` varchar(100) DEFAULT NULL COMMENT 'service.service_role',
+  `conf_type` varchar(60) NOT NULL DEFAULT '',
+  `conf_file` varchar(60) NOT NULL,
+  `conf_name` varchar(60) NOT NULL,
+  `conf_value` text,
+  `level_name` varchar(60) NOT NULL,
+  `level_value` varchar(120) DEFAULT 'pub',
+  `flag_locked` tinyint(4) NOT NULL DEFAULT '0',
+  `flag_disable` tinyint(4) DEFAULT '0' COMMENT '-1: deleted 0:enable, 1:disable, -2: not_allowed',
+  `description` varchar(255) DEFAULT '',
+  `created_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
+  `updated_at` datetime NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+  `updated_revision` varchar(60) DEFAULT NULL,
+  `stage` tinyint(4) NOT NULL DEFAULT '0' COMMENT 'state of conf_value: 0 saved but unpublished, 1 published but unapplied, 2 applied',
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `uni_pri` (`bk_biz_id`,`namespace`,`conf_type`,`conf_file`,`level_name`,`level_value`,`conf_name`)
+) ENGINE=InnoDB AUTO_INCREMENT=23398 DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `tb_config_node_task`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `tb_config_node_task` (
+  `id` int(10) unsigned NOT NULL AUTO_INCREMENT,
+  `node_id` int(10) unsigned NOT NULL,
+  `version_id` int(10) unsigned NOT NULL,
+  `revision` varchar(100) NOT NULL,
+  `conf_name` varchar(100) NOT NULL,
+  `conf_value` varchar(255) DEFAULT NULL,
+  `value_before` varchar(255) DEFAULT NULL,
+  `op_type` varchar(100) NOT NULL DEFAULT '',
+  `updated_revision` varchar(100) NOT NULL,
+  `stage` tinyint(4) NOT NULL COMMENT '1: new, 2:applied',
+  `flag_locked` tinyint(4) NOT NULL DEFAULT '0',
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `uniq_node_conf` (`node_id`,`conf_name`)
+) ENGINE=InnoDB AUTO_INCREMENT=490 DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `tb_config_versioned`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `tb_config_versioned` (
+  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
+  `node_id` int(11) DEFAULT NULL,
+  `bk_biz_id` varchar(100) DEFAULT NULL,
+  `conf_type` varchar(100) DEFAULT NULL,
+  `namespace` varchar(100) DEFAULT NULL,
+  `level_name` varchar(100) DEFAULT NULL,
+  `level_value` varchar(100) DEFAULT NULL,
+  `conf_file` varchar(100) DEFAULT NULL,
+  `revision` varchar(100) DEFAULT NULL,
+  `content_str` mediumtext,
+  `content_md5` varchar(60) DEFAULT NULL,
+  `content_obj` text,
+  `is_published` tinyint(1) DEFAULT '0' COMMENT '0 unpublished, 1 published, -1 unpublished but this level has published before',
+  `is_applied` tinyint(1) DEFAULT '0' COMMENT '0 not applied, 1 applied',
+  `module` varchar(100) DEFAULT NULL,
+  `cluster` varchar(100) DEFAULT NULL,
+  `pre_revision` varchar(100) DEFAULT NULL,
+  `rows_affected` int(11) NOT NULL DEFAULT '0',
+  `content_obj_diff` text,
+  `description` varchar(255) DEFAULT NULL,
+  `created_at` datetime DEFAULT CURRENT_TIMESTAMP,
+  `updated_at` datetime DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+  `created_by` varchar(60) DEFAULT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `uniq_node_revision` (`bk_biz_id`,`namespace`,`conf_type`,`conf_file`,`level_name`,`level_value`,`revision`),
+  UNIQUE KEY `uniq_nodeid_revision` (`node_id`,`revision`)
+) ENGINE=InnoDB AUTO_INCREMENT=4514 DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Temporary table structure for view `v_tb_config_node_plat`
+--
+
+SET @saved_cs_client     = @@character_set_client;
+SET character_set_client = utf8;
+/*!50001 CREATE VIEW `v_tb_config_node_plat` AS SELECT 
+ 1 AS `id`,
+ 1 AS `bk_biz_id`,
+ 1 AS `namespace`,
+ 1 AS `conf_type`,
+ 1 AS `conf_file`,
+ 1 AS `conf_name`,
+ 1 AS `level_name`,
+ 1 AS `level_value`,
+ 1 AS `updated_revision`,
+ 1 AS `conf_value`,
+ 1 AS `flag_locked`,
+ 1 AS `flag_disable`,
+ 1 AS `flag_status`,
+ 1 AS `stage`,
+ 1 AS `description`,
+ 1 AS `created_at`,
+ 1 AS `updated_at`*/;
+SET character_set_client = @saved_cs_client;
+
+--
+-- Final view structure for view `v_tb_config_node_plat`
+--
+
+/*!50001 DROP VIEW IF EXISTS `v_tb_config_node_plat`*/;
+/*!50001 SET @saved_cs_client          = @@character_set_client */;
+/*!50001 SET @saved_cs_results         = @@character_set_results */;
+/*!50001 SET @saved_col_connection     = @@collation_connection */;
+/*!50001 SET character_set_client      = utf8 */;
+/*!50001 SET character_set_results     = utf8 */;
+/*!50001 SET collation_connection      = utf8_general_ci */;
+/*!50001 CREATE ALGORITHM=UNDEFINED */
+/*!50001 VIEW `v_tb_config_node_plat` AS select 0 AS `id`,'0' AS `bk_biz_id`,`tb_config_name_def`.`namespace` AS `namespace`,`tb_config_name_def`.`conf_type` AS `conf_type`,`tb_config_name_def`.`conf_file` AS `conf_file`,`tb_config_name_def`.`conf_name` AS `conf_name`,'plat' AS `level_name`,'0' AS `level_value`,'' AS `updated_revision`,`tb_config_name_def`.`value_default` AS `conf_value`,`tb_config_name_def`.`flag_locked` AS `flag_locked`,`tb_config_name_def`.`flag_disable` AS `flag_disable`,`tb_config_name_def`.`flag_status` AS `flag_status`,`tb_config_name_def`.`stage` AS `stage`,`tb_config_name_def`.`conf_name_lc` AS `description`,`tb_config_name_def`.`created_at` AS `created_at`,`tb_config_name_def`.`updated_at` AS `updated_at` from `tb_config_name_def` where (`tb_config_name_def`.`flag_status` > 0) */;
+/*!50001 SET character_set_client      = @saved_cs_client */;
+/*!50001 SET character_set_results     = @saved_cs_results */;
+/*!50001 SET collation_connection      = @saved_col_connection */;
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-03-28 21:09:08
diff --git a/dbm-services/common/db-config/assets/migrations/000003_init_sensitive.up.sql b/dbm-services/common/db-config/assets/migrations/000003_init_sensitive.up.sql
new file mode 100644
index 0000000000..00857b27d5
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000003_init_sensitive.up.sql
@@ -0,0 +1 @@
+-- update schema_migrations set version=3,dirty=0;
\ No newline at end of file
diff --git a/dbm-services/common/db-config/assets/migrations/000010_common_data.down.sql b/dbm-services/common/db-config/assets/migrations/000010_common_data.down.sql
new file mode 100644
index 0000000000..bb0ed2a270
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000010_common_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='common';
+DELETE FROM tb_config_name_def WHERE namespace='common' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000010_common_data.up.sql b/dbm-services/common/db-config/assets/migrations/000010_common_data.up.sql
new file mode 100644
index 0000000000..211438d026
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000010_common_data.up.sql
@@ -0,0 +1,81 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='common'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (75,'common','actconf','redis','act任务配置','redis相关配置','pub,app',NULL,0,1,0,NULL,0,0,0,'redis相关配置','2022-09-16 14:34:33','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (73,'common','osconf','os','操作系统配置',NULL,'plat,app,cluster',NULL,0,1,0,NULL,0,0,0,NULL,'2022-09-02 17:05:43','2023-03-20 21:40:05','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:28
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='common' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8872,'common','actconf','redis','delete_rate','INT','20000','[1,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-16 12:01:44','2023-03-22 14:24:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8873,'common','actconf','redis','tendisplus_delete_rate','INT','3000','[1,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-16 12:09:00','2023-03-22 14:25:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8821,'common','osconf','os','user','STRING','mysql',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-02 17:21:18','2022-09-05 15:06:45',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:28
diff --git a/dbm-services/common/db-config/assets/migrations/000011_es_data.down.sql b/dbm-services/common/db-config/assets/migrations/000011_es_data.down.sql
new file mode 100644
index 0000000000..83f1e27138
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000011_es_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='es';
+DELETE FROM tb_config_name_def WHERE namespace='es' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000011_es_data.up.sql b/dbm-services/common/db-config/assets/migrations/000011_es_data.up.sql
new file mode 100644
index 0000000000..eb8b2853b7
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000011_es_data.up.sql
@@ -0,0 +1,106 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='es'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (74,'es','dbconf','7.10.2','elasticsearch.yml配置','es配置文件','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'es配置文件','2022-09-05 17:00:13','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:28
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='es' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8840,'es','dbconf','7.10.2','bootstrap.memory_lock','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8841,'es','dbconf','7.10.2','bootstrap.system_call_filter','STRING','false','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8843,'es','dbconf','7.10.2','cluster.initial_master_nodes','STRING','{{master_nodename}}',NULL,'',2,0,0,0,1,'{{master_nodename}}',NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8826,'es','dbconf','7.10.2','cluster.name','STRING','{{cluster_name}}',NULL,'',2,0,0,0,1,'{{cluster_name}}',NULL,NULL,-1,NULL,'es配置','2022-09-05 17:24:58','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8850,'es','dbconf','7.10.2','cluster.routing.allocation.same_shard.host','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:42','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8842,'es','dbconf','7.10.2','discovery.seed_hosts','STRING','{{master_ip}}',NULL,'',2,0,0,0,1,'{{master_ip}}',NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8849,'es','dbconf','7.10.2','http.max_content_length','STRING','2048mb',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8836,'es','dbconf','7.10.2','http.port','INT','9200',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8845,'es','dbconf','7.10.2','indices.recovery.max_bytes_per_sec','STRING','200mb',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8830,'es','dbconf','7.10.2','network.host','STRING','{{host}}',NULL,'',2,0,0,0,1,'{{host}}',NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8831,'es','dbconf','7.10.2','network.publish_host','STRING','{{host}}',NULL,'',2,0,0,0,1,'{{host}}',NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8829,'es','dbconf','7.10.2','node.attr.rack_id','INT','1',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8828,'es','dbconf','7.10.2','node.attr.tag','STRING','hot','hot | cold','ENUM',1,0,0,0,1,'hot',NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8832,'es','dbconf','7.10.2','node.data','STRING','{{isdata}}',NULL,'',2,0,0,0,1,'{{isdata}}',NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8833,'es','dbconf','7.10.2','node.ingest','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8834,'es','dbconf','7.10.2','node.master','STRING','{{ismaster}}',NULL,'',2,0,0,0,1,'{{ismaster}}',NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8835,'es','dbconf','7.10.2','node.ml','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8827,'es','dbconf','7.10.2','node.name','STRING','{{node_name}}',NULL,'',2,0,0,0,1,'{{node_name}}',NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:31','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12412,'es','dbconf','7.10.2','password','STRING','{{password}}',NULL,'',0,0,0,1,1,'{{password}}',NULL,NULL,-1,NULL,'es配置','2022-10-25 10:21:35','2023-03-22 18:11:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15921,'es','dbconf','7.10.2','path_data','STRING','/data/esdata','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','数据路径','2023-03-30 08:33:44','2023-03-30 08:33:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15922,'es','dbconf','7.10.2','path_log','STRING','/data/eslog','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','日志路径','2023-03-30 08:35:33','2023-03-30 08:35:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8844,'es','dbconf','7.10.2','processors','STRING','{{processors}}',NULL,'',2,0,0,0,1,'{{processors}}',NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8846,'es','dbconf','7.10.2','search.default_search_timeout','STRING','60s',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12414,'es','dbconf','7.10.2','superpassword','STRING','{{superpassword}}',NULL,'',0,0,0,1,1,'{{superpassword}}',NULL,NULL,-1,NULL,'es配置','2022-10-25 10:23:49','2023-03-22 18:11:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12415,'es','dbconf','7.10.2','superuser','STRING','{{superuser}}',NULL,'',0,0,0,0,1,'{{superuser}}',NULL,NULL,-1,NULL,'es配置','2022-10-25 10:23:49','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8847,'es','dbconf','7.10.2','thread_pool.write.queue_size','STRING','1000',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12413,'es','dbconf','7.10.2','username','STRING','{{username}}',NULL,'',0,0,0,0,1,'{{username}}',NULL,NULL,-1,NULL,'es配置','2022-10-25 10:21:35','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8848,'es','dbconf','7.10.2','xpack.monitoring.collection.enabled','STRING','false','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2022-11-18 11:30:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8839,'es','dbconf','7.10.2','xpack.security.enabled','STRING','false','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'es配置','2022-09-05 21:31:41','2022-11-18 11:30:10',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:28
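
The es defaults seeded above store render-time placeholders such as {{node_name}}, {{processors}} and {{superuser}} in value_default/value_formula (the rows with flag_status=2). The sketch below is only a minimal illustration, in Go, of how such literal {{name}} tokens could be substituted per instance; the actual rendering logic lives in the db-config service and is not part of this patch, and the variable names and values shown are made up for the example.

package main

import (
	"fmt"
	"regexp"
)

// placeholder matches the literal {{name}} form used by the seeded
// value_default / value_formula columns.
var placeholder = regexp.MustCompile(`\{\{(\w+)\}\}`)

// render replaces each {{name}} with its per-instance value and leaves
// unknown names untouched so they stay visible as unresolved.
func render(s string, vars map[string]string) string {
	return placeholder.ReplaceAllStringFunc(s, func(m string) string {
		name := placeholder.FindStringSubmatch(m)[1]
		if v, ok := vars[name]; ok {
			return v
		}
		return m
	})
}

func main() {
	// Hypothetical values a deploy flow might supply for one ES node.
	vars := map[string]string{
		"node_name": "es-demo-1",
		"ismaster":  "true",
	}
	fmt.Println(render("node.name: {{node_name}}", vars))  // node.name: es-demo-1
	fmt.Println(render("node.master: {{ismaster}}", vars)) // node.master: true
}
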
diff --git a/dbm-services/common/db-config/assets/migrations/000012_hdfs_data.down.sql b/dbm-services/common/db-config/assets/migrations/000012_hdfs_data.down.sql
new file mode 100644
index 0000000000..034a30c893
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000012_hdfs_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='hdfs';
+DELETE FROM tb_config_name_def WHERE namespace='hdfs' AND (flag_encrypt!=1 or value_default like '{{%');
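
The paired 000012_hdfs_data.down.sql / 000012_hdfs_data.up.sql files follow the golang-migrate NNNNNN_name.{up,down}.sql convention used throughout assets/migrations. Note what the down file's predicate keeps: it deletes every non-encrypted hdfs row plus encrypted rows whose value_default still starts with a {{ placeholder, so the only survivors are encrypted entries already holding real secret values. As a hedged sketch only (the service's actual wiring lives elsewhere in the repo; the DSN and embed path here are illustrative assumptions), applying such embedded migrations with golang-migrate looks roughly like this:

package main

import (
	"embed"
	"log"

	"github.com/golang-migrate/migrate/v4"
	_ "github.com/golang-migrate/migrate/v4/database/mysql" // registers the mysql:// driver
	"github.com/golang-migrate/migrate/v4/source/iofs"
)

//go:embed assets/migrations/*.sql
var migrationFS embed.FS

func main() {
	// Expose the embedded NNNNNN_*.up.sql / .down.sql pairs as a source.
	src, err := iofs.New(migrationFS, "assets/migrations")
	if err != nil {
		log.Fatal(err)
	}
	// Illustrative DSN; bk_dbconfig matches the Database named in the dump headers.
	m, err := migrate.NewWithSourceInstance("iofs", src,
		"mysql://user:pass@tcp(127.0.0.1:3306)/bk_dbconfig")
	if err != nil {
		log.Fatal(err)
	}
	// Apply all pending up migrations (000012_hdfs_data.up.sql included).
	if err := m.Up(); err != nil && err != migrate.ErrNoChange {
		log.Fatal(err)
	}
	// m.Steps(-1) would execute the newest applied migration's down file,
	// i.e. run the two DELETE statements shown above.
}
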
diff --git a/dbm-services/common/db-config/assets/migrations/000012_hdfs_data.up.sql b/dbm-services/common/db-config/assets/migrations/000012_hdfs_data.up.sql
new file mode 100644
index 0000000000..12f81be748
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000012_hdfs_data.up.sql
@@ -0,0 +1,166 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='hdfs'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (78,'hdfs','core-site','2.6.0-cdh5.4.11-tendataV0.2','core-site配置','core-site配置','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'core-site配置','2022-09-18 17:08:55','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (101,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs集群配置','hdfs集群配置','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'hdfs集群配置','2022-10-18 16:00:03','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (77,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site配置','hdfs-site配置','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'hdfs-site配置','2022-09-18 17:08:55','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (76,'hdfs','hdfsconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs配置','hdfs配置','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'hdfs配置','2022-09-18 16:53:04','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (83,'hdfs','install','2.6.0-cdh5.4.11-tendataV0.2','hdfs安装配置','hdfs安装配置','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'hdfs安装配置','2022-09-19 17:02:17','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (79,'hdfs','zoo.cfg','2.6.0-cdh5.4.11-tendataV0.2','zk配置','zk配置','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'zk配置','2022-09-18 17:08:55','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:28
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='hdfs' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8878,'hdfs','core-site','2.6.0-cdh5.4.11-tendataV0.2','fs.defaultFS','STRING','hdfs://{{cluster_name}}','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 00:04:42','2023-04-17 17:11:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8877,'hdfs','core-site','2.6.0-cdh5.4.11-tendataV0.2','fs.trash.interval','STRING','1440','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 00:04:42','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8876,'hdfs','core-site','2.6.0-cdh5.4.11-tendataV0.2','io.file.buffer.size','STRING','131072','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 00:04:42','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8875,'hdfs','core-site','2.6.0-cdh5.4.11-tendataV0.2','net.topology.script.file.name','STRING','/data/hadoopenv/hadoop/etc/hadoop/rack-aware.sh','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 00:04:42','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12407,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','core-site.fs.defaultFS','STRING','hdfs://{{cluster_name}}','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 00:04:42','2023-04-17 17:11:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12408,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','core-site.fs.trash.interval','INT','1440','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 00:04:42','2022-11-18 11:23:15',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12409,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','core-site.io.file.buffer.size','INT','131072','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 00:04:42','2022-11-18 11:23:15',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12410,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','core-site.net.topology.script.file.name','STRING','/data/hadoopenv/hadoop/etc/hadoop/rack-aware.sh','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'域名解析脚本','2022-09-19 00:04:42','2022-11-18 11:38:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12373,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.blockreport.intervalMsec','INT','43200000','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'块上报间隔时间毫秒','2022-09-21 14:33:49','2022-11-18 11:34:08',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12374,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.blocksize','INT','134217728','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'块大小','2022-09-21 14:33:49','2022-11-18 11:34:08',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12375,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.client.failover.proxy.provider.{{cluster_name}}','STRING','org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12376,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.datanode.data.dir','STRING','file:///data/hadoopdata/data','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12377,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.datanode.fsdataset.volume.choosing.policy','STRING','org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12378,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.datanode.hostname','STRING','{{dn_host}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-23 15:02:59','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12379,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.datanode.http.address','STRING','0.0.0.0:50075','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12380,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.ha.automatic-failover.enabled','STRING','true','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12381,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.ha.fencing.methods','STRING','shell(/bin/true)','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12382,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.ha.namenodes.{{cluster_name}}','STRING','nn1,nn2','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12383,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.hosts','STRING','/data/hadoopenv/hadoop/etc/hadoop/dfs.include','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12384,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.hosts.exclude','STRING','/data/hadoopenv/hadoop/etc/hadoop/dfs.exclude','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12385,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.journalnode.edits.dir','STRING','/data/hadoopdata/jn','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12386,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.checkpoint.period','INT','3600','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-11-18 11:23:14',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12387,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.checkpoint.txns','INT','2000000','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-11-18 11:23:14',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12388,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.handler.count','INT','128','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-11-18 11:23:14',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12389,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.heartbeat.recheck-interval','INT','300000','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-11-18 11:23:14',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12390,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.http-address.{{cluster_name}}.nn1','STRING','{{nn1_host}}:{{http_port}}','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12391,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.http-address.{{cluster_name}}.nn2','STRING','{{nn2_host}}:{{http_port}}','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12392,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.name.dir','STRING','file:///data/hadoopdata/name','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12393,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.replication.max-streams','INT','20','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-11-18 11:23:14',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12394,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.replication.max-streams-hard-limit','INT','40','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-11-18 11:23:14',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12395,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.replication.work.multiplier.per.iteration','INT','10','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-11-18 11:23:14',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12396,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.rpc-address.{{cluster_name}}.nn1','STRING','{{nn1_host}}:{{rpc_port}}','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12397,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.rpc-address.{{cluster_name}}.nn2','STRING','{{nn2_host}}:{{rpc_port}}','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12398,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.service.handler.count','INT','64','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-11-18 11:23:14',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12399,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.servicerpc-address.{{cluster_name}}.nn1','STRING','{{nn1_host}}:53310','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12400,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.servicerpc-address.{{cluster_name}}.nn2','STRING','{{nn2_host}}:53310','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12401,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.namenode.shared.edits.dir','STRING','qjournal://{{jn0_host}}:8485;{{jn1_host}}:8485;{{jn2_host}}:8485;/{{cluster_name}}','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:21:08','2023-04-17 17:11:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12402,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.nameservices','STRING','{{cluster_name}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-18 17:02:55','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12403,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.dfs.replication','INT','2','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-11-18 11:23:14',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12404,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.ha.failover-controller.cli-check.rpc-timeout.ms','INT','60000','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-11-18 11:23:15',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12405,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.ha.zookeeper.quorum','STRING','{{zk0_ip}}:2181,{{zk1_ip}}:2181,{{zk2_ip}}:2181','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12406,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','hdfs-site.ha.zookeeper.session-timeout.ms','INT','5000','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:21:08','2022-11-18 11:23:15',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12367,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','http_port','INT','50070',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'NN WEB端口','2022-10-10 10:51:06','2022-11-18 11:32:57',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8918,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','install.haproxy_rpm','STRING','libslz-1.1.0-2.el7.x86_64.rpm,haproxy-1.8.12-1.el7.x86_64.rpm,libslz-1.1.0-2.el6.x86_64.rpm,haproxy-1.8.12-1.el6.x86_64.rpm',NULL,'',1,0,0,0,1,'NULL',NULL,NULL,-1,NULL,'安装HaProxy RPM配置','2022-09-23 15:12:46','2022-11-18 11:32:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12362,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','nn1_host','STRING','{{nn1_host}}','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,'NN1主机名','2022-10-08 12:53:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12365,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','nn1_ip','STRING','{{nn1_ip}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,'NN1 IP','2022-10-08 14:40:29','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12363,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','nn2_host','STRING','{{nn2_host}}','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,'NN2主机名','2022-10-08 12:56:52','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12364,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','nn2_ip','STRING','{{nn2_ip}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,'NN2 IP','2022-10-08 14:40:29','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12368,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','rpc_port','INT','9000',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'NN RPC端口','2022-10-10 10:51:06','2022-11-18 11:32:57',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13047,'hdfs','dbconf','2.6.0-cdh5.4.11-tendataV0.2','username','STRING','haproxy_user',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'haproxy用户名','2022-12-01 18:56:55','2022-12-01 18:56:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8911,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.blockreport.intervalMsec','STRING','43200000','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8901,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.blocksize','STRING','134217728','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8915,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.client.failover.proxy.provider.{{cluster_name}}','STRING','org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8910,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.datanode.data.dir','STRING','file:///data/hadoopdata/data','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8909,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.datanode.fsdataset.volume.choosing.policy','STRING','org.apache.hadoop.hdfs.server.datanode.fsdataset.AvailableSpaceVolumeChoosingPolicy','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8917,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.datanode.hostname','STRING','{{dn_host}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-23 15:02:59','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8908,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.datanode.http.address','STRING','0.0.0.0:50075','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8913,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.ha.automatic-failover.enabled','STRING','true','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8912,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.ha.fencing.methods','STRING','shell(/bin/true)','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8879,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.ha.namenodes.{{cluster_name}}','STRING','nn1,nn2','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8897,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.hosts','STRING','/data/hadoopenv/hadoop/etc/hadoop/dfs.include','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8898,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.hosts.exclude','STRING','/data/hadoopenv/hadoop/etc/hadoop/dfs.exclude','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8914,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.journalnode.edits.dir','STRING','/data/hadoopdata/jn','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8899,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.checkpoint.period','STRING','3600','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8900,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.checkpoint.txns','STRING','2000000','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8907,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.handler.count','STRING','128','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8896,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.heartbeat.recheck-interval','STRING','300000','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8882,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.http-address.{{cluster_name}}.nn1','STRING','{{nn1_host}}:{{http_port}}','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8883,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.http-address.{{cluster_name}}.nn2','STRING','{{nn2_host}}:{{http_port}}','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8905,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.name.dir','STRING','file:///data/hadoopdata/name','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8902,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.replication.max-streams','STRING','20','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8903,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.replication.max-streams-hard-limit','STRING','40','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8904,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.replication.work.multiplier.per.iteration','STRING','10','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8880,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.rpc-address.{{cluster_name}}.nn1','STRING','{{nn1_host}}:{{rpc_port}}','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8881,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.rpc-address.{{cluster_name}}.nn2','STRING','{{nn2_host}}:{{rpc_port}}','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8906,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.service.handler.count','STRING','64','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8884,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.servicerpc-address.{{cluster_name}}.nn1','STRING','{{nn1_host}}:53310','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8885,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.servicerpc-address.{{cluster_name}}.nn2','STRING','{{nn2_host}}:53310','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8893,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.namenode.shared.edits.dir','STRING','qjournal://{{jn0_host}}:8485;{{jn1_host}}:8485;{{jn2_host}}:8485/{{cluster_name}}','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:21:08','2023-04-17 17:11:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8874,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.nameservices','STRING','{{cluster_name}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-18 17:02:55','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8895,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','dfs.replication','STRING','2','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8916,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','ha.failover-controller.cli-check.rpc-timeout.ms','STRING','60000','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:33:49','2022-10-20 11:24:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8886,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','ha.zookeeper.quorum','STRING','{{zk0_ip}}:2181,{{zk1_ip}}:2181,{{zk2_ip}}:2181','NULL','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-19 01:26:50','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8894,'hdfs','hdfs-site','2.6.0-cdh5.4.11-tendataV0.2','ha.zookeeper.session-timeout.ms','STRING','5000','NULL','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-21 14:21:08','2022-10-20 11:24:09',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:28
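+-- Editorial sketch (kept as comments so the migration itself is unchanged):
+-- values such as '{{nn1_host}}:{{rpc_port}}' above are placeholders that get
+-- rendered per cluster rather than literal defaults. Assuming the same '{{%'
+-- prefix convention the paired down migrations test for, the templated hdfs
+-- entries can be listed manually in a client with:
+-- SELECT conf_file, conf_name, value_default
+--   FROM tb_config_name_def
+--  WHERE namespace='hdfs' AND value_default LIKE '{{%'
+--  ORDER BY conf_file, conf_name;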
diff --git a/dbm-services/common/db-config/assets/migrations/000013_kafka_data.down.sql b/dbm-services/common/db-config/assets/migrations/000013_kafka_data.down.sql
new file mode 100644
index 0000000000..ebc8f497e4
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000013_kafka_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='kafka';
+DELETE FROM tb_config_name_def WHERE namespace='kafka' AND (flag_encrypt!=1 OR value_default LIKE '{{%');
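+-- Editorial sketch (kept as comments so the migration itself is unchanged):
+-- a manual sanity check that this down migration removed every row it targets,
+-- reusing the predicate from the DELETE above; the zero count is an assumption.
+-- SELECT COUNT(*) FROM tb_config_name_def
+--  WHERE namespace='kafka' AND (flag_encrypt!=1 OR value_default LIKE '{{%');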
diff --git a/dbm-services/common/db-config/assets/migrations/000013_kafka_data.up.sql b/dbm-services/common/db-config/assets/migrations/000013_kafka_data.up.sql
new file mode 100644
index 0000000000..ec55b50f79
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000013_kafka_data.up.sql
@@ -0,0 +1,86 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='kafka'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (84,'kafka','dbconf','2.4.0','kafka配置','kafka配置文件','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'kafka配置文件','2022-09-20 15:17:36','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:28
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='kafka' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8891,'kafka','dbconf','2.4.0','adminUser','STRING','kafka','NULL','',1,0,0,0,1,'kafka','NULL',NULL,-1,NULL,'kafka配置','2022-09-20 15:28:01','2022-11-18 11:30:20',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12972,'kafka','dbconf','2.4.0','factor','STRING','3','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','kafka配置','2022-11-24 11:46:20','2022-11-24 11:46:20',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14105,'kafka','dbconf','2.4.0','jmx_port','INT','9999',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'kafka配置','2023-03-06 01:12:50','2023-03-06 11:27:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12371,'kafka','dbconf','2.4.0','partition_num','STRING','1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','kafka配置','2022-10-10 15:54:23','2022-11-18 11:30:20',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12366,'kafka','dbconf','2.4.0','port','STRING','9092','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','kafka配置','2022-10-08 15:26:51','2022-11-18 11:30:20',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12372,'kafka','dbconf','2.4.0','replication_num','STRING','1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','kafka配置','2022-10-10 15:54:23','2022-11-18 11:30:20',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12370,'kafka','dbconf','2.4.0','retention_hours','STRING','1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','kafka配置','2022-10-10 15:54:23','2022-11-18 11:30:20',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13039,'kafka','dbconf','2.4.0','username','STRING','kafka','NULL','',1,0,0,0,1,NULL,'NULL','NULL',-1,'NULL','kafka配置','2022-12-01 11:01:43','2022-12-01 11:01:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12928,'kafka','dbconf','2.4.0','zookeeper_conf','STRING','1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','kafka配置','2022-11-14 17:41:55','2022-11-18 11:30:20',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:28
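+-- Editorial sketch (kept as comments so the migration itself is unchanged):
+-- a manual post-check for this up migration, assuming the rows above are the
+-- only kafka rows in a fresh bk_dbconfig database:
+-- SELECT COUNT(*) FROM tb_config_file_def WHERE namespace='kafka'; -- expect 1
+-- SELECT COUNT(*) FROM tb_config_name_def WHERE namespace='kafka'; -- expect 9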
diff --git a/dbm-services/common/db-config/assets/migrations/000014_PredixyTendisplusCluster_data.down.sql b/dbm-services/common/db-config/assets/migrations/000014_PredixyTendisplusCluster_data.down.sql
new file mode 100644
index 0000000000..dce8edab39
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000014_PredixyTendisplusCluster_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='PredixyTendisplusCluster';
+DELETE FROM tb_config_name_def WHERE namespace='PredixyTendisplusCluster' AND (flag_encrypt!=1 OR value_default LIKE '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000014_PredixyTendisplusCluster_data.up.sql b/dbm-services/common/db-config/assets/migrations/000014_PredixyTendisplusCluster_data.up.sql
new file mode 100644
index 0000000000..b8dcb49a04
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000014_PredixyTendisplusCluster_data.up.sql
@@ -0,0 +1,187 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='PredixyTendisplusCluster'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (99,'PredixyTendisplusCluster','config','backup','配置','备份相关的配置','plat,app,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'备份相关的配置','2022-09-28 12:08:17','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (121,'PredixyTendisplusCluster','config','binlogbackup','配置','binlog备份相关的配置','pub,app,module,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'binlog备份相关的配置','2022-11-23 19:54:59','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (120,'PredixyTendisplusCluster','config','fullbackup','配置','全备相关的配置','pub,app,module,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'全备相关的配置','2022-11-23 19:54:59','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (122,'PredixyTendisplusCluster','config','heartbeat','配置','心跳相关的配置','pub,app,module,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'心跳相关的配置','2022-11-23 19:54:59','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (100,'PredixyTendisplusCluster','config','monitor','配置','监控相关的配置','plat,app,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'监控相关的配置','2022-09-28 12:08:17','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (97,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','redis配置','Tendisplus-2.5的配置文件','plat,app,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'Tendisplus-2.5的配置文件','2022-09-28 12:08:17','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (202,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','redis配置','Tendisplus-2.6的配置文件','pub,plat,app,module,cluster','cluster',1,1,0,'Tendisplus',5,365,0,NULL,'2023-03-30 19:17:40','2023-04-07 15:40:18','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (98,'PredixyTendisplusCluster','proxyconf','Predixy-latest','redis配置','predixy配置文件','plat,app,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'predixy配置文件','2022-09-28 12:08:17','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:28
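+-- Editorial sketch (kept as comments so the migration itself is unchanged):
+-- the file definitions above and the name definitions dumped below are linked
+-- by (namespace, conf_type, conf_file). A manual join to count how many
+-- parameters each PredixyTendisplusCluster file defines:
+-- SELECT f.conf_type, f.conf_file, COUNT(n.id) AS params
+--   FROM tb_config_file_def f
+--   LEFT JOIN tb_config_name_def n
+--     ON n.namespace=f.namespace AND n.conf_type=f.conf_type AND n.conf_file=f.conf_file
+--  WHERE f.namespace='PredixyTendisplusCluster'
+--  GROUP BY f.conf_type, f.conf_file;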
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='PredixyTendisplusCluster' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12966,'PredixyTendisplusCluster','config','binlogbackup','cron','STRING','@every 10m','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:57:52','2022-11-23 19:57:52',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12965,'PredixyTendisplusCluster','config','binlogbackup','old_file_left_day','INT','2','[0,365]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:57:52','2022-11-23 19:57:52',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12964,'PredixyTendisplusCluster','config','binlogbackup','to_backup_system','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:57:52','2022-11-23 19:57:52',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12961,'PredixyTendisplusCluster','config','fullbackup','cron','STRING','0 5,13,21 * * *','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:57:42','2022-11-23 19:57:42',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12960,'PredixyTendisplusCluster','config','fullbackup','old_file_left_day','INT','2','[0,365]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:57:42','2022-11-23 19:57:42',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12962,'PredixyTendisplusCluster','config','fullbackup','tar_split','BOOL','true','true|false','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:57:42','2022-11-23 19:57:42',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12963,'PredixyTendisplusCluster','config','fullbackup','tar_split_part_size','STRING','8G','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:57:44','2022-11-23 19:57:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12959,'PredixyTendisplusCluster','config','fullbackup','to_backup_system','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:57:42','2022-11-23 19:57:42',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12967,'PredixyTendisplusCluster','config','heartbeat','cron','STRING','@every 10s','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:57:52','2022-11-23 19:57:52',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12968,'PredixyTendisplusCluster','config','monitor','bkmonitor_data_id','INT','542898','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:57:52','2022-11-23 19:57:52',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12969,'PredixyTendisplusCluster','config','monitor','bkmonitor_token','STRING','8108b6fe1c8343ca8d6538652242d439','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:57:53','2022-11-23 19:57:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12970,'PredixyTendisplusCluster','config','monitor','cron','STRING','@every 1m','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:57:55','2022-11-23 19:57:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15973,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','bind','STRING','{{address}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15994,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','binlog-using-defaultCF','STRING','off','on|off','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16010,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','binlogdelrange','INT','500000','[0,10000000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15984,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','cluster-enabled','STRING','{{cluster_enabled}}','on|off','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15998,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','cluster-migration-rate-limit','INT','200','[16,2048]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15981,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','databases','INT','{{databases}}','[1,16]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16012,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','deletefilesinrange-for-binlog','INT','1','0|1','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15977,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','dir','STRING','{{redis_data_dir}}/data/db','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16005,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','domain-enabled','STRING','off','on|off','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15978,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','dumpdir','STRING','{{redis_data_dir}}/data/dump','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15986,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','executorThreadNum','INT','24','[8,56]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15985,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','executorWorkPoolSize','INT','2','[2,8]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16013,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','incrpushthreadnum','INT','10','[1,10]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15990,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','kvstorecount','INT','10','10','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15976,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','logdir','STRING','{{redis_data_dir}}/data/log','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16286,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','loglevel','STRING','notice','debug|verbose|notice|warning','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:19:09','2023-04-20 17:19:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15983,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','masterauth','STRING','{{password}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15995,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','maxBinlogKeepNum','INT','1','[0,50000000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16011,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','migrate-gc-enabled','STRING','false','true|false','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16001,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','migrate-snapshot-key-num','INT','30000','[64,200000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15999,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','migrateReceiveThreadnum','INT','4','[1,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16000,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','migrateSenderThreadnum','INT','4','[1,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16009,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','minbinlogkeepsec','INT','1800','[0,2592000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15996,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','netBatchSize','INT','1048576','[1048576,536870912]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15997,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','netBatchTimeoutSec','INT','10','[2,300]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15987,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','netIoThreadNum','INT','3','[2,12]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15988,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','noexpire','STRING','no','no|yes','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16006,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','pauseTimeIndexMgr','INT','1','[0,300]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15979,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','pidfile','STRING','{{redis_data_dir}}/data/tendisplus.pid','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15974,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','port','INT','{{port}}','[6379,55535]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16014,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','rename-command','STRING','config confxx \nrename-command flushdb cleandb \nrename-command flushall cleanall\nrename-command debug nobug\nrename-command keys mykeys','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15982,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','requirepass','STRING','{{password}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15989,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','rocks.blockcachemb','STRING','{{rocks_blockcachemb}}','[32,262144]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16003,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','rocks.cache_index_and_filter_blocks','INT','0','0|1','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15991,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','rocks.compress_type','STRING','lz4','none|snappy|lz4','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15992,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','rocks.max_background_compactions','INT','12','[2,256]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15993,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','rocks.write_buffer_size','INT','{{rocks_write_buffer_size}}','[4194304,268435456]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16007,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','scanCntIndexMgr','INT','10000','[0,200000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16002,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','slave-migrate-enabled','STRING','on','on|off','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15980,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','slowlog','STRING','{{redis_data_dir}}/data/slowlog','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16008,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','truncateBinlogIntervalMs','INT','100','[1,300000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16004,'PredixyTendisplusCluster','dbconf','Tendisplus-2.5','truncateBinlogNum','INT','10000000','[50000,2000000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16015,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','bind','STRING','{{address}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16036,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','binlog-using-defaultCF','STRING','off','on|off','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16026,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','cluster-enabled','STRING','{{cluster_enabled}}','on|off','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16040,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','cluster-migration-rate-limit','INT','200','[16,2048]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16023,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','databases','INT','{{databases}}','[1,16]','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16019,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','dir','STRING','{{redis_data_dir}}/data/db','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16047,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','domain-enabled','STRING','off','on|off','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16020,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','dumpdir','STRING','{{redis_data_dir}}/data/dump','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16028,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','executorThreadNum','INT','24','[8,56]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16027,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','executorWorkPoolSize','INT','2','[2,8]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16052,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','incrpushthreadnum','INT','10','[1,10]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16032,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','kvstorecount','INT','10','10','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16018,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','logdir','STRING','{{redis_data_dir}}/data/log','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16287,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','loglevel','STRING','notice','debug|verbose|notice|warning','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:19:09','2023-04-20 17:19:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16025,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','masterauth','STRING','{{password}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16037,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','maxBinlogKeepNum','INT','1','[0,50000000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16043,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','migrate-snapshot-key-num','INT','30000','[64,200000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16041,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','migrateReceiveThreadnum','INT','4','[1,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16042,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','migrateSenderThreadnum','INT','4','[1,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16051,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','minbinlogkeepsec','INT','1800','[0,2592000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16038,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','netBatchSize','INT','1048576','[1048576,536870912]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16039,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','netBatchTimeoutSec','INT','10','[2,300]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16029,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','netIoThreadNum','INT','3','[2,12]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16030,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','noexpire','STRING','no','no|yes','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16048,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','pauseTimeIndexMgr','INT','1','[0,300]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16021,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','pidfile','STRING','{{redis_data_dir}}/data/tendisplus.pid','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16016,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','port','INT','{{port}}','[6379,55535]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16053,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','rename-command','STRING','config confxx \nrename-command flushdb cleandb \nrename-command flushall cleanall\nrename-command debug nobug\nrename-command keys mykeys','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16024,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','requirepass','STRING','{{password}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16031,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','rocks.blockcachemb','STRING','{{rocks_blockcachemb}}','[32,262144]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16045,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','rocks.cache_index_and_filter_blocks','INT','0','0|1','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16033,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','rocks.compress_type','STRING','lz4','none|snappy|lz4','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16034,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','rocks.max_background_compactions','INT','12','[2,256]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16035,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','rocks.write_buffer_size','INT','{{rocks_write_buffer_size}}','[4194304,268435456]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16049,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','scanCntIndexMgr','INT','10000','[0,200000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16044,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','slave-migrate-enabled','STRING','on','on|off','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16022,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','slowlog','STRING','{{redis_data_dir}}/data/slowlog','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16050,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','truncateBinlogIntervalMs','INT','100','[1,300000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16046,'PredixyTendisplusCluster','dbconf','Tendisplus-2.6','truncateBinlogNum','INT','10000000','[50000,2000000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16060,'PredixyTendisplusCluster','proxyconf','Predixy-latest','clienttimeout','INT','0','[0,300]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16059,'PredixyTendisplusCluster','proxyconf','Predixy-latest','keepalive','INT','0','[0,300]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16056,'PredixyTendisplusCluster','proxyconf','Predixy-latest','password','STRING','{{password}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16055,'PredixyTendisplusCluster','proxyconf','Predixy-latest','port','INT','{{port}}','[50000,59999]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16054,'PredixyTendisplusCluster','proxyconf','Predixy-latest','redis_password','STRING','{{redis_password}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16061,'PredixyTendisplusCluster','proxyconf','Predixy-latest','refreshinterval','INT','1','[1,30]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16062,'PredixyTendisplusCluster','proxyconf','Predixy-latest','serverfailurelimit','INT','10','[2,60]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16063,'PredixyTendisplusCluster','proxyconf','Predixy-latest','serverretrytimeout','INT','1','[1,60]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16058,'PredixyTendisplusCluster','proxyconf','Predixy-latest','servertimeout','INT','0','[0,300]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16057,'PredixyTendisplusCluster','proxyconf','Predixy-latest','workerthreads','INT','8','[1,64]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:47','2023-04-20 17:03:47',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:28
diff --git a/dbm-services/common/db-config/assets/migrations/000015_rediscomm_data.down.sql b/dbm-services/common/db-config/assets/migrations/000015_rediscomm_data.down.sql
new file mode 100644
index 0000000000..c690c3862f
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000015_rediscomm_data.down.sql
@@ -0,0 +1,4 @@
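+-- Revert for 000015: remove the rediscomm definitions seeded by the matching up migration.
+-- Note: encrypted rows whose default is not a '{{...}}' template placeholder are kept, likely to preserve user-set secrets.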
+DELETE FROM tb_config_file_def WHERE namespace='rediscomm';
+DELETE FROM tb_config_name_def WHERE namespace='rediscomm' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000015_rediscomm_data.up.sql b/dbm-services/common/db-config/assets/migrations/000015_rediscomm_data.up.sql
new file mode 100644
index 0000000000..22ce648637
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000015_rediscomm_data.up.sql
@@ -0,0 +1,109 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='rediscomm'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (124,'rediscomm','config','base','配置','运行时间','pub,plat,app,module,cluster','cluster',1,1,0,'rediscomm',5,365,0,'运行时间','2022-11-29 14:51:54','2023-04-06 11:59:44','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (126,'rediscomm','config','bigkey','配置','大key相关配置','pub,plat,app,module,cluster','cluster',1,1,0,'rediscomm',5,365,0,'大key相关配置','2022-11-29 14:51:54','2023-04-06 11:59:44','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (171,'rediscomm','config','binlogbackup','配置','binlog备份相关的配置','pub,plat,app,module,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'binlog备份相关的配置','2023-02-28 15:01:19','2023-04-06 11:59:44','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (170,'rediscomm','config','fullbackup','配置','全备相关的配置','pub,plat,app,module,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'全备相关的配置','2023-02-28 15:01:19','2023-04-06 11:59:44','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (172,'rediscomm','config','heartbeat','配置','心跳相关的配置','pub,plat,app,module,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'心跳相关的配置','2023-02-28 15:01:19','2023-04-06 11:59:44','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (125,'rediscomm','config','hotkey','配置','热key相关配置','pub,plat,app,module,cluster','cluster',1,1,0,'rediscomm',5,365,0,'热key相关配置','2022-11-29 14:51:54','2023-04-06 11:59:44','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (127,'rediscomm','config','keymod','配置','key模式相关配置','pub,plat,app,module,cluster','cluster',1,1,0,'rediscomm',5,365,0,'key模式相关配置','2022-11-29 14:51:54','2023-04-06 11:59:44','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (173,'rediscomm','config','monitor','配置','监控相关的配置','pub,plat,app,module,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'监控相关的配置','2023-02-28 15:01:19','2023-04-06 11:59:44','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='rediscomm' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12973,'rediscomm','config','base','cron','string','0 8 * * *',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-29 15:00:02','2022-11-29 17:08:30',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12980,'rediscomm','config','bigkey','disk_max_usage','int','65',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-29 15:00:02','2022-11-29 17:08:30',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12977,'rediscomm','config','bigkey','duration_seconds','int','10800',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-29 15:00:02','2022-11-29 17:08:30',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12982,'rediscomm','config','bigkey','keymod_engine','string','default',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-29 15:00:02','2022-11-29 17:08:30',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12981,'rediscomm','config','bigkey','keymod_spec','string','[]',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-29 15:00:02','2022-11-29 17:08:30',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12978,'rediscomm','config','bigkey','on_master','bool','false',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-29 15:00:02','2022-11-29 17:08:30',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12976,'rediscomm','config','bigkey','top_count','int','10',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-29 15:00:02','2022-11-29 17:08:30',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12979,'rediscomm','config','bigkey','use_rdb','bool','true',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-29 15:00:02','2022-11-29 17:08:30',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13954,'rediscomm','config','binlogbackup','cron','STRING','@every 10m','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-02-28 15:01:19','2023-02-28 15:01:19',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13953,'rediscomm','config','binlogbackup','old_file_left_day','INT','2','[0,365]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-02-28 15:01:19','2023-02-28 15:01:19',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13952,'rediscomm','config','binlogbackup','to_backup_system','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-02-28 15:01:19','2023-02-28 15:01:19',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13949,'rediscomm','config','fullbackup','cron','STRING','0 5,13,21 * * *','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-02-28 15:01:19','2023-02-28 15:01:19',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13948,'rediscomm','config','fullbackup','old_file_left_day','INT','2','[0,365]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-02-28 15:01:19','2023-02-28 15:01:19',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13950,'rediscomm','config','fullbackup','tar_split','BOOL','false','true|false','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-02-28 15:01:19','2023-04-28 10:07:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13951,'rediscomm','config','fullbackup','tar_split_part_size','STRING','8G','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-02-28 15:01:19','2023-02-28 15:01:19',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13947,'rediscomm','config','fullbackup','to_backup_system','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-02-28 15:01:19','2023-02-28 15:01:19',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13955,'rediscomm','config','heartbeat','cron','STRING','@every 10s','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-02-28 15:01:19','2023-02-28 15:01:19',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12975,'rediscomm','config','hotkey','duration_seconds','int','30',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-29 15:00:02','2022-11-29 17:08:30',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13938,'rediscomm','config','hotkey','top_count','int','10','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-02-28 15:01:19','2023-02-28 15:01:19',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12974,'rediscomm','config','hotkye','top_count','int','10',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-29 15:00:02','2022-11-29 17:08:30',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15965,'rediscomm','config','monitor','bkmonitor_event_data_id','INT','542898','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-06 11:54:33','2023-04-06 11:54:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15966,'rediscomm','config','monitor','bkmonitor_event_token','STRING','8108b6fe1c8343ca8d6538652242d439','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-06 11:54:33','2023-04-06 11:54:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15968,'rediscomm','config','monitor','bkmonitor_metirc_token','STRING','4301571541434e1091cb70af6164ee67','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-06 11:54:33','2023-04-06 11:54:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15967,'rediscomm','config','monitor','bkmonitor_metric_data_id','INT','543957','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-06 11:54:33','2023-04-06 11:54:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15969,'rediscomm','config','monitor','cron','STRING','@every 1m','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-06 11:54:34','2023-04-06 11:54:34',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
diff --git a/dbm-services/common/db-config/assets/migrations/000016_RedisInstance_data.down.sql b/dbm-services/common/db-config/assets/migrations/000016_RedisInstance_data.down.sql
new file mode 100644
index 0000000000..e21e0c0e24
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000016_RedisInstance_data.down.sql
@@ -0,0 +1,4 @@
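+-- Revert for 000016: remove the RedisInstance definitions seeded by the matching up migration.
+-- Note: encrypted rows whose default is not a '{{...}}' template placeholder are kept, likely to preserve user-set secrets.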
+DELETE FROM tb_config_file_def WHERE namespace='RedisInstance';
+DELETE FROM tb_config_name_def WHERE namespace='RedisInstance' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000016_RedisInstance_data.up.sql b/dbm-services/common/db-config/assets/migrations/000016_RedisInstance_data.up.sql
new file mode 100644
index 0000000000..13cf689fe6
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000016_RedisInstance_data.up.sql
@@ -0,0 +1,134 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='RedisInstance'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (62,'RedisInstance','redisconf','Redis-6','Redis参数配置','redis-6版本_参数配置','plat,app,cluster','cluster',1,1,0,NULL,0,0,0,'redis-6版本_参数配置','2022-08-10 11:53:22','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='RedisInstance' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8491,'RedisInstance','redisconf','Redis-6','activerehashing','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8456,'RedisInstance','redisconf','Redis-6','always-show-logo','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8477,'RedisInstance','redisconf','Redis-6','aof-load-truncated','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8479,'RedisInstance','redisconf','Redis-6','aof-rewrite-incremental-fsync','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8478,'RedisInstance','redisconf','Redis-6','aof-use-rdb-preamble','STRING','no','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8472,'RedisInstance','redisconf','Redis-6','appendfilename','STRING','appendonly.aof','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8473,'RedisInstance','redisconf','Redis-6','appendfsync','STRING','everysec','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8471,'RedisInstance','redisconf','Redis-6','appendonly','STRING','no','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8476,'RedisInstance','redisconf','Redis-6','auto-aof-rewrite-min-size','STRING','64mb','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8475,'RedisInstance','redisconf','Redis-6','auto-aof-rewrite-percentage','INT','100','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8437,'RedisInstance','redisconf','Redis-6','bind','STRING','{{address}} 127.0.0.1','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8483,'RedisInstance','redisconf','Redis-6','client-output-buffer-limit','STRING','normal 0 0 0 \n client-output-buffer-limit slave 2048mb 2048mb 300 \n client-output-buffer-limit pubsub 32mb 8mb 60','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8481,'RedisInstance','redisconf','Redis-6','cluster-config-file','STRING','nodes.conf','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8445,'RedisInstance','redisconf','Redis-6','cluster-enabled','STRING','{{cluster_enabled}}','no|yes','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8482,'RedisInstance','redisconf','Redis-6','cluster-node-timeout','INT','15000','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8446,'RedisInstance','redisconf','Redis-6','daemonize','STRING','yes','yes|no','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8444,'RedisInstance','redisconf','Redis-6','databases','INT','{{databases}}','[1,16]','RANGE',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8461,'RedisInstance','redisconf','Redis-6','dbfilename','STRING','dump.rdb','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8443,'RedisInstance','redisconf','Redis-6','dir','STRING','{{redis_data_dir}}/data','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8486,'RedisInstance','redisconf','Redis-6','hash-max-ziplist-entries','INT','512','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8487,'RedisInstance','redisconf','Redis-6','hash-max-ziplist-value','INT','64','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8490,'RedisInstance','redisconf','Redis-6','hll-sparse-max-bytes','INT','3000','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8453,'RedisInstance','redisconf','Redis-6','hz','INT','10','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8467,'RedisInstance','redisconf','Redis-6','lazyfree-lazy-eviction','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8468,'RedisInstance','redisconf','Redis-6','lazyfree-lazy-expire','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8469,'RedisInstance','redisconf','Redis-6','lazyfree-lazy-server-del','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8489,'RedisInstance','redisconf','Redis-6','list-compress-depth','INT','0','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8488,'RedisInstance','redisconf','Redis-6','list-max-ziplist-size','INT','-2','[-5,-1]','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2023-04-20 19:00:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8441,'RedisInstance','redisconf','Redis-6','logfile','STRING','{{redis_data_dir}}/redis.log','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8455,'RedisInstance','redisconf','Redis-6','loglevel','STRING','notice','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8480,'RedisInstance','redisconf','Redis-6','lua-time-limit','INT','5000','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8454,'RedisInstance','redisconf','Redis-6','maxclients','INT','180000','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8440,'RedisInstance','redisconf','Redis-6','maxmemory','STRING','{{maxmemory}}','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8448,'RedisInstance','redisconf','Redis-6','maxmemory-policy','STRING','noeviction','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8474,'RedisInstance','redisconf','Redis-6','no-appendfsync-on-rewrite','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8442,'RedisInstance','redisconf','Redis-6','pidfile','STRING','{{redis_data_dir}}/redis.pid','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8438,'RedisInstance','redisconf','Redis-6','port','INT','{{port}}','[6379,55535]','RANGE',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8447,'RedisInstance','redisconf','Redis-6','protected-mode','STRING','yes','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8460,'RedisInstance','redisconf','Redis-6','rdbchecksum','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8459,'RedisInstance','redisconf','Redis-6','rdbcompression','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8466,'RedisInstance','redisconf','Redis-6','rename-command','STRING','flushall cleanall \n rename-command config confxx \n rename-command flushdb cleandb \n rename-command debug nobug \n rename-command keys mykeys','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8464,'RedisInstance','redisconf','Redis-6','repl-diskless-sync','STRING','no','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8439,'RedisInstance','redisconf','Redis-6','requirepass','STRING','{{password}}','','',2,0,0,1,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8457,'RedisInstance','redisconf','Redis-6','save','STRING','','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8470,'RedisInstance','redisconf','Redis-6','slave-lazy-flush','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8465,'RedisInstance','redisconf','Redis-6','slave-priority','INT','100','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8463,'RedisInstance','redisconf','Redis-6','slave-read-only','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8462,'RedisInstance','redisconf','Redis-6','slave-serve-stale-data','STRING','yes','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8492,'RedisInstance','redisconf','Redis-6','slowlog-log-slower-than','INT','10000','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8493,'RedisInstance','redisconf','Redis-6','slowlog-max-len','INT','256','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8458,'RedisInstance','redisconf','Redis-6','stop-writes-on-bgsave-error','STRING','yes','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8452,'RedisInstance','redisconf','Redis-6','supervised','STRING','no','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8449,'RedisInstance','redisconf','Redis-6','tcp-backlog','INT','511','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8451,'RedisInstance','redisconf','Redis-6','tcp-keepalive','INT','300','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8450,'RedisInstance','redisconf','Redis-6','timeout','INT','0','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8484,'RedisInstance','redisconf','Redis-6','zset-max-ziplist-entries','INT','128','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8485,'RedisInstance','redisconf','Redis-6','zset-max-ziplist-value','INT','64','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-10 22:15:55','2022-10-18 10:08:16',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
diff --git a/dbm-services/common/db-config/assets/migrations/000017_RedisMS_data.down.sql b/dbm-services/common/db-config/assets/migrations/000017_RedisMS_data.down.sql
new file mode 100644
index 0000000000..918d132389
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000017_RedisMS_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='RedisMS';
+DELETE FROM tb_config_name_def WHERE namespace='RedisMS' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000017_RedisMS_data.up.sql b/dbm-services/common/db-config/assets/migrations/000017_RedisMS_data.up.sql
new file mode 100644
index 0000000000..f940eec739
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000017_RedisMS_data.up.sql
@@ -0,0 +1,78 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='RedisMS'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (52,'RedisMS','dbconf','RedisMS-2.8','DB参数配置','Redis主从版(2.8.17)','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'Redis主从版(2.8.17)','2022-08-02 14:29:01','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (53,'RedisMS','dbconf','RedisMS-3.2','DB参数配置','Redis主从版(3.2.12)','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'Redis主从版(3.2.12)','2022-08-02 14:29:01','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='RedisMS' AND (flag_encrypt!=1 or value_default like '{{%')
+
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
diff --git a/dbm-services/common/db-config/assets/migrations/000018_tendb_data.down.sql b/dbm-services/common/db-config/assets/migrations/000018_tendb_data.down.sql
new file mode 100644
index 0000000000..91778a1c28
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000018_tendb_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='tendb';
+DELETE FROM tb_config_name_def WHERE namespace='tendb' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000018_tendb_data.up.sql b/dbm-services/common/db-config/assets/migrations/000018_tendb_data.up.sql
new file mode 100644
index 0000000000..c525ee55d6
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000018_tendb_data.up.sql
@@ -0,0 +1,89 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='tendb'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (43,'tendb','init_user','mysql#user','','初始化用户','plat','',0,1,0,'',0,0,0,'初始化用户','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (32,'tendb','init_user','proxy#user','','初始化用户','plat','',0,1,0,'',0,0,0,'初始化用户','2022-04-25 10:19:22','2023-03-24 17:35:17','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (193,'tendb','init_user','spider#user','','初始化用户','plat','',0,1,0,'',0,0,0,'初始化用户','2023-03-09 17:40:06','2023-03-24 17:35:35','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (80,'tendb','sys','super_account',NULL,'gcs_admin','plat',NULL,1,1,0,NULL,5,365,0,'gcs_admin','2022-09-19 13:20:56','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='tendb' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8855,'tendb','init_user','mysql#user','admin_user','STRING','ADMIN','','STRING',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:54:15','2022-09-19 13:15:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8862,'tendb','init_user','mysql#user','backup_user','STRING','dba_bak_all_sel','','STRING',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:54:15','2022-09-19 13:16:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8866,'tendb','init_user','mysql#user','monitor_access_all_user','STRING','MONITOR','','STRING',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:54:15','2022-09-19 13:16:21',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8854,'tendb','init_user','mysql#user','monitor_user','STRING','MONITOR','','STRING',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:54:15','2022-09-19 13:15:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8868,'tendb','init_user','mysql#user','os_mysql_user','STRING','mysql','','STRING',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:54:15','2022-09-19 13:16:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8858,'tendb','init_user','mysql#user','repl_user','STRING','repl','','STRING',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:54:15','2022-09-19 13:15:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8864,'tendb','init_user','mysql#user','yw_user','STRING','yw','','STRING',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:54:15','2022-09-19 13:16:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8860,'tendb','init_user','proxy#user','proxy_admin_user','STRING','proxy','','STRING',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:54:15','2023-03-24 17:30:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15825,'tendb','init_user','spider#user','tdbctl_user','STRING','tdbctl','','STRING',1,0,1,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-24 17:30:47',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
diff --git a/dbm-services/common/db-config/assets/migrations/000019_tendbcluster_data.down.sql b/dbm-services/common/db-config/assets/migrations/000019_tendbcluster_data.down.sql
new file mode 100644
index 0000000000..4e892b3a42
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000019_tendbcluster_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='tendbcluster';
+DELETE FROM tb_config_name_def WHERE namespace='tendbcluster' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000019_tendbcluster_data.up.sql b/dbm-services/common/db-config/assets/migrations/000019_tendbcluster_data.up.sql
new file mode 100644
index 0000000000..738961a221
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000019_tendbcluster_data.up.sql
@@ -0,0 +1,1626 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='tendbcluster'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (187,'tendbcluster','backup','binlog_rotate.yaml','binlog滚动与备份选项','binlog_rotate.yaml','plat,app,module,cluster','',1,1,1,'',0,0,0,'binlog_rotate.yaml','2023-03-09 17:40:06','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (188,'tendbcluster','backup','dbbackup.ini','备份配置','dbbackup.conf配置项','plat,app,module,cluster','',1,1,0,'',0,0,0,'dbbackup.conf配置项','2023-03-09 17:40:06','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (189,'tendbcluster','backup','dbbackup.options','备份控制选项','dbbackup.ini控制选项','plat,app,module,cluster','',1,1,0,'',0,0,0,'dbbackup.ini控制选项','2023-03-09 17:40:06','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (190,'tendbcluster','checksum','checksum.option','checksum控制选项','checksum.option','plat,app,module,cluster','',1,1,0,'',0,0,0,'checksum.option','2023-03-09 17:40:06','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (191,'tendbcluster','checksum','checksum.yaml','checksum配置','checksum.yaml','plat,app,module,cluster','',1,1,0,'',0,0,0,'checksum.yaml','2023-03-09 17:40:06','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (145,'tendbcluster','dbconf','MySQL-5.6','my.cnf配置','5.6_参数配置','plat,app,module,cluster','cluster',1,1,0,'',0,0,0,'5.6_参数配置','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (144,'tendbcluster','dbconf','MySQL-5.7','my.cnf配置','5.7_参数配置','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'5.7配置','2022-04-25 10:19:22','2023-03-28 21:40:30','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (143,'tendbcluster','dbconf','MySQL-8.0','','8.0_参数配置','plat,app,module,cluster','cluster',1,1,0,'',0,0,0,'MySQL8.0配置','2022-06-02 17:27:34','2023-03-28 21:40:19','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (147,'tendbcluster','dbconf','Spider-1','my.cnf配置','Spider 1.x 接入层','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'Spider 1.x 接入层','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (148,'tendbcluster','dbconf','Spider-3','my.cnf配置','Spider 3.x 接入层','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'Spider 3.x 接入层','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (177,'tendbcluster','dbconf','Tdbctl','my.cnf配置','tdbctl中控配置','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'tdbctl中控配置','2022-04-25 10:19:22','2023-05-10 19:35:47','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (192,'tendbcluster','deploy','deploy_info','部署配置',NULL,'plat,app,module,cluster','',0,1,0,NULL,5,365,0,NULL,'2023-03-09 17:40:06','2023-03-20 21:40:05','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (194,'tendbcluster','sys','sysfile','系统配置',NULL,'plat','',1,1,0,NULL,5,365,0,NULL,'2023-03-09 17:40:06','2023-03-20 21:40:05','');
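+
+-- Illustrative query (left commented so importing this dump stays side-effect
+-- free): the tendbcluster file definitions inserted above can be listed after
+-- import with something along these lines:
+--
+--   SELECT conf_type, conf_file, level_names, version_keep_limit, version_keep_days
+--   FROM tb_config_file_def
+--   WHERE namespace = 'tendbcluster'
+--   ORDER BY conf_type, conf_file;
+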
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
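+
+-- Note on the block above: the /*! ... */ statements are MySQL version-conditional
+-- comments, executed only by servers at or above the embedded version (50717 =
+-- 5.7.17). They probe performance_schema.session_variables for a
+-- rocksdb_bulk_load variable and, where the engine supports it, turn bulk load
+-- on for the duration of the import; the matching /*!50112 ... */ block in the
+-- footer above restores the saved value the same way.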
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='tendbcluster' AND (flag_encrypt!=1 or value_default like '{{%')
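+-- (encrypted values are therefore excluded unless their stored default is a
+-- '{{...}}' template placeholder, so no concrete secrets appear in this dump)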
+
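+-- Value conventions visible in the rows below: ENUM candidates are listed
+-- pipe-separated in value_allowed ('true | false'), RANGE gives an inclusive
+-- interval ('[1, 99]'), BYTES takes unit suffixes ('200g'), and DURATION takes
+-- time spans such as '4h' or '61d'.
+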
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15918,'tendbcluster','backup','binlog_rotate.yaml','backup_client.cos','STRING','{\n  \"enable\": true,\n  \"with_md5\": true,\n  \"file_tag\": \"INCREMENT_BACKUP\",\n  \"tool_path\": \"cos-client\"\n}','','MAP',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14143,'tendbcluster','backup','binlog_rotate.yaml','backup_client.ibs','STRING','{\n  \"enable\": false,\n  \"ibs_mode\": \"hdfs\",\n  \"with_md5\": true,\n  \"file_tag\": \"INCREMENT_BACKUP\",\n  \"tool_path\": \"backup_client\"\n}','','MAP',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-04-13 21:58:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14144,'tendbcluster','backup','binlog_rotate.yaml','crond.api_url','STRING','http://127.0.0.1:9999','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14145,'tendbcluster','backup','binlog_rotate.yaml','crond.command','STRING','cd /home/mysql/rotate_binlog && ./rotatebinlog -c config.yaml','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:40',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14146,'tendbcluster','backup','binlog_rotate.yaml','crond.item_name','STRING','rotate_binlog','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14147,'tendbcluster','backup','binlog_rotate.yaml','crond.schedule','STRING','*/5 * * * *','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14148,'tendbcluster','backup','binlog_rotate.yaml','encrypt.enable','BOOL','false','true | false','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:30:40',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14150,'tendbcluster','backup','binlog_rotate.yaml','public.keep_policy','STRING','most','most | least','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14151,'tendbcluster','backup','binlog_rotate.yaml','public.max_binlog_total_size','STRING','200g','[100m, 9999g]','BYTES',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14152,'tendbcluster','backup','binlog_rotate.yaml','public.max_disk_used_pct','INT','80','[1,99]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:46',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14153,'tendbcluster','backup','binlog_rotate.yaml','public.max_keep_duration','STRING','61d','','DURATION',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14154,'tendbcluster','backup','binlog_rotate.yaml','public.purge_interval','STRING','4h','','DURATION',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14155,'tendbcluster','backup','binlog_rotate.yaml','public.rotate_interval','STRING','10m','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:49',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14156,'tendbcluster','backup','binlog_rotate.yaml','report.enable','BOOL','true','true | false','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14157,'tendbcluster','backup','binlog_rotate.yaml','report.filepath','STRING','/home/mysql/dbareport/mysql/binlog','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:51',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14158,'tendbcluster','backup','binlog_rotate.yaml','report.log_maxage','INT','30','[1, 60]','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:52',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14159,'tendbcluster','backup','binlog_rotate.yaml','report.log_maxbackups','INT','10','[1, 30]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14160,'tendbcluster','backup','binlog_rotate.yaml','report.log_maxsize','INT','5','[1, 10]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:27:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14161,'tendbcluster','backup','dbbackup.ini','BackupClient.DoChecksum','STRING','true','true | false','ENUM',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14162,'tendbcluster','backup','dbbackup.ini','BackupClient.FileTag','STRING','MYSQL_FULL_BACKUP','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14163,'tendbcluster','backup','dbbackup.ini','BackupClient.RemoteFileSystem','STRING','hdfs','hdfs | cos','ENUM',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16361,'tendbcluster','backup','dbbackup.ini','LogicalBackup.ChunkFilesize','INT','2048','[512, 9999999]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'MB','2022-05-26 20:11:23','2023-05-24 21:40:03',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16347,'tendbcluster','backup','dbbackup.ini','LogicalBackup.DefaultsFile','STRING','','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-25 09:50:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16358,'tendbcluster','backup','dbbackup.ini','LogicalBackup.DisableCompress','STRING','false','false | true','BOOL',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-05-24 21:45:24','2023-05-24 21:45:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16346,'tendbcluster','backup','dbbackup.ini','LogicalBackup.ExtraOpt','STRING','--skip-definer','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-05-24 21:45:24','2023-05-25 09:59:19',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14164,'tendbcluster','backup','dbbackup.ini','LogicalBackup.FlushRetryCount','INT','3','','INT',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14168,'tendbcluster','backup','dbbackup.ini','LogicalBackup.Regex','STRING','{{.LogicalBackup.Regex}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16359,'tendbcluster','backup','dbbackup.ini','LogicalBackup.Threads','INT','4','','INT',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:42:40',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16349,'tendbcluster','backup','dbbackup.ini','LogicalLoad.EnableBinlog','STRING','false','false | true','BOOL',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 22:01:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16357,'tendbcluster','backup','dbbackup.ini','LogicalLoad.ExtraOpt','STRING','','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:46:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14170,'tendbcluster','backup','dbbackup.ini','LogicalLoad.IndexFilePath','STRING','/data/dbbak/xxxxx','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14172,'tendbcluster','backup','dbbackup.ini','LogicalLoad.MysqlCharset','STRING','binary','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14173,'tendbcluster','backup','dbbackup.ini','LogicalLoad.MysqlHost','STRING','{{.LogicalLoad.MysqlHost}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14174,'tendbcluster','backup','dbbackup.ini','LogicalLoad.MysqlLoadDir','STRING','/data/dbbak/your_loader_dir','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14175,'tendbcluster','backup','dbbackup.ini','LogicalLoad.MysqlPasswd','STRING','{{.LogicalLoad.MysqlPasswd}}','','STRING',2,0,0,1,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14176,'tendbcluster','backup','dbbackup.ini','LogicalLoad.MysqlPort','STRING','{{.LogicalLoad.MysqlPort}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14177,'tendbcluster','backup','dbbackup.ini','LogicalLoad.MysqlUser','STRING','{{.LogicalLoad.MysqlUser}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14179,'tendbcluster','backup','dbbackup.ini','LogicalLoad.Regex','STRING','','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14180,'tendbcluster','backup','dbbackup.ini','LogicalLoad.Threads','INT','2','','INT',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16343,'tendbcluster','backup','dbbackup.ini','PhysicalBackup.DefaultsFile','STRING','{{.PhysicalBackup.DefaultsFile}}','','',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-25 10:24:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16354,'tendbcluster','backup','dbbackup.ini','PhysicalBackup.ExtraOpt','STRING','--safe-slave-backup-timeout=60','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:46:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16345,'tendbcluster','backup','dbbackup.ini','PhysicalBackup.SplitSpeed','INT','500','[0, 2048]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'MB/s','2022-05-26 20:11:23','2023-05-25 10:03:30',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16363,'tendbcluster','backup','dbbackup.ini','PhysicalBackup.Threads','INT','2','[0, 8]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-10 15:21:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16348,'tendbcluster','backup','dbbackup.ini','PhysicalBackup.Throttle','INT','100','[0, 200]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 22:07:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16352,'tendbcluster','backup','dbbackup.ini','PhysicalLoad.CopyBack','STRING','false','false | true','BOOL',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:56:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16355,'tendbcluster','backup','dbbackup.ini','PhysicalLoad.DefaultsFile','STRING','/etc/my.cnf','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:46:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16356,'tendbcluster','backup','dbbackup.ini','PhysicalLoad.ExtraOpt','STRING','','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:46:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16353,'tendbcluster','backup','dbbackup.ini','PhysicalLoad.IndexFilePath','STRING','/xx/xxx.index','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:56:32',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16350,'tendbcluster','backup','dbbackup.ini','PhysicalLoad.MysqlLoadDir','STRING','/xx/loader_dir','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:57:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16351,'tendbcluster','backup','dbbackup.ini','PhysicalLoad.Threads','INT','2','[0, 16]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:57:18',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14181,'tendbcluster','backup','dbbackup.ini','Public.BackupDir','STRING','/data/dbbak/','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16344,'tendbcluster','backup','dbbackup.ini','Public.BackupId','STRING','{{.Public.BackupId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-25 10:08:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14182,'tendbcluster','backup','dbbackup.ini','Public.BackupTimeout','STRING','09:00:00','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14183,'tendbcluster','backup','dbbackup.ini','Public.BackupType','STRING','logical','logical | physical','ENUM',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14184,'tendbcluster','backup','dbbackup.ini','Public.BillId','INT','{{.Public.BillId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14185,'tendbcluster','backup','dbbackup.ini','Public.BkBizId','INT','{{.Public.BkBizId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14186,'tendbcluster','backup','dbbackup.ini','Public.BkCloudId','INT','{{.Public.BkCloudId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14187,'tendbcluster','backup','dbbackup.ini','Public.ClusterAddress','STRING','{{.Public.ClusterAddress}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16307,'tendbcluster','backup','dbbackup.ini','Public.ClusterId','STRING','{{.Public.ClusterId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-05-15 11:20:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14188,'tendbcluster','backup','dbbackup.ini','Public.DataSchemaGrant','STRING','{{.Public.DataSchemaGrant}}','All | Schema | Grant | Data','ENUM',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16362,'tendbcluster','backup','dbbackup.ini','Public.IOLimitMBPerSec','INT','500','[0, 4096]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-15 11:21:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14189,'tendbcluster','backup','dbbackup.ini','Public.MysqlCharset','STRING','binary','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14190,'tendbcluster','backup','dbbackup.ini','Public.MysqlHost','STRING','{{.Public.MysqlHost}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14191,'tendbcluster','backup','dbbackup.ini','Public.MysqlPasswd','STRING','{{.Public.MysqlPasswd}}','','STRING',2,0,0,1,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14192,'tendbcluster','backup','dbbackup.ini','Public.MysqlPort','INT','{{.Public.MysqlPort}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14193,'tendbcluster','backup','dbbackup.ini','Public.MysqlRole','STRING','{{.Public.MysqlRole}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14194,'tendbcluster','backup','dbbackup.ini','Public.MysqlUser','STRING','{{.Public.MysqlUser}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14195,'tendbcluster','backup','dbbackup.ini','Public.OldFileLeftDay','INT','2','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14196,'tendbcluster','backup','dbbackup.ini','Public.ResultReportPath','STRING','/home/mysql/dbareport/mysql/dbbackup/result','result log dir','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14197,'tendbcluster','backup','dbbackup.ini','Public.StatusReportPath','STRING','/home/mysql/dbareport/mysql/dbbackup/status','status log dir','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16360,'tendbcluster','backup','dbbackup.ini','Public.TarSizeThreshold','INT','8196','[128, 9999999]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'MB','2022-05-26 20:11:23','2023-05-24 21:40:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14199,'tendbcluster','backup','dbbackup.options','BackupType','STRING','logical','logical | physical','ENUM',1,0,0,0,0,NULL,'','备份类型',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14200,'tendbcluster','backup','dbbackup.options','CrontabTime','STRING','3 5 * * *','','STRING',1,0,0,0,0,NULL,'','DB备份开始时间',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14201,'tendbcluster','backup','dbbackup.options','Logical.IgnoreDatabases','STRING','mysql,test,infodba_schema,sys','','',1,0,0,0,0,NULL,'','主库备份数据',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14202,'tendbcluster','backup','dbbackup.options','Logical.IgnoreTables','STRING','','','',1,0,0,0,0,NULL,'','主库备份数据',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14203,'tendbcluster','backup','dbbackup.options','Master.DataSchemaGrant','STRING','schema','grant,schema,data,all','ENUMS',1,0,0,0,0,NULL,'','从库备份数据',-1,NULL,'','2023-03-09 17:36:33','2023-03-28 20:31:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14204,'tendbcluster','backup','dbbackup.options','Slave.DataSchemaGrant','STRING','all','grant,schema,data,all','ENUMS',1,0,0,0,0,NULL,'','从库备份数据',-1,NULL,'','2023-03-09 17:36:33','2023-03-28 20:33:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15915,'tendbcluster','backup','dbbackup.options','Spider.DataSchemaGrant','STRING','grant,schema','grant,schema,data,all','ENUMS',1,0,0,0,0,NULL,'','从库备份数据',-1,NULL,'','2023-03-09 17:36:33','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14205,'tendbcluster','checksum','checksum.option','crond','STRING','0 2 * * 1-5','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14206,'tendbcluster','checksum','checksum.option','run_duration','STRING','4h','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14207,'tendbcluster','checksum','checksum.yaml','filter.databases','STRING','','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14208,'tendbcluster','checksum','checksum.yaml','filter.databases_regex','STRING','*','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14209,'tendbcluster','checksum','checksum.yaml','filter.ignore_databases','STRING','','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14210,'tendbcluster','checksum','checksum.yaml','filter.ignore_databases_regex','STRING','','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14211,'tendbcluster','checksum','checksum.yaml','filter.ignore_tables','STRING','','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14212,'tendbcluster','checksum','checksum.yaml','filter.ignore_tables_regex','STRING','','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14213,'tendbcluster','checksum','checksum.yaml','filter.tables','STRING','','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14214,'tendbcluster','checksum','checksum.yaml','filter.tables_regex','STRING','*','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14215,'tendbcluster','checksum','checksum.yaml','pt_checksum.path','STRING','./pt-table-checksum','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14216,'tendbcluster','checksum','checksum.yaml','pt_checksum.replicate','STRING','infodba_schema.checksums','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14217,'tendbcluster','checksum','checksum.yaml','report_path','STRING','./','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14218,'tendbcluster','dbconf','MySQL-5.5','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 19:15:59',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14219,'tendbcluster','dbconf','MySQL-5.5','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14220,'tendbcluster','dbconf','MySQL-5.5','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 19:28:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14221,'tendbcluster','dbconf','MySQL-5.5','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the interval between successive column values.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14222,'tendbcluster','dbconf','MySQL-5.5','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the starting point for the AUTO_INCREMENT column value.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14223,'tendbcluster','dbconf','MySQL-5.5','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}',NULL,'',2,0,0,0,1,'{{mysqld.bind-address}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14224,'tendbcluster','dbconf','MySQL-5.5','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What form of binary logging the master will use.','2023-03-09 17:36:33','2023-04-19 14:31:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14225,'tendbcluster','dbconf','MySQL-5.5','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,'Specify default server character set','2023-03-09 17:36:33','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14226,'tendbcluster','dbconf','MySQL-5.5','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,'{{mysqld.collation_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14227,'tendbcluster','dbconf','MySQL-5.5','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If AUTO (the default), MySQL permits INSERT and SELECT statements to run concurrently for MyISAM tables that have no free blocks in the middle of the data file.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14228,'tendbcluster','dbconf','MySQL-5.5','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14229,'tendbcluster','dbconf','MySQL-5.5','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/data',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14230,'tendbcluster','dbconf','MySQL-5.5','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-17 14:26:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14231,'tendbcluster','dbconf','MySQL-5.5','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-07 15:35:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14232,'tendbcluster','dbconf','MySQL-5.5','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default mode value to use for the WEEK() function','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14233,'tendbcluster','dbconf','MySQL-5.5','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14234,'tendbcluster','dbconf','MySQL-5.5','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How long an INSERT DELAYED thread should wait for INSERT statements before terminating.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14235,'tendbcluster','dbconf','MySQL-5.5','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14236,'tendbcluster','dbconf','MySQL-5.5','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This option applies only to MyISAM tables. It can have one of the following values to affect handling of the DELAY_KEY_WRITE table option that can be used in CREATE TABLE statements.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14237,'tendbcluster','dbconf','MySQL-5.5','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the number of digits by which to increase the scale of the result of division operations performed with the / operator','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14238,'tendbcluster','dbconf','MySQL-5.5','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the status of the Event Scheduler','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14239,'tendbcluster','dbconf','MySQL-5.5','mysqld.expire_logs_days','INT','61','[0, 99]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-26 20:27:21',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14240,'tendbcluster','dbconf','MySQL-5.5','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14241,'tendbcluster','dbconf','MySQL-5.5','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of best matches to use for query expansion','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14242,'tendbcluster','dbconf','MySQL-5.5','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted result length in bytes for the GROUP_CONCAT() function','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14244,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The lock mode to use for generating auto-increment values','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14246,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}',NULL,'',2,0,0,0,1,'{{mysqld.innodb_buffer_pool_size}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14247,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the number of threads that can enter InnoDB concurrently','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14248,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14249,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/data',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14250,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_file_format','STRING','Barracuda','Barracuda','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14251,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14252,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_flush_log_at_trx_commit','INT','0','0|1|2','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14253,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14254,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The length of time in seconds an InnoDB transaction waits for a row lock before giving up','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14255,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14256,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14257,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14258,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/log',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14259,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_max_dirty_pages_pct','INT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to flush data from the buffer pool so that the percentage of dirty pages does not exceed this value','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14260,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the approximate percentage of the InnoDB buffer pool used for the old block sublist','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14261,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how long in milliseconds a block inserted into the old sublist must stay there after its first access before it can be moved to the new sublist','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14262,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the sensitivity of linear read-ahead that InnoDB uses to prefetch pages into the buffer pool','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14263,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_read_io_threads','INT','8','[0,32]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14264,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'InnoDB rolls back only the last statement on a transaction timeout by default','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14265,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14266,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this variable is enabled, InnoDB updates statistics when metadata statements such as SHOW TABLE STATUS or SHOW INDEX are run, or when accessing the INFORMATION_SCHEMA tables TABLES or STATISTICS','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14267,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When innodb_strict_mode is ON, InnoDB returns errors rather than warnings for certain conditions','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14268,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_thread_concurrency','INT','16','[1,32]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14269,'tendbcluster','dbconf','MySQL-5.5','mysqld.innodb_write_io_threads','INT','8','[0,32]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14270,'tendbcluster','dbconf','MySQL-5.5','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on an interactive connection before closing it','2023-03-09 17:36:33','2023-04-07 11:55:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14271,'tendbcluster','dbconf','MySQL-5.5','mysqld.key_buffer','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14272,'tendbcluster','dbconf','MySQL-5.5','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14273,'tendbcluster','dbconf','MySQL-5.5','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default size of key cache blocks','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14274,'tendbcluster','dbconf','MySQL-5.5','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum percentage of warm blocks in key cache','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14275,'tendbcluster','dbconf','MySQL-5.5','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable specifies the timeout in seconds for attempts to acquire metadata locks.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14276,'tendbcluster','dbconf','MySQL-5.5','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/binlog/binlog{{port}}.bin',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14278,'tendbcluster','dbconf','MySQL-5.5','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14279,'tendbcluster','dbconf','MySQL-5.5','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether queries that do not use indexes are logged to the slow query log','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14280,'tendbcluster','dbconf','MySQL-5.5','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14281,'tendbcluster','dbconf','MySQL-5.5','mysqld.log_warnings','STRING','0',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14282,'tendbcluster','dbconf','MySQL-5.5','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a query takes longer than this many seconds, the server increments the Slow_queries status variable','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14277,'tendbcluster','dbconf','MySQL-5.5','mysqld.loose_log_bin_compress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14301,'tendbcluster','dbconf','MySQL-5.5','mysqld.loose_relay_log_uncompress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14283,'tendbcluster','dbconf','MySQL-5.5','mysqld.lower_case_table_names','INT','0','[0,1]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'If set to 0, table names are stored as specified and comparisons are case sensitive. If set to 1, they are stored in lowercase on disk and comparisons are not case sensitive.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14284,'tendbcluster','dbconf','MySQL-5.5','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If set to true, all INSERT, UPDATE, DELETE, and LOCK TABLE WRITE statements wait until there is no pending SELECT or LOCK TABLE READ on the affected table','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14285,'tendbcluster','dbconf','MySQL-5.5','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14286,'tendbcluster','dbconf','MySQL-5.5','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14287,'tendbcluster','dbconf','MySQL-5.5','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14288,'tendbcluster','dbconf','MySQL-5.5','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14289,'tendbcluster','dbconf','MySQL-5.5','mysqld.max_connect_errors','STRING','99999999',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14290,'tendbcluster','dbconf','MySQL-5.5','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections per user.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14291,'tendbcluster','dbconf','MySQL-5.5','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when sorting MyISAM indexes during a REPAIR TABLE or when creating indexes with CREATE INDEX or ALTER TABLE','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14292,'tendbcluster','dbconf','MySQL-5.5','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Whether to enable the Performance Schema.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14293,'tendbcluster','dbconf','MySQL-5.5','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14294,'tendbcluster','dbconf','MySQL-5.5','mysqld.query_cache_limit','INT','1048576','[1,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Do not cache results that are larger than this number of bytes. The default value is 1MB.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14295,'tendbcluster','dbconf','MySQL-5.5','mysqld.query_cache_size','INT','0','[0,104857600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount of memory allocated for caching query results. By default, the query cache is disabled.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14296,'tendbcluster','dbconf','MySQL-5.5','mysqld.query_cache_type','STRING','OFF','OFF| ON| DEMAND','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Set the query cache type.','2023-03-09 17:36:33','2023-04-17 14:28:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14297,'tendbcluster','dbconf','MySQL-5.5','mysqld.query_cache_wlock_invalidate','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Invalidate queries in query cache on LOCK for write','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14298,'tendbcluster','dbconf','MySQL-5.5','mysqld.query_response_time_stats','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14299,'tendbcluster','dbconf','MySQL-5.5','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/relay-log/relay-log.bin',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14300,'tendbcluster','dbconf','MySQL-5.5','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14302,'tendbcluster','dbconf','MySQL-5.5','mysqld.replicate-wild-ignore-table','STRING','mysql.%,infodba_schema.conn_log',NULL,'STRING',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:12:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14305,'tendbcluster','dbconf','MySQL-5.5','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,'{{mysqld.server_id}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14306,'tendbcluster','dbconf','MySQL-5.5','mysqld.show_compatibility_56','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14307,'tendbcluster','dbconf','MySQL-5.5','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 18:37:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14308,'tendbcluster','dbconf','MySQL-5.5','mysqld.slave_compressed_protocol','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:11:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14309,'tendbcluster','dbconf','MySQL-5.5','mysqld.slave_exec_mode','STRING','STRICT','|STRICT|IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14310,'tendbcluster','dbconf','MySQL-5.5','mysqld.slave_parallel_type','STRING','DATABASE','DATABASE| LOGICAL_CLOCK','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-17 14:59:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14311,'tendbcluster','dbconf','MySQL-5.5','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-17 14:58:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14312,'tendbcluster','dbconf','MySQL-5.5','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If creating a thread takes longer than this many seconds, the server increments the Slow_launch_threads status variable','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14313,'tendbcluster','dbconf','MySQL-5.5','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-17 15:00:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14314,'tendbcluster','dbconf','MySQL-5.5','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,'{{mysqld.slow_query_log_file}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14315,'tendbcluster','dbconf','MySQL-5.5','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,'{{mysqld.socket}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14316,'tendbcluster','dbconf','MySQL-5.5','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14317,'tendbcluster','dbconf','MySQL-5.5','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this variable is set to 1, you can find the last automatically generated AUTO_INCREMENT value','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14318,'tendbcluster','dbconf','MySQL-5.5','mysqld.sql_mode','STRING','\'\'','\'\'|STRICT|ONLY_FULL_GROUP_BY|','ENUMS',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14319,'tendbcluster','dbconf','MySQL-5.5','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Note that this setting applies at the session level, so it is better set via init_connect. Set to 1 to require that UPDATE and DELETE statements include a WHERE clause or a LIMIT clause','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14320,'tendbcluster','dbconf','MySQL-5.5','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14321,'tendbcluster','dbconf','MySQL-5.5','mysqld.sync_binlog','INT','0','0|1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14322,'tendbcluster','dbconf','MySQL-5.5','mysqld.table_definition_cache','INT','768','[400,4096]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of table definitions (from .frm files) that can be stored in the definition cache.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14323,'tendbcluster','dbconf','MySQL-5.5','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of open tables for all threads. Increasing this value increases the number of file descriptors that mysqld requires.','2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14324,'tendbcluster','dbconf','MySQL-5.5','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14325,'tendbcluster','dbconf','MySQL-5.5','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Sets the global server time zone. Defaults to SYSTEM; values can be given in several formats, such as SYSTEM, +8:00, -6:00 or Europe/Helsinki. For more information, consult the MySQL documentation','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14326,'tendbcluster','dbconf','MySQL-5.5','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/tmp',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14327,'tendbcluster','dbconf','MySQL-5.5','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Default transaction isolation level','2023-03-09 17:36:33','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14328,'tendbcluster','dbconf','MySQL-5.5','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on a noninteractive connection before closing it','2023-03-09 17:36:33','2023-04-14 15:42:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14329,'tendbcluster','dbconf','MySQL-5.5','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14330,'tendbcluster','dbconf','MySQL-5.6','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 19:15:59',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14331,'tendbcluster','dbconf','MySQL-5.6','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14332,'tendbcluster','dbconf','MySQL-5.6','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 19:28:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14333,'tendbcluster','dbconf','MySQL-5.6','mysqld.automatic_sp_privileges','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to automatically grant the EXECUTE and ALTER ROUTINE privileges to the creator of a stored routine','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14334,'tendbcluster','dbconf','MySQL-5.6','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the interval between successive column values.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14335,'tendbcluster','dbconf','MySQL-5.6','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the starting point for the AUTO_INCREMENT column value.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14336,'tendbcluster','dbconf','MySQL-5.6','mysqld.back_log','INT','3000','[1,65535]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of outstanding connection requests MySQL can have.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14337,'tendbcluster','dbconf','MySQL-5.6','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}',NULL,'',2,0,0,0,1,'{{mysqld.bind-address}}',NULL,'mysqld.bind-address',0,'',NULL,'2023-03-09 17:36:33','2023-03-28 18:01:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14338,'tendbcluster','dbconf','MySQL-5.6','mysqld.binlog_cache_size','INT','2097152','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the cache to hold changes to the binary log during a transaction','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14339,'tendbcluster','dbconf','MySQL-5.6','mysqld.binlog_checksum','STRING','CRC32','NONE| CRC32 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Include a checksum for log events in the binary log. NONE indicates that only the event length is checked.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14340,'tendbcluster','dbconf','MySQL-5.6','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,'',0,'','What form of binary logging the master will use.','2023-03-09 17:36:33','2023-04-19 14:31:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14341,'tendbcluster','dbconf','MySQL-5.6','mysqld.binlog_order_commits','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When enabled on a master (the default), transactions are externalized in the same order as they are written to the binary log','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14342,'tendbcluster','dbconf','MySQL-5.6','mysqld.binlog_rows_query_log_events','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This system variable affects row-based logging only. When enabled, it causes the server to write informational log events such as row query log events into its binary log','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14343,'tendbcluster','dbconf','MySQL-5.6','mysqld.binlog_row_image','STRING','FULL','FULL| MINIMAL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the format in which rows are logged.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14344,'tendbcluster','dbconf','MySQL-5.6','mysqld.binlog_stmt_cache_size','INT','32768','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the statement cache for updates to non-transactional engines for the binary log.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14345,'tendbcluster','dbconf','MySQL-5.6','mysqld.block_encryption_mode','STRING','AES-128-ECB','AES-128-ECB| AES-192-ECB| AES-256-ECB| AES-128-CBC| AES-192-CBC| AES-256-CBC ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls the block encryption mode for block-based algorithms such as AES. It affects encryption for AES_ENCRYPT() and AES_DECRYPT().','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14346,'tendbcluster','dbconf','MySQL-5.6','mysqld.bulk_insert_buffer_size','INT','8388608','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'MyISAM uses a special tree-like cache to make bulk inserts faster for INSERT ... SELECT, INSERT ... VALUES (...), (...), ..., and LOAD DATA when adding data to nonempty tables','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14347,'tendbcluster','dbconf','MySQL-5.6','mysqld.character_set_filesystem','STRING','BINARY','UTF8| LATIN1| GBK| BINARY ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The file system character set; it is best not to change it.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14348,'tendbcluster','dbconf','MySQL-5.6','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,'Specify default server character set','2023-03-09 17:36:33','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14349,'tendbcluster','dbconf','MySQL-5.6','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,'{{mysqld.collation_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14350,'tendbcluster','dbconf','MySQL-5.6','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If AUTO (the default), MySQL permits INSERT and SELECT statements to run concurrently for MyISAM tables that have no free blocks in the middle of the data file.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14351,'tendbcluster','dbconf','MySQL-5.6','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14352,'tendbcluster','dbconf','MySQL-5.6','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/data',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14353,'tendbcluster','dbconf','MySQL-5.6','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-17 14:26:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14355,'tendbcluster','dbconf','MySQL-5.6','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-07 15:35:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14356,'tendbcluster','dbconf','MySQL-5.6','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default mode value to use for the WEEK() function','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14357,'tendbcluster','dbconf','MySQL-5.6','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14358,'tendbcluster','dbconf','MySQL-5.6','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How long an INSERT DELAYED thread should wait for INSERT statements before terminating.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14359,'tendbcluster','dbconf','MySQL-5.6','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14360,'tendbcluster','dbconf','MySQL-5.6','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This option applies only to MyISAM tables. It can have one of the following values to affect handling of the DELAY_KEY_WRITE table option that can be used in CREATE TABLE statements.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14361,'tendbcluster','dbconf','MySQL-5.6','mysqld.disconnect_on_expired_password','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This variable controls how the server handles clients with expired passwords','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14362,'tendbcluster','dbconf','MySQL-5.6','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the number of digits by which to increase the scale of the result of division operations performed with the / operator','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14363,'tendbcluster','dbconf','MySQL-5.6','mysqld.end_markers_in_json','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether optimizer JSON output should add end markers. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14364,'tendbcluster','dbconf','MySQL-5.6','mysqld.eq_range_index_dive_limit','INT','200','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The optimizer will use existing index statistics instead of doing index dives for equality ranges if the number of equality ranges for the index is larger than or equal to this number. If set to 0, index dives are always used.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14365,'tendbcluster','dbconf','MySQL-5.6','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the status of the Event Scheduler','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14366,'tendbcluster','dbconf','MySQL-5.6','mysqld.expire_logs_days','INT','60','[0, 99]','RANGE',1,0,0,0,1,NULL,NULL,'',0,'',NULL,'2023-03-09 17:36:33','2023-04-26 20:27:21',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14367,'tendbcluster','dbconf','MySQL-5.6','mysqld.explicit_defaults_for_timestamp','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This option causes CREATE TABLE to create all TIMESTAMP columns as NULL with the DEFAULT NULL attribute. Without this option, TIMESTAMP columns are NOT NULL and have implicit DEFAULT clauses.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14368,'tendbcluster','dbconf','MySQL-5.6','mysqld.flush_time','INT','0','[0,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this is set to a nonzero value, all tables are closed every flush_time seconds to free up resources and synchronize unflushed data to disk. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14369,'tendbcluster','dbconf','MySQL-5.6','mysqld.ft_max_word_len','INT','84','[10,4294967295]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The maximum length of the word to be included in a MyISAM FULLTEXT index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14370,'tendbcluster','dbconf','MySQL-5.6','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14371,'tendbcluster','dbconf','MySQL-5.6','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of best matches to use for query expansion','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14372,'tendbcluster','dbconf','MySQL-5.6','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted result length in bytes for the GROUP_CONCAT() function','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14373,'tendbcluster','dbconf','MySQL-5.6','mysqld.host_cache_size','INT','643','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the internal host cache','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14375,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_adaptive_flushing','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to dynamically adjust the rate of flushing dirty pages in the InnoDB buffer pool based on the workload.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14376,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_adaptive_flushing_lwm','INT','10','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the low water mark representing the percentage of redo log capacity at which adaptive flushing is enabled','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14377,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_adaptive_hash_index','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the InnoDB adaptive hash index is enabled or disabled.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14378,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_adaptive_max_sleep_delay','INT','150000','[1,1000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Permits InnoDB to automatically adjust the value of innodb_thread_sleep_delay up or down according to the current workload.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14379,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_async_truncate_work_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Switch that enables asynchronous deletion of large tables.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14380,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_autoextend_increment','INT','64','[1,1000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The increment size (in megabytes) for extending the size of an auto-extending InnoDB system tablespace file when it becomes full.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14381,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The lock mode to use for generating auto-increment values.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14382,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_buffer_pool_dump_at_shutdown','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to record the pages cached in the InnoDB buffer pool when the MySQL server is shut down, to shorten the warmup process at the next restart. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14383,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_buffer_pool_instances','INT','4','1 | 4 | 8','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of regions that the InnoDB buffer pool is divided into.','2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14384,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_buffer_pool_load_at_startup','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies whether the InnoDB buffer pool is automatically warmed up at MySQL server startup by loading the same pages it held at an earlier time.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14385,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}','','',2,0,0,0,1,'{{mysqld.innodb_buffer_pool_size}}',NULL,NULL,-1,NULL,'The size in bytes of the buffer pool, the memory area where InnoDB caches table and index data.','2023-03-09 17:36:33','2023-03-28 18:01:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14386,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_change_buffering','STRING','ALL','NONE| INSERTS| DELETES| CHANGES| PURGES| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether InnoDB performs change buffering','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14387,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_change_buffer_max_size','INT','25','[0,50]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14388,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_checksum_algorithm','STRING','INNODB','INNODB| CRC32| NONE| STRICT_INNODB| STRICT_CRC32| STRICT_NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how to generate and verify the checksum stored in the disk blocks of InnoDB tablespaces.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14389,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_cmp_per_index_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables per-index compression-related statistics in the INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX table.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14390,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_commit_concurrency','INT','0','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of threads that can commit at the same time. A value of 0 (the default) permits any number of transactions to commit simultaneously.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14391,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_compression_failure_threshold_pct','INT','5','[0,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the compression failure rate threshold for a table, as a percentage, at which point MySQL begins adding padding within compressed pages to avoid expensive compression failures. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14392,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_compression_level','INT','6','[0,9]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the level of zlib compression to use for InnoDB compressed tables and indexes. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14393,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_compression_pad_pct_max','INT','50','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum percentage that can be reserved as free space within each compressed page','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14394,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the number of threads that can enter InnoDB concurrently','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14395,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14396,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/data',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14397,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_disable_sort_file_cache','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables the operating system file system cache for merge-sort temporary files.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14398,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_file_format','STRING','Barracuda','Barracuda','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14399,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14400,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_flush_log_at_trx_commit','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines InnoDB transaction durability.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14401,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_flush_method','STRING','O_DIRECT','fsync| O_DSYNC| littlesync| nosync| O_DIRECT| O_DIRECT_NO_FSYNC ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the method used to flush data to InnoDB data files and log files','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14402,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_flush_neighbors','STRING','0','0| 1| 2 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14403,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_ft_cache_size','INT','8000000','[1600000,80000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The memory allocated, in bytes, for the InnoDB FULLTEXT search index cache, which holds a parsed document in memory while creating an InnoDB FULLTEXT index','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14404,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_ft_enable_diag_print','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to enable additional full-text search (FTS) diagnostic output. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14405,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_ft_enable_stopword','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether a set of stopwords is associated with an InnoDB FULLTEXT index at the time the index is created.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14406,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_ft_max_token_size','INT','84','[10,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Maximum character length of words that are stored in an InnoDB FULLTEXT index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14407,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_ft_min_token_size','INT','3','[0,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Minimum length of words that are stored in an InnoDB FULLTEXT index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14408,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_ft_num_word_optimize','INT','2000','[1000,10000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Number of words to process during each OPTIMIZE TABLE operation on an InnoDB FULLTEXT index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14409,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_ft_result_cache_limit','INT','2000000000','[1000000,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The InnoDB full-text search query result cache limit (defined in bytes) per full-text search query or per thread.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14410,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_ft_server_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The user supplied stopword table name.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14411,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_ft_sort_pll_degree','INT','2','[1,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of threads used in parallel to index and tokenize text in an InnoDB FULLTEXT index when building a search index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14412,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_ft_total_cache_size','INT','640000000','[32000000,1600000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The total memory allocated, in bytes, for the InnoDB full-text search index cache for all tables. Creating numerous tables, each with a FULLTEXT search index, could consume a significant portion of available memory.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14413,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_ft_user_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'User supplied stopword table name, effective in the session level.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14414,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The innodb_io_capacity limit is a total limit for all buffer pool instances.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14415,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_io_capacity_max','INT','40000','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines an upper limit on the number of I/O operations performed per second by InnoDB background tasks in critical situations.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14416,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_large_prefix','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enable this option to allow index key prefixes longer than 767 bytes (up to 3072 bytes) for InnoDB tables that use the DYNAMIC and COMPRESSED row formats.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14417,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The length of time in seconds an InnoDB transaction waits for a row lock before giving up','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14418,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14419,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_log_compressed_pages','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether images of recompressed pages are written to the redo log, preventing corruption that could occur if a different version of the zlib compression algorithm is used during recovery.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14420,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14421,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14422,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/log',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:34',0);
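Rows such as innodb_log_group_home_dir above store their value_default as a Go text/template placeholder ({{.Mysqld.Datadir}}/innodb/log), with a lowercase dotted variant in value_formula; the log_bin row further down follows the same pattern. As a minimal sketch of how such a placeholder resolves, assuming a hypothetical RenderCtx context whose field names simply mirror the placeholder (this is not the project's actual rendering type):

```go
package main

import (
	"os"
	"text/template"
)

// Illustrative only: value_default strings such as
// "{{.Mysqld.Datadir}}/innodb/log" (innodb_log_group_home_dir) and
// "{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin" (log_bin) use Go
// text/template syntax. RenderCtx is a hypothetical context for this sketch.
type RenderCtx struct {
	Mysqld struct {
		Datadir string
		Logdir  string
		Port    int
	}
}

func main() {
	var ctx RenderCtx
	ctx.Mysqld.Datadir = "/data/mysql"
	ctx.Mysqld.Logdir = "/data/mysqllog"
	ctx.Mysqld.Port = 20000

	for _, s := range []string{
		"{{.Mysqld.Datadir}}/innodb/log\n",
		"{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin\n",
	} {
		// Renders: /data/mysql/innodb/log
		// and:     /data/mysqllog/binlog/binlog20000.bin
		t := template.Must(template.New("conf").Parse(s))
		if err := t.Execute(os.Stdout, ctx); err != nil {
			panic(err)
		}
	}
}
```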
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14423,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_lru_scan_depth','INT','1024','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'It specifies, per buffer pool instance, how far down the buffer pool LRU page list the page cleaner thread scans looking for dirty pages to flush. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14424,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_max_dirty_pages_pct','INT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to flush data from the buffer pool so that the percentage of dirty pages does not exceed this value','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14425,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_max_dirty_pages_pct_lwm','INT','0','[0,75]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines a low water mark representing the percentage of dirty pages at which preflushing is enabled to control the dirty page ratio.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14426,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_max_purge_lag','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the maximum length of the purge queue.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14427,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_max_purge_lag_delay','INT','0','[0,10000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum delay in microseconds for the delay imposed by the innodb_max_purge_lag variable. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14428,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_monitor_disable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables InnoDB metrics counters.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14429,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_monitor_enable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables InnoDB metrics counters.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14430,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the approximate percentage of the InnoDB buffer pool used for the old block sublist','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14431,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how long in milliseconds a block inserted into the old sublist must stay there after its first access before it can be moved to the new sublist.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14432,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_online_alter_log_max_size','INT','134217728','[134217728,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum modification log file size for online index creation','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14433,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_optimize_fulltext_only','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Changes the way OPTIMIZE TABLE operates on InnoDB tables. Intended to be enabled temporarily, during maintenance operations for InnoDB tables with FULLTEXT indexes. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14434,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_print_all_deadlocks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this option is enabled, information about all deadlocks in InnoDB user transactions is recorded in the mysqld error log.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14435,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_purge_batch_size','INT','300','[1,5000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The granularity of changes, expressed in units of redo log records, that trigger a purge operation, flushing the changed buffer pool blocks to disk.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14436,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_purge_threads','INT','4','[1,32]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of background threads devoted to the InnoDB purge operation.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14437,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_random_read_ahead','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables the random read-ahead technique for optimizing InnoDB I/O.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14438,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the sensitivity of linear read-ahead that InnoDB uses to prefetch pages into the buffer pool','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14439,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_read_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for read operations in InnoDB.','2023-03-09 17:36:33','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14440,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'InnoDB rolls back only the last statement on a transaction timeout by default','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14441,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_rollback_segments','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the number of rollback segments used by InnoDB.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14442,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_sort_buffer_size','INT','1048576','[65536,67108864]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies the size of sort buffers used to sort data during creation of an InnoDB index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14443,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_spin_wait_delay','INT','6','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum delay between polls for a spin lock.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14444,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_stats_auto_recalc','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Causes InnoDB to automatically recalculate persistent statistics after the data in a table is changed substantially.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14445,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14446,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this variable is enabled, InnoDB updates statistics when metadata statements such as SHOW TABLE STATUS or SHOW INDEX are run, or when accessing the INFORMATION_SCHEMA tables TABLES or STATISTICS','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14447,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_stats_persistent','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether InnoDB index statistics are persisted to disk. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14448,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_stats_persistent_sample_pages','INT','20','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of index pages to sample when estimating cardinality and other statistics for an indexed column, such as those calculated by ANALYZE TABLE.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14449,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_stats_transient_sample_pages','INT','8','[1,4294967296]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of leaf index pages to sample when calculating transient statistics','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14450,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_status_output','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables periodic output for the standard InnoDB Monitor.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14451,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_status_output_locks','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables the InnoDB Lock Monitor.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14452,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When innodb_strict_mode is ON, InnoDB returns errors rather than warnings for certain conditions','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14453,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_support_xa','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables InnoDB support for two-phase commit in XA transactions, causing an extra disk flush for transaction preparation. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14454,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_sync_array_size','INT','1','[1,1024]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the size of the mutex/lock wait array.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14455,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_sync_spin_loops','INT','30','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of times a thread waits for an InnoDB mutex to be freed before the thread is suspended.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14456,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_table_locks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If autocommit = 0, InnoDB honors LOCK TABLES; MySQL does not return from LOCK TABLES ... WRITE until all other threads have released all their locks to the table. The default value of innodb_table_locks is 1, which means that LOCK TABLES causes InnoDB to lock a table internally if autocommit = 0.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14457,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_thread_concurrency','INT','16','[0,128]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to keep the number of operating system threads concurrently inside InnoDB less than or equal to the limit given by this variable.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14458,'tendbcluster','dbconf','MySQL-5.6','mysqld.innodb_write_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for write operations in InnoDB.','2023-03-09 17:36:33','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14459,'tendbcluster','dbconf','MySQL-5.6','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on an interactive connection before closing it','2023-03-09 17:36:33','2023-04-07 11:55:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14460,'tendbcluster','dbconf','MySQL-5.6','mysqld.join_buffer_size','STRING','{MIN(DBInitMemory*128,262144)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum size of the buffer that is used for plain index scans, range index scans, and joins that do not use indexes.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14461,'tendbcluster','dbconf','MySQL-5.6','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14462,'tendbcluster','dbconf','MySQL-5.6','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default size of key cache blocks','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14463,'tendbcluster','dbconf','MySQL-5.6','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum percentage of warm blocks in key cache','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14464,'tendbcluster','dbconf','MySQL-5.6','mysqld.lc_time_names','STRING','EN_US','JA_JP| PT_BR| EN_US ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the locale that controls the language used to display day and month names and abbreviations.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14465,'tendbcluster','dbconf','MySQL-5.6','mysqld.local_infile','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether LOCAL is supported for LOAD DATA INFILE.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14466,'tendbcluster','dbconf','MySQL-5.6','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable specifies the timeout in seconds for attempts to acquire metadata locks.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14467,'tendbcluster','dbconf','MySQL-5.6','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/binlog/binlog{{port}}.bin',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14469,'tendbcluster','dbconf','MySQL-5.6','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14470,'tendbcluster','dbconf','MySQL-5.6','mysqld.log_output','STRING','FILE','FILE| TABLE| NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The destination or destinations for general query log and slow query log output. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14471,'tendbcluster','dbconf','MySQL-5.6','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether queries that do not use indexes are logged to the slow query log','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14472,'tendbcluster','dbconf','MySQL-5.6','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14473,'tendbcluster','dbconf','MySQL-5.6','mysqld.log_slow_admin_statements','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to include slow administrative statements in the statements written to the slow query log.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14474,'tendbcluster','dbconf','MySQL-5.6','mysqld.log_throttle_queries_not_using_indexes','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If log_queries_not_using_indexes is enabled, the log_throttle_queries_not_using_indexes variable limits the number of such queries per minute that can be written to the slow query log. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14475,'tendbcluster','dbconf','MySQL-5.6','mysqld.log_warnings','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14476,'tendbcluster','dbconf','MySQL-5.6','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a query takes longer than this many seconds, the server increments the Slow_queries status variable','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14468,'tendbcluster','dbconf','MySQL-5.6','mysqld.loose_log_bin_compress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14519,'tendbcluster','dbconf','MySQL-5.6','mysqld.loose_relay_log_uncompress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14477,'tendbcluster','dbconf','MySQL-5.6','mysqld.lower_case_table_names','INT','0','[0,1]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'If set to 0, table names are stored as specified and comparisons are case sensitive. If set to 1, they are stored in lowercase on disk and comparisons are not case sensitive.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14478,'tendbcluster','dbconf','MySQL-5.6','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If set to true, all INSERT, UPDATE, DELETE, and LOCK TABLE WRITE statements wait until there is no pending SELECT or LOCK TABLE READ on the affected table','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14479,'tendbcluster','dbconf','MySQL-5.6','mysqld.master_verify_checksum','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to examine checksums when reading from the binary log.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14480,'tendbcluster','dbconf','MySQL-5.6','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of one packet or any generated/intermediate string.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14481,'tendbcluster','dbconf','MySQL-5.6','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14482,'tendbcluster','dbconf','MySQL-5.6','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14483,'tendbcluster','dbconf','MySQL-5.6','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14484,'tendbcluster','dbconf','MySQL-5.6','mysqld.max_connect_errors','INT','99999999','[1,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If more than this many successive connection requests from a host are interrupted without a successful connection, the server blocks that host from further connections.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14485,'tendbcluster','dbconf','MySQL-5.6','mysqld.max_error_count','INT','64','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of error, warning, and information messages to be stored for display by the SHOW ERRORS and SHOW WARNINGS statements.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14486,'tendbcluster','dbconf','MySQL-5.6','mysqld.max_heap_table_size','INT','67108864','[16384,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size to which user-created MEMORY tables are permitted to grow.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14487,'tendbcluster','dbconf','MySQL-5.6','mysqld.max_length_for_sort_data','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The cutoff on the size of index values that determines which filesort algorithm to use.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14488,'tendbcluster','dbconf','MySQL-5.6','mysqld.max_prepared_stmt_count','INT','16382','[0,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable limits the total number of prepared statements in the server.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14489,'tendbcluster','dbconf','MySQL-5.6','mysqld.max_sort_length','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of bytes to use when sorting data values.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14490,'tendbcluster','dbconf','MySQL-5.6','mysqld.max_sp_recursion_depth','INT','0','[0,255]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14491,'tendbcluster','dbconf','MySQL-5.6','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections per user.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14492,'tendbcluster','dbconf','MySQL-5.6','mysqld.metadata_locks_cache_size','INT','1024','[1,1048576]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The size of the metadata locks cache. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14493,'tendbcluster','dbconf','MySQL-5.6','mysqld.min_examined_row_limit','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Queries that examine fewer than this number of rows are not logged to the slow query log. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14494,'tendbcluster','dbconf','MySQL-5.6','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when sorting MyISAM indexes during a REPAIR TABLE or when creating indexes with CREATE INDEX or ALTER TABLE','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14495,'tendbcluster','dbconf','MySQL-5.6','mysqld.net_buffer_length','INT','16384','[1024,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the client thread buffer length.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14496,'tendbcluster','dbconf','MySQL-5.6','mysqld.net_read_timeout','INT','30','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from a connection before aborting the read.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14497,'tendbcluster','dbconf','MySQL-5.6','mysqld.net_retry_count','INT','10','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a read or write on a communication port is interrupted, retry this many times before giving up.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14498,'tendbcluster','dbconf','MySQL-5.6','mysqld.net_write_timeout','INT','60','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for a block to be written to a connection before aborting the write.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14499,'tendbcluster','dbconf','MySQL-5.6','mysqld.optimizer_prune_level','STRING','1','0| 1 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the heuristics applied during query optimization to prune less-promising partial plans from the optimizer search space.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14500,'tendbcluster','dbconf','MySQL-5.6','mysqld.optimizer_search_depth','INT','62','[0,62]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum depth of search performed by the query optimizer.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14501,'tendbcluster','dbconf','MySQL-5.6','mysqld.optimizer_switch','STRING','INDEX_MERGE=ON,INDEX_MERGE_UNION=ON,INDEX_MERGE_SORT_UNION=ON,INDEX_MERGE_INTERSECTION=ON,ENGINE_CONDITION_PUSHDOWN=ON,INDEX_CONDITION_PUSHDOWN=ON,MRR=ON,MRR_COST_BASED=ON,BLOCK_NESTED_LOOP=ON,BATCHED_KEY_ACCESS=OFF,MATERIALIZATION=ON,SEMIJOIN=ON,LOOSESCAN=ON,FIRSTMATCH=ON,SUBQUERY_MATERIALIZATION_COST_BASED=ON,USE_INDEX_EXTENSIONS=ON','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls optimizer behavior. Must be configured with reference to the official documentation.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14502,'tendbcluster','dbconf','MySQL-5.6','mysqld.optimizer_trace_limit','INT','1','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of optimizer traces to display. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14503,'tendbcluster','dbconf','MySQL-5.6','mysqld.optimizer_trace_max_mem_size','INT','16384','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum cumulative size of stored optimizer traces. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14504,'tendbcluster','dbconf','MySQL-5.6','mysqld.optimizer_trace_offset','INT','-1','[-2147483648,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The offset of optimizer traces to display.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14505,'tendbcluster','dbconf','MySQL-5.6','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Whether to enable the Performance Schema.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14506,'tendbcluster','dbconf','MySQL-5.6','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14507,'tendbcluster','dbconf','MySQL-5.6','mysqld.preload_buffer_size','INT','32768','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when preloading indexes.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14508,'tendbcluster','dbconf','MySQL-5.6','mysqld.query_alloc_block_size','INT','8192','[1024,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The allocation size of memory blocks that are allocated for objects created during statement parsing and execution.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14509,'tendbcluster','dbconf','MySQL-5.6','mysqld.query_cache_limit','INT','1048576','[1,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Do not cache results that are larger than this number of bytes. The default value is 1MB.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14510,'tendbcluster','dbconf','MySQL-5.6','mysqld.query_cache_size','INT','0','[0,104857600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount of memory allocated for caching query results. By default, the query cache is disabled.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14511,'tendbcluster','dbconf','MySQL-5.6','mysqld.query_cache_type','STRING','OFF','OFF| ON| DEMAND','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Set the query cache type.','2023-03-09 17:36:33','2023-04-17 14:28:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14512,'tendbcluster','dbconf','MySQL-5.6','mysqld.query_cache_wlock_invalidate','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Invalidate queries in query cache on LOCK for write','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14513,'tendbcluster','dbconf','MySQL-5.6','mysqld.query_prealloc_size','INT','8192','[8192,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the persistent buffer used for statement parsing and execution. This buffer is not freed between statements.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14514,'tendbcluster','dbconf','MySQL-5.6','mysqld.query_response_time_stats','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14515,'tendbcluster','dbconf','MySQL-5.6','mysqld.read_buffer_size','INT','262144','[8200,2147479552]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each thread that does a sequential scan for a MyISAM table allocates a buffer of this size (in bytes) for each table it scans.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14516,'tendbcluster','dbconf','MySQL-5.6','mysqld.read_rnd_buffer_size','INT','524288','[1,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Used for reads from MyISAM tables, and, for any storage engine, for Multi-Range Read optimization.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14517,'tendbcluster','dbconf','MySQL-5.6','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/relay-log/relay-log.bin',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14518,'tendbcluster','dbconf','MySQL-5.6','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14520,'tendbcluster','dbconf','MySQL-5.6','mysqld.replicate-wild-ignore-table','STRING','mysql.%,infodba_schema.conn_log',NULL,'STRING',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:12:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14523,'tendbcluster','dbconf','MySQL-5.6','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,'{{mysqld.server_id}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14524,'tendbcluster','dbconf','MySQL-5.6','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 18:37:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14525,'tendbcluster','dbconf','MySQL-5.6','mysqld.slave_compressed_protocol','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:11:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14526,'tendbcluster','dbconf','MySQL-5.6','mysqld.slave_exec_mode','STRING','STRICT','|STRICT|IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14527,'tendbcluster','dbconf','MySQL-5.6','mysqld.slave_net_timeout','INT','120','[15,300]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from the master before the slave considers the connection broken, aborts the read, and tries to reconnect','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14528,'tendbcluster','dbconf','MySQL-5.6','mysqld.slave_parallel_type','STRING','DATABASE','DATABASE| LOGICAL_CLOCK','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether the slave uses database partitioning or information from the master to parallelize transactions (default: DATABASE).','2023-03-09 17:36:33','2023-04-17 14:55:08',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14529,'tendbcluster','dbconf','MySQL-5.6','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enable multithreading on the replica and set the number of applier threads used to execute replicated transactions in parallel.','2023-03-09 17:36:33','2023-04-17 14:58:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14530,'tendbcluster','dbconf','MySQL-5.6','mysqld.slave_rows_search_algorithms','STRING','TABLE_SCAN,INDEX_SCAN','TABLE_SCAN,INDEX_SCAN ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When preparing batches of rows for row-based logging and replication, this variable controls how the rows are searched for matches.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14531,'tendbcluster','dbconf','MySQL-5.6','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If creating a thread takes longer than this many seconds, the server increments the Slow_launch_threads status variable','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14532,'tendbcluster','dbconf','MySQL-5.6','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-17 15:00:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14533,'tendbcluster','dbconf','MySQL-5.6','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,'{{mysqld.slow_query_log_file}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14534,'tendbcluster','dbconf','MySQL-5.6','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,'{{mysqld.socket}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14535,'tendbcluster','dbconf','MySQL-5.6','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each session that must perform a sort allocates a buffer of this size. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14536,'tendbcluster','dbconf','MySQL-5.6','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this variable is set to 1, then after a statement that inserts an automatically generated AUTO_INCREMENT value, that value can be found by selecting rows where the AUTO_INCREMENT column IS NULL.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14537,'tendbcluster','dbconf','MySQL-5.6','mysqld.sql_mode','STRING','\'\'','\'\'| NO_ZERO_DATE| ANSI_QUOTES| IGNORE_SPACE| NO_AUTO_VALUE_ON_ZERO| ONLY_FULL_GROUP_BY| STRICT_TRANS_TABLES| ERROR_FOR_DIVISION_BY_ZERO| HIGH_NOT_PRECEDENCE| NO_KEY_OPTIONS| NO_TABLE_OPTIONS| NO_ENGINE_SUBSTITUTION| NO_AUTO_CREATE_USER| NO_FIELD_OPTIONS| NO_UNSIGNED_SUBTRACTION| NO_ZERO_IN_DATE| PIPES_AS_CONCAT| REAL_AS_FLOAT| ALLOW_INVALID_DATES| NO_BACKSLASH_ESCAPES| NO_DIR_IN_CREATE| STRICT_ALL_TABLES ','ENUMS',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The current server SQL mode.','2023-03-09 17:36:33','2023-04-19 11:29:17',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14538,'tendbcluster','dbconf','MySQL-5.6','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Note that this setting applies at the session level; it is better set via init_connect. Set to 1 to require that UPDATE and DELETE statements include a WHERE clause or a LIMIT clause.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14539,'tendbcluster','dbconf','MySQL-5.6','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14540,'tendbcluster','dbconf','MySQL-5.6','mysqld.sync_binlog','INT','0','[0,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls how often MySQL synchronizes the binary log to disk; 0 relies on the operating system to flush.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14541,'tendbcluster','dbconf','MySQL-5.6','mysqld.table_definition_cache','STRING','{MAX(DBInitMemory*512/1000,2048)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of table definitions (from .frm files) that can be stored in the definition cache.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14542,'tendbcluster','dbconf','MySQL-5.6','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of open tables for all threads. Increasing this value increases the number of file descriptors that mysqld requires.','2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14543,'tendbcluster','dbconf','MySQL-5.6','mysqld.table_open_cache_instances','STRING','{MIN(DBInitMemory/1000,16)}','','',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of table cache instances','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14544,'tendbcluster','dbconf','MySQL-5.6','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How many threads we should keep in a cache for reuse','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14545,'tendbcluster','dbconf','MySQL-5.6','mysqld.thread_handling','STRING','one-thread-per-connection','one-thread-per-connection| pool-of-threads ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The thread-handling model used by the server for connection threads.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14546,'tendbcluster','dbconf','MySQL-5.6','mysqld.thread_pool_oversubscribe','INT','3','[3,32]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Number of additional threads per thread group in the thread pool.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14547,'tendbcluster','dbconf','MySQL-5.6','mysqld.thread_pool_size','STRING','{MIN(DBInitCpu,64)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of thread groups in the thread pool.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14548,'tendbcluster','dbconf','MySQL-5.6','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Sets the global server time zone. The default follows SYSTEM; values can be given in several formats, such as SYSTEM, +8:00, -6:00, or Europe/Helsinki. For more information, consult the MySQL documentation.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14549,'tendbcluster','dbconf','MySQL-5.6','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/tmp',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14550,'tendbcluster','dbconf','MySQL-5.6','mysqld.tmp_table_size','INT','209715200','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of internal in-memory temporary tables. This variable does not apply to user-created MEMORY tables.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14551,'tendbcluster','dbconf','MySQL-5.6','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Default transaction isolation level','2023-03-09 17:36:33','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14552,'tendbcluster','dbconf','MySQL-5.6','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on a noninteractive connection before closing it','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14553,'tendbcluster','dbconf','MySQL-5.6','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14554,'tendbcluster','dbconf','MySQL-5.7','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 19:15:59',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14555,'tendbcluster','dbconf','MySQL-5.7','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14556,'tendbcluster','dbconf','MySQL-5.7','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 19:28:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14557,'tendbcluster','dbconf','MySQL-5.7','mysql.port','INT','{{.Mysqld.Port}}',NULL,'',2,0,0,0,0,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14558,'tendbcluster','dbconf','MySQL-5.7','mysql.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,0,'{{mysqld.datadir}}/mysql.sock',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14559,'tendbcluster','dbconf','MySQL-5.7','mysqld.automatic_sp_privileges','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server automatically grants the EXECUTE and ALTER ROUTINE privileges to the creator of a stored routine.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14560,'tendbcluster','dbconf','MySQL-5.7','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the interval between successive column values.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14561,'tendbcluster','dbconf','MySQL-5.7','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the starting point for the AUTO_INCREMENT column value.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14562,'tendbcluster','dbconf','MySQL-5.7','mysqld.avoid_temporal_upgrade','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether ALTER TABLE implicitly upgrades temporal columns in pre-5.6.4 format (TIME, DATETIME, and TIMESTAMP columns without support for fractional seconds precision).','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14563,'tendbcluster','dbconf','MySQL-5.7','mysqld.back_log','INT','3000','[1,65535]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of outstanding connection requests MySQL can have.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14564,'tendbcluster','dbconf','MySQL-5.7','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}',NULL,'',2,0,0,0,1,'{{mysqld.bind-address}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14565,'tendbcluster','dbconf','MySQL-5.7','mysqld.binlog_cache_size','INT','2097152','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the cache to hold changes to the binary log during a transaction','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14566,'tendbcluster','dbconf','MySQL-5.7','mysqld.binlog_checksum','STRING','CRC32','NONE| CRC32 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Include a checksum for each log event in the binary log. NONE indicates that only the event length is checked.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14567,'tendbcluster','dbconf','MySQL-5.7','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What form of binary logging the master will use.','2023-03-09 17:36:33','2023-04-19 14:31:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14568,'tendbcluster','dbconf','MySQL-5.7','mysqld.binlog_order_commits','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When enabled on a master (the default), transactions are externalized in the same order as they are written to the binary log.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14569,'tendbcluster','dbconf','MySQL-5.7','mysqld.binlog_rows_query_log_events','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This system variable affects row-based logging only. When enabled, it causes the server to write informational log events such as row query log events into its binary log','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14570,'tendbcluster','dbconf','MySQL-5.7','mysqld.binlog_row_image','STRING','FULL','FULL| MINIMAL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls how much row information is written to the binary log (full or minimal row images).','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14571,'tendbcluster','dbconf','MySQL-5.7','mysqld.binlog_stmt_cache_size','INT','32768','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the statement cache for updates to non-transactional engines for the binary log.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14572,'tendbcluster','dbconf','MySQL-5.7','mysqld.block_encryption_mode','STRING','AES-128-ECB','AES-128-ECB| AES-192-ECB| AES-256-ECB| AES-128-CBC| AES-192-CBC| AES-256-CBC ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls the block encryption mode for block-based algorithms such as AES. It affects encryption for AES_ENCRYPT() and AES_DECRYPT().','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14573,'tendbcluster','dbconf','MySQL-5.7','mysqld.bulk_insert_buffer_size','INT','8388608','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'MyISAM uses a special tree-like cache to make bulk inserts faster for INSERT ... SELECT, INSERT ... VALUES (...), (...), ..., and LOAD DATA when adding data to nonempty tables','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14574,'tendbcluster','dbconf','MySQL-5.7','mysqld.character_set_filesystem','STRING','BINARY','UTF8| LATIN1| GBK| BINARY ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The file system character set. You should not normally change it.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14575,'tendbcluster','dbconf','MySQL-5.7','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,'Specify default server character set','2023-03-09 17:36:33','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14576,'tendbcluster','dbconf','MySQL-5.7','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,'{{mysqld.collation_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14577,'tendbcluster','dbconf','MySQL-5.7','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If AUTO (the default), MySQL permits INSERT and SELECT statements to run concurrently for MyISAM tables that have no free blocks in the middle of the data file.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14578,'tendbcluster','dbconf','MySQL-5.7','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14579,'tendbcluster','dbconf','MySQL-5.7','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/data',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14580,'tendbcluster','dbconf','MySQL-5.7','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-17 14:26:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14581,'tendbcluster','dbconf','MySQL-5.7','mysqld.default_password_lifetime','INT','0','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable defines the global automatic password expiration policy. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14583,'tendbcluster','dbconf','MySQL-5.7','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-07 15:35:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14584,'tendbcluster','dbconf','MySQL-5.7','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default mode value to use for the WEEK() function','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14585,'tendbcluster','dbconf','MySQL-5.7','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14586,'tendbcluster','dbconf','MySQL-5.7','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How long an INSERT DELAYED thread should wait for INSERT statements before terminating.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14587,'tendbcluster','dbconf','MySQL-5.7','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14588,'tendbcluster','dbconf','MySQL-5.7','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This option applies only to MyISAM tables. It can have one of the following values to affect handling of the DELAY_KEY_WRITE table option that can be used in CREATE TABLE statements.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14589,'tendbcluster','dbconf','MySQL-5.7','mysqld.disconnect_on_expired_password','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This variable controls how the server handles clients with expired passwords','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14590,'tendbcluster','dbconf','MySQL-5.7','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the number of digits by which to increase the scale of the result of division operations performed with the / operator','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14591,'tendbcluster','dbconf','MySQL-5.7','mysqld.end_markers_in_json','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether optimizer JSON output should add end markers. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14592,'tendbcluster','dbconf','MySQL-5.7','mysqld.eq_range_index_dive_limit','INT','200','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The optimizer will use existing index statistics instead of doing index dives for equality ranges if the number of equality ranges for the index is larger than or equal to this number. If set to 0, index dives are always used.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14593,'tendbcluster','dbconf','MySQL-5.7','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the status of the Event Scheduler','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14594,'tendbcluster','dbconf','MySQL-5.7','mysqld.expire_logs_days','INT','60','[0,99]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-26 20:27:21',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14595,'tendbcluster','dbconf','MySQL-5.7','mysqld.explicit_defaults_for_timestamp','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This option causes CREATE TABLE to create all TIMESTAMP columns as NULL with DEFAULT NULL attribute. Without this option, TIMESTAMP columns are NOT NULL and have implicit DEFAULT clauses.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14596,'tendbcluster','dbconf','MySQL-5.7','mysqld.flush_time','INT','0','[0,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this is set to a nonzero value, all tables are closed every flush_time seconds to free up resources and synchronize unflushed data to disk. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14597,'tendbcluster','dbconf','MySQL-5.7','mysqld.ft_max_word_len','INT','84','[10,4294967295]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The maximum length of the word to be included in a MyISAM FULLTEXT index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14598,'tendbcluster','dbconf','MySQL-5.7','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14599,'tendbcluster','dbconf','MySQL-5.7','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of best matches to use for query expansion','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14600,'tendbcluster','dbconf','MySQL-5.7','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted result length in bytes for the GROUP_CONCAT() function','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14601,'tendbcluster','dbconf','MySQL-5.7','mysqld.host_cache_size','INT','644','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the internal host cache','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14603,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_adaptive_flushing','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to dynamically adjust the rate of flushing dirty pages in the InnoDB buffer pool based on the workload.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14604,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_adaptive_flushing_lwm','INT','10','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the low water mark representing percentage of redo log capacity at which adaptive flushing is enabled','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14605,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_adaptive_hash_index','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the InnoDB adaptive hash index is enabled or disabled.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14606,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_adaptive_max_sleep_delay','INT','150000','[1,1000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Permits InnoDB to automatically adjust the value of innodb_thread_sleep_delay up or down according to the current workload.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14607,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_alter_table_default_algorithm','STRING','INPLACE','INPLACE| INSTANT ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The INSTANT algorithm avoids data copying, making it possible to add columns to large tables quickly.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14608,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_async_truncate_size','INT','128','[128,168]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size, in MB, by which an asynchronous DROP TABLE truncates the file in each background pass.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14609,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_async_truncate_work_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Switch that enables asynchronous deletion of large tables.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14610,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_autoextend_increment','INT','64','[1,1000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The increment size (in megabytes) for extending the size of an auto-extending InnoDB system tablespace file when it becomes full.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14611,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The lock mode to use for generating auto-increment values.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14612,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_dump_at_shutdown','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to record the pages cached in the InnoDB buffer pool when the MySQL server is shut down, to shorten the warmup process at the next restart. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14613,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_dump_pct','INT','25','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the percentage of the most recently used pages for each buffer pool to read out and dump','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14614,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_instances','INT','4','1 | 4 | 8','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of regions that the InnoDB buffer pool is divided into.','2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14615,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_load_at_startup','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies whether the InnoDB buffer pool is automatically warmed up by loading the same pages it held at an earlier time on MySQL server startup.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14616,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}','','',2,0,0,0,1,'{{mysqld.innodb_buffer_pool_size}}',NULL,NULL,-1,NULL,'The size in bytes of the buffer pool, the memory area where InnoDB caches table and index data.','2023-03-09 17:36:33','2023-03-28 18:01:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14617,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_change_buffering','STRING','ALL','NONE| INSERTS| DELETES| CHANGES| PURGES| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether InnoDB performs change buffering','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14618,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_change_buffer_max_size','INT','25','[0,50]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14619,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_checksum_algorithm','STRING','CRC32','INNODB| CRC32| NONE| STRICT_INNODB| STRICT_CRC32| STRICT_NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how to generate and verify the checksum stored in the disk blocks of InnoDB tablespaces.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14620,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_cmp_per_index_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables per-index compression-related statistics in the INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX table.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14621,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_commit_concurrency','INT','0','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of threads that can commit at the same time. A value of 0 (the default) permits any number of transactions to commit simultaneously.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14622,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_compression_failure_threshold_pct','INT','5','[0,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the compression failure rate threshold for a table, as a percentage, at which point MySQL begins adding padding within compressed pages to avoid expensive compression failures. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14623,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_compression_level','INT','6','[0,9]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the level of zlib compression to use for InnoDB compressed tables and indexes. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14624,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_compression_pad_pct_max','INT','50','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum percentage that can be reserved as free space within each compressed page','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14625,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the number of threads that can enter InnoDB concurrently','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14626,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14627,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/data',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14628,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_deadlock_detect','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to disable deadlock detection.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14629,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_default_row_format','STRING','DYNAMIC','DYNAMIC| COMPACT| REDUNDANT ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The innodb_default_row_format option defines the default row format for InnoDB tables and user-created temporary tables.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14630,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_disable_sort_file_cache','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables the operating system file system cache for merge-sort temporary files.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14631,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_file_format','STRING','Barracuda','Barracuda','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14632,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
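+-- mysqld.innodb_file_per_table above is seeded with `flag_locked`=1 and a single-valued
+-- ENUM ('1'); presumably a locked flag pins the parameter so it cannot be overridden at
+-- lower configuration levels (an inference from the column name, not stated in this
+-- file). An illustrative check for locked definitions:
+--   SELECT conf_name, value_default
+--     FROM tb_config_name_def
+--    WHERE namespace='tendbcluster' AND conf_file='MySQL-5.7' AND flag_locked=1;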
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14633,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_flush_log_at_trx_commit','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines InnoDB transaction durability.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14634,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_flush_method','STRING','O_DIRECT','fsync| O_DSYNC| littlesync| nosync| O_DIRECT| O_DIRECT_NO_FSYNC ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the method used to flush data to InnoDB data files and log files','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14635,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_flush_neighbors','STRING','0','0| 1| 2 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14636,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_flush_sync','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to ignore the innodb_io_capacity setting for bursts of I/O activity that occur at checkpoints.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14637,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_ft_cache_size','INT','8000000','[1600000,80000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The memory allocated, in bytes, for the InnoDB FULLTEXT search index cache, which holds a parsed document in memory while creating an InnoDB FULLTEXT index','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14638,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_ft_enable_diag_print','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to enable additional full-text search (FTS) diagnostic output. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14639,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_ft_enable_stopword','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Create FTS index with stopword.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14640,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_ft_max_token_size','INT','84','[10,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Maximum character length of words that are stored in an InnoDB FULLTEXT index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14641,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_ft_min_token_size','INT','3','[0,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Minimum length of words that are stored in an InnoDB FULLTEXT index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14642,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_ft_num_word_optimize','INT','2000','[1000,10000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Number of words to process during each OPTIMIZE TABLE operation on an InnoDB FULLTEXT index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14643,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_ft_result_cache_limit','INT','2000000000','[1000000,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The InnoDB full-text search query result cache limit (defined in bytes) per full-text search query or per thread.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14644,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_ft_server_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The user supplied stopword table name.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14645,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_ft_sort_pll_degree','INT','2','[1,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of threads used in parallel to index and tokenize text in an InnoDB FULLTEXT index when building a search index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14646,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_ft_total_cache_size','INT','640000000','[32000000,1600000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The total memory allocated, in bytes, for the InnoDB full-text search index cache for all tables. Creating numerous tables, each with a FULLTEXT search index, could consume a significant portion of available memory.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14647,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_ft_user_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'User supplied stopword table name, effective in the session level.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14648,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The innodb_io_capacity limit is a total limit for all buffer pool instances.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14649,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_io_capacity_max','INT','40000','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines an upper limit on the number of I/O operations performed per second by InnoDB background tasks in critical situations.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14650,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_large_prefix','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enable this option to allow index key prefixes longer than 767 bytes (up to 3072 bytes) for InnoDB tables that use the DYNAMIC and COMPRESSED row formats.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14651,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The length of time in seconds an InnoDB transaction waits for a row lock before giving up','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14652,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14653,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_log_checksums','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables checksums for redo log pages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14654,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_log_compressed_pages','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to prevent corruption that could occur if a different version of the zlib compression algorithm is used during recovery.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14655,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14656,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14657,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/log',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14658,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_lru_scan_depth','INT','1024','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'It specifies, per buffer pool instance, how far down the buffer pool LRU page list the page cleaner thread scans looking for dirty pages to flush. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14659,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_max_dirty_pages_pct','FLOAT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to flush data from the buffer pool so that the percentage of dirty pages does not exceed this value','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14660,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_max_dirty_pages_pct_lwm','FLOAT','0','[0,75]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines a low water mark representing the percentage of dirty pages at which preflushing is enabled to control the dirty page ratio.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14661,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_max_purge_lag','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the maximum length of the purge queue.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14662,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_max_purge_lag_delay','INT','0','[0,10000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum delay in microseconds for the delay imposed by the innodb_max_purge_lag variable. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14663,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_max_undo_log_size','INT','1073741824','[10485760,17179869184]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines a threshold size for undo tablespaces.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14664,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_monitor_disable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables InnoDB metrics counters.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14665,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_monitor_enable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables InnoDB metrics counters.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14666,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the approximate percentage of the InnoDB buffer pool used for the old block sublist','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14667,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how long in milliseconds a block inserted into the old sublist must stay there after its first access before it can be moved to the new sublist.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14668,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_online_alter_log_max_size','INT','134217728','[134217728,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum modification log file size for online index creation','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14669,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_optimize_fulltext_only','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Changes the way OPTIMIZE TABLE operates on InnoDB tables. Intended to be enabled temporarily, during maintenance operations for InnoDB tables with FULLTEXT indexes. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14670,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_page_cleaners','INT','4','[1,64]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of page cleaner threads that flush dirty pages from buffer pool instances.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14671,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_print_all_deadlocks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this option is enabled, information about all deadlocks in InnoDB user transactions is recorded in the mysqld error log.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14672,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_purge_batch_size','INT','300','[1,5000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The granularity of changes, expressed in units of redo log records, that trigger a purge operation, flushing the changed buffer pool blocks to disk.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14673,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_purge_rseg_truncate_frequency','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14674,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_purge_threads','INT','4','[1,32]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of background threads devoted to the InnoDB purge operation.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14675,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_random_read_ahead','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables the random read-ahead technique for optimizing InnoDB I/O.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14676,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the sensitivity of linear read-ahead that InnoDB uses to prefetch pages into the buffer pool','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14677,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_read_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for read operations in InnoDB.','2023-03-09 17:36:33','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14678,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'InnoDB rolls back only the last statement on a transaction timeout by default. If innodb_rollback_on_timeout is specified, a transaction timeout causes InnoDB to abort and roll back the entire transaction.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14679,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_rollback_segments','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the number of rollback segments used by InnoDB.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14680,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_sort_buffer_size','INT','1048576','[65536,67108864]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies the size of sort buffers used to sort data during creation of an InnoDB index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14681,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_spin_wait_delay','INT','6','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum delay between polls for a spin lock.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14682,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_stats_auto_recalc','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Causes InnoDB to automatically recalculate persistent statistics after the data in a table is changed substantially.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14683,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14684,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this variable is enabled, InnoDB updates statistics when metadata statements such as SHOW TABLE STATUS or SHOW INDEX are run, or when accessing the INFORMATION_SCHEMA tables TABLES or STATISTICS','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14685,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_stats_persistent','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether InnoDB index statistics are persisted to disk. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14686,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_stats_persistent_sample_pages','INT','20','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of index pages to sample when estimating cardinality and other statistics for an indexed column, such as those calculated by ANALYZE TABLE.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14687,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_stats_transient_sample_pages','INT','8','[1,4294967296]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of leaf index pages to sample when calculating transient statistics','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14688,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_status_output','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables periodic output for the standard InnoDB Monitor.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14689,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_status_output_locks','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables the InnoDB Lock Monitor.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14690,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When innodb_strict_mode is ON, InnoDB returns errors rather than warnings for certain conditions','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14691,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_sync_array_size','INT','1','[1,1024]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the size of the mutex/lock wait array.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14692,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_sync_spin_loops','INT','30','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of times a thread waits for an InnoDB mutex to be freed before the thread is suspended.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14693,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_table_locks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If autocommit = 0, InnoDB honors LOCK TABLES; MySQL does not return from LOCK TABLES ... WRITE until all other threads have released all their locks to the table. The default value of innodb_table_locks is 1, which means that LOCK TABLES causes InnoDB to lock a table internally.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14694,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_thread_concurrency','INT','16','[0,128]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to keep the number of operating system threads concurrently inside InnoDB less than or equal to the limit given by this variable.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14695,'tendbcluster','dbconf','MySQL-5.7','mysqld.innodb_write_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for write operations in InnoDB.','2023-03-09 17:36:33','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14696,'tendbcluster','dbconf','MySQL-5.7','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on an interactive connection before closing it','2023-03-09 17:36:33','2023-04-07 11:55:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14697,'tendbcluster','dbconf','MySQL-5.7','mysqld.join_buffer_size','STRING','{MIN(DBInitMemory*128,262144)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Used for ordinary index scans.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14698,'tendbcluster','dbconf','MySQL-5.7','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14699,'tendbcluster','dbconf','MySQL-5.7','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default size of key cache blocks','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14700,'tendbcluster','dbconf','MySQL-5.7','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum percentage of warm blocks in key cache','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14701,'tendbcluster','dbconf','MySQL-5.7','mysqld.lc_time_names','STRING','EN_US','JA_JP| PT_BR| EN_US ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the locale that controls the language used to display day and month names and abbreviations.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14702,'tendbcluster','dbconf','MySQL-5.7','mysqld.local_infile','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether LOCAL is supported for LOAD DATA INFILE.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14703,'tendbcluster','dbconf','MySQL-5.7','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable specifies the timeout in seconds for attempts to acquire metadata locks.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14704,'tendbcluster','dbconf','MySQL-5.7','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/binlog/binlog{{port}}.bin',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14706,'tendbcluster','dbconf','MySQL-5.7','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14707,'tendbcluster','dbconf','MySQL-5.7','mysqld.log_output','STRING','FILE','FILE| TABLE| NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The destination or destinations for general query log and slow query log output. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14708,'tendbcluster','dbconf','MySQL-5.7','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether queries that do not use indexes are logged to the slow query log','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14709,'tendbcluster','dbconf','MySQL-5.7','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14710,'tendbcluster','dbconf','MySQL-5.7','mysqld.log_slow_admin_statements','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to include slow administrative statements in the statements written to the slow query log.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14711,'tendbcluster','dbconf','MySQL-5.7','mysqld.log_throttle_queries_not_using_indexes','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If log_queries_not_using_indexes is enabled, the log_throttle_queries_not_using_indexes variable limits the number of such queries per minute that can be written to the slow query log. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14712,'tendbcluster','dbconf','MySQL-5.7','mysqld.log_timestamps','STRING','SYSTEM','UTC| SYSTEM ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls the time zone of timestamps in messages written to the error log, and in general query log and slow query log messages written to files.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14713,'tendbcluster','dbconf','MySQL-5.7','mysqld.log_warnings','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14714,'tendbcluster','dbconf','MySQL-5.7','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a query takes longer than this many seconds, the server increments the Slow_queries status variable','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14705,'tendbcluster','dbconf','MySQL-5.7','mysqld.loose_log_bin_compress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14763,'tendbcluster','dbconf','MySQL-5.7','mysqld.loose_relay_log_uncompress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14715,'tendbcluster','dbconf','MySQL-5.7','mysqld.lower_case_table_names','INT','0','[0,1]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'If set to 0, table names are stored as specified and comparisons are case sensitive. If set to 1, they are stored in lowercase on disk and comparisons are not case sensitive.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14716,'tendbcluster','dbconf','MySQL-5.7','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If set to true, all INSERT, UPDATE, DELETE, and LOCK TABLE WRITE statements wait until there is no pending SELECT or LOCK TABLE READ on the affected table','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14717,'tendbcluster','dbconf','MySQL-5.7','mysqld.master_verify_checksum','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to examine checksums when reading from the binary log.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14718,'tendbcluster','dbconf','MySQL-5.7','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of one packet or any generated/intermediate string.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14719,'tendbcluster','dbconf','MySQL-5.7','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14720,'tendbcluster','dbconf','MySQL-5.7','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14721,'tendbcluster','dbconf','MySQL-5.7','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14722,'tendbcluster','dbconf','MySQL-5.7','mysqld.max_connect_errors','INT','99999999','[1,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If more than this many successive connection requests from a host are interrupted without a successful connection, the server blocks that host from further connections.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14723,'tendbcluster','dbconf','MySQL-5.7','mysqld.max_error_count','INT','64','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of error, warning, and information messages to be stored for display by the SHOW ERRORS and SHOW WARNINGS statements.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14724,'tendbcluster','dbconf','MySQL-5.7','mysqld.max_heap_table_size','INT','67108864','[16384,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size to which user-created MEMORY tables are permitted to grow.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14725,'tendbcluster','dbconf','MySQL-5.7','mysqld.max_length_for_sort_data','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The cutoff on the size of index values that determines which filesort algorithm to use.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14726,'tendbcluster','dbconf','MySQL-5.7','mysqld.max_points_in_geometry','INT','65536','[3,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum value of the points_per_circle argument to the ST_Buffer_Strategy() function.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14727,'tendbcluster','dbconf','MySQL-5.7','mysqld.max_prepared_stmt_count','INT','16382','[0,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable limits the total number of prepared statements in the server.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14728,'tendbcluster','dbconf','MySQL-5.7','mysqld.max_sort_length','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of bytes to use when sorting data values.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14729,'tendbcluster','dbconf','MySQL-5.7','mysqld.max_sp_recursion_depth','INT','0','[0,255]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of times that any given stored procedure may be called recursively. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14730,'tendbcluster','dbconf','MySQL-5.7','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections per user.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14731,'tendbcluster','dbconf','MySQL-5.7','mysqld.metadata_locks_cache_size','INT','1024','[1,1048576]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The size of the metadata locks cache. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14732,'tendbcluster','dbconf','MySQL-5.7','mysqld.min_examined_row_limit','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Queries that examine fewer than this number of rows are not logged to the slow query log. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14733,'tendbcluster','dbconf','MySQL-5.7','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when sorting MyISAM indexes during a REPAIR TABLE or when creating indexes with CREATE INDEX or ALTER TABLE','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14734,'tendbcluster','dbconf','MySQL-5.7','mysqld.mysql_native_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether the mysql_native_password built-in authentication plugin supports proxy users.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14735,'tendbcluster','dbconf','MySQL-5.7','mysqld.net_buffer_length','INT','16384','[1024,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the client thread buffer length.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14736,'tendbcluster','dbconf','MySQL-5.7','mysqld.net_read_timeout','INT','30','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from a connection before aborting the read.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14737,'tendbcluster','dbconf','MySQL-5.7','mysqld.net_retry_count','INT','10','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a read or write on a communication port is interrupted, retry this many times before giving up.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14738,'tendbcluster','dbconf','MySQL-5.7','mysqld.net_write_timeout','INT','60','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for a block to be written to a connection before aborting the write.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14739,'tendbcluster','dbconf','MySQL-5.7','mysqld.ngram_token_size','INT','2','[1,10]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'ngram_token_size is set to the size of the largest token that you want to search for.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14740,'tendbcluster','dbconf','MySQL-5.7','mysqld.optimizer_prune_level','STRING','1','0| 1 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the heuristics applied during query optimization to prune less-promising partial plans from the optimizer search space.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14741,'tendbcluster','dbconf','MySQL-5.7','mysqld.optimizer_search_depth','INT','62','[0,62]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum depth of search performed by the query optimizer.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14742,'tendbcluster','dbconf','MySQL-5.7','mysqld.optimizer_switch','STRING','INDEX_MERGE=ON,INDEX_MERGE_UNION=ON,INDEX_MERGE_SORT_UNION=ON,INDEX_MERGE_INTERSECTION=ON,ENGINE_CONDITION_PUSHDOWN=ON,INDEX_CONDITION_PUSHDOWN=ON,MRR=ON,MRR_COST_BASED=ON,BLOCK_NESTED_LOOP=ON,BATCHED_KEY_ACCESS=OFF,MATERIALIZATION=ON,SEMIJOIN=ON,LOOSESCAN=ON,FIRSTMATCH=ON,DUPLICATEWEEDOUT=ON,SUBQUERY_MATERIALIZATION_COST_BASED=ON,USE_INDEX_EXTENSIONS=ON,CONDITION_FANOUT_FILTER=ON,DERIVED_MERGE=ON','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls optimizer behavior. Must be configured with reference to the official documentation.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14743,'tendbcluster','dbconf','MySQL-5.7','mysqld.optimizer_trace_limit','INT','1','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of optimizer traces to display. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14744,'tendbcluster','dbconf','MySQL-5.7','mysqld.optimizer_trace_max_mem_size','INT','16384','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum cumulative size of stored optimizer traces. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14745,'tendbcluster','dbconf','MySQL-5.7','mysqld.optimizer_trace_offset','INT','-1','[-2147483648,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The offset of optimizer traces to display.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14746,'tendbcluster','dbconf','MySQL-5.7','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Whether to enable the Performance Schema.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14747,'tendbcluster','dbconf','MySQL-5.7','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14748,'tendbcluster','dbconf','MySQL-5.7','mysqld.preload_buffer_size','INT','32768','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when preloading indexes.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14749,'tendbcluster','dbconf','MySQL-5.7','mysqld.query_alloc_block_size','INT','8192','[1024,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The allocation size of memory blocks that are allocated for objects created during statement parsing and execution.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14750,'tendbcluster','dbconf','MySQL-5.7','mysqld.query_cache_limit','INT','1048576','[1,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Do not cache results that are larger than this number of bytes. The default value is 1MB.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14751,'tendbcluster','dbconf','MySQL-5.7','mysqld.query_cache_min_res_unit','INT','4096','[512,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum size (in bytes) for blocks allocated by the query cache. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14752,'tendbcluster','dbconf','MySQL-5.7','mysqld.query_cache_size','INT','0','[0,104857600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount of memory allocated for caching query results. By default, the query cache is disabled.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14753,'tendbcluster','dbconf','MySQL-5.7','mysqld.query_cache_type','STRING','OFF','OFF| ON| DEMAND','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Set the query cache type.','2023-03-09 17:36:33','2023-04-17 14:28:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14754,'tendbcluster','dbconf','MySQL-5.7','mysqld.query_cache_wlock_invalidate','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Invalidate queries in query cache on LOCK for write','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14755,'tendbcluster','dbconf','MySQL-5.7','mysqld.query_prealloc_size','INT','8192','[8192,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the persistent buffer used for statement parsing and execution. This buffer is not freed between statements.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14756,'tendbcluster','dbconf','MySQL-5.7','mysqld.query_response_time_stats','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14757,'tendbcluster','dbconf','MySQL-5.7','mysqld.range_alloc_block_size','INT','4096','[4096,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of blocks that are allocated when doing range optimization.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14758,'tendbcluster','dbconf','MySQL-5.7','mysqld.range_optimizer_max_mem_size','INT','8388608','[0,17179869184]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The limit on memory consumption for the range optimizer. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14759,'tendbcluster','dbconf','MySQL-5.7','mysqld.read_buffer_size','INT','262144','[8200,2147479552]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each thread that does a sequential scan for a MyISAM table allocates a buffer of this size (in bytes) for each table it scans.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14760,'tendbcluster','dbconf','MySQL-5.7','mysqld.read_rnd_buffer_size','INT','524288','[1,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Used for reads from MyISAM tables, and, for any storage engine, for Multi-Range Read optimization.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14761,'tendbcluster','dbconf','MySQL-5.7','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/relay-log/relay-log.bin',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14762,'tendbcluster','dbconf','MySQL-5.7','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14764,'tendbcluster','dbconf','MySQL-5.7','mysqld.replicate-wild-ignore-table','STRING','mysql.%,infodba_schema.conn_log','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-09 17:36:33','2023-04-19 15:12:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14767,'tendbcluster','dbconf','MySQL-5.7','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,'{{mysqld.server_id}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14768,'tendbcluster','dbconf','MySQL-5.7','mysqld.session_track_gtids','STRING','OFF','OFF| OWN_GTID| ALL_GTIDS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks GTIDs within the current session and returns them to the client. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14769,'tendbcluster','dbconf','MySQL-5.7','mysqld.session_track_schema','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks changes to the default schema (database) name within the current session and makes this information available to the client when changes occur.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14770,'tendbcluster','dbconf','MySQL-5.7','mysqld.session_track_state_change','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks changes to the state of the current session and notifies the client when state changes occur. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14771,'tendbcluster','dbconf','MySQL-5.7','mysqld.sha256_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the sha256_password built-in authentication plugin supports proxy users.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14772,'tendbcluster','dbconf','MySQL-5.7','mysqld.show_compatibility_56','STRING','ON','ON| OFF ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The INFORMATION_SCHEMA has tables that contain system and status variable information. The Performance Schema tables are intended to replace the INFORMATION_SCHEMA tables, which are deprecated as of MySQL 5.7.6 and will be removed in a future MySQL release.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14773,'tendbcluster','dbconf','MySQL-5.7','mysqld.show_old_temporals','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether SHOW CREATE TABLE output includes comments to flag temporal columns found to be in pre-5.6.4 format (TIME, DATETIME, and TIMESTAMP columns without support for fractional seconds precision).','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14774,'tendbcluster','dbconf','MySQL-5.7','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 18:37:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14775,'tendbcluster','dbconf','MySQL-5.7','mysqld.slave_compressed_protocol','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:11:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14776,'tendbcluster','dbconf','MySQL-5.7','mysqld.slave_exec_mode','STRING','STRICT','|STRICT|IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14777,'tendbcluster','dbconf','MySQL-5.7','mysqld.slave_net_timeout','INT','120','[15,300]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from the master before the slave considers the connection broken, aborts the read, and tries to reconnect','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14778,'tendbcluster','dbconf','MySQL-5.7','mysqld.slave_parallel_type','STRING','DATABASE','DATABASE| LOGICAL_CLOCK','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether the slave uses database partitioning or information from the master to parallelize transactions. (Default: DATABASE).','2023-03-09 17:36:33','2023-04-17 14:55:08',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14779,'tendbcluster','dbconf','MySQL-5.7','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enable multithreading on the replica and set the number of application threads used to execute replicated transactions in parallel.','2023-03-09 17:36:33','2023-04-17 14:58:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14780,'tendbcluster','dbconf','MySQL-5.7','mysqld.slave_rows_search_algorithms','STRING','TABLE_SCAN,INDEX_SCAN','TABLE_SCAN,INDEX_SCAN ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When preparing batches of rows for row-based logging and replication, this variable controls how the rows are searched for matches.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14781,'tendbcluster','dbconf','MySQL-5.7','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If creating a thread takes longer than this many seconds, the server increments the Slow_launch_threads status variable','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14782,'tendbcluster','dbconf','MySQL-5.7','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specify the initial slow query log state.','2023-03-09 17:36:33','2023-04-17 14:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14783,'tendbcluster','dbconf','MySQL-5.7','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/slow-query.log',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14784,'tendbcluster','dbconf','MySQL-5.7','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/mysql.sock',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:43',0);
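+-- Rows whose value_default is a Go-template placeholder such as
+-- {{.Mysqld.Datadir}}/mysql.sock are resolved per instance at render time;
+-- value_formula appears to hold the equivalent {{mysqld.datadir}} form used
+-- by the config service (inferred from the paired columns above, not stated
+-- in this dump). A minimal sketch of how to list the templated defaults:
+--   SELECT conf_name, value_default, value_formula
+--   FROM tb_config_name_def
+--   WHERE namespace = 'tendbcluster' AND conf_type = 'dbconf'
+--     AND conf_file = 'MySQL-5.7' AND value_formula IS NOT NULL;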
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14785,'tendbcluster','dbconf','MySQL-5.7','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each session that must perform a sort allocates a buffer of this size. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14786,'tendbcluster','dbconf','MySQL-5.7','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this variable is set to 1, then after a statement that inserts an automatically generated AUTO_INCREMENT value, you can find that value with a query of the form SELECT * FROM tbl_name WHERE auto_col IS NULL','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14787,'tendbcluster','dbconf','MySQL-5.7','mysqld.sql_mode','STRING','\'\'','\'\'| NO_TABLE_OPTIONS| NO_ZERO_DATE| ONLY_FULL_GROUP_BY| STRICT_ALL_TABLES| ERROR_FOR_DIVISION_BY_ZERO| HIGH_NOT_PRECEDENCE| IGNORE_SPACE| NO_AUTO_CREATE_USER| NO_AUTO_VALUE_ON_ZERO| NO_BACKSLASH_ESCAPES| NO_UNSIGNED_SUBTRACTION| NO_ENGINE_SUBSTITUTION| ANSI_QUOTES| NO_KEY_OPTIONS| PIPES_AS_CONCAT| ALLOW_INVALID_DATES| NO_DIR_IN_CREATE| NO_ZERO_IN_DATE| REAL_AS_FLOAT| STRICT_TRANS_TABLES| NO_FIELD_OPTIONS ','ENUMS',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The current server SQL mode.','2023-03-09 17:36:33','2023-04-19 11:29:17',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14788,'tendbcluster','dbconf','MySQL-5.7','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Note that this setting takes effect at the session level, so it is best set via init_connect. Set to 1 to require that UPDATE and DELETE statements include a WHERE clause or a LIMIT clause','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14789,'tendbcluster','dbconf','MySQL-5.7','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Sets a soft upper limit for the number of cached stored routines per connection.','2023-03-09 17:36:33','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14790,'tendbcluster','dbconf','MySQL-5.7','mysqld.sync_binlog','INT','0','[0,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Sync binlog (MySQL flush to disk or rely on OS).','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14791,'tendbcluster','dbconf','MySQL-5.7','mysqld.table_definition_cache','STRING','{MAX(DBInitMemory*512/1000,2048)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of table definitions (from .frm files) that can be stored in the definition cache.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14792,'tendbcluster','dbconf','MySQL-5.7','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of open tables for all threads. Increasing this value increases the number of file descriptors that mysqld requires.','2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14793,'tendbcluster','dbconf','MySQL-5.7','mysqld.table_open_cache_instances','STRING','{MIN(DBInitMemory/1000,16)}','','',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of table cache instances','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
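+-- The table_definition_cache and table_open_cache_instances defaults above
+-- are capacity formulas rather than literals. Assuming DBInitMemory is the
+-- instance memory in MB and the expressions are evaluated when a config is
+-- rendered (both assumptions; the evaluator is not part of this dump), a
+-- worked example with DBInitMemory = 16000, using MySQL GREATEST/LEAST for
+-- the formula MAX/MIN, can be checked directly:
+--   SELECT GREATEST(16000 * 512 / 1000, 2048) AS table_definition_cache,     -- 8192
+--          LEAST(16000 / 1000, 16)            AS table_open_cache_instances; -- 16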
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14794,'tendbcluster','dbconf','MySQL-5.7','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How many threads we should keep in a cache for reuse','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14795,'tendbcluster','dbconf','MySQL-5.7','mysqld.thread_handling','STRING','one-thread-per-connection','one-thread-per-connection| pool-of-threads ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The thread-handling model used by the server for connection threads.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14796,'tendbcluster','dbconf','MySQL-5.7','mysqld.thread_pool_oversubscribe','INT','3','[3,32]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Number of additional threads per group of thread pool.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14797,'tendbcluster','dbconf','MySQL-5.7','mysqld.thread_pool_size','STRING','{MIN(DBInitCpu,64)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of thread groups in the thread pool.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14798,'tendbcluster','dbconf','MySQL-5.7','mysqld.thread_stack','INT','524288','[131072,1073741824]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The stack size for each thread.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14799,'tendbcluster','dbconf','MySQL-5.7','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Set the global server time zone. The default is SYSTEM, and the value can be given in several formats, such as SYSTEM, +8:00, -6:00, or Europe/Helsinki. For more information, please consult the MySQL documentation','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14800,'tendbcluster','dbconf','MySQL-5.7','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/tmp',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14801,'tendbcluster','dbconf','MySQL-5.7','mysqld.tmp_table_size','INT','209715200','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of internal in-memory temporary tables. This variable does not apply to user-created MEMORY tables.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14802,'tendbcluster','dbconf','MySQL-5.7','mysqld.transaction_alloc_block_size','INT','8192','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount in bytes by which to increase a per-transaction memory pool which needs memory.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14803,'tendbcluster','dbconf','MySQL-5.7','mysqld.transaction_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14804,'tendbcluster','dbconf','MySQL-5.7','mysqld.transaction_prealloc_size','INT','4096','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'There is a per-transaction memory pool from which various transaction-related allocations take memory.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14805,'tendbcluster','dbconf','MySQL-5.7','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Default transaction isolation level','2023-03-09 17:36:33','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14806,'tendbcluster','dbconf','MySQL-5.7','mysqld.updatable_views_with_limit','STRING','YES','YES| NO ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls whether updates to a view can be made when the view does not contain all columns of the primary key defined in the underlying table, if the update statement contains a LIMIT clause.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14807,'tendbcluster','dbconf','MySQL-5.7','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on a noninteractive connection before closing it','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14808,'tendbcluster','dbconf','MySQL-5.7','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:10',0);
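+-- End of the MySQL-5.7 definitions for the tendbcluster namespace; the rows
+-- below seed the MySQL-8.0 conf_file. A minimal sketch (illustrative only;
+-- the real rendering code is not part of this dump) of looking up one
+-- definition when emitting a my.cnf entry:
+--   SELECT conf_name, value_default, value_allowed, need_restart
+--   FROM tb_config_name_def
+--   WHERE namespace = 'tendbcluster' AND conf_type = 'dbconf'
+--     AND conf_file = 'MySQL-8.0' AND conf_name = 'mysqld.binlog_format';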
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14814,'tendbcluster','dbconf','MySQL-8.0','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 19:15:59',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14815,'tendbcluster','dbconf','MySQL-8.0','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14816,'tendbcluster','dbconf','MySQL-8.0','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 19:28:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14817,'tendbcluster','dbconf','MySQL-8.0','mysqld.automatic_sp_privileges','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server automatically grants the EXECUTE and ALTER ROUTINE privileges to the creator of a stored routine','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14818,'tendbcluster','dbconf','MySQL-8.0','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the interval between successive column values.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14819,'tendbcluster','dbconf','MySQL-8.0','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the starting point for the AUTO_INCREMENT column value.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14820,'tendbcluster','dbconf','MySQL-8.0','mysqld.avoid_temporal_upgrade','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether ALTER TABLE implicitly upgrades temporal columns found to be in pre-5.6.4 format (TIME, DATETIME, and TIMESTAMP columns without support for fractional seconds precision)','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14821,'tendbcluster','dbconf','MySQL-8.0','mysqld.back_log','INT','3000','[1,65535]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of outstanding connection requests MySQL can have.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14822,'tendbcluster','dbconf','MySQL-8.0','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}','','',2,0,1,0,1,'{{mysqld.bind-address}}',NULL,'',0,'','Bind address','2023-03-09 17:36:33','2023-03-28 18:01:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14823,'tendbcluster','dbconf','MySQL-8.0','mysqld.binlog_cache_size','INT','2097152','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the cache to hold changes to the binary log during a transaction','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14824,'tendbcluster','dbconf','MySQL-8.0','mysqld.binlog_checksum','STRING','CRC32','NONE| CRC32 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Include a checksum for log events in the binary log. NONE means events are verified by their length only.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14853,'tendbcluster','dbconf','MySQL-8.0','mysqld.binlog_expire_logs_seconds','INT','2592000','[0, 4294967295]','RANGE',1,0,0,0,1,NULL,NULL,'',0,'','Binary log expiration time, in seconds','2023-03-09 17:36:33','2023-04-26 20:27:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14825,'tendbcluster','dbconf','MySQL-8.0','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,'',0,'','What form of binary logging the master will use.','2023-03-09 17:36:33','2023-04-19 14:31:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14826,'tendbcluster','dbconf','MySQL-8.0','mysqld.binlog_order_commits','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When enabled on a master (the default), transactions are externalized in the same order as they are written to the binary log','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14827,'tendbcluster','dbconf','MySQL-8.0','mysqld.binlog_rows_query_log_events','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This system variable affects row-based logging only. When enabled, it causes the server to write informational log events such as row query log events into its binary log','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14828,'tendbcluster','dbconf','MySQL-8.0','mysqld.binlog_row_image','STRING','FULL','FULL| MINIMAL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the format in which rows are written to the binary log.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14829,'tendbcluster','dbconf','MySQL-8.0','mysqld.binlog_stmt_cache_size','INT','32768','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the statement cache for updates to non-transactional engines for the binary log.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14830,'tendbcluster','dbconf','MySQL-8.0','mysqld.block_encryption_mode','STRING','AES-128-ECB','AES-128-ECB| AES-192-ECB| AES-256-ECB| AES-128-CBC| AES-192-CBC| AES-256-CBC ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls the block encryption mode for block-based algorithms such as AES. It affects encryption for AES_ENCRYPT() and AES_DECRYPT().','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14831,'tendbcluster','dbconf','MySQL-8.0','mysqld.bulk_insert_buffer_size','INT','8388608','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'MyISAM uses a special tree-like cache to make bulk inserts faster for INSERT ... SELECT, INSERT ... VALUES (...), (...), ..., and LOAD DATA when adding data to nonempty tables','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14832,'tendbcluster','dbconf','MySQL-8.0','mysqld.character_set_filesystem','STRING','BINARY','UTF8| LATIN1| GBK| BINARY ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The file system character set. It is best not to change it.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14833,'tendbcluster','dbconf','MySQL-8.0','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,'Specify default server character set','2023-03-09 17:36:33','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14834,'tendbcluster','dbconf','MySQL-8.0','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,'{{mysqld.collation_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14835,'tendbcluster','dbconf','MySQL-8.0','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If AUTO (the default), MySQL permits INSERT and SELECT statements to run concurrently for MyISAM tables that have no free blocks in the middle of the data file.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14836,'tendbcluster','dbconf','MySQL-8.0','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14837,'tendbcluster','dbconf','MySQL-8.0','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/data',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14838,'tendbcluster','dbconf','MySQL-8.0','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-17 14:26:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14839,'tendbcluster','dbconf','MySQL-8.0','mysqld.default_authentication_plugin','STRING','MYSQL_NATIVE_PASSWORD','MYSQL_NATIVE_PASSWORD| SHA256_PASSWORD| CACHING_SHA2_PASSWORD ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'It determines which authentication plugin the server assigns to new accounts created by CREATE USER and GRANT statements that do not explicitly specify an authentication plugin. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14840,'tendbcluster','dbconf','MySQL-8.0','mysqld.default_password_lifetime','INT','0','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable defines the global automatic password expiration policy. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14842,'tendbcluster','dbconf','MySQL-8.0','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-07 15:35:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14843,'tendbcluster','dbconf','MySQL-8.0','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default mode value to use for the WEEK() function','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14844,'tendbcluster','dbconf','MySQL-8.0','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14845,'tendbcluster','dbconf','MySQL-8.0','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How long an INSERT DELAYED thread should wait for INSERT statements before terminating.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14846,'tendbcluster','dbconf','MySQL-8.0','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14847,'tendbcluster','dbconf','MySQL-8.0','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This option applies only to MyISAM tables. It can have one of the following values to affect handling of the DELAY_KEY_WRITE table option that can be used in CREATE TABLE statements.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14848,'tendbcluster','dbconf','MySQL-8.0','mysqld.disconnect_on_expired_password','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This variable controls how the server handles clients with expired passwords','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14849,'tendbcluster','dbconf','MySQL-8.0','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the number of digits by which to increase the scale of the result of division operations performed with the / operator','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14850,'tendbcluster','dbconf','MySQL-8.0','mysqld.end_markers_in_json','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether optimizer JSON output should add end markers. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14851,'tendbcluster','dbconf','MySQL-8.0','mysqld.eq_range_index_dive_limit','INT','200','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The optimizer will use existing index statistics instead of doing index dives for equality ranges if the number of equality ranges for the index is larger than or equal to this number. If set to 0, index dives are always used.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14852,'tendbcluster','dbconf','MySQL-8.0','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the status of the Event Scheduler','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14854,'tendbcluster','dbconf','MySQL-8.0','mysqld.explicit_defaults_for_timestamp','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This option causes CREATE TABLE to create all TIMESTAMP columns as NULL with DEFAULT NULL attribute. Without this option, TIMESTAMP columns are NOT NULL and have implicit DEFAULT clauses.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14855,'tendbcluster','dbconf','MySQL-8.0','mysqld.flush_time','INT','0','[0,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this is set to a nonzero value, all tables are closed every flush_time seconds to free up resources and synchronize unflushed data to disk. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14856,'tendbcluster','dbconf','MySQL-8.0','mysqld.ft_max_word_len','INT','84','[10,4294967295]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The maximum length of the word to be included in a MyISAM FULLTEXT index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14857,'tendbcluster','dbconf','MySQL-8.0','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14858,'tendbcluster','dbconf','MySQL-8.0','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of best matches to use for query expansion','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14859,'tendbcluster','dbconf','MySQL-8.0','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted result length in bytes for the GROUP_CONCAT() function','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14860,'tendbcluster','dbconf','MySQL-8.0','mysqld.host_cache_size','INT','644','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the internal host cache','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14862,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_adaptive_flushing','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to dynamically adjust the rate of flushing dirty pages in the InnoDB buffer pool based on the workload.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14863,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_adaptive_flushing_lwm','INT','10','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the low water mark representing percentage of redo log capacity at which adaptive flushing is enabled','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14864,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_adaptive_hash_index','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the InnoDB adaptive hash index is enabled or disabled.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14865,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_adaptive_max_sleep_delay','INT','150000','[1,1000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Permits InnoDB to automatically adjust the value of innodb_thread_sleep_delay up or down according to the current workload.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14866,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_async_truncate_size','INT','128','[128,168]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size, in MB, that an asynchronous DROP TABLE truncates from the file in each background pass.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14867,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_autoextend_increment','INT','64','[1,1000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The increment size (in megabytes) for extending the size of an auto-extending InnoDB system tablespace file when it becomes full.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14868,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The lock mode to use for generating auto-increment values.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14869,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_dump_at_shutdown','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to record the pages cached in the InnoDB buffer pool when the MySQL server is shut down, to shorten the warmup process at the next restart. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14870,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_dump_pct','INT','25','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the percentage of the most recently used pages for each buffer pool to read out and dump','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14871,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_instances','INT','4','1 | 4 | 8','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of regions that the InnoDB buffer pool is divided into.','2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14872,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_load_at_startup','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies whether the InnoDB buffer pool is automatically warmed up by loading the same pages it held at an earlier time on MySQL server startup.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14873,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}','','',2,0,0,0,1,'{{mysqld.innodb_buffer_pool_size}}',NULL,NULL,-1,NULL,'The size in bytes of the buffer pool, the memory area where InnoDB caches table and index data.','2023-03-09 17:36:33','2023-03-28 18:01:06',0);
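+-- Illustrative note (an addition for readers, not part of the upstream seed
+-- data): rows such as the one above carry a template placeholder in
+-- `value_default` (e.g. {{.Mysqld.InnodbBufferPoolSize}}) together with a
+-- `value_formula`, so the effective default appears to be rendered per
+-- instance rather than taken verbatim. A minimal sketch for locating all
+-- such templated parameters in this configuration file, using only columns
+-- defined above:
+--   SELECT conf_name, value_default, value_formula
+--     FROM tb_config_name_def
+--    WHERE namespace = 'tendbcluster' AND conf_type = 'dbconf'
+--      AND conf_file = 'MySQL-8.0' AND value_default LIKE '{{%';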
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14874,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_change_buffering','STRING','ALL','NONE| INSERTS| DELETES| CHANGES| PURGES| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether InnoDB performs change buffering','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14875,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_change_buffer_max_size','INT','25','[0,50]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14876,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_checksum_algorithm','STRING','CRC32','INNODB| CRC32| NONE| STRICT_INNODB| STRICT_CRC32| STRICT_NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how to generate and verify the checksum stored in the disk blocks of InnoDB tablespaces.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14877,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_cmp_per_index_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables per-index compression-related statistics in the INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX table.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14878,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_commit_concurrency','INT','0','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of threads that can commit at the same time. A value of 0 (the default) permits any number of transactions to commit simultaneously.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14879,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_compression_failure_threshold_pct','INT','5','[0,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the compression failure rate threshold for a table, as a percentage, at which point MySQL begins adding padding within compressed pages to avoid expensive compression failures. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14880,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_compression_level','INT','6','[0,9]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the level of zlib compression to use for InnoDB compressed tables and indexes. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14881,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_compression_pad_pct_max','INT','50','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum percentage that can be reserved as free space within each compressed page','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14882,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the number of threads that can enter InnoDB concurrently','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14883,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14884,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/data',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14885,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_deadlock_detect','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables deadlock detection.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14886,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_default_row_format','STRING','DYNAMIC','DYNAMIC| COMPACT| REDUNDANT ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The innodb_default_row_format option defines the default row format for InnoDB tables and user-created temporary tables.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14887,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_disable_sort_file_cache','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables the operating system file system cache for merge-sort temporary files.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14888,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_fast_ahi_cleanup_for_drop_table','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to enable the fast adaptive hash index cleanup optimization when dropping large tables.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14890,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14891,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_flush_log_at_trx_commit','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines InnoDB transaction durability','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14892,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_flush_method','STRING','O_DIRECT','fsync| O_DSYNC| littlesync| nosync| O_DIRECT| O_DIRECT_NO_FSYNC ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the method used to flush data to InnoDB data files and log files','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14893,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_flush_neighbors','STRING','0','0| 1| 2 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14894,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_flush_sync','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to ignore the innodb_io_capacity setting for bursts of I/O activity that occur at checkpoints.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14895,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_ft_cache_size','INT','8000000','[1600000,80000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The memory allocated, in bytes, for the InnoDB FULLTEXT search index cache, which holds a parsed document in memory while creating an InnoDB FULLTEXT index','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14896,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_ft_enable_diag_print','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to enable additional full-text search (FTS) diagnostic output. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14897,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_ft_enable_stopword','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to apply a stopword list when creating an InnoDB FULLTEXT index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14898,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_ft_max_token_size','INT','84','[10,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Maximum character length of words that are stored in an InnoDB FULLTEXT index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14899,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_ft_min_token_size','INT','3','[0,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Minimum length of words that are stored in an InnoDB FULLTEXT index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14900,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_ft_num_word_optimize','INT','2000','[1000,10000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Number of words to process during each OPTIMIZE TABLE operation on an InnoDB FULLTEXT index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14901,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_ft_result_cache_limit','INT','2000000000','[1000000,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The InnoDB full-text search query result cache limit (defined in bytes) per full-text search query or per thread.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14902,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_ft_server_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The user supplied stopword table name.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14903,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_ft_sort_pll_degree','INT','2','[1,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of threads used in parallel to index and tokenize text in an InnoDB FULLTEXT index when building a search index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14904,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_ft_total_cache_size','INT','640000000','[32000000,1600000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The total memory allocated, in bytes, for the InnoDB full-text search index cache for all tables. Creating numerous tables, each with a FULLTEXT search index, could consume a significant portion of available memory.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14905,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_ft_user_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'User-supplied stopword table name, effective at the session level.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14906,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the number of I/O operations per second available to InnoDB background tasks; this limit is a total for all buffer pool instances.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14907,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_io_capacity_max','INT','40000','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines an upper limit on the number of I/O operations performed per second by InnoDB background tasks in critical situations.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14908,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The length of time in seconds an InnoDB transaction waits for a row lock before giving up','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14909,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14910,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_log_checksums','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables checksums for redo log pages.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14911,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_log_compressed_pages','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to write images of re-compressed pages to the redo log, preventing corruption that could occur if a different version of the zlib compression algorithm is used during recovery.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14912,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14913,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14914,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/log',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14915,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_lru_scan_depth','INT','1024','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'It specifies, per buffer pool instance, how far down the buffer pool LRU page list the page cleaner thread scans looking for dirty pages to flush. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14916,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_max_dirty_pages_pct','FLOAT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to flush data from the buffer pool so that the percentage of dirty pages does not exceed this value','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14917,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_max_dirty_pages_pct_lwm','FLOAT','0','[0,75]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines a low water mark representing the percentage of dirty pages at which preflushing is enabled to control the dirty page ratio.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14918,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_max_purge_lag','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the maximum length of the purge queue.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14919,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_max_purge_lag_delay','INT','0','[0,10000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum delay in microseconds for the delay imposed by the innodb_max_purge_lag variable. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14920,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_max_undo_log_size','INT','1073741824','[10485760,17179869184]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines a threshold size for undo tablespaces.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14921,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_monitor_disable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables InnoDB metrics counters.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14922,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_monitor_enable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables InnoDB metrics counters.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14923,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the approximate percentage of the InnoDB buffer pool used for the old block sublist','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14924,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how long in milliseconds a block inserted into the old sublist must stay there after its first access before it can be moved to the new sublist.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14925,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_online_alter_log_max_size','INT','134217728','[134217728,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum modification log file size for online index creation','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14926,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_optimize_fulltext_only','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Changes the way OPTIMIZE TABLE operates on InnoDB tables. Intended to be enabled temporarily, during maintenance operations for InnoDB tables with FULLTEXT indexes. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14927,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_page_cleaners','INT','4','[1,64]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of page cleaner threads that flush dirty pages from buffer pool instances.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14928,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_print_all_deadlocks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this option is enabled, information about all deadlocks in InnoDB user transactions is recorded in the mysqld error log.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14929,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_purge_batch_size','INT','300','[1,5000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The granularity of changes, expressed in units of redo log records, that trigger a purge operation, flushing the changed buffer pool blocks to disk.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14930,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_purge_rseg_truncate_frequency','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14931,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_purge_threads','INT','4','[1,32]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of background threads devoted to the InnoDB purge operation.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14932,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_random_read_ahead','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables the random read-ahead technique for optimizing InnoDB I/O.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14933,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the sensitivity of linear read-ahead that InnoDB uses to prefetch pages into the buffer pool','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14934,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_read_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for read operations in InnoDB.','2023-03-09 17:36:33','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14935,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'InnoDB rolls back only the last statement on a transaction timeout by default. If innodb_rollback_on_timeout is specified, a transaction timeout causes InnoDB to abort and roll back the entire transaction.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14936,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_rollback_segments','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the number of rollback segments used by InnoDB.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14937,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_sort_buffer_size','INT','1048576','[65536,67108864]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies the size of sort buffers used to sort data during creation of an InnoDB index.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14938,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_spin_wait_delay','INT','6','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum delay between polls for a spin lock.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14939,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_stats_auto_recalc','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Causes InnoDB to automatically recalculate persistent statistics after the data in a table is changed substantially.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14940,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14941,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this variable is enabled, InnoDB updates statistics when metadata statements such as SHOW TABLE STATUS or SHOW INDEX are run, or when accessing the INFORMATION_SCHEMA tables TABLES or STATISTICS','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14942,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_stats_persistent','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether InnoDB index statistics are persisted to disk. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14943,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_stats_persistent_sample_pages','INT','20','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of index pages to sample when estimating cardinality and other statistics for an indexed column, such as those calculated by ANALYZE TABLE.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14944,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_stats_transient_sample_pages','INT','8','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of leaf index pages to sample when calculating transient statistics','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14945,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_status_output','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables periodic output for the standard InnoDB Monitor.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14946,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_status_output_locks','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables the InnoDB Lock Monitor.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14947,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When innodb_strict_mode is ON, InnoDB returns errors rather than warnings for certain conditions','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14948,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_sync_array_size','INT','1','[1,1024]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the size of the mutex/lock wait array.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14949,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_sync_spin_loops','INT','30','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of times a thread waits for an InnoDB mutex to be freed before the thread is suspended.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14950,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_table_drop_mode','STRING','SYNC_DROP','SYNC_DROP| ASYNC_DROP ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Switch that enables asynchronous dropping of large tables.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14951,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_table_locks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If autocommit = 0, InnoDB honors LOCK TABLES; MySQL does not return from LOCK TABLES ... WRITE until all other threads have released all their locks to the table. The default value of innodb_table_locks is 1, which means that LOCK TABLES causes InnoDB to lock a table internally if autocommit = 1.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14952,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_thread_concurrency','INT','16','[0,128]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to keep the number of operating system threads concurrently inside InnoDB less than or equal to the limit given by this variable.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14953,'tendbcluster','dbconf','MySQL-8.0','mysqld.innodb_write_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for write operations in InnoDB.','2023-03-09 17:36:33','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14954,'tendbcluster','dbconf','MySQL-8.0','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on an interactive connection before closing it','2023-03-09 17:36:33','2023-04-07 11:55:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14955,'tendbcluster','dbconf','MySQL-8.0','mysqld.join_buffer_size','STRING','{MIN(DBInitMemory*128,262144)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Used for ordinary index scans.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14956,'tendbcluster','dbconf','MySQL-8.0','mysqld.key_buffer','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14957,'tendbcluster','dbconf','MySQL-8.0','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14958,'tendbcluster','dbconf','MySQL-8.0','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default size of key cache blocks','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14959,'tendbcluster','dbconf','MySQL-8.0','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum percentage of warm blocks in key cache','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14960,'tendbcluster','dbconf','MySQL-8.0','mysqld.lc_time_names','STRING','EN_US','JA_JP| PT_BR| EN_US ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the locale that controls the language used to display day and month names and abbreviations.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14961,'tendbcluster','dbconf','MySQL-8.0','mysqld.local_infile','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether LOCAL is supported for LOAD DATA INFILE','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14962,'tendbcluster','dbconf','MySQL-8.0','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable specifies the timeout in seconds for attempts to acquire metadata locks.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14963,'tendbcluster','dbconf','MySQL-8.0','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/binlog/binlog{{port}}.bin',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14965,'tendbcluster','dbconf','MySQL-8.0','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14966,'tendbcluster','dbconf','MySQL-8.0','mysqld.log_output','STRING','FILE','FILE| TABLE| NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The destination or destinations for general query log and slow query log output. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14967,'tendbcluster','dbconf','MySQL-8.0','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether queries that do not use indexes are logged to the slow query log','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14968,'tendbcluster','dbconf','MySQL-8.0','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14969,'tendbcluster','dbconf','MySQL-8.0','mysqld.log_slow_admin_statements','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to include slow administrative statements in the statements written to the slow query log.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14970,'tendbcluster','dbconf','MySQL-8.0','mysqld.log_throttle_queries_not_using_indexes','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If log_queries_not_using_indexes is enabled, the log_throttle_queries_not_using_indexes variable limits the number of such queries per minute that can be written to the slow query log. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14971,'tendbcluster','dbconf','MySQL-8.0','mysqld.log_timestamps','STRING','SYSTEM','UTC| SYSTEM ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls the time zone of timestamps in messages written to the error log, and in general query log and slow query log messages written to files.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14973,'tendbcluster','dbconf','MySQL-8.0','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a query takes longer than this many seconds, the server increments the Slow_queries status variable','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14964,'tendbcluster','dbconf','MySQL-8.0','mysqld.loose_log_bin_compress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15018,'tendbcluster','dbconf','MySQL-8.0','mysqld.loose_relay_log_uncompress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14974,'tendbcluster','dbconf','MySQL-8.0','mysqld.lower_case_table_names','INT','0','0|1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14975,'tendbcluster','dbconf','MySQL-8.0','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If set to true, all INSERT, UPDATE, DELETE, and LOCK TABLE WRITE statements wait until there is no pending SELECT or LOCK TABLE READ on the affected table','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14976,'tendbcluster','dbconf','MySQL-8.0','mysqld.master_verify_checksum','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to examine checksums when reading from the binary log.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14977,'tendbcluster','dbconf','MySQL-8.0','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of one packet or any generated/intermediate string.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14978,'tendbcluster','dbconf','MySQL-8.0','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14979,'tendbcluster','dbconf','MySQL-8.0','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14980,'tendbcluster','dbconf','MySQL-8.0','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14981,'tendbcluster','dbconf','MySQL-8.0','mysqld.max_connect_errors','INT','99999999','[1,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If more than this many successive connection requests from a host are interrupted without a successful connection, the server blocks that host from further connections.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14982,'tendbcluster','dbconf','MySQL-8.0','mysqld.max_error_count','INT','64','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of error, warning, and information messages to be stored for display by the SHOW ERRORS and SHOW WARNINGS statements.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14983,'tendbcluster','dbconf','MySQL-8.0','mysqld.max_heap_table_size','INT','67108864','[16384,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'the maximum size to which user-created MEMORY tables are permitted to grow.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14984,'tendbcluster','dbconf','MySQL-8.0','mysqld.max_length_for_sort_data','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The cutoff on the size of index values that determines which filesort algorithm to use.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14985,'tendbcluster','dbconf','MySQL-8.0','mysqld.max_points_in_geometry','INT','65536','[3,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum value of the points_per_circle argument to the ST_Buffer_Strategy() function.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14986,'tendbcluster','dbconf','MySQL-8.0','mysqld.max_prepared_stmt_count','INT','16382','[0,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable limits the total number of prepared statements in the server.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14987,'tendbcluster','dbconf','MySQL-8.0','mysqld.max_sort_length','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of bytes to use when sorting data values.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14988,'tendbcluster','dbconf','MySQL-8.0','mysqld.max_sp_recursion_depth','INT','0','[0,255]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of times that any given stored procedure may be called recursively. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14989,'tendbcluster','dbconf','MySQL-8.0','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections per user.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14990,'tendbcluster','dbconf','MySQL-8.0','mysqld.min_examined_row_limit','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Queries that examine fewer than this number of rows are not logged to the slow query log. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14991,'tendbcluster','dbconf','MySQL-8.0','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when sorting MyISAM indexes during a REPAIR TABLE or when creating indexes with CREATE INDEX or ALTER TABLE','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14992,'tendbcluster','dbconf','MySQL-8.0','mysqld.mysql_native_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether the mysql_native_password built-in authentication plugin supports proxy users.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14993,'tendbcluster','dbconf','MySQL-8.0','mysqld.net_buffer_length','INT','16384','[1024,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Control client thread buffer length.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14994,'tendbcluster','dbconf','MySQL-8.0','mysqld.net_read_timeout','INT','30','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from a connection before aborting the read.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14995,'tendbcluster','dbconf','MySQL-8.0','mysqld.net_retry_count','INT','10','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a read or write on a communication port is interrupted, retry this many times before giving up.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14996,'tendbcluster','dbconf','MySQL-8.0','mysqld.net_write_timeout','INT','60','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for a block to be written to a connection before aborting the write.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14997,'tendbcluster','dbconf','MySQL-8.0','mysqld.ngram_token_size','INT','2','[1,10]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'ngram_token_size is set to the size of the largest token that you want to search for.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14998,'tendbcluster','dbconf','MySQL-8.0','mysqld.optimizer_prune_level','STRING','1','0| 1 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the heuristics applied during query optimization to prune less-promising partial plans from the optimizer search space.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14999,'tendbcluster','dbconf','MySQL-8.0','mysqld.optimizer_search_depth','INT','62','[0,62]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum depth of search performed by the query optimizer.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15000,'tendbcluster','dbconf','MySQL-8.0','mysqld.optimizer_switch','STRING','INDEX_MERGE=ON,INDEX_MERGE_UNION=ON,INDEX_MERGE_SORT_UNION=ON,INDEX_MERGE_INTERSECTION=ON,ENGINE_CONDITION_PUSHDOWN=ON,INDEX_CONDITION_PUSHDOWN=ON,MRR=ON,MRR_COST_BASED=ON,BLOCK_NESTED_LOOP=ON,BATCHED_KEY_ACCESS=OFF,MATERIALIZATION=ON,SEMIJOIN=ON,LOOSESCAN=ON,FIRSTMATCH=ON,DUPLICATEWEEDOUT=ON,SUBQUERY_MATERIALIZATION_COST_BASED=ON,USE_INDEX_EXTENSIONS=ON,CONDITION_FANOUT_FILTER=ON,DERIVED_MERGE=ON','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls optimizer behavior. Must be configured with reference to the official documentation.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15001,'tendbcluster','dbconf','MySQL-8.0','mysqld.optimizer_trace_limit','INT','1','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of optimizer traces to display. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15002,'tendbcluster','dbconf','MySQL-8.0','mysqld.optimizer_trace_max_mem_size','INT','16384','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum cumulative size of stored optimizer traces. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15003,'tendbcluster','dbconf','MySQL-8.0','mysqld.optimizer_trace_offset','INT','-1','[-2147483647,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The offset of optimizer traces to display.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15004,'tendbcluster','dbconf','MySQL-8.0','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Whether to enable the Performance Schema.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15005,'tendbcluster','dbconf','MySQL-8.0','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15006,'tendbcluster','dbconf','MySQL-8.0','mysqld.preload_buffer_size','INT','32768','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when preloading indexes.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15007,'tendbcluster','dbconf','MySQL-8.0','mysqld.query_alloc_block_size','INT','8192','[1024,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The allocation size of memory blocks that are allocated for objects created during statement parsing and execution.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15010,'tendbcluster','dbconf','MySQL-8.0','mysqld.query_prealloc_size','INT','8192','[8192,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the persistent buffer used for statement parsing and execution. This buffer is not freed between statements.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15012,'tendbcluster','dbconf','MySQL-8.0','mysqld.range_alloc_block_size','INT','4096','[4096,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of blocks that are allocated when doing range optimization.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15013,'tendbcluster','dbconf','MySQL-8.0','mysqld.range_optimizer_max_mem_size','INT','8388608','[0,17179869184]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The limit on memory consumption for the range optimizer. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15014,'tendbcluster','dbconf','MySQL-8.0','mysqld.read_buffer_size','INT','262144','[8200,2147479552]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each thread that does a sequential scan for a MyISAM table allocates a buffer of this size (in bytes) for each table it scans.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15015,'tendbcluster','dbconf','MySQL-8.0','mysqld.read_rnd_buffer_size','INT','524288','[1,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Used for reads from MyISAM tables, and, for any storage engine, for Multi-Range Read optimization.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15016,'tendbcluster','dbconf','MySQL-8.0','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/relay-log/relay-log.bin',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15017,'tendbcluster','dbconf','MySQL-8.0','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15019,'tendbcluster','dbconf','MySQL-8.0','mysqld.replicate-wild-ignore-table','STRING','mysql.%,infodba_schema.conn_log',NULL,'STRING',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:12:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15022,'tendbcluster','dbconf','MySQL-8.0','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,'{{mysqld.server_id}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15023,'tendbcluster','dbconf','MySQL-8.0','mysqld.session_track_gtids','STRING','OFF','OFF| OWN_GTID| ALL_GTIDS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks GTIDs within the current session and returns them to the client. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15024,'tendbcluster','dbconf','MySQL-8.0','mysqld.session_track_schema','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks changes to the default schema (database) name within the current session and makes this information available to the client when changes occur.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15025,'tendbcluster','dbconf','MySQL-8.0','mysqld.session_track_state_change','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks changes to the state of the current session and notifies the client when state changes occur. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15026,'tendbcluster','dbconf','MySQL-8.0','mysqld.sha256_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the sha256_password built-in authentication plugin supports proxy users.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15028,'tendbcluster','dbconf','MySQL-8.0','mysqld.show_old_temporals','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether SHOW CREATE TABLE output includes comments to flag temporal columns found to be in pre-5.6.4 format (TIME, DATETIME, and TIMESTAMP columns without support for fractional seconds precision).','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15029,'tendbcluster','dbconf','MySQL-8.0','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 18:37:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15030,'tendbcluster','dbconf','MySQL-8.0','mysqld.slave_compressed_protocol','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:11:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15031,'tendbcluster','dbconf','MySQL-8.0','mysqld.slave_exec_mode','STRING','STRICT','STRICT|IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15032,'tendbcluster','dbconf','MySQL-8.0','mysqld.slave_net_timeout','INT','120','[15,300]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from the master before the slave considers the connection broken, aborts the read, and tries to reconnect','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15033,'tendbcluster','dbconf','MySQL-8.0','mysqld.slave_parallel_type','STRING','DATABASE','DATABASE| LOGICAL_CLOCK','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether the slave uses database partitioning or information from the master to parallelize transactions. (Default: DATABASE).','2023-03-09 17:36:33','2023-04-17 14:55:08',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15034,'tendbcluster','dbconf','MySQL-8.0','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enable multithreading on the replica and set the number of application threads used to execute replicated transactions in parallel.','2023-03-09 17:36:33','2023-04-17 14:58:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15035,'tendbcluster','dbconf','MySQL-8.0','mysqld.slave_rows_search_algorithms','STRING','TABLE_SCAN,INDEX_SCAN,HASH_SCAN','TABLE_SCAN,INDEX_SCAN| INDEX_SCAN,HASH_SCAN| TABLE_SCAN,HASH_SCAN| TABLE_SCAN,INDEX_SCAN,HASH_SCAN ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When preparing batches of rows for row-based logging and replication, this variable controls how the rows are searched for matches.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15036,'tendbcluster','dbconf','MySQL-8.0','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If creating a thread takes longer than this many seconds, the server increments the Slow_launch_threads status variable','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15037,'tendbcluster','dbconf','MySQL-8.0','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specify the initial slow query log state.','2023-03-09 17:36:33','2023-04-17 14:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15038,'tendbcluster','dbconf','MySQL-8.0','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,'{{mysqld.slow_query_log_file}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15039,'tendbcluster','dbconf','MySQL-8.0','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,'{{mysqld.socket}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15040,'tendbcluster','dbconf','MySQL-8.0','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each session that must perform a sort allocates a buffer of this size. ','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15041,'tendbcluster','dbconf','MySQL-8.0','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this variable is set to 1, you can find the most recently generated AUTO_INCREMENT value by selecting rows with WHERE auto_col IS NULL','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15042,'tendbcluster','dbconf','MySQL-8.0','mysqld.sql_mode','STRING','\'\'','\'\'| NO_ENGINE_SUBSTITUTION| ALLOW_INVALID_DATES| ANSI_QUOTES| ERROR_FOR_DIVISION_BY_ZERO| HIGH_NOT_PRECEDENCE| IGNORE_SPACE| NO_AUTO_VALUE_ON_ZERO| NO_BACKSLASH_ESCAPES| NO_DIR_IN_CREATE| NO_UNSIGNED_SUBTRACTION| NO_ZERO_DATE| NO_ZERO_IN_DATE| ONLY_FULL_GROUP_BY| PIPES_AS_CONCAT| REAL_AS_FLOAT| STRICT_ALL_TABLES| STRICT_TRANS_TABLES ','ENUMS',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The current server SQL mode.','2023-03-09 17:36:33','2023-04-19 11:29:17',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15043,'tendbcluster','dbconf','MySQL-8.0','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Note: this variable takes effect at the session level, so it is best applied via init_connect. Set to 1 to require that UPDATE and DELETE statements include a WHERE clause or a LIMIT clause','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15044,'tendbcluster','dbconf','MySQL-8.0','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Sets a soft upper limit for the number of cached stored routines per connection.','2023-03-09 17:36:33','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15045,'tendbcluster','dbconf','MySQL-8.0','mysqld.sync_binlog','INT','0','[0,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls synchronization of the binary log to disk: 0 relies on the operating system to flush, N makes MySQL sync the binary log after every N commit groups','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15046,'tendbcluster','dbconf','MySQL-8.0','mysqld.table_definition_cache','STRING','{MAX(DBInitMemory*512/1000,2048)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of table definitions (from .frm files) that can be stored in the definition cache.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15047,'tendbcluster','dbconf','MySQL-8.0','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of open tables for all threads. Increasing this value increases the number of file descriptors that mysqld requires.','2023-03-09 17:36:33','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15048,'tendbcluster','dbconf','MySQL-8.0','mysqld.table_open_cache_instances','STRING','{MIN(DBInitMemory/1000,16)}','','',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of table cache instances','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15049,'tendbcluster','dbconf','MySQL-8.0','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How many threads we should keep in a cache for reuse','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15050,'tendbcluster','dbconf','MySQL-8.0','mysqld.thread_handling','STRING','one-thread-per-connection','one-thread-per-connection| pool-of-threads ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The thread-handling model used by the server for connection threads.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15051,'tendbcluster','dbconf','MySQL-8.0','mysqld.thread_pool_oversubscribe','INT','3','[3,32]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines how many worker threads in a group can remain active at the same time once a thread group is oversubscribed.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15052,'tendbcluster','dbconf','MySQL-8.0','mysqld.thread_pool_size','STRING','{MIN(DBInitCpu,64)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of thread groups in the thread pool.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15053,'tendbcluster','dbconf','MySQL-8.0','mysqld.thread_stack','INT','524288','[131072,1073741824]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The stack size for each thread.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15054,'tendbcluster','dbconf','MySQL-8.0','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Set the global server time zone; the default follows SYSTEM. The value can be given in several formats, such as SYSTEM, +8:00, -6:00, or Europe/Helsinki. For more information, please consult the MySQL documentation','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15055,'tendbcluster','dbconf','MySQL-8.0','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/tmp',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:00:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15056,'tendbcluster','dbconf','MySQL-8.0','mysqld.tmp_table_size','INT','209715200','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of internal in-memory temporary tables. This variable does not apply to user-created MEMORY tables.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15057,'tendbcluster','dbconf','MySQL-8.0','mysqld.transaction_alloc_block_size','INT','8192','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount in bytes by which to increase a per-transaction memory pool which needs memory.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15058,'tendbcluster','dbconf','MySQL-8.0','mysqld.transaction_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Default transaction isolation level','2023-03-09 17:36:33','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15059,'tendbcluster','dbconf','MySQL-8.0','mysqld.transaction_prealloc_size','INT','4096','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The initial size in bytes of the per-transaction memory pool from which various transaction-related allocations take memory.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15060,'tendbcluster','dbconf','MySQL-8.0','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15061,'tendbcluster','dbconf','MySQL-8.0','mysqld.updatable_views_with_limit','STRING','YES','YES| NO ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls whether updates to a view can be made when the view does not contain all columns of the primary key defined in the underlying table, if the update statement contains a LIMIT clause.','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15062,'tendbcluster','dbconf','MySQL-8.0','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on a noninteractive connection before closing it','2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15063,'tendbcluster','dbconf','MySQL-8.0','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15421,'tendbcluster','dbconf','Spider-1','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 19:15:59',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15422,'tendbcluster','dbconf','Spider-1','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15423,'tendbcluster','dbconf','Spider-1','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 19:28:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15424,'tendbcluster','dbconf','Spider-1','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the interval between successive column values.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15425,'tendbcluster','dbconf','Spider-1','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the starting point for the AUTO_INCREMENT column value.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15426,'tendbcluster','dbconf','Spider-1','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}',NULL,'',2,0,0,0,1,'{{mysqld.bind-address}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 18:01:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15427,'tendbcluster','dbconf','Spider-1','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What form of binary logging the master will use.','2023-03-09 17:55:11','2023-04-19 14:31:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15428,'tendbcluster','dbconf','Spider-1','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,'Specify default server character set','2023-03-09 17:55:11','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15429,'tendbcluster','dbconf','Spider-1','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,'{{mysqld.collation_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15430,'tendbcluster','dbconf','Spider-1','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If AUTO (the default), MySQL permits INSERT and SELECT statements to run concurrently for MyISAM tables that have no free blocks in the middle of the data file.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15431,'tendbcluster','dbconf','Spider-1','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15432,'tendbcluster','dbconf','Spider-1','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/data',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 18:00:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15433,'tendbcluster','dbconf','Spider-1','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-04-17 14:26:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15434,'tendbcluster','dbconf','Spider-1','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-04-07 15:35:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15435,'tendbcluster','dbconf','Spider-1','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default mode value to use for the WEEK() function','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15436,'tendbcluster','dbconf','Spider-1','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15437,'tendbcluster','dbconf','Spider-1','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How long an INSERT DELAYED thread should wait for INSERT statements before terminating.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15438,'tendbcluster','dbconf','Spider-1','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15439,'tendbcluster','dbconf','Spider-1','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This option applies only to MyISAM tables. It can have one of the following values to affect handling of the DELAY_KEY_WRITE table option that can be used in CREATE TABLE statements.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15440,'tendbcluster','dbconf','Spider-1','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the number of digits by which to increase the scale of the result of division operations performed with the / operator','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15441,'tendbcluster','dbconf','Spider-1','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the status of the Event Scheduler','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15442,'tendbcluster','dbconf','Spider-1','mysqld.expire_logs_days','INT','61','[0, 99]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-04-26 20:27:21',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15443,'tendbcluster','dbconf','Spider-1','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15444,'tendbcluster','dbconf','Spider-1','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of best matches to use for query expansion','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15445,'tendbcluster','dbconf','Spider-1','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted result length in bytes for the GROUP_CONCAT() function','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15446,'tendbcluster','dbconf','Spider-1','mysqld.init_connect','STRING','\"insert into infodba_schema.conn_log values(connection_id(),now(),user(),current_user(),\'{{.Mysqld.BindAddress}}\');\"','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:55:11','2023-04-17 17:11:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15447,'tendbcluster','dbconf','Spider-1','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The lock mode to use for generating auto-increment values','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15448,'tendbcluster','dbconf','Spider-1','mysqld.innodb_buffer_pool_instances','INT','4','1 | 4 | 8','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15449,'tendbcluster','dbconf','Spider-1','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}',NULL,'',2,0,0,0,1,'{{mysqld.innodb_buffer_pool_size}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 18:01:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15450,'tendbcluster','dbconf','Spider-1','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of tickets a thread receives when it enters InnoDB, allowing it to enter and leave InnoDB freely until the tickets are used up','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15451,'tendbcluster','dbconf','Spider-1','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15452,'tendbcluster','dbconf','Spider-1','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/data',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 18:00:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15454,'tendbcluster','dbconf','Spider-1','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15455,'tendbcluster','dbconf','Spider-1','mysqld.innodb_flush_log_at_trx_commit','INT','0','0|1|2','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15456,'tendbcluster','dbconf','Spider-1','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15457,'tendbcluster','dbconf','Spider-1','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The length of time in seconds an InnoDB transaction waits for a row lock before giving up','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15458,'tendbcluster','dbconf','Spider-1','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15459,'tendbcluster','dbconf','Spider-1','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15460,'tendbcluster','dbconf','Spider-1','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15461,'tendbcluster','dbconf','Spider-1','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/log',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 18:00:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15462,'tendbcluster','dbconf','Spider-1','mysqld.innodb_max_dirty_pages_pct','INT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to flush data from the buffer pool so that the percentage of dirty pages does not exceed this value','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15463,'tendbcluster','dbconf','Spider-1','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the approximate percentage of the InnoDB buffer pool used for the old block sublist','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15464,'tendbcluster','dbconf','Spider-1','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how long in milliseconds a block inserted into the old sublist must stay there after its first access before it can be moved to the new sublist','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15465,'tendbcluster','dbconf','Spider-1','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the sensitivity of linear read-ahead that InnoDB uses to prefetch pages into the buffer pool','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15466,'tendbcluster','dbconf','Spider-1','mysqld.innodb_read_io_threads','INT','8','[0,32]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15467,'tendbcluster','dbconf','Spider-1','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'InnoDB rolls back only the last statement on a transaction timeout by default','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15468,'tendbcluster','dbconf','Spider-1','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15469,'tendbcluster','dbconf','Spider-1','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this variable is enabled, InnoDB updates statistics when metadata statements such as SHOW TABLE STATUS or SHOW INDEX are run, or when accessing the INFORMATION_SCHEMA tables TABLES or STATISTICS','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15470,'tendbcluster','dbconf','Spider-1','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When innodb_strict_mode is ON, InnoDB returns errors rather than warnings for certain conditions','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15471,'tendbcluster','dbconf','Spider-1','mysqld.innodb_thread_concurrency','INT','16','[1,32]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15472,'tendbcluster','dbconf','Spider-1','mysqld.innodb_write_io_threads','INT','8','[0,32]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15473,'tendbcluster','dbconf','Spider-1','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on an interactive connection before closing it','2023-03-09 17:55:11','2023-04-07 11:55:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15474,'tendbcluster','dbconf','Spider-1','mysqld.key_buffer','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15475,'tendbcluster','dbconf','Spider-1','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15476,'tendbcluster','dbconf','Spider-1','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default size of key cache blocks','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15477,'tendbcluster','dbconf','Spider-1','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum percentage of warm blocks in key cache','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15478,'tendbcluster','dbconf','Spider-1','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable specifies the timeout in seconds for attempts to acquire metadata locks.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15479,'tendbcluster','dbconf','Spider-1','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/binlog/binlog{{port}}.bin',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 18:00:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15481,'tendbcluster','dbconf','Spider-1','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15482,'tendbcluster','dbconf','Spider-1','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether queries that do not use indexes are logged to the slow query log','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15483,'tendbcluster','dbconf','Spider-1','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15484,'tendbcluster','dbconf','Spider-1','mysqld.log_warnings','STRING','0',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15485,'tendbcluster','dbconf','Spider-1','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a query takes longer than this many seconds, the server increments the Slow_queries status variable','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15486,'tendbcluster','dbconf','Spider-1','mysqld.lower_case_table_names','INT','0','[0,1]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'If set to 0, table names are stored as specified and comparisons are case sensitive. If set to 1, they are stored in lowercase on disk and comparisons are not case sensitive.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15487,'tendbcluster','dbconf','Spider-1','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If set to true, all INSERT, UPDATE, DELETE, and LOCK TABLE WRITE statements wait until there is no pending SELECT or LOCK TABLE READ on the affected table','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15488,'tendbcluster','dbconf','Spider-1','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15489,'tendbcluster','dbconf','Spider-1','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15490,'tendbcluster','dbconf','Spider-1','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15491,'tendbcluster','dbconf','Spider-1','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15492,'tendbcluster','dbconf','Spider-1','mysqld.max_connect_errors','STRING','99999999',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15493,'tendbcluster','dbconf','Spider-1','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections per user.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15494,'tendbcluster','dbconf','Spider-1','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when sorting MyISAM indexes during a REPAIR TABLE or when creating indexes with CREATE INDEX or ALTER TABLE','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15495,'tendbcluster','dbconf','Spider-1','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Whether to enable the performance schema.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15496,'tendbcluster','dbconf','Spider-1','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15497,'tendbcluster','dbconf','Spider-1','mysqld.query_cache_limit','INT','1048576','[1,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Do not cache results that are larger than this number of bytes. The default value is 1MB.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15498,'tendbcluster','dbconf','Spider-1','mysqld.query_cache_size','INT','0','[0,104857600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount of memory allocated for caching query results. By default, the query cache is disabled.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15499,'tendbcluster','dbconf','Spider-1','mysqld.query_cache_type','STRING','OFF','OFF| ON| DEMAND','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Set the query cache type.','2023-03-09 17:55:11','2023-04-17 14:28:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15500,'tendbcluster','dbconf','Spider-1','mysqld.query_cache_wlock_invalidate','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Invalidate queries in query cache on LOCK for write','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15501,'tendbcluster','dbconf','Spider-1','mysqld.query_response_time_stats','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15502,'tendbcluster','dbconf','Spider-1','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/relay-log/relay-log.bin',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 18:00:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15503,'tendbcluster','dbconf','Spider-1','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15508,'tendbcluster','dbconf','Spider-1','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,'{{mysqld.server_id}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 18:01:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15510,'tendbcluster','dbconf','Spider-1','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 18:37:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15511,'tendbcluster','dbconf','Spider-1','mysqld.slave_compressed_protocol','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-04-19 15:11:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15512,'tendbcluster','dbconf','Spider-1','mysqld.slave_exec_mode','STRING','STRICT','|STRICT|IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15514,'tendbcluster','dbconf','Spider-1','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-04-17 14:58:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15515,'tendbcluster','dbconf','Spider-1','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If creating a thread takes longer than this many seconds, the server increments the Slow_launch_threads status variable','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15516,'tendbcluster','dbconf','Spider-1','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-04-17 15:00:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15517,'tendbcluster','dbconf','Spider-1','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,'{{mysqld.slow_query_log_file}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 18:00:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15518,'tendbcluster','dbconf','Spider-1','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,'{{mysqld.socket}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15519,'tendbcluster','dbconf','Spider-1','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15816,'tendbcluster','dbconf','Spider-1','mysqld.spider_auto_increment_mode_switch','INT','1','1 | 0','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the auto increment is enabled. The value can be 0 (or OFF) to disable or 1 (or ON) to enable. If enabled, TSpider generates a global non-continuous unique identity for new rows. Identities are only guaranteed to be incremental on the same TSpider node.','2023-03-09 17:55:05','2023-03-09 18:28:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15817,'tendbcluster','dbconf','Spider-1','mysqld.spider_auto_increment_mode_value','INT','{{.Mysqld.SpiderAutoIncrementModeValue}}','[0, 36]','RANGE',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The start number used by a TSpider node when generating global non-continuous unique identities. Every TSpider node must use a different value. A valid value can be computed as the node\'s increment value modulo spider_auto_increment_step.','2023-03-09 17:55:05','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15818,'tendbcluster','dbconf','Spider-1','mysqld.spider_auto_increment_step','INT','37','37','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The step of the global non-continuous unique identities generated by a TSpider node. All TSpider nodes in a cluster must use the same value.','2023-03-09 17:55:05','2023-03-09 18:28:45',0);
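+-- Editor's note (illustrative comment only, not part of the seed data): with
+-- spider_auto_increment_step = 37 and a per-node spider_auto_increment_mode_value k in [0, 36],
+-- each TSpider node hands out the arithmetic sequence k, k + 37, k + 74, ..., so identities
+-- generated by different nodes never collide; e.g. the node with k = 3 generates 3, 40, 77, 114, ...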
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15813,'tendbcluster','dbconf','Spider-1','mysqld.spider_max_connections','INT','500','[10, 1000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of connections from TSpider to TenDB. A value of 0 means the number of connections is unlimited. Setting it to 200 is generally recommended.','2023-03-09 17:55:05','2023-03-09 18:29:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15814,'tendbcluster','dbconf','Spider-1','mysqld.spider_net_read_timeout','INT','60','[10, 1000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Wait timeout in seconds for receiving data from a TenDB node','2023-03-09 17:55:05','2023-03-09 18:29:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15815,'tendbcluster','dbconf','Spider-1','mysqld.spider_net_write_timeout','INT','60','[10, 1000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Wait timeout in seconds for sending data to a TenDB node','2023-03-09 17:55:05','2023-03-09 18:30:02',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15520,'tendbcluster','dbconf','Spider-1','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this variable is set to 1, then the most recently generated AUTO_INCREMENT value can be found with a query of the form WHERE auto_col IS NULL','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
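+-- Editor's note (illustrative comment only, commented out so the seed data is unchanged): per the
+-- MySQL manual, with sql_auto_is_null = 1 the last auto-generated value can be located without
+-- knowing it, using hypothetical table/column names t and auto_col:
+--   INSERT INTO t (auto_col) VALUES (NULL);
+--   SELECT * FROM t WHERE auto_col IS NULL;  -- returns the row that was just inserted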
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15521,'tendbcluster','dbconf','Spider-1','mysqld.sql_mode','STRING','\'\'','\'\'|STRICT|ONLY_FULL_GROUP_BY|','ENUMS',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15522,'tendbcluster','dbconf','Spider-1','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Note that this setting is session level; it is best applied through init_connect. Set to 1 to require that UPDATE and DELETE statements include a WHERE clause or a LIMIT clause','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
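+-- Editor's note (illustrative comment only): one way to apply this session-level setting to every
+-- new non-SUPER connection, as the description above suggests, is via init_connect:
+--   SET GLOBAL init_connect = 'SET SESSION sql_safe_updates = 1';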
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15523,'tendbcluster','dbconf','Spider-1','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15524,'tendbcluster','dbconf','Spider-1','mysqld.sync_binlog','INT','0','0|1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15525,'tendbcluster','dbconf','Spider-1','mysqld.table_definition_cache','INT','768','[400,4096]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of table definitions (from .frm files) that can be stored in the definition cache.','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15526,'tendbcluster','dbconf','Spider-1','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of open tables for all threads. Increasing this value increases the number of file descriptors that mysqld requires.','2023-03-09 17:55:11','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15527,'tendbcluster','dbconf','Spider-1','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15528,'tendbcluster','dbconf','Spider-1','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Set the global server time zone. The default follows SYSTEM. The value can be given in several formats, such as SYSTEM, +8:00, -6:00, or Europe/Helsinki; for more information, consult the MySQL documentation','2023-03-09 17:55:11','2023-03-09 17:55:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15529,'tendbcluster','dbconf','Spider-1','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/tmp',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 18:00:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15530,'tendbcluster','dbconf','Spider-1','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Default transaction isolation level','2023-03-09 17:55:11','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15531,'tendbcluster','dbconf','Spider-1','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on a noninteractive connection before closing it','2023-03-09 17:55:11','2023-04-14 15:42:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15532,'tendbcluster','dbconf','Spider-1','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:11','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15166,'tendbcluster','dbconf','Spider-3','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 19:15:59',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15167,'tendbcluster','dbconf','Spider-3','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15168,'tendbcluster','dbconf','Spider-3','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 19:28:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15169,'tendbcluster','dbconf','Spider-3','mysql.port','INT','{{.Mysqld.Port}}',NULL,'',2,0,0,0,0,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15170,'tendbcluster','dbconf','Spider-3','mysql.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,0,'{{mysqld.datadir}}/mysql.sock',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15171,'tendbcluster','dbconf','Spider-3','mysqld.automatic_sp_privileges','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to automatically grant the EXECUTE and ALTER ROUTINE privileges to the creator of a stored routine','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15172,'tendbcluster','dbconf','Spider-3','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the interval between successive column values.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15173,'tendbcluster','dbconf','Spider-3','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the starting point for the AUTO_INCREMENT column value.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
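+-- Editor's note (illustrative comment only): with auto_increment_increment = 2, a server with
+-- auto_increment_offset = 1 assigns AUTO_INCREMENT values 1, 3, 5, ... while a server with
+-- auto_increment_offset = 2 assigns 2, 4, 6, ..., which is how two co-masters avoid generating
+-- colliding ids.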
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15174,'tendbcluster','dbconf','Spider-3','mysqld.avoid_temporal_upgrade','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether ALTER TABLE implicitly upgrades temporal columns found to be in pre-5.6.4 format (TIME, DATETIME, and TIMESTAMP columns without support for fractional seconds precision)','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15175,'tendbcluster','dbconf','Spider-3','mysqld.back_log','INT','3000','[1,65535]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of outstanding connection requests MySQL can have.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15176,'tendbcluster','dbconf','Spider-3','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}',NULL,'',2,0,0,0,1,'{{mysqld.bind-address}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:01:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15177,'tendbcluster','dbconf','Spider-3','mysqld.binlog_cache_size','INT','2097152','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the cache to hold changes to the binary log during a transaction','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15178,'tendbcluster','dbconf','Spider-3','mysqld.binlog_checksum','STRING','CRC32','NONE| CRC32 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Include a checksum for log events in the binary log. NONE indicates that events are verified by length only.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15179,'tendbcluster','dbconf','Spider-3','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What form of binary logging the master will use.','2023-03-09 17:55:05','2023-04-19 14:31:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15180,'tendbcluster','dbconf','Spider-3','mysqld.binlog_order_commits','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When enabled on a master (the default), transactions are externalized in the same order as they are written to the binary log','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15181,'tendbcluster','dbconf','Spider-3','mysqld.binlog_rows_query_log_events','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This system variable affects row-based logging only. When enabled, it causes the server to write informational log events such as row query log events into its binary log','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15182,'tendbcluster','dbconf','Spider-3','mysqld.binlog_row_image','STRING','FULL','FULL| MINIMAL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the format in which rows are logged.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15183,'tendbcluster','dbconf','Spider-3','mysqld.binlog_stmt_cache_size','INT','32768','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the statement cache for updates to non-transactional engines for the binary log.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15184,'tendbcluster','dbconf','Spider-3','mysqld.block_encryption_mode','STRING','AES-128-ECB','AES-128-ECB| AES-192-ECB| AES-256-ECB| AES-128-CBC| AES-192-CBC| AES-256-CBC ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls the block encryption mode for block-based algorithms such as AES. It affects encryption for AES_ENCRYPT() and AES_DECRYPT().','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15185,'tendbcluster','dbconf','Spider-3','mysqld.bulk_insert_buffer_size','INT','8388608','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'MyISAM uses a special tree-like cache to make bulk inserts faster for INSERT ... SELECT, INSERT ... VALUES (...), (...), ..., and LOAD DATA when adding data to nonempty tables','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15186,'tendbcluster','dbconf','Spider-3','mysqld.character_set_filesystem','STRING','BINARY','UTF8| LATIN1| GBK| BINARY ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The file system character set. It is best not to change it.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15187,'tendbcluster','dbconf','Spider-3','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,'Specify default server character set','2023-03-09 17:55:05','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15188,'tendbcluster','dbconf','Spider-3','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,'{{mysqld.collation_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15189,'tendbcluster','dbconf','Spider-3','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If AUTO (the default), MySQL permits INSERT and SELECT statements to run concurrently for MyISAM tables that have no free blocks in the middle of the data file.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15190,'tendbcluster','dbconf','Spider-3','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15191,'tendbcluster','dbconf','Spider-3','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/data',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:00:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15192,'tendbcluster','dbconf','Spider-3','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-04-17 14:26:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15193,'tendbcluster','dbconf','Spider-3','mysqld.default_password_lifetime','INT','0','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable defines the global automatic password expiration policy. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15195,'tendbcluster','dbconf','Spider-3','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-04-07 15:35:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15196,'tendbcluster','dbconf','Spider-3','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default mode value to use for the WEEK() function','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15197,'tendbcluster','dbconf','Spider-3','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15198,'tendbcluster','dbconf','Spider-3','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How long an INSERT DELAYED thread should wait for INSERT statements before terminating.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15199,'tendbcluster','dbconf','Spider-3','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15200,'tendbcluster','dbconf','Spider-3','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This option applies only to MyISAM tables. It can have one of the following values to affect handling of the DELAY_KEY_WRITE table option that can be used in CREATE TABLE statements.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15201,'tendbcluster','dbconf','Spider-3','mysqld.disconnect_on_expired_password','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This variable controls how the server handles clients with expired passwords','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15202,'tendbcluster','dbconf','Spider-3','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the number of digits by which to increase the scale of the result of division operations performed with the / operator','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
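+-- Editor's note (illustrative comment only): per the MySQL manual, with the default
+-- div_precision_increment = 4:
+--   SELECT 1/7;                        -- 0.1429
+--   SET div_precision_increment = 12;
+--   SELECT 1/7;                        -- 0.142857142857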
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15203,'tendbcluster','dbconf','Spider-3','mysqld.end_markers_in_json','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether optimizer JSON output should add end markers. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15204,'tendbcluster','dbconf','Spider-3','mysqld.eq_range_index_dive_limit','INT','200','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The optimizer will use existing index statistics instead of doing index dives for equality ranges if the number of equality ranges for the index is larger than or equal to this number. If set to 0, index dives are always used.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15205,'tendbcluster','dbconf','Spider-3','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the status of the Event Scheduler','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15206,'tendbcluster','dbconf','Spider-3','mysqld.expire_logs_days','INT','60','[0, 99]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-04-26 20:27:21',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15207,'tendbcluster','dbconf','Spider-3','mysqld.explicit_defaults_for_timestamp','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This option causes CREATE TABLE to create all TIMESTAMP columns as NULL with DEFAULT NULL attribute. Without this option, TIMESTAMP columns are NOT NULL and have implicit DEFAULT clauses.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15208,'tendbcluster','dbconf','Spider-3','mysqld.flush_time','INT','0','[0,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this is set to a nonzero value, all tables are closed every flush_time seconds to free up resources and synchronize unflushed data to disk. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15209,'tendbcluster','dbconf','Spider-3','mysqld.ft_max_word_len','INT','84','[10,4294967295]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The maximum length of the word to be included in a MyISAM FULLTEXT index.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15210,'tendbcluster','dbconf','Spider-3','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15211,'tendbcluster','dbconf','Spider-3','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of best matches to use for query expansion','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15212,'tendbcluster','dbconf','Spider-3','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted result length in bytes for the GROUP_CONCAT() function','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15213,'tendbcluster','dbconf','Spider-3','mysqld.host_cache_size','INT','644','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the internal host cache','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15214,'tendbcluster','dbconf','Spider-3','mysqld.init_connect','STRING','\"insert into infodba_schema.conn_log values(connection_id(),now(),user(),current_user(),\'{{.Mysqld.BindAddress}}\');\"','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:55:05','2023-04-17 17:11:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15215,'tendbcluster','dbconf','Spider-3','mysqld.innodb_adaptive_flushing','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to dynamically adjust the rate of flushing dirty pages in the InnoDB buffer pool based on the workload.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15216,'tendbcluster','dbconf','Spider-3','mysqld.innodb_adaptive_flushing_lwm','INT','10','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the low water mark representing percentage of redo log capacity at which adaptive flushing is enabled','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15217,'tendbcluster','dbconf','Spider-3','mysqld.innodb_adaptive_hash_index','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the InnoDB adaptive hash index is enabled or disabled.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15218,'tendbcluster','dbconf','Spider-3','mysqld.innodb_adaptive_max_sleep_delay','INT','150000','[1,1000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Permits InnoDB to automatically adjust the value of innodb_thread_sleep_delay up or down according to the current workload.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15219,'tendbcluster','dbconf','Spider-3','mysqld.innodb_alter_table_default_algorithm','STRING','INPLACE','INPLACE| INSTANT ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The INSTANT algorithm avoids copying table data, allowing columns to be added to large tables quickly.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15220,'tendbcluster','dbconf','Spider-3','mysqld.innodb_async_truncate_size','INT','128','[128,168]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount, in MB, by which an asynchronous DROP TABLE truncates the file in the background on each pass.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15221,'tendbcluster','dbconf','Spider-3','mysqld.innodb_async_truncate_work_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Switch that enables asynchronous deletion of large tables.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15222,'tendbcluster','dbconf','Spider-3','mysqld.innodb_autoextend_increment','INT','64','[1,1000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The increment size (in megabytes) for extending the size of an auto-extending InnoDB system tablespace file when it becomes full.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15223,'tendbcluster','dbconf','Spider-3','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The lock mode to use for generating auto-increment values.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15224,'tendbcluster','dbconf','Spider-3','mysqld.innodb_buffer_pool_dump_at_shutdown','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to record the pages cached in the InnoDB buffer pool when the MySQL server is shut down, to shorten the warmup process at the next restart. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15225,'tendbcluster','dbconf','Spider-3','mysqld.innodb_buffer_pool_dump_pct','INT','25','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the percentage of the most recently used pages for each buffer pool to read out and dump','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15226,'tendbcluster','dbconf','Spider-3','mysqld.innodb_buffer_pool_instances','INT','4','1 | 4 | 8','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of regions that the InnoDB buffer pool is divided into.','2023-03-09 17:55:05','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15227,'tendbcluster','dbconf','Spider-3','mysqld.innodb_buffer_pool_load_at_startup','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies whether the InnoDB buffer pool is automatically warmed up at MySQL server startup by loading the same pages it held at an earlier time.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15228,'tendbcluster','dbconf','Spider-3','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}','','',2,0,0,0,1,'{{mysqld.innodb_buffer_pool_size}}',NULL,NULL,-1,NULL,'The size in bytes of the buffer pool, the memory area where InnoDB caches table and index data.','2023-03-09 17:55:05','2023-03-28 18:01:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15229,'tendbcluster','dbconf','Spider-3','mysqld.innodb_change_buffering','STRING','ALL','NONE| INSERTS| DELETES| CHANGES| PURGES| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether InnoDB performs change buffering','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15230,'tendbcluster','dbconf','Spider-3','mysqld.innodb_change_buffer_max_size','INT','25','[0,50]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15231,'tendbcluster','dbconf','Spider-3','mysqld.innodb_checksum_algorithm','STRING','CRC32','INNODB| CRC32| NONE| STRICT_INNODB| STRICT_CRC32| STRICT_NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how to generate and verify the checksum stored in the disk blocks of InnoDB tablespaces.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15232,'tendbcluster','dbconf','Spider-3','mysqld.innodb_cmp_per_index_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables per-index compression-related statistics in the INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX table.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15233,'tendbcluster','dbconf','Spider-3','mysqld.innodb_commit_concurrency','INT','0','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of threads that can commit at the same time. A value of 0 (the default) permits any number of transactions to commit simultaneously.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15234,'tendbcluster','dbconf','Spider-3','mysqld.innodb_compression_failure_threshold_pct','INT','5','[0,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the compression failure rate threshold for a table, as a percentage, at which point MySQL begins adding padding within compressed pages to avoid expensive compression failures. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15235,'tendbcluster','dbconf','Spider-3','mysqld.innodb_compression_level','INT','6','[0,9]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the level of zlib compression to use for InnoDB compressed tables and indexes. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15236,'tendbcluster','dbconf','Spider-3','mysqld.innodb_compression_pad_pct_max','INT','50','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum percentage that can be reserved as free space within each compressed page','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15237,'tendbcluster','dbconf','Spider-3','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the number of threads that can enter InnoDB concurrently','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15238,'tendbcluster','dbconf','Spider-3','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15239,'tendbcluster','dbconf','Spider-3','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/data',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:00:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15240,'tendbcluster','dbconf','Spider-3','mysqld.innodb_deadlock_detect','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to disable deadlock detection.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15241,'tendbcluster','dbconf','Spider-3','mysqld.innodb_default_row_format','STRING','DYNAMIC','DYNAMIC| COMPACT| REDUNDANT ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The innodb_default_row_format option defines the default row format for InnoDB tables and user-created temporary tables.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15242,'tendbcluster','dbconf','Spider-3','mysqld.innodb_disable_sort_file_cache','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables the operating system file system cache for merge-sort temporary files.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15244,'tendbcluster','dbconf','Spider-3','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15245,'tendbcluster','dbconf','Spider-3','mysqld.innodb_flush_log_at_trx_commit','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines InnoDB transaction durability.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15246,'tendbcluster','dbconf','Spider-3','mysqld.innodb_flush_method','STRING','O_DIRECT','fsync| O_DSYNC| littlesync| nosync| O_DIRECT| O_DIRECT_NO_FSYNC ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the method used to flush data to InnoDB data files and log files','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15247,'tendbcluster','dbconf','Spider-3','mysqld.innodb_flush_neighbors','STRING','0','0| 1| 2 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15248,'tendbcluster','dbconf','Spider-3','mysqld.innodb_flush_sync','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to ignore the innodb_io_capacity setting for bursts of I/O activity that occur at checkpoints.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15249,'tendbcluster','dbconf','Spider-3','mysqld.innodb_ft_cache_size','INT','8000000','[1600000,80000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The memory allocated, in bytes, for the InnoDB FULLTEXT search index cache, which holds a parsed document in memory while creating an InnoDB FULLTEXT index','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15250,'tendbcluster','dbconf','Spider-3','mysqld.innodb_ft_enable_diag_print','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to enable additional full-text search (FTS) diagnostic output. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15251,'tendbcluster','dbconf','Spider-3','mysqld.innodb_ft_enable_stopword','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether stopword filtering is applied when creating an InnoDB FULLTEXT index.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15252,'tendbcluster','dbconf','Spider-3','mysqld.innodb_ft_max_token_size','INT','84','[10,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Maximum character length of words that are stored in an InnoDB FULLTEXT index.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15253,'tendbcluster','dbconf','Spider-3','mysqld.innodb_ft_min_token_size','INT','3','[0,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Minimum length of words that are stored in an InnoDB FULLTEXT index.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15254,'tendbcluster','dbconf','Spider-3','mysqld.innodb_ft_num_word_optimize','INT','2000','[1000,10000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Number of words to process during each OPTIMIZE TABLE operation on an InnoDB FULLTEXT index.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15255,'tendbcluster','dbconf','Spider-3','mysqld.innodb_ft_result_cache_limit','INT','2000000000','[1000000,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The InnoDB full-text search query result cache limit (defined in bytes) per full-text search query or per thread.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15256,'tendbcluster','dbconf','Spider-3','mysqld.innodb_ft_server_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The user-supplied stopword table name.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15257,'tendbcluster','dbconf','Spider-3','mysqld.innodb_ft_sort_pll_degree','INT','2','[1,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of threads used in parallel to index and tokenize text in an InnoDB FULLTEXT index when building a search index.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15258,'tendbcluster','dbconf','Spider-3','mysqld.innodb_ft_total_cache_size','INT','640000000','[32000000,1600000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The total memory allocated, in bytes, for the InnoDB full-text search index cache for all tables. Creating numerous tables, each with a FULLTEXT search index, could consume a significant portion of available memory.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15259,'tendbcluster','dbconf','Spider-3','mysqld.innodb_ft_user_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'User-supplied stopword table name, effective at the session level.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15260,'tendbcluster','dbconf','Spider-3','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The innodb_io_capacity limit is a total limit for all buffer pool instances.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15261,'tendbcluster','dbconf','Spider-3','mysqld.innodb_io_capacity_max','INT','40000','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines an upper limit on the number of I/O operations performed per second by InnoDB background tasks in critical situations.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15262,'tendbcluster','dbconf','Spider-3','mysqld.innodb_large_prefix','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enable this option to allow index key prefixes longer than 767 bytes (up to 3072 bytes) for InnoDB tables that use the DYNAMIC and COMPRESSED row formats.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15263,'tendbcluster','dbconf','Spider-3','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The length of time in seconds an InnoDB transaction waits for a row lock before giving up','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15264,'tendbcluster','dbconf','Spider-3','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15265,'tendbcluster','dbconf','Spider-3','mysqld.innodb_log_checksums','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables checksums for redo log pages.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15266,'tendbcluster','dbconf','Spider-3','mysqld.innodb_log_compressed_pages','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to prevent corruption that could occur if a different version of the zlib compression algorithm is used during recovery.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15267,'tendbcluster','dbconf','Spider-3','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15268,'tendbcluster','dbconf','Spider-3','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15269,'tendbcluster','dbconf','Spider-3','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/log',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:00:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15270,'tendbcluster','dbconf','Spider-3','mysqld.innodb_lru_scan_depth','INT','1024','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'It specifies, per buffer pool instance, how far down the buffer pool LRU page list the page cleaner thread scans looking for dirty pages to flush. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15271,'tendbcluster','dbconf','Spider-3','mysqld.innodb_max_dirty_pages_pct','FLOAT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to flush data from the buffer pool so that the percentage of dirty pages does not exceed this value','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15272,'tendbcluster','dbconf','Spider-3','mysqld.innodb_max_dirty_pages_pct_lwm','FLOAT','0','[0,75]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines a low water mark representing the percentage of dirty pages at which preflushing is enabled to control the dirty page ratio.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15273,'tendbcluster','dbconf','Spider-3','mysqld.innodb_max_purge_lag','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the maximum length of the purge queue.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15274,'tendbcluster','dbconf','Spider-3','mysqld.innodb_max_purge_lag_delay','INT','0','[0,10000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum delay in microseconds for the delay imposed by the innodb_max_purge_lag variable. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15275,'tendbcluster','dbconf','Spider-3','mysqld.innodb_max_undo_log_size','INT','1073741824','[10485760,17179869184]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines a threshold size for undo tablespaces.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15276,'tendbcluster','dbconf','Spider-3','mysqld.innodb_monitor_disable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables InnoDB metrics counters.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15277,'tendbcluster','dbconf','Spider-3','mysqld.innodb_monitor_enable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables InnoDB metrics counters.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15278,'tendbcluster','dbconf','Spider-3','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the approximate percentage of the InnoDB buffer pool used for the old block sublist','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15279,'tendbcluster','dbconf','Spider-3','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how long in milliseconds a block inserted into the old sublist must stay there after its first access before it can be moved to the new sublist.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15280,'tendbcluster','dbconf','Spider-3','mysqld.innodb_online_alter_log_max_size','INT','134217728','[134217728,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum modification log file size for online index creation','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15281,'tendbcluster','dbconf','Spider-3','mysqld.innodb_optimize_fulltext_only','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Changes the way OPTIMIZE TABLE operates on InnoDB tables. Intended to be enabled temporarily, during maintenance operations for InnoDB tables with FULLTEXT indexes. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15282,'tendbcluster','dbconf','Spider-3','mysqld.innodb_page_cleaners','INT','4','[1,64]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of page cleaner threads that flush dirty pages from buffer pool instances.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15283,'tendbcluster','dbconf','Spider-3','mysqld.innodb_print_all_deadlocks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this option is enabled, information about all deadlocks in InnoDB user transactions is recorded in the mysqld error log.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15284,'tendbcluster','dbconf','Spider-3','mysqld.innodb_purge_batch_size','INT','300','[1,5000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The granularity of changes, expressed in units of redo log records, that trigger a purge operation, flushing the changed buffer pool blocks to disk.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15285,'tendbcluster','dbconf','Spider-3','mysqld.innodb_purge_rseg_truncate_frequency','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15286,'tendbcluster','dbconf','Spider-3','mysqld.innodb_purge_threads','INT','4','[1,32]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of background threads devoted to the InnoDB purge operation.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15287,'tendbcluster','dbconf','Spider-3','mysqld.innodb_random_read_ahead','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables the random read-ahead technique for optimizing InnoDB I/O.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15288,'tendbcluster','dbconf','Spider-3','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the sensitivity of linear read-ahead that InnoDB uses to prefetch pages into the buffer pool','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15289,'tendbcluster','dbconf','Spider-3','mysqld.innodb_read_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for read operations in InnoDB.','2023-03-09 17:55:05','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15290,'tendbcluster','dbconf','Spider-3','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'InnoDB rolls back only the last statement on a transaction timeout by default. If innodb_rollback_on_timeout is specified, a transaction timeout causes InnoDB to abort and roll back the entire transaction.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15291,'tendbcluster','dbconf','Spider-3','mysqld.innodb_rollback_segments','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the number of rollback segments used by InnoDB.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15292,'tendbcluster','dbconf','Spider-3','mysqld.innodb_sort_buffer_size','INT','1048576','[65536,67108864]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies the size of sort buffers used to sort data during creation of an InnoDB index.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15293,'tendbcluster','dbconf','Spider-3','mysqld.innodb_spin_wait_delay','INT','6','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum delay between polls for a spin lock.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15294,'tendbcluster','dbconf','Spider-3','mysqld.innodb_stats_auto_recalc','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Causes InnoDB to automatically recalculate persistent statistics after the data in a table is changed substantially.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15295,'tendbcluster','dbconf','Spider-3','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15296,'tendbcluster','dbconf','Spider-3','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this variable is enabled, InnoDB updates statistics when metadata statements such as SHOW TABLE STATUS or SHOW INDEX are run, or when accessing the INFORMATION_SCHEMA tables TABLES or STATISTICS','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15297,'tendbcluster','dbconf','Spider-3','mysqld.innodb_stats_persistent','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether InnoDB index statistics are persisted to disk. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15298,'tendbcluster','dbconf','Spider-3','mysqld.innodb_stats_persistent_sample_pages','INT','20','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of index pages to sample when estimating cardinality and other statistics for an indexed column, such as those calculated by ANALYZE TABLE.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15299,'tendbcluster','dbconf','Spider-3','mysqld.innodb_stats_transient_sample_pages','INT','8','[1,4294967296]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of leaf index pages to sample when calculating transient statistics','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15300,'tendbcluster','dbconf','Spider-3','mysqld.innodb_status_output','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables periodic output for the standard InnoDB Monitor.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15301,'tendbcluster','dbconf','Spider-3','mysqld.innodb_status_output_locks','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables the InnoDB Lock Monitor.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15302,'tendbcluster','dbconf','Spider-3','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When innodb_strict_mode is ON, InnoDB returns errors rather than warnings for certain conditions','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15303,'tendbcluster','dbconf','Spider-3','mysqld.innodb_sync_array_size','INT','1','[1,1024]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the size of the mutex/lock wait array.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15304,'tendbcluster','dbconf','Spider-3','mysqld.innodb_sync_spin_loops','INT','30','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of times a thread waits for an InnoDB mutex to be freed before the thread is suspended.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15305,'tendbcluster','dbconf','Spider-3','mysqld.innodb_table_locks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If autocommit = 0, InnoDB honors LOCK TABLES; MySQL does not return from LOCK TABLES ... WRITE until all other threads have released all their locks to the table. The default value of innodb_table_locks is 1, which means that LOCK TABLES causes InnoDB to lock a table internally.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15306,'tendbcluster','dbconf','Spider-3','mysqld.innodb_thread_concurrency','INT','16','[0,128]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to keep the number of operating system threads concurrently inside InnoDB less than or equal to the limit given by this variable.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15307,'tendbcluster','dbconf','Spider-3','mysqld.innodb_write_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for write operations in InnoDB.','2023-03-09 17:55:05','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15308,'tendbcluster','dbconf','Spider-3','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on an interactive connection before closing it','2023-03-09 17:55:05','2023-04-07 11:55:40',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15309,'tendbcluster','dbconf','Spider-3','mysqld.join_buffer_size','STRING','{MIN(DBInitMemory*128,262144)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Used for ordinary index scans.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
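+-- The braced default above, {MIN(DBInitMemory*128,262144)}, appears to be a formula evaluated when the
+-- config is rendered rather than a literal value; DBInitMemory is assumed to be the instance memory quota,
+-- so join_buffer_size would be computed per instance and capped at 262144 bytes.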
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15310,'tendbcluster','dbconf','Spider-3','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15311,'tendbcluster','dbconf','Spider-3','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default size of key cache blocks','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15312,'tendbcluster','dbconf','Spider-3','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum percentage of warm blocks in key cache','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15313,'tendbcluster','dbconf','Spider-3','mysqld.lc_time_names','STRING','EN_US','JA_JP| PT_BR| EN_US ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the locale that controls the language used to display day and month names and abbreviations.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15314,'tendbcluster','dbconf','Spider-3','mysqld.local_infile','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether LOCAL is supported for LOAD DATA INFILE.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15315,'tendbcluster','dbconf','Spider-3','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable specifies the timeout in seconds for attempts to acquire metadata locks.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15316,'tendbcluster','dbconf','Spider-3','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/binlog/binlog{{port}}.bin',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:00:53',0);
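+-- Rows with flag_status=2, such as log_bin above, pair a Go-template default ({{.Mysqld.Logdir}},
+-- {{.Mysqld.Port}}) with a value_formula; both are presumably substituted per instance when the
+-- configuration file for that node is generated.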
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15318,'tendbcluster','dbconf','Spider-3','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15319,'tendbcluster','dbconf','Spider-3','mysqld.log_output','STRING','FILE','FILE| TABLE| NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The destination or destinations for general query log and slow query log output. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15320,'tendbcluster','dbconf','Spider-3','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether queries that do not use indexes are logged to the slow query log','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15321,'tendbcluster','dbconf','Spider-3','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15322,'tendbcluster','dbconf','Spider-3','mysqld.log_slow_admin_statements','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to include slow administrative statements in the statements written to the slow query log.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15323,'tendbcluster','dbconf','Spider-3','mysqld.log_throttle_queries_not_using_indexes','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If log_queries_not_using_indexes is enabled, the log_throttle_queries_not_using_indexes variable limits the number of such queries per minute that can be written to the slow query log. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15324,'tendbcluster','dbconf','Spider-3','mysqld.log_timestamps','STRING','SYSTEM','UTC| SYSTEM ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls the time zone of timestamps in messages written to the error log, and in general query log and slow query log messages written to files.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15325,'tendbcluster','dbconf','Spider-3','mysqld.log_warnings','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15326,'tendbcluster','dbconf','Spider-3','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a query takes longer than this many seconds, the server increments the Slow_queries status variable','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15327,'tendbcluster','dbconf','Spider-3','mysqld.lower_case_table_names','INT','0','[0,1]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'If set to 0, table names are stored as specified and comparisons are case sensitive. If set to 1, they are stored in lowercase on disk and comparisons are not case sensitive.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15328,'tendbcluster','dbconf','Spider-3','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If set to true, all INSERT, UPDATE, DELETE, and LOCK TABLE WRITE statements wait until there is no pending SELECT or LOCK TABLE READ on the affected table','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15329,'tendbcluster','dbconf','Spider-3','mysqld.master_verify_checksum','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to examine checksums when reading from the binary log.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15330,'tendbcluster','dbconf','Spider-3','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of one packet or any generated/intermediate string.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15331,'tendbcluster','dbconf','Spider-3','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15332,'tendbcluster','dbconf','Spider-3','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15333,'tendbcluster','dbconf','Spider-3','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15334,'tendbcluster','dbconf','Spider-3','mysqld.max_connect_errors','INT','99999999','[1,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If more than this many successive connection requests from a host are interrupted without a successful connection, the server blocks that host from further connections.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15335,'tendbcluster','dbconf','Spider-3','mysqld.max_error_count','INT','64','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of error, warning, and information messages to be stored for display by the SHOW ERRORS and SHOW WARNINGS statements.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15336,'tendbcluster','dbconf','Spider-3','mysqld.max_heap_table_size','INT','67108864','[16384,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size to which user-created MEMORY tables are permitted to grow.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15337,'tendbcluster','dbconf','Spider-3','mysqld.max_length_for_sort_data','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The cutoff on the size of index values that determines which filesort algorithm to use.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15338,'tendbcluster','dbconf','Spider-3','mysqld.max_points_in_geometry','INT','65536','[3,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum value of the points_per_circle argument to the ST_Buffer_Strategy() function.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15339,'tendbcluster','dbconf','Spider-3','mysqld.max_prepared_stmt_count','INT','16382','[0,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable limits the total number of prepared statements in the server.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15340,'tendbcluster','dbconf','Spider-3','mysqld.max_sort_length','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of bytes to use when sorting data values.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15341,'tendbcluster','dbconf','Spider-3','mysqld.max_sp_recursion_depth','INT','0','[0,255]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of times that any given stored procedure may be called recursively. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15342,'tendbcluster','dbconf','Spider-3','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections per user.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15343,'tendbcluster','dbconf','Spider-3','mysqld.metadata_locks_cache_size','INT','1024','[1,1048576]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The size of the metadata locks cache. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15344,'tendbcluster','dbconf','Spider-3','mysqld.min_examined_row_limit','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Queries that examine fewer than this number of rows are not logged to the slow query log. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15345,'tendbcluster','dbconf','Spider-3','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when sorting MyISAM indexes during a REPAIR TABLE or when creating indexes with CREATE INDEX or ALTER TABLE','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15346,'tendbcluster','dbconf','Spider-3','mysqld.mysql_native_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether the mysql_native_password built-in authentication plugin supports proxy users.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15347,'tendbcluster','dbconf','Spider-3','mysqld.net_buffer_length','INT','16384','[1024,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the client thread buffer length.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15348,'tendbcluster','dbconf','Spider-3','mysqld.net_read_timeout','INT','30','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from a connection before aborting the read.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15349,'tendbcluster','dbconf','Spider-3','mysqld.net_retry_count','INT','10','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a read or write on a communication port is interrupted, retry this many times before giving up.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15350,'tendbcluster','dbconf','Spider-3','mysqld.net_write_timeout','INT','60','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for a block to be written to a connection before aborting the write.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15351,'tendbcluster','dbconf','Spider-3','mysqld.ngram_token_size','INT','2','[1,10]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the n-gram token size for the n-gram full-text parser; set it to the size of the largest token that you want to search for.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15352,'tendbcluster','dbconf','Spider-3','mysqld.optimizer_prune_level','STRING','1','0| 1 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the heuristics applied during query optimization to prune less-promising partial plans from the optimizer search space.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15353,'tendbcluster','dbconf','Spider-3','mysqld.optimizer_search_depth','INT','62','[0,62]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum depth of search performed by the query optimizer.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15354,'tendbcluster','dbconf','Spider-3','mysqld.optimizer_switch','STRING','INDEX_MERGE=ON,INDEX_MERGE_UNION=ON,INDEX_MERGE_SORT_UNION=ON,INDEX_MERGE_INTERSECTION=ON,ENGINE_CONDITION_PUSHDOWN=ON,INDEX_CONDITION_PUSHDOWN=ON,MRR=ON,MRR_COST_BASED=ON,BLOCK_NESTED_LOOP=ON,BATCHED_KEY_ACCESS=OFF,MATERIALIZATION=ON,SEMIJOIN=ON,LOOSESCAN=ON,FIRSTMATCH=ON,DUPLICATEWEEDOUT=ON,SUBQUERY_MATERIALIZATION_COST_BASED=ON,USE_INDEX_EXTENSIONS=ON,CONDITION_FANOUT_FILTER=ON,DERIVED_MERGE=ON','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls optimizer behavior. Must be configured with reference to the official documentation.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15355,'tendbcluster','dbconf','Spider-3','mysqld.optimizer_trace_limit','INT','1','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of optimizer traces to display. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15356,'tendbcluster','dbconf','Spider-3','mysqld.optimizer_trace_max_mem_size','INT','16384','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum cumulative size of stored optimizer traces. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15357,'tendbcluster','dbconf','Spider-3','mysqld.optimizer_trace_offset','INT','-1','[-2147483648,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The offset of optimizer traces to display.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15358,'tendbcluster','dbconf','Spider-3','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Whether to enable the Performance Schema.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15359,'tendbcluster','dbconf','Spider-3','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15360,'tendbcluster','dbconf','Spider-3','mysqld.preload_buffer_size','INT','32768','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when preloading indexes.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15361,'tendbcluster','dbconf','Spider-3','mysqld.query_alloc_block_size','INT','8192','[1024,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The allocation size of memory blocks that are allocated for objects created during statement parsing and execution.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15362,'tendbcluster','dbconf','Spider-3','mysqld.query_cache_limit','INT','1048576','[1,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Do not cache results that are larger than this number of bytes. The default value is 1MB.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15363,'tendbcluster','dbconf','Spider-3','mysqld.query_cache_min_res_unit','INT','4096','[512,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum size (in bytes) for blocks allocated by the query cache. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15364,'tendbcluster','dbconf','Spider-3','mysqld.query_cache_size','INT','0','[0,104857600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount of memory allocated for caching query results. By default, the query cache is disabled.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15365,'tendbcluster','dbconf','Spider-3','mysqld.query_cache_type','STRING','OFF','OFF| ON| DEMAND','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Set the query cache type.','2023-03-09 17:55:05','2023-04-17 14:28:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15366,'tendbcluster','dbconf','Spider-3','mysqld.query_cache_wlock_invalidate','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Invalidate queries in query cache on LOCK for write','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15367,'tendbcluster','dbconf','Spider-3','mysqld.query_prealloc_size','INT','8192','[8192,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the persistent buffer used for statement parsing and execution. This buffer is not freed between statements.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15368,'tendbcluster','dbconf','Spider-3','mysqld.query_response_time_stats','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15369,'tendbcluster','dbconf','Spider-3','mysqld.range_alloc_block_size','INT','4096','[4096,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of blocks that are allocated when doing range optimization.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15370,'tendbcluster','dbconf','Spider-3','mysqld.range_optimizer_max_mem_size','INT','8388608','[0,17179869184]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The limit on memory consumption for the range optimizer. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15371,'tendbcluster','dbconf','Spider-3','mysqld.read_buffer_size','INT','262144','[8200,2147479552]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each thread that does a sequential scan for a MyISAM table allocates a buffer of this size (in bytes) for each table it scans.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15372,'tendbcluster','dbconf','Spider-3','mysqld.read_rnd_buffer_size','INT','524288','[1,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Used for reads from MyISAM tables, and, for any storage engine, for Multi-Range Read optimization.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15373,'tendbcluster','dbconf','Spider-3','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/relay-log/relay-log.bin',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:00:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15374,'tendbcluster','dbconf','Spider-3','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15379,'tendbcluster','dbconf','Spider-3','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,'{{mysqld.server_id}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:01:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15380,'tendbcluster','dbconf','Spider-3','mysqld.session_track_gtids','STRING','OFF','OFF| OWN_GTID| ALL_GTIDS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks GTIDs within the current session and returns them to the client. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15381,'tendbcluster','dbconf','Spider-3','mysqld.session_track_schema','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks changes to the default schema (database) name within the current session and makes this information available to the client when changes occur.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15382,'tendbcluster','dbconf','Spider-3','mysqld.session_track_state_change','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks changes to the state of the current session and notifies the client when state changes occur. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15383,'tendbcluster','dbconf','Spider-3','mysqld.sha256_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the sha256_password built-in authentication plugin supports proxy users.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15385,'tendbcluster','dbconf','Spider-3','mysqld.show_old_temporals','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether SHOW CREATE TABLE output includes comments to flag temporal columns found to be in pre-5.6.4 format (TIME, DATETIME, and TIMESTAMP columns without support for fractional seconds precision).','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15386,'tendbcluster','dbconf','Spider-3','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-09 18:37:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15387,'tendbcluster','dbconf','Spider-3','mysqld.slave_compressed_protocol','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-04-19 15:11:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15388,'tendbcluster','dbconf','Spider-3','mysqld.slave_exec_mode','STRING','STRICT','|STRICT|IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15389,'tendbcluster','dbconf','Spider-3','mysqld.slave_net_timeout','INT','120','[15,300]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from the master before the slave considers the connection broken, aborts the read, and tries to reconnect','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15391,'tendbcluster','dbconf','Spider-3','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enable multithreading on the replica and set the number of applier threads used to execute replicated transactions in parallel.','2023-03-09 17:55:05','2023-04-17 14:58:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15392,'tendbcluster','dbconf','Spider-3','mysqld.slave_rows_search_algorithms','STRING','TABLE_SCAN,INDEX_SCAN','TABLE_SCAN,INDEX_SCAN ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When preparing batches of rows for row-based logging and replication, this variable controls how the rows are searched for matches.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15393,'tendbcluster','dbconf','Spider-3','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If creating a thread takes longer than this many seconds, the server increments the Slow_launch_threads status variable','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15394,'tendbcluster','dbconf','Spider-3','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specify the initial slow query log state.','2023-03-09 17:55:05','2023-04-17 14:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15395,'tendbcluster','dbconf','Spider-3','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/slow-query.log',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:00:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15396,'tendbcluster','dbconf','Spider-3','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/mysql.sock',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15397,'tendbcluster','dbconf','Spider-3','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each session that must perform a sort allocates a buffer of this size. ','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15809,'tendbcluster','dbconf','Spider-3','mysqld.spider_auto_increment_mode_switch','INT','1','1 | 0','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the auto increment is enabled. The value can be 0 (or OFF) to disable or 1 (or ON) to enable. If on, TSpider will generate a global non-continuous unique identity for new rows. The identity is only guaranteed to be incremental on the same TSpider node.','2023-03-09 17:55:05','2023-03-09 18:28:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15810,'tendbcluster','dbconf','Spider-3','mysqld.spider_auto_increment_mode_value','INT','{{.Mysqld.SpiderAutoIncrementModeValue}}','[0, 36]','RANGE',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The start number a TSpider node uses to generate the global non-continuous unique identity. Each TSpider node must use a different value. A valid value can be computed as the node\'s increment value modulo spider_auto_increment_step.','2023-03-09 17:55:05','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15811,'tendbcluster','dbconf','Spider-3','mysqld.spider_auto_increment_step','INT','37','37','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The step of the global non-continuous unique identity generated by a TSpider node. All TSpider nodes in the cluster must use the same value.','2023-03-09 17:55:05','2023-03-09 18:28:45',0);
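+-- Worked example (illustrative note, not part of the original seed data): with
+-- spider_auto_increment_step = 37 and a per-node spider_auto_increment_mode_value
+-- v in [0, 36], a TSpider node hands out identities v, v+37, v+74, ... so up to
+-- 37 TSpider nodes can generate globally unique, non-continuous values without
+-- coordination.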
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15803,'tendbcluster','dbconf','Spider-3','mysqld.spider_bgs_mode','INT','1','1 | 0','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether parallel distribution is enabled. The value can be 0 (or OFF) to disable or 1 (or ON) to enable.','2023-03-09 17:55:05','2023-03-09 18:40:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15804,'tendbcluster','dbconf','Spider-3','mysqld.spider_index_hint_pushdown','STRING','on','on | off','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Switch controlling whether index hints, such as force_index, are pushed down','2023-03-09 17:55:05','2023-03-09 18:40:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15806,'tendbcluster','dbconf','Spider-3','mysqld.spider_max_connections','INT','500','[10, 1000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of connections from TSpider to TenDB. The built-in default of 0 means the connections are unlimited; 200 is generally recommended','2023-03-09 17:55:05','2023-03-09 18:29:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15807,'tendbcluster','dbconf','Spider-3','mysqld.spider_net_read_timeout','INT','60','[10, 1000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Wait timeout in seconds when receiving data from a TenDB node','2023-03-09 17:55:05','2023-03-09 18:29:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15808,'tendbcluster','dbconf','Spider-3','mysqld.spider_net_write_timeout','INT','60','[10, 1000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Wait timeout in seconds when sending data to a TenDB node','2023-03-09 17:55:05','2023-03-09 18:30:02',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15398,'tendbcluster','dbconf','Spider-3','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this variable is set to 1, you can find the most recently inserted automatically generated AUTO_INCREMENT value','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15399,'tendbcluster','dbconf','Spider-3','mysqld.sql_mode','STRING','\'\'','\'\'| NO_TABLE_OPTIONS| NO_ZERO_DATE| ONLY_FULL_GROUP_BY| STRICT_ALL_TABLES| ERROR_FOR_DIVISION_BY_ZERO| HIGH_NOT_PRECEDENCE| IGNORE_SPACE| NO_AUTO_CREATE_USER| NO_AUTO_VALUE_ON_ZERO| NO_BACKSLASH_ESCAPES| NO_UNSIGNED_SUBTRACTION| NO_ENGINE_SUBSTITUTION| ANSI_QUOTES| NO_KEY_OPTIONS| PIPES_AS_CONCAT| ALLOW_INVALID_DATES| NO_DIR_IN_CREATE| NO_ZERO_IN_DATE| REAL_AS_FLOAT| STRICT_TRANS_TABLES| NO_FIELD_OPTIONS ','ENUMS',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The current server SQL mode.','2023-03-09 17:55:05','2023-04-19 11:29:17',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15400,'tendbcluster','dbconf','Spider-3','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Note that this setting applies at the session level, so it is better to set it via init_connect. Set to 1 to require that UPDATE or DELETE statements include a WHERE clause or LIMIT clause','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15401,'tendbcluster','dbconf','Spider-3','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Sets a soft upper limit for the number of cached stored routines per connection.','2023-03-09 17:55:05','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15402,'tendbcluster','dbconf','Spider-3','mysqld.sync_binlog','INT','0','[0,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Sync binlog (MySQL flush to disk or rely on OS).','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15403,'tendbcluster','dbconf','Spider-3','mysqld.table_definition_cache','STRING','{MAX(DBInitMemory*512/1000,2048)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of table definitions (from .frm files) that can be stored in the definition cache.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15404,'tendbcluster','dbconf','Spider-3','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of open tables for all threads. Increasing this value increases the number of file descriptors that mysqld requires.','2023-03-09 17:55:05','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15405,'tendbcluster','dbconf','Spider-3','mysqld.table_open_cache_instances','STRING','{MIN(DBInitMemory/1000,16)}','','',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of table cache instances','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15406,'tendbcluster','dbconf','Spider-3','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How many threads we should keep in a cache for reuse','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15407,'tendbcluster','dbconf','Spider-3','mysqld.thread_handling','STRING','one-thread-per-connection','one-thread-per-connection| pool-of-threads ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The thread-handling model used by the server for connection threads.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15408,'tendbcluster','dbconf','Spider-3','mysqld.thread_pool_oversubscribe','INT','3','[3,32]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Number of additional threads per group of thread pool.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15409,'tendbcluster','dbconf','Spider-3','mysqld.thread_pool_size','STRING','{MIN(DBInitCpu,64)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of thread groups in the thread pool.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15410,'tendbcluster','dbconf','Spider-3','mysqld.thread_stack','INT','524288','[131072,1073741824]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The stack size for each thread.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15411,'tendbcluster','dbconf','Spider-3','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Set the global server time zone. The default follows SYSTEM, and the value can be set in several formats, such as SYSTEM, +8:00, -6:00, or Europe/Helsinki. For more information, consult the MySQL documentation','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15412,'tendbcluster','dbconf','Spider-3','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/tmp',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:00:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15413,'tendbcluster','dbconf','Spider-3','mysqld.tmp_table_size','INT','209715200','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of internal in-memory temporary tables. This variable does not apply to user-created MEMORY tables.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15414,'tendbcluster','dbconf','Spider-3','mysqld.transaction_alloc_block_size','INT','8192','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount in bytes by which to increase a per-transaction memory pool which needs memory.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15415,'tendbcluster','dbconf','Spider-3','mysqld.transaction_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15416,'tendbcluster','dbconf','Spider-3','mysqld.transaction_prealloc_size','INT','4096','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'There is a per-transaction memory pool from which various transaction-related allocations take memory.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15417,'tendbcluster','dbconf','Spider-3','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Default transaction isolation level','2023-03-09 17:55:05','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15418,'tendbcluster','dbconf','Spider-3','mysqld.updatable_views_with_limit','STRING','YES','YES| NO ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls whether updates to a view can be made when the view does not contain all columns of the primary key defined in the underlying table, if the update statement contains a LIMIT clause.','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15419,'tendbcluster','dbconf','Spider-3','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on a noninteractive connection before closing it','2023-03-09 17:55:05','2023-03-09 17:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15420,'tendbcluster','dbconf','Spider-3','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:55:05','2023-03-28 18:01:10',0);
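+-- Note (illustrative, not part of the original seed data): the Spider-3 rows
+-- above encode validation metadata per parameter. value_type_sub 'ENUM' lists
+-- the legal values in value_allowed separated by '|', 'ENUMS' permits a
+-- comma-joined subset of the listed values (e.g. mysqld.sql_mode), and 'RANGE'
+-- uses a '[min,max]' interval. A query of this shape can sanity-check that
+-- every RANGE row carries a bracketed interval:
+--   SELECT id, conf_name, value_allowed FROM tb_config_name_def
+--   WHERE namespace = 'tendbcluster' AND conf_file = 'Spider-3'
+--     AND value_type_sub = 'RANGE' AND value_allowed NOT LIKE '[%,%]';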
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15548,'tendbcluster','dbconf','Tdbctl','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15549,'tendbcluster','dbconf','Tdbctl','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15550,'tendbcluster','dbconf','Tdbctl','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15551,'tendbcluster','dbconf','Tdbctl','mysql.port','INT','{{.Mysqld.Port}}',NULL,'',2,0,0,0,0,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15552,'tendbcluster','dbconf','Tdbctl','mysql.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,0,'{{mysqld.datadir}}/mysql.sock',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
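+-- Note (illustrative assumption, not part of the original seed data): value_default
+-- entries such as '{{.Mysqld.Port}}' look like Go text/template placeholders that
+-- are rendered per instance, while the matching value_formula entries such as
+-- '{{mysqld.port}}' appear to reference the generated config item. Rows carrying
+-- both can be listed with:
+--   SELECT conf_name, value_default, value_formula FROM tb_config_name_def
+--   WHERE namespace = 'tendbcluster' AND conf_file = 'Tdbctl'
+--     AND value_formula IS NOT NULL;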
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15553,'tendbcluster','dbconf','Tdbctl','mysqld.automatic_sp_privileges','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server automatically grants the EXECUTE and ALTER ROUTINE privileges to the creator of a stored routine','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15554,'tendbcluster','dbconf','Tdbctl','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the interval between successive column values.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15555,'tendbcluster','dbconf','Tdbctl','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the starting point for the AUTO_INCREMENT column value.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15556,'tendbcluster','dbconf','Tdbctl','mysqld.avoid_temporal_upgrade','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether ALTER TABLE implicitly upgrades temporal columns in pre-5.6.4 format (TIME, DATETIME, and TIMESTAMP columns without support for fractional seconds precision)','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15557,'tendbcluster','dbconf','Tdbctl','mysqld.back_log','INT','3000','[1,65535]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of outstanding connection requests MySQL can have.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15558,'tendbcluster','dbconf','Tdbctl','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}',NULL,'',2,0,0,0,1,'{{mysqld.bind-address}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15559,'tendbcluster','dbconf','Tdbctl','mysqld.binlog_cache_size','INT','2097152','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the cache to hold changes to the binary log during a transaction','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15560,'tendbcluster','dbconf','Tdbctl','mysqld.binlog_checksum','STRING','CRC32','NONE| CRC32 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Include checksum for log events in the binary log. None indicates only check length.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15561,'tendbcluster','dbconf','Tdbctl','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What form of binary logging the master will use.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15562,'tendbcluster','dbconf','Tdbctl','mysqld.binlog_order_commits','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether, when enabled on a master (the default), transactions are externalized in the same order as they are written to the binary log','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15563,'tendbcluster','dbconf','Tdbctl','mysqld.binlog_rows_query_log_events','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This system variable affects row-based logging only. When enabled, it causes the server to write informational log events such as row query log events into its binary log','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15564,'tendbcluster','dbconf','Tdbctl','mysqld.binlog_row_image','STRING','FULL','FULL| MINIMAL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls what formats that rows should be logged in.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15565,'tendbcluster','dbconf','Tdbctl','mysqld.binlog_stmt_cache_size','INT','32768','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the statement cache for updates to non-transactional engines for the binary log.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15566,'tendbcluster','dbconf','Tdbctl','mysqld.block_encryption_mode','STRING','AES-128-ECB','AES-128-ECB| AES-192-ECB| AES-256-ECB| AES-128-CBC| AES-192-CBC| AES-256-CBC ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls the block encryption mode for block-based algorithms such as AES. It affects encryption for AES_ENCRYPT() and AES_DECRYPT().','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15567,'tendbcluster','dbconf','Tdbctl','mysqld.bulk_insert_buffer_size','INT','8388608','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'MyISAM uses a special tree-like cache to make bulk inserts faster for INSERT ... SELECT, INSERT ... VALUES (...), (...), ..., and LOAD DATA when adding data to nonempty tables','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15568,'tendbcluster','dbconf','Tdbctl','mysqld.character_set_filesystem','STRING','BINARY','UTF8| LATIN1| GBK| BINARY ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The file system character set. It is best not to change it.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15569,'tendbcluster','dbconf','Tdbctl','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,'Specify default server character set','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15570,'tendbcluster','dbconf','Tdbctl','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,'{{mysqld.collation_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15571,'tendbcluster','dbconf','Tdbctl','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If AUTO (the default), MySQL permits INSERT and SELECT statements to run concurrently for MyISAM tables that have no free blocks in the middle of the data file.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15572,'tendbcluster','dbconf','Tdbctl','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15573,'tendbcluster','dbconf','Tdbctl','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/data',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15574,'tendbcluster','dbconf','Tdbctl','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15575,'tendbcluster','dbconf','Tdbctl','mysqld.default_password_lifetime','INT','0','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable defines the global automatic password expiration policy. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15577,'tendbcluster','dbconf','Tdbctl','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15578,'tendbcluster','dbconf','Tdbctl','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default mode value to use for the WEEK() function','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15579,'tendbcluster','dbconf','Tdbctl','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15580,'tendbcluster','dbconf','Tdbctl','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How long an INSERT DELAYED thread should wait for INSERT statements before terminating.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15581,'tendbcluster','dbconf','Tdbctl','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15582,'tendbcluster','dbconf','Tdbctl','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This option applies only to MyISAM tables. It can have one of the following values to affect handling of the DELAY_KEY_WRITE table option that can be used in CREATE TABLE statements.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15583,'tendbcluster','dbconf','Tdbctl','mysqld.disconnect_on_expired_password','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This variable controls how the server handles clients with expired passwords','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15584,'tendbcluster','dbconf','Tdbctl','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the number of digits by which to increase the scale of the result of division operations performed with the / operator','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15585,'tendbcluster','dbconf','Tdbctl','mysqld.end_markers_in_json','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether optimizer JSON output should add end markers. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15820,'tendbcluster','dbconf','Tdbctl','mysqld.enforce-gtid-consistency','STRING','ON','OFF | ON','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Depending on the value of this variable, the server enforces GTID consistency by allowing execution of only statements that can be safely logged using a GTID','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15586,'tendbcluster','dbconf','Tdbctl','mysqld.eq_range_index_dive_limit','INT','200','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The optimizer will use existing index statistics instead of doing index dives for equality ranges if the number of equality ranges for the index is larger than or equal to this number. If set to 0, index dives are always used.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15587,'tendbcluster','dbconf','Tdbctl','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the status of the Event Scheduler','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15588,'tendbcluster','dbconf','Tdbctl','mysqld.expire_logs_days','INT','60','[0, 99]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15589,'tendbcluster','dbconf','Tdbctl','mysqld.explicit_defaults_for_timestamp','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This option causes CREATE TABLE to create all TIMESTAMP columns as NULL with DEFAULT NULL attribute. Without this option, TIMESTAMP columns are NOT NULL and have implicit DEFAULT clauses.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15590,'tendbcluster','dbconf','Tdbctl','mysqld.flush_time','INT','0','[0,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this is set to a nonzero value, all tables are closed every flush_time seconds to free up resources and synchronize unflushed data to disk. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15591,'tendbcluster','dbconf','Tdbctl','mysqld.ft_max_word_len','INT','84','[10,4294967295]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The maximum length of the word to be included in a MyISAM FULLTEXT index.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15592,'tendbcluster','dbconf','Tdbctl','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15593,'tendbcluster','dbconf','Tdbctl','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of best matches to use for query expansion','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15594,'tendbcluster','dbconf','Tdbctl','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted result length in bytes for the GROUP_CONCAT() function','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15819,'tendbcluster','dbconf','Tdbctl','mysqld.gtid_mode','STRING','ON','OFF | ON','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether GTID based logging is enabled and what type of transactions the logs can contain','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15595,'tendbcluster','dbconf','Tdbctl','mysqld.host_cache_size','INT','644','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the internal host cache','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15597,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_adaptive_flushing','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to dynamically adjust the rate of flushing dirty pages in the InnoDB buffer pool based on the workload.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15598,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_adaptive_flushing_lwm','INT','10','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the low water mark representing percentage of redo log capacity at which adaptive flushing is enabled','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15599,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_adaptive_hash_index','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the InnoDB adaptive hash index is enabled or disabled.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15600,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_adaptive_max_sleep_delay','INT','150000','[1,1000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Permits InnoDB to automatically adjust the value of innodb_thread_sleep_delay up or down according to the current workload.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15601,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_alter_table_default_algorithm','STRING','INPLACE','INPLACE| INSTANT ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The INSTANT algorithm avoids data copying, allowing columns to be added to large tables quickly.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15602,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_async_truncate_size','INT','128','[128,168]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size, in MB, by which asynchronous DROP TABLE truncates the file each time in the background.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15603,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_async_truncate_work_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Switch for the asynchronous deletion of large tables.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15604,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_autoextend_increment','INT','64','[1,1000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The increment size (in megabytes) for extending the size of an auto-extending InnoDB system tablespace file when it becomes full.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15605,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The lock mode to use for generating auto-increment values.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15606,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_buffer_pool_dump_at_shutdown','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to record the pages cached in the InnoDB buffer pool when the MySQL server is shut down, to shorten the warmup process at the next restart. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15607,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_buffer_pool_dump_pct','INT','25','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the percentage of the most recently used pages for each buffer pool to read out and dump','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15608,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_buffer_pool_instances','INT','4','1 | 4 | 8','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of regions that the InnoDB buffer pool is divided into.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15609,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_buffer_pool_load_at_startup','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies that the InnoDB buffer pool is automatically warmed up at MySQL server startup by loading the same pages it held at an earlier time.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15610,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}','','',2,0,0,0,1,'{{mysqld.innodb_buffer_pool_size}}',NULL,NULL,-1,NULL,'The size in bytes of the buffer pool, the memory area where InnoDB caches table and index data.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15611,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_change_buffering','STRING','ALL','NONE| INSERTS| DELETES| CHANGES| PURGES| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether InnoDB performs change buffering','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15612,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_change_buffer_max_size','INT','25','[0,50]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15613,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_checksum_algorithm','STRING','CRC32','INNODB| CRC32| NONE| STRICT_INNODB| STRICT_CRC32| STRICT_NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how to generate and verify the checksum stored in the disk blocks of InnoDB tablespaces.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15614,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_cmp_per_index_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables per-index compression-related statistics in the INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX table.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15615,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_commit_concurrency','INT','0','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of threads that can commit at the same time. A value of 0 (the default) permits any number of transactions to commit simultaneously.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15619,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the number of threads that can enter InnoDB concurrently','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15620,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15621,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/data',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15622,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_deadlock_detect','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables deadlock detection.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15623,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_default_row_format','STRING','DYNAMIC','DYNAMIC| COMPACT| REDUNDANT ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The innodb_default_row_format option defines the default row format for InnoDB tables and user-created temporary tables.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15624,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_disable_sort_file_cache','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables the operating system file system cache for merge-sort temporary files.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15626,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15627,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_flush_log_at_trx_commit','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines InnoDB transaction durability.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15628,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_flush_method','STRING','O_DIRECT','fsync| O_DSYNC| littlesync| nosync| O_DIRECT| O_DIRECT_NO_FSYNC ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the method used to flush data to InnoDB data files and log files','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15629,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_flush_neighbors','STRING','0','0| 1| 2 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15630,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_flush_sync','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to ignore the innodb_io_capacity setting for bursts of I/O activity that occur at checkpoints.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15631,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_ft_cache_size','INT','8000000','[1600000,80000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The memory allocated, in bytes, for the InnoDB FULLTEXT search index cache, which holds a parsed document in memory while creating an InnoDB FULLTEXT index','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15632,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_ft_enable_diag_print','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to enable additional full-text search (FTS) diagnostic output. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15633,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_ft_enable_stopword','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Create FTS index with stopword.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15634,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_ft_max_token_size','INT','84','[10,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Maximum character length of words that are stored in an InnoDB FULLTEXT index.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15635,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_ft_min_token_size','INT','3','[0,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Minimum length of words that are stored in an InnoDB FULLTEXT index.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15636,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_ft_num_word_optimize','INT','2000','[1000,10000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Number of words to process during each OPTIMIZE TABLE operation on an InnoDB FULLTEXT index.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15637,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_ft_result_cache_limit','INT','2000000000','[1000000,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The InnoDB full-text search query result cache limit (defined in bytes) per full-text search query or per thread.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15638,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_ft_server_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The user supplied stopword table name.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15639,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_ft_sort_pll_degree','INT','2','[1,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of threads used in parallel to index and tokenize text in an InnoDB FULLTEXT index when building a search index.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15640,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_ft_total_cache_size','INT','640000000','[32000000,1600000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The total memory allocated, in bytes, for the InnoDB full-text search index cache for all tables. Creating numerous tables, each with a FULLTEXT search index, could consume a significant portion of available memory.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15641,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_ft_user_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'User supplied stopword table name, effective in the session level.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15642,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The innodb_io_capacity limit is a total limit for all buffer pool instances.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15643,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_io_capacity_max','INT','40000','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines an upper limit on the number of I/O operations performed per second by InnoDB background tasks in critical situations.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15644,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_large_prefix','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enable this option to allow index key prefixes longer than 767 bytes (up to 3072 bytes) for InnoDB tables that use the DYNAMIC and COMPRESSED row formats.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15645,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The length of time in seconds an InnoDB transaction waits for a row lock before giving up','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15646,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15647,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_log_checksums','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables checksums for redo log pages.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15649,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15650,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15651,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/log',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15652,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_lru_scan_depth','INT','1024','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'It specifies, per buffer pool instance, how far down the buffer pool LRU page list the page cleaner thread scans looking for dirty pages to flush. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15653,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_max_dirty_pages_pct','FLOAT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to flush data from the buffer pool so that the percentage of dirty pages does not exceed this value','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15654,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_max_dirty_pages_pct_lwm','FLOAT','0','[0,75]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines a low water mark representing the percentage of dirty pages at which preflushing is enabled to control the dirty page ratio.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15655,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_max_purge_lag','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the maximum length of the purge queue.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15656,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_max_purge_lag_delay','INT','0','[0,10000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum delay in microseconds for the delay imposed by the innodb_max_purge_lag variable. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15657,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_max_undo_log_size','INT','1073741824','[10485760,17179869184]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines a threshold size for undo tablespaces.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15658,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_monitor_disable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables InnoDB metrics counters.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15659,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_monitor_enable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables InnoDB metrics counters.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15660,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the approximate percentage of the InnoDB buffer pool used for the old block sublist','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15661,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how long in milliseconds a block inserted into the old sublist must stay there after its first access before it can be moved to the new sublist.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15662,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_online_alter_log_max_size','INT','134217728','[134217728,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum modification log file size for online index creation','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15663,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_optimize_fulltext_only','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Changes the way OPTIMIZE TABLE operates on InnoDB tables. Intended to be enabled temporarily, during maintenance operations for InnoDB tables with FULLTEXT indexes. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15664,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_page_cleaners','INT','4','[1,64]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of page cleaner threads that flush dirty pages from buffer pool instances.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15665,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_print_all_deadlocks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this option is enabled, information about all deadlocks in InnoDB user transactions is recorded in the mysqld error log.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15666,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_purge_batch_size','INT','300','[1,5000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The granularity of changes, expressed in units of redo log records, that trigger a purge operation, flushing the changed buffer pool blocks to disk.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15667,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_purge_rseg_truncate_frequency','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15668,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_purge_threads','INT','4','[1,32]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of background threads devoted to the InnoDB purge operation.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15669,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_random_read_ahead','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables the random read-ahead technique for optimizing InnoDB I/O.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15670,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the sensitivity of linear read-ahead that InnoDB uses to prefetch pages into the buffer pool','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15671,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_read_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for read operations in InnoDB.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15672,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'InnoDB rolls back only the last statement on a transaction timeout by default. If innodb_rollback_on_timeout is specified, a transaction timeout causes InnoDB to abort and roll back the entire transaction.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15673,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_rollback_segments','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the number of rollback segments used by InnoDB.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15674,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_sort_buffer_size','INT','1048576','[65536,67108864]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies the size of sort buffers used to sort data during creation of an InnoDB index.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15675,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_spin_wait_delay','INT','6','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum delay between polls for a spin lock.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15676,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_stats_auto_recalc','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Causes InnoDB to automatically recalculate persistent statistics after the data in a table is changed substantially.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15677,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15678,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this variable is enabled, InnoDB updates statistics when metadata statements such as SHOW TABLE STATUS or SHOW INDEX are run, or when accessing the INFORMATION_SCHEMA tables TABLES or STATISTICS','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15679,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_stats_persistent','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether InnoDB index statistics are persisted to disk. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15680,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_stats_persistent_sample_pages','INT','20','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of index pages to sample when estimating cardinality and other statistics for an indexed column, such as those calculated by ANALYZE TABLE.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15681,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_stats_transient_sample_pages','INT','8','[1,4294967296]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of leaf index pages to sample when calculating transient statistics','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15682,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_status_output','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables periodic output for the standard InnoDB Monitor.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15683,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_status_output_locks','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables the InnoDB Lock Monitor.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15684,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When innodb_strict_mode is ON, InnoDB returns errors rather than warnings for certain conditions','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15685,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_sync_array_size','INT','1','[1,1024]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the size of the mutex/lock wait array.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15686,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_sync_spin_loops','INT','30','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of times a thread waits for an InnoDB mutex to be freed before the thread is suspended.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15687,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_table_locks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If autocommit = 0, InnoDB honors LOCK TABLES; MySQL does not return from LOCK TABLES ... WRITE until all other threads have released all their locks on the table. The default value of innodb_table_locks is 1, which means that LOCK TABLES causes InnoDB to ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15688,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_thread_concurrency','INT','16','[0,128]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to keep the number of operating system threads concurrently inside InnoDB less than or equal to the limit given by this variable.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15689,'tendbcluster','dbconf','Tdbctl','mysqld.innodb_write_io_threads','INT','8','[0,32]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for write operations in InnoDB.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15690,'tendbcluster','dbconf','Tdbctl','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on an interactive connection before closing it','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15691,'tendbcluster','dbconf','Tdbctl','mysqld.join_buffer_size','STRING','{MIN(DBInitMemory*128,262144)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum size of the buffer used for plain index scans, range index scans, and joins that do not use indexes.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
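+-- Illustrative sketch of how the value_default formula above might be evaluated; DBInitMemory is
+-- assumed here to be the instance memory size in MB, and the value 1024 is hypothetical:
+--   {MIN(DBInitMemory*128,262144)} -> MIN(1024*128, 262144) = MIN(131072, 262144) = 131072 bytes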
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15692,'tendbcluster','dbconf','Tdbctl','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15693,'tendbcluster','dbconf','Tdbctl','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default size of key cache blocks','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15694,'tendbcluster','dbconf','Tdbctl','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum percentage of warm blocks in key cache','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15695,'tendbcluster','dbconf','Tdbctl','mysqld.lc_time_names','STRING','EN_US','JA_JP| PT_BR| EN_US ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the locale that controls the language used to display day and month names and abbreviations.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15696,'tendbcluster','dbconf','Tdbctl','mysqld.local_infile','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether LOCAL is supported for LOAD DATA INFILE.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15697,'tendbcluster','dbconf','Tdbctl','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable specifies the timeout in seconds for attempts to acquire metadata locks.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15698,'tendbcluster','dbconf','Tdbctl','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/binlog/binlog{{port}}.bin',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
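+-- Illustrative sketch of how the value_formula placeholders above might render; the logdir and
+-- port values below are hypothetical, not taken from this seed:
+--   {{mysqld.logdir}}/binlog/binlog{{port}}.bin -> /data/mysqllog/binlog/binlog20000.bin
+--   (assuming mysqld.logdir = /data/mysqllog and port = 20000)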
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15700,'tendbcluster','dbconf','Tdbctl','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15701,'tendbcluster','dbconf','Tdbctl','mysqld.log_output','STRING','FILE','FILE| TABLE| NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The destination or destinations for general query log and slow query log output. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15702,'tendbcluster','dbconf','Tdbctl','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether queries that do not use indexes are logged to the slow query log','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15703,'tendbcluster','dbconf','Tdbctl','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15704,'tendbcluster','dbconf','Tdbctl','mysqld.log_slow_admin_statements','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to include slow administrative statements in the statements written to the slow query log.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15705,'tendbcluster','dbconf','Tdbctl','mysqld.log_throttle_queries_not_using_indexes','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If log_queries_not_using_indexes is enabled, the log_throttle_queries_not_using_indexes variable limits the number of such queries per minute that can be written to the slow query log. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15706,'tendbcluster','dbconf','Tdbctl','mysqld.log_timestamps','STRING','SYSTEM','UTC| SYSTEM ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls the time zone of timestamps in messages written to the error log, and in general query log and slow query log messages written to files.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15707,'tendbcluster','dbconf','Tdbctl','mysqld.log_warnings','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15708,'tendbcluster','dbconf','Tdbctl','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a query takes longer than this many seconds, the server increments the Slow_queries status variable','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15821,'tendbcluster','dbconf','Tdbctl','mysqld.loose-rpl_semi_sync_master_enabled','INT','1','1 | 0','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether semisynchronous replication is enabled on the source server','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15823,'tendbcluster','dbconf','Tdbctl','mysqld.loose-rpl_semi_sync_master_timeout','INT','10000','[100, 999999]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'A value in milliseconds that controls how long the source waits on a commit for acknowledgment from a replica before timing out and reverting to asynchronous replication','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15822,'tendbcluster','dbconf','Tdbctl','mysqld.loose-rpl_semi_sync_slave_enabled','INT','1','1 | 0','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'controls whether semisynchronous replication is enabled on the replica server','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15709,'tendbcluster','dbconf','Tdbctl','mysqld.lower_case_table_names','INT','0','[0,1]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'If set to 0, table names are stored as specified and comparisons are case sensitive. If set to 1, they are stored in lowercase on disk and comparisons are not case sensitive.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15710,'tendbcluster','dbconf','Tdbctl','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If set to true, all INSERT, UPDATE, DELETE, and LOCK TABLE WRITE statements wait until there is no pending SELECT or LOCK TABLE READ on the affected table','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15711,'tendbcluster','dbconf','Tdbctl','mysqld.master_verify_checksum','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to examine checksums when reading from the binary log.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15712,'tendbcluster','dbconf','Tdbctl','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of one packet or any generated/intermediate string.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15713,'tendbcluster','dbconf','Tdbctl','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15714,'tendbcluster','dbconf','Tdbctl','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15715,'tendbcluster','dbconf','Tdbctl','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15716,'tendbcluster','dbconf','Tdbctl','mysqld.max_connect_errors','INT','99999999','[1,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If more than this many successive connection requests from a host are interrupted without a successful connection, the server blocks that host from further connections.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15717,'tendbcluster','dbconf','Tdbctl','mysqld.max_error_count','INT','64','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of error, warning, and information messages to be stored for display by the SHOW ERRORS and SHOW WARNINGS statements.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15718,'tendbcluster','dbconf','Tdbctl','mysqld.max_heap_table_size','INT','67108864','[16384,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size to which user-created MEMORY tables are permitted to grow.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15719,'tendbcluster','dbconf','Tdbctl','mysqld.max_length_for_sort_data','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The cutoff on the size of index values that determines which filesort algorithm to use.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15720,'tendbcluster','dbconf','Tdbctl','mysqld.max_points_in_geometry','INT','65536','[3,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum value of the points_per_circle argument to the ST_Buffer_Strategy() function.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15721,'tendbcluster','dbconf','Tdbctl','mysqld.max_prepared_stmt_count','INT','16382','[0,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable limits the total number of prepared statements in the server.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15722,'tendbcluster','dbconf','Tdbctl','mysqld.max_sort_length','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of bytes to use when sorting data values.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15723,'tendbcluster','dbconf','Tdbctl','mysqld.max_sp_recursion_depth','INT','0','[0,255]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of times that any given stored procedure may be called recursively. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15724,'tendbcluster','dbconf','Tdbctl','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections per user.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15725,'tendbcluster','dbconf','Tdbctl','mysqld.metadata_locks_cache_size','INT','1024','[1,1048576]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The size of the metadata locks cache. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15726,'tendbcluster','dbconf','Tdbctl','mysqld.min_examined_row_limit','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Queries that examine fewer than this number of rows are not logged to the slow query log. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15727,'tendbcluster','dbconf','Tdbctl','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when sorting MyISAM indexes during a REPAIR TABLE or when creating indexes with CREATE INDEX or ALTER TABLE','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15728,'tendbcluster','dbconf','Tdbctl','mysqld.mysql_native_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether the mysql_native_password built-in authentication plugin supports proxy users.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15729,'tendbcluster','dbconf','Tdbctl','mysqld.net_buffer_length','INT','16384','[1024,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the client thread buffer length.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15730,'tendbcluster','dbconf','Tdbctl','mysqld.net_read_timeout','INT','30','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from a connection before aborting the read.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15731,'tendbcluster','dbconf','Tdbctl','mysqld.net_retry_count','INT','10','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a read or write on a communication port is interrupted, retry this many times before giving up.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15732,'tendbcluster','dbconf','Tdbctl','mysqld.net_write_timeout','INT','60','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for a block to be written to a connection before aborting the write.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15733,'tendbcluster','dbconf','Tdbctl','mysqld.ngram_token_size','INT','2','[1,10]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the n-gram token size for the n-gram full-text parser; set it to the size of the largest token that you want to search for.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15734,'tendbcluster','dbconf','Tdbctl','mysqld.optimizer_prune_level','STRING','1','0| 1 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the heuristics applied during query optimization to prune less-promising partial plans from the optimizer search space.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15735,'tendbcluster','dbconf','Tdbctl','mysqld.optimizer_search_depth','INT','62','[0,62]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum depth of search performed by the query optimizer.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15736,'tendbcluster','dbconf','Tdbctl','mysqld.optimizer_switch','STRING','INDEX_MERGE=ON,INDEX_MERGE_UNION=ON,INDEX_MERGE_SORT_UNION=ON,INDEX_MERGE_INTERSECTION=ON,ENGINE_CONDITION_PUSHDOWN=ON,INDEX_CONDITION_PUSHDOWN=ON,MRR=ON,MRR_COST_BASED=ON,BLOCK_NESTED_LOOP=ON,BATCHED_KEY_ACCESS=OFF,MATERIALIZATION=ON,SEMIJOIN=ON,LOOSESCAN=ON,FIRSTMATCH=ON,DUPLICATEWEEDOUT=ON,SUBQUERY_MATERIALIZATION_COST_BASED=ON,USE_INDEX_EXTENSIONS=ON,CONDITION_FANOUT_FILTER=ON,DERIVED_MERGE=ON','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls optimizer behavior. Must be configured with reference to the official documentation.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
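+-- Illustrative sketch: a composite optimizer_switch value like the default above is applied as a
+-- single comma-separated string of flag=value pairs; at runtime MySQL accepts the flags in
+-- lowercase, e.g.:
+--   SET GLOBAL optimizer_switch = 'index_merge=on,mrr_cost_based=on,batched_key_access=off';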
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15737,'tendbcluster','dbconf','Tdbctl','mysqld.optimizer_trace_limit','INT','1','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of optimizer traces to display. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15738,'tendbcluster','dbconf','Tdbctl','mysqld.optimizer_trace_max_mem_size','INT','16384','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum cumulative size of stored optimizer traces. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15739,'tendbcluster','dbconf','Tdbctl','mysqld.optimizer_trace_offset','INT','-1','[-2147483648,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The offset of optimizer traces to display.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15740,'tendbcluster','dbconf','Tdbctl','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Whether to enable the Performance Schema.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15741,'tendbcluster','dbconf','Tdbctl','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15742,'tendbcluster','dbconf','Tdbctl','mysqld.preload_buffer_size','INT','32768','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when preloading indexes.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15743,'tendbcluster','dbconf','Tdbctl','mysqld.query_alloc_block_size','INT','8192','[1024,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The allocation size of memory blocks that are allocated for objects created during statement parsing and execution.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15749,'tendbcluster','dbconf','Tdbctl','mysqld.query_prealloc_size','INT','8192','[8192,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the persistent buffer used for statement parsing and execution. This buffer is not freed between statements.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15750,'tendbcluster','dbconf','Tdbctl','mysqld.query_response_time_stats','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15751,'tendbcluster','dbconf','Tdbctl','mysqld.range_alloc_block_size','INT','4096','[4096,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of blocks that are allocated when doing range optimization.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15752,'tendbcluster','dbconf','Tdbctl','mysqld.range_optimizer_max_mem_size','INT','8388608','[0,17179869184]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The limit on memory consumption for the range optimizer. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15753,'tendbcluster','dbconf','Tdbctl','mysqld.read_buffer_size','INT','262144','[8200,2147479552]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each thread that does a sequential scan for a MyISAM table allocates a buffer of this size (in bytes) for each table it scans.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15754,'tendbcluster','dbconf','Tdbctl','mysqld.read_rnd_buffer_size','INT','524288','[1,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Used for reads from MyISAM tables, and, for any storage engine, for Multi-Range Read optimization.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15755,'tendbcluster','dbconf','Tdbctl','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/relay-log/relay-log.bin',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15756,'tendbcluster','dbconf','Tdbctl','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15761,'tendbcluster','dbconf','Tdbctl','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,'{{mysqld.server_id}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15762,'tendbcluster','dbconf','Tdbctl','mysqld.session_track_gtids','STRING','OFF','OFF| OWN_GTID| ALL_GTIDS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks GTIDs within the current session and returns them to the client. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15763,'tendbcluster','dbconf','Tdbctl','mysqld.session_track_schema','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks changes to the default schema (database) name within the current session and makes this information available to the client when changes occur.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15764,'tendbcluster','dbconf','Tdbctl','mysqld.session_track_state_change','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks changes to the state of the current session and notifies the client when state changes occur. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15765,'tendbcluster','dbconf','Tdbctl','mysqld.sha256_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the sha256_password built-in authentication plugin supports proxy users.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15767,'tendbcluster','dbconf','Tdbctl','mysqld.show_old_temporals','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether SHOW CREATE TABLE output includes comments to flag temporal columns found to be in pre-5.6.4 format (TIME, DATETIME, and TIMESTAMP columns without support for fractional seconds precision).','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15768,'tendbcluster','dbconf','Tdbctl','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15770,'tendbcluster','dbconf','Tdbctl','mysqld.slave_exec_mode','STRING','STRICT','STRICT | IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15771,'tendbcluster','dbconf','Tdbctl','mysqld.slave_net_timeout','INT','120','[15,300]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from the master before the slave considers the connection broken, aborts the read, and tries to reconnect','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15773,'tendbcluster','dbconf','Tdbctl','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enable multithreading on the replica and set the number of application threads used to execute replicated transactions in parallel.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15774,'tendbcluster','dbconf','Tdbctl','mysqld.slave_rows_search_algorithms','STRING','TABLE_SCAN,INDEX_SCAN','TABLE_SCAN,INDEX_SCAN ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When preparing batches of rows for row-based logging and replication, this variable controls how the rows are searched for matches.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15775,'tendbcluster','dbconf','Tdbctl','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If creating a thread takes longer than this many seconds, the server increments the Slow_launch_threads status variable','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15776,'tendbcluster','dbconf','Tdbctl','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specify the initial slow query log state.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15777,'tendbcluster','dbconf','Tdbctl','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/slow-query.log',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15778,'tendbcluster','dbconf','Tdbctl','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/mysql.sock',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15779,'tendbcluster','dbconf','Tdbctl','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each session that must perform a sort allocates a buffer of this size. ','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15780,'tendbcluster','dbconf','Tdbctl','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this variable is set to 1, then after a statement that successfully inserts an automatically generated AUTO_INCREMENT value, you can find that value with a WHERE auto_col IS NULL query.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15781,'tendbcluster','dbconf','Tdbctl','mysqld.sql_mode','STRING','\'\'','\'\'| NO_TABLE_OPTIONS| NO_ZERO_DATE| ONLY_FULL_GROUP_BY| STRICT_ALL_TABLES| ERROR_FOR_DIVISION_BY_ZERO| HIGH_NOT_PRECEDENCE| IGNORE_SPACE| NO_AUTO_CREATE_USER| NO_AUTO_VALUE_ON_ZERO| NO_BACKSLASH_ESCAPES| NO_UNSIGNED_SUBTRACTION| NO_ENGINE_SUBSTITUTION| ANSI_QUOTES| NO_KEY_OPTIONS| PIPES_AS_CONCAT| ALLOW_INVALID_DATES| NO_DIR_IN_CREATE| NO_ZERO_IN_DATE| REAL_AS_FLOAT| STRICT_TRANS_TABLES| NO_FIELD_OPTIONS ','ENUMS',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The current server SQL mode.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15782,'tendbcluster','dbconf','Tdbctl','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Note that this setting takes effect at the session level, so it is better applied via init_connect. Set to 1 to require that UPDATE and DELETE statements include a WHERE clause or a LIMIT clause','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15783,'tendbcluster','dbconf','Tdbctl','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Sets a soft upper limit for the number of cached stored routines per connection.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15784,'tendbcluster','dbconf','Tdbctl','mysqld.sync_binlog','INT','0','[0,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Sync binlog (MySQL flush to disk or rely on OS).','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15785,'tendbcluster','dbconf','Tdbctl','mysqld.table_definition_cache','STRING','{MAX(DBInitMemory*512/1000,2048)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of table definitions (from .frm files) that can be stored in the definition cache.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15786,'tendbcluster','dbconf','Tdbctl','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of open tables for all threads. Increasing this value increases the number of file descriptors that mysqld requires.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15787,'tendbcluster','dbconf','Tdbctl','mysqld.table_open_cache_instances','STRING','{MIN(DBInitMemory/1000,16)}','','',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of table cache instances','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15812,'tendbcluster','dbconf','Tdbctl','mysqld.tc_is_primary','STRING','OFF','OFF | ON','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When the value is ON, the current node is the primary node and is allowed to execute cluster-related DDL and management statements','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15788,'tendbcluster','dbconf','Tdbctl','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How many threads we should keep in a cache for reuse','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15789,'tendbcluster','dbconf','Tdbctl','mysqld.thread_handling','STRING','one-thread-per-connection','one-thread-per-connection| pool-of-threads ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The thread-handling model used by the server for connection threads.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15790,'tendbcluster','dbconf','Tdbctl','mysqld.thread_pool_oversubscribe','INT','3','[3,32]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Number of additional threads per group of thread pool.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15791,'tendbcluster','dbconf','Tdbctl','mysqld.thread_pool_size','STRING','{MIN(DBInitCpu,64)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of thread groups in the thread pool.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15792,'tendbcluster','dbconf','Tdbctl','mysqld.thread_stack','INT','524288','[131072,1073741824]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The stack size for each thread.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15793,'tendbcluster','dbconf','Tdbctl','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Set the global server time zone. The default follows SYSTEM, and the value can be given in several formats, such as SYSTEM, +8:00, -6:00 or Europe/Helsinki. For more information, please consult the MySQL documentation','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15794,'tendbcluster','dbconf','Tdbctl','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/tmp',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15795,'tendbcluster','dbconf','Tdbctl','mysqld.tmp_table_size','INT','209715200','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of internal in-memory temporary tables. This variable does not apply to user-created MEMORY tables.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15796,'tendbcluster','dbconf','Tdbctl','mysqld.transaction_alloc_block_size','INT','8192','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount in bytes by which to increase a per-transaction memory pool which needs memory.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15797,'tendbcluster','dbconf','Tdbctl','mysqld.transaction_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15798,'tendbcluster','dbconf','Tdbctl','mysqld.transaction_prealloc_size','INT','4096','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'There is a per-transaction memory pool from which various transaction-related allocations take memory.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15799,'tendbcluster','dbconf','Tdbctl','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Default transaction isolation level','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15800,'tendbcluster','dbconf','Tdbctl','mysqld.updatable_views_with_limit','STRING','YES','YES| NO ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls whether updates to a view can be made when the view does not contain all columns of the primary key defined in the underlying table, if the update statement contains a LIMIT clause.','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15801,'tendbcluster','dbconf','Tdbctl','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on a noninteractive connection before closing it','2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15802,'tendbcluster','dbconf','Tdbctl','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2023-03-09 17:57:45','2023-05-10 19:35:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15064,'tendbcluster','deploy','deploy_info','charset','STRING','utf8','utf8|utf8mb4|latin1|gbk','ENUM',1,0,0,0,1,NULL,NULL,'字符集',-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15065,'tendbcluster','deploy','deploy_info','db_version','STRING','MySQL-5.7','MySQL-5.5 | MySQL-5.6 | MySQL-5.7 | MySQL-8.0','ENUM',1,0,0,0,1,NULL,NULL,'DB版本',-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16289,'tendbcluster','deploy','deploy_info','spider_version','STRING','Spider-3','Spider-3 | Spider-1','ENUM',1,0,0,0,1,NULL,NULL,'Spider版本',-1,NULL,NULL,'2023-03-09 17:36:33','2023-05-22 17:04:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15066,'tendbcluster','deploy','deploy_info','storage_engine','STRING','InnoDB','InnoDB','',1,0,0,0,1,NULL,NULL,'存储引擎',-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15067,'tendbcluster','deploy','deploy_info','tolerance_level','STRING','compus','idc|compus|city','ENUM',-1,0,0,0,1,NULL,NULL,'容灾级别',-1,NULL,NULL,'2023-03-09 17:36:33','2023-03-09 17:36:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15881,'tendbcluster','mysql_monitor','items-config.yaml','character-consistency','STRING','{\"enable\":true, \"schedule\":\"0 0 14 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15882,'tendbcluster','mysql_monitor','items-config.yaml','engine','STRING','{\"enable\":true, \"schedule\":\"0 0 12 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15883,'tendbcluster','mysql_monitor','items-config.yaml','ext3-check','STRING','{\"enable\":true, \"schedule\":\"0 0 16 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15884,'tendbcluster','mysql_monitor','items-config.yaml','master-slave-heartbeat','STRING','{\"enable\":true, \"schedule\":\"@every 10s\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15885,'tendbcluster','mysql_monitor','items-config.yaml','mysql-config-diff','STRING','{\"enable\":true, \"schedule\":\"@every 10m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15886,'tendbcluster','mysql_monitor','items-config.yaml','mysql-connlog-report','STRING','{\"enable\":true, \"schedule\":\"0 40 23 * * *\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15887,'tendbcluster','mysql_monitor','items-config.yaml','mysql-connlog-rotate','STRING','{\"enable\":true, \"schedule\":\"0 30 23 * * *\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15888,'tendbcluster','mysql_monitor','items-config.yaml','mysql-connlog-size','STRING','{\"enable\":true, \"schedule\":\"0 0 12 * * *\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15889,'tendbcluster','mysql_monitor','items-config.yaml','mysql-err-critical','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15890,'tendbcluster','mysql_monitor','items-config.yaml','mysql-err-notice','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15891,'tendbcluster','mysql_monitor','items-config.yaml','mysql-inject','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15892,'tendbcluster','mysql_monitor','items-config.yaml','mysql-lock','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15893,'tendbcluster','mysql_monitor','items-config.yaml','rotate-slowlog','STRING','{\"enable\":true, \"schedule\":\"0 55 23 * * *\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15894,'tendbcluster','mysql_monitor','items-config.yaml','routine-definer','STRING','{\"enable\":true, \"schedule\":\"0 0 15 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15895,'tendbcluster','mysql_monitor','items-config.yaml','slave-status','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": [\"repeater\", \"slave\"]}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15896,'tendbcluster','mysql_monitor','items-config.yaml','trigger-definer','STRING','{\"enable\":true, \"schedule\":\"0 0 15 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15897,'tendbcluster','mysql_monitor','items-config.yaml','view-definer','STRING','{\"enable\":true, \"schedule\":\"0 0 15 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:54','2023-03-22 12:35:54',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
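Note on the dump boilerplate above: the /*!NNNNN ... */ blocks that bracket each dump are mysqldump version-gated statements — a server at or above version N.NN.NN executes the body, while older servers treat it as a comment. The 50717 prologue probes performance_schema for the rocksdb_bulk_load session variable and routes the SET through PREPARE/EXECUTE so that servers without RocksDB run a harmless no-op, and the 50112 epilogue restores the previous value the same way. Likewise, the {{.Mysqld.Datadir}}-style defaults and {MAX(...)}/{MIN(...)} formulas in the rows above appear to be deploy-time template placeholders rather than literal values. A minimal sketch of the version-gate pattern, with @feature_supported standing in for the probe result (illustrative only, not part of any migration):

    /*!50717 SET @stmt = IF(@feature_supported,
        'SET SESSION rocksdb_bulk_load = 1',
        'SET @noop = 0') */;
    /*!50717 PREPARE s FROM @stmt */;
    /*!50717 EXECUTE s */;
    /*!50717 DEALLOCATE PREPARE s */;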
diff --git a/dbm-services/common/db-config/assets/migrations/000020_tendbha_data.down.sql b/dbm-services/common/db-config/assets/migrations/000020_tendbha_data.down.sql
new file mode 100644
index 0000000000..453db512cd
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000020_tendbha_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='tendbha';
+DELETE FROM tb_config_name_def WHERE namespace='tendbha' AND (flag_encrypt!=1 or value_default like '{{%');
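This down migration removes the tendbha seed rows but keeps rows with flag_encrypt=1 unless their value_default is a '{{...}}' template, mirroring the WHERE filter recorded in the up dump below; presumably this preserves operator-entered secrets across a rollback while still dropping platform-seeded placeholders. A query to preview what a rollback would leave behind (illustrative only, not part of the migration):

    SELECT conf_file, conf_name
    FROM tb_config_name_def
    WHERE namespace = 'tendbha'
      AND flag_encrypt = 1
      AND value_default NOT LIKE '{{%';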
diff --git a/dbm-services/common/db-config/assets/migrations/000020_tendbha_data.up.sql b/dbm-services/common/db-config/assets/migrations/000020_tendbha_data.up.sql
new file mode 100644
index 0000000000..eaded7115f
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000020_tendbha_data.up.sql
@@ -0,0 +1,1044 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='tendbha'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (135,'tendbha','backup','binlog_rotate.yaml','binlog滚动与备份选项','binlog_rotate.yaml','plat,app,module,cluster','',1,1,1,'',0,0,0,'binlog_rotate.yaml','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (128,'tendbha','backup','dbbackup.ini','备份配置','dbbackup.conf配置项','plat,app,module,cluster','',1,1,0,'',0,0,0,'dbbackup.conf配置项','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (129,'tendbha','backup','dbbackup.options','备份控制选项','dbbackup.ini控制选项','plat,app,module,cluster','',1,1,0,'',0,0,0,'dbbackup.ini控制选项','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (137,'tendbha','checksum','checksum.option','checksum控制选项','checksum.option','plat,app,module,cluster','',1,1,0,'',0,0,0,'checksum.option','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (136,'tendbha','checksum','checksum.yaml','checksum配置','checksum.yaml','plat,app,module,cluster','',1,1,0,'',0,0,0,'checksum.yaml','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (9,'tendbha','dbconf','MySQL-5.6','my.cnf配置','5.6_参数配置','plat,app,module,cluster','cluster',0,0,0,'',0,0,0,'5.6_参数配置','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (10,'tendbha','dbconf','MySQL-5.7','my.cnf配置','5.7_参数配置','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'5.7_参数配置','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (40,'tendbha','dbconf','MySQL-8.0','','8.0_参数配置','plat,app,module,cluster','cluster',0,0,0,'',0,0,0,'MySQL8.0配置','2022-06-02 17:27:34','2023-03-28 21:40:07','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (6,'tendbha','dbha','dbha','DBHA切换配置',NULL,'plat,app,city,module,cluster','',1,1,0,NULL,5,365,0,NULL,'2022-04-25 10:19:22','2023-03-20 21:40:05','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (4,'tendbha','deploy','deploy_info','部署配置',NULL,'plat,app,module,cluster','',0,1,0,NULL,5,365,0,NULL,'2022-04-25 10:19:22','2023-03-20 21:40:05','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (201,'tendbha','mysql_monitor','items-config.yaml','监控配置',NULL,'plat,app,module,cluster','',1,1,1,NULL,5,365,0,NULL,'2023-03-09 17:40:06','2023-03-20 21:40:05','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (39,'tendbha','proxyconf','default','','mysql-proxy配置','plat','',0,1,0,'',0,0,0,'mysql-proxy配置','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (7,'tendbha','sys','sysfile','系统配置',NULL,'plat','',1,1,0,NULL,5,365,0,NULL,'2022-04-25 10:19:22','2023-03-20 21:40:05','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (1,'tendbha','user','tb_app_info','用户配置',NULL,'plat,app,module,cluster','',1,1,0,NULL,NULL,NULL,0,NULL,'2022-04-25 10:19:22','2023-03-20 21:40:05','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='tendbha' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15916,'tendbha','backup','binlog_rotate.yaml','backup_client.cos','STRING','{\n  \"enable\": true,\n  \"with_md5\": true,\n  \"file_tag\": \"INCREMENT_BACKUP\",\n  \"tool_path\": \"cos-client\"\n}','','MAP',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13141,'tendbha','backup','binlog_rotate.yaml','backup_client.ibs','STRING','{\n  \"enable\": false,\n  \"ibs_mode\": \"hdfs\",\n  \"with_md5\": true,\n  \"file_tag\": \"INCREMENT_BACKUP\",\n  \"tool_path\": \"backup_client\"\n}','','MAP',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-04-13 21:58:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14110,'tendbha','backup','binlog_rotate.yaml','crond.api_url','STRING','http://127.0.0.1:9999','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14111,'tendbha','backup','binlog_rotate.yaml','crond.command','STRING','cd /home/mysql/rotate_binlog && ./rotatebinlog -c config.yaml','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14106,'tendbha','backup','binlog_rotate.yaml','crond.item_name','STRING','rotate_binlog','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14108,'tendbha','backup','binlog_rotate.yaml','crond.schedule','STRING','*/5 * * * *','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13134,'tendbha','backup','binlog_rotate.yaml','encrypt.enable','BOOL','false','true | false','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:30:57',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13119,'tendbha','backup','binlog_rotate.yaml','public.keep_policy','STRING','most','most | least','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13121,'tendbha','backup','binlog_rotate.yaml','public.max_binlog_total_size','STRING','200g','[100m, 9999g]','BYTES',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13122,'tendbha','backup','binlog_rotate.yaml','public.max_disk_used_pct','INT','80','[1,99]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13124,'tendbha','backup','binlog_rotate.yaml','public.max_keep_duration','STRING','61d','','DURATION',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13128,'tendbha','backup','binlog_rotate.yaml','public.purge_interval','STRING','4h','','DURATION',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13158,'tendbha','backup','binlog_rotate.yaml','public.rotate_interval','STRING','10m','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13129,'tendbha','backup','binlog_rotate.yaml','report.enable','BOOL','true','true | false','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13130,'tendbha','backup','binlog_rotate.yaml','report.filepath','STRING','/home/mysql/dbareport/mysql/binlog','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13133,'tendbha','backup','binlog_rotate.yaml','report.log_maxage','INT','30','[1, 60]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13132,'tendbha','backup','binlog_rotate.yaml','report.log_maxbackups','INT','10','[1, 30]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13131,'tendbha','backup','binlog_rotate.yaml','report.log_maxsize','INT','5','[1, 10]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13003,'tendbha','backup','dbbackup.ini','BackupClient.DoChecksum','STRING','true','true | false','ENUM',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13001,'tendbha','backup','dbbackup.ini','BackupClient.FileTag','STRING','MYSQL_FULL_BACKUP','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13002,'tendbha','backup','dbbackup.ini','BackupClient.RemoteFileSystem','STRING','hdfs','hdfs | cos','ENUM',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13006,'tendbha','backup','dbbackup.ini','LogicalBackup.ChunkFilesize','INT','2048','[512, 9999999]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'MB','2022-05-26 20:11:23','2023-05-24 21:40:03',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13010,'tendbha','backup','dbbackup.ini','LogicalBackup.DefaultsFile','STRING','','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-25 09:50:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16311,'tendbha','backup','dbbackup.ini','LogicalBackup.DisableCompress','STRING','false','false | true','BOOL',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-05-24 21:45:24','2023-05-24 21:45:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16312,'tendbha','backup','dbbackup.ini','LogicalBackup.ExtraOpt','STRING','--skip-definer','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-05-24 21:45:24','2023-05-25 09:59:19',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13009,'tendbha','backup','dbbackup.ini','LogicalBackup.FlushRetryCount','INT','3','','INT',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13007,'tendbha','backup','dbbackup.ini','LogicalBackup.Regex','STRING','{{.LogicalBackup.Regex}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13008,'tendbha','backup','dbbackup.ini','LogicalBackup.Threads','INT','4','','INT',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:42:40',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13020,'tendbha','backup','dbbackup.ini','LogicalLoad.EnableBinlog','STRING','false','false | true','BOOL',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 22:01:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16321,'tendbha','backup','dbbackup.ini','LogicalLoad.ExtraOpt','STRING','','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:46:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13157,'tendbha','backup','dbbackup.ini','LogicalLoad.IndexFilePath','STRING','/data/dbbak/xxxxx','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13016,'tendbha','backup','dbbackup.ini','LogicalLoad.MysqlCharset','STRING','binary','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13011,'tendbha','backup','dbbackup.ini','LogicalLoad.MysqlHost','STRING','{{.LogicalLoad.MysqlHost}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13017,'tendbha','backup','dbbackup.ini','LogicalLoad.MysqlLoadDir','STRING','/data/dbbak/your_loader_dir','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13015,'tendbha','backup','dbbackup.ini','LogicalLoad.MysqlPasswd','STRING','{{.LogicalLoad.MysqlPasswd}}','','STRING',2,0,0,1,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13013,'tendbha','backup','dbbackup.ini','LogicalLoad.MysqlPort','STRING','{{.LogicalLoad.MysqlPort}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13014,'tendbha','backup','dbbackup.ini','LogicalLoad.MysqlUser','STRING','{{.LogicalLoad.MysqlUser}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13018,'tendbha','backup','dbbackup.ini','LogicalLoad.Regex','STRING','','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13019,'tendbha','backup','dbbackup.ini','LogicalLoad.Threads','INT','2','','INT',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16293,'tendbha','backup','dbbackup.ini','PhysicalBackup.DefaultsFile','STRING','{{.PhysicalBackup.DefaultsFile}}','','',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-25 10:24:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16294,'tendbha','backup','dbbackup.ini','PhysicalBackup.ExtraOpt','STRING','--safe-slave-backup-timeout=60','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:46:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16291,'tendbha','backup','dbbackup.ini','PhysicalBackup.SplitSpeed','INT','500','[0, 2048]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'MB/s','2022-05-26 20:11:23','2023-05-25 10:03:30',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16290,'tendbha','backup','dbbackup.ini','PhysicalBackup.Threads','INT','2','[0, 8]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-10 15:21:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16292,'tendbha','backup','dbbackup.ini','PhysicalBackup.Throttle','INT','100','[0, 200]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 22:07:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16315,'tendbha','backup','dbbackup.ini','PhysicalLoad.CopyBack','STRING','false','false | true','BOOL',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:56:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16317,'tendbha','backup','dbbackup.ini','PhysicalLoad.DefaultsFile','STRING','/etc/my.cnf','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:46:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16319,'tendbha','backup','dbbackup.ini','PhysicalLoad.ExtraOpt','STRING','','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:46:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16316,'tendbha','backup','dbbackup.ini','PhysicalLoad.IndexFilePath','STRING','/xx/xxx.index','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:56:32',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16313,'tendbha','backup','dbbackup.ini','PhysicalLoad.MysqlLoadDir','STRING','/xx/loader_dir','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:57:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16314,'tendbha','backup','dbbackup.ini','PhysicalLoad.Threads','INT','2','[0, 16]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:57:18',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12999,'tendbha','backup','dbbackup.ini','Public.BackupDir','STRING','/data/dbbak/','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16318,'tendbha','backup','dbbackup.ini','Public.BackupId','STRING','{{.Public.BackupId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-25 10:08:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12998,'tendbha','backup','dbbackup.ini','Public.BackupTimeout','STRING','09:00:00','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12997,'tendbha','backup','dbbackup.ini','Public.BackupType','STRING','logical','logical | physical','ENUM',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12990,'tendbha','backup','dbbackup.ini','Public.BillId','INT','{{.Public.BillId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12991,'tendbha','backup','dbbackup.ini','Public.BkBizId','INT','{{.Public.BkBizId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12989,'tendbha','backup','dbbackup.ini','Public.BkCloudId','INT','{{.Public.BkCloudId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12988,'tendbha','backup','dbbackup.ini','Public.ClusterAddress','STRING','{{.Public.ClusterAddress}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16305,'tendbha','backup','dbbackup.ini','Public.ClusterId','STRING','{{.Public.ClusterId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13000,'tendbha','backup','dbbackup.ini','Public.DataSchemaGrant','STRING','{{.Public.DataSchemaGrant}}','All | Schema | Grant | Data','ENUM',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16308,'tendbha','backup','dbbackup.ini','Public.IOLimitMBPerSec','INT','500','[0, 4096]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-15 11:21:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12994,'tendbha','backup','dbbackup.ini','Public.MysqlCharset','STRING','binary','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12983,'tendbha','backup','dbbackup.ini','Public.MysqlHost','STRING','{{.Public.MysqlHost}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12987,'tendbha','backup','dbbackup.ini','Public.MysqlPasswd','STRING','{{.Public.MysqlPasswd}}','','STRING',2,0,0,1,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12984,'tendbha','backup','dbbackup.ini','Public.MysqlPort','INT','{{.Public.MysqlPort}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12993,'tendbha','backup','dbbackup.ini','Public.MysqlRole','STRING','{{.Public.MysqlRole}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12986,'tendbha','backup','dbbackup.ini','Public.MysqlUser','STRING','{{.Public.MysqlUser}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12995,'tendbha','backup','dbbackup.ini','Public.OldFileLeftDay','INT','2','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13115,'tendbha','backup','dbbackup.ini','Public.ResultReportPath','STRING','/home/mysql/dbareport/mysql/dbbackup/result','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'result log dir','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13116,'tendbha','backup','dbbackup.ini','Public.StatusReportPath','STRING','/home/mysql/dbareport/mysql/dbbackup/status','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'status log dir','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12996,'tendbha','backup','dbbackup.ini','Public.TarSizeThreshold','INT','8196','[128, 9999999]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'MB','2022-05-26 20:11:23','2023-05-24 21:40:10',0);
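+-- dbbackup.options: per-cluster backup policy defaults for tendbha (backup type, crontab schedule, ignore lists, and per-role backup scope).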
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13029,'tendbha','backup','dbbackup.options','BackupType','STRING','logical','logical | physical','ENUM',1,0,0,0,0,NULL,'','Backup type',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13023,'tendbha','backup','dbbackup.options','CrontabTime','STRING','3 5 * * *','','STRING',1,0,0,0,0,NULL,'','DB backup start time',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13032,'tendbha','backup','dbbackup.options','Logical.IgnoreDatabases','STRING','mysql,test,infodba_schema,sys','','',1,0,0,0,0,NULL,'','Databases ignored by logical backup',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13033,'tendbha','backup','dbbackup.options','Logical.IgnoreTables','STRING','','','',1,0,0,0,0,NULL,'','Tables ignored by logical backup',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13037,'tendbha','backup','dbbackup.options','Master.DataSchemaGrant','STRING','schema','grant,schema,data,all','ENUMS',1,0,0,0,0,NULL,'','Master backup content',-1,NULL,'','2022-05-26 20:11:23','2023-03-28 20:30:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13038,'tendbha','backup','dbbackup.options','Slave.DataSchemaGrant','STRING','all','grant,schema,data,all','ENUMS',1,0,0,0,0,NULL,'','Slave backup content',-1,NULL,'','2022-05-26 20:11:23','2023-03-28 20:33:03',0);
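+-- checksum: settings for the pt-table-checksum based consistency check; checksum.option schedules the run, checksum.yaml filters its scope.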
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13153,'tendbha','checksum','checksum.option','crond','STRING','0 2 * * 1-5','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Crontab schedule on which the checksum task starts.','2022-04-25 10:00:47','2022-12-23 11:39:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13154,'tendbha','checksum','checksum.option','run_duration','STRING','4h','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum duration a single checksum run is allowed to take.','2022-04-25 10:00:47','2022-12-23 11:33:07',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13142,'tendbha','checksum','checksum.yaml','filter.databases','STRING','','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Databases to include in the checksum.','2022-04-25 10:00:47','2022-12-23 11:32:20',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13146,'tendbha','checksum','checksum.yaml','filter.databases_regex','STRING','*','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Regex matching databases to include in the checksum.','2022-04-25 10:00:47','2022-12-23 11:32:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13144,'tendbha','checksum','checksum.yaml','filter.ignore_databases','STRING','','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Databases to exclude from the checksum.','2022-04-25 10:00:47','2022-12-23 11:32:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13148,'tendbha','checksum','checksum.yaml','filter.ignore_databases_regex','STRING','','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Regex matching databases to exclude from the checksum.','2022-04-25 10:00:47','2022-12-23 11:32:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13145,'tendbha','checksum','checksum.yaml','filter.ignore_tables','STRING','','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Tables to exclude from the checksum.','2022-04-25 10:00:47','2022-12-23 11:32:32',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13149,'tendbha','checksum','checksum.yaml','filter.ignore_tables_regex','STRING','','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Regex matching tables to exclude from the checksum.','2022-04-25 10:00:47','2022-12-23 11:32:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13143,'tendbha','checksum','checksum.yaml','filter.tables','STRING','','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Tables to include in the checksum.','2022-04-25 10:00:47','2022-12-23 11:32:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13147,'tendbha','checksum','checksum.yaml','filter.tables_regex','STRING','*','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Regex matching tables to include in the checksum.','2022-04-25 10:00:47','2022-12-23 11:32:40',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13150,'tendbha','checksum','checksum.yaml','pt_checksum.path','STRING','./pt-table-checksum','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Path to the pt-table-checksum binary.','2022-04-25 10:00:47','2022-12-23 11:32:51',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13151,'tendbha','checksum','checksum.yaml','pt_checksum.replicate','STRING','infodba_schema.checksums','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Table in which pt-table-checksum stores checksum results (--replicate).','2022-04-25 10:00:47','2022-12-23 11:32:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13152,'tendbha','checksum','checksum.yaml','report_path','STRING','./','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Directory where checksum reports are written.','2022-04-25 10:00:47','2022-12-23 11:32:59',0);
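+-- dbconf / MySQL-5.5: mysqld and client parameter definitions for tendbha; templated defaults such as {{.Mysqld.CharacterSetServer}} are placeholders filled in when the config is rendered.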
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10971,'tendbha','dbconf','MySQL-5.5','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 19:15:59',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (203,'tendbha','dbconf','MySQL-5.5','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (204,'tendbha','dbconf','MySQL-5.5','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 19:28:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5333,'tendbha','dbconf','MySQL-5.5','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the interval between successive column values.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5334,'tendbha','dbconf','MySQL-5.5','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the starting point for the AUTO_INCREMENT column value.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (205,'tendbha','dbconf','MySQL-5.5','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}',NULL,'',2,0,0,0,1,'{{mysqld.bind-address}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:01:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (206,'tendbha','dbconf','MySQL-5.5','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What form of binary logging the master will use.','2022-04-25 10:00:47','2023-04-19 14:31:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (207,'tendbha','dbconf','MySQL-5.5','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,'Specify default server character set','2022-04-25 10:00:47','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (208,'tendbha','dbconf','MySQL-5.5','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,'{{mysqld.collation_server}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5335,'tendbha','dbconf','MySQL-5.5','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If AUTO (the default), MySQL permits INSERT and SELECT statements to run concurrently for MyISAM tables that have no free blocks in the middle of the data file.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5336,'tendbha','dbconf','MySQL-5.5','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (209,'tendbha','dbconf','MySQL-5.5','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/data',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (210,'tendbha','dbconf','MySQL-5.5','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-17 14:26:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (211,'tendbha','dbconf','MySQL-5.5','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-07 15:35:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5337,'tendbha','dbconf','MySQL-5.5','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default mode value to use for the WEEK() function','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5339,'tendbha','dbconf','MySQL-5.5','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5340,'tendbha','dbconf','MySQL-5.5','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How long a INSERT DELAYED thread should wait for INSERT statements before terminating.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5341,'tendbha','dbconf','MySQL-5.5','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5338,'tendbha','dbconf','MySQL-5.5','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This option applies only to MyISAM tables. It can have one of the following values to affect handling of the DELAY_KEY_WRITE table option that can be used in CREATE TABLE statements.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5342,'tendbha','dbconf','MySQL-5.5','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the number of digits by which to increase the scale of the result of division operations performed with the / operator','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5343,'tendbha','dbconf','MySQL-5.5','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the status of the Event Scheduler','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (212,'tendbha','dbconf','MySQL-5.5','mysqld.expire_logs_days','INT','61','[0, 99]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-26 20:27:21',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5344,'tendbha','dbconf','MySQL-5.5','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5345,'tendbha','dbconf','MySQL-5.5','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of best matches to use for query expansion','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5346,'tendbha','dbconf','MySQL-5.5','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted result length in bytes for the GROUP_CONCAT() function','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (213,'tendbha','dbconf','MySQL-5.5','mysqld.init_connect','STRING','\"insert into infodba_schema.conn_log values(connection_id(),now(),user(),current_user(),\'{{.Mysqld.BindAddress}}\');\"','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2022-04-25 10:00:47','2023-04-17 17:11:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5347,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The lock mode to use for generating auto-increment values','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (215,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}',NULL,'',2,0,0,0,1,'{{mysqld.innodb_buffer_pool_size}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:01:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5348,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the number of threads that can enter InnoDB concurrently','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (216,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (217,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/data',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (218,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_file_format','STRING','Barracuda','Barracuda','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (219,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-25 21:34:49',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (220,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_flush_log_at_trx_commit','INT','0','0|1|2','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-01 10:24:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (221,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-17 12:33:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (222,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The length of time in seconds an InnoDB transaction waits for a row lock before giving up','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (223,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (224,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-17 12:33:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (225,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (226,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/log',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5349,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_max_dirty_pages_pct','INT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to flush data from the buffer pool so that the percentage of dirty pages does not exceed this value','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5350,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the approximate percentage of the InnoDB buffer pool used for the old block sublist','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5351,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how long in milliseconds a block inserted into the old sublist must stay there after its first access before it can be moved to the new sublist','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5352,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the sensitivity of linear read-ahead that InnoDB uses to prefetch pages into the buffer pool','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (227,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_read_io_threads','INT','8','[0,32]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-01 10:24:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5353,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'InnoDB rolls back only the last statement on a transaction timeout by default','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5354,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5355,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this variable is enabled, InnoDB updates statistics when metadata statements such as SHOW TABLE STATUS or SHOW INDEX are run, or when accessing the INFORMATION_SCHEMA tables TABLES or STATISTICS','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5356,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When innodb_strict_mode is ON, InnoDB returns errors rather than warnings for certain conditions','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (228,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_thread_concurrency','INT','16','[1,32]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-01 10:24:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (229,'tendbha','dbconf','MySQL-5.5','mysqld.innodb_write_io_threads','INT','8','[0,32]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-01 10:24:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (230,'tendbha','dbconf','MySQL-5.5','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on an interactive connection before closing it','2022-04-25 10:00:47','2023-04-07 11:55:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (231,'tendbha','dbconf','MySQL-5.5','mysqld.key_buffer','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5357,'tendbha','dbconf','MySQL-5.5','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5358,'tendbha','dbconf','MySQL-5.5','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default size of key cache blocks','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5359,'tendbha','dbconf','MySQL-5.5','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum percentage of warm blocks in key cache','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5360,'tendbha','dbconf','MySQL-5.5','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable specifies the timeout in seconds for attempts to acquire metadata locks.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (232,'tendbha','dbconf','MySQL-5.5','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/binlog/binlog{{port}}.bin',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (234,'tendbha','dbconf','MySQL-5.5','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5361,'tendbha','dbconf','MySQL-5.5','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether queries that do not use indexes are logged to the slow query log','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (235,'tendbha','dbconf','MySQL-5.5','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (236,'tendbha','dbconf','MySQL-5.5','mysqld.log_warnings','STRING','0',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-01 10:24:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (237,'tendbha','dbconf','MySQL-5.5','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a query takes longer than this many seconds, the server increments the Slow_queries status variable','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (233,'tendbha','dbconf','MySQL-5.5','mysqld.loose_log_bin_compress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (251,'tendbha','dbconf','MySQL-5.5','mysqld.loose_relay_log_uncompress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (238,'tendbha','dbconf','MySQL-5.5','mysqld.lower_case_table_names','INT','0','[0,1]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'If set to 0, table names are stored as specified and comparisons are case sensitive. If set to 1, they are stored in lowercase on disk and comparisons are not case sensitive.','2022-04-25 10:00:47','2022-06-16 21:33:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5362,'tendbha','dbconf','MySQL-5.5','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If set to true, all INSERT, UPDATE, DELETE, and LOCK TABLE WRITE statements wait until there is no pending SELECT or LOCK TABLE READ on the affected table','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (239,'tendbha','dbconf','MySQL-5.5','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (240,'tendbha','dbconf','MySQL-5.5','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (241,'tendbha','dbconf','MySQL-5.5','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (242,'tendbha','dbconf','MySQL-5.5','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections.','2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (243,'tendbha','dbconf','MySQL-5.5','mysqld.max_connect_errors','STRING','99999999',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-01 10:24:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5363,'tendbha','dbconf','MySQL-5.5','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections per user.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5364,'tendbha','dbconf','MySQL-5.5','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when sorting MyISAM indexes during a REPAIR TABLE or when creating indexes with CREATE INDEX or ALTER TABLE','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (244,'tendbha','dbconf','MySQL-5.5','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Whether to enable the Performance Schema.','2022-04-25 10:00:47','2022-06-16 21:33:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (245,'tendbha','dbconf','MySQL-5.5','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5365,'tendbha','dbconf','MySQL-5.5','mysqld.query_cache_limit','INT','1048576','[1,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Do not cache results that are larger than this number of bytes. The default value is 1MB.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (246,'tendbha','dbconf','MySQL-5.5','mysqld.query_cache_size','INT','0','[0,104857600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount of memory allocated for caching query results. By default, the query cache is disabled.','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (247,'tendbha','dbconf','MySQL-5.5','mysqld.query_cache_type','STRING','OFF','OFF| ON| DEMAND','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Set the query cache type.','2022-04-25 10:00:47','2023-04-17 14:28:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5366,'tendbha','dbconf','MySQL-5.5','mysqld.query_cache_wlock_invalidate','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Invalidate queries in query cache on LOCK for write','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (248,'tendbha','dbconf','MySQL-5.5','mysqld.query_response_time_stats','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-01 10:24:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (249,'tendbha','dbconf','MySQL-5.5','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/relay-log/relay-log.bin',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (250,'tendbha','dbconf','MySQL-5.5','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (252,'tendbha','dbconf','MySQL-5.5','mysqld.replicate-wild-ignore-table','STRING','mysql.%,infodba_schema.conn_log',NULL,'STRING',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:12:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (256,'tendbha','dbconf','MySQL-5.5','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,'{{mysqld.server_id}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:01:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (257,'tendbha','dbconf','MySQL-5.5','mysqld.show_compatibility_56','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-01 10:24:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (258,'tendbha','dbconf','MySQL-5.5','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-09 18:37:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (259,'tendbha','dbconf','MySQL-5.5','mysqld.slave_compressed_protocol','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:11:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (260,'tendbha','dbconf','MySQL-5.5','mysqld.slave_exec_mode','STRING','STRICT','|STRICT|IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (261,'tendbha','dbconf','MySQL-5.5','mysqld.slave_parallel_type','STRING','DATABASE','DATABASE| LOGICAL_CLOCK','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-17 14:59:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (262,'tendbha','dbconf','MySQL-5.5','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-17 14:58:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5367,'tendbha','dbconf','MySQL-5.5','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If creating a thread takes longer than this many seconds, the server increments the Slow_launch_threads status variable','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (263,'tendbha','dbconf','MySQL-5.5','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-17 15:00:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (264,'tendbha','dbconf','MySQL-5.5','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,'{{mysqld.slow_query_log_file}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (265,'tendbha','dbconf','MySQL-5.5','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,'{{mysqld.socket}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (266,'tendbha','dbconf','MySQL-5.5','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-23 12:21:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5368,'tendbha','dbconf','MySQL-5.5','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this variable is set to 1, then after an INSERT statement the automatically generated AUTO_INCREMENT value can be found with a WHERE auto_col IS NULL query','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (267,'tendbha','dbconf','MySQL-5.5','mysqld.sql_mode','STRING','\'\'','\'\'|STRICT|ONLY_FULL_GROUP_BY|','ENUMS',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5369,'tendbha','dbconf','MySQL-5.5','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Note that this setting takes effect at the session level, so it is better applied via init_connect. Set to 1 to require that UPDATE and DELETE statements include a WHERE clause or a LIMIT clause','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (268,'tendbha','dbconf','MySQL-5.5','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (269,'tendbha','dbconf','MySQL-5.5','mysqld.sync_binlog','INT','0','0|1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-01 10:24:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5370,'tendbha','dbconf','MySQL-5.5','mysqld.table_definition_cache','INT','768','[400,4096]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of table definitions (from .frm files) that can be stored in the definition cache.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (270,'tendbha','dbconf','MySQL-5.5','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of open tables for all threads. Increasing this value increases the number of file descriptors that mysqld requires.','2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (271,'tendbha','dbconf','MySQL-5.5','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-17 12:33:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5371,'tendbha','dbconf','MySQL-5.5','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Set the global server time zone. The default follows SYSTEM; values can be given in several formats, such as SYSTEM, +8:00, -6:00 or Europe/Helsinki. For more information, consult the MySQL documentation','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (272,'tendbha','dbconf','MySQL-5.5','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/tmp',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (273,'tendbha','dbconf','MySQL-5.5','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Default transaction isolation level','2022-04-25 10:00:47','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (274,'tendbha','dbconf','MySQL-5.5','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on a noninteractive connection before closing it','2022-04-25 10:00:47','2023-04-14 15:43:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10967,'tendbha','dbconf','MySQL-5.5','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10972,'tendbha','dbconf','MySQL-5.6','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 19:15:59',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (76,'tendbha','dbconf','MySQL-5.6','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (77,'tendbha','dbconf','MySQL-5.6','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 19:28:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5181,'tendbha','dbconf','MySQL-5.6','mysqld.automatic_sp_privileges','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to automatically grant the EXECUTE and ALTER ROUTINE privileges to the creator of a stored routine','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5179,'tendbha','dbconf','MySQL-5.6','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the interval between successive column values.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5180,'tendbha','dbconf','MySQL-5.6','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the starting point for the AUTO_INCREMENT column value.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5182,'tendbha','dbconf','MySQL-5.6','mysqld.back_log','INT','3000','[1,65535]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of outstanding connection requests MySQL can have.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (78,'tendbha','dbconf','MySQL-5.6','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}',NULL,'',2,0,0,0,1,'{{mysqld.bind-address}}',NULL,'mysqld.bind-address',0,'',NULL,'2022-04-25 10:00:47','2023-03-28 18:01:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5183,'tendbha','dbconf','MySQL-5.6','mysqld.binlog_cache_size','INT','2097152','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the cache to hold changes to the binary log during a transaction','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5184,'tendbha','dbconf','MySQL-5.6','mysqld.binlog_checksum','STRING','CRC32','NONE| CRC32 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Include a checksum for log events in the binary log. NONE indicates that only the event length is checked.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (79,'tendbha','dbconf','MySQL-5.6','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,'',0,'','What form of binary logging the master will use.','2022-04-25 10:00:47','2023-04-19 14:31:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5185,'tendbha','dbconf','MySQL-5.6','mysqld.binlog_order_commits','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When enabled on a master (the default), transactions are externalized in the same order as they are written to the binary log','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5187,'tendbha','dbconf','MySQL-5.6','mysqld.binlog_rows_query_log_events','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This system variable affects row-based logging only. When enabled, it causes the server to write informational log events such as row query log events into its binary log','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5186,'tendbha','dbconf','MySQL-5.6','mysqld.binlog_row_image','STRING','FULL','FULL| MINIMAL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the format in which rows are logged.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5188,'tendbha','dbconf','MySQL-5.6','mysqld.binlog_stmt_cache_size','INT','32768','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the statement cache for updates to non-transactional engines for the binary log.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5189,'tendbha','dbconf','MySQL-5.6','mysqld.block_encryption_mode','STRING','AES-128-ECB','AES-128-ECB| AES-192-ECB| AES-256-ECB| AES-128-CBC| AES-192-CBC| AES-256-CBC ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls the block encryption mode for block-based algorithms such as AES. It affects encryption for AES_ENCRYPT() and AES_DECRYPT().','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5190,'tendbha','dbconf','MySQL-5.6','mysqld.bulk_insert_buffer_size','INT','8388608','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'MyISAM uses a special tree-like cache to make bulk inserts faster for INSERT ... SELECT, INSERT ... VALUES (...), (...), ..., and LOAD DATA when adding data to nonempty tables','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5192,'tendbha','dbconf','MySQL-5.6','mysqld.character_set_filesystem','STRING','BINARY','UTF8| LATIN1| GBK| BINARY ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The file system character set. You should not change it.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (80,'tendbha','dbconf','MySQL-5.6','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,'Specify default server character set','2022-04-25 10:00:47','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (81,'tendbha','dbconf','MySQL-5.6','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,'{{mysqld.collation_server}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5193,'tendbha','dbconf','MySQL-5.6','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If AUTO (the default), MySQL permits INSERT and SELECT statements to run concurrently for MyISAM tables that have no free blocks in the middle of the data file.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5194,'tendbha','dbconf','MySQL-5.6','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (82,'tendbha','dbconf','MySQL-5.6','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/data',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (83,'tendbha','dbconf','MySQL-5.6','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-17 14:26:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (84,'tendbha','dbconf','MySQL-5.6','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-07 15:35:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5196,'tendbha','dbconf','MySQL-5.6','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default mode value to use for the WEEK() function','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5198,'tendbha','dbconf','MySQL-5.6','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5199,'tendbha','dbconf','MySQL-5.6','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How long an INSERT DELAYED thread should wait for INSERT statements before terminating.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5200,'tendbha','dbconf','MySQL-5.6','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5197,'tendbha','dbconf','MySQL-5.6','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This option applies only to MyISAM tables. It can have one of the following values to affect handling of the DELAY_KEY_WRITE table option that can be used in CREATE TABLE statements.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5201,'tendbha','dbconf','MySQL-5.6','mysqld.disconnect_on_expired_password','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This variable controls how the server handles clients with expired passwords','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5202,'tendbha','dbconf','MySQL-5.6','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the number of digits by which to increase the scale of the result of division operations performed with the / operator.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5203,'tendbha','dbconf','MySQL-5.6','mysqld.end_markers_in_json','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether optimizer JSON output should add end markers. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5204,'tendbha','dbconf','MySQL-5.6','mysqld.eq_range_index_dive_limit','INT','200','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The optimizer will use existing index statistics instead of doing index dives for equality ranges if the number of equality ranges for the index is larger than or equal to this number. If set to 0, index dives are always used.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5205,'tendbha','dbconf','MySQL-5.6','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the status of the Event Scheduler','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (85,'tendbha','dbconf','MySQL-5.6','mysqld.expire_logs_days','INT','60','[0, 99]','RANGE',1,0,0,0,1,NULL,NULL,'',0,'',NULL,'2022-04-25 10:00:47','2023-04-26 20:27:21',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5206,'tendbha','dbconf','MySQL-5.6','mysqld.explicit_defaults_for_timestamp','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This option causes CREATE TABLE to create all TIMESTAMP columns as NULL with the DEFAULT NULL attribute. Without this option, TIMESTAMP columns are NOT NULL and have implicit DEFAULT clauses.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5207,'tendbha','dbconf','MySQL-5.6','mysqld.flush_time','INT','0','[0,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this is set to a nonzero value, all tables are closed every flush_time seconds to free up resources and synchronize unflushed data to disk. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5208,'tendbha','dbconf','MySQL-5.6','mysqld.ft_max_word_len','INT','84','[10,4294967295]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The maximum length of the word to be included in a MyISAM FULLTEXT index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5209,'tendbha','dbconf','MySQL-5.6','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5210,'tendbha','dbconf','MySQL-5.6','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of best matches to use for query expansion','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5211,'tendbha','dbconf','MySQL-5.6','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted result length in bytes for the GROUP_CONCAT() function','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5212,'tendbha','dbconf','MySQL-5.6','mysqld.host_cache_size','INT','643','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the internal host cache','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (86,'tendbha','dbconf','MySQL-5.6','mysqld.init_connect','STRING','\"insert into infodba_schema.conn_log values(connection_id(),now(),user(),current_user(),\'{{.Mysqld.BindAddress}}\');\"','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2022-04-25 10:00:47','2023-04-17 17:11:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5213,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_adaptive_flushing','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to dynamically adjust the rate of flushing dirty pages in the InnoDB buffer pool based on the workload.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5214,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_adaptive_flushing_lwm','INT','10','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the low water mark representing the percentage of redo log capacity at which adaptive flushing is enabled','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5215,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_adaptive_hash_index','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the InnoDB adaptive hash index is enabled or disabled.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5216,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_adaptive_max_sleep_delay','INT','150000','[1,1000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Permits InnoDB to automatically adjust the value of innodb_thread_sleep_delay up or down according to the current workload.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5217,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_async_truncate_work_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Switch that enables asynchronous deletion of large tables.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5218,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_autoextend_increment','INT','64','[1,1000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The increment size (in megabytes) for extending the size of an auto-extending InnoDB system tablespace file when it becomes full.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5219,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The lock mode to use for generating auto-increment values.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5220,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_buffer_pool_dump_at_shutdown','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to record the pages cached in the InnoDB buffer pool when the MySQL server is shut down, to shorten the warmup process at the next restart. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (87,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_buffer_pool_instances','INT','4','1 | 4 | 8','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of regions that the InnoDB buffer pool is divided into.','2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5221,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_buffer_pool_load_at_startup','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies that the InnoDB buffer pool is automatically warmed up by loading the same pages it held at an earlier time on MySQL server startup.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (88,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}','','',2,0,0,0,1,'{{mysqld.innodb_buffer_pool_size}}',NULL,NULL,-1,NULL,'The size in bytes of the buffer pool, the memory area where InnoDB caches table and index data.','2022-04-25 10:00:47','2023-03-28 18:01:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5223,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_change_buffering','STRING','ALL','NONE| INSERTS| DELETES| CHANGES| PURGES| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether InnoDB performs change buffering','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5222,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_change_buffer_max_size','INT','25','[0,50]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5224,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_checksum_algorithm','STRING','INNODB','INNODB| CRC32| NONE| STRICT_INNODB| STRICT_CRC32| STRICT_NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how to generate and verify the checksum stored in the disk blocks of InnoDB tablespaces.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5225,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_cmp_per_index_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables per-index compression-related statistics in the INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX table.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5226,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_commit_concurrency','INT','0','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of threads that can commit at the same time. A value of 0 (the default) permits any number of transactions to commit simultaneously.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5227,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_compression_failure_threshold_pct','INT','5','[0,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the compression failure rate threshold for a table, as a percentage, at which point MySQL begins adding padding within compressed pages to avoid expensive compression failures. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5228,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_compression_level','INT','6','[0,9]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the level of zlib compression to use for InnoDB compressed tables and indexes. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5229,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_compression_pad_pct_max','INT','50','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum percentage that can be reserved as free space within each compressed page','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5230,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the number of threads that can enter InnoDB concurrently','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (89,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (90,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/data',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5231,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_disable_sort_file_cache','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables the operating system file system cache for merge-sort temporary files.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (91,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_file_format','STRING','Barracuda','Barracuda','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (92,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-01 10:07:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (93,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_flush_log_at_trx_commit','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines InnoDB transaction durability.','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5232,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_flush_method','STRING','O_DIRECT','fsync| O_DSYNC| littlesync| nosync| O_DIRECT| O_DIRECT_NO_FSYNC ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the method used to flush data to InnoDB data files and log files','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5233,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_flush_neighbors','STRING','0','0| 1| 2 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5234,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_ft_cache_size','INT','8000000','[1600000,80000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The memory allocated, in bytes, for the InnoDB FULLTEXT search index cache, which holds a parsed document in memory while creating an InnoDB FULLTEXT index','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5235,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_ft_enable_diag_print','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to enable additional full-text search (FTS) diagnostic output. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5236,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_ft_enable_stopword','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to apply the stopword list when creating an FTS index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5237,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_ft_max_token_size','INT','84','[10,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Maximum character length of words that are stored in an InnoDB FULLTEXT index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5238,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_ft_min_token_size','INT','3','[0,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Minimum length of words that are stored in an InnoDB FULLTEXT index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5239,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_ft_num_word_optimize','INT','2000','[1000,10000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Number of words to process during each OPTIMIZE TABLE operation on an InnoDB FULLTEXT index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5240,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_ft_result_cache_limit','INT','2000000000','[1000000,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The InnoDB full-text search query result cache limit (defined in bytes) per full-text search query or per thread.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5241,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_ft_server_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The user supplied stopword table name.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5242,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_ft_sort_pll_degree','INT','2','[1,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of threads used in parallel to index and tokenize text in an InnoDB FULLTEXT index when building a search index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5243,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_ft_total_cache_size','INT','640000000','[32000000,1600000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The total memory allocated, in bytes, for the InnoDB full-text search index cache for all tables. Creating numerous tables, each with a FULLTEXT search index, could consume a significant portion of available memory.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5244,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_ft_user_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'User-supplied stopword table name, effective at the session level.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (94,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The innodb_io_capacity limit is a total limit for all buffer pool instances.','2022-04-25 10:00:47','2022-06-17 12:33:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5245,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_io_capacity_max','INT','40000','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines an upper limit on the number of I/O operations performed per second by InnoDB background tasks in critical situations.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5246,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_large_prefix','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enable this option to allow index key prefixes longer than 767 bytes (up to 3072 bytes) for InnoDB tables that use the DYNAMIC and COMPRESSED row formats.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (95,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The length of time in seconds an InnoDB transaction waits for a row lock before giving up','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (96,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5247,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_log_compressed_pages','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to prevent corruption that could occur if a different version of the zlib compression algorithm is used during recovery.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (97,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-17 12:33:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (98,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (99,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/log',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5248,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_lru_scan_depth','INT','1024','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'It specifies, per buffer pool instance, how far down the buffer pool LRU page list the page cleaner thread scans looking for dirty pages to flush. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5249,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_max_dirty_pages_pct','INT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to flush data from the buffer pool so that the percentage of dirty pages does not exceed this value','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5250,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_max_dirty_pages_pct_lwm','INT','0','[0,75]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines a low water mark representing the percentage of dirty pages at which preflushing is enabled to control the dirty page ratio.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5251,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_max_purge_lag','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the maximum length of the purge queue.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5252,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_max_purge_lag_delay','INT','0','[0,10000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum delay in microseconds for the delay imposed by the innodb_max_purge_lag variable. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5253,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_monitor_disable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables InnoDB metrics counters.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5254,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_monitor_enable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables InnoDB metrics counters.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5255,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the approximate percentage of the InnoDB buffer pool used for the old block sublist','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5256,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how long in milliseconds a block inserted into the old sublist must stay there after its first access before it can be moved to the new sublist.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5257,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_online_alter_log_max_size','INT','134217728','[134217728,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum modification log file size for online index creation','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5258,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_optimize_fulltext_only','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Changes the way OPTIMIZE TABLE operates on InnoDB tables. Intended to be enabled temporarily, during maintenance operations for InnoDB tables with FULLTEXT indexes. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5259,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_print_all_deadlocks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this option is enabled, information about all deadlocks in InnoDB user transactions is recorded in the mysqld error log.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5260,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_purge_batch_size','INT','300','[1,5000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The granularity of changes, expressed in units of redo log records, that trigger a purge operation, flushing the changed buffer pool blocks to disk.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5261,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_purge_threads','INT','4','[1,32]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of background threads devoted to the InnoDB purge operation.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5262,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_random_read_ahead','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables the random read-ahead technique for optimizing InnoDB I/O.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5263,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the sensitivity of linear read-ahead that InnoDB uses to prefetch pages into the buffer pool','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (100,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_read_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for read operations in InnoDB.','2022-04-25 10:00:47','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5264,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'InnoDB rolls back only the last statement on a transaction timeout by default','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5265,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_rollback_segments','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the number of rollback segments used by InnoDB.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5266,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_sort_buffer_size','INT','1048576','[65536,67108864]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies the size of sort buffers used to sort data during creation of an InnoDB index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5267,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_spin_wait_delay','INT','6','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum delay between polls for a spin lock.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5268,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_stats_auto_recalc','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Causes InnoDB to automatically recalculate persistent statistics after the data in a table is changed substantially.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5269,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5270,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this variable is enabled, InnoDB updates statistics when metadata statements such as SHOW TABLE STATUS or SHOW INDEX are run, or when accessing the INFORMATION_SCHEMA tables TABLES or STATISTICS','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5271,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_stats_persistent','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether InnoDB index statistics are persisted to disk. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5272,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_stats_persistent_sample_pages','INT','20','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of index pages to sample when estimating cardinality and other statistics for an indexed column, such as those calculated by ANALYZE TABLE.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5273,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_stats_transient_sample_pages','INT','8','[1,4294967296]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of leaf index pages to sample when calculating transient statistics','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5274,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_status_output','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables periodic output for the standard InnoDB Monitor.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5275,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_status_output_locks','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables the InnoDB Lock Monitor.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5276,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When innodb_strict_mode is ON, InnoDB returns errors rather than warnings for certain conditions','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5277,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_support_xa','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables InnoDB support for two-phase commit in XA transactions, causing an extra disk flush for transaction preparation. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5278,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_sync_array_size','INT','1','[1,1024]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the size of the mutex/lock wait array.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5279,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_sync_spin_loops','INT','30','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of times a thread waits for an InnoDB mutex to be freed before the thread is suspended.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5280,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_table_locks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If autocommit = 0, InnoDB honors LOCK TABLES; MySQL does not return from LOCK TABLES ... WRITE until all other threads have released all their locks to the table. The default value of innodb_table_locks is 1, which means that LOCK TABLES causes InnoDB to ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (101,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_thread_concurrency','INT','16','[0,128]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to keep the number of operating system threads concurrently inside InnoDB less than or equal to the limit given by this variable.','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (102,'tendbha','dbconf','MySQL-5.6','mysqld.innodb_write_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for write operations in InnoDB.','2022-04-25 10:00:47','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (103,'tendbha','dbconf','MySQL-5.6','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on an interactive connection before closing it','2022-04-25 10:00:47','2023-04-07 11:54:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5281,'tendbha','dbconf','MySQL-5.6','mysqld.join_buffer_size','STRING','{MIN(DBInitMemory*128,262144)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Used for ordinary index scans.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5282,'tendbha','dbconf','MySQL-5.6','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5283,'tendbha','dbconf','MySQL-5.6','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default size of key cache blocks','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5284,'tendbha','dbconf','MySQL-5.6','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum percentage of warm blocks in key cache','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5285,'tendbha','dbconf','MySQL-5.6','mysqld.lc_time_names','STRING','EN_US','JA_JP| PT_BR| EN_US ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the locale that controls the language used to display day and month names and abbreviations.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5286,'tendbha','dbconf','MySQL-5.6','mysqld.local_infile','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether LOCAL is supported for LOAD DATA INFILE.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5287,'tendbha','dbconf','MySQL-5.6','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable specifies the timeout in seconds for attempts to acquire metadata locks.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (105,'tendbha','dbconf','MySQL-5.6','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/binlog/binlog{{port}}.bin',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (107,'tendbha','dbconf','MySQL-5.6','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5288,'tendbha','dbconf','MySQL-5.6','mysqld.log_output','STRING','FILE','FILE| TABLE| NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The destination or destinations for general query log and slow query log output. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5289,'tendbha','dbconf','MySQL-5.6','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether queries that do not use indexes are logged to the slow query log','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (108,'tendbha','dbconf','MySQL-5.6','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5290,'tendbha','dbconf','MySQL-5.6','mysqld.log_slow_admin_statements','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to include slow administrative statements in the statements written to the slow query log.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5291,'tendbha','dbconf','MySQL-5.6','mysqld.log_throttle_queries_not_using_indexes','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If log_queries_not_using_indexes is enabled, the log_throttle_queries_not_using_indexes variable limits the number of such queries per minute that can be written to the slow query log. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (109,'tendbha','dbconf','MySQL-5.6','mysqld.log_warnings','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (110,'tendbha','dbconf','MySQL-5.6','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a query takes longer than this many seconds, the server increments the Slow_queries status variable','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (106,'tendbha','dbconf','MySQL-5.6','mysqld.loose_log_bin_compress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (124,'tendbha','dbconf','MySQL-5.6','mysqld.loose_relay_log_uncompress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (111,'tendbha','dbconf','MySQL-5.6','mysqld.lower_case_table_names','INT','0','[0,1]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'If set to 0, table names are stored as specified and comparisons are case sensitive. If set to 1, they are stored in lowercase on disk and comparisons are not case sensitive.','2022-04-25 10:00:47','2022-06-16 21:32:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5292,'tendbha','dbconf','MySQL-5.6','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If set to true, all INSERT, UPDATE, DELETE, and LOCK TABLE WRITE statements wait until there is no pending SELECT or LOCK TABLE READ on the affected table','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5293,'tendbha','dbconf','MySQL-5.6','mysqld.master_verify_checksum','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to examine checksums when reading from the binary log.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (112,'tendbha','dbconf','MySQL-5.6','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of one packet or any generated/intermediate string.','2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (113,'tendbha','dbconf','MySQL-5.6','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (114,'tendbha','dbconf','MySQL-5.6','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (115,'tendbha','dbconf','MySQL-5.6','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections.','2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (116,'tendbha','dbconf','MySQL-5.6','mysqld.max_connect_errors','INT','99999999','[1,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If more than this many successive connection requests from a host are interrupted without a successful connection, the server blocks that host from further connections.','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5294,'tendbha','dbconf','MySQL-5.6','mysqld.max_error_count','INT','64','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of error, warning, and information messages to be stored for display by the SHOW ERRORS and SHOW WARNINGS statements.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5295,'tendbha','dbconf','MySQL-5.6','mysqld.max_heap_table_size','INT','67108864','[16384,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size to which user-created MEMORY tables are permitted to grow.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5296,'tendbha','dbconf','MySQL-5.6','mysqld.max_length_for_sort_data','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The cutoff on the size of index values that determines which filesort algorithm to use.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5297,'tendbha','dbconf','MySQL-5.6','mysqld.max_prepared_stmt_count','INT','16382','[0,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable limits the total number of prepared statements in the server.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5298,'tendbha','dbconf','MySQL-5.6','mysqld.max_sort_length','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of bytes to use when sorting data values.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5299,'tendbha','dbconf','MySQL-5.6','mysqld.max_sp_recursion_depth','INT','0','[0,255]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5300,'tendbha','dbconf','MySQL-5.6','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections per user.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5301,'tendbha','dbconf','MySQL-5.6','mysqld.metadata_locks_cache_size','INT','1024','[1,1048576]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The size of the metadata locks cache. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5302,'tendbha','dbconf','MySQL-5.6','mysqld.min_examined_row_limit','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Queries that examine fewer than this number of rows are not logged to the slow query log. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5303,'tendbha','dbconf','MySQL-5.6','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when sorting MyISAM indexes during a REPAIR TABLE or when creating indexes with CREATE INDEX or ALTER TABLE','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5304,'tendbha','dbconf','MySQL-5.6','mysqld.net_buffer_length','INT','16384','[1024,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the client thread buffer length.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5305,'tendbha','dbconf','MySQL-5.6','mysqld.net_read_timeout','INT','30','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from a connection before aborting the read.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5306,'tendbha','dbconf','MySQL-5.6','mysqld.net_retry_count','INT','10','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a read or write on a communication port is interrupted, retry this many times before giving up.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5307,'tendbha','dbconf','MySQL-5.6','mysqld.net_write_timeout','INT','60','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for a block to be written to a connection before aborting the write.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5308,'tendbha','dbconf','MySQL-5.6','mysqld.optimizer_prune_level','STRING','1','0| 1 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the heuristics applied during query optimization to prune less-promising partial plans from the optimizer search space.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5309,'tendbha','dbconf','MySQL-5.6','mysqld.optimizer_search_depth','INT','62','[0,62]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum depth of search performed by the query optimizer.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5310,'tendbha','dbconf','MySQL-5.6','mysqld.optimizer_switch','STRING','INDEX_MERGE=ON,INDEX_MERGE_UNION=ON,INDEX_MERGE_SORT_UNION=ON,INDEX_MERGE_INTERSECTION=ON,ENGINE_CONDITION_PUSHDOWN=ON,INDEX_CONDITION_PUSHDOWN=ON,MRR=ON,MRR_COST_BASED=ON,BLOCK_NESTED_LOOP=ON,BATCHED_KEY_ACCESS=OFF,MATERIALIZATION=ON,SEMIJOIN=ON,LOOSESCAN=ON,FIRSTMATCH=ON,SUBQUERY_MATERIALIZATION_COST_BASED=ON,USE_INDEX_EXTENSIONS=ON','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls optimizer behavior. Must be configured with reference to the official documentation.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5311,'tendbha','dbconf','MySQL-5.6','mysqld.optimizer_trace_limit','INT','1','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of optimizer traces to display. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5312,'tendbha','dbconf','MySQL-5.6','mysqld.optimizer_trace_max_mem_size','INT','16384','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum cumulative size of stored optimizer traces. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5313,'tendbha','dbconf','MySQL-5.6','mysqld.optimizer_trace_offset','INT','-1','[-2147483648,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The offset of optimizer traces to display.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (117,'tendbha','dbconf','MySQL-5.6','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Whether to enable the performance schema.','2022-04-25 10:00:47','2022-06-16 21:32:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (118,'tendbha','dbconf','MySQL-5.6','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5314,'tendbha','dbconf','MySQL-5.6','mysqld.preload_buffer_size','INT','32768','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when preloading indexes.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5315,'tendbha','dbconf','MySQL-5.6','mysqld.query_alloc_block_size','INT','8192','[1024,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The allocation size of memory blocks that are allocated for objects created during statement parsing and execution.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5316,'tendbha','dbconf','MySQL-5.6','mysqld.query_cache_limit','INT','1048576','[1,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Do not cache results that are larger than this number of bytes. The default value is 1MB.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (119,'tendbha','dbconf','MySQL-5.6','mysqld.query_cache_size','INT','0','[0,104857600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount of memory allocated for caching query results. By default, the query cache is disabled.','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (120,'tendbha','dbconf','MySQL-5.6','mysqld.query_cache_type','STRING','OFF','OFF| ON| DEMAND','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Set the query cache type.','2022-04-25 10:00:47','2023-04-17 14:28:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5317,'tendbha','dbconf','MySQL-5.6','mysqld.query_cache_wlock_invalidate','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Invalidate queries in query cache on LOCK for write','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5318,'tendbha','dbconf','MySQL-5.6','mysqld.query_prealloc_size','INT','8192','[8192,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the persistent buffer used for statement parsing and execution. This buffer is not freed between statements.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (121,'tendbha','dbconf','MySQL-5.6','mysqld.query_response_time_stats','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-01 09:56:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5319,'tendbha','dbconf','MySQL-5.6','mysqld.read_buffer_size','INT','262144','[8200,2147479552]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each thread that does a sequential scan for a MyISAM table allocates a buffer of this size (in bytes) for each table it scans.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5320,'tendbha','dbconf','MySQL-5.6','mysqld.read_rnd_buffer_size','INT','524288','[1,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Used for reads from MyISAM tables, and, for any storage engine, for Multi-Range Read optimization.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (122,'tendbha','dbconf','MySQL-5.6','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/relay-log/relay-log.bin',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (123,'tendbha','dbconf','MySQL-5.6','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (125,'tendbha','dbconf','MySQL-5.6','mysqld.replicate-wild-ignore-table','STRING','mysql.%,infodba_schema.conn_log',NULL,'STRING',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:12:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (129,'tendbha','dbconf','MySQL-5.6','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,'{{mysqld.server_id}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:01:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (131,'tendbha','dbconf','MySQL-5.6','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-09 18:37:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (132,'tendbha','dbconf','MySQL-5.6','mysqld.slave_compressed_protocol','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:11:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (133,'tendbha','dbconf','MySQL-5.6','mysqld.slave_exec_mode','STRING','STRICT','|STRICT|IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5321,'tendbha','dbconf','MySQL-5.6','mysqld.slave_net_timeout','INT','120','[15,300]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from the master before the slave considers the connection broken, aborts the read, and tries to reconnect','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (134,'tendbha','dbconf','MySQL-5.6','mysqld.slave_parallel_type','STRING','DATABASE','DATABASE| LOGICAL_CLOCK','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether the slave uses database partitioning or information from the master to parallelize transactions. (Default: DATABASE).','2022-04-25 10:00:47','2023-04-17 14:55:08',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (135,'tendbha','dbconf','MySQL-5.6','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enable multithreading on the replica and set the number of applier threads used to execute replicated transactions in parallel.','2022-04-25 10:00:47','2023-04-17 14:58:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5322,'tendbha','dbconf','MySQL-5.6','mysqld.slave_rows_search_algorithms','STRING','TABLE_SCAN,INDEX_SCAN','TABLE_SCAN,INDEX_SCAN ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When preparing batches of rows for row-based logging and replication, this variable controls how the rows are searched for matches.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5323,'tendbha','dbconf','MySQL-5.6','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If creating a thread takes longer than this many seconds, the server increments the Slow_launch_threads status variable','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (136,'tendbha','dbconf','MySQL-5.6','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-17 15:00:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (137,'tendbha','dbconf','MySQL-5.6','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,'{{mysqld.slow_query_log_file}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (138,'tendbha','dbconf','MySQL-5.6','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,'{{mysqld.socket}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (139,'tendbha','dbconf','MySQL-5.6','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each session that must perform a sort allocates a buffer of this size. ','2022-04-25 10:00:47','2022-06-23 12:21:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5324,'tendbha','dbconf','MySQL-5.6','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this variable is set to 1, then after a statement that successfully inserts an automatically generated AUTO_INCREMENT value, you can find that value by issuing a query of the form SELECT * FROM tbl_name WHERE auto_col IS NULL','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (140,'tendbha','dbconf','MySQL-5.6','mysqld.sql_mode','STRING','\'\'','\'\'| NO_ZERO_DATE| ANSI_QUOTES| IGNORE_SPACE| NO_AUTO_VALUE_ON_ZERO| ONLY_FULL_GROUP_BY| STRICT_TRANS_TABLES| ERROR_FOR_DIVISION_BY_ZERO| HIGH_NOT_PRECEDENCE| NO_KEY_OPTIONS| NO_TABLE_OPTIONS| NO_ENGINE_SUBSTITUTION| NO_AUTO_CREATE_USER| NO_FIELD_OPTIONS| NO_UNSIGNED_SUBTRACTION| NO_ZERO_IN_DATE| PIPES_AS_CONCAT| REAL_AS_FLOAT| ALLOW_INVALID_DATES| NO_BACKSLASH_ESCAPES| NO_DIR_IN_CREATE| STRICT_ALL_TABLES ','ENUMS',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The current server SQL mode.','2022-04-25 10:00:47','2023-04-19 11:29:17',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5325,'tendbha','dbconf','MySQL-5.6','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Note that this setting applies at the session level, so you had better set it via init_connect. Set to 1 to require that UPDATE and DELETE statements include a WHERE clause or a LIMIT clause','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (141,'tendbha','dbconf','MySQL-5.6','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (142,'tendbha','dbconf','MySQL-5.6','mysqld.sync_binlog','INT','0','[0,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Sync binlog (MySQL flush to disk or rely on OS).','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5326,'tendbha','dbconf','MySQL-5.6','mysqld.table_definition_cache','STRING','{MAX(DBInitMemory*512/1000,2048)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of table definitions (from .frm files) that can be stored in the definition cache.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (143,'tendbha','dbconf','MySQL-5.6','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of open tables for all threads. Increasing this value increases the number of file descriptors that mysqld requires.','2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5327,'tendbha','dbconf','MySQL-5.6','mysqld.table_open_cache_instances','STRING','{MIN(DBInitMemory/1000,16)}','','',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of table cache instances','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (144,'tendbha','dbconf','MySQL-5.6','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How many threads we should keep in a cache for reuse','2022-04-25 10:00:47','2022-06-17 12:33:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5328,'tendbha','dbconf','MySQL-5.6','mysqld.thread_handling','STRING','one-thread-per-connection','one-thread-per-connection| pool-of-threads ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The thread-handling model used by the server for connection threads.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5329,'tendbha','dbconf','MySQL-5.6','mysqld.thread_pool_oversubscribe','INT','3','[3,32]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Number of additional threads per group of thread pool.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5330,'tendbha','dbconf','MySQL-5.6','mysqld.thread_pool_size','STRING','{MIN(DBInitCpu,64)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of thread groups in the thread pool.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5331,'tendbha','dbconf','MySQL-5.6','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Set the global server time zone. The default follows SYSTEM, and the value can be set in several formats, such as SYSTEM, +8:00, -6:00, Europe/Helsinki etc. For more information, please consult the MySQL documentation','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (145,'tendbha','dbconf','MySQL-5.6','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/tmp',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5332,'tendbha','dbconf','MySQL-5.6','mysqld.tmp_table_size','INT','209715200','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of internal in-memory temporary tables. This variable does not apply to user-created MEMORY tables.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (146,'tendbha','dbconf','MySQL-5.6','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Default transaction isolation level','2022-04-25 10:00:47','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (147,'tendbha','dbconf','MySQL-5.6','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on a noninteractive connection before closing it','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10968,'tendbha','dbconf','MySQL-5.6','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 18:01:10',0);
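+-- NOTE (added commentary, inferred from the rows themselves, not authoritative):
+-- `value_type_sub` = 'RANGE' pairs with a `value_allowed` of the form '[min,max]',
+-- while 'ENUM'/'ENUMS' pair with a '|'-separated list of permitted values.
+-- Defaults such as '{{.Mysqld.Port}}' are Go-template placeholders rendered at
+-- deploy time, with a matching lowercase form in `value_formula` (e.g. '{{mysqld.port}}').
+-- The MySQL-5.6 parameter catalogue ends here; the MySQL-5.7 rows follow.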
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10970,'tendbha','dbconf','MySQL-5.7','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 19:15:59',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (2,'tendbha','dbconf','MySQL-5.7','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (3,'tendbha','dbconf','MySQL-5.7','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 19:28:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (756,'tendbha','dbconf','MySQL-5.7','mysql.port','INT','{{.Mysqld.Port}}',NULL,'',2,0,0,0,0,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (754,'tendbha','dbconf','MySQL-5.7','mysql.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,0,'{{mysqld.datadir}}/mysql.sock',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4999,'tendbha','dbconf','MySQL-5.7','mysqld.automatic_sp_privileges','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server automatically grants the EXECUTE and ALTER ROUTINE privileges to the creator of a stored routine','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4997,'tendbha','dbconf','MySQL-5.7','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the interval between successive column values.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4998,'tendbha','dbconf','MySQL-5.7','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the starting point for the AUTO_INCREMENT column value.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5000,'tendbha','dbconf','MySQL-5.7','mysqld.avoid_temporal_upgrade','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether ALTER TABLE implicitly upgrades temporal columns found to be in pre-5.6.4 format (TIME, DATETIME, and TIMESTAMP columns without support for fractional seconds precision)','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5001,'tendbha','dbconf','MySQL-5.7','mysqld.back_log','INT','3000','[1,65535]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of outstanding connection requests MySQL can have.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4,'tendbha','dbconf','MySQL-5.7','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}',NULL,'',2,0,0,0,1,'{{mysqld.bind-address}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:01:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5002,'tendbha','dbconf','MySQL-5.7','mysqld.binlog_cache_size','INT','2097152','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the cache to hold changes to the binary log during a transaction','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5003,'tendbha','dbconf','MySQL-5.7','mysqld.binlog_checksum','STRING','CRC32','NONE| CRC32 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Include a checksum for log events in the binary log. NONE indicates that only the event length is checked.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5,'tendbha','dbconf','MySQL-5.7','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What form of binary logging the master will use.','2022-04-25 10:00:47','2023-04-19 14:31:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5004,'tendbha','dbconf','MySQL-5.7','mysqld.binlog_order_commits','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When enabled on a master (the default), transactions are externalized in the same order as they are written to the binary log','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5006,'tendbha','dbconf','MySQL-5.7','mysqld.binlog_rows_query_log_events','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This system variable affects row-based logging only. When enabled, it causes the server to write informational log events such as row query log events into its binary log','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5005,'tendbha','dbconf','MySQL-5.7','mysqld.binlog_row_image','STRING','FULL','FULL| MINIMAL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls what format rows should be logged in.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5007,'tendbha','dbconf','MySQL-5.7','mysqld.binlog_stmt_cache_size','INT','32768','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the statement cache for updates to non-transactional engines for the binary log.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5008,'tendbha','dbconf','MySQL-5.7','mysqld.block_encryption_mode','STRING','AES-128-ECB','AES-128-ECB| AES-192-ECB| AES-256-ECB| AES-128-CBC| AES-192-CBC| AES-256-CBC ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls the block encryption mode for block-based algorithms such as AES. It affects encryption for AES_ENCRYPT() and AES_DECRYPT().','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5009,'tendbha','dbconf','MySQL-5.7','mysqld.bulk_insert_buffer_size','INT','8388608','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'MyISAM uses a special tree-like cache to make bulk inserts faster for INSERT ... SELECT, INSERT ... VALUES (...), (...), ..., and LOAD DATA when adding data to nonempty tables','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5011,'tendbha','dbconf','MySQL-5.7','mysqld.character_set_filesystem','STRING','BINARY','UTF8| LATIN1| GBK| BINARY ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The file system character set. You had better not change it.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (6,'tendbha','dbconf','MySQL-5.7','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,'Specify default server character set','2022-04-25 10:00:47','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (7,'tendbha','dbconf','MySQL-5.7','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,'{{mysqld.collation_server}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5012,'tendbha','dbconf','MySQL-5.7','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If AUTO (the default), MySQL permits INSERT and SELECT statements to run concurrently for MyISAM tables that have no free blocks in the middle of the data file.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5013,'tendbha','dbconf','MySQL-5.7','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8,'tendbha','dbconf','MySQL-5.7','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/data',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9,'tendbha','dbconf','MySQL-5.7','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-17 14:26:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5014,'tendbha','dbconf','MySQL-5.7','mysqld.default_password_lifetime','INT','0','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable defines the global automatic password expiration policy. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10,'tendbha','dbconf','MySQL-5.7','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-07 15:35:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5016,'tendbha','dbconf','MySQL-5.7','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default mode value to use for the WEEK() function','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5018,'tendbha','dbconf','MySQL-5.7','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5019,'tendbha','dbconf','MySQL-5.7','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How long an INSERT DELAYED thread should wait for INSERT statements before terminating.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5020,'tendbha','dbconf','MySQL-5.7','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5017,'tendbha','dbconf','MySQL-5.7','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This option applies only to MyISAM tables. It can have one of the following values to affect handling of the DELAY_KEY_WRITE table option that can be used in CREATE TABLE statements.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5021,'tendbha','dbconf','MySQL-5.7','mysqld.disconnect_on_expired_password','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This variable controls how the server handles clients with expired passwords','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5022,'tendbha','dbconf','MySQL-5.7','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the number of digits by which to increase the scale of the result of division operations performed with the / operator','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5023,'tendbha','dbconf','MySQL-5.7','mysqld.end_markers_in_json','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether optimizer JSON output should add end markers. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5024,'tendbha','dbconf','MySQL-5.7','mysqld.eq_range_index_dive_limit','INT','200','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The optimizer will use existing index statistics instead of doing index dives for equality ranges if the number of equality ranges for the index is larger than or equal to this number. If set to 0, index dives are always used.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5025,'tendbha','dbconf','MySQL-5.7','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the status of the Event Scheduler','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11,'tendbha','dbconf','MySQL-5.7','mysqld.expire_logs_days','INT','60','[0, 99]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-26 20:27:21',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5026,'tendbha','dbconf','MySQL-5.7','mysqld.explicit_defaults_for_timestamp','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This option causes CREATE TABLE to create all TIMESTAMP columns as NULL with the DEFAULT NULL attribute. Without this option, TIMESTAMP columns are NOT NULL and have implicit DEFAULT clauses.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5027,'tendbha','dbconf','MySQL-5.7','mysqld.flush_time','INT','0','[0,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this is set to a nonzero value, all tables are closed every flush_time seconds to free up resources and synchronize unflushed data to disk. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5028,'tendbha','dbconf','MySQL-5.7','mysqld.ft_max_word_len','INT','84','[10,4294967295]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The maximum length of the word to be included in a MyISAM FULLTEXT index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5029,'tendbha','dbconf','MySQL-5.7','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5030,'tendbha','dbconf','MySQL-5.7','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of best matches to use for query expansion','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5031,'tendbha','dbconf','MySQL-5.7','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted result length in bytes for the GROUP_CONCAT() function','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5032,'tendbha','dbconf','MySQL-5.7','mysqld.host_cache_size','INT','644','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the internal host cache','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12,'tendbha','dbconf','MySQL-5.7','mysqld.init_connect','STRING','\"insert into infodba_schema.conn_log values(connection_id(),now(),user(),current_user(),\'{{.Mysqld.BindAddress}}\');\"','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2022-04-25 10:00:47','2023-04-17 17:11:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5033,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_adaptive_flushing','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to dynamically adjust the rate of flushing dirty pages in the InnoDB buffer pool based on the workload.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5034,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_adaptive_flushing_lwm','INT','10','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the low water mark representing percentage of redo log capacity at which adaptive flushing is enabled','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5035,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_adaptive_hash_index','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the InnoDB adaptive hash index is enabled or disabled.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5036,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_adaptive_max_sleep_delay','INT','150000','[1,1000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Permits InnoDB to automatically adjust the value of innodb_thread_sleep_delay up or down according to the current workload.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5037,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_alter_table_default_algorithm','STRING','INPLACE','INPLACE| INSTANT ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The INSTANT algorithm avoids data copying, allowing columns to be added quickly to large tables.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5038,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_async_truncate_size','INT','128','[128,168]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size, in MB, by which asynchronous DROP TABLE truncates the file in each background pass.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5039,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_async_truncate_work_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Switch for the asynchronous deletion of large tables.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5040,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_autoextend_increment','INT','64','[1,1000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The increment size (in megabytes) for extending the size of an auto-extending InnoDB system tablespace file when it becomes full.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5041,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The lock mode to use for generating auto-increment values.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5042,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_dump_at_shutdown','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to record the pages cached in the InnoDB buffer pool when the MySQL server is shut down, to shorten the warmup process at the next restart. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5043,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_dump_pct','INT','25','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the percentage of the most recently used pages for each buffer pool to read out and dump','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_instances','INT','4','1 | 4 | 8','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of regions that the InnoDB buffer pool is divided into.','2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5044,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_load_at_startup','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies whether the InnoDB buffer pool is automatically warmed up on MySQL server startup by loading the same pages it held at an earlier time.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}','','',2,0,0,0,1,'{{mysqld.innodb_buffer_pool_size}}',NULL,NULL,-1,NULL,'The size in bytes of the buffer pool, the memory area where InnoDB caches table and index data.','2022-04-25 10:00:47','2023-03-28 18:01:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5046,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_change_buffering','STRING','ALL','NONE| INSERTS| DELETES| CHANGES| PURGES| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether InnoDB performs change buffering','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5045,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_change_buffer_max_size','INT','25','[0,50]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5047,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_checksum_algorithm','STRING','CRC32','INNODB| CRC32| NONE| STRICT_INNODB| STRICT_CRC32| STRICT_NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how to generate and verify the checksum stored in the disk blocks of InnoDB tablespaces.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5048,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_cmp_per_index_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables per-index compression-related statistics in the INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX table.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5049,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_commit_concurrency','INT','0','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of threads that can commit at the same time. A value of 0 (the default) permits any number of transactions to commit simultaneously.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5050,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_compression_failure_threshold_pct','INT','5','[0,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the compression failure rate threshold for a table, as a percentage, at which point MySQL begins adding padding within compressed pages to avoid expensive compression failures. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5051,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_compression_level','INT','6','[0,9]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the level of zlib compression to use for InnoDB compressed tables and indexes. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5052,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_compression_pad_pct_max','INT','50','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum percentage that can be reserved as free space within each compressed page','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5053,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the number of threads that can enter InnoDB concurrently','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/data',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5054,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_deadlock_detect','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to disable deadlock detection.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5055,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_default_row_format','STRING','DYNAMIC','DYNAMIC| COMPACT| REDUNDANT ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The innodb_default_row_format option defines the default row format for InnoDB tables and user-created temporary tables.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5056,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_disable_sort_file_cache','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables the operating system file system cache for merge-sort temporary files.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (17,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_file_format','STRING','Barracuda','Barracuda','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (18,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-05-27 15:22:52',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (19,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_flush_log_at_trx_commit','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines Innodb transaction durability.','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5057,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_flush_method','STRING','O_DIRECT','fsync| O_DSYNC| littlesync| nosync| O_DIRECT| O_DIRECT_NO_FSYNC ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the method used to flush data to InnoDB data files and log files','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5058,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_flush_neighbors','STRING','0','0| 1| 2 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5059,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_flush_sync','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to ignore the innodb_io_capacity setting for bursts of I/O activity that occur at checkpoints.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5060,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_ft_cache_size','INT','8000000','[1600000,80000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The memory allocated, in bytes, for the InnoDB FULLTEXT search index cache, which holds a parsed document in memory while creating an InnoDB FULLTEXT index','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5061,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_ft_enable_diag_print','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to enable additional full-text search (FTS) diagnostic output. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5062,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_ft_enable_stopword','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Create FTS index with stopword.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5063,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_ft_max_token_size','INT','84','[10,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Maximum character length of words that are stored in an InnoDB FULLTEXT index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5064,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_ft_min_token_size','INT','3','[0,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Minimum length of words that are stored in an InnoDB FULLTEXT index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5065,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_ft_num_word_optimize','INT','2000','[1000,10000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Number of words to process during each OPTIMIZE TABLE operation on an InnoDB FULLTEXT index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5066,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_ft_result_cache_limit','INT','2000000000','[1000000,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The InnoDB full-text search query result cache limit (defined in bytes) per full-text search query or per thread.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5067,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_ft_server_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The user supplied stopword table name.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5068,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_ft_sort_pll_degree','INT','2','[1,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of threads used in parallel to index and tokenize text in an InnoDB FULLTEXT index when building a search index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5069,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_ft_total_cache_size','INT','640000000','[32000000,1600000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The total memory allocated, in bytes, for the InnoDB full-text search index cache for all tables. Creating numerous tables, each with a FULLTEXT search index, could consume a significant portion of available memory.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5070,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_ft_user_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'User supplied stopword table name, effective in the session level.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (20,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The innodb_io_capacity limit is a total limit for all buffer pool instances.','2022-04-25 10:00:47','2022-06-17 12:33:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5071,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_io_capacity_max','INT','40000','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines an upper limit on the number of I/O operations performed per second by InnoDB background tasks in critical situations.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5072,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_large_prefix','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enable this option to allow index key prefixes longer than 767 bytes (up to 3072 bytes) for InnoDB tables that use the DYNAMIC and COMPRESSED row formats.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (21,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The length of time in seconds an InnoDB transaction waits for a row lock before giving up','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (22,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5073,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_log_checksums','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables checksums for redo log pages.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5074,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_log_compressed_pages','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to prevent corruption that could occur if a different version of the zlib compression algorithm is used during recovery.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (23,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-17 12:33:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (24,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (25,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/log',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5075,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_lru_scan_depth','INT','1024','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'It specifies, per buffer pool instance, how far down the buffer pool LRU page list the page cleaner thread scans looking for dirty pages to flush. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5076,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_max_dirty_pages_pct','FLOAT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to flush data from the buffer pool so that the percentage of dirty pages does not exceed this value','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5077,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_max_dirty_pages_pct_lwm','FLOAT','0','[0,75]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines a low water mark representing the percentage of dirty pages at which preflushing is enabled to control the dirty page ratio.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5078,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_max_purge_lag','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the maximum length of the purge queue.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5079,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_max_purge_lag_delay','INT','0','[0,10000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum delay in microseconds for the delay imposed by the innodb_max_purge_lag variable. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5080,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_max_undo_log_size','INT','1073741824','[10485760,17179869184]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines a threshold size for undo tablespaces.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5081,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_monitor_disable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables InnoDB metrics counters.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5082,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_monitor_enable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables InnoDB metrics counters.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5083,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the approximate percentage of the InnoDB buffer pool used for the old block sublist','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5084,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how long in milliseconds a block inserted into the old sublist must stay there after its first access before it can be moved to the new sublist.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5085,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_online_alter_log_max_size','INT','134217728','[134217728,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum modification log file size for online index creation','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5086,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_optimize_fulltext_only','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Changes the way OPTIMIZE TABLE operates on InnoDB tables. Intended to be enabled temporarily, during maintenance operations for InnoDB tables with FULLTEXT indexes. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5087,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_page_cleaners','INT','4','[1,64]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of page cleaner threads that flush dirty pages from buffer pool instances.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5088,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_print_all_deadlocks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this option is enabled, information about all deadlocks in InnoDB user transactions is recorded in the mysqld error log.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5089,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_purge_batch_size','INT','300','[1,5000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The granularity of changes, expressed in units of redo log records, that trigger a purge operation, flushing the changed buffer pool blocks to disk.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5090,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_purge_rseg_truncate_frequency','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5091,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_purge_threads','INT','4','[1,32]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of background threads devoted to the InnoDB purge operation.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5092,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_random_read_ahead','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables the random read-ahead technique for optimizing InnoDB I/O.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5093,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the sensitivity of linear read-ahead that InnoDB uses to prefetch pages into the buffer pool','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (26,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_read_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for read operations in InnoDB.','2022-04-25 10:00:47','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5094,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'InnoDB rolls back only the last statement on a transaction timeout by default. If innodb_rollback_on_timeout is specified, a transaction timeout causes InnoDB to abort and roll back the entire transaction.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5095,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_rollback_segments','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the number of rollback segments used by InnoDB.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5096,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_sort_buffer_size','INT','1048576','[65536,67108864]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies the size of sort buffers used to sort data during creation of an InnoDB index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5097,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_spin_wait_delay','INT','6','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum delay between polls for a spin lock.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5098,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_stats_auto_recalc','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Causes InnoDB to automatically recalculate persistent statistics after the data in a table is changed substantially.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5099,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5100,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this variable is enabled, InnoDB updates statistics when metadata statements such as SHOW TABLE STATUS or SHOW INDEX are run, or when accessing the INFORMATION_SCHEMA tables TABLES or STATISTICS','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5101,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_stats_persistent','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether InnoDB index statistics are persisted to disk. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5102,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_stats_persistent_sample_pages','INT','20','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of index pages to sample when estimating cardinality and other statistics for an indexed column, such as those calculated by ANALYZE TABLE.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5103,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_stats_transient_sample_pages','INT','8','[1,4294967296]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of leaf index pages to sample when calculating transient statistics','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5104,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_status_output','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables periodic output for the standard InnoDB Monitor.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5105,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_status_output_locks','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables the InnoDB Lock Monitor.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5106,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When innodb_strict_mode is ON, InnoDB returns errors rather than warnings for certain conditions','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5107,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_sync_array_size','INT','1','[1,1024]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the size of the mutex/lock wait array.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5108,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_sync_spin_loops','INT','30','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of times a thread waits for an InnoDB mutex to be freed before the thread is suspended.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5109,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_table_locks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If autocommit = 0, InnoDB honors LOCK TABLES; MySQL does not return from LOCK TABLES ... WRITE until all other threads have released all their locks to the table. The default value of innodb_table_locks is 1, which means that LOCK TABLES causes InnoDB to lock a table internally if autocommit = 0.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (27,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_thread_concurrency','INT','16','[0,128]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to keep the number of operating system threads concurrently inside InnoDB less than or equal to the limit given by this variable.','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (28,'tendbha','dbconf','MySQL-5.7','mysqld.innodb_write_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for write operations in InnoDB.','2022-04-25 10:00:47','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (29,'tendbha','dbconf','MySQL-5.7','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on an interactive connection before closing it','2022-04-25 10:00:47','2023-04-07 11:54:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5110,'tendbha','dbconf','MySQL-5.7','mysqld.join_buffer_size','STRING','{MIN(DBInitMemory*128,262144)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum size of the buffer used for plain index scans, range index scans, and joins that do not use indexes.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5111,'tendbha','dbconf','MySQL-5.7','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5112,'tendbha','dbconf','MySQL-5.7','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default size of key cache blocks','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5113,'tendbha','dbconf','MySQL-5.7','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum percentage of warm blocks in key cache','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5114,'tendbha','dbconf','MySQL-5.7','mysqld.lc_time_names','STRING','EN_US','JA_JP| PT_BR| EN_US ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the locale that controls the language used to display day and month names and abbreviations.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5115,'tendbha','dbconf','MySQL-5.7','mysqld.local_infile','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether LOCAL is supported for LOAD DATA INFILE.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5116,'tendbha','dbconf','MySQL-5.7','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable specifies the timeout in seconds for attempts to acquire metadata locks.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (31,'tendbha','dbconf','MySQL-5.7','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/binlog/binlog{{port}}.bin',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:53',0);
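+-- Illustrative note (a sketch, not part of the original seed data): rows such
+-- as mysqld.log_bin above carry a Go-template placeholder in value_default
+-- (e.g. {{.Mysqld.Logdir}}) together with a non-NULL value_formula, which
+-- presumably marks defaults that are rendered per instance rather than used
+-- verbatim. Assuming only the schema shown in this file, such rows can be
+-- listed with a query like:
+-- SELECT conf_name, value_default, value_formula
+--   FROM tb_config_name_def
+--  WHERE namespace='tendbha' AND conf_file='MySQL-5.7'
+--    AND value_formula IS NOT NULL;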
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (33,'tendbha','dbconf','MySQL-5.7','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5117,'tendbha','dbconf','MySQL-5.7','mysqld.log_output','STRING','FILE','FILE| TABLE| NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The destination or destinations for general query log and slow query log output. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5118,'tendbha','dbconf','MySQL-5.7','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether queries that do not use indexes are logged to the slow query log','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (34,'tendbha','dbconf','MySQL-5.7','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5119,'tendbha','dbconf','MySQL-5.7','mysqld.log_slow_admin_statements','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to include slow administrative statements in the statements written to the slow query log.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5120,'tendbha','dbconf','MySQL-5.7','mysqld.log_throttle_queries_not_using_indexes','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If log_queries_not_using_indexes is enabled, the log_throttle_queries_not_using_indexes variable limits the number of such queries per minute that can be written to the slow query log. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5121,'tendbha','dbconf','MySQL-5.7','mysqld.log_timestamps','STRING','SYSTEM','UTC| SYSTEM ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls the time zone of timestamps in messages written to the error log, and in general query log and slow query log messages written to files.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (35,'tendbha','dbconf','MySQL-5.7','mysqld.log_warnings','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (36,'tendbha','dbconf','MySQL-5.7','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a query takes longer than this many seconds, the server increments the Slow_queries status variable','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (32,'tendbha','dbconf','MySQL-5.7','mysqld.loose-log_bin_compress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-05-11 12:16:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (50,'tendbha','dbconf','MySQL-5.7','mysqld.loose_relay_log_uncompress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (37,'tendbha','dbconf','MySQL-5.7','mysqld.lower_case_table_names','INT','0','[0,1]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'If set to 0, table names are stored as specified and comparisons are case sensitive. If set to 1, they are stored in lowercase on disk and comparisons are not case sensitive.','2022-04-25 10:00:47','2022-06-16 21:32:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5122,'tendbha','dbconf','MySQL-5.7','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If set to true, all INSERT, UPDATE, DELETE, and LOCK TABLE WRITE statements wait until there is no pending SELECT or LOCK TABLE READ on the affected table','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5123,'tendbha','dbconf','MySQL-5.7','mysqld.master_verify_checksum','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to examine checksums when reading from the binary log.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (38,'tendbha','dbconf','MySQL-5.7','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of one packet or any generated/intermediate string.','2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (39,'tendbha','dbconf','MySQL-5.7','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-20 12:23:29',0);
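+-- Illustrative note (an assumption-laden sketch, not original seed data): the
+-- value_allowed/value_type_sub pair appears to encode each setting's
+-- validation rule: 'RANGE' stores numeric bounds as '[min,max]', 'ENUM' a
+-- pipe-separated candidate list, and 'BYTES' (as in mysqld.max_binlog_cache_size
+-- above) a human-readable size such as '128M'. The encodings in use can be
+-- inspected with:
+-- SELECT value_type_sub, COUNT(*) AS n, MIN(value_allowed) AS example
+--   FROM tb_config_name_def
+--  WHERE namespace='tendbha' AND conf_file='MySQL-5.7'
+--  GROUP BY value_type_sub;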
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (40,'tendbha','dbconf','MySQL-5.7','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (41,'tendbha','dbconf','MySQL-5.7','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections.','2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (42,'tendbha','dbconf','MySQL-5.7','mysqld.max_connect_errors','INT','99999999','[1,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If more than this many successive connection requests from a host are interrupted without a successful connection, the server blocks that host from further connections.','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5124,'tendbha','dbconf','MySQL-5.7','mysqld.max_error_count','INT','64','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of error, warning, and information messages to be stored for display by the SHOW ERRORS and SHOW WARNINGS statements.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5125,'tendbha','dbconf','MySQL-5.7','mysqld.max_heap_table_size','INT','67108864','[16384,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'the maximum size to which user-created MEMORY tables are permitted to grow.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5126,'tendbha','dbconf','MySQL-5.7','mysqld.max_length_for_sort_data','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The cutoff on the size of index values that determines which filesort algorithm to use.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5127,'tendbha','dbconf','MySQL-5.7','mysqld.max_points_in_geometry','INT','65536','[3,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum value of the points_per_circle argument to the ST_Buffer_Strategy() function.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5128,'tendbha','dbconf','MySQL-5.7','mysqld.max_prepared_stmt_count','INT','16382','[0,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable limits the total number of prepared statements in the server.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5129,'tendbha','dbconf','MySQL-5.7','mysqld.max_sort_length','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of bytes to use when sorting data values.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5130,'tendbha','dbconf','MySQL-5.7','mysqld.max_sp_recursion_depth','INT','0','[0,255]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of times that any given stored procedure may be called recursively. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5131,'tendbha','dbconf','MySQL-5.7','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections per user.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5132,'tendbha','dbconf','MySQL-5.7','mysqld.metadata_locks_cache_size','INT','1024','[1,1048576]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The size of the metadata locks cache. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5133,'tendbha','dbconf','MySQL-5.7','mysqld.min_examined_row_limit','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Queries that examine fewer than this number of rows are not logged to the slow query log. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5134,'tendbha','dbconf','MySQL-5.7','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when sorting MyISAM indexes during a REPAIR TABLE or when creating indexes with CREATE INDEX or ALTER TABLE','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5135,'tendbha','dbconf','MySQL-5.7','mysqld.mysql_native_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether the mysql_native_password built-in authentication plugin supports proxy users.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5136,'tendbha','dbconf','MySQL-5.7','mysqld.net_buffer_length','INT','16384','[1024,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the client thread buffer length.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5137,'tendbha','dbconf','MySQL-5.7','mysqld.net_read_timeout','INT','30','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from a connection before aborting the read.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5138,'tendbha','dbconf','MySQL-5.7','mysqld.net_retry_count','INT','10','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a read or write on a communication port is interrupted, retry this many times before giving up.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5139,'tendbha','dbconf','MySQL-5.7','mysqld.net_write_timeout','INT','60','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for a block to be written to a connection before aborting the write.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5140,'tendbha','dbconf','MySQL-5.7','mysqld.ngram_token_size','INT','2','[1,10]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'ngram_token_size is set to the size of the largest token that you want to search for.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5141,'tendbha','dbconf','MySQL-5.7','mysqld.optimizer_prune_level','STRING','1','0| 1 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the heuristics applied during query optimization to prune less-promising partial plans from the optimizer search space.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5142,'tendbha','dbconf','MySQL-5.7','mysqld.optimizer_search_depth','INT','62','[0,62]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum depth of search performed by the query optimizer.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5143,'tendbha','dbconf','MySQL-5.7','mysqld.optimizer_switch','STRING','INDEX_MERGE=ON,INDEX_MERGE_UNION=ON,INDEX_MERGE_SORT_UNION=ON,INDEX_MERGE_INTERSECTION=ON,ENGINE_CONDITION_PUSHDOWN=ON,INDEX_CONDITION_PUSHDOWN=ON,MRR=ON,MRR_COST_BASED=ON,BLOCK_NESTED_LOOP=ON,BATCHED_KEY_ACCESS=OFF,MATERIALIZATION=ON,SEMIJOIN=ON,LOOSESCAN=ON,FIRSTMATCH=ON,DUPLICATEWEEDOUT=ON,SUBQUERY_MATERIALIZATION_COST_BASED=ON,USE_INDEX_EXTENSIONS=ON,CONDITION_FANOUT_FILTER=ON,DERIVED_MERGE=ON','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls optimizer behavior. Must be configured with reference to the official documentation.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5144,'tendbha','dbconf','MySQL-5.7','mysqld.optimizer_trace_limit','INT','1','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of optimizer traces to display. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5145,'tendbha','dbconf','MySQL-5.7','mysqld.optimizer_trace_max_mem_size','INT','16384','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum cumulative size of stored optimizer traces. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5146,'tendbha','dbconf','MySQL-5.7','mysqld.optimizer_trace_offset','INT','-1','[-2147483648,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The offset of optimizer traces to display.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (43,'tendbha','dbconf','MySQL-5.7','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Whether enable the performance schema.','2022-04-25 10:00:47','2022-06-16 21:32:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (44,'tendbha','dbconf','MySQL-5.7','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5147,'tendbha','dbconf','MySQL-5.7','mysqld.preload_buffer_size','INT','32768','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when preloading indexes.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5148,'tendbha','dbconf','MySQL-5.7','mysqld.query_alloc_block_size','INT','8192','[1024,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The allocation size of memory blocks that are allocated for objects created during statement parsing and execution.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5149,'tendbha','dbconf','MySQL-5.7','mysqld.query_cache_limit','INT','1048576','[1,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Do not cache results that are larger than this number of bytes. The default value is 1MB.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5150,'tendbha','dbconf','MySQL-5.7','mysqld.query_cache_min_res_unit','INT','4096','[512,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum size (in bytes) for blocks allocated by the query cache. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (45,'tendbha','dbconf','MySQL-5.7','mysqld.query_cache_size','INT','0','[0,104857600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount of memory allocated for caching query results. By default, the query cache is disabled.','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (46,'tendbha','dbconf','MySQL-5.7','mysqld.query_cache_type','STRING','OFF','OFF| ON| DEMAND','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Set the query cache type.','2022-04-25 10:00:47','2023-04-17 14:28:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5151,'tendbha','dbconf','MySQL-5.7','mysqld.query_cache_wlock_invalidate','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Invalidate queries in query cache on LOCK for write','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5152,'tendbha','dbconf','MySQL-5.7','mysqld.query_prealloc_size','INT','8192','[8192,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the persistent buffer used for statement parsing and execution. This buffer is not freed between statements.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (47,'tendbha','dbconf','MySQL-5.7','mysqld.query_response_time_stats','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-05-24 00:16:49',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5153,'tendbha','dbconf','MySQL-5.7','mysqld.range_alloc_block_size','INT','4096','[4096,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of blocks that are allocated when doing range optimization.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5154,'tendbha','dbconf','MySQL-5.7','mysqld.range_optimizer_max_mem_size','INT','8388608','[0,17179869184]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The limit on memory consumption for the range optimizer. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5155,'tendbha','dbconf','MySQL-5.7','mysqld.read_buffer_size','INT','262144','[8200,2147479552]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each thread that does a sequential scan for a MyISAM table allocates a buffer of this size (in bytes) for each table it scans.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5156,'tendbha','dbconf','MySQL-5.7','mysqld.read_rnd_buffer_size','INT','524288','[1,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Used for reads from MyISAM tables, and, for any storage engine, for Multi-Range Read optimization.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (48,'tendbha','dbconf','MySQL-5.7','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/relay-log/relay-log.bin',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (49,'tendbha','dbconf','MySQL-5.7','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (1469,'tendbha','dbconf','MySQL-5.7','mysqld.replicate-wild-ignore-table','STRING','mysql.%,infodba_schema.conn_log','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 12:49:35','2023-04-19 15:12:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (55,'tendbha','dbconf','MySQL-5.7','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,'{{mysqld.server_id}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:01:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5157,'tendbha','dbconf','MySQL-5.7','mysqld.session_track_gtids','STRING','OFF','OFF| OWN_GTID| ALL_GTIDS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks GTIDs within the current session and returns them to the client. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5158,'tendbha','dbconf','MySQL-5.7','mysqld.session_track_schema','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks changes to the default schema (database) name within the current session and makes this information available to the client when changes occur.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5159,'tendbha','dbconf','MySQL-5.7','mysqld.session_track_state_change','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks changes to the state of the current session and notifies the client when state changes occur. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5160,'tendbha','dbconf','MySQL-5.7','mysqld.sha256_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the sha256_password built-in authentication plugin supports proxy users.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (56,'tendbha','dbconf','MySQL-5.7','mysqld.show_compatibility_56','STRING','ON','ON| OFF ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The INFORMATION_SCHEMA has tables that contain system and status variable information. The Performance Schema tables are intended to replace the INFORMATION_SCHEMA tables, which are deprecated as of MySQL 5.7.6 and will be removed in a future MySQL release.','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5161,'tendbha','dbconf','MySQL-5.7','mysqld.show_old_temporals','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether SHOW CREATE TABLE output includes comments to flag temporal columns found to be in pre-5.6.4 format (TIME, DATETIME, and TIMESTAMP columns without support for fractional seconds precision).','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (57,'tendbha','dbconf','MySQL-5.7','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-09 18:37:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (58,'tendbha','dbconf','MySQL-5.7','mysqld.slave_compressed_protocol','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:11:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (59,'tendbha','dbconf','MySQL-5.7','mysqld.slave_exec_mode','STRING','STRICT','|STRICT|IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5162,'tendbha','dbconf','MySQL-5.7','mysqld.slave_net_timeout','INT','120','[15,300]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from the master before the slave considers the connection broken, aborts the read, and tries to reconnect','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (60,'tendbha','dbconf','MySQL-5.7','mysqld.slave_parallel_type','STRING','DATABASE','DATABASE| LOGICAL_CLOCK','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies if the slave will use database partitioning or information from master to parallelize transactions. (Default: DATABASE).','2022-04-25 10:00:47','2023-04-17 14:55:08',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (61,'tendbha','dbconf','MySQL-5.7','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enable multithreading on the replica and set the number of application threads used to execute replicated transactions in parallel.','2022-04-25 10:00:47','2023-04-17 14:58:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5163,'tendbha','dbconf','MySQL-5.7','mysqld.slave_rows_search_algorithms','STRING','TABLE_SCAN,INDEX_SCAN','TABLE_SCAN,INDEX_SCAN ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When preparing batches of rows for row-based logging and replication, this variable controls how the rows are searched for matches.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5164,'tendbha','dbconf','MySQL-5.7','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If creating a thread takes longer than this many seconds, the server increments the Slow_launch_threads status variable','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (62,'tendbha','dbconf','MySQL-5.7','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specify the initial slow query log state.','2022-04-25 10:00:47','2023-04-17 14:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (63,'tendbha','dbconf','MySQL-5.7','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/slow-query.log',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (64,'tendbha','dbconf','MySQL-5.7','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/mysql.sock',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (65,'tendbha','dbconf','MySQL-5.7','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each session that must perform a sort allocates a buffer of this size. ','2022-04-25 10:00:47','2022-06-23 12:21:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5165,'tendbha','dbconf','MySQL-5.7','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this variable is set to 1, then you can find the most recently generated AUTO_INCREMENT value by selecting rows with WHERE auto_col IS NULL.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (66,'tendbha','dbconf','MySQL-5.7','mysqld.sql_mode','STRING','\'\'','\'\'| NO_TABLE_OPTIONS| NO_ZERO_DATE| ONLY_FULL_GROUP_BY| STRICT_ALL_TABLES| ERROR_FOR_DIVISION_BY_ZERO| HIGH_NOT_PRECEDENCE| IGNORE_SPACE| NO_AUTO_CREATE_USER| NO_AUTO_VALUE_ON_ZERO| NO_BACKSLASH_ESCAPES| NO_UNSIGNED_SUBTRACTION| NO_ENGINE_SUBSTITUTION| ANSI_QUOTES| NO_KEY_OPTIONS| PIPES_AS_CONCAT| ALLOW_INVALID_DATES| NO_DIR_IN_CREATE| NO_ZERO_IN_DATE| REAL_AS_FLOAT| STRICT_TRANS_TABLES| NO_FIELD_OPTIONS ','ENUMS',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The current server SQL mode.','2022-04-25 10:00:47','2023-04-19 11:29:17',0);
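+-- Illustrative note (an assumption, not original seed data): mysqld.sql_mode
+-- above is typed 'ENUMS' rather than 'ENUM', presumably meaning several of the
+-- pipe-separated candidates may be combined into one comma-separated value, as
+-- sql_mode itself accepts (e.g. 'STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION'),
+-- whereas plain 'ENUM' settings take exactly one candidate. Multi-valued
+-- settings can be listed with:
+-- SELECT conf_name FROM tb_config_name_def
+--  WHERE namespace='tendbha' AND conf_file='MySQL-5.7' AND value_type_sub='ENUMS';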
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5166,'tendbha','dbconf','MySQL-5.7','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Attention: this is a session-level setting, so it is better applied via init_connect. Set to 1 to require that UPDATE and DELETE statements include a WHERE clause or a LIMIT clause.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (67,'tendbha','dbconf','MySQL-5.7','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Sets a soft upper limit for the number of cached stored routines per connection.','2022-04-25 10:00:47','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (68,'tendbha','dbconf','MySQL-5.7','mysqld.sync_binlog','INT','0','[0,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Sync binlog (MySQL flush to disk or rely on OS).','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5167,'tendbha','dbconf','MySQL-5.7','mysqld.table_definition_cache','STRING','{MAX(DBInitMemory*512/1000,2048)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of table definitions (from .frm files) that can be stored in the definition cache.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (69,'tendbha','dbconf','MySQL-5.7','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of open tables for all threads. Increasing this value increases the number of file descriptors that mysqld requires.','2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5168,'tendbha','dbconf','MySQL-5.7','mysqld.table_open_cache_instances','STRING','{MIN(DBInitMemory/1000,16)}','','',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of table cache instances','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (70,'tendbha','dbconf','MySQL-5.7','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How many threads we should keep in a cache for reuse','2022-04-25 10:00:47','2022-06-17 12:33:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5169,'tendbha','dbconf','MySQL-5.7','mysqld.thread_handling','STRING','one-thread-per-connection','one-thread-per-connection| pool-of-threads ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The thread-handling model used by the server for connection threads.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5170,'tendbha','dbconf','MySQL-5.7','mysqld.thread_pool_oversubscribe','INT','3','[3,32]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Number of additional threads per group of thread pool.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5171,'tendbha','dbconf','MySQL-5.7','mysqld.thread_pool_size','STRING','{MIN(DBInitCpu,64)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of thread groups in the thread pool.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5172,'tendbha','dbconf','MySQL-5.7','mysqld.thread_stack','INT','524288','[131072,1073741824]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The stack size for each thread.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5173,'tendbha','dbconf','MySQL-5.7','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Set the global server time zone. The default follows SYSTEM, and the value can be given in several formats, such as SYSTEM, +8:00, -6:00, or Europe/Helsinki. For more information, please consult the MySQL documentation','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (71,'tendbha','dbconf','MySQL-5.7','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/tmp',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5174,'tendbha','dbconf','MySQL-5.7','mysqld.tmp_table_size','INT','209715200','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of internal in-memory temporary tables. This variable does not apply to user-created MEMORY tables.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5175,'tendbha','dbconf','MySQL-5.7','mysqld.transaction_alloc_block_size','INT','8192','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount in bytes by which to increase a per-transaction memory pool which needs memory.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (72,'tendbha','dbconf','MySQL-5.7','mysqld.transaction_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5176,'tendbha','dbconf','MySQL-5.7','mysqld.transaction_prealloc_size','INT','4096','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'There is a per-transaction memory pool from which various transaction-related allocations take memory.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5177,'tendbha','dbconf','MySQL-5.7','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Default transaction isolation level','2022-06-16 21:39:26','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (5178,'tendbha','dbconf','MySQL-5.7','mysqld.updatable_views_with_limit','STRING','YES','YES| NO ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls whether updates to a view can be made when the view does not contain all columns of the primary key defined in the underlying table, if the update statement contains a LIMIT clause.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (73,'tendbha','dbconf','MySQL-5.7','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on a noninteractive connection before closing it','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10966,'tendbha','dbconf','MySQL-5.7','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10973,'tendbha','dbconf','MySQL-8.0','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 19:15:59',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (330,'tendbha','dbconf','MySQL-8.0','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (331,'tendbha','dbconf','MySQL-8.0','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 19:28:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4821,'tendbha','dbconf','MySQL-8.0','mysqld.automatic_sp_privileges','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server automatically grants the EXECUTE and ALTER ROUTINE privileges to the creator of a stored routine','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4819,'tendbha','dbconf','MySQL-8.0','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the interval between successive column values.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4820,'tendbha','dbconf','MySQL-8.0','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the starting point for the AUTO_INCREMENT column value.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4822,'tendbha','dbconf','MySQL-8.0','mysqld.avoid_temporal_upgrade','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether ALTER TABLE implicitly upgrades the pre-5.6.4 format of temporal columns (TIME, DATETIME, and TIMESTAMP columns without support for fractional seconds precision)','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4823,'tendbha','dbconf','MySQL-8.0','mysqld.back_log','INT','3000','[1,65535]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of outstanding connection requests MySQL can have.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (332,'tendbha','dbconf','MySQL-8.0','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}','','',2,0,1,0,1,'{{mysqld.bind-address}}',NULL,'',0,'','绑定地址','2022-04-25 10:00:47','2023-03-28 18:01:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4824,'tendbha','dbconf','MySQL-8.0','mysqld.binlog_cache_size','INT','2097152','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the cache to hold changes to the binary log during a transaction','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4825,'tendbha','dbconf','MySQL-8.0','mysqld.binlog_checksum','STRING','CRC32','NONE| CRC32 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Include a checksum for log events in the binary log. NONE indicates that only the event length is checked.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (339,'tendbha','dbconf','MySQL-8.0','mysqld.binlog_expire_logs_seconds','INT','2592000','[0, 4294967295]','RANGE',1,0,0,0,1,NULL,NULL,'',0,'','日志过期天数','2022-04-25 10:00:47','2023-04-26 20:27:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (333,'tendbha','dbconf','MySQL-8.0','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,'',0,'','What form of binary logging the master will use.','2022-04-25 10:00:47','2023-04-19 14:31:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4826,'tendbha','dbconf','MySQL-8.0','mysqld.binlog_order_commits','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When enabled on a master (the default), transactions are externalized in the same order as they are written to the binary log','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4828,'tendbha','dbconf','MySQL-8.0','mysqld.binlog_rows_query_log_events','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This system variable affects row-based logging only. When enabled, it causes the server to write informational log events such as row query log events into its binary log','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4827,'tendbha','dbconf','MySQL-8.0','mysqld.binlog_row_image','STRING','FULL','FULL| MINIMAL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the format in which rows are logged.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4829,'tendbha','dbconf','MySQL-8.0','mysqld.binlog_stmt_cache_size','INT','32768','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the statement cache for updates to non-transactional engines for the binary log.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4830,'tendbha','dbconf','MySQL-8.0','mysqld.block_encryption_mode','STRING','AES-128-ECB','AES-128-ECB| AES-192-ECB| AES-256-ECB| AES-128-CBC| AES-192-CBC| AES-256-CBC ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls the block encryption mode for block-based algorithms such as AES. It affects encryption for AES_ENCRYPT() and AES_DECRYPT().','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4831,'tendbha','dbconf','MySQL-8.0','mysqld.bulk_insert_buffer_size','INT','8388608','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'MyISAM uses a special tree-like cache to make bulk inserts faster for INSERT ... SELECT, INSERT ... VALUES (...), (...), ..., and LOAD DATA when adding data to nonempty tables','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4833,'tendbha','dbconf','MySQL-8.0','mysqld.character_set_filesystem','STRING','BINARY','UTF8| LATIN1| GBK| BINARY ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The file system character set. It is best not to change it.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (334,'tendbha','dbconf','MySQL-8.0','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,'Specify default server character set','2022-04-25 10:00:47','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (335,'tendbha','dbconf','MySQL-8.0','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,'{{mysqld.collation_server}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4834,'tendbha','dbconf','MySQL-8.0','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If AUTO (the default), MySQL permits INSERT and SELECT statements to run concurrently for MyISAM tables that have no free blocks in the middle of the data file.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4835,'tendbha','dbconf','MySQL-8.0','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (336,'tendbha','dbconf','MySQL-8.0','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/data',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (337,'tendbha','dbconf','MySQL-8.0','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-17 14:26:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4836,'tendbha','dbconf','MySQL-8.0','mysqld.default_authentication_plugin','STRING','MYSQL_NATIVE_PASSWORD','MYSQL_NATIVE_PASSWORD| SHA256_PASSWORD| CACHING_SHA2_PASSWORD ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'It determines which authentication plugin the server assigns to new accounts created by CREATE USER and GRANT statements that do not explicitly specify an authentication plugin. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4837,'tendbha','dbconf','MySQL-8.0','mysqld.default_password_lifetime','INT','0','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable defines the global automatic password expiration policy. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (338,'tendbha','dbconf','MySQL-8.0','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-07 15:35:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4839,'tendbha','dbconf','MySQL-8.0','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default mode value to use for the WEEK() function','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4841,'tendbha','dbconf','MySQL-8.0','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'After inserting delayed_insert_limit rows, the INSERT DELAYED handler will check if there are any SELECT statements pending. If so, it allows these to execute before continuing.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4842,'tendbha','dbconf','MySQL-8.0','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How long an INSERT DELAYED thread should wait for INSERT statements before terminating.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4843,'tendbha','dbconf','MySQL-8.0','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'What size queue (in rows) should be allocated for handling INSERT DELAYED. If the queue becomes full, any client that does INSERT DELAYED will wait until there is room in the queue again.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4840,'tendbha','dbconf','MySQL-8.0','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This option applies only to MyISAM tables. It can have one of the following values to affect handling of the DELAY_KEY_WRITE table option that can be used in CREATE TABLE statements.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4844,'tendbha','dbconf','MySQL-8.0','mysqld.disconnect_on_expired_password','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This variable controls how the server handles clients with expired passwords','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4845,'tendbha','dbconf','MySQL-8.0','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the number of digits by which to increase the scale of the result of division operations performed with the / operator','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4846,'tendbha','dbconf','MySQL-8.0','mysqld.end_markers_in_json','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether optimizer JSON output should add end markers. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4847,'tendbha','dbconf','MySQL-8.0','mysqld.eq_range_index_dive_limit','INT','200','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The optimizer will use existing index statistics instead of doing index dives for equality ranges if the number of equality ranges for the index is larger than or equal to this number. If set to 0, index dives are always used.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4848,'tendbha','dbconf','MySQL-8.0','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable indicates the status of the Event Scheduler','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4849,'tendbha','dbconf','MySQL-8.0','mysqld.explicit_defaults_for_timestamp','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'This option causes CREATE TABLE to create all TIMESTAMP columns as NULL with the DEFAULT NULL attribute. Without this option, TIMESTAMP columns are NOT NULL and have implicit DEFAULT clauses.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4850,'tendbha','dbconf','MySQL-8.0','mysqld.flush_time','INT','0','[0,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this is set to a nonzero value, all tables are closed every flush_time seconds to free up resources and synchronize unflushed data to disk. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4851,'tendbha','dbconf','MySQL-8.0','mysqld.ft_max_word_len','INT','84','[10,4294967295]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The maximum length of the word to be included in a MyISAM FULLTEXT index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4852,'tendbha','dbconf','MySQL-8.0','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The minimum length of the word to be included in a FULLTEXT index. Note: FULLTEXT indexes must be rebuilt after changing this variable.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4853,'tendbha','dbconf','MySQL-8.0','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of best matches to use for query expansion','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4854,'tendbha','dbconf','MySQL-8.0','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted result length in bytes for the GROUP_CONCAT() function','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4855,'tendbha','dbconf','MySQL-8.0','mysqld.host_cache_size','INT','644','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the internal host cache','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (340,'tendbha','dbconf','MySQL-8.0','mysqld.init_connect','STRING','\"insert into infodba_schema.conn_log values(connection_id(),now(),user(),current_user(),\'{{.Mysqld.BindAddress}}\');\"','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2022-04-25 10:00:47','2023-04-17 17:11:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4856,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_adaptive_flushing','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to dynamically adjust the rate of flushing dirty pages in the InnoDB buffer pool based on the workload.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4857,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_adaptive_flushing_lwm','INT','10','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the low water mark representing percentage of redo log capacity at which adaptive flushing is enabled','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4858,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_adaptive_hash_index','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the InnoDB adaptive hash index is enabled or disabled.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4859,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_adaptive_max_sleep_delay','INT','150000','[1,1000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Permits InnoDB to automatically adjust the value of innodb_thread_sleep_delay up or down according to the current workload.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4860,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_async_truncate_size','INT','128','[128,168]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size, in MB, by which an asynchronous DROP TABLE truncates the file each time in the background.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4861,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_autoextend_increment','INT','64','[1,1000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The increment size (in megabytes) for extending the size of an auto-extending InnoDB system tablespace file when it becomes full.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4862,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The lock mode to use for generating auto-increment values.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4863,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_dump_at_shutdown','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether to record the pages cached in the InnoDB buffer pool when the MySQL server is shut down, to shorten the warmup process at the next restart. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4864,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_dump_pct','INT','25','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the percentage of the most recently used pages for each buffer pool to read out and dump','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (341,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_instances','INT','4','1 | 4 | 8','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of regions that the InnoDB buffer pool is divided into.','2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4865,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_load_at_startup','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies that the InnoDB buffer pool is automatically warmed up by loading the same pages it held at an earlier time on MySQL server startup.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (342,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}','','',2,0,0,0,1,'{{mysqld.innodb_buffer_pool_size}}',NULL,NULL,-1,NULL,'The size in bytes of the buffer pool, the memory area where InnoDB caches table and index data.','2022-04-25 10:00:47','2023-03-28 18:01:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4867,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_change_buffering','STRING','ALL','NONE| INSERTS| DELETES| CHANGES| PURGES| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether InnoDB performs change buffering','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4866,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_change_buffer_max_size','INT','25','[0,50]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4868,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_checksum_algorithm','STRING','CRC32','INNODB| CRC32| NONE| STRICT_INNODB| STRICT_CRC32| STRICT_NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how to generate and verify the checksum stored in the disk blocks of InnoDB tablespaces.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4869,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_cmp_per_index_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables per-index compression-related statistics in the INFORMATION_SCHEMA.INNODB_CMP_PER_INDEX table.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4870,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_commit_concurrency','INT','0','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of threads that can commit at the same time. A value of 0 (the default) permits any number of transactions to commit simultaneously.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4871,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_compression_failure_threshold_pct','INT','5','[0,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the compression failure rate threshold for a table, as a percentage, at which point MySQL begins adding padding within compressed pages to avoid expensive compression failures. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4872,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_compression_level','INT','6','[0,9]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the level of zlib compression to use for InnoDB compressed tables and indexes. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4873,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_compression_pad_pct_max','INT','50','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum percentage that can be reserved as free space within each compressed page','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4874,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines the number of threads that can enter InnoDB concurrently','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (343,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (344,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/data',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4875,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_deadlock_detect','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to disable deadlock detection.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4876,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_default_row_format','STRING','DYNAMIC','DYNAMIC| COMPACT| REDUNDANT ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The innodb_default_row_format option defines the default row format for InnoDB tables and user-created temporary tables.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4877,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_disable_sort_file_cache','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables the operating system file system cache for merge-sort temporary files.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4878,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_fast_ahi_cleanup_for_drop_table','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to enable the fast adaptive hash index cleanup optimization when dropping large tables.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (346,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-01 10:23:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (347,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_flush_log_at_trx_commit','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines Innodb transaction durability','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4879,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_flush_method','STRING','O_DIRECT','fsync| O_DSYNC| littlesync| nosync| O_DIRECT| O_DIRECT_NO_FSYNC ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the method used to flush data to InnoDB data files and log files','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4880,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_flush_neighbors','STRING','0','0| 1| 2 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4881,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_flush_sync','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to ignore the innodb_io_capacity setting for bursts of I/O activity that occur at checkpoints.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4882,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_ft_cache_size','INT','8000000','[1600000,80000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The memory allocated, in bytes, for the InnoDB FULLTEXT search index cache, which holds a parsed document in memory while creating an InnoDB FULLTEXT index','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4883,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_ft_enable_diag_print','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to enable additional full-text search (FTS) diagnostic output. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4884,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_ft_enable_stopword','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Create FTS index with stopword.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4885,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_ft_max_token_size','INT','84','[10,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Maximum character length of words that are stored in an InnoDB FULLTEXT index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4886,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_ft_min_token_size','INT','3','[0,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Minimum length of words that are stored in an InnoDB FULLTEXT index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4887,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_ft_num_word_optimize','INT','2000','[1000,10000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Number of words to process during each OPTIMIZE TABLE operation on an InnoDB FULLTEXT index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4888,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_ft_result_cache_limit','INT','2000000000','[1000000,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The InnoDB full-text search query result cache limit (defined in bytes) per full-text search query or per thread.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4889,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_ft_server_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The user supplied stopword table name.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4890,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_ft_sort_pll_degree','INT','2','[1,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Number of threads used in parallel to index and tokenize text in an InnoDB FULLTEXT index when building a search index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4891,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_ft_total_cache_size','INT','640000000','[32000000,1600000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The total memory allocated, in bytes, for the InnoDB full-text search index cache for all tables. Creating numerous tables, each with a FULLTEXT search index, could consume a significant portion of available memory.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4892,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_ft_user_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The user-supplied stopword table name, effective at the session level.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (348,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The innodb_io_capacity limit is a total limit for all buffer pool instances.','2022-04-25 10:00:47','2022-06-17 12:33:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4893,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_io_capacity_max','INT','40000','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines an upper limit on the number of I/O operations performed per second by InnoDB background tasks in critical situations.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (349,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The length of time in seconds an InnoDB transaction waits for a row lock before giving up','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (350,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4894,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_log_checksums','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables checksums for redo log pages.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4895,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_log_compressed_pages','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to prevent corruption that could occur if a different version of the zlib compression algorithm is used during recovery.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (351,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-17 12:33:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (352,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (353,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/innodb/log',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4896,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_lru_scan_depth','INT','1024','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'It specifies, per buffer pool instance, how far down the buffer pool LRU page list the page cleaner thread scans looking for dirty pages to flush. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4897,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_max_dirty_pages_pct','FLOAT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to flush data from the buffer pool so that the percentage of dirty pages does not exceed this value','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4898,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_max_dirty_pages_pct_lwm','FLOAT','0','[0,75]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines a low water mark representing the percentage of dirty pages at which preflushing is enabled to control the dirty page ratio.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4899,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_max_purge_lag','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the maximum length of the purge queue.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4900,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_max_purge_lag_delay','INT','0','[0,10000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the maximum delay in microseconds for the delay imposed by the innodb_max_purge_lag variable. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4901,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_max_undo_log_size','INT','1073741824','[10485760,17179869184]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines a threshold size for undo tablespaces.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4902,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_monitor_disable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Disables InnoDB metrics counters.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4903,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_monitor_enable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables InnoDB metrics counters.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4904,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the approximate percentage of the InnoDB buffer pool used for the old block sublist','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4905,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies how long in milliseconds a block inserted into the old sublist must stay there after its first access before it can be moved to the new sublist.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4906,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_online_alter_log_max_size','INT','134217728','[134217728,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Maximum modification log file size for online index creation','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4907,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_optimize_fulltext_only','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Changes the way OPTIMIZE TABLE operates on InnoDB tables. Intended to be enabled temporarily, during maintenance operations for InnoDB tables with FULLTEXT indexes. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4908,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_page_cleaners','INT','4','[1,64]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of page cleaner threads that flush dirty pages from buffer pool instances.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4909,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_print_all_deadlocks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this option is enabled, information about all deadlocks in InnoDB user transactions is recorded in the mysqld error log.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4910,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_purge_batch_size','INT','300','[1,5000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The granularity of changes, expressed in units of redo log records, that trigger a purge operation, flushing the changed buffer pool blocks to disk.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4911,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_purge_rseg_truncate_frequency','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4912,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_purge_threads','INT','4','[1,32]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of background threads devoted to the InnoDB purge operation.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4913,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_random_read_ahead','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables the random read-ahead technique for optimizing InnoDB I/O.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4914,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the sensitivity of linear read-ahead that InnoDB uses to prefetch pages into the buffer pool','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (354,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_read_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for read operations in InnoDB.','2022-04-25 10:00:47','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4915,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'InnoDB rolls back only the last statement on a transaction timeout by default. If innodb_rollback_on_timeout is specified, a transaction timeout causes InnoDB to abort and roll back the entire transaction.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4916,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_rollback_segments','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Defines the number of rollback segments used by InnoDB.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4917,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_sort_buffer_size','INT','1048576','[65536,67108864]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Specifies the size of sort buffers used to sort data during creation of an InnoDB index.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4918,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_spin_wait_delay','INT','6','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum delay between polls for a spin lock.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4919,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_stats_auto_recalc','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Causes InnoDB to automatically recalculate persistent statistics after the data in a table is changed substantially.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4920,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4921,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When this variable is enabled, InnoDB updates statistics when metadata statements such as SHOW TABLE STATUS or SHOW INDEX are run, or when accessing the INFORMATION_SCHEMA tables TABLES or STATISTICS','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4922,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_stats_persistent','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies whether InnoDB index statistics are persisted to disk. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4923,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_stats_persistent_sample_pages','INT','20','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of index pages to sample when estimating cardinality and other statistics for an indexed column, such as those calculated by ANALYZE TABLE.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4924,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_stats_transient_sample_pages','INT','8','[1,4294967296]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of leaf index pages to sample when calculating transient statistics','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4925,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_status_output','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables periodic output for the standard InnoDB Monitor.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4926,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_status_output_locks','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enables or disables the InnoDB Lock Monitor.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4927,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When innodb_strict_mode is ON, InnoDB returns errors rather than warnings for certain conditions','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4928,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_sync_array_size','INT','1','[1,1024]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Defines the size of the mutex/lock wait array.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4929,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_sync_spin_loops','INT','30','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of times a thread waits for an InnoDB mutex to be freed before the thread is suspended.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4930,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_table_drop_mode','STRING','SYNC_DROP','SYNC_DROP| ASYNC_DROP ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether large tables are dropped synchronously or asynchronously.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4931,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_table_locks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If autocommit = 0, InnoDB honors LOCK TABLES; MySQL does not return from LOCK TABLES ... WRITE until all other threads have released all their locks to the table. The default value of innodb_table_locks is 1, which means that LOCK TABLES causes InnoDB to lock a table internally if autocommit = 0.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (355,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_thread_concurrency','INT','16','[0,128]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'InnoDB tries to keep the number of operating system threads concurrently inside InnoDB less than or equal to the limit given by this variable.','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (356,'tendbha','dbconf','MySQL-8.0','mysqld.innodb_write_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of I/O threads for write operations in InnoDB.','2022-04-25 10:00:47','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (357,'tendbha','dbconf','MySQL-8.0','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on an interactive connection before closing it','2022-04-25 10:00:47','2023-04-07 11:55:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4932,'tendbha','dbconf','MySQL-8.0','mysqld.join_buffer_size','STRING','{MIN(DBInitMemory*128,262144)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum size of the buffer that is used for plain index scans, range index scans, and joins that do not use indexes.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
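+-- Illustrative note (not part of the seed data): defaults written as a formula,
+-- such as '{MIN(DBInitMemory*128,262144)}' for mysqld.join_buffer_size above,
+-- are presumably evaluated per instance from host facts rather than applied
+-- verbatim; assuming DBInitMemory is the instance memory quota in MB, a node
+-- with DBInitMemory = 1024 would get MIN(1024*128, 262144) = 131072 bytes.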
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (358,'tendbha','dbconf','MySQL-8.0','mysqld.key_buffer','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4933,'tendbha','dbconf','MySQL-8.0','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This characterizes the number of hits a hot block has to be untouched until it is considered aged enough to be downgraded to a warm block. This specifies the percentage ratio of that number of hits to the total number of blocks in key cache.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4934,'tendbha','dbconf','MySQL-8.0','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The default size of key cache blocks','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4935,'tendbha','dbconf','MySQL-8.0','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The minimum percentage of warm blocks in key cache','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4936,'tendbha','dbconf','MySQL-8.0','mysqld.lc_time_names','STRING','EN_US','JA_JP| PT_BR| EN_US ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies the locale that controls the language used to display day and month names and abbreviations.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4937,'tendbha','dbconf','MySQL-8.0','mysqld.local_infile','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether LOCAL is supported for LOAD DATA INFILE','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4938,'tendbha','dbconf','MySQL-8.0','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable specifies the timeout in seconds for attempts to acquire metadata locks.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (359,'tendbha','dbconf','MySQL-8.0','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,'{{mysqld.logdir}}/binlog/binlog{{port}}.bin',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:53',0);
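+-- Illustrative note (not part of the seed data): defaults and value_formula
+-- entries written with '{{...}}' placeholders, such as
+-- '{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin' above, look like Go
+-- text/template expressions rendered per instance before the value is applied;
+-- e.g. an instance with Logdir '/data/mysqllog' and Port 20000 would presumably
+-- resolve this to '/data/mysqllog/binlog/binlog20000.bin'.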
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (361,'tendbha','dbconf','MySQL-8.0','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4939,'tendbha','dbconf','MySQL-8.0','mysqld.log_output','STRING','FILE','FILE| TABLE| NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The destination or destinations for general query log and slow query log output. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4940,'tendbha','dbconf','MySQL-8.0','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether queries that do not use indexes are logged to the slow query log','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (362,'tendbha','dbconf','MySQL-8.0','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4941,'tendbha','dbconf','MySQL-8.0','mysqld.log_slow_admin_statements','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to include slow administrative statements in the statements written to the slow query log.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4942,'tendbha','dbconf','MySQL-8.0','mysqld.log_throttle_queries_not_using_indexes','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If log_queries_not_using_indexes is enabled, the log_throttle_queries_not_using_indexes variable limits the number of such queries per minute that can be written to the slow query log. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4943,'tendbha','dbconf','MySQL-8.0','mysqld.log_timestamps','STRING','SYSTEM','UTC| SYSTEM ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls the time zone of timestamps in messages written to the error log, and in general query log and slow query log messages written to files.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (364,'tendbha','dbconf','MySQL-8.0','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a query takes longer than this many seconds, the server increments the Slow_queries status variable','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (360,'tendbha','dbconf','MySQL-8.0','mysqld.loose_log_bin_compress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (378,'tendbha','dbconf','MySQL-8.0','mysqld.loose_relay_log_uncompress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (365,'tendbha','dbconf','MySQL-8.0','mysqld.lower_case_table_names','INT','0','0|1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-15 21:17:07',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4944,'tendbha','dbconf','MySQL-8.0','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If set to true, all INSERT, UPDATE, DELETE, and LOCK TABLE WRITE statements wait until there is no pending SELECT or LOCK TABLE READ on the affected table','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4945,'tendbha','dbconf','MySQL-8.0','mysqld.master_verify_checksum','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether to examine checksums when reading from the binary log.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (366,'tendbha','dbconf','MySQL-8.0','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of one packet or any generated/intermediate string.','2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (367,'tendbha','dbconf','MySQL-8.0','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (368,'tendbha','dbconf','MySQL-8.0','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (369,'tendbha','dbconf','MySQL-8.0','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections.','2022-04-25 10:00:47','2022-06-22 12:43:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (370,'tendbha','dbconf','MySQL-8.0','mysqld.max_connect_errors','INT','99999999','[1,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If more than this many successive connection requests from a host are interrupted without a successful connection, the server blocks that host from further connections.','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4946,'tendbha','dbconf','MySQL-8.0','mysqld.max_error_count','INT','64','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of error, warning, and information messages to be stored for display by the SHOW ERRORS and SHOW WARNINGS statements.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4947,'tendbha','dbconf','MySQL-8.0','mysqld.max_heap_table_size','INT','67108864','[16384,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size to which user-created MEMORY tables are permitted to grow.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4948,'tendbha','dbconf','MySQL-8.0','mysqld.max_length_for_sort_data','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The cutoff on the size of index values that determines which filesort algorithm to use.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4949,'tendbha','dbconf','MySQL-8.0','mysqld.max_points_in_geometry','INT','65536','[3,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum value of the points_per_circle argument to the ST_Buffer_Strategy() function.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4950,'tendbha','dbconf','MySQL-8.0','mysqld.max_prepared_stmt_count','INT','16382','[0,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable limits the total number of prepared statements in the server.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4951,'tendbha','dbconf','MySQL-8.0','mysqld.max_sort_length','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of bytes to use when sorting data values.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4952,'tendbha','dbconf','MySQL-8.0','mysqld.max_sp_recursion_depth','INT','0','[0,255]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of times that any given stored procedure may be called recursively. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4953,'tendbha','dbconf','MySQL-8.0','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum permitted number of simultaneous client connections per user.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4954,'tendbha','dbconf','MySQL-8.0','mysqld.min_examined_row_limit','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Queries that examine fewer than this number of rows are not logged to the slow query log. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4955,'tendbha','dbconf','MySQL-8.0','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when sorting MyISAM indexes during a REPAIR TABLE or when creating indexes with CREATE INDEX or ALTER TABLE','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4956,'tendbha','dbconf','MySQL-8.0','mysqld.mysql_native_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether the mysql_native_password built-in authentication plugin supports proxy users.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4957,'tendbha','dbconf','MySQL-8.0','mysqld.net_buffer_length','INT','16384','[1024,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the client thread buffer length.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4958,'tendbha','dbconf','MySQL-8.0','mysqld.net_read_timeout','INT','30','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from a connection before aborting the read.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4959,'tendbha','dbconf','MySQL-8.0','mysqld.net_retry_count','INT','10','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If a read or write on a communication port is interrupted, retry this many times before giving up.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4960,'tendbha','dbconf','MySQL-8.0','mysqld.net_write_timeout','INT','60','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for a block to be written to a connection before aborting the write.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4961,'tendbha','dbconf','MySQL-8.0','mysqld.ngram_token_size','INT','2','[1,10]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'ngram_token_size is set to the size of the largest token that you want to search for.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4962,'tendbha','dbconf','MySQL-8.0','mysqld.optimizer_prune_level','STRING','1','0| 1 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls the heuristics applied during query optimization to prune less-promising partial plans from the optimizer search space.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4963,'tendbha','dbconf','MySQL-8.0','mysqld.optimizer_search_depth','INT','62','[0,62]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum depth of search performed by the query optimizer.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4964,'tendbha','dbconf','MySQL-8.0','mysqld.optimizer_switch','STRING','INDEX_MERGE=ON,INDEX_MERGE_UNION=ON,INDEX_MERGE_SORT_UNION=ON,INDEX_MERGE_INTERSECTION=ON,ENGINE_CONDITION_PUSHDOWN=ON,INDEX_CONDITION_PUSHDOWN=ON,MRR=ON,MRR_COST_BASED=ON,BLOCK_NESTED_LOOP=ON,BATCHED_KEY_ACCESS=OFF,MATERIALIZATION=ON,SEMIJOIN=ON,LOOSESCAN=ON,FIRSTMATCH=ON,DUPLICATEWEEDOUT=ON,SUBQUERY_MATERIALIZATION_COST_BASED=ON,USE_INDEX_EXTENSIONS=ON,CONDITION_FANOUT_FILTER=ON,DERIVED_MERGE=ON','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls optimizer behavior. Must be configured with reference to the official documentation.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4965,'tendbha','dbconf','MySQL-8.0','mysqld.optimizer_trace_limit','INT','1','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum number of optimizer traces to display. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4966,'tendbha','dbconf','MySQL-8.0','mysqld.optimizer_trace_max_mem_size','INT','16384','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum cumulative size of stored optimizer traces. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4967,'tendbha','dbconf','MySQL-8.0','mysqld.optimizer_trace_offset','INT','-1','[-2147483647,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The offset of optimizer traces to display.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (371,'tendbha','dbconf','MySQL-8.0','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'Whether enable the performance schema.','2022-04-25 10:00:47','2022-06-16 21:28:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (372,'tendbha','dbconf','MySQL-8.0','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,'{{mysqld.port}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:45',0);
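+-- Note on the two template forms above: for rows such as mysqld.port, value_default
+-- holds '{{.Mysqld.Port}}' while value_formula holds '{{mysqld.port}}'. The former
+-- matches Go text/template syntax and the latter a lowercase dotted key; that two
+-- different renderers consume them is an assumption inferred from the syntax alone.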
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4968,'tendbha','dbconf','MySQL-8.0','mysqld.preload_buffer_size','INT','32768','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the buffer that is allocated when preloading indexes.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4969,'tendbha','dbconf','MySQL-8.0','mysqld.query_alloc_block_size','INT','8192','[1024,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The allocation size of memory blocks that are allocated for objects created during statement parsing and execution.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4970,'tendbha','dbconf','MySQL-8.0','mysqld.query_prealloc_size','INT','8192','[8192,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of the persistent buffer used for statement parsing and execution. This buffer is not freed between statements.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4971,'tendbha','dbconf','MySQL-8.0','mysqld.range_alloc_block_size','INT','4096','[4096,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The size of blocks that are allocated when doing range optimization.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4972,'tendbha','dbconf','MySQL-8.0','mysqld.range_optimizer_max_mem_size','INT','8388608','[0,17179869184]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The limit on memory consumption for the range optimizer. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4973,'tendbha','dbconf','MySQL-8.0','mysqld.read_buffer_size','INT','262144','[8200,2147479552]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each thread that does a sequential scan for a MyISAM table allocates a buffer of this size (in bytes) for each table it scans.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4974,'tendbha','dbconf','MySQL-8.0','mysqld.read_rnd_buffer_size','INT','524288','[1,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Used for reads from MyISAM tables, and, for any storage engine, for Multi-Range Read optimization.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (376,'tendbha','dbconf','MySQL-8.0','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/relay-log/relay-log.bin',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (377,'tendbha','dbconf','MySQL-8.0','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (379,'tendbha','dbconf','MySQL-8.0','mysqld.replicate-wild-ignore-table','STRING','mysql.%,infodba_schema.conn_log',NULL,'STRING',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:12:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (383,'tendbha','dbconf','MySQL-8.0','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,'{{mysqld.server_id}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:01:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4975,'tendbha','dbconf','MySQL-8.0','mysqld.session_track_gtids','STRING','OFF','OFF| OWN_GTID| ALL_GTIDS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks GTIDs within the current session and returns them to the client. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4976,'tendbha','dbconf','MySQL-8.0','mysqld.session_track_schema','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks changes to the default schema (database) name within the current session and makes this information available to the client when changes occur.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4977,'tendbha','dbconf','MySQL-8.0','mysqld.session_track_state_change','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the server tracks changes to the state of the current session and notifies the client when state changes occur. ','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4978,'tendbha','dbconf','MySQL-8.0','mysqld.sha256_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether the sha256_password built-in authentication plugin supports proxy users.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4979,'tendbha','dbconf','MySQL-8.0','mysqld.show_old_temporals','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Whether SHOW CREATE TABLE output includes comments to flag temporal columns found to be in pre-5.6.4 format (TIME, DATETIME, and TIMESTAMP columns without support for fractional seconds precision).','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (385,'tendbha','dbconf','MySQL-8.0','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-09 18:37:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (386,'tendbha','dbconf','MySQL-8.0','mysqld.slave_compressed_protocol','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:11:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (387,'tendbha','dbconf','MySQL-8.0','mysqld.slave_exec_mode','STRING','STRICT','|STRICT|IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4980,'tendbha','dbconf','MySQL-8.0','mysqld.slave_net_timeout','INT','120','[15,300]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds to wait for more data from the master before the slave considers the connection broken, aborts the read, and tries to reconnect','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (388,'tendbha','dbconf','MySQL-8.0','mysqld.slave_parallel_type','STRING','DATABASE','DATABASE| LOGICAL_CLOCK','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specifies if the slave will use database partitioning or information from master to parallelize transactions. (Default: DATABASE).','2022-04-25 10:00:47','2023-04-17 14:55:08',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (389,'tendbha','dbconf','MySQL-8.0','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Enable multithreading on the replica and set the number of application threads used to execute replicated transactions in parallel.','2022-04-25 10:00:47','2023-04-17 14:58:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4981,'tendbha','dbconf','MySQL-8.0','mysqld.slave_rows_search_algorithms','STRING','TABLE_SCAN,INDEX_SCAN,HASH_SCAN','TABLE_SCAN,INDEX_SCAN| INDEX_SCAN,HASH_SCAN| TABLE_SCAN,HASH_SCAN| TABLE_SCAN,INDEX_SCAN,HASH_SCAN ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'When preparing batches of rows for row-based logging and replication, this variable controls how the rows are searched for matches.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4982,'tendbha','dbconf','MySQL-8.0','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If creating a thread takes longer than this many seconds, the server increments the Slow_launch_threads status variable','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (390,'tendbha','dbconf','MySQL-8.0','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Specify the initial slow query log state.','2022-04-25 10:00:47','2023-04-17 14:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (391,'tendbha','dbconf','MySQL-8.0','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,'{{mysqld.slow_query_log_file}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (392,'tendbha','dbconf','MySQL-8.0','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,'{{mysqld.socket}}',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (393,'tendbha','dbconf','MySQL-8.0','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Each session that must perform a sort allocates a buffer of this size. ','2022-04-25 10:00:47','2022-06-23 12:21:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4983,'tendbha','dbconf','MySQL-8.0','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'If this variable is set to 1, the most recently inserted automatically generated AUTO_INCREMENT value can be found with a WHERE auto_col IS NULL condition.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (394,'tendbha','dbconf','MySQL-8.0','mysqld.sql_mode','STRING','\'\'','\'\'| NO_ENGINE_SUBSTITUTION| ALLOW_INVALID_DATES| ANSI_QUOTES| ERROR_FOR_DIVISION_BY_ZERO| HIGH_NOT_PRECEDENCE| IGNORE_SPACE| NO_AUTO_VALUE_ON_ZERO| NO_BACKSLASH_ESCAPES| NO_DIR_IN_CREATE| NO_UNSIGNED_SUBTRACTION| NO_ZERO_DATE| NO_ZERO_IN_DATE| ONLY_FULL_GROUP_BY| PIPES_AS_CONCAT| REAL_AS_FLOAT| STRICT_ALL_TABLES| STRICT_TRANS_TABLES ','ENUMS',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The current server SQL mode.','2022-04-25 10:00:47','2023-04-19 11:29:17',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4984,'tendbha','dbconf','MySQL-8.0','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Note: this setting is session-level; it is better applied via init_connect. Set to 1 to require that UPDATE and DELETE statements include a WHERE clause or a LIMIT clause.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (395,'tendbha','dbconf','MySQL-8.0','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Sets a soft upper limit for the number of cached stored routines per connection.','2022-04-25 10:00:47','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (396,'tendbha','dbconf','MySQL-8.0','mysqld.sync_binlog','INT','0','[0,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls how often MySQL synchronizes the binary log to disk (0 relies on the operating system to flush).','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4985,'tendbha','dbconf','MySQL-8.0','mysqld.table_definition_cache','STRING','{MAX(DBInitMemory*512/1000,2048)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of table definitions (from .frm files) that can be stored in the definition cache.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (397,'tendbha','dbconf','MySQL-8.0','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of open tables for all threads. Increasing this value increases the number of file descriptors that mysqld requires.','2022-04-25 10:00:47','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4986,'tendbha','dbconf','MySQL-8.0','mysqld.table_open_cache_instances','STRING','{MIN(DBInitMemory/1000,16)}','','',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The number of table cache instances','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (398,'tendbha','dbconf','MySQL-8.0','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'How many threads we should keep in a cache for reuse','2022-04-25 10:00:47','2022-06-17 12:33:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4987,'tendbha','dbconf','MySQL-8.0','mysqld.thread_handling','STRING','one-thread-per-connection','one-thread-per-connection| pool-of-threads ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The thread-handling model used by the server for connection threads.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4988,'tendbha','dbconf','MySQL-8.0','mysqld.thread_pool_oversubscribe','INT','3','[3,32]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Determines how many worker threads in a group can remain active at the same time once a thread group is oversubscribed.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4989,'tendbha','dbconf','MySQL-8.0','mysqld.thread_pool_size','STRING','{MIN(DBInitCpu,64)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of thread groups in the thread pool.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
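+-- A worked reading of the {MAX(...)}/{MIN(...)} sizing formulas used above by
+-- table_definition_cache, table_open_cache_instances and thread_pool_size.
+-- DBInitMemory (MB) and DBInitCpu are the host inputs named in this dump; the
+-- sample host (16000 MB of memory, 8 cores) is an assumption for illustration only:
+--   table_definition_cache     = MAX(16000*512/1000, 2048) = MAX(8192, 2048) = 8192
+--   table_open_cache_instances = MIN(16000/1000, 16)       = MIN(16, 16)     = 16
+--   thread_pool_size           = MIN(8, 64)                = 8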
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4990,'tendbha','dbconf','MySQL-8.0','mysqld.thread_stack','INT','524288','[131072,1073741824]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,'The stack size for each thread.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4991,'tendbha','dbconf','MySQL-8.0','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Sets the global server time zone; the default follows SYSTEM. Values can be given in several formats, such as SYSTEM, +8:00, -6:00, or Europe/Helsinki. For more information, consult the MySQL documentation.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (399,'tendbha','dbconf','MySQL-8.0','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,'{{mysqld.datadir}}/tmp',NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-03-28 18:00:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4992,'tendbha','dbconf','MySQL-8.0','mysqld.tmp_table_size','INT','209715200','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The maximum size of internal in-memory temporary tables. This variable does not apply to user-created MEMORY tables.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4993,'tendbha','dbconf','MySQL-8.0','mysqld.transaction_alloc_block_size','INT','8192','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The amount in bytes by which to increase a per-transaction memory pool which needs memory.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4994,'tendbha','dbconf','MySQL-8.0','mysqld.transaction_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Default transaction isolation level','2022-06-16 21:39:26','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4995,'tendbha','dbconf','MySQL-8.0','mysqld.transaction_prealloc_size','INT','4096','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'There is a per-transaction memory pool from which various transaction-related allocations take memory.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (400,'tendbha','dbconf','MySQL-8.0','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-04-25 10:00:47','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (4996,'tendbha','dbconf','MySQL-8.0','mysqld.updatable_views_with_limit','STRING','YES','YES| NO ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'This variable controls whether updates to a view can be made when the view does not contain all columns of the primary key defined in the underlying table, if the update statement contains a LIMIT clause.','2022-06-16 21:39:26','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (401,'tendbha','dbconf','MySQL-8.0','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'The number of seconds the server waits for activity on a noninteractive connection before closing it','2022-04-25 10:00:47','2022-06-16 22:21:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10969,'tendbha','dbconf','MySQL-8.0','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 18:01:10',0);
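+-- A minimal sketch of reading one of the definitions above back out; the table and
+-- column names are as inserted here, and the WHERE values pick the
+-- mysqld.max_connections row from this dump:
+--   SELECT conf_name, value_type, value_default, value_allowed, value_type_sub
+--     FROM tb_config_name_def
+--    WHERE namespace='tendbha' AND conf_type='dbconf'
+--      AND conf_file='MySQL-8.0' AND conf_name='mysqld.max_connections';
+--   -- expected row: ('mysqld.max_connections','INT','5000','[500,100000]','RANGE')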
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (466,'tendbha','deploy','deploy_info','charset','STRING','utf8','utf8|utf8mb4|latin1|gbk','ENUM',1,0,0,0,1,NULL,NULL,'字符集',-1,NULL,NULL,'2022-04-25 10:00:47','2022-09-13 14:19:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (467,'tendbha','deploy','deploy_info','db_version','STRING','MySQL-5.7','MySQL-5.5 | MySQL-5.6 | MySQL-5.7 | MySQL-8.0','ENUM',1,0,0,0,1,NULL,NULL,'DB版本',-1,NULL,NULL,'2022-04-25 10:00:47','2022-09-13 17:28:25',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (468,'tendbha','deploy','deploy_info','storage_engine','STRING','InnoDB','InnoDB','',1,0,0,0,1,NULL,NULL,'存储引擎',-1,NULL,NULL,'2022-04-25 10:00:47','2022-09-13 15:03:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (470,'tendbha','deploy','deploy_info','tolerance_level','STRING','compus','idc|compus|city','ENUM',-1,0,0,0,1,NULL,NULL,'容灾级别',-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15831,'tendbha','mysql_monitor','items-config.yaml','character-consistency','STRING','{\"enable\":true, \"schedule\":\"0 0 14 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15835,'tendbha','mysql_monitor','items-config.yaml','engine','STRING','{\"enable\":true, \"schedule\":\"0 0 12 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:30',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15836,'tendbha','mysql_monitor','items-config.yaml','ext3-check','STRING','{\"enable\":true, \"schedule\":\"0 0 16 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:32',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15847,'tendbha','mysql_monitor','items-config.yaml','master-slave-heartbeat','STRING','{\"enable\":true, \"schedule\":\"@every 10s\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:33:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15846,'tendbha','mysql_monitor','items-config.yaml','mysql-config-diff','STRING','{\"enable\":true, \"schedule\":\"@every 10m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15845,'tendbha','mysql_monitor','items-config.yaml','mysql-connlog-report','STRING','{\"enable\":true, \"schedule\":\"0 40 23 * * *\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15844,'tendbha','mysql_monitor','items-config.yaml','mysql-connlog-rotate','STRING','{\"enable\":true, \"schedule\":\"0 30 23 * * *\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15843,'tendbha','mysql_monitor','items-config.yaml','mysql-connlog-size','STRING','{\"enable\":true, \"schedule\":\"0 0 12 * * *\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15839,'tendbha','mysql_monitor','items-config.yaml','mysql-err-critical','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15838,'tendbha','mysql_monitor','items-config.yaml','mysql-err-notice','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15841,'tendbha','mysql_monitor','items-config.yaml','mysql-inject','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:38',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15840,'tendbha','mysql_monitor','items-config.yaml','mysql-lock','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:37',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15913,'tendbha','mysql_monitor','items-config.yaml','proxy-backend','STRING','{\"enable\":true, \"schedule\":\"0 0 14 * * 1\", \"machine_type\":\"proxy\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:33:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15912,'tendbha','mysql_monitor','items-config.yaml','proxy-user-list','STRING','{\"enable\":true, \"schedule\":\"0 0 14 * * 1\", \"machine_type\":\"proxy\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:33:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15842,'tendbha','mysql_monitor','items-config.yaml','rotate-slowlog','STRING','{\"enable\":true, \"schedule\":\"0 55 23 * * *\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:40',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15832,'tendbha','mysql_monitor','items-config.yaml','routine-definer','STRING','{\"enable\":true, \"schedule\":\"0 0 15 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15837,'tendbha','mysql_monitor','items-config.yaml','slave-status','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": [\"repeater\", \"slave\"]}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15834,'tendbha','mysql_monitor','items-config.yaml','trigger-definer','STRING','{\"enable\":true, \"schedule\":\"0 0 15 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15833,'tendbha','mysql_monitor','items-config.yaml','view-definer','STRING','{\"enable\":true, \"schedule\":\"0 0 15 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-27 13:09:08','2023-03-20 17:11:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (715,'tendbha','proxyconf','default','conn_log','STRING','true','true|false','ENUM',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:51:24','2022-05-23 15:55:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (716,'tendbha','proxyconf','default','daemon','STRING','true','true|false','ENUM',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:51:24','2022-05-23 15:55:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (719,'tendbha','proxyconf','default','event-threads','INT','7','[1,10]','RANGE',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:51:24','2022-05-23 15:55:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (714,'tendbha','proxyconf','default','ignore-user','STRING','MONITOR,proxy','','STRING',1,0,1,0,0,'NULL','','',-1,NULL,'','2022-05-23 15:51:24','2022-09-29 16:19:19',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (717,'tendbha','proxyconf','default','keepalive','STRING','true','true|false','ENUM',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:51:24','2022-05-23 15:55:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (720,'tendbha','proxyconf','default','log-level','STRING','warning','info|warning','ENUM',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:51:24','2022-05-23 15:55:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (721,'tendbha','proxyconf','default','plugins','STRING','admin, proxy','','STRING',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:51:24','2022-05-23 15:55:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (722,'tendbha','proxyconf','default','proxy-address','STRING','1.1.1.1:3306','','STRING',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:51:24','2022-05-23 15:55:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (718,'tendbha','proxyconf','default','query_response_time_stats','STRING','true','true|false','ENUM',1,0,1,0,0,NULL,'','',-1,NULL,'','2022-05-23 15:51:24','2022-05-23 15:55:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (457,'tendbha','user','tb_app_info','dba','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,'DBA',-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (462,'tendbha','user','tb_app_info','developer','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,'开发人员',-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (460,'tendbha','user','tb_app_info','mongo_dba','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,'Mongo DBA',-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (458,'tendbha','user','tb_app_info','mysql_dba','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,'MySQL DBA',-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (461,'tendbha','user','tb_app_info','nosql_dba','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,'NoSQL DBA',-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (465,'tendbha','user','tb_app_info','notifier','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,'告警订阅',-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (463,'tendbha','user','tb_app_info','opser','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,'运维人员',-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (464,'tendbha','user','tb_app_info','productor','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,'产品运营',-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (459,'tendbha','user','tb_app_info','redis_dba','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,'Redis DBA',-1,NULL,NULL,'2022-04-25 10:00:47','2022-10-20 12:26:09',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
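The `/*!NNNNN ... */` directives closing the dump above are mysqldump version-conditional comments: a server at or above the encoded version executes the body (`/*!50112 ... */` requires 5.1.12+), older servers skip it, which is how the dump restores the session settings it saved in its header. As a minimal sanity check after applying one of these seed-data up migrations — the table and namespace are taken from the dump itself, the query is illustrative only, not part of any migration:

-- illustrative check against the bk_dbconfig database
SELECT conf_type, conf_file, COUNT(*) AS defs
FROM tb_config_name_def
WHERE namespace = 'tendbha'
GROUP BY conf_type, conf_file;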
diff --git a/dbm-services/common/db-config/assets/migrations/000021_tendbsingle_data.down.sql b/dbm-services/common/db-config/assets/migrations/000021_tendbsingle_data.down.sql
new file mode 100644
index 0000000000..ca16b6e154
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000021_tendbsingle_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='tendbsingle';
+DELETE FROM tb_config_name_def WHERE namespace='tendbsingle' AND (flag_encrypt!=1 or value_default like '{{%');
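This down migration's predicate mirrors the WHERE filter used to produce the matching up dump (namespace='tendbsingle' AND (flag_encrypt!=1 or value_default like '{{%')), so it removes exactly the rows the up migration seeds — every non-encrypted definition plus encrypted ones whose value_default is a {{...}} template — and leaves other encrypted entries alone. A minimal dry-run sketch before rolling back, assuming direct access to the bk_dbconfig database:

-- illustrative preview of the rows the down migration would delete
SELECT id, conf_type, conf_file, conf_name
FROM tb_config_name_def
WHERE namespace = 'tendbsingle'
  AND (flag_encrypt != 1 OR value_default LIKE '{{%');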
diff --git a/dbm-services/common/db-config/assets/migrations/000021_tendbsingle_data.up.sql b/dbm-services/common/db-config/assets/migrations/000021_tendbsingle_data.up.sql
new file mode 100644
index 0000000000..b9484fe54d
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000021_tendbsingle_data.up.sql
@@ -0,0 +1,1017 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='tendbsingle'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (178,'tendbsingle','backup','binlog_rotate.yaml','binlog滚动与备份选项','binlog_rotate.yaml','plat,app,module,cluster','',1,1,1,'',0,0,0,'binlog_rotate.yaml','2023-03-09 17:34:12','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (131,'tendbsingle','backup','dbbackup.ini','备份配置','dbbackup.conf配置项','plat,app,module,cluster','',1,1,0,'',0,0,0,'dbbackup.conf配置项','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (132,'tendbsingle','backup','dbbackup.options','备份控制选项','dbbackup.ini控制选项','plat,app,module,cluster','',1,1,0,'',0,0,0,'dbbackup.ini控制选项','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (47,'tendbsingle','dbconf','MySQL-5.6','my.cnf配置','5.6_参数配置','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'5.6_参数配置','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (42,'tendbsingle','dbconf','MySQL-5.7','my.cnf配置','5.7_参数配置','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'5.7_参数配置','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (48,'tendbsingle','dbconf','MySQL-8.0','my.cnf配置','8.0_参数配置','plat,app,module,cluster','cluster',1,1,0,NULL,5,365,0,'8.0_参数配置','2022-04-25 10:19:22','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (105,'tendbsingle','deploy','deploy_info','部署配置',NULL,'plat,app,module,cluster','',0,1,0,NULL,5,365,0,NULL,'2022-04-25 10:19:22','2023-03-20 21:40:05','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='tendbsingle' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15917,'tendbsingle','backup','binlog_rotate.yaml','backup_client.cos','STRING','{\n  \"enable\": true,\n  \"with_md5\": true,\n  \"file_tag\": \"INCREMENT_BACKUP\",\n  \"tool_path\": \"cos-client\"\n}','','MAP',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2022-04-25 10:00:47','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14112,'tendbsingle','backup','binlog_rotate.yaml','backup_client.ibs','STRING','{\n  \"enable\": false,\n  \"ibs_mode\": \"hdfs\",\n  \"with_md5\": true,\n  \"file_tag\": \"INCREMENT_BACKUP\",\n  \"tool_path\": \"backup_client\"\n}','','MAP',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14113,'tendbsingle','backup','binlog_rotate.yaml','crond.api_url','STRING','http://127.0.0.1:9999','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14114,'tendbsingle','backup','binlog_rotate.yaml','crond.command','STRING','cd /home/mysql/rotate_binlog && ./rotatebinlog -c config.yaml','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14115,'tendbsingle','backup','binlog_rotate.yaml','crond.item_name','STRING','rotate_binlog','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14116,'tendbsingle','backup','binlog_rotate.yaml','crond.schedule','STRING','*/5 * * * *','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14117,'tendbsingle','backup','binlog_rotate.yaml','encrypt.enable','BOOL','false','true | false','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14119,'tendbsingle','backup','binlog_rotate.yaml','public.keep_policy','STRING','most','most | least','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14120,'tendbsingle','backup','binlog_rotate.yaml','public.max_binlog_total_size','STRING','200g','[100m, 9999g]','BYTES',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14121,'tendbsingle','backup','binlog_rotate.yaml','public.max_disk_used_pct','INT','80','[1,99]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14122,'tendbsingle','backup','binlog_rotate.yaml','public.max_keep_duration','STRING','61d','','DURATION',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14123,'tendbsingle','backup','binlog_rotate.yaml','public.purge_interval','STRING','4h','','DURATION',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14124,'tendbsingle','backup','binlog_rotate.yaml','public.rotate_interval','STRING','10m','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14125,'tendbsingle','backup','binlog_rotate.yaml','report.enable','BOOL','true','true | false','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14126,'tendbsingle','backup','binlog_rotate.yaml','report.filepath','STRING','/home/mysql/dbareport/mysql/binlog','','STRING',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14127,'tendbsingle','backup','binlog_rotate.yaml','report.log_maxage','INT','30','[1, 60]','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14128,'tendbsingle','backup','binlog_rotate.yaml','report.log_maxbackups','INT','10','[1, 30]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (14129,'tendbsingle','backup','binlog_rotate.yaml','report.log_maxsize','INT','5','[1, 10]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'Controls whether to produce additional warning messages.','2023-03-09 17:34:12','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13048,'tendbsingle','backup','dbbackup.ini','BackupClient.DoChecksum','STRING','true','true | false','ENUM',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13049,'tendbsingle','backup','dbbackup.ini','BackupClient.FileTag','STRING','MYSQL_FULL_BACKUP','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13050,'tendbsingle','backup','dbbackup.ini','BackupClient.RemoteFileSystem','STRING','hdfs','hdfs | cos','ENUM',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16403,'tendbsingle','backup','dbbackup.ini','LogicalBackup.ChunkFilesize','INT','2048','[512, 9999999]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'MB','2022-05-26 20:11:23','2023-05-24 21:40:03',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16389,'tendbsingle','backup','dbbackup.ini','LogicalBackup.DefaultsFile','STRING','','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-25 09:50:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16400,'tendbsingle','backup','dbbackup.ini','LogicalBackup.DisableCompress','STRING','false','false | true','BOOL',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-05-24 21:45:24','2023-05-24 21:45:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16388,'tendbsingle','backup','dbbackup.ini','LogicalBackup.ExtraOpt','STRING','--skip-definer','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2023-05-24 21:45:24','2023-05-25 09:59:19',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13051,'tendbsingle','backup','dbbackup.ini','LogicalBackup.FlushRetryCount','INT','3','','INT',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13055,'tendbsingle','backup','dbbackup.ini','LogicalBackup.Regex','STRING','{{.LogicalBackup.Regex}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16401,'tendbsingle','backup','dbbackup.ini','LogicalBackup.Threads','INT','4','','INT',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:42:40',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16391,'tendbsingle','backup','dbbackup.ini','LogicalLoad.EnableBinlog','STRING','false','false | true','BOOL',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 22:01:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16399,'tendbsingle','backup','dbbackup.ini','LogicalLoad.ExtraOpt','STRING','','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:46:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13156,'tendbsingle','backup','dbbackup.ini','LogicalLoad.IndexFilePath','STRING','/data/dbbak/xxxxx','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13058,'tendbsingle','backup','dbbackup.ini','LogicalLoad.MysqlCharset','STRING','binary','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13059,'tendbsingle','backup','dbbackup.ini','LogicalLoad.MysqlHost','STRING','{{.LogicalLoad.MysqlHost}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13060,'tendbsingle','backup','dbbackup.ini','LogicalLoad.MysqlLoadDir','STRING','/data/dbbak/your_loader_dir','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13061,'tendbsingle','backup','dbbackup.ini','LogicalLoad.MysqlPasswd','STRING','{{.LogicalLoad.MysqlPasswd}}','','STRING',2,0,0,1,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13062,'tendbsingle','backup','dbbackup.ini','LogicalLoad.MysqlPort','STRING','{{.LogicalLoad.MysqlPort}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13063,'tendbsingle','backup','dbbackup.ini','LogicalLoad.MysqlUser','STRING','{{.LogicalLoad.MysqlUser}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13065,'tendbsingle','backup','dbbackup.ini','LogicalLoad.Regex','STRING','','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13066,'tendbsingle','backup','dbbackup.ini','LogicalLoad.Threads','INT','2','','INT',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16385,'tendbsingle','backup','dbbackup.ini','PhysicalBackup.DefaultsFile','STRING','{{.PhysicalBackup.DefaultsFile}}','','',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-25 10:24:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16396,'tendbsingle','backup','dbbackup.ini','PhysicalBackup.ExtraOpt','STRING','--safe-slave-backup-timeout=60','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:46:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16387,'tendbsingle','backup','dbbackup.ini','PhysicalBackup.SplitSpeed','INT','500','[0, 2048]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'MB/s','2022-05-26 20:11:23','2023-05-25 10:03:30',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16405,'tendbsingle','backup','dbbackup.ini','PhysicalBackup.Threads','INT','2','[0, 8]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-10 15:21:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16390,'tendbsingle','backup','dbbackup.ini','PhysicalBackup.Throttle','INT','100','[0, 200]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 22:07:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16394,'tendbsingle','backup','dbbackup.ini','PhysicalLoad.CopyBack','STRING','false','false | true','BOOL',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:56:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16397,'tendbsingle','backup','dbbackup.ini','PhysicalLoad.DefaultsFile','STRING','/etc/my.cnf','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:46:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16398,'tendbsingle','backup','dbbackup.ini','PhysicalLoad.ExtraOpt','STRING','','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:46:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16395,'tendbsingle','backup','dbbackup.ini','PhysicalLoad.IndexFilePath','STRING','/xx/xxx.index','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:56:32',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16392,'tendbsingle','backup','dbbackup.ini','PhysicalLoad.MysqlLoadDir','STRING','/xx/loader_dir','','',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:57:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16393,'tendbsingle','backup','dbbackup.ini','PhysicalLoad.Threads','INT','2','[0, 16]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-24 21:57:18',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13067,'tendbsingle','backup','dbbackup.ini','Public.BackupDir','STRING','/data/dbbak','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16386,'tendbsingle','backup','dbbackup.ini','Public.BackupId','STRING','{{.Public.BackupId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-25 10:08:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13068,'tendbsingle','backup','dbbackup.ini','Public.BackupTimeout','STRING','09:00:00','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13069,'tendbsingle','backup','dbbackup.ini','Public.BackupType','STRING','logical','logical | physical','ENUM',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13070,'tendbsingle','backup','dbbackup.ini','Public.BillId','INT','{{.Public.BillId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13071,'tendbsingle','backup','dbbackup.ini','Public.BkBizId','INT','{{.Public.BkBizId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13072,'tendbsingle','backup','dbbackup.ini','Public.BkCloudId','INT','{{.Public.BkCloudId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13073,'tendbsingle','backup','dbbackup.ini','Public.ClusterAddress','STRING','{{.Public.ClusterAddress}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16306,'tendbsingle','backup','dbbackup.ini','Public.ClusterId','STRING','{{.Public.ClusterId}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-15 11:20:17',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13074,'tendbsingle','backup','dbbackup.ini','Public.DataSchemaGrant','STRING','{{.Public.DataSchemaGrant}}','All | Schema | Grant | Data','ENUM',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16404,'tendbsingle','backup','dbbackup.ini','Public.IOLimitMBPerSec','INT','500','[0, 4096]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-05-15 11:21:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13075,'tendbsingle','backup','dbbackup.ini','Public.MysqlCharset','STRING','binary','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13076,'tendbsingle','backup','dbbackup.ini','Public.MysqlHost','STRING','{{.Public.MysqlHost}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13077,'tendbsingle','backup','dbbackup.ini','Public.MysqlPasswd','STRING','{{.Public.MysqlPasswd}}','','STRING',2,0,0,1,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13078,'tendbsingle','backup','dbbackup.ini','Public.MysqlPort','INT','{{.Public.MysqlPort}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13079,'tendbsingle','backup','dbbackup.ini','Public.MysqlRole','STRING','{{.Public.MysqlRole}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13080,'tendbsingle','backup','dbbackup.ini','Public.MysqlUser','STRING','{{.Public.MysqlUser}}','','STRING',2,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13081,'tendbsingle','backup','dbbackup.ini','Public.OldFileLeftDay','INT','2','','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13117,'tendbsingle','backup','dbbackup.ini','Public.ResultReportPath','STRING','/home/mysql/dbareport/mysql/dbbackup/result','result log dir','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13118,'tendbsingle','backup','dbbackup.ini','Public.StatusReportPath','STRING','/home/mysql/dbareport/mysql/dbbackup/status','status log dir','STRING',1,0,0,0,0,NULL,'','',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16402,'tendbsingle','backup','dbbackup.ini','Public.TarSizeThreshold','INT','8196','[128, 9999999]','RANGE',1,0,0,0,0,NULL,'','',-1,NULL,'MB','2022-05-26 20:11:23','2023-05-24 21:40:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13083,'tendbsingle','backup','dbbackup.options','BackupType','STRING','logical','logical | physical','ENUM',1,0,0,0,0,NULL,'','备份类型',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13084,'tendbsingle','backup','dbbackup.options','CrontabTime','STRING','3 5 * * *','','STRING',1,0,0,0,0,NULL,'','DB备份开始时间',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13085,'tendbsingle','backup','dbbackup.options','Logical.IgnoreDatabases','STRING','mysql,test,infodba_schema,information_schema,performance_schema,sys','','',1,0,0,0,0,NULL,'','主库备份数据',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13086,'tendbsingle','backup','dbbackup.options','Logical.IgnoreTables','STRING','','','',1,0,0,0,0,NULL,'','主库备份数据',-1,NULL,'','2022-05-26 20:11:23','2023-03-22 12:23:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13087,'tendbsingle','backup','dbbackup.options','Master.DataSchemaGrant','STRING','schema','grant,schema,data,all','ENUMS',1,0,0,0,0,NULL,'','从库备份数据',-1,NULL,'','2022-05-26 20:11:23','2023-03-28 20:31:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10979,'tendbsingle','dbconf','MySQL-5.5','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 19:15:59',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8963,'tendbsingle','dbconf','MySQL-5.5','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8964,'tendbsingle','dbconf','MySQL-5.5','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 19:28:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8965,'tendbsingle','dbconf','MySQL-5.5','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8966,'tendbsingle','dbconf','MySQL-5.5','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8967,'tendbsingle','dbconf','MySQL-5.5','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8968,'tendbsingle','dbconf','MySQL-5.5','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 14:31:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8969,'tendbsingle','dbconf','MySQL-5.5','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8970,'tendbsingle','dbconf','MySQL-5.5','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8971,'tendbsingle','dbconf','MySQL-5.5','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8972,'tendbsingle','dbconf','MySQL-5.5','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8973,'tendbsingle','dbconf','MySQL-5.5','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8974,'tendbsingle','dbconf','MySQL-5.5','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:26:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8975,'tendbsingle','dbconf','MySQL-5.5','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-07 15:35:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8976,'tendbsingle','dbconf','MySQL-5.5','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8977,'tendbsingle','dbconf','MySQL-5.5','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8978,'tendbsingle','dbconf','MySQL-5.5','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8979,'tendbsingle','dbconf','MySQL-5.5','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8980,'tendbsingle','dbconf','MySQL-5.5','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8981,'tendbsingle','dbconf','MySQL-5.5','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8982,'tendbsingle','dbconf','MySQL-5.5','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8983,'tendbsingle','dbconf','MySQL-5.5','mysqld.expire_logs_days','INT','60','[0, 99]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-26 20:27:21',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8984,'tendbsingle','dbconf','MySQL-5.5','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8985,'tendbsingle','dbconf','MySQL-5.5','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8986,'tendbsingle','dbconf','MySQL-5.5','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8987,'tendbsingle','dbconf','MySQL-5.5','mysqld.init_connect','STRING','\"insert into infodba_schema.conn_log values(connection_id(),now(),user(),current_user(),\'{{.Mysqld.BindAddress}}\');\"','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 17:11:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8988,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8990,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8991,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8992,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8993,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8994,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_file_format','STRING','Barracuda','Barracuda','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8995,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8996,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_flush_log_at_trx_commit','INT','0','0|1|2','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8997,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8998,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8999,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9000,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9001,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9002,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9003,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_max_dirty_pages_pct','INT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9004,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9005,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9006,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9007,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_read_io_threads','INT','8','[0,32]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9008,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9009,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9010,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9011,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9012,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_thread_concurrency','INT','16','[1,32]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9013,'tendbsingle','dbconf','MySQL-5.5','mysqld.innodb_write_io_threads','INT','8','[0,32]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9014,'tendbsingle','dbconf','MySQL-5.5','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-07 11:55:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9015,'tendbsingle','dbconf','MySQL-5.5','mysqld.key_buffer','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9016,'tendbsingle','dbconf','MySQL-5.5','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9017,'tendbsingle','dbconf','MySQL-5.5','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9018,'tendbsingle','dbconf','MySQL-5.5','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9019,'tendbsingle','dbconf','MySQL-5.5','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9020,'tendbsingle','dbconf','MySQL-5.5','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9022,'tendbsingle','dbconf','MySQL-5.5','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9023,'tendbsingle','dbconf','MySQL-5.5','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9024,'tendbsingle','dbconf','MySQL-5.5','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9025,'tendbsingle','dbconf','MySQL-5.5','mysqld.log_warnings','STRING','0',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9026,'tendbsingle','dbconf','MySQL-5.5','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9021,'tendbsingle','dbconf','MySQL-5.5','mysqld.loose_log_bin_compress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9045,'tendbsingle','dbconf','MySQL-5.5','mysqld.loose_relay_log_uncompress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9027,'tendbsingle','dbconf','MySQL-5.5','mysqld.lower_case_table_names','INT','0','[0,1]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9028,'tendbsingle','dbconf','MySQL-5.5','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9029,'tendbsingle','dbconf','MySQL-5.5','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9030,'tendbsingle','dbconf','MySQL-5.5','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9031,'tendbsingle','dbconf','MySQL-5.5','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9032,'tendbsingle','dbconf','MySQL-5.5','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9033,'tendbsingle','dbconf','MySQL-5.5','mysqld.max_connect_errors','STRING','99999999',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9034,'tendbsingle','dbconf','MySQL-5.5','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9035,'tendbsingle','dbconf','MySQL-5.5','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9036,'tendbsingle','dbconf','MySQL-5.5','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9037,'tendbsingle','dbconf','MySQL-5.5','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9038,'tendbsingle','dbconf','MySQL-5.5','mysqld.query_cache_limit','INT','1048576','[1,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9039,'tendbsingle','dbconf','MySQL-5.5','mysqld.query_cache_size','INT','0','[0,104857600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9040,'tendbsingle','dbconf','MySQL-5.5','mysqld.query_cache_type','STRING','OFF','OFF| ON| DEMAND','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:28:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9041,'tendbsingle','dbconf','MySQL-5.5','mysqld.query_cache_wlock_invalidate','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9042,'tendbsingle','dbconf','MySQL-5.5','mysqld.query_response_time_stats','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9043,'tendbsingle','dbconf','MySQL-5.5','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9044,'tendbsingle','dbconf','MySQL-5.5','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9050,'tendbsingle','dbconf','MySQL-5.5','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9051,'tendbsingle','dbconf','MySQL-5.5','mysqld.show_compatibility_56','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9052,'tendbsingle','dbconf','MySQL-5.5','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-09 18:37:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9053,'tendbsingle','dbconf','MySQL-5.5','mysqld.slave_compressed_protocol','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:11:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9054,'tendbsingle','dbconf','MySQL-5.5','mysqld.slave_exec_mode','STRING','STRICT','|STRICT|IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9055,'tendbsingle','dbconf','MySQL-5.5','mysqld.slave_parallel_type','STRING','DATABASE','DATABASE| LOGICAL_CLOCK','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:59:05',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9056,'tendbsingle','dbconf','MySQL-5.5','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:58:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9057,'tendbsingle','dbconf','MySQL-5.5','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9058,'tendbsingle','dbconf','MySQL-5.5','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 15:00:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9059,'tendbsingle','dbconf','MySQL-5.5','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9060,'tendbsingle','dbconf','MySQL-5.5','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9061,'tendbsingle','dbconf','MySQL-5.5','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9062,'tendbsingle','dbconf','MySQL-5.5','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9063,'tendbsingle','dbconf','MySQL-5.5','mysqld.sql_mode','STRING','\'\'','\'\'|STRICT|ONLY_FULL_GROUP_BY|','ENUMS',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9064,'tendbsingle','dbconf','MySQL-5.5','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9065,'tendbsingle','dbconf','MySQL-5.5','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9066,'tendbsingle','dbconf','MySQL-5.5','mysqld.sync_binlog','INT','0','0|1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9067,'tendbsingle','dbconf','MySQL-5.5','mysqld.table_definition_cache','INT','768','[400,4096]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9068,'tendbsingle','dbconf','MySQL-5.5','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9069,'tendbsingle','dbconf','MySQL-5.5','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9070,'tendbsingle','dbconf','MySQL-5.5','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9071,'tendbsingle','dbconf','MySQL-5.5','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9072,'tendbsingle','dbconf','MySQL-5.5','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9073,'tendbsingle','dbconf','MySQL-5.5','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-14 15:43:03',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10975,'tendbsingle','dbconf','MySQL-5.5','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10980,'tendbsingle','dbconf','MySQL-5.6','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 19:15:59',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9231,'tendbsingle','dbconf','MySQL-5.6','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9232,'tendbsingle','dbconf','MySQL-5.6','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 19:28:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9233,'tendbsingle','dbconf','MySQL-5.6','mysqld.automatic_sp_privileges','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9234,'tendbsingle','dbconf','MySQL-5.6','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9235,'tendbsingle','dbconf','MySQL-5.6','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9236,'tendbsingle','dbconf','MySQL-5.6','mysqld.back_log','INT','3000','[1,65535]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9237,'tendbsingle','dbconf','MySQL-5.6','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9238,'tendbsingle','dbconf','MySQL-5.6','mysqld.binlog_cache_size','INT','2097152','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9239,'tendbsingle','dbconf','MySQL-5.6','mysqld.binlog_checksum','STRING','CRC32','NONE| CRC32 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9240,'tendbsingle','dbconf','MySQL-5.6','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 14:31:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9241,'tendbsingle','dbconf','MySQL-5.6','mysqld.binlog_order_commits','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9242,'tendbsingle','dbconf','MySQL-5.6','mysqld.binlog_rows_query_log_events','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9243,'tendbsingle','dbconf','MySQL-5.6','mysqld.binlog_row_image','STRING','FULL','FULL| MINIMAL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9244,'tendbsingle','dbconf','MySQL-5.6','mysqld.binlog_stmt_cache_size','INT','32768','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9245,'tendbsingle','dbconf','MySQL-5.6','mysqld.block_encryption_mode','STRING','AES-128-ECB','AES-128-ECB| AES-192-ECB| AES-256-ECB| AES-128-CBC| AES-192-CBC| AES-256-CBC ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9246,'tendbsingle','dbconf','MySQL-5.6','mysqld.bulk_insert_buffer_size','INT','8388608','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9247,'tendbsingle','dbconf','MySQL-5.6','mysqld.character_set_filesystem','STRING','BINARY','UTF8| LATIN1| GBK| BINARY ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9248,'tendbsingle','dbconf','MySQL-5.6','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9249,'tendbsingle','dbconf','MySQL-5.6','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9250,'tendbsingle','dbconf','MySQL-5.6','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9251,'tendbsingle','dbconf','MySQL-5.6','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9252,'tendbsingle','dbconf','MySQL-5.6','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9253,'tendbsingle','dbconf','MySQL-5.6','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:26:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9255,'tendbsingle','dbconf','MySQL-5.6','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-07 15:35:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9256,'tendbsingle','dbconf','MySQL-5.6','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9257,'tendbsingle','dbconf','MySQL-5.6','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9258,'tendbsingle','dbconf','MySQL-5.6','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9259,'tendbsingle','dbconf','MySQL-5.6','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9260,'tendbsingle','dbconf','MySQL-5.6','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9261,'tendbsingle','dbconf','MySQL-5.6','mysqld.disconnect_on_expired_password','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9262,'tendbsingle','dbconf','MySQL-5.6','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9263,'tendbsingle','dbconf','MySQL-5.6','mysqld.end_markers_in_json','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9264,'tendbsingle','dbconf','MySQL-5.6','mysqld.eq_range_index_dive_limit','INT','200','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9265,'tendbsingle','dbconf','MySQL-5.6','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9266,'tendbsingle','dbconf','MySQL-5.6','mysqld.expire_logs_days','INT','60','[0, 99]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-26 20:27:21',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9267,'tendbsingle','dbconf','MySQL-5.6','mysqld.explicit_defaults_for_timestamp','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9268,'tendbsingle','dbconf','MySQL-5.6','mysqld.flush_time','INT','0','[0,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9269,'tendbsingle','dbconf','MySQL-5.6','mysqld.ft_max_word_len','INT','84','[10,4294967295]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9270,'tendbsingle','dbconf','MySQL-5.6','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9271,'tendbsingle','dbconf','MySQL-5.6','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9272,'tendbsingle','dbconf','MySQL-5.6','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9273,'tendbsingle','dbconf','MySQL-5.6','mysqld.host_cache_size','INT','643','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9274,'tendbsingle','dbconf','MySQL-5.6','mysqld.init_connect','STRING','\"insert into infodba_schema.conn_log values(connection_id(),now(),user(),current_user(),\'{{.Mysqld.BindAddress}}\');\"','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 17:11:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9275,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_adaptive_flushing','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9276,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_adaptive_flushing_lwm','INT','10','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9277,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_adaptive_hash_index','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9278,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_adaptive_max_sleep_delay','INT','150000','[1,1000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9279,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_async_truncate_work_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9280,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_autoextend_increment','INT','64','[1,1000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9281,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9282,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_buffer_pool_dump_at_shutdown','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9283,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_buffer_pool_instances','INT','4','1 | 4 | 8','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9284,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_buffer_pool_load_at_startup','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9285,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9286,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_change_buffering','STRING','ALL','NONE| INSERTS| DELETES| CHANGES| PURGES| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9287,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_change_buffer_max_size','INT','25','[0,50]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9288,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_checksum_algorithm','STRING','INNODB','INNODB| CRC32| NONE| STRICT_INNODB| STRICT_CRC32| STRICT_NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9289,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_cmp_per_index_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9290,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_commit_concurrency','INT','0','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9291,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_compression_failure_threshold_pct','INT','5','[0,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9292,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_compression_level','INT','6','[0,9]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9293,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_compression_pad_pct_max','INT','50','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9294,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9295,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9296,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9297,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_disable_sort_file_cache','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9298,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_file_format','STRING','Barracuda','Barracuda','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9299,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9300,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_flush_log_at_trx_commit','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9301,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_flush_method','STRING','O_DIRECT','fsync| O_DSYNC| littlesync| nosync| O_DIRECT| O_DIRECT_NO_FSYNC ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9302,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_flush_neighbors','STRING','0','0| 1| 2 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9303,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_ft_cache_size','INT','8000000','[1600000,80000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9304,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_ft_enable_diag_print','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9305,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_ft_enable_stopword','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9306,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_ft_max_token_size','INT','84','[10,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9307,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_ft_min_token_size','INT','3','[0,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9308,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_ft_num_word_optimize','INT','2000','[1000,10000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9309,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_ft_result_cache_limit','INT','2000000000','[1000000,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9310,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_ft_server_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9311,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_ft_sort_pll_degree','INT','2','[1,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9312,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_ft_total_cache_size','INT','640000000','[32000000,1600000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9313,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_ft_user_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9314,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9315,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_io_capacity_max','INT','40000','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9316,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_large_prefix','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9317,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9318,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9319,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_log_compressed_pages','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9320,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9321,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9322,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9323,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_lru_scan_depth','INT','1024','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9324,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_max_dirty_pages_pct','INT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9325,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_max_dirty_pages_pct_lwm','INT','0','[0,75]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9326,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_max_purge_lag','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9327,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_max_purge_lag_delay','INT','0','[0,10000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9328,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_monitor_disable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9329,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_monitor_enable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9330,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9331,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9332,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_online_alter_log_max_size','INT','134217728','[134217728,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9333,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_optimize_fulltext_only','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9334,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_print_all_deadlocks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9335,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_purge_batch_size','INT','300','[1,5000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9336,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_purge_threads','INT','4','[1,32]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9337,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_random_read_ahead','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9338,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9339,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_read_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9340,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9341,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_rollback_segments','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9342,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_sort_buffer_size','INT','1048576','[65536,67108864]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9343,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_spin_wait_delay','INT','6','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9344,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_stats_auto_recalc','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9345,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9346,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9347,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_stats_persistent','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9348,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_stats_persistent_sample_pages','INT','20','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9349,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_stats_transient_sample_pages','INT','8','[1,4294967296]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9350,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_status_output','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9351,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_status_output_locks','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9352,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9353,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_support_xa','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9354,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_sync_array_size','INT','1','[1,1024]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9355,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_sync_spin_loops','INT','30','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9356,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_table_locks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9357,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_thread_concurrency','INT','16','[0,128]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9358,'tendbsingle','dbconf','MySQL-5.6','mysqld.innodb_write_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9359,'tendbsingle','dbconf','MySQL-5.6','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-07 11:55:14',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9360,'tendbsingle','dbconf','MySQL-5.6','mysqld.join_buffer_size','STRING','{MIN(DBInitMemory*128,262144)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9361,'tendbsingle','dbconf','MySQL-5.6','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9362,'tendbsingle','dbconf','MySQL-5.6','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9363,'tendbsingle','dbconf','MySQL-5.6','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9364,'tendbsingle','dbconf','MySQL-5.6','mysqld.lc_time_names','STRING','EN_US','JA_JP| PT_BR| EN_US ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9365,'tendbsingle','dbconf','MySQL-5.6','mysqld.local_infile','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9366,'tendbsingle','dbconf','MySQL-5.6','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9367,'tendbsingle','dbconf','MySQL-5.6','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9369,'tendbsingle','dbconf','MySQL-5.6','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9370,'tendbsingle','dbconf','MySQL-5.6','mysqld.log_output','STRING','FILE','FILE| TABLE| NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9371,'tendbsingle','dbconf','MySQL-5.6','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9372,'tendbsingle','dbconf','MySQL-5.6','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9373,'tendbsingle','dbconf','MySQL-5.6','mysqld.log_slow_admin_statements','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9374,'tendbsingle','dbconf','MySQL-5.6','mysqld.log_throttle_queries_not_using_indexes','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9375,'tendbsingle','dbconf','MySQL-5.6','mysqld.log_warnings','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9376,'tendbsingle','dbconf','MySQL-5.6','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9368,'tendbsingle','dbconf','MySQL-5.6','mysqld.loose_log_bin_compress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9419,'tendbsingle','dbconf','MySQL-5.6','mysqld.loose_relay_log_uncompress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9377,'tendbsingle','dbconf','MySQL-5.6','mysqld.lower_case_table_names','INT','0','[0,1]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9378,'tendbsingle','dbconf','MySQL-5.6','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9379,'tendbsingle','dbconf','MySQL-5.6','mysqld.master_verify_checksum','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9380,'tendbsingle','dbconf','MySQL-5.6','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9381,'tendbsingle','dbconf','MySQL-5.6','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9382,'tendbsingle','dbconf','MySQL-5.6','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9383,'tendbsingle','dbconf','MySQL-5.6','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9384,'tendbsingle','dbconf','MySQL-5.6','mysqld.max_connect_errors','INT','99999999','[1,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9385,'tendbsingle','dbconf','MySQL-5.6','mysqld.max_error_count','INT','64','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9386,'tendbsingle','dbconf','MySQL-5.6','mysqld.max_heap_table_size','INT','67108864','[16384,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9387,'tendbsingle','dbconf','MySQL-5.6','mysqld.max_length_for_sort_data','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9388,'tendbsingle','dbconf','MySQL-5.6','mysqld.max_prepared_stmt_count','INT','16382','[0,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9389,'tendbsingle','dbconf','MySQL-5.6','mysqld.max_sort_length','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9390,'tendbsingle','dbconf','MySQL-5.6','mysqld.max_sp_recursion_depth','INT','0','[0,255]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9391,'tendbsingle','dbconf','MySQL-5.6','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9392,'tendbsingle','dbconf','MySQL-5.6','mysqld.metadata_locks_cache_size','INT','1024','[1,1048576]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9393,'tendbsingle','dbconf','MySQL-5.6','mysqld.min_examined_row_limit','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9394,'tendbsingle','dbconf','MySQL-5.6','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9395,'tendbsingle','dbconf','MySQL-5.6','mysqld.net_buffer_length','INT','16384','[1024,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9396,'tendbsingle','dbconf','MySQL-5.6','mysqld.net_read_timeout','INT','30','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9397,'tendbsingle','dbconf','MySQL-5.6','mysqld.net_retry_count','INT','10','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9398,'tendbsingle','dbconf','MySQL-5.6','mysqld.net_write_timeout','INT','60','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9399,'tendbsingle','dbconf','MySQL-5.6','mysqld.optimizer_prune_level','STRING','1','0| 1 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9400,'tendbsingle','dbconf','MySQL-5.6','mysqld.optimizer_search_depth','INT','62','[0,62]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9401,'tendbsingle','dbconf','MySQL-5.6','mysqld.optimizer_switch','STRING','INDEX_MERGE=ON,INDEX_MERGE_UNION=ON,INDEX_MERGE_SORT_UNION=ON,INDEX_MERGE_INTERSECTION=ON,ENGINE_CONDITION_PUSHDOWN=ON,INDEX_CONDITION_PUSHDOWN=ON,MRR=ON,MRR_COST_BASED=ON,BLOCK_NESTED_LOOP=ON,BATCHED_KEY_ACCESS=OFF,MATERIALIZATION=ON,SEMIJOIN=ON,LOOSESCAN=ON,FIRSTMATCH=ON,SUBQUERY_MATERIALIZATION_COST_BASED=ON,USE_INDEX_EXTENSIONS=ON','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9402,'tendbsingle','dbconf','MySQL-5.6','mysqld.optimizer_trace_limit','INT','1','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9403,'tendbsingle','dbconf','MySQL-5.6','mysqld.optimizer_trace_max_mem_size','INT','16384','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9404,'tendbsingle','dbconf','MySQL-5.6','mysqld.optimizer_trace_offset','INT','-1','[-2147483648,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9405,'tendbsingle','dbconf','MySQL-5.6','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9406,'tendbsingle','dbconf','MySQL-5.6','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9407,'tendbsingle','dbconf','MySQL-5.6','mysqld.preload_buffer_size','INT','32768','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9408,'tendbsingle','dbconf','MySQL-5.6','mysqld.query_alloc_block_size','INT','8192','[1024,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9409,'tendbsingle','dbconf','MySQL-5.6','mysqld.query_cache_limit','INT','1048576','[1,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9410,'tendbsingle','dbconf','MySQL-5.6','mysqld.query_cache_size','INT','0','[0,104857600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9411,'tendbsingle','dbconf','MySQL-5.6','mysqld.query_cache_type','STRING','OFF','OFF| ON| DEMAND','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:28:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9412,'tendbsingle','dbconf','MySQL-5.6','mysqld.query_cache_wlock_invalidate','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9413,'tendbsingle','dbconf','MySQL-5.6','mysqld.query_prealloc_size','INT','8192','[8192,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9414,'tendbsingle','dbconf','MySQL-5.6','mysqld.query_response_time_stats','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9415,'tendbsingle','dbconf','MySQL-5.6','mysqld.read_buffer_size','INT','262144','[8200,2147479552]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9416,'tendbsingle','dbconf','MySQL-5.6','mysqld.read_rnd_buffer_size','INT','524288','[1,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9417,'tendbsingle','dbconf','MySQL-5.6','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9418,'tendbsingle','dbconf','MySQL-5.6','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9424,'tendbsingle','dbconf','MySQL-5.6','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9425,'tendbsingle','dbconf','MySQL-5.6','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-09 18:37:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9426,'tendbsingle','dbconf','MySQL-5.6','mysqld.slave_compressed_protocol','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:11:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9427,'tendbsingle','dbconf','MySQL-5.6','mysqld.slave_exec_mode','STRING','STRICT','|STRICT|IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9428,'tendbsingle','dbconf','MySQL-5.6','mysqld.slave_net_timeout','INT','120','[15,300]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9429,'tendbsingle','dbconf','MySQL-5.6','mysqld.slave_parallel_type','STRING','DATABASE','DATABASE| LOGICAL_CLOCK','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:55:08',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9430,'tendbsingle','dbconf','MySQL-5.6','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:58:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9431,'tendbsingle','dbconf','MySQL-5.6','mysqld.slave_rows_search_algorithms','STRING','TABLE_SCAN,INDEX_SCAN','TABLE_SCAN,INDEX_SCAN ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9432,'tendbsingle','dbconf','MySQL-5.6','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9433,'tendbsingle','dbconf','MySQL-5.6','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 15:00:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9434,'tendbsingle','dbconf','MySQL-5.6','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9435,'tendbsingle','dbconf','MySQL-5.6','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9436,'tendbsingle','dbconf','MySQL-5.6','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9437,'tendbsingle','dbconf','MySQL-5.6','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9438,'tendbsingle','dbconf','MySQL-5.6','mysqld.sql_mode','STRING','\'\'','\'\'| NO_ZERO_DATE| ANSI_QUOTES| IGNORE_SPACE| NO_AUTO_VALUE_ON_ZERO| ONLY_FULL_GROUP_BY| STRICT_TRANS_TABLES| ERROR_FOR_DIVISION_BY_ZERO| HIGH_NOT_PRECEDENCE| NO_KEY_OPTIONS| NO_TABLE_OPTIONS| NO_ENGINE_SUBSTITUTION| NO_AUTO_CREATE_USER| NO_FIELD_OPTIONS| NO_UNSIGNED_SUBTRACTION| NO_ZERO_IN_DATE| PIPES_AS_CONCAT| REAL_AS_FLOAT| ALLOW_INVALID_DATES| NO_BACKSLASH_ESCAPES| NO_DIR_IN_CREATE| STRICT_ALL_TABLES ','ENUMS',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 11:29:17',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9439,'tendbsingle','dbconf','MySQL-5.6','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9440,'tendbsingle','dbconf','MySQL-5.6','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9441,'tendbsingle','dbconf','MySQL-5.6','mysqld.sync_binlog','INT','0','[0,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9442,'tendbsingle','dbconf','MySQL-5.6','mysqld.table_definition_cache','STRING','{MAX(DBInitMemory*512/1000,2048)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9443,'tendbsingle','dbconf','MySQL-5.6','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9444,'tendbsingle','dbconf','MySQL-5.6','mysqld.table_open_cache_instances','STRING','{MIN(DBInitMemory/1000,16)}','','',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9445,'tendbsingle','dbconf','MySQL-5.6','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9446,'tendbsingle','dbconf','MySQL-5.6','mysqld.thread_handling','STRING','one-thread-per-connection','one-thread-per-connection| pool-of-threads ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9447,'tendbsingle','dbconf','MySQL-5.6','mysqld.thread_pool_oversubscribe','INT','3','[3,32]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9448,'tendbsingle','dbconf','MySQL-5.6','mysqld.thread_pool_size','STRING','{MIN(DBInitCpu,64)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9449,'tendbsingle','dbconf','MySQL-5.6','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9450,'tendbsingle','dbconf','MySQL-5.6','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9451,'tendbsingle','dbconf','MySQL-5.6','mysqld.tmp_table_size','INT','209715200','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9452,'tendbsingle','dbconf','MySQL-5.6','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9453,'tendbsingle','dbconf','MySQL-5.6','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10976,'tendbsingle','dbconf','MySQL-5.6','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 18:01:10',0);
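+-- NOTE (reading aid, inferred from the rows themselves rather than stated anywhere
+-- in this patch): the rows above populate the MySQL-5.6 `conf_file` for the
+-- `tendbsingle` namespace; the rows below switch to MySQL-5.7. A `value_type_sub`
+-- of 'RANGE' pairs with a '[min,max]' `value_allowed`, 'ENUM'/'ENUMS' with a
+-- '|'-separated list, and defaults such as '{{.Mysqld.Port}}' appear to be
+-- Go-template placeholders rendered per instance at deploy time. A hypothetical
+-- lookup of one parameter definition (names taken from rows in this file):
+--   SELECT conf_name, value_type, value_default, value_allowed, need_restart
+--   FROM tb_config_name_def
+--   WHERE namespace = 'tendbsingle'
+--     AND conf_file = 'MySQL-5.7'
+--     AND conf_name = 'mysqld.binlog_format';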
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10978,'tendbsingle','dbconf','MySQL-5.7','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 19:15:59',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9659,'tendbsingle','dbconf','MySQL-5.7','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9660,'tendbsingle','dbconf','MySQL-5.7','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 19:28:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9661,'tendbsingle','dbconf','MySQL-5.7','mysql.port','INT','{{.Mysqld.Port}}',NULL,'',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9662,'tendbsingle','dbconf','MySQL-5.7','mysql.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9663,'tendbsingle','dbconf','MySQL-5.7','mysqld.automatic_sp_privileges','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9664,'tendbsingle','dbconf','MySQL-5.7','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9665,'tendbsingle','dbconf','MySQL-5.7','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9666,'tendbsingle','dbconf','MySQL-5.7','mysqld.avoid_temporal_upgrade','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9667,'tendbsingle','dbconf','MySQL-5.7','mysqld.back_log','INT','3000','[1,65535]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9668,'tendbsingle','dbconf','MySQL-5.7','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9669,'tendbsingle','dbconf','MySQL-5.7','mysqld.binlog_cache_size','INT','2097152','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9670,'tendbsingle','dbconf','MySQL-5.7','mysqld.binlog_checksum','STRING','CRC32','NONE| CRC32 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9671,'tendbsingle','dbconf','MySQL-5.7','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 14:31:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9672,'tendbsingle','dbconf','MySQL-5.7','mysqld.binlog_order_commits','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9673,'tendbsingle','dbconf','MySQL-5.7','mysqld.binlog_rows_query_log_events','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9674,'tendbsingle','dbconf','MySQL-5.7','mysqld.binlog_row_image','STRING','FULL','FULL| MINIMAL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9675,'tendbsingle','dbconf','MySQL-5.7','mysqld.binlog_stmt_cache_size','INT','32768','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9676,'tendbsingle','dbconf','MySQL-5.7','mysqld.block_encryption_mode','STRING','AES-128-ECB','AES-128-ECB| AES-192-ECB| AES-256-ECB| AES-128-CBC| AES-192-CBC| AES-256-CBC ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9677,'tendbsingle','dbconf','MySQL-5.7','mysqld.bulk_insert_buffer_size','INT','8388608','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9678,'tendbsingle','dbconf','MySQL-5.7','mysqld.character_set_filesystem','STRING','BINARY','UTF8| LATIN1| GBK| BINARY ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9679,'tendbsingle','dbconf','MySQL-5.7','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9680,'tendbsingle','dbconf','MySQL-5.7','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9681,'tendbsingle','dbconf','MySQL-5.7','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9682,'tendbsingle','dbconf','MySQL-5.7','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9683,'tendbsingle','dbconf','MySQL-5.7','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9684,'tendbsingle','dbconf','MySQL-5.7','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:26:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9685,'tendbsingle','dbconf','MySQL-5.7','mysqld.default_password_lifetime','INT','0','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9687,'tendbsingle','dbconf','MySQL-5.7','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-07 15:35:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9688,'tendbsingle','dbconf','MySQL-5.7','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9689,'tendbsingle','dbconf','MySQL-5.7','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9690,'tendbsingle','dbconf','MySQL-5.7','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9691,'tendbsingle','dbconf','MySQL-5.7','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9692,'tendbsingle','dbconf','MySQL-5.7','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9693,'tendbsingle','dbconf','MySQL-5.7','mysqld.disconnect_on_expired_password','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9694,'tendbsingle','dbconf','MySQL-5.7','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9695,'tendbsingle','dbconf','MySQL-5.7','mysqld.end_markers_in_json','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9696,'tendbsingle','dbconf','MySQL-5.7','mysqld.eq_range_index_dive_limit','INT','200','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9697,'tendbsingle','dbconf','MySQL-5.7','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9698,'tendbsingle','dbconf','MySQL-5.7','mysqld.expire_logs_days','INT','60','[0, 99]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-26 20:27:21',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9699,'tendbsingle','dbconf','MySQL-5.7','mysqld.explicit_defaults_for_timestamp','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9700,'tendbsingle','dbconf','MySQL-5.7','mysqld.flush_time','INT','0','[0,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9701,'tendbsingle','dbconf','MySQL-5.7','mysqld.ft_max_word_len','INT','84','[10,4294967295]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9702,'tendbsingle','dbconf','MySQL-5.7','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9703,'tendbsingle','dbconf','MySQL-5.7','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9704,'tendbsingle','dbconf','MySQL-5.7','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9705,'tendbsingle','dbconf','MySQL-5.7','mysqld.host_cache_size','INT','644','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9706,'tendbsingle','dbconf','MySQL-5.7','mysqld.init_connect','STRING','\"insert into infodba_schema.conn_log values(connection_id(),now(),user(),current_user(),\'{{.Mysqld.BindAddress}}\');\"','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 17:11:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9707,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_adaptive_flushing','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9708,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_adaptive_flushing_lwm','INT','10','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9709,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_adaptive_hash_index','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9710,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_adaptive_max_sleep_delay','INT','150000','[1,1000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9711,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_alter_table_default_algorithm','STRING','INPLACE','INPLACE| INSTANT ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9712,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_async_truncate_size','INT','128','[128,168]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9713,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_async_truncate_work_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9714,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_autoextend_increment','INT','64','[1,1000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9715,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9716,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_dump_at_shutdown','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9717,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_dump_pct','INT','25','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9718,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_instances','INT','4','1 | 4 | 8','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9719,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_load_at_startup','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9720,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9721,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_change_buffering','STRING','ALL','NONE| INSERTS| DELETES| CHANGES| PURGES| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9722,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_change_buffer_max_size','INT','25','[0,50]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9723,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_checksum_algorithm','STRING','CRC32','INNODB| CRC32| NONE| STRICT_INNODB| STRICT_CRC32| STRICT_NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9724,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_cmp_per_index_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9725,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_commit_concurrency','INT','0','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9726,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_compression_failure_threshold_pct','INT','5','[0,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9727,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_compression_level','INT','6','[0,9]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9728,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_compression_pad_pct_max','INT','50','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9729,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9730,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9731,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9732,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_deadlock_detect','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9733,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_default_row_format','STRING','DYNAMIC','DYNAMIC| COMPACT| REDUNDANT ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9734,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_disable_sort_file_cache','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9735,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_file_format','STRING','Barracuda','Barracuda','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9736,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9737,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_flush_log_at_trx_commit','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9738,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_flush_method','STRING','O_DIRECT','fsync| O_DSYNC| littlesync| nosync| O_DIRECT| O_DIRECT_NO_FSYNC ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9739,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_flush_neighbors','STRING','0','0| 1| 2 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9740,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_flush_sync','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9741,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_ft_cache_size','INT','8000000','[1600000,80000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9742,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_ft_enable_diag_print','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9743,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_ft_enable_stopword','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9744,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_ft_max_token_size','INT','84','[10,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9745,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_ft_min_token_size','INT','3','[0,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9746,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_ft_num_word_optimize','INT','2000','[1000,10000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9747,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_ft_result_cache_limit','INT','2000000000','[1000000,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9748,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_ft_server_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9749,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_ft_sort_pll_degree','INT','2','[1,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9750,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_ft_total_cache_size','INT','640000000','[32000000,1600000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9751,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_ft_user_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9752,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9753,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_io_capacity_max','INT','40000','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9754,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_large_prefix','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9755,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9756,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9757,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_log_checksums','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9758,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_log_compressed_pages','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9759,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9760,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9761,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9762,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_lru_scan_depth','INT','1024','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9763,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_max_dirty_pages_pct','FLOAT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9764,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_max_dirty_pages_pct_lwm','FLOAT','0','[0,75]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9765,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_max_purge_lag','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9766,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_max_purge_lag_delay','INT','0','[0,10000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9767,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_max_undo_log_size','INT','1073741824','[10485760,17179869184]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9768,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_monitor_disable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9769,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_monitor_enable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9770,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9771,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9772,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_online_alter_log_max_size','INT','134217728','[134217728,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9773,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_optimize_fulltext_only','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9774,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_page_cleaners','INT','4','[1,64]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9775,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_print_all_deadlocks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9776,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_purge_batch_size','INT','300','[1,5000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9777,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_purge_rseg_truncate_frequency','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9778,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_purge_threads','INT','4','[1,32]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9779,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_random_read_ahead','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9780,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9781,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_read_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9782,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9783,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_rollback_segments','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9784,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_sort_buffer_size','INT','1048576','[65536,67108864]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9785,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_spin_wait_delay','INT','6','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9786,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_stats_auto_recalc','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9787,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9788,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9789,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_stats_persistent','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9790,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_stats_persistent_sample_pages','INT','20','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9791,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_stats_transient_sample_pages','INT','8','[1,4294967296]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9792,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_status_output','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9793,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_status_output_locks','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9794,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9795,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_sync_array_size','INT','1','[1,1024]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9796,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_sync_spin_loops','INT','30','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9797,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_table_locks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9798,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_thread_concurrency','INT','16','[0,128]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9799,'tendbsingle','dbconf','MySQL-5.7','mysqld.innodb_write_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9800,'tendbsingle','dbconf','MySQL-5.7','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-07 11:55:17',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9801,'tendbsingle','dbconf','MySQL-5.7','mysqld.join_buffer_size','STRING','{MIN(DBInitMemory*128,262144)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9802,'tendbsingle','dbconf','MySQL-5.7','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9803,'tendbsingle','dbconf','MySQL-5.7','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9804,'tendbsingle','dbconf','MySQL-5.7','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9805,'tendbsingle','dbconf','MySQL-5.7','mysqld.lc_time_names','STRING','EN_US','JA_JP| PT_BR| EN_US ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9806,'tendbsingle','dbconf','MySQL-5.7','mysqld.local_infile','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9807,'tendbsingle','dbconf','MySQL-5.7','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9808,'tendbsingle','dbconf','MySQL-5.7','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9810,'tendbsingle','dbconf','MySQL-5.7','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9811,'tendbsingle','dbconf','MySQL-5.7','mysqld.log_output','STRING','FILE','FILE| TABLE| NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9812,'tendbsingle','dbconf','MySQL-5.7','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9813,'tendbsingle','dbconf','MySQL-5.7','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9814,'tendbsingle','dbconf','MySQL-5.7','mysqld.log_slow_admin_statements','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9815,'tendbsingle','dbconf','MySQL-5.7','mysqld.log_throttle_queries_not_using_indexes','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9816,'tendbsingle','dbconf','MySQL-5.7','mysqld.log_timestamps','STRING','SYSTEM','UTC| SYSTEM ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9817,'tendbsingle','dbconf','MySQL-5.7','mysqld.log_warnings','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9818,'tendbsingle','dbconf','MySQL-5.7','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9809,'tendbsingle','dbconf','MySQL-5.7','mysqld.loose_log_bin_compress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9867,'tendbsingle','dbconf','MySQL-5.7','mysqld.loose_relay_log_uncompress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9819,'tendbsingle','dbconf','MySQL-5.7','mysqld.lower_case_table_names','INT','0','[0,1]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9820,'tendbsingle','dbconf','MySQL-5.7','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9821,'tendbsingle','dbconf','MySQL-5.7','mysqld.master_verify_checksum','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9822,'tendbsingle','dbconf','MySQL-5.7','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9823,'tendbsingle','dbconf','MySQL-5.7','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9824,'tendbsingle','dbconf','MySQL-5.7','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9825,'tendbsingle','dbconf','MySQL-5.7','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9826,'tendbsingle','dbconf','MySQL-5.7','mysqld.max_connect_errors','INT','99999999','[1,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9827,'tendbsingle','dbconf','MySQL-5.7','mysqld.max_error_count','INT','64','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9828,'tendbsingle','dbconf','MySQL-5.7','mysqld.max_heap_table_size','INT','67108864','[16384,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9829,'tendbsingle','dbconf','MySQL-5.7','mysqld.max_length_for_sort_data','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9830,'tendbsingle','dbconf','MySQL-5.7','mysqld.max_points_in_geometry','INT','65536','[3,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9831,'tendbsingle','dbconf','MySQL-5.7','mysqld.max_prepared_stmt_count','INT','16382','[0,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9832,'tendbsingle','dbconf','MySQL-5.7','mysqld.max_sort_length','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9833,'tendbsingle','dbconf','MySQL-5.7','mysqld.max_sp_recursion_depth','INT','0','[0,255]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9834,'tendbsingle','dbconf','MySQL-5.7','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9835,'tendbsingle','dbconf','MySQL-5.7','mysqld.metadata_locks_cache_size','INT','1024','[1,1048576]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9836,'tendbsingle','dbconf','MySQL-5.7','mysqld.min_examined_row_limit','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9837,'tendbsingle','dbconf','MySQL-5.7','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9838,'tendbsingle','dbconf','MySQL-5.7','mysqld.mysql_native_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9839,'tendbsingle','dbconf','MySQL-5.7','mysqld.net_buffer_length','INT','16384','[1024,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9840,'tendbsingle','dbconf','MySQL-5.7','mysqld.net_read_timeout','INT','30','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9841,'tendbsingle','dbconf','MySQL-5.7','mysqld.net_retry_count','INT','10','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9842,'tendbsingle','dbconf','MySQL-5.7','mysqld.net_write_timeout','INT','60','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9843,'tendbsingle','dbconf','MySQL-5.7','mysqld.ngram_token_size','INT','2','[1,10]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9844,'tendbsingle','dbconf','MySQL-5.7','mysqld.optimizer_prune_level','STRING','1','0| 1 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9845,'tendbsingle','dbconf','MySQL-5.7','mysqld.optimizer_search_depth','INT','62','[0,62]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9846,'tendbsingle','dbconf','MySQL-5.7','mysqld.optimizer_switch','STRING','INDEX_MERGE=ON,INDEX_MERGE_UNION=ON,INDEX_MERGE_SORT_UNION=ON,INDEX_MERGE_INTERSECTION=ON,ENGINE_CONDITION_PUSHDOWN=ON,INDEX_CONDITION_PUSHDOWN=ON,MRR=ON,MRR_COST_BASED=ON,BLOCK_NESTED_LOOP=ON,BATCHED_KEY_ACCESS=OFF,MATERIALIZATION=ON,SEMIJOIN=ON,LOOSESCAN=ON,FIRSTMATCH=ON,DUPLICATEWEEDOUT=ON,SUBQUERY_MATERIALIZATION_COST_BASED=ON,USE_INDEX_EXTENSIONS=ON,CONDITION_FANOUT_FILTER=ON,DERIVED_MERGE=ON','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9847,'tendbsingle','dbconf','MySQL-5.7','mysqld.optimizer_trace_limit','INT','1','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9848,'tendbsingle','dbconf','MySQL-5.7','mysqld.optimizer_trace_max_mem_size','INT','16384','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9849,'tendbsingle','dbconf','MySQL-5.7','mysqld.optimizer_trace_offset','INT','-1','[-2147483648,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9850,'tendbsingle','dbconf','MySQL-5.7','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9851,'tendbsingle','dbconf','MySQL-5.7','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9852,'tendbsingle','dbconf','MySQL-5.7','mysqld.preload_buffer_size','INT','32768','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9853,'tendbsingle','dbconf','MySQL-5.7','mysqld.query_alloc_block_size','INT','8192','[1024,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9854,'tendbsingle','dbconf','MySQL-5.7','mysqld.query_cache_limit','INT','1048576','[1,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9855,'tendbsingle','dbconf','MySQL-5.7','mysqld.query_cache_min_res_unit','INT','4096','[512,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9856,'tendbsingle','dbconf','MySQL-5.7','mysqld.query_cache_size','INT','0','[0,104857600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9857,'tendbsingle','dbconf','MySQL-5.7','mysqld.query_cache_type','STRING','OFF','OFF| ON| DEMAND','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:28:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9858,'tendbsingle','dbconf','MySQL-5.7','mysqld.query_cache_wlock_invalidate','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9859,'tendbsingle','dbconf','MySQL-5.7','mysqld.query_prealloc_size','INT','8192','[8192,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9860,'tendbsingle','dbconf','MySQL-5.7','mysqld.query_response_time_stats','STRING','ON','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9861,'tendbsingle','dbconf','MySQL-5.7','mysqld.range_alloc_block_size','INT','4096','[4096,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9862,'tendbsingle','dbconf','MySQL-5.7','mysqld.range_optimizer_max_mem_size','INT','8388608','[0,17179869184]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9863,'tendbsingle','dbconf','MySQL-5.7','mysqld.read_buffer_size','INT','262144','[8200,2147479552]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9864,'tendbsingle','dbconf','MySQL-5.7','mysqld.read_rnd_buffer_size','INT','524288','[1,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9865,'tendbsingle','dbconf','MySQL-5.7','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9866,'tendbsingle','dbconf','MySQL-5.7','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9870,'tendbsingle','dbconf','MySQL-5.7','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9871,'tendbsingle','dbconf','MySQL-5.7','mysqld.session_track_gtids','STRING','OFF','OFF| OWN_GTID| ALL_GTIDS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9872,'tendbsingle','dbconf','MySQL-5.7','mysqld.session_track_schema','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9873,'tendbsingle','dbconf','MySQL-5.7','mysqld.session_track_state_change','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9874,'tendbsingle','dbconf','MySQL-5.7','mysqld.sha256_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9875,'tendbsingle','dbconf','MySQL-5.7','mysqld.show_compatibility_56','STRING','ON','ON| OFF ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9876,'tendbsingle','dbconf','MySQL-5.7','mysqld.show_old_temporals','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9877,'tendbsingle','dbconf','MySQL-5.7','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-09 18:37:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9878,'tendbsingle','dbconf','MySQL-5.7','mysqld.slave_compressed_protocol','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:11:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9879,'tendbsingle','dbconf','MySQL-5.7','mysqld.slave_exec_mode','STRING','STRICT','|STRICT|IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9880,'tendbsingle','dbconf','MySQL-5.7','mysqld.slave_net_timeout','INT','120','[15,300]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9881,'tendbsingle','dbconf','MySQL-5.7','mysqld.slave_parallel_type','STRING','DATABASE','DATABASE| LOGICAL_CLOCK','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:55:08',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9882,'tendbsingle','dbconf','MySQL-5.7','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:58:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9883,'tendbsingle','dbconf','MySQL-5.7','mysqld.slave_rows_search_algorithms','STRING','TABLE_SCAN,INDEX_SCAN','TABLE_SCAN,INDEX_SCAN ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9884,'tendbsingle','dbconf','MySQL-5.7','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9885,'tendbsingle','dbconf','MySQL-5.7','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9886,'tendbsingle','dbconf','MySQL-5.7','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9887,'tendbsingle','dbconf','MySQL-5.7','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9888,'tendbsingle','dbconf','MySQL-5.7','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9889,'tendbsingle','dbconf','MySQL-5.7','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9890,'tendbsingle','dbconf','MySQL-5.7','mysqld.sql_mode','STRING','\'\'','\'\'| NO_TABLE_OPTIONS| NO_ZERO_DATE| ONLY_FULL_GROUP_BY| STRICT_ALL_TABLES| ERROR_FOR_DIVISION_BY_ZERO| HIGH_NOT_PRECEDENCE| IGNORE_SPACE| NO_AUTO_CREATE_USER| NO_AUTO_VALUE_ON_ZERO| NO_BACKSLASH_ESCAPES| NO_UNSIGNED_SUBTRACTION| NO_ENGINE_SUBSTITUTION| ANSI_QUOTES| NO_KEY_OPTIONS| PIPES_AS_CONCAT| ALLOW_INVALID_DATES| NO_DIR_IN_CREATE| NO_ZERO_IN_DATE| REAL_AS_FLOAT| STRICT_TRANS_TABLES| NO_FIELD_OPTIONS ','ENUMS',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 11:29:17',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9891,'tendbsingle','dbconf','MySQL-5.7','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9892,'tendbsingle','dbconf','MySQL-5.7','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9893,'tendbsingle','dbconf','MySQL-5.7','mysqld.sync_binlog','INT','0','[0,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9894,'tendbsingle','dbconf','MySQL-5.7','mysqld.table_definition_cache','STRING','{MAX(DBInitMemory*512/1000,2048)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9895,'tendbsingle','dbconf','MySQL-5.7','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9896,'tendbsingle','dbconf','MySQL-5.7','mysqld.table_open_cache_instances','STRING','{MIN(DBInitMemory/1000,16)}','','',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9897,'tendbsingle','dbconf','MySQL-5.7','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9898,'tendbsingle','dbconf','MySQL-5.7','mysqld.thread_handling','STRING','one-thread-per-connection','one-thread-per-connection| pool-of-threads ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9899,'tendbsingle','dbconf','MySQL-5.7','mysqld.thread_pool_oversubscribe','INT','3','[3,32]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9900,'tendbsingle','dbconf','MySQL-5.7','mysqld.thread_pool_size','STRING','{MIN(DBInitCpu,64)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9901,'tendbsingle','dbconf','MySQL-5.7','mysqld.thread_stack','INT','524288','[131072,1073741824]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9902,'tendbsingle','dbconf','MySQL-5.7','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9903,'tendbsingle','dbconf','MySQL-5.7','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9904,'tendbsingle','dbconf','MySQL-5.7','mysqld.tmp_table_size','INT','209715200','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9905,'tendbsingle','dbconf','MySQL-5.7','mysqld.transaction_alloc_block_size','INT','8192','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9906,'tendbsingle','dbconf','MySQL-5.7','mysqld.transaction_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9907,'tendbsingle','dbconf','MySQL-5.7','mysqld.transaction_prealloc_size','INT','4096','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9908,'tendbsingle','dbconf','MySQL-5.7','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9909,'tendbsingle','dbconf','MySQL-5.7','mysqld.updatable_views_with_limit','STRING','YES','YES| NO ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (9910,'tendbsingle','dbconf','MySQL-5.7','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10974,'tendbsingle','dbconf','MySQL-5.7','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10981,'tendbsingle','dbconf','MySQL-8.0','client.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 19:15:59',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10129,'tendbsingle','dbconf','MySQL-8.0','mysql.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10130,'tendbsingle','dbconf','MySQL-8.0','mysql.no-auto-rehash','STRING','true','true | false','ENUM',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 19:28:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10131,'tendbsingle','dbconf','MySQL-8.0','mysqld.automatic_sp_privileges','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10132,'tendbsingle','dbconf','MySQL-8.0','mysqld.auto_increment_increment','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10133,'tendbsingle','dbconf','MySQL-8.0','mysqld.auto_increment_offset','INT','1','[1,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10134,'tendbsingle','dbconf','MySQL-8.0','mysqld.avoid_temporal_upgrade','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10135,'tendbsingle','dbconf','MySQL-8.0','mysqld.back_log','INT','3000','[1,65535]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10136,'tendbsingle','dbconf','MySQL-8.0','mysqld.bind-address','STRING','{{.Mysqld.BindAddress}}','','',2,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:04',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10137,'tendbsingle','dbconf','MySQL-8.0','mysqld.binlog_cache_size','INT','2097152','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10138,'tendbsingle','dbconf','MySQL-8.0','mysqld.binlog_checksum','STRING','CRC32','NONE| CRC32 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10167,'tendbsingle','dbconf','MySQL-8.0','mysqld.binlog_expire_logs_seconds','INT','2592000','[0, 4294967295]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-26 20:27:27',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10139,'tendbsingle','dbconf','MySQL-8.0','mysqld.binlog_format','STRING','ROW','ROW| MIXED| STATEMENT','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 14:31:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10140,'tendbsingle','dbconf','MySQL-8.0','mysqld.binlog_order_commits','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10141,'tendbsingle','dbconf','MySQL-8.0','mysqld.binlog_rows_query_log_events','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10142,'tendbsingle','dbconf','MySQL-8.0','mysqld.binlog_row_image','STRING','FULL','FULL| MINIMAL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10143,'tendbsingle','dbconf','MySQL-8.0','mysqld.binlog_stmt_cache_size','INT','32768','[4096,16777216]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10144,'tendbsingle','dbconf','MySQL-8.0','mysqld.block_encryption_mode','STRING','AES-128-ECB','AES-128-ECB| AES-192-ECB| AES-256-ECB| AES-128-CBC| AES-192-CBC| AES-256-CBC ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10145,'tendbsingle','dbconf','MySQL-8.0','mysqld.bulk_insert_buffer_size','INT','8388608','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10146,'tendbsingle','dbconf','MySQL-8.0','mysqld.character_set_filesystem','STRING','BINARY','UTF8| LATIN1| GBK| BINARY ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10147,'tendbsingle','dbconf','MySQL-8.0','mysqld.character_set_server','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10148,'tendbsingle','dbconf','MySQL-8.0','mysqld.collation_server','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10149,'tendbsingle','dbconf','MySQL-8.0','mysqld.concurrent_insert','STRING','AUTO','NEVER| AUTO| ALWAYS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10150,'tendbsingle','dbconf','MySQL-8.0','mysqld.connect_timeout','INT','10','[2,1800]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10151,'tendbsingle','dbconf','MySQL-8.0','mysqld.datadir','STRING','{{.Mysqld.Datadir}}/data',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10152,'tendbsingle','dbconf','MySQL-8.0','mysqld.default-storage-engine','STRING','InnoDB','InnoDB','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:26:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10153,'tendbsingle','dbconf','MySQL-8.0','mysqld.default_authentication_plugin','STRING','MYSQL_NATIVE_PASSWORD','MYSQL_NATIVE_PASSWORD| SHA256_PASSWORD| CACHING_SHA2_PASSWORD ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10154,'tendbsingle','dbconf','MySQL-8.0','mysqld.default_password_lifetime','INT','0','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10156,'tendbsingle','dbconf','MySQL-8.0','mysqld.default_time_zone','STRING','SYSTEM','SYSTEM | +00:00 | +01:00 | +02:00 | +03:00 | +04:00 | +05:00 | +06:00 | +07:00 | +08:00 | +09:00 | +10:00 | +11:00 | +12:00 | -12:00 | -11:00 | -10:00 | -09:00 | -08:00 | -07:00 | -06:00 | -05:00 | -04:00 | -03:00 | -02:00 | -01:00','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-07 15:35:58',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10157,'tendbsingle','dbconf','MySQL-8.0','mysqld.default_week_format','INT','0','[0,7]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10158,'tendbsingle','dbconf','MySQL-8.0','mysqld.delayed_insert_limit','INT','100','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10159,'tendbsingle','dbconf','MySQL-8.0','mysqld.delayed_insert_timeout','INT','300','[1,3600]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10160,'tendbsingle','dbconf','MySQL-8.0','mysqld.delayed_queue_size','INT','1000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10161,'tendbsingle','dbconf','MySQL-8.0','mysqld.delay_key_write','STRING','ON','ON| OFF| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10162,'tendbsingle','dbconf','MySQL-8.0','mysqld.disconnect_on_expired_password','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10163,'tendbsingle','dbconf','MySQL-8.0','mysqld.div_precision_increment','INT','4','[0,30]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10164,'tendbsingle','dbconf','MySQL-8.0','mysqld.end_markers_in_json','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10165,'tendbsingle','dbconf','MySQL-8.0','mysqld.eq_range_index_dive_limit','INT','200','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10166,'tendbsingle','dbconf','MySQL-8.0','mysqld.event_scheduler','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10168,'tendbsingle','dbconf','MySQL-8.0','mysqld.explicit_defaults_for_timestamp','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10169,'tendbsingle','dbconf','MySQL-8.0','mysqld.flush_time','INT','0','[0,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10170,'tendbsingle','dbconf','MySQL-8.0','mysqld.ft_max_word_len','INT','84','[10,4294967295]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10171,'tendbsingle','dbconf','MySQL-8.0','mysqld.ft_min_word_len','INT','4','[1,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10172,'tendbsingle','dbconf','MySQL-8.0','mysqld.ft_query_expansion_limit','INT','20','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10173,'tendbsingle','dbconf','MySQL-8.0','mysqld.group_concat_max_len','INT','1024','[4,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10174,'tendbsingle','dbconf','MySQL-8.0','mysqld.host_cache_size','INT','644','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10175,'tendbsingle','dbconf','MySQL-8.0','mysqld.init_connect','STRING','\"insert into infodba_schema.conn_log values(connection_id(),now(),user(),current_user(),\'{{.Mysqld.BindAddress}}\');\"','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 17:11:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10176,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_adaptive_flushing','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10177,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_adaptive_flushing_lwm','INT','10','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10178,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_adaptive_hash_index','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10179,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_adaptive_max_sleep_delay','INT','150000','[1,1000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10180,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_async_truncate_size','INT','128','[128,168]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10181,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_autoextend_increment','INT','64','[1,1000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10182,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_autoinc_lock_mode','INT','2','[0,2]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10183,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_dump_at_shutdown','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10184,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_dump_pct','INT','25','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10185,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_instances','INT','4','1 | 4 | 8','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10186,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_load_at_startup','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10187,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_buffer_pool_size','STRING','{{.Mysqld.InnodbBufferPoolSize}}','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:06',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10188,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_change_buffering','STRING','ALL','NONE| INSERTS| DELETES| CHANGES| PURGES| ALL ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10189,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_change_buffer_max_size','INT','25','[0,50]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10190,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_checksum_algorithm','STRING','CRC32','INNODB| CRC32| NONE| STRICT_INNODB| STRICT_CRC32| STRICT_NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10191,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_cmp_per_index_enabled','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10192,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_commit_concurrency','INT','0','[0,1000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10193,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_compression_failure_threshold_pct','INT','5','[0,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10194,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_compression_level','INT','6','[0,9]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10195,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_compression_pad_pct_max','INT','50','[0,70]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10196,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_concurrency_tickets','INT','5000','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10197,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_data_file_path','STRING','ibdata1:1G:autoextend',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10198,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_data_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/data',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:31',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10199,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_deadlock_detect','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10200,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_default_row_format','STRING','DYNAMIC','DYNAMIC| COMPACT| REDUNDANT ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10201,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_disable_sort_file_cache','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10202,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_fast_ahi_cleanup_for_drop_table','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10204,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_file_per_table','INT','1','1','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10205,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_flush_log_at_trx_commit','STRING','0','0| 1| 2 ','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10206,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_flush_method','STRING','O_DIRECT','fsync| O_DSYNC| littlesync| nosync| O_DIRECT| O_DIRECT_NO_FSYNC ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10207,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_flush_neighbors','STRING','0','0| 1| 2 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10208,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_flush_sync','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10209,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_ft_cache_size','INT','8000000','[1600000,80000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10210,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_ft_enable_diag_print','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10211,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_ft_enable_stopword','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10212,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_ft_max_token_size','INT','84','[10,84]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10213,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_ft_min_token_size','INT','3','[0,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10214,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_ft_num_word_optimize','INT','2000','[1000,10000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10215,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_ft_result_cache_limit','INT','2000000000','[1000000,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10216,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_ft_server_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10217,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_ft_sort_pll_degree','INT','2','[1,16]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10218,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_ft_total_cache_size','INT','640000000','[32000000,1600000000]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10219,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_ft_user_stopword_table','STRING','NULL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10220,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_io_capacity','INT','1000','[1,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10221,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_io_capacity_max','INT','40000','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10222,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_lock_wait_timeout','INT','50','[1,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10223,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_log_buffer_size','INT','33554432','[1024,134217728]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10224,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_log_checksums','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10225,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_log_compressed_pages','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10226,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_log_files_in_group','INT','4','[3,10]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10227,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_log_file_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10228,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_log_group_home_dir','STRING','{{.Mysqld.Datadir}}/innodb/log',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10229,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_lru_scan_depth','INT','1024','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10230,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_max_dirty_pages_pct','FLOAT','75','[0,99]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10231,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_max_dirty_pages_pct_lwm','FLOAT','0','[0,75]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10232,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_max_purge_lag','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10233,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_max_purge_lag_delay','INT','0','[0,10000000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10234,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_max_undo_log_size','INT','1073741824','[10485760,17179869184]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10235,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_monitor_disable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10236,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_monitor_enable','STRING','ALL','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10237,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_old_blocks_pct','INT','37','[5,95]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10238,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_old_blocks_time','INT','1000','[0,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10239,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_online_alter_log_max_size','INT','134217728','[134217728,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10240,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_optimize_fulltext_only','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10241,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_page_cleaners','INT','4','[1,64]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10242,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_print_all_deadlocks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10243,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_purge_batch_size','INT','300','[1,5000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10244,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_purge_rseg_truncate_frequency','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10245,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_purge_threads','INT','4','[1,32]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10246,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_random_read_ahead','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10247,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_read_ahead_threshold','INT','56','[0,64]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10248,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_read_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10249,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_rollback_on_timeout','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10250,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_rollback_segments','INT','128','[1,128]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10251,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_sort_buffer_size','INT','1048576','[65536,67108864]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10252,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_spin_wait_delay','INT','6','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10253,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_stats_auto_recalc','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10254,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_stats_method','STRING','NULLS_EQUAL','NULLS_EQUAL| NULLS_UNEQUAL| NULLS_IGNORED ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10255,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_stats_on_metadata','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10256,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_stats_persistent','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10257,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_stats_persistent_sample_pages','INT','20','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10258,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_stats_transient_sample_pages','INT','8','[1,4294967296]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10259,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_status_output','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10260,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_status_output_locks','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10261,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_strict_mode','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10262,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_sync_array_size','INT','1','[1,1024]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10263,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_sync_spin_loops','INT','30','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10264,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_table_drop_mode','STRING','SYNC_DROP','SYNC_DROP| ASYNC_DROP ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10265,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_table_locks','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10266,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_thread_concurrency','INT','16','[0,128]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10267,'tendbsingle','dbconf','MySQL-8.0','mysqld.innodb_write_io_threads','STRING','8','[0,32]','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 14:56:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10268,'tendbsingle','dbconf','MySQL-8.0','mysqld.interactive_timeout','INT','86400','[1, 86400]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-07 11:55:21',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10269,'tendbsingle','dbconf','MySQL-8.0','mysqld.join_buffer_size','STRING','{MIN(DBInitMemory*128,262144)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10270,'tendbsingle','dbconf','MySQL-8.0','mysqld.key_buffer','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10271,'tendbsingle','dbconf','MySQL-8.0','mysqld.key_cache_age_threshold','INT','300','[100,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10272,'tendbsingle','dbconf','MySQL-8.0','mysqld.key_cache_block_size','INT','1024','[512,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10273,'tendbsingle','dbconf','MySQL-8.0','mysqld.key_cache_division_limit','INT','100','[1,100]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10274,'tendbsingle','dbconf','MySQL-8.0','mysqld.lc_time_names','STRING','EN_US','JA_JP| PT_BR| EN_US ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10275,'tendbsingle','dbconf','MySQL-8.0','mysqld.local_infile','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10276,'tendbsingle','dbconf','MySQL-8.0','mysqld.lock_wait_timeout','INT','31536000','[1,31536000]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10277,'tendbsingle','dbconf','MySQL-8.0','mysqld.log_bin','STRING','{{.Mysqld.Logdir}}/binlog/binlog{{.Mysqld.Port}}.bin',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:53',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10279,'tendbsingle','dbconf','MySQL-8.0','mysqld.log_bin_trust_function_creators','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10280,'tendbsingle','dbconf','MySQL-8.0','mysqld.log_output','STRING','FILE','FILE| TABLE| NONE ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10281,'tendbsingle','dbconf','MySQL-8.0','mysqld.log_queries_not_using_indexes','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10282,'tendbsingle','dbconf','MySQL-8.0','mysqld.log_slave_updates','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10283,'tendbsingle','dbconf','MySQL-8.0','mysqld.log_slow_admin_statements','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10284,'tendbsingle','dbconf','MySQL-8.0','mysqld.log_throttle_queries_not_using_indexes','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10285,'tendbsingle','dbconf','MySQL-8.0','mysqld.log_timestamps','STRING','SYSTEM','UTC| SYSTEM ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10287,'tendbsingle','dbconf','MySQL-8.0','mysqld.long_query_time','FLOAT','1','[0,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10278,'tendbsingle','dbconf','MySQL-8.0','mysqld.loose_log_bin_compress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10332,'tendbsingle','dbconf','MySQL-8.0','mysqld.loose_relay_log_uncompress','STRING','OFF','ON|OFF','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-05-11 12:35:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10288,'tendbsingle','dbconf','MySQL-8.0','mysqld.lower_case_table_names','INT','0','0|1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10289,'tendbsingle','dbconf','MySQL-8.0','mysqld.low_priority_updates','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10290,'tendbsingle','dbconf','MySQL-8.0','mysqld.master_verify_checksum','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10291,'tendbsingle','dbconf','MySQL-8.0','mysqld.max_allowed_packet','INT','134217728','[1024,1073741824]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10292,'tendbsingle','dbconf','MySQL-8.0','mysqld.max_binlog_cache_size','STRING','128M',NULL,'BYTES',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10293,'tendbsingle','dbconf','MySQL-8.0','mysqld.max_binlog_size','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10294,'tendbsingle','dbconf','MySQL-8.0','mysqld.max_connections','INT','5000','[500,100000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10295,'tendbsingle','dbconf','MySQL-8.0','mysqld.max_connect_errors','INT','99999999','[1,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10296,'tendbsingle','dbconf','MySQL-8.0','mysqld.max_error_count','INT','64','[0,65535]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10297,'tendbsingle','dbconf','MySQL-8.0','mysqld.max_heap_table_size','INT','67108864','[16384,68719476736]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10298,'tendbsingle','dbconf','MySQL-8.0','mysqld.max_length_for_sort_data','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10299,'tendbsingle','dbconf','MySQL-8.0','mysqld.max_points_in_geometry','INT','65536','[3,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10300,'tendbsingle','dbconf','MySQL-8.0','mysqld.max_prepared_stmt_count','INT','16382','[0,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10301,'tendbsingle','dbconf','MySQL-8.0','mysqld.max_sort_length','INT','1024','[4,8388608]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10302,'tendbsingle','dbconf','MySQL-8.0','mysqld.max_sp_recursion_depth','INT','0','[0,255]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10303,'tendbsingle','dbconf','MySQL-8.0','mysqld.max_user_connections','INT','0','[0,10240]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10304,'tendbsingle','dbconf','MySQL-8.0','mysqld.min_examined_row_limit','INT','0','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10305,'tendbsingle','dbconf','MySQL-8.0','mysqld.myisam_sort_buffer_size','INT','67108864','[2097152,134217728]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10306,'tendbsingle','dbconf','MySQL-8.0','mysqld.mysql_native_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10307,'tendbsingle','dbconf','MySQL-8.0','mysqld.net_buffer_length','INT','16384','[1024,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10308,'tendbsingle','dbconf','MySQL-8.0','mysqld.net_read_timeout','INT','30','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10309,'tendbsingle','dbconf','MySQL-8.0','mysqld.net_retry_count','INT','10','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10310,'tendbsingle','dbconf','MySQL-8.0','mysqld.net_write_timeout','INT','60','[1,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10311,'tendbsingle','dbconf','MySQL-8.0','mysqld.ngram_token_size','INT','2','[1,10]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10312,'tendbsingle','dbconf','MySQL-8.0','mysqld.optimizer_prune_level','STRING','1','0| 1 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10313,'tendbsingle','dbconf','MySQL-8.0','mysqld.optimizer_search_depth','INT','62','[0,62]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10314,'tendbsingle','dbconf','MySQL-8.0','mysqld.optimizer_switch','STRING','INDEX_MERGE=ON,INDEX_MERGE_UNION=ON,INDEX_MERGE_SORT_UNION=ON,INDEX_MERGE_INTERSECTION=ON,ENGINE_CONDITION_PUSHDOWN=ON,INDEX_CONDITION_PUSHDOWN=ON,MRR=ON,MRR_COST_BASED=ON,BLOCK_NESTED_LOOP=ON,BATCHED_KEY_ACCESS=OFF,MATERIALIZATION=ON,SEMIJOIN=ON,LOOSESCAN=ON,FIRSTMATCH=ON,DUPLICATEWEEDOUT=ON,SUBQUERY_MATERIALIZATION_COST_BASED=ON,USE_INDEX_EXTENSIONS=ON,CONDITION_FANOUT_FILTER=ON,DERIVED_MERGE=ON','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10315,'tendbsingle','dbconf','MySQL-8.0','mysqld.optimizer_trace_limit','INT','1','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10316,'tendbsingle','dbconf','MySQL-8.0','mysqld.optimizer_trace_max_mem_size','INT','16384','[0,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10317,'tendbsingle','dbconf','MySQL-8.0','mysqld.optimizer_trace_offset','INT','-1','[-2147483647,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10318,'tendbsingle','dbconf','MySQL-8.0','mysqld.performance_schema','STRING','OFF','ON| OFF ','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10319,'tendbsingle','dbconf','MySQL-8.0','mysqld.port','STRING','{{.Mysqld.Port}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10320,'tendbsingle','dbconf','MySQL-8.0','mysqld.preload_buffer_size','INT','32768','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10321,'tendbsingle','dbconf','MySQL-8.0','mysqld.query_alloc_block_size','INT','8192','[1024,16384]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10324,'tendbsingle','dbconf','MySQL-8.0','mysqld.query_prealloc_size','INT','8192','[8192,1048576]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10326,'tendbsingle','dbconf','MySQL-8.0','mysqld.range_alloc_block_size','INT','4096','[4096,4294967295]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10327,'tendbsingle','dbconf','MySQL-8.0','mysqld.range_optimizer_max_mem_size','INT','8388608','[0,17179869184]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10328,'tendbsingle','dbconf','MySQL-8.0','mysqld.read_buffer_size','INT','262144','[8200,2147479552]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10329,'tendbsingle','dbconf','MySQL-8.0','mysqld.read_rnd_buffer_size','INT','524288','[1,2147483647]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10330,'tendbsingle','dbconf','MySQL-8.0','mysqld.relay-log','STRING','{{.Mysqld.Datadir}}/relay-log/relay-log.bin',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:36',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10331,'tendbsingle','dbconf','MySQL-8.0','mysqld.relay_log_recovery','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10337,'tendbsingle','dbconf','MySQL-8.0','mysqld.server_id','STRING','{{.Mysqld.ServerId}}',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:01:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10338,'tendbsingle','dbconf','MySQL-8.0','mysqld.session_track_gtids','STRING','OFF','OFF| OWN_GTID| ALL_GTIDS ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10339,'tendbsingle','dbconf','MySQL-8.0','mysqld.session_track_schema','STRING','ON','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10340,'tendbsingle','dbconf','MySQL-8.0','mysqld.session_track_state_change','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10341,'tendbsingle','dbconf','MySQL-8.0','mysqld.sha256_password_proxy_users','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10343,'tendbsingle','dbconf','MySQL-8.0','mysqld.show_old_temporals','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10344,'tendbsingle','dbconf','MySQL-8.0','mysqld.skip-name-resolve','STRING','true','true | false','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-09 18:37:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10345,'tendbsingle','dbconf','MySQL-8.0','mysqld.slave_compressed_protocol','INT','1','1 | 0','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:11:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10346,'tendbsingle','dbconf','MySQL-8.0','mysqld.slave_exec_mode','STRING','STRICT','|STRICT|IDEMPOTENT','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10347,'tendbsingle','dbconf','MySQL-8.0','mysqld.slave_net_timeout','INT','120','[15,300]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10348,'tendbsingle','dbconf','MySQL-8.0','mysqld.slave_parallel_type','STRING','DATABASE','DATABASE| LOGICAL_CLOCK','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:55:08',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10349,'tendbsingle','dbconf','MySQL-8.0','mysqld.slave_parallel_workers','INT','4','0| 1| 2| 4| 8| 16| 32| 64','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:58:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10350,'tendbsingle','dbconf','MySQL-8.0','mysqld.slave_rows_search_algorithms','STRING','TABLE_SCAN,INDEX_SCAN,HASH_SCAN','TABLE_SCAN,INDEX_SCAN| INDEX_SCAN,HASH_SCAN| TABLE_SCAN,HASH_SCAN| TABLE_SCAN,INDEX_SCAN,HASH_SCAN ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10351,'tendbsingle','dbconf','MySQL-8.0','mysqld.slow_launch_time','INT','2','[1,10]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10352,'tendbsingle','dbconf','MySQL-8.0','mysqld.slow_query_log','STRING','ON','ON | OFF','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-17 14:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10353,'tendbsingle','dbconf','MySQL-8.0','mysqld.slow_query_log_file','STRING','{{.Mysqld.Logdir}}/slow-query.log',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10354,'tendbsingle','dbconf','MySQL-8.0','mysqld.socket','STRING','{{.Mysqld.Datadir}}/mysql.sock',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10355,'tendbsingle','dbconf','MySQL-8.0','mysqld.sort_buffer_size','INT','2097152','[32768,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10356,'tendbsingle','dbconf','MySQL-8.0','mysqld.sql_auto_is_null','STRING','OFF','ON| OFF ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10357,'tendbsingle','dbconf','MySQL-8.0','mysqld.sql_mode','STRING','\'\'','\'\'| NO_ENGINE_SUBSTITUTION| ALLOW_INVALID_DATES| ANSI_QUOTES| ERROR_FOR_DIVISION_BY_ZERO| HIGH_NOT_PRECEDENCE| IGNORE_SPACE| NO_AUTO_VALUE_ON_ZERO| NO_BACKSLASH_ESCAPES| NO_DIR_IN_CREATE| NO_UNSIGNED_SUBTRACTION| NO_ZERO_DATE| NO_ZERO_IN_DATE| ONLY_FULL_GROUP_BY| PIPES_AS_CONCAT| REAL_AS_FLOAT| STRICT_ALL_TABLES| STRICT_TRANS_TABLES ','ENUMS',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 11:29:17',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10358,'tendbsingle','dbconf','MySQL-8.0','mysqld.sql_safe_updates','STRING','OFF','OFF| ON ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10359,'tendbsingle','dbconf','MySQL-8.0','mysqld.stored_program_cache','INT','1024','[16, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-20 12:23:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10360,'tendbsingle','dbconf','MySQL-8.0','mysqld.sync_binlog','INT','0','[0,4294967295]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10361,'tendbsingle','dbconf','MySQL-8.0','mysqld.table_definition_cache','STRING','{MAX(DBInitMemory*512/1000,2048)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10362,'tendbsingle','dbconf','MySQL-8.0','mysqld.table_open_cache','INT','5120','[1, 524288]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:10:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10363,'tendbsingle','dbconf','MySQL-8.0','mysqld.table_open_cache_instances','STRING','{MIN(DBInitMemory/1000,16)}','','',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10364,'tendbsingle','dbconf','MySQL-8.0','mysqld.thread_cache_size','INT','8','[4,64]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10365,'tendbsingle','dbconf','MySQL-8.0','mysqld.thread_handling','STRING','one-thread-per-connection','one-thread-per-connection| pool-of-threads ','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10366,'tendbsingle','dbconf','MySQL-8.0','mysqld.thread_pool_oversubscribe','INT','3','[3,32]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10367,'tendbsingle','dbconf','MySQL-8.0','mysqld.thread_pool_size','STRING','{MIN(DBInitCpu,64)}','','',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10368,'tendbsingle','dbconf','MySQL-8.0','mysqld.thread_stack','INT','524288','[131072,1073741824]','RANGE',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10369,'tendbsingle','dbconf','MySQL-8.0','mysqld.time_zone','STRING','SYSTEM','SYSTEM| -12:00| -11:00| -10:00| -09:00| -08:00| -07:00| -06:00| -05:00| -04:00| -03:00| -02:00| -01:00| +00:00| +01:00| +02:00| +03:00| +04:00| +05:00| +05:30| +06:00| +06:30| +07:00| +08:00| +09:00| +10:00| +11:00| +12:00| +13:00 ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10370,'tendbsingle','dbconf','MySQL-8.0','mysqld.tmpdir','STRING','{{.Mysqld.Datadir}}/tmp',NULL,'',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-03-28 18:00:23',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10371,'tendbsingle','dbconf','MySQL-8.0','mysqld.tmp_table_size','INT','209715200','[1024,1073741824]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10372,'tendbsingle','dbconf','MySQL-8.0','mysqld.transaction_alloc_block_size','INT','8192','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10373,'tendbsingle','dbconf','MySQL-8.0','mysqld.transaction_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10374,'tendbsingle','dbconf','MySQL-8.0','mysqld.transaction_prealloc_size','INT','4096','[1024,131072]','RANGE',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10375,'tendbsingle','dbconf','MySQL-8.0','mysqld.tx_isolation','STRING','REPEATABLE-READ','REPEATABLE-READ | READ-COMMITTED','ENUM',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2023-04-19 15:19:16',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10376,'tendbsingle','dbconf','MySQL-8.0','mysqld.updatable_views_with_limit','STRING','YES','YES| NO ','ENUM',-1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10377,'tendbsingle','dbconf','MySQL-8.0','mysqld.wait_timeout','INT','86400','[1,31536000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-26 18:11:34','2022-09-26 18:11:34',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (10977,'tendbsingle','dbconf','MySQL-8.0','mysqldump.default-character-set','STRING','{{.Mysqld.CharacterSetServer}}','LATIN1| UTF8| GBK| UTF8MB4 |{{mysqld.character_set_server}}','ENUM',2,0,0,0,0,'{{mysqld.character_set_server}}',NULL,NULL,-1,NULL,NULL,'2022-09-26 20:47:10','2023-03-28 18:01:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12417,'tendbsingle','deploy','deploy_info','charset','STRING','utf8','utf8|utf8mb4|latin1|gbk','ENUM',1,0,0,0,1,NULL,NULL,'字符集',-1,NULL,NULL,'2022-10-27 11:03:11','2022-10-27 11:03:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12418,'tendbsingle','deploy','deploy_info','db_version','STRING','MySQL-5.7','MySQL-5.5 | MySQL-5.6 | MySQL-5.7 | MySQL-8.0','ENUM',1,0,0,0,1,NULL,NULL,'DB版本',-1,NULL,NULL,'2022-10-27 11:03:11','2022-10-27 11:03:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12419,'tendbsingle','deploy','deploy_info','storage_engine','STRING','InnoDB','InnoDB','',1,0,0,0,1,NULL,NULL,'存储引擎',-1,NULL,NULL,'2022-10-27 11:03:11','2022-10-27 11:03:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12420,'tendbsingle','deploy','deploy_info','tolerance_level','STRING','compus','idc|compus|city','ENUM',-1,0,0,0,1,NULL,NULL,'容灾级别',-1,NULL,NULL,'2022-10-27 11:03:11','2022-10-27 11:03:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15850,'tendbsingle','mysql_monitor','items-config.yaml','character-consistency','STRING','{\"enable\":true, \"schedule\":\"0 0 14 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15851,'tendbsingle','mysql_monitor','items-config.yaml','engine','STRING','{\"enable\":true, \"schedule\":\"0 0 12 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15852,'tendbsingle','mysql_monitor','items-config.yaml','ext3-check','STRING','{\"enable\":true, \"schedule\":\"0 0 16 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15853,'tendbsingle','mysql_monitor','items-config.yaml','master-slave-heartbeat','STRING','{\"enable\":true, \"schedule\":\"@every 10s\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15854,'tendbsingle','mysql_monitor','items-config.yaml','mysql-config-diff','STRING','{\"enable\":true, \"schedule\":\"@every 10m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15855,'tendbsingle','mysql_monitor','items-config.yaml','mysql-connlog-report','STRING','{\"enable\":true, \"schedule\":\"0 40 23 * * *\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15856,'tendbsingle','mysql_monitor','items-config.yaml','mysql-connlog-rotate','STRING','{\"enable\":true, \"schedule\":\"0 30 23 * * *\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15857,'tendbsingle','mysql_monitor','items-config.yaml','mysql-connlog-size','STRING','{\"enable\":true, \"schedule\":\"0 0 12 * * *\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15858,'tendbsingle','mysql_monitor','items-config.yaml','mysql-err-critical','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15859,'tendbsingle','mysql_monitor','items-config.yaml','mysql-err-notice','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15860,'tendbsingle','mysql_monitor','items-config.yaml','mysql-inject','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15861,'tendbsingle','mysql_monitor','items-config.yaml','mysql-lock','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15862,'tendbsingle','mysql_monitor','items-config.yaml','rotate-slowlog','STRING','{\"enable\":true, \"schedule\":\"0 55 23 * * *\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15863,'tendbsingle','mysql_monitor','items-config.yaml','routine-definer','STRING','{\"enable\":true, \"schedule\":\"0 0 15 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15864,'tendbsingle','mysql_monitor','items-config.yaml','slave-status','STRING','{\"enable\":true, \"schedule\":\"@every 1m\", \"machine_type\":\"backend\", \"role\": [\"repeater\", \"slave\"]}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15865,'tendbsingle','mysql_monitor','items-config.yaml','trigger-definer','STRING','{\"enable\":true, \"schedule\":\"0 0 15 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15866,'tendbsingle','mysql_monitor','items-config.yaml','view-definer','STRING','{\"enable\":true, \"schedule\":\"0 0 15 * * 1\", \"machine_type\":\"backend\", \"role\": []}','','MAP',1,0,0,0,0,NULL,'','',-1,NULL,'','2023-03-22 12:35:44','2023-03-22 12:35:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11726,'tendbsingle','proxyconf','default','conn_log','STRING','true','true|false','ENUM',1,0,1,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-09-27 11:52:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11727,'tendbsingle','proxyconf','default','daemon','STRING','true','true|false','ENUM',1,0,1,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-09-27 11:52:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11728,'tendbsingle','proxyconf','default','event-threads','INT','7','[1,10]','RANGE',1,0,1,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-09-27 11:52:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11729,'tendbsingle','proxyconf','default','ignore-user','STRING','MONITOR,proxy','','STRING',1,0,1,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-09-27 11:52:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11730,'tendbsingle','proxyconf','default','keepalive','STRING','true','true|false','ENUM',1,0,1,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-09-27 11:52:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11731,'tendbsingle','proxyconf','default','log-level','STRING','warning','info|warning','ENUM',1,0,1,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-09-27 11:52:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11732,'tendbsingle','proxyconf','default','plugins','STRING','admin, proxy','','STRING',1,0,1,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-09-27 11:52:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11733,'tendbsingle','proxyconf','default','proxy-address','STRING','1.1.1.1:3306','','STRING',1,0,1,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-09-27 11:52:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11734,'tendbsingle','proxyconf','default','query_response_time_stats','STRING','true','true|false','ENUM',1,0,1,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-09-27 11:52:55',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11735,'tendbsingle','user','tb_app_info','dba','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11736,'tendbsingle','user','tb_app_info','developer','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11737,'tendbsingle','user','tb_app_info','mongo_dba','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11738,'tendbsingle','user','tb_app_info','mysql_dba','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11739,'tendbsingle','user','tb_app_info','nosql_dba','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11740,'tendbsingle','user','tb_app_info','notifier','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11741,'tendbsingle','user','tb_app_info','opser','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11742,'tendbsingle','user','tb_app_info','productor','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-10-20 12:26:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (11743,'tendbsingle','user','tb_app_info','redis_dba','STRING','',NULL,'',-1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 11:52:55','2022-10-20 12:26:09',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
diff --git a/dbm-services/common/db-config/assets/migrations/000022_TendisCache_data.down.sql b/dbm-services/common/db-config/assets/migrations/000022_TendisCache_data.down.sql
new file mode 100644
index 0000000000..5cda1940fd
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000022_TendisCache_data.down.sql
@@ -0,0 +1,5 @@
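+-- Roll back migration 000022: delete the TendisCache rows seeded by the up
+-- migration (all of tb_config_file_def, plus the tb_config_name_def rows that
+-- are not encrypted or that hold a '{{...}}' template default).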
+DELETE FROM tb_config_file_def WHERE namespace='TendisCache';
+DELETE FROM tb_config_name_def WHERE namespace='TendisCache' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000022_TendisCache_data.up.sql b/dbm-services/common/db-config/assets/migrations/000022_TendisCache_data.up.sql
new file mode 100644
index 0000000000..421d458256
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000022_TendisCache_data.up.sql
@@ -0,0 +1,104 @@
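+-- Seed migration 000022: TendisCache configuration definitions, exported with
+-- mysqldump (two dumps concatenated, one per table). The /*!NNNNN ... */
+-- wrappers are MySQL conditional comments, executed only by servers at or
+-- above the version encoded in the number (50717 = 5.7.17); those blocks probe
+-- for RocksDB and enable rocksdb_bulk_load to speed up the bulk INSERTs.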
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='TendisCache'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (60,'TendisCache','dbconf','TendisCache-2.8','DB参数配置','2.8_参数配置','','',0,0,0,'',0,0,0,'2.8_参数配置','2022-08-04 15:28:35','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (49,'TendisCache','dbconf','TendisCache-3.2','DB参数配置','TendisCache-3.2配置','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'TendisCache-3.2配置','2022-08-02 14:27:14','2023-03-28 21:40:02','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (51,'TendisCache','dbconf','TendisCache-4.0','DB参数配置','TendisCache-4.0配置','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'TendisCache-4.0配置','2022-08-02 14:27:14','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='TendisCache' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8164,'TendisCache','dbconf','TendisCache-3.2','appendfilename','STRING','\"appendonly.aof\"','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8165,'TendisCache','dbconf','TendisCache-3.2','appendfsync','STRING','everysec','everysec | always | no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8166,'TendisCache','dbconf','TendisCache-3.2','appendonly','STRING','no','no | yes','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8167,'TendisCache','dbconf','TendisCache-3.2','client-output-buffer-limit normal','STRING','1gb 1gb 60','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8169,'TendisCache','dbconf','TendisCache-3.2','client-output-buffer-limit pubsub','STRING','32mb 8mb 60','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8168,'TendisCache','dbconf','TendisCache-3.2','client-output-buffer-limit slave','STRING','2gb 1gb 60','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8181,'TendisCache','dbconf','TendisCache-3.2','daemonize','STRING','yes','yes | no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8163,'TendisCache','dbconf','TendisCache-3.2','databases','INT','16','[0, 999999]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8178,'TendisCache','dbconf','TendisCache-3.2','dir','STRING','{{datadir}}/{{port}}/data','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8177,'TendisCache','dbconf','TendisCache-3.2','logfile','STRING','{{datadir}}/{{port}}/redis.log','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8180,'TendisCache','dbconf','TendisCache-3.2','masterauth','STRING','{{masterauth}}','','',2,0,0,1,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8171,'TendisCache','dbconf','TendisCache-3.2','maxclients','INT','180000','[0, 999999]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8176,'TendisCache','dbconf','TendisCache-3.2','pidfile','STRING','{{datadir}}/{{port}}/redis.pid','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8170,'TendisCache','dbconf','TendisCache-3.2','port','INT','{{port}}','[0, 999999]','RANGE',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8173,'TendisCache','dbconf','TendisCache-3.2','repl-backlog-size','STRING','1gb','','BYTES',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8174,'TendisCache','dbconf','TendisCache-3.2','repl-backlog-ttl','INT','28800','[0, 999999]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8172,'TendisCache','dbconf','TendisCache-3.2','repl-ping-slave-period','INT','10','[0, 999999]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8175,'TendisCache','dbconf','TendisCache-3.2','repl-timeout','INT','28800','[0, 999999]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8162,'TendisCache','dbconf','TendisCache-3.2','tcp-keepalive','INT','300','[0, 999999]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8161,'TendisCache','dbconf','TendisCache-3.2','timeout','INT','0','[0, 999999]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,'超时配置xxxxxx','2022-04-25 10:00:47','2022-08-04 15:32:11',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
diff --git a/dbm-services/common/db-config/assets/migrations/000023_TendisplusInstance_data.down.sql b/dbm-services/common/db-config/assets/migrations/000023_TendisplusInstance_data.down.sql
new file mode 100644
index 0000000000..7cfc7f7871
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000023_TendisplusInstance_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='TendisplusInstance';
+DELETE FROM tb_config_name_def WHERE namespace='TendisplusInstance' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000023_TendisplusInstance_data.up.sql b/dbm-services/common/db-config/assets/migrations/000023_TendisplusInstance_data.up.sql
new file mode 100644
index 0000000000..fa03c5273e
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000023_TendisplusInstance_data.up.sql
@@ -0,0 +1,119 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='TendisplusInstance'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (63,'TendisplusInstance','redisconf','tendisplus-2.5','Tendisplus参数配置','tendisplus-2.5版本_参数配置','plat,app,cluster','cluster',1,1,0,NULL,0,0,0,'tendisplus-2.5版本_参数配置','2022-08-11 20:22:50','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='TendisplusInstance' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8702,'TendisplusInstance','redisconf','tendisplus-2.5','bind','STRING','{{address}}','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8723,'TendisplusInstance','redisconf','tendisplus-2.5','binlog-using-defaultCF','STRING','off','on|off','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8739,'TendisplusInstance','redisconf','tendisplus-2.5','binlogdelrange','INT','500000','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8713,'TendisplusInstance','redisconf','tendisplus-2.5','cluster-enabled','STRING','{{cluster_enabled}}','no|yes','ENUM',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8727,'TendisplusInstance','redisconf','tendisplus-2.5','cluster-migration-rate-limit','INT','200','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8710,'TendisplusInstance','redisconf','tendisplus-2.5','databases','INT','{{databases}}','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8741,'TendisplusInstance','redisconf','tendisplus-2.5','deletefilesinrange-for-binlog','INT','1','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8706,'TendisplusInstance','redisconf','tendisplus-2.5','dir','STRING','{{redis_data_dir}}/data/db','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8734,'TendisplusInstance','redisconf','tendisplus-2.5','domain-enabled','STRING','off','on|off','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8707,'TendisplusInstance','redisconf','tendisplus-2.5','dumpdir','STRING','{{redis_data_dir}}/data/dump','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8715,'TendisplusInstance','redisconf','tendisplus-2.5','executorThreadNum','INT','24','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8714,'TendisplusInstance','redisconf','tendisplus-2.5','executorWorkPoolSize','INT','2','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8742,'TendisplusInstance','redisconf','tendisplus-2.5','incrpushthreadnum','INT','10','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8719,'TendisplusInstance','redisconf','tendisplus-2.5','kvstorecount','INT','10','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8705,'TendisplusInstance','redisconf','tendisplus-2.5','logdir','STRING','{{redis_data_dir}}/data/log','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8704,'TendisplusInstance','redisconf','tendisplus-2.5','loglevel','STRING','notice','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8712,'TendisplusInstance','redisconf','tendisplus-2.5','masterauth','STRING','{{password}}','','',2,0,0,1,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8724,'TendisplusInstance','redisconf','tendisplus-2.5','maxBinlogKeepNum','INT','1','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8740,'TendisplusInstance','redisconf','tendisplus-2.5','migrate-gc-enabled','STRING','false','false | true','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2023-04-17 17:20:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8730,'TendisplusInstance','redisconf','tendisplus-2.5','migrate-snapshot-key-num','INT','30000','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8728,'TendisplusInstance','redisconf','tendisplus-2.5','migrateReceiveThreadnum','INT','4','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8729,'TendisplusInstance','redisconf','tendisplus-2.5','migrateSenderThreadnum','INT','4','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8738,'TendisplusInstance','redisconf','tendisplus-2.5','minbinlogkeepsec','INT','1800','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8725,'TendisplusInstance','redisconf','tendisplus-2.5','netBatchSize','INT','1048576','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8726,'TendisplusInstance','redisconf','tendisplus-2.5','netBatchTimeoutSec','INT','10','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8716,'TendisplusInstance','redisconf','tendisplus-2.5','netIoThreadNum','INT','3','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8717,'TendisplusInstance','redisconf','tendisplus-2.5','noexpire','STRING','no','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8735,'TendisplusInstance','redisconf','tendisplus-2.5','pauseTimeIndexMgr','INT','1','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8708,'TendisplusInstance','redisconf','tendisplus-2.5','pidfile','STRING','{{redis_data_dir}}/data/tendisplus.pid','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8703,'TendisplusInstance','redisconf','tendisplus-2.5','port','INT','{{port}}','[6379,55535]','RANGE',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8743,'TendisplusInstance','redisconf','tendisplus-2.5','rename-command','STRING','config confxx \nrename-command flushdb cleandb \nrename-command flushall cleanall\nrename-command debug nobug\nrename-command keys mykeys','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8711,'TendisplusInstance','redisconf','tendisplus-2.5','requirepass','STRING','{{password}}','','',2,0,0,1,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8718,'TendisplusInstance','redisconf','tendisplus-2.5','rocks.blockcachemb','STRING','{{rocks_blockcachemb}}','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8732,'TendisplusInstance','redisconf','tendisplus-2.5','rocks.cache_index_and_filter_blocks','INT','0','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8720,'TendisplusInstance','redisconf','tendisplus-2.5','rocks.compress_type','STRING','lz4','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8721,'TendisplusInstance','redisconf','tendisplus-2.5','rocks.max_background_compactions','INT','12','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8722,'TendisplusInstance','redisconf','tendisplus-2.5','rocks.write_buffer_size','INT','{{rocks_write_buffer_size}}','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8736,'TendisplusInstance','redisconf','tendisplus-2.5','scanCntIndexMgr','INT','10000','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8731,'TendisplusInstance','redisconf','tendisplus-2.5','slave-migrate-enabled','STRING','on','on|off','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8709,'TendisplusInstance','redisconf','tendisplus-2.5','slowlog','STRING','{{redis_data_dir}}/data/slowlog','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8737,'TendisplusInstance','redisconf','tendisplus-2.5','truncateBinlogIntervalMs','INT','100','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (8733,'TendisplusInstance','redisconf','tendisplus-2.5','truncateBinlogNum','INT','10000000','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-08-12 09:41:48','2022-08-12 09:41:48',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
diff --git a/dbm-services/common/db-config/assets/migrations/000024_TendisSSD_data.down.sql b/dbm-services/common/db-config/assets/migrations/000024_TendisSSD_data.down.sql
new file mode 100644
index 0000000000..0fe14fdb3b
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000024_TendisSSD_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='TendisSSD';
+DELETE FROM tb_config_name_def WHERE namespace='TendisSSD' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000024_TendisSSD_data.up.sql b/dbm-services/common/db-config/assets/migrations/000024_TendisSSD_data.up.sql
new file mode 100644
index 0000000000..e280700502
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000024_TendisSSD_data.up.sql
@@ -0,0 +1,78 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='TendisSSD'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (50,'TendisSSD','dbconf','TendisSSD-2.8','DB参数配置','TendisSSD-2.8配置','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'TendisSSD-2.8配置','2022-08-02 14:29:01','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='TendisSSD' AND (flag_encrypt!=1 or value_default like '{{%')
+
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
diff --git a/dbm-services/common/db-config/assets/migrations/000025_TendisX_data.down.sql b/dbm-services/common/db-config/assets/migrations/000025_TendisX_data.down.sql
new file mode 100644
index 0000000000..7c0d8404b3
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000025_TendisX_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='TendisX';
+DELETE FROM tb_config_name_def WHERE namespace='TendisX' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000025_TendisX_data.up.sql b/dbm-services/common/db-config/assets/migrations/000025_TendisX_data.up.sql
new file mode 100644
index 0000000000..d4e236ee38
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000025_TendisX_data.up.sql
@@ -0,0 +1,78 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='TendisX'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (54,'TendisX','dbconf','TendisX-4.0','DB参数配置','TendisX集群(4.0.10)','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'TendisX集群(4.0.10)','2022-08-02 14:29:01','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 0', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='TendisX' AND (flag_encrypt!=1 or value_default like '{{%')
+
+/*!50112 SET @disable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 0', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
diff --git a/dbm-services/common/db-config/assets/migrations/000026_TwemproxyRedisInstance_data.down.sql b/dbm-services/common/db-config/assets/migrations/000026_TwemproxyRedisInstance_data.down.sql
new file mode 100644
index 0000000000..74fd03c1ed
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000026_TwemproxyRedisInstance_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='TwemproxyRedisInstance';
+DELETE FROM tb_config_name_def WHERE namespace='TwemproxyRedisInstance' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000026_TwemproxyRedisInstance_data.up.sql b/dbm-services/common/db-config/assets/migrations/000026_TwemproxyRedisInstance_data.up.sql
new file mode 100644
index 0000000000..7810e63ac6
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000026_TwemproxyRedisInstance_data.up.sql
@@ -0,0 +1,169 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='TwemproxyRedisInstance'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (117,'TwemproxyRedisInstance','config','binlogbackup','配置','binlog备份相关的配置','plat,app,cluster','cluster',1,1,0,'tendisCache',5,365,0,'binlog备份相关的配置','2022-11-22 08:55:34','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (71,'TwemproxyRedisInstance','config','fullbackup','配置','备份相关的配置','plat,app,cluster','cluster',1,1,0,'tendisCache',5,365,0,'备份相关的配置','2022-09-01 17:00:08','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (118,'TwemproxyRedisInstance','config','heartbeat','配置','心跳相关的配置','plat,app,cluster','cluster',1,1,0,'tendisCache',5,365,0,'心跳相关的配置','2022-11-22 08:55:34','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (72,'TwemproxyRedisInstance','config','monitor','配置','监控相关的配置','plat,app,cluster','cluster',1,1,0,'tendisCache',5,365,0,'监控相关的配置','2022-09-01 17:00:08','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (69,'TwemproxyRedisInstance','dbconf','Redis-6','redis配置','redis6.0的配置文件','plat,app,cluster,host,instance','cluster',1,1,0,'tendisCache',5,365,0,'redis6.0的配置文件','2022-09-01 16:58:02','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (70,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','redis配置','twemproxy配置文件','plat,app,cluster','cluster',1,1,0,'tendisCache',5,365,0,'twemproxy配置文件','2022-09-01 16:59:02','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 0', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='TwemproxyRedisInstance' AND (flag_encrypt!=1 or value_default like '{{%')
+
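+-- NOTE: value_allowed encodes the constraint named by value_type_sub: pipe-separated
+-- options for ENUM (e.g. 'yes|no'), an inclusive interval for RANGE (e.g. '[0,365]').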
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12936,'TwemproxyRedisInstance','config','binlogbackup','cron','STRING','@every 10m','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-22 08:55:44','2022-11-22 08:55:44',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12935,'TwemproxyRedisInstance','config','binlogbackup','old_file_left_day','INT','2','[0,365]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-22 08:55:43','2022-11-22 08:55:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12934,'TwemproxyRedisInstance','config','binlogbackup','to_backup_system','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-22 08:55:43','2022-11-22 08:55:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12931,'TwemproxyRedisInstance','config','fullbackup','cron','STRING','0 5,13,21 * * *','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-22 08:55:43','2022-11-22 08:55:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12930,'TwemproxyRedisInstance','config','fullbackup','old_file_left_day','INT','2','[0,365]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-22 08:55:43','2022-11-22 08:55:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12932,'TwemproxyRedisInstance','config','fullbackup','tar_split','BOOL','true','true|false','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-22 08:55:43','2022-11-22 08:55:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12933,'TwemproxyRedisInstance','config','fullbackup','tar_split_part_size','STRING','8G','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-22 08:55:43','2022-11-22 08:55:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12929,'TwemproxyRedisInstance','config','fullbackup','to_backup_system','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-22 08:55:43','2022-11-22 08:55:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12937,'TwemproxyRedisInstance','config','heartbeat','cron','STRING','@every 10s','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-22 08:55:45','2022-11-22 08:55:45',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12938,'TwemproxyRedisInstance','config','monitor','bkmonitor_data_id','INT','542898','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:45:12','2022-11-23 19:45:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12939,'TwemproxyRedisInstance','config','monitor','bkmonitor_token','STRING','8108b6fe1c8343ca8d6538652242d439','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:45:12','2022-11-23 19:45:12',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12940,'TwemproxyRedisInstance','config','monitor','cron','STRING','@every 1m','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-11-23 19:45:13','2022-11-23 19:45:13',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16121,'TwemproxyRedisInstance','dbconf','Redis-6','activedefrag','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16118,'TwemproxyRedisInstance','dbconf','Redis-6','activerehashing','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16083,'TwemproxyRedisInstance','dbconf','Redis-6','always-show-logo','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16104,'TwemproxyRedisInstance','dbconf','Redis-6','aof-load-truncated','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16106,'TwemproxyRedisInstance','dbconf','Redis-6','aof-rewrite-incremental-fsync','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16105,'TwemproxyRedisInstance','dbconf','Redis-6','aof-use-rdb-preamble','STRING','no','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16099,'TwemproxyRedisInstance','dbconf','Redis-6','appendfilename','STRING','appendonly.aof','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16100,'TwemproxyRedisInstance','dbconf','Redis-6','appendfsync','STRING','everysec','always|everysec|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16098,'TwemproxyRedisInstance','dbconf','Redis-6','appendonly','STRING','no','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16103,'TwemproxyRedisInstance','dbconf','Redis-6','auto-aof-rewrite-min-size','BYTES','64mb','[16mb,1gb]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16102,'TwemproxyRedisInstance','dbconf','Redis-6','auto-aof-rewrite-percentage','INT','100','[0,1000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16064,'TwemproxyRedisInstance','dbconf','Redis-6','bind','STRING','{{address}} 127.0.0.1','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16110,'TwemproxyRedisInstance','dbconf','Redis-6','client-output-buffer-limit','STRING','normal 256mb 512mb 300\nclient-output-buffer-limit slave 2048mb 2048mb 300\nclient-output-buffer-limit pubsub 32mb 8mb 60','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16108,'TwemproxyRedisInstance','dbconf','Redis-6','cluster-config-file','STRING','nodes.conf','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16072,'TwemproxyRedisInstance','dbconf','Redis-6','cluster-enabled','STRING','{{cluster_enabled}}','on|off','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16109,'TwemproxyRedisInstance','dbconf','Redis-6','cluster-node-timeout','INT','15000','[15000,120000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16073,'TwemproxyRedisInstance','dbconf','Redis-6','daemonize','STRING','yes','yes|no','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16071,'TwemproxyRedisInstance','dbconf','Redis-6','databases','INT','{{databases}}','[1,16]','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16088,'TwemproxyRedisInstance','dbconf','Redis-6','dbfilename','STRING','dump.rdb','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16070,'TwemproxyRedisInstance','dbconf','Redis-6','dir','STRING','{{redis_data_dir}}/data','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16111,'TwemproxyRedisInstance','dbconf','Redis-6','hash-max-ziplist-entries','INT','512','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16112,'TwemproxyRedisInstance','dbconf','Redis-6','hash-max-ziplist-value','INT','64','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16117,'TwemproxyRedisInstance','dbconf','Redis-6','hll-sparse-max-bytes','INT','3000','[0,15000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16080,'TwemproxyRedisInstance','dbconf','Redis-6','hz','INT','10','[1,500]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16094,'TwemproxyRedisInstance','dbconf','Redis-6','lazyfree-lazy-eviction','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16095,'TwemproxyRedisInstance','dbconf','Redis-6','lazyfree-lazy-expire','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16096,'TwemproxyRedisInstance','dbconf','Redis-6','lazyfree-lazy-server-del','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16114,'TwemproxyRedisInstance','dbconf','Redis-6','list-compress-depth','INT','0','0|1|2|3','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16113,'TwemproxyRedisInstance','dbconf','Redis-6','list-max-ziplist-size','INT','-2','[-5,-1]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 19:00:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16068,'TwemproxyRedisInstance','dbconf','Redis-6','logfile','STRING','{{redis_data_dir}}/redis.log','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16288,'TwemproxyRedisInstance','dbconf','Redis-6','loglevel','STRING','notice','debug|verbose|notice|warning','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:19:10','2023-04-20 17:19:10',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16107,'TwemproxyRedisInstance','dbconf','Redis-6','lua-time-limit','INT','5000','[0,60000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16081,'TwemproxyRedisInstance','dbconf','Redis-6','maxclients','INT','180000','[10,204800]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16067,'TwemproxyRedisInstance','dbconf','Redis-6','maxmemory','INT','{{maxmemory}}','[0,137438953472]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16076,'TwemproxyRedisInstance','dbconf','Redis-6','maxmemory-policy','STRING','noeviction','noeviction|allkeys-lru|allkeys-lfu|volatile-lru|volatile-lfu|allkeys-random|volatile-random|volatile-ttl','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16101,'TwemproxyRedisInstance','dbconf','Redis-6','no-appendfsync-on-rewrite','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16069,'TwemproxyRedisInstance','dbconf','Redis-6','pidfile','STRING','{{redis_data_dir}}/redis.pid','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16065,'TwemproxyRedisInstance','dbconf','Redis-6','port','INT','{{port}}','[6379,55535]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16075,'TwemproxyRedisInstance','dbconf','Redis-6','protected-mode','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16087,'TwemproxyRedisInstance','dbconf','Redis-6','rdbchecksum','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16086,'TwemproxyRedisInstance','dbconf','Redis-6','rdbcompression','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16093,'TwemproxyRedisInstance','dbconf','Redis-6','rename-command','STRING','config confxx \nrename-command flushdb cleandb \nrename-command flushall cleanall\nrename-command debug nobug\nrename-command keys mykeys','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16091,'TwemproxyRedisInstance','dbconf','Redis-6','repl-diskless-sync','STRING','no','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16097,'TwemproxyRedisInstance','dbconf','Redis-6','replica-lazy-flush','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16092,'TwemproxyRedisInstance','dbconf','Redis-6','replica-priority','INT','100','[0,1000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16090,'TwemproxyRedisInstance','dbconf','Redis-6','replica-read-only','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16089,'TwemproxyRedisInstance','dbconf','Redis-6','replica-serve-stale-data','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16066,'TwemproxyRedisInstance','dbconf','Redis-6','requirepass','STRING','{{password}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16084,'TwemproxyRedisInstance','dbconf','Redis-6','save','STRING','','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16119,'TwemproxyRedisInstance','dbconf','Redis-6','slowlog-log-slower-than','INT','10000','[10,1000000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16120,'TwemproxyRedisInstance','dbconf','Redis-6','slowlog-max-len','INT','256','[0,10240]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16085,'TwemproxyRedisInstance','dbconf','Redis-6','stop-writes-on-bgsave-error','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16079,'TwemproxyRedisInstance','dbconf','Redis-6','supervised','STRING','no','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16077,'TwemproxyRedisInstance','dbconf','Redis-6','tcp-backlog','INT','511','[128,10240]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16074,'TwemproxyRedisInstance','dbconf','Redis-6','tcp-keepalive','INT','300','[10,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16078,'TwemproxyRedisInstance','dbconf','Redis-6','timeout','INT','0','[0,2147483647]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16115,'TwemproxyRedisInstance','dbconf','Redis-6','zset-max-ziplist-entries','INT','128','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16116,'TwemproxyRedisInstance','dbconf','Redis-6','zset-max-ziplist-value','INT','64','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16128,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','auto_eject_hosts','STRING','false','true|false','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16123,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','backlog','INT','512','[128,10240]','RANGE',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16125,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','distribution','STRING','modhash','modhash','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16126,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','hash','STRING','fnv1a_64','fnv1a_64','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16127,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','hash_tag','STRING','{}','{}','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16133,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','mbuf-size','INT','1024','[128,102400]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16136,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','password','STRING','{{password}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16135,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','port','INT','{{port}}','[50000,59999]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16129,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','preconnect','STRING','false','true|false','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16124,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','redis','STRING','true','true','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16134,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','redis_password','STRING','{{redis_password}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16131,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','server_connections','INT','1','[1,10]','RANGE',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16132,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','server_failure_limit','INT','3','[1,10]','RANGE',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16130,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','server_retry_timeout','INT','2000','[1000,10000]','RANGE',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16122,'TwemproxyRedisInstance','proxyconf','Twemproxy-latest','slowms','INT','1000000','[0,10000000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:50','2023-04-20 17:03:50',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
diff --git a/dbm-services/common/db-config/assets/migrations/000027_TwemproxyTendisplusInstance_data.down.sql b/dbm-services/common/db-config/assets/migrations/000027_TwemproxyTendisplusInstance_data.down.sql
new file mode 100644
index 0000000000..362f790192
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000027_TwemproxyTendisplusInstance_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='TwemproxyTendisplusInstance';
+DELETE FROM tb_config_name_def WHERE namespace='TwemproxyTendisplusInstance' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000027_TwemproxyTendisplusInstance_data.up.sql b/dbm-services/common/db-config/assets/migrations/000027_TwemproxyTendisplusInstance_data.up.sql
new file mode 100644
index 0000000000..84ce767fc6
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000027_TwemproxyTendisplusInstance_data.up.sql
@@ -0,0 +1,136 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='TwemproxyTendisplusInstance'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (87,'TwemproxyTendisplusInstance','config','backup','配置','备份相关的配置','plat,app,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'备份相关的配置','2022-09-27 17:48:51','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (88,'TwemproxyTendisplusInstance','config','monitor','配置','监控相关的配置','plat,app,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'监控相关的配置','2022-09-27 17:49:02','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (85,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','redis配置','Tendisplus-2.5的配置文件','plat,app,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'Tendisplus-2.5的配置文件','2022-09-27 17:48:42','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (86,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','redis配置','twemproxy配置文件','plat,app,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'twemproxy配置文件','2022-09-27 17:48:46','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='TwemproxyTendisplusInstance' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12061,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','bind','STRING','{{address}}','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12082,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','binlog-using-defaultCF','STRING','off','on|off','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12098,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','binlogdelrange','INT','500000','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12072,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','cluster-enabled','STRING','{{cluster_enabled}}','no|yes','ENUM',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12086,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','cluster-migration-rate-limit','INT','200','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12069,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','databases','INT','2','[2,16]','RANGE',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-12-08 10:32:28',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12100,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','deletefilesinrange-for-binlog','INT','1','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12065,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','dir','STRING','{{redis_data_dir}}/data/db','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12093,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','domain-enabled','STRING','off','on|off','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12066,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','dumpdir','STRING','{{redis_data_dir}}/data/dump','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12074,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','executorThreadNum','INT','24','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12073,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','executorWorkPoolSize','INT','2','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12101,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','incrpushthreadnum','INT','10','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12078,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','kvstorecount','INT','10','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12064,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','logdir','STRING','{{redis_data_dir}}/data/log','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12063,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','loglevel','STRING','notice','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12071,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','masterauth','STRING','{{password}}','','',2,0,0,1,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12083,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','maxBinlogKeepNum','INT','1','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12099,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','migrate-gc-enabled','STRING','false','false|true','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:20:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12089,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','migrate-snapshot-key-num','INT','30000','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12087,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','migrateReceiveThreadnum','INT','4','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12088,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','migrateSenderThreadnum','INT','4','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12097,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','minbinlogkeepsec','INT','1800','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12084,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','netBatchSize','INT','1048576','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12085,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','netBatchTimeoutSec','INT','10','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12075,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','netIoThreadNum','INT','3','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12076,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','noexpire','STRING','no','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12094,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','pauseTimeIndexMgr','INT','1','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12067,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','pidfile','STRING','{{redis_data_dir}}/data/tendisplus.pid','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12062,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','port','INT','{{port}}','[6379,55535]','RANGE',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12102,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','rename-command','STRING','config confxx \nrename-command flushdb cleandb \nrename-command flushall cleanall\nrename-command debug nobug\nrename-command keys mykeys','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12070,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','requirepass','STRING','{{password}}','','',2,0,0,1,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12077,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','rocks.blockcachemb','STRING','{{rocks_blockcachemb}}','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12091,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','rocks.cache_index_and_filter_blocks','INT','0','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12079,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','rocks.compress_type','STRING','lz4','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12080,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','rocks.max_background_compactions','INT','12','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12081,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','rocks.write_buffer_size','INT','{{rocks_write_buffer_size}}','','',2,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12095,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','scanCntIndexMgr','INT','10000','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12090,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','slave-migrate-enabled','STRING','on','on|off','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12068,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','slowlog','STRING','{{redis_data_dir}}/data/slowlog','','',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12096,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','truncateBinlogIntervalMs','INT','100','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12092,'TwemproxyTendisplusInstance','dbconf','Tendisplus-2.5','truncateBinlogNum','INT','10000000','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12108,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','auto_eject_hosts','STRING','false','true|false','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12104,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','backlog','INT','512','','',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12106,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','distribution','STRING','modhash','modhash','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12107,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','hash','STRING','fnv1a_64','fnv1a_64','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12113,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','mbuf-size','INT','1024','[128,102400]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12116,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','password','STRING','{{password}}','','',2,0,0,1,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12115,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','port','INT','{{port}}','[50000,59999]','RANGE',2,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12109,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','preconnect','STRING','false','true|false','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12105,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','redis','STRING','true','true','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12114,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','redis_password','STRING','{{redis_password}}','','',2,0,0,1,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12111,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','server_connections','INT','1','[1,10]','RANGE',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12112,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','server_failure_limit','INT','3','[1,10]','RANGE',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12110,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','server_retry_timeout','INT','2000','[1000,10000]','RANGE',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (12103,'TwemproxyTendisplusInstance','proxyconf','Twemproxy-latest','slowms','INT','1000000','[0,10000000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2022-09-27 17:50:24','2022-09-27 17:50:24',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
diff --git a/dbm-services/common/db-config/assets/migrations/000028_TwemproxyTendisSSDInstance_data.down.sql b/dbm-services/common/db-config/assets/migrations/000028_TwemproxyTendisSSDInstance_data.down.sql
new file mode 100644
index 0000000000..2c6dbdb3ac
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000028_TwemproxyTendisSSDInstance_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='TwemproxyTendisSSDInstance';
+DELETE FROM tb_config_name_def WHERE namespace='TwemproxyTendisSSDInstance' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000028_TwemproxyTendisSSDInstance_data.up.sql b/dbm-services/common/db-config/assets/migrations/000028_TwemproxyTendisSSDInstance_data.up.sql
new file mode 100644
index 0000000000..3214b4849b
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000028_TwemproxyTendisSSDInstance_data.up.sql
@@ -0,0 +1,225 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='TwemproxyTendisSSDInstance'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (174,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','redis配置','TendisSSD的配置文件','plat,app,module,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'TendisSSD的配置文件','2023-03-01 10:27:53','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (175,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','redis配置','TendisSSD的配置文件','plat,app,module,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'TendisSSD的配置文件','2023-03-01 10:27:53','2023-03-22 12:08:50','');
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (176,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','redis配置','twemproxy配置文件','plat,app,module,cluster','cluster',1,1,0,'Tendisplus',5,365,0,'twemproxy配置文件','2023-03-01 10:27:53','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='TwemproxyTendisSSDInstance' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16179,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','activerehashing','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16169,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','aof-rewrite-incremental-fsync','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16164,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','appendfilename','STRING','appendonly.aof','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16165,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','appendfsync','STRING','everysec','always|everysec|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16163,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','appendonly','STRING','no','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16168,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','auto-aof-rewrite-min-size','STRING','64mb','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16167,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','auto-aof-rewrite-percentage','INT','100','[0,1000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16137,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','bind','STRING','{{address}} 127.0.0.1','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16189,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','binlog-enabled','INT','1','0|1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16201,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','binlog-filesize','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16193,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','clean-time','INT','3','[1,16]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16171,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','client-output-buffer-limit','STRING','normal 256mb 512mb 300\nclient-output-buffer-limit slave 2048mb 2048mb 300\nclient-output-buffer-limit pubsub 32mb 8mb 60','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16145,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','daemonize','STRING','yes','yes|no','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16144,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','databases','INT','{{databases}}','[1,16]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16156,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','dbfilename','STRING','dump.rdb','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16143,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','dir','STRING','{{redis_data_dir}}/data','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16198,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','disk-delete-count','INT','50','[0,1024]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16197,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','disk-delete-time','INT','50','[0,1024]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16191,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','dumpdir','STRING','{{redis_data_dir}}/rbinlog/','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16172,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','hash-max-ziplist-entries','INT','512','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16173,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','hash-max-ziplist-value','INT','64','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16149,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','hz','INT','10','[1,500]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16174,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','list-max-ziplist-entries','INT','512','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16175,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','list-max-ziplist-value','INT','64','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16192,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','log-count','INT','200000','[2000,2000000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16199,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','log-keep-count','INT','20000000','[2000,2000000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16200,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','log-keep-time','INT','36000','[0,3600000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16141,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','logfile','STRING','{{redis_data_dir}}/redis.log','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16284,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','loglevel','STRING','notice','debug|verbose|notice|warning','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:19:09','2023-04-20 17:19:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16170,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','lua-time-limit','INT','5000','[0,60000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16150,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','maxclients','INT','50000','[10,204800]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16140,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','maxmemory','STRING','{{maxmemory}}','[0,137438953472]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16147,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','maxmemory-policy','STRING','noeviction','noeviction|volatile-ttl|allkeys-random|volatile-random|allkeys-lru|volatile-lru','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16188,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','max_manifest_file_size','INT','200000000','200000000','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16187,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','max_open_files','INT','100000','[10000,204800]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16166,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','no-appendfsync-on-rewrite','STRING','no','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16194,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','pause-clean-time','INT','5','[1,16]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16196,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','pause-scan-expires-time','INT','100','[64,1024]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16142,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','pidfile','STRING','{{redis_data_dir}}/redis.pid','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16138,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','port','INT','{{port}}','[6379,55535]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16155,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','rdbchecksum','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16154,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','rdbcompression','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16162,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','rename-command','STRING','config confxx \nrename-command debug nobug\nrename-command keys mykeys','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16160,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','repl-disable-tcp-nodelay','STRING','no','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16190,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','repl-mode','STRING','tredis-binlog','tredis-binlog','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16159,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','repl-timeout','INT','600','[60,36000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16139,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','requirepass','STRING','{{password}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16183,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','rocksdb_block_cache','INT','500000000','[500000,2000000000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16184,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','rocksdb_block_size','INT','32000','[8000,128000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16185,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','rocksdb_write_buffer_size','INT','32000000','[500000,2000000000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16152,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','save','STRING','','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16195,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','scan-expires-time','INT','1','[1,16]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16176,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','set-max-intset-entries','INT','512','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16161,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','slave-priority','INT','100','[0,1000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16158,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','slave-read-only','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16157,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','slave-serve-stale-data','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16180,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','slowlog-log-slower-than','INT','10000','[10,1000000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16181,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','slowlog-max-len','INT','256','[0,10240]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16153,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','stop-writes-on-bgsave-error','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16186,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','target_file_size_base','INT','8000000','[512000,128000000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16146,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','tcp-keepalive','INT','300','[10,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16148,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','timeout','INT','0','[0,2147483647]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16202,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','transfer-enabled','INT','0','0|1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16182,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','write_batch_size','INT','2','2','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16177,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','zset-max-ziplist-entries','INT','128','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16178,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.2','zset-max-ziplist-value','INT','64','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
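+-- End of the TendisSSD-1.2 dbconf rows; the rows below seed the TendisSSD-1.3 conf_file for the same TwemproxyTendisSSDInstance namespace, using the identical tb_config_name_def column layout.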
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16245,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','activerehashing','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16235,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','aof-rewrite-incremental-fsync','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16230,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','appendfilename','STRING','appendonly.aof','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16231,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','appendfsync','STRING','everysec','always|everysec|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16229,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','appendonly','STRING','no','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16234,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','auto-aof-rewrite-min-size','BYTES','64mb','[16mb,1gb]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16233,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','auto-aof-rewrite-percentage','INT','100','[0,1000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16203,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','bind','STRING','{{address}} 127.0.0.1','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16255,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','binlog-enabled','INT','1','0|1','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16267,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','binlog-filesize','INT','268435456','[1024,1073741824]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16259,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','clean-time','INT','3','[1,16]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16237,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','client-output-buffer-limit','STRING','normal 256mb 512mb 300\nclient-output-buffer-limit slave 2048mb 2048mb 300\nclient-output-buffer-limit pubsub 32mb 8mb 60','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16211,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','daemonize','STRING','yes','yes|no','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16210,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','databases','INT','{{databases}}','[1,16]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16222,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','dbfilename','STRING','dump.rdb','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16209,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','dir','STRING','{{redis_data_dir}}/data','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16264,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','disk-delete-count','INT','50','[0,1024]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16263,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','disk-delete-time','INT','50','[0,1024]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16257,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','dumpdir','STRING','{{redis_data_dir}}/rbinlog/','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16238,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','hash-max-ziplist-entries','INT','512','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16239,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','hash-max-ziplist-value','INT','64','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16215,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','hz','INT','10','[1,500]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16240,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','list-max-ziplist-entries','INT','512','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16241,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','list-max-ziplist-value','INT','64','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16258,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','log-count','INT','200000','[2000,2000000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16265,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','log-keep-count','INT','20000000','[2000,2000000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16266,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','log-keep-time','INT','36000','[0,3600000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16207,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','logfile','STRING','{{redis_data_dir}}/redis.log','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16285,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','loglevel','STRING','notice','debug|verbose|notice|warning','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:19:09','2023-04-20 17:19:09',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16236,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','lua-time-limit','INT','5000','[0,60000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16216,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','maxclients','INT','50000','[10,204800]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16206,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','maxmemory','STRING','{{maxmemory}}','[0,137438953472]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16213,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','maxmemory-policy','STRING','noeviction','noeviction|volatile-ttl|allkeys-random|volatile-random|allkeys-lru|volatile-lru','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16254,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','max_manifest_file_size','INT','200000000','200000000','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16253,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','max_open_files','INT','100000','[10000,204800]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16232,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','no-appendfsync-on-rewrite','STRING','no','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16260,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','pause-clean-time','INT','5','[1,16]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16262,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','pause-scan-expires-time','INT','100','[64,1024]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16208,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','pidfile','STRING','{{redis_data_dir}}/redis.pid','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16204,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','port','INT','{{port}}','[6379,55535]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16221,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','rdbchecksum','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16220,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','rdbcompression','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16228,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','rename-command','STRING','config confxx \nrename-command debug nobug\nrename-command keys mykeys','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16226,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','repl-disable-tcp-nodelay','STRING','no','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16256,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','repl-mode','STRING','tredis-binlog','tredis-binlog','ENUM',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16225,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','repl-timeout','INT','600','[60,36000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16205,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','requirepass','STRING','{{password}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16249,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','rocksdb_block_cache','INT','500000000','[500000,2000000000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16250,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','rocksdb_block_size','INT','32000','[8000,128000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16251,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','rocksdb_write_buffer_size','INT','32000000','[500000,2000000000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16218,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','save','STRING','','','',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16261,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','scan-expires-time','INT','1','[1,16]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16242,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','set-max-intset-entries','INT','512','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16227,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','slave-priority','INT','100','[0,1000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16224,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','slave-read-only','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16223,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','slave-serve-stale-data','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16246,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','slowlog-log-slower-than','INT','10000','[10,1000000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16247,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','slowlog-max-len','INT','256','[0,10240]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16219,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','stop-writes-on-bgsave-error','STRING','yes','yes|no','ENUM',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16252,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','target_file_size_base','INT','8000000','[512000,128000000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16212,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','tcp-keepalive','INT','300','[10,3600]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16214,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','timeout','INT','0','[0,2147483647]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16248,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','write_batch_size','INT','2','2','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16243,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','zset-max-ziplist-entries','INT','128','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16244,'TwemproxyTendisSSDInstance','dbconf','TendisSSD-1.3','zset-max-ziplist-value','INT','64','[1,10000]','RANGE',1,0,0,0,0,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16274,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','auto_eject_hosts','STRING','false','true|false','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16269,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','backlog','INT','512','','',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16271,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','distribution','STRING','modhash','modhash','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16272,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','hash','STRING','fnv1a_64','fnv1a_64','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16273,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','hash_tag','STRING','{}','{}','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16279,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','mbuf-size','INT','1024','[128,102400]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16282,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','password','STRING','{{password}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16281,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','port','INT','{{port}}','[50000,59999]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16275,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','preconnect','STRING','false','true|false','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16270,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','redis','STRING','true','true','ENUM',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16280,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','redis_password','STRING','{{redis_password}}','','',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16277,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','server_connections','INT','1','[1,10]','RANGE',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16278,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','server_failure_limit','INT','3','[1,10]','RANGE',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16276,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','server_retry_timeout','INT','2000','[1000,10000]','RANGE',1,0,1,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (16268,'TwemproxyTendisSSDInstance','proxyconf','Twemproxy-latest','slowms','INT','1000000','[0,10000000]','RANGE',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-20 17:03:54','2023-04-20 17:03:54',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
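Note on the seed rows above: each `tb_config_name_def` entry constrains one parameter through `value_type`, `value_allowed` and `value_type_sub` (ENUM rows enumerate choices such as `yes|no`, RANGE rows carry a `[min,max]` interval, and templated defaults like `{{password}}` are filled in at render time). The following is a minimal sanity-check sketch, not part of the migration: it assumes only the table layout visible in the INSERTs above, and the SUBSTRING_INDEX parsing of `value_allowed` is illustrative.

-- Hypothetical verification query: flag RANGE rows whose literal default
-- falls outside the declared [min,max]; templated defaults are skipped.
SELECT conf_name, value_default, value_allowed
FROM tb_config_name_def
WHERE namespace = 'TwemproxyTendisSSDInstance'
  AND value_type_sub = 'RANGE'
  AND value_default NOT LIKE '{{%'
  AND CAST(value_default AS SIGNED) NOT BETWEEN
      CAST(SUBSTRING_INDEX(SUBSTRING_INDEX(value_allowed, ',', 1), '[', -1) AS SIGNED)
      AND CAST(SUBSTRING_INDEX(SUBSTRING_INDEX(value_allowed, ',', -1), ']', 1) AS SIGNED);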
diff --git a/dbm-services/common/db-config/assets/migrations/000029_pulsar_data.down.sql b/dbm-services/common/db-config/assets/migrations/000029_pulsar_data.down.sql
new file mode 100644
index 0000000000..50ce14a392
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000029_pulsar_data.down.sql
@@ -0,0 +1,2 @@
+DELETE FROM tb_config_file_def WHERE namespace='pulsar';
+DELETE FROM tb_config_name_def WHERE namespace='pulsar' AND (flag_encrypt!=1 or value_default like '{{%');
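The rollback above mirrors the filter used to produce the up-migration dump (see the `-- WHERE:` comment in the corresponding up file): it removes the pulsar file definition plus every pulsar name definition that is either not encrypted or whose encrypted default is a `{{...}}` template placeholder, so manually-set encrypted values survive a rollback. A hedged dry-run under the same schema, useful before applying the down migration:

-- Illustrative preview (not part of the migration): count the rows the
-- down migration would delete, without deleting anything.
SELECT COUNT(*) AS file_defs_to_delete
FROM tb_config_file_def
WHERE namespace = 'pulsar';
SELECT COUNT(*) AS name_defs_to_delete
FROM tb_config_name_def
WHERE namespace = 'pulsar'
  AND (flag_encrypt != 1 OR value_default LIKE '{{%');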
diff --git a/dbm-services/common/db-config/assets/migrations/000029_pulsar_data.up.sql b/dbm-services/common/db-config/assets/migrations/000029_pulsar_data.up.sql
new file mode 100644
index 0000000000..5f35bcb202
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000029_pulsar_data.up.sql
@@ -0,0 +1,550 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='pulsar'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (142,'pulsar','dbconf','2.10.1','pulsar集群配置','pulsar集群配置','plat,app,cluster','cluster',1,1,0,NULL,5,365,0,'pulsar集群配置','2023-01-13 10:37:29','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='pulsar' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13184,'pulsar','dbconf','2.10.1','bookkeeper.advertisedAddress','STRING','{{local_ip}}','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13185,'pulsar','dbconf','2.10.1','bookkeeper.allowLoopback','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13198,'pulsar','dbconf','2.10.1','bookkeeper.auditorPeriodicBookieCheckInterval','STRING','86400','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13222,'pulsar','dbconf','2.10.1','bookkeeper.auditorPeriodicCheckInterval','STRING','604800','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13201,'pulsar','dbconf','2.10.1','bookkeeper.autoRecoveryDaemonEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13186,'pulsar','dbconf','2.10.1','bookkeeper.bookieDeathWatchInterval','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13181,'pulsar','dbconf','2.10.1','bookkeeper.bookiePort','STRING','3181','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13235,'pulsar','dbconf','2.10.1','bookkeeper.compactionMaxOutstandingRequests','STRING','100000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13232,'pulsar','dbconf','2.10.1','bookkeeper.compactionRate','STRING','100000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 15:34:38',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13240,'pulsar','dbconf','2.10.1','bookkeeper.compactionRateByBytes','STRING','1000000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13239,'pulsar','dbconf','2.10.1','bookkeeper.compactionRateByEntries','STRING','100000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 15:34:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13255,'pulsar','dbconf','2.10.1','bookkeeper.dbStorage_readAheadCacheBatchSize','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13254,'pulsar','dbconf','2.10.1','bookkeeper.dbStorage_readAheadCacheMaxSizeMb','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13256,'pulsar','dbconf','2.10.1','bookkeeper.dbStorage_rocksDB_blockCacheSize','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13259,'pulsar','dbconf','2.10.1','bookkeeper.dbStorage_rocksDB_blockSize','STRING','65536','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13260,'pulsar','dbconf','2.10.1','bookkeeper.dbStorage_rocksDB_bloomFilterBitsPerKey','STRING','10','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13263,'pulsar','dbconf','2.10.1','bookkeeper.dbStorage_rocksDB_maxSizeInLevel1MB','STRING','256','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13262,'pulsar','dbconf','2.10.1','bookkeeper.dbStorage_rocksDB_numFilesInLevel0','STRING','4','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13261,'pulsar','dbconf','2.10.1','bookkeeper.dbStorage_rocksDB_numLevels','STRING','-1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13258,'pulsar','dbconf','2.10.1','bookkeeper.dbStorage_rocksDB_sstSizeInMB','STRING','64','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13257,'pulsar','dbconf','2.10.1','bookkeeper.dbStorage_rocksDB_writeBufferSizeMB','STRING','64','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13253,'pulsar','dbconf','2.10.1','bookkeeper.dbStorage_writeCacheMaxSizeMb','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13245,'pulsar','dbconf','2.10.1','bookkeeper.diskCheckInterval','STRING','10000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13244,'pulsar','dbconf','2.10.1','bookkeeper.diskUsageThreshold','STRING','0.95','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13197,'pulsar','dbconf','2.10.1','bookkeeper.enableBusyWait','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13228,'pulsar','dbconf','2.10.1','bookkeeper.entryLogFilePreallocationEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13224,'pulsar','dbconf','2.10.1','bookkeeper.fileInfoFormatVersionToWrite','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13229,'pulsar','dbconf','2.10.1','bookkeeper.flushEntrylogBytes','STRING','268435456','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13187,'pulsar','dbconf','2.10.1','bookkeeper.flushInterval','STRING','60000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13190,'pulsar','dbconf','2.10.1','bookkeeper.gcOverreplicatedLedgerWaitTime','STRING','86400000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13189,'pulsar','dbconf','2.10.1','bookkeeper.gcWaitTime','STRING','900000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13252,'pulsar','dbconf','2.10.1','bookkeeper.httpServerClass','STRING','org.apache.bookkeeper.http.vertx.VertxHttpServer','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13250,'pulsar','dbconf','2.10.1','bookkeeper.httpServerEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13251,'pulsar','dbconf','2.10.1','bookkeeper.httpServerPort','STRING','8000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13238,'pulsar','dbconf','2.10.1','bookkeeper.isThrottleByBytes','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13214,'pulsar','dbconf','2.10.1','bookkeeper.journalAdaptiveGroupWrites','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13218,'pulsar','dbconf','2.10.1','bookkeeper.journalAlignmentSize','STRING','4096','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13216,'pulsar','dbconf','2.10.1','bookkeeper.journalBufferedWritesThreshold','STRING','524288','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13182,'pulsar','dbconf','2.10.1','bookkeeper.journalDirectory','STRING','{{pulsar_data_dir}}/journal','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13219,'pulsar','dbconf','2.10.1','bookkeeper.journalFlushWhenQueueEmpty','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13206,'pulsar','dbconf','2.10.1','bookkeeper.journalFormatVersionToWrite','STRING','5','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13208,'pulsar','dbconf','2.10.1','bookkeeper.journalMaxBackups','STRING','5','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13215,'pulsar','dbconf','2.10.1','bookkeeper.journalMaxGroupWaitMSec','STRING','1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13207,'pulsar','dbconf','2.10.1','bookkeeper.journalMaxSizeMB','STRING','2048','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13209,'pulsar','dbconf','2.10.1','bookkeeper.journalPreAllocSizeMB','STRING','16','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13211,'pulsar','dbconf','2.10.1','bookkeeper.journalRemoveFromPageCache','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13213,'pulsar','dbconf','2.10.1','bookkeeper.journalSyncData','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13210,'pulsar','dbconf','2.10.1','bookkeeper.journalWriteBufferSizeKB','STRING','64','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13212,'pulsar','dbconf','2.10.1','bookkeeper.journalWriteData','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13221,'pulsar','dbconf','2.10.1','bookkeeper.ledgerDirectories','STRING','{{pulsar_data_dir}}/ledgers','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13220,'pulsar','dbconf','2.10.1','bookkeeper.ledgerStorageClass','STRING','org.apache.bookkeeper.bookie.storage.ldb.DbLedgerStorage','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13227,'pulsar','dbconf','2.10.1','bookkeeper.logSizeLimit','STRING','1073741824','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13202,'pulsar','dbconf','2.10.1','bookkeeper.lostBookieRecoveryDelay','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13237,'pulsar','dbconf','2.10.1','bookkeeper.majorCompactionInterval','STRING','-1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 15:34:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13236,'pulsar','dbconf','2.10.1','bookkeeper.majorCompactionThreshold','STRING','0.5','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13195,'pulsar','dbconf','2.10.1','bookkeeper.maxPendingAddRequestsPerThread','STRING','10000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13194,'pulsar','dbconf','2.10.1','bookkeeper.maxPendingReadRequestsPerThread','STRING','2500','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13246,'pulsar','dbconf','2.10.1','bookkeeper.metadataServiceUri','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13234,'pulsar','dbconf','2.10.1','bookkeeper.minorCompactionInterval','STRING','1800','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 15:34:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13233,'pulsar','dbconf','2.10.1','bookkeeper.minorCompactionThreshold','STRING','0.5','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 15:34:39',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13183,'pulsar','dbconf','2.10.1','bookkeeper.minUsableSizeForIndexFileCreation','STRING','1073741824','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13205,'pulsar','dbconf','2.10.1','bookkeeper.nettyMaxFrameSizeBytes','STRING','5253120','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13191,'pulsar','dbconf','2.10.1','bookkeeper.numAddWorkerThreads','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13193,'pulsar','dbconf','2.10.1','bookkeeper.numHighPriorityWorkerThreads','STRING','8','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13217,'pulsar','dbconf','2.10.1','bookkeeper.numJournalCallbackThreads','STRING','8','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13192,'pulsar','dbconf','2.10.1','bookkeeper.numReadWorkerThreads','STRING','8','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13223,'pulsar','dbconf','2.10.1','bookkeeper.openFileLimit','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13200,'pulsar','dbconf','2.10.1','bookkeeper.openLedgerRereplicationGracePeriod','STRING','30000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13225,'pulsar','dbconf','2.10.1','bookkeeper.pageLimit','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13242,'pulsar','dbconf','2.10.1','bookkeeper.prometheusStatsHttpPort','STRING','8000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13230,'pulsar','dbconf','2.10.1','bookkeeper.readBufferSizeBytes','STRING','4096','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13243,'pulsar','dbconf','2.10.1','bookkeeper.readOnlyModeEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13196,'pulsar','dbconf','2.10.1','bookkeeper.readWorkerThreadsThrottlingEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13199,'pulsar','dbconf','2.10.1','bookkeeper.rereplicationEntryBatchSize','STRING','100','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13204,'pulsar','dbconf','2.10.1','bookkeeper.serverTcpNoDelay','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13241,'pulsar','dbconf','2.10.1','bookkeeper.statsProviderClass','STRING','org.apache.bookkeeper.stats.prometheus.PrometheusMetricsProvider','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13188,'pulsar','dbconf','2.10.1','bookkeeper.useHostNameAsBookieID','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13203,'pulsar','dbconf','2.10.1','bookkeeper.useV2WireProtocol','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13231,'pulsar','dbconf','2.10.1','bookkeeper.writeBufferSizeBytes','STRING','65536','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13249,'pulsar','dbconf','2.10.1','bookkeeper.zkEnableSecurity','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13226,'pulsar','dbconf','2.10.1','bookkeeper.zkLedgersRootPath','STRING','/ledgers','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13247,'pulsar','dbconf','2.10.1','bookkeeper.zkServers','STRING','{{zk_host_list[0]}}:2181,{{zk_host_list[1]}}:2181,{{zk_host_list[2]}}:2181','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-05-11 11:23:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13248,'pulsar','dbconf','2.10.1','bookkeeper.zkTimeout','STRING','30000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13390,'pulsar','dbconf','2.10.1','broker.acknowledgmentAtBatchIndexLevelEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13312,'pulsar','dbconf','2.10.1','broker.activeConsumerFailoverDelayTimeMillis','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13274,'pulsar','dbconf','2.10.1','broker.advertisedAddress','STRING','{{local_ip}}','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13589,'pulsar','dbconf','2.10.1','broker.aggregatePublisherStatsByProducerName','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13301,'pulsar','dbconf','2.10.1','broker.allowAutoSubscriptionCreation','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13299,'pulsar','dbconf','2.10.1','broker.allowAutoTopicCreation','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13300,'pulsar','dbconf','2.10.1','broker.allowAutoTopicCreationType','STRING','partitioned','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 15:41:18',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13446,'pulsar','dbconf','2.10.1','broker.anonymousUserRole','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13445,'pulsar','dbconf','2.10.1','broker.athenzDomainNames','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13405,'pulsar','dbconf','2.10.1','broker.authenticateOriginalAuthData','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13434,'pulsar','dbconf','2.10.1','broker.authenticationEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 15:36:01',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13435,'pulsar','dbconf','2.10.1','broker.authenticationProviders','STRING','org.apache.pulsar.broker.authentication.AuthenticationProviderToken','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 15:36:43',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13436,'pulsar','dbconf','2.10.1','broker.authenticationRefreshCheckSeconds','STRING','60','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13439,'pulsar','dbconf','2.10.1','broker.authorizationAllowWildcardsMatching','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13437,'pulsar','dbconf','2.10.1','broker.authorizationEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 15:36:15',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13438,'pulsar','dbconf','2.10.1','broker.authorizationProvider','STRING','org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13521,'pulsar','dbconf','2.10.1','broker.autoSkipNonRecoverableData','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13293,'pulsar','dbconf','2.10.1','broker.backlogQuotaCheckEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13294,'pulsar','dbconf','2.10.1','broker.backlogQuotaCheckIntervalInSeconds','STRING','60','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13295,'pulsar','dbconf','2.10.1','broker.backlogQuotaDefaultLimitBytes','STRING','-1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13629,'pulsar','dbconf','2.10.1','broker.backlogQuotaDefaultLimitGB','STRING','-1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13296,'pulsar','dbconf','2.10.1','broker.backlogQuotaDefaultLimitSecond','STRING','-1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13297,'pulsar','dbconf','2.10.1','broker.backlogQuotaDefaultRetentionPolicy','STRING','producer_request_hold','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13272,'pulsar','dbconf','2.10.1','broker.bindAddress','STRING','0.0.0.0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13273,'pulsar','dbconf','2.10.1','broker.bindAddresses','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13461,'pulsar','dbconf','2.10.1','broker.bookkeeperClientAuthenticationParameters','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13460,'pulsar','dbconf','2.10.1','broker.bookkeeperClientAuthenticationParametersName','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13459,'pulsar','dbconf','2.10.1','broker.bookkeeperClientAuthenticationPlugin','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13477,'pulsar','dbconf','2.10.1','broker.bookkeeperClientEnforceMinNumRacksPerWriteQuorum','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13472,'pulsar','dbconf','2.10.1','broker.bookkeeperClientGetBookieInfoIntervalSeconds','STRING','86400','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13473,'pulsar','dbconf','2.10.1','broker.bookkeeperClientGetBookieInfoRetryIntervalSeconds','STRING','60','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13467,'pulsar','dbconf','2.10.1','broker.bookkeeperClientHealthCheckEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13469,'pulsar','dbconf','2.10.1','broker.bookkeeperClientHealthCheckErrorThresholdPerInterval','STRING','5','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13468,'pulsar','dbconf','2.10.1','broker.bookkeeperClientHealthCheckIntervalSeconds','STRING','60','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13470,'pulsar','dbconf','2.10.1','broker.bookkeeperClientHealthCheckQuarantineTimeInSeconds','STRING','1800','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13479,'pulsar','dbconf','2.10.1','broker.bookkeeperClientIsolationGroups','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13481,'pulsar','dbconf','2.10.1','broker.bookkeeperClientMinAvailableBookiesInIsolationGroups','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13476,'pulsar','dbconf','2.10.1','broker.bookkeeperClientMinNumRacksPerWriteQuorum','STRING','2','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13463,'pulsar','dbconf','2.10.1','broker.bookkeeperClientNumWorkerThreads','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13471,'pulsar','dbconf','2.10.1','broker.bookkeeperClientQuarantineRatio','STRING','1.0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13474,'pulsar','dbconf','2.10.1','broker.bookkeeperClientRackawarePolicyEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13475,'pulsar','dbconf','2.10.1','broker.bookkeeperClientRegionawarePolicyEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13478,'pulsar','dbconf','2.10.1','broker.bookkeeperClientReorderReadSequenceEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13480,'pulsar','dbconf','2.10.1','broker.bookkeeperClientSecondaryIsolationGroups','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13464,'pulsar','dbconf','2.10.1','broker.bookkeeperClientSpeculativeReadTimeoutInMillis','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13462,'pulsar','dbconf','2.10.1','broker.bookkeeperClientTimeoutInSeconds','STRING','30','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13493,'pulsar','dbconf','2.10.1','broker.bookkeeperDiskWeightBasedPlacementEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13482,'pulsar','dbconf','2.10.1','broker.bookkeeperEnableStickyReads','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13494,'pulsar','dbconf','2.10.1','broker.bookkeeperExplicitLacIntervalInMills','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13458,'pulsar','dbconf','2.10.1','broker.bookkeeperMetadataServiceUri','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13465,'pulsar','dbconf','2.10.1','broker.bookkeeperNumberOfChannelsPerBookie','STRING','16','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13492,'pulsar','dbconf','2.10.1','broker.bookkeeperTlsCertFilesRefreshDurationSeconds','STRING','300','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13490,'pulsar','dbconf','2.10.1','broker.bookkeeperTLSCertificateFilePath','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13484,'pulsar','dbconf','2.10.1','broker.bookkeeperTLSClientAuthentication','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13489,'pulsar','dbconf','2.10.1','broker.bookkeeperTLSKeyFilePath','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13485,'pulsar','dbconf','2.10.1','broker.bookkeeperTLSKeyFileType','STRING','PEM','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13487,'pulsar','dbconf','2.10.1','broker.bookkeeperTLSKeyStorePasswordPath','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13483,'pulsar','dbconf','2.10.1','broker.bookkeeperTLSProviderFactoryClass','STRING','org.apache.bookkeeper.tls.TLSContextFactory','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13491,'pulsar','dbconf','2.10.1','broker.bookkeeperTLSTrustCertsFilePath','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13486,'pulsar','dbconf','2.10.1','broker.bookkeeperTLSTrustCertTypes','STRING','PEM','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13488,'pulsar','dbconf','2.10.1','broker.bookkeeperTLSTrustStorePasswordPath','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13466,'pulsar','dbconf','2.10.1','broker.bookkeeperUseV2WireProtocol','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13570,'pulsar','dbconf','2.10.1','broker.bootstrapNamespaces','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13443,'pulsar','dbconf','2.10.1','broker.brokerClientAuthenticationParameters','STRING','{{token}}','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-05-11 16:53:57',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13442,'pulsar','dbconf','2.10.1','broker.brokerClientAuthenticationPlugin','STRING','org.apache.pulsar.client.impl.auth.AuthenticationToken','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 15:45:57',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13424,'pulsar','dbconf','2.10.1','broker.brokerClientSslProvider','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13428,'pulsar','dbconf','2.10.1','broker.brokerClientTlsCiphers','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13441,'pulsar','dbconf','2.10.1','broker.brokerClientTlsEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13423,'pulsar','dbconf','2.10.1','broker.brokerClientTlsEnabledWithKeyStore','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13429,'pulsar','dbconf','2.10.1','broker.brokerClientTlsProtocols','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13426,'pulsar','dbconf','2.10.1','broker.brokerClientTlsTrustStore','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13427,'pulsar','dbconf','2.10.1','broker.brokerClientTlsTrustStorePassword','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13425,'pulsar','dbconf','2.10.1','broker.brokerClientTlsTrustStoreType','STRING','JKS','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13444,'pulsar','dbconf','2.10.1','broker.brokerClientTrustCertsFilePath','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13319,'pulsar','dbconf','2.10.1','broker.brokerDeduplicationEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13323,'pulsar','dbconf','2.10.1','broker.brokerDeduplicationEntriesInterval','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13320,'pulsar','dbconf','2.10.1','broker.brokerDeduplicationMaxNumberOfProducers','STRING','10000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13324,'pulsar','dbconf','2.10.1','broker.brokerDeduplicationProducerInactivityTimeoutMinutes','STRING','360','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13321,'pulsar','dbconf','2.10.1','broker.brokerDeduplicationSnapshotFrequencyInSeconds','STRING','120','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13322,'pulsar','dbconf','2.10.1','broker.brokerDeduplicationSnapshotIntervalSeconds','STRING','120','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13306,'pulsar','dbconf','2.10.1','broker.brokerDeleteInactivePartitionedTopicMetadataEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13303,'pulsar','dbconf','2.10.1','broker.brokerDeleteInactiveTopicsEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13304,'pulsar','dbconf','2.10.1','broker.brokerDeleteInactiveTopicsFrequencySeconds','STRING','60','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13307,'pulsar','dbconf','2.10.1','broker.brokerDeleteInactiveTopicsMaxInactiveDurationSeconds','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13305,'pulsar','dbconf','2.10.1','broker.brokerDeleteInactiveTopicsMode','STRING','delete_when_no_subscriptions','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13328,'pulsar','dbconf','2.10.1','broker.brokerMaxConnections','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13329,'pulsar','dbconf','2.10.1','broker.brokerMaxConnectionsPerIp','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13343,'pulsar','dbconf','2.10.1','broker.brokerPublisherThrottlingMaxByteRate','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13342,'pulsar','dbconf','2.10.1','broker.brokerPublisherThrottlingMaxMessageRate','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13341,'pulsar','dbconf','2.10.1','broker.brokerPublisherThrottlingTickTimeMillis','STRING','50','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13385,'pulsar','dbconf','2.10.1','broker.brokerServiceCompactionMonitorIntervalInSeconds','STRING','60','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13387,'pulsar','dbconf','2.10.1','broker.brokerServiceCompactionPhaseOneLoopTimeInSeconds','STRING','30','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13386,'pulsar','dbconf','2.10.1','broker.brokerServiceCompactionThresholdInBytes','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13266,'pulsar','dbconf','2.10.1','broker.brokerServicePort','STRING','6650','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13267,'pulsar','dbconf','2.10.1','broker.brokerServicePortTls','STRING','6651','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 15:40:25',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13628,'pulsar','dbconf','2.10.1','broker.brokerServicePurgeInactiveFrequencyInSeconds','STRING','60','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13291,'pulsar','dbconf','2.10.1','broker.brokerShutdownTimeoutMs','STRING','60000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13331,'pulsar','dbconf','2.10.1','broker.clientLibraryVersionCheckEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13285,'pulsar','dbconf','2.10.1','broker.clusterName','STRING','{{cluster_name}}','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13265,'pulsar','dbconf','2.10.1','broker.configurationMetadataStoreUrl','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13623,'pulsar','dbconf','2.10.1','broker.configurationStoreServers','STRING','{{zk_host_list[0]}}:2181,{{zk_host_list[1]}}:2181,{{zk_host_list[2]}}:2181','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-05-11 11:23:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13549,'pulsar','dbconf','2.10.1','broker.defaultNamespaceBundleSplitAlgorithm','STRING','range_equally_divide','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13325,'pulsar','dbconf','2.10.1','broker.defaultNumberOfNamespaceBundles','STRING','4','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13302,'pulsar','dbconf','2.10.1','broker.defaultNumPartitions','STRING','{{partitions}}','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13568,'pulsar','dbconf','2.10.1','broker.defaultRetentionSizeInMB','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13567,'pulsar','dbconf','2.10.1','broker.defaultRetentionTimeInMinutes','STRING','{{retention_time}}','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13388,'pulsar','dbconf','2.10.1','broker.delayedDeliveryEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13389,'pulsar','dbconf','2.10.1','broker.delayedDeliveryTickTimeMillis','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13455,'pulsar','dbconf','2.10.1','broker.disableHttpDebugMethods','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13359,'pulsar','dbconf','2.10.1','broker.dispatcherMaxReadBatchSize','STRING','100','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13360,'pulsar','dbconf','2.10.1','broker.dispatcherMaxReadSizeBytes','STRING','5242880','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13362,'pulsar','dbconf','2.10.1','broker.dispatcherMaxRoundRobinBatchSize','STRING','20','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13361,'pulsar','dbconf','2.10.1','broker.dispatcherMinReadBatchSize','STRING','1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13363,'pulsar','dbconf','2.10.1','broker.dispatcherReadFailureBackoffInitialTimeInMs','STRING','15000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13365,'pulsar','dbconf','2.10.1','broker.dispatcherReadFailureBackoffMandatoryStopTimeInMs','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13364,'pulsar','dbconf','2.10.1','broker.dispatcherReadFailureBackoffMaxTimeInMs','STRING','60000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13352,'pulsar','dbconf','2.10.1','broker.dispatchThrottlingOnBatchMessageEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13358,'pulsar','dbconf','2.10.1','broker.dispatchThrottlingOnNonBacklogConsumerEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13349,'pulsar','dbconf','2.10.1','broker.dispatchThrottlingRateInByte','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13348,'pulsar','dbconf','2.10.1','broker.dispatchThrottlingRateInMsg','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13356,'pulsar','dbconf','2.10.1','broker.dispatchThrottlingRatePerReplicatorInByte','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13355,'pulsar','dbconf','2.10.1','broker.dispatchThrottlingRatePerReplicatorInMsg','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13354,'pulsar','dbconf','2.10.1','broker.dispatchThrottlingRatePerSubscriptionInByte','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13353,'pulsar','dbconf','2.10.1','broker.dispatchThrottlingRatePerSubscriptionInMsg','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13351,'pulsar','dbconf','2.10.1','broker.dispatchThrottlingRatePerTopicInByte','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13350,'pulsar','dbconf','2.10.1','broker.dispatchThrottlingRatePerTopicInMsg','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13357,'pulsar','dbconf','2.10.1','broker.dispatchThrottlingRateRelativeToPublishRate','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13282,'pulsar','dbconf','2.10.1','broker.enableBusyWait','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13374,'pulsar','dbconf','2.10.1','broker.enableNonPersistentTopics','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13617,'pulsar','dbconf','2.10.1','broker.enablePackagesManagement','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13373,'pulsar','dbconf','2.10.1','broker.enablePersistentTopics','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13391,'pulsar','dbconf','2.10.1','broker.enableReplicatedSubscriptions','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13376,'pulsar','dbconf','2.10.1','broker.enableRunBookieAutoRecoveryTogether','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13375,'pulsar','dbconf','2.10.1','broker.enableRunBookieTogether','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13379,'pulsar','dbconf','2.10.1','broker.encryptionRequireOnProducer','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13367,'pulsar','dbconf','2.10.1','broker.entryFilterNames','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13368,'pulsar','dbconf','2.10.1','broker.entryFiltersDirectory','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13582,'pulsar','dbconf','2.10.1','broker.exposeBundlesMetricsInPrometheus','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13577,'pulsar','dbconf','2.10.1','broker.exposeConsumerLevelMetricsInPrometheus','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13580,'pulsar','dbconf','2.10.1','broker.exposeManagedCursorMetricsInPrometheus','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13579,'pulsar','dbconf','2.10.1','broker.exposeManagedLedgerMetricsInPrometheus','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13587,'pulsar','dbconf','2.10.1','broker.exposePreciseBacklogInPrometheus','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13578,'pulsar','dbconf','2.10.1','broker.exposeProducerLevelMetricsInPrometheus','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13584,'pulsar','dbconf','2.10.1','broker.exposePublisherStats','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13576,'pulsar','dbconf','2.10.1','broker.exposeTopicLevelMetricsInPrometheus','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13287,'pulsar','dbconf','2.10.1','broker.failureDomainsEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13609,'pulsar','dbconf','2.10.1','broker.fileSystemProfilePath','STRING','../conf/filesystem_offload_core_site.xml','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13610,'pulsar','dbconf','2.10.1','broker.fileSystemURI','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13309,'pulsar','dbconf','2.10.1','broker.forceDeleteNamespaceAllowed','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13308,'pulsar','dbconf','2.10.1','broker.forceDeleteTenantAllowed','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13583,'pulsar','dbconf','2.10.1','broker.functionsWorkerEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13604,'pulsar','dbconf','2.10.1','broker.gcsManagedLedgerOffloadBucket','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13605,'pulsar','dbconf','2.10.1','broker.gcsManagedLedgerOffloadMaxBlockSizeInBytes','STRING','67108864','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13606,'pulsar','dbconf','2.10.1','broker.gcsManagedLedgerOffloadReadBufferSizeInBytes','STRING','1048576','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13603,'pulsar','dbconf','2.10.1','broker.gcsManagedLedgerOffloadRegion','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13607,'pulsar','dbconf','2.10.1','broker.gcsManagedLedgerOffloadServiceAccountKeyFile','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13622,'pulsar','dbconf','2.10.1','broker.globalZookeeperServers','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13275,'pulsar','dbconf','2.10.1','broker.haProxyProtocolEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13454,'pulsar','dbconf','2.10.1','broker.httpMaxRequestSize','STRING','-1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13456,'pulsar','dbconf','2.10.1','broker.httpRequestsLimitEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13457,'pulsar','dbconf','2.10.1','broker.httpRequestsMaxPerSecond','STRING','100.0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13330,'pulsar','dbconf','2.10.1','broker.isAllowAutoUpdateSchemaEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13284,'pulsar','dbconf','2.10.1','broker.isRunningStandalone','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13591,'pulsar','dbconf','2.10.1','broker.isSchemaValidationEnforced','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13569,'pulsar','dbconf','2.10.1','broker.keepAliveIntervalSeconds','STRING','30','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13522,'pulsar','dbconf','2.10.1','broker.lazyCursorRecovery','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13539,'pulsar','dbconf','2.10.1','broker.loadBalancerAutoBundleSplitEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13540,'pulsar','dbconf','2.10.1','broker.loadBalancerAutoUnloadSplitBundlesEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13555,'pulsar','dbconf','2.10.1','broker.loadBalancerBandwithInResourceWeight','STRING','1.0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13556,'pulsar','dbconf','2.10.1','broker.loadBalancerBandwithOutResourceWeight','STRING','1.0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13536,'pulsar','dbconf','2.10.1','broker.loadBalancerBrokerMaxTopics','STRING','50000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13537,'pulsar','dbconf','2.10.1','broker.loadBalancerBrokerOverloadedThresholdPercentage','STRING','85','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13551,'pulsar','dbconf','2.10.1','broker.loadBalancerBrokerThresholdShedderPercentage','STRING','10','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13560,'pulsar','dbconf','2.10.1','broker.loadBalancerBundleUnloadMinThroughputThreshold','STRING','10','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13557,'pulsar','dbconf','2.10.1','broker.loadBalancerCPUResourceWeight','STRING','1.0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13559,'pulsar','dbconf','2.10.1','broker.loadBalancerDirectMemoryResourceWeight','STRING','1.0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13529,'pulsar','dbconf','2.10.1','broker.loadBalancerEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13554,'pulsar','dbconf','2.10.1','broker.loadBalancerHistoryResourcePercentage','STRING','0.9','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13532,'pulsar','dbconf','2.10.1','broker.loadBalancerHostUsageCheckIntervalMinutes','STRING','1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13550,'pulsar','dbconf','2.10.1','broker.loadBalancerLoadSheddingStrategy','STRING','org.apache.pulsar.broker.loadbalance.impl.ThresholdShedder','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13558,'pulsar','dbconf','2.10.1','broker.loadBalancerMemoryResourceWeight','STRING','1.0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13552,'pulsar','dbconf','2.10.1','broker.loadBalancerMsgRateDifferenceShedderThreshold','STRING','50','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13553,'pulsar','dbconf','2.10.1','broker.loadBalancerMsgThroughputMultiplierDifferenceShedderThreshold','STRING','4','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13544,'pulsar','dbconf','2.10.1','broker.loadBalancerNamespaceBundleMaxBandwidthMbytes','STRING','100','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13543,'pulsar','dbconf','2.10.1','broker.loadBalancerNamespaceBundleMaxMsgRate','STRING','30000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13542,'pulsar','dbconf','2.10.1','broker.loadBalancerNamespaceBundleMaxSessions','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13541,'pulsar','dbconf','2.10.1','broker.loadBalancerNamespaceBundleMaxTopics','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13545,'pulsar','dbconf','2.10.1','broker.loadBalancerNamespaceMaximumBundles','STRING','128','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13546,'pulsar','dbconf','2.10.1','broker.loadBalancerOverrideBrokerNicSpeedGbps','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13531,'pulsar','dbconf','2.10.1','broker.loadBalancerReportUpdateMaxIntervalMinutes','STRING','15','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13530,'pulsar','dbconf','2.10.1','broker.loadBalancerReportUpdateThresholdPercentage','STRING','10','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13538,'pulsar','dbconf','2.10.1','broker.loadBalancerResourceQuotaUpdateIntervalMinutes','STRING','15','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13533,'pulsar','dbconf','2.10.1','broker.loadBalancerSheddingEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13535,'pulsar','dbconf','2.10.1','broker.loadBalancerSheddingGracePeriodMinutes','STRING','30','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13534,'pulsar','dbconf','2.10.1','broker.loadBalancerSheddingIntervalMinutes','STRING','1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13547,'pulsar','dbconf','2.10.1','broker.loadManagerClassName','STRING','org.apache.pulsar.broker.loadbalance.impl.ModularLoadManagerImpl','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
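+-- broker.managedLedger* defaults: entry cache, ledger rollover and offload; quorum sizes use {{ensemble_size}}/{{write_quorum}}/{{ack_quorum}} placeholders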
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13525,'pulsar','dbconf','2.10.1','broker.managedLedgerAddEntryTimeoutSeconds','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13504,'pulsar','dbconf','2.10.1','broker.managedLedgerCacheCopyEntries','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13506,'pulsar','dbconf','2.10.1','broker.managedLedgerCacheEvictionFrequency','STRING','100.0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13507,'pulsar','dbconf','2.10.1','broker.managedLedgerCacheEvictionTimeThresholdMillis','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13505,'pulsar','dbconf','2.10.1','broker.managedLedgerCacheEvictionWatermark','STRING','0.9','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13503,'pulsar','dbconf','2.10.1','broker.managedLedgerCacheSizeMB','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13508,'pulsar','dbconf','2.10.1','broker.managedLedgerCursorBackloggedThreshold','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13517,'pulsar','dbconf','2.10.1','broker.managedLedgerCursorMaxEntriesPerLedger','STRING','50000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13498,'pulsar','dbconf','2.10.1','broker.managedLedgerCursorPositionFlushSeconds','STRING','60','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13518,'pulsar','dbconf','2.10.1','broker.managedLedgerCursorRolloverTimeInSeconds','STRING','14400','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13497,'pulsar','dbconf','2.10.1','broker.managedLedgerDefaultAckQuorum','STRING','{{ack_quorum}}','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13495,'pulsar','dbconf','2.10.1','broker.managedLedgerDefaultEnsembleSize','STRING','{{ensemble_size}}','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13509,'pulsar','dbconf','2.10.1','broker.managedLedgerDefaultMarkDeleteRateLimit','STRING','1.0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13496,'pulsar','dbconf','2.10.1','broker.managedLedgerDefaultWriteQuorum','STRING','{{write_quorum}}','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13500,'pulsar','dbconf','2.10.1','broker.managedLedgerDigestType','STRING','CRC32C','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13513,'pulsar','dbconf','2.10.1','broker.managedLedgerInactiveLedgerRolloverTimeSeconds','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13510,'pulsar','dbconf','2.10.1','broker.managedLedgerMaxEntriesPerLedger','STRING','50000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13512,'pulsar','dbconf','2.10.1','broker.managedLedgerMaxLedgerRolloverTimeMinutes','STRING','240','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13514,'pulsar','dbconf','2.10.1','broker.managedLedgerMaxSizePerLedgerMbytes','STRING','2048','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13519,'pulsar','dbconf','2.10.1','broker.managedLedgerMaxUnackedRangesToPersist','STRING','10000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13520,'pulsar','dbconf','2.10.1','broker.managedLedgerMaxUnackedRangesToPersistInZooKeeper','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13523,'pulsar','dbconf','2.10.1','broker.managedLedgerMetadataOperationsTimeoutSeconds','STRING','60','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13511,'pulsar','dbconf','2.10.1','broker.managedLedgerMinLedgerRolloverTimeMinutes','STRING','10','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13528,'pulsar','dbconf','2.10.1','broker.managedLedgerNewEntriesCheckDelayInMillis','STRING','10','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13502,'pulsar','dbconf','2.10.1','broker.managedLedgerNumSchedulerThreads','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13501,'pulsar','dbconf','2.10.1','broker.managedLedgerNumWorkerThreads','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13516,'pulsar','dbconf','2.10.1','broker.managedLedgerOffloadAutoTriggerSizeThresholdBytes','STRING','-1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13608,'pulsar','dbconf','2.10.1','broker.managedLedgerOffloadBucket','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13515,'pulsar','dbconf','2.10.1','broker.managedLedgerOffloadDeletionLagMs','STRING','14400000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13594,'pulsar','dbconf','2.10.1','broker.managedLedgerOffloadDriver','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13595,'pulsar','dbconf','2.10.1','broker.managedLedgerOffloadMaxThreads','STRING','2','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13596,'pulsar','dbconf','2.10.1','broker.managedLedgerOffloadPrefetchRounds','STRING','1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13526,'pulsar','dbconf','2.10.1','broker.managedLedgerPrometheusStatsLatencyRolloverSeconds','STRING','60','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13524,'pulsar','dbconf','2.10.1','broker.managedLedgerReadEntryTimeoutSeconds','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13499,'pulsar','dbconf','2.10.1','broker.managedLedgerStatsPeriodSeconds','STRING','60','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13527,'pulsar','dbconf','2.10.1','broker.managedLedgerTraceTaskExecution','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13597,'pulsar','dbconf','2.10.1','broker.managedLedgerUnackedRangesOpenCacheSetEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
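+-- broker.max* limits: concurrent lookup/HTTP requests, consumers and producers per topic, message size, unacked messages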
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13283,'pulsar','dbconf','2.10.1','broker.maxConcurrentHttpRequests','STRING','1024','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13369,'pulsar','dbconf','2.10.1','broker.maxConcurrentLookupRequest','STRING','50000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13371,'pulsar','dbconf','2.10.1','broker.maxConcurrentNonPersistentMessagePerConnection','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13370,'pulsar','dbconf','2.10.1','broker.maxConcurrentTopicLoadRequest','STRING','5000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13383,'pulsar','dbconf','2.10.1','broker.maxConsumersPerSubscription','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13380,'pulsar','dbconf','2.10.1','broker.maxConsumersPerTopic','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13395,'pulsar','dbconf','2.10.1','broker.maxMessagePublishBufferSizeInMB','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13384,'pulsar','dbconf','2.10.1','broker.maxMessageSize','STRING','5242880','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13397,'pulsar','dbconf','2.10.1','broker.maxMessageSizeCheckIntervalInSeconds','STRING','60','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13326,'pulsar','dbconf','2.10.1','broker.maxNamespacesPerTenant','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13398,'pulsar','dbconf','2.10.1','broker.maxNumPartitionsPerPartitionedTopic','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13310,'pulsar','dbconf','2.10.1','broker.maxPendingPublishRequestsPerConnection','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13377,'pulsar','dbconf','2.10.1','broker.maxProducersPerTopic','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13345,'pulsar','dbconf','2.10.1','broker.maxPublishRatePerTopicInBytes','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13344,'pulsar','dbconf','2.10.1','broker.maxPublishRatePerTopicInMessages','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13381,'pulsar','dbconf','2.10.1','broker.maxSameAddressConsumersPerTopic','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13378,'pulsar','dbconf','2.10.1','broker.maxSameAddressProducersPerTopic','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13382,'pulsar','dbconf','2.10.1','broker.maxSubscriptionsPerTopic','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13286,'pulsar','dbconf','2.10.1','broker.maxTenants','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13327,'pulsar','dbconf','2.10.1','broker.maxTopicsPerNamespace','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13336,'pulsar','dbconf','2.10.1','broker.maxUnackedMessagesPerBroker','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13334,'pulsar','dbconf','2.10.1','broker.maxUnackedMessagesPerConsumer','STRING','50000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13335,'pulsar','dbconf','2.10.1','broker.maxUnackedMessagesPerSubscription','STRING','200000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13337,'pulsar','dbconf','2.10.1','broker.maxUnackedMessagesPerSubscriptionOnBrokerBlocked','STRING','0.16','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13311,'pulsar','dbconf','2.10.1','broker.messageExpiryCheckIntervalInMinutes','STRING','5','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
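+-- broker.metadataStore* settings: batching, cache expiry, session and operation timeouts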
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13430,'pulsar','dbconf','2.10.1','broker.metadataStoreBatchingEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13431,'pulsar','dbconf','2.10.1','broker.metadataStoreBatchingMaxDelayMillis','STRING','5','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13432,'pulsar','dbconf','2.10.1','broker.metadataStoreBatchingMaxOperations','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13433,'pulsar','dbconf','2.10.1','broker.metadataStoreBatchingMaxSizeKb','STRING','128','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13290,'pulsar','dbconf','2.10.1','broker.metadataStoreCacheExpirySeconds','STRING','300','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13289,'pulsar','dbconf','2.10.1','broker.metadataStoreOperationTimeoutSeconds','STRING','30','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13288,'pulsar','dbconf','2.10.1','broker.metadataStoreSessionTimeoutMillis','STRING','30000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13264,'pulsar','dbconf','2.10.1','broker.metadataStoreUrl','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13581,'pulsar','dbconf','2.10.1','broker.metricsServletTimeoutMs','STRING','30000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13561,'pulsar','dbconf','2.10.1','broker.namespaceBundleUnloadingTimeoutMs','STRING','60000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
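+-- broker.num* thread-pool sizing (rows with an empty value_default are left unset here)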
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13276,'pulsar','dbconf','2.10.1','broker.numAcceptorThreads','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13281,'pulsar','dbconf','2.10.1','broker.numCacheExecutorThreadPoolSize','STRING','10','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13280,'pulsar','dbconf','2.10.1','broker.numExecutorThreadPoolSize','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13279,'pulsar','dbconf','2.10.1','broker.numHttpServerThreads','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13277,'pulsar','dbconf','2.10.1','broker.numIOThreads','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13278,'pulsar','dbconf','2.10.1','broker.numOrderedExecutorThreads','STRING','8','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13372,'pulsar','dbconf','2.10.1','broker.numWorkerThreadsForNonPersistentTopic','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13593,'pulsar','dbconf','2.10.1','broker.offloadersDirectory','STRING','./offloaders','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13620,'pulsar','dbconf','2.10.1','broker.packagesManagementLedgerRootPath','STRING','/ledgers','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13618,'pulsar','dbconf','2.10.1','broker.packagesManagementStorageProvider','STRING','org.apache.pulsar.packages.management.storage.bookkeeper.BookKeeperPackagesStorageProvider','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13619,'pulsar','dbconf','2.10.1','broker.packagesReplicas','STRING','1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13366,'pulsar','dbconf','2.10.1','broker.preciseDispatcherFlowControl','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13340,'pulsar','dbconf','2.10.1','broker.preciseTopicPublishRateLimiterEnable','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13333,'pulsar','dbconf','2.10.1','broker.preferLaterVersions','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13404,'pulsar','dbconf','2.10.1','broker.proxyRoles','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13392,'pulsar','dbconf','2.10.1','broker.replicatedSubscriptionsSnapshotFrequencyMillis','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13394,'pulsar','dbconf','2.10.1','broker.replicatedSubscriptionsSnapshotMaxCachedPerSubscription','STRING','10','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13393,'pulsar','dbconf','2.10.1','broker.replicatedSubscriptionsSnapshotTimeoutSeconds','STRING','30','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13563,'pulsar','dbconf','2.10.1','broker.replicationConnectionsPerBroker','STRING','16','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13562,'pulsar','dbconf','2.10.1','broker.replicationMetricsEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13566,'pulsar','dbconf','2.10.1','broker.replicationPolicyCheckDurationSeconds','STRING','600','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13564,'pulsar','dbconf','2.10.1','broker.replicationProducerQueueSize','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13627,'pulsar','dbconf','2.10.1','broker.replicationTlsEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13565,'pulsar','dbconf','2.10.1','broker.replicatorPrefix','STRING','pulsar.repl','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13396,'pulsar','dbconf','2.10.1','broker.retentionCheckIntervalInSeconds','STRING','120','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13599,'pulsar','dbconf','2.10.1','broker.s3ManagedLedgerOffloadBucket','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13601,'pulsar','dbconf','2.10.1','broker.s3ManagedLedgerOffloadMaxBlockSizeInBytes','STRING','67108864','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13602,'pulsar','dbconf','2.10.1','broker.s3ManagedLedgerOffloadReadBufferSizeInBytes','STRING','1048576','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13598,'pulsar','dbconf','2.10.1','broker.s3ManagedLedgerOffloadRegion','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13600,'pulsar','dbconf','2.10.1','broker.s3ManagedLedgerOffloadServiceEndpoint','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13452,'pulsar','dbconf','2.10.1','broker.saslJaasClientAllowedIds','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13453,'pulsar','dbconf','2.10.1','broker.saslJaasServerSectionName','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13592,'pulsar','dbconf','2.10.1','broker.schemaCompatibilityStrategy','STRING','FULL','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13590,'pulsar','dbconf','2.10.1','broker.schemaRegistryStorageClassName','STRING','org.apache.pulsar.broker.service.schema.BookkeeperSchemaStorageFactory','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13292,'pulsar','dbconf','2.10.1','broker.skipBrokerShutdownOnOOM','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13588,'pulsar','dbconf','2.10.1','broker.splitTopicAndPartitionLabelInPrometheus','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 15:40:56',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13585,'pulsar','dbconf','2.10.1','broker.statsUpdateFrequencyInSecs','STRING','60','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13586,'pulsar','dbconf','2.10.1','broker.statsUpdateInitialDelayInSecs','STRING','60','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13332,'pulsar','dbconf','2.10.1','broker.statusFilePath','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13347,'pulsar','dbconf','2.10.1','broker.subscribeRatePeriodPerConsumerInSecond','STRING','30','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13346,'pulsar','dbconf','2.10.1','broker.subscribeThrottlingRatePerConsumer','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13313,'pulsar','dbconf','2.10.1','broker.subscriptionExpirationTimeMinutes','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13315,'pulsar','dbconf','2.10.1','broker.subscriptionExpiryCheckIntervalInMinutes','STRING','5','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13318,'pulsar','dbconf','2.10.1','broker.subscriptionKeySharedConsistentHashingReplicaPoints','STRING','100','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13631,'pulsar','dbconf','2.10.1','broker.subscriptionKeySharedEnable','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13317,'pulsar','dbconf','2.10.1','broker.subscriptionKeySharedUseConsistentHashing','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13314,'pulsar','dbconf','2.10.1','broker.subscriptionRedeliveryTrackerEnabled','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13316,'pulsar','dbconf','2.10.1','broker.subscriptionTypesEnabled','STRING','Exclusive,Shared,Failover,Key_Shared','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13440,'pulsar','dbconf','2.10.1','broker.superUserRoles','STRING','super-user','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 15:37:50',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13548,'pulsar','dbconf','2.10.1','broker.supportedNamespaceBundleSplitAlgorithms','STRING','range_equally_divide,topic_count_equally_divide','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13400,'pulsar','dbconf','2.10.1','broker.systemTopicEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13401,'pulsar','dbconf','2.10.1','broker.systemTopicSchemaCompatibilityStrategy','STRING','ALWAYS_COMPATIBLE','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13410,'pulsar','dbconf','2.10.1','broker.tlsAllowInsecureConnection','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13407,'pulsar','dbconf','2.10.1','broker.tlsCertificateFilePath','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13406,'pulsar','dbconf','2.10.1','broker.tlsCertRefreshCheckDurationSec','STRING','300','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13412,'pulsar','dbconf','2.10.1','broker.tlsCiphers','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13630,'pulsar','dbconf','2.10.1','broker.tlsEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13416,'pulsar','dbconf','2.10.1','broker.tlsEnabledWithKeyStore','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13408,'pulsar','dbconf','2.10.1','broker.tlsKeyFilePath','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13418,'pulsar','dbconf','2.10.1','broker.tlsKeyStore','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13419,'pulsar','dbconf','2.10.1','broker.tlsKeyStorePassword','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13417,'pulsar','dbconf','2.10.1','broker.tlsKeyStoreType','STRING','JKS','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13411,'pulsar','dbconf','2.10.1','broker.tlsProtocols','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13414,'pulsar','dbconf','2.10.1','broker.tlsProvider','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13413,'pulsar','dbconf','2.10.1','broker.tlsRequireTrustedClientCertOnConnect','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13409,'pulsar','dbconf','2.10.1','broker.tlsTrustCertsFilePath','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13421,'pulsar','dbconf','2.10.1','broker.tlsTrustStore','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13422,'pulsar','dbconf','2.10.1','broker.tlsTrustStorePassword','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13420,'pulsar','dbconf','2.10.1','broker.tlsTrustStoreType','STRING','JKS','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13451,'pulsar','dbconf','2.10.1','broker.tokenAudience','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13450,'pulsar','dbconf','2.10.1','broker.tokenAudienceClaim','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13449,'pulsar','dbconf','2.10.1','broker.tokenAuthClaim','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13448,'pulsar','dbconf','2.10.1','broker.tokenPublicKey','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
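+-- NOTE: the broker.tokenSecretKey row below uses a double-brace template default ({{secret_key_dir}}); it appears to be a placeholder rendered per-cluster before use rather than a literal value, which would also explain its flag_status of 2 instead of the usual 1.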
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13447,'pulsar','dbconf','2.10.1','broker.tokenSecretKey','STRING','{{secret_key_dir}}','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13403,'pulsar','dbconf','2.10.1','broker.topicFencingTimeoutSeconds','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13402,'pulsar','dbconf','2.10.1','broker.topicLevelPoliciesEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13339,'pulsar','dbconf','2.10.1','broker.topicPublisherThrottlingTickTimeMillis','STRING','10','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13615,'pulsar','dbconf','2.10.1','broker.transactionBufferClientMaxConcurrentRequests','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13613,'pulsar','dbconf','2.10.1','broker.transactionBufferSnapshotMaxTransactionCount','STRING','1000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13614,'pulsar','dbconf','2.10.1','broker.transactionBufferSnapshotMinTimeInMillis','STRING','5000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13611,'pulsar','dbconf','2.10.1','broker.transactionCoordinatorEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13612,'pulsar','dbconf','2.10.1','broker.transactionMetadataStoreProviderClassName','STRING','org.apache.pulsar.transaction.coordinator.impl.MLTransactionMetadataStoreProvider','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13616,'pulsar','dbconf','2.10.1','broker.transactionPendingAckLogIndexMinLag','STRING','500','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13298,'pulsar','dbconf','2.10.1','broker.ttlDurationDefaultInSeconds','STRING','0','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13338,'pulsar','dbconf','2.10.1','broker.unblockStuckSubscriptionEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13268,'pulsar','dbconf','2.10.1','broker.webServicePort','STRING','8080','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13269,'pulsar','dbconf','2.10.1','broker.webServicePortTls','STRING','8443','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 15:40:38',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13271,'pulsar','dbconf','2.10.1','broker.webServiceTlsCiphers','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13270,'pulsar','dbconf','2.10.1','broker.webServiceTlsProtocols','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13415,'pulsar','dbconf','2.10.1','broker.webServiceTlsProvider','STRING','Conscrypt','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13573,'pulsar','dbconf','2.10.1','broker.webSocketConnectionsPerBroker','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13575,'pulsar','dbconf','2.10.1','broker.webSocketMaxTextFrameSize','STRING','1048576','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13572,'pulsar','dbconf','2.10.1','broker.webSocketNumIoThreads','STRING','','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13571,'pulsar','dbconf','2.10.1','broker.webSocketServiceEnabled','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13574,'pulsar','dbconf','2.10.1','broker.webSocketSessionIdleTimeoutMillis','STRING','300000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13626,'pulsar','dbconf','2.10.1','broker.zooKeeperCacheExpirySeconds','STRING','-1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13625,'pulsar','dbconf','2.10.1','broker.zooKeeperOperationTimeoutSeconds','STRING','-1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
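+-- NOTE: broker.zookeeperServers below defaults to a templated three-node quorum string, {{zk_host_list[0]}}:2181,{{zk_host_list[1]}}:2181,{{zk_host_list[2]}}:2181; like tokenSecretKey above, it is presumably substituted with real hosts at deploy time (flag_status=2).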
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13621,'pulsar','dbconf','2.10.1','broker.zookeeperServers','STRING','{{zk_host_list[0]}}:2181,{{zk_host_list[1]}}:2181,{{zk_host_list[2]}}:2181','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-05-11 11:23:49',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13399,'pulsar','dbconf','2.10.1','broker.zookeeperSessionExpiredPolicy','STRING','reconnect','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13624,'pulsar','dbconf','2.10.1','broker.zooKeeperSessionTimeoutMillis','STRING','-1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13636,'pulsar','dbconf','2.10.1','port','INT','6650',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-01-18 11:43:30','2023-04-24 15:45:22',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13635,'pulsar','dbconf','2.10.1','username','STRING','manager_user',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-01-18 11:41:15','2023-04-24 15:45:17',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15970,'pulsar','dbconf','2.10.1','zk_ip_0','STRING','zk_ip_0',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-13 14:41:36','2023-04-24 15:45:26',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15971,'pulsar','dbconf','2.10.1','zk_ip_1','STRING','zk_ip_1',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-13 14:43:17','2023-04-24 15:45:29',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (15972,'pulsar','dbconf','2.10.1','zk_ip_2','STRING','zk_ip_2',NULL,'',1,0,0,0,1,NULL,NULL,NULL,-1,NULL,NULL,'2023-04-13 14:43:17','2023-04-24 15:45:33',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13174,'pulsar','dbconf','2.10.1','zookeeper.autopurge.purgeInterval','STRING','1','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13173,'pulsar','dbconf','2.10.1','zookeeper.autopurge.snapRetainCount','STRING','3','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13170,'pulsar','dbconf','2.10.1','zookeeper.clientPort','STRING','2181','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13169,'pulsar','dbconf','2.10.1','zookeeper.dataDir','STRING','{{data_dir}}','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13632,'pulsar','dbconf','2.10.1','zookeeper.dataLogDir','STRING','{{data_log_dir}}','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-04-17 17:10:41',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13175,'pulsar','dbconf','2.10.1','zookeeper.forceSync','STRING','yes','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13167,'pulsar','dbconf','2.10.1','zookeeper.initLimit','STRING','10','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13178,'pulsar','dbconf','2.10.1','zookeeper.metricsProvider.className','STRING','org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13180,'pulsar','dbconf','2.10.1','zookeeper.metricsProvider.exportJvmInfo','STRING','true','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13179,'pulsar','dbconf','2.10.1','zookeeper.metricsProvider.httpPort','STRING','8000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13177,'pulsar','dbconf','2.10.1','zookeeper.portUnification','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13171,'pulsar','dbconf','2.10.1','zookeeper.server.0','STRING','{{zk_host_list[0]}}:2888:3888','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-05-11 11:23:47',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13172,'pulsar','dbconf','2.10.1','zookeeper.server.1','STRING','{{zk_host_list[1]}}:2888:3888','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-05-11 11:23:48',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13633,'pulsar','dbconf','2.10.1','zookeeper.server.2','STRING','{{zk_host_list[2]}}:2888:3888','NULL','',2,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-05-11 11:23:51',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13176,'pulsar','dbconf','2.10.1','zookeeper.sslQuorum','STRING','false','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13168,'pulsar','dbconf','2.10.1','zookeeper.syncLimit','STRING','5','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13166,'pulsar','dbconf','2.10.1','zookeeper.tickTime','STRING','2000','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL','pulsar config','2023-01-13 11:26:00','2023-01-13 11:26:00',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
diff --git a/dbm-services/common/db-config/assets/migrations/000030_influxdb_data.down.sql b/dbm-services/common/db-config/assets/migrations/000030_influxdb_data.down.sql
new file mode 100644
index 0000000000..596f5997be
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000030_influxdb_data.down.sql
@@ -0,0 +1,3 @@
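+-- revert the influxdb seed data; the second WHERE mirrors the dump condition in the up migration, so user-entered encrypted values are kept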
+DELETE FROM tb_config_file_def WHERE namespace='influxdb';
+DELETE FROM tb_config_name_def WHERE namespace='influxdb' AND (flag_encrypt!=1 or value_default like '{{%');
diff --git a/dbm-services/common/db-config/assets/migrations/000030_influxdb_data.up.sql b/dbm-services/common/db-config/assets/migrations/000030_influxdb_data.up.sql
new file mode 100644
index 0000000000..e91b69a402
--- /dev/null
+++ b/dbm-services/common/db-config/assets/migrations/000030_influxdb_data.up.sql
@@ -0,0 +1,78 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_file_def`
+--
+-- WHERE:  namespace='influxdb'
+
+INSERT INTO `tb_config_file_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_type_lc`, `conf_file_lc`, `level_names`, `level_versioned`, `conf_name_validate`, `conf_value_validate`, `value_type_strict`, `namespace_info`, `version_keep_limit`, `version_keep_days`, `conf_name_order`, `description`, `created_at`, `updated_at`, `updated_by`) VALUES (149,'influxdb','dbconf','1.8.4','influxdb配置','influxdb配置文件','plat,app,instance','instance',1,1,0,'NULL',5,365,0,'influxdb配置文件','2022-09-20 15:17:36','2023-03-22 12:08:50','');
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbconfig
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Dumping data for table `tb_config_name_def`
+--
+-- WHERE:  namespace='influxdb' AND (flag_encrypt!=1 or value_default like '{{%')
+
+INSERT INTO `tb_config_name_def` (`id`, `namespace`, `conf_type`, `conf_file`, `conf_name`, `value_type`, `value_default`, `value_allowed`, `value_type_sub`, `flag_status`, `flag_disable`, `flag_locked`, `flag_encrypt`, `need_restart`, `value_formula`, `extra_info`, `conf_name_lc`, `order_index`, `since_version`, `description`, `created_at`, `updated_at`, `stage`) VALUES (13637,'influxdb','dbconf','1.8.4','username','STRING','admin','NULL','',1,0,0,0,1,'NULL','NULL','NULL',-1,'NULL',NULL,'2023-02-14 15:08:46','2023-02-14 15:08:46',0);
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-05-31 10:34:29
diff --git a/dbm-services/common/db-config/build.sh b/dbm-services/common/db-config/build.sh
new file mode 100644
index 0000000000..e8c3a35e3e
--- /dev/null
+++ b/dbm-services/common/db-config/build.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+workDir=$(pwd)
+
+# unit tests
+cd internal/service/simpleconfig && go test -v . ; cd "$workDir"
+cd internal/repository/model && go test -v . ; cd "$workDir"
+cd pkg/validate && go test -v . ; cd "$workDir"
+
+cd "$workDir"
+./build_doc.sh
+make
+pkill bkconfigsvr
+[ -f svr.pid ] && kill "$(cat svr.pid)"
+sleep 0.5
+./bkconfigsvr >>logs/main.log 2>&1 &
+echo $! > svr.pid
+sleep 0.5
+ps -ef | grep bkconfigsvr | grep -v grep
\ No newline at end of file
diff --git a/dbm-services/common/db-config/build_doc.sh b/dbm-services/common/db-config/build_doc.sh
new file mode 100755
index 0000000000..dfd544fcc5
--- /dev/null
+++ b/dbm-services/common/db-config/build_doc.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+swag init -g cmd/bkconfigsvr/main.go --parseDependency
+if [ $? -ne 0 ]; then
+  echo "failed to generate swagger API docs"
+  exit 1
+fi
+tree docs/
\ No newline at end of file
diff --git a/dbm-services/common/db-config/cmd/bkconfigcli/bkconfigcli.go b/dbm-services/common/db-config/cmd/bkconfigcli/bkconfigcli.go
new file mode 100644
index 0000000000..b53f149831
--- /dev/null
+++ b/dbm-services/common/db-config/cmd/bkconfigcli/bkconfigcli.go
@@ -0,0 +1,2 @@
+// Package bkconfigcli TODO
+package bkconfigcli
diff --git a/dbm-services/common/db-config/cmd/bkconfigcli/main.go b/dbm-services/common/db-config/cmd/bkconfigcli/main.go
new file mode 100644
index 0000000000..1db1507db4
--- /dev/null
+++ b/dbm-services/common/db-config/cmd/bkconfigcli/main.go
@@ -0,0 +1 @@
+package bkconfigcli
diff --git a/dbm-services/common/db-config/cmd/bkconfigsvr/main.go b/dbm-services/common/db-config/cmd/bkconfigsvr/main.go
new file mode 100644
index 0000000000..0cf42fc2cc
--- /dev/null
+++ b/dbm-services/common/db-config/cmd/bkconfigsvr/main.go
@@ -0,0 +1,98 @@
+package main
+
+import (
+	"bk-dbconfig/internal/repository"
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/internal/router"
+	"bk-dbconfig/pkg/core/config"
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/middleware"
+	"log"
+	_ "net/http/pprof"
+	"os"
+
+	"github.com/gin-gonic/gin"
+	"github.com/golang-migrate/migrate/v4"
+	_ "github.com/golang-migrate/migrate/v4/source/file"
+	"github.com/spf13/pflag"
+	"github.com/spf13/viper"
+)
+
+// @title           bkconfigsvr API
+// @version         0.0.1
+// @description     This is the bkconfigsvr API server.
+// @termsOfService  http://swagger.io/terms/
+// @host            localhost:80
+// @Schemes        http
+// @contact.name   API Support
+// @contact.url    http://www.swagger.io/support
+// @contact.email  support@swagger.io
+
+// @license.name  Apache 2.0
+// @license.url   http://www.apache.org/licenses/LICENSE-2.0.html
+
+// @BasePath  /
+
+// main starts bkconfigsvr: it runs db migrations when requested, then serves the HTTP API
+// @securityDefinitions.basic  BasicAuth
+func main() {
+	// init DB
+	model.DB.Init()
+	defer model.DB.Close()
+	model.InitCache()
+
+	// process db migrate
+	pflag.Parse()
+	if config.GetBool("migrate") || config.GetBool("migrate.enable") {
+		if err := dbMigrate(); err != nil && err != migrate.ErrNoChange {
+			log.Fatal(err)
+		}
+		// if --migrate was given on the command line, exit once the migration finishes
+		// if migrate.enable was set in the config file, continue with the main process
+		if config.GetBool("migrate") {
+			os.Exit(0)
+		}
+	}
+
+	model.LoadCache()
+	gin.SetMode(gin.ReleaseMode)
+	engine := gin.New()
+	engine.Use(gin.Recovery())
+	engine.Use(middleware.CorsMiddleware())
+	engine.Use(middleware.RequestMiddleware())
+	engine.Use(middleware.RequestLoggerMiddleware())
+	router.RegisterPing(engine)
+	router.RegisterRestRoutes(engine)
+	if config.GetBool("swagger.enableUI") {
+		router.RegisterRoutesSwagger(engine)
+	}
+
+	httpAddr := config.GetString("http.listenAddress")
+	logger.Info("start http server on %s ", httpAddr)
+
+	if err := engine.Run(httpAddr); err != nil {
+		log.Fatal(err)
+	}
+
+}
+
+// init reads in config file and ENV variables if set.
+func init() {
+	config.InitConfig("logger")
+	config.InitConfig("config")
+	logger.Init()
+
+	pflag.Bool("migrate", false,
+		"run migrate to databases and exit. set migrate.enable to config.yaml will run migrate and continue ")
+	pflag.String("migrate.source", "", "migrate source path")
+	pflag.Int("migrate.force", 0, "force the version to be clean if it's dirty")
+	viper.BindPFlags(pflag.CommandLine)
+}
+
+func dbMigrate() error {
+	logger.Info("run db migrations...")
+	return repository.DoMigrateFromEmbed()
+	// logger.Info("try to run migrations with migrate.source")
+	// return repository.DoMigrateFromSource()
+}
diff --git a/dbm-services/common/db-config/cmd/encryptcli/main.go b/dbm-services/common/db-config/cmd/encryptcli/main.go
new file mode 100644
index 0000000000..ae4f68e098
--- /dev/null
+++ b/dbm-services/common/db-config/cmd/encryptcli/main.go
@@ -0,0 +1,2 @@
+// Package encryptcli tool
+package encryptcli
diff --git a/dbm-services/common/db-config/conf/config.yaml b/dbm-services/common/db-config/conf/config.yaml
new file mode 100644
index 0000000000..cec58b9b0e
--- /dev/null
+++ b/dbm-services/common/db-config/conf/config.yaml
@@ -0,0 +1,26 @@
+gormlog: true
+
+http:
+  listenAddress: 0.0.0.0:80
+
+db:
+  name: bk_dbconfig
+  addr: localhost:3306
+  username: xx
+  password: xxxx
+
+dbConnConf:
+  maxIdleConns: 10
+  maxOpenConns: 50
+  connMaxLifetime: 1
+
+swagger:
+  enableUI: true
+
+encrypt:
+  keyPrefix: ""
+
+migrate:
+  enable: true
+  source: "file://assets/migrations/"
+  force: 0
\ No newline at end of file
diff --git a/dbm-services/common/db-config/conf/logger.yaml b/dbm-services/common/db-config/conf/logger.yaml
new file mode 100644
index 0000000000..33114e0d92
--- /dev/null
+++ b/dbm-services/common/db-config/conf/logger.yaml
@@ -0,0 +1,18 @@
+---
+log:
+  # options: stdout, stderr, /path/to/log/file
+  output: logs/bkdbconfig_svr.log
+  # options: logfmt, json
+  formater: logfmt
+  # options: debug, info, warn, error, fatal, panic
+  level: info
+  # time format
+  timeformat: 2006-01-02T15:04:05.000Z07:00
+  # max size of a single log file, in MB
+  maxsize: 100
+  # number of old log files to keep
+  maxbackups: 3
+  # days to keep old log files
+  maxage: 30
+  # start the level server
+  levelserver: false
\ No newline at end of file
diff --git a/dbm-services/common/db-config/docs/design/encrypt.md b/dbm-services/common/db-config/docs/design/encrypt.md
new file mode 100644
index 0000000000..4b2a894144
--- /dev/null
+++ b/dbm-services/common/db-config/docs/design/encrypt.md
@@ -0,0 +1,39 @@
+# Password Encryption
+Encrypt:
+```
+curl --location --request POST 'http://bkdbm-dbconfig/bkconfig/v1/conffile/update' \
+--header 'Content-Type: application/json' \
+-d '{
+    "req_type": "SaveAndPublish",
+    "conf_names": [
+        {
+            "conf_name": "xx",
+            "value_default": "xxx",
+            "op_type": "update",
+            "value_type":"STRING"
+        }
+    ],
+    "confirm": 0,
+    "conf_file_info": {
+        "namespace": "aaa",
+        "conf_type": "bbb",
+        "conf_file": "ccc"
+    }
+}'
+```
+
+Query:
+```
+curl --location --request POST 'http://bkdbm-dbconfig/bkconfig/v1/confitem/query' \
+--header 'Content-Type: application/json' \
+-d '{
+    "bk_biz_id": "0",
+    "level_name": "plat",
+    "level_value": "0",
+    "conf_file": "ccc",
+    "conf_type": "bbb",
+    "namespace": "aaa",
+    "format": "map",
+    "conf_name": "xx"
+}'
+```
\ No newline at end of file
diff --git a/dbm-services/common/db-config/docs/design/examples.md b/dbm-services/common/db-config/docs/design/examples.md
new file mode 100644
index 0000000000..ed732961c6
--- /dev/null
+++ b/dbm-services/common/db-config/docs/design/examples.md
@@ -0,0 +1,220 @@
+## Common config files
+
+**Get**
+
+**Save and publish**
+
+## my.cnf config items
+
+**Get**
+
+
+**Save and publish**
+
+## Deploy config
+**Get**
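+The request for this example is not shown in the source; reconstructed from the response fields below (the parameter values are therefore only illustrative, and `format: "map"` plus the `level_info` module hint are assumptions), it would look like:
+```
+curl --location --request POST 'http://localhost:8080/bkconfig/v1/confitem/query' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+    "bk_biz_id":"testapp",
+    "level_name":"cluster",
+    "level_value":"act3",
+    "level_info": {"module": "act"},
+    "conf_file":"tb_app_info",
+    "conf_type":"deploy",
+    "namespace":"tendbha",
+    "format":"map"
+}'
+```
+response: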
+```
+{
+    "code": 0,
+    "message": "",
+    "data": [
+        {
+            "bk_biz_id": "testapp",
+            "level_name": "cluster",
+            "level_value": "act3",
+            "conf_file_info": {
+                "namespace": "tendbha",
+                "conf_type": "deploy",
+                "conf_file": "tb_app_info",
+                "conf_type_lc": "部署配置",
+                "conf_file_lc": "",
+                "namespace_info": "",
+                "description": "",
+                "updated_by": "",
+                "created_at": "",
+                "updated_at": ""
+            },
+            "content": {
+                "charset": "utf8mb4",
+                "major_version": "mysql-5.6",
+                "mycnf_template": "my.cnf#5.6",
+                "storage_engine": "innodb"
+            }
+        }
+    ]
+}
+```
+
+response:
+```
+{
+    "code": 0,
+    "message": "",
+    "data": [
+        {
+            "bk_biz_id": "testapp",
+            "level_name": "module",
+            "level_value": "act",
+            "conf_type": "deploy",
+            "conf_file": "tb_app_info",
+            "content": {
+                "charset": "utf8mb4",
+                "major_version": "mysql-5.6"
+            }
+        }
+    ]
+}
+```
+
+## Initial privilege config
+**Get**
+```
+curl --location --request POST 'http://localhost:8080/bkconfig/v1/confitem/query' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+    "bk_biz_id":"testapp",
+    "level_name":"pub",
+    "level_value":"0",
+    "conf_file":"mysql#user,proxy#user",
+    "conf_type":"init_user",
+    "namespace":"tendbha",
+    "format":"map"
+}'
+```
+
+response:
+```
+{
+    "code": 0,
+    "message": "",
+    "data": [
+        {
+            "bk_biz_id": "0",
+            "level_name": "plat",
+            "level_value": "0",
+            "conf_file_info": {
+                "namespace": "tendbha",
+                "conf_type": "init_user",
+                "conf_file": "mysql#user",
+                "conf_type_lc": "",
+                "conf_file_lc": "初始化用户",
+                "namespace_info": "",
+                "description": "我是描述",
+                "updated_by": "",
+                "created_at": "",
+                "updated_at": ""
+            },
+            "content": {
+                "admin_pwd": "xx",
+                "admin_user": "xx"
+            }
+        },
+        {
+            "bk_biz_id": "0",
+            "level_name": "plat",
+            "level_value": "0",
+            "conf_file_info": {
+                "namespace": "tendbha",
+                "conf_type": "init_user",
+                "conf_file": "proxy#user",
+            },
+            "content": {
+                "proxy_admin_pwd": "xx",
+                "proxy_admin_user": "xx"
+            }
+        },
+        {
+            "bk_biz_id": "0",
+            "level_name": "plat",
+            "level_value": "0",
+            "conf_file_info": {
+                "namespace": "tendbha",
+                "conf_type": "init_user",
+                "conf_file": "mysql_os#user",
+            },
+            "content": {
+                "os_mysql_pwd": "xx",
+                "os_mysql_user": "xx"
+            }
+        }
+    ]
+}
+```
+
+## Monitoring config
+**Get**
+```
+curl --location --request POST 'http://localhost:8080/bkconfig/v1/confitem/query' \
+--header 'Content-Type: application/json' \
+--data-raw '{
+    "bk_biz_id":"0",
+    "level_name":"plat",
+    "level_value":"0",
+    "conf_file":"db_monitor,global_status",
+    "conf_type":"MysqlMasterMonitor",
+    "namespace":"tendbha",
+    "format":"map"
+}'
+```
+
+response:
+```
+{
+    "code": 0,
+    "message": "",
+    "data": [
+        {
+            "bk_biz_id": "0",
+            "level_name": "plat",
+            "level_value": "0",
+            "conf_file_info": {
+                "namespace": "tendbha",
+                "conf_type": "MysqlMasterMonitor",
+                "conf_file": "db_monitor",
+            },
+            "content": {
+                "conn_log": "{\"check\": \"YES\",\"expire_days\": \"1\",\"max_size\": \"2G\"}",
+                ...
+                "myisam_check": "{\"check\": \"YES\"}",
+                "unnormal_sql_check": "{\"accounts\": \"event_scheduler\",\"check\": \"YES\",\"timelimit\": \"18000\"}",
+                "warn_level.DB_DEADLOCK_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.DB_SQL_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"2\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                ...
+                "warn_swith": "{\"valve\": \"OPEN\"}"
+            }
+        },
+        {
+            "bk_biz_id": "0",
+            "level_name": "plat",
+            "level_value": "0",
+            "conf_file_info": {
+                "namespace": "tendbha",
+                "conf_type": "MysqlMasterMonitor",
+                "conf_file": "global_status"
+            },
+            "content": {
+                "Aborted_clients": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                ...
+                "spin_waits": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1029\",\"valve\": \"100000000\"}"
+            }
+        }
+    ]
+}
+
+```
\ No newline at end of file
diff --git a/dbm-services/common/db-config/docs/design/readme.md b/dbm-services/common/db-config/docs/design/readme.md
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/common/db-config/docs/design/value_validate.md b/dbm-services/common/db-config/docs/design/value_validate.md
new file mode 100644
index 0000000000..a4ca3735fb
--- /dev/null
+++ b/dbm-services/common/db-config/docs/design/value_validate.md
@@ -0,0 +1,121 @@
+# Setting value_default, value_allowed, value_type, value_type_sub
+
+## value_type
+value_type directly defines the data type of the value_default field. The following basic types are supported:
+1. STRING: string
+2. INT: numeric, integer
+3. FLOAT: floating point
+4. NUMBER: numeric (integer or float)
+5. BOOL: boolean
+
+### Numeric types: value_type=INT | FLOAT | NUMBER
+value_type_sub may be ENUM or RANGE; when empty, whether value_allowed is a RANGE or an ENUM is detected automatically.
+
+### Boolean type: value_type=BOOL
+value_type_sub may be ENUM or FLAG; when empty it is equivalent to ENUM.
+ - `ENUM` defines the true/false strings via value_allowed:  
+   - true: "1", "t", "T", "true", "TRUE", "True" are accepted
+   - false: "0", "f", "F", "false", "FALSE", "False" are accepted
+ - `FLAG` means the BOOL value is decided by whether the flag item appears at all,
+  e.g. `--skip-name-resolve` rather than `--skip-name-resolve=on`. In strict mode the response returns `"skip-name-resolve":"flag"` to tell the two cases apart.
+
+### String: value_type=STRING
+STRING is the most flexible type; value_type_sub supports these sub-types:
+ - `STRING`  
+  An empty value_type_sub is equivalent to STRING, the plainest string type; value_allowed is not validated.
+ - `ENUM`, `ENUMS`  
+  ENUM is a string enumeration, e.g. binlog_format with value_allowed = `ROW | STATEMENT | MIXED`; the final conf_value takes exactly one of them.
+  ENUMS means multiple enum values may be chosen, e.g. sql_mode with value_allowed = `ONLY_FULL_GROUP_BY | STRICT_TRANS_TABLES | NO_ENGINE_SUBSTITUTION | `; the final conf_value may be `ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES`. Note sql_mode also allows the empty value, which clients must handle when rendering.
+ - `BYTES`  
+  Byte-unit type, e.g. `1b`, `2k`, `3m`, `4g`; forms with a trailing b such as `3 mb`, `4gb` are also accepted. `1b` means one byte, and a bare number means bytes.
+  For the BYTES sub-type, whether value_allowed is a range or an enum is detected automatically: range format `[m, n]`, enum format `a | b | c`.
+  The range bounds may carry units, e.g. `[1024, 64m]` (see the validation sketch after this list).
+ - `DURATION`   
+   Time-unit type, e.g. `1s`, `2m`, `3h`; `1d` (= 24h) and `1w` are accepted as extensions of the golang duration syntax. A bare number means seconds.
+   For the DURATION sub-type, whether value_allowed is a range or an enum is detected automatically: range format `[m, n]`, enum format `a | b | c`.
+   The range bounds may carry units, e.g. `[3600, 24h]`.
+ - `JSON`, `MAP`  
+  Validates that the value is legal json. MAP differs from JSON in that, in strict mode, MAP is returned as json rather than as a string.
+  value_allowed is usually empty.
+ - `REGEX`   
+  Validates that the value matches the regular expression defined in value_allowed; it must be used together with value_allowed.
+  e.g. value_type_sub=REGEX value_allowed=`^[a-z0-9._%+\-]+@[a-z0-9.\-]+\.[a-z]{2,4}$` describes a legal email (the GOVALIDATE option below is simpler for this, of course).
+ - `GOVALIDATE`  
+   Based on https://github.com/go-playground/validator ; value_allowed holds the validate tag, e.g. `email`, `json`, `ipv4`, `min=1,max=9`.
+ - `LIST`  
+  No validation is done; it only controls whether the value is converted into a list when returned in map strict format.
+  e.g. value_type=`STRING` value_type_sub=`LIST` with conf_value=`a, b,c` is returned as `"conf_value": ["a", "b", "c"]`.
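+
+A rough sketch of how such a BYTES range check can be implemented (an illustration written for this doc, not the service's actual validator; the function names and the unit table here are assumptions):
+
+```
+package main
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// unit multipliers; a bare number or a trailing "b" means bytes
+// (mirroring the doc: 1b is one byte, and "3 mb" / "4gb" forms are accepted too)
+var units = map[string]int64{"": 1, "k": 1 << 10, "m": 1 << 20, "g": 1 << 30}
+
+// parseBytes turns "64m", "3 mb", "1024" or "4gb" into a byte count
+func parseBytes(s string) (int64, error) {
+	s = strings.ToLower(strings.ReplaceAll(strings.TrimSpace(s), " ", ""))
+	s = strings.TrimSuffix(s, "b") // "4gb" -> "4g", "1b" -> "1"
+	i := strings.IndexFunc(s, func(r rune) bool { return r < '0' || r > '9' })
+	num, unit := s, ""
+	if i >= 0 {
+		num, unit = s[:i], s[i:]
+	}
+	mul, ok := units[unit]
+	if !ok {
+		return 0, fmt.Errorf("unknown unit %q in %q", unit, s)
+	}
+	n, err := strconv.ParseInt(num, 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	return n * mul, nil
+}
+
+// checkBytesRange validates a value against a value_allowed range like "[1024, 64m]"
+func checkBytesRange(value, allowed string) error {
+	bounds := strings.Split(strings.Trim(strings.TrimSpace(allowed), "[]"), ",")
+	if len(bounds) != 2 {
+		return fmt.Errorf("bad range %q", allowed)
+	}
+	v, err := parseBytes(value)
+	if err != nil {
+		return err
+	}
+	lo, errLo := parseBytes(bounds[0])
+	hi, errHi := parseBytes(bounds[1])
+	if errLo != nil || errHi != nil {
+		return fmt.Errorf("bad range bounds in %q", allowed)
+	}
+	if v < lo || v > hi {
+		return fmt.Errorf("%s (=%d bytes) out of range %s", value, v, allowed)
+	}
+	return nil
+}
+
+func main() {
+	fmt.Println(checkBytesRange("64m", "[1024, 1g]")) // <nil>
+	fmt.Println(checkBytesRange("2g", "[1024, 1g]"))  // error: out of range
+}
+```
+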
+### How to choose value_type and value_type_sub
+- Example 1:  
+A byte-sized item such as max_allowed_packet can be defined as STRING BYTES `64m`, `[1, 1g]`, or as INT RANGE `67108864`, `[1, 1073741824]`; pick whichever fits the actual need.
+
+- Example 2:  
+For names that already carry a unit, such as timeout_ms, the value is usually an INT; do not use STRING DURATION. Likewise disk_size_mb should be an INT.
+For unit-less names, define whatever the actual client program can recognize.
+
+- Example 3:
+value_type=`STRING` value_type_sub=`ENUM` value_allowed=`true | false` can serve as a boolean.
+The effect matches value_type=`BOOL` value_type_sub=`ENUM`; the difference is that with strict (typed) returns, STRING ENUM returns `"xxx": "true"` while BOOL ENUM returns `"xxx": true`.
\ No newline at end of file
diff --git a/dbm-services/common/db-config/docs/docs.go b/dbm-services/common/db-config/docs/docs.go
new file mode 100644
index 0000000000..1e2963ecc8
--- /dev/null
+++ b/dbm-services/common/db-config/docs/docs.go
@@ -0,0 +1,1845 @@
+// Package docs GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+// This file was generated by swaggo/swag
+package docs
+
+import "github.com/swaggo/swag"
+
+const docTemplate = `{
+	"schemes": {{ marshal .Schemes }},
+	"swagger": "2.0",
+	"info": {
+		"description": "{{escape .Description}}",
+		"title": "{{.Title}}",
+		"termsOfService": "http://swagger.io/terms/",
+		"contact": {
+			"name": "API Support",
+			"url": "http://www.swagger.io/support",
+			"email": "support@swagger.io"
+		},
+		"license": {
+			"name": "Apache 2.0",
+			"url": "http://www.apache.org/licenses/LICENSE-2.0.html"
+		},
+		"version": "{{.Version}}"
+	},
+	"host": "{{.Host}}",
+	"basePath": "{{.BasePath}}",
+	"paths": {
+		"/bkconfig/v1/conffile/add": {
+			"post": {
+				"description": "新增平台级配置文件,定义允许的配置名。指定 req_type 为 ` + "`" + `SaveOnly` + "`" + ` 仅保存, ` + "`" + `SaveAndPublish` + "`" + ` 保存并发布。保存并发布 也必须提供全量,而不能是前面保存基础上的增量\nreq_type=` + "`" + `SaveOnly` + "`" + ` 已废弃\n第一次保存时,会返回 ` + "`" + `file_id` + "`" + `,下次 保存/发布 需传入 ` + "`" + `file_id` + "`" + `\nnamespace,conf_type,conf_file 唯一确定一个配置文件,不同DB版本信息体现在 conf_file 里 (如MySQL-5.7), namespace_info 可以存前端传入的 数据库版本,只用于在展示\nHTTP Header 指定 ` + "`" + `X-Bkapi-User-Name` + "`" + ` 请求的操作人员",
+				"consumes": [
+					"application/json"
+				],
+				"produces": [
+					"application/json"
+				],
+				"tags": [
+					"plat_config"
+				],
+				"summary": "新增平台级配置文件",
+				"parameters": [
+					{
+						"description": "ConfName for ConfType",
+						"name": "body",
+						"in": "body",
+						"required": true,
+						"schema": {
+							"$ref": "#/definitions/api.UpsertConfFilePlatReq"
+						}
+					}
+				],
+				"responses": {
+					"200": {
+						"description": "OK",
+						"schema": {
+							"$ref": "#/definitions/api.UpsertConfFilePlatResp"
+						}
+					},
+					"400": {
+						"description": "Bad Request",
+						"schema": {
+							"$ref": "#/definitions/api.HTTPClientErrResp"
+						}
+					}
+				}
+			}
+		},
+		"/bkconfig/v1/conffile/list": {
+			"get": {
+				"description": "查询配置文件模板列表。只有平台和业务才有配置文件列表\n返回的 updated_by 代表操作人",
+				"produces": [
+					"application/json"
+				],
+				"tags": [
+					"plat_config"
+				],
+				"summary": "查询配置文件列表",
+				"parameters": [
+					{
+						"type": "string",
+						"description": "业务id, bk_biz_id=0 代表平台配置",
+						"name": "bk_biz_id",
+						"in": "query",
+						"required": true
+					},
+					{
+						"type": "string",
+						"description": "如果指定了 conf_file 则只查这一个文件信息",
+						"name": "conf_file",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"example": "dbconf",
+						"name": "conf_type",
+						"in": "query",
+						"required": true
+					},
+					{
+						"enum": [
+							"plat",
+							"app",
+							"module",
+							"cluster"
+						],
+						"type": "string",
+						"description": "配置层级名,当前允许值 ` + "`" + `app` + "`" + `,` + "`" + `module` + "`" + `,` + "`" + `cluster` + "`" + `\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+						"name": "level_name",
+						"in": "query",
+						"required": true
+					},
+					{
+						"type": "string",
+						"description": "配置层级值",
+						"name": "level_value",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"description": "命名空间,一般指DB类型",
+						"name": "namespace",
+						"in": "query",
+						"required": true
+					}
+				],
+				"responses": {
+					"200": {
+						"description": "OK",
+						"schema": {
+							"type": "array",
+							"items": {
+								"$ref": "#/definitions/api.ListConfFileResp"
+							}
+						}
+					},
+					"400": {
+						"description": "Bad Request",
+						"schema": {
+							"$ref": "#/definitions/api.HTTPClientErrResp"
+						}
+					}
+				}
+			}
+		},
+		"/bkconfig/v1/conffile/query": {
+			"get": {
+				"description": "查询 平台配置 某个配置类型/配置文件的所有配置名列表",
+				"produces": [
+					"application/json"
+				],
+				"tags": [
+					"plat_config"
+				],
+				"summary": "查询平台配置项列表",
+				"parameters": [
+					{
+						"type": "string",
+						"example": "MySQL-5.7",
+						"name": "conf_file",
+						"in": "query",
+						"required": true
+					},
+					{
+						"type": "string",
+						"description": "如果设置,会根据前缀模糊匹配搜索",
+						"name": "conf_name",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"example": "dbconf",
+						"name": "conf_type",
+						"in": "query",
+						"required": true
+					},
+					{
+						"type": "string",
+						"example": "tendbha",
+						"name": "namespace",
+						"in": "query"
+					}
+				],
+				"responses": {
+					"200": {
+						"description": "OK",
+						"schema": {
+							"$ref": "#/definitions/QueryConfigNamesResp"
+						}
+					},
+					"400": {
+						"description": "Bad Request",
+						"schema": {
+							"$ref": "#/definitions/api.HTTPClientErrResp"
+						}
+					}
+				}
+			}
+		},
+		"/bkconfig/v1/conffile/update": {
+			"post": {
+				"description": "编辑平台级配置文件。指定 req_type 为 ` + "`" + `SaveOnly` + "`" + ` 仅保存, ` + "`" + `SaveAndPublish` + "`" + ` 保存并发布\nHTTP Header 指定 ` + "`" + `X-Bkapi-User-Name` + "`" + ` 请求的操作人员\n编辑平台配置时,如果设置 flag_disable=1 时,该配置不会显示在平台配置项列表,相当于管理 所有允许的配置项列表\n保存时会校验输入的 value_default, value_type, value_allowed\n1. value_type 目前允许 STRING, INT, FLOAT, NUMBER\n2. value_type_sub 允许 ENUM, ENUMS, RANGE, STRING, JSON, REGEX(一种特殊的STRING,会验证 value_default 是否满足 value_allowed 正则)\n3. value_allowed 允许 枚举: 例如` + "`" + `0|1|2` + "`" + `, ` + "`" + `ON|OFF` + "`" + ` 格式, 范围: 例如` + "`" + `(0, 1000]` + "`" + `",
+				"consumes": [
+					"application/json"
+				],
+				"produces": [
+					"application/json"
+				],
+				"tags": [
+					"plat_config"
+				],
+				"summary": "编辑平台级配置文件",
+				"parameters": [
+					{
+						"description": "ConfName for ConfType",
+						"name": "body",
+						"in": "body",
+						"required": true,
+						"schema": {
+							"$ref": "#/definitions/api.UpsertConfFilePlatReq"
+						}
+					}
+				],
+				"responses": {
+					"200": {
+						"description": "OK",
+						"schema": {
+							"$ref": "#/definitions/api.UpsertConfFilePlatResp"
+						}
+					},
+					"400": {
+						"description": "Bad Request",
+						"schema": {
+							"$ref": "#/definitions/api.HTTPClientErrResp"
+						}
+					}
+				}
+			}
+		},
+		"/bkconfig/v1/confitem/query": {
+			"post": {
+				"description": "根据业务/模块/集群的信息,获取某个配置文件的配置项。一般用户前端请求、再编辑的场景,后端服务直接获取配置文件使用 /version/generate 接口\nconf_file 可以是,号分隔的多个文件名,返回结果是一个按照配置文件名组合的一个 list\n需要指定返回格式 format, 可选值 map, list.\nmap 格式会丢弃 conf_item 的其它信息,只保留 conf_name=conf_value, 一般用于后台服务\nlist 格式会保留 conf_items 的其它信息,conf_name=conf_item,一般用于前端展示\n获取cluster级别配置时,需要提供 level_info:{\"module\":\"xxx\"} 模块信息",
+				"consumes": [
+					"application/json"
+				],
+				"produces": [
+					"application/json"
+				],
+				"tags": [
+					"config_item"
+				],
+				"summary": "获取配置文件配置项列表",
+				"parameters": [
+					{
+						"description": "GetConfigItemsReq",
+						"name": "body",
+						"in": "body",
+						"required": true,
+						"schema": {
+							"$ref": "#/definitions/GetConfigItemsReq"
+						}
+					}
+				],
+				"responses": {
+					"200": {
+						"description": "OK",
+						"schema": {
+							"type": "array",
+							"items": {
+								"$ref": "#/definitions/GetConfigItemsResp"
+							}
+						}
+					},
+					"400": {
+						"description": "Bad Request",
+						"schema": {
+							"$ref": "#/definitions/api.HTTPClientErrResp"
+						}
+					}
+				}
+			}
+		},
+		"/bkconfig/v1/confitem/save": {
+			"post": {
+				"description": "编辑层级配置,层级包括业务app、模块module、集群cluster,需要指定修改哪个级别的配置,通过 level_name, level_value 来区分\n针对编辑的配置类型 conf_type 无版本化的概念,即保存生效,无需发布\n保存 cluster级别配置时,需要提供 level_info:{\"module\":\"xxx\"} 模块信息",
+				"consumes": [
+					"application/json"
+				],
+				"produces": [
+					"application/json"
+				],
+				"tags": [
+					"config_item"
+				],
+				"summary": "编辑配置(无版本概念)",
+				"parameters": [
+					{
+						"description": "SaveConfItemsReq",
+						"name": "body",
+						"in": "body",
+						"required": true,
+						"schema": {
+							"$ref": "#/definitions/api.SaveConfItemsReq"
+						}
+					}
+				],
+				"responses": {
+					"200": {
+						"description": "OK",
+						"schema": {
+							"$ref": "#/definitions/api.UpsertConfItemsResp"
+						}
+					},
+					"400": {
+						"description": "Bad Request",
+						"schema": {
+							"$ref": "#/definitions/api.HTTPClientErrResp"
+						}
+					}
+				}
+			}
+		},
+		"/bkconfig/v1/confitem/upsert": {
+			"post": {
+				"description": "编辑层级配置,层级包括业务app、模块module、集群cluster,需要指定修改哪个级别的配置,通过 level_name, level_value 来区分\n例1: level_name=app, level_value=testapp 表示修改业务 bk_biz_id=testapp 的配置\n例2: level_name=module, level_value=account 表示某业务 bk_biz_id 的模块 module=account 的配置\nHTTP Header 指定 ` + "`" + `X-Bkapi-User-Name` + "`" + ` 请求的操作人员\n获取cluster级别配置时,需要提供 level_info:{\"module\":\"xxx\"} 模块信息",
+				"consumes": [
+					"application/json"
+				],
+				"produces": [
+					"application/json"
+				],
+				"tags": [
+					"config_item"
+				],
+				"summary": "编辑发布层级配置",
+				"parameters": [
+					{
+						"description": "UpsertConfItemsReq",
+						"name": "body",
+						"in": "body",
+						"required": true,
+						"schema": {
+							"$ref": "#/definitions/api.UpsertConfItemsReq"
+						}
+					}
+				],
+				"responses": {
+					"200": {
+						"description": "OK",
+						"schema": {
+							"$ref": "#/definitions/api.UpsertConfItemsResp"
+						}
+					},
+					"400": {
+						"description": "Bad Request",
+						"schema": {
+							"$ref": "#/definitions/api.HTTPClientErrResp"
+						}
+					}
+				}
+			}
+		},
+		"/bkconfig/v1/confname/list": {
+			"get": {
+				"description": "查询某个配置类型/配置文件的配置名列表,会排除 已锁定的平台配置",
+				"produces": [
+					"application/json"
+				],
+				"tags": [
+					"config_meta"
+				],
+				"summary": "查询预定义的配置名列表",
+				"parameters": [
+					{
+						"type": "string",
+						"example": "MySQL-5.7",
+						"name": "conf_file",
+						"in": "query",
+						"required": true
+					},
+					{
+						"type": "string",
+						"description": "如果设置,会根据前缀模糊匹配搜索",
+						"name": "conf_name",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"example": "dbconf",
+						"name": "conf_type",
+						"in": "query",
+						"required": true
+					},
+					{
+						"type": "string",
+						"example": "tendbha",
+						"name": "namespace",
+						"in": "query"
+					}
+				],
+				"responses": {
+					"200": {
+						"description": "OK",
+						"schema": {
+							"$ref": "#/definitions/QueryConfigNamesResp"
+						}
+					},
+					"400": {
+						"description": "Bad Request",
+						"schema": {
+							"$ref": "#/definitions/api.HTTPClientErrResp"
+						}
+					}
+				}
+			}
+		},
+		"/bkconfig/v1/simpleitem/list": {
+			"get": {
+				"description": "请勿使用",
+				"produces": [
+					"application/json"
+				],
+				"tags": [
+					"simple_item"
+				],
+				"summary": "查询配置项列表通用接口",
+				"parameters": [
+					{
+						"type": "string",
+						"name": "bk_biz_id",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "cluster",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "conf_file",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "conf_name",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "conf_type",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "conf_value",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "created_at",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "created_by",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "description",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "format",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "inherit_from",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "level_name",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "level_value",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "module",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "namespace",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "revision",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "updated_at",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "updated_by",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"name": "view",
+						"in": "query"
+					}
+				],
+				"responses": {
+					"200": {
+						"description": "OK",
+						"schema": {
+							"type": "array",
+							"items": {
+								"$ref": "#/definitions/model.ConfigModel"
+							}
+						}
+					},
+					"400": {
+						"description": "Bad Request",
+						"schema": {
+							"$ref": "#/definitions/api.HTTPClientErrResp"
+						}
+					}
+				}
+			}
+		},
+		"/bkconfig/v1/version/detail": {
+			"get": {
+				"description": "查询历史配置版本的详情",
+				"produces": [
+					"application/json"
+				],
+				"tags": [
+					"config_version"
+				],
+				"summary": "查询版本的详细信息",
+				"parameters": [
+					{
+						"type": "string",
+						"example": "testapp",
+						"description": "业务ID,必选项",
+						"name": "bk_biz_id",
+						"in": "query",
+						"required": true
+					},
+					{
+						"type": "string",
+						"example": "MySQL-5.7",
+						"description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+						"name": "conf_file",
+						"in": "query",
+						"required": true
+					},
+					{
+						"type": "string",
+						"example": "dbconf",
+						"description": "配置类型,如 dbconf,backup",
+						"name": "conf_type",
+						"in": "query",
+						"required": true
+					},
+					{
+						"enum": [
+							"plat",
+							"app",
+							"module",
+							"cluster"
+						],
+						"type": "string",
+						"description": "配置层级名,当前允许值 ` + "`" + `app` + "`" + `,` + "`" + `module` + "`" + `,` + "`" + `cluster` + "`" + `\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+						"name": "level_name",
+						"in": "query",
+						"required": true
+					},
+					{
+						"type": "string",
+						"description": "配置层级值",
+						"name": "level_value",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"example": "tendbha",
+						"description": "命名空间,一般指DB类型",
+						"name": "namespace",
+						"in": "query",
+						"required": true
+					},
+					{
+						"type": "string",
+						"example": "v_20220309215824",
+						"name": "revision",
+						"in": "query"
+					}
+				],
+				"responses": {
+					"200": {
+						"description": "OK",
+						"schema": {
+							"$ref": "#/definitions/api.GetVersionedDetailResp"
+						}
+					},
+					"400": {
+						"description": "Bad Request",
+						"schema": {
+							"$ref": "#/definitions/api.HTTPClientErrResp"
+						}
+					}
+				}
+			}
+		},
+		"/bkconfig/v1/version/generate": {
+			"post": {
+				"description": "从现有配置项直接生成配置文件并返回,每次调用会生成一个新版本,可以选择是否直接发布。这个接口一般用户后台服务查询配置\n修改配置并发布,使用 /confitem/upsert 接口\n直接查询配置文件内容,使用 /confitem/query 接口\n根据 ` + "`" + `method` + "`" + ` 生成方式不同,可以生成配置并存储 ` + "`" + `GenerateAndSave` + "`" + `、生成配置并存储且发布` + "`" + `GenerateAndPublish` + "`" + `\n使用 ` + "`" + `GenerateAndSave` + "`" + ` 方式需要进一步调用 PublishConfigFile 接口进行发布",
+				"consumes": [
+					"application/json"
+				],
+				"produces": [
+					"application/json"
+				],
+				"tags": [
+					"config_version"
+				],
+				"summary": "生成并获取配置文件新版本",
+				"parameters": [
+					{
+						"description": "Generate config file versioned",
+						"name": "body",
+						"in": "body",
+						"required": true,
+						"schema": {
+							"$ref": "#/definitions/GenerateConfigReq"
+						}
+					}
+				],
+				"responses": {
+					"200": {
+						"description": "OK",
+						"schema": {
+							"$ref": "#/definitions/GenerateConfigResp"
+						}
+					},
+					"400": {
+						"description": "Bad Request",
+						"schema": {
+							"$ref": "#/definitions/api.HTTPClientErrResp"
+						}
+					}
+				}
+			}
+		},
+		"/bkconfig/v1/version/list": {
+			"get": {
+				"description": "Get config file versions list",
+				"produces": [
+					"application/json"
+				],
+				"tags": [
+					"config_version"
+				],
+				"summary": "查询历史配置版本名列表",
+				"parameters": [
+					{
+						"type": "string",
+						"example": "testapp",
+						"description": "业务ID,必选项",
+						"name": "bk_biz_id",
+						"in": "query",
+						"required": true
+					},
+					{
+						"type": "string",
+						"example": "MySQL-5.7",
+						"description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+						"name": "conf_file",
+						"in": "query",
+						"required": true
+					},
+					{
+						"type": "string",
+						"example": "dbconf",
+						"description": "配置类型,如 dbconf,backup",
+						"name": "conf_type",
+						"in": "query",
+						"required": true
+					},
+					{
+						"enum": [
+							"plat",
+							"app",
+							"module",
+							"cluster"
+						],
+						"type": "string",
+						"description": "配置层级名,当前允许值 ` + "`" + `app` + "`" + `,` + "`" + `module` + "`" + `,` + "`" + `cluster` + "`" + `\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+						"name": "level_name",
+						"in": "query",
+						"required": true
+					},
+					{
+						"type": "string",
+						"description": "配置层级值",
+						"name": "level_value",
+						"in": "query"
+					},
+					{
+						"type": "string",
+						"example": "tendbha",
+						"description": "命名空间,一般指DB类型",
+						"name": "namespace",
+						"in": "query",
+						"required": true
+					}
+				],
+				"responses": {
+					"200": {
+						"description": "OK",
+						"schema": {
+							"$ref": "#/definitions/ListConfigVersionsResp"
+						}
+					},
+					"400": {
+						"description": "Bad Request",
+						"schema": {
+							"$ref": "#/definitions/api.HTTPClientErrResp"
+						}
+					}
+				}
+			}
+		},
+		"/bkconfig/v1/version/publish": {
+			"post": {
+				"description": "发布指定版本的配置文件,未发布状态的配置文件是不能使用的\n发布操作会把已有 published 状态的配置文件下线;同一个 revision 版本的配置无法重复发布\n发布时带上 patch 参数可以覆盖配置中心该版本的配置项(只有配置项值是` + "`" + `{{` + "`" + `开头的才能被覆盖)",
+				"consumes": [
+					"application/json"
+				],
+				"produces": [
+					"application/json"
+				],
+				"tags": [
+					"config_version"
+				],
+				"summary": "直接发布一个版本[废弃]",
+				"parameters": [
+					{
+						"description": "Publish config file versioned",
+						"name": "body",
+						"in": "body",
+						"required": true,
+						"schema": {
+							"$ref": "#/definitions/PublishConfigFileReq"
+						}
+					}
+				],
+				"responses": {
+					"200": {
+						"description": "OK",
+						"schema": {
+							"$ref": "#/definitions/api.HTTPOkNilResp"
+						}
+					},
+					"400": {
+						"description": "Bad Request",
+						"schema": {
+							"$ref": "#/definitions/api.HTTPClientErrResp"
+						}
+					}
+				}
+			}
+		}
+	},
+	"definitions": {
+		"GenerateConfigReq": {
+			"type": "object",
+			"required": [
+				"bk_biz_id",
+				"conf_file",
+				"conf_type",
+				"format",
+				"level_name",
+				"method",
+				"namespace"
+			],
+			"properties": {
+				"bk_biz_id": {
+					"description": "业务ID,必选项",
+					"type": "string",
+					"example": "testapp"
+				},
+				"conf_file": {
+					"description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+					"type": "string",
+					"example": "MySQL-5.7"
+				},
+				"conf_type": {
+					"description": "配置类型,如 dbconf,backup",
+					"type": "string",
+					"example": "dbconf"
+				},
+				"format": {
+					"description": "` + "`" + `map.` + "`" + `, ` + "`" + `map#` + "`" + `, ` + "`" + `map|` + "`" + ` 是特殊的map格式,返回结果会以 . 或者 # 或者 | 拆分 conf_name",
+					"type": "string",
+					"enum": [
+						"list",
+						"map",
+						"map.",
+						"map#",
+						"map|"
+					]
+				},
+				"level_info": {
+					"description": "上层级信息,如获取当前层级 cluster=c1 的配置,需要设置 level_info: {\"module\": \"m1\"} 提供cluster所属上级 module 的信息\n非必选项,目前只在查询 cluster 级别配置时需要指定模块信息有用\ntodo 将来可能本配置中心,直接请求dbmeta元数据来获取 可能的 app-module-cluster-host-instance 关系",
+					"type": "object",
+					"additionalProperties": {
+						"type": "string"
+					}
+				},
+				"level_name": {
+					"description": "配置层级名,当前允许值 ` + "`" + `app` + "`" + `,` + "`" + `module` + "`" + `,` + "`" + `cluster` + "`" + `\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+					"type": "string",
+					"enum": [
+						"plat",
+						"app",
+						"module",
+						"cluster"
+					]
+				},
+				"level_value": {
+					"description": "配置层级值",
+					"type": "string"
+				},
+				"method": {
+					"description": "method must be one of GenerateOnly|GenerateAndSave|GenerateAndPublish\n` + "`" + `GenerateOnly` + "`" + `: generate merged config\n` + "`" + `GenerateAndSave` + "`" + `: generate and save the merged config to db (snapshot).\n` + "`" + `GenerateAndPublish` + "`" + `: generate and save the merged config to db, and mark it as published (release)",
+					"type": "string",
+					"enum": [
+						"GenerateAndSave",
+						"GenerateAndPublish"
+					]
+				},
+				"namespace": {
+					"description": "命名空间,一般指DB类型",
+					"type": "string",
+					"example": "tendbha"
+				}
+			}
+		},
+		"GenerateConfigResp": {
+			"type": "object",
+			"required": [
+				"level_name"
+			],
+			"properties": {
+				"bk_biz_id": {
+					"type": "string"
+				},
+				"conf_file": {
+					"type": "string"
+				},
+				"content": {
+					"description": "content is a {conf_name:conf_type} dict like {\"a\":1, \"b\":\"string\"}",
+					"type": "object",
+					"additionalProperties": true
+				},
+				"level_name": {
+					"description": "配置层级名,当前允许值 ` + "`" + `app` + "`" + `,` + "`" + `module` + "`" + `,` + "`" + `cluster` + "`" + `\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+					"type": "string",
+					"enum": [
+						"plat",
+						"app",
+						"module",
+						"cluster"
+					]
+				},
+				"level_value": {
+					"description": "配置层级值",
+					"type": "string"
+				},
+				"revision": {
+					"description": "version name for this config_file generation",
+					"type": "string"
+				}
+			}
+		},
+		"GetConfigItemsReq": {
+			"type": "object",
+			"required": [
+				"bk_biz_id",
+				"conf_file",
+				"conf_type",
+				"format",
+				"level_name",
+				"namespace"
+			],
+			"properties": {
+				"bk_biz_id": {
+					"description": "业务ID,必选项",
+					"type": "string",
+					"example": "testapp"
+				},
+				"conf_file": {
+					"description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+					"type": "string",
+					"example": "MySQL-5.7"
+				},
+				"conf_name": {
+					"description": "指定要查询的 conf_name, 多个值以,分隔,为空表示查询该 conf_file 的所有conf_name",
+					"type": "string"
+				},
+				"conf_type": {
+					"description": "配置类型,如 dbconf,backup",
+					"type": "string",
+					"example": "dbconf"
+				},
+				"format": {
+					"description": "` + "`" + `map.` + "`" + `, ` + "`" + `map#` + "`" + `, ` + "`" + `map|` + "`" + ` 是特殊的map格式,返回结果会以 . 或者 # 或者 | 拆分 conf_name",
+					"type": "string",
+					"enum": [
+						"list",
+						"map",
+						"map.",
+						"map#",
+						"map|"
+					]
+				},
+				"level_info": {
+					"description": "上层级信息,如获取当前层级 cluster=c1 的配置,需要设置 level_info: {\"module\": \"m1\"} 提供cluster所属上级 module 的信息\n非必选项,目前只在查询 cluster 级别配置时需要指定模块信息有用\ntodo 将来可能本配置中心,直接请求dbmeta元数据来获取 可能的 app-module-cluster-host-instance 关系",
+					"type": "object",
+					"additionalProperties": {
+						"type": "string"
+					}
+				},
+				"level_name": {
+					"description": "配置层级名,当前允许值 ` + "`" + `app` + "`" + `,` + "`" + `module` + "`" + `,` + "`" + `cluster` + "`" + `\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+					"type": "string",
+					"enum": [
+						"plat",
+						"app",
+						"module",
+						"cluster"
+					]
+				},
+				"level_value": {
+					"description": "配置层级值",
+					"type": "string"
+				},
+				"namespace": {
+					"description": "命名空间,一般指DB类型",
+					"type": "string",
+					"example": "tendbha"
+				}
+			}
+		},
+		"GetConfigItemsResp": {
+			"type": "object",
+			"required": [
+				"conf_file",
+				"conf_type",
+				"level_name",
+				"namespace"
+			],
+			"properties": {
+				"bk_biz_id": {
+					"type": "string"
+				},
+				"conf_file": {
+					"description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+					"type": "string",
+					"example": "MySQL-5.7"
+				},
+				"conf_file_lc": {
+					"description": "配置文件中文名,也可以是其它 locale 语言类型",
+					"type": "string",
+					"example": "5.7_参数配置"
+				},
+				"conf_type": {
+					"description": "配置类型,如 dbconf,backup",
+					"type": "string",
+					"example": "dbconf"
+				},
+				"conf_type_lc": {
+					"description": "配置类型中文名",
+					"type": "string",
+					"example": "DB参数配置"
+				},
+				"content": {
+					"description": "content is a {conf_name:conf_type} dict like {\"a\":1, \"b\":\"string\"}",
+					"type": "object",
+					"additionalProperties": true
+				},
+				"created_at": {
+					"type": "string"
+				},
+				"description": {
+					"description": "配置文件的描述",
+					"type": "string"
+				},
+				"level_name": {
+					"description": "配置层级名,当前允许值 ` + "`" + `app` + "`" + `,` + "`" + `module` + "`" + `,` + "`" + `cluster` + "`" + `\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+					"type": "string",
+					"enum": [
+						"plat",
+						"app",
+						"module",
+						"cluster"
+					]
+				},
+				"level_value": {
+					"description": "配置层级值",
+					"type": "string"
+				},
+				"namespace": {
+					"description": "命名空间,一般指DB类型",
+					"type": "string",
+					"example": "tendbha"
+				},
+				"namespace_info": {
+					"description": "namespace信息,比如数据库版本,与 conf_file 对应",
+					"type": "string",
+					"example": "MySQL 5.7"
+				},
+				"updated_at": {
+					"type": "string"
+				},
+				"updated_by": {
+					"type": "string"
+				}
+			}
+		},
+		"ListConfigVersionsResp": {
+			"type": "object",
+			"required": [
+				"level_name"
+			],
+			"properties": {
+				"bk_biz_id": {
+					"type": "string"
+				},
+				"conf_file": {
+					"type": "string"
+				},
+				"level_name": {
+					"description": "配置层级名,当前允许值 ` + "`" + `app` + "`" + `,` + "`" + `module` + "`" + `,` + "`" + `cluster` + "`" + `\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+					"type": "string",
+					"enum": [
+						"plat",
+						"app",
+						"module",
+						"cluster"
+					]
+				},
+				"level_value": {
+					"description": "配置层级值",
+					"type": "string"
+				},
+				"namespace": {
+					"type": "string"
+				},
+				"published": {
+					"description": "version published. empty when published version is not in versions",
+					"type": "string"
+				},
+				"versions": {
+					"description": "版本列表,格式 [{\"revision\":\"v1\", \"rows_affected\":1},{\"revision\":\"v2\", \"rows_affected\":2}]",
+					"type": "array",
+					"items": {
+						"type": "object",
+						"additionalProperties": true
+					}
+				}
+			}
+		},
+		"PublishConfigFileReq": {
+			"type": "object",
+			"required": [
+				"bk_biz_id",
+				"conf_file",
+				"conf_type",
+				"namespace",
+				"revision"
+			],
+			"properties": {
+				"bk_biz_id": {
+					"description": "业务ID,必选项",
+					"type": "string",
+					"example": "testapp"
+				},
+				"cluster": {
+					"type": "string"
+				},
+				"conf_file": {
+					"description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+					"type": "string",
+					"example": "MySQL-5.7"
+				},
+				"conf_type": {
+					"description": "配置类型,如 dbconf,backup",
+					"type": "string",
+					"example": "dbconf"
+				},
+				"namespace": {
+					"description": "命名空间,一般指DB类型",
+					"type": "string",
+					"example": "tendbha"
+				},
+				"patch": {
+					"description": "patch will overwrite conf_value to versioned config_file. it's a key-value dict",
+					"type": "object",
+					"additionalProperties": {
+						"type": "string"
+					}
+				},
+				"revision": {
+					"description": "the version you want to publish",
+					"type": "string",
+					"example": "v_20220309161928"
+				}
+			}
+		},
+		"QueryConfigNamesResp": {
+			"type": "object",
+			"properties": {
+				"conf_file": {
+					"type": "string"
+				},
+				"conf_names": {
+					"type": "object",
+					"additionalProperties": {
+						"$ref": "#/definitions/api.ConfNameDef"
+					}
+				}
+			}
+		},
+		"api.ConfFileDef": {
+			"type": "object",
+			"required": [
+				"conf_file",
+				"conf_type",
+				"namespace"
+			],
+			"properties": {
+				"conf_file": {
+					"description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+					"type": "string",
+					"example": "MySQL-5.7"
+				},
+				"conf_file_lc": {
+					"description": "配置文件中文名,也可以是其它 locale 语言类型",
+					"type": "string",
+					"example": "5.7_参数配置"
+				},
+				"conf_type": {
+					"description": "配置类型,如 dbconf,backup",
+					"type": "string",
+					"example": "dbconf"
+				},
+				"conf_type_lc": {
+					"description": "配置类型中文名",
+					"type": "string",
+					"example": "DB参数配置"
+				},
+				"description": {
+					"description": "配置文件的描述",
+					"type": "string"
+				},
+				"namespace": {
+					"description": "命名空间,一般指DB类型",
+					"type": "string",
+					"example": "tendbha"
+				},
+				"namespace_info": {
+					"description": "namespace信息,比如数据库版本,与 conf_file 对应",
+					"type": "string",
+					"example": "MySQL 5.7"
+				}
+			}
+		},
+		"api.ConfNameDef": {
+			"type": "object",
+			"required": [
+				"conf_name",
+				"value_type"
+			],
+			"properties": {
+				"conf_name": {
+					"description": "配置项,也叫参数项",
+					"type": "string"
+				},
+				"conf_name_lc": {
+					"description": "配置项中文名,可不填",
+					"type": "string"
+				},
+				"description": {
+					"description": "配置项说明",
+					"type": "string"
+				},
+				"flag_disable": {
+					"description": "是否禁用,代表该配置项状态. 默认0启用",
+					"type": "integer",
+					"example": 0
+				},
+				"flag_locked": {
+					"description": "是否锁定. 默认0",
+					"type": "integer",
+					"example": 0
+				},
+				"need_restart": {
+					"description": "是否需要重启生效. 默认1",
+					"type": "integer",
+					"example": 1
+				},
+				"value_allowed": {
+					"description": "允许设定值,如枚举/范围等,为空时表示不限制范围\n当 value_type_sub=ENUM 时,value_allowed 格式 0|1 或者 ON|OFF 或者 aaa|bbb|ccc , 会校验value的合法性\n当 value_type_sub=REGEX 时,会根据 value_allowed 进行正则校验\n当 value_type_sub=RANGE 时,也会校验value 范围的合法性.\n - BYTES 是一种特殊的RANGE,value允许1mm 但value_allowed 必须是数字的range",
+					"type": "string"
+				},
+				"value_default": {
+					"description": "配置项默认值",
+					"type": "string",
+					"example": "1"
+				},
+				"value_type": {
+					"description": "配置项的值类型,如 ` + "`" + `STRING` + "`" + `,` + "`" + `INT` + "`" + `,` + "`" + `FLOAT` + "`" + `,` + "`" + `NUMBER` + "`" + `",
+					"type": "string",
+					"enum": [
+						"STRING",
+						"INT",
+						"FLOAT",
+						"NUMBER"
+					],
+					"example": "STRING"
+				},
+				"value_type_sub": {
+					"description": "value_type 的子类型,如果设置则用于校验 value_type 的具体类型,或者返回用于告知前端控件类型,例如 ENUM,RANGE",
+					"type": "string",
+					"enum": [
+						"",
+						"STRING",
+						"ENUM",
+						"RANGE",
+						"BYTES",
+						"REGEX",
+						"JSON",
+						"COMPLEX"
+					],
+					"example": "ENUM"
+				}
+			}
+		},
+		"api.GetVersionedDetailResp": {
+			"type": "object",
+			"properties": {
+				"configs": {
+					"description": "配置项",
+					"type": "object",
+					"additionalProperties": true
+				},
+				"configs_diff": {
+					"description": "与上一个版本的差异",
+					"type": "object",
+					"additionalProperties": true
+				},
+				"content": {
+					"type": "string"
+				},
+				"created_at": {
+					"description": "发布时间",
+					"type": "string"
+				},
+				"created_by": {
+					"description": "发布人",
+					"type": "string"
+				},
+				"description": {
+					"type": "string"
+				},
+				"id": {
+					"type": "integer"
+				},
+				"is_published": {
+					"type": "integer"
+				},
+				"pre_revision": {
+					"description": "上一个版本好",
+					"type": "string"
+				},
+				"revision": {
+					"description": "版本号",
+					"type": "string"
+				},
+				"rows_affected": {
+					"description": "相对上一个版本 影响行数",
+					"type": "integer"
+				}
+			}
+		},
+		"api.HTTPClientErrResp": {
+			"type": "object",
+			"properties": {
+				"code": {
+					"type": "integer",
+					"example": 400
+				},
+				"data": {},
+				"message": {
+					"description": "status bad request",
+					"type": "string",
+					"example": "输入参数错误"
+				}
+			}
+		},
+		"api.HTTPOkNilResp": {
+			"type": "object",
+			"properties": {
+				"code": {
+					"type": "integer",
+					"example": 200
+				},
+				"data": {},
+				"message": {
+					"type": "string"
+				}
+			}
+		},
+		"api.ListConfFileResp": {
+			"type": "object",
+			"required": [
+				"conf_file",
+				"conf_type",
+				"namespace"
+			],
+			"properties": {
+				"conf_file": {
+					"description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+					"type": "string",
+					"example": "MySQL-5.7"
+				},
+				"conf_file_lc": {
+					"description": "配置文件中文名,也可以是其它 locale 语言类型",
+					"type": "string",
+					"example": "5.7_参数配置"
+				},
+				"conf_type": {
+					"description": "配置类型,如 dbconf,backup",
+					"type": "string",
+					"example": "dbconf"
+				},
+				"conf_type_lc": {
+					"description": "配置类型中文名",
+					"type": "string",
+					"example": "DB参数配置"
+				},
+				"created_at": {
+					"description": "创建时间",
+					"type": "string"
+				},
+				"description": {
+					"description": "配置文件的描述",
+					"type": "string"
+				},
+				"namespace": {
+					"description": "命名空间,一般指DB类型",
+					"type": "string",
+					"example": "tendbha"
+				},
+				"namespace_info": {
+					"description": "namespace信息,比如数据库版本,与 conf_file 对应",
+					"type": "string",
+					"example": "MySQL 5.7"
+				},
+				"updated_at": {
+					"description": "更新时间",
+					"type": "string"
+				},
+				"updated_by": {
+					"description": "更新人",
+					"type": "string"
+				}
+			}
+		},
+		"api.SaveConfItemsReq": {
+			"type": "object",
+			"required": [
+				"bk_biz_id",
+				"level_name"
+			],
+			"properties": {
+				"bk_biz_id": {
+					"description": "业务ID,必选项",
+					"type": "string",
+					"example": "testapp"
+				},
+				"conf_file_info": {
+					"$ref": "#/definitions/api.ConfFileDef"
+				},
+				"conf_items": {
+					"type": "array",
+					"items": {
+						"$ref": "#/definitions/api.UpsertConfItem"
+					}
+				},
+				"confirm": {
+					"description": "保存时如果与下层级存在冲突,提示确认,用 confirm=1 重新请求",
+					"type": "integer"
+				},
+				"description": {
+					"description": "发布描述",
+					"type": "string"
+				},
+				"level_info": {
+					"description": "上层级信息,如获取当前层级 cluster=c1 的配置,需要设置 level_info: {\"module\": \"m1\"} 提供cluster所属上级 module 的信息\n非必选项,目前只在查询 cluster 级别配置时需要指定模块信息有用\ntodo 将来可能本配置中心,直接请求dbmeta元数据来获取 可能的 app-module-cluster-host-instance 关系",
+					"type": "object",
+					"additionalProperties": {
+						"type": "string"
+					}
+				},
+				"level_name": {
+					"description": "配置层级名,当前允许值 ` + "`" + `app` + "`" + `,` + "`" + `module` + "`" + `,` + "`" + `cluster` + "`" + `\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+					"type": "string",
+					"enum": [
+						"plat",
+						"app",
+						"module",
+						"cluster"
+					]
+				},
+				"level_value": {
+					"description": "配置层级值",
+					"type": "string"
+				}
+			}
+		},
+		"api.UpsertConfFilePlatReq": {
+			"type": "object",
+			"required": [
+				"conf_file",
+				"conf_type",
+				"namespace",
+				"req_type"
+			],
+			"properties": {
+				"conf_file": {
+					"description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+					"type": "string",
+					"example": "MySQL-5.7"
+				},
+				"conf_file_lc": {
+					"description": "配置文件中文名,也可以是其它 locale 语言类型",
+					"type": "string",
+					"example": "5.7_参数配置"
+				},
+				"conf_names": {
+					"description": "如果revision为空,表示第一次保存。每次 update 操作都会返回 revision,确保在这一轮编辑操作下都是操作这个revision\n已发布的 revision 不能编辑\nRevision string ` + "`" + `json:\"revision\" form:\"revision\"` + "`" + `",
+					"type": "array",
+					"items": {
+						"$ref": "#/definitions/api.UpsertConfNames"
+					}
+				},
+				"conf_type": {
+					"description": "配置类型,如 dbconf,backup",
+					"type": "string",
+					"example": "dbconf"
+				},
+				"conf_type_lc": {
+					"description": "配置类型中文名",
+					"type": "string",
+					"example": "DB参数配置"
+				},
+				"confirm": {
+					"description": "保存时如果与下层级存在冲突,提示确认,用 confirm=1 重新请求",
+					"type": "integer"
+				},
+				"description": {
+					"description": "配置文件的描述",
+					"type": "string"
+				},
+				"file_id": {
+					"description": "新建配置文件,第一次保存返回 file_id, 后续保存/发布 需传入 file_id",
+					"type": "integer"
+				},
+				"namespace": {
+					"description": "命名空间,一般指DB类型",
+					"type": "string",
+					"example": "tendbha"
+				},
+				"namespace_info": {
+					"description": "namespace信息,比如数据库版本,与 conf_file 对应",
+					"type": "string",
+					"example": "MySQL 5.7"
+				},
+				"req_type": {
+					"description": "配置文件修改动作的请求类型,` + "`" + `SaveOnly` + "`" + `: 仅保存, ` + "`" + `SaveAndPublish` + "`" + `保存并发布",
+					"type": "string",
+					"enum": [
+						"SaveOnly",
+						"SaveAndPublish"
+					]
+				}
+			}
+		},
+		"api.UpsertConfFilePlatResp": {
+			"type": "object",
+			"required": [
+				"conf_file",
+				"conf_type",
+				"namespace"
+			],
+			"properties": {
+				"conf_file": {
+					"description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+					"type": "string",
+					"example": "MySQL-5.7"
+				},
+				"conf_type": {
+					"description": "配置类型,如 dbconf,backup",
+					"type": "string",
+					"example": "dbconf"
+				},
+				"file_id": {
+					"type": "integer"
+				},
+				"is_published": {
+					"type": "integer"
+				},
+				"namespace": {
+					"description": "命名空间,一般指DB类型",
+					"type": "string",
+					"example": "tendbha"
+				},
+				"revision": {
+					"description": "编辑配置文件,仅保存时不会产生 revision,保存并发布时才返回",
+					"type": "string"
+				}
+			}
+		},
+		"api.UpsertConfItem": {
+			"type": "object",
+			"required": [
+				"conf_name",
+				"op_type"
+			],
+			"properties": {
+				"conf_name": {
+					"description": "配置项名称",
+					"type": "string"
+				},
+				"conf_value": {
+					"type": "string"
+				},
+				"description": {
+					"type": "string"
+				},
+				"extra_info": {
+					"type": "string"
+				},
+				"flag_disable": {
+					"description": "是否禁用,默认 0 表示启用. 1表示禁用",
+					"type": "integer",
+					"example": 0
+				},
+				"flag_locked": {
+					"description": "是否锁定,默认 0 表上不锁定",
+					"type": "integer",
+					"example": 0
+				},
+				"op_type": {
+					"description": "配置项修改动作,需提供操作类型字段,允许值 ` + "`" + `add` + "`" + `,` + "`" + `update` + "`" + `,` + "`" + `remove` + "`" + `",
+					"type": "string",
+					"enum": [
+						"add",
+						"update",
+						"remove"
+					]
+				}
+			}
+		},
+		"api.UpsertConfItemsReq": {
+			"type": "object",
+			"required": [
+				"bk_biz_id",
+				"level_name",
+				"req_type"
+			],
+			"properties": {
+				"bk_biz_id": {
+					"description": "业务ID,必选项",
+					"type": "string",
+					"example": "testapp"
+				},
+				"conf_file_info": {
+					"$ref": "#/definitions/api.ConfFileDef"
+				},
+				"conf_items": {
+					"type": "array",
+					"items": {
+						"$ref": "#/definitions/api.UpsertConfItem"
+					}
+				},
+				"confirm": {
+					"description": "保存时如果与下层级存在冲突,提示确认,用 confirm=1 重新请求",
+					"type": "integer"
+				},
+				"description": {
+					"description": "发布描述",
+					"type": "string"
+				},
+				"level_info": {
+					"description": "上层级信息,如获取当前层级 cluster=c1 的配置,需要设置 level_info: {\"module\": \"m1\"} 提供cluster所属上级 module 的信息\n非必选项,目前只在查询 cluster 级别配置时需要指定模块信息有用\ntodo 将来可能本配置中心,直接请求dbmeta元数据来获取 可能的 app-module-cluster-host-instance 关系",
+					"type": "object",
+					"additionalProperties": {
+						"type": "string"
+					}
+				},
+				"level_name": {
+					"description": "配置层级名,当前允许值 ` + "`" + `app` + "`" + `,` + "`" + `module` + "`" + `,` + "`" + `cluster` + "`" + `\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+					"type": "string",
+					"enum": [
+						"plat",
+						"app",
+						"module",
+						"cluster"
+					]
+				},
+				"level_value": {
+					"description": "配置层级值",
+					"type": "string"
+				},
+				"req_type": {
+					"description": "配置文件修改动作的请求类型,` + "`" + `SaveOnly` + "`" + `: 仅保存, ` + "`" + `SaveAndPublish` + "`" + `保存并发布",
+					"type": "string",
+					"enum": [
+						"SaveOnly",
+						"SaveAndPublish"
+					]
+				},
+				"revision": {
+					"type": "string"
+				}
+			}
+		},
+		"api.UpsertConfItemsResp": {
+			"type": "object",
+			"required": [
+				"conf_file",
+				"conf_type",
+				"namespace"
+			],
+			"properties": {
+				"bk_biz_id": {
+					"type": "string"
+				},
+				"conf_file": {
+					"description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+					"type": "string",
+					"example": "MySQL-5.7"
+				},
+				"conf_type": {
+					"description": "配置类型,如 dbconf,backup",
+					"type": "string",
+					"example": "dbconf"
+				},
+				"is_published": {
+					"type": "integer"
+				},
+				"namespace": {
+					"description": "命名空间,一般指DB类型",
+					"type": "string",
+					"example": "tendbha"
+				},
+				"revision": {
+					"description": "编辑配置文件,第一次保存返回 revision, 后续保存/发布 需传入 revision",
+					"type": "string"
+				}
+			}
+		},
+		"api.UpsertConfNames": {
+			"type": "object",
+			"required": [
+				"conf_name",
+				"op_type",
+				"value_type"
+			],
+			"properties": {
+				"conf_name": {
+					"description": "配置项,也叫参数项",
+					"type": "string"
+				},
+				"conf_name_lc": {
+					"description": "配置项中文名,可不填",
+					"type": "string"
+				},
+				"description": {
+					"description": "配置项说明",
+					"type": "string"
+				},
+				"flag_disable": {
+					"description": "是否禁用,代表该配置项状态. 默认0启用",
+					"type": "integer",
+					"example": 0
+				},
+				"flag_locked": {
+					"description": "是否锁定. 默认0",
+					"type": "integer",
+					"example": 0
+				},
+				"need_restart": {
+					"description": "是否需要重启生效. 默认1",
+					"type": "integer",
+					"example": 1
+				},
+				"op_type": {
+					"description": "配置项修改动作,需提供操作类型字段,允许值 ` + "`" + `add` + "`" + `,` + "`" + `update` + "`" + `,` + "`" + `remove` + "`" + `",
+					"type": "string",
+					"enum": [
+						"add",
+						"update",
+						"remove"
+					]
+				},
+				"value_allowed": {
+					"description": "允许设定值,如枚举/范围等,为空时表示不限制范围\n当 value_type_sub=ENUM 时,value_allowed 格式 0|1 或者 ON|OFF 或者 aaa|bbb|ccc , 会校验value的合法性\n当 value_type_sub=REGEX 时,会根据 value_allowed 进行正则校验\n当 value_type_sub=RANGE 时,也会校验value 范围的合法性.\n - BYTES 是一种特殊的RANGE,value允许1mm 但value_allowed 必须是数字的range",
+					"type": "string"
+				},
+				"value_default": {
+					"description": "配置项默认值",
+					"type": "string",
+					"example": "1"
+				},
+				"value_type": {
+					"description": "配置项的值类型,如 ` + "`" + `STRING` + "`" + `,` + "`" + `INT` + "`" + `,` + "`" + `FLOAT` + "`" + `,` + "`" + `NUMBER` + "`" + `",
+					"type": "string",
+					"enum": [
+						"STRING",
+						"INT",
+						"FLOAT",
+						"NUMBER"
+					],
+					"example": "STRING"
+				},
+				"value_type_sub": {
+					"description": "value_type 的子类型,如果设置则用于校验 value_type 的具体类型,或者返回用于告知前端控件类型,例如 ENUM,RANGE",
+					"type": "string",
+					"enum": [
+						"",
+						"STRING",
+						"ENUM",
+						"RANGE",
+						"BYTES",
+						"REGEX",
+						"JSON",
+						"COMPLEX"
+					],
+					"example": "ENUM"
+				}
+			}
+		},
+		"model.ConfigModel": {
+			"type": "object",
+			"properties": {
+				"bk_biz_id": {
+					"type": "string"
+				},
+				"conf_file": {
+					"type": "string"
+				},
+				"conf_name": {
+					"type": "string"
+				},
+				"conf_type": {
+					"type": "string"
+				},
+				"conf_value": {
+					"type": "string"
+				},
+				"created_at": {
+					"type": "string"
+				},
+				"description": {
+					"type": "string"
+				},
+				"extra_info": {
+					"type": "string"
+				},
+				"flag_disable": {
+					"type": "integer"
+				},
+				"flag_locked": {
+					"type": "integer"
+				},
+				"id": {
+					"type": "integer"
+				},
+				"level_locked": {
+					"type": "string"
+				},
+				"level_name": {
+					"type": "string"
+				},
+				"level_value": {
+					"type": "string"
+				},
+				"namespace": {
+					"type": "string"
+				},
+				"updated_at": {
+					"type": "string"
+				}
+			}
+		}
+	},
+	"securityDefinitions": {
+		"BasicAuth": {
+			"type": "basic"
+		}
+	}
+}`
+
+// SwaggerInfo holds exported Swagger Info so clients can modify it
+var SwaggerInfo = &swag.Spec{
+	Version:          "0.0.1",
+	Host:             "localhost:8080",
+	BasePath:         "/",
+	Schemes:          []string{"http"},
+	Title:            "bkconfigsvr API",
+	Description:      "This is a bkconfigsvr celler server.",
+	InfoInstanceName: "swagger",
+	SwaggerTemplate:  docTemplate,
+}
+
+func init() {
+	swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo)
+}
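+
+// A minimal usage sketch (an assumption, not part of the generated output):
+// a server embedding this package may override spec fields at startup,
+// before the first request renders the template, e.g.
+//
+//	docs.SwaggerInfo.Host = "bkconfigsvr.example.com:8080" // hypothetical host
+//	docs.SwaggerInfo.BasePath = "/"
+//	// swag.ReadDoc() then renders docTemplate with the updated values.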
diff --git a/dbm-services/common/db-config/docs/embed_apidoc.go b/dbm-services/common/db-config/docs/embed_apidoc.go
new file mode 100644
index 0000000000..d500d39caa
--- /dev/null
+++ b/dbm-services/common/db-config/docs/embed_apidoc.go
@@ -0,0 +1,10 @@
+package docs
+
+import (
+	"embed"
+)
+
+// SwaggerDocs embeds the generated swagger.json so the API spec can be served at runtime.
+//
+//go:embed swagger.json
+var SwaggerDocs embed.FS
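+
+// A minimal sketch (assumed usage, not part of this file) of serving the
+// embedded spec with only the standard library:
+//
+//	import "net/http"
+//
+//	http.Handle("/docs/", http.StripPrefix("/docs/",
+//		http.FileServer(http.FS(docs.SwaggerDocs))))
+//	// GET /docs/swagger.json then returns the embedded file.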
diff --git a/dbm-services/common/db-config/docs/swagger.json b/dbm-services/common/db-config/docs/swagger.json
new file mode 100644
index 0000000000..0c0f0c7755
--- /dev/null
+++ b/dbm-services/common/db-config/docs/swagger.json
@@ -0,0 +1,1825 @@
+{
+    "schemes": [
+        "http"
+    ],
+    "swagger": "2.0",
+    "info": {
+        "description": "This is a bkconfigsvr celler server.",
+        "title": "bkconfigsvr API",
+        "termsOfService": "http://swagger.io/terms/",
+        "contact": {
+            "name": "API Support",
+            "url": "http://www.swagger.io/support",
+            "email": "support@swagger.io"
+        },
+        "license": {
+            "name": "Apache 2.0",
+            "url": "http://www.apache.org/licenses/LICENSE-2.0.html"
+        },
+        "version": "0.0.1"
+    },
+    "host": "localhost:8080",
+    "basePath": "/",
+    "paths": {
+        "/bkconfig/v1/conffile/add": {
+            "post": {
+                "description": "新增平台级配置文件,定义允许的配置名。指定 req_type 为 `SaveOnly` 仅保存, `SaveAndPublish` 保存并发布。保存并发布 也必须提供全量,而不能是前面保存基础上的增量\nreq_type=`SaveOnly` 已废弃\n第一次保存时,会返回 `file_id`,下次 保存/发布 需传入 `file_id`\nnamespace,conf_type,conf_file 唯一确定一个配置文件,不同DB版本信息体现在 conf_file 里 (如MySQL-5.7), namespace_info 可以存前端传入的 数据库版本,只用于在展示\nHTTP Header 指定 `X-Bkapi-User-Name` 请求的操作人员",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "plat_config"
+                ],
+                "summary": "新增平台级配置文件",
+                "parameters": [
+                    {
+                        "description": "ConfName for ConfType",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/api.UpsertConfFilePlatReq"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/api.UpsertConfFilePlatResp"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.HTTPClientErrResp"
+                        }
+                    }
+                }
+            }
+        },
+        "/bkconfig/v1/conffile/list": {
+            "get": {
+                "description": "查询配置文件模板列表。只有平台和业务才有配置文件列表\n返回的 updated_by 代表操作人",
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "plat_config"
+                ],
+                "summary": "查询配置文件列表",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "description": "业务id, bk_biz_id=0 代表平台配置",
+                        "name": "bk_biz_id",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "string",
+                        "description": "如果指定了 conf_file 则只查这一个文件信息",
+                        "name": "conf_file",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "example": "dbconf",
+                        "name": "conf_type",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "enum": [
+                            "plat",
+                            "app",
+                            "module",
+                            "cluster"
+                        ],
+                        "type": "string",
+                        "description": "配置层级名,当前允许值 `app`,`module`,`cluster`\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+                        "name": "level_name",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "string",
+                        "description": "配置层级值",
+                        "name": "level_value",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "description": "命名空间,一般指DB类型",
+                        "name": "namespace",
+                        "in": "query",
+                        "required": true
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "array",
+                            "items": {
+                                "$ref": "#/definitions/api.ListConfFileResp"
+                            }
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.HTTPClientErrResp"
+                        }
+                    }
+                }
+            }
+        },
+        "/bkconfig/v1/conffile/query": {
+            "get": {
+                "description": "查询 平台配置 某个配置类型/配置文件的所有配置名列表",
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "plat_config"
+                ],
+                "summary": "查询平台配置项列表",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "example": "MySQL-5.7",
+                        "name": "conf_file",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "string",
+                        "description": "如果设置,会根据前缀模糊匹配搜索",
+                        "name": "conf_name",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "example": "dbconf",
+                        "name": "conf_type",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "string",
+                        "example": "tendbha",
+                        "name": "namespace",
+                        "in": "query"
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/QueryConfigNamesResp"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.HTTPClientErrResp"
+                        }
+                    }
+                }
+            }
+        },
+        "/bkconfig/v1/conffile/update": {
+            "post": {
+                "description": "编辑平台级配置文件。指定 req_type 为 `SaveOnly` 仅保存, `SaveAndPublish` 保存并发布\nHTTP Header 指定 `X-Bkapi-User-Name` 请求的操作人员\n编辑平台配置时,如果设置 flag_disable=1 时,该配置不会显示在平台配置项列表,相当于管理 所有允许的配置项列表\n保存时会校验输入的 value_default, value_type, value_allowed\n1. value_type 目前允许 STRING, INT, FLOAT, NUMBER\n2. value_type_sub 允许 ENUM, ENUMS, RANGE, STRING, JSON, REGEX(一种特殊的STRING,会验证 value_default 是否满足 value_allowed 正则)\n3. value_allowed 允许 枚举: 例如`0|1|2`, `ON|OFF` 格式, 范围: 例如`(0, 1000]`",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "plat_config"
+                ],
+                "summary": "编辑平台级配置文件",
+                "parameters": [
+                    {
+                        "description": "ConfName for ConfType",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/api.UpsertConfFilePlatReq"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/api.UpsertConfFilePlatResp"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.HTTPClientErrResp"
+                        }
+                    }
+                }
+            }
+        },
+        "/bkconfig/v1/confitem/query": {
+            "post": {
+                "description": "根据业务/模块/集群的信息,获取某个配置文件的配置项。一般用户前端请求、再编辑的场景,后端服务直接获取配置文件使用 /version/generate 接口\nconf_file 可以是,号分隔的多个文件名,返回结果是一个按照配置文件名组合的一个 list\n需要指定返回格式 format, 可选值 map, list.\nmap 格式会丢弃 conf_item 的其它信息,只保留 conf_name=conf_value, 一般用于后台服务\nlist 格式会保留 conf_items 的其它信息,conf_name=conf_item,一般用于前端展示\n获取cluster级别配置时,需要提供 level_info:{\"module\":\"xxx\"} 模块信息",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "config_item"
+                ],
+                "summary": "获取配置文件配置项列表",
+                "parameters": [
+                    {
+                        "description": "GetConfigItemsReq",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/GetConfigItemsReq"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "array",
+                            "items": {
+                                "$ref": "#/definitions/GetConfigItemsResp"
+                            }
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.HTTPClientErrResp"
+                        }
+                    }
+                }
+            }
+        },
+        "/bkconfig/v1/confitem/save": {
+            "post": {
+                "description": "编辑层级配置,层级包括业务app、模块module、集群cluster,需要指定修改哪个级别的配置,通过 level_name, level_value 来区分\n针对编辑的配置类型 conf_type 无版本化的概念,即保存生效,无需发布\n保存 cluster级别配置时,需要提供 level_info:{\"module\":\"xxx\"} 模块信息",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "config_item"
+                ],
+                "summary": "编辑配置(无版本概念)",
+                "parameters": [
+                    {
+                        "description": "SaveConfItemsReq",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/api.SaveConfItemsReq"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/api.UpsertConfItemsResp"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.HTTPClientErrResp"
+                        }
+                    }
+                }
+            }
+        },
+        "/bkconfig/v1/confitem/upsert": {
+            "post": {
+                "description": "编辑层级配置,层级包括业务app、模块module、集群cluster,需要指定修改哪个级别的配置,通过 level_name, level_value 来区分\n例1: level_name=app, level_value=testapp 表示修改业务 bk_biz_id=testapp 的配置\n例2: level_name=module, level_value=account 表示某业务 bk_biz_id 的模块 module=account 的配置\nHTTP Header 指定 `X-Bkapi-User-Name` 请求的操作人员\n获取cluster级别配置时,需要提供 level_info:{\"module\":\"xxx\"} 模块信息",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "config_item"
+                ],
+                "summary": "编辑发布层级配置",
+                "parameters": [
+                    {
+                        "description": "UpsertConfItemsReq",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/api.UpsertConfItemsReq"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/api.UpsertConfItemsResp"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.HTTPClientErrResp"
+                        }
+                    }
+                }
+            }
+        },
+        "/bkconfig/v1/confname/list": {
+            "get": {
+                "description": "查询某个配置类型/配置文件的配置名列表,会排除 已锁定的平台配置",
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "config_meta"
+                ],
+                "summary": "查询预定义的配置名列表",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "example": "MySQL-5.7",
+                        "name": "conf_file",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "string",
+                        "description": "如果设置,会根据前缀模糊匹配搜索",
+                        "name": "conf_name",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "example": "dbconf",
+                        "name": "conf_type",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "string",
+                        "example": "tendbha",
+                        "name": "namespace",
+                        "in": "query"
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/QueryConfigNamesResp"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.HTTPClientErrResp"
+                        }
+                    }
+                }
+            }
+        },
+        "/bkconfig/v1/simpleitem/list": {
+            "get": {
+                "description": "请勿使用",
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "simple_item"
+                ],
+                "summary": "查询配置项列表通用接口",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "name": "bk_biz_id",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "cluster",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "conf_file",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "conf_name",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "conf_type",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "conf_value",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "created_at",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "created_by",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "description",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "format",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "inherit_from",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "level_name",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "level_value",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "module",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "namespace",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "revision",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "updated_at",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "updated_by",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "name": "view",
+                        "in": "query"
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "array",
+                            "items": {
+                                "$ref": "#/definitions/model.ConfigModel"
+                            }
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.HTTPClientErrResp"
+                        }
+                    }
+                }
+            }
+        },
+        "/bkconfig/v1/version/detail": {
+            "get": {
+                "description": "查询历史配置版本的详情",
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "config_version"
+                ],
+                "summary": "查询版本的详细信息",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "example": "testapp",
+                        "description": "业务ID,必选项",
+                        "name": "bk_biz_id",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "string",
+                        "example": "MySQL-5.7",
+                        "description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+                        "name": "conf_file",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "string",
+                        "example": "dbconf",
+                        "description": "配置类型,如 dbconf,backup",
+                        "name": "conf_type",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "enum": [
+                            "plat",
+                            "app",
+                            "module",
+                            "cluster"
+                        ],
+                        "type": "string",
+                        "description": "配置层级名,当前允许值 `app`,`module`,`cluster`\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+                        "name": "level_name",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "string",
+                        "description": "配置层级值",
+                        "name": "level_value",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "example": "tendbha",
+                        "description": "命名空间,一般指DB类型",
+                        "name": "namespace",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "string",
+                        "example": "v_20220309215824",
+                        "name": "revision",
+                        "in": "query"
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/api.GetVersionedDetailResp"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.HTTPClientErrResp"
+                        }
+                    }
+                }
+            }
+        },
+        "/bkconfig/v1/version/generate": {
+            "post": {
+                "description": "从现有配置项直接生成配置文件并返回,每次调用会生成一个新版本,可以选择是否直接发布。这个接口一般用户后台服务查询配置\n修改配置并发布,使用 /confitem/upsert 接口\n直接查询配置文件内容,使用 /confitem/query 接口\n根据 `method` 生成方式不同,可以生成配置并存储 `GenerateAndSave`、生成配置并存储且发布`GenerateAndPublish`\n使用 `GenerateAndSave` 方式需要进一步调用 PublishConfigFile 接口进行发布",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "config_version"
+                ],
+                "summary": "生成并获取配置文件新版本",
+                "parameters": [
+                    {
+                        "description": "Generate config file versioned",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/GenerateConfigReq"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/GenerateConfigResp"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.HTTPClientErrResp"
+                        }
+                    }
+                }
+            }
+        },
+        "/bkconfig/v1/version/list": {
+            "get": {
+                "description": "Get config file versions list",
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "config_version"
+                ],
+                "summary": "查询历史配置版本名列表",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "example": "testapp",
+                        "description": "业务ID,必选项",
+                        "name": "bk_biz_id",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "string",
+                        "example": "MySQL-5.7",
+                        "description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+                        "name": "conf_file",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "string",
+                        "example": "dbconf",
+                        "description": "配置类型,如 dbconf,backup",
+                        "name": "conf_type",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "enum": [
+                            "plat",
+                            "app",
+                            "module",
+                            "cluster"
+                        ],
+                        "type": "string",
+                        "description": "配置层级名,当前允许值 `app`,`module`,`cluster`\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+                        "name": "level_name",
+                        "in": "query",
+                        "required": true
+                    },
+                    {
+                        "type": "string",
+                        "description": "配置层级值",
+                        "name": "level_value",
+                        "in": "query"
+                    },
+                    {
+                        "type": "string",
+                        "example": "tendbha",
+                        "description": "命名空间,一般指DB类型",
+                        "name": "namespace",
+                        "in": "query",
+                        "required": true
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/ListConfigVersionsResp"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.HTTPClientErrResp"
+                        }
+                    }
+                }
+            }
+        },
+        "/bkconfig/v1/version/publish": {
+            "post": {
+                "description": "发布指定版本的配置文件,未发布状态的配置文件是不能使用的\n发布操作会把已有 published 状态的配置文件下线;同一个 revision 版本的配置无法重复发布\n发布时带上 patch 参数可以覆盖配置中心该版本的配置项(只有配置项值是`{{`开头的才能被覆盖)",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "config_version"
+                ],
+                "summary": "直接发布一个版本[废弃]",
+                "parameters": [
+                    {
+                        "description": "Publish config file versioned",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/PublishConfigFileReq"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/api.HTTPOkNilResp"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.HTTPClientErrResp"
+                        }
+                    }
+                }
+            }
+        }
+    },
+    "definitions": {
+        "GenerateConfigReq": {
+            "type": "object",
+            "required": [
+                "bk_biz_id",
+                "conf_file",
+                "conf_type",
+                "format",
+                "level_name",
+                "method",
+                "namespace"
+            ],
+            "properties": {
+                "bk_biz_id": {
+                    "description": "业务ID,必选项",
+                    "type": "string",
+                    "example": "testapp"
+                },
+                "conf_file": {
+                    "description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+                    "type": "string",
+                    "example": "MySQL-5.7"
+                },
+                "conf_type": {
+                    "description": "配置类型,如 dbconf,backup",
+                    "type": "string",
+                    "example": "dbconf"
+                },
+                "format": {
+                    "description": "`map.`, `map#`, `map|` 是特殊的map格式,返回结果会以 . 或者 # 或者 | 拆分 conf_name",
+                    "type": "string",
+                    "enum": [
+                        "list",
+                        "map",
+                        "map.",
+                        "map#",
+                        "map|"
+                    ]
+                },
+                "level_info": {
+                    "description": "上层级信息,如获取当前层级 cluster=c1 的配置,需要设置 level_info: {\"module\": \"m1\"} 提供cluster所属上级 module 的信息\n非必选项,目前只在查询 cluster 级别配置时需要指定模块信息有用\ntodo 将来可能本配置中心,直接请求dbmeta元数据来获取 可能的 app-module-cluster-host-instance 关系",
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "string"
+                    }
+                },
+                "level_name": {
+                    "description": "配置层级名,当前允许值 `app`,`module`,`cluster`\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+                    "type": "string",
+                    "enum": [
+                        "plat",
+                        "app",
+                        "module",
+                        "cluster"
+                    ]
+                },
+                "level_value": {
+                    "description": "配置层级值",
+                    "type": "string"
+                },
+                "method": {
+                    "description": "method must be one of GenerateOnly|GenerateAndSave|GenerateAndPublish\n`GenerateOnly`: generate merged config\n`GenerateAndSave`: generate and save the merged config to db (snapshot).\n`GenerateAndPublish`: generate and save the merged config to db, and mark it as published (release)",
+                    "type": "string",
+                    "enum": [
+                        "GenerateAndSave",
+                        "GenerateAndPublish"
+                    ]
+                },
+                "namespace": {
+                    "description": "命名空间,一般指DB类型",
+                    "type": "string",
+                    "example": "tendbha"
+                }
+            }
+        },
+        "GenerateConfigResp": {
+            "type": "object",
+            "required": [
+                "level_name"
+            ],
+            "properties": {
+                "bk_biz_id": {
+                    "type": "string"
+                },
+                "conf_file": {
+                    "type": "string"
+                },
+                "content": {
+                    "description": "content is a {conf_name:conf_type} dict like {\"a\":1, \"b\":\"string\"}",
+                    "type": "object",
+                    "additionalProperties": true
+                },
+                "level_name": {
+                    "description": "配置层级名,当前允许值 `app`,`module`,`cluster`\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+                    "type": "string",
+                    "enum": [
+                        "plat",
+                        "app",
+                        "module",
+                        "cluster"
+                    ]
+                },
+                "level_value": {
+                    "description": "配置层级值",
+                    "type": "string"
+                },
+                "revision": {
+                    "description": "version name for this config_file generation",
+                    "type": "string"
+                }
+            }
+        },
+        "GetConfigItemsReq": {
+            "type": "object",
+            "required": [
+                "bk_biz_id",
+                "conf_file",
+                "conf_type",
+                "format",
+                "level_name",
+                "namespace"
+            ],
+            "properties": {
+                "bk_biz_id": {
+                    "description": "业务ID,必选项",
+                    "type": "string",
+                    "example": "testapp"
+                },
+                "conf_file": {
+                    "description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+                    "type": "string",
+                    "example": "MySQL-5.7"
+                },
+                "conf_name": {
+                    "description": "指定要查询的 conf_name, 多个值以,分隔,为空表示查询该 conf_file 的所有conf_name",
+                    "type": "string"
+                },
+                "conf_type": {
+                    "description": "配置类型,如 dbconf,backup",
+                    "type": "string",
+                    "example": "dbconf"
+                },
+                "format": {
+                    "description": "`map.`, `map#`, `map|` 是特殊的map格式,返回结果会以 . 或者 # 或者 | 拆分 conf_name",
+                    "type": "string",
+                    "enum": [
+                        "list",
+                        "map",
+                        "map.",
+                        "map#",
+                        "map|"
+                    ]
+                },
+                "level_info": {
+                    "description": "上层级信息,如获取当前层级 cluster=c1 的配置,需要设置 level_info: {\"module\": \"m1\"} 提供cluster所属上级 module 的信息\n非必选项,目前只在查询 cluster 级别配置时需要指定模块信息有用\ntodo 将来可能本配置中心,直接请求dbmeta元数据来获取 可能的 app-module-cluster-host-instance 关系",
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "string"
+                    }
+                },
+                "level_name": {
+                    "description": "配置层级名,当前允许值 `app`,`module`,`cluster`\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+                    "type": "string",
+                    "enum": [
+                        "plat",
+                        "app",
+                        "module",
+                        "cluster"
+                    ]
+                },
+                "level_value": {
+                    "description": "配置层级值",
+                    "type": "string"
+                },
+                "namespace": {
+                    "description": "命名空间,一般指DB类型",
+                    "type": "string",
+                    "example": "tendbha"
+                }
+            }
+        },
+        "GetConfigItemsResp": {
+            "type": "object",
+            "required": [
+                "conf_file",
+                "conf_type",
+                "level_name",
+                "namespace"
+            ],
+            "properties": {
+                "bk_biz_id": {
+                    "type": "string"
+                },
+                "conf_file": {
+                    "description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+                    "type": "string",
+                    "example": "MySQL-5.7"
+                },
+                "conf_file_lc": {
+                    "description": "配置文件中文名,也可以是其它 locale 语言类型",
+                    "type": "string",
+                    "example": "5.7_参数配置"
+                },
+                "conf_type": {
+                    "description": "配置类型,如 dbconf,backup",
+                    "type": "string",
+                    "example": "dbconf"
+                },
+                "conf_type_lc": {
+                    "description": "配置类型中文名",
+                    "type": "string",
+                    "example": "DB参数配置"
+                },
+                "content": {
+                    "description": "content is a {conf_name:conf_type} dict like {\"a\":1, \"b\":\"string\"}",
+                    "type": "object",
+                    "additionalProperties": true
+                },
+                "created_at": {
+                    "type": "string"
+                },
+                "description": {
+                    "description": "配置文件的描述",
+                    "type": "string"
+                },
+                "level_name": {
+                    "description": "配置层级名,当前允许值 `app`,`module`,`cluster`\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+                    "type": "string",
+                    "enum": [
+                        "plat",
+                        "app",
+                        "module",
+                        "cluster"
+                    ]
+                },
+                "level_value": {
+                    "description": "配置层级值",
+                    "type": "string"
+                },
+                "namespace": {
+                    "description": "命名空间,一般指DB类型",
+                    "type": "string",
+                    "example": "tendbha"
+                },
+                "namespace_info": {
+                    "description": "namespace信息,比如数据库版本,与 conf_file 对应",
+                    "type": "string",
+                    "example": "MySQL 5.7"
+                },
+                "updated_at": {
+                    "type": "string"
+                },
+                "updated_by": {
+                    "type": "string"
+                }
+            }
+        },
+        "ListConfigVersionsResp": {
+            "type": "object",
+            "required": [
+                "level_name"
+            ],
+            "properties": {
+                "bk_biz_id": {
+                    "type": "string"
+                },
+                "conf_file": {
+                    "type": "string"
+                },
+                "level_name": {
+                    "description": "配置层级名,当前允许值 `app`,`module`,`cluster`\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+                    "type": "string",
+                    "enum": [
+                        "plat",
+                        "app",
+                        "module",
+                        "cluster"
+                    ]
+                },
+                "level_value": {
+                    "description": "配置层级值",
+                    "type": "string"
+                },
+                "namespace": {
+                    "type": "string"
+                },
+                "published": {
+                    "description": "version published. empty when published version is not in versions",
+                    "type": "string"
+                },
+                "versions": {
+                    "description": "版本列表,格式 [{\"revision\":\"v1\", \"rows_affected\":1},{\"revision\":\"v2\", \"rows_affected\":2}]",
+                    "type": "array",
+                    "items": {
+                        "type": "object",
+                        "additionalProperties": true
+                    }
+                }
+            }
+        },
+        "PublishConfigFileReq": {
+            "type": "object",
+            "required": [
+                "bk_biz_id",
+                "conf_file",
+                "conf_type",
+                "namespace",
+                "revision"
+            ],
+            "properties": {
+                "bk_biz_id": {
+                    "description": "业务ID,必选项",
+                    "type": "string",
+                    "example": "testapp"
+                },
+                "cluster": {
+                    "type": "string"
+                },
+                "conf_file": {
+                    "description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+                    "type": "string",
+                    "example": "MySQL-5.7"
+                },
+                "conf_type": {
+                    "description": "配置类型,如 dbconf,backup",
+                    "type": "string",
+                    "example": "dbconf"
+                },
+                "namespace": {
+                    "description": "命名空间,一般指DB类型",
+                    "type": "string",
+                    "example": "tendbha"
+                },
+                "patch": {
+                    "description": "patch will overwrite conf_value to versioned config_file. it's a key-value dict",
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "string"
+                    }
+                },
+                "revision": {
+                    "description": "the version you want to publish",
+                    "type": "string",
+                    "example": "v_20220309161928"
+                }
+            }
+        },
+        "QueryConfigNamesResp": {
+            "type": "object",
+            "properties": {
+                "conf_file": {
+                    "type": "string"
+                },
+                "conf_names": {
+                    "type": "object",
+                    "additionalProperties": {
+                        "$ref": "#/definitions/api.ConfNameDef"
+                    }
+                }
+            }
+        },
+        "api.ConfFileDef": {
+            "type": "object",
+            "required": [
+                "conf_file",
+                "conf_type",
+                "namespace"
+            ],
+            "properties": {
+                "conf_file": {
+                    "description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+                    "type": "string",
+                    "example": "MySQL-5.7"
+                },
+                "conf_file_lc": {
+                    "description": "配置文件中文名,也可以是其它 locale 语言类型",
+                    "type": "string",
+                    "example": "5.7_参数配置"
+                },
+                "conf_type": {
+                    "description": "配置类型,如 dbconf,backup",
+                    "type": "string",
+                    "example": "dbconf"
+                },
+                "conf_type_lc": {
+                    "description": "配置类型中文名",
+                    "type": "string",
+                    "example": "DB参数配置"
+                },
+                "description": {
+                    "description": "配置文件的描述",
+                    "type": "string"
+                },
+                "namespace": {
+                    "description": "命名空间,一般指DB类型",
+                    "type": "string",
+                    "example": "tendbha"
+                },
+                "namespace_info": {
+                    "description": "namespace信息,比如数据库版本,与 conf_file 对应",
+                    "type": "string",
+                    "example": "MySQL 5.7"
+                }
+            }
+        },
+        "api.ConfNameDef": {
+            "type": "object",
+            "required": [
+                "conf_name",
+                "value_type"
+            ],
+            "properties": {
+                "conf_name": {
+                    "description": "配置项,也叫参数项",
+                    "type": "string"
+                },
+                "conf_name_lc": {
+                    "description": "配置项中文名,可不填",
+                    "type": "string"
+                },
+                "description": {
+                    "description": "配置项说明",
+                    "type": "string"
+                },
+                "flag_disable": {
+                    "description": "是否禁用,代表该配置项状态. 默认0启用",
+                    "type": "integer",
+                    "example": 0
+                },
+                "flag_locked": {
+                    "description": "是否锁定. 默认0",
+                    "type": "integer",
+                    "example": 0
+                },
+                "need_restart": {
+                    "description": "是否需要重启生效. 默认1",
+                    "type": "integer",
+                    "example": 1
+                },
+                "value_allowed": {
+                    "description": "允许设定值,如枚举/范围等,为空时表示不限制范围\n当 value_type_sub=ENUM 时,value_allowed 格式 0|1 或者 ON|OFF 或者 aaa|bbb|ccc , 会校验value的合法性\n当 value_type_sub=REGEX 时,会根据 value_allowed 进行正则校验\n当 value_type_sub=RANGE 时,也会校验value 范围的合法性.\n - BYTES 是一种特殊的RANGE,value允许1mm 但value_allowed 必须是数字的range",
+                    "type": "string"
+                },
+                "value_default": {
+                    "description": "配置项默认值",
+                    "type": "string",
+                    "example": "1"
+                },
+                "value_type": {
+                    "description": "配置项的值类型,如 `STRING`,`INT`,`FLOAT`,`NUMBER`",
+                    "type": "string",
+                    "enum": [
+                        "STRING",
+                        "INT",
+                        "FLOAT",
+                        "NUMBER"
+                    ],
+                    "example": "STRING"
+                },
+                "value_type_sub": {
+                    "description": "value_type 的子类型,如果设置则用于校验 value_type 的具体类型,或者返回用于告知前端控件类型,例如 ENUM,RANGE",
+                    "type": "string",
+                    "enum": [
+                        "",
+                        "STRING",
+                        "ENUM",
+                        "RANGE",
+                        "BYTES",
+                        "REGEX",
+                        "JSON",
+                        "COMPLEX"
+                    ],
+                    "example": "ENUM"
+                }
+            }
+        },
+        "api.GetVersionedDetailResp": {
+            "type": "object",
+            "properties": {
+                "configs": {
+                    "description": "配置项",
+                    "type": "object",
+                    "additionalProperties": true
+                },
+                "configs_diff": {
+                    "description": "与上一个版本的差异",
+                    "type": "object",
+                    "additionalProperties": true
+                },
+                "content": {
+                    "type": "string"
+                },
+                "created_at": {
+                    "description": "发布时间",
+                    "type": "string"
+                },
+                "created_by": {
+                    "description": "发布人",
+                    "type": "string"
+                },
+                "description": {
+                    "type": "string"
+                },
+                "id": {
+                    "type": "integer"
+                },
+                "is_published": {
+                    "type": "integer"
+                },
+                "pre_revision": {
+                    "description": "上一个版本好",
+                    "type": "string"
+                },
+                "revision": {
+                    "description": "版本号",
+                    "type": "string"
+                },
+                "rows_affected": {
+                    "description": "相对上一个版本 影响行数",
+                    "type": "integer"
+                }
+            }
+        },
+        "api.HTTPClientErrResp": {
+            "type": "object",
+            "properties": {
+                "code": {
+                    "type": "integer",
+                    "example": 400
+                },
+                "data": {},
+                "message": {
+                    "description": "status bad request",
+                    "type": "string",
+                    "example": "输入参数错误"
+                }
+            }
+        },
+        "api.HTTPOkNilResp": {
+            "type": "object",
+            "properties": {
+                "code": {
+                    "type": "integer",
+                    "example": 200
+                },
+                "data": {},
+                "message": {
+                    "type": "string"
+                }
+            }
+        },
+        "api.ListConfFileResp": {
+            "type": "object",
+            "required": [
+                "conf_file",
+                "conf_type",
+                "namespace"
+            ],
+            "properties": {
+                "conf_file": {
+                    "description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+                    "type": "string",
+                    "example": "MySQL-5.7"
+                },
+                "conf_file_lc": {
+                    "description": "配置文件中文名,也可以是其它 locale 语言类型",
+                    "type": "string",
+                    "example": "5.7_参数配置"
+                },
+                "conf_type": {
+                    "description": "配置类型,如 dbconf,backup",
+                    "type": "string",
+                    "example": "dbconf"
+                },
+                "conf_type_lc": {
+                    "description": "配置类型中文名",
+                    "type": "string",
+                    "example": "DB参数配置"
+                },
+                "created_at": {
+                    "description": "创建时间",
+                    "type": "string"
+                },
+                "description": {
+                    "description": "配置文件的描述",
+                    "type": "string"
+                },
+                "namespace": {
+                    "description": "命名空间,一般指DB类型",
+                    "type": "string",
+                    "example": "tendbha"
+                },
+                "namespace_info": {
+                    "description": "namespace信息,比如数据库版本,与 conf_file 对应",
+                    "type": "string",
+                    "example": "MySQL 5.7"
+                },
+                "updated_at": {
+                    "description": "更新时间",
+                    "type": "string"
+                },
+                "updated_by": {
+                    "description": "更新人",
+                    "type": "string"
+                }
+            }
+        },
+        "api.SaveConfItemsReq": {
+            "type": "object",
+            "required": [
+                "bk_biz_id",
+                "level_name"
+            ],
+            "properties": {
+                "bk_biz_id": {
+                    "description": "业务ID,必选项",
+                    "type": "string",
+                    "example": "testapp"
+                },
+                "conf_file_info": {
+                    "$ref": "#/definitions/api.ConfFileDef"
+                },
+                "conf_items": {
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/api.UpsertConfItem"
+                    }
+                },
+                "confirm": {
+                    "description": "保存时如果与下层级存在冲突,提示确认,用 confirm=1 重新请求",
+                    "type": "integer"
+                },
+                "description": {
+                    "description": "发布描述",
+                    "type": "string"
+                },
+                "level_info": {
+                    "description": "上层级信息,如获取当前层级 cluster=c1 的配置,需要设置 level_info: {\"module\": \"m1\"} 提供cluster所属上级 module 的信息\n非必选项,目前只在查询 cluster 级别配置时需要指定模块信息有用\ntodo 将来可能本配置中心,直接请求dbmeta元数据来获取 可能的 app-module-cluster-host-instance 关系",
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "string"
+                    }
+                },
+                "level_name": {
+                    "description": "配置层级名,当前允许值 `app`,`module`,`cluster`\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+                    "type": "string",
+                    "enum": [
+                        "plat",
+                        "app",
+                        "module",
+                        "cluster"
+                    ]
+                },
+                "level_value": {
+                    "description": "配置层级值",
+                    "type": "string"
+                }
+            }
+        },
+        "api.UpsertConfFilePlatReq": {
+            "type": "object",
+            "required": [
+                "conf_file",
+                "conf_type",
+                "namespace",
+                "req_type"
+            ],
+            "properties": {
+                "conf_file": {
+                    "description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+                    "type": "string",
+                    "example": "MySQL-5.7"
+                },
+                "conf_file_lc": {
+                    "description": "配置文件中文名,也可以是其它 locale 语言类型",
+                    "type": "string",
+                    "example": "5.7_参数配置"
+                },
+                "conf_names": {
+                    "description": "如果revision为空,表示第一次保存。每次 update 操作都会返回 revision,确保在这一轮编辑操作下都是操作这个revision\n已发布的 revision 不能编辑\nRevision string `json:\"revision\" form:\"revision\"`",
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/api.UpsertConfNames"
+                    }
+                },
+                "conf_type": {
+                    "description": "配置类型,如 dbconf,backup",
+                    "type": "string",
+                    "example": "dbconf"
+                },
+                "conf_type_lc": {
+                    "description": "配置类型中文名",
+                    "type": "string",
+                    "example": "DB参数配置"
+                },
+                "confirm": {
+                    "description": "保存时如果与下层级存在冲突,提示确认,用 confirm=1 重新请求",
+                    "type": "integer"
+                },
+                "description": {
+                    "description": "配置文件的描述",
+                    "type": "string"
+                },
+                "file_id": {
+                    "description": "新建配置文件,第一次保存返回 file_id, 后续保存/发布 需传入 file_id",
+                    "type": "integer"
+                },
+                "namespace": {
+                    "description": "命名空间,一般指DB类型",
+                    "type": "string",
+                    "example": "tendbha"
+                },
+                "namespace_info": {
+                    "description": "namespace信息,比如数据库版本,与 conf_file 对应",
+                    "type": "string",
+                    "example": "MySQL 5.7"
+                },
+                "req_type": {
+                    "description": "配置文件修改动作的请求类型,`SaveOnly`: 仅保存, `SaveAndPublish`保存并发布",
+                    "type": "string",
+                    "enum": [
+                        "SaveOnly",
+                        "SaveAndPublish"
+                    ]
+                }
+            }
+        },
+        "api.UpsertConfFilePlatResp": {
+            "type": "object",
+            "required": [
+                "conf_file",
+                "conf_type",
+                "namespace"
+            ],
+            "properties": {
+                "conf_file": {
+                    "description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+                    "type": "string",
+                    "example": "MySQL-5.7"
+                },
+                "conf_type": {
+                    "description": "配置类型,如 dbconf,backup",
+                    "type": "string",
+                    "example": "dbconf"
+                },
+                "file_id": {
+                    "type": "integer"
+                },
+                "is_published": {
+                    "type": "integer"
+                },
+                "namespace": {
+                    "description": "命名空间,一般指DB类型",
+                    "type": "string",
+                    "example": "tendbha"
+                },
+                "revision": {
+                    "description": "编辑配置文件,仅保存时不会产生 revision,保存并发布时才返回",
+                    "type": "string"
+                }
+            }
+        },
+        "api.UpsertConfItem": {
+            "type": "object",
+            "required": [
+                "conf_name",
+                "op_type"
+            ],
+            "properties": {
+                "conf_name": {
+                    "description": "配置项名称",
+                    "type": "string"
+                },
+                "conf_value": {
+                    "type": "string"
+                },
+                "description": {
+                    "type": "string"
+                },
+                "extra_info": {
+                    "type": "string"
+                },
+                "flag_disable": {
+                    "description": "是否禁用,默认 0 表示启用. 1表示禁用",
+                    "type": "integer",
+                    "example": 0
+                },
+                "flag_locked": {
+                    "description": "是否锁定,默认 0 表上不锁定",
+                    "type": "integer",
+                    "example": 0
+                },
+                "op_type": {
+                    "description": "配置项修改动作,需提供操作类型字段,允许值 `add`,`update`,`remove`",
+                    "type": "string",
+                    "enum": [
+                        "add",
+                        "update",
+                        "remove"
+                    ]
+                }
+            }
+        },
+        "api.UpsertConfItemsReq": {
+            "type": "object",
+            "required": [
+                "bk_biz_id",
+                "level_name",
+                "req_type"
+            ],
+            "properties": {
+                "bk_biz_id": {
+                    "description": "业务ID,必选项",
+                    "type": "string",
+                    "example": "testapp"
+                },
+                "conf_file_info": {
+                    "$ref": "#/definitions/api.ConfFileDef"
+                },
+                "conf_items": {
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/api.UpsertConfItem"
+                    }
+                },
+                "confirm": {
+                    "description": "保存时如果与下层级存在冲突,提示确认,用 confirm=1 重新请求",
+                    "type": "integer"
+                },
+                "description": {
+                    "description": "发布描述",
+                    "type": "string"
+                },
+                "level_info": {
+                    "description": "上层级信息,如获取当前层级 cluster=c1 的配置,需要设置 level_info: {\"module\": \"m1\"} 提供cluster所属上级 module 的信息\n非必选项,目前只在查询 cluster 级别配置时需要指定模块信息有用\ntodo 将来可能本配置中心,直接请求dbmeta元数据来获取 可能的 app-module-cluster-host-instance 关系",
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "string"
+                    }
+                },
+                "level_name": {
+                    "description": "配置层级名,当前允许值 `app`,`module`,`cluster`\n配合 flag_locked 锁定标记,可以知道 锁定级别",
+                    "type": "string",
+                    "enum": [
+                        "plat",
+                        "app",
+                        "module",
+                        "cluster"
+                    ]
+                },
+                "level_value": {
+                    "description": "配置层级值",
+                    "type": "string"
+                },
+                "req_type": {
+                    "description": "配置文件修改动作的请求类型,`SaveOnly`: 仅保存, `SaveAndPublish`保存并发布",
+                    "type": "string",
+                    "enum": [
+                        "SaveOnly",
+                        "SaveAndPublish"
+                    ]
+                },
+                "revision": {
+                    "type": "string"
+                }
+            }
+        },
+        "api.UpsertConfItemsResp": {
+            "type": "object",
+            "required": [
+                "conf_file",
+                "conf_type",
+                "namespace"
+            ],
+            "properties": {
+                "bk_biz_id": {
+                    "type": "string"
+                },
+                "conf_file": {
+                    "description": "配置文件名,一般配置类型与配置文件一一对应,但如 mysql 5.6, 5.7 两个版本同属 dbconf 配置,所以有 MySQL-5.5, MySQL-5.6 两个配置文件",
+                    "type": "string",
+                    "example": "MySQL-5.7"
+                },
+                "conf_type": {
+                    "description": "配置类型,如 dbconf,backup",
+                    "type": "string",
+                    "example": "dbconf"
+                },
+                "is_published": {
+                    "type": "integer"
+                },
+                "namespace": {
+                    "description": "命名空间,一般指DB类型",
+                    "type": "string",
+                    "example": "tendbha"
+                },
+                "revision": {
+                    "description": "编辑配置文件,第一次保存返回 revision, 后续保存/发布 需传入 revision",
+                    "type": "string"
+                }
+            }
+        },
+        "api.UpsertConfNames": {
+            "type": "object",
+            "required": [
+                "conf_name",
+                "op_type",
+                "value_type"
+            ],
+            "properties": {
+                "conf_name": {
+                    "description": "配置项,也叫参数项",
+                    "type": "string"
+                },
+                "conf_name_lc": {
+                    "description": "配置项中文名,可不填",
+                    "type": "string"
+                },
+                "description": {
+                    "description": "配置项说明",
+                    "type": "string"
+                },
+                "flag_disable": {
+                    "description": "是否禁用,代表该配置项状态. 默认0启用",
+                    "type": "integer",
+                    "example": 0
+                },
+                "flag_locked": {
+                    "description": "是否锁定. 默认0",
+                    "type": "integer",
+                    "example": 0
+                },
+                "need_restart": {
+                    "description": "是否需要重启生效. 默认1",
+                    "type": "integer",
+                    "example": 1
+                },
+                "op_type": {
+                    "description": "配置项修改动作,需提供操作类型字段,允许值 `add`,`update`,`remove`",
+                    "type": "string",
+                    "enum": [
+                        "add",
+                        "update",
+                        "remove"
+                    ]
+                },
+                "value_allowed": {
+                    "description": "允许设定值,如枚举/范围等,为空时表示不限制范围\n当 value_type_sub=ENUM 时,value_allowed 格式 0|1 或者 ON|OFF 或者 aaa|bbb|ccc , 会校验value的合法性\n当 value_type_sub=REGEX 时,会根据 value_allowed 进行正则校验\n当 value_type_sub=RANGE 时,也会校验value 范围的合法性.\n - BYTES 是一种特殊的RANGE,value允许1mm 但value_allowed 必须是数字的range",
+                    "type": "string"
+                },
+                "value_default": {
+                    "description": "配置项默认值",
+                    "type": "string",
+                    "example": "1"
+                },
+                "value_type": {
+                    "description": "配置项的值类型,如 `STRING`,`INT`,`FLOAT`,`NUMBER`",
+                    "type": "string",
+                    "enum": [
+                        "STRING",
+                        "INT",
+                        "FLOAT",
+                        "NUMBER"
+                    ],
+                    "example": "STRING"
+                },
+                "value_type_sub": {
+                    "description": "value_type 的子类型,如果设置则用于校验 value_type 的具体类型,或者返回用于告知前端控件类型,例如 ENUM,RANGE",
+                    "type": "string",
+                    "enum": [
+                        "",
+                        "STRING",
+                        "ENUM",
+                        "RANGE",
+                        "BYTES",
+                        "REGEX",
+                        "JSON",
+                        "COMPLEX"
+                    ],
+                    "example": "ENUM"
+                }
+            }
+        },
+        "model.ConfigModel": {
+            "type": "object",
+            "properties": {
+                "bk_biz_id": {
+                    "type": "string"
+                },
+                "conf_file": {
+                    "type": "string"
+                },
+                "conf_name": {
+                    "type": "string"
+                },
+                "conf_type": {
+                    "type": "string"
+                },
+                "conf_value": {
+                    "type": "string"
+                },
+                "created_at": {
+                    "type": "string"
+                },
+                "description": {
+                    "type": "string"
+                },
+                "extra_info": {
+                    "type": "string"
+                },
+                "flag_disable": {
+                    "type": "integer"
+                },
+                "flag_locked": {
+                    "type": "integer"
+                },
+                "id": {
+                    "type": "integer"
+                },
+                "level_locked": {
+                    "type": "string"
+                },
+                "level_name": {
+                    "type": "string"
+                },
+                "level_value": {
+                    "type": "string"
+                },
+                "namespace": {
+                    "type": "string"
+                },
+                "updated_at": {
+                    "type": "string"
+                }
+            }
+        }
+    },
+    "securityDefinitions": {
+        "BasicAuth": {
+            "type": "basic"
+        }
+    }
+}
\ No newline at end of file
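As a quick orientation between the JSON and YAML renditions of the spec (this note is editorial, not part of the generated files): a minimal Go client sketch against this API could look as follows. It assumes the host and basePath declared in the spec (localhost:8080 and /) and the POST /bkconfig/v1/conffile/add endpoint documented below; the operator name "admin" and the sample field values are placeholders.

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
    )

    // Mirrors the required fields of api.UpsertConfFilePlatReq from the spec.
    type UpsertConfFilePlatReq struct {
        ConfFile  string `json:"conf_file"`
        ConfType  string `json:"conf_type"`
        Namespace string `json:"namespace"`
        ReqType   string `json:"req_type"` // "SaveOnly" or "SaveAndPublish"
    }

    func main() {
        body, _ := json.Marshal(UpsertConfFilePlatReq{
            ConfFile:  "MySQL-5.7",
            ConfType:  "dbconf",
            Namespace: "tendbha",
            ReqType:   "SaveAndPublish",
        })
        req, _ := http.NewRequest(http.MethodPost,
            "http://localhost:8080/bkconfig/v1/conffile/add", bytes.NewReader(body))
        req.Header.Set("Content-Type", "application/json")
        // X-Bkapi-User-Name identifies the requesting operator, per the endpoint description.
        req.Header.Set("X-Bkapi-User-Name", "admin") // placeholder operator
        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        // 200 carries api.UpsertConfFilePlatResp; 400 carries api.HTTPClientErrResp.
        fmt.Println(resp.Status)
    }

On success the response carries file_id and, for SaveAndPublish, a revision; per api.UpsertConfFilePlatResp above, later save/publish calls must pass these back.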
diff --git a/dbm-services/common/db-config/docs/swagger.yaml b/dbm-services/common/db-config/docs/swagger.yaml
new file mode 100644
index 0000000000..16711f952a
--- /dev/null
+++ b/dbm-services/common/db-config/docs/swagger.yaml
@@ -0,0 +1,1402 @@
+basePath: /
+definitions:
+  GenerateConfigReq:
+    properties:
+      bk_biz_id:
+        description: Business ID, required
+        example: testapp
+        type: string
+      conf_file:
+        description: Config file name. A config type usually maps to one config file,
+          but e.g. MySQL 5.6 and 5.7 both belong to dbconf, hence separate files such as MySQL-5.5 and MySQL-5.6
+        example: MySQL-5.7
+        type: string
+      conf_type:
+        description: Config type, e.g. dbconf, backup
+        example: dbconf
+        type: string
+      format:
+        description: '`map.`, `map#`, `map|` are special map formats; the result splits conf_name on . or # or |'
+        enum:
+        - list
+        - map
+        - map.
+        - map#
+        - map|
+        type: string
+      level_info:
+        additionalProperties:
+          type: string
+        description: |-
+          Upper-level info. E.g. to get config at the current level cluster=c1, set level_info: {"module": "m1"} to provide the module the cluster belongs to
+          Optional; currently only needed when querying cluster-level config, where the module info must be given
+          todo: this config center may later query dbmeta metadata directly for the possible app-module-cluster-host-instance relations
+        type: object
+      level_name:
+        description: |-
+          Config level name; currently allowed values `app`,`module`,`cluster`
+          Together with the flag_locked mark it tells at which level a lock was set
+        enum:
+        - plat
+        - app
+        - module
+        - cluster
+        type: string
+      level_value:
+        description: Config level value
+        type: string
+      method:
+        description: |-
+          method must be one of GenerateOnly|GenerateAndSave|GenerateAndPublish
+          `GenerateOnly`: generate merged config
+          `GenerateAndSave`: generate and save the merged config to db (snapshot).
+          `GenerateAndPublish`: generate and save the merged config to db, and mark it as published (release)
+        enum:
+        - GenerateAndSave
+        - GenerateAndPublish
+        type: string
+      namespace:
+        description: Namespace, usually the DB type
+        example: tendbha
+        type: string
+    required:
+    - bk_biz_id
+    - conf_file
+    - conf_type
+    - format
+    - level_name
+    - method
+    - namespace
+    type: object
+  GenerateConfigResp:
+    properties:
+      bk_biz_id:
+        type: string
+      conf_file:
+        type: string
+      content:
+        additionalProperties: true
+        description: content is a {conf_name:conf_value} dict like {"a":1, "b":"string"}
+        type: object
+      level_name:
+        description: |-
+          Config level name; currently allowed values `app`,`module`,`cluster`
+          Together with the flag_locked mark it tells at which level a lock was set
+        enum:
+        - plat
+        - app
+        - module
+        - cluster
+        type: string
+      level_value:
+        description: Config level value
+        type: string
+      revision:
+        description: version name for this config_file generation
+        type: string
+    required:
+    - level_name
+    type: object
+  GetConfigItemsReq:
+    properties:
+      bk_biz_id:
+        description: Business ID, required
+        example: testapp
+        type: string
+      conf_file:
+        description: Config file name. A config type usually maps to one config file,
+          but e.g. MySQL 5.6 and 5.7 both belong to dbconf, hence separate files such as MySQL-5.5 and MySQL-5.6
+        example: MySQL-5.7
+        type: string
+      conf_name:
+        description: conf_name(s) to query, comma-separated; empty means all conf_names of this conf_file
+        type: string
+      conf_type:
+        description: Config type, e.g. dbconf, backup
+        example: dbconf
+        type: string
+      format:
+        description: '`map.`, `map#`, `map|` are special map formats; the result splits conf_name on . or # or |'
+        enum:
+        - list
+        - map
+        - map.
+        - map#
+        - map|
+        type: string
+      level_info:
+        additionalProperties:
+          type: string
+        description: |-
+          Upper-level info. E.g. to get config at the current level cluster=c1, set level_info: {"module": "m1"} to provide the module the cluster belongs to
+          Optional; currently only needed when querying cluster-level config, where the module info must be given
+          todo: this config center may later query dbmeta metadata directly for the possible app-module-cluster-host-instance relations
+        type: object
+      level_name:
+        description: |-
+          Config level name; currently allowed values `app`,`module`,`cluster`
+          Together with the flag_locked mark it tells at which level a lock was set
+        enum:
+        - plat
+        - app
+        - module
+        - cluster
+        type: string
+      level_value:
+        description: Config level value
+        type: string
+      namespace:
+        description: Namespace, usually the DB type
+        example: tendbha
+        type: string
+    required:
+    - bk_biz_id
+    - conf_file
+    - conf_type
+    - format
+    - level_name
+    - namespace
+    type: object
+  GetConfigItemsResp:
+    properties:
+      bk_biz_id:
+        type: string
+      conf_file:
+        description: Config file name. A config type usually maps to one config file,
+          but e.g. MySQL 5.6 and 5.7 both belong to dbconf, hence separate files such as MySQL-5.5 and MySQL-5.6
+        example: MySQL-5.7
+        type: string
+      conf_file_lc:
+        description: Chinese name of the config file; may also be another locale
+        example: 5.7_参数配置
+        type: string
+      conf_type:
+        description: Config type, e.g. dbconf, backup
+        example: dbconf
+        type: string
+      conf_type_lc:
+        description: Chinese name of the config type
+        example: DB参数配置
+        type: string
+      content:
+        additionalProperties: true
+        description: content is a {conf_name:conf_value} dict like {"a":1, "b":"string"}
+        type: object
+      created_at:
+        type: string
+      description:
+        description: Description of the config file
+        type: string
+      level_name:
+        description: |-
+          Config level name; currently allowed values `app`,`module`,`cluster`
+          Together with the flag_locked mark it tells at which level a lock was set
+        enum:
+        - plat
+        - app
+        - module
+        - cluster
+        type: string
+      level_value:
+        description: Config level value
+        type: string
+      namespace:
+        description: Namespace, usually the DB type
+        example: tendbha
+        type: string
+      namespace_info:
+        description: Namespace info, e.g. the database version, corresponding to conf_file
+        example: MySQL 5.7
+        type: string
+      updated_at:
+        type: string
+      updated_by:
+        type: string
+    required:
+    - conf_file
+    - conf_type
+    - level_name
+    - namespace
+    type: object
+  ListConfigVersionsResp:
+    properties:
+      bk_biz_id:
+        type: string
+      conf_file:
+        type: string
+      level_name:
+        description: |-
+          Config level name; currently allowed values `app`,`module`,`cluster`
+          Together with the flag_locked mark it tells at which level a lock was set
+        enum:
+        - plat
+        - app
+        - module
+        - cluster
+        type: string
+      level_value:
+        description: Config level value
+        type: string
+      namespace:
+        type: string
+      published:
+        description: the published version; empty when the published version is not among versions
+        type: string
+      versions:
+        description: Version list, in the format [{"revision":"v1", "rows_affected":1},{"revision":"v2",
+          "rows_affected":2}]
+        items:
+          additionalProperties: true
+          type: object
+        type: array
+    required:
+    - level_name
+    type: object
+  PublishConfigFileReq:
+    properties:
+      bk_biz_id:
+        description: Business ID, required
+        example: testapp
+        type: string
+      cluster:
+        type: string
+      conf_file:
+        description: Config file name. A config type usually maps to one config file,
+          but e.g. MySQL 5.6 and 5.7 both belong to dbconf, hence separate files such as MySQL-5.5 and MySQL-5.6
+        example: MySQL-5.7
+        type: string
+      conf_type:
+        description: Config type, e.g. dbconf, backup
+        example: dbconf
+        type: string
+      namespace:
+        description: Namespace, usually the DB type
+        example: tendbha
+        type: string
+      patch:
+        additionalProperties:
+          type: string
+        description: patch overwrites conf_value in the versioned config_file; it
+          is a key-value dict
+        type: object
+      revision:
+        description: the version you want to publish
+        example: v_20220309161928
+        type: string
+    required:
+    - bk_biz_id
+    - conf_file
+    - conf_type
+    - namespace
+    - revision
+    type: object
+  QueryConfigNamesResp:
+    properties:
+      conf_file:
+        type: string
+      conf_names:
+        additionalProperties:
+          $ref: '#/definitions/api.ConfNameDef'
+        type: object
+    type: object
+  api.ConfFileDef:
+    properties:
+      conf_file:
+        description: Config file name. A config type usually maps to one config file,
+          but e.g. MySQL 5.6 and 5.7 both belong to dbconf, hence separate files such as MySQL-5.5 and MySQL-5.6
+        example: MySQL-5.7
+        type: string
+      conf_file_lc:
+        description: Chinese name of the config file; may also be another locale
+        example: 5.7_参数配置
+        type: string
+      conf_type:
+        description: Config type, e.g. dbconf, backup
+        example: dbconf
+        type: string
+      conf_type_lc:
+        description: Chinese name of the config type
+        example: DB参数配置
+        type: string
+      description:
+        description: Description of the config file
+        type: string
+      namespace:
+        description: Namespace, usually the DB type
+        example: tendbha
+        type: string
+      namespace_info:
+        description: Namespace info, e.g. the database version, corresponding to conf_file
+        example: MySQL 5.7
+        type: string
+    required:
+    - conf_file
+    - conf_type
+    - namespace
+    type: object
+  api.ConfNameDef:
+    properties:
+      conf_name:
+        description: Config item, also called parameter item
+        type: string
+      conf_name_lc:
+        description: Chinese name of the config item, optional
+        type: string
+      description:
+        description: Description of the config item
+        type: string
+      flag_disable:
+        description: Whether disabled, i.e. the config item's status; default 0 means enabled
+        example: 0
+        type: integer
+      flag_locked:
+        description: Whether locked; default 0
+        example: 0
+        type: integer
+      need_restart:
+        description: Whether a restart is required to take effect; default 1
+        example: 1
+        type: integer
+      value_allowed:
+        description: |-
+          Allowed values, e.g. an enum or a range; empty means unrestricted
+          When value_type_sub=ENUM, value_allowed looks like 0|1 or ON|OFF or aaa|bbb|ccc, and the value is validated against it
+          When value_type_sub=REGEX, the value is validated against the value_allowed regex
+          When value_type_sub=RANGE, the value is also checked against the range.
+           - BYTES is a special kind of RANGE: the value may look like 1mm, but value_allowed must be a numeric range
+        type: string
+      value_default:
+        description: Default value of the config item
+        example: "1"
+        type: string
+      value_type:
+        description: Value type of the config item, e.g. `STRING`,`INT`,`FLOAT`,`NUMBER`
+        enum:
+        - STRING
+        - INT
+        - FLOAT
+        - NUMBER
+        example: STRING
+        type: string
+      value_type_sub:
+        description: Subtype of value_type; if set, it is used to validate the concrete
+          value_type, or returned to tell the frontend which widget to use, e.g. ENUM,RANGE
+        enum:
+        - ""
+        - STRING
+        - ENUM
+        - RANGE
+        - BYTES
+        - REGEX
+        - JSON
+        - COMPLEX
+        example: ENUM
+        type: string
+    required:
+    - conf_name
+    - value_type
+    type: object
+  api.GetVersionedDetailResp:
+    properties:
+      configs:
+        additionalProperties: true
+        description: Config items
+        type: object
+      configs_diff:
+        additionalProperties: true
+        description: Diff against the previous version
+        type: object
+      content:
+        type: string
+      created_at:
+        description: Publish time
+        type: string
+      created_by:
+        description: Publisher
+        type: string
+      description:
+        type: string
+      id:
+        type: integer
+      is_published:
+        type: integer
+      pre_revision:
+        description: Previous revision number
+        type: string
+      revision:
+        description: Revision number
+        type: string
+      rows_affected:
+        description: Number of rows affected relative to the previous version
+        type: integer
+    type: object
+  api.HTTPClientErrResp:
+    properties:
+      code:
+        example: 400
+        type: integer
+      data: {}
+      message:
+        description: status bad request
+        example: 输入参数错误
+        type: string
+    type: object
+  api.HTTPOkNilResp:
+    properties:
+      code:
+        example: 200
+        type: integer
+      data: {}
+      message:
+        type: string
+    type: object
+  api.ListConfFileResp:
+    properties:
+      conf_file:
+        description: Config file name. A config type usually maps to one config file,
+          but e.g. MySQL 5.6 and 5.7 both belong to dbconf, hence separate files such as MySQL-5.5 and MySQL-5.6
+        example: MySQL-5.7
+        type: string
+      conf_file_lc:
+        description: Chinese name of the config file; may also be another locale
+        example: 5.7_参数配置
+        type: string
+      conf_type:
+        description: Config type, e.g. dbconf, backup
+        example: dbconf
+        type: string
+      conf_type_lc:
+        description: Chinese name of the config type
+        example: DB参数配置
+        type: string
+      created_at:
+        description: Creation time
+        type: string
+      description:
+        description: Description of the config file
+        type: string
+      namespace:
+        description: Namespace, usually the DB type
+        example: tendbha
+        type: string
+      namespace_info:
+        description: Namespace info, e.g. the database version, corresponding to conf_file
+        example: MySQL 5.7
+        type: string
+      updated_at:
+        description: Update time
+        type: string
+      updated_by:
+        description: Last updated by
+        type: string
+    required:
+    - conf_file
+    - conf_type
+    - namespace
+    type: object
+  api.SaveConfItemsReq:
+    properties:
+      bk_biz_id:
+        description: Business ID, required
+        example: testapp
+        type: string
+      conf_file_info:
+        $ref: '#/definitions/api.ConfFileDef'
+      conf_items:
+        items:
+          $ref: '#/definitions/api.UpsertConfItem'
+        type: array
+      confirm:
+        description: If saving conflicts with a lower level, confirmation is prompted; retry with confirm=1
+        type: integer
+      description:
+        description: Publish description
+        type: string
+      level_info:
+        additionalProperties:
+          type: string
+        description: |-
+          Upper-level info. E.g. to get config at the current level cluster=c1, set level_info: {"module": "m1"} to provide the module the cluster belongs to
+          Optional; currently only needed when querying cluster-level config, where the module info must be given
+          todo: this config center may later query dbmeta metadata directly for the possible app-module-cluster-host-instance relations
+        type: object
+      level_name:
+        description: |-
+          Config level name; currently allowed values `app`,`module`,`cluster`
+          Together with the flag_locked mark it tells at which level a lock was set
+        enum:
+        - plat
+        - app
+        - module
+        - cluster
+        type: string
+      level_value:
+        description: Config level value
+        type: string
+    required:
+    - bk_biz_id
+    - level_name
+    type: object
+  api.UpsertConfFilePlatReq:
+    properties:
+      conf_file:
+        description: Config file name. A config type usually maps to one config file,
+          but e.g. MySQL 5.6 and 5.7 both belong to dbconf, hence separate files such as MySQL-5.5 and MySQL-5.6
+        example: MySQL-5.7
+        type: string
+      conf_file_lc:
+        description: Chinese name of the config file; may also be another locale
+        example: 5.7_参数配置
+        type: string
+      conf_names:
+        description: |-
+          If revision is empty, this is the first save. Every update returns a revision; make sure this round of editing keeps operating on that revision
+          A published revision can no longer be edited
+          Revision string `json:"revision" form:"revision"`
+        items:
+          $ref: '#/definitions/api.UpsertConfNames'
+        type: array
+      conf_type:
+        description: Config type, e.g. dbconf, backup
+        example: dbconf
+        type: string
+      conf_type_lc:
+        description: Chinese name of the config type
+        example: DB参数配置
+        type: string
+      confirm:
+        description: If saving conflicts with a lower level, confirmation is prompted; retry with confirm=1
+        type: integer
+      description:
+        description: Description of the config file
+        type: string
+      file_id:
+        description: For a new config file, the first save returns file_id; later save/publish requests must pass file_id
+        type: integer
+      namespace:
+        description: Namespace, usually the DB type
+        example: tendbha
+        type: string
+      namespace_info:
+        description: Namespace info, e.g. the database version, corresponding to conf_file
+        example: MySQL 5.7
+        type: string
+      req_type:
+        description: 'Request type for the config file modification. `SaveOnly`: save only, `SaveAndPublish`: save and publish'
+        enum:
+        - SaveOnly
+        - SaveAndPublish
+        type: string
+    required:
+    - conf_file
+    - conf_type
+    - namespace
+    - req_type
+    type: object
+  api.UpsertConfFilePlatResp:
+    properties:
+      conf_file:
+        description: Config file name. A config type usually maps to one config file,
+          but e.g. MySQL 5.6 and 5.7 both belong to dbconf, hence separate files such as MySQL-5.5 and MySQL-5.6
+        example: MySQL-5.7
+        type: string
+      conf_type:
+        description: Config type, e.g. dbconf, backup
+        example: dbconf
+        type: string
+      file_id:
+        type: integer
+      is_published:
+        type: integer
+      namespace:
+        description: Namespace, usually the DB type
+        example: tendbha
+        type: string
+      revision:
+        description: When editing a config file, save-only produces no revision; one is returned only on save-and-publish
+        type: string
+    required:
+    - conf_file
+    - conf_type
+    - namespace
+    type: object
+  api.UpsertConfItem:
+    properties:
+      conf_name:
+        description: Config item name
+        type: string
+      conf_value:
+        type: string
+      description:
+        type: string
+      extra_info:
+        type: string
+      flag_disable:
+        description: Whether disabled; default 0 means enabled, 1 means disabled
+        example: 0
+        type: integer
+      flag_locked:
+        description: Whether locked; default 0 means not locked
+        example: 0
+        type: integer
+      op_type:
+        description: Modification action for the config item; an operation type is required, allowed values `add`,`update`,`remove`
+        enum:
+        - add
+        - update
+        - remove
+        type: string
+    required:
+    - conf_name
+    - op_type
+    type: object
+  api.UpsertConfItemsReq:
+    properties:
+      bk_biz_id:
+        description: Business ID, required
+        example: testapp
+        type: string
+      conf_file_info:
+        $ref: '#/definitions/api.ConfFileDef'
+      conf_items:
+        items:
+          $ref: '#/definitions/api.UpsertConfItem'
+        type: array
+      confirm:
+        description: If saving conflicts with a lower level, confirmation is prompted; retry with confirm=1
+        type: integer
+      description:
+        description: Publish description
+        type: string
+      level_info:
+        additionalProperties:
+          type: string
+        description: |-
+          Upper-level info. E.g. to get config at the current level cluster=c1, set level_info: {"module": "m1"} to provide the module the cluster belongs to
+          Optional; currently only needed when querying cluster-level config, where the module info must be given
+          todo: this config center may later query dbmeta metadata directly for the possible app-module-cluster-host-instance relations
+        type: object
+      level_name:
+        description: |-
+          Config level name; currently allowed values `app`,`module`,`cluster`
+          Together with the flag_locked mark it tells at which level a lock was set
+        enum:
+        - plat
+        - app
+        - module
+        - cluster
+        type: string
+      level_value:
+        description: Config level value
+        type: string
+      req_type:
+        description: 'Request type for the config file modification. `SaveOnly`: save only, `SaveAndPublish`: save and publish'
+        enum:
+        - SaveOnly
+        - SaveAndPublish
+        type: string
+      revision:
+        type: string
+    required:
+    - bk_biz_id
+    - level_name
+    - req_type
+    type: object
+  api.UpsertConfItemsResp:
+    properties:
+      bk_biz_id:
+        type: string
+      conf_file:
+        description: Config file name. A config type usually maps to one config file,
+          but e.g. MySQL 5.6 and 5.7 both belong to dbconf, hence separate files such as MySQL-5.5 and MySQL-5.6
+        example: MySQL-5.7
+        type: string
+      conf_type:
+        description: Config type, e.g. dbconf, backup
+        example: dbconf
+        type: string
+      is_published:
+        type: integer
+      namespace:
+        description: Namespace, usually the DB type
+        example: tendbha
+        type: string
+      revision:
+        description: When editing a config file, the first save returns revision; later save/publish requests must pass revision
+        type: string
+    required:
+    - conf_file
+    - conf_type
+    - namespace
+    type: object
+  api.UpsertConfNames:
+    properties:
+      conf_name:
+        description: Config item, also called parameter item
+        type: string
+      conf_name_lc:
+        description: Chinese name of the config item, optional
+        type: string
+      description:
+        description: Description of the config item
+        type: string
+      flag_disable:
+        description: Whether disabled, i.e. the config item's status; default 0 means enabled
+        example: 0
+        type: integer
+      flag_locked:
+        description: Whether locked; default 0
+        example: 0
+        type: integer
+      need_restart:
+        description: Whether a restart is required to take effect; default 1
+        example: 1
+        type: integer
+      op_type:
+        description: Modification action for the config item; an operation type is required, allowed values `add`,`update`,`remove`
+        enum:
+        - add
+        - update
+        - remove
+        type: string
+      value_allowed:
+        description: |-
+          Allowed values, e.g. an enum or a range; empty means unrestricted
+          When value_type_sub=ENUM, value_allowed looks like 0|1 or ON|OFF or aaa|bbb|ccc, and the value is validated against it
+          When value_type_sub=REGEX, the value is validated against the value_allowed regex
+          When value_type_sub=RANGE, the value is also checked against the range.
+           - BYTES is a special kind of RANGE: the value may look like 1mm, but value_allowed must be a numeric range
+        type: string
+      value_default:
+        description: Default value of the config item
+        example: "1"
+        type: string
+      value_type:
+        description: Value type of the config item, e.g. `STRING`,`INT`,`FLOAT`,`NUMBER`
+        enum:
+        - STRING
+        - INT
+        - FLOAT
+        - NUMBER
+        example: STRING
+        type: string
+      value_type_sub:
+        description: Subtype of value_type; if set, it is used to validate the concrete
+          value_type, or returned to tell the frontend which widget to use, e.g. ENUM,RANGE
+        enum:
+        - ""
+        - STRING
+        - ENUM
+        - RANGE
+        - BYTES
+        - REGEX
+        - JSON
+        - COMPLEX
+        example: ENUM
+        type: string
+    required:
+    - conf_name
+    - op_type
+    - value_type
+    type: object
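+  # A hypothetical conf_names entry showing how the fields above combine
+  # (max_connections and its values are illustrative, not from the source):
+  #   conf_name: max_connections
+  #   op_type: add
+  #   value_type: INT
+  #   value_type_sub: RANGE
+  #   value_allowed: (0, 100000]
+  #   value_default: "800"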
+  model.ConfigModel:
+    properties:
+      bk_biz_id:
+        type: string
+      conf_file:
+        type: string
+      conf_name:
+        type: string
+      conf_type:
+        type: string
+      conf_value:
+        type: string
+      created_at:
+        type: string
+      description:
+        type: string
+      extra_info:
+        type: string
+      flag_disable:
+        type: integer
+      flag_locked:
+        type: integer
+      id:
+        type: integer
+      level_locked:
+        type: string
+      level_name:
+        type: string
+      level_value:
+        type: string
+      namespace:
+        type: string
+      updated_at:
+        type: string
+    type: object
+host: localhost:8080
+info:
+  contact:
+    email: support@swagger.io
+    name: API Support
+    url: http://www.swagger.io/support
+  description: This is the bkconfigsvr API server.
+  license:
+    name: Apache 2.0
+    url: http://www.apache.org/licenses/LICENSE-2.0.html
+  termsOfService: http://swagger.io/terms/
+  title: bkconfigsvr API
+  version: 0.0.1
+paths:
+  /bkconfig/v1/conffile/add:
+    post:
+      consumes:
+      - application/json
+      description: |-
+        Create a platform-level config file and define the allowed config names. Set req_type to `SaveOnly` to only save, or `SaveAndPublish` to save and publish. Save-and-publish must also carry the full content, not an increment on top of an earlier save (a request sketch follows this path)
+        req_type=`SaveOnly` is deprecated
+        The first save returns a `file_id`; subsequent save/publish requests must pass that `file_id`
+        namespace, conf_type and conf_file uniquely identify a config file; DB version differences live in conf_file (e.g. MySQL-5.7), and namespace_info may store a frontend-supplied database version, used for display only
+        The HTTP header `X-Bkapi-User-Name` carries the requesting operator
+      parameters:
+      - description: ConfName for ConfType
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/api.UpsertConfFilePlatReq'
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/api.UpsertConfFilePlatResp'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.HTTPClientErrResp'
+      summary: Create a platform-level config file
+      tags:
+      - plat_config
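+  # A minimal request sketch for the path above, assuming the host declared in
+  # this spec; admin and body.json are placeholders, not real values:
+  #   curl -X POST 'http://localhost:8080/bkconfig/v1/conffile/add' \
+  #     -H 'Content-Type: application/json' -H 'X-Bkapi-User-Name: admin' \
+  #     -d @body.json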
+  /bkconfig/v1/conffile/list:
+    get:
+      description: |-
+        Query the list of config file templates. Only the platform and business levels have config file lists
+        The returned updated_by field is the operator
+      parameters:
+      - description: Business id; bk_biz_id=0 means platform config
+        in: query
+        name: bk_biz_id
+        required: true
+        type: string
+      - description: If conf_file is given, only that single file is queried
+        in: query
+        name: conf_file
+        type: string
+      - example: dbconf
+        in: query
+        name: conf_type
+        required: true
+        type: string
+      - description: |-
+          Config level name; currently allowed values are `app`,`module`,`cluster`
+          Combined with the flag_locked marker, it tells at which level a config is locked
+        enum:
+        - plat
+        - app
+        - module
+        - cluster
+        in: query
+        name: level_name
+        required: true
+        type: string
+      - description: Config level value
+        in: query
+        name: level_value
+        type: string
+      - description: Namespace, usually the DB type
+        in: query
+        name: namespace
+        required: true
+        type: string
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            items:
+              $ref: '#/definitions/api.ListConfFileResp'
+            type: array
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.HTTPClientErrResp'
+      summary: Query the config file list
+      tags:
+      - plat_config
+  /bkconfig/v1/conffile/query:
+    get:
+      description: Query all config names of a given config type/config file in the platform config
+      parameters:
+      - example: MySQL-5.7
+        in: query
+        name: conf_file
+        required: true
+        type: string
+      - description: If set, performs a fuzzy prefix-match search
+        in: query
+        name: conf_name
+        type: string
+      - example: dbconf
+        in: query
+        name: conf_type
+        required: true
+        type: string
+      - example: tendbha
+        in: query
+        name: namespace
+        type: string
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/QueryConfigNamesResp'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.HTTPClientErrResp'
+      summary: Query the platform config item list
+      tags:
+      - plat_config
+  /bkconfig/v1/conffile/update:
+    post:
+      consumes:
+      - application/json
+      description: |-
+        Edit a platform-level config file. Set req_type to `SaveOnly` to only save, or `SaveAndPublish` to save and publish
+        The HTTP header `X-Bkapi-User-Name` carries the requesting operator
+        When editing platform config, setting flag_disable=1 hides the item from the platform config item list, which effectively manages the list of all allowed config items
+        On save, the submitted value_default, value_type and value_allowed are validated (see the commented examples after this path)
+        1. value_type currently allows STRING, INT, FLOAT, NUMBER
+        2. value_type_sub allows ENUM, ENUMS, RANGE, STRING, JSON, REGEX (a special STRING that checks whether value_default matches the value_allowed regex)
+        3. value_allowed accepts enums, e.g. `0|1|2` or `ON|OFF`, and ranges, e.g. `(0, 1000]`
+      parameters:
+      - description: ConfName for ConfType
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/api.UpsertConfFilePlatReq'
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/api.UpsertConfFilePlatResp'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.HTTPClientErrResp'
+      summary: Edit a platform-level config file
+      tags:
+      - plat_config
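+  # Hypothetical field combinations that would pass the validation rules above:
+  #   value_type: FLOAT,  value_type_sub: RANGE, value_allowed: (0, 1000], value_default: "1.5"
+  #   value_type: STRING, value_type_sub: ENUM,  value_allowed: ON|OFF,    value_default: ON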
+  /bkconfig/v1/confitem/query:
+    post:
+      consumes:
+      - application/json
+      description: |-
+        Get the config items of a config file for the given business/module/cluster. Typically used when the frontend requests and then edits configs; backend services that fetch config files directly should use the /version/generate endpoint
+        conf_file may be several file names separated by commas; the result is a list grouped by config file name
+        The response format must be given via format, allowed values map, list (shapes sketched in the comment after this path)
+        The map format drops the other fields of each conf_item and keeps only conf_name=conf_value, typically for backend services
+        The list format keeps the other fields as conf_name=conf_item, typically for frontend display
+        When getting cluster-level config, the module info level_info:{"module":"xxx"} must be provided
+      parameters:
+      - description: GetConfigItemsReq
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/GetConfigItemsReq'
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            items:
+              $ref: '#/definitions/GetConfigItemsResp'
+            type: array
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.HTTPClientErrResp'
+      summary: Get the config item list of config files
+      tags:
+      - config_item
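+  # Illustrative shapes for the two formats (field values are hypothetical;
+  # the exact schema is GetConfigItemsResp):
+  #   format=map  -> items like {"max_connections": "800"}
+  #   format=list -> items like [{"conf_name": "max_connections", "conf_value": "800", ...}]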
+  /bkconfig/v1/confitem/save:
+    post:
+      consumes:
+      - application/json
+      description: |-
+        Edit level config; levels include business (app), module and cluster, and the level to modify is selected via level_name and level_value
+        The edited config type conf_type has no notion of versioning, i.e. saving takes effect immediately and no publish is needed
+        When saving cluster-level config, the module info level_info:{"module":"xxx"} must be provided
+      parameters:
+      - description: SaveConfItemsReq
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/api.SaveConfItemsReq'
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/api.UpsertConfItemsResp'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.HTTPClientErrResp'
+      summary: Edit config (no versioning)
+      tags:
+      - config_item
+  /bkconfig/v1/confitem/upsert:
+    post:
+      consumes:
+      - application/json
+      description: |-
+        Edit level config; levels include business (app), module and cluster, and the level to modify is selected via level_name and level_value
+        Example 1: level_name=app, level_value=testapp modifies the config of business bk_biz_id=testapp
+        Example 2: level_name=module, level_value=account modifies the config of module account under a business bk_biz_id
+        The HTTP header `X-Bkapi-User-Name` carries the requesting operator
+        When getting cluster-level config, the module info level_info:{"module":"xxx"} must be provided
+      parameters:
+      - description: UpsertConfItemsReq
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/api.UpsertConfItemsReq'
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/api.UpsertConfItemsResp'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.HTTPClientErrResp'
+      summary: Edit and publish level config
+      tags:
+      - config_item
+  /bkconfig/v1/confname/list:
+    get:
+      description: Query the config name list of a given config type/config file, excluding locked platform configs
+      parameters:
+      - example: MySQL-5.7
+        in: query
+        name: conf_file
+        required: true
+        type: string
+      - description: If set, performs a fuzzy prefix-match search
+        in: query
+        name: conf_name
+        type: string
+      - example: dbconf
+        in: query
+        name: conf_type
+        required: true
+        type: string
+      - example: tendbha
+        in: query
+        name: namespace
+        type: string
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/QueryConfigNamesResp'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.HTTPClientErrResp'
+      summary: Query the predefined config name list
+      tags:
+      - config_meta
+  /bkconfig/v1/simpleitem/list:
+    get:
+      description: Do not use
+      parameters:
+      - in: query
+        name: bk_biz_id
+        type: string
+      - in: query
+        name: cluster
+        type: string
+      - in: query
+        name: conf_file
+        type: string
+      - in: query
+        name: conf_name
+        type: string
+      - in: query
+        name: conf_type
+        type: string
+      - in: query
+        name: conf_value
+        type: string
+      - in: query
+        name: created_at
+        type: string
+      - in: query
+        name: created_by
+        type: string
+      - in: query
+        name: description
+        type: string
+      - in: query
+        name: format
+        type: string
+      - in: query
+        name: inherit_from
+        type: string
+      - in: query
+        name: level_name
+        type: string
+      - in: query
+        name: level_value
+        type: string
+      - in: query
+        name: module
+        type: string
+      - in: query
+        name: namespace
+        type: string
+      - in: query
+        name: revision
+        type: string
+      - in: query
+        name: updated_at
+        type: string
+      - in: query
+        name: updated_by
+        type: string
+      - in: query
+        name: view
+        type: string
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            items:
+              $ref: '#/definitions/model.ConfigModel'
+            type: array
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.HTTPClientErrResp'
+      summary: Generic endpoint for querying the config item list
+      tags:
+      - simple_item
+  /bkconfig/v1/version/detail:
+    get:
+      description: Query the details of a historical config version
+      parameters:
+      - description: Business ID, required
+        example: testapp
+        in: query
+        name: bk_biz_id
+        required: true
+        type: string
+      - description: Config file name; usually one config type maps to one config file, but e.g. MySQL versions 5.6 and 5.7 both belong to the dbconf type, so there are two config files MySQL-5.6 and MySQL-5.7
+        example: MySQL-5.7
+        in: query
+        name: conf_file
+        required: true
+        type: string
+      - description: Config type, e.g. dbconf, backup
+        example: dbconf
+        in: query
+        name: conf_type
+        required: true
+        type: string
+      - description: |-
+          Config level name; currently allowed values are `app`,`module`,`cluster`
+          Combined with the flag_locked marker, it tells at which level a config is locked
+        enum:
+        - plat
+        - app
+        - module
+        - cluster
+        in: query
+        name: level_name
+        required: true
+        type: string
+      - description: Config level value
+        in: query
+        name: level_value
+        type: string
+      - description: Namespace, usually the DB type
+        example: tendbha
+        in: query
+        name: namespace
+        required: true
+        type: string
+      - example: v_20220309215824
+        in: query
+        name: revision
+        type: string
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/api.GetVersionedDetailResp'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.HTTPClientErrResp'
+      summary: Query version details
+      tags:
+      - config_version
+  /bkconfig/v1/version/generate:
+    post:
+      consumes:
+      - application/json
+      description: |-
+        Generate a config file directly from the existing config items and return it; each call creates a new version, which can optionally be published right away. This endpoint is typically used by backend services to query config
+        To modify config and publish, use the /confitem/upsert endpoint
+        To query config file content directly, use the /confitem/query endpoint
+        Depending on `method`, it can generate and store the config (`GenerateAndSave`) or generate, store and publish it (`GenerateAndPublish`); the two flows are sketched in the comment after this path
+        With `GenerateAndSave`, a further call to the PublishConfigFile endpoint is required to publish
+      parameters:
+      - description: Generate config file versioned
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/GenerateConfigReq'
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/GenerateConfigResp'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.HTTPClientErrResp'
+      summary: Generate and get a new config file version
+      tags:
+      - config_version
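+  # The two `method` flows above, as a sketch (the later publish step for
+  # GenerateAndSave presumably maps to /version/publish below):
+  #   GenerateAndSave    -> new revision stored, publish later
+  #   GenerateAndPublish -> new revision stored and published in one call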
+  /bkconfig/v1/version/list:
+    get:
+      description: Get the list of config file versions
+      parameters:
+      - description: Business ID, required
+        example: testapp
+        in: query
+        name: bk_biz_id
+        required: true
+        type: string
+      - description: Config file name; usually one config type maps to one config file, but e.g. MySQL versions 5.6 and 5.7 both belong to the dbconf type, so there are two config files MySQL-5.6 and MySQL-5.7
+        example: MySQL-5.7
+        in: query
+        name: conf_file
+        required: true
+        type: string
+      - description: Config type, e.g. dbconf, backup
+        example: dbconf
+        in: query
+        name: conf_type
+        required: true
+        type: string
+      - description: |-
+          Config level name; currently allowed values are `app`,`module`,`cluster`
+          Combined with the flag_locked marker, it tells at which level a config is locked
+        enum:
+        - plat
+        - app
+        - module
+        - cluster
+        in: query
+        name: level_name
+        required: true
+        type: string
+      - description: Config level value
+        in: query
+        name: level_value
+        type: string
+      - description: Namespace, usually the DB type
+        example: tendbha
+        in: query
+        name: namespace
+        required: true
+        type: string
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/ListConfigVersionsResp'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.HTTPClientErrResp'
+      summary: Query the historical config version name list
+      tags:
+      - config_version
+  /bkconfig/v1/version/publish:
+    post:
+      consumes:
+      - application/json
+      description: |-
+        Publish the given version of a config file; a config file that is not in published state cannot be used
+        Publishing takes the previously published config file offline; the same revision cannot be published twice
+        Passing the patch parameter at publish time overrides config items of that version in the config center (only items whose value starts with `{{` can be overridden; see the commented example after this path)
+      parameters:
+      - description: Publish config file versioned
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/PublishConfigFileReq'
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/api.HTTPOkNilResp'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.HTTPClientErrResp'
+      summary: Directly publish a version [deprecated]
+      tags:
+      - config_version
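+  # A hypothetical patch override for the rule above: an item published with the
+  # templated value {{cluster_port}} could be overridden at publish time, e.g.
+  #   patch: {"port": "3306"}
+  # while items whose published value does not start with `{{` stay untouched.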
+schemes:
+- http
+securityDefinitions:
+  BasicAuth:
+    type: basic
+swagger: "2.0"
diff --git a/dbm-services/common/db-config/go.mod b/dbm-services/common/db-config/go.mod
new file mode 100644
index 0000000000..30454f742c
--- /dev/null
+++ b/dbm-services/common/db-config/go.mod
@@ -0,0 +1,80 @@
+module bk-dbconfig
+
+go 1.19
+
+replace github.com/sirupsen/logrus v1.8.1 => github.com/Sirupsen/logrus v1.8.1
+
+require (
+	github.com/coocood/freecache v1.2.1
+	github.com/gin-contrib/cors v1.3.1
+	github.com/gin-gonic/gin v1.7.7
+	github.com/go-playground/locales v0.14.0
+	github.com/go-playground/universal-translator v0.18.0
+	github.com/go-playground/validator/v10 v10.10.0
+	github.com/golang-jwt/jwt v3.2.2+incompatible
+	github.com/golang-migrate/migrate/v4 v4.15.2
+	github.com/google/uuid v1.3.0
+	github.com/jinzhu/copier v0.3.5
+	github.com/jsternberg/zap-logfmt v1.2.0
+	github.com/pkg/errors v0.9.1
+	github.com/robfig/cron v1.2.0
+	github.com/sethvargo/go-password v0.2.0
+	github.com/sirupsen/logrus v1.8.1
+	github.com/smartystreets/goconvey v1.6.4
+	github.com/spf13/cast v1.3.0
+	github.com/spf13/pflag v1.0.5
+	github.com/spf13/viper v1.7.1
+	github.com/swaggo/swag v1.8.0
+	github.com/vmihailenco/msgpack/v5 v5.3.5
+	go.uber.org/zap v1.17.0
+	golang.org/x/crypto v0.0.0-20220408190544-5352b0902921
+	gorm.io/driver/mysql v1.2.2
+	gorm.io/gorm v1.22.4
+)
+
+require (
+	github.com/KyleBanks/depth v1.2.1 // indirect
+	github.com/PuerkitoBio/purell v1.1.1 // indirect
+	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/fsnotify/fsnotify v1.4.9 // indirect
+	github.com/gin-contrib/sse v0.1.0 // indirect
+	github.com/go-openapi/jsonpointer v0.19.5 // indirect
+	github.com/go-openapi/jsonreference v0.19.6 // indirect
+	github.com/go-openapi/spec v0.20.4 // indirect
+	github.com/go-openapi/swag v0.19.15 // indirect
+	github.com/go-sql-driver/mysql v1.6.0 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/jinzhu/inflection v1.0.0 // indirect
+	github.com/jinzhu/now v1.1.3 // indirect
+	github.com/josharian/intern v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/jtolds/gls v4.20.0+incompatible // indirect
+	github.com/leodido/go-urn v1.2.1 // indirect
+	github.com/magiconair/properties v1.8.1 // indirect
+	github.com/mailru/easyjson v0.7.6 // indirect
+	github.com/mattn/go-isatty v0.0.12 // indirect
+	github.com/mitchellh/mapstructure v1.1.2 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/pelletier/go-toml v1.9.3 // indirect
+	github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d // indirect
+	github.com/spf13/afero v1.6.0 // indirect
+	github.com/spf13/jwalterweatherman v1.0.0 // indirect
+	github.com/subosito/gotenv v1.2.0 // indirect
+	github.com/ugorji/go/codec v1.1.7 // indirect
+	github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
+	go.uber.org/atomic v1.7.0 // indirect
+	go.uber.org/multierr v1.6.0 // indirect
+	golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
+	golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf // indirect
+	golang.org/x/text v0.3.7 // indirect
+	golang.org/x/tools v0.1.10 // indirect
+	google.golang.org/protobuf v1.27.1 // indirect
+	gopkg.in/ini.v1 v1.51.0 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+)
diff --git a/dbm-services/common/db-config/go.sum b/dbm-services/common/db-config/go.sum
new file mode 100644
index 0000000000..ea1d2b75e2
--- /dev/null
+++ b/dbm-services/common/db-config/go.sum
@@ -0,0 +1,1931 @@
+bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/spanner v1.28.0/go.mod h1:7m6mtQZn/hMbMfx62ct5EWrGND4DNqkXyrmBPRS+OJo=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
+github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
+github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
+github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
+github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/Sirupsen/logrus v1.8.1 h1:9Kb5eF3ggHzzQCCk15IsQy3yZXuoOZDmEhDeaXv5p3E=
+github.com/Sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/agiledragon/gomonkey/v2 v2.3.1 h1:k+UnUY0EMNYUFUAQVETGY9uUTxjMdnUkP0ARyJS1zzs=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY=
+github.com/apache/arrow/go/arrow v0.0.0-20211013220434-5962184e7a30/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0=
+github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
+github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU=
+github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw=
+github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM=
+github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.4/go.mod h1:Ex7XQmbFmgFHrjUX6TN3mApKW5Hglyga+F7wZHTtYhA=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.2/go.mod h1:np7TMuJNT83O0oDOSF8i4dF3dvGqA6hPYYo6YYkzgRA=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.16.1/go.mod h1:CQe/KvWV1AqRc65KqeJjrLzr5X2ijnFTTVzJW0VBRCI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo=
+github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk=
+github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs=
+github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g=
+github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
+github.com/containerd/containerd v1.6.1 h1:oa2uY0/0G+JX4X7hpGCYvkp9FjUancz56kSNnb1sG3o=
+github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/coocood/freecache v1.2.1 h1:/v1CqMq45NFH9mp/Pt142reundeBM0dVUD3osQBeu/U=
+github.com/coocood/freecache v1.2.1/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dhui/dktest v0.3.10 h1:0frpeeoM9pHouHjhLeZDuDTJ0PqjDTrycaHaMmkJAo8=
+github.com/dhui/dktest v0.3.10/go.mod h1:h5Enh0nG3Qbo9WjNFRrwmKUaePEBhXMOygbz3Ww7Sz0=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
+github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.13+incompatible h1:5s7uxnKZG+b8hYWlPYUi6x1Sjpq2MSt96d15eLZeHyw=
+github.com/docker/docker v20.10.13+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
+github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/cors v1.3.1 h1:doAsuITavI4IOcd0Y19U4B+O0dNWihRyX//nn4sEmgA=
+github.com/gin-contrib/cors v1.3.1/go.mod h1:jjEJ4268OPZUcU7k9Pm653S7lXUGcqMADzFA61xsmDk=
+github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
+github.com/gin-gonic/gin v1.7.7 h1:3DoBmSbJbZAWqXJC3SLjAPfutPJJRN1U5pALB7EeTTs=
+github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs=
+github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M=
+github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
+github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU=
+github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
+github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
+github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
+github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
+github.com/go-playground/validator/v10 v10.10.0 h1:I7mrTYv78z8k8VXa/qJlOlEXn/nBh+BF8dHX5nt/dr0=
+github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
+github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-migrate/migrate/v4 v4.15.2 h1:vU+M05vs6jWHKDdmE1Ecwj0BznygFc4QsdRe2E/L7kc=
+github.com/golang-migrate/migrate/v4 v4.15.2/go.mod h1:f2toGLkYqD3JH+Todi4aZ2ZdbeUNx4sIwiOK96rE9Lw=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
+github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk=
+github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
+github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.7/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
+github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po=
+github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ=
+github.com/jackc/pgtype v1.6.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA=
+github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o=
+github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg=
+github.com/jackc/pgx/v4 v4.10.1/go.mod h1:QlrWebbs3kqEZPHCTGyxecvzG6tvIsYu+A5b1raylkA=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
+github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
+github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jinzhu/now v1.1.3 h1:PlHq1bSCSZL9K0wUhbm2pGLoTWs2GwVhsP6emvGV/ZI=
+github.com/jinzhu/now v1.1.3/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
+github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jsternberg/zap-logfmt v1.2.0 h1:1v+PK4/B48cy8cfQbxL4FmmNZrjnIMr2BsnyEmXqv2o=
+github.com/jsternberg/zap-logfmt v1.2.0/go.mod h1:kz+1CUmCutPWABnNkOu9hOHKdT2q3TDYCcsFy9hpqb0=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
+github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4=
+github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
+github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E=
+github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
+github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/otiai10/copy v1.7.0 h1:hVoPiN+t+7d2nzzwMiDHPSOogsWAStewq3TwU05+clE=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
+github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
+github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
+github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
+github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI=
+github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
+github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
+github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/swaggo/swag v1.8.0 h1:80NNhvpJcuItNpBDqgJwDuKlMmaZ/OATOzhG3bhcM3w=
+github.com/swaggo/swag v1.8.0/go.mod h1:gZ+TJ2w/Ve1RwQsA2IRoSOTidHz6DX+PIG8GWvbnoLU=
+github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
+github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
+github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
+github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
+gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
+go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
+go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
+go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
+go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
+go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
+go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE=
+go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
+go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
+go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
+go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
+go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
+go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
+go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220408190544-5352b0902921 h1:iU7T1X1J6yxDr0rda54sWGkHgOp5XJrqm79gcNlC2VM=
+golang.org/x/crypto v0.0.0-20220408190544-5352b0902921/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o=
+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190225153610-fe579d43d832/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220111093109-d55c255bac03/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf h1:Fm4IcnUL803i92qDlmB0obyHmosDrxZWxJL3gIeNqOw=
+golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.10 h1:QjFRCZxdOhBJ/UNgnBZLbNV13DlbnK0quyivTnXJM20=
+golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
+golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
+google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
+google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106 h1:ErU+UA6wxadoU8nWrsy5MZUVBs75K17zUCsUCIfrXCE=
+google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
+gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gorm.io/driver/mysql v1.2.2 h1:2qoqhOun1maoJOfLtnzJwq+bZlHkEF34rGntgySqp48=
+gorm.io/driver/mysql v1.2.2/go.mod h1:qsiz+XcAyMrS6QY+X3M9R6b/lKM1imKmcuK9kac5LTo=
+gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg=
+gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
+gorm.io/gorm v1.21.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
+gorm.io/gorm v1.22.4 h1:8aPcyEJhY0MAt8aY6Dc524Pn+pO29K+ydu+e/cXSpQM=
+gorm.io/gorm v1.22.4/go.mod h1:1aeVC+pe9ZmvKZban/gW4QPra7PRoTEssyc922qCAkk=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
+gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
+gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
+k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
+k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
+k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs=
+k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
+k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U=
+k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
+k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
+k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
+k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ=
+k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
+k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
+k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
+k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y=
+k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
+k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
+k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
+k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
+k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI=
+k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
+k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
+k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
+k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
+k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg=
+modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878=
+modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo=
+modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8=
+modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw=
+modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8=
+modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
+modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
+modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM=
+modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
+modernc.org/libc v1.9.5/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
+modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8=
+modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
+modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc=
+modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY=
+modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k=
+modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs=
+modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
+modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo=
+modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
+modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
+modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/dbm-services/common/db-config/internal/api/api.go b/dbm-services/common/db-config/internal/api/api.go
new file mode 100644
index 0000000000..b3b767c276
--- /dev/null
+++ b/dbm-services/common/db-config/internal/api/api.go
@@ -0,0 +1,2 @@
+// Package api defines the request and response structures of the db-config service
+package api
diff --git a/dbm-services/common/db-config/internal/api/apply_config.go b/dbm-services/common/db-config/internal/api/apply_config.go
new file mode 100644
index 0000000000..0a17694c27
--- /dev/null
+++ b/dbm-services/common/db-config/internal/api/apply_config.go
@@ -0,0 +1,212 @@
+package api
+
+import (
+	"bk-dbconfig/pkg/validate"
+)
+
+// ApplyConfigItem describes the before/after state of one config item when a version is applied
+type ApplyConfigItem struct {
+	ConfName string `json:"conf_name"`
+	// New value
+	ConfValue string `json:"conf_value"`
+	// Previous value
+	ValueBefore string `json:"value_before"`
+	// Revision that last modified this config item
+	UpdatedRevision string `json:"updated_revision"`
+	// Whether applying this item requires a restart
+	NeedRestart bool `json:"need_restart"`
+	// Description from the config item definition
+	Description string `json:"description"`
+	OPType      string `json:"op_type"`
+	// Level this config value is inherited from
+	LevelNameFrom string `json:"level_name_from"`
+	// Whether the config item is locked at that level
+	FlagLocked int8 `json:"flag_locked"`
+	// Whether the item has already been applied
+	Applied int8 `json:"applied"`
+}
+
+// ApplyConfigInfoReq is the request for querying the apply status and diff of a config node
+type ApplyConfigInfoReq struct {
+	BaseConfigNode
+}
+
+// ConfigItemBase is the minimal identity of a config item and its value
+type ConfigItemBase struct {
+	ConfName        string `json:"conf_name"`
+	ConfValue       string `json:"conf_value"`
+	UpdatedRevision string `json:"updated_revision"`
+}
+
+// ConfigNameBase carries the definition of a config name: default, allowed values, type
+type ConfigNameBase struct {
+	ConfName     string `json:"conf_name"`
+	ValueDefault string `json:"value_default"`
+	ValueAllowed string `json:"value_allowed"`
+	ValueType    string `json:"value_type"`
+	NeedRestart  bool   `json:"need_restart"`
+	Description  string `json:"description"`
+}
+
+// ApplyConfigInfoResp returns the per-item diff between the applied revision and the revision to apply
+type ApplyConfigInfoResp struct {
+	ConfigsDiff map[string]*ApplyConfigItem `json:"configs_diff"`
+
+	// Configs       []*ConfigItemBase          `json:"configs"`
+	// ConfigsBefore []*ConfigItemBase          `json:"configs_before"`
+	// ConfigNames map[string]*ConfigNameBase `json:"config_names"`
+
+	RevisionToApply string `json:"revision_toapply"`
+	RevisionBefore  string `json:"revision_before"`
+	VersionID       uint64 `json:"version_id"`
+	NodeID          uint64 `json:"node_id"`
+}
+
+// Validate validates the request against its struct tags
+func (a *ApplyConfigInfoReq) Validate() error {
+	if err := validate.GoValidateStruct(*a, true); err != nil {
+		return err
+	}
+	return nil
+}
+
+// VersionStatReq godoc
+type VersionStatReq struct {
+	BKBizIDDef
+	BaseConfFileDef
+	BaseLevelsDef
+}
+
+// VersionStatResp godoc
+type VersionStatResp struct {
+	// Map keys are the queried level values; values are lists of status codes
+	LevelValues map[string][]int `json:"level_values"`
+	// Descriptions of the status codes
+	StatusInfo map[int]string `json:"status_info"`
+}
+
+// VersionStatus pairs a status code with its human-readable message
+type VersionStatus struct {
+	Code    int    `json:"code"`
+	Message string `json:"message"`
+}
+
+// StatusMap maps version status codes to their descriptions
+var StatusMap = map[int]string{
+	1:  "最新发布版本 已应用",  // latest published version: applied
+	2:  "最新发布版本 未应用",  // latest published version: not applied
+	3:  "配置异常: 没找到已发布版本", // config error: no published version found
+	4:  "配置异常:没找到已应用版本",  // config error: no applied version found
+	10: "待发布: 存在来自上层级的配置强制更新", // pending publish: forced config update from an upper level
+}
+
+// Validate validates the request against its struct tags
+func (a *VersionStatReq) Validate() error {
+	if err := validate.GoValidateStruct(*a, true); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ApplyConfigReq marks a version's status as applied
+type ApplyConfigReq struct {
+	BaseConfigNode
+	// The revision that has been successfully applied, usually the last published revision
+	RevisionApplied string `json:"revision_applied" form:"revision_applied" validate:"required"`
+}
+
+// Validate validates the request against its struct tags
+func (a *ApplyConfigReq) Validate() error {
+	if err := validate.GoValidateStruct(*a, true); err != nil {
+		return err
+	}
+	return nil
+}
+
+// VersionApplyReq applies a level_config version
+// Applying a level_config always applies all config items of the version
+type VersionApplyReq struct {
+	BaseConfigNode
+	// The revision to apply, usually the last published revision
+	RevisionApply string `json:"revision_apply" form:"revision_apply" validate:"required"`
+	// Which direct children to apply to: listed children are saved and published, the others are only saved as one version
+	// ChildApplied []string `json:"child_applied" form:"child_applied"`
+}
+
+// Validate validates the request against its struct tags
+func (a *VersionApplyReq) Validate() error {
+	if err := validate.GoValidateStruct(*a, true); err != nil {
+		return err
+	}
+	return nil
+}
+
+// BaseConfigNode godoc
+// bk_biz_id, namespace, conf_type, conf_file, level_name, level_value => config node_id
+type BaseConfigNode struct {
+	BKBizIDDef
+	BaseConfFileDef
+	BaseLevelDef
+}
+
+// QueryConfigOptions holds optional filters and flags for config queries
+type QueryConfigOptions struct {
+	InheritFrom           string `json:"inherit_from"`
+	Module                string `json:"module" form:"module"`
+	Cluster               string `json:"cluster" form:"cluster"`
+	ConfName              string `json:"conf_name" form:"conf_name"`
+	ConfValue             string `json:"conf_value" form:"conf_value"`
+	Generate              bool
+	Decrypt               bool
+	Format                string `json:"format"`
+	View                  string `json:"view"`
+	Description           string `json:"description"`
+	CreatedBy             string `json:"createdBy"`
+	RowsAffected          int
+	FromNodeConfigApplied bool // whether the request comes from a level_config apply
+}
+
+// Set fills all identifying fields of the config node in one call
+func (b *BaseConfigNode) Set(bkBizID, namespace, confType, confFile, levelName, levelValue string) {
+	/*
+	   levelNode := &BaseConfigNode{
+	       BKBizIDDef: BKBizIDDef{
+	           BKBizID: bkBizID,
+	       },
+	       BaseConfFileDef: BaseConfFileDef{
+	           Namespace: namespace,
+	           ConfType:  confType,
+	           ConfFile:  confFile,
+	       },
+	       BaseLevelDef: BaseLevelDef{
+	           LevelName: levelName,
+	           LevelValue: levelValue,
+	       },
+	   },
+	*/
+	b.BKBizID = bkBizID
+	b.Namespace = namespace
+	b.ConfType = confType
+	b.ConfFile = confFile
+	b.LevelName = levelName
+	b.LevelValue = levelValue
+}
+
+// ConfItemApplyReq applies individual versioned_config items
+// For versioned_config, callers can choose which conf_name items are applied
+type ConfItemApplyReq struct {
+	BaseConfigNode
+	NodeID uint64 `json:"node_id" form:"node_id"`
+	// The revision that has been applied
+	RevisionApply string `json:"revision_apply" form:"revision_apply" validate:"required"`
+	// Which config items were applied
+	ConfNames []string `json:"conf_names" form:"conf_names" validate:"required"`
+}
+
+// Validate validates the request against its struct tags
+func (a *ConfItemApplyReq) Validate() error {
+	if err := validate.GoValidateStruct(*a, true); err != nil {
+		return err
+	}
+	return nil
+}
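For orientation, a minimal sketch (not part of the patch) of how a caller inside this module might combine these types. The biz/namespace/level values and the revision id are made-up placeholders, and `bk-dbconfig/internal/api` is the import path implied by the module name this patch uses; internal packages are only importable from within the module.

package main

import (
	"fmt"

	"bk-dbconfig/internal/api"
)

func main() {
	// Identify the config node: biz / namespace / conf_type / conf_file / level.
	var node api.BaseConfigNode
	node.Set("testapp", "tendbha", "dbconf", "MySQL-5.7", "cluster", "c1")

	// Mark a published revision as applied (placeholder revision id).
	req := api.ApplyConfigReq{BaseConfigNode: node, RevisionApplied: "v_example_1"}
	if err := req.Validate(); err != nil {
		fmt.Println("bad request:", err)
	}
}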
diff --git a/dbm-services/common/db-config/internal/api/baseResponse.go b/dbm-services/common/db-config/internal/api/baseResponse.go
new file mode 100644
index 0000000000..95b37c231e
--- /dev/null
+++ b/dbm-services/common/db-config/internal/api/baseResponse.go
@@ -0,0 +1,40 @@
+package api
+
+import (
+	"encoding/json"
+)
+
+// BaseApiResponse is the generic API envelope; Data is kept raw for a second-stage decode
+type BaseApiResponse struct {
+	Code    int             `json:"code"`
+	Message string          `json:"message"`
+	Data    json.RawMessage `json:"data"`
+}
+
+// BaseApiRespInterface is the generic API envelope with an untyped Data payload
+type BaseApiRespInterface struct {
+	Code    int         `json:"code"`
+	Message string      `json:"message"`
+	Data    interface{} `json:"data"`
+}
+
+// HTTPOkNilResp is a 200 response with an empty data payload
+type HTTPOkNilResp struct {
+	Code    int         `json:"code" example:"200"`
+	Message string      `json:"message"`
+	Data    interface{} `json:"data"`
+}
+
+// HTTPClientErrResp is a 400 client-error response
+type HTTPClientErrResp struct {
+	Code    int         `json:"code" example:"400"`
+	Message string      `json:"message" example:"输入参数错误"` // status bad request
+	Data    interface{} `json:"data"`
+}
+
+// HTTPServerErrResp is a 500 server-error response
+type HTTPServerErrResp struct {
+	Code    int         `json:"code" example:"500"`
+	Message string      `json:"message" example:"服务端错误"` // server internal error
+	Data    interface{} `json:"data"`
+}
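A self-contained sketch of why Data is json.RawMessage: the envelope is decoded once, then Data is decoded into an endpoint-specific type in a second step. The struct is mirrored locally so the snippet runs on its own, and the payload shape is illustrative.

package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of BaseApiResponse above: Data stays raw so each caller picks its own payload type.
type baseAPIResponse struct {
	Code    int             `json:"code"`
	Message string          `json:"message"`
	Data    json.RawMessage `json:"data"`
}

func main() {
	body := []byte(`{"code":200,"message":"ok","data":{"revision":"v1"}}`)
	var resp baseAPIResponse
	if err := json.Unmarshal(body, &resp); err != nil {
		panic(err)
	}
	var payload struct {
		Revision string `json:"revision"`
	}
	if err := json.Unmarshal(resp.Data, &payload); err != nil {
		panic(err)
	}
	fmt.Println(resp.Code, payload.Revision) // 200 v1
}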
diff --git a/dbm-services/common/db-config/internal/api/bkapigw_user.go b/dbm-services/common/db-config/internal/api/bkapigw_user.go
new file mode 100644
index 0000000000..2231b5f51d
--- /dev/null
+++ b/dbm-services/common/db-config/internal/api/bkapigw_user.go
@@ -0,0 +1,30 @@
+package api
+
+import "encoding/json"
+
+// X-Bkapi-Authorization:[{"bk_app_code": "bk_dbm", "bk_app_secret": "72fec2b4-512b-4c4d-b5d2-572f634af641", "bk_username": "admin"}]
+// X-Request-Id:[96d76806-dcf6-11ed-b47f-020b20fcf451]]
+
+// BKAuthorization is the parsed payload of the X-Bkapi-Authorization header
+type BKAuthorization struct {
+	BKAppCode   string `json:"bk_app_code"`
+	BKAppSecret string `json:"bk_app_secret"`
+	BKUsername  string `json:"bk_username"`
+}
+
+// GetHeaderUsername extracts bk_username from an X-Bkapi-Authorization header value
+func GetHeaderUsername(header string) string {
+	if header == "" {
+		return ""
+	}
+	var bkAuth BKAuthorization
+	if err := json.Unmarshal([]byte(header), &bkAuth); err != nil {
+		return ""
+	}
+	return bkAuth.BKUsername
+}
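Note that the `[{...}]` brackets in the header comment above are just how an http.Header value slice prints; GetHeaderUsername expects the bare JSON object. A self-contained sketch of the same extraction logic (secret redacted):

package main

import (
	"encoding/json"
	"fmt"
)

// Same logic as GetHeaderUsername: decode the header value, keep only bk_username.
func headerUsername(header string) string {
	var auth struct {
		BKUsername string `json:"bk_username"`
	}
	if header == "" || json.Unmarshal([]byte(header), &auth) != nil {
		return ""
	}
	return auth.BKUsername
}

func main() {
	h := `{"bk_app_code":"bk_dbm","bk_app_secret":"<redacted>","bk_username":"admin"}`
	fmt.Println(headerUsername(h)) // admin
}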
diff --git a/dbm-services/common/db-config/internal/api/config_base.go b/dbm-services/common/db-config/internal/api/config_base.go
new file mode 100644
index 0000000000..f411404c88
--- /dev/null
+++ b/dbm-services/common/db-config/internal/api/config_base.go
@@ -0,0 +1,91 @@
+package api
+
+import (
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/util"
+
+	"github.com/pkg/errors"
+)
+
+// BKBizIDDef embeds the business ID field shared by most requests
+type BKBizIDDef struct {
+	// Business ID, required
+	BKBizID string `json:"bk_biz_id" form:"bk_biz_id" validate:"required" example:"testapp"`
+}
+
+// RequestType distinguishes save-only from save-and-publish requests
+type RequestType struct {
+	// Request type of the config file modification. `SaveOnly`: save only; `SaveAndPublish`: save and publish
+	ReqType string `json:"req_type" form:"req_type" validate:"required,enums" enums:"SaveOnly,SaveAndPublish"`
+}
+
+// OperationType carries the modification action on a config item
+type OperationType struct {
+	// Config item modification action; the operation type field is required, allowed values: `add`, `update`, `remove`
+	OPType string `json:"op_type" form:"op_type" validate:"required,enums" enums:"add,update,remove"`
+}
+
+// OperationUser records who performed the operation
+type OperationUser struct {
+	// Operator
+	OPUser string `json:"op_user" form:"op_user"`
+}
+
+// RespFormatDef selects the layout of the returned config items
+type RespFormatDef struct {
+	// `map.`, `map#` and `map|` are special map formats: the result splits conf_name on `.`, `#` or `|` respectively
+	Format string `json:"format" form:"format" validate:"enums" enums:",list,map,map.,map#,map|"`
+}
+
+// UpLevelInfo carries information about the levels above the queried one
+type UpLevelInfo struct {
+	// Upper-level info. For example, to query config at level cluster=c1, set level_info: {"module": "m1"} to identify the module the cluster belongs to
+	// Optional; currently only needed to supply the module when querying cluster-level config
+	// TODO the config service may later query dbmeta directly for the possible app-module-cluster-host-instance relations
+	LevelInfo map[string]string `json:"level_info" form:"level_info"`
+}
+
+// Validate checks that the required upper-level info is present
+// Ideally this should decide from the level_names field whether upper-level info is needed
+func (v *UpLevelInfo) Validate(currentLevelName string) error {
+	// TODO validate that the level_name keys themselves are legal
+	if currentLevelName == constvar.LevelCluster {
+		if !util.MapHasElement(v.LevelInfo, constvar.LevelModule) {
+			return errors.Errorf("query level [cluster] should have level_info [module]")
+		}
+	} else if currentLevelName == constvar.LevelInstance {
+		if !util.MapHasElement(v.LevelInfo, constvar.LevelModule) ||
+			!util.MapHasElement(v.LevelInfo, constvar.LevelCluster) {
+			return errors.Errorf("query level [instance] should have level_info [module,cluster]")
+		}
+	} else if !util.IsEmptyMapString(v.LevelInfo) {
+		return errors.Errorf("query level [%s] should not have level_info %s", currentLevelName, v.LevelInfo)
+	}
+	return nil
+}
+
+// GetLevelValue returns the value of levelName from LevelInfo, or "" when absent
+func (v *UpLevelInfo) GetLevelValue(levelName string) string {
+	if levelValue, ok := v.LevelInfo[levelName]; ok {
+		return levelValue
+	}
+	return ""
+}
+
+// BaseLevelDef identifies a single config level
+type BaseLevelDef struct {
+	// Config level name; currently allowed values: `app`, `module`, `cluster`, `instance`
+	// Together with the flag_locked marker this tells at which level an item is locked
+	LevelName string `json:"level_name" label:"level" form:"level_name" validate:"required,enums" enums:"plat,app,module,cluster,instance" example:"cluster"`
+	// Config level value
+	LevelValue string `json:"level_value" form:"level_value"`
+}
+
+// BaseLevelsDef identifies one level name with multiple level values
+type BaseLevelsDef struct {
+	// Config level name; currently allowed values: `app`, `module`, `cluster`, `instance`
+	LevelName string `json:"level_name" form:"level_name" validate:"required,enums" enums:"plat,app,module,cluster,instance" example:"cluster"`
+	// Config level values, an array of multiple values
+	LevelValues []string `json:"level_values" form:"level_values" validate:"required"`
+}
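A sketch of the rule UpLevelInfo.Validate enforces, assuming the constvar level constants equal the literal strings the enums tags suggest ("module", "cluster"); the import path is again the one implied by this patch's module name.

package main

import (
	"fmt"

	"bk-dbconfig/internal/api"
)

func main() {
	// Querying cluster-level config requires the parent module in level_info.
	ok := api.UpLevelInfo{LevelInfo: map[string]string{"module": "m1"}}
	fmt.Println(ok.Validate("cluster")) // <nil>

	// Without it, Validate rejects the query.
	bad := api.UpLevelInfo{}
	fmt.Println(bad.Validate("cluster")) // query level [cluster] should have level_info [module]
}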
diff --git a/dbm-services/common/db-config/internal/api/config_file.go b/dbm-services/common/db-config/internal/api/config_file.go
new file mode 100644
index 0000000000..e97240e502
--- /dev/null
+++ b/dbm-services/common/db-config/internal/api/config_file.go
@@ -0,0 +1,24 @@
+package api
+
+// ListConfFileReq is the request to list config files
+type ListConfFileReq struct {
+	// business id; bk_biz_id=0 means platform config
+	BKBizID string `json:"bk_biz_id" form:"bk_biz_id" validate:"required"`
+	// namespace, generally the DB type
+	Namespace string `json:"namespace" form:"namespace" validate:"required"`
+	ConfType  string `json:"conf_type" form:"conf_type" validate:"required" example:"dbconf"`
+	// if conf_file is given, only that file's info is queried
+	ConfFile string `json:"conf_file" form:"conf_file"`
+	BaseLevelDef
+}
+
+// ListConfFileResp is one config file entry in the list response
+type ListConfFileResp struct {
+	ConfFileDef
+	// creation time
+	CreatedAt string `json:"created_at"`
+	// update time
+	UpdatedAt string `json:"updated_at"`
+	// last updated by
+	UpdatedBy string `json:"updated_by"`
+}
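
For orientation (editorial sketch): the list handler later in this patch binds `ListConfFileReq` from query parameters via the `form` tags, so a listing request is a plain GET against `/bkconfig/v1/conffile/list`. A small sketch building such a URL:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("bk_biz_id", "0") // bk_biz_id=0 selects platform config
	q.Set("namespace", "tendbha")
	q.Set("conf_type", "dbconf")
	q.Set("level_name", "plat")
	fmt.Println("/bkconfig/v1/conffile/list?" + q.Encode())
}
```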
diff --git a/dbm-services/common/db-config/internal/api/config_item.go b/dbm-services/common/db-config/internal/api/config_item.go
new file mode 100644
index 0000000000..2a283eae14
--- /dev/null
+++ b/dbm-services/common/db-config/internal/api/config_item.go
@@ -0,0 +1,180 @@
+package api
+
+import (
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/validate"
+
+	"github.com/pkg/errors"
+)
+
+// BaseConfItemDef is the common definition of one config item
+type BaseConfItemDef struct {
+	// config item name
+	ConfName    string `json:"conf_name" form:"conf_name" validate:"required"`
+	ConfValue   string `json:"conf_value" form:"conf_value"`
+	Description string `json:"description" form:"description"`
+	// whether disabled; default 0 means enabled, 1 means disabled
+	FlagDisable int8 `json:"flag_disable" form:"flag_disable" example:"0"`
+	// whether locked; default 0 means not locked
+	FlagLocked int8 `json:"flag_locked" form:"flag_locked" example:"0"`
+	// publish/apply status of this conf_name. 1: published but not applied, 2: applied
+	Stage int8 `json:"stage" form:"stage" example:"0"`
+}
+
+// BaseConfItemResp is one config item in responses
+type BaseConfItemResp struct {
+	BaseConfItemDef
+	BaseLevelDef
+	// op_type is only meaningful when returning the diff part of configs
+	OPType string `json:"op_type"`
+}
+
+// UpsertConfItem is one config item plus the operation applied to it
+type UpsertConfItem struct {
+	BaseConfItemDef
+	OperationType
+}
+
+// UpsertConfItemsReq updates the configs of an app/module/cluster
+type UpsertConfItemsReq struct {
+	SaveConfItemsReq
+	RequestType
+	Revision string `json:"revision" form:"revision"`
+}
+
+// UpsertConfItemsResp is the response of saving/publishing config items
+type UpsertConfItemsResp struct {
+	BKBizID string `json:"bk_biz_id"`
+	BaseConfFileDef
+	// when editing a config file, the first save returns a revision; later saves/publishes must pass it back
+	Revision    string `json:"revision"`
+	IsPublished int8   `json:"is_published"`
+}
+
+// SaveConfItemsReq saves config nodes directly, only for conf_types without the version concept
+type SaveConfItemsReq struct {
+	BKBizIDDef
+	// if saving conflicts with a lower level, the caller is asked to confirm and re-request with confirm=1
+	Confirm int8 `json:"confirm" form:"confirm"`
+	// publish description
+	Description string `json:"description" form:"description"`
+	BaseLevelDef
+	UpLevelInfo
+	ConfFileInfo ConfFileDef       `json:"conf_file_info" form:"conf_file_info"`
+	ConfItems    []*UpsertConfItem `json:"conf_items" form:"conf_items"`
+}
+
+// Validate validates SaveConfItemsReq and each of its conf_items
+func (v *SaveConfItemsReq) Validate() error {
+	if err := ValidateAppWithLevelName(v.BKBizID, v.LevelName, v.LevelValue); err != nil {
+		return err
+	}
+	if err := validate.GoValidateStruct(*v, true); err != nil {
+		return err
+	}
+	for _, c := range v.ConfItems {
+		if err := validate.GoValidateStruct(*c, true); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Validate validates UpsertConfItemsReq and each of its conf_items
+func (v *UpsertConfItemsReq) Validate() error {
+	if err := ValidateAppWithLevelName(v.BKBizID, v.LevelName, v.LevelValue); err != nil {
+		return err
+	}
+	if err := validate.GoValidateStruct(*v, true); err != nil {
+		return err
+	}
+	for _, c := range v.ConfItems {
+		if err := validate.GoValidateStruct(*c, true); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetConfigItemsReq queries config items
+// it can fetch config items at platform/business/module/cluster level; the result is merged with upper levels but is not persisted as a new version
+type GetConfigItemsReq struct {
+	BKBizIDDef
+	BaseConfFileDef
+	BaseLevelDef
+	UpLevelInfo
+	// response data format
+	RespFormatDef
+	// conf_name(s) to query, multiple values separated by commas; empty means all conf_names of the conf_file
+	ConfName string `json:"conf_name" form:"conf_name"`
+} // @name GetConfigItemsReq
+
+// GetConfigItemsResp is the merged config content for one conf_file
+type GetConfigItemsResp struct {
+	BKBizID string `json:"bk_biz_id"`
+	// Module   string `json:"module"`
+	// Cluster  string `json:"cluster"`
+	BaseLevelDef
+	ConfFileResp `json:"conf_file_info"`
+	// content is a {conf_name: conf_value} dict like {"a":1, "b":"string"}
+	Content map[string]interface{} `json:"content"`
+} // @name GetConfigItemsResp
+
+// Validate validates GetConfigItemsReq
+func (v *GetConfigItemsReq) Validate() error {
+	if err := ValidateAppWithLevelName(v.BKBizID, v.LevelName, v.LevelValue); err != nil {
+		return err
+	}
+	if err := validate.GoValidateStruct(*v, true); err != nil {
+		return err
+	}
+	return nil
+}
+
+// GenerateConfigReq is the request to generate a config file
+type GenerateConfigReq struct {
+	BaseConfigNode
+	UpLevelInfo
+	// method must be one of GenerateAndSave|GenerateAndPublish
+	// `GenerateOnly`: generate the merged config only (documented, but currently rejected by the enums validation)
+	// `GenerateAndSave`: generate and save the merged config to db (snapshot)
+	// `GenerateAndPublish`: generate and save the merged config to db, and mark it as published (release)
+	Method string `json:"method" form:"method" validate:"required,enums" enums:"GenerateAndSave,GenerateAndPublish"`
+	RespFormatDef
+} // @name GenerateConfigReq
+
+// GenerateConfigResp is the generated config content
+type GenerateConfigResp struct {
+	BKBizID string `json:"bk_biz_id"`
+	BaseLevelDef
+	ConfFile string `json:"conf_file"`
+	// content is a {conf_name: conf_value} dict like {"a":1, "b":"string"}
+	Content map[string]interface{} `json:"content"`
+	// version name for this config_file generation
+	Revision string `json:"revision"`
+} // @name GenerateConfigResp
+
+// Validate validates GenerateConfigReq
+func (v *GenerateConfigReq) Validate() error {
+	if err := ValidateAppWithLevelName(v.BKBizID, v.LevelName, v.LevelValue); err != nil {
+		return err
+	}
+	if err := validate.GoValidateStruct(*v, true); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ValidateAppWithLevelName checks the consistency between bk_biz_id and level_name/level_value
+func ValidateAppWithLevelName(bkBizID, levelName, levelValue string) error {
+	if (bkBizID == constvar.BKBizIDForPlat && levelName != constvar.LevelPlat) ||
+		(bkBizID != constvar.BKBizIDForPlat && levelName == constvar.LevelPlat) {
+		return errors.New("bk_biz_id=0 should have level_name=plat")
+	}
+	if levelName == constvar.LevelApp && bkBizID != constvar.BKBizIDForPlat && levelValue != bkBizID {
+		return errors.New("level_name=app requires level_value equal to bk_biz_id")
+	}
+	return nil
+}
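
A quick illustration of the `ValidateAppWithLevelName` rules (editorial sketch, assuming `constvar.BKBizIDForPlat` is `"0"` and the level names equal their enum strings):

```go
package main

import (
	"fmt"

	"bk-dbconfig/internal/api" // import path assumed from the patch
)

func main() {
	// platform config pairs bk_biz_id=0 with level_name=plat, and only with it
	fmt.Println(api.ValidateAppWithLevelName("0", "plat", "0"))      // <nil>
	fmt.Println(api.ValidateAppWithLevelName("0", "app", "testapp")) // error

	// app-level config must repeat the bk_biz_id as level_value
	fmt.Println(api.ValidateAppWithLevelName("testapp", "app", "testapp")) // <nil>
	fmt.Println(api.ValidateAppWithLevelName("testapp", "app", "other"))   // error
}
```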
diff --git a/dbm-services/common/db-config/internal/api/config_meta.go b/dbm-services/common/db-config/internal/api/config_meta.go
new file mode 100644
index 0000000000..5f450c10cf
--- /dev/null
+++ b/dbm-services/common/db-config/internal/api/config_meta.go
@@ -0,0 +1,109 @@
+package api
+
+// BaseConfFileDef uniquely identifies a config file
+type BaseConfFileDef struct {
+	// namespace, generally the DB type
+	Namespace string `json:"namespace" form:"namespace" validate:"required" example:"tendbha"`
+	// config type, e.g. dbconf, backup
+	ConfType string `json:"conf_type" form:"conf_type" validate:"required" example:"dbconf"`
+	// config file name; a conf_type usually maps to one conf_file, but e.g. mysql 5.6 and 5.7 both belong to dbconf, giving the two files MySQL-5.6 and MySQL-5.7
+	ConfFile string `json:"conf_file" form:"conf_file" validate:"required" example:"MySQL-5.7"`
+}
+
+// ConfFileDef adds display information to BaseConfFileDef
+type ConfFileDef struct {
+	BaseConfFileDef
+	// config type display name
+	ConfTypeLC string `json:"conf_type_lc" form:"conf_type_lc" example:"DB参数配置"`
+	// config file display name, may also be in another locale's language
+	ConfFileLC string `json:"conf_file_lc" form:"conf_file_lc" example:"5.7_参数配置"`
+	// namespace info such as the database version matching conf_file
+	NamespaceInfo string `json:"namespace_info" form:"namespace_info" example:"MySQL 5.7"`
+	// description of the config file
+	Description string `json:"description" form:"description"`
+}
+
+// ConfFileResp is a config file definition with audit fields
+type ConfFileResp struct {
+	ConfFileDef
+	UpdatedBy string `json:"updated_by"`
+	CreatedAt string `json:"created_at"`
+	UpdatedAt string `json:"updated_at"`
+}
+
+// ConfNameDef defines one config name (parameter) and its value constraints
+type ConfNameDef struct {
+	// config item, also called parameter name
+	ConfName string `json:"conf_name" form:"conf_name" validate:"required"`
+	// config item display name, optional
+	ConfNameLC string `json:"conf_name_lc" form:"conf_name_lc"`
+	// value type of the config item, e.g. `STRING`, `INT`, `FLOAT`, `NUMBER`
+	ValueType string `json:"value_type" form:"value_type" validate:"required,enums" enums:"STRING,INT,FLOAT,NUMBER" example:"STRING"`
+	// sub-type of value_type; if set it is used to validate the concrete type, or returned to tell the frontend which widget to render, e.g. ENUM, RANGE
+	ValueTypeSub string `json:"value_type_sub" form:"value_type_sub" validate:"enums" enums:",STRING,ENUM,ENUMS,RANGE,BYTES,REGEX,JSON,COMPLEX" example:"ENUM"`
+	// allowed values such as an enum or a range; empty means unrestricted
+	// when value_type_sub=ENUM, value_allowed looks like 0|1 or ON|OFF or aaa|bbb|ccc, and values are validated against it
+	// when value_type_sub=REGEX, value_allowed is used as the validation regex
+	// when value_type_sub=RANGE, the value is also validated against the range
+	//  - BYTES is a special RANGE: values like 1m are allowed, but value_allowed must be a numeric range
+	ValueAllowed string `json:"value_allowed" form:"value_allowed"`
+	// default value of the config item
+	ValueDefault string `json:"value_default" form:"value_default" example:"1"`
+	// whether a restart is needed for the change to take effect; default 1
+	NeedRestart int8 `json:"need_restart" form:"need_restart" example:"1"`
+	// whether disabled, i.e. the status of the config item; default 0 enabled. 1: disabled (soft delete), -1: physically deleted
+	FlagDisable int8 `json:"flag_disable" form:"flag_disable" example:"0"`
+	// whether locked; default 0
+	FlagLocked int8 `json:"flag_locked" form:"flag_locked" example:"0"`
+	// read/write status: 1: readable and writable, 2: read-only (for display or config generation), -1: hidden, only marking a legal config name for dropdowns
+	FlagStatus int8 `json:"flag_status" form:"flag_status" example:"1"`
+	// description of the config item
+	Description string `json:"description" form:"description"`
+}
+
+// ConfTypeDef describes a conf_type and its version-keeping policy
+type ConfTypeDef struct {
+	ConfFileDef
+	LevelNames        string `json:"level_names"`
+	LevelVersioned    string `json:"level_versioned"`
+	VersionKeepLimit  int    `json:"version_keep_limit"`
+	VersionKeepDays   int    `json:"version_keep_days"`
+	ConfNameValidate  int8   `json:"conf_name_validate"`
+	ConfValueValidate int8   `json:"conf_value_validate"`
+	ConfNameOrder     int8   `json:"conf_name_order"`
+}
+
+// UpsertConfNames is one config name definition plus its operation type
+type UpsertConfNames struct {
+	ConfNameDef
+	OperationType
+}
+
+// QueryConfigNamesReq queries the config names of a conf_file
+type QueryConfigNamesReq struct {
+	ConfType string `json:"conf_type" form:"conf_type" validate:"required" example:"dbconf"`
+	ConfFile string `json:"conf_file" form:"conf_file" validate:"required" example:"MySQL-5.7"`
+	// if set, conf_names are searched by prefix match
+	ConfName  string `json:"conf_name" form:"conf_name"`
+	Namespace string `json:"namespace" form:"namespace" example:"tendbha"`
+} // @name QueryConfigNamesReq
+
+// QueryConfigNamesResp maps conf_name to its definition
+type QueryConfigNamesResp struct {
+	ConfFile  string                  `json:"conf_file"`
+	ConfNames map[string]*ConfNameDef `json:"conf_names" form:"conf_names"`
+} // @name QueryConfigNamesResp
+
+// QueryConfigTypeReq queries the definition of a conf_type
+type QueryConfigTypeReq struct {
+	Namespace string `json:"namespace" form:"namespace" validate:"required" example:"tendbha"`
+	ConfType  string `json:"conf_type" form:"conf_type" validate:"required" example:"dbconf"`
+	ConfFile  string `json:"conf_file" form:"conf_file"`
+} // @name QueryConfigTypeReq
+
+// QueryConfigTypeResp returns the conf_type info with its conf_files and levels
+type QueryConfigTypeResp struct {
+	ConfTypeInfo *ConfTypeDef      `json:"conf_type_info"`
+	ConfFiles    map[string]string `json:"conf_files"`
+	ConfLevels   map[string]string `json:"conf_levels"`
+} // @name QueryConfigTypeResp
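
To make the `value_type`/`value_type_sub`/`value_allowed` interplay concrete, a hypothetical platform parameter definition (editorial sketch; illustrative values only, import path assumed from the patch):

```go
package main

import (
	"fmt"

	"bk-dbconfig/internal/api"
)

func main() {
	// an ENUM sub-type restricts the value to the tokens listed in value_allowed
	name := api.ConfNameDef{
		ConfName:     "log_bin",
		ValueType:    "STRING",
		ValueTypeSub: "ENUM",
		ValueAllowed: "ON|OFF",
		ValueDefault: "ON",
		NeedRestart:  1, // takes effect only after a restart
		FlagLocked:   1, // locked at this level, lower levels cannot override
	}
	fmt.Printf("%+v\n", name)
}
```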
diff --git a/dbm-services/common/db-config/internal/api/config_plat.go b/dbm-services/common/db-config/internal/api/config_plat.go
new file mode 100644
index 0000000000..4b2fef4013
--- /dev/null
+++ b/dbm-services/common/db-config/internal/api/config_plat.go
@@ -0,0 +1,51 @@
+package api
+
+import (
+	"bk-dbconfig/pkg/validate"
+)
+
+// UpsertConfFilePlatReq creates or edits a platform-level config file
+// if the conf_file already exists on create, an error is returned
+// creating a conf_file keeps the save in the def table; publishing moves it into the node table, generates a revision and publishes it
+type UpsertConfFilePlatReq struct {
+	RequestType
+	// if saving conflicts with a lower level, the caller is asked to confirm and re-request with confirm=1
+	Confirm int8 `json:"confirm" form:"confirm"`
+	// publish description, only effective when req_type=SaveAndPublish
+	Description  string      `json:"description" form:"description"`
+	ConfFileInfo ConfFileDef `json:"conf_file_info" form:"conf_file_info"`
+	// when creating a config file, the first save returns file_id; later saves/publishes must pass file_id back
+	FileID uint64 `json:"file_id" form:"file_id"`
+	// an empty revision means the first save; every update returns a revision so one editing round keeps operating on the same revision
+	// a published revision can no longer be edited
+	// Revision string `json:"revision" form:"revision"`
+	ConfNames []*UpsertConfNames `json:"conf_names" form:"conf_names"`
+}
+
+// UpsertConfFilePlatResp is the response of saving/publishing a platform config file
+type UpsertConfFilePlatResp struct {
+	BaseConfFileDef
+	// when creating a config file, the first save returns file_id; later saves/publishes must pass file_id back
+	FileID uint64 `json:"file_id"`
+	// saving alone produces no revision; one is returned only on save-and-publish
+	Revision    string `json:"revision"`
+	IsPublished int8   `json:"is_published"`
+}
+
+// Validate validates the request and each conf_name, including its value type definition
+func (f *UpsertConfFilePlatReq) Validate() error {
+	if err := validate.GoValidateStruct(*f, true); err != nil {
+		return err
+	}
+	for _, c := range f.ConfNames {
+		if err := validate.GoValidateStruct(*c, true); err != nil {
+			return err
+		}
+		valueTypeSub := validate.ValueTypeDef{ValueType: c.ValueType, ValueTypeSub: c.ValueTypeSub}
+		if err := valueTypeSub.Validate(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
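
A sketch of a first-time save-and-publish request for a platform config file (editorial; hypothetical values, import path assumed from the patch; whether `Validate` returns nil depends on the `validate` package's rules):

```go
package main

import (
	"fmt"

	"bk-dbconfig/internal/api"
)

func main() {
	req := api.UpsertConfFilePlatReq{
		RequestType: api.RequestType{ReqType: "SaveAndPublish"},
		ConfFileInfo: api.ConfFileDef{
			BaseConfFileDef: api.BaseConfFileDef{
				Namespace: "tendbha",
				ConfType:  "dbconf",
				ConfFile:  "MySQL-5.7",
			},
		},
		ConfNames: []*api.UpsertConfNames{{
			ConfNameDef:   api.ConfNameDef{ConfName: "max_connections", ValueType: "INT", ValueDefault: "1000"},
			OperationType: api.OperationType{OPType: "add"},
		}},
	}
	fmt.Println(req.Validate())
}
```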
diff --git a/dbm-services/common/db-config/internal/api/config_version.go b/dbm-services/common/db-config/internal/api/config_version.go
new file mode 100644
index 0000000000..5182b3b499
--- /dev/null
+++ b/dbm-services/common/db-config/internal/api/config_version.go
@@ -0,0 +1,62 @@
+package api
+
+// GetVersionedDetailReq queries the detail of one config version
+type GetVersionedDetailReq struct {
+	BKBizIDDef
+	BaseConfFileDef
+	BaseLevelDef
+	// when revision = "v_latest", the current latest version is returned
+	Revision string `json:"revision" form:"revision" validate:"required" example:"v_20220309215824"`
+	RespFormatDef
+}
+
+// GetVersionedDetailResp is the detail of one config version
+type GetVersionedDetailResp struct {
+	ID uint64 `json:"id"`
+	// revision number
+	Revision string `json:"revision"`
+	// Content     interface{} `json:"content"`
+	ContentStr  string `json:"content_str"`
+	IsPublished int8   `json:"is_published"`
+	// previous revision number
+	PreRevision string `json:"pre_revision"`
+	// rows affected relative to the previous version
+	RowsAffected int    `json:"rows_affected"`
+	Description  string `json:"description"`
+	// published by
+	CreatedBy string `json:"created_by"`
+	// publish time
+	CreatedAt string `json:"created_at"`
+	// config items; the shape depends on format
+	Configs map[string]interface{} `json:"configs"`
+	// diff against the previous version
+	ConfigsDiff map[string]interface{} `json:"configs_diff"`
+}
+
+// PublishConfigFileReq publishes one version of a config file
+type PublishConfigFileReq struct {
+	BaseConfigNode
+	// the version you want to publish
+	Revision string `json:"revision" form:"revision" validate:"required" example:"v_20220309161928"`
+	// patch overwrites conf_values of the versioned config_file; it is a key-value dict
+	Patch map[string]string `json:"patch" form:"patch"`
+} // @name PublishConfigFileReq
+
+// ListConfigVersionsReq lists the versions of a config file
+type ListConfigVersionsReq struct {
+	BKBizIDDef
+	BaseConfFileDef
+	BaseLevelDef
+} // @name ListConfigVersionsReq
+
+// ListConfigVersionsResp is the version list of one config file
+type ListConfigVersionsResp struct {
+	BKBizID   string `json:"bk_biz_id"`
+	Namespace string `json:"namespace"`
+	ConfFile  string `json:"conf_file"`
+	// version list, like [{"revision":"v1", "rows_affected":1},{"revision":"v2", "rows_affected":2}]
+	Versions []map[string]interface{} `json:"versions"`
+	// the published version; empty when the published version is not among versions
+	VersionLatest string `json:"published"`
+	BaseLevelDef
+} // @name ListConfigVersionsResp
diff --git a/dbm-services/common/db-config/internal/api/dbha.go b/dbm-services/common/db-config/internal/api/dbha.go
new file mode 100644
index 0000000000..b3bb717df7
--- /dev/null
+++ b/dbm-services/common/db-config/internal/api/dbha.go
@@ -0,0 +1,34 @@
+package api
+
+import "bk-dbconfig/pkg/validate"
+
+// BatchGetConfigItemReq fetches one config item for many objects in a single call
+type BatchGetConfigItemReq struct {
+	BaseConfFileDef
+	LevelName string `json:"level_name" label:"level" form:"level_name" validate:"required,enums" enums:"instance,cluster,module,app"`
+	// the batch of objects, e.g. ["1.1.1.1:6379", "2.2.2.2:6379"]
+	LevelValues []string `json:"level_values" form:"level_values"`
+	// the conf_name to query; currently only one is supported
+	ConfName  string `json:"conf_name" form:"conf_name" example:"requirepass"`
+	confNames []string
+} // @name BatchGetConfigItemReq
+
+// BatchGetConfigItemResp is the batch query result
+type BatchGetConfigItemResp struct {
+	BaseConfFileDef
+	LevelName string `json:"level_name" example:"instance"`
+	ConfName  string `json:"conf_name" example:"requirepass"`
+	// content is a {level_value: {conf_name: conf_value}} dict like {"1.1.1.1:6379": {"requirepass": "xxx"}}
+	Content map[string]map[string]string `json:"content"`
+} // @name BatchGetConfigItemResp
+
+// Validate validates BatchGetConfigItemReq
+func (v *BatchGetConfigItemReq) Validate() error {
+	if err := validate.GoValidateStruct(*v, true); err != nil {
+		return err
+	}
+	return nil
+}
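
A sketch of the DBHA-style batch query these types serve, fetching one conf_name across many instances (editorial; hypothetical values, import path assumed from the patch):

```go
package main

import (
	"fmt"

	"bk-dbconfig/internal/api"
)

func main() {
	req := api.BatchGetConfigItemReq{
		BaseConfFileDef: api.BaseConfFileDef{
			Namespace: "tendbha",
			ConfType:  "dbconf",
			ConfFile:  "MySQL-5.7",
		},
		LevelName:   "instance",
		LevelValues: []string{"1.1.1.1:6379", "2.2.2.2:6379"},
		ConfName:    "requirepass",
	}
	fmt.Println(req.Validate())
	// the response's Content is keyed by level_value and then conf_name, e.g.
	// {"1.1.1.1:6379": {"requirepass": "xxx"}, "2.2.2.2:6379": {"requirepass": "yyy"}}
}
```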
diff --git a/dbm-services/common/db-config/internal/api/simple_config.go b/dbm-services/common/db-config/internal/api/simple_config.go
new file mode 100644
index 0000000000..3eceef8cda
--- /dev/null
+++ b/dbm-services/common/db-config/internal/api/simple_config.go
@@ -0,0 +1,171 @@
+package api
+
+import (
+	"bk-dbconfig/pkg/constvar"
+
+	"github.com/pkg/errors"
+)
+
+// CreateResponseConfig is a placeholder response for config creation
+type CreateResponseConfig struct {
+}
+
+// ConfigQueryReq2 is a flat config query request
+type ConfigQueryReq2 struct {
+	BKBizID   string `json:"bk_biz_id" form:"bk_biz_id"`
+	Namespace string `json:"namespace" form:"namespace"`
+	Module    string `json:"module" form:"module"`
+	Cluster   string `json:"cluster" form:"cluster"`
+	ConfType  string `json:"conf_type" form:"conf_type"`
+	ConfFile  string `json:"conf_file" form:"conf_file"`
+	ConfName  string `json:"conf_name" form:"conf_name"`
+	ConfValue string `json:"conf_value" form:"conf_value"`
+
+	InheritFrom string `json:"inherit_from" form:"inherit_from"`
+	Format      string `json:"format" form:"format"`
+	View        string `json:"view" form:"view"`
+}
+
+// ConfigQueryResp is the flat config query response
+type ConfigQueryResp struct {
+	BKBizID     string                       `json:"bk_biz_id"`
+	Namespace   string                       `json:"namespace"`
+	ConfType    string                       `json:"conf_type"`
+	ConfFile    string                       `json:"conf_file"`
+	Module      string                       `json:"module"`
+	Cluster     string                       `json:"cluster"`
+	ConfValues  map[string]map[string]string `json:"conf_values"`
+	ExtraInfo   string                       `json:"extra_info"`
+	Description string                       `json:"description"`
+	FlagDisable int8                         `json:"flag_disable"`
+	TimeCreated string                       `json:"time_created"`
+	TimeUpdated string                       `json:"time_updated"`
+}
+
+// QueryReqUserConfig queries configs by business and username
+type QueryReqUserConfig struct {
+	BKBizID  string `json:"bk_biz_id"`
+	Username string `json:"username"`
+}
+
+// PlatConfigCloneReq is the request to clone platform config to a business
+type PlatConfigCloneReq struct {
+	BKBizID   string `json:"bk_biz_id"`
+	Namespace string `json:"namespace"`
+	ConfType  string `json:"conf_type"`
+	ConfFile  string `json:"conf_file"`
+}
+
+// SimpleConfigQueryReq is the internal config query request
+// every place that creates this Req must call Validate
+type SimpleConfigQueryReq struct {
+	BaseConfigNode
+
+	ConfName  string `json:"conf_name" form:"conf_name"`
+	ConfValue string `json:"conf_value" form:"conf_value"`
+	Module    string `json:"module" form:"module"`
+	Cluster   string `json:"cluster" form:"cluster"`
+
+	InheritFrom string `json:"inherit_from" form:"inherit_from"`
+	Format      string `json:"format" form:"format"`
+	View        string `json:"view" form:"view"`
+
+	Description string `json:"description" form:"description"`
+	CreatedAt   string `json:"created_at" form:"created_at"`
+	UpdatedAt   string `json:"updated_at" form:"updated_at"`
+	CreatedBy   string `json:"created_by" form:"created_by"`
+	UpdatedBy   string `json:"updated_by" form:"updated_by"`
+
+	Revision string `json:"revision" form:"revision"`
+
+	UpLevelInfo
+
+	// whether this request is generating a config file
+	Generate bool
+	// whether to decrypt config values
+	Decrypt bool
+}
+
+// Validate normalizes the level fields and checks the basic consistency of the query
+func (v *SimpleConfigQueryReq) Validate() error {
+	v.SetLevelNameValue()
+
+	if v.BKBizID == "" || v.ConfType == "" || v.Namespace == "" {
+		return errors.New("namespace,conf_type,bk_biz_id can not be empty")
+	}
+	if v.LevelName == constvar.LevelApp && v.BKBizID != constvar.BKBizIDForPlat && v.LevelValue != v.BKBizID {
+		return errors.New("level_name=app requires level_value equal to bk_biz_id")
+	}
+	if (v.BKBizID == constvar.BKBizIDForPlat && v.LevelName != constvar.LevelPlat) ||
+		(v.BKBizID != constvar.BKBizIDForPlat && v.LevelName == constvar.LevelPlat) {
+		return errors.New("bk_biz_id=0 should have level_name=plat")
+	}
+	/*
+	   // todo: the validity of level_name keys inside level_info is not checked yet
+	   if err := v.UpLevelInfo.Validate(v.LevelName); err != nil {
+	       return err
+	   } else if v.LevelName == constvar.LevelCluster && v.Module == "" {
+	       v.Module, _ = v.UpLevelInfo.LevelInfo[constvar.LevelModule]
+	   } else if v.LevelName == constvar.LevelInstance {
+	       if v.Module == "" {
+	           v.Module, _ = v.UpLevelInfo.LevelInfo[constvar.LevelModule]
+	       }
+	       if v.Cluster == "" {
+	           v.Cluster, _ = v.UpLevelInfo.LevelInfo[constvar.LevelCluster]
+	       }
+	   }
+	*/
+	if v.View == constvar.ViewMerge {
+		if v.LevelName != "" {
+			v.View = v.View + "." + v.LevelName
+		} else if v.Cluster != "" {
+			v.View = v.View + ".cluster"
+		} else if v.Module != "" {
+			v.View = v.View + ".module"
+		}
+	}
+	return nil
+}
+
+// SetLevelNameValue infers level_name/level_value from cluster/module/bk_biz_id when absent, or fills those fields back from level_name/level_value
+func (v *SimpleConfigQueryReq) SetLevelNameValue() {
+	if v.LevelName == "" {
+		if v.Cluster != "" {
+			v.LevelName = constvar.LevelCluster
+			v.LevelValue = v.Cluster
+		} else if v.Module != "" {
+			v.LevelName = constvar.LevelModule
+			v.LevelValue = v.Module
+		} else if v.BKBizID == constvar.BKBizIDForPlat {
+			v.LevelName = constvar.LevelPlat
+			v.LevelValue = constvar.BKBizIDForPlat
+		}
+	} else {
+		if v.LevelName == constvar.LevelCluster {
+			v.Cluster = v.LevelValue
+		} else if v.LevelName == constvar.LevelModule {
+			v.Module = v.LevelValue
+		} else if v.LevelName == constvar.LevelApp && v.BKBizID == "" {
+			v.BKBizID = v.LevelValue
+		}
+	}
+	if v.LevelInfo != nil {
+		for upLevelName, upLevelValue := range v.LevelInfo {
+			if upLevelName == constvar.LevelModule {
+				v.Module = upLevelValue
+			} else if upLevelName == constvar.LevelCluster {
+				v.Cluster = upLevelValue
+			}
+		}
+	}
+	/*
+	   if !util.MapHasElement(v.LevelInfo, constvar.LevelApp) {
+	       v.LevelInfo[constvar.LevelApp] = v.BKBizID
+	   }
+
+	       if !util.MapHasElement(v.LevelInfo, constvar.LevelModule) {
+	           v.LevelInfo[constvar.LevelModule] = v.Module
+	       }
+
+	*/
+}
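
The inference in `SetLevelNameValue` is easiest to see by example (editorial sketch, assuming the `constvar` level names equal their enum strings and the import path from the patch):

```go
package main

import (
	"fmt"

	"bk-dbconfig/internal/api"
)

func main() {
	// without level_name, the most specific non-empty field wins: cluster over module
	r := api.SimpleConfigQueryReq{Cluster: "c1", Module: "m1"}
	r.SetLevelNameValue()
	fmt.Println(r.LevelName, r.LevelValue) // cluster c1

	// with level_name given, the matching convenience field is filled back instead
	r2 := api.SimpleConfigQueryReq{}
	r2.LevelName, r2.LevelValue = "module", "m2"
	r2.SetLevelNameValue()
	fmt.Println(r2.Module) // m2
}
```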
diff --git a/dbm-services/common/db-config/internal/handler/handler.go b/dbm-services/common/db-config/internal/handler/handler.go
new file mode 100644
index 0000000000..4e44891720
--- /dev/null
+++ b/dbm-services/common/db-config/internal/handler/handler.go
@@ -0,0 +1,44 @@
+// Package handler TODO
+package handler
+
+import (
+	"bk-dbconfig/internal/pkg/errno"
+	"bk-dbconfig/pkg/core/logger"
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+)
+
+// Response is the common JSON envelope of this service
+type Response struct {
+	Code    int         `json:"code"`
+	Message string      `json:"message"`
+	Data    interface{} `json:"data"`
+}
+
+// Response2 is the envelope with data serialized as a string, used for logging
+type Response2 struct {
+	Code    int    `json:"code"`
+	Message string `json:"message"`
+	Data    string `json:"data"`
+}
+
+// SendResponse logs the request/response pair and writes the JSON envelope; the HTTP status is always 200
+func SendResponse(ctx *gin.Context, err error, data interface{}) {
+	code, message := errno.DecodeErr(err)
+	req := fmt.Sprintf("url:%s params:%+v", ctx.Request.RequestURI, ctx.Params)
+	data2, _ := json.Marshal(data)
+	logger.Info("req:%s resp: %+v", req, Response2{
+		Code:    code,
+		Message: message,
+		Data:    string(data2),
+	})
+	// always return http.StatusOK
+	ctx.JSON(http.StatusOK, Response{
+		Code:    code,
+		Message: message,
+		Data:    data,
+	})
+}
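
Every handler below funnels through `SendResponse`, so the HTTP status is always 200 and callers must inspect the envelope's `code`. A minimal wiring sketch (editorial; the success code/message come from `errno.DecodeErr`, defined elsewhere in the patch, and the import path is assumed):

```go
package main

import (
	"bk-dbconfig/internal/handler"

	"github.com/gin-gonic/gin"
)

func main() {
	r := gin.Default()
	// on success the body looks like {"code":<ok>,"message":...,"data":{"pong":true}}
	r.GET("/ping", func(ctx *gin.Context) {
		handler.SendResponse(ctx, nil, map[string]bool{"pong": true})
	})
	_ = r.Run(":8080")
}
```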
diff --git a/dbm-services/common/db-config/internal/handler/simple/batchget.go b/dbm-services/common/db-config/internal/handler/simple/batchget.go
new file mode 100644
index 0000000000..f7f3f85d84
--- /dev/null
+++ b/dbm-services/common/db-config/internal/handler/simple/batchget.go
@@ -0,0 +1,80 @@
+package simple
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/handler"
+	"bk-dbconfig/internal/service/dbha"
+	"bk-dbconfig/internal/service/simpleconfig"
+
+	"github.com/gin-gonic/gin"
+)
+
+// BatchGetConfigOneItem godoc
+//
+// @Summary      batch get one config item for multiple objects
+// @Description  batch get one config item for multiple objects; no inheritance is applied
+// @Tags         config_item
+// @Accept      json
+// @Produce      json
+// @Param        body body     api.BatchGetConfigItemReq  true  "BatchGetConfigItemReq"
+// @Success      200  {object}  api.BatchGetConfigItemResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/confitem/batchget [post]
+func (cf *Config) BatchGetConfigOneItem(ctx *gin.Context) {
+	var r api.BatchGetConfigItemReq
+	var err error
+	if err = ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = r.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = simpleconfig.CheckValidConfType(r.Namespace, r.ConfType, r.ConfFile, r.LevelName, 2); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if resp, err := dbha.BatchGetConfigItem(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	} else {
+		handler.SendResponse(ctx, nil, resp)
+		return
+	}
+}
+
+// BatchGetConfigItemMore godoc
+//
+// @Summary      batch get multiple config items for multiple objects
+// @Description  batch get multiple config items for multiple objects; no inheritance is applied
+// @Tags         config_item
+// @Accept      json
+// @Produce      json
+// @Param        body body     api.BatchGetConfigItemReq  true  "BatchGetConfigItemReq"
+// @Success      200  {object}  api.BatchGetConfigItemResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/confitem/batchgetmore [post]
+func (cf *Config) BatchGetConfigItemMore(ctx *gin.Context) {
+	var r api.BatchGetConfigItemReq
+	var err error
+	if err = ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = r.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = simpleconfig.CheckValidConfType(r.Namespace, r.ConfType, r.ConfFile, r.LevelName, 2); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if resp, err := dbha.BatchGetConfigItem(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	} else {
+		handler.SendResponse(ctx, nil, resp)
+		return
+	}
+}
diff --git a/dbm-services/common/db-config/internal/handler/simple/config_apply.go b/dbm-services/common/db-config/internal/handler/simple/config_apply.go
new file mode 100644
index 0000000000..6611d6cc37
--- /dev/null
+++ b/dbm-services/common/db-config/internal/handler/simple/config_apply.go
@@ -0,0 +1,210 @@
+package simple
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/handler"
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/internal/service/simpleconfig"
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util"
+
+	"github.com/gin-gonic/gin"
+)
+
+// VersionApplyInfo godoc
+//
+// @Summary      get the published, pending-apply config content for a target
+// @Description  the version to apply must be in published state
+// @Tags         config_version
+// @Accept       json
+// @Produce      json
+// @Param        body body     api.ApplyConfigInfoReq  true  "ApplyConfigInfoReq"
+// @Success      200  {object}  api.ApplyConfigInfoResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/version/applyinfo [post]
+func (cf *Config) VersionApplyInfo(ctx *gin.Context) {
+	var r api.ApplyConfigInfoReq
+	var resp *api.ApplyConfigInfoResp
+	var err error
+	// wrapped in a closure so the err assigned below, not the nil captured at defer time, is logged
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+
+	if err = ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = r.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = simpleconfig.CheckValidConfType(r.Namespace, r.ConfType, r.ConfFile, r.LevelName, 2); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	// opUser := ctx.GetHeader(constvar.UserNameHeader)
+	if resp, err = simpleconfig.GetConfigsToApply(model.DB.Self, r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	} else {
+		handler.SendResponse(ctx, nil, resp)
+	}
+}
+
+// VersionApplyStat godoc
+//
+// @Summary      configs applied, update the version status
+// @Description  all configs of this version have been applied
+// @Tags         config_version
+// @Accept       json
+// @Produce      json
+// @Param        body body     api.ApplyConfigReq  true  "ApplyConfigInfoReq"
+// @Success      200  {object}  api.HTTPOkNilResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/version/applied [post]
+func (cf *Config) VersionApplyStat(ctx *gin.Context) {
+	var r api.ApplyConfigReq
+	var err error
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+
+	if err = ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = r.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = simpleconfig.CheckValidConfType(r.Namespace, r.ConfType, r.ConfFile, r.LevelName, 2); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	// opUser := ctx.GetHeader(constvar.UserNameHeader)
+	verObj := model.ConfigVersionedModel{
+		BKBizID:    r.BKBizID,
+		Namespace:  r.Namespace,
+		ConfType:   r.ConfType,
+		ConfFile:   r.ConfFile,
+		LevelName:  r.LevelName,
+		LevelValue: r.LevelValue,
+		Revision:   r.RevisionApplied,
+	}
+	if err = verObj.VersionApplyStatus(model.DB.Self); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	} else {
+		handler.SendResponse(ctx, nil, nil)
+	}
+}
+
+// VersionStat godoc
+//
+// @Summary      batch query the status of published versions
+// @Description  mainly checks whether published versions have been applied
+// @Tags         config_version
+// @Accept       json
+// @Produce      json
+// @Param        body body     api.VersionStatReq  true  "VersionStatReq"
+// @Success      200  {object}  api.VersionStatResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/version/status [post]
+func (cf *Config) VersionStat(ctx *gin.Context) {
+	var r api.VersionStatReq
+	var resp *api.VersionStatResp
+	var err error
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+
+	if err = ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = r.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = simpleconfig.CheckValidConfType(r.Namespace, r.ConfType, r.ConfFile, r.LevelName, 2); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	// opUser := ctx.GetHeader(constvar.UserNameHeader)
+	if resp, err = simpleconfig.GetVersionStat(r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	} else {
+		handler.SendResponse(ctx, nil, resp)
+	}
+}
+
+// VersionApply godoc
+//
+// @Summary      apply a level_config
+// @Description  publish the config to all lower levels (without applying it)
+// @Tags         config_version
+// @Accept       json
+// @Produce      json
+// @Param        body body     api.VersionApplyReq  true  "ApplyConfigInfoReq"
+// @Success      200  {object}  api.HTTPOkNilResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/version/applylevel [post]
+func (cf *Config) VersionApply(ctx *gin.Context) {
+	var r api.VersionApplyReq
+	var err error
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+
+	if err = ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = r.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = simpleconfig.CheckValidConfType(r.Namespace, r.ConfType, r.ConfFile, r.LevelName, 2); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	// opUser := ctx.GetHeader(constvar.UserNameHeader)
+	publish := simpleconfig.PublishConfig{}
+	if err = publish.ApplyVersionLevelNode(model.DB.Self, &r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	} else {
+		handler.SendResponse(ctx, nil, nil)
+	}
+}
+
+// ItemApply godoc
+//
+// @Summary      mark pending-apply config items as applied
+// @Description  only for versioned_config
+// @Tags         config_version
+// @Accept       json
+// @Produce      json
+// @Param        body body     api.ConfItemApplyReq  true  "ApplyConfigInfoReq"
+// @Success      200  {object}  api.HTTPOkNilResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/version/applyitem [post]
+func (cf *Config) ItemApply(ctx *gin.Context) {
+	var r api.ConfItemApplyReq
+	var err error
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+
+	if err = ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = r.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = simpleconfig.CheckValidConfType(r.Namespace, r.ConfType, r.ConfFile, r.LevelName, 2); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	// opUser := ctx.GetHeader(constvar.UserNameHeader)
+	if err = simpleconfig.NodeTaskApplyItem(&r); err != nil {
+		// if err := verObj.ApplyConfig(model.DB.Self); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	} else {
+		handler.SendResponse(ctx, nil, nil)
+	}
+}
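
The lifecycle these handlers drive is publish first, then apply: `/version/applyinfo` returns what is published but pending, and `/version/applied` marks a revision applied. A hypothetical client call for the latter (editorial sketch; the base URL and the JSON field names are assumptions inferred from the Go struct fields):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	body, _ := json.Marshal(map[string]string{
		"bk_biz_id":        "testapp",
		"namespace":        "tendbha",
		"conf_type":        "dbconf",
		"conf_file":        "MySQL-5.7",
		"level_name":       "cluster",
		"level_value":      "c1",
		"revision_applied": "v_20220309161928", // hypothetical tag for RevisionApplied
	})
	resp, err := http.Post("http://dbconfig.example/bkconfig/v1/version/applied",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // always 200; the real outcome is the envelope's code
}
```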
diff --git a/dbm-services/common/db-config/internal/handler/simple/config_file.go b/dbm-services/common/db-config/internal/handler/simple/config_file.go
new file mode 100644
index 0000000000..26c8defac7
--- /dev/null
+++ b/dbm-services/common/db-config/internal/handler/simple/config_file.go
@@ -0,0 +1,43 @@
+package simple
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/handler"
+	"bk-dbconfig/internal/service/simpleconfig"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/validate"
+
+	"github.com/gin-gonic/gin"
+)
+
+// ListConfigFiles godoc
+//
+// @Summary      list config files
+// @Description  list config file templates; only the platform and businesses have config file lists
+// @Description  the returned updated_by is the operator
+// @Tags         plat_config
+// @Produce      json
+// @Param        body query     api.ListConfFileReq  true  "query"
+// @Success      200  {object}  []api.ListConfFileResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/conffile/list [get]
+func (cf *Config) ListConfigFiles(ctx *gin.Context) {
+	var r api.ListConfFileReq
+	if err := ctx.BindQuery(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err := validate.GoValidateStruct(r, true); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if r.LevelValue == "" && r.LevelName == constvar.LevelApp {
+		r.LevelValue = r.BKBizID
+	}
+	resp, err := simpleconfig.ListConfigFiles(&r)
+	if err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	handler.SendResponse(ctx, err, resp)
+}
diff --git a/dbm-services/common/db-config/internal/handler/simple/config_item.go b/dbm-services/common/db-config/internal/handler/simple/config_item.go
new file mode 100644
index 0000000000..a147cc7b78
--- /dev/null
+++ b/dbm-services/common/db-config/internal/handler/simple/config_item.go
@@ -0,0 +1,216 @@
+package simple
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/handler"
+	"bk-dbconfig/internal/service/simpleconfig"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util"
+	"fmt"
+
+	"github.com/gin-gonic/gin"
+)
+
+// MergeAndGetConfigItems godoc
+//
+// @Summary      get config item lists for multiple config files
+// @Description  get the config items of a config file by business/module/cluster. typically for frontend query-and-edit scenarios; backend services fetching config files should use the /version/generate api
+// @Description     conf_file may be several file names separated by commas; the result is a list grouped by config file name
+// @Description  the response format must be given via format, allowed values map, list.
+// @Description    the map format drops the other conf_item information and keeps only conf_name=conf_value, generally for backend services
+// @Description    the list format keeps the other conf_item information, conf_name=conf_item, generally for frontend display
+// @Description  querying cluster-level configs requires the module info level_info:{"module":"xxx"}
+// @Tags         config_item
+// @Accept      json
+// @Produce      json
+// @Param        body body     api.GetConfigItemsReq  true  "GetConfigItemsReq"
+// @Success      200  {object}  []api.GetConfigItemsResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/confitem/query [post]
+func (cf *Config) MergeAndGetConfigItems(ctx *gin.Context) {
+	var r api.GetConfigItemsReq
+	var resp []*api.GetConfigItemsResp
+	var err error
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+	if err = ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = r.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = simpleconfig.CheckValidConfType(r.Namespace, r.ConfType, r.ConfFile, r.LevelName, 2); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	levelNode := api.BaseConfigNode{}
+	levelNode.Set(r.BKBizID, r.Namespace, r.ConfType, r.ConfFile, r.LevelName, r.LevelValue)
+	var r2 = &api.SimpleConfigQueryReq{
+		BaseConfigNode: levelNode,
+		Format:         r.Format,
+		View:           constvar.ViewMerge,
+		InheritFrom:    "0",
+		ConfName:       r.ConfName,
+		UpLevelInfo:    r.UpLevelInfo,
+	}
+	if err = r2.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	confFiles := util.SplitAnyRuneTrim(r.ConfFile, ",")
+	if resp, err = simpleconfig.GetConfigItemsForFiles(r2, confFiles); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	} else if len(resp) > 0 {
+		handler.SendResponse(ctx, nil, resp)
+		return
+	} else {
+		handler.SendResponse(ctx, fmt.Errorf("no result"), resp)
+	}
+}
+
+// MergeAndGetConfigItemsOne godoc
+//
+// @Summary      get the config item list of one config file
+// @Description  get the config items of a config file by business/module/cluster. typically for frontend query-and-edit scenarios; backend services fetching config files should use the /version/generate api
+// @Description     note: same usage as `/confitem/query`, but `/confitem/queryone` accepts exactly one conf_file and returns a map
+// @Description  the response format must be given via format, allowed values map, list.
+// @Description    the map format drops the other conf_item information and keeps only conf_name=conf_value, generally for backend services
+// @Description    the list format keeps the other conf_item information, conf_name=conf_item, generally for frontend display
+// @Description  querying cluster-level configs requires the module info level_info:{"module":"xxx"}
+// @Tags         config_item
+// @Accept      json
+// @Produce      json
+// @Param        body body     api.GetConfigItemsReq  true  "GetConfigItemsReq"
+// @Success      200  {object}  api.GetConfigItemsResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/confitem/queryone [post]
+func (cf *Config) MergeAndGetConfigItemsOne(ctx *gin.Context) {
+	var r api.GetConfigItemsReq
+	var err error
+	if err = ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = r.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = simpleconfig.CheckValidConfType(r.Namespace, r.ConfType, r.ConfFile, r.LevelName, 2); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	levelNode := api.BaseConfigNode{}
+	levelNode.Set(r.BKBizID, r.Namespace, r.ConfType, r.ConfFile, r.LevelName, r.LevelValue)
+	var r2 = &api.SimpleConfigQueryReq{
+		BaseConfigNode: levelNode,
+		Format:         r.Format,
+		View:           constvar.ViewMerge,
+		InheritFrom:    "0",
+		ConfName:       r.ConfName,
+		UpLevelInfo:    r.UpLevelInfo,
+	}
+	if err := r2.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+
+	if resp, err := simpleconfig.QueryConfigItems(r2, true); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	} else {
+		handler.SendResponse(ctx, nil, resp)
+		return
+	}
+}
+
+// UpdateConfigFileItems godoc
+//
+// @Summary      edit and publish level configs
+// @Description  edit level configs; levels include business app, module module, cluster cluster. level_name and level_value select which level to modify
+// @Description  example 1: level_name=app, level_value=testapp modifies the config of business bk_biz_id=testapp
+// @Description  example 2: level_name=module, level_value=account modifies the config of module=account of some bk_biz_id
+// @Description  the HTTP Header `X-Bkapi-User-Name` carries the requesting operator
+// @Description  modifying cluster-level configs requires the module info level_info:{"module":"xxx"}
+// @Description  when only modifying config items without the file description, conf_file_info only needs namespace, conf_type, conf_file
+// @Tags         config_item
+// @Accept       json
+// @Produce      json
+// @Param        body body     api.UpsertConfItemsReq  true  "UpsertConfItemsReq"
+// @Success      200  {object}  api.UpsertConfItemsResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/confitem/upsert [post]
+func (cf *Config) UpdateConfigFileItems(ctx *gin.Context) {
+	var r api.UpsertConfItemsReq
+	var resp *api.UpsertConfItemsResp
+	var err error
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+
+	if err = ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = r.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = simpleconfig.CheckValidConfType(r.ConfFileInfo.Namespace, r.ConfFileInfo.ConfType,
+		r.ConfFileInfo.ConfFile, r.LevelName, 1); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	opUser := api.GetHeaderUsername(ctx.GetHeader(constvar.BKApiAuthorization))
+	if resp, err = simpleconfig.UpdateConfigFileItems(&r, opUser); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	} else {
+		handler.SendResponse(ctx, nil, resp)
+	}
+}
+
+// SaveConfigFileItems godoc
+//
+// @Summary      edit configs (no version concept)
+// @Description  edit level configs; levels include business app, module module, cluster cluster. level_name and level_value select which level to modify
+// @Description  for conf_types without versioning, saving takes effect directly, no publish is needed
+// @Description  saving cluster-level configs requires the module info level_info:{"module":"xxx"}
+// @Tags         config_item
+// @Accept       json
+// @Produce      json
+// @Param        body body     api.SaveConfItemsReq  true  "SaveConfItemsReq"
+// @Success      200  {object}  api.UpsertConfItemsResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/confitem/save [post]
+func (cf *Config) SaveConfigFileItems(ctx *gin.Context) {
+	var r api.SaveConfItemsReq
+	var resp *api.UpsertConfItemsResp
+	var err error
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+
+	if err = ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = r.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = simpleconfig.CheckValidConfType(r.ConfFileInfo.Namespace, r.ConfFileInfo.ConfType,
+		r.ConfFileInfo.ConfFile, r.LevelName, 0); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	r2 := api.UpsertConfItemsReq{
+		RequestType:      api.RequestType{ReqType: constvar.MethodSave},
+		SaveConfItemsReq: r,
+	}
+	opUser := api.GetHeaderUsername(ctx.GetHeader(constvar.BKApiAuthorization))
+	if resp, err = simpleconfig.UpdateConfigFileItems(&r2, opUser); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	} else {
+		handler.SendResponse(ctx, nil, resp)
+	}
+}
diff --git a/dbm-services/common/db-config/internal/handler/simple/config_meta.go b/dbm-services/common/db-config/internal/handler/simple/config_meta.go
new file mode 100644
index 0000000000..3f9f4cb4d3
--- /dev/null
+++ b/dbm-services/common/db-config/internal/handler/simple/config_meta.go
@@ -0,0 +1,48 @@
+package simple
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/handler"
+	"bk-dbconfig/internal/service/simpleconfig"
+
+	"github.com/gin-gonic/gin"
+)
+
+// QueryConfigTypeNames godoc
+//
+// @Summary      list the predefined config names
+// @Description  list the config names of a conf_type/conf_file, excluding locked platform configs
+// @Tags         config_meta
+// @Produce      json
+// @Param        body query     api.QueryConfigNamesReq  true  "query"
+// @Success      200  {object}  api.QueryConfigNamesResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/confname/list [get]
+func (cf *Config) QueryConfigTypeNames(ctx *gin.Context) {
+	var r api.QueryConfigNamesReq
+	if err := ctx.BindQuery(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	resp, err := simpleconfig.QueryConfigNames(&r, false)
+	if err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	handler.SendResponse(ctx, err, resp)
+}
+
+// QueryConfigTypeInfo returns a conf_type definition together with its conf_files and levels
+func (cf *Config) QueryConfigTypeInfo(ctx *gin.Context) {
+	var r api.QueryConfigTypeReq
+	if err := ctx.BindQuery(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	resp, err := simpleconfig.QueryConfigTypeInfo(&r)
+	if err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	handler.SendResponse(ctx, err, resp)
+}
diff --git a/dbm-services/common/db-config/internal/handler/simple/config_plat.go b/dbm-services/common/db-config/internal/handler/simple/config_plat.go
new file mode 100644
index 0000000000..f28f0b6656
--- /dev/null
+++ b/dbm-services/common/db-config/internal/handler/simple/config_plat.go
@@ -0,0 +1,125 @@
+package simple
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/handler"
+	"bk-dbconfig/internal/service/simpleconfig"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util"
+
+	"github.com/gin-gonic/gin"
+)
+
+// UpsertConfigFilePlat godoc
+//
+// @Summary      create a platform-level config file
+// @Description  create a platform-level config file and define the allowed config names. req_type `SaveOnly` saves only, `SaveAndPublish` saves and publishes. save-and-publish must also send the full content, not an increment on top of an earlier save
+// @Description  req_type=`SaveOnly` is deprecated
+// @Description  the first save returns `file_id`; later saves/publishes must pass `file_id` back
+// @Description  namespace,conf_type,conf_file uniquely identify a config file; DB version differences live in conf_file (e.g. MySQL-5.7), and namespace_info may store the frontend-provided database version, for display only
+// @Description  the HTTP Header `X-Bkapi-User-Name` carries the requesting operator
+// @Tags         plat_config
+// @Accept       json
+// @Produce      json
+// @Param        body body      api.UpsertConfFilePlatReq  true  "ConfName for ConfType"
+// @Success      200  {object}  api.UpsertConfFilePlatResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/conffile/add [post]
+func (cf *Config) UpsertConfigFilePlat(ctx *gin.Context) {
+	var r api.UpsertConfFilePlatReq
+	if err := ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err := r.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err := simpleconfig.CheckValidConfType(r.ConfFileInfo.Namespace, r.ConfFileInfo.ConfType,
+		r.ConfFileInfo.ConfFile, "", 2); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	opUser := api.GetHeaderUsername(ctx.GetHeader(constvar.BKApiAuthorization))
+	if resp, err := simpleconfig.UpsertConfigFilePlat(&r, "new", opUser); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	} else {
+		handler.SendResponse(ctx, nil, resp)
+	}
+
+}
+
+// UpdateConfigFilePlat godoc
+//
+// @Summary      edit a platform-level config file
+// @Description  edit a platform-level config file. req_type `SaveOnly` saves only, `SaveAndPublish` saves and publishes
+// @Description  the HTTP Header `X-Bkapi-User-Name` carries the requesting operator
+// @Description  when editing platform config, setting flag_disable=1 hides the item from the platform config list, which effectively manages the list of all allowed config items
+// @Description  saving validates the given value_default, value_type, value_allowed
+// @Description   1. value_type currently allows STRING, INT, FLOAT, NUMBER
+// @Description   2. value_type_sub allows ENUM, ENUMS, RANGE, STRING, JSON, REGEX (a special STRING, validated by checking value_default against the value_allowed regex), BYTES (64m, 128k style, converted to bytes and compared with the value_allowed range)
+// @Description   3. value_allowed allows enums such as `0|1|2` or `ON|OFF`, and ranges such as `(0, 1000]`
+// @Tags         plat_config
+// @Accept       json
+// @Produce      json
+// @Param        body body      api.UpsertConfFilePlatReq  true  "ConfName for ConfType"
+// @Success      200  {object}  api.UpsertConfFilePlatResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/conffile/update [post]
+func (cf *Config) UpdateConfigFilePlat(ctx *gin.Context) {
+	var r api.UpsertConfFilePlatReq
+	var resp *api.UpsertConfFilePlatResp
+	var err error
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+
+	if err = ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = r.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err = simpleconfig.CheckValidConfType(r.ConfFileInfo.Namespace, r.ConfFileInfo.ConfType,
+		r.ConfFileInfo.ConfFile, "", 2); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	opUser := api.GetHeaderUsername(ctx.GetHeader(constvar.BKApiAuthorization))
+	if resp, err = simpleconfig.UpsertConfigFilePlat(&r, "edit", opUser); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	} else {
+		handler.SendResponse(ctx, nil, resp)
+	}
+}
+
+// QueryConfigTypeNamesPlat godoc
+//
+// @Summary      list platform config items
+// @Description  list all config names of a conf_type/conf_file in the platform config
+// @Tags         plat_config
+// @Produce      json
+// @Param        body query     api.QueryConfigNamesReq  true  "query"
+// @Success      200  {object}  api.QueryConfigNamesResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/conffile/query [get]
+func (cf *Config) QueryConfigTypeNamesPlat(ctx *gin.Context) {
+	var r api.QueryConfigNamesReq
+	var resp *api.QueryConfigNamesResp
+	var err error
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+
+	if err = ctx.BindQuery(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	resp, err = simpleconfig.QueryConfigNames(&r, true)
+	if err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	handler.SendResponse(ctx, err, resp)
+}
diff --git a/dbm-services/common/db-config/internal/handler/simple/config_version.go b/dbm-services/common/db-config/internal/handler/simple/config_version.go
new file mode 100644
index 0000000000..8450963255
--- /dev/null
+++ b/dbm-services/common/db-config/internal/handler/simple/config_version.go
@@ -0,0 +1,263 @@
+package simple
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/handler"
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/internal/service/simpleconfig"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util"
+	"bk-dbconfig/pkg/validate"
+	"fmt"
+	"math/rand"
+	"time"
+
+	"github.com/gin-gonic/gin"
+	"github.com/pkg/errors"
+)
+
+// GenerateConfigVersion godoc
+//
+// @Summary      generate and fetch a new version of a config file
+// @Description  generate a config file directly from existing config items and return it; each call creates a new version, optionally published right away. backend services usually query configs through this api
+// @Description  to modify configs and publish, use the /confitem/upsert api
+// @Description  to query config file content directly, use the /confitem/query api
+// @Description  depending on `method`, the config is generated and stored (`GenerateAndSave`) or generated, stored and published (`GenerateAndPublish`)
+// @Description   with `GenerateAndSave`, publishing requires a further call to the PublishConfigFile api
+// @Tags         config_version
+// @Accept       json
+// @Produce      json
+// @Param        body body     api.GenerateConfigReq  true  "Generate config file versioned"
+// @Success      200  {object}  api.GenerateConfigResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/version/generate [post]
+func (cf *Config) GenerateConfigVersion(ctx *gin.Context) {
+	var r api.GenerateConfigReq
+	var resp *api.GenerateConfigResp
+	var err error
+	if err = ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err := r.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err := simpleconfig.CheckValidConfType(r.Namespace, r.ConfType, r.ConfFile, r.LevelName, 1); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	opUser := api.GetHeaderUsername(ctx.GetHeader(constvar.BKApiAuthorization))
+	levelNode := api.BaseConfigNode{}
+	levelNode.Set(r.BKBizID, r.Namespace, r.ConfType, r.ConfFile, r.LevelName, r.LevelValue)
+	var r2 = &api.SimpleConfigQueryReq{
+		BaseConfigNode: levelNode,
+		Format:         r.Format,
+		View:           constvar.ViewMerge,
+		CreatedBy:      opUser,
+		InheritFrom:    constvar.BKBizIDForPlat,
+		UpLevelInfo:    r.UpLevelInfo,
+	}
+	if err = r2.Validate(); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if !model.IsConfigLevelEntityVersioned(r.Namespace, r.ConfType, r.ConfFile, r.LevelName) {
+		handler.SendResponse(ctx, errors.New("only entity level allow generate api"), nil)
+		return
+	}
+	r2.Decrypt = true
+	r2.Generate = true
+
+	v := &model.ConfigVersionedModel{
+		BKBizID:    r.BKBizID,
+		Namespace:  r.Namespace,
+		ConfType:   r.ConfType,
+		ConfFile:   r.ConfFile,
+		LevelName:  r.LevelName,
+		LevelValue: r.LevelValue,
+	}
+	// first check whether an applied (i.e. generated) version already exists
+	var exists bool
+	var expires = true
+	// concurrent requests are possible; sleep a random 0-2s to spread them out
+	time.Sleep(time.Duration(rand.Intn(2000)) * time.Millisecond)
+	if exists, err = v.ExistsAppliedVersion(model.DB.Self); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+
+	if exists {
+		logger.Info("generated before: %+v", r)
+		// a generate happened earlier; check whether it has expired
+		if vConfigs, err := v.GetVersionPublished(model.DB.Self); err == nil {
+			nowTime := time.Now()
+			createTime, _ := time.ParseInLocation(model.DBTimeFormat, vConfigs.Versioned.CreatedAt.String(), time.Local)
+			if nowTime.Sub(createTime).Seconds() < 10 { // a repeated generate within 10s returns the published version directly
+				expires = false
+			}
+		}
+		if !expires { // the generated version has not expired; query published configs from tb_config_version instead of generating
+			logger.Info("level_node has applied versioned and un-expired, query configs instead of generate")
+			if resp, err = simpleconfig.QueryConfigItemsFromVersion(r2, false); err != nil {
+				handler.SendResponse(ctx, err, nil)
+				return
+			} else {
+				handler.SendResponse(ctx, nil, resp)
+				return
+			}
+		}
+	} else {
+		logger.Info("generate the first time: %+v", r)
+	}
+	// either no generated version exists or it has expired; generate now
+	if err := simpleconfig.SaveConfigFileNode(model.DB.Self, &levelNode, opUser, "generated", ""); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	// generate happens in 3 cases:
+	// 1. the first generate, completing normally
+	// 2. parallel generates: the one hitting a duplicate error simply reads the winner's result
+	// 3. an earlier generate that has expired
+	// one more corner case: parallel generates staggered by more than 1s can each succeed
+	if resp, err = simpleconfig.GenerateConfigFile(model.DB.Self, r2, r.Method, nil); err != nil {
+		if util.IsErrorString(err, "Error 1062: Duplicate entry") {
+			// 前面已经判断不存在,现在写入报重复,说明有其它请求 generate version 了。直接读取
+			logger.Info("level_node has applied versioned, query configs instead of generate")
+			if resp, err = simpleconfig.QueryConfigItemsFromVersion(r2, false); err != nil {
+				handler.SendResponse(ctx, err, nil)
+				return
+			} else {
+				handler.SendResponse(ctx, nil, resp)
+				return
+			}
+		} else {
+			handler.SendResponse(ctx, err, nil)
+			return
+		}
+	} else if len(resp.Content) > 0 {
+		handler.SendResponse(ctx, nil, resp)
+		return
+	} else {
+		handler.SendResponse(ctx, fmt.Errorf("no result"), resp)
+		return
+	}
+}
+
+// PublishConfigFile godoc
+//
+// @Summary      publish a version directly [deprecated]
+// @Description  publish the given version of a config file; an unpublished config file cannot be used
+// @Description  publishing takes the currently published config offline; the same revision cannot be published twice
+// @Description  the patch parameter overwrites that version's config items in the config center (only item values starting with `{{` can be overwritten)
+// @Tags         config_version
+// @Accept       json
+// @Produce      json
+// @Param        body body     api.PublishConfigFileReq  true  "Publish config file versioned"
+// @Success      200  {object}  api.HTTPOkNilResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/version/publish [post]
+func (cf *Config) PublishConfigFile(ctx *gin.Context) {
+	var r api.PublishConfigFileReq
+	var err error
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+	if err = ctx.BindJSON(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	opUser := api.GetHeaderUsername(ctx.GetHeader(constvar.BKApiAuthorization))
+	var v = model.ConfigVersionedModel{
+		BKBizID:    r.BKBizID,
+		Namespace:  r.Namespace,
+		ConfFile:   r.ConfFile,
+		ConfType:   r.ConfType,
+		LevelName:  r.LevelName,
+		LevelValue: r.LevelValue,
+		Revision:   r.Revision,
+		CreatedBy:  opUser,
+	}
+	publishService := simpleconfig.PublishConfig{
+		Versioned:     &v,
+		Patch:         r.Patch,
+		ConfigsLocked: nil,
+		LevelNode: api.BaseConfigNode{
+			BKBizIDDef: api.BKBizIDDef{BKBizID: r.BKBizID},
+			BaseConfFileDef: api.BaseConfFileDef{
+				Namespace: r.Namespace,
+				ConfType:  r.ConfType,
+				ConfFile:  r.ConfFile,
+			},
+			BaseLevelDef: api.BaseLevelDef{
+				LevelName:  r.LevelName,
+				LevelValue: r.LevelValue,
+			},
+		},
+	}
+	if err = publishService.PublishAndApplyVersioned(model.DB.Self, false); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	handler.SendResponse(ctx, nil, nil)
+}
+
+// GetVersionedDetail godoc
+//
+// @Summary      query the detail of one version
+// @Description  query the detail of a historical config version; format selects the response format, revision selects the version (revision = "v_latest" returns the current latest version)
+// @Tags         config_version
+// @Produce      json
+// @Param        body query     api.GetVersionedDetailReq  true  "query"
+// @Success      200  {object}  api.GetVersionedDetailResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/version/detail [get]
+func (cf *Config) GetVersionedDetail(ctx *gin.Context) {
+	var r api.GetVersionedDetailReq
+	if err := ctx.BindQuery(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err := validate.GoValidateStruct(r, true); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if r.Format == "" {
+		r.Format = constvar.FormatList
+	}
+	resp, err := simpleconfig.GetVersionedDetail(&r)
+	if err != nil {
+		logger.Errorf("%+v", err)
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	handler.SendResponse(ctx, err, resp)
+}
+
+// ListConfigFileVersions godoc
+//
+// @Summary      List historical config version names
+// @Description  Get config file versions list
+// @Tags         config_version
+// @Produce      json
+// @Param        body query     api.ListConfigVersionsReq  true  "query"
+// @Success      200  {object}  api.ListConfigVersionsResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/version/list [get]
+func (cf *Config) ListConfigFileVersions(ctx *gin.Context) {
+	var r api.ListConfigVersionsReq
+	if err := ctx.BindQuery(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	if err := validate.GoValidateStruct(r, true); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	resp, err := simpleconfig.ListConfigFileVersions(&r)
+	if err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	handler.SendResponse(ctx, err, resp)
+}
diff --git a/dbm-services/common/db-config/internal/handler/simple/dbha.go b/dbm-services/common/db-config/internal/handler/simple/dbha.go
new file mode 100644
index 0000000000..29b3418c68
--- /dev/null
+++ b/dbm-services/common/db-config/internal/handler/simple/dbha.go
@@ -0,0 +1,33 @@
+package simple
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/handler"
+	"bk-dbconfig/internal/service/simpleconfig"
+
+	"github.com/gin-gonic/gin"
+)
+
+// QueryAllValuesConfigName godoc
+//
+// @Summary      List platform config item names
+// @Description  List all conf names of a given conf type / conf file in the platform config
+// @Tags         plat_config
+// @Produce      json
+// @Param        body query     api.QueryConfigNamesReq  true  "query"
+// @Success      200  {object}  api.QueryConfigNamesResp
+// @Failure      400  {object}  api.HTTPClientErrResp
+// @Router       /bkconfig/v1/confitem/queryname [get]
+func (cf *Config) QueryAllValuesConfigName(ctx *gin.Context) {
+	var r api.QueryConfigNamesReq
+	if err := ctx.BindQuery(&r); err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	resp, err := simpleconfig.QueryConfigNames(&r, true)
+	if err != nil {
+		handler.SendResponse(ctx, err, nil)
+		return
+	}
+	handler.SendResponse(ctx, err, resp)
+}
diff --git a/dbm-services/common/db-config/internal/handler/simple/simple.go b/dbm-services/common/db-config/internal/handler/simple/simple.go
new file mode 100644
index 0000000000..34f34a63c3
--- /dev/null
+++ b/dbm-services/common/db-config/internal/handler/simple/simple.go
@@ -0,0 +1,50 @@
+// Package simple TODO
+package simple
+
+import (
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+)
+
+// ConfigIfce is the interface a config handler implements to expose its routes
+type ConfigIfce interface {
+	RegisterRoutes(group string, router *gin.Engine)
+	Routes() []*gin.RouteInfo
+}
+
+// Config groups the simple-config HTTP handlers
+type Config struct {
+}
+
+// Routes returns the route table for the config_file, config_version, config_item and config_meta endpoints
+func (cf *Config) Routes() []*gin.RouteInfo {
+	return []*gin.RouteInfo{
+		// config_file
+		{Method: http.MethodPost, Path: "/conffile/add", HandlerFunc: cf.UpsertConfigFilePlat},
+		{Method: http.MethodPost, Path: "/conffile/update", HandlerFunc: cf.UpdateConfigFilePlat},
+		{Method: http.MethodGet, Path: "/conffile/list", HandlerFunc: cf.ListConfigFiles},
+		{Method: http.MethodGet, Path: "/conffile/query", HandlerFunc: cf.QueryConfigTypeNamesPlat},
+
+		// config_version
+		{Method: http.MethodGet, Path: "/version/list", HandlerFunc: cf.ListConfigFileVersions},
+		{Method: http.MethodGet, Path: "/version/detail", HandlerFunc: cf.GetVersionedDetail},
+		{Method: http.MethodPost, Path: "/version/generate", HandlerFunc: cf.GenerateConfigVersion},
+		{Method: http.MethodPost, Path: "/version/publish", HandlerFunc: cf.PublishConfigFile},
+		{Method: http.MethodPost, Path: "/version/applyinfo", HandlerFunc: cf.VersionApplyInfo},
+		{Method: http.MethodPost, Path: "/version/applied", HandlerFunc: cf.VersionApplyStat},
+		{Method: http.MethodPost, Path: "/version/status", HandlerFunc: cf.VersionStat},
+		{Method: http.MethodPost, Path: "/version/applyitem", HandlerFunc: cf.ItemApply},
+
+		// config_item
+		{Method: http.MethodPost, Path: "/confitem/query", HandlerFunc: cf.MergeAndGetConfigItems},
+		{Method: http.MethodPost, Path: "/confitem/queryone", HandlerFunc: cf.MergeAndGetConfigItemsOne},
+		{Method: http.MethodPost, Path: "/confitem/upsert", HandlerFunc: cf.UpdateConfigFileItems},
+		{Method: http.MethodPost, Path: "/confitem/save", HandlerFunc: cf.SaveConfigFileItems},
+		{Method: http.MethodPost, Path: "/confitem/batchget", HandlerFunc: cf.BatchGetConfigOneItem},
+
+		// config_meta
+		{Method: http.MethodGet, Path: "/conftype/query", HandlerFunc: cf.QueryConfigTypeInfo},
+		{Method: http.MethodGet, Path: "/confname/list", HandlerFunc: cf.QueryConfigTypeNames},
+	}
+}
diff --git a/dbm-services/common/db-config/internal/handler/simple/simple_item.go b/dbm-services/common/db-config/internal/handler/simple/simple_item.go
new file mode 100644
index 0000000000..2c29f4c703
--- /dev/null
+++ b/dbm-services/common/db-config/internal/handler/simple/simple_item.go
@@ -0,0 +1 @@
+package simple
diff --git a/dbm-services/common/db-config/internal/pkg/cst/const.go b/dbm-services/common/db-config/internal/pkg/cst/const.go
new file mode 100644
index 0000000000..96c1bc2be2
--- /dev/null
+++ b/dbm-services/common/db-config/internal/pkg/cst/const.go
@@ -0,0 +1,75 @@
+package cst
+
+import (
+	"bk-dbconfig/pkg/constvar"
+)
+
+// ConfigLevelMap maps a config level name to its priority; a larger value means a more specific level
+var ConfigLevelMap = map[string]int{
+	constvar.LevelPlat: 10,
+	"app":              20,
+	"bk_biz_id":        20,
+	"module":           30,
+	"db_module_id":     30,
+	"cluster":          50,
+	"role":             60,
+	"host":             70,
+	"instance":         80,
+}
+
+const (
+	// Spider is an alias of TenDBCluster
+	Spider = "tendbcluster"
+	// TenDBCluster namespace
+	TenDBCluster = "tendbcluster"
+	// TenDBHA namespace
+	TenDBHA = "tendbha"
+	// TenDBSingle namespace
+	TenDBSingle = "tendbsingle"
+)
+
+// var NamespaceAllowed = []string{TenDBCluster, TenDBHA, TenDBSingle}
+
+// GetConfigLevelMap returns the level priority map; confType is currently ignored
+func GetConfigLevelMap(confType string) map[string]int {
+	return ConfigLevelMap
+}
+
+// GetConfigLevels returns all known level names
+func GetConfigLevels(confType string) []string {
+	configLevelMap := GetConfigLevelMap(confType)
+
+	// allocate capacity only; a non-zero length would leave empty strings at the front
+	configLevels := make([]string, 0, len(configLevelMap))
+	for k := range configLevelMap {
+		configLevels = append(configLevels, k)
+	}
+	return configLevels
+}
+
+// GetConfigLevelsUp returns the level names with a priority lower than levelName's
+func GetConfigLevelsUp(levelName string) []string {
+	configLevelMap := GetConfigLevelMap("")
+	levelPriority := configLevelMap[levelName]
+
+	configLevels := make([]string, 0)
+	for k, v := range configLevelMap {
+		if v < levelPriority {
+			configLevels = append(configLevels, k)
+		}
+	}
+	return configLevels
+}
+
+// GetConfigLevelsDown returns the level names with a priority higher than levelName's
+func GetConfigLevelsDown(levelName string) []string {
+	configLevelMap := GetConfigLevelMap("")
+	levelPriority := configLevelMap[levelName]
+
+	configLevels := make([]string, 0)
+	for k, v := range configLevelMap {
+		if v > levelPriority {
+			configLevels = append(configLevels, k)
+		}
+	}
+	return configLevels
+}
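+
+// An illustrative sketch (not called anywhere) of how the helpers above relate,
+// given the priorities in ConfigLevelMap:
+//
+//	up := GetConfigLevelsUp("cluster")     // priority < 50: plat/app/bk_biz_id/module/db_module_id
+//	down := GetConfigLevelsDown("cluster") // priority > 50: role/host/instance
+//	sort.Strings(up)                       // map iteration order is random; sort when a stable order matters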
diff --git a/dbm-services/common/db-config/internal/pkg/cst/cst.go b/dbm-services/common/db-config/internal/pkg/cst/cst.go
new file mode 100644
index 0000000000..f558488b06
--- /dev/null
+++ b/dbm-services/common/db-config/internal/pkg/cst/cst.go
@@ -0,0 +1,2 @@
+// Package cst TODO
+package cst
diff --git a/dbm-services/common/db-config/internal/pkg/cst/mysql.go b/dbm-services/common/db-config/internal/pkg/cst/mysql.go
new file mode 100644
index 0000000000..8551ef235f
--- /dev/null
+++ b/dbm-services/common/db-config/internal/pkg/cst/mysql.go
@@ -0,0 +1,138 @@
+package cst
+
+import "bk-dbconfig/pkg/constvar"
+
+// common const variable
+const (
+	Default = "default"
+	Master  = "master"
+	Slave   = "slave"
+
+	MySQLMaster        = "mysql_master"
+	MySQLLogdb         = "mysql_logdb"
+	MySQLSlave         = "mysql_slave"
+	MySQLMasterSlave   = "mysql_master&mysql_slave"
+	MySQLMasterOrSlave = "mysql_master|mysql_slave"
+	SpiderMaster       = "spider_master"
+	SpiderSlave        = "spider_slave"
+	ProxyMaster        = "proxy_master"
+	ProxyPairs         = "proxy_pairs"
+	// ProxySlaveBak is to be deprecated later
+	ProxySlaveBak = "proxy_slave_abandoned"
+
+	Logdb        = "mysql_logdb"
+	Tokudb       = "tokudb"
+	Proxy        = "Proxy"
+	MySQL        = "MySQL"
+	Unlimit      = "unlimit"
+	IDCCutLength = 2 // the first 2 characters denote the city
+	Linux        = "linux"
+	Windows      = "windows"
+	New          = "new"
+	Null         = "NULL"
+	NO           = "NO"
+	// Dumper is the tbinlogdumper component
+	Dumper = "Dumper"
+)
+
+// instance status
+const (
+	RUNNING      = "RUNNING"
+	UNAVAILABLE  = "UNAVAILABLE"
+	AVAIL        = "AVAIL"
+	LOCKED       = "LOCKED"
+	ALONE        = "ALONE"
+	UNIQ_LOCK    = "UNIQ_LOCK"
+	INITIALIZING = "INITIALIZING"
+)
+
+// proxy status
+const (
+	REFRESH_ONE = 1
+	REFRESH_TWO = 2
+)
+
+// mysql switch type
+const (
+	AutoSwitch = "AutoSwitch" // 0
+	HandSwitch = "HandSwitch" // 1
+	NotSwitch  = "NotSwitch"  // 9
+)
+
+// tb_role_config
+const (
+	ConfigOn  = "on"
+	ConfigOff = "off" // 这个不确定关闭就是 off
+)
+
+// DNS type
+const (
+	DNS_NOT_RELATED      = 0
+	DNS_DIRECT_RELATED   = 1
+	DNS_INDIRECT_RELATED = 2
+)
+
+// master
+const (
+	IsMaster    = 1
+	IsNotMaster = 0
+)
+
+// switch
+const (
+	SwitchOn  = "on"
+	SwitchOff = "off"
+)
+
+// disasterLevel
+const (
+	IDC        = "idc"
+	City       = "city"
+	DiffCampus = "DiffCampus"
+	SameCampus = "SameCampus"
+)
+
+// config related
+const (
+	// MiscAttribute is presumably short for "miscellaneous"
+	MiscAttribute                  = "misc"
+	MySQLVersion                   = "mysqlversion"
+	CheckDbdrEquipSwitch           = "check_dbdr_equip_switch"
+	CheckDbdrSvrType               = "check_dbdr_svr_type"
+	CheckDbdrRaidType              = "check_dbdr_raid_type"
+	CheckDbdrLinkNetdeviceIDSwitch = "check_dbdr_LinkNetdeviceId_switch"
+	PubBKBizID                     = constvar.LevelPlat
+	ConfigRole                     = "config"
+	GamedbRole                     = "gamedb"
+)
+
+// number
+const (
+	ZERO = 0
+	ONE  = 1
+)
+
+// LeastTokuVersion TODO
+const LeastTokuVersion = "TMySQL-2.1.0"
+
+// install mysql check items
+const (
+	CheckTokudb  = "check tokudb"
+	CheckSuccess = "OK"
+	CheckFail    = "FAIL"
+	CheckPass    = "PASS"
+)
+
+const (
+	// OneChance TODO
+	OneChance = 1
+)
+
+// uninstallOption
+const (
+	CancelOwn           = 1
+	ClearAndBackup      = 2
+	ForceClearAndBackup = 3
+	ForceClearAll       = 4
+)
diff --git a/dbm-services/common/db-config/internal/pkg/errno/code.go b/dbm-services/common/db-config/internal/pkg/errno/code.go
new file mode 100644
index 0000000000..0697ca8ba4
--- /dev/null
+++ b/dbm-services/common/db-config/internal/pkg/errno/code.go
@@ -0,0 +1,77 @@
+package errno
+
+var (
+	// OK TODO
+	// Common errors
+	// OK = Errno{Code: 0, Message: ""}
+	OK = Errno{Code: 0, Message: "", CNMessage: ""}
+	// InternalServerError TODO
+	InternalServerError = Errno{Code: 10001, Message: "Internal server error", CNMessage: "服务器内部错误。"}
+	// ErrBind TODO
+	ErrBind = Errno{Code: 10002, Message: "Error occurred while binding the request body to the struct.",
+		CNMessage: "请求参数发生错误。"}
+
+	// ErrTypeAssertion TODO
+	ErrTypeAssertion = Errno{Code: 10040, Message: "Error occurred while doing type assertion."}
+	// ErrParameterRequired TODO
+	ErrParameterRequired = Errno{Code: 10050, Message: "Input parameter required"}
+	// ErrBKBizIDIsEmpty TODO
+	ErrBKBizIDIsEmpty = Errno{Code: 10200, Message: "BKBizID is empty!", CNMessage: "BKBizID 名字不能为空!"}
+
+	// ErrInputParameter TODO
+	ErrInputParameter = Errno{Code: 10201, Message: "input parameter error.", CNMessage: "输入参数错误"}
+
+	// ErrInvokeAPI TODO
+	// call other service error
+	ErrInvokeAPI = Errno{Code: 15000, Message: "Error occurred while invoking API", CNMessage: "调用 API 发生错误!"}
+	// ErrInvokeSaveBillAPI TODO
+	ErrInvokeSaveBillAPI = Errno{Code: 15003, Message: "Error occurred while invoking SaveBill API",
+		CNMessage: "调用 SaveBill API 发生错误!"}
+	// GreaterThanOneConfigValue TODO
+	GreaterThanOneConfigValue = Errno{Code: 15010, Message: "number of gcs config values is greater than 1",
+		CNMessage: "获取 GCS config 的 value 个数超过 1 !"}
+	// InvalidHttpStatusCode TODO
+	InvalidHttpStatusCode = Errno{Code: 15015, Message: "Invalid http status code", CNMessage: "无效的 http 状态码!"}
+
+	// ErrRecordNotFound TODO
+	// model operation errors
+	ErrRecordNotFound = Errno{Code: 50202, Message: "There is no records in gcs database.", CNMessage: "GCS 数据库未找到对应的记录!"}
+
+	// ErrJSONMarshal TODO
+	// data handle error
+	ErrJSONMarshal = Errno{Code: 50302, Message: "Error occurred while marshaling the data to JSON.",
+		CNMessage: "Error occurred while marshaling the data to JSON."}
+	// ErrReadEntity TODO
+	ErrReadEntity = Errno{Code: 50303, Message: "Error occurred while parsing the request parameter.",
+		CNMessage: "Error occurred while parsing the request parameter."}
+	// ErrJSONUnmarshal TODO
+	ErrJSONUnmarshal = Errno{Code: 50304, Message: "Error occurred while Unmarshaling the JSON to data model.",
+		CNMessage: "Error occurred while Unmarshaling the JSON to data model."}
+
+	// ErrDuplicateItem TODO
+	ErrDuplicateItem = Errno{Code: 10000, Message: "duplicate conf_name", CNMessage: "配置项重复"}
+	// ErrNamespaceType TODO
+	ErrNamespaceType = Errno{Code: 10000, Message: "invalid namespace or conf_type or conf_file",
+		CNMessage: "namespace,conf_type,conf_file 参数错误"}
+	// ErrUnversionable TODO
+	ErrUnversionable = Errno{Code: 10000, Message: "this namespace conf_type is unVersion-able",
+		CNMessage: "该 namespace conf_type 不支持版本化"}
+	// ErrVersionable TODO
+	ErrVersionable = Errno{Code: 10000,
+		Message:   "version-able config file should use SaveOnly/SaveAndPublish api",
+		CNMessage: "可版本化配置需使用 SaveOnly/SaveAndPublish 接口"}
+	// ErrConflictWithLowerConfigLevel TODO
+	ErrConflictWithLowerConfigLevel = Errno{Code: 8705002, Message: "has conflicts with lower config level",
+		CNMessage: "与下层级配置存在冲突,会覆盖下级配置"}
+	// ErrConfigLevel TODO
+	ErrConfigLevel = Errno{Code: 10002, Message: "level should not be the same", CNMessage: "出现重复level_name"}
+	// ErrNodeNotFound TODO
+	ErrNodeNotFound = Errno{Code: 10005, Message: "level node_id not found", CNMessage: "没有找到 node_id"}
+	// ErrConfFile TODO
+	ErrConfFile = Errno{Code: 10005, Message: "conf_file definition error", CNMessage: "配置文件定义错误"}
+	// ErrLevelName TODO
+	ErrLevelName = Errno{Code: 10006, Message: "illegal level_name", CNMessage: "level_name 非法"}
+	// ErrOnlyLevelConfigAllowed TODO
+	ErrOnlyLevelConfigAllowed = Errno{Code: 10007, Message: "only level_config is allowed to be applied by default",
+		CNMessage: "只有 level_config 才能直接应用 config"}
+)
diff --git a/dbm-services/common/db-config/internal/pkg/errno/errno.go b/dbm-services/common/db-config/internal/pkg/errno/errno.go
new file mode 100644
index 0000000000..0776030e1b
--- /dev/null
+++ b/dbm-services/common/db-config/internal/pkg/errno/errno.go
@@ -0,0 +1,116 @@
+// Package errno TODO
+package errno
+
+import (
+	"bk-dbconfig/pkg/core/config"
+	"fmt"
+)
+
+// Errno TODO
+type Errno struct {
+	Code      int
+	Message   string
+	CNMessage string
+}
+
+var lang = config.GetString("lang")
+
+// Error implements the error interface, returning the message in the configured language
+func (err Errno) Error() string {
+	switch lang {
+	case "zh_CN":
+		return err.CNMessage
+	case "en_US":
+		return err.Message
+	default:
+		return err.CNMessage
+	}
+}
+
+// Addf appends a formatted message to the error text
+func (err Errno) Addf(format string, args ...interface{}) error {
+	return err.Add(fmt.Sprintf(format, args...))
+}
+
+// Add appends message to the error text of the configured language
+func (err Errno) Add(message string) error {
+	switch lang {
+	case "zh_CN":
+		err.CNMessage += message
+	case "en_US":
+		err.Message += message
+	default:
+		err.CNMessage += message
+	}
+	return err
+}
+
+// Err represents an error
+type Err struct {
+	Errno
+	Err error
+}
+
+// New wraps err together with an Errno
+func New(errno Errno, err error) *Err {
+	return &Err{Errno: errno, Err: err}
+}
+
+// Add appends message to the wrapped error's text in the configured language
+func (err Err) Add(message string) error {
+	switch lang {
+	case "zh_CN":
+		err.CNMessage += message
+		return err
+	case "en_US":
+		err.Message += message
+		return err
+	default:
+		err.CNMessage += message
+		return err
+	}
+}
+
+// SetMsg replaces the English message
+func (err Err) SetMsg(message string) error {
+	err.Message = message
+	return err
+}
+
+// SetCNMsg replaces the Chinese message
+func (err Err) SetCNMsg(cnMessage string) error {
+	err.CNMessage = cnMessage
+	return err
+}
+
+// Addf appends a formatted message to the wrapped error's text
+func (err Err) Addf(format string, args ...interface{}) error {
+	return err.Add(fmt.Sprintf(format, args...))
+}
+
+// DecodeErr decodes err into a (code, message) pair; unknown error types map to InternalServerError
+func DecodeErr(err error) (int, string) {
+
+	CN := true // currently the Chinese message is always preferred
+
+	if err == nil {
+		return OK.Code, OK.Message
+	}
+
+	switch typed := err.(type) {
+	case Err:
+		if CN {
+			return typed.Code, typed.CNMessage
+		} else {
+			return typed.Code, typed.Message
+		}
+	case Errno:
+		if CN {
+			return typed.Code, typed.CNMessage
+		} else {
+			return typed.Code, typed.Message
+		}
+	default:
+	}
+	return InternalServerError.Code, err.Error()
+}
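+
+// An illustrative sketch (not part of this package's API) of the intended call
+// pattern in a handler, assuming lang is left at its default:
+//
+//	err := errno.ErrBind.Add(": bk_biz_id")
+//	code, msg := errno.DecodeErr(err) // 10002, "请求参数发生错误。: bk_biz_id"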
diff --git a/dbm-services/common/db-config/internal/repository/migrate.go b/dbm-services/common/db-config/internal/repository/migrate.go
new file mode 100644
index 0000000000..94e21d5a81
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/migrate.go
@@ -0,0 +1,141 @@
+package repository
+
+import (
+	"fmt"
+
+	"bk-dbconfig/assets"
+	"bk-dbconfig/internal/repository/migratespec"
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/pkg/core/config"
+	"bk-dbconfig/pkg/core/logger"
+
+	"github.com/golang-migrate/migrate/v4"
+	"github.com/golang-migrate/migrate/v4/database/mysql"
+	"github.com/golang-migrate/migrate/v4/source/iofs"
+	"github.com/pkg/errors"
+)
+
+// DoMigrateFromEmbed tries the migrations from the go:embed filesystem first
+// returns nil when there are no changes to apply
+func DoMigrateFromEmbed() error {
+	var mig *migrate.Migrate
+	// from embed
+	if d, err := iofs.New(assets.Migrations, "migrations"); err != nil {
+		return err
+	} else {
+		if err = reMigrateConfigPlat(); err != nil {
+			return err
+		}
+		dbURL := fmt.Sprintf(
+			"mysql://%s:%s@tcp(%s)/%s?charset=%s&parseTime=true&loc=Local&multiStatements=true&interpolateParams=true",
+			config.GetString("db.username"),
+			config.GetString("db.password"),
+			config.GetString("db.addr"),
+			config.GetString("db.name"),
+			"utf8",
+		)
+		mig, err = migrate.NewWithSourceInstance("iofs", d, dbURL)
+		if err != nil {
+			return errors.WithMessage(err, "migrate from embed")
+		}
+		defer mig.Close()
+		// fetch the current migrate version; if <= 2, sensitive info still needs migrating (step=3)
+		var versionLast uint
+		if versionLast, _, err = mig.Version(); err == migrate.ErrNilVersion {
+			versionLast = 0
+		} else if err != nil {
+			logger.Warn("fail to get current migrate version")
+		}
+		logger.Info("current migrate version: %d", versionLast)
+
+		if versionLast < migratespec.SensitiveMigVer-1 {
+			if err = mig.Migrate(migratespec.SensitiveMigVer - 1); err == nil || err == migrate.ErrNoChange {
+				logger.Info("migrate schema success with %v", err)
+			} else {
+				return errors.WithMessage(err, "migrate schema")
+			}
+		}
+		// migrate to the latest version
+		if err = mig.Up(); err == nil || err == migrate.ErrNoChange {
+			logger.Info("migrate data from embed success with %v", err)
+
+			if versionLast < migratespec.SensitiveMigVer {
+				logger.Info("migrate sensitive info for the first time")
+				db := model.InitSelfDB("")
+				defer func() {
+					dbc, _ := db.DB()
+					dbc.Close()
+				}()
+				if err = migratespec.MigrateSensitive(db); err != nil {
+					logger.Errorf("fail to migrate sensitive: %s", err.Error())
+					return mig.Migrate(migratespec.SensitiveMigVer - 1)
+					//return errors.WithMessage(err, "migrate sensitive")
+				}
+				logger.Info("migrate sensitive success with %v", err)
+			}
+
+			return nil
+		} else {
+			logger.Errorf("migrate data from embed failed: %s", err.Error())
+			return err
+		}
+	}
+}
+
+// DoMigrateFromSource runs db migrate from the configured source
+func DoMigrateFromSource() error {
+	db, err := model.InitSelfDB("multiStatements=true&interpolateParams=true").DB()
+	if err != nil {
+		return err
+	}
+	defer db.Close()
+	var mig *migrate.Migrate
+	driver, err := mysql.WithInstance(db, &mysql.Config{})
+	if err != nil {
+		return err
+	}
+	source := config.GetString("migrate.source")
+	if source == "" {
+		return errors.New("db migrate need source_url")
+	}
+	// from config migrate.source
+	if mig, err = migrate.NewWithDatabaseInstance(source, config.GetString("db.name"), driver); err != nil {
+		return err
+	} else {
+		forceVersion := config.GetInt("migrate.force")
+		if forceVersion != 0 {
+			return mig.Force(forceVersion)
+		}
+		return mig.Up()
+	}
+}
+
+// reMigrateConfigPlat re-initializes the platform-level default config data
+func reMigrateConfigPlat() error {
+	db, err := model.InitSelfDB("multiStatements=true&interpolateParams=true").DB()
+	if err != nil {
+		return errors.WithMessage(err, "reMigrate connect failed")
+	}
+	defer db.Close()
+	sqlStrs := []string{
+		fmt.Sprintf("update schema_migrations set version=%d,dirty=0 where version >%d",
+			migratespec.SensitiveMigVer, migratespec.SensitiveMigVer), // step 3 is sensitive mig
+		"delete from tb_config_file_def",
+		"delete from tb_config_name_def where flag_encrypt!=1 or value_default like '{{%'",
+	}
+
+	for i, sql := range sqlStrs {
+		if i == 0 {
+			if _, err := db.Exec(sql); err != nil {
+				// failed to update the migrate metadata: bail out and silently skip this reMigrate run
+				return nil
+			}
+			logger.Warnf("reset migrate to %d", migratespec.SensitiveMigVer)
+		} else {
+			if _, err = db.Exec(sql); err != nil {
+				return errors.WithMessage(err, "reMigrate failed")
+			}
+		}
+	}
+	return nil
+}
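+
+// A hedged sketch of the expected boot-time selection (the surrounding wiring is
+// an assumption, not taken from this file): prefer the embedded migrations and
+// use an explicit source only when one is configured.
+//
+//	if config.GetString("migrate.source") != "" {
+//		err = DoMigrateFromSource()
+//	} else {
+//		err = DoMigrateFromEmbed()
+//	}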
diff --git a/dbm-services/common/db-config/internal/repository/migratespec/sensitive.go b/dbm-services/common/db-config/internal/repository/migratespec/sensitive.go
new file mode 100644
index 0000000000..80b1674b3d
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/migratespec/sensitive.go
@@ -0,0 +1,133 @@
+package migratespec
+
+import (
+	"fmt"
+
+	"dbm-services/common/go-pubpkg/logger"
+
+	"github.com/pkg/errors"
+	"github.com/sethvargo/go-password/password"
+	"gorm.io/gorm"
+
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/config"
+	"bk-dbconfig/pkg/util/crypt"
+)
+
+// SensitiveMigVer is the migrate step that seeds sensitive info
+const SensitiveMigVer = 3
+
+// MigrateSensitive seeds encrypted default values for sensitive conf_names
+func MigrateSensitive(db *gorm.DB) error {
+	confNames := []*model.ConfigNameDefModel{
+		// tendb
+		{
+			Namespace: "tendb", ConfType: "init_user", ConfFile: "mysql#user", ConfName: "admin_pwd", FlagEncrypt: 1,
+		},
+		{
+			Namespace: "tendb", ConfType: "init_user", ConfFile: "mysql#user", ConfName: "repl_pwd", FlagEncrypt: 1,
+		},
+		{
+			Namespace: "tendb", ConfType: "init_user", ConfFile: "mysql#user", ConfName: "yw_pwd", FlagEncrypt: 1,
+		},
+		{
+			Namespace: "tendb", ConfType: "init_user", ConfFile: "mysql#user", ConfName: "monitor_pwd", FlagEncrypt: 1,
+		},
+		{
+			Namespace: "tendb", ConfType: "init_user", ConfFile: "mysql#user", ConfName: "monitor_access_all_pwd", FlagEncrypt: 1,
+		},
+		{
+			Namespace: "tendb", ConfType: "init_user", ConfFile: "mysql#user", ConfName: "backup_pwd", FlagEncrypt: 1,
+		},
+		{
+			Namespace: "tendb", ConfType: "init_user", ConfFile: "mysql#user", ConfName: "os_mysql_pwd", FlagEncrypt: 1,
+		},
+		{
+			Namespace: "tendb", ConfType: "init_user", ConfFile: "proxy#user", ConfName: "proxy_admin_pwd", FlagEncrypt: 1,
+		},
+		{
+			Namespace: "tendb", ConfType: "init_user", ConfFile: "spider#user", ConfName: "tdbctl_pwd", FlagEncrypt: 1,
+		},
+
+		{
+			Namespace: "tendbha", ConfType: "backup", ConfFile: "binlog_rotate.yaml", ConfName: "encrypt.key_prefix", FlagEncrypt: 1,
+		},
+		{
+			Namespace: "tendbsingle", ConfType: "backup", ConfFile: "binlog_rotate.yaml",
+			ConfName: "encrypt.key_prefix", FlagEncrypt: 1,
+		},
+		{
+			Namespace: "tendbcluster", ConfType: "backup", ConfFile: "binlog_rotate.yaml", ConfName: "encrypt.key_prefix", FlagEncrypt: 1,
+		},
+
+		// common
+		{
+			Namespace: "common", ConfType: "osconf", ConfFile: "os", ConfName: "user_pwd", FlagEncrypt: 1,
+		},
+		// TendisCache
+		{
+			Namespace: "TendisCache", ConfType: "dbconf", ConfFile: "TendisCache-3.2", ConfName: "requirepass", FlagEncrypt: 1,
+		},
+		{
+			Namespace: "TendisCache", ConfType: "dbconf", ConfFile: "TendisCache-3.2", ConfName: "requirepass1", FlagEncrypt: 1,
+		},
+		// kafka
+		{
+			Namespace: "kafka", ConfType: "dbconf", ConfFile: "2.4.0", ConfName: "adminPassword", FlagEncrypt: 1,
+		},
+		{
+			Namespace: "kafka", ConfType: "dbconf", ConfFile: "2.4.0", ConfName: "password", FlagEncrypt: 1,
+		},
+		// hdfs
+		{
+			Namespace: "hdfs", ConfType: "dbconf", ConfFile: "2.6.0-cdh5.4.11-tendataV0.2", ConfName: "password", FlagEncrypt: 1,
+		},
+		// pulsar
+		{
+			Namespace: "pulsar", ConfType: "dbconf", ConfFile: "2.10.1", ConfName: "password", FlagEncrypt: 1,
+		},
+		// influxdb
+		{
+			Namespace: "influxdb", ConfType: "dbconf", ConfFile: "1.8.4", ConfName: "password", FlagEncrypt: 1,
+		},
+		// es
+		{
+			Namespace: "es", ConfType: "dbconf", ConfFile: "7.10.2", ConfName: "transport_pemkey_password",
+			ValueDefault: "", FlagEncrypt: 1,
+		},
+		{
+			Namespace: "es", ConfType: "dbconf", ConfFile: "7.10.2", ConfName: "http_pemkey_password",
+			ValueDefault: "", FlagEncrypt: 1,
+		},
+	}
+	for _, c := range confNames {
+		if c.ValueDefault == "" {
+			c.ValueDefault = password.MustGenerate(12, 3, 0, false, true)
+			logger.Info("sensitive: {Namespace:%s ConfType:%s ConfFile:%s ConfName:%s ValueDefault:%s}",
+				c.Namespace, c.ConfType, c.ConfFile, c.ConfName, c.ValueDefault)
+			if c.FlagEncrypt == 1 {
+				key := fmt.Sprintf("%s%s", config.GetString("encrypt.keyPrefix"), constvar.BKBizIDForPlat)
+				c.ValueDefault, _ = crypt.EncryptString(c.ValueDefault, key, constvar.EncryptEnableZip)
+			}
+		}
+		c.ConfType = "STRING"
+		c.ValueTypeSub = ""
+		c.ValueAllowed = ""
+		c.FlagStatus = 1
+	}
+	err := db.Transaction(func(tx *gorm.DB) error {
+
+		if err1 := tx.Omit("id", "value_formula", "order_index", "since_version", "stage").
+			Create(confNames).Error; err1 != nil {
+			return errors.WithMessage(err1, "init sensitive conf_name")
+		}
+		/*
+			if err1 := tx.Select("value_default", "flag_encrypt").
+				Where(c.UniqueWhere()).Updates(c).Error; err1 != nil {
+				return errors.WithMessage(err1, c.ConfName)
+			}
+		*/
+
+		return nil
+	})
+	return err
+}
diff --git a/dbm-services/common/db-config/internal/repository/model/batchget.go b/dbm-services/common/db-config/internal/repository/model/batchget.go
new file mode 100644
index 0000000000..6b0f73d193
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/batchget.go
@@ -0,0 +1,27 @@
+package model
+
+import "bk-dbconfig/internal/api"
+
+// BatchGetConfigItem queries conf items for multiple level_values and conf names, decrypting values on the way out
+func BatchGetConfigItem(r *api.BatchGetConfigItemReq, confNames []string) (configs []*ConfigModel, err error) {
+	where := &ConfigModel{
+		Namespace: r.Namespace,
+		ConfType:  r.ConfType,
+		ConfFile:  r.ConfFile,
+		LevelName: r.LevelName,
+		// ConfName:  r.ConfName, // in
+	}
+	sqlRes := DB.Self.Model(&ConfigModel{}).
+		Where(where).Where("level_value in ?", r.LevelValues).Where("conf_name in ?", confNames).
+		Select("id", "level_value", "conf_name", "conf_value").Find(&configs)
+	if err := sqlRes.Error; err != nil {
+		return nil, err
+	}
+	for _, c := range configs {
+		err = c.MayDecrypt()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return configs, nil
+}
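+
+// An illustrative sketch of a batch read across several clusters for a fixed
+// set of conf names (field values are made-up examples):
+//
+//	r := &api.BatchGetConfigItemReq{Namespace: "tendbha", ConfType: "dbconf",
+//		ConfFile: "MySQL-5.7", LevelName: "cluster", LevelValues: []string{"c1", "c2"}}
+//	items, err := BatchGetConfigItem(r, []string{"max_connections"})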
diff --git a/dbm-services/common/db-config/internal/repository/model/cache.go b/dbm-services/common/db-config/internal/repository/model/cache.go
new file mode 100644
index 0000000000..133a39866d
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/cache.go
@@ -0,0 +1,24 @@
+package model
+
+import (
+	"log"
+
+	"github.com/coocood/freecache"
+)
+
+// CacheLocal is the process-wide in-memory cache
+var CacheLocal *freecache.Cache
+
+// InitCache initializes CacheLocal with a 10MB capacity
+func InitCache() {
+	cacheSize := 10 * 1024 * 1024
+	CacheLocal = freecache.NewCache(cacheSize)
+}
+
+// LoadCache warms the cache on startup
+func LoadCache() {
+	// panic if the initial cache load fails
+	if err := AutoRefreshCache(); err != nil {
+		log.Panicln("Init start loading cache failed", err)
+	}
+}
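+
+// A minimal sketch of the expected startup order (assumed to happen in main):
+// InitCache must run before LoadCache or any CacheGet*/CacheSet* helper, since
+// they all dereference CacheLocal.
+//
+//	model.InitCache()
+//	model.LoadCache() // panics if the first refresh fails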
diff --git a/dbm-services/common/db-config/internal/repository/model/cache_config_file.go b/dbm-services/common/db-config/internal/repository/model/cache_config_file.go
new file mode 100644
index 0000000000..d6277d1206
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/cache_config_file.go
@@ -0,0 +1,190 @@
+package model
+
+import (
+	"fmt"
+
+	"bk-dbconfig/internal/api"
+
+	"github.com/coocood/freecache"
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util/serialize"
+)
+
+// NotFoundInDB is the cache sentinel recording that a key is known to be absent from the DB
+const NotFoundInDB = "1"
+
+// IsConfigLevelEntityVersioned reports whether levelName is the versioned level of the given conf file
+func IsConfigLevelEntityVersioned(namespace, confType, conFile, levelName string) bool {
+	fd := api.BaseConfFileDef{Namespace: namespace, ConfType: confType, ConfFile: conFile}
+	if fileDef, err := CacheGetConfigFile(fd); err == nil {
+		if fileDef.LevelVersioned == levelName {
+			return true
+		}
+	}
+	return false
+}
+
+// CacheGetConfigFile godoc
+// returning nil, nil means the record does not exist in the db
+func CacheGetConfigFile(fd api.BaseConfFileDef) (*ConfigFileDefModel, error) {
+	cacheKey := []byte(fmt.Sprintf("%s|%s|%s", fd.Namespace, fd.ConfType, fd.ConfFile))
+
+	if cacheVal, err := CacheLocal.Get(cacheKey); err != nil {
+		if errors.Is(err, freecache.ErrNotFound) {
+			return CacheSetAndGetConfigFile(fd)
+			// return CacheGetConfigFile(namespace, confType, confFile)
+		}
+		return nil, err
+	} else {
+		cacheValStr := string(cacheVal)
+		if cacheValStr == NotFoundInDB || cacheVal == nil {
+			logger.Info("GetConfigFile not_found_in_db key=%s", cacheKey)
+			return nil, nil
+		}
+		f := ConfigFileDefModel{}
+		serialize.UnSerializeString(cacheValStr, &f, false)
+		logger.Info("GetConfigFile from cache: %+v", f)
+		return &f, nil
+	}
+}
+
+// CacheSetAndGetConfigFile godoc
+// queries the plat config file from the DB; if absent, returns nil, nil and caches the miss for 60s
+func CacheSetAndGetConfigFile(fd api.BaseConfFileDef) (*ConfigFileDefModel, error) {
+	cacheKey := []byte(fmt.Sprintf("%s|%s|%s", fd.Namespace, fd.ConfType, fd.ConfFile))
+
+	confFiles, err := QueryConfigFileDetail(fd.Namespace, fd.ConfType, fd.ConfFile)
+	if err != nil {
+		if errors.Is(err, gorm.ErrRecordNotFound) { // not in db
+			logger.Info("CacheSetAndGetConfigFile not_found_in_db key=%s", cacheKey)
+			CacheLocal.Set(cacheKey, []byte(NotFoundInDB), 60)
+			return nil, nil
+		}
+		return nil, err
+	}
+	f := confFiles[0]
+	// logger.Info("CacheSetAndGetConfigFile to cache: %+v", f)
+
+	cacheVal, _ := serialize.SerializeToString(f, false)
+	CacheLocal.Set(cacheKey, []byte(cacheVal), 300)
+	if f.Namespace == fd.Namespace && f.ConfType == fd.ConfType && f.ConfFile == fd.ConfFile {
+		return f, nil
+	} else {
+		return nil, errors.Errorf("found in db but set cache failed %s", fd.ConfFile)
+	}
+}
+
+// ConfTypeInfo describes a conf_type: its conf files and validation flags
+type ConfTypeInfo struct {
+	ConfFiles         []string `json:"conf_files"`
+	ConfNameValidate  int8     `json:"conf_name_validate"`
+	ConfValueValidate int8     `json:"conf_value_validate"`
+	ValueTypeStrict   int8     `json:"value_type_strict"`
+	LevelVersioned    string   `json:"level_versioned"`
+	LevelNames        string   `json:"level_names"`
+}
+
+// ConfTypeFile maps a conf_type to its ConfTypeInfo
+type ConfTypeFile = map[string]ConfTypeInfo
+
+// CacheNamespaceList caches all known namespaces
+var CacheNamespaceList []string
+
+// CacheNamespaceType caches namespace -> conf_type -> ConfTypeInfo
+var CacheNamespaceType map[string]ConfTypeFile
+
+// ConfFileInfo describes one conf file definition
+type ConfFileInfo struct {
+	ConfFile          string `json:"conf_file"`
+	ConfFileLC        string `json:"conf_file_lc"`
+	LevelNames        string `json:"level_names"`
+	LevelVersioned    string `json:"level_versioned"`
+	ConfNameValidate  int8   `json:"conf_name_validate"`
+	ConfValueValidate int8   `json:"conf_value_validate"`
+}
+
+// CacheNamespaceType2 key: namespace.dbconf, value: file_list
+var CacheNamespaceType2 map[string]ConfFileInfo
+
+// CacheGetConfigFileList returns the cached namespace -> conf_type -> conf_files structure:
+//
+//	{
+//	   "namespace1": {
+//	       "conf_type1": {"conf_files": ["f1", "f2"], "conf_name_validate":1, "level_name":""},
+//	       "conf_type2": {"conf_files": ["f1", "f2"]}
+//	   },
+//	   "namespace1": {
+//	       "conf_type3": {"conf_files": ["f1", "f2"]},
+//	       "conf_type4": {"conf_files": ["f1", "f2"]}
+//	   }
+//	}
+func CacheGetConfigFileList(namespace, confType, confFile string) (map[string]ConfTypeFile, error) {
+	cacheKey := []byte("namespace|conf_type")
+	if cacheVal, err := CacheLocal.Get(cacheKey); err != nil {
+		if errors.Is(err, freecache.ErrNotFound) {
+			return CacheSetAndGetConfigFileList(namespace, confType, confFile)
+		}
+		return nil, err
+	} else {
+		namespaceType := map[string]ConfTypeFile{}
+		cacheValStr := string(cacheVal)
+		serialize.UnSerializeString(cacheValStr, &namespaceType, false)
+		logger.Info("CacheGetConfigFileList from cache: %+v", namespaceType)
+
+		return namespaceType, nil
+	}
+}
+
+// CacheSetAndGetConfigFileList rebuilds the namespace/conf_type cache from the DB
+func CacheSetAndGetConfigFileList(namespace, confType, confFile string) (map[string]ConfTypeFile, error) {
+	namespaceType := map[string]ConfTypeFile{}
+
+	confFiles, err := GetConfigFileList(namespace, confType, confFile)
+	if err != nil {
+		return nil, err
+	}
+	for _, tf := range confFiles {
+		if _, ok := namespaceType[tf.Namespace]; ok {
+			if confTypeInfo, ok := namespaceType[tf.Namespace][tf.ConfType]; !ok {
+				// confTypeInfo.ConfFiles = append(confTypeInfo.ConfFiles, tf.ConfFile)
+				confTypeInfo = ConfTypeInfo{
+					ConfFiles:         []string{tf.ConfFile},
+					ConfNameValidate:  tf.ConfNameValidate,
+					ConfValueValidate: tf.ConfValueValidate,
+					ValueTypeStrict:   tf.ValueTypeStrict,
+					LevelVersioned:    tf.LevelVersioned,
+				}
+				namespaceType[tf.Namespace][tf.ConfType] = confTypeInfo
+			} else {
+				// conf_type already present: append this conf_file to its list
+				confTypeInfo.ConfFiles = append(confTypeInfo.ConfFiles, tf.ConfFile)
+				namespaceType[tf.Namespace][tf.ConfType] = confTypeInfo
+			}
+		} else {
+			namespaceType[tf.Namespace] = ConfTypeFile{ // key:conf_type, value:ConfTypeInfo
+				tf.ConfType: ConfTypeInfo{
+					ConfFiles:         []string{tf.ConfFile},
+					ConfNameValidate:  tf.ConfNameValidate,
+					ConfValueValidate: tf.ConfValueValidate,
+					ValueTypeStrict:   tf.ValueTypeStrict,
+					LevelVersioned:    tf.LevelVersioned,
+				},
+			}
+		}
+	}
+	// logger.Info("CacheSetAndGetConfigFileList to cache: %+v", namespaceType)
+
+	cacheKey := []byte("namespace|conf_type")
+	cacheVal, _ := serialize.SerializeToString(namespaceType, false)
+	CacheLocal.Set(cacheKey, []byte(cacheVal), 300)
+
+	CacheNamespaceList = nil
+	for k := range namespaceType {
+		CacheNamespaceList = append(CacheNamespaceList, k)
+	}
+	CacheNamespaceType = namespaceType
+	return namespaceType, nil
+}
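+
+// An illustrative sketch of consuming the returned structure, assuming the
+// namespace and conf_type exist in tb_config_file_def:
+//
+//	nt, _ := CacheGetConfigFileList("", "", "")
+//	if types, ok := nt["tendbha"]; ok {
+//		files := types["dbconf"].ConfFiles // e.g. ["MySQL-5.7", ...]
+//		_ = files
+//	}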
diff --git a/dbm-services/common/db-config/internal/repository/model/cache_config_name.go b/dbm-services/common/db-config/internal/repository/model/cache_config_name.go
new file mode 100644
index 0000000000..e436e29f19
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/cache_config_name.go
@@ -0,0 +1,71 @@
+package model
+
+import (
+	"fmt"
+
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util/serialize"
+
+	"github.com/coocood/freecache"
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+)
+
+// CacheGetConfigNameDef godoc
+// caches conf_name definitions
+// key: confname|tendbha|dbconf|MySQL-5.7
+// value: map[conf_name]ConfigNameDefModel
+func CacheGetConfigNameDef(namespace, confType, confFile, confName string) (*ConfigNameDefModel, error) {
+	cacheKey := []byte(fmt.Sprintf("confname|%s|%s|%s", namespace, confType, confFile))
+
+	if cacheVal, err := CacheLocal.Get(cacheKey); err != nil {
+		if errors.Is(err, freecache.ErrNotFound) {
+			return CacheSetAndGetConfigName(namespace, confType, confFile, confName)
+		}
+		return nil, err
+	} else {
+		cacheValStr := string(cacheVal)
+		if cacheValStr == NotFoundInDB || cacheVal == nil {
+			logger.Info("CacheGetConfigNameDef not_found_in_db key=%s", cacheKey)
+			return nil, nil
+		}
+		f := make(map[string]*ConfigNameDefModel, 0)
+		// @TODO possible optimization: avoid deserializing on every hit
+		serialize.UnSerializeString(cacheValStr, &f, true)
+		if v, ok := f[confName]; ok {
+			// logger.Info("CacheGetConfigNameDef from cache: %+v", v)
+			return v, nil
+		} else {
+			return CacheSetAndGetConfigName(namespace, confType, confFile, confName)
+		}
+	}
+}
+
+// CacheSetAndGetConfigName rebuilds the conf_name cache for a conf file and returns the requested name
+func CacheSetAndGetConfigName(namespace, confType, confFile, confName string) (*ConfigNameDefModel, error) {
+	cacheKey := []byte(fmt.Sprintf("confname|%s|%s|%s", namespace, confType, confFile))
+
+	confFiles, err := QueryConfigNamesPlat(namespace, confType, confFile, "")
+	if err != nil {
+		if errors.Is(err, gorm.ErrRecordNotFound) { // not in db
+			logger.Info("SetAndGetCacheConfigName not_found_in_db key=%s", cacheKey)
+			CacheLocal.Set(cacheKey, []byte(NotFoundInDB), 60)
+			return nil, nil
+		}
+		return nil, err
+	}
+	f := confFiles
+	// logger.Info("SetAndGetCacheConfigName to cache: %+v", f)
+
+	fmap := make(map[string]*ConfigNameDefModel, 0)
+	for _, v := range f {
+		fmap[v.ConfName] = v
+	}
+
+	cacheVal, _ := serialize.SerializeToString(fmap, true)
+	CacheLocal.Set(cacheKey, []byte(cacheVal), 300)
+	if n, ok := fmap[confName]; ok {
+		return n, nil
+	}
+	return nil, freecache.ErrNotFound
+}
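+
+// An illustrative sketch of a cached definition lookup; both the nil result
+// (conf file unknown) and the error must be checked:
+//
+//	def, err := CacheGetConfigNameDef("tendbha", "dbconf", "MySQL-5.7", "max_connections")
+//	if err == nil && def != nil && def.FlagEncrypt == 1 {
+//		// conf_value is stored encrypted and must be decrypted before use
+//	}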
diff --git a/dbm-services/common/db-config/internal/repository/model/cache_crond.go b/dbm-services/common/db-config/internal/repository/model/cache_crond.go
new file mode 100644
index 0000000000..9d45b09ca5
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/cache_crond.go
@@ -0,0 +1,29 @@
+package model
+
+import (
+	"github.com/robfig/cron"
+
+	"bk-dbconfig/pkg/core/logger"
+)
+
+// AutoRefreshCache loads the config file list once and then refreshes it on a schedule
+func AutoRefreshCache() error {
+	c := cron.New()
+	// load directly the first time, then refresh every minute
+	if _, err := CacheSetAndGetConfigFileList("", "", ""); err != nil {
+		return err
+	}
+	err := c.AddFunc("@every 1m", func() {
+		if _, err := CacheSetAndGetConfigFileList("", "", ""); err != nil {
+			logger.Info("AutoRefreshCache SetAndGetConfigFileList failed")
+		}
+		logger.Info("AutoRefreshCache SetAndGetConfigFileList success")
+	})
+	if err != nil {
+		logger.Info("AutoRefreshCache AddFunc failed %v", err)
+	} else {
+		logger.Info("AutoRefreshCache Start")
+		c.Start()
+	}
+	return nil
+}
diff --git a/dbm-services/common/db-config/internal/repository/model/cache_file_node.go b/dbm-services/common/db-config/internal/repository/model/cache_file_node.go
new file mode 100644
index 0000000000..5d975e0d35
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/cache_file_node.go
@@ -0,0 +1,58 @@
+package model
+
+import (
+	"fmt"
+
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util/serialize"
+
+	"github.com/coocood/freecache"
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+)
+
+// CacheGetLevelNode godoc
+// resolves namespace, bk_biz_id, conf_type, conf_file, level_name, level_value from a node_id
+func CacheGetLevelNode(nodeID uint64) (*ConfigFileNodeModel, error) {
+	cacheKey := []byte(fmt.Sprintf("LN.%d", nodeID))
+
+	if cacheVal, err := CacheLocal.Get(cacheKey); err != nil {
+		if errors.Is(err, freecache.ErrNotFound) {
+			return CacheSetAndGetLevelNode(nodeID)
+		}
+		return nil, err
+	} else {
+		cacheValStr := string(cacheVal)
+		if cacheValStr == NotFoundInDB || cacheVal == nil {
+			logger.Info("CacheGetLevelNode not_found_in_db key=%s", cacheKey)
+			return nil, nil
+		}
+		fileNode := &ConfigFileNodeModel{}
+		// @TODO possible optimization: avoid deserializing on every hit
+		serialize.UnSerializeString(cacheValStr, &fileNode, true)
+		return fileNode, nil
+	}
+}
+
+// CacheSetAndGetLevelNode loads the level node from the DB and caches it
+func CacheSetAndGetLevelNode(nodeID uint64) (*ConfigFileNodeModel, error) {
+	cacheKey := []byte(fmt.Sprintf("LN.%d", nodeID))
+	fn := ConfigFileNodeModel{ID: nodeID}
+	fileNode, err := fn.Detail(DB.Self)
+	if err != nil {
+		if errors.Is(err, gorm.ErrRecordNotFound) { // not in db
+			logger.Info("CacheSetAndGetLevelNode not_found_in_db key=%s", cacheKey)
+			CacheLocal.Set(cacheKey, []byte(NotFoundInDB), 60)
+			return nil, nil
+		}
+		return nil, err
+	}
+	if fileNode == nil {
+		return nil, freecache.ErrNotFound
+	} else {
+		logger.Info("CacheSetAndGetLevelNode to cache: %+v", fileNode)
+		cacheVal, _ := serialize.SerializeToString(fileNode, true)
+		CacheLocal.Set(cacheKey, []byte(cacheVal), 300)
+		return fileNode, nil
+	}
+}
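+
+// An illustrative sketch of resolving a level node, where nil, nil means the
+// node_id is not in the DB:
+//
+//	node, err := CacheGetLevelNode(123)
+//	if err == nil && node == nil {
+//		// unknown node_id
+//	}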
diff --git a/dbm-services/common/db-config/internal/repository/model/config_apply.go b/dbm-services/common/db-config/internal/repository/model/config_apply.go
new file mode 100644
index 0000000000..e478ba9d55
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/config_apply.go
@@ -0,0 +1,142 @@
+package model
+
+import (
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+)
+
+// ExistsAppliedVersion reports whether an applied version exists
+// usable to tell whether a generate has happened; when nothing is applied, the error returned is nil
+func (c *ConfigVersionedModel) ExistsAppliedVersion(db *gorm.DB) (bool, error) {
+	where := map[string]interface{}{
+		"bk_biz_id":   c.BKBizID,
+		"namespace":   c.Namespace,
+		"conf_type":   c.ConfType,
+		"conf_file":   c.ConfFile,
+		"level_name":  c.LevelName,
+		"level_value": c.LevelValue,
+		"is_applied":  1,
+	}
+	if _, err := RecordExists(db, c.TableName(), 0, where); err != nil {
+		if err == gorm.ErrRecordNotFound {
+			return false, nil
+		} else {
+			return false, err
+		}
+	} else {
+		return true, nil
+	}
+}
+
+// GetVersion fetches the ConfigVersionedModel
+// returns ErrRecordNotFound if it does not exist
+func (c *ConfigVersionedModel) GetVersion(db *gorm.DB, where map[string]interface{}) (*ConfigVersioned, error) {
+	versioned := &ConfigVersionedModel{}
+	sqlRes := db.Table(c.TableName()).Where(c.UniqueWhere(true))
+	if where != nil {
+		sqlRes = sqlRes.Where(where)
+	}
+	err := sqlRes.First(&versioned).Error
+	if err != nil {
+		return nil, err
+	}
+	v := &ConfigVersioned{Versioned: versioned}
+	if err := v.UnPack(); err != nil {
+		return nil, err
+	}
+	if err := v.MayDecrypt(); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// GetVersionApplied fetches the applied version
+func (c *ConfigVersionedModel) GetVersionApplied(db *gorm.DB) (*ConfigVersioned, error) {
+	// c.Revision = ""
+	where := map[string]interface{}{"is_applied": 1}
+	if v, err := c.GetVersion(db, where); err != nil {
+		return nil, errors.Wrap(err, "get applied config")
+	} else {
+		return v, nil
+	}
+}
+
+// GetVersionPublished fetches the published version
+func (c *ConfigVersionedModel) GetVersionPublished(db *gorm.DB) (*ConfigVersioned, error) {
+	// c.Revision = ""
+	where := map[string]interface{}{"is_published": 1}
+	if v, err := c.GetVersion(db, where); err != nil {
+		return nil, errors.Wrap(err, "get published config")
+	} else {
+		return v, nil
+	}
+}
+
+// BatchGetVersion returns the published and the applied versions for the given level_values
+func (c *ConfigVersionedModel) BatchGetVersion(levelValues []string, db *gorm.DB) (
+	[]*ConfigVersionedModel, []*ConfigVersionedModel, error) {
+	where := map[string]interface{}{
+		"bk_biz_id":   c.BKBizID,
+		"namespace":   c.Namespace,
+		"conf_file":   c.ConfFile,
+		"level_name":  c.LevelName,
+		"level_value": levelValues,
+	}
+
+	published := make([]*ConfigVersionedModel, 0)
+	publishedRes := db.Table(c.TableName()).
+		Select("id", "level_value", "revision", "is_published", "is_applied").
+		Where(where).Where("is_published", 1).Find(&published)
+	if publishedRes.Error != nil {
+		return nil, nil, publishedRes.Error
+	}
+
+	applied := make([]*ConfigVersionedModel, 0)
+	appliedRes := db.Table(c.TableName()).
+		Select("id", "level_value", "revision", "is_published", "is_applied").
+		Where(where).Where("is_applied", 1).Find(&applied)
+	if appliedRes.Error != nil {
+		return nil, nil, appliedRes.Error
+	}
+	return published, applied, nil
+}
+
+// BatchGetPublished returns the published versions for the given level_values
+func (c *ConfigVersionedModel) BatchGetPublished(levelValues []string, db *gorm.DB) ([]*ConfigVersionedModel, error) {
+	where := map[string]interface{}{
+		"bk_biz_id":   c.BKBizID,
+		"namespace":   c.Namespace,
+		"conf_file":   c.ConfFile,
+		"level_name":  c.LevelName,
+		"level_value": levelValues,
+	}
+
+	published := make([]*ConfigVersionedModel, 0)
+	publishedRes := db.Table(c.TableName()).
+		Select("id", "level_value", "revision", "is_published", "is_applied").
+		Where(where).Where("is_published", 1).Find(&published)
+	if publishedRes.Error != nil {
+		return nil, publishedRes.Error
+	}
+	return published, nil
+}
+
+// BatchGetApplied returns the applied versions for the given level_values
+func (c *ConfigVersionedModel) BatchGetApplied(levelValues []string, db *gorm.DB) ([]*ConfigVersionedModel, error) {
+	where := map[string]interface{}{
+		"bk_biz_id":   c.BKBizID,
+		"namespace":   c.Namespace,
+		"conf_file":   c.ConfFile,
+		"level_name":  c.LevelName,
+		"level_value": levelValues,
+	}
+
+	applied := make([]*ConfigVersionedModel, 0)
+	appliedRes := db.Table(c.TableName()).
+		Select("id", "level_value", "revision", "is_published", "is_applied").
+		Where(where).Where("is_applied", 1).Find(&applied)
+	if appliedRes.Error != nil {
+		return nil, appliedRes.Error
+	}
+	return applied, nil
+}
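+
+// An illustrative sketch of the published/applied distinction as used above
+// (the interpretation is an assumption: published is the latest release,
+// applied is the version actually in effect):
+//
+//	pub, _ := c.BatchGetPublished([]string{"c1", "c2"}, DB.Self)
+//	app, _ := c.BatchGetApplied([]string{"c1", "c2"}, DB.Self)
+//	// a level_value present in pub but missing from app has a pending release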
diff --git a/dbm-services/common/db-config/internal/repository/model/config_file.go b/dbm-services/common/db-config/internal/repository/model/config_file.go
new file mode 100644
index 0000000000..39befe0cec
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/config_file.go
@@ -0,0 +1,117 @@
+package model
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+
+	"bk-dbconfig/pkg/util"
+)
+
+// DeleteByUnique deletes the rows matching the unique-key condition
+func DeleteByUnique(db *gorm.DB, tableName string, uniqueWhere map[string]interface{}) error {
+	var sqlRes *gorm.DB
+	sqlRes = db.Debug().Table(tableName).Where(uniqueWhere).Delete(tableName)
+	if sqlRes.Error != nil {
+		return sqlRes.Error
+	}
+	return nil
+}
+
+// BatchDeleteByID deletes rows from tableName by id
+func BatchDeleteByID(db *gorm.DB, tableName string, ids []uint64) error {
+	// conditions must be attached before the Delete finisher runs
+	sqlRes := db.Table(tableName).Where("id in ?", ids).Delete(&modelID{})
+	if sqlRes.Error != nil {
+		return sqlRes.Error
+	}
+	return nil
+}
+
+type modelID struct {
+	ID uint64 `json:"id" gorm:"column:id;type:bigint;PRIMARY_KEY"`
+}
+
+// RecordExists reports whether a record exists, returning its id if so
+// The unique key takes precedence: if the record exists but its id differs from the input id (with input id > 0), an error is returned; otherwise the actual id (from db) is returned. Without a unique key, the lookup falls back to the id.
+// Only 1 row is checked. Callers decide existence by testing the error against ErrRecordNotFound.
+func RecordExists(db *gorm.DB, tbName string, id uint64, uniqueWhere map[string]interface{}) (uint64, error) {
+	var sqlRes *gorm.DB
+	var idnew modelID
+	if !util.IsEmptyMap(uniqueWhere) { // by unique key
+		sqlRes = db.Table(tbName).Select("id").Where(uniqueWhere).Take(&idnew)
+		if err := sqlRes.Error; err != nil {
+			// not found or error; the returned id is meaningless
+			return idnew.ID, err
+		} else if id > 0 && id != idnew.ID {
+			// found: check whether the input id matches idnew.ID
+			return idnew.ID, fmt.Errorf("id error id_1=%d, id_2=%d", id, idnew.ID)
+		} else {
+			// found and return id
+			return idnew.ID, nil
+		}
+	} else { // by ID
+		sqlRes = db.Table(tbName).Select("id").Where("id = ?", id).Take(&idnew)
+		return id, sqlRes.Error // Take() returns ErrRecordNotFound
+	}
+}
+
+// RecordGet fetches one record as a map, by unique key or by id
+func RecordGet(db *gorm.DB, tbName string, id uint64, uniqueWhere map[string]interface{}) (map[string]interface{},
+	error) {
+	var sqlRes *gorm.DB
+	objMap := map[string]interface{}{}
+
+	if !util.IsEmptyMap(uniqueWhere) { // by unique key
+		sqlRes = db.Debug().Table(tbName).Select("*").Where(uniqueWhere).Take(&objMap)
+		if err := sqlRes.Error; err != nil {
+			// not found or error
+			return objMap, err
+		} else if idVal, ok := objMap["id"]; ok && id > 0 && fmt.Sprintf("%v", idVal) != fmt.Sprintf("%d", id) {
+			// found: when both ids are given, the id from db must match the input id
+			return objMap, fmt.Errorf("id error id_1=%d, id_2=%v", id, idVal)
+		} else {
+			// found
+			return objMap, nil
+		}
+	} else { // by ID
+		sqlRes = db.Debug().Table(tbName).Select("*").Where("id = ?", id).Take(&objMap)
+		return objMap, sqlRes.Error // Take() returns ErrRecordNotFound
+	}
+}
+
+// Exists returns the id of this config file definition, looked up by id or by unique key
+func (c *ConfigFileDefModel) Exists(db *gorm.DB) (uint64, error) {
+	var sqlRes *gorm.DB
+	if c.ID != 0 { // by ID
+		if err := db.Select("id").Take(c).Error; err != nil {
+			// Take have ErrRecordNotFound
+			return 0, err
+		}
+		return c.ID, nil
+	} else { // by unique key
+		sqlRes = DB.Self.Model(ConfigFileDefModel{}).Select("id").Where(c.UniqueWhere()).Take(&c)
+		if err := sqlRes.Error; err != nil {
+			return 0, err
+		}
+		return c.ID, nil
+	}
+}
+
+// SaveAndGetID inserts the definition if absent, updates it otherwise, and returns its id
+func (c *ConfigFileDefModel) SaveAndGetID(db *gorm.DB) (uint64, error) {
+	id, err := RecordExists(db, c.TableName(), 0, c.UniqueWhere())
+	if errors.Is(err, gorm.ErrRecordNotFound) {
+		if err := db.Save(c).Error; err != nil {
+			return 0, err
+		}
+		// the id is generated by the insert
+		return c.ID, nil
+	} else if err != nil {
+		return 0, err
+	}
+	c.ID = id
+	if err := db.Updates(c).Error; err != nil {
+		return 0, err
+	}
+	return id, nil
+}
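+
+// An illustrative sketch of the exists-then-write pattern used above; only
+// ErrRecordNotFound means "safe to insert", any other error must be surfaced:
+//
+//	id, err := RecordExists(db, tbName, 0, uniqueWhere)
+//	switch {
+//	case errors.Is(err, gorm.ErrRecordNotFound):
+//		// insert
+//	case err != nil:
+//		// real db error
+//	default:
+//		_ = id // update the existing row
+//	}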
diff --git a/dbm-services/common/db-config/internal/repository/model/config_item.go b/dbm-services/common/db-config/internal/repository/model/config_item.go
new file mode 100644
index 0000000000..36b6cf12cc
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/config_item.go
@@ -0,0 +1,494 @@
+package model
+
+import (
+	"fmt"
+	"strings"
+
+	"bk-dbconfig/internal/pkg/cst"
+	"bk-dbconfig/pkg/core/config"
+
+	"github.com/jinzhu/copier"
+	"github.com/pkg/errors"
+
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util"
+	"bk-dbconfig/pkg/util/crypt"
+
+	"gorm.io/gorm"
+)
+
+// Create inserts the config item; it may fail with a unique_key error
+// a plain create must not carry an id
+func (c *ConfigModel) Create(upsert bool) error {
+	var err error
+	if err = c.HandleFlagEncrypt(); err != nil {
+		return err
+	}
+	if upsert { // same as c.Update(false)
+		if err = DB.Self.Save(c).Error; err != nil {
+			logger.Error("Save fail:%v, err:%s", *c, err.Error())
+			return err
+		}
+	} else {
+		if err = DB.Self.Create(c).Error; err != nil {
+			logger.Errorf("Create fail:%v, err:%s", *c, err.Error())
+			return err
+		}
+	}
+	return nil
+}
+
+// CheckRecordExists checks by id or by unique key, returning the id found
+func (c *ConfigModel) CheckRecordExists(db *gorm.DB) (uint64, error) {
+	var sqlRes *gorm.DB
+	var tmpModel ConfigModel
+	if c.ID != 0 { // by ID
+		if err := db.Select("id").Take(c).Error; err != nil {
+			// Take have ErrRecordNotFound
+			return 0, err
+		}
+		return c.ID, nil
+	} else { // by unique key
+		// unique key: bk_biz_id, namespace, conf_name, conf_type, conf_file, level_name, level_value => conf_value
+		c.ID = 0
+		sqlRes = db.Model(ConfigModel{}).Select("id").Where(
+			"bk_biz_id=? and namespace=? and conf_type=? and conf_name=? and conf_file=? and level_name=? and level_value=?",
+			c.BKBizID, c.Namespace, c.ConfType, c.ConfName, c.ConfFile, c.LevelName, c.LevelValue).Take(&tmpModel)
+		if err := sqlRes.Error; err != nil {
+			return 0, err
+		}
+		return tmpModel.ID, nil
+	}
+}
+
+// HandleFlagEncrypt encrypts conf_value when flag_encrypt requires it
+func (c *ConfigModel) HandleFlagEncrypt() error {
+	if _, ok := crypt.IsEncryptedString(c.ConfValue); ok {
+		// starts with **: already an encrypted password
+		return nil
+	} else if util.ConfValueIsPlaceHolder(c.ConfValue) {
+		return nil
+	}
+	nameDef, err := CacheGetConfigNameDef(c.Namespace, c.ConfType, c.ConfFile, c.ConfName)
+	if err == nil && nameDef.FlagEncrypt == 1 {
+		key := fmt.Sprintf("%s%s", config.GetString("encrypt.keyPrefix"), c.LevelValue)
+		c.ConfValue, err = crypt.EncryptString(c.ConfValue, key, constvar.EncryptEnableZip)
+		if err != nil {
+			logger.Errorf("HandleFlagEncrypt %+v. Error: %w", c, err)
+			return errors.WithMessage(err, c.ConfName)
+		}
+	}
+	return nil
+}
+
+// MayDecrypt treats any value with the encrypted prefix as encrypted and decrypts it directly
+// the key is encrypt.keyPrefix + level_value, the same key used to encrypt
+func (c *ConfigModel) MayDecrypt() error {
+	if _, ok := crypt.IsEncryptedString(c.ConfValue); !ok {
+		// no ** prefix: not encrypted
+		return nil
+	}
+	var err error
+	// a genuinely encrypted string: no need to fetch flag_encrypt from tb_config_name_def
+	key := fmt.Sprintf("%s%s", config.GetString("encrypt.keyPrefix"), c.LevelValue)
+	c.ConfValue, err = crypt.DecryptString(c.ConfValue, key, constvar.EncryptEnableZip)
+	if err != nil {
+		logger.Errorf("MayDecrypt %+v. Error: %w", c, err)
+		return errors.WithMessage(err, c.ConfName)
+	}
+	return nil
+}
+
+// UpdateMust updates by id or by unique key
+// if the record does not exist, an error is returned
+func (c *ConfigModel) UpdateMust(db *gorm.DB) error {
+	if configID, err := c.CheckRecordExists(db); err != nil {
+		return err
+	} else {
+		c.ID = configID
+		if err = c.HandleFlagEncrypt(); err != nil {
+			return err
+		}
+		if err = db.Omit("time_created", "time_updated", "id").Updates(c).Error; err != nil {
+			return err
+		}
+		// if record exists, do not check RowsAffected
+		return nil
+	}
+}
+
+// Update performs an upsert
+// ifNotFoundErr=true: return an error if the record does not exist
+// ifNotFoundErr=false: plain upsert
+func (c *ConfigModel) Update(db *gorm.DB, ifNotFoundErr bool) error {
+	if ifNotFoundErr {
+		return c.UpdateMust(db)
+	} else {
+		// if err := DB.Self.Updates(c).Error; err != nil {
+		if err := c.HandleFlagEncrypt(); err != nil {
+			return err
+		}
+		if err := db.Omit("created_at", "updated_at").Save(c).Error; err != nil {
+			return err
+		}
+		return nil
+	}
+}
+
+// UpdateBatch updates each config item in turn; with ifNotFoundErr=false it may also create
+func UpdateBatch(db *gorm.DB, configs []*ConfigModel, ifNotFoundErr bool) error {
+	for _, c := range configs {
+		if err := c.Update(db, ifNotFoundErr); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CreateBatch only creates (every id must be 0)
+// a duplicate key returns an error
+func CreateBatch(db *gorm.DB, configs []*ConfigModel) error {
+	var sqlRes *gorm.DB
+	var err error
+	for _, c := range configs {
+		if err = c.HandleFlagEncrypt(); err != nil {
+			return err
+		}
+	}
+	sqlRes = db.Omit("created_at", "updated_at").Create(&configs)
+	// sqlRes = DB.Self.Omit("time_created", "time_updated").Save(&configs)
+	if err = sqlRes.Error; err != nil {
+		logger.Errorf("add config items fail:%+v, err:%s", configs, err.Error())
+		return err
+	}
+	return nil
+}
+
+// DeleteBatch deletes config items by their ids
+func DeleteBatch(db *gorm.DB, configs []*ConfigModel) error {
+	var sqlRes *gorm.DB
+	var ids []uint64
+	for _, c := range configs {
+		ids = append(ids, c.ID)
+	}
+	sqlRes = db.Delete(&configs, ids)
+	if err := sqlRes.Error; err != nil {
+		logger.Errorf("delete config items fail:%+v, err:%s", configs, err.Error())
+		return err
+	}
+	return nil
+}
+
+// UpsertBatchConfigs upserts config items in batch
+// strict = true
+//
+//	an insert (id=0) hitting a duplicate key returns an error
+//	an update (id!=0) of a missing record returns an error
+//
+// strict = false
+//
+//	an insert (id=0) hitting a duplicate key turns into an update
+//	an update (id!=0) of a missing record is ignored
+func UpsertBatchConfigs(db *gorm.DB, configs []*ConfigModel, strict bool) (err error) {
+	configsAdd := make([]*ConfigModel, 0)
+	configsUpt := make([]*ConfigModel, 0)
+	for _, c := range configs {
+		if configID, err := c.CheckRecordExists(db); err != nil {
+			if errors.Is(err, gorm.ErrRecordNotFound) {
+				if c.ID != 0 {
+					return err
+				}
+				// c.ID = 0
+				configsAdd = append(configsAdd, c)
+			} else {
+				return err
+			}
+		} else {
+			c.ID = configID
+			configsUpt = append(configsUpt, c)
+		}
+	}
+	logger.Infof("UpsertBatchConfigs strict=%t, configsAdd:%#v, configsUpt:%+v", strict, configsAdd, configsUpt)
+	if len(configsAdd) != 0 {
+		if err = CreateBatch(db, configsAdd); err != nil {
+			return err
+		}
+	}
+	if len(configsUpt) != 0 {
+		if err = UpdateBatch(db, configsUpt, strict); err != nil {
+			return err
+		}
+	}
+	return nil
+}
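+
+// An illustrative sketch of the strict flag, per the contract documented above:
+//
+//	err := UpsertBatchConfigs(db, configs, true)  // duplicate insert / missing update -> error
+//	err = UpsertBatchConfigs(db, configs, false) // duplicate -> update, missing -> ignored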
+
+// UpsertBatchConfigsByID splits configs into creates (id=0) and updates (id!=0)
+func UpsertBatchConfigsByID(configs []*ConfigModel) (err error) {
+	configsAdd := make([]*ConfigModel, 0)
+	configsUpt := make([]*ConfigModel, 0)
+	for _, c := range configs {
+		if c.ID == 0 {
+			configsAdd = append(configsAdd, c)
+		} else {
+			configsUpt = append(configsUpt, c)
+		}
+	}
+	logger.Infof("CreateOrUpdateConfigs2 configsAdd:%#v, configsUpt:%+v", configsAdd, configsUpt)
+	if len(configsAdd) != 0 {
+		if err = CreateBatch(DB.Self, configsAdd); err != nil {
+			return err
+		}
+	}
+	if len(configsUpt) != 0 {
+		if err = UpdateBatch(DB.Self, configsUpt, true); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CheckConfigInherit checks whether inheritable plat config items exist
+// inheritFrom = "0" means inheriting from plat
+func CheckConfigInherit(confType, confName, namespace string, inheritFrom string) error {
+	type RowCount struct {
+		Count int64 `json:"cnt" gorm:"column:cnt"`
+	}
+	Cnt := make([]*RowCount, 0)
+	queryStr := "SELECT count(*) cnt FROM tb_config_node WHERE 1=1"
+	queryStr += fmt.Sprintf(" AND bk_biz_id = '%s' and level_name = '%s' and level_value = '%s'",
+		constvar.BKBizIDForPlat, constvar.LevelPlat, inheritFrom)
+	if confName != "" {
+		// queryStr += fmt.Sprintf(" AND conf_name = '%s'", conf_name)
+
+		confNameList := strings.Split(confName, ",")
+		nameIn := strings.Join(confNameList, "','")
+		queryStr += fmt.Sprintf(" AND conf_name in ('%s')", nameIn)
+	}
+	if confType != "" {
+		queryStr += fmt.Sprintf(" AND conf_type = '%s'", confType)
+	}
+	if namespace != "" {
+		queryStr += fmt.Sprintf(" AND namespace = '%s'", namespace)
+	}
+	logger.Warnf("CheckConfigInherit sql: %v", queryStr)
+
+	if err := DB.Self.Raw(queryStr).Scan(&Cnt).Error; err != nil {
+		if err == gorm.ErrRecordNotFound {
+			// return configs, nil  // return empty slice
+			return errors.New("no inherit config items found")
+		}
+		return err
+	} else if Cnt[0].Count == 0 {
+		logger.Warnf("CheckConfigInherit sql result: %+v, len:%d", Cnt[0], len(Cnt))
+		// return errors.New("0 inherit config items found")
+	}
+
+	return nil
+}
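+
+// checkConfigInheritCount is a sketch of the same existence check written with bound
+// parameters instead of fmt.Sprintf string concatenation, which removes the SQL
+// injection surface. It is an illustrative alternative, not wired into any caller.
+func checkConfigInheritCount(confType, confName, namespace, inheritFrom string) (int64, error) {
+	var cnt int64
+	sqlRes := DB.Self.Table("tb_config_node").
+		Where("bk_biz_id = ? and level_name = ? and level_value = ?",
+			constvar.BKBizIDForPlat, constvar.LevelPlat, inheritFrom)
+	if confName != "" {
+		sqlRes = sqlRes.Where("conf_name in ?", strings.Split(confName, ","))
+	}
+	if confType != "" {
+		sqlRes = sqlRes.Where("conf_type = ?", confType)
+	}
+	if namespace != "" {
+		sqlRes = sqlRes.Where("namespace = ?", namespace)
+	}
+	err := sqlRes.Count(&cnt).Error
+	return cnt, err
+}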
+
+// CheckUniqueKeyProvided TODO
+func (c *ConfigModel) CheckUniqueKeyProvided() bool {
+	if c.ID > 0 {
+		return true
+	} else if c.BKBizID == "" || c.ConfType == "" || c.ConfName == "" || c.ConfFile == "" || c.LevelName == "" ||
+		c.LevelValue == "" {
+		return false
+	} else {
+		return true
+	}
+}
+
+// GetOneConfigByUnique TODO
+func GetOneConfigByUnique(c *ConfigModel) (*ConfigModel, error) {
+	// UniqueKey: (bk_biz_id,conf_name,conf_type,conf_file,level_name,level_value) (namespace,conf_file are not decisive)
+	var sqlRes *gorm.DB
+	if !c.CheckUniqueKeyProvided() {
+		return nil, errors.New("get one config should have a unique key")
+	}
+	configs := make([]*ConfigModel, 0)
+	sqlRes = DB.Self.Where(c).Find(&configs) // .Find does not return ErrRecordNotFound
+	if err := sqlRes.Error; err != nil {
+		return nil, err
+	} else if len(configs) == 0 {
+		return nil, gorm.ErrRecordNotFound
+	} else if len(configs) > 1 {
+		return nil, fmt.Errorf("expect 1 row found but get %d", len(configs))
+	}
+	return configs[0], nil
+}
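+
+// Illustrative lookup sketch (values assumed): distinguishing "not found" from real errors,
+// since .Find inside GetOneConfigByUnique never returns ErrRecordNotFound by itself.
+//
+//	one, err := GetOneConfigByUnique(&ConfigModel{BKBizID: "testapp", ConfType: "dbconf",
+//		ConfFile: "MySQL-5.7", ConfName: "max_connections", LevelName: "cluster", LevelValue: "c1"})
+//	if errors.Is(err, gorm.ErrRecordNotFound) {
+//		// no such item
+//	}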
+
+// GetConfigByIDs queries config items by primary-key ids, returning selected columns only
+func GetConfigByIDs(ids []uint64) ([]*ConfigModel, error) {
+	var sqlRes *gorm.DB
+	configs := make([]*ConfigModel, 0)
+	sqlRes = DB.Self.Select("id", "bk_biz_id", "conf_type", "conf_value", "description").Find(&configs, ids)
+	// DB.Self.Where("id IN ?", ids).Find(&configs)
+	if err := sqlRes.Error; err != nil {
+		return nil, err
+	}
+	logger.Warnf("GetConfigByIDs sql: %+v", configs)
+	return configs, nil
+}
+
+// GetUpLevelInfo completes the up-level info required by the conf file definition, erroring if a needed level is missing
+func GetUpLevelInfo(r *api.BaseConfigNode, up *api.UpLevelInfo) (*api.UpLevelInfo, error) {
+	fd := api.BaseConfFileDef{Namespace: r.Namespace, ConfType: r.ConfType, ConfFile: r.ConfFile}
+	fileDef, err := CacheGetConfigFile(fd)
+	upLevel := []string{}
+	if err != nil {
+		return nil, err
+	} else if fileDef != nil {
+		fileLevels := fileDef.LevelNameList
+		allUpLevels := cst.GetConfigLevelsUp(r.LevelName)
+		for _, l := range allUpLevels {
+			if util.StringsHas(fileLevels, l) {
+				upLevel = append(upLevel, l)
+			}
+		}
+	}
+	if up.LevelInfo == nil {
+		up.LevelInfo = make(map[string]string)
+	}
+	for _, l := range upLevel {
+		if l == constvar.LevelPlat || l == constvar.LevelApp {
+			continue
+		}
+		if _, ok := up.LevelInfo[l]; !ok {
+			// try get from versioned
+			return nil, errors.Errorf("level=%s need up level_info %s", r.LevelName, l)
+		}
+	}
+	return up, nil
+}
+
+// GetSimpleConfig godoc
+// todo currently this function is not used to fetch plat config
+// todo before querying, check whether enough up level_info is given, e.g. mysql needs module=xxx while redis does not (model.CacheGetConfigFile vs up_info)
+func GetSimpleConfig(db *gorm.DB, r *api.BaseConfigNode, up *api.UpLevelInfo,
+	o *api.QueryConfigOptions) ([]*ConfigModel, error) {
+	var err error
+	// wrap in a closure so the final err is logged, not the nil captured at defer time
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+
+	upLevel, err := GetUpLevelInfo(r, up)
+	if err != nil {
+		return nil, err
+	}
+	upLevel.LevelInfo[r.LevelName] = r.LevelValue
+
+	configs := make([]*ConfigModel, 0)
+	subSelectWhere := []string{}
+	subSelect := []string{}
+	simpleColumns :=
+		"id, bk_biz_id,namespace,conf_type,conf_file,conf_name,level_name,level_value, conf_value,flag_locked,flag_disable,updated_revision,stage,description,created_at,updated_at"
+
+	sqlWhere := ""
+	// todo consider turning queryLevels into a map {level_name: level_value} and building the sql directly from it
+	var queryLevels []string
+	if o.InheritFrom != "" || (r.BKBizID == constvar.BKBizIDForPlat) {
+		subSelectPlat := fmt.Sprintf(
+			"select %s from v_tb_config_node_plat where bk_biz_id = '%s' and level_name = '%s' and level_value = '%s' and conf_type = '%s'",
+			simpleColumns, constvar.BKBizIDForPlat, constvar.LevelPlat, constvar.BKBizIDForPlat, r.ConfType)
+		// only generate needs the read_only configs (flag_status=2); the query API also
+		// returns read-only items, so both paths currently filter with flag_status >= 1
+		subSelectPlat += " and flag_status >= 1"
+		subSelect = append(subSelect, subSelectPlat)
+		queryLevels = append(queryLevels, constvar.LevelPlat)
+	}
+	if r.BKBizID != "" && r.BKBizID != constvar.BKBizIDForPlat {
+		sqlWhere = fmt.Sprintf(" bk_biz_id = '%s' and level_name = '%s' and level_value = '%s' and conf_type = '%s'",
+			r.BKBizID, constvar.LevelApp, r.BKBizID, r.ConfType)
+		subSelectWhere = append(subSelectWhere, sqlWhere)
+		queryLevels = append(queryLevels, constvar.LevelApp)
+	}
+
+	for upLevelName, upLevelValue := range upLevel.LevelInfo {
+		if util.StringsHas(queryLevels, upLevelName) {
+			continue
+		}
+		sqlWhere = fmt.Sprintf(" bk_biz_id = '%s' and level_name = '%s' and level_value = '%s' and conf_type = '%s'",
+			r.BKBizID, upLevelName, upLevelValue, r.ConfType)
+		subSelectWhere = append(subSelectWhere, sqlWhere)
+		queryLevels = append(queryLevels, upLevelName)
+	}
+	for _, subWhere := range subSelectWhere {
+		subSelect = append(subSelect, fmt.Sprintf("select %s from tb_config_node where %s", simpleColumns, subWhere))
+	}
+	if len(subSelect) == 0 {
+		return nil, errors.New("GetSimpleConfig parameters error")
+	}
+	unionSelect := strings.Join(subSelect, " UNION ALL ")
+	queryStr := fmt.Sprintf("SELECT * FROM (\n%s\n) tt WHERE flag_disable = 0 AND namespace = '%s'", unionSelect,
+		r.Namespace)
+	if o.ConfName != "" {
+		// queryStr += fmt.Sprintf(" AND conf_name like '%s%%'", r.ConfName)
+		confNameList := strings.Split(o.ConfName, ",")
+		nameIn := strings.Join(confNameList, "','")
+		queryStr += fmt.Sprintf(" AND conf_name in ('%s')", nameIn)
+	}
+	if o.ConfValue != "" {
+		queryStr += fmt.Sprintf(" AND conf_value like '%%%s%%'", o.ConfValue)
+	}
+	if r.ConfFile != "" {
+		queryStr += fmt.Sprintf(" AND conf_file = '%s'", r.ConfFile)
+	}
+	logger.Infof("GetSimpleConfig sql: %v", queryStr)
+
+	if err = db.Debug().Raw(queryStr).Scan(&configs).Error; err != nil {
+		if err == gorm.ErrRecordNotFound {
+			// return configs, nil  // return empty slice
+			return configs, errors.New("no config items found")
+		}
+		return nil, err
+	}
+	if o.Decrypt {
+		for _, c := range configs {
+			err = c.MayDecrypt()
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return configs, nil
+}
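+
+// For reference, the statement assembled above has this shape (illustrative values,
+// columns abbreviated; the plat branch reads from the view, the rest from the table):
+//
+//	SELECT * FROM (
+//	    select ... from v_tb_config_node_plat where bk_biz_id = '<plat_biz_id>' and level_name = 'plat' ...
+//	    UNION ALL
+//	    select ... from tb_config_node where bk_biz_id = '<bk_biz_id>' and level_name = 'app' ...
+//	) tt WHERE flag_disable = 0 AND namespace = '<namespace>'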
+
+// QueryAndDeleteConfig finds each named config item by unique key and deletes it
+func QueryAndDeleteConfig(db *gorm.DB, levelNode *api.BaseConfigNode, configNames []string) error {
+	conf := &ConfigModel{}
+	copier.Copy(conf, levelNode)
+	for _, confName := range configNames {
+		conf.ConfName = confName
+		if cNew, err := GetOneConfigByUnique(conf); err != nil {
+			if err == gorm.ErrRecordNotFound {
+				continue
+			} else {
+				return err
+			}
+		} else {
+			delWhere := map[string]interface{}{
+				"bk_biz_id":   cNew.BKBizID,
+				"namespace":   cNew.Namespace,
+				"conf_type":   cNew.ConfType,
+				"conf_file":   cNew.ConfFile,
+				"level_name":  cNew.LevelName,
+				"level_value": cNew.LevelValue,
+				"conf_name":   cNew.ConfName,
+			}
+			logger.Warn("delete config: %+v", cNew)
+			if err := DeleteByUnique(db, cNew.TableName(), delWhere); err != nil {
+				return err
+			}
+		}
+	}
+	// DeleteBatch(db, configsDel)
+	return nil
+}
diff --git a/dbm-services/common/db-config/internal/repository/model/config_item_check.go b/dbm-services/common/db-config/internal/repository/model/config_item_check.go
new file mode 100644
index 0000000000..08211bbea5
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/config_item_check.go
@@ -0,0 +1,127 @@
+package model
+
+import (
+	"fmt"
+	"strings"
+
+	"bk-dbconfig/internal/pkg/cst"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/cast"
+)
+
+// GetConfigItemsAssociateNodes gets the nodes related to the current level node:
+// all of its parent nodes and all of its child nodes.
+// levels allowed for now: plat, app, module, cluster
+func (c *ConfigModelView) GetConfigItemsAssociateNodes() (map[string]interface{}, map[string]interface{}, error) {
+	upLevels := cst.GetConfigLevelsUp(c.LevelName)
+	downLevels := cst.GetConfigLevelsDown(c.LevelName)
+	var err error
+	levelUpMaps := map[string]interface{}{}
+	if c.LevelName != constvar.LevelPlat {
+		levelUpMaps[constvar.LevelPlat] = constvar.BKBizIDForPlat // top-level plat config is always a parent
+	}
+	logger.Info("GetConfigItemsAssociateNodes level=%s %v %v", c.LevelName, upLevels, downLevels)
+	versionObj := ConfigVersionedModel{}
+
+	// collect all parents of the current level
+	for _, lName := range upLevels {
+		if lName == constvar.LevelPlat {
+			continue
+		} else if lName == constvar.LevelApp {
+			levelUpMaps[lName] = c.BKBizID
+		} else if lName == constvar.LevelModule {
+			if dbModule, ok := c.UpLevelInfo[constvar.LevelModule]; ok {
+				levelUpMaps[lName] = dbModule
+			} else {
+				dbModule, err = versionObj.GetModuleByCluster(c.BKBizID, c.LevelValue)
+				if dbModule == "" {
+					return nil, nil, errors.Errorf("cannot find module for cluster %s", c.LevelValue)
+				}
+				levelUpMaps[lName] = dbModule
+			}
+		} else if lName == constvar.LevelCluster {
+			if dbCluster, ok := c.UpLevelInfo[constvar.LevelCluster]; ok {
+				levelUpMaps[lName] = dbCluster
+			} else {
+				dbCluster, err = versionObj.GetClusterByInstance(c.BKBizID, c.LevelValue)
+				if dbCluster == "" {
+					return nil, nil, errors.Errorf("cannot find cluster for instance %s", c.LevelValue)
+				}
+				levelUpMaps[lName] = dbCluster
+			}
+		} else {
+			// return
+		}
+	}
+
+	levelDownMaps := map[string]interface{}{}
+	for _, lName := range downLevels {
+		if lName == constvar.LevelApp {
+			levelDownMaps[lName], err = versionObj.GetAppsAll()
+		} else if lName == constvar.LevelModule {
+			levelDownMaps[lName], err = versionObj.GetModulesByApp(c.BKBizID)
+		} else if lName == constvar.LevelCluster {
+			levelDownMaps[lName], err = versionObj.GetClustersByModule(c.BKBizID, c.LevelValue)
+		} else if lName == constvar.LevelInstance {
+			// @todo GetInstancesByCluster
+			levelDownMaps[lName], err = versionObj.GetInstancesByCluster(c.BKBizID, c.LevelValue)
+		}
+	}
+	return levelUpMaps, levelDownMaps, err
+}
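+
+// Illustrative result shape (values assumed): for a cluster-level node the two maps
+// could look like
+//
+//	levelUpMaps:   {"plat": "<plat_biz_id>", "app": "<bk_biz_id>", "module": "<module>"}
+//	levelDownMaps: {"instance": [<instances of this cluster>]}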
+
+// GetConfigItemsAssociate queries config items in bulk by levelName/levelValue,
+// reading plat items from the view v_tb_config_node_plat
+func (c *ConfigModelView) GetConfigItemsAssociate(bkBizID string, levelNodes map[string]interface{}) ([]*ConfigModel,
+	error) {
+	logger.Info("GetConfigItemsAssociate params: %s, %+v", bkBizID, levelNodes)
+	sqlSubs := []string{}
+	params := make([]interface{}, 0)
+	configs := make([]*ConfigModel, 0)
+	sqlPreparePlat := "select id,bk_biz_id, conf_name, conf_value,level_name,level_value,flag_locked " +
+		"from v_tb_config_node_plat " +
+		"where (namespace = ? and conf_type = ? and conf_file = ? and conf_name = ? ) " +
+		"and (level_name = ? and level_value = ? )"
+	sqlPrepare := "select id,bk_biz_id, conf_name, conf_value,level_name,level_value,flag_locked " +
+		"from tb_config_node " +
+		"where (namespace = ? and conf_type = ? and conf_file = ? and conf_name = ? ) " +
+		"and (level_name = ? and level_value in ? ) and bk_biz_id = ?"
+	for levelName, levelValue := range levelNodes {
+		if levelName == constvar.LevelPlat {
+			if bkBizID == constvar.BKBizIDForPlat {
+				return configs, nil // plat has no up level
+			}
+			param := []interface{}{c.Namespace, c.ConfType, c.ConfFile, c.ConfName,
+				constvar.LevelPlat, constvar.BKBizIDForPlat}
+			params = append(params, param...)
+			sqlSubs = append(sqlSubs, sqlPreparePlat)
+		} else {
+			levelValues := cast.ToStringSlice(levelValue)
+			if levelValue == nil || len(levelValues) == 0 {
+				continue
+			}
+			param := []interface{}{c.Namespace, c.ConfType, c.ConfFile, c.ConfName,
+				levelName, levelValues, bkBizID,
+			}
+			params = append(params, param...)
+			sqlSubs = append(sqlSubs, sqlPrepare)
+		}
+	}
+	sqlStr := strings.Join(sqlSubs, " UNION ALL ")
+	logger.Info("GetConfigItemsAssociate ~%s~ ~%v~", sqlStr, params)
+	if len(sqlStr) == 0 {
+		return configs, nil
+	}
+	if err := DB.Self.Debug().Raw(sqlStr, params...).Scan(&configs).Error; err != nil {
+		return nil, err
+	}
+	logger.Info("GetConfigItemsAssociate result: %+v", configs)
+
+	return configs, nil
+}
diff --git a/dbm-services/common/db-config/internal/repository/model/config_item_test.go b/dbm-services/common/db-config/internal/repository/model/config_item_test.go
new file mode 100644
index 0000000000..6d154d2e46
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/config_item_test.go
@@ -0,0 +1,27 @@
+package model
+
+import (
+	"testing"
+
+	"bk-dbconfig/pkg/util/serialize"
+
+	. "github.com/smartystreets/goconvey/convey"
+)
+
+func TestConfigVersionedPack(t *testing.T) {
+	c := ConfigVersionedModel{}
+	Convey("Test Serialize configs object", t, func() {
+		configs := []*ConfigModel{
+			{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "latin1", LevelName: "cluster", LevelValue: "c11"},
+			{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "major_version", ConfValue: "mysql-5.7", LevelName: "module", LevelValue: "m10"},
+			{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "mycnf_template", ConfValue: "my.cnf#55", LevelName: "module", LevelValue: "m20"},
+		}
+		vc := ConfigVersioned{Versioned: &c, Configs: configs, ConfigsDiff: nil}
+		_ = vc.Pack()
+		configsExpect := make([]*ConfigModel, 0)
+		vc.Configs = nil // unpack will set Configs
+		_ = vc.UnPack()
+		_ = serialize.UnSerializeString(c.ContentObj, &configsExpect, true)
+		So(configs, ShouldResemble, vc.Configs)
+	})
+}
diff --git a/dbm-services/common/db-config/internal/repository/model/config_level.go b/dbm-services/common/db-config/internal/repository/model/config_level.go
new file mode 100644
index 0000000000..a5a5b76ecc
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/config_level.go
@@ -0,0 +1,86 @@
+package model
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/pkg/cst"
+	"bk-dbconfig/pkg/util"
+)
+
+// GetParentLevelStraight returns the valid direct parent level name
+func GetParentLevelStraight(namespace, confType, confFile, curLevelName string) string {
+	fd := api.BaseConfFileDef{Namespace: namespace, ConfType: confType, ConfFile: confFile}
+	if fileDef, err := CacheGetConfigFile(fd); err == nil {
+		levelNames := util.SplitAnyRuneTrim(fileDef.LevelNames, ",")
+		if len(levelNames) > 0 {
+			return GetConfigLevelsUp(curLevelName, levelNames, true)[0]
+		}
+	}
+	return ""
+}
+
+// GetChildLevelStraight returns the valid direct child level name
+func GetChildLevelStraight(namespace, confType, confFile, curLevelName string) string {
+	fd := api.BaseConfFileDef{Namespace: namespace, ConfType: confType, ConfFile: confFile}
+	if fileDef, err := CacheGetConfigFile(fd); err == nil {
+		levelNames := util.SplitAnyRuneTrim(fileDef.LevelNames, ",")
+		if len(levelNames) > 0 {
+			return GetConfigLevelsDown(curLevelName, levelNames, true)[0]
+		}
+	}
+	return ""
+}
+
+// GetConfigLevelsUp returns the parents of the given level; if straight=true, only the direct parent
+// a larger priority value means higher priority and a lower level (plat, app, module, cluster, instance)
+func GetConfigLevelsUp(levelName string, names []string, straight bool) []string {
+	configLevelMap := cst.GetConfigLevelMap("")
+	levelPriority := configLevelMap[levelName]
+
+	configLevels := make([]string, 0)
+	maxLevelName := ""
+	maxLevelPrio := 0
+	for k, v := range configLevelMap {
+		if !util.StringsHas(names, k) {
+			continue
+		}
+		if v < levelPriority {
+			configLevels = append(configLevels, k)
+			// take the maximum on the left side: 1 <2> [3] 4
+			if v > maxLevelPrio {
+				maxLevelPrio = v
+				maxLevelName = k
+			}
+		}
+	}
+	if straight {
+		return []string{maxLevelName}
+	}
+	return configLevels
+}
+
+// GetConfigLevelsDown returns the children of the given level; if straight=true, only the direct child
+func GetConfigLevelsDown(levelName string, names []string, straight bool) []string {
+	configLevelMap := cst.GetConfigLevelMap("")
+	levelPriority := configLevelMap[levelName]
+
+	configLevels := make([]string, 0)
+	minLevelName := ""
+	minLevelPrio := 9999
+	for k, v := range configLevelMap {
+		if !util.StringsHas(names, k) {
+			continue
+		}
+		if v > levelPriority {
+			configLevels = append(configLevels, k)
+			// take the minimum on the right side: [2] <3> 4 5
+			if v < minLevelPrio {
+				minLevelPrio = v
+				minLevelName = k
+			}
+		}
+	}
+	if straight {
+		return []string{minLevelName}
+	}
+	return configLevels
+}
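+
+// Worked example (assuming cst.GetConfigLevelMap assigns increasing priorities along
+// plat < app < module < cluster < instance):
+//
+//	names := []string{"plat", "app", "module", "cluster"}
+//	GetConfigLevelsUp("module", names, false)  // -> {"plat", "app"} (map order, not sorted)
+//	GetConfigLevelsUp("module", names, true)   // -> {"app"}, the direct parent
+//	GetConfigLevelsDown("module", names, true) // -> {"cluster"}, the direct child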
diff --git a/dbm-services/common/db-config/internal/repository/model/config_meta.go b/dbm-services/common/db-config/internal/repository/model/config_meta.go
new file mode 100644
index 0000000000..386e900bad
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/config_meta.go
@@ -0,0 +1,135 @@
+package model
+
+import (
+	"fmt"
+
+	"bk-dbconfig/pkg/core/config"
+	"bk-dbconfig/pkg/util"
+
+	"gorm.io/gorm"
+
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/util/crypt"
+)
+
+// QueryConfigNames queries pre-defined conf name definitions (flag_status = -1) that are unlocked and enabled
+func QueryConfigNames(namespace, confType, confFile, confName string) ([]*ConfigNameDefModel, error) {
+	var sqlRes *gorm.DB
+	var err error
+	confNames := make([]*ConfigNameDefModel, 0)
+	columns :=
+		"conf_name,value_type,value_type_sub,value_default,value_allowed,need_restart,flag_locked,flag_disable,flag_encrypt,need_restart,conf_name_lc,description"
+	sqlRes = DB.Self.Debug().Model(ConfigNameDefModel{}).Select(columns).
+		Where("conf_type = ? and conf_file = ?  and flag_locked = 0 and flag_status = -1 and flag_disable = 0",
+			confType, confFile)
+	if confName != "" {
+		confName = confName + "%"
+		sqlRes = sqlRes.Where("conf_name like ?", confName)
+	}
+	if namespace != "" {
+		sqlRes = sqlRes.Where("namespace = ?", namespace)
+	}
+	if err = sqlRes.Find(&confNames).Error; err != nil {
+		if err != gorm.ErrRecordNotFound {
+			return nil, err
+		}
+	}
+	key := fmt.Sprintf("%s%s", config.GetString("encrypt.keyPrefix"), constvar.BKBizIDForPlat)
+	for _, cn := range confNames {
+		cn.ValueDefault, err = crypt.DecryptString(cn.ValueDefault, key, constvar.EncryptEnableZip)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return confNames, nil
+}
+
+// QueryConfigNamesPlat queries plat config name definitions
+// flag_status = 1 means explicit plat config
+// flag_disable not in (1, 2)  // 1: disabled, 2: enabled but readonly
+func QueryConfigNamesPlat(namespace, confType, confFile, confName string) ([]*ConfigNameDefModel, error) {
+	var sqlRes *gorm.DB
+	var err error
+	confNames := make([]*ConfigNameDefModel, 0)
+	columns :=
+		"conf_name,value_type,value_type_sub,value_default,value_allowed,need_restart,flag_locked,flag_disable,flag_encrypt,flag_status,need_restart,stage,conf_name_lc,description"
+	sqlRes = DB.Self.Debug().Model(ConfigNameDefModel{}).Select(columns).
+		Where("conf_type = ? and conf_file = ? and flag_status >= 1 and flag_disable = 0",
+			confType, confFile)
+	if confName != "" {
+		confName = confName + "%"
+		sqlRes = sqlRes.Where("conf_name like ?", confName)
+	}
+	if namespace != "" {
+		sqlRes = sqlRes.Where("namespace = ?", namespace)
+	}
+	if err := sqlRes.Find(&confNames).Error; err != nil {
+		if err != gorm.ErrRecordNotFound {
+			return nil, err
+		}
+	}
+	key := fmt.Sprintf("%s%s", config.GetString("encrypt.keyPrefix"), constvar.BKBizIDForPlat)
+	for _, cn := range confNames {
+		cn.ValueDefault, err = crypt.DecryptString(cn.ValueDefault, key, constvar.EncryptEnableZip)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return confNames, nil
+}
+
+// QueryConfigFileDetail queries conf file definitions by namespace and conf_type, optionally filtered by conf_file
+func QueryConfigFileDetail(namespace, confType, confFile string) ([]*ConfigFileDefModel, error) {
+	var sqlRes *gorm.DB
+	confFiles := make([]*ConfigFileDefModel, 0)
+	sqlRes = DB.Self.Model(&ConfigFileDefModel{}).Where("namespace = ? and conf_type = ?", namespace, confType)
+	if confFile != "" {
+		sqlRes = sqlRes.Where("conf_file = ?", confFile)
+	}
+	if err := sqlRes.Find(&confFiles).Error; err != nil {
+		return nil, err
+	} else if len(confFiles) == 0 {
+		return nil, gorm.ErrRecordNotFound
+	}
+	for _, obj := range confFiles {
+		obj.LevelNameList = util.SplitAnyRuneTrim(obj.LevelNames, ",")
+	}
+	return confFiles, nil
+}
+
+// GetConfigFileList godoc
+// todo intended to replace QueryConfigFileDetail
+func GetConfigFileList(namespace, confType, confFile string) ([]*ConfigFileDefModel, error) {
+	sqlRes := DB.Self.Model(&ConfigFileDefModel{})
+	if namespace != "" {
+		sqlRes = sqlRes.Where("namespace = ?", namespace)
+	}
+	if confType != "" {
+		sqlRes = sqlRes.Where("conf_type = ?", confType)
+	}
+	if confFile != "" {
+		sqlRes = sqlRes.Where("conf_file = ?", confFile)
+	}
+	var confFiles []*ConfigFileDefModel
+	err := sqlRes.Find(&confFiles).Error
+	if err != nil {
+		return nil, err
+	}
+	return confFiles, nil
+}
+
+// QueryConfigLevel queries level definitions; an empty levels slice returns all levels
+func QueryConfigLevel(levels []string) ([]*ConfigLevelDefModel, error) {
+	var sqlRes *gorm.DB
+	confLevels := make([]*ConfigLevelDefModel, 0)
+	if len(levels) != 0 {
+		sqlRes = DB.Self.Debug().Model(ConfigLevelDefModel{}).Where("level_name in ?", levels)
+	} else { // query all levels
+		sqlRes = DB.Self.Model(ConfigLevelDefModel{})
+	}
+	if err := sqlRes.Find(&confLevels).Error; err != nil {
+		return nil, err
+	}
+	return confLevels, nil
+}
diff --git a/dbm-services/common/db-config/internal/repository/model/config_plat.go b/dbm-services/common/db-config/internal/repository/model/config_plat.go
new file mode 100644
index 0000000000..34b26d321b
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/config_plat.go
@@ -0,0 +1,134 @@
+package model
+
+import (
+	"fmt"
+
+	"bk-dbconfig/pkg/core/config"
+
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util/crypt"
+)
+
+// ConfigNamesBatchSave godoc
+// flag_status = -1 plat_config pre_defined
+// flag_status = 1 pub_config intended to be inherited
+func ConfigNamesBatchSave(db *gorm.DB, confNames []*ConfigNameDefModel) error {
+	for _, confName := range confNames {
+		if confName.FlagDisable == -1 { // hard delete
+			if confName.FlagStatus != -1 || confName.FlagLocked == 1 {
+				return errors.Errorf("conf_name[%s] is intended to be plat_config, delete is not allowed", confName.ConfName)
+			}
+			return errors.Errorf("conf_name[%s] is intended to be plat_config, delete is not supported currently", confName.ConfName)
+		} else if confName.FlagDisable == 1 { // disabling the item equals deleting it, but as a soft delete
+			// todo when flag_status=-1, publishing plat config should also remove it from tb_config_node plat_config
+			confName.FlagStatus = -1
+		} else if confName.FlagStatus == -1 { // removed on the page, put back into the pre_defined conf name list
+
+		} else {
+			// any modify request coming through the api/frontend sets flag_status to 1, marking plat config
+			confName.FlagStatus = 1
+		}
+		if confName.FlagLocked == 1 && confName.FlagDisable == 0 { // locked configs appear explicitly in the plat config list
+			confName.FlagStatus = 1
+		}
+		// an item must not be locked and disabled at the same time
+	}
+	return ConfigNamesBatchUpdate(db, confNames)
+}
+
+// ConfigNamesBatchUpdate updates conf_names one by one, inside a transaction
+func ConfigNamesBatchUpdate(db *gorm.DB, confNames []*ConfigNameDefModel) error {
+	err := db.Transaction(func(tx *gorm.DB) error {
+		for _, c := range confNames {
+			cnDef, err := CacheGetConfigNameDef(c.Namespace, c.ConfType, c.ConfFile, c.ConfName)
+			if err == nil && cnDef.FlagEncrypt == 1 {
+				key := fmt.Sprintf("%s%s", config.GetString("encrypt.keyPrefix"), constvar.BKBizIDForPlat)
+				c.ValueDefault, _ = crypt.EncryptString(c.ValueDefault, key, constvar.EncryptEnableZip)
+			}
+			if err1 := tx.Debug().Select("value_default", "value_allowed", "flag_status", "flag_locked").
+				Where(c.UniqueWhere()).Updates(c).Error; err1 != nil {
+				return errors.WithMessage(err1, c.ConfName)
+			}
+		}
+		return nil
+	})
+	return err
+}
+
+// ConfigNamesBatchDelete deletes conf_names in one of two modes. Each batch is assumed
+// to use a single mode, decided by the FlagDisable of any one element:
+//  1. remove from the plat config list
+//     only the status flag is modified
+//  2. delete from the conf_name table
+//     delete by primary key id, or by unique key. this operation is not exposed yet @todo
+func ConfigNamesBatchDelete(db *gorm.DB, confNames []*ConfigNameDefModel) error {
+	deleteConfName := false
+	for _, confName := range confNames {
+		confName.FlagStatus = -1 // move from plat config back to the conf name list
+		if confName.FlagDisable == -1 {
+			deleteConfName = true
+		}
+	}
+	if deleteConfName { // 2. delete from the conf_name table
+		// deleting a conf_name involves parent/child locking relations; not supported yet
+		return errors.New("delete conf_name is not allowed for now")
+		/*
+		   return db.Transaction(func(tx *gorm.DB) error {
+		       for _, c := range confNames {
+		           if c.ID > 0 {
+		               return DeleteByUnique(tx, c.TableName(), c.UniqueWhere())
+		           } else {
+		               return BatchDeleteByID(tx, c.TableName(), []uint64{c.ID})
+		           }
+		       }
+		       return nil
+		   })
+		*/
+	} else { // 1. remove from the plat config list
+		return ConfigNamesBatchSave(db, confNames)
+	}
+}
+
+// ConfigNamesBatchCreate2 inserts conf_names in one batch
+func ConfigNamesBatchCreate2(confNames []*ConfigNameDefModel) error {
+	var sqlRes *gorm.DB
+	sqlRes = DB.Self.Omit("time_created", "time_updated").Create(&confNames)
+	// sqlRes = DB.Self.Omit("time_created", "time_updated").Save(&confNames)
+	if err := sqlRes.Error; err != nil {
+		logger.Errorf("add conf_names :%+v, err:%s", confNames, err.Error())
+		return err
+	}
+	return nil
+}
+
+// ConfigNamesBatchUpdate2 updates conf_names one by one, inside a transaction
+func ConfigNamesBatchUpdate2(confNames []*ConfigNameDefModel) error {
+	err := DB.Self.Transaction(func(tx *gorm.DB) error {
+		for _, c := range confNames {
+			// use the transaction handle, not the global DB, so the update can be rolled back
+			if err1 := tx.UpdateColumns(c).Error; err1 != nil {
+				return err1
+			}
+		}
+		return nil
+	})
+	return err
+}
+
+// ConfigNamesBatchDelete2 deletes conf_names; delete must go by primary key id, not by unique key
+func ConfigNamesBatchDelete2(confNames []*ConfigNameDefModel) error {
+	var sqlRes *gorm.DB
+	sqlRes = DB.Self.Delete(&confNames)
+	if err := sqlRes.Error; err != nil {
+		logger.Errorf("delete config names fail:%+v, err:%s", confNames, err.Error())
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/common/db-config/internal/repository/model/config_version.go b/dbm-services/common/db-config/internal/repository/model/config_version.go
new file mode 100644
index 0000000000..63ae52abab
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/config_version.go
@@ -0,0 +1,402 @@
+package model
+
+import (
+	"fmt"
+	"time"
+
+	"bk-dbconfig/pkg/util"
+
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util/serialize"
+)
+
+// ListConfigFileVersions lists the version release history of a module/cluster.
+// A plat or app level conf file may have multiple versions
+func (c *ConfigVersionedModel) ListConfigFileVersions(app bool) ([]*ConfigVersionedModel, error) {
+	var sqlRes *gorm.DB
+	versions := make([]*ConfigVersionedModel, 0)
+	sqlRes = DB.Self.Debug().Model(ConfigVersionedModel{}).Order("created_at desc").
+		Select("revision", "conf_file", "created_at", "created_by", "rows_affected", "description", "is_published")
+	if app {
+		sqlRes.Where(
+			"bk_biz_id = ? and namespace = ? and level_name = ? and level_value = ? and conf_type = ? and conf_file = ?",
+			c.BKBizID, c.Namespace, c.LevelName, c.LevelValue, c.ConfType, c.ConfFile).Find(&versions)
+	} else {
+		sqlRes.Where(c.UniqueWhere(false)).Find(&versions)
+	}
+	if err := sqlRes.Error; err != nil {
+		if err != gorm.ErrRecordNotFound {
+			return nil, err
+		}
+	}
+	return versions, nil
+}
+
+// GetDetail gets the detail of one revision
+func (c *ConfigVersionedModel) GetDetail(db *gorm.DB, versionList []string) (*ConfigVersioned, error) {
+	var sqlRes *gorm.DB
+	sqlRes = db.Debug().Model(ConfigVersionedModel{}).Where(c.UniqueWhere(true))
+	if c.ID != 0 {
+		sqlRes = sqlRes.Where("id = ?", c.ID)
+	}
+	sqlRes = sqlRes.Find(&c)
+	if err := sqlRes.Error; err != nil {
+		if err != gorm.ErrRecordNotFound {
+			return nil, err
+		}
+	}
+	versioned := &ConfigVersioned{Versioned: c}
+	if err := versioned.UnPack(); err != nil {
+		return nil, err
+	}
+	return versioned, nil
+}
+
+// GetVersionedConfigFile queries versioned config files; level_value here is the cluster
+func (c *ConfigVersionedModel) GetVersionedConfigFile(db *gorm.DB, versionList []string) ([]*ConfigVersionedModel,
+	error) {
+	var sqlRes *gorm.DB
+	versions := make([]*ConfigVersionedModel, 0)
+	logger.Info("GetVersionedConfigFile ConfigVersionedModel=%+v", c)
+	if (c.ID == 0) && (c.BKBizID == "" || c.ConfType == "" || c.ConfFile == "" || c.LevelValue == "") {
+		return nil, fmt.Errorf("GetVersionedConfigFile wrong params")
+	}
+	// .Select("revision", "is_published", "content", "content_md5", "content_obj")
+	sqlRes = db.Debug().Model(ConfigVersionedModel{}).Where(c.UniqueWhere(false))
+	if c.ID != 0 {
+		sqlRes = sqlRes.Where("id = ?", c.ID)
+	}
+	if c.IsPublished == 1 {
+		sqlRes = sqlRes.Where("is_published = ?", c.IsPublished)
+	}
+	if c.IsApplied == 1 {
+		sqlRes = sqlRes.Where("is_applied = ?", c.IsApplied)
+	}
+	if len(versionList) > 0 {
+		sqlRes = sqlRes.Where("revision in ?", versionList)
+	} // else query all versions
+	sqlRes = sqlRes.Find(&versions)
+	if err := sqlRes.Error; err != nil {
+		if err != gorm.ErrRecordNotFound {
+			return nil, err
+		}
+	}
+	return versions, nil
+}
+
+// UpdateConfigVersioned TODO
+func (c *ConfigVersionedModel) UpdateConfigVersioned() error {
+	return nil
+}
+
+// SaveConfigVersioned creates the versioned record if absent, otherwise updates it by unique key
+func (c *ConfigVersionedModel) SaveConfigVersioned(db *gorm.DB) error {
+	if id, err := RecordExists(db, c.TableName(), c.ID, c.UniqueWhere(true)); err != nil {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
+			if err := db.Create(c).Error; err != nil {
+				logger.Errorf("Save fail2:%v, err:%s", *c, err.Error())
+				return err
+			}
+			return nil
+		}
+		return err
+	} else {
+		c.ID = id
+		// Where must come before Updates; Updates executes the statement immediately
+		sqlRes := db.Table(c.TableName()).Where(c.UniqueWhere(true)).Updates(c)
+		if sqlRes.Error != nil {
+			return sqlRes.Error
+		}
+		return nil
+	}
+}
+
+// FormatAndSaveConfigVersioned generates a version but does not publish it: is_published=false, is_applied=false
+func (c *ConfigVersionedModel) FormatAndSaveConfigVersioned(db *gorm.DB,
+	configs []*ConfigModel, configsDiff []*ConfigModelOp) (string, error) {
+	// format: content versioned to save, list
+	if c.Revision == "" {
+		c.Revision = c.NewRevisionName()
+	}
+	if c.PreRevision == "" {
+		// use the currently published version as pre_revision
+		if publishedVersion, err := c.GetVersionPublished(db); err != nil {
+			if !errors.Is(err, gorm.ErrRecordNotFound) {
+				return "", err
+			}
+			logger.Warnf("no published pre_revision found %+v", c)
+		} else {
+			c.PreRevision = publishedVersion.Versioned.Revision
+		}
+	}
+	vc := &ConfigVersioned{
+		Versioned:   c,
+		Configs:     configs,
+		ConfigsDiff: configsDiff,
+	}
+	if err := vc.HandleFlagEncrypt(); err != nil {
+		return "", err
+	}
+	if err := vc.Pack(); err != nil {
+		return "", err
+	}
+	// fetch node_id for the association
+	if c.NodeID == 0 {
+		fn := &ConfigFileNodeModel{
+			Namespace:  c.Namespace,
+			ConfType:   c.ConfType,
+			ConfFile:   c.ConfFile,
+			BKBizID:    c.BKBizID,
+			LevelName:  c.LevelName,
+			LevelValue: c.LevelValue,
+		}
+		if fnNew, err := fn.Detail(db); fnNew != nil && err == nil {
+			c.NodeID = fnNew.ID
+		} else if err == gorm.ErrRecordNotFound {
+			if nodeID, err := fn.CreateOrUpdate(true, db); err != nil {
+				return "", err
+			} else {
+				c.NodeID = nodeID
+			}
+			/*
+			   errInfo := fmt.Sprintf("bk_biz_id=%s,namespace=%s,conf_type=%s,conf_file=%s,level_name=%s,level_value=%s",
+			       c.BKBizID, c.Namespace, c.ConfType, c.ConfFile, c.LevelName, c.LevelValue)
+			   logger.Errorf("node_id not found: %s", errInfo)
+			   return "", errors.Wrap(errno.ErrNodeNotFound, errInfo)
+			*/
+		}
+	}
+	if err := c.SaveConfigVersioned(db); err != nil {
+		return "", err
+	}
+	logger.Info("FormatAndSaveConfigVersioned: v=%s id=%d", c.Revision, c.ID)
+	return c.Revision, nil
+}
+
+// GetConfigFileConfigs unserializes the ConfigModels from contentObj;
+// it does not query the db
+func (c *ConfigVersionedModel) GetConfigFileConfigs() ([]*ConfigModel, error) {
+	configs := make([]*ConfigModel, 0)
+	if err := serialize.UnSerializeString(c.ContentObj, &configs, true); err != nil {
+		return nil, err
+	}
+	return configs, nil
+}
+
+// NewRevisionName sets and returns a revision name based on the current time
+func (c *ConfigVersionedModel) NewRevisionName() string {
+	t := time.Now()
+	s := t.Format("20060102150405")
+	revision := fmt.Sprintf("v_%s", s)
+	c.Revision = revision
+	return revision
+}
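+
+// Example (assumed clock): at 2023-05-29 15:14:30 NewRevisionName yields "v_20230529151430".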
+
+// PatchConfigVersioned patches config content in memory; it does not save to db
+func (c *ConfigVersionedModel) PatchConfigVersioned(patch map[string]string) error {
+	// logger.Warnf("PatchConfigVersioned:%+v, patch:%v", c, patch)
+	configs := make([]*ConfigModel, 0)
+	var err error
+	if configs, err = c.GetConfigFileConfigs(); err != nil {
+		return err
+	}
+	for k, v := range patch {
+		for _, i := range configs {
+			if k == i.ConfName {
+				if !util.ConfValueIsPlaceHolder(i.ConfValue) {
+					return fmt.Errorf("cannot patch conf_name=%s conf_value=%s to v=%s", i.ConfName, i.ConfValue, v)
+				} else {
+					i.ConfValue = v // only patch values that start with {{
+				}
+			}
+			if err := i.HandleFlagEncrypt(); err != nil {
+				return err
+			}
+		}
+	}
+	c.RowsAffected += len(patch)
+	vc := ConfigVersioned{Versioned: c, Configs: configs, ConfigsDiff: nil}
+	if err = vc.Pack(); err != nil {
+		return err
+	}
+	return nil
+}
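+
+// Illustrative usage sketch (names assumed): patch maps conf_name -> new value, and only
+// placeholder values like "{{port}}" may be overwritten.
+//
+//	patch := map[string]string{"port": "20000"}
+//	if err := versioned.PatchConfigVersioned(patch); err != nil {
+//		// the current conf_value was not a {{...}} placeholder, or re-packing failed
+//	}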
+
+// VersionApplyStatus marks a version as applied.
+// Used only for published versions; the revision was generated earlier.
+// Fields of c that are used: the unique key and revision.
+// After each apply: is_published=1, is_applied=1.
+// If a level_config is applied, new config versions are published to all children
+func (c *ConfigVersionedModel) VersionApplyStatus(db *gorm.DB) error {
+	if c.Revision == "" {
+		return fmt.Errorf("revision to apply cannot be empty: %+v", c)
+	}
+	// check whether it has been published
+	where := c.UniqueWhere(true)
+	where["is_published"] = 1
+	if _, err := RecordExists(db, c.TableName(), 0, where); errors.Is(err, gorm.ErrRecordNotFound) {
+		return errors.Errorf("版本应用必须是已发布: %s", c.Revision)
+	}
+	// fetch the currently applied config version
+	var verObj *ConfigVersionedModel // queried from db
+	sqlRes := db.Model(c).
+		Where(c.UniqueWhere(false)).Where("is_applied = ?", 1).
+		Select("id", "revision", "is_applied", "is_published").First(&verObj)
+	if err := sqlRes.Error; err != nil {
+		if err == gorm.ErrRecordNotFound { // ignore no record found
+			logger.Warnf("applied version not found: %+v", c)
+		} else {
+			return err
+		}
+	} else {
+		if verObj.Revision == c.Revision {
+			return fmt.Errorf("revision is applied already: %s", c.Revision)
+		}
+	}
+	logger.Warnf("ApplyConfigVersioned get:%+v", c)
+	versionBefore := ConfigVersionedModel{
+		BKBizID:    c.BKBizID,
+		Namespace:  c.Namespace,
+		LevelName:  c.LevelName,
+		LevelValue: c.LevelValue,
+		ConfFile:   c.ConfFile,
+		ConfType:   c.ConfType,
+	}
+	versionNew := versionBefore
+	versionNew.Revision = c.Revision
+	//    versionNew.ID = verObj.ID
+
+	// set the previously applied version to 0
+	if err := db.Debug().Table(c.TableName()).Where(&versionBefore).Where("is_applied = ?", 1).
+		Select("is_applied").Update("is_applied", 0).Error; err != nil {
+		if err != gorm.ErrRecordNotFound { // ignore no record found
+			return err
+		}
+	}
+	// set the version being applied to 1
+	if err := db.Debug().Table(c.TableName()).Where(&versionNew).
+		Select("is_applied").Update("is_applied", 1).Error; err != nil {
+		return err
+	}
+	return nil
+}
+
+// PublishConfig publishes a version whose revision was generated earlier: is_published=true.
+// Fields of c that are used: the unique key and revision
+func (c *ConfigVersionedModel) PublishConfig(db *gorm.DB) (err error) {
+	if c.Revision == "" {
+		return fmt.Errorf("revision to publish cannot be empty: %+v", c)
+	}
+	// fetch the currently published config version
+	var verObj *ConfigVersionedModel // queried from db
+	sqlRes := db.Model(c).
+		Where(c.UniqueWhere(false)).Where("is_published = ?", 1).
+		Select("id", "revision", "is_applied", "is_published").First(&verObj)
+	if err = sqlRes.Error; err != nil {
+		if err == gorm.ErrRecordNotFound { // ignore no record found
+			logger.Warnf("published version not found: %+v", c)
+		} else {
+			return err
+		}
+	} else if verObj.Revision == c.Revision {
+		// compare only when a row was found, so verObj cannot be nil here
+		return fmt.Errorf("revision is published already: %s", c.Revision)
+	}
+	// logger.Warnf("PublishConfigVersioned get:%+v", c)
+	versionBefore := ConfigVersionedModel{
+		BKBizID:    c.BKBizID,
+		Namespace:  c.Namespace,
+		LevelName:  c.LevelName,
+		LevelValue: c.LevelValue,
+		ConfFile:   c.ConfFile,
+		ConfType:   c.ConfType,
+	}
+	versionNew := versionBefore
+	versionNew.Revision = c.Revision
+	versionNew.ID = c.ID
+
+	// set the previously published version to 0
+	if err = db.Debug().Table(c.TableName()).Where(&versionBefore).Where("is_published = ?", 1).
+		Select("is_published").Update("is_published", 0).Error; err != nil {
+		if err != gorm.ErrRecordNotFound { // ignore no record found
+			return err
+		}
+	}
+	// set the version being published to 1
+	if err = db.Debug().Table(c.TableName()).Where(&versionNew).
+		Select("is_published").Update("is_published", 1).Error; err != nil {
+		return err
+	}
+	return
+}
+
+// PatchConfig patches a published version whose revision was generated earlier: is_published=true, is_applied=false
+func (c *ConfigVersionedModel) PatchConfig(db *gorm.DB, patch map[string]string) error {
+	if c.Revision == "" {
+		return fmt.Errorf("revision to patch cannot be empty: %+v", c)
+	} else if len(patch) == 0 {
+		return fmt.Errorf("no patch items given")
+	}
+	// check if c exists
+	var verObj *ConfigVersionedModel // queried from db
+	sqlRes := db.Model(c).Where(c.UniqueWhere(true)).First(&verObj)
+	if err := sqlRes.Error; err != nil {
+		// return on any error; a missing row would otherwise leave verObj nil and panic below
+		if err == gorm.ErrRecordNotFound {
+			logger.Warnf("patch version not found: %+v", c)
+		}
+		return err
+	}
+	logger.Warnf("PatchConfigVersioned get:%+v", c)
+	if err := verObj.PatchConfigVersioned(patch); err != nil {
+		return err
+	}
+	if err := verObj.SaveConfigVersioned(db); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Update updates the specified fields.
+// If where=nil and updates=nil, update c directly.
+// If where=nil, c is used as the condition.
+// If updates=nil, c is used as the update content.
+// If columns are given, only those fields are updated
+func (c *ConfigVersionedModel) Update(db *gorm.DB, where map[string]interface{}, updates map[string]interface{},
+	columns ...string) error {
+	// save c
+	if where == nil && updates == nil {
+		return db.Updates(c).Error
+	}
+	sqlRes := db.Debug().Table(c.TableName())
+	if where == nil {
+		sqlRes = sqlRes.Where(c)
+	} else {
+		sqlRes = sqlRes.Where(where)
+	}
+	if len(columns) > 0 {
+		sqlRes = sqlRes.Select(columns)
+	}
+	if updates == nil {
+		sqlRes = sqlRes.Updates(c)
+	} else {
+		sqlRes = sqlRes.Updates(updates)
+	}
+	if err := sqlRes.Error; err != nil {
+		if err != gorm.ErrRecordNotFound { // ignore no record found
+			return err
+		}
+	}
+	return nil
+}
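+
+// Illustrative usage sketches (values assumed) for the parameter combinations:
+//
+//	_ = c.Update(db, nil, nil) // save c directly (gorm matches by primary key)
+//	_ = c.Update(db, nil, map[string]interface{}{"is_published": 1})                 // c is the condition
+//	_ = c.Update(db, map[string]interface{}{"revision": "v_20230529151430"}, nil)    // c is the content
+//	_ = c.Update(db, nil, map[string]interface{}{"is_published": 1}, "is_published") // restrict columns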
diff --git a/dbm-services/common/db-config/internal/repository/model/db.go b/dbm-services/common/db-config/internal/repository/model/db.go
new file mode 100644
index 0000000000..540441ac79
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/db.go
@@ -0,0 +1,163 @@
+package model
+
+import (
+	"database/sql"
+	"fmt"
+	"log"
+	"strconv"
+	"time"
+
+	"github.com/spf13/viper"
+
+	"gorm.io/driver/mysql"
+	"gorm.io/gorm"
+
+	"bk-dbconfig/pkg/core/config"
+)
+
+// Database holds the gorm connections
+type Database struct {
+	Self *gorm.DB
+	// DBV1 *gorm.DB
+}
+
+// DB is the global database handle
+var DB *Database
+
+// Connection describes a db connection target
+type Connection struct {
+	IP   string `json:"ip"`
+	Port int    `json:"port"`
+	// User   string `json:"user"`
+	// Pwd    string `json:"pwd"`
+	DBName string `json:"dbName"`
+}
+
+// migrateDatabase initialize the database tables.
+func migrateDatabase(db *gorm.DB) {
+	// db.AutoMigrate(&UserModel{})
+	// db.AutoMigrate(&RoleModel{})
+}
+
+/*
+ cleanDatabase tear downs the database tables.
+func cleanDatabase(db *gorm.DB) {
+    //db.DropTable(&UserModel{})
+    //db.DropTable(&RoleModel{})
+}
+
+
+// resetDatabase resets the database tables.
+func resetDatabase(db *gorm.DB) {
+    //cleanDatabase(db)
+    migrateDatabase(db)
+}
+*/
+
+// openDB godoc
+// options="multiStatements=true&interpolateParams=true"
+func openDB(username, password, addr, name string, options string) *gorm.DB {
+	// multiStatements
+	dsn := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=%t&loc=%s",
+		username,
+		password,
+		addr,
+		name,
+		true,
+		// "Asia/Shanghai"),
+		"Local")
+	if options != "" {
+		dsn += "&" + options
+	}
+	// log.Printf("connect string: %s", dsn)
+	sqlDB, err := sql.Open("mysql", dsn)
+	db, err := gorm.Open(mysql.New(mysql.Config{Conn: sqlDB}), &gorm.Config{})
+	if err != nil {
+		log.Fatalf("Database connection failed. Database name: %s, error: %v", name, err)
+	}
+
+	// set for db connection
+	setupDB(db)
+	return db
+}
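+
+// For reference, with options="multiStatements=true" the assembled DSN looks like
+// (illustrative credentials):
+//
+//	user:pass@tcp(127.0.0.1:3306)/bk_dbconfig?charset=utf8&parseTime=true&loc=Local&multiStatements=true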
+
+func setupDB(db *gorm.DB) {
+	// setup tables
+	sqlDB, err := db.DB()
+	if err != nil {
+		log.Fatalf("setupDB failed: %s", err.Error())
+		return
+	}
+	migrateDatabase(db)
+	// sqlDB.LogMode(viper.GetBool("gormlog"))
+	// idle connections are kept in the pool so a finished connection can be reused next time
+	sqlDB.SetMaxIdleConns(viper.GetInt("dbConnConf.maxIdleConns"))
+	// caps open connections (default 0 = unlimited) to avoid mysql "too many connections" under high concurrency
+	sqlDB.SetMaxOpenConns(viper.GetInt("dbConnConf.maxOpenConns"))
+	sqlDB.SetConnMaxLifetime(time.Duration(viper.GetInt("dbConnConf.connMaxLifetime")) * time.Hour)
+}
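+
+// The pool settings above expect a config section shaped like the following
+// (key names taken from the viper lookups; values are illustrative):
+//
+//	dbConnConf:
+//	  maxIdleConns: 10
+//	  maxOpenConns: 100
+//	  connMaxLifetime: 1   # hours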
+
+// InitSelfDB builds the gorm.DB handle for the service's own database
+func InitSelfDB(options string) *gorm.DB {
+	log.Println(config.GetString("db.username"), "****", config.GetString("db.addr"),
+		config.GetString("db.name"))
+	return openDB(config.GetString("db.username"),
+		config.GetString("db.password"),
+		config.GetString("db.addr"),
+		config.GetString("db.name"),
+		options)
+}
+
+func getTestDB() *gorm.DB {
+	return openDB(config.GetString("testdb.name"),
+		config.GetString("testdb.password"),
+		config.GetString("testdb.addr"),
+		config.GetString("testdb.name"), "")
+}
+
+// GetSelfDB returns the underlying native sql.DB object
+func GetSelfDB() *sql.DB {
+	sqlDB, _ := DB.Self.DB()
+	return sqlDB
+}
+
+// Init initializes the global DB handle
+func (db *Database) Init() {
+	DB = &Database{
+		Self: InitSelfDB(""),
+		// DBV1: getDBV1(),
+	}
+}
+
+// Close closes the underlying sql.DB connections
+func (db *Database) Close() {
+	sqlDB, err := DB.Self.DB()
+	if err == nil {
+		sqlDB.Close()
+	}
+	// DB.DBV1.Close()
+}
+
+// GetDBconn opens a connection with the monitor account; DBName defaults to information_schema
+func GetDBconn(connParam Connection) *gorm.DB {
+	address := connParam.IP + ":" + strconv.Itoa(connParam.Port)
+	if connParam.DBName == "" {
+		connParam.DBName = "information_schema"
+	}
+	return openDB(config.GetString("monitor.username"),
+		config.GetString("monitor.password"),
+		address,
+		connParam.DBName, "")
+}
+
+// GetProxyconn opens a proxy connection, falling back to the configured proxy account
+func GetProxyconn(username, password, addr, name string) *gorm.DB {
+	if username == "" {
+		username = config.GetString("proxy.username")
+	}
+	if password == "" {
+		password = config.GetString("proxy.password")
+	}
+	return openDB(username, password, addr, name, "")
+}
diff --git a/dbm-services/common/db-config/internal/repository/model/dbmeta.go b/dbm-services/common/db-config/internal/repository/model/dbmeta.go
new file mode 100644
index 0000000000..bb901aa29b
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/dbmeta.go
@@ -0,0 +1,33 @@
+package model
+
+// GetAppInfo TODO
+// `/db_meta/db_module/query/{bk_biz_id}`
+func GetAppInfo(bkBizID string) error {
+	// return modules,clusters
+	return nil
+}
+
+// GetModuleInfo TODO
+func GetModuleInfo(bkBizID, module string) (string, error) {
+	// return clusters,instances
+	return "", nil
+}
+
+// GetClusterInfo TODO
+// `/db_meta/cluster/query`
+func GetClusterInfo(bkBizID, cluster string) (string, error) {
+	// return module,instances
+	return "", nil
+}
+
+// GetHostInfo TODO
+func GetHostInfo(bkBizID, host string) (string, error) {
+	// return modules,clusters,instances
+	return "", nil
+}
+
+// GetInstanceInfo TODO
+func GetInstanceInfo(bkBizID, instance string) (string, error) {
+	// return module,host,cluster
+	return "", nil
+}
diff --git a/dbm-services/common/db-config/internal/repository/model/dbmeta_inside.go b/dbm-services/common/db-config/internal/repository/model/dbmeta_inside.go
new file mode 100644
index 0000000000..a9d668e3cd
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/dbmeta_inside.go
@@ -0,0 +1,109 @@
+package model
+
+import (
+	"errors"
+
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+
+	"gorm.io/gorm"
+)
+
+// GetModuleByCluster (up) queries the module a cluster belongs to, by bk_biz_id and cluster
+func (c *ConfigVersionedModel) GetModuleByCluster(bkBizID, cluster string) (string, error) {
+	var dbModule string
+	err := DB.Self.Table(c.TableName()).Select("module").
+		Where(c).
+		Where("is_published = 1 and bk_biz_id = ? and level_name=? and level_value = ? and module is not null",
+			bkBizID, constvar.LevelCluster, cluster).
+		Take(&dbModule).Error
+	if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) {
+		return "", err
+	}
+	return dbModule, nil
+}
+
+// GetClusterByInstance (up) queries the cluster an instance belongs to
+func (c *ConfigVersionedModel) GetClusterByInstance(bkBizID, instance string) (string, error) {
+	var cluster string
+	err := DB.Self.Table(c.TableName()).Select("cluster").
+		Where(c).
+		Where("is_published = 1 and bk_biz_id = ? and level_name = ? and level_value = ?",
+			bkBizID, constvar.LevelInstance, instance).Take(&cluster).Error
+	if err != nil {
+		return "", err
+	}
+	return cluster, nil
+}
+
+// GetAppsAll (down) lists all bk_biz_id values
+func (c *ConfigVersionedModel) GetAppsAll() ([]string, error) {
+	var bkBizIDs []string
+	err := DB.Self.Debug().Table(c.TableName()).
+		Where("is_published = 1 and bk_biz_id != ?", constvar.BKBizIDForPlat).
+		Where(c).
+		Distinct().Pluck("bk_biz_id", &bkBizIDs).Error
+	if err != nil {
+		return nil, err
+	}
+	return bkBizIDs, nil
+}
+
+// GetModulesByApp (down) queries all db modules under a bk_biz_id
+func (c *ConfigVersionedModel) GetModulesByApp(bkBizID string) ([]string, error) {
+	var modules []string
+	err := DB.Self.Debug().Table(c.TableName()).
+		Where("is_published = 1 and bk_biz_id = ? and level_name=?", bkBizID, constvar.LevelModule).
+		Where(c).
+		Distinct().Pluck("level_value", &modules).Error
+	if err != nil {
+		return nil, err
+	}
+	return modules, nil
+}
+
+// GetClustersByModule (down) queries all clusters under a bk_biz_id and db_module
+func (c *ConfigVersionedModel) GetClustersByModule(bkBizID, dbModule string) ([]string, error) {
+	var clusters []string
+	err := DB.Self.Debug().Table(c.TableName()).
+		Where("is_published = 1 and bk_biz_id = ? and module = ?", bkBizID, dbModule).
+		Where(c).
+		Distinct().Pluck("level_value", &clusters).Error
+	if err != nil {
+		return nil, err
+	}
+	return clusters, nil
+}
+
+// GetInstancesByCluster (down) would list a cluster's instances;
+// instance level config is not supported, so it is unused for now
+func (c *ConfigVersionedModel) GetInstancesByCluster(bkBizID, dbCluster string) ([]string, error) {
+	return nil, nil
+}
+
+// GetHostsByCluster (down) would list a cluster's hosts;
+// machine level config is not supported, so it is unused for now
+func (c *ConfigVersionedModel) GetHostsByCluster() ([]string, error) {
+	return nil, nil
+}
+
+// GetModuleByCluster queries cmdb info rows by bk_biz_id and cluster
+func GetModuleByCluster(bkBizID, cluster string) ([]*CmdbInfoBase, error) {
+	var sqlRes *gorm.DB
+	cmdbInfoBase := make([]*CmdbInfoBase, 0)
+	sqlRes = DB.Self.Where("bk_biz_id=? and cluster=?", bkBizID, cluster).Find(&cmdbInfoBase)
+	if err := sqlRes.Error; err != nil {
+		if err != gorm.ErrRecordNotFound {
+			return nil, err
+		}
+	}
+	logger.Warnf("GetModuleByCluster sql: %+v", cmdbInfoBase)
+	return cmdbInfoBase, nil
+}
diff --git a/dbm-services/common/db-config/internal/repository/model/dbtime.go b/dbm-services/common/db-config/internal/repository/model/dbtime.go
new file mode 100644
index 0000000000..19d370ce3b
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/dbtime.go
@@ -0,0 +1,52 @@
+package model
+
+import (
+	"database/sql/driver"
+	"fmt"
+	"time"
+)
+
+// DBTime wraps time.Time with the datetime layout used by the db
+type DBTime struct {
+	time.Time
+}
+
+// DBTimeFormat is the datetime layout used for db values and JSON
+const DBTimeFormat = "2006-01-02 15:04:05"
+
+// MarshalJSON renders the time as a quoted "2006-01-02 15:04:05" string
+func (t DBTime) MarshalJSON() ([]byte, error) {
+	str := fmt.Sprintf(`"%s"`, t.Format(DBTimeFormat))
+	return []byte(str), nil
+}
+
+// Value implements driver.Valuer; the zero time is stored as NULL
+func (t DBTime) Value() (driver.Value, error) {
+	var zeroTime time.Time
+	if t.Time.UnixNano() == zeroTime.UnixNano() {
+		return nil, nil
+	}
+	return t.Time, nil
+}
+
+// Scan implements sql.Scanner for datetime columns
+func (t *DBTime) Scan(v interface{}) error {
+	if val, ok := v.(time.Time); ok {
+		*t = DBTime{Time: val}
+		return nil
+	}
+	return fmt.Errorf("error when converting %v to datetime", v)
+}
+
+// String renders the time for printing
+func (t DBTime) String() string {
+	// in the local machine's time zone
+	return t.Format(DBTimeFormat)
+}
+
+// BaseDatetime embeds read-only created_at/updated_at columns
+type BaseDatetime struct {
+	// gorm.Model
+	CreatedAt DBTime `json:"created_at" gorm:"->;column:created_at;type:varchar(30)"`
+	UpdatedAt DBTime `json:"updated_at" gorm:"->;column:updated_at;type:varchar(30)"`
+}
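+
+// Example (illustrative): a BaseDatetime marshals to JSON as
+//
+//	{"created_at": "2023-05-29 15:14:30", "updated_at": "2023-05-29 15:14:30"}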
diff --git a/dbm-services/common/db-config/internal/repository/model/file_node.go b/dbm-services/common/db-config/internal/repository/model/file_node.go
new file mode 100644
index 0000000000..f3a937604e
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/file_node.go
@@ -0,0 +1,115 @@
+package model
+
+import (
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+)
+
+// CreateOrUpdate creates or updates a file node; create=true means it is explicitly an insert
+func (c *ConfigFileNodeModel) CreateOrUpdate(create bool, db *gorm.DB) (uint64, error) {
+	if create {
+		return c.ID, db.Create(c).Error
+	}
+	id, _ := RecordExists(db, c.TableName(), 0, c.UniqueWhere())
+	if id > 0 {
+		// the node_id already exists and this call comes from generate; keep the original description. see GenerateConfigVersion
+		if c.ConfFileLC == "" && c.Description == "generated" {
+			return id, nil
+		}
+		}
+		c.ID = id
+		return c.ID, db.Updates(c).Error
+	} else {
+		return c.ID, db.Create(c).Error
+	}
+}
+
+// List lists conf file nodes; plat level reads the definitions, app level optionally merges inherited plat files
+func (c *ConfigFileNodeModel) List(db *gorm.DB, inheritFrom string) ([]*ConfigFileNodeModel, error) {
+	var platFiles []*ConfigFileNodeModel
+	columnsRet := "id, namespace,conf_type,conf_file, conf_file_lc,description,updated_by,created_at,updated_at"
+	platFile := &ConfigFileDefModel{
+		Namespace: c.Namespace,
+		ConfType:  c.ConfType,
+	}
+	platRes := db.Debug().Table(platFile.TableName()).Select(columnsRet).Where(platFile)
+	if c.ConfFile != "" {
+		platRes = platRes.Where("conf_file = ?", c.ConfFile)
+	}
+	if c.LevelName == constvar.LevelPlat {
+		// query plat
+		err := platRes.Find(&platFiles).Error
+		if err != nil {
+			return nil, err
+		}
+		return platFiles, nil
+		// } else if c.LevelName == constvar.LevelApp && c.BKBizID == c.LevelValue{
+	} else if c.BKBizID != constvar.BKBizIDForPlat {
+		var files []*ConfigFileNodeModel
+		// query app
+		sqlRes := db.Debug().Table(c.TableName()).Select(columnsRet).
+			Where("namespace = ? and conf_type = ? and level_name = ? and level_value = ? and bk_biz_id = ?",
+				c.Namespace, c.ConfType, c.LevelName, c.LevelValue, c.BKBizID)
+		if c.ConfFile != "" {
+			sqlRes = sqlRes.Where("conf_file = ?", c.ConfFile)
+		}
+		err := sqlRes.Find(&files).Error
+		if err != nil {
+			return nil, err
+		}
+		if inheritFrom == constvar.BKBizIDForPlat {
+			// query plat
+			err := platRes.Find(&platFiles).Error
+			if err != nil {
+				return nil, err
+			}
+			var filesNew []*ConfigFileNodeModel
+			logger.Info("platFiles: %+v  appFiles: %+v", platFiles, files)
+			for _, fb := range platFiles {
+				flag := false
+				for _, f := range files {
+					if fb.Namespace == f.Namespace && fb.ConfType == f.ConfType && fb.ConfFile == f.ConfFile {
+						// app takes precedence
+						filesNew = append(filesNew, f)
+						flag = true
+						continue
+					}
+				}
+				if !flag {
+					filesNew = append(filesNew, fb)
+				}
+			}
+			return filesNew, nil
+		} else {
+			return files, nil
+		}
+	} else {
+		return nil, errors.Errorf("illegal params for level=%s bk_biz_id=%s", c.LevelName, c.BKBizID)
+	}
+}
+
+// Detail queries by id when set, otherwise by unique key.
+// If nothing is found it returns nil instead of ErrRecordNotFound
+func (c *ConfigFileNodeModel) Detail(db *gorm.DB) (*ConfigFileNodeModel, error) {
+	var files []*ConfigFileNodeModel
+	// query app
+	columnsRet :=
+		"id,namespace,bk_biz_id,conf_type,conf_file,level_name,level_value,conf_file_lc,description,updated_by,created_at,updated_at"
+	sqlRes := db.Debug().Table(c.TableName()).Select(columnsRet)
+	if c.ID == 0 {
+		sqlRes = sqlRes.Where(c.UniqueWhere())
+	} else {
+		sqlRes = sqlRes.Where("id = ?", c.ID)
+	}
+	err := sqlRes.Find(&files).Error
+	if err != nil {
+		return nil, err
+	} else if len(files) == 0 {
+		return nil, nil
+	} else {
+		return files[0], nil
+	}
+}
diff --git a/dbm-services/common/db-config/internal/repository/model/level_node.go b/dbm-services/common/db-config/internal/repository/model/level_node.go
new file mode 100644
index 0000000000..5d727868a6
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/level_node.go
@@ -0,0 +1,88 @@
+package model
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+
+	"github.com/pkg/errors"
+)
+
+// QueryChildLevelValues gets all direct children of the current level.
+// If childLevelName is not empty, it is validated against the expected child level_name
+func QueryChildLevelValues(r *api.BaseConfigNode, childLevelName string) (childVals []string, err error) {
+	v := ConfigVersionedModel{ // only these 3 values are set to build the condition; the rest is added in the concrete methods
+		Namespace: r.Namespace,
+		ConfType:  r.ConfType,
+		ConfFile:  r.ConfFile,
+	}
+	var levelErr bool
+	switch r.LevelName {
+	case constvar.LevelPlat:
+		if childLevelName == "" || childLevelName == constvar.LevelApp {
+			childVals, err = v.GetAppsAll()
+		} else {
+			levelErr = true
+		}
+	case constvar.LevelApp:
+		if childLevelName == "" || childLevelName == constvar.LevelModule {
+			childVals, err = v.GetModulesByApp(r.BKBizID)
+		} else {
+			levelErr = true
+		}
+	case constvar.LevelModule:
+		if childLevelName == "" || childLevelName == constvar.LevelCluster {
+			childVals, err = v.GetClustersByModule(r.BKBizID, r.LevelValue)
+		} else {
+			levelErr = true
+		}
+	case constvar.LevelCluster:
+		if childLevelName == "" || childLevelName == constvar.LevelInstance {
+			childVals, err = v.GetInstancesByCluster(r.BKBizID, r.LevelValue)
+		} else {
+			levelErr = true
+		}
+	default:
+		return nil, errors.Errorf("fail to get child level for %s %s", r.LevelName, r.LevelValue)
+	}
+	if levelErr {
+		err = errors.Errorf("level error: bk_biz_id=%s, level_name=%s, child_level_name=%s",
+			r.BKBizID, r.LevelName, childLevelName)
+		logger.Errorf(err.Error())
+	}
+	return childVals, errors.Wrap(err, "QueryChildLevelValues")
+}
+
+// QueryParentLevelValue gets the direct parent of the current level
+func QueryParentLevelValue(r *api.BaseConfigNode) (levelInfo map[string]string, err error) {
+	v := ConfigVersionedModel{ // only these 3 values are set to build the condition
+		Namespace: r.Namespace,
+		ConfType:  r.ConfType,
+		ConfFile:  r.ConfFile,
+	}
+	levelInfo = make(map[string]string)
+	var parentVal string
+	switch r.LevelName {
+	case constvar.LevelPlat: // plat's parent is itself
+		parentVal = constvar.BKBizIDForPlat
+		levelInfo[constvar.LevelPlat] = parentVal
+	case constvar.LevelApp:
+		parentVal = constvar.BKBizIDForPlat
+		levelInfo[constvar.LevelPlat] = parentVal
+	case constvar.LevelModule:
+		parentVal = r.BKBizID
+		levelInfo[constvar.LevelApp] = parentVal
+	case constvar.LevelCluster:
+		parentVal, err = v.GetModuleByCluster(r.BKBizID, r.LevelValue)
+		levelInfo[constvar.LevelModule] = parentVal
+	case constvar.LevelInstance:
+		parentVal, _ = v.GetClusterByInstance(r.BKBizID, r.LevelValue)
+		levelInfo[constvar.LevelCluster] = parentVal
+	default:
+		return nil, errors.Errorf("fail to get parent level for %s %s", r.LevelName, r.LevelValue)
+	}
+	if parentVal == "" {
+		return nil, errors.Errorf("cannot find parent level for %s %s", r.LevelName, r.LevelValue)
+	}
+	return levelInfo, errors.Wrap(err, "QueryParentLevelValue")
+}
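+
+// Illustrative usage sketch (values assumed): resolving the direct parent of a
+// cluster-level node.
+//
+//	r := &api.BaseConfigNode{BKBizID: "testapp", Namespace: "MySQL", ConfType: "dbconf",
+//		ConfFile: "MySQL-5.7", LevelName: constvar.LevelCluster, LevelValue: "c1"}
+//	levelInfo, err := QueryParentLevelValue(r) // e.g. {"module": "m10"}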
diff --git a/dbm-services/common/db-config/internal/repository/model/model.go b/dbm-services/common/db-config/internal/repository/model/model.go
new file mode 100644
index 0000000000..d709abb551
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/model.go
@@ -0,0 +1,367 @@
+// Package model holds the gorm models and db access helpers for db-config
+package model
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util"
+	"bk-dbconfig/pkg/util/serialize"
+)
+
+// ConfigFileBaseModel is the common identity of a config file: namespace + conf_type + conf_file.
+type ConfigFileBaseModel struct {
+	ConfType  string `json:"conf_type" gorm:"column:conf_type;type:varchar(60)"`
+	ConfFile  string `json:"conf_file" gorm:"column:conf_file;type:varchar(120)"`
+	Namespace string `json:"namespace" gorm:"column:namespace;type:varchar(120)"`
+}
+
+// ConfigItemBaseModel is the common shape of a single config item.
+type ConfigItemBaseModel struct {
+	ConfName    string `json:"conf_name" gorm:"column:conf_name;type:varchar(120);not null"`
+	ConfValue   string `json:"conf_value" gorm:"column:conf_value;type:varchar(255);not null"`
+	LevelName   string `json:"level_name" gorm:"column:level_name;type:varchar(120);not null"`
+	LevelValue  string `json:"level_value" gorm:"column:level_value;type:varchar(120)"`
+	ExtraInfo   string `json:"extra_info" gorm:"column:extra_info;type:text"`
+	Description string `json:"description" gorm:"column:description;type:varchar(255)"`
+	FlagDisable int8   `json:"flag_disable" gorm:"column:flag_disable;type:tinyint(4);default:0"`
+	FlagLocked  int8   `json:"flag_locked" gorm:"column:flag_locked;type:tinyint(4);default:0"`
+}
+
+// CmdbInfoBase maps table tb_app_module_cluster.
+type CmdbInfoBase struct {
+	ID          uint64 `json:"id" gorm:"column:id;type:int;AUTO_INCREMENT;PRIMARY_KEY"`
+	BKBizID     string `json:"bk_biz_id" gorm:"column:bk_biz_id;type:varchar(120);not null"`
+	Cluster     string `json:"cluster" gorm:"column:cluster;type:varchar(120)"`
+	Module      string `json:"module" gorm:"column:module;type:varchar(120)"`
+	DBZone      string `json:"dbzone" gorm:"column:dbzone;type:varchar(120)"`
+	DBZoneInfo  string `json:"dbzone_info" gorm:"column:dbzone_info;type:varchar(255)"`
+	Service     string `json:"service" gorm:"column:service;type:varchar(120)"`
+	Description string `json:"description" gorm:"column:description;type:varchar(255)"`
+	Status      int8   `json:"status" gorm:"column:status;type:tinyint;default:1"`
+}
+
+// String renders CmdbInfoBase for logging.
+func (c *CmdbInfoBase) String() string {
+	return fmt.Sprintf("CmdbInfoBase{ID:%d BKBizID:%s Cluster:%s Module:%s DBZone:%s Service:%s Description:%s}",
+		c.ID, c.BKBizID, c.Cluster, c.Module, c.DBZone, c.Service, c.Description)
+}
+
+// TableName returns the table name used by GORM.
+func (c *CmdbInfoBase) TableName() string {
+	return "tb_app_module_cluster"
+}
+
+// ConfigVersionedModel maps table tb_config_versioned: one row per generated config version.
+type ConfigVersionedModel struct {
+	ID         uint64 `json:"id" gorm:"column:id;type:int;AUTO_INCREMENT;PRIMARY_KEY"`
+	NodeID     uint64 `json:"node_id" gorm:"column:node_id;type:int"` // level node id
+	BKBizID    string `json:"bk_biz_id" gorm:"column:bk_biz_id;type:varchar(120);not null"`
+	ConfType   string `json:"conf_type" gorm:"column:conf_type;type:varchar(60)"`
+	ConfFile   string `json:"conf_file" gorm:"column:conf_file;type:varchar(120)"`
+	Namespace  string `json:"namespace" gorm:"column:namespace;type:varchar(120)"`
+	LevelName  string `json:"level_name" gorm:"column:level_name;type:varchar(120)"`
+	LevelValue string `json:"level_value" gorm:"column:level_value;type:varchar(120)"`
+	Revision   string `json:"revision" gorm:"column:revision;type:varchar(120)"`
+	ContentStr string `json:"content_str" gorm:"column:content_str;type:text"`
+	// md5 checksum of content_str
+	ContentMd5     string `json:"content_md5" gorm:"column:content_md5;type:varchar(60)"`
+	ContentObj     string `json:"content_obj" gorm:"column:content_obj;type:blob"`
+	IsPublished    int8   `json:"is_published" gorm:"column:is_published;type:tinyint"`
+	IsApplied      int8   `json:"is_applied" gorm:"column:is_applied;type:tinyint"`
+	PreRevision    string `json:"pre_revision" gorm:"column:pre_revision;type:varchar(120)"`
+	RowsAffected   int    `json:"rows_affected" gorm:"column:rows_affected;type:int"`
+	ContentObjDiff string `json:"content_obj_diff" gorm:"column:content_obj_diff;type:blob"`
+	Module         string `json:"module" gorm:"column:module;type:varchar(120)"`
+	Cluster        string `json:"cluster" gorm:"column:cluster;type:varchar(120)"`
+	Description    string `json:"description" gorm:"column:description;type:varchar(255)"`
+	CreatedBy      string `json:"created_by" gorm:"column:created_by;type:varchar(120)"`
+	BaseDatetime
+}
+
+// TableName returns the table name used by GORM.
+func (c ConfigVersionedModel) TableName() string {
+	return "tb_config_versioned"
+}
+
+// UniqueWhere builds the query condition on this table's unique key.
+// revision controls whether a revision condition is added; it only takes effect when c.Revision != "".
+func (c ConfigVersionedModel) UniqueWhere(revision bool) map[string]interface{} {
+	uniqueWhere := map[string]interface{}{
+		"bk_biz_id":   c.BKBizID,
+		"namespace":   c.Namespace,
+		"level_name":  c.LevelName,
+		"level_value": c.LevelValue,
+		"conf_type":   c.ConfType,
+		"conf_file":   c.ConfFile,
+	}
+	if c.Revision != "" && revision {
+		uniqueWhere["revision"] = c.Revision
+	}
+	return uniqueWhere
+}
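+
+// The returned map plugs straight into a GORM query (illustrative sketch,
+// assuming a *gorm.DB named db):
+//
+//	var rows []ConfigVersionedModel
+//	err := db.Where(c.UniqueWhere(true)).Find(&rows).Error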
+
+// ConfigVersioned bundles a versioned row with its unpacked config items and diff.
+type ConfigVersioned struct {
+	Versioned   *ConfigVersionedModel
+	Configs     []*ConfigModel   // unpacked ContentObj
+	ConfigsDiff []*ConfigModelOp // unpacked ContentObjDiff
+}
+
+// Pack serializes the Configs object into the Versioned row (base64).
+func (v *ConfigVersioned) Pack() error {
+	confItems := make([]string, 0) // fileContent
+	for _, c := range v.Configs {
+		confItems = append(confItems, fmt.Sprintf("%s = %s", c.ConfName, c.ConfValue))
+	}
+	sort.Strings(confItems)
+	fileContent := strings.Join(confItems, "\n")
+	v.Versioned.ContentStr = fileContent
+	v.Versioned.ContentMd5 = util.Str2md5(fileContent)
+	if len(v.Configs) > 0 {
+		if contentObj, err := serialize.SerializeToString(v.Configs, true); err != nil {
+			return err
+		} else {
+			v.Versioned.ContentObj = contentObj
+		}
+	}
+	if len(v.ConfigsDiff) > 0 {
+		if diffContentObj, err := serialize.SerializeToString(v.ConfigsDiff, false); err != nil {
+			return err
+		} else {
+			v.Versioned.ContentObjDiff = diffContentObj
+		}
+	}
+	return nil
+}
+
+// UnPack deserializes the Versioned row (base64) back into the Configs object.
+func (v *ConfigVersioned) UnPack() error {
+	if v.Versioned.ContentObj == "" {
+		v.Configs = nil
+	} else if err := serialize.UnSerializeString(v.Versioned.ContentObj, &v.Configs, true); err != nil {
+		return err
+	}
+	if v.Versioned.ContentObjDiff == "" {
+		// v.ConfigsDiff = nil
+	} else if err := serialize.UnSerializeString(v.Versioned.ContentObjDiff, &v.ConfigsDiff, false); err != nil {
+		return err
+	}
+	return nil
+}
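+
+// A Pack/UnPack round trip is expected to be lossless (illustrative sketch;
+// the values are hypothetical):
+//
+//	v := &ConfigVersioned{Versioned: &ConfigVersionedModel{},
+//		Configs: []*ConfigModel{{ConfName: "max_connections", ConfValue: "1000"}}}
+//	_ = v.Pack()   // fills ContentStr ("max_connections = 1000"), ContentMd5, ContentObj
+//	_ = v.UnPack() // restores Configs from ContentObj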
+
+// HandleFlagEncrypt encrypts config values that are flagged for encryption.
+func (v *ConfigVersioned) HandleFlagEncrypt() (err error) {
+	for _, c := range v.ConfigsDiff {
+		if err = c.Config.HandleFlagEncrypt(); err != nil {
+			logger.Errorf("version HandleFlagEncrypt %+v. Error: %w", c.Config, err)
+			return err
+		}
+	}
+	for _, c := range v.Configs {
+		if err = c.HandleFlagEncrypt(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// MayDecrypt decrypts config values when they are stored encrypted.
+func (v *ConfigVersioned) MayDecrypt() (err error) {
+	for _, c := range v.ConfigsDiff {
+		if err = c.Config.MayDecrypt(); err != nil {
+			logger.Errorf("ConfigVersioned diffs MayDecrypt, Error: %w", err)
+			return err
+		}
+	}
+	for _, c := range v.Configs {
+		if err = c.MayDecrypt(); err != nil {
+			logger.Errorf("ConfigVersioned configs MayDecrypt, Error: %w", err)
+			return err
+		}
+	}
+	return nil
+}
+
+// ConfigFileDefModel maps table tb_config_file_def: the definition of a config file.
+type ConfigFileDefModel struct {
+	ID                uint64 `json:"id" gorm:"column:id;type:int;AUTO_INCREMENT;PRIMARY_KEY"`
+	Namespace         string `json:"namespace" gorm:"column:namespace;type:varchar(120)"`
+	ConfType          string `json:"conf_type" gorm:"column:conf_type;type:varchar(60)"`
+	ConfTypeLC        string `json:"conf_type_lc" gorm:"column:conf_type_lc;type:varchar(60)"`
+	ConfFile          string `json:"conf_file" gorm:"column:conf_file;type:varchar(120)"`
+	ConfFileLC        string `json:"conf_file_lc" gorm:"column:conf_file_lc;type:varchar(120)"`
+	NamespaceInfo     string `json:"namespace_info" gorm:"column:namespace_info;type:varchar(120)"`
+	LevelNames        string `json:"level_names" gorm:"column:level_names;type:varchar(255);not null"`
+	LevelVersioned    string `json:"level_versioned" gorm:"column:level_versioned;type:varchar(120)"`
+	VersionKeepLimit  int    `json:"version_keep_limit" gorm:"column:version_keep_limit;type:int;not null"`
+	VersionKeepDays   int    `json:"version_keep_days" gorm:"column:version_keep_days;type:int;not null"`
+	ConfNameValidate  int8   `json:"conf_name_validate" gorm:"column:conf_name_validate;type:tinyint;not null"`
+	ConfValueValidate int8   `json:"conf_value_validate" gorm:"column:conf_value_validate;type:tinyint;not null"`
+	// return values strictly typed according to the defined value_type
+	ValueTypeStrict int8   `json:"value_type_strict" gorm:"column:value_type_strict;type:tinyint;not null"`
+	ConfNameOrder   int8   `json:"conf_name_order" gorm:"column:conf_name_order;type:tinyint;not null"`
+	Description     string `json:"description" gorm:"column:description;type:varchar(255)"`
+	UpdatedBy       string `json:"updated_by" gorm:"column:updated_by;type:varchar(120)"`
+	BaseDatetime
+
+	LevelNameList []string `json:"level_name_list" gorm:"-"`
+}
+
+// TableName returns the table name used by GORM.
+func (c ConfigFileDefModel) TableName() string {
+	return "tb_config_file_def"
+}
+
+// UniqueWhere builds the query condition on this table's unique key.
+func (c ConfigFileDefModel) UniqueWhere() map[string]interface{} {
+	uniqueWhere := map[string]interface{}{
+		"namespace": c.Namespace,
+		"conf_type": c.ConfType,
+		"conf_file": c.ConfFile,
+	}
+	return uniqueWhere
+}
+
+// ConfigNameDefModel maps table tb_config_name_def: the definition of a config item.
+type ConfigNameDefModel struct {
+	ID           uint64 `json:"id" gorm:"column:id;type:int;AUTO_INCREMENT;PRIMARY_KEY"`
+	Namespace    string `json:"namespace" gorm:"column:namespace;type:varchar(120)"`
+	ConfType     string `json:"conf_type" gorm:"column:conf_type;type:varchar(60)"`
+	ConfFile     string `json:"conf_file" gorm:"column:conf_file;type:varchar(120)"`
+	ConfName     string `json:"conf_name" gorm:"column:conf_name;type:varchar(120);not null"`
+	ConfNameLC   string `json:"conf_name_lc" gorm:"column:conf_name_lc;type:varchar(120);not null"`
+	ValueType    string `json:"value_type" gorm:"column:value_type;type:varchar(120)"`
+	ValueDefault string `json:"value_default" gorm:"column:value_default;type:varchar(120)"`
+	ValueAllowed string `json:"value_allowed" gorm:"column:value_allowed;type:varchar(120)"`
+	ValueTypeSub string `json:"value_type_sub" gorm:"column:value_type_sub;type:varchar(60)"`
+	FlagLocked   int8   `json:"flag_locked" gorm:"column:flag_locked;type:tinyint"`
+	FlagEncrypt  int8   `json:"flag_encrypt" gorm:"column:flag_encrypt;type:tinyint"`
+	// 0:enable, 1:disable
+	FlagDisable int8 `json:"flag_disable" gorm:"column:flag_disable;type:tinyint"`
+	// 1: public config shown explicitly; 0: not shown explicitly among the full config items of a file; 2: public config shown explicitly but read-only, i.e. visible only when rendering
+	FlagStatus int8 `json:"flag_status" gorm:"column:flag_status;type:tinyint"`
+
+	NeedRestart  int8   `json:"need_restart" gorm:"column:need_restart;type:tinyint"`
+	ValueFormula string `json:"value_formula" gorm:"column:value_formula;type:varchar(120)"`
+	OrderIndex   int    `json:"order_index" gorm:"column:order_index;type:int"`
+	SinceVersion string `json:"since_version" gorm:"column:since_version;type:varchar(120)"`
+	Description  string `json:"description" gorm:"column:description;type:text"`
+	Stage        int8   `json:"stage" gorm:"column:stage;type:tinyint"`
+	BaseDatetime
+}
+
+// BaseAutoTimeModel holds auto-managed created_at/updated_at columns.
+type BaseAutoTimeModel struct {
+	CreatedAt string `json:"created_at" gorm:"->;column:created_at;type:varchar(30)"`
+	UpdatedAt string `json:"updated_at" gorm:"->;column:updated_at;type:varchar(30)"`
+}
+
+// TableName returns the table name used by GORM.
+func (c ConfigNameDefModel) TableName() string {
+	return "tb_config_name_def"
+}
+
+// UniqueWhere builds the query condition on this table's unique key.
+func (c ConfigNameDefModel) UniqueWhere() map[string]interface{} {
+	uniqueWhere := map[string]interface{}{
+		"namespace": c.Namespace,
+		"conf_type": c.ConfType,
+		"conf_file": c.ConfFile,
+		"conf_name": c.ConfName,
+	}
+	return uniqueWhere
+}
+
+// IsReadOnly reports whether the config item is read-only (flag_status == 2).
+func (c ConfigNameDefModel) IsReadOnly() bool {
+	return c.FlagStatus == 2
+}
+
+// IsFormula reports whether the config value is computed by a formula.
+func (c ConfigNameDefModel) IsFormula() bool {
+	return c.ValueFormula != ""
+}
+
+// ConfigLevelDefModel maps table tb_config_level_def.
+type ConfigLevelDefModel struct {
+	LevelName     string `json:"level_name" gorm:"column:level_name;type:varchar(120);not null"`
+	LevelPriority int    `json:"level_priority" gorm:"column:level_priority;type:int;not null"`
+	LevelNameCN   string `json:"level_name_cn" gorm:"column:level_name_cn;type:varchar(120);not null"`
+}
+
+// TableName returns the table name used by GORM.
+func (c ConfigLevelDefModel) TableName() string {
+	return "tb_config_level_def"
+}
+
+// ConfigOplogModel maps table tb_config_oplog (fields not yet defined).
+type ConfigOplogModel struct {
+}
+
+// TableName returns the table name used by GORM.
+func (c ConfigOplogModel) TableName() string {
+	return "tb_config_oplog"
+}
+
+// ConfigTaskModel maps table tb_config_task (fields not yet defined).
+type ConfigTaskModel struct {
+}
+
+// TableName returns the table name used by GORM.
+func (c ConfigTaskModel) TableName() string {
+	return "tb_config_task"
+}
+
+// ConfigFileNodeModel maps table tb_config_file_node: a config file attached to a level node.
+type ConfigFileNodeModel struct {
+	ID          uint64 `json:"id" gorm:"column:id;type:int;AUTO_INCREMENT;PRIMARY_KEY"`
+	BKBizID     string `json:"bk_biz_id" gorm:"column:bk_biz_id;type:varchar(120);not null"`
+	Namespace   string `json:"namespace" gorm:"column:namespace;type:varchar(120)"`
+	LevelName   string `json:"level_name" gorm:"column:level_name;type:varchar(120)"`
+	LevelValue  string `json:"level_value" gorm:"column:level_value;type:varchar(120)"`
+	ConfType    string `json:"conf_type" gorm:"column:conf_type;type:varchar(60)"`
+	ConfTypeLC  string `json:"conf_type_lc" gorm:"column:conf_type_lc;type:varchar(60)"`
+	ConfFile    string `json:"conf_file" gorm:"column:conf_file;type:varchar(120)"`
+	ConfFileLC  string `json:"conf_file_lc" gorm:"column:conf_file_lc;type:varchar(120)"`
+	Description string `json:"description" gorm:"column:description;type:varchar(255)"`
+	UpdatedBy   string `json:"updated_by" gorm:"column:updated_by;type:varchar(120)"`
+	BaseDatetime
+}
+
+// TableName returns the table name used by GORM.
+func (c ConfigFileNodeModel) TableName() string {
+	return "tb_config_file_node"
+}
+
+// UniqueWhere builds the query condition on this table's unique key.
+func (c ConfigFileNodeModel) UniqueWhere() map[string]interface{} {
+	uniqueWhere := map[string]interface{}{
+		"bk_biz_id":   c.BKBizID,
+		"namespace":   c.Namespace,
+		"conf_type":   c.ConfType,
+		"conf_file":   c.ConfFile,
+		"level_name":  c.LevelName,
+		"level_value": c.LevelValue,
+	}
+	return uniqueWhere
+}
diff --git a/dbm-services/common/db-config/internal/repository/model/model_config_node.go b/dbm-services/common/db-config/internal/repository/model/model_config_node.go
new file mode 100644
index 0000000000..a019f4fb2f
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/model_config_node.go
@@ -0,0 +1,76 @@
+package model
+
+import "fmt"
+
+// ConfigModel maps table tb_config_node: one config item at a level node.
+type ConfigModel struct {
+	ID          uint64 `json:"id" gorm:"column:id;type:int;AUTO_INCREMENT;PRIMARY_KEY"`
+	NodeID      uint64 `json:"node_id" gorm:"column:node_id;type:int"`
+	BKBizID     string `json:"bk_biz_id" gorm:"column:bk_biz_id;type:varchar(120);not null"`
+	Namespace   string `json:"namespace" gorm:"column:namespace;type:varchar(120)"`
+	ConfType    string `json:"conf_type" gorm:"column:conf_type;type:varchar(60)"`
+	ConfFile    string `json:"conf_file" gorm:"column:conf_file;type:varchar(120)"`
+	ConfName    string `json:"conf_name" gorm:"column:conf_name;type:varchar(120);not null"`
+	ConfValue   string `json:"conf_value" gorm:"column:conf_value;type:varchar(255);not null"`
+	LevelName   string `json:"level_name" gorm:"column:level_name;type:varchar(120);not null"`
+	LevelValue  string `json:"level_value" gorm:"column:level_value;type:varchar(120)"`
+	Description string `json:"description" gorm:"column:description;type:varchar(255)"`
+	FlagDisable int8   `json:"flag_disable" gorm:"column:flag_disable;type:tinyint(4);default:0"`
+	FlagLocked  int8   `json:"flag_locked" gorm:"column:flag_locked;type:tinyint(4);default:0"`
+	// the published revision in which this modification was made
+	UpdatedRevision string `json:"updated_revision" gorm:"column:updated_revision;type:varchar(120)"`
+	Stage           int8   `json:"stage" gorm:"column:stage;type:tinyint"`
+	BaseDatetime
+}
+
+// TableName returns the table name used by GORM.
+func (c *ConfigModel) TableName() string {
+	return "tb_config_node"
+}
+
+// String renders ConfigModel for logging.
+func (c *ConfigModel) String() string {
+	return fmt.Sprintf(
+		"ConfigModel{ID:%d BKBizID:%s Namespace:%s ConfType:%s ConfFile:%s ConfName:%s ConfValue:%s LevelName:%s LevelValue:%s Description:%s FlagDisable:%d TimeCreated:%s TimeUpdated:%s}",
+		c.ID, c.BKBizID, c.Namespace, c.ConfType, c.ConfFile, c.ConfName, c.ConfValue, c.LevelName, c.LevelValue,
+		c.Description, c.FlagDisable, c.CreatedAt, c.UpdatedAt)
+}
+
+// UniqueWhere builds the query condition on this table's unique key.
+func (c *ConfigModel) UniqueWhere() map[string]interface{} {
+	uniqueWhere := map[string]interface{}{
+		"bk_biz_id":   c.BKBizID,
+		"namespace":   c.Namespace,
+		"conf_file":   c.ConfFile,
+		"conf_name":   c.ConfName,
+		"level_name":  c.LevelName,
+		"level_value": c.LevelValue,
+		// "conf_type": c.ConfType,
+	}
+	return uniqueWhere
+}
+
+// ConfigModelOp wraps a ConfigModel together with the operation applied to it.
+type ConfigModelOp struct {
+	Config *ConfigModel
+	// operation type on the config model: add, delete, update, delete_ref
+	OPType string `json:"op_type"`
+}
+
+// ConfigModelVOp will replace ConfigModelOp
+type ConfigModelVOp struct {
+	Config *ConfigModelView
+	// operation type on the config model: add, delete, update, delete_ref
+	OPType string `json:"op_type"`
+}
+
+// ConfigModelView maps view v_tb_config_node.
+type ConfigModelView struct {
+	ConfigModel
+	Cluster string `json:"cluster" gorm:"column:cluster;type:varchar(120)"`
+	Module  string `json:"module" gorm:"column:module;type:varchar(120)"`
+	// todo intended to replace Cluster/Module; stores this item's upper-level info
+	UpLevelInfo map[string]string `json:"up_level_info"`
+}
diff --git a/dbm-services/common/db-config/internal/repository/model/node_task.go b/dbm-services/common/db-config/internal/repository/model/node_task.go
new file mode 100644
index 0000000000..eb39739ca6
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/model/node_task.go
@@ -0,0 +1,88 @@
+package model
+
+import (
+	"gorm.io/gorm"
+)
+
+// NodeTaskModel maps table tb_config_node_task: pending apply tasks for a level node.
+type NodeTaskModel struct {
+	ID uint64 `json:"id" gorm:"column:id;type:int;AUTO_INCREMENT;PRIMARY_KEY"`
+	/*
+	   BKBizID        string `json:"bk_biz_id" gorm:"column:bk_biz_id;type:varchar(120);not null"`
+	   ConfType       string `json:"conf_type" gorm:"column:conf_type;type:varchar(60)"`
+	   ConfFile       string `json:"conf_file" gorm:"column:conf_file;type:varchar(120)"`
+	   Namespace      string `json:"namespace" gorm:"column:namespace;type:varchar(120)"`
+	   LevelName      string `json:"level_name" gorm:"column:level_name;type:varchar(120)"`
+	   LevelValue     string `json:"level_value" gorm:"column:level_value;type:varchar(120)"`
+	   IsPublished    int8   `json:"is_published" gorm:"column:is_published;type:tinyint"`
+	   IsApplied      int8   `json:"is_applied" gorm:"column:is_applied;type:tinyint"`
+	   ContentObjDiff string `json:"content_obj_diff" gorm:"column:content_obj_diff;type:blob"`
+	   Module         string `json:"module" gorm:"column:module;type:varchar(120)"`
+	   Cluster        string `json:"cluster" gorm:"column:cluster;type varchar(120)"`
+	   Description    string `json:"description" gorm:"column:description;type:varchar(255)"`
+	   CreatedBy      string `json:"created_by" gorm:"column:created_by;type:varchar(120)"`
+	*/
+
+	NodeID          uint64 `json:"node_id" gorm:"column:node_id;type:int"`
+	VersionID       uint64 `json:"version_id" gorm:"column:version_id;type:int"`
+	Revision        string `json:"revision" gorm:"column:revision;type:varchar(120)"`
+	OPType          string `json:"op_type" gorm:"column:op_type;type:varchar(120)"`
+	UpdatedRevision string `json:"updated_revision" gorm:"column:updated_revision;type:varchar(120)"`
+	ConfName        string `json:"conf_name" gorm:"column:conf_name;type:varchar(120)"`
+	ConfValue       string `json:"conf_value" gorm:"column:conf_value;type:varchar(120)"`
+	ValueBefore     string `json:"value_before" gorm:"column:value_before;type:varchar(120)"`
+	Stage           int8   `json:"stage" gorm:"column:stage;type:tinyint"`
+}
+
+// TableName returns the table name used by GORM.
+func (c NodeTaskModel) TableName() string {
+	return "tb_config_node_task"
+}
+
+// BatchSaveNodeTask saves the given tasks in one batch.
+func BatchSaveNodeTask(db *gorm.DB, tasks []*NodeTaskModel) error {
+	return db.Model(&NodeTaskModel{}).Save(tasks).Error
+}
+
+// DeleteNodeTask deletes all tasks of one node.
+func DeleteNodeTask(db *gorm.DB, nodeID uint64) error {
+	return db.Where("node_id = ?", nodeID).Delete(&NodeTaskModel{}).Error
+}
+
+// UpdateNodeTaskStage sets the stage of the given conf_names for one node.
+func UpdateNodeTaskStage(db *gorm.DB, nodeID uint64, confName []string, stage int) error {
+	where := map[string]interface{}{
+		"node_id":   nodeID,
+		"conf_name": confName,
+	}
+	return db.Model(&NodeTaskModel{}).Where(where).Update("stage", stage).Error
+}
+
+// GenTaskForApply replaces a node's task list inside one transaction.
+func GenTaskForApply(db *gorm.DB, nodeID uint64, tasks []*NodeTaskModel) (err error) {
+	err = db.Transaction(func(tx *gorm.DB) error {
+		if err = DeleteNodeTask(tx, nodeID); err != nil {
+			return err
+		}
+		if err = BatchSaveNodeTask(tx, tasks); err != nil {
+			return err
+		}
+		return nil
+	})
+	return err
+}
+
+// UpdateStage updates stage for the given conf_names matching the receiver's fields.
+func (c *NodeTaskModel) UpdateStage(db *gorm.DB, confNames []string, stage int) error {
+	return db.Model(&NodeTaskModel{}).Where("conf_name in ?", confNames).Where(c).Update("stage", stage).Error
+}
+
+// QueryTasksByNode lists the tasks of c.NodeID; a node_id is assumed to carry only one revision.
+func (c *NodeTaskModel) QueryTasksByNode(db *gorm.DB) ([]*NodeTaskModel, error) {
+	var tasks []*NodeTaskModel
+	err := db.Model(&NodeTaskModel{}).Where("node_id = ?", c.NodeID).Find(&tasks).Error
+	return tasks, err
+}
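+
+// A minimal flow sketch (illustrative only; IDs and names are hypothetical):
+// regenerate the task list for a node, then mark one item applied.
+//
+//	tasks := []*NodeTaskModel{{NodeID: 7, Revision: "v_20230529", ConfName: "max_connections", ConfValue: "2000", OPType: "update"}}
+//	_ = GenTaskForApply(DB.Self, 7, tasks)
+//	_ = UpdateNodeTaskStage(DB.Self, 7, []string{"max_connections"}, 2)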
diff --git a/dbm-services/common/db-config/internal/repository/repository.go b/dbm-services/common/db-config/internal/repository/repository.go
new file mode 100644
index 0000000000..944fe37732
--- /dev/null
+++ b/dbm-services/common/db-config/internal/repository/repository.go
@@ -0,0 +1,2 @@
+// Package repository TODO
+package repository
diff --git a/dbm-services/common/db-config/internal/router/router.go b/dbm-services/common/db-config/internal/router/router.go
new file mode 100644
index 0000000000..5d44454c05
--- /dev/null
+++ b/dbm-services/common/db-config/internal/router/router.go
@@ -0,0 +1,27 @@
+// Package router TODO
+package router
+
+import (
+	"bk-dbconfig/internal/handler/simple"
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+)
+
+// RegisterRestRoutes mounts the simple-config REST routes under /bkconfig/v1/.
+func RegisterRestRoutes(engine *gin.Engine) {
+	simpleConfig := simple.Config{}
+	RegisterRoutes(engine, "/bkconfig/v1/", simpleConfig.Routes())
+
+	/*
+	   userConfig := user.UserConfig{}
+	   RegisterRoutes(engine, "/bkconfig/v1/", userConfig.Routes())
+	*/
+}
+
+// RegisterPing registers the /ping health-check endpoint.
+func RegisterPing(engine *gin.Engine) {
+	engine.GET("/ping", func(c *gin.Context) {
+		c.String(http.StatusOK, "ok")
+	})
+}
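+
+// A typical wiring sketch (illustrative; the real entry point lives elsewhere,
+// and the listen address is hypothetical):
+//
+//	engine := gin.Default()
+//	RegisterPing(engine)
+//	RegisterRestRoutes(engine)
+//	_ = engine.Run(":8080")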
diff --git a/dbm-services/common/db-config/internal/router/router_restapi.go b/dbm-services/common/db-config/internal/router/router_restapi.go
new file mode 100644
index 0000000000..b828eb2071
--- /dev/null
+++ b/dbm-services/common/db-config/internal/router/router_restapi.go
@@ -0,0 +1,55 @@
+package router
+
+import (
+	"bk-dbconfig/docs"
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+)
+
+// RegisterRoutesSwagger serves the embedded swagger docs under /docs.
+func RegisterRoutesSwagger(r *gin.Engine) {
+	r.StaticFS("/docs", http.FS(docs.SwaggerDocs)) // embed
+	// r.Static("/swagger", "./assets/swagger-ui")    // not embed
+}
+
+// RegisterRoutes registers the given routes under a common group prefix.
+func RegisterRoutes(router *gin.Engine, group string, routesInfo []*gin.RouteInfo) {
+	r := router.Group(group)
+	for _, route := range routesInfo {
+		r.Handle(route.Method, route.Path, route.HandlerFunc)
+	}
+}
+
+/*
+// not used
+func RegisterRoutesSimpleConfig(router *gin.Engine, c *simple.Config) {
+    v1Router := router.Group("/bkconfig/v1/")
+    {
+        // config_file
+        v1Router.GET("/conffile/get", c.GetVersionedConfigFile)
+        v1Router.GET("/conffile/list", c.ListConfigFileVersions)
+        v1Router.GET("/conffile/query", c.GenerateAndQueryConfig)
+        v1Router.POST("/conffile/generate", c.GenerateConfigFile)
+        v1Router.POST("/conffile/publish", c.PublishConfigFile)
+
+        // config_meta
+        v1Router.GET("/conftype/query", c.QueryConfigTypeInfo)
+        v1Router.GET("/confname/query", c.QueryConfigTypeNames)
+        v1Router.GET("/metafield/query", c.GetConfigMetaField)
+
+        // cmdb
+        v1Router.GET("/cmdb/module/query", c.GetConfigMetaField)
+        v1Router.GET("/cmdb/module/list", c.ListModuleClusters)
+        v1Router.POST("/cmdb/module/upsert", c.UpdateModuleClusters)
+
+        // config_item
+        v1Router.GET("/confitem/list", c.GetConfigList)
+        v1Router.PUT("/confitem/upsert", c.CreateOrUpdateConfig)
+        v1Router.POST("/confitem/upsert", c.CreateOrUpdateConfig)
+        v1Router.POST("/confitem/commit", c.CreateOrUpdateConfig)
+        v1Router.POST("/confitem/rollback", c.CreateOrUpdateConfig)
+    }
+
+}
+*/
diff --git a/dbm-services/common/db-config/internal/service/configcheck/README.MD b/dbm-services/common/db-config/internal/service/configcheck/README.MD
new file mode 100644
index 0000000000..8d6e5b43c2
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/configcheck/README.MD
@@ -0,0 +1,2 @@
+
+Scheduled configuration checks
\ No newline at end of file
diff --git a/dbm-services/common/db-config/internal/service/configcheck/config_check.go b/dbm-services/common/db-config/internal/service/configcheck/config_check.go
new file mode 100644
index 0000000000..6586fb7798
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/configcheck/config_check.go
@@ -0,0 +1,2 @@
+// Package configcheck TODO
+package configcheck
diff --git a/dbm-services/common/db-config/internal/service/configcheck/config_file.go b/dbm-services/common/db-config/internal/service/configcheck/config_file.go
new file mode 100644
index 0000000000..67a1426d8e
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/configcheck/config_file.go
@@ -0,0 +1 @@
+package configcheck
diff --git a/dbm-services/common/db-config/internal/service/dbha/batchget.go b/dbm-services/common/db-config/internal/service/dbha/batchget.go
new file mode 100644
index 0000000000..c115b095f0
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/dbha/batchget.go
@@ -0,0 +1,35 @@
+package dbha
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/pkg/util"
+)
+
+// BatchGetConfigItem fetches the given conf_names for each level_value and groups them by level_value.
+func BatchGetConfigItem(r *api.BatchGetConfigItemReq) (resp *api.BatchGetConfigItemResp, err error) {
+	configs := make([]*model.ConfigModel, 0)
+	confNames := util.SplitAnyRuneTrim(r.ConfName, ",")
+	if configs, err = model.BatchGetConfigItem(r, confNames); err != nil {
+		return nil, err
+	}
+	resp = &api.BatchGetConfigItemResp{
+		BaseConfFileDef: api.BaseConfFileDef{
+			Namespace: r.Namespace,
+			ConfType:  r.ConfType,
+			ConfFile:  r.ConfFile,
+		},
+		LevelName: r.LevelName,
+		ConfName:  r.ConfName,
+	}
+	var content = map[string]map[string]string{}
+	for _, conf := range configs {
+		if _, ok := content[conf.LevelValue]; ok {
+			content[conf.LevelValue][conf.ConfName] = conf.ConfValue
+		} else {
+			content[conf.LevelValue] = map[string]string{conf.ConfName: conf.ConfValue}
+		}
+	}
+	resp.Content = content
+	return resp, nil
+}
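+
+// The grouped response content has the shape {level_value: {conf_name: conf_value}},
+// for example (illustrative, hypothetical values):
+//
+//	{"cluster-a": {"max_connections": "2000", "binlog_format": "ROW"},
+//	 "cluster-b": {"max_connections": "1000"}}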
diff --git a/dbm-services/common/db-config/internal/service/dbha/dbha.go b/dbm-services/common/db-config/internal/service/dbha/dbha.go
new file mode 100644
index 0000000000..1f40c68df3
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/dbha/dbha.go
@@ -0,0 +1,2 @@
+// Package dbha TODO
+package dbha
diff --git a/dbm-services/common/db-config/internal/service/simpleconfig/config_apply.go b/dbm-services/common/db-config/internal/service/simpleconfig/config_apply.go
new file mode 100644
index 0000000000..4b12854a4d
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/simpleconfig/config_apply.go
@@ -0,0 +1,353 @@
+package simpleconfig
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/pkg/errno"
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util"
+	"fmt"
+
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+)
+
+// GetConfigsToApply returns the difference between two versions:
+// it compares the published (to-apply) version with the applied one.
+func GetConfigsToApply(db *gorm.DB, req api.ApplyConfigInfoReq) (*api.ApplyConfigInfoResp, error) {
+	// determine whether this is a versioned-level config
+	version := model.ConfigVersionedModel{
+		Namespace:  req.Namespace,
+		ConfType:   req.ConfType,
+		ConfFile:   req.ConfFile,
+		LevelName:  req.LevelName,
+		LevelValue: req.LevelValue,
+		BKBizID:    req.BKBizID,
+	}
+	version.IsPublished = 0
+	applied, err := version.GetVersionApplied(db)
+	if err != nil {
+		logger.Warn("applied version not found %+v", version)
+	}
+	version.IsApplied = 0
+	published, err := version.GetVersionPublished(db)
+	if err != nil {
+		return nil, errors.Wrap(err, "no published config pending apply was found")
+	}
+
+	if applied != nil && applied.Versioned.ID == published.Versioned.ID {
+		return nil, errors.New("the latest version has already been applied to the target")
+	}
+	resp := &api.ApplyConfigInfoResp{
+		ConfigsDiff:     map[string]*api.ApplyConfigItem{},
+		RevisionToApply: published.Versioned.Revision,
+		VersionID:       published.Versioned.ID,
+		NodeID:          published.Versioned.NodeID,
+	}
+	if applied == nil {
+		// nothing has ever been applied; this is the first publish
+		return resp, nil
+	} else {
+		resp.RevisionBefore = applied.Versioned.Revision
+		for _, c := range applied.Configs {
+			resp.ConfigsDiff[c.ConfName] = &api.ApplyConfigItem{
+				ValueBefore: c.ConfValue,
+			}
+		}
+	}
+
+	for _, c := range published.Configs {
+		newItem := &api.ApplyConfigItem{
+			ConfValue:       c.ConfValue,
+			UpdatedRevision: c.UpdatedRevision,
+			OPType:          constvar.OPTypeUpdate,
+			LevelNameFrom:   c.LevelName,
+			FlagLocked:      c.FlagLocked,
+		}
+		if val, ok := resp.ConfigsDiff[c.ConfName]; ok {
+			newItem.ValueBefore = val.ValueBefore
+		} else {
+			// no value_before
+			newItem.OPType = constvar.OPTypeAdd
+		}
+		resp.ConfigsDiff[c.ConfName] = newItem
+	}
+
+	nTask := model.NodeTaskModel{NodeID: published.Versioned.NodeID}
+	confNamesApplied := make(map[string]string)
+	if tasks, err := nTask.QueryTasksByNode(db); err != nil {
+		return nil, err
+	} else {
+		for _, t := range tasks {
+			if t.Stage == 2 { // stage == 2 means applied (is stage deprecated?)
+				confNamesApplied[t.ConfName] = t.ConfValue
+			}
+		}
+	}
+
+	configsDiffNew := make(map[string]*api.ApplyConfigItem)
+	for confName, diff := range resp.ConfigsDiff {
+		if diff.ConfValue == diff.ValueBefore {
+			continue
+		} else if util.ConfValueIsPlaceHolder(diff.ConfValue) {
+			// the new value is computed from a placeholder variable, skip it
+			logger.Warn("new conf_value is a variable %s: %s", confName, diff.ConfValue)
+			continue
+		}
+		if _, ok := confNamesApplied[confName]; ok {
+			diff.Applied = 1 // already applied
+		}
+		configsDiffNew[confName] = diff
+	}
+	resp.ConfigsDiff = configsDiffNew
+
+	if resp.NodeID == 0 {
+		return nil, errors.New("illegal node_id")
+	} else if resp.RevisionToApply == "" {
+		return nil, errors.New("illegal revision")
+	}
+	return resp, nil
+}
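+
+// The resulting ConfigsDiff maps conf_name to its pending change, e.g.
+// (illustrative, hypothetical values):
+//
+//	{"max_connections": {ConfValue: "2000", ValueBefore: "1000", OPType: constvar.OPTypeUpdate},
+//	 "binlog_format":   {ConfValue: "ROW", OPType: constvar.OPTypeAdd}}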
+
+// GetVersionStat batch-fetches the config version status of the given level nodes.
+func GetVersionStat(req api.VersionStatReq) (*api.VersionStatResp, error) {
+	type objStatus struct {
+		published string
+		applied   string
+		status    int
+	}
+	version := model.ConfigVersionedModel{
+		Namespace: req.Namespace,
+		ConfType:  req.ConfType,
+		ConfFile:  req.ConfFile,
+		LevelName: req.LevelName,
+		BKBizID:   req.BKBizID,
+	}
+	applied, err := version.BatchGetApplied(req.LevelValues, model.DB.Self)
+	if err != nil {
+		logger.Warn("applied version not found %+v", version)
+	}
+	published, err := version.BatchGetPublished(req.LevelValues, model.DB.Self)
+	if err != nil {
+		return nil, errors.Wrap(err, "no published config pending apply was found")
+	}
+
+	// todo the "saved but not yet published (and needing publish)" case is not checked; it would require setting is_published=2 when the version is generated
+	// {"cluster_id1": {"published":"xx", "applied":"yy"}, "cluster_id2":{}}
+	objMap := make(map[string]*objStatus)
+	for _, ver := range applied {
+		objMap[ver.LevelValue] = &objStatus{}
+		objMap[ver.LevelValue].applied = ver.Revision
+	}
+	for _, ver := range published {
+		if _, ok := objMap[ver.LevelValue]; !ok {
+			objMap[ver.LevelValue] = &objStatus{}
+		}
+		objMap[ver.LevelValue].published = ver.Revision
+	}
+	resp := &api.VersionStatResp{
+		LevelValues: map[string][]int{},
+	}
+	for _, obj := range req.LevelValues {
+		resp.LevelValues[obj] = make([]int, 0, 1)
+		if val, ok := objMap[obj]; ok {
+			if val.published == "" {
+				objMap[obj].status = 3
+			} else if val.published == "" {
+				objMap[obj].status = 4
+			} else if val.published == val.applied {
+				objMap[obj].status = 1
+			} else if val.published != val.applied {
+				objMap[obj].status = 2
+			}
+			resp.LevelValues[obj] = []int{objMap[obj].status}
+		} else {
+			resp.LevelValues[obj] = []int{3, 4}
+		}
+	}
+	resp.StatusInfo = api.StatusMap
+	return resp, nil
+}
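+
+// Status codes per level_value as computed above (a reading aid, not new
+// behavior): 1 = published and applied match, 2 = they differ, 3 = never
+// published, 4 = published but never applied; unknown objects report [3, 4].
+// A response might look like (illustrative):
+//
+//	{"cluster-a": [1], "cluster-b": [2], "cluster-c": [3, 4]}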
+
+// UpdateVersionApplied marks a version as applied, once every change under
+// that version has taken effect. (not yet implemented)
+func UpdateVersionApplied() error {
+	return nil
+}
+
+// ApplyVersionLevelNode applies a version.
+// For level_config, applying means: find all direct children, delete configs that conflict with locked items, then generate (but not apply) versions for them.
+func (p *PublishConfig) ApplyVersionLevelNode(db *gorm.DB, req *api.VersionApplyReq) error {
+	// the generated version can either be published (is_published=1) or left for the child to publish itself (is_published=2)
+	if model.IsConfigLevelEntityVersioned(req.Namespace, req.ConfType, req.ConfFile, req.LevelName) {
+		// for versioned_config, applying a version must be driven by an external command (which may change part of the config items)
+		// the version only becomes applied once every changed config item has been applied
+		return errno.ErrOnlyLevelConfigAllowed
+	}
+	// todo in the future these two steps could be fetched from dbmeta instead: e.g. all modules of an app, or all clusters of app=1,module=2
+	childLevelName := model.GetChildLevelStraight(req.Namespace, req.ConfType, req.ConfFile,
+		req.LevelName) // todo does this reconnect and break the ongoing transaction?
+	// childLevelValues := model.QueryLevelValuesWithName(req.Namespace, req.ConfType, req.ConfFile, req.BKBizID, childLevelName)
+	childLevelValues, err := model.QueryChildLevelValues(&req.BaseConfigNode, childLevelName)
+	if err != nil {
+		return err
+	}
+	levelNode := api.BaseConfigNode{
+		BKBizIDDef: api.BKBizIDDef{BKBizID: req.BKBizID},
+		BaseConfFileDef: api.BaseConfFileDef{
+			Namespace: req.Namespace,
+			ConfType:  req.ConfType,
+			ConfFile:  req.ConfFile,
+		},
+	}
+	namesToDel := make([]string, 0) // child conf_names to delete
+	if p.ConfigsLocked != nil {
+		for _, config := range p.ConfigsLocked {
+			namesToDel = append(namesToDel, config.ConfName)
+		}
+	} else {
+		// get the diff between applied and to-be-applied (= published)
+		levelNode.LevelName = req.LevelName
+		levelNode.LevelValue = req.LevelValue
+		applyInfo := api.ApplyConfigInfoReq{BaseConfigNode: levelNode}
+		diffInfo, err := GetConfigsToApply(db, applyInfo)
+		if err != nil {
+			return err
+		}
+		for _, c := range diffInfo.ConfigsDiff {
+			if c.FlagLocked == 1 && (c.OPType == constvar.OPTypeAdd || c.OPType == constvar.OPTypeUpdate) {
+				namesToDel = append(namesToDel, c.ConfName)
+			}
+		}
+	}
+	for _, child := range childLevelValues {
+		levelNode.LevelName = childLevelName
+		levelNode.LevelValue = child
+		if req.BKBizID == constvar.BKBizIDForPlat {
+			levelNode.BKBizID = child // childLevelName must be app here
+		}
+		options := api.QueryConfigOptions{
+			Generate:              false, // do not apply
+			FromNodeConfigApplied: true,
+			Description: fmt.Sprintf("generated by apply up level_name=%s, level_value=%s",
+				req.LevelName, req.LevelValue),
+			InheritFrom: constvar.LevelPlat,
+			View:        constvar.ViewMerge,
+		}
+		upLevelInfo := api.UpLevelInfo{
+			LevelInfo: map[string]string{
+				req.LevelName: req.LevelValue,
+			},
+		}
+		logger.Info("ApplyVersionLevelNode: %+v", levelNode)
+
+		// delete direct-child configs that conflict with upper-level locked items
+		if err := model.QueryAndDeleteConfig(db, &levelNode, namesToDel); err != nil {
+			return err
+		}
+		// publishing the direct children is what applying means here
+		if err := GenerateAndPublish(db, &levelNode, &options, &upLevelInfo, p.Revision, nil); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// NodeTaskApplyItem resolves the node_id when absent, then marks the given conf_names applied.
+func NodeTaskApplyItem(r *api.ConfItemApplyReq) error {
+	if r.NodeID == 0 {
+		v := model.ConfigFileNodeModel{
+			BKBizID:    r.BKBizID,
+			Namespace:  r.Namespace,
+			ConfType:   r.ConfType,
+			ConfFile:   r.ConfFile,
+			LevelName:  r.LevelName,
+			LevelValue: r.LevelValue,
+		}
+		if node, err := v.Detail(model.DB.Self); err != nil {
+			return err
+		} else if node == nil {
+			return errors.Wrapf(errno.ErrNodeNotFound, "bk_biz_id=%s,namespace=%s,conf_file=%s,level_value=%s",
+				r.BKBizID, r.Namespace, r.ConfFile, r.LevelValue)
+		} else {
+			r.NodeID = node.ID
+		}
+	}
+
+	return UpdateNodeTaskApplied(r.NodeID, r.RevisionApply, r.ConfNames)
+}
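+
+// A caller-side sketch (illustrative; the revision and conf_name values are
+// hypothetical):
+//
+//	req := &api.ConfItemApplyReq{}
+//	// fill identity fields (bk_biz_id, namespace, conf_type, conf_file, level_name, level_value)
+//	req.RevisionApply = "v_20230529"
+//	req.ConfNames = []string{"max_connections"}
+//	_ = NodeTaskApplyItem(req)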
+
+// UpdateNodeTaskApplied marks the tasks in node_task as applied.
+func UpdateNodeTaskApplied(nodeID uint64, revision string, confNames []string) error {
+	n := model.NodeTaskModel{
+		NodeID:   nodeID,
+		Revision: revision,
+	}
+	logCtx := fmt.Sprintf("node_id=%d revision=%s", nodeID, revision)
+	namesNotApplied := []string{}
+	namesApplied := []string{}
+	// error out if the revision of the pending tasks in db differs from the one being applied
+	if tasks, err := n.QueryTasksByNode(model.DB.Self); err != nil {
+		return err
+	} else {
+		if len(tasks) == 0 {
+			return errors.Errorf("no items to apply for %s", logCtx)
+		} else {
+			for _, t := range tasks {
+				if t.Revision != revision {
+					return errors.Errorf("当前需要应用的版本 %s,与待应用任务版本 %s 不一致", revision, t.Revision)
+				}
+				if t.Stage == 0 {
+					namesNotApplied = append(namesNotApplied, t.ConfName)
+				} else {
+					namesApplied = append(namesApplied, t.ConfName)
+				}
+			}
+		}
+	}
+	if len(namesNotApplied) == 0 {
+		return errors.Errorf("没有找到待应用配置项 %s", logCtx)
+	}
+	for _, confName := range confNames {
+		if util.StringsHas(namesApplied, confName) {
+			logger.Errorf("配置项 %s 已应用 %s", confName, logCtx) // 不报错,只记录 log
+		}
+		if !util.StringsHas(namesNotApplied, confName) {
+			return errors.Errorf("conf_name %s 未找到待应用任务 %s", confName, logCtx)
+		}
+	}
+
+	txErr := model.DB.Self.Transaction(func(tx *gorm.DB) error {
+		// if every conf_name is now applied, mark the version itself as applied
+		isAllApplied := true
+		for _, confName := range namesNotApplied {
+			if !util.StringsHas(confNames, confName) {
+				isAllApplied = false
+			}
+		}
+		if isAllApplied {
+			v := model.ConfigVersionedModel{
+				NodeID:   nodeID,
+				Revision: revision,
+			}
+			if err := v.VersionApplyStatus(tx); err != nil {
+				return err
+			}
+			// everything applied: clear the task list
+			if err := model.DeleteNodeTask(tx, nodeID); err != nil {
+				return err
+			}
+		} else {
+			// mark the given items as applied
+			err := n.UpdateStage(tx, confNames, 2)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	return txErr
+}
diff --git a/dbm-services/common/db-config/internal/service/simpleconfig/config_file.go b/dbm-services/common/db-config/internal/service/simpleconfig/config_file.go
new file mode 100644
index 0000000000..5b4b000e67
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/simpleconfig/config_file.go
@@ -0,0 +1,189 @@
+package simpleconfig
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+	util "bk-dbconfig/pkg/util/dbutil"
+	"strconv"
+
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+)
+
+// GetConfigLockLevel returns the level name a config is locked at, or "" when it is not locked.
+func GetConfigLockLevel(locked int8, levelName string) string {
+	if locked <= 0 {
+		return ""
+	}
+	return levelName
+}
+
+func checkConfigFileExists(r *api.BaseConfFileDef) (bool, *model.ConfigFileDefModel, error) {
+	cf := &model.ConfigFileDefModel{
+		Namespace: r.Namespace,
+		ConfType:  r.ConfType,
+		ConfFile:  r.ConfFile,
+	}
+	fileDefObj, err := model.RecordGet(model.DB.Self, cf.TableName(), cf.ID, cf.UniqueWhere())
+
+	if err != nil {
+		if errors.Is(err, gorm.ErrRecordNotFound) {
+			// conf_file does not exist
+			return false, cf, nil
+		} else {
+			// other errors, including a wrong id or unique key in the input
+			return false, cf, err
+		}
+	}
+	fileDef := util.ConvDBResultToStr(fileDefObj)
+	logger.Infof("checkConfigFileExists fileDef:+v", fileDef)
+	idNew, _ := fileDef["id"]
+	cf.ID, _ = strconv.ParseUint(idNew, 10, 64)
+	cf.ConfFileLC, _ = fileDef["conf_file_lc"]
+	cf.Description, _ = fileDef["description"]
+	return true, cf, nil
+}
+
+// NewConfigModels converts a plat-level conf_file upsert request into config models and their diff ops.
+func NewConfigModels(r *api.UpsertConfFilePlatReq) ([]*model.ConfigModel, []*model.ConfigModelOp) {
+	configs := make([]*model.ConfigModel, 0)
+	configsDiff := make([]*model.ConfigModelOp, 0)
+	for _, cn := range r.ConfNames {
+		confItem := &model.ConfigModel{
+			BKBizID:     constvar.BKBizIDForPlat,
+			Namespace:   r.ConfFileInfo.Namespace,
+			ConfType:    r.ConfFileInfo.ConfType,
+			ConfFile:    r.ConfFileInfo.ConfFile,
+			ConfName:    cn.ConfName,
+			ConfValue:   cn.ValueDefault,
+			LevelName:   constvar.LevelPlat,
+			LevelValue:  constvar.BKBizIDForPlat,
+			FlagLocked:  cn.FlagLocked,
+			FlagDisable: cn.FlagDisable,
+			Description: cn.Description,
+		}
+		configs = append(configs, confItem)
+		COP := &model.ConfigModelOp{
+			Config: confItem,
+			OPType: cn.OPType,
+		}
+		configsDiff = append(configsDiff, COP)
+	}
+	return configs, configsDiff
+}
+
+// ProcessOPConfig handles referenced configs grouped by operation type.
+func ProcessOPConfig(opConfigs map[string]*ConfigModelRef) error {
+	for _, opConfig := range opConfigs {
+		for optype, configs := range *opConfig {
+			if optype == constvar.OPTypeRemoveRef {
+				if err := UpsertConfig(configs, false, false); err != nil {
+					return err
+				}
+			} else if optype == constvar.OPTypeNotified {
+				// the value changed: red-dot notification for sync
+			} else { // locked
+				// not handled for now
+			}
+		}
+	}
+	return nil
+}
+
+// ListConfigFiles lists platform config files and business config files.
+func ListConfigFiles(r *api.ListConfFileReq) ([]*api.ListConfFileResp, error) {
+	// confFiles := make([]*model.ConfigFileDefModel, 0)
+	fileNode := &model.ConfigFileNodeModel{
+		Namespace:  r.Namespace,
+		ConfType:   r.ConfType,
+		BKBizID:    r.BKBizID,
+		LevelName:  r.LevelName,
+		LevelValue: r.LevelValue,
+		ConfFile:   r.ConfFile,
+	}
+	confFiles, err := fileNode.List(model.DB.Self, constvar.BKBizIDForPlat)
+	if err != nil {
+		return nil, err
+	}
+
+	resp := make([]*api.ListConfFileResp, 0)
+	for _, f := range confFiles {
+		p := &api.ListConfFileResp{
+			ConfFileDef: api.ConfFileDef{
+				BaseConfFileDef: api.BaseConfFileDef{
+					Namespace: r.Namespace,
+					ConfType:  r.ConfType,
+					ConfFile:  f.ConfFile,
+				},
+				ConfFileLC:  f.ConfFileLC,
+				ConfTypeLC:  f.ConfTypeLC,
+				Description: f.Description,
+			},
+			CreatedAt: f.CreatedAt.String(),
+			UpdatedAt: f.UpdatedAt.String(),
+			UpdatedBy: f.UpdatedBy,
+		}
+		resp = append(resp, p)
+	}
+	return resp, nil
+}
+
+// GetConfigFileSimpleInfo queries config file info, merging in the platform config file.
+func GetConfigFileSimpleInfo(r *api.BaseConfigNode) (*api.ConfFileResp, error) {
+	confFile := &model.ConfigFileNodeModel{
+		Namespace:  r.Namespace,
+		ConfType:   r.ConfType,
+		ConfFile:   r.ConfFile,
+		LevelName:  r.LevelName,
+		LevelValue: r.LevelValue,
+		BKBizID:    r.BKBizID,
+	}
+	// confFile := model.ConfigFileNodeModel{}
+	// copier.CopyWithOption(confFile, r)
+	// get config file info
+	cf, err := confFile.Detail(model.DB.Self)
+	if err != nil {
+		return nil, err
+	}
+	fd := api.BaseConfFileDef{
+		Namespace: r.Namespace,
+		ConfType:  r.ConfType,
+		ConfFile:  r.ConfFile,
+	}
+	resp := &api.ConfFileResp{
+		ConfFileDef: api.ConfFileDef{
+			BaseConfFileDef: fd,
+		},
+	}
+	if cf == nil || cf.ConfFileLC == "" { // 如果没有找到本节点的 config_file,使用平台的(从cache中拿)
+		platFile, err := model.CacheGetConfigFile(fd)
+		if err != nil {
+			return nil, err
+		} else if platFile == nil { // the platform config file does not exist
+			return nil, errors.Errorf("config file %s not found", r.ConfFile)
+			// NotFoundInDB
+		} else {
+			resp.ConfFileLC = platFile.ConfFileLC
+			resp.ConfTypeLC = platFile.ConfTypeLC
+			resp.Description = platFile.Description
+		}
+		if r.BKBizID == constvar.BKBizIDForPlat && r.LevelName == constvar.LevelPlat {
+			resp.CreatedAt = platFile.CreatedAt.String()
+			resp.UpdatedAt = platFile.UpdatedAt.String()
+			resp.UpdatedBy = platFile.UpdatedBy
+		}
+	} else {
+		resp.ConfFileLC = cf.ConfFileLC
+		resp.ConfTypeLC = cf.ConfTypeLC
+		resp.Description = cf.Description
+		resp.CreatedAt = cf.CreatedAt.String()
+		resp.UpdatedAt = cf.UpdatedAt.String()
+		resp.UpdatedBy = cf.UpdatedBy
+	}
+	return resp, nil
+}
diff --git a/dbm-services/common/db-config/internal/service/simpleconfig/config_item.go b/dbm-services/common/db-config/internal/service/simpleconfig/config_item.go
new file mode 100644
index 0000000000..72390793bb
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/simpleconfig/config_item.go
@@ -0,0 +1,665 @@
+package simpleconfig
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/pkg/errno"
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util"
+	"fmt"
+
+	"github.com/jinzhu/copier"
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+)
+
+// UpsertConfigByUnique inserts or updates configs resolved by their unique key,
+// same as model.UpsertBatchConfigs().
+func UpsertConfigByUnique(configModels []*model.ConfigModel) error {
+	configsAdd := make([]*model.ConfigModel, 0)
+	configsUpt := make([]*model.ConfigModel, 0)
+	for _, c := range configModels {
+		if configID, err := c.CheckRecordExists(model.DB.Self); err != nil {
+			if errors.Is(err, gorm.ErrRecordNotFound) {
+				configsAdd = append(configsAdd, c)
+			} else {
+				return err
+			}
+		} else {
+			c.ID = configID
+			configsUpt = append(configsUpt, c)
+		}
+	}
+	logger.Infof("UpsertConfigByUnique configsAdd:%#v, configsUpt:%+v", configsAdd, configsUpt)
+	if len(configsAdd) != 0 {
+		if err := model.CreateBatch(model.DB.Self, configsAdd); err != nil {
+			return err
+		}
+	}
+	if len(configsUpt) != 0 {
+		// set ifNotFoundErr=false because we have checked CheckRecordExists
+		if err := model.UpdateBatch(model.DB.Self, configsUpt, false); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// UpsertConfigItems applies config node changes whose operation types are already known.
+// It first resolves each record's id by its unique key.
+// @todo return the number of affected rows
+func UpsertConfigItems(db *gorm.DB, configsOp []*model.ConfigModelOp, revision string) ([]*model.ConfigModel, error) {
+	configsLocked := make([]*model.ConfigModel, 0)
+	if len(configsOp) == 0 {
+		return configsLocked, nil
+	}
+	configsAdd := make([]*model.ConfigModel, 0)
+	configsUpt := make([]*model.ConfigModel, 0)
+	configsDel := make([]*model.ConfigModel, 0)
+	for _, c := range configsOp {
+		if id, err := model.RecordExists(db, c.Config.TableName(), c.Config.ID, c.Config.UniqueWhere()); err != nil {
+			if !errors.Is(err, gorm.ErrRecordNotFound) {
+				return nil, err
+			}
+		} else {
+			c.Config.ID = id
+		}
+		c.Config.UpdatedRevision = revision
+		c.Config.Stage = 1
+		if c.OPType == constvar.OPTypeAdd {
+			configsAdd = append(configsAdd, c.Config)
+		} else if c.OPType == constvar.OPTypeUpdate {
+			configsUpt = append(configsUpt, c.Config)
+		} else if c.OPType == constvar.OPTypeRemove {
+			configsDel = append(configsDel, c.Config)
+		} else if c.OPType == constvar.OPTypeRemoveRef {
+			configsDel = append(configsDel, c.Config)
+		}
+		if c.Config.FlagLocked == 1 && c.OPType != constvar.OPTypeRemove {
+			configsLocked = append(configsLocked, c.Config)
+		}
+	}
+	logger.Info("configsAdd: %+v  configsUpt: %+v  configsDel: %+v", configsAdd, configsUpt, configsDel)
+	if len(configsAdd) != 0 {
+		configsAdd = ProcessConfig(configsAdd)
+		if err := model.CreateBatch(db, configsAdd); err != nil {
+			return nil, err
+		}
+	}
+	if len(configsUpt) != 0 {
+		configsUpt = ProcessConfig(configsUpt)
+		// records here should exist (CheckRecordExists was done above) and be updatable
+		// to be precise: SaveOnly would use ifNotFoundErr=true, SaveAndPublish ifNotFoundErr=false
+		if err := model.UpdateBatch(db, configsUpt, false); err != nil {
+			return nil, err
+		}
+	}
+	if len(configsDel) != 0 {
+		if err := model.DeleteBatch(db, configsDel); err != nil {
+			return nil, err
+		}
+	}
+	return configsLocked, nil
+}
+
+// UpsertConfig inserts, updates or deletes the given configs.
+// update: in update mode, a missing record for a given id is an error; with update=false missing records are ignored
+// isOverride: in override mode, an existing record is overwritten via update by id; otherwise it is still inserted, which raises an error
+func UpsertConfig(configModels []*model.ConfigModel, update, isOverride bool) error {
+	configsID0 := make([]*model.ConfigModel, 0)
+	configsIDn := make([]*model.ConfigModel, 0)
+	for _, c := range configModels {
+		if c.ID == 0 {
+			configsID0 = append(configsID0, c)
+		} else {
+			configsIDn = append(configsIDn, c)
+		}
+	}
+	logger.Infof("UpsertConfig configsID0:%#v, configsIDn:%+v", configsID0, configsIDn)
+	configsAdd := make([]*model.ConfigModel, 0)
+	configsUpt := make([]*model.ConfigModel, 0)
+	configsDel := make([]*model.ConfigModel, 0)
+	for _, c := range configsID0 {
+		if configID, err := c.CheckRecordExists(model.DB.Self); err != nil {
+			if errors.Is(err, gorm.ErrRecordNotFound) {
+				configsAdd = append(configsAdd, c)
+			} else {
+				return err
+			}
+		} else {
+			c.ID = configID
+			if update || isOverride {
+				// update: by unique key
+				configsUpt = append(configsUpt, c)
+			} else {
+				// insert: ErrDuplicateKey
+				configsAdd = append(configsAdd, c) // will return err
+			}
+		}
+	}
+	for _, c := range configsIDn {
+		if _, err := c.CheckRecordExists(model.DB.Self); err != nil {
+			if errors.Is(err, gorm.ErrRecordNotFound) {
+				if update {
+					return fmt.Errorf("ErrNotFound id=%d", c.ID)
+				} else {
+					return fmt.Errorf("ErrInsertWithID id=%d", c.ID)
+				}
+			} else {
+				return err
+			}
+		} else {
+			// c.ID = configID
+			if c.FlagDisable == -1 {
+				configsDel = append(configsDel, c)
+			} else {
+				configsUpt = append(configsUpt, c)
+			}
+		}
+	}
+	logger.Infof("UpsertConfig configsAdd:%#v, configsUpt:%+v, configsDel:%+v",
+		configsAdd, configsUpt, configsDel)
+	if len(configsAdd) != 0 {
+		configsAdd = ProcessConfig(configsAdd)
+		if err := model.CreateBatch(model.DB.Self, configsAdd); err != nil {
+			return err
+		}
+	}
+	if len(configsUpt) != 0 {
+		configsUpt = ProcessConfig(configsUpt)
+		// records here should exist (CheckRecordExists was done above) and be updatable; raise an error otherwise
+		if err := model.UpdateBatch(model.DB.Self, configsUpt, true); err != nil {
+			return err
+		}
+	}
+	if len(configsDel) != 0 {
+		if err := model.DeleteBatch(model.DB.Self, configsDel); err != nil {
+			return err
+		}
+	}
+	return nil
+}
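+
+// Mode summary (a reading aid for the flags above, not new behavior):
+//
+//	UpsertConfig(cs, false, false) // insert; an existing unique key surfaces a duplicate error
+//	UpsertConfig(cs, true, false)  // update by unique key; missing ids are errors
+//	UpsertConfig(cs, false, true)  // override: existing rows are updated by id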
+
+// GetMergedConfig queries and merges config items across levels for one node.
+func GetMergedConfig(db *gorm.DB, s *api.BaseConfigNode, upLevelInfo *api.UpLevelInfo,
+	options *api.QueryConfigOptions) ([]*model.ConfigModel, error) {
+	// querying a cluster's config requires its upper-level module info
+	if options.Module == "" && options.Cluster != "" {
+		// we get module from backend
+		if res, err := model.GetModuleByCluster(s.BKBizID, options.Cluster); err != nil {
+			// module = ""
+		} else if len(res) >= 1 {
+			options.Module = res[0].Module
+		}
+	}
+	// currently these 3 levels need up level_info
+	if s.LevelName == constvar.LevelCluster || s.LevelName == constvar.LevelInstance || s.LevelName == constvar.LevelHost {
+		if len(upLevelInfo.LevelInfo) == 0 {
+			// todo only the direct parent is looked up here
+			if levelInfo, err := model.QueryParentLevelValue(s); err == nil {
+				upLevelInfo.LevelInfo = util.MapMerge(upLevelInfo.LevelInfo, levelInfo)
+			} else {
+				return nil, err
+			}
+		}
+	}
+
+	configs, err := model.GetSimpleConfig(db, s, upLevelInfo, options)
+	if err != nil {
+		return nil, err
+	}
+	if configs, err = MergeConfig(configs, options.View); err != nil {
+		return nil, err
+	} else {
+		configs = ProcessConfig(configs)
+	}
+	return configs, nil
+}
+
+// ConfigLevels groups config models by level name.
+type ConfigLevels map[string][]*model.ConfigModel
+
+// ConfigTypeLevel groups ConfigLevels by conf type.
+type ConfigTypeLevel map[string]ConfigLevels
+
+// NewBaseConfItemWithModel converts one config model into a response item.
+func NewBaseConfItemWithModel(c *model.ConfigModel, opType string) interface{} {
+	baseItem := api.BaseConfItemResp{
+		BaseConfItemDef: api.BaseConfItemDef{
+			ConfName:    c.ConfName,
+			ConfValue:   c.ConfValue,
+			FlagLocked:  c.FlagLocked,
+			FlagDisable: c.FlagDisable,
+			// Description: c.Description,
+			Stage: c.Stage,
+		},
+		BaseLevelDef: api.BaseLevelDef{
+			LevelName:  c.LevelName,
+			LevelValue: c.LevelValue,
+		},
+	}
+	if opType != "" {
+		baseItem.OPType = opType
+	}
+	return baseItem
+}
+
+// NewBaseConfItemWithModels converts config models into response items keyed by conf_name.
+func NewBaseConfItemWithModels(configs []*model.ConfigModel) map[string]interface{} {
+	confItems := make(map[string]interface{}, len(configs))
+	for _, c := range configs {
+		confItems[c.ConfName] = NewBaseConfItemWithModel(c, "")
+	}
+	return confItems
+}
+
+// NewConfigModelsWithItemReq converts an item upsert request into concrete config models and diff ops.
+func NewConfigModelsWithItemReq(r *api.UpsertConfItemsReq) ([]*model.ConfigModelView, []*model.ConfigModelOp) {
+	configs := make([]*model.ConfigModelView, 0)
+	configsDiff := make([]*model.ConfigModelOp, 0)
+	for _, cn := range r.ConfItems {
+		confItem := &model.ConfigModelView{
+			ConfigModel: model.ConfigModel{
+				BKBizID:     r.BKBizID,
+				Namespace:   r.ConfFileInfo.Namespace,
+				ConfType:    r.ConfFileInfo.ConfType,
+				ConfFile:    r.ConfFileInfo.ConfFile,
+				ConfName:    cn.ConfName,
+				ConfValue:   cn.ConfValue,
+				LevelName:   r.LevelName,
+				LevelValue:  r.LevelValue,
+				FlagDisable: cn.FlagDisable,
+				FlagLocked:  cn.FlagLocked,
+				// LevelLocked: GetConfigLockLevel(cn.FlagLocked, constvar.LevelPlat),
+				Description: cn.Description,
+			},
+			UpLevelInfo: r.UpLevelInfo.LevelInfo,
+			// Module: r.UpLevelInfo.GetLevelValue(constvar.LevelModule),
+		}
+		configs = append(configs, confItem)
+		COP := &model.ConfigModelOp{
+			Config: &confItem.ConfigModel,
+			OPType: cn.OPType,
+		}
+		configsDiff = append(configsDiff, COP)
+	}
+	return configs, configsDiff
+}
+
+// UpdateConfigFileItems saves or publishes config item changes.
+func UpdateConfigFileItems(r *api.UpsertConfItemsReq, opUser string) (*api.UpsertConfItemsResp, error) {
+	fileDef := r.ConfFileInfo.BaseConfFileDef
+	exists, cf, err := checkConfigFileExists(&fileDef)
+	defer util.LoggerErrorStack(logger.Error, err)
+	if err != nil {
+		return nil, err
+	}
+	if !exists {
+		// return nil, fmt.Errorf("conf_file %s for %s does not exists with level_name=%s,level_value=%s", cf.ConfFile, cf.Namespace, r.LevelName, r.LevelValue)
+	}
+	resp := &api.UpsertConfItemsResp{
+		BKBizID:         r.BKBizID,
+		BaseConfFileDef: fileDef,
+	}
+	configs, configsDiff := NewConfigModelsWithItemReq(r)
+	// first check that upper levels are safe; this is a hard constraint that confirm=1 cannot bypass
+	configsRef, err := BatchPreCheck(configs)
+	if err != nil {
+		return nil, err
+	}
+
+	configsRefDiff := AddConfigsRefToDiff(configsRef)
+	configsDiff = append(configsDiff, configsRefDiff...)
+
+	// lower-level configs conflict with the current change; confirm=1 acknowledges the modification
+	if len(configsRefDiff) > 0 && r.Confirm == 0 {
+		names := []string{}
+		for _, conf := range configsRefDiff {
+			names = append(names, conf.Config.ConfName)
+		}
+		return nil, errors.WithMessagef(errno.ErrConflictWithLowerConfigLevel, "%v", names)
+	}
+	txErr := model.DB.Self.Transaction(func(tx *gorm.DB) error {
+		// save to tb_config_file_node
+		levelNode := api.BaseConfigNode{}
+		levelNode.Set(r.BKBizID, cf.Namespace, cf.ConfType, cf.ConfFile, r.LevelName, r.LevelValue)
+
+		confFileLC := r.ConfFileInfo.ConfFileLC
+		if confFileLC == "" {
+			confFileLC = cf.ConfFileLC
+		}
+		if err = SaveConfigFileNode(tx, &levelNode, opUser, r.ConfFileInfo.Description, confFileLC); err != nil {
+			return err
+		}
+
+		if len(configs) == 0 { // when items are empty, only the conf_file info is modified
+			return nil
+		}
+		publishReq := &api.SimpleConfigQueryReq{
+			BaseConfigNode: levelNode,
+			InheritFrom:    "0",
+			View:           fmt.Sprintf("merge.%s", r.LevelName),
+			Format:         constvar.FormatMap,
+			Description:    r.Description, // publish description
+			Revision:       r.Revision,
+			CreatedBy:      opUser,
+			UpLevelInfo:    r.UpLevelInfo,
+		}
+		publishReq.Decrypt = false
+		if err = publishReq.Validate(); err != nil {
+			return err
+		}
+		if r.ReqType == constvar.MethodSaveOnly {
+			if !checkVersionable(r.ConfFileInfo.Namespace, r.ConfFileInfo.ConfType) {
+				return errors.WithMessagef(errno.ErrUnversionable, "%s,%s", fileDef.Namespace, fileDef.ConfType)
+			}
+			// save to tb_config_versioned
+			if v, err := GenerateConfigFile(tx, publishReq, constvar.MethodGenAndSave, configsDiff); err != nil {
+				return err
+			} else {
+				resp.Revision = v.Revision
+				resp.IsPublished = 0
+			}
+		} else if r.ReqType == constvar.MethodSaveAndPublish {
+			if !checkVersionable(r.ConfFileInfo.Namespace, r.ConfFileInfo.ConfType) {
+				return errors.WithMessagef(errno.ErrUnversionable, "%s,%s", fileDef.Namespace, fileDef.ConfType)
+			}
+			/*
+			   // confirm handles lower-level conflicts in tb_config_node
+			   if err := ProcessOPConfig(opConfigs); err != nil {
+			       return err
+			   }
+			*/
+			// save to tb_config_versioned
+			// write back to tb_config_node
+			if v, err := GenerateConfigFile(tx, publishReq, constvar.MethodGenAndPublish, configsDiff); err != nil {
+				return err
+			} else {
+				resp.Revision = v.Revision
+				resp.IsPublished = 1
+			}
+		} else if r.ReqType == constvar.MethodSave {
+			if checkVersionable(r.ConfFileInfo.Namespace, r.ConfFileInfo.ConfType) {
+				return errno.ErrVersionable
+			}
+			if _, err := UpsertConfigItems(tx, configsDiff, ""); err != nil {
+				return err
+			}
+			resp.IsPublished = 1
+		}
+		return nil
+	})
+	if txErr == nil {
+		model.CacheSetAndGetConfigFile(fileDef) // refresh cache
+	}
+	return resp, txErr
+}
+
+// QueryConfigItems godoc
+// for an entity level, query tb_config_versioned and return it directly
+// for a template level, query tb_config_node and merge across levels
+// queryFileInfo controls whether conf_file info is also returned; web pages generally need it, backend api callers do not
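+// e.g. assuming dbconf is versioned at the instance level: querying with LevelName=instance returns the
+// published snapshot from tb_config_versioned, while LevelName=module merges tb_config_node items
+// from plat/app/module by level priority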
+func QueryConfigItems(r *api.SimpleConfigQueryReq, queryFileInfo bool) (*api.GetConfigItemsResp, error) {
+	resp := &api.GetConfigItemsResp{
+		BKBizID: r.BKBizID,
+		BaseLevelDef: api.BaseLevelDef{
+			LevelName:  r.LevelName,
+			LevelValue: r.LevelValue,
+		},
+	}
+	r.Decrypt = true
+	if model.IsConfigLevelEntityVersioned(r.Namespace, r.ConfType, r.ConfFile, r.LevelName) {
+		if ret, err := QueryConfigItemsFromVersion(r, true); err != nil {
+			return nil, err
+		} else {
+			resp.Content = ret.Content
+		}
+	} else {
+		// query and merge node-level configs
+		ret, err := GenerateConfigFile(model.DB.Self, r, constvar.MethodGenerateOnly, nil)
+		if err != nil {
+			return nil, err
+		}
+		resp.Content = ret.Content
+	}
+	if queryFileInfo {
+		cf, err := GetConfigFileSimpleInfo(&r.BaseConfigNode)
+		if err != nil {
+			return nil, err
+		}
+		resp.ConfFileResp = *cf
+	}
+	return resp, nil
+}
+
+// QueryConfigItemsFromVersion queries the published config directly
+// hasApplied requires the version to have been applied before its published config can be fetched; applied means it was generated at least once
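+// e.g. a node that has generated a version at least once can fetch its published revision;
+// with hasApplied=true a never-applied node gets an error instead of its published config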
+func QueryConfigItemsFromVersion(r *api.SimpleConfigQueryReq, hasApplied bool) (*api.GenerateConfigResp, error) {
+	v := model.ConfigVersionedModel{
+		BKBizID:    r.BKBizID,
+		Namespace:  r.Namespace,
+		ConfType:   r.ConfType,
+		ConfFile:   r.ConfFile,
+		LevelName:  r.LevelName,
+		LevelValue: r.LevelValue,
+	}
+	if hasApplied {
+		if _, err := v.ExistsAppliedVersion(model.DB.Self); err != nil {
+			return nil, errors.WithMessage(err, "get published config need applied")
+		}
+	}
+	if vConfigs, err := v.GetVersionPublished(model.DB.Self); err != nil {
+		// not-found is also returned as an error
+		return nil, err
+	} else {
+		if resp, err := FormatConfigFileForResp(r, vConfigs.Configs); err != nil {
+			return nil, err
+		} else {
+			return resp, nil
+		}
+	}
+}
+
+// GetConfigItemsForFiles queries config items for multiple conf files
+func GetConfigItemsForFiles(r *api.SimpleConfigQueryReq, confFiles []string) ([]*api.GetConfigItemsResp, error) {
+	resp := make([]*api.GetConfigItemsResp, 0)
+	for _, f := range confFiles {
+		r.ConfFile = f
+		if ret, err := QueryConfigItems(r, true); err != nil {
+			return nil, err
+		} else {
+			resp = append(resp, ret)
+		}
+	}
+	return resp, nil
+}
+
+// ProcessConfigsDiff applies the changes in configsDiff onto configs and returns the merged result with the affected-row count
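+// Example (illustrative): configs=[a=1, b=2] with diff [update a=3, remove b, add c=4] yields
+// [a=3, c=4] and affectedRows=3; remove_ref entries are cascades and do not count as affected rows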
+func ProcessConfigsDiff(configs []*model.ConfigModel, configsDiff []*model.ConfigModelOp) ([]*model.ConfigModel, int,
+	error) {
+	if len(configsDiff) == 0 {
+		return configs, 0, nil
+	}
+	configsNew := make(map[string]*model.ConfigModel, 0)
+	for _, c := range configs {
+		if _, ok := configsNew[c.ConfName]; ok {
+			return nil, 0, errors.WithMessagef(errno.ErrDuplicateItem, "conf_name=%s", c.ConfName)
+		}
+		configsNew[c.ConfName] = c
+	}
+	affectedRows := 0
+	// logger.Info("ProcessConfigsDiff configs=%+v   configsDiff=%+v", configs, configsDiff)
+	for _, c := range configsDiff {
+		affectedRows += 1
+		confName := c.Config.ConfName
+		optype := c.OPType
+		if optype == constvar.OPTypeAdd {
+			if existing, ok := configsNew[confName]; ok && existing.LevelName == c.Config.LevelName {
+				return nil, 0, errors.WithMessagef(errno.ErrDuplicateItem, "conf_name=%s", confName)
+			}
+			configsNew[confName] = c.Config
+		} else if optype == constvar.OPTypeRemove {
+			delete(configsNew, confName)
+		} else if optype == constvar.OPTypeUpdate {
+			configsNew[confName] = c.Config
+		} else if optype == constvar.OPTypeRemoveRef {
+			// remove_ref is a cascading delete caused by the change, usually a conflicting config at a lower level; it never appears in the current level's config set
+			affectedRows -= 1
+		}
+	}
+	configsProcessed := make([]*model.ConfigModel, 0, len(configsNew))
+	for _, c := range configsNew {
+		configsProcessed = append(configsProcessed, c)
+	}
+	return configsProcessed, affectedRows, nil
+}
+
+// GenerateConfigFile generates a config file version
+// it merges configs via GetMergedConfig, applies the diff, then saves and/or publishes depending on method
+func GenerateConfigFile(db *gorm.DB, r *api.SimpleConfigQueryReq,
+	method string, configsDiff []*model.ConfigModelOp) (*api.GenerateConfigResp, error) {
+	// query
+	var options = api.QueryConfigOptions{}
+	if err := copier.Copy(&options, r); err != nil {
+		return nil, err
+	}
+	configs, err := GetMergedConfig(db, &r.BaseConfigNode, &r.UpLevelInfo, &options) // @TODO use transaction
+	if err != nil {
+		return nil, err
+	}
+	var m = model.ConfigVersionedModel{
+		BKBizID:    r.BKBizID,
+		Namespace:  r.Namespace,
+		LevelName:  r.LevelName,
+		LevelValue: r.LevelValue,
+		ConfType:   r.ConfType,
+		ConfFile:   r.ConfFile,
+
+		Description: r.Description,
+		Module:      r.Module,
+		Cluster:     r.Cluster,
+		CreatedBy:   r.CreatedBy,
+	}
+	// todo based on frontend input, generate the new configs from the snapshot of the currently published version plus the change diffs
+	// should the affected-row count be derived from the backend published version instead?
+	configsNew, affected, err := ProcessConfigsDiff(configs, configsDiff)
+	if err != nil {
+		return nil, err
+	}
+	m.RowsAffected = affected
+	options.RowsAffected = affected
+
+	if r.Revision == "" {
+		r.Revision = m.NewRevisionName() // m.Revision
+	}
+	// @TODO should run inside a transaction
+	if method == constvar.MethodGenAndPublish { // release: save and publish
+		if err := GenerateAndPublish(db, &r.BaseConfigNode, &options, &r.UpLevelInfo, r.Revision, configsDiff); err != nil {
+			return nil, err
+		}
+	} else if method == constvar.MethodGenAndSave { // save
+		if _, err = m.FormatAndSaveConfigVersioned(db, configsNew, configsDiff); err != nil {
+			return nil, err
+		}
+	} else if method == constvar.MethodGenerateOnly {
+		r.Revision = ""
+	} else {
+		err = fmt.Errorf("illegal param method: %s", method)
+		return nil, err
+	}
+	// response
+	resp, err := FormatConfigFileForResp(r, configsNew)
+	if err != nil {
+		return nil, err
+	} else {
+		resp.Revision = r.Revision
+	}
+	return resp, nil
+}
+
+// SaveConfigFileNode upsert
+func SaveConfigFileNode(db *gorm.DB, r *api.BaseConfigNode, opUser, description, confFileLC string) error {
+	configFile := &model.ConfigFileNodeModel{
+		BKBizID:     r.BKBizID,
+		Namespace:   r.Namespace,
+		ConfType:    r.ConfType,
+		ConfFile:    r.ConfFile,
+		LevelName:   r.LevelName,
+		LevelValue:  r.LevelValue,
+		UpdatedBy:   opUser,
+		ConfFileLC:  confFileLC,
+		Description: description,
+	}
+	if _, err := configFile.CreateOrUpdate(false, db); err != nil {
+		return err
+	}
+	return nil
+}
+
+// GenerateAndPublish generates and publishes a config version; todo: the revision parameter could be removed
+func GenerateAndPublish(db *gorm.DB, r *api.BaseConfigNode, o *api.QueryConfigOptions, up *api.UpLevelInfo,
+	revision string, configsDiff []*model.ConfigModelOp) (err error) {
+	if revision == "" {
+		return errors.New("revision should not be empty")
+	}
+	var m = model.ConfigVersionedModel{}
+	copier.Copy(&m, r)
+	m.CreatedBy = o.CreatedBy
+	m.Description = o.Description
+	m.RowsAffected = o.RowsAffected
+	if val, ok := up.LevelInfo[constvar.LevelModule]; ok {
+		m.Module = val
+	} else {
+		m.Module = o.Module
+	}
+	if val, ok := up.LevelInfo[constvar.LevelCluster]; ok {
+		m.Cluster = val
+	} else if r.LevelName == constvar.LevelCluster {
+		m.Cluster = r.LevelValue
+	}
+	// copier.Copy(&m, o)
+	txErr := db.Transaction(func(tx *gorm.DB) error { // new transaction
+		// write back to tb_config_node, saving into the level tree
+		configsLocked, err := UpsertConfigItems(tx, configsDiff, revision)
+		if err != nil {
+			return err
+		}
+
+		// regenerate merged_configs based on the latest tb_config_node
+		configs, err := GetMergedConfig(tx, r, up, o)
+		if err != nil {
+			return err
+		}
+		// save the new version to tb_config_versioned
+		if _, err = m.FormatAndSaveConfigVersioned(tx, configs, configsDiff); err != nil {
+			return err
+		}
+		// logger.Info("GenerateConfigFile ConfigVersionedModel=%+v", m)
+		publish := PublishConfig{
+			Versioned:     &m,
+			ConfigsLocked: configsLocked,
+			Patch:         nil,
+			FromGenerated: o.Generate,
+			Revision:      revision,
+		}
+		if err = publish.PublishAndApplyVersioned(tx, o.FromNodeConfigApplied); err != nil {
+			return err
+		}
+		return nil
+	})
+	return txErr
+}
diff --git a/dbm-services/common/db-config/internal/service/simpleconfig/config_item_check.go b/dbm-services/common/db-config/internal/service/simpleconfig/config_item_check.go
new file mode 100644
index 0000000000..d3cc79020c
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/simpleconfig/config_item_check.go
@@ -0,0 +1,201 @@
+package simpleconfig
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util"
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// AddConfigsRefToDiff flattens the per-conf-name ref map into a flat list of config diff operations
+func AddConfigsRefToDiff(configsRef map[string]*ConfigModelRef) []*model.ConfigModelOp {
+	configsDiff := make([]*model.ConfigModelOp, 0)
+	for _, configsMap := range configsRef {
+		for optype, configs := range *configsMap {
+			for _, c := range configs {
+				configDiff := &model.ConfigModelOp{
+					Config: c,
+					OPType: optype,
+				}
+				configsDiff = append(configsDiff, configDiff)
+			}
+		}
+	}
+	return configsDiff
+}
+
+// BatchPreCheckPlat batch-checks that the conf_items to write are legal against upper and lower levels
+// it returns a map whose key is conf_name and whose value is the lower-level configs that need handling
+func BatchPreCheckPlat(r *api.UpsertConfFilePlatReq, configs []*model.ConfigModel) (map[string]*ConfigModelRef, error) {
+	var errs []error
+	var configsRefMap = map[string]*ConfigModelRef{}
+	for _, cn := range r.ConfNames {
+		configsRef, err := PreCheckPlat(&r.ConfFileInfo.BaseConfFileDef, cn)
+		if err != nil {
+			errs = append(errs, err)
+		} else {
+			if len(*configsRef) > 0 {
+				configsRefMap[cn.ConfName] = configsRef
+			}
+		}
+	}
+	// some config items do not satisfy the insert conditions
+	if len(errs) > 0 {
+		return configsRefMap, util.SliceErrorsToError(errs)
+	}
+	logger.Info("BatchPreCheckPlat precheck result: %+v", configsRefMap)
+	return configsRefMap, nil
+}
+
+// BatchPreCheck batch-checks level legality for the config items to write
+func BatchPreCheck(configs []*model.ConfigModelView) (map[string]*ConfigModelRef, error) {
+	var errs []error
+	var configsRefMap = map[string]*ConfigModelRef{}
+	for _, cn := range configs {
+		configsRef, err := PreCheck(cn, false)
+		if err != nil {
+			errs = append(errs, err)
+		} else {
+			if len(*configsRef) > 0 {
+				configsRefMap[cn.ConfName] = configsRef
+			}
+		}
+	}
+	// some config items do not satisfy the insert conditions
+	if len(errs) > 0 {
+		return configsRefMap, util.SliceErrorsToError(errs)
+	}
+	return configsRefMap, nil
+}
+
+// PreCheck validates one config item and returns the lower-level configs that need handling
+func PreCheck(c *model.ConfigModelView, checkValue bool) (*ConfigModelRef, error) {
+	if err := CheckConfNameAndValue(&c.ConfigModel, checkValue, "", "", ""); err != nil {
+		return nil, err
+	}
+	return PrecheckConfigItemUpsert(c)
+}
+
+// PreCheckPlat validates one platform-level config item before writing
+func PreCheckPlat(f *api.BaseConfFileDef, cn *api.UpsertConfNames) (*ConfigModelRef, error) {
+	c := &model.ConfigModel{
+		Namespace: f.Namespace,
+		ConfFile:  f.ConfFile,
+		ConfType:  f.ConfType,
+		ConfName:  cn.ConfName,
+		ConfValue: cn.ValueDefault,
+		BKBizID:   constvar.BKBizIDForPlat,
+	}
+	// if value_type is empty, the frontend did not pass value_type / value_allowed; fetch them from the backend and validate against those
+	if cn.ValueType == "" {
+		if err := CheckConfNameAndValue(c, true, "", "", ""); err != nil {
+			return nil, err
+		}
+	} else {
+		if err := CheckConfNameAndValue(c, true, cn.ValueType, cn.ValueTypeSub, cn.ValueAllowed); err != nil {
+			return nil, err
+		}
+	}
+	cmv := &model.ConfigModelView{
+		ConfigModel: *c,
+		// no UpLevelInfo
+	}
+	return PrecheckConfigItemUpsert(cmv)
+}
+
+// PrecheckConfigItemUpsert checks whether the current config item can be written (add, update)
+func PrecheckConfigItemUpsert(c *model.ConfigModelView) (*ConfigModelRef, error) {
+
+	up, down, err := c.GetConfigItemsAssociateNodes()
+	if err != nil {
+		return nil, err
+	}
+	upConfigs, err := c.GetConfigItemsAssociate(c.BKBizID, up)
+	if err != nil {
+		return nil, err
+	}
+	downConfig := make([]*model.ConfigModel, 0)
+	if c.FlagLocked == 1 {
+		downConfig, err = c.GetConfigItemsAssociate(c.BKBizID, down)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	configsRef, err := CheckConfigItemWritable(&c.ConfigModel, upConfigs, downConfig)
+	return configsRef, err
+}
+
+// ConfigModelRef2 holds node operations at upper/lower levels related to the conf_item about to be written
+// "remove_ref":[{},{}] lower-level nodes to delete
+// "notify":[{},{}] lower-level nodes whose values changed
+type ConfigModelRef2 struct {
+	OPConfig map[string][]*model.ConfigModel
+}
+
+// ConfigModelRef maps an op type (remove_ref / notify / locked) to the affected lower-level configs
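+// Example shape (illustrative): {"remove_ref": [c1], "notify": [c1, c2]} groups lower-level
+// configs by the operation that the current write implies for them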
+type ConfigModelRef map[string][]*model.ConfigModel
+
+// Add appends a config under the given op type
+func (d ConfigModelRef) Add(optype string, c *model.ConfigModel) {
+	if _, ok := d[optype]; ok {
+		d[optype] = append(d[optype], c)
+	} else {
+		d[optype] = []*model.ConfigModel{c}
+	}
+}
+
+// CheckConfigItemWritable checks whether the config item may be written or modified
+// the inputs are the upper- and lower-level configs of the same config item
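+// e.g. locking charset at module level while cluster c1 holds an unlocked different value returns
+// {"remove_ref": [c1], "notify": [c1]}; if an upper level already locks charset, an error is returned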
+func CheckConfigItemWritable(current *model.ConfigModel, up, down []*model.ConfigModel) (*ConfigModelRef, error) {
+	logger.Info("CheckConfigItemWritable current:%+v up:%+v down:%+v", current, up, down)
+	// check upper levels
+	var errsString []string
+	for _, c := range up {
+		if c.FlagLocked == 1 {
+			errsString = append(errsString, fmt.Sprintf("上层级 [%s] 已锁定配置项 [%s]", c.LevelName, c.ConfName))
+		}
+	}
+	// an upper level has locked this item, so the current or lower levels must not define it explicitly
+	if len(errsString) > 0 {
+		errStr := strings.Join(errsString, "\n")
+		return nil, errors.New(errStr)
+	}
+	opConfigs := &ConfigModelRef{}
+
+	// check lower levels
+	if current.FlagLocked == 1 {
+		for _, c := range down {
+			// remove_ref: lower-level node scheduled for deletion
+			// notify: lower-level value differs from the current one
+			// locked: lower level holds a lock
+			c.FlagDisable = -1
+			opConfigs.Add(constvar.OPTypeRemoveRef, c)
+			if c.FlagLocked == 0 && c.ConfValue != current.ConfValue {
+				opConfigs.Add(constvar.OPTypeNotified, c)
+			} else if c.FlagLocked == 1 && c.ConfValue != current.ConfValue {
+				// the lower level is locked and its value differs from the value being locked here; force a red-dot reminder on the lower level as well
+				opConfigs.Add(constvar.OPTypeLocked, c)
+				opConfigs.Add(constvar.OPTypeNotified, c)
+			} else if c.FlagLocked == 1 && c.ConfValue == current.ConfValue {
+				// the lower level is locked but holds the same value being locked here, so it can simply be deleted
+				opConfigs.Add(constvar.OPTypeLocked, c)
+			} else {
+				// ignore or unreachable
+			}
+		}
+		logger.Info("CheckConfigItemWritable down:%+v", opConfigs)
+		return opConfigs, nil
+	} else { // the current config is not locked
+		return opConfigs, nil
+	}
+}
diff --git a/dbm-services/common/db-config/internal/service/simpleconfig/config_item_format.go b/dbm-services/common/db-config/internal/service/simpleconfig/config_item_format.go
new file mode 100644
index 0000000000..a79fc80576
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/simpleconfig/config_item_format.go
@@ -0,0 +1,165 @@
+package simpleconfig
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util"
+	"bk-dbconfig/pkg/validate"
+	"encoding/json"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/cast"
+)
+
+// FormatConfItemForResp godoc
+// returns config items formatted either as a map or as a list
+// "map" may be followed by one of the separators . # |, in which case conf_names are split on it and nested by section
+// this batch of items is not validated here, e.g. duplicate conf_names are not detected
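+// Example (illustrative, value_type assumed int): format="map." with items
+//   {"mysqld.max_connections": "3000", "port": "3306"}
+// produces {"mysqld": {"max_connections": 3000}, "port": 3306}; names without the separator stay top-level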
+func FormatConfItemForResp(format string, configs []*model.ConfigModel) (map[string]interface{}, error) {
+	confItems := make(map[string]interface{})
+	if len(configs) == 0 {
+		return confItems, nil
+	}
+	c0 := configs[0]
+	fd := api.BaseConfFileDef{Namespace: c0.Namespace, ConfType: c0.ConfType, ConfFile: c0.ConfFile}
+	cFile, err := model.CacheGetConfigFile(fd)
+	if err != nil {
+		return nil, err
+	}
+	if format == constvar.FormatMap {
+		for _, c := range configs {
+			confItems[c.ConfName] = CastValueType(c.ConfName, c.ConfValue, fd, cFile.ValueTypeStrict)
+		}
+	} else if strings.HasPrefix(format, constvar.FormatMap) {
+		separator := strings.TrimPrefix(format, constvar.FormatMap)
+		if separator == "" {
+			for _, c := range configs {
+				confItems[c.ConfName] = CastValueType(c.ConfName, c.ConfValue, fd, cFile.ValueTypeStrict)
+			}
+		} else {
+			tmpContent := map[string]map[string]interface{}{}
+			for _, c := range configs {
+				confNames := strings.SplitN(c.ConfName, separator, 2) //  mysqld.max_connections
+				if len(confNames) != 2 {
+					// return nil, fmt.Errorf("confName error %s. format:%s", c.ConfName, format)
+					confItems[c.ConfName] = CastValueType(c.ConfName, c.ConfValue, fd, cFile.ValueTypeStrict)
+					continue
+				}
+				cSection := confNames[0]
+				confName := confNames[1]
+				if _, ok := tmpContent[cSection]; ok {
+					tmpContent[cSection][confName] = CastValueType(c.ConfName, c.ConfValue, fd, cFile.ValueTypeStrict)
+				} else {
+					tmpContent[cSection] = make(map[string]interface{})
+					tmpContent[cSection][confName] = CastValueType(c.ConfName, c.ConfValue, fd, cFile.ValueTypeStrict)
+				}
+			}
+			for k, v := range tmpContent {
+				confItems[k] = v
+			}
+		}
+	} else if format == constvar.FormatList {
+		for _, c := range configs {
+			baseItem := NewBaseConfItemWithModel(c, "")
+			confItems[c.ConfName] = baseItem
+		}
+	} else {
+		return nil, errors.Errorf("illegal format %s", format)
+	}
+	return confItems, nil
+}
+
+// FormatConfItemOpForResp formats config diff operations as a map or list response
+func FormatConfItemOpForResp(format string, configs []*model.ConfigModelOp) (map[string]interface{}, error) {
+	confItems := make(map[string]interface{}, 0)
+	if len(configs) == 0 {
+		return confItems, nil
+	}
+	c0 := configs[0].Config
+	fd := api.BaseConfFileDef{Namespace: c0.Namespace, ConfType: c0.ConfType, ConfFile: c0.ConfFile}
+	confFile, err := model.CacheGetConfigFile(fd)
+	if err != nil {
+		return nil, err
+	}
+	if format == constvar.FormatMap {
+		for _, config := range configs {
+			c := config.Config
+			confItems[c.ConfName] = CastValueType(c.ConfName, c.ConfValue, fd, confFile.ValueTypeStrict)
+		}
+	} else if format == constvar.FormatList {
+		for _, config := range configs {
+			c := config.Config
+			baseItem := NewBaseConfItemWithModel(c, config.OPType)
+			// OPType: config.OPType,
+			confItems[c.ConfName] = baseItem
+		}
+	} else {
+		return nil, errors.Errorf("illegal format %s", format)
+	}
+	return confItems, nil
+}
+
+// FormatConfigFileForResp builds a GenerateConfigResp from the formatted config items
+func FormatConfigFileForResp(r *api.SimpleConfigQueryReq, configs []*model.ConfigModel) (*api.GenerateConfigResp,
+	error) {
+	simpleContent, err := FormatConfItemForResp(r.Format, configs)
+	if err != nil {
+		return nil, err
+	}
+	var resp = &api.GenerateConfigResp{
+		BKBizID: r.BKBizID,
+		BaseLevelDef: api.BaseLevelDef{
+			LevelName:  r.LevelName,
+			LevelValue: r.LevelValue,
+		},
+		ConfFile: r.ConfFile,
+		Content:  simpleContent,
+	}
+	return resp, nil
+}
+
+// CastValueType converts a value string into its concrete type
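+// e.g. a value_type of int casts "3000" to 3000; string with sub-type list splits "a,b" into
+// ["a", "b"]; an empty or unknown value_type returns the raw string unchanged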
+func CastValueType(confName string, confValue string, f api.BaseConfFileDef, valueTypeStrict int8) interface{} {
+	if valueTypeStrict == 0 || util.ConfValueIsPlaceHolder(confValue) {
+		return confValue
+	}
+	var valueType string
+	var valueSubType string
+	if nameDef, err := model.CacheGetConfigNameDef(f.Namespace, f.ConfType, f.ConfFile, confName); err != nil {
+		logger.Error(errors.Wrapf(err, confName).Error())
+	} else {
+		valueType = nameDef.ValueType
+		valueSubType = nameDef.ValueTypeSub
+	}
+	if valueType == "" {
+		return confValue
+	}
+	if valueType == validate.DTypeInt {
+		return cast.ToInt(confValue)
+	} else if valueType == validate.DTypeFloat || valueType == validate.DTypeNumber {
+		return cast.ToFloat32(confValue)
+	} else if valueType == validate.DTypeBool {
+		return util.ToBoolExt(confValue)
+	} else if valueType == validate.DTypeString {
+		if valueSubType == validate.DTypeSubList {
+			newValue := util.SplitAnyRuneTrim(confValue, ",")
+			return newValue
+		} else if valueSubType == validate.DTypeSubMap {
+			mapI := make(map[string]interface{})
+			err := json.Unmarshal([]byte(confValue), &mapI)
+			if err != nil {
+				logger.Error("fail to unmarshal conf_value %s. err:%s", confValue, err.Error())
+				return confValue
+			}
+			return mapI
+		}
+		return confValue
+	} else {
+		logger.Warn("%s: unsupported value_type %s to cast %s", f.ConfFile, valueType, confValue)
+		return confValue
+	}
+}
diff --git a/dbm-services/common/db-config/internal/service/simpleconfig/config_item_merge.go b/dbm-services/common/db-config/internal/service/simpleconfig/config_item_merge.go
new file mode 100644
index 0000000000..4297550aa9
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/simpleconfig/config_item_merge.go
@@ -0,0 +1,189 @@
+package simpleconfig
+
+import (
+	"bk-dbconfig/internal/pkg/cst"
+	"bk-dbconfig/internal/pkg/errno"
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/util"
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// ConfigLevelCompare compares the level priority of two configs: 1 if a wins, -1 if b wins, error on equal or unknown levels
+func ConfigLevelCompare(a, b *model.ConfigModel) (int, error) {
+	ConfigLevelMap := cst.GetConfigLevelMap("")
+	logger.Info("ConfigLevelCompare LevelName=%s", b.LevelName)
+	if aLevel, ok := ConfigLevelMap[a.LevelName]; ok {
+		if bLevel, ok := ConfigLevelMap[b.LevelName]; ok {
+			if aLevel > bLevel {
+				return 1, nil
+			} else if aLevel < bLevel {
+				return -1, nil
+			} else {
+				// app#conf_type#conf_name#namespace=>level should be unique
+				return 0, errors.WithMessage(errno.ErrConfigLevel, a.ConfName)
+			}
+		} else {
+			return 0, errors.Errorf("unknown configLevel1 %s", b.LevelName)
+		}
+	} else {
+		return 0, errors.Errorf("unknown configLevel2 %s", a.LevelName)
+	}
+}
+
+// MergeConfig merges configs across levels, keeping the highest-priority item per conf_name
+// view = merge.xxx
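+// e.g. charset defined at plat(utf8), app(utf8mb4) and cluster(latin1) merges to the cluster item
+// alone, because cluster is the most specific level (see ConfigLevelCompare and the merge tests)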
+func MergeConfig(configs []*model.ConfigModel, view string) ([]*model.ConfigModel, error) {
+	// ConfigLevelKeys := ConfigLevelMap
+	// ConfigUniqueKeys := []string{"bk_biz_id", "conf_type", "conf_name", "namespace"}
+	// viewTmp := strings.Split(view, ".")
+	configMergeMap := make(map[string]*model.ConfigModel)
+	for _, config := range configs {
+		configKey := ""
+
+		if view == constvar.ViewRaw {
+			configKey = fmt.Sprintf("%s|%s|%s|%s|%s|%s|%s",
+				config.BKBizID, config.Namespace, config.ConfType, config.ConfFile,
+				config.ConfName, config.LevelName, config.LevelValue)
+		} else if strings.HasPrefix(view, constvar.ViewMerge) {
+			configKey = fmt.Sprintf("%s|%s|%s|%s",
+				config.Namespace, config.ConfType, config.ConfFile, config.ConfName)
+		} else {
+			return nil, errors.New("no view given")
+		}
+		logger.Debugf("service.MergeConfig merge: %s", configKey)
+		if _, ok := configMergeMap[configKey]; !ok {
+			configMergeMap[configKey] = config
+		} else {
+			if r, e := ConfigLevelCompare(config, configMergeMap[configKey]); e == nil {
+				if r > 0 {
+					logger.Warnf("service.MergeConfig replace: %+v", config)
+					configMergeMap[configKey] = config
+				}
+			} else {
+				return nil, e
+			}
+		}
+	}
+	// convert configMergeMap values to slice
+	logger.Debugf("service.GetConfig configMergeMap: %+v", configMergeMap)
+	configItems := make([]*model.ConfigModel, 0)
+	for _, config := range configMergeMap {
+		configItems = append(configItems, config)
+	}
+	return configItems, nil
+}
+
+// MergeConfigView merges view-model configs across levels, keeping the highest-priority item per conf_name
+func MergeConfigView(configs []*model.ConfigModelView, view string) ([]*model.ConfigModelView, error) {
+	configMergeMap := make(map[string]*model.ConfigModelView)
+	for _, config := range configs {
+		configKey := ""
+		if view == constvar.ViewRaw {
+			configKey = fmt.Sprintf("%s#1-#%s#2-#%s#3-#%s#4-#%s#5-#%s#6%s",
+				config.BKBizID, config.Namespace, config.ConfType, config.ConfName, config.LevelName, config.LevelValue,
+				config.Cluster)
+		} else if strings.HasPrefix(view, constvar.ViewMerge) {
+			// configKey = fmt.Sprintf("%s=%s#1#%s#2#%s#3#%s", viewTmp[1], config.LevelValue, config.Namespace, config.ConfType, config.ConfName)
+			if config.Cluster != "" {
+				configKey = fmt.Sprintf("#1#%s#2#%s#3#%s#c4%s", config.Namespace, config.ConfType, config.ConfName, config.Cluster)
+			} else if config.Module != "" {
+				configKey = fmt.Sprintf("#1#%s#2#%s#3#%s#m4%s", config.Namespace, config.ConfType, config.ConfName, config.Module)
+			} else {
+				configKey = fmt.Sprintf("#1#%s#2#%s#3#%sa4%s", config.Namespace, config.ConfType, config.ConfName, config.BKBizID)
+			}
+
+		} else {
+			return nil, fmt.Errorf("no view given")
+		}
+		logger.Debugf("service.MergeConfig merge: %s", configKey)
+		if _, ok := configMergeMap[configKey]; !ok {
+			configMergeMap[configKey] = config
+		} else {
+			if r, e := ConfigVLevelCompare(config, configMergeMap[configKey]); e == nil {
+				if r > 0 {
+					logger.Warnf("service.MergeConfig replace: %+v", config)
+					configMergeMap[configKey] = config
+				}
+			} else {
+				return nil, e
+			}
+		}
+	}
+	// convert configMergeMap values to slice
+	logger.Debugf("service.GetConfig configMergeMap: %+v", configMergeMap)
+	configItems := make([]*model.ConfigModelView, 0)
+	for _, config := range configMergeMap {
+		configItems = append(configItems, config)
+	}
+	return configItems, nil
+}
+
+// ConfigVLevelCompare compares the level priority of two view-model configs
+func ConfigVLevelCompare(a, b *model.ConfigModelView) (int, error) {
+	ConfigLevelMap := cst.GetConfigLevelMap("")
+	if aLevel, ok := ConfigLevelMap[a.LevelName]; ok {
+		if bLevel, ok := ConfigLevelMap[b.LevelName]; ok {
+			if aLevel > bLevel {
+				return 1, nil
+			} else if aLevel < bLevel {
+				return -1, nil
+			} else {
+				// app#conf_type#conf_name#namespace=>level should be unique
+				return 0, errors.WithMessage(errno.ErrConfigLevel, a.ConfName)
+			}
+		} else {
+			return 0, errors.New("unknown configLevel")
+		}
+	} else {
+		return 0, errors.Errorf("unknown config level %s", a.LevelName)
+	}
+}
+
+// ProcessConfig normalizes user/notifier conf values into a comma-separated, de-duplicated list
+func ProcessConfig(configs []*model.ConfigModel) []*model.ConfigModel {
+	for _, c := range configs {
+		if c.ConfType == "user" || c.ConfFile == "notifier" {
+			// split by ", ;"
+			userList := util.SplitAnyRune(util.ReplaceBlank(c.ConfValue), ",;")
+			userListUnique := util.SliceUniqMap(userList) // keep original order
+			c.ConfValue = strings.Join(userListUnique, ",")
+		}
+	}
+	return configs
+}
+
+// ProcessConfigV is the view-model variant of ProcessConfig
+func ProcessConfigV(configs []*model.ConfigModelView) []*model.ConfigModelView {
+	for _, c := range configs {
+		if c.ConfType == "user" || c.ConfFile == "notifier" {
+			// split by ", ;"
+			userList := util.SplitAnyRune(util.ReplaceBlank(c.ConfValue), ",;")
+			userListUnique := util.SliceUniqMap(userList) // keep original order
+			c.ConfValue = strings.Join(userListUnique, ",")
+		}
+	}
+	return configs
+}
+
+// FormatConfNameValueSimple groups configs as {namespace|conf_file: {conf_name: conf_value}}
+func FormatConfNameValueSimple(configs []*model.ConfigModel) map[string]map[string]string {
+	confValues := make(map[string]map[string]string, 0)
+	for _, c := range configs {
+		NSConfFile := ""
+		if c.Namespace == "" {
+			NSConfFile = c.ConfFile
+		} else {
+			NSConfFile = fmt.Sprintf("%s|%s", c.Namespace, c.ConfFile)
+		}
+		if _, ok := confValues[NSConfFile]; !ok {
+			confValues[NSConfFile] = make(map[string]string, 0)
+		}
+		confValues[NSConfFile][c.ConfName] = c.ConfValue
+	}
+	return confValues
+}
diff --git a/dbm-services/common/db-config/internal/service/simpleconfig/config_item_test.go b/dbm-services/common/db-config/internal/service/simpleconfig/config_item_test.go
new file mode 100644
index 0000000000..d4971f831a
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/simpleconfig/config_item_test.go
@@ -0,0 +1,123 @@
+package simpleconfig
+
+import (
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/pkg/constvar"
+	"testing"
+
+	. "github.com/smartystreets/goconvey/convey"
+)
+
+func GetConfigsExample() (configs []*model.ConfigModel) {
+	configsExample := []*model.ConfigModel{
+		{BKBizID: constvar.BKBizIDForPlat, Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "utf8", LevelName: "default", LevelValue: constvar.LevelPlat},
+		{BKBizID: constvar.BKBizIDForPlat, Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "utf8", LevelName: "bk_biz_id", LevelValue: constvar.LevelPlat},
+
+		{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "utf8mb4", LevelName: "bk_biz_id", LevelValue: "testapp"},
+		{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "utf8", LevelName: "module", LevelValue: "m10"},
+		{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "latin1", LevelName: "cluster", LevelValue: "c11"},
+		{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "latin1", LevelName: "cluster", LevelValue: "c12"},
+		{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "utf8", LevelName: "module", LevelValue: "m20"},
+		{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "utf8", LevelName: "cluster", LevelValue: "c21"},
+
+		{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "major_version", ConfValue: "mysql-5.7", LevelName: "bk_biz_id", LevelValue: "testapp"},
+		{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "major_version", ConfValue: "mysql-5.7", LevelName: "module", LevelValue: "m10"},
+		{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "major_version", ConfValue: "mysql-5.5", LevelName: "module", LevelValue: "m20"},
+
+		{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "mycnf_template", ConfValue: "MySQL-5.7", LevelName: "bk_biz_id", LevelValue: "testapp"},
+		{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "mycnf_template", ConfValue: "MySQL-5.7", LevelName: "module", LevelValue: "m10"},
+		{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "mycnf_template", ConfValue: "MySQL-5.5", LevelName: "module", LevelValue: "m20"},
+	}
+	return configsExample
+}
+
+func TestMergeConfig(t *testing.T) {
+	Convey("Test Config Merge", t, func() {
+		// replace function GetConfigLevelMap
+		/*
+		   configLevelStub := gomonkey.ApplyFunc(cst.GetConfigLevelMap, func() map[string]int {
+		       return cst.ConfigLevelMap
+		   })
+		   defer configLevelStub.Reset()
+
+		*/
+		Convey("Get one conf_name item", func() {
+			configs := []*model.ConfigModel{
+				{BKBizID: constvar.BKBizIDForPlat, Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "utf8", LevelName: constvar.LevelPlat, LevelValue: constvar.BKBizIDForPlat},
+				{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "utf8mb4", LevelName: "app", LevelValue: "testapp"},
+				{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "gbk", LevelName: "module", LevelValue: "m10"},
+				{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "latin1", LevelName: "cluster", LevelValue: "c11"},
+			}
+
+			configMerged, err := MergeConfig(configs, constvar.ViewMerge)
+			configsExpect1 := []*model.ConfigModel{
+				{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "latin1", LevelName: "cluster", LevelValue: "c11"},
+			}
+			So(err, ShouldEqual, nil)
+			So(configMerged, ShouldResemble, configsExpect1)
+		})
+
+		Convey("Get two conf_name item", func() {
+			configs := []*model.ConfigModel{
+				{BKBizID: constvar.BKBizIDForPlat, Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "utf8", LevelName: constvar.LevelPlat, LevelValue: constvar.BKBizIDForPlat},
+				{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "utf8mb4", LevelName: "app", LevelValue: "testapp"},
+				{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "major_version", ConfValue: "mysql-5.5", LevelName: "app", LevelValue: "testapp"},
+				{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "latin1", LevelName: "cluster", LevelValue: "c11"},
+				{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "major_version", ConfValue: "mysql-5.7", LevelName: "module", LevelValue: "m10"},
+			}
+			configMerged, _ := MergeConfig(configs, constvar.ViewMerge)
+			configsExpect2 := []*model.ConfigModel{
+				{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "latin1", LevelName: "cluster", LevelValue: "c11"},
+				{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "major_version", ConfValue: "mysql-5.7", LevelName: "module", LevelValue: "m10"},
+			}
+			So(configMerged, ShouldResemble, configsExpect2)
+		})
+	})
+}
+
+func TestCheckConfigItemWritable(t *testing.T) {
+	Convey("Test ConfigItem writable", t, func() {
+
+		upConfigs := []*model.ConfigModel{
+			{BKBizID: constvar.BKBizIDForPlat, Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "utf8", LevelName: constvar.LevelPlat, LevelValue: constvar.BKBizIDForPlat, FlagLocked: 0},
+			{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "latin1", LevelName: "app", LevelValue: "testapp", FlagLocked: 0},
+		}
+		downConfigs := []*model.ConfigModel{
+			{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "latin1", LevelName: "cluster", LevelValue: "c11", FlagLocked: 0},
+		}
+		currents := []*model.ConfigModel{
+			{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "gbk", LevelName: "module", LevelValue: "m10", FlagLocked: 0},
+			{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "gbk", LevelName: "module", LevelValue: "m10", FlagLocked: 1},
+		}
+		Convey("Has no any lock", func() {
+			ret, err := CheckConfigItemWritable(currents[0], upConfigs, downConfigs)
+			So(err, ShouldEqual, nil)
+			So(len(*ret), ShouldEqual, 0)
+		})
+		Convey("Level module add lock1 (has remove)", func() {
+			ret, err := CheckConfigItemWritable(currents[1], upConfigs, downConfigs)
+			So(err, ShouldEqual, nil)
+			removeRefCount := len((*ret)[constvar.OPTypeRemoveRef])
+			So(removeRefCount, ShouldEqual, 1)
+		})
+		Convey("Level module add lock2 (no remove)", func() {
+			downConfigs = nil
+			ret, err := CheckConfigItemWritable(currents[1], upConfigs, downConfigs)
+			So(err, ShouldEqual, nil)
+			So(len(*ret), ShouldEqual, 0)
+		})
+
+		Convey("Level app has lock", func() {
+			upConfigs := []*model.ConfigModel{
+				{BKBizID: constvar.BKBizIDForPlat, Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "utf8", LevelName: constvar.LevelPlat, LevelValue: constvar.BKBizIDForPlat, FlagLocked: 0},
+				{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "latin1", LevelName: "app", LevelValue: "testapp", FlagLocked: 1},
+			}
+			downConfigs := []*model.ConfigModel{}
+			currents := []*model.ConfigModel{
+				{BKBizID: "testapp", Namespace: "MySQL", ConfType: "deploy", ConfFile: "tb_app_info", ConfName: "charset", ConfValue: "gbk", LevelName: "module", LevelValue: "m10", FlagLocked: 0},
+			}
+			_, err := CheckConfigItemWritable(currents[0], upConfigs, downConfigs)
+			So(err.Error(), ShouldContainSubstring, "已锁定配置")
+		})
+	})
+}
diff --git a/dbm-services/common/db-config/internal/service/simpleconfig/config_meta.go b/dbm-services/common/db-config/internal/service/simpleconfig/config_meta.go
new file mode 100644
index 0000000000..15055e9794
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/simpleconfig/config_meta.go
@@ -0,0 +1,192 @@
+package simpleconfig
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/pkg/errno"
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/pkg/util"
+	"bk-dbconfig/pkg/validate"
+	"fmt"
+
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+)
+
+// CheckConfNameAndValue godoc
+// checks whether the config item name and value are legal
+// if valueAllowed != "" is passed, validate against the passed rules; otherwise fetch the validation rules from the db
+// todo remove the checkValue parameter, it is overwritten below
+func CheckConfNameAndValue(c *model.ConfigModel, checkValue bool, valueType, valueTypeSub, valueAllowed string) error {
+	cn := model.ConfigNameDefModel{
+		Namespace: c.Namespace,
+		ConfType:  c.ConfType,
+		ConfFile:  c.ConfFile,
+		ConfName:  c.ConfName,
+	}
+	fd := api.BaseConfFileDef{Namespace: c.Namespace, ConfType: c.ConfType, ConfFile: c.ConfFile}
+	checkName := true
+	checkValue = true
+	confFile, err := model.CacheGetConfigFile(fd)
+	if err != nil {
+		return err
+	} else if confFile == nil { // no metadata for this conf file in the db: default both checks to true
+		checkValue = true
+		checkName = true
+	} else {
+		checkValue = confFile.ConfValueValidate == 1
+		checkName = confFile.ConfNameValidate == 1
+	}
+	sqlRes := model.DB.Self.Table(cn.TableName()).Where(cn.UniqueWhere()).Take(&cn)
+	if checkName {
+		if sqlRes.Error != nil {
+			if errors.Is(sqlRes.Error, gorm.ErrRecordNotFound) {
+				return errors.Errorf("illegal conf_name [%s] for conf_type=%s", c.ConfName, c.ConfType)
+			}
+			return sqlRes.Error
+		}
+		// at an entity level, editing must still be allowed
+		if cn.IsReadOnly() && !model.IsConfigLevelEntityVersioned(c.Namespace, c.ConfType, c.ConfFile, c.LevelName) {
+			return errors.Errorf("conf_name %s is readonly", c.ConfName)
+		}
+	}
+	if checkValue && !util.ConfValueIsPlaceHolder(c.ConfValue) { // 如果 value 以 {{ 开头表示值待定
+		if valueAllowed == "" {
+			// 如果给了 valueAllowed 说明是检查平台配置, 平台配置有可能来自页面的修改,以页面的 valueType 和 valueAllowed 为准
+			cn.ValueAllowed = valueAllowed
+			cn.ValueType = valueType
+			cn.ValueTypeSub = valueTypeSub
+		}
+		cn.ValueDefault = c.ConfValue
+		// if conf_name is not validated, it may be undefined in name_def, so value_type, value_type_sub and value_allowed can all be empty
+		err := validate.ValidateConfValue(cn.ValueDefault, cn.ValueType, cn.ValueTypeSub, cn.ValueAllowed)
+		if err != nil {
+			return errors.WithMessage(err, c.ConfName)
+		}
+	}
+	return nil
+}
+
+// QueryConfigNames queries conf_name definitions, optionally restricted to platform ones
+func QueryConfigNames(r *api.QueryConfigNamesReq, isPub bool) (*api.QueryConfigNamesResp, error) {
+	var confNames []*model.ConfigNameDefModel
+	var err error
+	if isPub {
+		confNames, err = model.QueryConfigNamesPlat(r.Namespace, r.ConfType, r.ConfFile, r.ConfName)
+	} else {
+		confNames, err = model.QueryConfigNames(r.Namespace, r.ConfType, r.ConfFile, r.ConfName)
+	}
+	if err != nil {
+		return nil, err
+	}
+	var resp = &api.QueryConfigNamesResp{
+		ConfFile: r.ConfFile,
+	}
+	namesMap := make(map[string]*api.ConfNameDef)
+	for _, c := range confNames {
+		namesMap[c.ConfName] = &api.ConfNameDef{
+			ConfName:     c.ConfName,
+			ConfNameLC:   c.ConfNameLC,
+			ValueType:    c.ValueType,
+			ValueTypeSub: c.ValueTypeSub,
+			ValueDefault: c.ValueDefault,
+			ValueAllowed: c.ValueAllowed,
+			NeedRestart:  c.NeedRestart,
+			FlagDisable:  c.FlagDisable,
+			FlagLocked:   c.FlagLocked,
+			Description:  c.Description,
+			FlagStatus:   c.FlagStatus,
+		}
+	}
+	resp.ConfNames = namesMap
+	return resp, nil
+}
+
+// QueryConfigTypeInfo returns conf type info together with its conf_files and conf_levels
+func QueryConfigTypeInfo(r *api.QueryConfigTypeReq) (*api.QueryConfigTypeResp, error) {
+	// query conf type info
+	confTypes, err := model.QueryConfigFileDetail(r.Namespace, r.ConfType, r.ConfFile)
+	if err != nil {
+		return nil, err
+	}
+	if len(confTypes) == 0 {
+		return nil, errors.Errorf("no conf file found for namespace=%s conf_type=%s", r.Namespace, r.ConfType)
+	}
+	// conf type info
+	ct := confTypes[0]
+
+	// conf_files
+	confFiles := make(map[string]string)
+	for _, confType := range confTypes {
+		confFiles[confType.ConfFile] = confType.ConfFileLC
+	}
+	// conf_levels
+	confLevels := make(map[string]string)
+	levelNames := util.SplitAnyRune(ct.LevelNames, ", ")
+	if levels, err := model.QueryConfigLevel(levelNames); err != nil {
+		return nil, err
+	} else {
+		for _, l := range levels {
+			confLevels[l.LevelName] = l.LevelNameCN
+		}
+	}
+
+	var resp = &api.QueryConfigTypeResp{
+		ConfTypeInfo: &api.ConfTypeDef{
+			// ConfType:         ct.ConfType,
+			// ConfTypeLC:       ct.ConfTypeLC,
+			LevelVersioned:    ct.LevelVersioned,
+			LevelNames:        ct.LevelNames,
+			VersionKeepDays:   ct.VersionKeepDays,
+			VersionKeepLimit:  ct.VersionKeepLimit,
+			ConfNameValidate:  ct.ConfNameValidate,
+			ConfValueValidate: ct.ConfValueValidate,
+			ConfNameOrder:     ct.ConfNameOrder,
+		},
+		ConfFiles:  confFiles,
+		ConfLevels: confLevels,
+	}
+	return resp, nil
+}
+
+// CheckValidConfType checks the legality of namespace, conf_type, conf_file and level_name
+// if level_name == "" the level_name is not checked
+// if needVersioned >= 2 no versioning-related checks are done
+func CheckValidConfType(namespace, confType, confFiles2, levelName string, needVersioned int8) error {
+	confFiles := util.SplitAnyRuneTrim(confFiles2, ",")
+	for _, confFile := range confFiles {
+		errStr := fmt.Sprintf("namespace=%s, conf_type=%s, conf_file=%s", namespace, confType, confFile)
+		fd := api.BaseConfFileDef{Namespace: namespace, ConfType: confType, ConfFile: confFile}
+		if f, e := model.CacheGetConfigFile(fd); e != nil {
+			return errors.Wrapf(errno.ErrConfFile, "NotFound: %s", errStr)
+		} else if f == nil {
+			return errors.Wrapf(errno.ErrNamespaceType, errStr)
+		} else {
+			if levelName != "" {
+				if !util.StringsHas(f.LevelNameList, levelName) {
+					return errors.Wrapf(errno.ErrLevelName, "allowed [%s] but given %s", f.LevelNames, levelName)
+				}
+			}
+			if needVersioned < 2 {
+				if needVersioned == 1 && f.LevelVersioned == "" {
+					return errors.Errorf("conf_file is un-versionable for %s", errStr)
+				} else if needVersioned == 0 && f.LevelVersioned != "" {
+					return errors.Errorf("conf_file is versionable for %s", errStr)
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// checkVersionable reports whether the conf type supports versioning
+// namespace and confType must already be validated
+// reads from cache only, no DB access involved
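+// e.g. checkVersionable("MySQL", "dbconf") is true if that type's LevelVersioned is non-empty
+// (say "instance"); a type with no versioned level returns false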
+func checkVersionable(namespace, confType string) bool {
+	if namespaceInfo, ok := model.CacheNamespaceType[namespace]; ok {
+		if typeInfo, ok := namespaceInfo[confType]; ok {
+			if typeInfo.LevelVersioned != "" {
+				return true
+			}
+		}
+	}
+	return false
+}
diff --git a/dbm-services/common/db-config/internal/service/simpleconfig/config_plat.go b/dbm-services/common/db-config/internal/service/simpleconfig/config_plat.go
new file mode 100644
index 0000000000..6c6400dc48
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/simpleconfig/config_plat.go
@@ -0,0 +1,181 @@
+package simpleconfig
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/pkg/errno"
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+	"fmt"
+
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+)
+
+// ConfigNamesBatchUpsert adds, updates and deletes conf_name definitions inside one transaction
+func ConfigNamesBatchUpsert(db *gorm.DB, cf api.ConfFileDef, confNames []*api.UpsertConfNames) error {
+	adds := make([]*model.ConfigNameDefModel, 0)
+	updates := make([]*model.ConfigNameDefModel, 0)
+	deletes := make([]*model.ConfigNameDefModel, 0)
+
+	// currently only these attributes can be updated: "value_default", "value_allowed", "flag_status", "flag_locked"; see ConfigNamesBatchUpdate
+	for _, cn := range confNames {
+		confName := &model.ConfigNameDefModel{
+			Namespace:    cf.Namespace,
+			ConfType:     cf.ConfType,
+			ConfFile:     cf.ConfFile,
+			ConfName:     cn.ConfName,
+			ConfNameLC:   cn.ConfNameLC,
+			ValueAllowed: cn.ValueAllowed,
+			ValueDefault: cn.ValueDefault,
+			ValueType:    cn.ValueType,
+			FlagDisable:  cn.FlagDisable,
+			FlagLocked:   cn.FlagLocked,
+			NeedRestart:  cn.NeedRestart,
+			Description:  cn.Description,
+			FlagStatus:   cn.FlagStatus, // read-only attribute: modifiable via the api, not via the page
+			Stage:        1,
+		}
+		// platConfig = append(platConfig, confName)
+
+		if cn.OPType == constvar.OPTypeAdd {
+			adds = append(adds, confName)
+		} else if cn.OPType == constvar.OPTypeUpdate {
+			updates = append(updates, confName)
+		} else if cn.OPType == constvar.OPTypeRemove {
+			deletes = append(deletes, confName)
+		}
+	}
+	err := db.Transaction(func(tx *gorm.DB) error {
+		if len(adds) > 0 {
+			if err := model.ConfigNamesBatchSave(tx, adds); err != nil {
+				return err
+			}
+		}
+		if len(updates) > 0 {
+			if err := model.ConfigNamesBatchSave(tx, updates); err != nil {
+				return err
+			}
+		}
+		if len(deletes) > 0 {
+			if err := model.ConfigNamesBatchDelete(tx, deletes); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	return err
+}
+
+// UpsertConfigFilePlat adds or updates a platform config file
+// returns an error if the conf_file already exists and the request creates a new one
+// a new conf_file is saved in the def table; on publish it enters the node table and a revision is generated and published
+func UpsertConfigFilePlat(r *api.UpsertConfFilePlatReq, clientOPType, opUser string) (*api.UpsertConfFilePlatResp,
+	error) {
+	fileDef := r.ConfFileInfo.BaseConfFileDef
+	exists, cf, err := checkConfigFileExists(&fileDef)
+	if err != nil {
+		return nil, err
+	} else {
+		cf.Description = r.ConfFileInfo.Description // file description
+		cf.ConfTypeLC = r.ConfFileInfo.ConfTypeLC
+		cf.ConfFileLC = r.ConfFileInfo.ConfFileLC
+		cf.UpdatedBy = opUser
+	}
+	logger.Info("UpsertConfigFilePlat conf_file info %+v", cf)
+	if exists && r.FileID == 0 {
+		if clientOPType == "new" {
+			return nil, fmt.Errorf("conf_file %s for %s already exists with id=%d",
+				cf.ConfFile, cf.Namespace, cf.ID)
+		}
+		// otherwise clientOPType is an edit: overwriting the existing file is allowed
+	}
+	resp := &api.UpsertConfFilePlatResp{
+		BaseConfFileDef: fileDef,
+	}
+	// build config item model
+	configs, configsDiff := NewConfigModels(r)
+	// platform configs can always be modified; lock conflicts with lower levels produce fix-up reminders later
+	configsRef, err := BatchPreCheckPlat(r, configs)
+	if err != nil {
+		return nil, err
+	}
+
+	// configsDiff is the diff used for db operations
+	// configsRef is the diff shown to the frontend
+	configsRefDiff := AddConfigsRefToDiff(configsRef)
+	configsDiff = append(configsDiff, configsRefDiff...)
+	logger.Info("UpsertConfigFilePlat configsRefDiff=%+v", configsRefDiff)
+	// lower-level configs conflict with the current change; confirm=1 acknowledges the modification
+	if len(configsRefDiff) > 0 && r.Confirm == 0 {
+		names := []string{}
+		for _, conf := range configsRefDiff {
+			names = append(names, conf.Config.ConfName)
+		}
+		return nil, errors.WithMessagef(errno.ErrConflictWithLowerConfigLevel, "%v", names)
+	}
+
+	txErr := model.DB.Self.Transaction(func(tx *gorm.DB) error {
+		// save logic
+		{
+			// save to tb_config_file_def
+			if fileID, err := cf.SaveAndGetID(tx); err != nil {
+				return err
+			} else {
+				resp.FileID = fileID
+				cf.ID = fileID
+			}
+			if len(configs) == 0 { // if items is empty, only update the conf_file metadata
+				return nil
+			}
+			/*
+			   // confirm handles lower-level conflicts in tb_config_node
+			   if err := ProcessOPConfig(configsRef); err != nil {
+			       return err
+			   }
+
+			*/
+			// save to tb_config_name_def
+			// @todo saving into tb_config_name_def effectively publishes it, which is inconsistent with tb_config_versioned
+			if err := ConfigNamesBatchUpsert(tx, r.ConfFileInfo, r.ConfNames); err != nil {
+				return err
+			}
+			resp.IsPublished = 0
+		}
+		// publish logic
+		if r.ReqType == constvar.MethodSaveAndPublish {
+			if !checkVersionable(r.ConfFileInfo.Namespace, r.ConfFileInfo.ConfType) {
+				resp.IsPublished = 1
+				return nil
+			}
+			// save to tb_config_node
+			levelNode := api.BaseConfigNode{}
+			levelNode.Set(constvar.BKBizIDForPlat, fileDef.Namespace, fileDef.ConfType, fileDef.ConfFile, constvar.LevelPlat,
+				constvar.BKBizIDForPlat)
+			publishReq := &api.SimpleConfigQueryReq{
+				BaseConfigNode: levelNode,
+				InheritFrom:    "",
+				View:           constvar.ViewRaw, // platform level has no merging concerns
+				Description:    r.Description,    // publish description
+				Format:         constvar.FormatList,
+				CreatedBy:      opUser,
+			}
+			publishReq.Decrypt = false
+			// todo remove platform configs with flag_status = -1 from tb_config_node
+
+			// save to tb_config_versioned and incrementally write back to tb_config_node
+			if v, err := GenerateConfigFile(tx, publishReq, constvar.MethodGenAndPublish, configsDiff); err != nil {
+				return err
+			} else {
+				resp.Revision = v.Revision
+				resp.IsPublished = 1
+			}
+		}
+		return nil
+	})
+	if txErr == nil {
+		model.CacheSetAndGetConfigFile(fileDef)
+	}
+	return resp, txErr
+}
diff --git a/dbm-services/common/db-config/internal/service/simpleconfig/config_version.go b/dbm-services/common/db-config/internal/service/simpleconfig/config_version.go
new file mode 100644
index 0000000000..54244add6b
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/simpleconfig/config_version.go
@@ -0,0 +1,232 @@
+package simpleconfig
+
+import (
+	"bk-dbconfig/internal/api"
+	"bk-dbconfig/internal/repository/model"
+	"bk-dbconfig/pkg/constvar"
+	"bk-dbconfig/pkg/core/logger"
+
+	"github.com/jinzhu/copier"
+	"github.com/pkg/errors"
+	"gorm.io/gorm"
+)
+
+// ListConfigFileVersions gets the version history list and marks the latest published one
+func ListConfigFileVersions(r *api.ListConfigVersionsReq) (*api.ListConfigVersionsResp, error) {
+	var m = model.ConfigVersionedModel{
+		BKBizID:    r.BKBizID,
+		Namespace:  r.Namespace,
+		LevelName:  r.LevelName,
+		LevelValue: r.LevelValue,
+		ConfType:   r.ConfType,
+		ConfFile:   r.ConfFile,
+	}
+	var resp = &api.ListConfigVersionsResp{
+		BKBizID:      r.BKBizID,
+		Namespace:    r.Namespace,
+		BaseLevelDef: r.BaseLevelDef,
+	}
+	verList := make([]string, 0)
+	if versions, err := m.ListConfigFileVersions(true); err != nil {
+		return nil, err
+	} else {
+		for _, v := range versions {
+			verList = append(verList, v.Revision)
+			if v.IsPublished == 1 { // should have only one
+				resp.VersionLatest = v.Revision
+			}
+			ver := map[string]interface{}{
+				"revision":      v.Revision,
+				"conf_file":     v.ConfFile,
+				"created_at":    v.CreatedAt,
+				"created_by":    v.CreatedBy,
+				"rows_affected": v.RowsAffected,
+				"is_published":  v.IsPublished,
+				"description":   v.Description,
+			}
+			resp.Versions = append(resp.Versions, ver)
+		}
+		// resp.Versions = verList
+		return resp, nil
+	}
+}
+
+// GetVersionedDetail returns the detail of one published version, including its configs and configs diff
+func GetVersionedDetail(r *api.GetVersionedDetailReq) (*api.GetVersionedDetailResp, error) {
+	var m = model.ConfigVersionedModel{
+		BKBizID:    r.BKBizID,
+		Namespace:  r.Namespace,
+		LevelName:  r.LevelName,
+		LevelValue: r.LevelValue,
+		ConfType:   r.ConfType,
+		ConfFile:   r.ConfFile,
+	}
+	vc := &model.ConfigVersioned{}
+	versionList := []string{r.Revision}
+	if versions, err := m.GetVersionedConfigFile(model.DB.Self, versionList); err != nil {
+		return nil, err
+	} else if len(versions) == 0 {
+		return nil, errors.Errorf("no version found %s", r.Revision)
+	} else if len(versions) != 1 {
+		return nil, errors.Errorf("err record found %d for %v", len(versions), m)
+	} else {
+		vc.Versioned = versions[0]
+		v := vc.Versioned
+		resp := &api.GetVersionedDetailResp{
+			ID:           v.ID,
+			Revision:     v.Revision,
+			PreRevision:  v.PreRevision,
+			RowsAffected: v.RowsAffected,
+			Description:  v.Description,
+			// ContentStr:   v.ContentStr,
+			CreatedAt: v.CreatedAt.String(),
+			CreatedBy: v.CreatedBy,
+		}
+		if err = vc.UnPack(); err != nil {
+			return nil, err
+		}
+		if err = vc.MayDecrypt(); err != nil {
+			return nil, err
+		}
+
+		// after unpacking, convert configs and configsDiff into the resp format, and clear the original objects to avoid returning too much useless information
+		if confValues, err := FormatConfItemForResp(r.Format, vc.Configs); err != nil {
+			return nil, err
+		} else {
+			resp.Configs = confValues
+			// resp.Content = confValues
+		}
+		if confValues, err := FormatConfItemOpForResp(r.Format, vc.ConfigsDiff); err != nil {
+			return nil, err
+		} else {
+			resp.ConfigsDiff = confValues
+		}
+		return resp, nil
+	}
+}
+
+// PublishConfig carries everything needed to publish a version and apply it downward
+type PublishConfig struct {
+	Versioned     *model.ConfigVersionedModel
+	LevelNode     api.BaseConfigNode
+	ConfigsLocked []*model.ConfigModel
+	Patch         map[string]string
+	FromGenerated bool
+	Revision      string
+}
+
+// PublishAndApplyVersioned godoc
+// publishes a version whose revision was generated earlier: is_published=true, is_applied=false
+// a non-empty configsLocked means locked configs were modified and must be applied to lower levels
+// a level config whose publish includes locked configs is always applied
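+// Illustrative flow: PublishConfig marks the revision published; an entity-level config then gets
+// node_task update reminders via GenTaskForApplyEntityConfig, while a level config whose publish
+// includes locked changes is applied downward immediately via ApplyLevelConfig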
+func (p *PublishConfig) PublishAndApplyVersioned(db *gorm.DB, isFromApplied bool) error {
+	logger.Info("PublishAndApplyVersioned %+v", p)
+	c := p.Versioned
+	if p.Patch != nil {
+		// update tb_config_versioned
+		if err := c.PatchConfig(db, p.Patch); err != nil {
+			return err
+		}
+		// update tb_config_node without checking conflicts or read-only flags
+		var cms []*model.ConfigModelOp
+		for confName, confValue := range p.Patch {
+			cm := &model.ConfigModelOp{
+				Config: &model.ConfigModel{
+					BKBizID:         p.LevelNode.BKBizID,
+					Namespace:       p.LevelNode.Namespace,
+					ConfType:        p.LevelNode.ConfType,
+					ConfFile:        p.LevelNode.ConfFile,
+					LevelName:       p.LevelNode.LevelName,
+					LevelValue:      p.LevelNode.LevelValue,
+					UpdatedRevision: p.Revision,
+					ConfName:        confName,
+					ConfValue:       confValue,
+					Description:     "updated by internal api",
+				},
+				OPType: constvar.OPTypeAdd,
+			}
+			cms = append(cms, cm)
+		}
+		if _, err := UpsertConfigItems(db, cms, p.Revision); err != nil {
+			return err
+		}
+		// follows the delete version + update + GenAndPublish flow
+	}
+	if err := c.PublishConfig(db); err != nil {
+		return err
+	}
+
+	levelNode := api.BaseConfigNode{}
+	copier.Copy(&levelNode, c)
+	p.LevelNode = levelNode
+	if model.IsConfigLevelEntityVersioned(c.Namespace, c.ConfType, c.ConfFile, c.LevelName) {
+		// any change to a versioned config generates an update reminder
+		return p.GenTaskForApplyEntityConfig(db)
+	}
+	if isFromApplied { // no cascading apply
+		return nil
+	}
+
+	if p.FromGenerated { // from the generate API: mark applied directly; only entity levels can generate
+		if err := p.Versioned.VersionApplyStatus(db); err != nil {
+			return err
+		}
+	} else {
+		// level config: an update reminder is generated only when locked configs changed
+		if len(p.ConfigsLocked) == 0 {
+			return p.Versioned.VersionApplyStatus(db)
+		}
+		return p.ApplyLevelConfig(db)
+	}
+	return nil
+}
+
+// ApplyLevelConfig applies a level config downward
+// applying deletes lower-level configs that conflict with upper-level locks
+func (p *PublishConfig) ApplyLevelConfig(db *gorm.DB) error {
+	logger.Info("ApplyLevelConfig %+v", p)
+	applyReq := api.VersionApplyReq{
+		BaseConfigNode: p.LevelNode,
+		RevisionApply:  p.Versioned.Revision,
+	}
+	if err := p.ApplyVersionLevelNode(db, &applyReq); err != nil {
+		return err
+	}
+	if err := p.Versioned.VersionApplyStatus(db); err != nil {
+		return err
+	}
+	return nil
+}
+
+// GenTaskForApplyEntityConfig writes not-yet-applied config items into node_task
+func (p *PublishConfig) GenTaskForApplyEntityConfig(db *gorm.DB) error {
+	// a versioned_config generates node_task entries whether or not it was applied, and stays unapplied
+	applyInfo := api.ApplyConfigInfoReq{BaseConfigNode: p.LevelNode}
+	diffInfo, err := GetConfigsToApply(db, applyInfo)
+	if err != nil {
+		return err
+	}
+	nodeTasks := make([]*model.NodeTaskModel, 0)
+	for confName, diff := range diffInfo.ConfigsDiff {
+		task := &model.NodeTaskModel{
+			VersionID:       diffInfo.VersionID,
+			NodeID:          diffInfo.NodeID,
+			Revision:        diffInfo.RevisionToApply,
+			UpdatedRevision: diff.UpdatedRevision,
+			ConfName:        confName,
+			ConfValue:       diff.ConfValue,
+			OPType:          diff.OPType,
+			ValueBefore:     diff.ValueBefore,
+		}
+		nodeTasks = append(nodeTasks, task)
+	}
+	if len(nodeTasks) == 0 {
+		if err := p.Versioned.VersionApplyStatus(db); err != nil {
+			return err
+		}
+		return nil
+	} else {
+		return model.GenTaskForApply(db, diffInfo.NodeID, nodeTasks)
+	}
+}
diff --git a/dbm-services/common/db-config/internal/service/simpleconfig/simple_config.go b/dbm-services/common/db-config/internal/service/simpleconfig/simple_config.go
new file mode 100644
index 0000000000..8835a0b996
--- /dev/null
+++ b/dbm-services/common/db-config/internal/service/simpleconfig/simple_config.go
@@ -0,0 +1,2 @@
+// Package simpleconfig TODO
+package simpleconfig
diff --git a/dbm-services/common/db-config/pkg/constvar/const.go b/dbm-services/common/db-config/pkg/constvar/const.go
new file mode 100644
index 0000000000..3381536f79
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/constvar/const.go
@@ -0,0 +1,81 @@
+package constvar
+
+const (
+	// Environment TODO
+	Environment = "enviroment"
+	// Test TODO
+	Test = "test"
+)
+
+// RequestType
+const (
+	MethodSaveOnly       = "SaveOnly"       // save only
+	MethodSaveAndPublish = "SaveAndPublish" // snapshot version
+)
+const (
+	// OPTypeAdd TODO
+	OPTypeAdd = "add"
+	// OPTypeUpdate TODO
+	OPTypeUpdate = "update"
+	// OPTypeRemove TODO
+	OPTypeRemove = "remove"
+	// OPTypeRemoveRef TODO
+	OPTypeRemoveRef = "remove_ref" // removed by cascade from another user action, not deleted directly by the user
+
+	// OPTypeLocked TODO
+	OPTypeLocked = "locked"
+	// OPTypeNotified TODO
+	OPTypeNotified = "notify"
+)
+
+// config file version generate method
+const (
+	MethodGenerateOnly  = "GenerateOnly"       // generate only
+	MethodGenAndSave    = "GenerateAndSave"    // snapshot
+	MethodGenAndPublish = "GenerateAndPublish" // release
+	MethodSave          = "Save"               // only for saving config types without a version concept (no-versioned)
+)
+
+// BKBizIDForPlat TODO
+const BKBizIDForPlat = "0"
+const (
+	// LevelPlat platform-level config
+	LevelPlat = "plat"
+	// LevelApp business-level config
+	LevelApp = "app"
+	// LevelModule module-level config
+	LevelModule = "module"
+	// LevelCluster cluster-level config
+	LevelCluster = "cluster"
+	// LevelHost host-level config
+	LevelHost = "host"
+	// LevelInstance instance-level config
+	LevelInstance = "instance"
+)
+
+// conf_type
+const (
+	ConfTypeMycnf  = "dbconf" // database parameter config
+	ConfTypeBackup = "backup" // database backup config
+	ConfTypeDeploy = "deploy" // deployment config
+)
+
+const (
+	// FormatMap TODO
+	FormatMap = "map"
+	// FormatList TODO
+	FormatList = "list"
+	// ViewRaw TODO
+	ViewRaw = "raw"
+	// ViewMerge TODO
+	ViewMerge = "merge"
+)
+
+// BKApiAuthorization bkapigw
+const BKApiAuthorization = "X-Bkapi-Authorization"
+
+// DraftVersion TODO
+const DraftVersion = "v_draft"
+
+// EncryptEnableZip TODO
+const EncryptEnableZip = false
diff --git a/dbm-services/common/db-config/pkg/constvar/constvar.go b/dbm-services/common/db-config/pkg/constvar/constvar.go
new file mode 100644
index 0000000000..47405fe6eb
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/constvar/constvar.go
@@ -0,0 +1,2 @@
+// Package constvar TODO
+package constvar
diff --git a/dbm-services/common/db-config/pkg/constvar/mysql.go b/dbm-services/common/db-config/pkg/constvar/mysql.go
new file mode 100644
index 0000000000..60d8203fca
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/constvar/mysql.go
@@ -0,0 +1,31 @@
+package constvar
+
+const (
+	// MysqldInstallRootPath TODO
+	MysqldInstallRootPath = "/usr/local"
+	// MysqldInstallPath TODO
+	MysqldInstallPath = "/usr/local/mysql"
+	// DefaultMysqlLogRootPath default root path for mysql logs
+	DefaultMysqlLogRootPath = "/data"
+	// AlterNativeMysqlLogRootPath alternative root path for mysql logs
+	AlterNativeMysqlLogRootPath = "/data1"
+	// DefaultMysqlLogBasePath TODO
+	DefaultMysqlLogBasePath = "mysqllog"
+	// DefaultMysqlDataRootPath default root path for mysql data
+	DefaultMysqlDataRootPath = "/data1"
+	// AlterNativeMysqlDataRootPath TODO
+	AlterNativeMysqlDataRootPath = "/data"
+	// DefaultMysqlDataBasePath TODO
+	DefaultMysqlDataBasePath = "mysqldata"
+	// DefaultMycnfRootPath default directory for my.cnf config files
+	DefaultMycnfRootPath = "/etc"
+
+	// DefaultMyCnfName TODO
+	DefaultMyCnfName = "/etc/my.cnf"
+
+	// DefaultSocketName TODO
+	DefaultSocketName = "mysql.sock"
+
+	// DefaultMySQLPort TODO
+	DefaultMySQLPort = 3306
+)
diff --git a/dbm-services/common/db-config/pkg/constvar/os.go b/dbm-services/common/db-config/pkg/constvar/os.go
new file mode 100644
index 0000000000..0137c60486
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/constvar/os.go
@@ -0,0 +1,117 @@
+package constvar
+
+// status
+const (
+	Unknown      = "unknown"
+	RUNNING      = "RUNNING"
+	UNAVAILABLE  = "UNAVAILABLE"
+	AVAIL        = "AVAIL"
+	LOCKED       = "LOCKED"
+	ALONE        = "ALONE"
+	UNIQ_LOCK    = "UNIQ_LOCK"
+	INITIALIZING = "INITIALIZING"
+	NULL         = "NULL"
+)
+
+const (
+	// Default TODO
+	Default = "default"
+)
+
+// db role
+const (
+	MySQLMaster        = "mysql_master"
+	MySQLLogDB         = "mysql_logdb"
+	MySQLSlave         = "mysql_slave"
+	MySQLMasterSlave   = "mysql_master&mysql_slave"
+	MySQLMasterOrSlave = "mysql_master/mysql_slave"
+	ProxyMaster        = "proxy_master"
+	ProxySlave         = "proxy_slave"
+	ProxyMasterSlave   = "proxy_master&proxy_slave"
+)
+
+// db Category (dbtype); not to be confused with job's gamedb/gamedr, which are different things.
+const (
+	Logdb  = "logdb"
+	MySQL  = "MySQL"
+	Proxy  = "Proxy"
+	Spider = "Spider"
+	Dumper = "Dumper"
+)
+
+// switch type
+const (
+	AutoSwitch = "AutoSwitch"
+	HandSwitch = "HandSwitch"
+	NotSwitch  = "NotSwitch"
+)
+
+// switch weight
+const (
+	SwitchWeight0   = "0"
+	SwitchWeight1   = "1"
+	SwitchWeight100 = "100"
+)
+
+// os type
+const (
+	RedHat    = "redhat"
+	Suse      = "suse"
+	Slackware = "slackware"
+)
+
+// bits
+const (
+	Bit64  = "64"
+	Bit32  = "32"
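+	// OSBits is 64 on 64-bit platforms and 32 on 32-bit ones:
+	// ^uintptr(0)>>63 is 1 only when uintptr is 64 bits wide.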
+	OSBits = 32 << uintptr(^uintptr(0)>>63)
+)
+
+// switch
+const (
+	ON  = "ON"
+	OFF = "OFF"
+)
+
+// proxy related
+const (
+	ProxyAdminPortInc = 1000
+	// ProxyAdminAccoutArray TODO
+	// string array, split by comma
+	ProxyAdminAccoutArray = "MONITOR@%"
+)
+
+// dbmstype
+const (
+	MySQLCluster = "mysql_cluster"
+	MySQLSingle  = "mysql_single"
+)
+
+// disasterLevel
+const (
+	IDC        = "IDC"
+	City       = "CITY"
+	DiffCampus = "DiffCampus"
+	SameCampus = "SameCampus"
+)
+
+// proxy related
+const (
+	DefaultBackends = "1.1.1.1:3306"
+)
+
+// etcd finished key value
+const (
+	Success = "success"
+	Failed  = "failed"
+)
+
+// type in tb_map_info
+const (
+	// MapRole TODO
+	// master and slave's type
+	MapRole = "role"
+	// MapData TODO
+	// proxy and backend's type
+	MapData = "data"
+)
diff --git a/dbm-services/common/db-config/pkg/core/config/base.go b/dbm-services/common/db-config/pkg/core/config/base.go
new file mode 100644
index 0000000000..7a8750d0fd
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/config/base.go
@@ -0,0 +1,61 @@
+package config
+
+import (
+	"log"
+	"strings"
+
+	"github.com/spf13/viper"
+)
+
+var (
+	// Get re-exports viper.Get
+	Get = viper.Get
+	// GetBool re-exports viper.GetBool
+	GetBool = viper.GetBool
+	// GetDuration re-exports viper.GetDuration
+	GetDuration = viper.GetDuration
+	// GetFloat64 re-exports viper.GetFloat64
+	GetFloat64 = viper.GetFloat64
+	// GetInt re-exports viper.GetInt
+	GetInt = viper.GetInt
+	// GetInt32 re-exports viper.GetInt32
+	GetInt32 = viper.GetInt32
+	// GetInt64 re-exports viper.GetInt64
+	GetInt64 = viper.GetInt64
+	// GetIntSlice re-exports viper.GetIntSlice
+	GetIntSlice = viper.GetIntSlice
+	// GetString re-exports viper.GetString
+	GetString = viper.GetString
+	// GetStringMap re-exports viper.GetStringMap
+	GetStringMap = viper.GetStringMap
+	// GetStringMapString re-exports viper.GetStringMapString
+	GetStringMapString = viper.GetStringMapString
+	// GetStringMapStringSlice re-exports viper.GetStringMapStringSlice
+	GetStringMapStringSlice = viper.GetStringMapStringSlice
+	// GetStringSlice re-exports viper.GetStringSlice
+	GetStringSlice = viper.GetStringSlice
+	// GetTime re-exports viper.GetTime
+	GetTime = viper.GetTime
+	// GetUint re-exports viper.GetUint
+	GetUint = viper.GetUint
+	// GetUint32 re-exports viper.GetUint32
+	GetUint32 = viper.GetUint32
+	// GetUint64 re-exports viper.GetUint64
+	GetUint64 = viper.GetUint64
+	// SetDefault re-exports viper.SetDefault
+	SetDefault = viper.SetDefault
+)
+
+// InitConfig loads conf/<fileName>.yaml and merges in matching environment variables; it exits on error.
+func InitConfig(fileName string) {
+	viper.AddConfigPath("conf")
+	viper.SetConfigType("yaml")
+	viper.SetConfigName(fileName)
+	viper.AutomaticEnv() // read in environment variables that match
+	// viper.SetEnvPrefix("ACCOUNT")
+	replacer := strings.NewReplacer(".", "_")
+	viper.SetEnvKeyReplacer(replacer)
+	if err := viper.MergeInConfig(); err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/dbm-services/common/db-config/pkg/core/config/config.go b/dbm-services/common/db-config/pkg/core/config/config.go
new file mode 100644
index 0000000000..0579a3a16d
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/config/config.go
@@ -0,0 +1,2 @@
+// Package config TODO
+package config
diff --git a/dbm-services/common/db-config/pkg/core/config/logger.go b/dbm-services/common/db-config/pkg/core/config/logger.go
new file mode 100644
index 0000000000..407cf2f818
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/config/logger.go
@@ -0,0 +1,47 @@
+package config
+
+import "github.com/spf13/viper"
+
+// Config file format:
+/*
+log:
+    # options: stdout, stderr, /path/to/log/file
+    output: /data/logs/myapp/myapp.log
+    # options: logfmt, json
+    formater: logfmt
+    # options: debug, info, warn, error, fatal, panic
+    level: info
+    # max size of a single log file, in MB (100M)
+    maxsize: 100
+    # number of rotated log files to keep
+    maxbackups: 3
+    # days to keep rotated logs
+    maxage: 30
+    # start the dynamic log-level server
+    levelserver: false
+*/
+
+// Logger holds the parsed log settings from the config file.
+var Logger struct {
+	Formater    string
+	Level       string
+	Output      string
+	LocalTime   bool
+	TimeFormat  string
+	MaxSize     int
+	MaxBackups  int
+	MaxAge      int
+	LevelServer bool
+}
+
+// InitLogger copies the log.* settings from viper into Logger.
+func InitLogger() {
+	Logger.Formater = viper.GetString("log.formater")
+	Logger.Level = viper.GetString("log.level")
+	Logger.Output = viper.GetString("log.output")
+	Logger.LocalTime = true
+	Logger.TimeFormat = viper.GetString("log.timeformat")
+	Logger.MaxSize = viper.GetInt("log.maxsize")
+	Logger.MaxBackups = viper.GetInt("log.maxbackups")
+	Logger.MaxAge = viper.GetInt("log.maxage")
+	Logger.LevelServer = viper.GetBool("log.levelserver")
+}
diff --git a/dbm-services/common/db-config/pkg/core/config/tls.go b/dbm-services/common/db-config/pkg/core/config/tls.go
new file mode 100644
index 0000000000..30d42c1288
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/config/tls.go
@@ -0,0 +1,24 @@
+package config
+
+// Config file format:
+/*
+tls:
+ ca: configs/certs/ca.crt
+ server_name: server.example.com
+ server_cert: configs/certs/server.crt
+ server_key: configs/certs/server.key
+ auth: true
+ client_cert: configs/certs/client.crt
+ client_key: configs/certs/client.key
+*/
+
+// Tls holds the TLS settings loaded from the config file.
+var Tls struct {
+	CA         string `mapstructure:"ca"`
+	ServerName string `mapstructure:"server_name"`
+	ServerCert string `mapstructure:"server_cert"`
+	ServerKey  string `mapstructure:"server_key"`
+	Auth       bool   `mapstructure:"auth"`
+	ClientCert string `mapstructure:"client_cert"`
+	ClientKey  string `mapstructure:"client_key"`
+}
diff --git a/dbm-services/common/db-config/pkg/core/logger/README.md b/dbm-services/common/db-config/pkg/core/logger/README.md
new file mode 100644
index 0000000000..66b3c4490e
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/README.md
@@ -0,0 +1,41 @@
+
+
+# logger
+
+### Log management module.
+
+* Optional: call Init to select a logger implementation (zap by default). Settings come from the config file.
+
+	Without initialization, the defaults apply: output to stdout, logfmt format, info level.
+
+* Two output formats are supported:
+
+	1. JSON: recommended in production, so the logging system can parse entries correctly and ship them to the log store.
+	2. logfmt: convenient in development; the output is still structured.
+
+Config file format:
+
+	  log:
+	    # options: stdout, stderr, /path/to/log/file
+	    output: /data/logs/myapp/myapp.log
+	    # options: logfmt, json
+	    formater: logfmt
+	    # options: debug, info, warn, error, fatal, panic
+	    level: info
+	    # timestamp format
+	    timeformat: 2006-01-02T15:04:05.000Z07:00
+	    # max size of a single log file, in MB
+	    maxsize: 100
+	    # number of rotated log files to keep
+	    maxbackups: 3
+	    # days to keep rotated logs
+	    maxage: 30
+	    # start the dynamic log-level server
+	    levelserver: false
+
+
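+A minimal usage sketch (assuming the default zap implementation):
+
+	import "bk-dbconfig/pkg/core/logger"
+
+	func main() {
+		logger.Init() // load log.* settings and install the zap logger
+		logger.Info("service started on port %d", 8080)
+		logger.WithFields(map[string]interface{}{"cluster": "demo"}).Warn("replica lag high")
+	}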
diff --git a/dbm-services/common/db-config/pkg/core/logger/base/base.go b/dbm-services/common/db-config/pkg/core/logger/base/base.go
new file mode 100644
index 0000000000..d3dd5ad848
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/base/base.go
@@ -0,0 +1,2 @@
+// Package base TODO
+package base
diff --git a/dbm-services/common/db-config/pkg/core/logger/base/interface.go b/dbm-services/common/db-config/pkg/core/logger/base/interface.go
new file mode 100644
index 0000000000..550eebcfee
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/base/interface.go
@@ -0,0 +1,14 @@
+package base
+
+// ILogger is the interface every logger implementation must satisfy.
+type ILogger interface {
+	ImpleLogger()
+	Init()
+	Debug(format string, args ...interface{})
+	Info(format string, args ...interface{})
+	Warn(format string, args ...interface{})
+	Error(format string, args ...interface{})
+	Fatal(format string, args ...interface{})
+	Panic(format string, args ...interface{})
+	WithFields(mapFields map[string]interface{}) ILogger
+}
diff --git a/dbm-services/common/db-config/pkg/core/logger/example_test.go b/dbm-services/common/db-config/pkg/core/logger/example_test.go
new file mode 100644
index 0000000000..98db7f6252
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/example_test.go
@@ -0,0 +1,20 @@
+package logger_test
+
+import (
+	"bk-dbconfig/pkg/core/logger"
+	"bk-dbconfig/pkg/core/logger/zap"
+)
+
+// ExampleInitLog initializes logging with a chosen implementation.
+func ExampleInitLog() {
+	// initialize with the zap implementation
+	zapLogger := zap.New()
+	logger.InitLogger(zapLogger)
+}
+
+func ExampleInfo() {
+	msg := "something happened"
+	// time="2020-04-17T09:34:13+08:00" level=info msg="Message: something happened" hostname=localhost src="mod1/myapp.go:20"
+	fields := map[string]interface{}{"field1": "value1"}
+	logger.WithFields(fields).Info("Message: %s", msg)
+}
diff --git a/dbm-services/common/db-config/pkg/core/logger/init.go b/dbm-services/common/db-config/pkg/core/logger/init.go
new file mode 100644
index 0000000000..d323e6faf4
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/init.go
@@ -0,0 +1,16 @@
+package logger
+
+import (
+	"bk-dbconfig/pkg/core/config"
+	"bk-dbconfig/pkg/core/logger/zap"
+)
+
+func init() {
+	// DefaultLogger = zap.New()
+}
+
+// Init should be called at program startup: it loads the log settings and installs the zap logger as the default.
+func Init() {
+	config.InitLogger()
+	DefaultLogger = zap.New()
+}
diff --git a/dbm-services/common/db-config/pkg/core/logger/log.go b/dbm-services/common/db-config/pkg/core/logger/log.go
new file mode 100644
index 0000000000..2cc2c3fc92
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/log.go
@@ -0,0 +1,87 @@
+package logger
+
+import (
+	"bk-dbconfig/pkg/core/logger/base"
+	"bk-dbconfig/pkg/core/logger/zap"
+)
+
+var (
+	// DefaultLogger is guaranteed to be non-nil
+	DefaultLogger base.ILogger = zap.New()
+)
+
+// InitLog is a legacy no-op kept for backward compatibility.
+func InitLog() {
+	// DefaultLogger = zap.New()
+}
+
+// InitLogger installs the given logger implementation as the default.
+func InitLogger(logger base.ILogger) {
+	DefaultLogger = logger
+}
+
+// Debug and the functions below are the output methods of the log module.
+func Debug(format string, args ...interface{}) {
+	DefaultLogger.Debug(format, args...)
+}
+
+// Info logs at info level.
+func Info(format string, args ...interface{}) {
+	DefaultLogger.Info(format, args...)
+}
+
+// Warn logs at warn level.
+func Warn(format string, args ...interface{}) {
+	DefaultLogger.Warn(format, args...)
+}
+
+// Error logs at error level.
+func Error(format string, args ...interface{}) {
+	DefaultLogger.Error(format, args...)
+}
+
+// Fatal logs at fatal level; the program terminates.
+func Fatal(format string, args ...interface{}) {
+	DefaultLogger.Fatal(format, args...)
+}
+
+// Panic logs at panic level and then panics.
+func Panic(format string, args ...interface{}) {
+	DefaultLogger.Panic(format, args...)
+}
+
+// WithFields attaches structured fields to the returned logger.
+func WithFields(mapFields map[string]interface{}) base.ILogger {
+	return DefaultLogger.WithFields(mapFields)
+}
+
+// Debugf is a compatibility alias for Debug.
+func Debugf(format string, args ...interface{}) {
+	DefaultLogger.Debug(format, args...)
+}
+
+// Infof is a compatibility alias for Info.
+func Infof(format string, args ...interface{}) {
+	DefaultLogger.Info(format, args...)
+}
+
+// Warnf is a compatibility alias for Warn.
+func Warnf(format string, args ...interface{}) {
+	DefaultLogger.Warn(format, args...)
+}
+
+// Errorf is a compatibility alias for Error.
+func Errorf(format string, args ...interface{}) {
+	DefaultLogger.Error(format, args...)
+}
+
+// Fatalf is a compatibility alias for Fatal.
+func Fatalf(format string, args ...interface{}) {
+	DefaultLogger.Fatal(format, args...)
+}
+
+// Panicf is a compatibility alias for Panic.
+func Panicf(format string, args ...interface{}) {
+	DefaultLogger.Panic(format, args...)
+}
diff --git a/dbm-services/common/db-config/pkg/core/logger/logger.go b/dbm-services/common/db-config/pkg/core/logger/logger.go
new file mode 100644
index 0000000000..3dc273e398
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/logger.go
@@ -0,0 +1,2 @@
+// Package logger TODO
+package logger
diff --git a/dbm-services/common/db-config/pkg/core/logger/logrus/fields.go b/dbm-services/common/db-config/pkg/core/logger/logrus/fields.go
new file mode 100644
index 0000000000..4409f99bd9
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/logrus/fields.go
@@ -0,0 +1,61 @@
+package logrus
+
+import (
+	"bk-dbconfig/pkg/core/logger/base"
+	"fmt"
+
+	"github.com/sirupsen/logrus"
+)
+
+// logrusFields handles log entries that carry structured fields.
+type logrusFields struct {
+	fields logrus.Fields
+}
+
+// ImpleLogger TODO
+func (s *logrusFields) ImpleLogger() {}
+
+// Init TODO
+func (s *logrusFields) Init() {}
+
+// Debug TODO
+func (s *logrusFields) Debug(format string, args ...interface{}) {
+	GetLogger().WithFields(s.fields).Debugf(format, args...)
+}
+
+// Info TODO
+func (s *logrusFields) Info(format string, args ...interface{}) {
+	GetLogger().WithFields(s.fields).Infof(format, args...)
+}
+
+// Warn TODO
+func (s *logrusFields) Warn(format string, args ...interface{}) {
+	GetLogger().WithFields(s.fields).Warnf(format, args...)
+}
+
+// Error logs at error level.
+func (s *logrusFields) Error(format string, args ...interface{}) {
+	GetLogger().WithFields(s.fields).Errorf(format, args...)
+}
+
+// Fatal TODO
+func (s *logrusFields) Fatal(format string, args ...interface{}) {
+	GetLogger().WithFields(s.fields).Fatalf(format, args...)
+}
+
+// Panic TODO
+func (s *logrusFields) Panic(format string, args ...interface{}) {
+	GetLogger().WithFields(s.fields).Panicf(format, args...)
+}
+
+// String formats the message like fmt.Sprintf.
+func (s *logrusFields) String(format string, args ...interface{}) string {
+	return fmt.Sprintf(format, args...)
+}
+
+// WithFields supports chained calls.
+func (s *logrusFields) WithFields(mapFields map[string]interface{}) base.ILogger {
+	s.fields = mapFields
+
+	return s
+}
diff --git a/dbm-services/common/db-config/pkg/core/logger/logrus/log.go b/dbm-services/common/db-config/pkg/core/logger/logrus/log.go
new file mode 100644
index 0000000000..754e58803f
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/logrus/log.go
@@ -0,0 +1,64 @@
+package logrus
+
+import (
+	"bk-dbconfig/pkg/core/logger/base"
+
+	"github.com/sirupsen/logrus"
+)
+
+type logrusLogger struct{}
+
+// NewLogger TODO
+func NewLogger() *logrusLogger {
+	return &logrusLogger{}
+}
+
+// ImpleLogger TODO
+func (s *logrusLogger) ImpleLogger() {}
+
+// Init TODO
+func (s *logrusLogger) Init() {}
+
+// Debug TODO
+func (s *logrusLogger) Debug(format string, args ...interface{}) {
+	GetLogger().Debugf(format, args...)
+}
+
+// Info TODO
+func (s *logrusLogger) Info(format string, args ...interface{}) {
+	GetLogger().Infof(format, args...)
+}
+
+// Warn TODO
+func (s *logrusLogger) Warn(format string, args ...interface{}) {
+	GetLogger().Warnf(format, args...)
+}
+
+// Error logs at error level.
+func (s *logrusLogger) Error(format string, args ...interface{}) {
+	GetLogger().Errorf(format, args...)
+}
+
+// Fatal note: this method terminates the program!
+func (s *logrusLogger) Fatal(format string, args ...interface{}) {
+	GetLogger().Fatalf(format, args...)
+}
+
+// Panic note: this method panics!
+func (s *logrusLogger) Panic(format string, args ...interface{}) {
+	GetLogger().Panicf(format, args...)
+}
+
+// WithFields attaches the given fields to the returned logger.
+func (s *logrusLogger) WithFields(mapFields map[string]interface{}) base.ILogger {
+	return &logrusFields{fields: mapFields}
+}
+
+// GetLogger TODO
+func GetLogger() *logrus.Entry {
+	fields := logrus.Fields{
+		"hostname": _LOGHOSTNAME,
+	}
+
+	return logrus.WithFields(fields)
+}
diff --git a/dbm-services/common/db-config/pkg/core/logger/logrus/logrus.go b/dbm-services/common/db-config/pkg/core/logger/logrus/logrus.go
new file mode 100644
index 0000000000..84f44997a8
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/logrus/logrus.go
@@ -0,0 +1,2 @@
+// Package logrus TODO
+package logrus
diff --git a/dbm-services/common/db-config/pkg/core/logger/logrus/new.go b/dbm-services/common/db-config/pkg/core/logger/logrus/new.go
new file mode 100644
index 0000000000..68aea70f5f
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/logrus/new.go
@@ -0,0 +1,63 @@
+package logrus
+
+import (
+	"bk-dbconfig/pkg/core/config"
+	"os"
+	"path"
+
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	_LOGHOSTNAME = ""
+)
+
+// New TODO
+func New() *logrusLogger {
+	// initialize package-level globals
+	_LOGHOSTNAME, _ = os.Hostname()
+
+	switch config.Logger.Formater {
+	case "json":
+		logrus.SetFormatter(&logrus.JSONFormatter{})
+	default:
+		logrus.SetFormatter(&logrus.TextFormatter{})
+	}
+
+	switch config.Logger.Output {
+	case "stdout":
+		logrus.SetOutput(os.Stdout)
+	default:
+		dir := path.Dir(config.Logger.Output)
+		if _, err := os.Stat(dir); os.IsNotExist(err) {
+			err := os.MkdirAll(dir, os.ModePerm)
+			if err != nil {
+				logrus.Panicf("Failed to log to file: %s", config.Logger.Output)
+			}
+		}
+
+		file, err := os.OpenFile(config.Logger.Output, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)
+		if err != nil {
+			logrus.Panicf("Failed to log to file: %s", config.Logger.Output)
+		}
+
+		logrus.SetOutput(file)
+	}
+
+	switch config.Logger.Level {
+	case "debug":
+		logrus.SetLevel(logrus.DebugLevel)
+	case "info":
+		logrus.SetLevel(logrus.InfoLevel)
+	case "warning", "warn":
+		logrus.SetLevel(logrus.WarnLevel)
+	case "error":
+		logrus.SetLevel(logrus.ErrorLevel)
+	case "fatal":
+		logrus.SetLevel(logrus.FatalLevel)
+	case "panic":
+		logrus.SetLevel(logrus.PanicLevel)
+	}
+
+	return NewLogger()
+}
diff --git a/dbm-services/common/db-config/pkg/core/logger/lumberjack/lumberjack.go b/dbm-services/common/db-config/pkg/core/logger/lumberjack/lumberjack.go
new file mode 100644
index 0000000000..aaed5eab9d
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/lumberjack/lumberjack.go
@@ -0,0 +1,535 @@
+// Package lumberjack provides a rolling logger.
+//
+// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
+// thusly:
+//
+//	import "gopkg.in/natefinch/lumberjack.v2"
+//
+// The package name remains simply lumberjack, and the code resides at
+// https://github.com/natefinch/lumberjack under the v2.0 branch.
+//
+// Lumberjack is intended to be one part of a logging infrastructure.
+// It is not an all-in-one solution, but instead is a pluggable
+// component at the bottom of the logging stack that simply controls the files
+// to which logs are written.
+//
+// Lumberjack plays well with any logging package that can write to an
+// io.Writer, including the standard library's log package.
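+//
+// A minimal sketch of typical use with the standard library logger:
+//
+//	log.SetOutput(&lumberjack.Logger{
+//		Filename:   "/var/log/myapp/foo.log",
+//		MaxSize:    100, // megabytes
+//		MaxBackups: 3,
+//		MaxAge:     28, // days
+//		Compress:   true,
+//	})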
+//
+// Lumberjack assumes that only one process is writing to the output files.
+// Using the same lumberjack configuration from multiple processes on the same
+// machine will result in improper behavior.
+package lumberjack
+
+import (
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+const (
+	backupTimeFormat = "2006-01-02T15-04-05.000"
+	compressSuffix   = ".gz"
+	defaultMaxSize   = 100
+)
+
+// ensure we always implement io.WriteCloser
+var _ io.WriteCloser = (*Logger)(nil)
+
+// Logger is an io.WriteCloser that writes to the specified filename.
+//
+// Logger opens or creates the logfile on first Write.  If the file exists and
+// is less than MaxSize megabytes, lumberjack will open and append to that file.
+// If the file exists and its size is >= MaxSize megabytes, the file is renamed
+// by putting the current time in a timestamp in the name immediately before the
+// file's extension (or the end of the filename if there's no extension). A new
+// log file is then created using original filename.
+//
+// Whenever a write would cause the current log file to exceed MaxSize megabytes,
+// the current file is closed, renamed, and a new log file created with the
+// original name. Thus, the filename you give Logger is always the "current" log
+// file.
+//
+// Backups use the log file name given to Logger, in the form
+// `name-timestamp.ext` where name is the filename without the extension,
+// timestamp is the time at which the log was rotated formatted with the
+// time.Time format of `2006-01-02T15-04-05.000` and the extension is the
+// original extension.  For example, if your Logger.Filename is
+// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 11 2016 would
+// use the filename `/var/log/foo/server-2016-11-11T18-30-00.000.log`
+//
+// # Cleaning Up Old Log Files
+//
+// Whenever a new logfile gets created, old log files may be deleted.  The most
+// recent files according to the encoded timestamp will be retained, up to a
+// number equal to MaxBackups (or all of them if MaxBackups is 0).  Any files
+// with an encoded timestamp older than MaxAge days are deleted, regardless of
+// MaxBackups.  Note that the time encoded in the timestamp is the rotation
+// time, which may differ from the last time that file was written to.
+//
+// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
+type Logger struct {
+	// Filename is the file to write logs to.  Backup log files will be retained
+	// in the same directory.  It uses -lumberjack.log in
+	// os.TempDir() if empty.
+	Filename string `json:"filename" mapstructure:"filename"`
+
+	// MaxSize is the maximum size in megabytes of the log file before it gets
+	// rotated. It defaults to 100 megabytes.
+	MaxSize int `json:"maxsize" mapstructure:"maxsize"`
+
+	// MaxAge is the maximum number of days to retain old log files based on the
+	// timestamp encoded in their filename.  Note that a day is defined as 24
+	// hours and may not exactly correspond to calendar days due to daylight
+	// savings, leap seconds, etc. The default is not to remove old log files
+	// based on age.
+	MaxAge int `json:"maxage" mapstructure:"maxage"`
+
+	// MaxBackups is the maximum number of old log files to retain.  The default
+	// is to retain all old log files (though MaxAge may still cause them to get
+	// deleted.)
+	MaxBackups int `json:"maxbackups" mapstructure:"maxbackups"`
+
+	// LocalTime determines if the time used for formatting the timestamps in
+	// backup files is the computer's local time.  The default is to use UTC
+	// time.
+	LocalTime bool `json:"localtime" mapstructure:"localtime"`
+
+	// Compress determines if the rotated log files should be compressed
+	// using gzip. The default is not to perform compression.
+	Compress bool `json:"compress" mapstructure:"compress"`
+
+	size int64
+	file *os.File
+	mu   sync.Mutex
+
+	millCh    chan bool
+	startMill sync.Once
+}
+
+var (
+	// currentTime exists so it can be mocked out by tests.
+	currentTime = time.Now
+
+	// os_Stat exists so it can be mocked out by tests.
+	os_Stat = os.Stat
+
+	// megabyte is the conversion factor between MaxSize and bytes.  It is a
+	// variable so tests can mock it out and not need to write megabytes of data
+	// to disk.
+	megabyte = 1024 * 1024
+)
+
+// Write implements io.Writer.  If a write would cause the log file to be larger
+// than MaxSize, the file is closed, renamed to include a timestamp of the
+// current time, and a new log file is created using the original log file name.
+// If the length of the write is greater than MaxSize, an error is returned.
+func (l *Logger) Write(p []byte) (n int, err error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	writeLen := int64(len(p))
+	if writeLen > l.max() {
+		return 0, fmt.Errorf(
+			"write length %d exceeds maximum file size %d", writeLen, l.max(),
+		)
+	}
+
+	if l.file == nil {
+		if err = l.openExistingOrNew(len(p)); err != nil {
+			return 0, err
+		}
+	}
+
+	if l.size+writeLen > l.max() {
+		if err := l.rotate(); err != nil {
+			return 0, err
+		}
+	}
+
+	n, err = l.file.Write(p)
+	l.size += int64(n)
+
+	return n, err
+}
+
+// Close implements io.Closer, and closes the current logfile.
+func (l *Logger) Close() error {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	return l.close()
+}
+
+// close closes the file if it is open.
+func (l *Logger) close() error {
+	if l.file == nil {
+		return nil
+	}
+	err := l.file.Close()
+	l.file = nil
+	return err
+}
+
+// Rotate causes Logger to close the existing log file and immediately create a
+// new one.  This is a helper function for applications that want to initiate
+// rotations outside of the normal rotation rules, such as in response to
+// SIGHUP.  After rotating, this initiates compression and removal of old log
+// files according to the configuration.
+func (l *Logger) Rotate() error {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	return l.rotate()
+}
+
+// rotate closes the current file, moves it aside with a timestamp in the name,
+// (if it exists), opens a new file with the original filename, and then runs
+// post-rotation processing and removal.
+func (l *Logger) rotate() error {
+	if err := l.close(); err != nil {
+		return err
+	}
+	if err := l.openNew(); err != nil {
+		return err
+	}
+	l.mill()
+	return nil
+}
+
+// openNew opens a new log file for writing, moving any old log file out of the
+// way.  This methods assumes the file has already been closed.
+func (l *Logger) openNew() error {
+	err := os.MkdirAll(l.dir(), 0755) // directories need the execute bit to be traversable
+	if err != nil {
+		return fmt.Errorf("can't make directories for new logfile: %s", err)
+	}
+
+	name := l.filename()
+	mode := os.FileMode(0644)
+	info, err := os_Stat(name)
+	if err == nil {
+		// Copy the mode off the old logfile.
+		mode = info.Mode()
+		// move the existing file
+		newname := backupName(name, l.LocalTime)
+		if err := os.Rename(name, newname); err != nil {
+			return fmt.Errorf("can't rename log file: %s", err)
+		}
+	}
+
+	// we use truncate here because this should only get called when we've moved
+	// the file ourselves. if someone else creates the file in the meantime,
+	// just wipe out the contents.
+	f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
+	if err != nil {
+		return fmt.Errorf("can't open new logfile: %s", err)
+	}
+	l.file = f
+	l.size = 0
+	return nil
+}
+
+// backupName creates a new filename from the given name, inserting a timestamp
+// between the filename and the extension, using the local time if requested
+// (otherwise UTC).
+func backupName(name string, local bool) string {
+	dir := filepath.Dir(name)
+	filename := filepath.Base(name)
+	ext := filepath.Ext(filename)
+	prefix := filename[:len(filename)-len(ext)]
+	t := currentTime()
+	if !local {
+		t = t.UTC()
+	}
+
+	timestamp := t.Format(backupTimeFormat)
+	return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
+}
+
+// openExistingOrNew opens the logfile if it exists and if the current write
+// would not put it over MaxSize.  If there is no such file or the write would
+// put it over the MaxSize, a new file is created.
+func (l *Logger) openExistingOrNew(writeLen int) error {
+	l.mill()
+
+	filename := l.filename()
+	info, err := os_Stat(filename)
+	if os.IsNotExist(err) {
+		return l.openNew()
+	}
+	if err != nil {
+		return fmt.Errorf("error getting log file info: %s", err)
+	}
+
+	if info.Size()+int64(writeLen) >= l.max() {
+		return l.rotate()
+	}
+
+	file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
+	if err != nil {
+		// if we fail to open the old log file for some reason, just ignore
+		// it and open a new log file.
+		return l.openNew()
+	}
+	l.file = file
+	l.size = info.Size()
+	return nil
+}
+
+// filename generates the name of the logfile from the current time.
+func (l *Logger) filename() string {
+	if l.Filename != "" {
+		return l.Filename
+	}
+	name := filepath.Base(os.Args[0]) + "-lumberjack.log"
+	return filepath.Join(os.TempDir(), name)
+}
+
+// millRunOnce performs compression and removal of stale log files.
+// Log files are compressed if enabled via configuration and old log
+// files are removed, keeping at most l.MaxBackups files, as long as
+// none of them are older than MaxAge.
+func (l *Logger) millRunOnce() error {
+	if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
+		return nil
+	}
+
+	files, err := l.oldLogFiles()
+	if err != nil {
+		return err
+	}
+
+	var compress, remove []logInfo
+
+	if l.MaxBackups > 0 && l.MaxBackups < len(files) {
+		preserved := make(map[string]bool)
+		var remaining []logInfo
+		for _, f := range files {
+			// Only count the uncompressed log file or the
+			// compressed log file, not both.
+			fn := f.Name()
+			if strings.HasSuffix(fn, compressSuffix) {
+				fn = fn[:len(fn)-len(compressSuffix)]
+			}
+			preserved[fn] = true
+
+			if len(preserved) > l.MaxBackups {
+				remove = append(remove, f)
+			} else {
+				remaining = append(remaining, f)
+			}
+		}
+		files = remaining
+	}
+	if l.MaxAge > 0 {
+		diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
+		cutoff := currentTime().Add(-1 * diff)
+
+		var remaining []logInfo
+		for _, f := range files {
+			if f.timestamp.Before(cutoff) {
+				remove = append(remove, f)
+			} else {
+				remaining = append(remaining, f)
+			}
+		}
+		files = remaining
+	}
+
+	if l.Compress {
+		for _, f := range files {
+			if !strings.HasSuffix(f.Name(), compressSuffix) {
+				compress = append(compress, f)
+			}
+		}
+	}
+
+	for _, f := range remove {
+		errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
+		if err == nil && errRemove != nil {
+			err = errRemove
+		}
+	}
+	for _, f := range compress {
+		fn := filepath.Join(l.dir(), f.Name())
+		errCompress := compressLogFile(fn, fn+compressSuffix)
+		if err == nil && errCompress != nil {
+			err = errCompress
+		}
+	}
+
+	return err
+}
+
+// millRun runs in a goroutine to manage post-rotation compression and removal
+// of old log files.
+func (l *Logger) millRun() {
+	for range l.millCh {
+		// what am I going to do, log this?
+		_ = l.millRunOnce()
+	}
+}
+
+// mill performs post-rotation compression and removal of stale log files,
+// starting the mill goroutine if necessary.
+func (l *Logger) mill() {
+	l.startMill.Do(func() {
+		l.millCh = make(chan bool, 1)
+		go l.millRun()
+	})
+	select {
+	case l.millCh <- true:
+	default:
+	}
+}
+
+// oldLogFiles returns the list of backup log files stored in the same
+// directory as the current log file, sorted by ModTime
+func (l *Logger) oldLogFiles() ([]logInfo, error) {
+	files, err := ioutil.ReadDir(l.dir())
+	if err != nil {
+		return nil, fmt.Errorf("can't read log file directory: %s", err)
+	}
+	logFiles := []logInfo{}
+
+	prefix, ext := l.prefixAndExt()
+
+	for _, f := range files {
+		if f.IsDir() {
+			continue
+		}
+		if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
+			logFiles = append(logFiles, logInfo{t, f})
+			continue
+		}
+		if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
+			logFiles = append(logFiles, logInfo{t, f})
+			continue
+		}
+		// error parsing means that the suffix at the end was not generated
+		// by lumberjack, and therefore it's not a backup file.
+	}
+
+	sort.Sort(byFormatTime(logFiles))
+
+	return logFiles, nil
+}
+
+// timeFromName extracts the formatted time from the filename by stripping off
+// the filename's prefix and extension. This prevents someone's filename from
+// confusing time.parse.
+func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
+	if !strings.HasPrefix(filename, prefix) {
+		return time.Time{}, errors.New("mismatched prefix")
+	}
+	if !strings.HasSuffix(filename, ext) {
+		return time.Time{}, errors.New("mismatched extension")
+	}
+	ts := filename[len(prefix) : len(filename)-len(ext)]
+	return time.Parse(backupTimeFormat, ts)
+}
+
+// max returns the maximum size in bytes of log files before rolling.
+func (l *Logger) max() int64 {
+	if l.MaxSize == 0 {
+		return int64(defaultMaxSize * megabyte)
+	}
+	return int64(l.MaxSize) * int64(megabyte)
+}
+
+// dir returns the directory for the current filename.
+func (l *Logger) dir() string {
+	return filepath.Dir(l.filename())
+}
+
+// prefixAndExt returns the filename part and extension part from the Logger's
+// filename.
+func (l *Logger) prefixAndExt() (prefix, ext string) {
+	filename := filepath.Base(l.filename())
+	ext = filepath.Ext(filename)
+	prefix = filename[:len(filename)-len(ext)] + "-"
+	return prefix, ext
+}
+
+// compressLogFile compresses the given log file, removing the
+// uncompressed log file if successful.
+func compressLogFile(src, dst string) (err error) {
+	f, err := os.Open(src)
+	if err != nil {
+		return fmt.Errorf("failed to open log file: %v", err)
+	}
+	defer f.Close()
+
+	fi, err := os_Stat(src)
+	if err != nil {
+		return fmt.Errorf("failed to stat log file: %v", err)
+	}
+
+	// If this file already exists, we presume it was created by
+	// a previous attempt to compress the log file.
+	gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode())
+	if err != nil {
+		return fmt.Errorf("failed to open compressed log file: %v", err)
+	}
+	defer gzf.Close()
+
+	gz := gzip.NewWriter(gzf)
+
+	defer func() {
+		if err != nil {
+			os.Remove(dst)
+			err = fmt.Errorf("failed to compress log file: %v", err)
+		}
+	}()
+
+	if _, err := io.Copy(gz, f); err != nil {
+		return err
+	}
+	if err := gz.Close(); err != nil {
+		return err
+	}
+	if err := gzf.Close(); err != nil {
+		return err
+	}
+
+	if err := f.Close(); err != nil {
+		return err
+	}
+	if err := os.Remove(src); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// logInfo is a convenience struct to return the filename and its embedded
+// timestamp.
+type logInfo struct {
+	timestamp time.Time
+	os.FileInfo
+}
+
+// byFormatTime sorts by newest time formatted in the name.
+type byFormatTime []logInfo
+
+// Less reports whether entry i is newer than entry j (newest first).
+func (b byFormatTime) Less(i, j int) bool {
+	return b[i].timestamp.After(b[j].timestamp)
+}
+
+// Swap swaps entries i and j.
+func (b byFormatTime) Swap(i, j int) {
+	b[i], b[j] = b[j], b[i]
+}
+
+// Len returns the number of entries.
+func (b byFormatTime) Len() int {
+	return len(b)
+}
diff --git a/dbm-services/common/db-config/pkg/core/logger/zap/new.go b/dbm-services/common/db-config/pkg/core/logger/zap/new.go
new file mode 100644
index 0000000000..47af7b247d
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/zap/new.go
@@ -0,0 +1,137 @@
+package zap
+
+// zap is one implementation of the logging facility.
+// Supported features:
+// 1. Two log formats: json and logfmt.
+// 2. Log rotation: by file size or time, with a configurable number of retained backups.
+
+import (
+	"bk-dbconfig/pkg/core/config"
+	"bk-dbconfig/pkg/core/logger/lumberjack"
+	"bk-dbconfig/pkg/core/safego"
+	stdlog "log"
+	"net/http"
+	"os"
+	"time"
+
+	zapfmt "github.com/jsternberg/zap-logfmt"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+var (
+	_LOGHOSTNAME = ""
+)
+
+// TODO log by date
+
+// New 初始化,并返回日志对象。
+func New() *zapLogger {
+	// initialize package-level globals
+	_LOGHOSTNAME, _ = os.Hostname()
+
+	// configure the timestamp format
+	encoderConfig := zap.NewProductionEncoderConfig()
+	encoderConfig.EncodeTime = func(ts time.Time, encoder zapcore.PrimitiveArrayEncoder) {
+		ts = ts.Local()
+		if !config.Logger.LocalTime {
+			ts = ts.UTC()
+		}
+		encoder.AppendString(ts.Format(config.Logger.TimeFormat))
+	}
+
+	// choose the log encoder
+	var encoder zapcore.Encoder
+	switch config.Logger.Formater {
+	case "json":
+		encoder = zapcore.NewJSONEncoder(encoderConfig)
+	case "console":
+		encoder = zapcore.NewConsoleEncoder(encoderConfig)
+	default:
+		encoder = zapfmt.NewEncoder(encoderConfig)
+	}
+
+	// configure log rotation
+	var writerSyncer zapcore.WriteSyncer
+	switch config.Logger.Output {
+	case "", "stdout":
+		writerSyncer = zapcore.AddSync(os.Stdout)
+	case "stderr":
+		writerSyncer = zapcore.AddSync(os.Stderr)
+	default:
+		if config.Logger.MaxSize == 0 && config.Logger.MaxBackups == 0 && config.Logger.MaxAge == 0 {
+			// log rotation disabled
+			ws, _, err := zap.Open(config.Logger.Output)
+			if err != nil {
+				stdlog.Fatalf("Failed to open log file: %s", config.Logger.Output)
+				return nil
+			}
+			writerSyncer = ws
+		} else {
+			// log rotation enabled
+			output := &lumberjack.Logger{
+				Filename:   config.Logger.Output,
+				MaxSize:    config.Logger.MaxSize,
+				MaxBackups: config.Logger.MaxBackups,
+				MaxAge:     config.Logger.MaxAge,
+				LocalTime:  config.Logger.LocalTime,
+			}
+			writerSyncer = zapcore.AddSync(output)
+		}
+	}
+
+	// info level
+	atomicLevel := zap.NewAtomicLevel()
+
+	// assemble the core from encoder, sink, and level
+	core := zapcore.NewCore(
+		encoder,
+		writerSyncer,
+		atomicLevel,
+	)
+
+	// create the logger
+	logger := zap.New(core)
+
+	// to show the caller:
+	// logger = logger.WithOptions(zap.AddCaller())
+
+	// to show stacktraces:
+	// logger = logger.WithOptions(zap.AddStacktrace(zap.ErrorLevel))
+
+	// initialize default fields
+	fs := make([]zap.Field, 0)
+	fs = append(fs, zap.String("hostname", _LOGHOSTNAME))
+	logger = logger.WithOptions(zap.Fields(fs...))
+
+	// set the log level
+	switch config.Logger.Level {
+	case "debug":
+		atomicLevel.SetLevel(zap.DebugLevel)
+	case "info":
+		atomicLevel.SetLevel(zap.InfoLevel)
+	case "warning", "warn":
+		atomicLevel.SetLevel(zap.WarnLevel)
+	case "error":
+		atomicLevel.SetLevel(zap.ErrorLevel)
+	case "fatal":
+		atomicLevel.SetLevel(zap.FatalLevel)
+	case "panic":
+		atomicLevel.SetLevel(zap.PanicLevel)
+	default:
+		atomicLevel.SetLevel(zap.InfoLevel)
+	}
+
+	zap.ReplaceGlobals(logger)
+
+	// HTTP endpoint for adjusting the log level at runtime
+	if config.Logger.LevelServer {
+		mux := http.NewServeMux()
+		mux.Handle("/log_level", atomicLevel)
+		safego.Go(func() {
+			_ = http.ListenAndServe("localhost:10900", mux)
+		})
+	}
+
+	return NewLogger()
+}
diff --git a/dbm-services/common/db-config/pkg/core/logger/zap/zap.go b/dbm-services/common/db-config/pkg/core/logger/zap/zap.go
new file mode 100644
index 0000000000..38f52418cd
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/zap/zap.go
@@ -0,0 +1,2 @@
+// Package zap TODO
+package zap
diff --git a/dbm-services/common/db-config/pkg/core/logger/zap/zapfields.go b/dbm-services/common/db-config/pkg/core/logger/zap/zapfields.go
new file mode 100644
index 0000000000..ef4a639afc
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/zap/zapfields.go
@@ -0,0 +1,67 @@
+package zap
+
+import (
+	"bk-dbconfig/pkg/core/logger/base"
+	"fmt"
+
+	"go.uber.org/zap"
+)
+
+// zapFields handles log entries that carry structured fields.
+type zapFields struct {
+	fields []zap.Field
+}
+
+// ImpleLogger TODO
+func (s *zapFields) ImpleLogger() {}
+
+// Init TODO
+func (s *zapFields) Init() {}
+
+// Debug TODO
+func (s *zapFields) Debug(format string, args ...interface{}) {
+	GetLogger().Debug(fmt.Sprintf(format, args...), s.fields...)
+}
+
+// Info TODO
+func (s *zapFields) Info(format string, args ...interface{}) {
+	GetLogger().Info(fmt.Sprintf(format, args...), s.fields...)
+}
+
+// Warn TODO
+func (s *zapFields) Warn(format string, args ...interface{}) {
+	GetLogger().Warn(fmt.Sprintf(format, args...), s.fields...)
+}
+
+// Error logs at error level.
+func (s *zapFields) Error(format string, args ...interface{}) {
+	GetLogger().Error(fmt.Sprintf(format, args...), s.fields...)
+}
+
+// Fatal TODO
+func (s *zapFields) Fatal(format string, args ...interface{}) {
+	GetLogger().Fatal(fmt.Sprintf(format, args...), s.fields...)
+}
+
+// Panic TODO
+func (s *zapFields) Panic(format string, args ...interface{}) {
+	GetLogger().Panic(fmt.Sprintf(format, args...), s.fields...)
+}
+
+// WithFields supports chained calls, accumulating fields.
+func (s *zapFields) WithFields(mapFields map[string]interface{}) base.ILogger {
+	if s.fields == nil {
+		s.fields = make([]zap.Field, 0, len(mapFields))
+	}
+
+	for key, val := range mapFields {
+		s.fields = append(s.fields, zap.Any(key, val))
+	}
+
+	return s
+}
+
+// String formats the message like fmt.Sprintf.
+func (s *zapFields) String(format string, args ...interface{}) string {
+	return fmt.Sprintf(format, args...)
+}
diff --git a/dbm-services/common/db-config/pkg/core/logger/zap/zaplog.go b/dbm-services/common/db-config/pkg/core/logger/zap/zaplog.go
new file mode 100644
index 0000000000..5236367738
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/logger/zap/zaplog.go
@@ -0,0 +1,66 @@
+package zap
+
+import (
+	"bk-dbconfig/pkg/core/logger/base"
+	"fmt"
+
+	"go.uber.org/zap"
+)
+
+type zapLogger struct{}
+
+// NewLogger TODO
+func NewLogger() *zapLogger {
+	return &zapLogger{}
+}
+
+// ImpleLogger TODO
+func (s *zapLogger) ImpleLogger() {}
+
+// Init TODO
+func (s *zapLogger) Init() {}
+
+// Debug TODO
+func (s *zapLogger) Debug(format string, args ...interface{}) {
+	GetLogger().Debug(fmt.Sprintf(format, args...))
+}
+
+// Info TODO
+func (s *zapLogger) Info(format string, args ...interface{}) {
+	GetLogger().Info(fmt.Sprintf(format, args...))
+}
+
+// Warn TODO
+func (s *zapLogger) Warn(format string, args ...interface{}) {
+	GetLogger().Warn(fmt.Sprintf(format, args...))
+}
+
+// Error logs at error level.
+func (s *zapLogger) Error(format string, args ...interface{}) {
+	GetLogger().Error(fmt.Sprintf(format, args...))
+}
+
+// Fatal note: this method terminates the program!
+func (s *zapLogger) Fatal(format string, args ...interface{}) {
+	GetLogger().Fatal(fmt.Sprintf(format, args...))
+}
+
+// Panic note: this method panics!
+func (s *zapLogger) Panic(format string, args ...interface{}) {
+	GetLogger().Panic(fmt.Sprintf(format, args...))
+}
+
+// WithFields attaches the given fields to the returned logger.
+func (s *zapLogger) WithFields(mapFields map[string]interface{}) base.ILogger {
+	fields := make([]zap.Field, 0, len(mapFields))
+	for key, val := range mapFields {
+		fields = append(fields, zap.Any(key, val))
+	}
+
+	return &zapFields{fields}
+}
+
+// GetLogger TODO
+func GetLogger() *zap.Logger {
+	return zap.L()
+}
diff --git a/dbm-services/common/db-config/pkg/core/safego/graceful.go b/dbm-services/common/db-config/pkg/core/safego/graceful.go
new file mode 100644
index 0000000000..1cf0e45827
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/safego/graceful.go
@@ -0,0 +1,35 @@
+package safego
+
+import (
+	"context"
+	"log"
+	"os"
+	"os/signal"
+	"syscall"
+)
+
+// Shutdowner is anything that can be shut down with a context.
+type Shutdowner interface {
+	Shutdown(context.Context) error
+}
+
+// Graceful blocks until SIGINT or SIGTERM arrives, then calls s.Shutdown with ctx.
+func Graceful(ctx context.Context, s Shutdowner) error {
+	// Wait for an interrupt signal to gracefully shut down the server,
+	// honoring any deadline carried by ctx.
+	quit := make(chan os.Signal, 1) // buffered, as signal.Notify requires
+
+	// kill (no param) default send syscall.SIGTERM
+	// kill -2 is syscall.SIGINT
+	signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
+
+	<-quit
+	log.Printf("Shutting down all...")
+
+	err := s.Shutdown(ctx)
+	if err != nil {
+		log.Fatalf("Forced to shutdown: %v", err)
+	}
+
+	return err
+}
diff --git a/dbm-services/common/db-config/pkg/core/safego/recover.go b/dbm-services/common/db-config/pkg/core/safego/recover.go
new file mode 100644
index 0000000000..3be9a60218
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/safego/recover.go
@@ -0,0 +1,34 @@
+package safego
+
+import (
+	"fmt"
+	"runtime/debug"
+
+	"go.uber.org/zap"
+)
+
+// Go runs f in a goroutine, recovering and logging any panic together with its stack.
+func Go(f func()) {
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				zap.L().Error(fmt.Sprintf("Panic recovered: %s, stack: %s", r, string(debug.Stack())))
+			}
+		}()
+
+		f()
+	}()
+}
+
+// GoArgs is rarely needed: it starts a goroutine with arbitrary arguments, which arrive as interface{} and must be type-asserted inside the function.
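+//
+// An illustrative call (the closure performs its own type assertions):
+//
+//	safego.GoArgs(func(args ...interface{}) {
+//		name := args[0].(string)
+//		fmt.Println("hello", name)
+//	}, "world")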
+func GoArgs(f func(...interface{}), args ...interface{}) {
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				zap.L().Error(fmt.Sprintf("Panic recovered: %s, stack: %s", r, string(debug.Stack())))
+			}
+		}()
+
+		f(args...)
+	}()
+}
diff --git a/dbm-services/common/db-config/pkg/core/safego/safego.go b/dbm-services/common/db-config/pkg/core/safego/safego.go
new file mode 100644
index 0000000000..5db69ca98b
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/safego/safego.go
@@ -0,0 +1,2 @@
+// Package safego TODO
+package safego
diff --git a/dbm-services/common/db-config/pkg/core/trace/file.go b/dbm-services/common/db-config/pkg/core/trace/file.go
new file mode 100644
index 0000000000..96e863b19f
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/trace/file.go
@@ -0,0 +1,23 @@
+package trace
+
+import (
+	"fmt"
+	"runtime"
+	"strings"
+)
+
+// AtWhere returns the caller's position as "file:line", trimmed to start at /dbconfig/.
+func AtWhere() string {
+	pc, _, _, ok := runtime.Caller(1)
+	if !ok {
+		return "method not found"
+	}
+	fileName, line := runtime.FuncForPC(pc).FileLine(pc)
+	if idx := strings.Index(fileName, "/dbconfig/"); idx > 1 {
+		fileName = fileName[idx:]
+	}
+	return fmt.Sprintf("%s:%d", fileName, line)
+}
diff --git a/dbm-services/common/db-config/pkg/core/trace/trace.go b/dbm-services/common/db-config/pkg/core/trace/trace.go
new file mode 100644
index 0000000000..cacc2874f5
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/core/trace/trace.go
@@ -0,0 +1,2 @@
+// Package trace TODO
+package trace
diff --git a/dbm-services/common/db-config/pkg/httpclient/client.go b/dbm-services/common/db-config/pkg/httpclient/client.go
new file mode 100644
index 0000000000..d029ca17cd
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/httpclient/client.go
@@ -0,0 +1,85 @@
+package httpclient
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"math/rand"
+	"net/http"
+	"strings"
+	"time"
+)
+
+// Client is a simple JSON-over-HTTP client with retries.
+type Client struct {
+	api    string
+	token  string
+	client *http.Client
+}
+
+// NewHTTPClient creates a Client with a default transport.
+func NewHTTPClient() *Client {
+	cli := &Client{}
+	cli.client = &http.Client{Transport: &http.Transport{}}
+	return cli
+}
+
+// Do sends the request, making up to 5 attempts with quadratic backoff plus random jitter between attempts.
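+//
+// An illustrative call (URL and payload are placeholders):
+//
+//	cli := NewHTTPClient()
+//	result, err := cli.Do(http.MethodPost, "http://bkapi.example.com/api", []byte(`{"k":"v"}`),
+//		map[string]string{"user": "admin"})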
+func (c *Client) Do(method, url string, params []byte, headers map[string]string) (result *Response, err error) {
+	for idx := 0; idx < 5; idx++ {
+		result, err = c.do(method, url, params, headers)
+		if err == nil {
+			break
+		}
+		wait := idx*idx*1000 + rand.Intn(1000)
+		time.Sleep(time.Duration(wait) * time.Millisecond)
+		continue
+	}
+	return result, err
+}
+
+func (c *Client) do(method, url string, body []byte, headers map[string]string) (result *Response, err error) {
+	req, err := http.NewRequest(method, url, bytes.NewReader(body))
+	if err != nil {
+		log.Printf("[error] NewRequest failed %v", err)
+		return
+	}
+	// set headers
+	c.setHeader(req, headers)
+	resp, err := c.client.Do(req)
+	if err != nil {
+		log.Printf("[error] invoke http request failed %v", err)
+		return
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode >= 400 {
+		return nil, fmt.Errorf("http response failed %v", resp)
+	}
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		log.Printf("[error] http response body read failed %v", err)
+		return nil, err
+	}
+	err = json.Unmarshal(b, &result)
+	if err != nil {
+		log.Printf("[error] response unmarshal failed %v", err)
+		return nil, err
+	}
+	return result, nil
+}
+
+func (c *Client) setHeader(req *http.Request, others map[string]string) {
+	user := "scr-system"
+	if _, ok := others["user"]; ok {
+		user = strings.TrimSpace(others["user"])
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("user", user)
+	// Set JWT token
+	if token, err := Sign(user); err == nil {
+		req.Header.Set("Authorization", "Bearer "+token)
+	}
+}
diff --git a/dbm-services/common/db-config/pkg/httpclient/httpclient.go b/dbm-services/common/db-config/pkg/httpclient/httpclient.go
new file mode 100644
index 0000000000..826525400c
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/httpclient/httpclient.go
@@ -0,0 +1,2 @@
+// Package httpclient TODO
+package httpclient
diff --git a/dbm-services/common/db-config/pkg/httpclient/response.go b/dbm-services/common/db-config/pkg/httpclient/response.go
new file mode 100644
index 0000000000..e5081c521e
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/httpclient/response.go
@@ -0,0 +1,10 @@
+package httpclient
+
+import "encoding/json"
+
+// Response is the common code/message/data response envelope.
+type Response struct {
+	Code    int             `json:"code"`
+	Message string          `json:"message"`
+	Data    json.RawMessage `json:"data"`
+}
diff --git a/dbm-services/common/db-config/pkg/httpclient/sign.go b/dbm-services/common/db-config/pkg/httpclient/sign.go
new file mode 100644
index 0000000000..c9861aede2
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/httpclient/sign.go
@@ -0,0 +1,25 @@
+package httpclient
+
+import (
+	"time"
+
+	"github.com/golang-jwt/jwt"
+)
+
+const (
+	secretId  string = "2d96cd392adb4d29bcd52fa48d5b4352"
+	secretKey string = "Xu1I~TDqB0dUR9Zj"
+)
+
+// Sign issues an HS256 JWT for the given user (rtx), backdating iat by one minute to tolerate clock skew.
+func Sign(rtx string) (tokenString string, err error) {
+	// The token content.
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
+		"sub":  secretId,
+		"user": rtx,
+		"iat":  time.Now().Add(-1 * time.Minute).Unix(),
+	})
+	// Sign the token with the specified secret.
+	tokenString, err = token.SignedString([]byte(secretKey))
+	return
+}
diff --git a/dbm-services/common/db-config/pkg/middleware/cors.go b/dbm-services/common/db-config/pkg/middleware/cors.go
new file mode 100644
index 0000000000..80f5c6d859
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/middleware/cors.go
@@ -0,0 +1,16 @@
+package middleware
+
+import (
+	"github.com/gin-contrib/cors"
+	"github.com/gin-gonic/gin"
+)
+
+// CorsMiddleware allows cross-origin requests from any origin, with credentials.
+func CorsMiddleware() gin.HandlerFunc {
+	corsConfig := cors.DefaultConfig()
+	corsConfig.AllowCredentials = true
+	corsConfig.AllowOriginFunc = func(origin string) bool {
+		return true
+	}
+	return cors.New(corsConfig)
+}
diff --git a/dbm-services/common/db-config/pkg/middleware/middleware.go b/dbm-services/common/db-config/pkg/middleware/middleware.go
new file mode 100644
index 0000000000..bd03aa418b
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/middleware/middleware.go
@@ -0,0 +1,2 @@
+// Package middleware TODO
+package middleware
diff --git a/dbm-services/common/db-config/pkg/middleware/request_body.go b/dbm-services/common/db-config/pkg/middleware/request_body.go
new file mode 100644
index 0000000000..dfd8626876
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/middleware/request_body.go
@@ -0,0 +1,41 @@
+package middleware
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"strings"
+
+	"github.com/gin-gonic/gin"
+)
+
+// RequestLoggerMiddleware logs the request URI and headers (cookies removed), plus the body for POST requests.
+func RequestLoggerMiddleware() gin.HandlerFunc {
+	return func(c *gin.Context) {
+		if c.Request.Method == http.MethodPost {
+			var buf bytes.Buffer
+			tee := io.TeeReader(c.Request.Body, &buf)
+			body, _ := ioutil.ReadAll(tee)
+			c.Request.Body = ioutil.NopCloser(&buf)
+			log.Println(c.Request.RequestURI, simplifyHeader(c.Request.Header))
+			log.Println("body:", string(body))
+		} else {
+			if !strings.HasPrefix(c.Request.RequestURI, "/ping") {
+				log.Println(c.Request.RequestURI, simplifyHeader(c.Request.Header))
+			}
+		}
+		c.Next()
+	}
+}
+
+func simplifyHeader(header http.Header) http.Header {
+	httpHeader := http.Header{}
+	for k, v := range header {
+		if k != "Cookie" {
+			httpHeader[k] = v
+		}
+	}
+	return httpHeader
+}
diff --git a/dbm-services/common/db-config/pkg/middleware/request_id.go b/dbm-services/common/db-config/pkg/middleware/request_id.go
new file mode 100644
index 0000000000..d1a428afa3
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/middleware/request_id.go
@@ -0,0 +1,22 @@
+package middleware
+
+import (
+	"github.com/gin-gonic/gin"
+	"github.com/google/uuid"
+)
+
+// RequestMiddleware ensures every request and response carries an X-Request-Id, generating one when absent.
+func RequestMiddleware() gin.HandlerFunc {
+	return func(ctx *gin.Context) {
+		reqId := ctx.Request.Header.Get("X-Request-Id")
+		if reqId == "" {
+			uid, err := uuid.NewUUID()
+			if err != nil {
+				ctx.Abort()
+				return
+			}
+			reqId = uid.String()
+			ctx.Request.Header.Set("X-Request-Id", reqId)
+		}
+		ctx.Header("X-Request-Id", reqId)
+		ctx.Next()
+	}
+}
diff --git a/dbm-services/common/db-config/pkg/util/backoff.go b/dbm-services/common/db-config/pkg/util/backoff.go
new file mode 100644
index 0000000000..ab5e96c78b
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/backoff.go
@@ -0,0 +1,67 @@
+package util
+
+import (
+	"fmt"
+	"log"
+	"math/rand"
+	"runtime/debug"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// Backoff retries fn up to retry times after the first attempt, sleeping interval seconds between attempts; a panic inside fn is recovered and returned as an error.
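+//
+// An illustrative call (fetchOnce is a placeholder func() error):
+//
+//	attempt, err := util.Backoff(fetchOnce, 3, 2) // up to 3 retries, 2 seconds apart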
+func Backoff(fn func() error, retry uint, interval int) (attempt uint, err error) {
+
+	defer func() {
+		if r := recover(); r != nil {
+			stack := fmt.Sprintf("your function panic! %v ;%s", r, string(debug.Stack()))
+			err = errors.New(stack)
+		}
+	}()
+
+	for n := uint(0); n <= retry; n++ {
+		if n > 0 {
+			attempt = n
+			log.Printf("retry (attempt: #%d), error: %v", attempt, err)
+		}
+
+		err = fn()
+		if err == nil {
+			break
+		}
+		time.Sleep(time.Duration(interval) * time.Second)
+	}
+
+	return attempt, err
+}
+
+// RandomBackoff is like Backoff but sleeps a random 1..maxInterval-1 seconds between attempts; maxInterval is raised to at least 3.
+func RandomBackoff(fn func() error, retry uint, maxInterval int) (attempt uint, err error) {
+
+	defer func() {
+		if r := recover(); r != nil {
+			stack := fmt.Sprintf("your function panic! %v ;%s", r, string(debug.Stack()))
+			err = errors.New(stack)
+		}
+	}()
+
+	if maxInterval < 3 {
+		maxInterval = 3
+	}
+	for n := uint(0); n <= retry; n++ {
+		if n > 0 {
+			attempt = n
+			log.Printf("retry (attempt: #%d), error: %v", attempt, err)
+		}
+
+		err = fn()
+		if err == nil {
+			break
+		}
+		rand.Seed(time.Now().Unix())
+		currentInterval := 1 + rand.Intn(maxInterval-1)
+		time.Sleep(time.Duration(currentInterval) * time.Second)
+	}
+	return attempt, err
+}
diff --git a/dbm-services/common/db-config/pkg/util/boolext.go b/dbm-services/common/db-config/pkg/util/boolext.go
new file mode 100644
index 0000000000..c1c6f018c6
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/boolext.go
@@ -0,0 +1,65 @@
+package util
+
+import (
+	"fmt"
+
+	"github.com/spf13/cast"
+)
+
+// ToBoolE delegates to cast.ToBoolE.
+func ToBoolE(val interface{}) (bool, error) {
+	return cast.ToBoolE(val)
+}
+
+// ToBool delegates to cast.ToBool.
+func ToBool(val interface{}) bool {
+	return cast.ToBool(val)
+}
+
+// ToBoolExtE extends bool-string parsing with y/Y, yes/YES, on/off.
+func ToBoolExtE(val interface{}) (bool, error) {
+	return parseBool(val)
+}
+
+// ToBoolExt is like ToBoolExtE but ignores the parse error.
+func ToBoolExt(val interface{}) bool {
+	ret, _ := parseBool(val)
+	return ret
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, 1.0, t, T, TRUE, true, True, YES, yes, Yes,Y, y, ON, on, On,
+// 0, 0.0, f, F, FALSE, false, False, NO, no, No, N,n, OFF, off, Off.
+// Any other value returns an error.
+// adapted from https://github.com/beego/beego/blob/master/core/config/config.go, extended with y/Y, yes/YES and on/off
+func parseBool(val interface{}) (value bool, err error) {
+	if val != nil {
+		switch v := val.(type) {
+		case bool:
+			return v, nil
+		case string:
+			switch v {
+			case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "Y", "y", "ON", "on", "On":
+				return true, nil
+			case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "N", "n", "OFF", "off", "Off":
+				return false, nil
+			}
+		case int, int8, int16, int32, int64:
+			strV := fmt.Sprintf("%d", v)
+			if strV == "1" {
+				return true, nil
+			} else if strV == "0" {
+				return false, nil
+			}
+		case float64:
+			if v == 1.0 {
+				return true, nil
+			} else if v == 0.0 {
+				return false, nil
+			}
+		}
+		return false, fmt.Errorf("parsing %q: invalid syntax", val)
+	}
+	return false, fmt.Errorf("parsing <nil>: invalid syntax")
+}
diff --git a/dbm-services/common/db-config/pkg/util/compress/compress.go b/dbm-services/common/db-config/pkg/util/compress/compress.go
new file mode 100644
index 0000000000..682e1b2191
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/compress/compress.go
@@ -0,0 +1,40 @@
+// Package compress TODO
+package compress
+
+import (
+	"bytes"
+	"compress/gzip"
+	"io/ioutil"
+)
+
+// GzipBytes gzip-compresses in at compression level 5.
+func GzipBytes(in []byte) ([]byte, error) {
+	var (
+		buffer bytes.Buffer
+		err    error
+	)
+	// level 5 is a valid fixed level, so the error can safely be ignored
+	writer, _ := gzip.NewWriterLevel(&buffer, 5)
+	if _, err = writer.Write(in); err != nil {
+		// close the writer but report the original write error
+		_ = writer.Close()
+		return nil, err
+	}
+	if err = writer.Close(); err != nil {
+		return nil, err
+	}
+	return buffer.Bytes(), nil
+}
+
+// GunzipBytes decompresses gzip-compressed data.
+func GunzipBytes(in []byte) ([]byte, error) {
+	var out []byte
+	reader, err := gzip.NewReader(bytes.NewReader(in))
+	if err != nil {
+		return nil, err
+	}
+	defer reader.Close()
+	if out, err = ioutil.ReadAll(reader); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
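+
+// Example (illustrative) round trip:
+//
+//	gz, _ := GzipBytes([]byte("hello"))
+//	raw, _ := GunzipBytes(gz) // raw == []byte("hello")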
diff --git a/dbm-services/common/db-config/pkg/util/confvalue.go b/dbm-services/common/db-config/pkg/util/confvalue.go
new file mode 100644
index 0000000000..19c8891347
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/confvalue.go
@@ -0,0 +1,11 @@
+package util
+
+import "strings"
+
+// ConfValueIsPlaceHolder reports whether conf_value is a variable; currently
+// any value of the form {{xxx}} is treated as one.
+func ConfValueIsPlaceHolder(s string) bool {
+	return strings.HasPrefix(s, "{{") && strings.HasSuffix(s, "}}")
+}
diff --git a/dbm-services/common/db-config/pkg/util/crypt/auth.go b/dbm-services/common/db-config/pkg/util/crypt/auth.go
new file mode 100644
index 0000000000..cd18b0dce3
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/crypt/auth.go
@@ -0,0 +1,14 @@
+package crypt
+
+import "golang.org/x/crypto/bcrypt"
+
+// Encrypt encrypts the plain text with bcrypt.
+func Encrypt(source string) (string, error) {
+	hashedBytes, err := bcrypt.GenerateFromPassword([]byte(source), bcrypt.DefaultCost)
+	return string(hashedBytes), err
+}
+
+// Compare compares the encrypted text with the plain text if it's the same.
+func Compare(hashedPassword, password string) error {
+	return bcrypt.CompareHashAndPassword([]byte(hashedPassword), []byte(password))
+}
diff --git a/dbm-services/common/db-config/pkg/util/crypt/encrypt.go b/dbm-services/common/db-config/pkg/util/crypt/encrypt.go
new file mode 100644
index 0000000000..a450bc1034
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/crypt/encrypt.go
@@ -0,0 +1,164 @@
+// Package crypt TODO
+package crypt
+
+import (
+	"bk-dbconfig/pkg/util/compress"
+	"bytes"
+	"crypto/aes"
+	"crypto/cipher"
+	"encoding/base64"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// Encryption steps:
+//  1. Pad the data with PKCS7: when n bytes are missing, append n bytes each valued n.
+//  2. Encrypt the padded data with AES in CBC mode.
+//  3. Base64-encode the ciphertext to get a string.
+// Decryption reverses these steps.
+
+// Keys of 16, 24 or 32 bytes select AES-128, AES-192 and AES-256 respectively.
+// The key must not be leaked.
+
+const flagEncrypt = "**"
+
+// pkcs7Padding pads data to a multiple of blockSize.
+func pkcs7Padding(data []byte, blockSize int) []byte {
+	// number of missing bytes: at least 1, at most blockSize
+	padding := blockSize - len(data)%blockSize
+	// append padding copies of byte(padding)
+	padText := bytes.Repeat([]byte{byte(padding)}, padding)
+	return append(data, padText...)
+}
+
+// pkcs7UnPadding reverses pkcs7Padding.
+func pkcs7UnPadding(data []byte) ([]byte, error) {
+	length := len(data)
+	if length == 0 {
+		return nil, errors.New("decrypt error: empty input")
+	}
+	// the padding count is the value of the last byte
+	unPadding := int(data[length-1])
+	if num := length - unPadding; num <= 0 {
+		return nil, errors.New("decrypt error: invalid padding")
+	} else {
+		return data[:num], nil
+	}
+}
+
+// aesEncrypt encrypts plaintext with AES-256-CBC, normalizing key to 32 bytes.
+func aesEncrypt(plaintext []byte, key []byte) ([]byte, error) {
+	// normalize the key to 32 bytes: pad short keys, truncate long ones
+	if len(key) < 32 {
+		key = pkcs7Padding(key, 32)
+	} else if len(key) > 32 {
+		key = key[0:32]
+	}
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+	// cipher block size
+	blockSize := block.BlockSize()
+	// pad the plaintext
+	plaintext = pkcs7Padding(plaintext, blockSize)
+	// buffer that receives the ciphertext
+	crypted := make([]byte, len(plaintext))
+	// CBC mode; the IV is taken from the key prefix
+	blockMode := cipher.NewCBCEncrypter(block, key[:blockSize])
+	// encrypt
+	blockMode.CryptBlocks(crypted, plaintext)
+	return crypted, nil
+}
+
+// aesDecrypt decrypts AES-256-CBC ciphertext, normalizing key to 32 bytes.
+func aesDecrypt(ciphertext []byte, key []byte) ([]byte, error) {
+	// normalize the key to 32 bytes: pad short keys, truncate long ones
+	if len(key) < 32 {
+		key = pkcs7Padding(key, 32)
+	} else if len(key) > 32 {
+		key = key[0:32]
+	}
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+	// cipher block size
+	blockSize := block.BlockSize()
+	// CBC mode; the IV is taken from the key prefix
+	blockMode := cipher.NewCBCDecrypter(block, key[:blockSize])
+	// buffer that receives the plaintext
+	decrypted := make([]byte, len(ciphertext))
+	// decrypt
+	blockMode.CryptBlocks(decrypted, ciphertext)
+	// strip the padding
+
+	decrypted, err = pkcs7UnPadding(decrypted)
+	if err != nil {
+		return nil, err
+	}
+	return decrypted, nil
+}
+
+// EncryptByAes AES-encrypts data, optionally gzips it, base64-encodes the result, and prefixes ** to mark the value as encrypted.
+func EncryptByAes(data []byte, key []byte, zip bool) (string, error) {
+	res, err := aesEncrypt(data, key)
+	if err != nil {
+		return "", errors.Wrapf(err, "p=%s k=%s, zip=%t", data, key, zip)
+	}
+	if zip {
+		res, err = compress.GzipBytes(res)
+		if err != nil {
+			return "", errors.Wrapf(err, "p=%s k=%s, zip=%t", data, key, zip)
+		}
+	}
+	base64Str := base64.StdEncoding.EncodeToString(res)
+	return flagEncrypt + base64Str, nil
+}
+
+// DecryptByAes base64-decodes data (which must not carry the ** prefix), optionally gunzips it, then AES-decrypts.
+func DecryptByAes(data string, key []byte, unzip bool) ([]byte, error) {
+	dataByte, err := base64.StdEncoding.DecodeString(data)
+	if err != nil {
+		return nil, errors.Wrapf(err, "c=%s k=%s, unzip=%t", data, key, unzip)
+	}
+	if unzip {
+		dataByte, err = compress.GunzipBytes(dataByte)
+		if err != nil {
+			return nil, errors.Wrapf(err, "c=%s k=%s, unzip=%t", data, key, unzip)
+		}
+	}
+	return aesDecrypt(dataByte, key)
+}
+
+// IsEncryptedString reports whether data carries the ** encrypted marker and
+// returns the string with the marker removed.
+func IsEncryptedString(data string) (string, bool) {
+	if strings.HasPrefix(data, flagEncrypt) {
+		// TrimPrefix, not TrimLeft: TrimLeft would strip any run of leading
+		// '*' characters, including ones belonging to the payload
+		return strings.TrimPrefix(data, flagEncrypt), true
+	} else {
+		return data, false
+	}
+}
+
+// EncryptString encrypts plaintext p with key k; already-encrypted or empty values are returned unchanged.
+func EncryptString(p, k string, zip bool) (string, error) {
+	if _, isEncrypted := IsEncryptedString(p); isEncrypted {
+		return p, nil
+	} else if p == "" {
+		return "", nil
+	}
+	return EncryptByAes([]byte(p), []byte(k), zip)
+}
+
+// DecryptString decrypts c with key k.
+// A string that does not start with ** is already plaintext and is returned as-is.
+func DecryptString(c, k string, unzip bool) (string, error) {
+	c, isEncrypted := IsEncryptedString(c) // will trim ** if isEncrypted is true
+	if !isEncrypted {
+		return c, nil
+	}
+	// else remove ** prefix from data
+	p, e := DecryptByAes(c, []byte(k), unzip)
+	return string(p), e
+}
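+
+// Example (illustrative) round trip:
+//
+//	c, _ := EncryptString("secret", "my-key", false) // "**..." base64 ciphertext
+//	p, _ := DecryptString(c, "my-key", false)        // "secret"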
diff --git a/dbm-services/common/db-config/pkg/util/datasize.go b/dbm-services/common/db-config/pkg/util/datasize.go
new file mode 100644
index 0000000000..0857b25966
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/datasize.go
@@ -0,0 +1,70 @@
+package util
+
+import (
+	"strings"
+	"unicode"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/cast"
+)
+
+// ParseSizeInBytesE converts strings like 1GB or 12 mb into an integer number
+// of bytes. A missing b/B suffix is assumed, so bare numbers mean bytes.
+func ParseSizeInBytesE(sizeStr string) (int64, error) {
+	sizeStr = strings.TrimSpace(sizeStr)
+	if sizeStr == "" {
+		return -1, errors.New("empty size string")
+	}
+	if unicode.ToLower(rune(sizeStr[len(sizeStr)-1])) != 'b' {
+		sizeStr += "b"
+	}
+	lastChar := len(sizeStr) - 1
+	multiplier := uint(1)
+	if lastChar > 0 {
+		if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' {
+			if lastChar > 1 {
+				switch unicode.ToLower(rune(sizeStr[lastChar-1])) {
+				case 'k':
+					multiplier = 1 << 10
+					sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+				case 'm':
+					multiplier = 1 << 20
+					sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+				case 'g':
+					multiplier = 1 << 30
+					sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+				default:
+					multiplier = 1
+					sizeStr = strings.TrimSpace(strings.TrimSuffix(sizeStr, "b"))
+				}
+			} else if lastChar == 1 {
+				multiplier = 1
+				sizeStr = strings.TrimSpace(strings.TrimSuffix(sizeStr, "b"))
+			}
+		}
+	}
+	size, err := cast.ToInt64E(sizeStr)
+	if err != nil {
+		return -1, errors.Errorf("parse failed to bytes: %s", sizeStr)
+	} else if size < 0 {
+		return -2, errors.Errorf("bytes cannot be negative: %s", sizeStr)
+	}
+	return safeMul(size, int64(multiplier)), nil
+}
+
+func safeMul(a, b int64) int64 {
+	c := a * b
+	if a > 1 && b > 1 && c/b != a {
+		return 0
+	}
+	return c
+}
+
+// ParseSizeInBytes converts strings like gb, MB into a byte count; the b/B
+// suffix is case-insensitive and means one byte. Errors are swallowed and yield 0.
+func ParseSizeInBytes(sizeStr string) int64 {
+	sizeBytes, err := ParseSizeInBytesE(sizeStr)
+	if err != nil || sizeBytes < 0 {
+		return 0
+	}
+	return sizeBytes
+}
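+
+// Example (illustrative):
+//
+//	ParseSizeInBytes("12 MB") // 12582912
+//	ParseSizeInBytes("1gb")   // 1073741824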
diff --git a/dbm-services/common/db-config/pkg/util/dbutil/dbutil.go b/dbm-services/common/db-config/pkg/util/dbutil/dbutil.go
new file mode 100644
index 0000000000..0fadcf4b38
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/dbutil/dbutil.go
@@ -0,0 +1,131 @@
+package util
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+	"time"
+
+	// registers the "mysql" driver used by sql.Open below
+	_ "github.com/go-sql-driver/mysql"
+	"gorm.io/gorm"
+)
+
+// ConnectAdminProxy opens a MySQL connection to the admin proxy without selecting a database.
+func ConnectAdminProxy(user, password, address string) (*sql.DB, error) {
+	config := fmt.Sprintf("%s:%s@tcp(%s)/?timeout=10s&maxAllowedPacket=%s",
+		user,
+		password,
+		address,
+		"4194304")
+	db, err := sql.Open("mysql", config)
+	if err != nil {
+		return nil, err
+	}
+
+	return db, nil
+}
+
+// NewConn opens a MySQL connection to dbName and verifies it with a ping.
+func NewConn(user, password, address, dbName string) (*sql.DB, error) {
+	config := fmt.Sprintf("%s:%s@tcp(%s)/%s?timeout=10s",
+		user,
+		password,
+		address, dbName)
+	db, err := sql.Open("mysql", config)
+	if err != nil {
+		return nil, err
+	}
+	ctxPing, cancelPing := context.WithTimeout(context.Background(), time.Second*5)
+	defer cancelPing()
+	if err = db.PingContext(ctxPing); err != nil {
+		db.Close()
+		return nil, err
+	}
+	// set for db connection
+	// setupDB(db)
+	return db, nil
+}
+
+// DBExec executes a statement and discards its result.
+func DBExec(executeSQL string, db *sql.DB) error {
+	_, err := db.Exec(executeSQL)
+	return err
+}
+
+// DBQuery runs a query and returns every row as a column-name to raw-bytes map.
+func DBQuery(querySQL string, db *sql.DB) ([]map[string]sql.RawBytes, error) {
+	rows, err := db.Query(querySQL)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	columns, err := rows.Columns()
+	if err != nil {
+		return nil, err
+	}
+
+	values := make([]sql.RawBytes, len(columns))
+	scanArgs := make([]interface{}, len(values))
+	for i := range values {
+		scanArgs[i] = &values[i]
+	}
+
+	results := make([]map[string]sql.RawBytes, 0)
+
+	// Fetch rows
+	for rows.Next() {
+		// get RawBytes from data
+		result := make(map[string]sql.RawBytes, 0)
+		err = rows.Scan(scanArgs...)
+		if err != nil {
+			return nil, err
+		}
+		for i, col := range values {
+			// a nil col represents SQL NULL and is stored as nil
+			result[columns[i]] = col
+		}
+		results = append(results, result)
+	}
+
+	return results, rows.Err()
+}
+
+// ExecuteRawSQL runs a raw SQL statement through gorm and scans the result into i.
+func ExecuteRawSQL(db *gorm.DB, sql string, i interface{}) error {
+	return db.Raw(sql).Scan(i).Error
+}
+
+// ConvDBResultToStr converts a row map to strings; NULL columns become "".
+func ConvDBResultToStr(objMap map[string]interface{}) map[string]string {
+	objMapRes := make(map[string]string, 0)
+	for colName, colValue := range objMap {
+		if colValue == nil {
+			// value = "NULL"
+			objMapRes[colName] = ""
+		} else {
+			// value = string(col)
+			objMapRes[colName] = fmt.Sprintf("%v", colValue)
+		}
+	}
+	return objMapRes
+}
+
diff --git a/dbm-services/common/db-config/pkg/util/dbutil/json.go b/dbm-services/common/db-config/pkg/util/dbutil/json.go
new file mode 100644
index 0000000000..e552777477
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/dbutil/json.go
@@ -0,0 +1,64 @@
+package util
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"errors"
+)
+
+// JSON stores raw JSON bytes in the database.
+type JSON []byte
+
+// Value implements driver.Valuer; null JSON is stored as SQL NULL.
+func (j JSON) Value() (driver.Value, error) {
+	if j.IsNull() {
+		return nil, nil
+	}
+	return string(j), nil
+}
+
+// Scan implements sql.Scanner.
+func (j *JSON) Scan(value interface{}) error {
+	if value == nil {
+		*j = nil
+		return nil
+	}
+	s, ok := value.([]byte)
+	if !ok {
+		return errors.New("Invalid Scan Source")
+	}
+	*j = append((*j)[0:0], s...)
+	return nil
+}
+
+// MarshalJSON returns m itself as the JSON encoding; nil marshals to null.
+func (m JSON) MarshalJSON() ([]byte, error) {
+	if m == nil {
+		return []byte("null"), nil
+	}
+	return m, nil
+}
+
+// UnmarshalJSON stores a copy of data in *m.
+func (m *JSON) UnmarshalJSON(data []byte) error {
+	if m == nil {
+		return errors.New("null point exception")
+	}
+	*m = append((*m)[0:0], data...)
+	return nil
+}
+
+// IsNull reports whether j is empty or the JSON literal null.
+func (j JSON) IsNull() bool {
+	return len(j) == 0 || string(j) == "null"
+}
+
+// Equals reports whether j and j1 hold the same bytes.
+func (j JSON) Equals(j1 JSON) bool {
+	return bytes.Equal([]byte(j), []byte(j1))
+}
diff --git a/dbm-services/common/db-config/pkg/util/dbutil/time.go b/dbm-services/common/db-config/pkg/util/dbutil/time.go
new file mode 100644
index 0000000000..6ec9bf9a88
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/dbutil/time.go
@@ -0,0 +1,102 @@
+package util
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+)
+
+// TimeFormat is a "2006-01-02 15:04:05" time string kept in the server's local time zone.
+type TimeFormat string
+
+// Value implements driver.Valuer.
+func (t TimeFormat) Value() (driver.Value, error) {
+	if t.IsNull() {
+		return nil, nil
+	}
+	localTimezone, err := time.LoadLocation("Local") // time zone configured on the server
+	if err != nil {
+		fmt.Printf("time.LoadLocation error:%s", err)
+		localTimezone, _ = time.LoadLocation("Asia/Shanghai") // fall back to the Shanghai time zone
+	}
+	ti, err := time.ParseInLocation("2006-01-02 15:04:05", string(t), localTimezone)
+	if err != nil {
+		fmt.Printf("TimeFormat Value error:%s", err)
+		return time.Now(), nil
+	}
+	return ti.In(localTimezone), nil
+}
+
+// Scan implements sql.Scanner, formatting the time.Time from the driver.
+func (t *TimeFormat) Scan(value interface{}) error {
+	localTimezone, err := time.LoadLocation("Local") // time zone configured on the server
+	if err != nil {
+		fmt.Printf("time.LoadLocation error:%s", err)
+		localTimezone, _ = time.LoadLocation("Asia/Shanghai") // fall back to the Shanghai time zone
+	}
+	if value == nil {
+		*t = ""
+		return nil
+	}
+	s, ok := value.(time.Time)
+	if !ok {
+		return errors.New("Invalid Scan Source")
+	}
+	// wrap in quotes so the value renders directly as a JSON string
+	// *t = TimeFormat(s.In(localTimezone).Format("2006-01-02 15:04:05"))
+	*t = TimeFormat("\"" + s.In(localTimezone).Format("2006-01-02 15:04:05") + "\"")
+	return nil
+}
+
+// MarshalJSON renders the time string as a quoted JSON string.
+func (t TimeFormat) MarshalJSON() ([]byte, error) {
+	if t == "" {
+		return []byte("\"\""), nil
+	}
+	return []byte(fmt.Sprintf("\"%s\"", string(t))), nil
+}
+
+// UnmarshalJSON parses a quoted JSON string into t.
+func (t *TimeFormat) UnmarshalJSON(data []byte) error {
+	var str string
+	err := json.Unmarshal(data, &str)
+	*t = TimeFormat(str)
+	return err
+}
+
+// IsNull reports whether t is empty.
+func (t TimeFormat) IsNull() bool {
+	return len(t) == 0
+}
+
+// Add parses t in the server's local time zone and adds d; a null or
+// unparsable value yields time.Now().
+func (t TimeFormat) Add(d time.Duration) time.Time {
+	if t.IsNull() {
+		return time.Now()
+	}
+	localTimezone, err := time.LoadLocation("Local") // time zone configured on the server
+	if err != nil {
+		fmt.Printf("time.LoadLocation error:%s", err)
+		localTimezone, _ = time.LoadLocation("Asia/Shanghai") // fall back to the Shanghai time zone
+	}
+	ti, err := time.ParseInLocation("2006-01-02 15:04:05", string(t), localTimezone)
+	if err != nil {
+		fmt.Printf("TimeFormat Value error:%s", err)
+		return time.Now()
+	}
+	return ti.Add(d)
+}
diff --git a/dbm-services/common/db-config/pkg/util/durationext.go b/dbm-services/common/db-config/pkg/util/durationext.go
new file mode 100644
index 0000000000..894c8486aa
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/durationext.go
@@ -0,0 +1,216 @@
+package util
+
+import (
+	"errors"
+	"time"
+
+	"github.com/spf13/cast"
+	"github.com/spf13/viper"
+)
+
+// modify from: https://gist.github.com/xhit/79c9e137e1cfe332076cdda9f5e24699
+
+// ViperGetDuration reads the key s from viper and parses it with the extended duration syntax.
+func ViperGetDuration(s string) time.Duration {
+	return ToDurationExt(viper.GetString(s))
+}
+
+// ViperGetDurationE is like ViperGetDuration but returns the parse error.
+func ViperGetDurationE(s string) (time.Duration, error) {
+	return ToDurationExtE(viper.GetString(s))
+}
+
+// ToDurationExt parses the extended duration syntax, which also accepts 1d and 1w; parse errors yield 0.
+func ToDurationExt(s string) time.Duration {
+	d, _ := parseDuration(s)
+	return d
+}
+
+// ToDurationExtE parses the extended duration syntax, which also accepts 1d and 1w.
+func ToDurationExtE(s string) (time.Duration, error) {
+	return parseDuration(s)
+}
+
+// ToDuration uses the built-in duration parsing, which does not accept 1d.
+func ToDuration(s string) time.Duration {
+	return cast.ToDuration(s)
+}
+
+// ToDurationE uses the built-in duration parsing, which does not accept 1d.
+func ToDurationE(s string) (time.Duration, error) {
+	return cast.ToDurationE(s)
+}
+
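+// Example (illustrative):
+//
+//	ToDurationExt("1d12h") // 36h0m0s
+//	ToDurationExt("2w")    // 336h0m0s
+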
+var unitMap = map[string]int64{
+	"ns": int64(time.Nanosecond),
+	"us": int64(time.Microsecond),
+	"µs": int64(time.Microsecond), // U+00B5 = micro symbol
+	"μs": int64(time.Microsecond), // U+03BC = Greek letter mu
+	"ms": int64(time.Millisecond),
+	"s":  int64(time.Second),
+	"m":  int64(time.Minute),
+	"h":  int64(time.Hour),
+	"d":  int64(time.Hour) * 24,
+	"w":  int64(time.Hour) * 168,
+}
+
+// parseDuration parses a duration string.
+// A duration string is a possibly signed sequence of
+// decimal numbers, each with optional fraction and a unit suffix,
+// such as "300ms", "-1.5h" or "2h45m".
+// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h", "d", "w".
+func parseDuration(s string) (time.Duration, error) {
+	// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
+	orig := s
+	var d int64
+	neg := false
+
+	// Consume [-+]?
+	if s != "" {
+		c := s[0]
+		if c == '-' || c == '+' {
+			neg = c == '-'
+			s = s[1:]
+		}
+	}
+	// Special case: if all that is left is "0", this is zero.
+	if s == "0" {
+		return 0, nil
+	}
+	if s == "" {
+		return 0, errors.New("time: invalid duration " + quote(orig))
+	}
+	for s != "" {
+		var (
+			v, f  int64       // integers before, after decimal point
+			scale float64 = 1 // value = v + f/scale
+		)
+
+		var err error
+
+		// The next character must be [0-9.]
+		if !(s[0] == '.' || '0' <= s[0] && s[0] <= '9') {
+			return 0, errors.New("time: invalid duration " + quote(orig))
+		}
+		// Consume [0-9]*
+		pl := len(s)
+		v, s, err = leadingInt(s)
+		if err != nil {
+			return 0, errors.New("time: invalid duration " + quote(orig))
+		}
+		pre := pl != len(s) // whether we consumed anything before a period
+
+		// Consume (\.[0-9]*)?
+		post := false
+		if s != "" && s[0] == '.' {
+			s = s[1:]
+			pl := len(s)
+			f, scale, s = leadingFraction(s)
+			post = pl != len(s)
+		}
+		if !pre && !post {
+			// no digits (e.g. ".s" or "-.s")
+			return 0, errors.New("time: invalid duration " + quote(orig))
+		}
+
+		// Consume unit.
+		i := 0
+		for ; i < len(s); i++ {
+			c := s[i]
+			if c == '.' || '0' <= c && c <= '9' {
+				break
+			}
+		}
+		if i == 0 {
+			return 0, errors.New("time: missing unit in duration " + quote(orig))
+		}
+		u := s[:i]
+		s = s[i:]
+		unit, ok := unitMap[u]
+		if !ok {
+			return 0, errors.New("time: unknown unit " + quote(u) + " in duration " + quote(orig))
+		}
+		if v > (1<<63-1)/unit {
+			// overflow
+			return 0, errors.New("time: invalid duration " + quote(orig))
+		}
+		v *= unit
+		if f > 0 {
+			// float64 is needed to be nanosecond accurate for fractions of hours.
+			// v >= 0 && (f*unit/scale) <= 3.6e+12 (ns/h, h is the largest unit)
+			v += int64(float64(f) * (float64(unit) / scale))
+			if v < 0 {
+				// overflow
+				return 0, errors.New("time: invalid duration " + quote(orig))
+			}
+		}
+		d += v
+		if d < 0 {
+			// overflow
+			return 0, errors.New("time: invalid duration " + quote(orig))
+		}
+	}
+
+	if neg {
+		d = -d
+	}
+	return time.Duration(d), nil
+}
+
+func quote(s string) string {
+	return "\"" + s + "\""
+}
+
+var errLeadingInt = errors.New("time: bad [0-9]*") // never printed
+
+// leadingInt consumes the leading [0-9]* from s.
+func leadingInt(s string) (x int64, rem string, err error) {
+	i := 0
+	for ; i < len(s); i++ {
+		c := s[i]
+		if c < '0' || c > '9' {
+			break
+		}
+		if x > (1<<63-1)/10 {
+			// overflow
+			return 0, "", errLeadingInt
+		}
+		x = x*10 + int64(c) - '0'
+		if x < 0 {
+			// overflow
+			return 0, "", errLeadingInt
+		}
+	}
+	return x, s[i:], nil
+}
+
+// leadingFraction consumes the leading [0-9]* from s.
+// It is used only for fractions, so does not return an error on overflow,
+// it just stops accumulating precision.
+func leadingFraction(s string) (x int64, scale float64, rem string) {
+	i := 0
+	scale = 1
+	overflow := false
+	for ; i < len(s); i++ {
+		c := s[i]
+		if c < '0' || c > '9' {
+			break
+		}
+		if overflow {
+			continue
+		}
+		if x > (1<<63-1)/10 {
+			// It's possible for overflow to give a positive number, so take care.
+			overflow = true
+			continue
+		}
+		y := x*10 + int64(c) - '0'
+		if y < 0 {
+			overflow = true
+			continue
+		}
+		x = y
+		scale *= 10
+	}
+	return x, scale, s[i:]
+}
diff --git a/dbm-services/common/db-config/pkg/util/map.go b/dbm-services/common/db-config/pkg/util/map.go
new file mode 100644
index 0000000000..810529537c
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/map.go
@@ -0,0 +1,59 @@
+package util
+
+// IsEmptyMap reports whether m is nil or has no elements.
+func IsEmptyMap(m map[string]interface{}) bool {
+	return len(m) == 0 // len of a nil map is 0
+}
+
+// IsEmptyMapString reports whether m is nil or has no elements.
+func IsEmptyMapString(m map[string]string) bool {
+	return len(m) == 0 // len of a nil map is 0
+}
+
+// MapHasElement reports whether aMap contains the key elem.
+func MapHasElement(aMap map[string]string, elem string) bool {
+	_, ok := aMap[elem]
+	return ok
+}
+
+// MapMerge merges two maps, either of which may be nil; keys from fromMap
+// overwrite keys in toMap. If both are nil, an initialized empty map is returned.
+func MapMerge(toMap, fromMap map[string]string) map[string]string {
+	if toMap == nil && fromMap == nil {
+		return make(map[string]string)
+	} else if toMap == nil {
+		return fromMap
+	} else if fromMap == nil {
+		return toMap
+	}
+	for k, v := range fromMap {
+		toMap[k] = v
+	}
+	return toMap
+}
+
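+// Example (illustrative):
+//
+//	MapMerge(map[string]string{"a": "1"}, map[string]string{"b": "2"}) // map[a:1 b:2]
+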
+// MapCopy returns a shallow copy of fromMap.
+func MapCopy(fromMap map[string]interface{}) map[string]interface{} {
+	newMap := make(map[string]interface{})
+	for k, v := range fromMap {
+		newMap[k] = v
+	}
+	return newMap
+}
diff --git a/dbm-services/common/db-config/pkg/util/serialize/serialize.go b/dbm-services/common/db-config/pkg/util/serialize/serialize.go
new file mode 100644
index 0000000000..0a6253f232
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/serialize/serialize.go
@@ -0,0 +1,45 @@
+// Package serialize TODO
+package serialize
+
+import (
+	"bk-dbconfig/pkg/util/compress"
+	"encoding/base64"
+
+	"github.com/pkg/errors"
+	"github.com/vmihailenco/msgpack/v5"
+)
+
+// SerializeToString msgpack-serializes v, optionally gzips it, and
+// base64-encodes the result.
+func SerializeToString(v interface{}, compression bool) (string, error) {
+	b, err := msgpack.Marshal(v)
+	if err != nil {
+		return "", err
+	}
+	if compression {
+		if b, err = compress.GzipBytes(b); err != nil {
+			return "", err
+		}
+	}
+	s := base64.StdEncoding.EncodeToString(b)
+	return s, nil
+}
+
+// UnSerializeString reverses SerializeToString: base64-decode, optionally
+// gunzip, then msgpack-unmarshal into v (which must be a pointer).
+func UnSerializeString(s string, v interface{}, unCompress bool) error {
+	if s == "" {
+		return errors.New("empty payload")
+	}
+	b, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		return errors.Wrap(err, "payload base64 decode failed")
+	}
+	if unCompress {
+		if b, err = compress.GunzipBytes(b); err != nil {
+			return errors.Wrap(err, "payload decompress failed")
+		}
+	}
+	// v is already a pointer supplied by the caller, so pass it directly
+	err = msgpack.Unmarshal(b, v)
+	if err != nil {
+		return errors.Wrap(err, "payload unmarshal failed")
+	}
+	return nil
+}
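+
+// Example (illustrative) round trip:
+//
+//	s, _ := SerializeToString(map[string]int{"a": 1}, true)
+//	var out map[string]int
+//	_ = UnSerializeString(s, &out, true) // out["a"] == 1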
diff --git a/dbm-services/common/db-config/pkg/util/set.go b/dbm-services/common/db-config/pkg/util/set.go
new file mode 100644
index 0000000000..c6ebb7eeb7
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/set.go
@@ -0,0 +1,101 @@
+package util
+
+import (
+	"sync"
+)
+
+// Set is a goroutine-safe set of arbitrary values.
+type Set struct {
+	m map[interface{}]bool
+	sync.RWMutex
+}
+
+// NewSet returns an empty Set.
+func NewSet() *Set {
+	return &Set{
+		m: map[interface{}]bool{},
+	}
+}
+
+// AddList adds every item in items to the set.
+func (s *Set) AddList(items []interface{}) {
+	s.Lock()
+	defer s.Unlock()
+	for _, item := range items {
+		s.m[item] = true
+	}
+}
+
+// Add adds item to the set.
+func (s *Set) Add(item interface{}) {
+	s.Lock()
+	defer s.Unlock()
+	s.m[item] = true
+}
+
+// Remove deletes item from the set.
+func (s *Set) Remove(item interface{}) {
+	s.Lock()
+	defer s.Unlock()
+	delete(s.m, item)
+}
+
+// Has reports whether item is in the set.
+func (s *Set) Has(item interface{}) bool {
+	s.RLock()
+	defer s.RUnlock()
+	_, ok := s.m[item]
+	return ok
+}
+
+// Len returns the number of elements.
+func (s *Set) Len() int {
+	return len(s.List())
+}
+
+// Clear removes all elements.
+func (s *Set) Clear() {
+	s.Lock()
+	defer s.Unlock()
+	s.m = map[interface{}]bool{}
+}
+
+// IsEmpty reports whether the set has no elements.
+func (s *Set) IsEmpty() bool {
+	return s.Len() == 0
+}
+
+// List returns the elements in arbitrary order.
+func (s *Set) List() []interface{} {
+	s.RLock()
+	defer s.RUnlock()
+	list := []interface{}{}
+	for item := range s.m {
+		list = append(list, item)
+	}
+	return list
+}
+
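+// Example (illustrative):
+//
+//	s := NewSet()
+//	s.Add("1.1.1.1")
+//	s.Add("2.2.2.2")
+//	s.Has("1.1.1.1") // true
+//	s.Len()          // 2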
diff --git a/dbm-services/common/db-config/pkg/util/slice.go b/dbm-services/common/db-config/pkg/util/slice.go
new file mode 100644
index 0000000000..c09e7aab4c
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/slice.go
@@ -0,0 +1,255 @@
+package util
+
+import (
+	"reflect"
+	"strconv"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+var (
+	// ErrConvertFail TODO
+	ErrConvertFail = errors.New("convert data type is failure")
+)
+
+// Reverse string slice [site user info 0] -> [0 info user site]
+func Reverse(ss []string) {
+	ln := len(ss)
+	for i := 0; i < ln/2; i++ {
+		li := ln - i - 1
+		// fmt.Println(i, "<=>", li)
+		ss[i], ss[li] = ss[li], ss[i]
+	}
+}
+
+// StringsRemove removes a value from a string slice.
+func StringsRemove(ss []string, s string) []string {
+	var ns []string
+	for _, v := range ss {
+		if v != s {
+			ns = append(ns, v)
+		}
+	}
+
+	return ns
+}
+
+// StringsToInts string slice to int slice
+func StringsToInts(ss []string) (ints []int, err error) {
+	for _, str := range ss {
+		iVal, err := strconv.Atoi(str)
+		if err != nil {
+			return []int{}, err
+		}
+
+		ints = append(ints, iVal)
+	}
+	return
+}
+
+// TrimStrings trim string slice item.
+func TrimStrings(ss []string, cutSet ...string) (ns []string) {
+	hasCutSet := len(cutSet) > 0 && cutSet[0] != ""
+
+	for _, str := range ss {
+		if hasCutSet {
+			ns = append(ns, strings.Trim(str, cutSet[0]))
+		} else {
+			ns = append(ns, strings.TrimSpace(str))
+		}
+	}
+	return
+}
+
+// IntsHas check the []int contains the given value
+func IntsHas(ints []int, val int) bool {
+	for _, ele := range ints {
+		if ele == val {
+			return true
+		}
+	}
+	return false
+}
+
+// Int64sHas check the []int64 contains the given value
+func Int64sHas(ints []int64, val int64) bool {
+	for _, ele := range ints {
+		if ele == val {
+			return true
+		}
+	}
+	return false
+}
+
+// StringsHas check the []string contains the given element
+func StringsHas(ss []string, val string) bool {
+	for _, ele := range ss {
+		if ele == val {
+			return true
+		}
+	}
+	return false
+}
+
+// Contains assert array(strings, intXs, uintXs) should be contains the given value(int(X),string).
+func Contains(arr, val interface{}) bool {
+	if val == nil || arr == nil {
+		return false
+	}
+
+	// if is string value
+	if strVal, ok := val.(string); ok {
+		if ss, ok := arr.([]string); ok {
+			return StringsHas(ss, strVal)
+		}
+
+		rv := reflect.ValueOf(arr)
+		if rv.Kind() == reflect.Slice || rv.Kind() == reflect.Array {
+			for i := 0; i < rv.Len(); i++ {
+				if v, ok := rv.Index(i).Interface().(string); ok && strings.EqualFold(v, strVal) {
+					return true
+				}
+			}
+		}
+
+		return false
+	}
+
+	// as int value
+	intVal, err := ToInt64(val)
+	if err != nil {
+		return false
+	}
+
+	if int64s, ok := toInt64Slice(arr); ok {
+		return Int64sHas(int64s, intVal)
+	}
+	return false
+}
+
+// NotContains reports that array(strings, ints, uints) does not contain the given value.
+func NotContains(arr, val interface{}) bool {
+	return !Contains(arr, val)
+}
+
+func toInt64Slice(arr interface{}) (ret []int64, ok bool) {
+	rv := reflect.ValueOf(arr)
+	if rv.Kind() != reflect.Slice && rv.Kind() != reflect.Array {
+		return
+	}
+
+	for i := 0; i < rv.Len(); i++ {
+		i64, err := ToInt64(rv.Index(i).Interface())
+		if err != nil {
+			return []int64{}, false
+		}
+
+		ret = append(ret, i64)
+	}
+
+	ok = true
+	return
+}
+
+// ToInt64 convert string to int64
+func ToInt64(in interface{}) (i64 int64, err error) {
+	switch tVal := in.(type) {
+	case nil:
+		i64 = 0
+	case string:
+		i64, err = strconv.ParseInt(strings.TrimSpace(tVal), 10, 0)
+	case int:
+		i64 = int64(tVal)
+	case int8:
+		i64 = int64(tVal)
+	case int16:
+		i64 = int64(tVal)
+	case int32:
+		i64 = int64(tVal)
+	case int64:
+		i64 = tVal
+	case uint:
+		i64 = int64(tVal)
+	case uint8:
+		i64 = int64(tVal)
+	case uint16:
+		i64 = int64(tVal)
+	case uint32:
+		i64 = int64(tVal)
+	case uint64:
+		i64 = int64(tVal)
+	case float32:
+		i64 = int64(tVal)
+	case float64:
+		i64 = int64(tVal)
+	default:
+		err = ErrConvertFail
+	}
+	return
+}
+
+// MinValueInArry returns the minimum value in arry, or 0 when arry is empty.
+func MinValueInArry(arry []uint64) (minv uint64) {
+	if len(arry) < 1 {
+		return 0
+	}
+	minv = arry[0]
+	for _, v := range arry {
+		if v <= minv {
+			minv = v
+		}
+	}
+	return
+}
+
+// IsMinInArry reports whether ele is no greater than the minimum of arry.
+func IsMinInArry(ele uint64, arry []uint64) bool {
+	return ele <= MinValueInArry(arry)
+}
+
+// SliceUniq deduplicates input; element order is not preserved.
+func SliceUniq(input []string) []string {
+	newData := []string{}
+	if len(input) > 0 {
+		temp := make(map[string]struct{})
+		for _, value := range input {
+			temp[value] = struct{}{}
+		}
+		for k := range temp {
+			newData = append(newData, k)
+		}
+	}
+	return newData
+}
+
+// SliceUniqMap deduplicates s in place, preserving first-seen order.
+func SliceUniqMap(s []string) []string {
+	seen := make(map[string]bool, len(s))
+	j := 0
+	for _, v := range s {
+		if _, ok := seen[v]; ok {
+			continue
+		}
+		seen[v] = true
+		s[j] = v
+		j++
+	}
+	return s[:j]
+}
+
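+// Example (illustrative):
+//
+//	SliceUniqMap([]string{"a", "b", "a"}) // ["a", "b"]
+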
+// SliceErrorsToError joins multiple errors into one newline-separated error.
+func SliceErrorsToError(errs []error) error {
+	var errStrs []string
+	for _, e := range errs {
+		errStrs = append(errStrs, e.Error())
+	}
+	errString := strings.Join(errStrs, "\n")
+	return errors.New(errString)
+}
+
+// IsSlice reports whether v is a slice.
+func IsSlice(v interface{}) bool {
+	return reflect.TypeOf(v).Kind() == reflect.Slice
+}
diff --git a/dbm-services/common/db-config/pkg/util/str.go b/dbm-services/common/db-config/pkg/util/str.go
new file mode 100644
index 0000000000..d37539a510
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/str.go
@@ -0,0 +1,86 @@
+package util
+
+import (
+	"crypto/md5"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"regexp"
+	"strings"
+)
+
+// SplitInputs splits input on any run of ; , whitespace, dropping empty items.
+func SplitInputs(input string) []string {
+	splitRegex := regexp.MustCompile(`[;,\n\t ]+`)
+	splitResults := splitRegex.Split(input, -1)
+	results := make([]string, 0)
+	for _, s := range splitResults {
+		if strings.TrimSpace(s) != "" {
+			results = append(results, strings.TrimSpace(s))
+		}
+	}
+	return results
+}
+
+// SplitAny splits s by the regular expression delimiters and trims each part,
+// e.g. util.SplitAny("ab##cd$$ef", "(##|\\$\\$)").
+func SplitAny(s string, delimiters string) []string {
+	splitRegex := regexp.MustCompile(delimiters)
+	splitResults := splitRegex.Split(s, -1)
+	results := make([]string, 0)
+	for _, s := range splitResults {
+		if strings.TrimSpace(s) != "" {
+			results = append(results, strings.TrimSpace(s))
+		}
+	}
+	return results
+}
+
+// SplitAnyRune splits s at any rune contained in seps,
+// e.g. util.SplitAnyRune("a,b c", ", ").
+// If s is empty it returns an empty slice, not [""].
+func SplitAnyRune(s string, seps string) []string {
+	splitter := func(r rune) bool {
+		return strings.ContainsRune(seps, r)
+	}
+	return strings.FieldsFunc(s, splitter)
+}
+
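+// Example (illustrative):
+//
+//	SplitAnyRune("a,b c", ", ") // ["a", "b", "c"]
+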
+// SplitAnyRuneTrim godoc
+func SplitAnyRuneTrim(s string, seps string) []string {
+	ss := SplitAnyRune(s, seps)
+	for i, el := range ss {
+		ss[i] = strings.TrimSpace(el)
+	}
+	return ss
+}
+
+// ReplaceBlank removes all whitespace, including spaces and tabs, from str.
+func ReplaceBlank(str string) string {
+	if str == "" {
+		return ""
+	}
+	// regexp matching one or more whitespace characters
+	reg := regexp.MustCompile(`\s+`)
+	return reg.ReplaceAllString(str, "")
+}
+
+// SafeBase64Decode try base64 decode input, if failed, return input direct
+func SafeBase64Decode(text string) string {
+	bs, err := base64.StdEncoding.DecodeString(text)
+	if err != nil {
+		return text
+	}
+	return string(bs)
+}
+
+// Str2md5 returns the hex-encoded MD5 digest of s.
+func Str2md5(s string) string {
+	w := md5.New()
+	io.WriteString(w, s)
+	md5str := fmt.Sprintf("%x", w.Sum(nil))
+	return md5str
+}
diff --git a/dbm-services/common/db-config/pkg/util/tls.go b/dbm-services/common/db-config/pkg/util/tls.go
new file mode 100644
index 0000000000..bafd544115
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/tls.go
@@ -0,0 +1,175 @@
+package util
+
+import (
+	"crypto/sha256"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"math/big"
+)
+
+// TLSInfo holds paths to TLS certificate, key and CA files.
+type TLSInfo struct {
+	CertFile       string
+	KeyFile        string
+	CAFile         string
+	TrustedCAFile  string
+	ClientCertAuth bool
+
+	// parseFunc exists to simplify testing. Typically, parseFunc
+	// should be left nil. In that case, tls.X509KeyPair will be used.
+	parseFunc func([]byte, []byte) (tls.Certificate, error)
+}
+
+// ClientConfig generates a tls.Config object for use by an HTTP client.
+func (info TLSInfo) ClientConfig() (*tls.Config, error) {
+	var cfg *tls.Config
+	var err error
+
+	if !info.Empty() {
+		cfg, err = info.baseConfig()
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		cfg = &tls.Config{}
+	}
+
+	CAFiles := info.cafiles()
+	if len(CAFiles) > 0 {
+		cfg.RootCAs, err = newCertPool(CAFiles)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return cfg, nil
+}
+
+// newCertPool creates x509 certPool with provided CA files.
+func newCertPool(CAFiles []string) (*x509.CertPool, error) {
+	certPool := x509.NewCertPool()
+
+	for _, CAFile := range CAFiles {
+		pemByte, err := ioutil.ReadFile(CAFile)
+		if err != nil {
+			return nil, err
+		}
+
+		for {
+			var block *pem.Block
+			block, pemByte = pem.Decode(pemByte)
+			if block == nil {
+				break
+			}
+			cert, err := x509.ParseCertificate(block.Bytes)
+			if err != nil {
+				return nil, err
+			}
+			certPool.AddCert(cert)
+		}
+	}
+
+	return certPool, nil
+}
+
+// String implements fmt.Stringer for logging.
+func (info TLSInfo) String() string {
+	return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v", info.CertFile, info.KeyFile,
+		info.CAFile, info.TrustedCAFile, info.ClientCertAuth)
+}
+
+// Empty reports whether no certificate/key pair is configured.
+func (info TLSInfo) Empty() bool {
+	return info.CertFile == "" && info.KeyFile == ""
+}
+
+func (info TLSInfo) baseConfig() (*tls.Config, error) {
+	if info.KeyFile == "" || info.CertFile == "" {
+		return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile)
+	}
+
+	cert, err := ioutil.ReadFile(info.CertFile)
+	if err != nil {
+		return nil, err
+	}
+
+	key, err := ioutil.ReadFile(info.KeyFile)
+	if err != nil {
+		return nil, err
+	}
+
+	parseFunc := info.parseFunc
+	if parseFunc == nil {
+		parseFunc = tls.X509KeyPair
+	}
+
+	tlsCert, err := parseFunc(cert, key)
+	if err != nil {
+		return nil, err
+	}
+
+	cfg := &tls.Config{
+		Certificates: []tls.Certificate{tlsCert},
+		MinVersion:   tls.VersionTLS10,
+	}
+	return cfg, nil
+}
+
+// cafiles returns a list of CA file paths.
+func (info TLSInfo) cafiles() []string {
+	cs := make([]string, 0)
+	if info.CAFile != "" {
+		cs = append(cs, info.CAFile)
+	}
+	if info.TrustedCAFile != "" {
+		cs = append(cs, info.TrustedCAFile)
+	}
+	return cs
+}
+
+// ======== BASE58
+const (
+	base58Table = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
+)
+
+// base58Hash returns sha256(sha256(ba)); the original sha.Sum(ba) appended
+// the hash of an empty input to ba instead of hashing ba twice.
+func base58Hash(ba []byte) []byte {
+	first := sha256.Sum256(ba)
+	second := sha256.Sum256(first[:])
+	return second[:]
+}
+
+// EncodeBase58 encodes ba as base58 using the Bitcoin alphabet.
+func EncodeBase58(ba []byte) []byte {
+	if len(ba) == 0 {
+		return nil
+	}
+
+	// Expected size increase from base58Table conversion is approximately 137%, use 138% to be safe
+	ri := len(ba) * 138 / 100
+	ra := make([]byte, ri+1)
+
+	x := new(big.Int).SetBytes(ba) // ba is big-endian
+	x.Abs(x)
+	y := big.NewInt(58)
+	m := new(big.Int)
+
+	for x.Sign() > 0 {
+		x, m = x.DivMod(x, y, m)
+		ra[ri] = base58Table[int32(m.Int64())]
+		ri--
+	}
+
+	// Leading zero bytes are encoded as '1', the base58 zero digit
+	for i := 0; i < len(ba); i++ {
+		if ba[i] != 0 {
+			break
+		}
+		ra[ri] = '1'
+		ri--
+	}
+	return ra[ri+1:]
+}
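+
+// Example (illustrative):
+//
+//	EncodeBase58([]byte{0x00, 0x01}) // "12"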
diff --git a/dbm-services/common/db-config/pkg/util/trim.go b/dbm-services/common/db-config/pkg/util/trim.go
new file mode 100644
index 0000000000..47f1660262
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/trim.go
@@ -0,0 +1,90 @@
+package util
+
+import (
+	"reflect"
+	"strings"
+)
+
+// reference: https://gist.github.com/hvoecking/10772475
+
+// TrimSpace trim space recursive with all string field
+func TrimSpace(obj interface{}) interface{} {
+	// Wrap the original in a reflect.Value
+	original := reflect.ValueOf(obj)
+
+	copy := reflect.New(original.Type()).Elem()
+	trimSpaceRecursive(copy, original)
+
+	// Remove the reflection wrapper
+	return copy.Interface()
+}
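+
+// Example (illustrative; T is a hypothetical struct with a string field):
+//
+//	type T struct{ Name string }
+//	TrimSpace(T{Name: " a "}).(T).Name // "a"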
+
+func trimSpaceRecursive(copy, original reflect.Value) {
+	switch original.Kind() {
+	// The first cases handle nested structures and translate them recursively
+
+	// If it is a pointer we need to unwrap and call once again
+	case reflect.Ptr:
+		// To get the actual value of the original we have to call Elem()
+		// At the same time this unwraps the pointer so we don't end up in
+		// an infinite recursion
+		originalValue := original.Elem()
+		// Check if the pointer is nil
+		if !originalValue.IsValid() {
+			return
+		}
+		// Allocate a new object and set the pointer to it
+		copy.Set(reflect.New(originalValue.Type()))
+		// Unwrap the newly created pointer
+		trimSpaceRecursive(copy.Elem(), originalValue)
+
+	// If it is an interface (which is very similar to a pointer), do basically the
+	// same as for the pointer. Though a pointer is not the same as an interface so
+	// note that we have to call Elem() after creating a new object because otherwise
+	// we would end up with an actual pointer
+	case reflect.Interface:
+		// Get rid of the wrapping interface
+		originalValue := original.Elem()
+		// Create a new object. Now new gives us a pointer, but we want the value it
+		// points to, so we have to call Elem() to unwrap it
+		copyValue := reflect.New(originalValue.Type()).Elem()
+		trimSpaceRecursive(copyValue, originalValue)
+		copy.Set(copyValue)
+
+	// If it is a struct we translate each field
+	case reflect.Struct:
+		for i := 0; i < original.NumField(); i += 1 {
+			trimSpaceRecursive(copy.Field(i), original.Field(i))
+		}
+
+	// If it is a slice we create a new slice and translate each element
+	case reflect.Slice:
+		copy.Set(reflect.MakeSlice(original.Type(), original.Len(), original.Cap()))
+		for i := 0; i < original.Len(); i += 1 {
+			trimSpaceRecursive(copy.Index(i), original.Index(i))
+		}
+
+	// If it is a map we create a new map and translate each value
+	case reflect.Map:
+		copy.Set(reflect.MakeMap(original.Type()))
+		for _, key := range original.MapKeys() {
+			originalValue := original.MapIndex(key)
+			// New gives us a pointer, but again we want the value
+			copyValue := reflect.New(originalValue.Type()).Elem()
+			trimSpaceRecursive(copyValue, originalValue)
+			copy.SetMapIndex(key, copyValue)
+		}
+
+	// Otherwise we cannot traverse anywhere, so this finishes the recursion
+
+	// If it is a string translate it (yay finally we're doing what we came for)
+	case reflect.String:
+		str := strings.TrimSpace(original.Interface().(string))
+		copy.SetString(str)
+
+	// And everything else will simply be taken from the original
+	default:
+		copy.Set(original)
+	}
+
+}
diff --git a/dbm-services/common/db-config/pkg/util/trim_test.go b/dbm-services/common/db-config/pkg/util/trim_test.go
new file mode 100644
index 0000000000..6cc1fe9dc3
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/trim_test.go
@@ -0,0 +1,141 @@
+package util_test
+
+import (
+	"bk-dbconfig/pkg/util"
+	"fmt"
+	"testing"
+)
+
+type I interface{}
+
+type A struct {
+	Greeting string
+	Message  string
+	Pi       float64
+}
+
+type B struct {
+	Struct    A
+	Ptr       *A
+	Answer    int
+	Map       map[string]string
+	StructMap map[string]interface{}
+	Slice     []string
+}
+
+func create() I {
+	// The type C is actually hidden, but reflection allows us to look inside it
+	type C struct {
+		String string
+	}
+
+	return B{
+		Struct: A{
+			Greeting: " Hello!\n",
+			Message:  " translate this\n",
+			Pi:       3.14,
+		},
+		Ptr: &A{
+			Greeting: " What's up?\n",
+			Message:  " point here\n",
+			Pi:       3.14,
+		},
+		Map: map[string]string{
+			"Test": " translate this as well\n",
+		},
+		StructMap: map[string]interface{}{
+			"C": C{
+				String: " deep\n",
+			},
+		},
+		Slice: []string{
+			" and one more\n",
+		},
+		Answer: 42,
+	}
+}
+
+func TestTrimSpace(t *testing.T) {
+	// Some example test cases so you can mess around and see if it's working
+	// To check if it's correct look at the output, no automated checking here
+
+	// Test the simple cases
+	{
+		fmt.Println("Test with nil pointer to struct:")
+		var original *B
+		translated := util.TrimSpace(original)
+		fmt.Println("original:  ", original)
+		fmt.Println("translated:", translated)
+		fmt.Println()
+	}
+	{
+		fmt.Println("Test with nil pointer to interface:")
+		var original *I
+		translated := util.TrimSpace(original)
+		fmt.Println("original:  ", original)
+		fmt.Println("translated:", translated)
+		fmt.Println()
+	}
+	{
+		fmt.Println("Test with struct that has no elements:")
+		type E struct {
+		}
+		var original E
+		translated := util.TrimSpace(original)
+		fmt.Println("original:  ", original)
+		fmt.Println("translated:", translated)
+		fmt.Println()
+	}
+	{
+		fmt.Println("Test with empty struct:")
+		var original B
+		translated := util.TrimSpace(original)
+		fmt.Println("original:  ", original, "->", original.Ptr)
+		fmt.Println("translated:", translated, "->", translated.(B).Ptr)
+		fmt.Println()
+	}
+
+	// Imagine we have no influence on the value returned by create()
+	created := create()
+	{
+		// Assume we know that `created` is of type B
+		fmt.Println("Translating a struct:")
+		original := created.(B)
+		translated := util.TrimSpace(original)
+		fmt.Println("original:  ", original, "->", original.Ptr)
+		fmt.Println("translated:", translated, "->", translated.(B).Ptr)
+		fmt.Println()
+	}
+	{
+		// Assume we don't know created's type
+		fmt.Println("Translating a struct wrapped in an interface:")
+		original := created
+		translated := util.TrimSpace(original)
+		fmt.Println("original:  ", original, "->", original.(B).Ptr)
+		fmt.Println("translated:", translated, "->", translated.(B).Ptr)
+		fmt.Println()
+	}
+	{
+		// Assume we don't know B's type and want to pass a pointer
+		fmt.Println("Translating a pointer to a struct wrapped in an interface:")
+		original := &created
+		translated := util.TrimSpace(original)
+		fmt.Println("original:  ", (*original), "->", (*original).(B).Ptr)
+		fmt.Println("translated:", (*translated.(*I)), "->", (*translated.(*I)).(B).Ptr)
+		fmt.Println()
+	}
+	{
+		// Assume we have a struct that contains an interface of an unknown type
+		fmt.Println("Translating a struct containing a pointer to a struct wrapped in an interface:")
+		type D struct {
+			Payload *I
+		}
+		original := D{
+			Payload: &created,
+		}
+		translated := util.TrimSpace(original)
+		fmt.Println("original:  ", original, "->", (*original.Payload), "->", (*original.Payload).(B).Ptr)
+		fmt.Println("translated:", translated, "->", (*translated.(D).Payload), "->", (*(translated.(D).Payload)).(B).Ptr)
+		fmt.Println()
+	}
+}
diff --git a/dbm-services/common/db-config/pkg/util/util.go b/dbm-services/common/db-config/pkg/util/util.go
new file mode 100644
index 0000000000..01cea24dd4
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/util/util.go
@@ -0,0 +1,18 @@
+// Package util TODO
+package util
+
+import "strings"
+
+// LoggerErrorStack logs the error's stack trace at the outermost caller.
+// Nothing is printed when err == nil.
+// output is a logger function, passed in so util does not import a logger
+// package and create an import cycle.
+func LoggerErrorStack(output func(format string, args ...interface{}), err error) {
+	if err != nil {
+		output("%+v", err)
+	}
+}
+
+// IsErrorString reports whether the message of a non-nil err contains subStr.
+func IsErrorString(err error, subStr string) bool {
+	return err != nil && strings.Contains(err.Error(), subStr)
+}
diff --git a/dbm-services/common/db-config/pkg/validate/check_value.go b/dbm-services/common/db-config/pkg/validate/check_value.go
new file mode 100644
index 0000000000..4af492b28b
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/validate/check_value.go
@@ -0,0 +1,387 @@
+package validate
+
+import (
+	"bk-dbconfig/pkg/util"
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/go-playground/locales/en"
+	ut "github.com/go-playground/universal-translator"
+	"github.com/go-playground/validator/v10"
+	en_translations "github.com/go-playground/validator/v10/translations/en"
+	"github.com/pkg/errors"
+	"github.com/spf13/cast"
+)
+
+// ValueTypeDef pairs a value type with its sub-type.
+type ValueTypeDef struct {
+	ValueType    string
+	ValueTypeSub string
+}
+
+// Validate checks that ValueTypeSub is legal for ValueType.
+func (v *ValueTypeDef) Validate() error {
+	return CheckDataTypeSub(v.ValueType, v.ValueTypeSub)
+}
+
+// ParseExprEnums parses an enum expression whose items may be separated by
+// either | or ,. Enum values with leading/trailing spaces must be single-quoted,
+// as in 'a '|' b'|.
+func ParseExprEnums(enumStr string) ([]string, error) {
+	enums := util.SplitAnyRune(enumStr, "|,")
+	var enumsNew []string
+	for _, e := range enums {
+		// if e == ""
+		enumsNew = append(enumsNew, strings.TrimSpace(e))
+	}
+	return enumsNew, nil
+}
+
+// ParseRange parses a range expression such as "(0, 10]" and returns the
+// brackets and the bound values as strings, e.g. (["(", "]"], ["0", "10"], nil).
+func ParseRange(rangeValue string) ([]string, []string, error) {
+	rangeValue = strings.ReplaceAll(rangeValue, " ", "")
+	matchAllowed := regexRange.FindStringSubmatch(rangeValue)
+	err := errors.Errorf("wrong format for rangeValue %s", rangeValue)
+	if len(matchAllowed) != 5 {
+		return nil, nil, err
+	}
+	left, leftV, rightV, right := matchAllowed[1], matchAllowed[2], matchAllowed[3], matchAllowed[4]
+	return []string{left, right}, []string{leftV, rightV}, nil
+}
+
+// ParseFloat32ExprRange parses a range expression and converts the bounds to
+// float32, e.g. (["(", "]"], [0, 10], nil).
+func ParseFloat32ExprRange(rangeValue string) ([]string, []float32, error) {
+	bound, vals, err := ParseRange(rangeValue)
+	if err != nil {
+		return nil, nil, err
+	}
+	leftVal, err1 := strconv.ParseFloat(vals[0], 32)
+	rightVal, err2 := strconv.ParseFloat(vals[1], 32)
+	if err1 != nil || err2 != nil {
+		return nil, nil, err
+	} else if leftVal > rightVal {
+		return nil, nil, err
+	}
+	return bound, []float32{float32(leftVal), float32(rightVal)}, nil
+}
+
+// CheckInEnums checks valueGiven against the enum expression valueAllowed.
+// When valueAllowed permits an empty item, e.g. "ON|OFF|", an empty value is
+// rendered explicitly, similar to --sql-mode=''.
+// Not to be confused with CheckInBool.
+func CheckInEnums(valueGiven, valueAllowed string, multiple bool) error {
+	// e.g. valueGiven := "abc"
+	// valueAllowed := "abc | def" // parsed into ["abc", "def"]
+	valueAlloweds, err := ParseExprEnums(valueAllowed)
+	if err != nil {
+		return err
+	}
+	if !multiple { // single choice
+		if !util.StringsHas(valueAlloweds, valueGiven) {
+			return errors.Errorf("expect one of %s but given %s", valueAlloweds, valueGiven)
+		}
+	} else { // multiple choice
+		valueGivens := strings.Split(valueGiven, ",")
+		for _, g := range valueGivens {
+			if !util.StringsHas(valueAlloweds, g) {
+				return errors.Errorf("expect multi of %s but given %s", valueAlloweds, g)
+			}
+		}
+		// when empty values are allowed, an empty value cannot be combined with concrete values
+		if len(valueGivens) >= 2 &&
+			(util.StringsHas(valueGivens, "''") || util.StringsHas(valueGivens, "\"\"") || util.StringsHas(valueGivens, "")) {
+			return errors.Errorf("empty value cannot be given with non-empty value")
+		}
+	}
+
+	return nil
+}
+
+// CheckInRange checks that a numeric value lies inside a range expression
+// such as "(0, 2]".
+func CheckInRange(value, valueAllowed string) error {
+	tmp, errParse := strconv.ParseFloat(value, 32)
+	if errParse != nil {
+		return errors.Errorf("value %s is not a number", value)
+	}
+	valueGiven := float32(tmp)
+	direct, values, err := ParseFloat32ExprRange(valueAllowed)
+	if err != nil {
+		return err
+	}
+	err = errors.Errorf("value %s is not in range %s", value, valueAllowed)
+	if direct[0] == "[" { // left
+		if valueGiven < values[0] {
+			return err
+		}
+	} else {
+		if valueGiven <= values[0] {
+			return err
+		}
+	}
+	if direct[1] == "]" { // right
+		if valueGiven > values[1] {
+			return err
+		}
+	} else {
+		if valueGiven >= values[1] {
+			return err
+		}
+	}
+	return nil
+}
+
+// CheckInSizeRange checks a size value such as "64m" against a range or enum of sizes.
+func CheckInSizeRange(value, valueAllowed string) error {
+	sizeBytes, err := util.ParseSizeInBytesE(value)
+	if err != nil {
+		return err
+	}
+	subType := AutoDetectTypeSub(valueAllowed)
+	if subType == DTypeSubRange {
+		bound, vals, err := ParseRange(valueAllowed)
+		if err != nil {
+			// malformed valueAllowed: allow the value through by default
+			return nil
+		}
+		leftVal := util.ParseSizeInBytes(vals[0])
+		rightVal := util.ParseSizeInBytes(vals[1])
+		valueAllowedNew := fmt.Sprintf("%s%d, %d%s", bound[0], leftVal, rightVal, bound[1])
+		valueNew := cast.ToString(sizeBytes)
+		return CheckInRange(valueNew, valueAllowedNew)
+	} else if subType == DTypeSubEnum {
+		return CheckInEnums(value, valueAllowed, false)
+	}
+	// valueAllowed is empty or the sub-type is unknown: allow by default
+	return nil
+}
+
+// CheckInDuration checks a duration value against a range or enum of durations.
+// A bare number without a unit is taken as seconds.
+func CheckInDuration(value, valueAllowed string) error {
+	dura, err := util.ToDurationExtE(value)
+	if err != nil {
+		return err
+	}
+	subType := AutoDetectTypeSub(valueAllowed)
+	if subType == DTypeSubRange {
+		bound, vals, err := ParseRange(valueAllowed)
+		if err != nil {
+			// malformed valueAllowed: allow the value through by default
+			return nil
+		}
+		leftVal := util.ToDurationExt(vals[0]).Seconds()
+		rightVal := util.ToDurationExt(vals[1]).Seconds()
+		valueAllowedNew := fmt.Sprintf("%s%d, %d%s", bound[0], int(leftVal), int(rightVal), bound[1])
+		valueNew := cast.ToString(int(dura.Seconds()))
+		return CheckInRange(valueNew, valueAllowedNew)
+	} else if subType == DTypeSubEnum {
+		return CheckInEnums(value, valueAllowed, false)
+	}
+	// valueAllowed is empty or the sub-type is unknown: allow by default
+	return nil
+}
+
+// CheckInRegex checks valueGiven against the regular expression valueAllowed.
+func CheckInRegex(valueGiven, valueAllowed string) error {
+	// valueGiven := "1.1.1.1"
+	// valueAllowed := "^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(#\\d+)?$"
+	reg, err := regexp.Compile(valueAllowed)
+	if err != nil {
+		return errors.Errorf("invalid regex %s", valueAllowed)
+	}
+	if reg.MatchString(valueGiven) {
+		return nil
+	} else {
+		return errors.Errorf("value %s match regex failed %s", valueGiven, valueAllowed)
+	}
+}
+
+// CheckInJson checks that valueGiven is valid JSON.
+func CheckInJson(valueGiven string) error {
+	var js json.RawMessage
+	err := json.Unmarshal([]byte(valueGiven), &js)
+	// if json.Valid([]byte(valueGiven)) {
+	if err != nil {
+		return errors.Errorf("invalid json %s", valueGiven)
+	} else {
+		return nil
+	}
+}
+
+// CheckGoValidate validates valueGiven against the go-playground/validator
+// rules in valueAllowed and returns translated error messages.
+func CheckGoValidate(valueGiven, valueAllowed string) error {
+	vali := validator.New()
+	err := vali.Var(valueGiven, valueAllowed)
+	if err == nil {
+		// guard the validator.ValidationErrors type assertion below
+		return nil
+	}
+	if _, ok := err.(*validator.InvalidValidationError); ok {
+		return errors.Wrap(err, "invalid string to validate")
+	}
+	uni := ut.New(en.New())
+	trans, _ := uni.GetTranslator("en")
+	if err := en_translations.RegisterDefaultTranslations(vali, trans); err != nil {
+		return err
+	}
+	errStrings := make([]string, 0)
+	for _, vErr := range err.(validator.ValidationErrors) {
+		errStrings = append(errStrings, vErr.Translate(trans))
+	}
+	if len(errStrings) > 0 {
+		return errors.New(strings.Join(errStrings, " || "))
+	}
+	return nil
+}
+
+// CheckInBool checks a boolean-like value against valueAllowed.
+// When valueAllowed permits an empty item, e.g. "ON|OFF|", an empty valueGiven
+// means the option renders as a bare flag such as --enable, with no value.
+func CheckInBool(valueGiven, valueAllowed string) error {
+	if valueGiven == "" && valueAllowed == "" {
+		return nil
+	}
+	return CheckInEnums(valueGiven, valueAllowed, false)
+}
+
+// CheckDataType checks that value can be parsed as the data type name.
+func CheckDataType(name, value string) error {
+	err2 := errors.Errorf("expect type %s but given value %s", name, value)
+	if name == DTypeInt {
+		if _, err := strconv.ParseInt(value, 10, 64); err != nil {
+			return errors.Wrap(err2, err.Error())
+		}
+	} else if name == DTypeFloat {
+		if _, err := strconv.ParseFloat(value, 32); err != nil {
+			return errors.Wrap(err2, err.Error())
+		}
+	} else if name == DTypeNumber {
+		if _, err := strconv.ParseFloat(value, 64); err != nil {
+			return errors.Wrap(err2, err.Error())
+		}
+	} else if name == DTypeBool {
+		if _, err := util.ToBoolExtE(value); err != nil {
+			return errors.Wrap(err2, err.Error())
+		}
+	} else if name == "" {
+		// return errors.Errorf("empty value_type for value [%s]", value)
+	}
+	return nil
+}
+
+// CheckDataTypeSub checks that subType is a legal sub-type of dataType.
+func CheckDataTypeSub(dataType, subType string) error {
+	if subType == "" {
+		return nil
+	}
+	if subs, ok := ValueTypeSubRef[dataType]; ok {
+		if !util.StringsHas(subs, subType) {
+			return errors.Errorf("value_type %s does not have sub type %s, allowed %s",
+				dataType, subType, subs)
+		}
+	} else {
+		return errors.Errorf("unknown value_type %s", dataType)
+	}
+	return nil
+}
+
+// AutoDetectTypeSub guesses the sub-type (RANGE or ENUM) from valueAllowed.
+func AutoDetectTypeSub(valueAllowed string) string {
+	if valueAllowed == "" {
+		return ""
+	}
+	if regexRange.MatchString(valueAllowed) {
+		return DTypeSubRange
+	}
+	reg := regexp.MustCompile(`[|,]`) // enum separators, either | or ,; see ParseExprEnums()
+	if reg.MatchString(valueAllowed) {
+		return DTypeSubEnum
+	}
+	return ""
+}
+
+// ValidateConfValue godoc
+// use ConfValue,ValueType,ValueTypeSub,ValueAllowed as params to check
+func ValidateConfValue(confValue, valueType, valueTypeSub, valueAllowed string) error {
+	if valueType == "" && valueTypeSub == "" && valueAllowed == "" {
+		return nil
+	}
+	if err := CheckDataType(valueType, confValue); err != nil {
+		return err
+	} else if err = CheckDataTypeSub(valueType, valueTypeSub); err != nil {
+		return err
+	}
+	var invalidErr = errors.Errorf("invalid value_type_sub %s for %s", valueTypeSub, valueType)
+	if valueType == DTypeBool {
+		// valueTypeSub = DTypeSubEnum
+		switch valueTypeSub {
+		case DTypeSubEnum, "":
+			return CheckInBool(confValue, valueAllowed)
+		case DTypeSubFlag:
+			return nil
+		default:
+			return invalidErr
+		}
+	} else if util.StringsHas([]string{DTypeInt, DTypeFloat, DTypeNumber}, valueType) {
+		if valueTypeSub == "" {
+			valueTypeSub = AutoDetectTypeSub(valueAllowed)
+			if valueTypeSub == "" {
+				return errors.Errorf("cannot detect value_type_sub for %s", valueAllowed)
+			}
+		}
+		switch valueTypeSub {
+		case DTypeSubEnum:
+			return CheckInEnums(confValue, valueAllowed, false)
+		case DTypeSubRange:
+			return CheckInRange(confValue, valueAllowed)
+		default:
+			return invalidErr
+		}
+	} else { // STRING
+		if valueAllowed == "" && !(valueTypeSub == DTypeSubJson || valueTypeSub == DTypeSubMap) {
+			// JSON/MAP validity does not depend on valueAllowed
+			return nil
+		}
+		switch valueTypeSub {
+		case DTypeSubEnum:
+			if err := CheckInEnums(confValue, valueAllowed, false); err != nil {
+				return err
+			}
+		case DTypeSubEnums:
+			if err := CheckInEnums(confValue, valueAllowed, true); err != nil {
+				return err
+			}
+		case DTypeSubRegex:
+			if err := CheckInRegex(confValue, valueAllowed); err != nil {
+				return err
+			}
+		case DTypeSubBytes:
+			if err := CheckInSizeRange(confValue, valueAllowed); err != nil {
+				return err
+			}
+		case DTypeSubDuration:
+			if err := CheckInDuration(confValue, valueAllowed); err != nil {
+				return err
+			}
+		case DTypeSubJson, DTypeSubMap:
+			if err := CheckInJson(confValue); err != nil {
+				return err
+			}
+		case DTypeSubGovalidate:
+			if err := CheckGoValidate(confValue, valueAllowed); err != nil {
+				return err
+			}
+		case DTypeSubList:
+			// ignore value_allowed; LIST only affects the response format
+			return nil
+		case DTypeSubString, "":
+			if valueAllowed != "" {
+				// a non-empty value_allowed with empty value_type_sub means conf_value must equal value_allowed exactly
+				if confValue != valueAllowed {
+					return errors.Errorf("value must equal value_allowed:%s", valueAllowed)
+				}
+			}
+			return nil
+		default:
+			return invalidErr
+		}
+	}
+
+	return nil
+}
diff --git a/dbm-services/common/db-config/pkg/validate/check_value_test.go b/dbm-services/common/db-config/pkg/validate/check_value_test.go
new file mode 100644
index 0000000000..f00c7af3b4
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/validate/check_value_test.go
@@ -0,0 +1,131 @@
+package validate
+
+import (
+	"bk-dbconfig/pkg/util"
+	"log"
+	"testing"
+
+	. "github.com/smartystreets/goconvey/convey"
+)
+
+func BatchValidate(tvs [][4]string) int {
+	var errList []error
+	for _, tv := range tvs {
+		if err := ValidateConfValue(tv[0], tv[1], tv[2], tv[3]); err != nil {
+			errList = append(errList, err)
+		}
+	}
+	log.Println("\n", util.SliceErrorsToError(errList))
+	return len(errList)
+}
+
+func TestValidateConfValue(t *testing.T) {
+	// [confValue, confType, confTypeSub, valueAllowed]
+
+	Convey("Test conf_name conf_value validator", t, func() {
+		Convey("Validate Enum", func() {
+			valuesEnumSucc := [][4]string{
+				{"1", "INT", "ENUM", "0|1|2"},
+				{"1.1", "FLOAT", "ENUM", "1.1|2.1"},
+				{"1", "STRING", "ENUM", "0|1"},
+				{"ON", "STRING", "ENUM", "ON|OFF"},
+				// {"", "STRING", "ENUM", "A|B|C|"},
+				{"A,B", "STRING", "ENUMS", "A|B|C|"},
+				{"C", "STRING", "ENUM", "A,B,C"},
+				{"B", "STRING", "ENUM", "A, B, C"},
+			}
+			valuesEnumFail := [][4]string{
+				{"3", "INT", "ENUM", "0|1|2"},
+				{"1.2", "FLOAT", "ENUM", "1.1|2.1"},
+				{"2", "STRING", "ENUM", "0|1"},
+				{"on", "STRING", "ENUM", "ON|OFF"},
+				{"D", "STRING", "ENUM", "A|B|C|"},
+				{"A,D", "STRING", "ENUMS", "A|B|C|"},
+			}
+			errCount := BatchValidate(valuesEnumSucc)
+			So(errCount, ShouldEqual, 0)
+			errCount = BatchValidate(valuesEnumFail)
+			So(errCount, ShouldEqual, len(valuesEnumFail))
+		})
+
+		Convey("Validate Range", func() {
+			valuesRangeSucc := [][4]string{
+				{"1", "INT", "RANGE", "[0,1]"},
+				{"1.5", "FLOAT", "RANGE", "(0.0, 2.0]"},
+				{"-2", "NUMBER", "RANGE", "[-2, 3.0]"},
+			}
+			valuesRangeFail := [][4]string{
+				{"2", "INT", "RANGE", "[0,1]"},
+				{"2.5", "FLOAT", "RANGE", "(0.0, 2.0]"},
+				{"-2", "NUMBER", "RANGE", "(-2, 3.0]"},
+			}
+			errCount := BatchValidate(valuesRangeSucc)
+			So(errCount, ShouldEqual, 0)
+			errCount = BatchValidate(valuesRangeFail)
+			So(errCount, ShouldEqual, len(valuesRangeFail))
+		})
+
+		Convey("Validate Bytes", func() {
+			valuesSucc := [][4]string{
+				{"1024", "STRING", "BYTES", "(0, 2048)"},
+				{"1k", "STRING", "BYTES", "(0, 2048)"},
+				{"64m", "STRING", "BYTES", "(0m, 1g)"},
+				{"64m", "STRING", "BYTES", "64m | 128m"}, // enum
+				{"1G", "STRING", "BYTES", "[0, 1024m]"},
+			}
+			valuesFail := [][4]string{
+				{"0", "STRING", "BYTES", "(0, 2048)"},
+				{"2g", "STRING", "BYTES", "(0, 2048k)"},
+				{"1mBB", "STRING", "BYTES", "(0, 2048)"},
+			}
+			errCount := BatchValidate(valuesSucc)
+			So(errCount, ShouldEqual, 0)
+			errCount = BatchValidate(valuesFail)
+			So(errCount, ShouldEqual, len(valuesFail))
+		})
+		Convey("Validate Duration", func() {
+			valuesSucc := [][4]string{
+				{"2m", "STRING", "DURATION", "(0, 5m)"},
+				{"2h", "STRING", "DURATION", "(0, 2h]"},
+				{"2d", "STRING", "DURATION", "(3600s, 7d]"},
+				{"1w2d3m1s", "STRING", "DURATION", "[1d, 10d]"},
+				{"2d", "STRING", "DURATION", "1d | 2d"}, // enum
+			}
+			valuesFail := [][4]string{
+				{"2m", "STRING", "DURATION", "[2m1s, 5m)"},
+				{"2h", "STRING", "DURATION", "[0s, 3600s]"},
+			}
+			errCount := BatchValidate(valuesSucc)
+			So(errCount, ShouldEqual, 0)
+			errCount = BatchValidate(valuesFail)
+			So(errCount, ShouldEqual, len(valuesFail))
+		})
+
+		Convey("Validate Regex and Json", func() {
+			valuesRegexSucc := [][4]string{
+				{"0.0.0.0", "STRING", "REGEX", "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$"},
+				{"110", "STRING", "REGEX", "([1-9])|(110|120)"},
+				{"{\"key\":\"value\"}", "STRING", "JSON", ""},
+			}
+			valuesRegexFail := [][4]string{
+				{"0.0.0.0", "STRING", "REGEX", "172"},
+				{"111", "STRING", "REGEX", "(^[1-9]$)|(^110$)|(^120$)"},
+				{"110", "STRING", "REGEX", "(110"},
+				{"{\"key\":\"value", "STRING", "JSON", ""},
+			}
+			errCount := BatchValidate(valuesRegexSucc)
+			So(errCount, ShouldEqual, 0)
+			errCount = BatchValidate(valuesRegexFail)
+			So(errCount, ShouldEqual, len(valuesRegexFail))
+		})
+
+		Convey("Validate DataType", func() {
+			valuesTypeFail := [][4]string{
+				{"1", "INT", "JSON", "1"},
+				{"ddd", "INT", "ENUM", ""},
+			}
+			errCount := BatchValidate(valuesTypeFail)
+			So(errCount, ShouldEqual, len(valuesTypeFail))
+		})
+	})
+}
diff --git a/dbm-services/common/db-config/pkg/validate/const.go b/dbm-services/common/db-config/pkg/validate/const.go
new file mode 100644
index 0000000000..f8f0c56330
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/validate/const.go
@@ -0,0 +1,61 @@
+package validate
+
+import "regexp"
+
+const (
+	// RegexRangeStr matches a range like (0, 2], capturing the bounds; bounds are not restricted to digits
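+	// Example (illustrative): "(0, 2]" yields the groups "(", "0", " 2", "]".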
+	RegexRangeStr = `^([\(\[])(.+),(.+)([\)\]])$`
+)
+
+var regexRange = regexp.MustCompile(RegexRangeStr)
+
+const (
+	// DTypeString plain string type
+	DTypeString = "STRING"
+	// DTypeInt integer type
+	DTypeInt = "INT"
+	// DTypeFloat float type
+	DTypeFloat = "FLOAT"
+	// DTypeNumber numeric type, int or float
+	DTypeNumber = "NUMBER"
+	// DTypeBool boolean type
+	DTypeBool = "BOOL"
+
+	// DTypeSubString any plain string
+	DTypeSubString = "STRING"
+	// DTypeSubEmpty empty string
+	DTypeSubEmpty = ""
+	// DTypeSubEnum enum type, single value
+	DTypeSubEnum = "ENUM"
+	// DTypeSubEnums enum type, multiple values
+	DTypeSubEnums = "ENUMS"
+	// DTypeSubRange range, supporting open and closed intervals
+	DTypeSubRange = "RANGE"
+	// DTypeSubBytes a special RANGE whose bounds are numbers like [1024, 2048] but whose value may be written as 1M
+	DTypeSubBytes = "BYTES"
+	// DTypeSubRegex regular expression
+	DTypeSubRegex = "REGEX"
+	// DTypeSubJson json type
+	DTypeSubJson = "JSON"
+	// DTypeSubMap a special json type, converted to a map in strict mode
+	DTypeSubMap = "MAP"
+	// DTypeSubList list type, stored as a string; only affects the response format, writes are not checked
+	DTypeSubList = "LIST"
+	// DTypeSubDuration time duration, e.g. 1d2h3m1s
+	DTypeSubDuration = "DURATION"
+	// DTypeSubGovalidate validated with go-playground/validator tag rules
+	DTypeSubGovalidate = "GOVALIDATE"
+	// DTypeSubFlag boolean flag, the value may be omitted
+	DTypeSubFlag = "FLAG"
+)
+
+// ValueTypeSubRef defines the legal value_type / value_type_sub combinations.
+// value_type_sub is used for widget rendering and validity checks.
+var ValueTypeSubRef = map[string][]string{
+	DTypeString: {DTypeSubEmpty, DTypeSubString, DTypeSubEnum, DTypeSubEnums, DTypeSubBytes, DTypeSubRegex,
+		DTypeSubJson, DTypeSubMap, DTypeSubDuration, DTypeSubGovalidate, DTypeSubList}, // complex types such as (1, 100] || on|off are not yet supported
+	DTypeInt:    {DTypeSubEnum, DTypeSubEmpty, DTypeSubRange},
+	DTypeFloat:  {DTypeSubEnum, DTypeSubEmpty, DTypeSubRange},
+	DTypeNumber: {DTypeSubEnum, DTypeSubEmpty, DTypeSubRange},
+	DTypeBool:   {DTypeSubEnum, DTypeSubEmpty, DTypeSubFlag},
+}
diff --git a/dbm-services/common/db-config/pkg/validate/validate.go b/dbm-services/common/db-config/pkg/validate/validate.go
new file mode 100644
index 0000000000..d86c7e248a
--- /dev/null
+++ b/dbm-services/common/db-config/pkg/validate/validate.go
@@ -0,0 +1,140 @@
+// Package validate provides validation helpers for config values and request structs
+package validate
+
+import (
+	"bk-dbconfig/pkg/util"
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+
+	"github.com/go-playground/locales/en"
+	ut "github.com/go-playground/universal-translator"
+	"github.com/go-playground/validator/v10"
+	en_translations "github.com/go-playground/validator/v10/translations/en"
+	"github.com/pkg/errors"
+)
+
+// ValidateEnums makes the validate tag work together with the enums tag,
+// so the allowed values are not written twice for the validator oneof rule and the swagger enums doc.
+// example: Method string `validate:"required,enums" enums:"post,get" json:"method"`
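+// Usage sketch (illustrative): register it before validating,
+//  v := validator.New(); _ = v.RegisterValidation(TagEnum, ValidateEnums)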
+func ValidateEnums(f validator.FieldLevel) bool {
+	fieldValue := f.Field().String()
+	fieldName := f.StructFieldName()
+	// get StructField
+	sf, _ := f.Parent().Type().FieldByName(fieldName)
+	// get tag value from tag_field enums
+	tagValue := sf.Tag.Get(TagEnum)
+	enumsValues := strings.Split(tagValue, ",")
+	return util.StringsHas(enumsValues, fieldValue)
+}
+
+// GoValidateStructSimple performs a simple struct validation with no business logic.
+// If the struct carries the tag validate:"enums", it must be called with enum=true.
+func GoValidateStructSimple(v interface{}, enum bool) error {
+	validate := validator.New()
+	if enum {
+		_ = validate.RegisterValidation("enums", ValidateEnums)
+	}
+	if err := validate.Struct(v); err != nil {
+		return err
+	}
+	return nil
+}
+
+// TagEnum is the struct tag that holds the allowed enum values
+const TagEnum = "enums"
+
+// GoValidateStruct validates a struct; v must not be a pointer
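+// Example (illustrative): err := GoValidateStruct(req, true) where req is a struct value, not &req.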
+func GoValidateStruct(v interface{}, enum bool) error {
+	validate := validator.New()
+	uni := ut.New(en.New())
+	trans, _ := uni.GetTranslator("en")
+	// report the json field name in error messages
+	validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
+		// name := fld.Tag.Get("json")
+		name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0]
+		if name == "-" {
+			return ""
+		}
+		return name
+	})
+	if err := en_translations.RegisterDefaultTranslations(validate, trans); err != nil {
+		return err
+	}
+
+	if enum {
+		_ = validate.RegisterValidation(TagEnum, ValidateEnums)
+	}
+	if err := validate.Struct(v); err != nil {
+		return translateErr2Msg(v, trans, err)
+	}
+	return nil
+}
+
+// translateErr2Msg translates validation errors into a joined message; v must not be a pointer
+func translateErr2Msg(v interface{}, trans ut.Translator, err error) error {
+	var errStr []string
+	for _, vErr := range err.(validator.ValidationErrors) {
+		if vErr.Tag() == TagEnum {
+			errmsg := ""
+			// errmsg := customEnumTransFunc(vErr, v)
+			if vErr.Param() == "" {
+				sf, _ := reflect.TypeOf(v).FieldByName(vErr.StructField())
+				tagValue := sf.Tag.Get(TagEnum)
+				errmsg = fmt.Sprintf("%s must be one of [%s]", vErr.Field(), tagValue)
+			} else {
+				errmsg = vErr.Param()
+			}
+			errStr = append(errStr, errmsg)
+			continue
+		}
+		errStr = append(errStr, vErr.Translate(trans))
+	}
+	return errors.New(strings.Join(errStr, " || "))
+}
+
+func customEnumTransFunc(fe validator.FieldError, v interface{}) string {
+	if fe.Param() == "" {
+		sf, _ := reflect.TypeOf(v).FieldByName(fe.StructField())
+		tagValue := sf.Tag.Get(TagEnum)
+		errmsg := fmt.Sprintf("%s must be one of [%s]", fe.Field(), tagValue)
+		return errmsg
+	} else {
+		return fe.Param()
+	}
+}
+
+// registerTranslator adds a translation message for a custom tag
+func registerTranslator(tag string, msg string) validator.RegisterTranslationsFunc {
+	return func(trans ut.Translator) error {
+		if err := trans.Add(tag, msg, false); err != nil {
+			return err
+		}
+		return nil
+	}
+}
+
+// customTransFunc translates a custom tag's FieldError using the registered message
+func customTransFunc(trans ut.Translator, fe validator.FieldError) string {
+	msg, err := trans.T(fe.Tag(), fe.Field())
+	if err != nil {
+		panic(fe.(error).Error())
+	}
+	return msg
+}
+
+func translate(ut ut.Translator, fe validator.FieldError) string {
+	s, err := ut.T(fe.Tag(), fe.Field(), fe.Param())
+	if err != nil {
+		log.Printf("warning: error translating FieldError: %#v", fe)
+		return fe.(error).Error()
+	}
+	return s
+}
diff --git a/dbm-services/common/db-dns/dns-api/.gitignore b/dbm-services/common/db-dns/dns-api/.gitignore
new file mode 100644
index 0000000000..00b067abc1
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/.gitignore
@@ -0,0 +1,4 @@
+.idea
+*.exe
+log
+conf
\ No newline at end of file
diff --git a/dbm-services/common/db-dns/dns-api/Dockerfile b/dbm-services/common/db-dns/dns-api/Dockerfile
new file mode 100644
index 0000000000..fc3f08c023
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/Dockerfile
@@ -0,0 +1,12 @@
+FROM centos:7
+ARG SRV_NAME
+MAINTAINER vincixu vincixu
+
+RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
+RUN echo "Asia/Shanghai" > /etc/timezone
+
+WORKDIR /
+COPY ./$SRV_NAME .
+COPY ./conf ./conf
+
+ENTRYPOINT ["/bk-dnsapi"]
diff --git a/dbm-services/common/db-dns/dns-api/Makefile b/dbm-services/common/db-dns/dns-api/Makefile
new file mode 100644
index 0000000000..708b9a8625
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/Makefile
@@ -0,0 +1,21 @@
+SRV_NAME = bk-dnsapi
+VER = v0.2.0
+CURRENT_VERSION = release-$(VER)
+NAMESPACE = sccmsp
+DH_URL = mirrors.tencent.com
+
+hook:
+	cp ./scripts/git/pre-commit ./.git/hooks/pre-commit && chmod 711 ./.git/hooks/pre-commit
+
+clean:
+	-rm ./$(SRV_NAME)
+
+build:clean
+	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o $(SRV_NAME) -v ./cmd/$(SRV_NAME)
+
+publish:build
+	docker build --build-arg SRV_NAME=$(SRV_NAME) --rm -t $(SRV_NAME):$(CURRENT_VERSION) .
+	docker tag $(SRV_NAME):$(CURRENT_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(CURRENT_VERSION)
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(CURRENT_VERSION)
+
+.PHONY: hook clean build publish
diff --git a/dbm-services/common/db-dns/dns-api/README.md b/dbm-services/common/db-dns/dns-api/README.md
new file mode 100644
index 0000000000..23e5bcc81c
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/README.md
@@ -0,0 +1,19 @@
+# bk-dns-api
+
+## Overview
+This service provides the DNS API used by the dbm project.
+It covers:
+- create, delete, update and query of domain names
+- query of dns servers
+
+
+## Common commands
+The following commands must be run from the project root:
+- `go mod download`: restore the project dependencies; not needed if they are already vendored into `vendor`
+- `make init`: initialize a project
+- `make hook`: install the bundled git hook, which runs `gofmt` on the code at `commit` time
+- `make build`: build the binary
+- `make publish [VER=v0.0.1]`: build the image and push it to the registry; *VER* defaults to *v0.0.1* if not given
+- `go get somerepo[@version]`: add or update a dependency of your service
+- `go mod edit -replace=somerepo[@ver]=anotherrepo[@ver]`: replace a dependency without modifying the original requirement
+- `go mod tidy`: prune unused dependencies
diff --git a/dbm-services/common/db-dns/dns-api/cmd/bk-dnsapi/main.go b/dbm-services/common/db-dns/dns-api/cmd/bk-dnsapi/main.go
new file mode 100644
index 0000000000..f5743f330f
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/cmd/bk-dnsapi/main.go
@@ -0,0 +1,58 @@
+package main
+
+import (
+	"bk-dnsapi/internal/dao"
+	"bk-dnsapi/internal/handler/domain"
+	"log"
+	"strings"
+
+	"github.com/gin-gonic/gin"
+	"github.com/spf13/viper"
+)
+
+func main() {
+	engine := gin.Default()
+	RouterGroup(engine)
+	InitConfig("config")
+
+	if err := dao.Init(); err != nil {
+		log.Fatal(err)
+	}
+	// close the dao when main returns; engine.Run blocks until the server exits
+	defer func() {
+		if err := dao.Close(); err != nil {
+			log.Println(err.Error())
+		}
+	}()
+
+	httpAddr := viper.GetString("http.listenAddress")
+	if err := engine.Run(httpAddr); err != nil {
+		log.Println(err.Error())
+	}
+}
+
+// RouterGroup registers the route groups
+func RouterGroup(engine *gin.Engine) {
+	h := domain.Handler{}
+	RegisterRoutes(engine, "/api/v1/dns/domain", h.Routes())
+}
+
+// RegisterRoutes mounts the given routes under the group prefix
+func RegisterRoutes(router *gin.Engine, group string, routesInfo []*gin.RouteInfo) {
+	r := router.Group(group)
+	for _, route := range routesInfo {
+		r.Handle(route.Method, route.Path, route.HandlerFunc)
+	}
+}
+
+// InitConfig loads the configuration file from the conf directory
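+// Environment variables override file keys after "." is replaced with "_",
+// e.g. (illustrative) HTTP_LISTENADDRESS overrides http.listenAddress.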
+func InitConfig(fileName string) {
+	viper.AddConfigPath("conf")
+	viper.SetConfigType("yaml")
+	viper.SetConfigName(fileName)
+	viper.AutomaticEnv() // read in environment variables that match
+	// viper.SetEnvPrefix("ACCOUNT")
+	replacer := strings.NewReplacer(".", "_")
+	viper.SetEnvKeyReplacer(replacer)
+	if err := viper.MergeInConfig(); err != nil {
+		log.Fatal(err)
+	}
+}
diff --git a/dbm-services/common/db-dns/dns-api/docs/.gitkeep b/dbm-services/common/db-dns/dns-api/docs/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/common/db-dns/dns-api/go.mod b/dbm-services/common/db-dns/dns-api/go.mod
new file mode 100644
index 0000000000..406abb9a5d
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/go.mod
@@ -0,0 +1,50 @@
+module bk-dnsapi
+
+go 1.19
+
+replace bk-dnsapi/pkg => ./pkg
+
+require (
+	bk-dnsapi/pkg v0.0.0-20200327131337-b2b67ca8129b
+	github.com/gin-gonic/gin v1.7.0
+	github.com/go-mesh/openlogging v1.0.1
+	github.com/jinzhu/gorm v1.9.10
+	github.com/pkg/errors v0.9.1
+	github.com/spf13/viper v1.15.0
+)
+
+require (
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/gin-contrib/sse v0.1.0 // indirect
+	github.com/go-playground/locales v0.13.0 // indirect
+	github.com/go-playground/universal-translator v0.17.0 // indirect
+	github.com/go-playground/validator/v10 v10.4.1 // indirect
+	github.com/go-sql-driver/mysql v1.4.1 // indirect
+	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/jinzhu/inflection v1.0.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/leodido/go-urn v1.2.0 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/mattn/go-isatty v0.0.14 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.6 // indirect
+	github.com/spf13/afero v1.9.3 // indirect
+	github.com/spf13/cast v1.5.0 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/subosito/gotenv v1.4.2 // indirect
+	github.com/ugorji/go/codec v1.1.7 // indirect
+	golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect
+	golang.org/x/sys v0.3.0 // indirect
+	golang.org/x/text v0.5.0 // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/protobuf v1.28.1 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v2 v2.2.8 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
+
+replace github.com/emicklei/go-restful => github.com/emicklei/go-restful v2.11.0+incompatible
diff --git a/dbm-services/common/db-dns/dns-api/go.sum b/dbm-services/common/db-dns/dns-api/go.sum
new file mode 100644
index 0000000000..1afab150a2
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/go.sum
@@ -0,0 +1,711 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
+github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
+github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
+github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shonminh/apollo-client v0.2.0/go.mod h1:Jk6K99uIGxQm7Uyy1gCQTvM/kc1YLp4Qo9/jtGkEXvI=
+github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
+github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/cenkalti/backoff v2.0.0+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/coocood/freecache v1.0.1/go.mod h1:ePwxCDzOYvARfHdr1pByNct1at3CoKnsipOHwKlNbzI=
+github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3 h1:tkum0XDgfR0jcVVXuTsYv/erY2NnEDqwRojbxR1rBYA=
+github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
+github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
+github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v2.11.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
+github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
+github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
+github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
+github.com/gin-gonic/gin v1.7.0 h1:jGB9xAJQ12AIGNB4HguylppmDK1Am9ppF7XnGXXJuoU=
+github.com/gin-gonic/gin v1.7.0/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY=
+github.com/go-chassis/foundation v0.1.1-0.20191113114104-2b05871e9ec4/go.mod h1:21/ajGtgJlWTCeM0TxGJdRhO8bJkKirWyV8Stlh6g6c=
+github.com/go-chassis/go-archaius v1.2.1/go.mod h1:gVP52u/jCU0fgUjXdUW1VLp5YLLJ+Yl2zoOPrLM/WOM=
+github.com/go-chassis/go-chassis v1.8.3/go.mod h1:GTfwh1eXsOgMRFtLM7q8qSbceI+TsrkLL2UAS11Oey8=
+github.com/go-chassis/go-restful-swagger20 v1.0.2/go.mod h1:ZK4hlfS6Q6E46ViezAjn6atrzoteyWl1OBEpUBn/36k=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-mesh/openlogging v1.0.1 h1:6raaXo8SK+wuQX1VoNi6QJCSf1fTOFWh7f5f6b2ZEmY=
+github.com/go-mesh/openlogging v1.0.1/go.mod h1:qaKi+amO+hsGin2q1GmW+/NcbZpMPnTufwrWzDmIuuU=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q=
+github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8=
+github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no=
+github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA=
+github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE=
+github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4=
+github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/jinzhu/gorm v1.9.10 h1:HvrsqdhCW78xpJF67g1hMxS6eCToo9PZH4LDB8WKPac=
+github.com/jinzhu/gorm v1.9.10/go.mod h1:Kh6hTsSGffh4ui079FHrR5Gg+5D0hgihqDcsDN2BBJY=
+github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.0.1 h1:HjfetcXq097iXP0uoPCdnM4Efp5/9MsM0/M+XOTeR3M=
+github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y=
+github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
+github.com/lib/pq v1.1.1 h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4=
+github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o=
+github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
+github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
+github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU=
+github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
+github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
+github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
+github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
+github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
+github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/ratelimit v0.1.0/go.mod h1:2X8KaoNd1J0lZV+PxJk/5+DGbO/tpwLR1m++a7FnB/Y=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e h1:T8NU3HyQ8ClP4SEE+KbFlg6n0NhuTsN4MyznaarGsZM=
+golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
+gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
+k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
+k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
+k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/dbm-services/common/db-dns/dns-api/internal/dao/dao.go b/dbm-services/common/db-dns/dns-api/internal/dao/dao.go
new file mode 100644
index 0000000000..1d7a6e365d
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/internal/dao/dao.go
@@ -0,0 +1,64 @@
+// Package dao initializes and holds the shared database handles.
+package dao
+
+import (
+	"bk-dnsapi/internal/domain/entity"
+
+	"github.com/jinzhu/gorm"
+	_ "github.com/jinzhu/gorm/dialects/mysql" // mysql TODO
+	"github.com/pkg/errors"
+	"github.com/spf13/viper"
+)
+
+var (
+	// DnsDB is the shared gorm handle for the DNS database.
+	DnsDB *gorm.DB
+)
+
+// Init initializes all database connections used by the service.
+func Init() error {
+	if err := InitDnsDB(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// InitDnsDB opens the DNS database configured by the db.dns_conn setting.
+func InitDnsDB() error {
+	db, err := gorm.Open("mysql", viper.GetString("db.dns_conn"))
+	if err != nil {
+		return errors.Wrap(err, "init config db failed")
+	}
+	// Enable SQL logging to ease troubleshooting
+	db.LogMode(viper.GetBool("debug"))
+
+	// Auto-create tables.
+	// TODO AutoMigrate does not create the indexes, so they are added explicitly below
+	if viper.GetBool("db.auto_migration") {
+		// Repeated Set calls on the same key overwrite each other, so both
+		// table options must be combined into a single gorm:table_options value.
+		db.Set("gorm:table_options", "ENGINE=InnoDB CHARSET=utf8").AutoMigrate(
+			&entity.TbDnsBase{},
+			&entity.TbDnsIdcMap{},
+			&entity.TbDnsServer{})
+		// Create the indexes
+		db.Table("tb_dns_base").AddIndex("idx_ip_port", "ip", "port")
+		db.Table("tb_dns_base").AddIndex("idx_domain_name_app", "domain_name", "app")
+		db.Table("tb_dns_base").AddIndex("idx_app_manager", "app", "manager")
+		db.Table("tb_dns_base").AddUniqueIndex("uidx_domain_name_ip_port", "domain_name", "ip", "port")
+
+		db.Table("tb_dns_server").AddUniqueIndex("uidx_ip", "ip")
+	}
+	DnsDB = db
+	return nil
+}
+
+// Close closes the DNS database connection.
+func Close() error {
+	if err := DnsDB.Close(); err != nil {
+		return errors.Wrap(err, "close config db failed")
+	}
+
+	return nil
+}
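The connection above is driven entirely by viper settings. A minimal sketch of the keys dao.go reads, expressed as Go defaults (the key names come from the code; the DSN value is illustrative, and parseTime=True is required so the time.Time columns in the entities scan correctly):

    viper.SetDefault("debug", false)             // db.LogMode: echo SQL for troubleshooting
    viper.SetDefault("db.auto_migration", false) // create tables and indexes on startup
    viper.SetDefault("db.dns_conn",
        "user:password@tcp(127.0.0.1:3306)/bk_dns?charset=utf8&parseTime=True&loc=Local")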
diff --git a/dbm-services/common/db-dns/dns-api/internal/domain/entity/base.go b/dbm-services/common/db-dns/dns-api/internal/domain/entity/base.go
new file mode 100644
index 0000000000..5e43799e74
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/internal/domain/entity/base.go
@@ -0,0 +1,106 @@
+package entity
+
+import (
+	"time"
+)
+
+/*
+	GCS DNS tables
+| tb_dns_alias                |			-----   domain alias table
+| tb_dns_base                 |			-----	domain record table
+| tb_dns_config               |			-----	domain config table
+| tb_dns_forward_config       |
+| tb_dns_forward_info         |
+| tb_dns_idc_map              |			-----	domain IDC (region) mapping table
+| tb_dns_multi                |
+| tb_dns_op_log               |
+| tb_dns_server               |			-----	DNS server machine table
+| tb_dns_tencent_idc_svr      |
+| tb_dns_type                 |
+| tb_dns_zone_info            |
+*/
+
+// TbDnsBase is the domain record table (tb_dns_base).
+type TbDnsBase struct {
+	Uid            int64     `gorm:"column:uid;size:11;primary_key;AUTO_INCREMENT"  json:"uid"`
+	App            string    `gorm:"size:32;column:app" json:"app"`
+	DomainName     string    `gorm:"size:64;column:domain_name" json:"domain_name"`
+	Ip             string    `gorm:"size:20;column:ip" json:"ip"`
+	Port           int       `gorm:"size:11;column:port" json:"port"`
+	StartTime      time.Time `gorm:"column:start_time" json:"start_time"`
+	LastChangeTime time.Time `gorm:"column:last_change_time" json:"last_change_time"`
+	Manager        string    `gorm:"size:32;column:manager" json:"manager"`
+	Remark         string    `gorm:"size:128;column:remark" json:"remark"`
+	DnsStr         string    `gorm:"size:128;column:dns_str" json:"dns_str"`
+	Status         string    `gorm:"size:10;column:status" json:"status"`
+	DomainType     int64     `gorm:"size:11;column:domain_type" json:"domain_type"`
+	BkCloudId      int64     `gorm:"size:32;column:bk_cloud_id" json:"bk_cloud_id"`
+}
+
+// TableName returns the table name.
+func (t *TbDnsBase) TableName() string {
+	return "tb_dns_base"
+}
+
+// Columns lists the column names of tb_dns_base.
+func (t *TbDnsBase) Columns() []string {
+	return []string{"uid", "app", "domain_name", "ip", "port", "start_time", "last_change_time", "manager", "remark",
+		"dns_str", "status", "domain_type", "bk_cloud_id"}
+}
+
+// TableIndex returns the secondary indexes of tb_dns_base.
+func (t *TbDnsBase) TableIndex() [][]string {
+	return [][]string{
+		{"ip", "port"},
+		{"domain_name", "app"},
+		{"app", "manager"},
+	}
+}
+
+// TableUnique returns the unique indexes of tb_dns_base.
+func (t *TbDnsBase) TableUnique() [][]string {
+	return [][]string{
+		{"domain_name", "ip", "port"},
+	}
+}
+
+// TbDnsServer is the DNS server table (tb_dns_server).
+type TbDnsServer struct {
+	Uid            int64     `gorm:"column:uid;size:11;primary_key;AUTO_INCREMENT"  json:"uid"`
+	Ip             string    `gorm:"column:ip;size:20" json:"ip"`
+	ForwardIp      string    `gorm:"column:forward_ip;size:100" json:"forward_ip"`
+	Idc            string    `gorm:"column:idc;size:64" json:"idc"`
+	StartTime      time.Time `gorm:"column:start_time" json:"start_time"`
+	LastConfigTime time.Time `gorm:"column:last_config_time" json:"last_config_time"`
+	LastAlived     time.Time `gorm:"column:last_alived" json:"last_alived"`
+	Remark         string    `gorm:"column:remark;size:128" json:"remark"`
+	UpdateCounter  int64     `gorm:"column:update_counter;size:20" json:"update_counter"`
+	Type           string    `gorm:"column:type;size:64" json:"type" form:"type"`
+	Status         int64     `gorm:"column:status;size:11" json:"status" form:"status"`
+	BkCloudId      int64     `gorm:"size:32;column:bk_cloud_id" json:"bk_cloud_id"`
+}
+
+// TableName returns the table name.
+func (t *TbDnsServer) TableName() string {
+	return "tb_dns_server"
+}
+
+// Columns lists the column names of tb_dns_server.
+func (t *TbDnsServer) Columns() []string {
+	return []string{"uid", "ip", "forward_ip", "idc", "start_time", "last_config_time", "last_alived", "remark",
+		"update_counter", "type", "status"}
+}
+
+// TbDnsIdcMap is the IDC (region) mapping table (tb_dns_idc_map).
+type TbDnsIdcMap struct {
+	Uid       int64  `gorm:"column:uid;size:11;primary_key;AUTO_INCREMENT"  json:"uid"`
+	Oidc      string `gorm:"column:oidc;size:64" json:"oidc"`
+	Nidc      string `gorm:"column:nidc;size:64" json:"nidc"`
+	Status    int64  `gorm:"column:status;size:11" json:"status" form:"status"`
+	BkCloudId int64  `gorm:"size:32;column:bk_cloud_id" json:"bk_cloud_id"`
+}
+
+// TableName returns the table name.
+func (t *TbDnsIdcMap) TableName() string {
+	return "tb_dns_idc_map"
+}
diff --git a/dbm-services/common/db-dns/dns-api/internal/domain/entity/entity.go b/dbm-services/common/db-dns/dns-api/internal/domain/entity/entity.go
new file mode 100644
index 0000000000..b24d64cfff
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/internal/domain/entity/entity.go
@@ -0,0 +1,2 @@
+// Package entity defines the table models for the DNS database.
+package entity
diff --git a/dbm-services/common/db-dns/dns-api/internal/domain/entity/error.go b/dbm-services/common/db-dns/dns-api/internal/domain/entity/error.go
new file mode 100644
index 0000000000..6e5fe3a12b
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/internal/domain/entity/error.go
@@ -0,0 +1,15 @@
+package entity
+
+import "strings"
+
+// IsNoRowFoundError reports whether err indicates that no row was found.
+func IsNoRowFoundError(err error) bool {
+	if err == nil {
+		return false
+	}
+	return strings.Contains(err.Error(), "no row found")
+}
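Matching on error message text is fragile: jinzhu/gorm v1 reports a missing row with the sentinel gorm.ErrRecordNotFound (message "record not found"), which the substring above would never match. A minimal alternative sketch for the gorm case, using the library's own helper:

    // isRecordNotFound relies on gorm's sentinel error instead of message text.
    func isRecordNotFound(err error) bool {
        return gorm.IsRecordNotFoundError(err)
    }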
diff --git a/dbm-services/common/db-dns/dns-api/internal/domain/repo/domain/base.go b/dbm-services/common/db-dns/dns-api/internal/domain/repo/domain/base.go
new file mode 100644
index 0000000000..7e43d27386
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/internal/domain/repo/domain/base.go
@@ -0,0 +1,189 @@
+package domain
+
+import (
+	"bk-dnsapi/internal/dao"
+	"bk-dnsapi/internal/domain/entity"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/go-mesh/openlogging"
+)
+
+// DnsDomainBaseRepo abstracts CRUD operations on tb_dns_base.
+type DnsDomainBaseRepo interface {
+	Get(map[string]interface{}, []string) ([]interface{}, error)
+	Insert(d []*entity.TbDnsBase) (num int64, err error)
+	Delete(tableName, app, domainName string, bkCloudId int64, ins []string) (rowsAffected int64, err error)
+	Update(d *entity.TbDnsBase, newIP string, newPort int) (rowsAffected int64, err error)
+	UpdateDomainBatch(bs []UpdateBatchDnsBase) (rowsAffected int64, err error)
+}
+
+// DnsDomainBaseImpl is the gorm-backed implementation of DnsDomainBaseRepo.
+type DnsDomainBaseImpl struct {
+}
+
+// UpdateBatchDnsBase describes one batch update: the O* fields hold the old ip/port, the N* fields the new ones.
+type UpdateBatchDnsBase struct {
+	App        string
+	DomainName string
+	OIp        string
+	OPort      int
+	NIp        string
+	NPort      int
+	BkCloudId  int64
+}
+
+// DnsDomainResource returns the default DnsDomainBaseRepo implementation.
+func DnsDomainResource() DnsDomainBaseRepo {
+	return &DnsDomainBaseImpl{}
+}
+
+// Get queries tb_dns_base with the given filters and returns only the requested fields.
+func (base *DnsDomainBaseImpl) Get(query map[string]interface{}, fields []string) (
+	[]interface{}, error) {
+	rs := []interface{}{}
+	var err error
+	where := "1 = 1"
+	for k, v := range query {
+		if k == "ins" || k == "ip" {
+			continue
+		}
+		switch vv := v.(type) {
+		case []string:
+			if len(vv) != 0 {
+				t := ""
+				for _, tv := range vv {
+					t = fmt.Sprintf("%s,'%s'", t, tv)
+				}
+				t = strings.Trim(t, ",")
+				where = fmt.Sprintf("%s and %s in (%s)", where, k, t)
+			}
+		case string:
+			where = fmt.Sprintf("%s and %s = '%s' ", where, k, vv)
+		default:
+			continue
+		}
+	}
+	insStr := "''"
+	ipStr := "''"
+	if ins, _ok := query["ins"]; _ok {
+		insStr = "'" + strings.Join(ins.([]string), "','") + "'"
+	}
+	if ip, _ok := query["ip"]; _ok {
+		ipStr = "'" + strings.Join(ip.([]string), "','") + "'"
+	}
+	if insStr != "''" || ipStr != "''" {
+		where = fmt.Sprintf("%s and (ip in (%s) or concat(ip,'#',port) in (%s))", where, ipStr, insStr)
+	}
+
+	q := fmt.Sprintf("select * from %s where %s", new(entity.TbDnsBase).TableName(), where)
+	openlogging.Info(fmt.Sprintf("query sql is [%+v]", q))
+	var l []entity.TbDnsBase
+	if err := dao.DnsDB.Raw(q).Scan(&l).Error; err == nil || entity.IsNoRowFoundError(err) {
+		// rs = append(rs, l)
+		if len(fields) == 0 {
+			for _, v := range l {
+				rs = append(rs, v)
+			}
+		} else {
+			// trim unused fields
+			for _, v := range l {
+				m := make(map[string]interface{})
+				val := reflect.ValueOf(v)
+				s := reflect.TypeOf(&v).Elem()
+				for _, fname := range fields {
+					for i := 0; i < s.NumField(); i++ {
+						if s.Field(i).Tag.Get("json") == fname {
+							m[fname] = val.FieldByName(s.Field(i).Name).Interface()
+						}
+					}
+				}
+				rs = append(rs, m)
+			}
+		}
+		return rs, nil
+	}
+
+	return rs, err
+}
+
+// Insert writes the given records in one transaction and returns the number of rows created.
+func (base *DnsDomainBaseImpl) Insert(dnsList []*entity.TbDnsBase) (num int64, err error) {
+	tx := dao.DnsDB.Begin()
+	for _, l := range dnsList {
+		r := tx.Create(&l)
+		if r.Error != nil {
+			tx.Rollback()
+			return 0, r.Error
+		}
+		num += r.RowsAffected
+	}
+	if err = tx.Commit().Error; err != nil {
+		return 0, err
+	}
+	return
+}
+
+// Delete removes the matching records; instances ending in "#0" are matched as bare IPs.
+func (base *DnsDomainBaseImpl) Delete(tableName, app, domainName string, bkCloudId int64,
+	ins []string) (rowsAffected int64, err error) {
+	execSql := fmt.Sprintf("delete from %s where  app = '%s' and bk_cloud_id = '%d'",
+		tableName, app, bkCloudId)
+	if domainName != "" {
+		execSql = fmt.Sprintf("%s and domain_name = '%s'", execSql, domainName)
+	}
+	if len(ins) != 0 {
+		insStr := "''"
+		ipStr := "''"
+		for _, i := range ins {
+			if strings.HasSuffix(i, "#0") {
+				ip := strings.Split(i, "#")[0]
+				ipStr = fmt.Sprintf("%s,'%s'", ipStr, ip)
+			} else {
+				insStr = fmt.Sprintf("%s,'%s'", insStr, i)
+			}
+
+		}
+		insStr = strings.Trim(insStr, ",")
+		ipStr = strings.Trim(ipStr, ",")
+		execSql = fmt.Sprintf("%s and  (concat(ip,'#',port) in (%s) or ip in (%s))",
+			execSql, insStr, ipStr)
+	} else {
+		execSql = fmt.Sprintf("delete from %s where  domain_name = '%s' and app = '%s' and bk_cloud_id ='%d'",
+			tableName, domainName, app, bkCloudId)
+	}
+	r := dao.DnsDB.Exec(execSql)
+
+	if r.Error != nil {
+		return 0, r.Error
+	}
+	return r.RowsAffected, nil
+}
+
+// Update sets the record's ip and port to the new values.
+func (base *DnsDomainBaseImpl) Update(d *entity.TbDnsBase, newIP string, newPort int) (rowsAffected int64, err error) {
+	r := dao.DnsDB.Model(d).Update(map[string]interface{}{"ip": newIP, "port": newPort})
+	return r.RowsAffected, r.Error
+}
+
+// UpdateDomainBatch applies a batch of ip/port changes in one transaction.
+func (base *DnsDomainBaseImpl) UpdateDomainBatch(bs []UpdateBatchDnsBase) (rowsAffected int64, err error) {
+	rowsAffected = 0
+	tx := dao.DnsDB.Begin()
+
+	for _, b := range bs {
+		r := tx.Model(&entity.TbDnsBase{}).Where("app = ? and bk_cloud_id = ?", b.App, b.BkCloudId).
+			Where("domain_name = ? and ip = ? and port = ?", b.DomainName, b.OIp, b.OPort).
+			Update(map[string]interface{}{"ip": b.NIp, "port": b.NPort})
+		if r.Error != nil {
+			tx.Rollback()
+			return 0, r.Error
+		}
+		rowsAffected += r.RowsAffected
+	}
+	if err = tx.Commit().Error; err != nil {
+		return 0, err
+	}
+	return
+}
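Get and Delete above splice request values directly into SQL strings, which is injection-prone and brittle around quoting. For comparison, the fixed part of the filter can be expressed with gorm's parameter binding; a sketch against the patch's own entities (app, bkCloudID and domainNames are placeholder variables, and the dynamic column handling is omitted):

    var rows []entity.TbDnsBase
    err := dao.DnsDB.
        Where("app = ? AND bk_cloud_id = ?", app, bkCloudID).
        Where("domain_name IN (?)", domainNames).
        Find(&rows).Error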
diff --git a/dbm-services/common/db-dns/dns-api/internal/domain/repo/domain/domain.go b/dbm-services/common/db-dns/dns-api/internal/domain/repo/domain/domain.go
new file mode 100644
index 0000000000..4224839f12
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/internal/domain/repo/domain/domain.go
@@ -0,0 +1,2 @@
+// Package domain implements the tb_dns_base repository.
+package domain
diff --git a/dbm-services/common/db-dns/dns-api/internal/domain/service/.gitkeep b/dbm-services/common/db-dns/dns-api/internal/domain/service/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/common/db-dns/dns-api/internal/handler/domain/base.go b/dbm-services/common/db-dns/dns-api/internal/handler/domain/base.go
new file mode 100644
index 0000000000..564a501e7f
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/internal/handler/domain/base.go
@@ -0,0 +1,51 @@
+package domain
+
+import (
+	"bk-dnsapi/pkg/errno"
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+)
+
+// Handler groups the domain HTTP handlers.
+type Handler struct {
+}
+
+// Routes returns the route table for the domain handlers.
+func (h *Handler) Routes() []*gin.RouteInfo {
+	return []*gin.RouteInfo{
+		{Method: http.MethodPut, Path: "/", HandlerFunc: h.AddDns},
+
+		{Method: http.MethodDelete, Path: "//", HandlerFunc: h.DelDns},
+
+		{Method: http.MethodPost, Path: "/", HandlerFunc: h.UpdateDns},
+		{Method: http.MethodPost, Path: "/batch", HandlerFunc: h.UpdateBatchDns},
+
+		{Method: http.MethodGet, Path: "/", HandlerFunc: h.GetDns},
+		{Method: http.MethodGet, Path: "/all", HandlerFunc: h.GetAllDns},
+	}
+}
+
+// Response is the common HTTP response envelope.
+type Response struct {
+	Code    int    `json:"code"`
+	Message string `json:"message"`
+	Data    Data   `json:"data"`
+}
+
+// Data carries the payload: detail rows plus the affected or returned row count.
+type Data struct {
+	Detail  interface{} `json:"detail"`
+	RowsNum int64       `json:"rowsNum"`
+}
+
+// SendResponse writes the envelope, decoding code and message from err.
+func SendResponse(c *gin.Context, err error, data Data) {
+	code, message := errno.DecodeErr(err)
+
+	c.JSON(http.StatusOK, Response{
+		Code:    code,
+		Message: message,
+		Data:    data,
+	})
+}
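Every handler replies through this envelope with HTTP 200, so callers must inspect code rather than the status line. A successful response looks roughly like this (the exact code and message values come from errno.DecodeErr and are assumed here):

    {
        "code": 0,
        "message": "OK",
        "data": { "detail": null, "rowsNum": 2 }
    }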
diff --git a/dbm-services/common/db-dns/dns-api/internal/handler/domain/delete.go b/dbm-services/common/db-dns/dns-api/internal/handler/domain/delete.go
new file mode 100644
index 0000000000..b5890965f8
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/internal/handler/domain/delete.go
@@ -0,0 +1,103 @@
+package domain
+
+import (
+	"bk-dnsapi/internal/domain/entity"
+	"bk-dnsapi/internal/domain/repo/domain"
+	"bk-dnsapi/pkg/tools"
+	"fmt"
+	"runtime/debug"
+	"strings"
+
+	"github.com/gin-gonic/gin"
+	"github.com/go-mesh/openlogging"
+)
+
+// DnsBaseDelReqParam is the request body for DelDns.
+type DnsBaseDelReqParam struct {
+	// Appid 	int64		`json:"appid"`
+	App       string `json:"app,required"`
+	BkCloudId int64  `json:"bk_cloud_id"`
+	Domains   []struct {
+		DomainName string   `json:"domain_name"`
+		Instances  []string `json:"instances,required"`
+	} `json:"domains"`
+}
+
+// DelDns deletes dns records for the given app, domains and instances.
+func (h *Handler) DelDns(c *gin.Context) {
+	defer func() {
+		if r := recover(); r != nil {
+			openlogging.Error(fmt.Sprintf("panic error:%v,stack:%s", r, string(debug.Stack())))
+			SendResponse(c,
+				fmt.Errorf("panic error:%v", r),
+				Data{})
+		}
+	}()
+
+	var delParam DnsBaseDelReqParam
+	err := c.BindJSON(&delParam)
+	if err != nil {
+		SendResponse(c, err, Data{})
+		return
+	}
+
+	if delParam.App == "" || len(delParam.Domains) == 0 {
+		SendResponse(c,
+			fmt.Errorf("param must have  [domain_name and app]"),
+			Data{})
+		return
+	}
+
+	var errMsg string
+	var rowsAffected int64
+	var domainList []string
+	ipsList := [][]string{}
+
+	dnsBase := &entity.TbDnsBase{}
+	for i := 0; i < len(delParam.Domains); i++ {
+		domain := delParam.Domains[i]
+		if domain.DomainName != "" {
+			if domain.DomainName, err = tools.CheckDomain(domain.DomainName); err != nil {
+				errMsg += err.Error() + "\r\n"
+				continue
+			}
+		} else {
+			// the domain name and the instance list must not both be empty
+			if len(domain.Instances) == 0 {
+				errMsg += "domain_name and instances is empty" + "\r\n"
+				continue
+			}
+		}
+		var ips []string
+		for j := 0; j < len(domain.Instances); j++ {
+			ins := strings.TrimSpace(domain.Instances[j])
+			if !strings.Contains(ins, "#") {
+				ins += "#0"
+			}
+			_, _, err := tools.GetIpPortByIns(ins)
+			if err != nil {
+				errMsg += err.Error() + "\r\n"
+				continue
+			}
+			ips = append(ips, ins)
+		}
+
+		domainList = append(domainList, domain.DomainName)
+		ipsList = append(ipsList, ips)
+	}
+	if errMsg != "" {
+		SendResponse(c, fmt.Errorf("%s", errMsg), Data{})
+		return
+	}
+
+	for i := 0; i < len(domainList); i++ {
+		rowsNum, _ := domain.DnsDomainResource().Delete(dnsBase.TableName(), delParam.App,
+			domainList[i], delParam.BkCloudId, ipsList[i])
+		rowsAffected += rowsNum
+	}
+
+	SendResponse(c, nil, Data{
+		Detail:  nil,
+		RowsNum: rowsAffected,
+	})
+}
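For illustration, a DelDns request body (values are made up; an instance given without "#" has "#0" appended and is then matched as a bare IP, deleting every port on that IP):

    {
        "app": "test_app",
        "bk_cloud_id": 0,
        "domains": [
            { "domain_name": "example.db", "instances": ["1.1.1.1#20000", "1.1.1.2"] }
        ]
    }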
diff --git a/dbm-services/common/db-dns/dns-api/internal/handler/domain/domain.go b/dbm-services/common/db-dns/dns-api/internal/handler/domain/domain.go
new file mode 100644
index 0000000000..4224839f12
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/internal/handler/domain/domain.go
@@ -0,0 +1,2 @@
+// Package domain implements the domain record HTTP handlers.
+package domain
diff --git a/dbm-services/common/db-dns/dns-api/internal/handler/domain/insert.go b/dbm-services/common/db-dns/dns-api/internal/handler/domain/insert.go
new file mode 100644
index 0000000000..d7857a1585
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/internal/handler/domain/insert.go
@@ -0,0 +1,104 @@
+package domain
+
+import (
+	"bk-dnsapi/internal/domain/entity"
+	"bk-dnsapi/internal/domain/repo/domain"
+	"bk-dnsapi/pkg/tools"
+	"encoding/json"
+	"fmt"
+	"runtime/debug"
+	"strings"
+	"time"
+
+	"github.com/gin-gonic/gin"
+	"github.com/go-mesh/openlogging"
+)
+
+// DnsBasePutReqParam is the request body for AddDns.
+type DnsBasePutReqParam struct {
+	// Appid 	int64		`json:"appid"`
+	App       string `json:"app,required"`
+	BkCloudId int64  `json:"bk_cloud_id"`
+	Domains   []struct {
+		DomainName string   `json:"domain_name"`
+		Instances  []string `json:"instances,required"`
+		Manager    string   `json:"manager"`
+		Remark     string   `json:"remark"`
+		DomainType string   `json:"domain_type"`
+	} `json:"domains"`
+}
+
+// AddDns creates dns records for the given app and domains.
+func (h *Handler) AddDns(c *gin.Context) {
+	defer func() {
+		if r := recover(); r != nil {
+			openlogging.Error(fmt.Sprintf("panic error:%v,stack:%s", r, string(debug.Stack())))
+			SendResponse(c,
+				fmt.Errorf("panic error:%v", r),
+				Data{})
+		}
+	}()
+
+	var addParam DnsBasePutReqParam
+	err := c.BindJSON(&addParam)
+	if err != nil {
+		SendResponse(c, err, Data{})
+		return
+	}
+
+	openlogging.Info(fmt.Sprintf("add dns begin, param [%+v]", addParam))
+
+	// TODO: check that the app exists
+	var errMsg string
+	var dnsBaseList []*entity.TbDnsBase
+	for i := 0; i < len(addParam.Domains); i++ {
+		domain := addParam.Domains[i]
+		if domain.DomainName, err = tools.CheckDomain(domain.DomainName); err != nil {
+			errMsg += err.Error() + "\r\n"
+			continue
+		}
+		for j := 0; j < len(domain.Instances); j++ {
+			ins := strings.TrimSpace(domain.Instances[j])
+			// bare IPs are accepted; the port defaults to 0
+			if !strings.Contains(ins, "#") {
+				ins += "#0"
+			}
+			ip, port, err := tools.GetIpPortByIns(ins)
+			if err != nil {
+				errMsg += err.Error() + "\r\n"
+				continue
+			}
+			// ip, _ = tools.CheckIp(ip)
+			if domain.Manager == "" {
+				domain.Manager = "DBAManager"
+			}
+
+			t := &entity.TbDnsBase{
+				Uid:            0,
+				App:            addParam.App,
+				DomainName:     domain.DomainName,
+				Ip:             ip,
+				Port:           port,
+				StartTime:      time.Now(),
+				LastChangeTime: time.Now(),
+				Manager:        domain.Manager,
+				Remark:         domain.Remark,
+				Status:         "1",
+				BkCloudId:      addParam.BkCloudId,
+			}
+
+			dnsBaseList = append(dnsBaseList, t)
+		}
+	}
+
+	if errMsg != "" {
+		SendResponse(c, fmt.Errorf("%s", errMsg), Data{})
+		return
+	}
+	info, _ := json.Marshal(dnsBaseList)
+	openlogging.Info(fmt.Sprintf("add insert begin exec, param [%+v]", string(info)))
+
+	rowsAffected, err := domain.DnsDomainResource().Insert(dnsBaseList)
+	SendResponse(c, err, Data{RowsNum: rowsAffected})
+}
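For illustration, an AddDns request body (values are made up; manager falls back to "DBAManager" when omitted, and each instance may be "ip#port" or a bare IP, which gets port 0):

    {
        "app": "test_app",
        "bk_cloud_id": 0,
        "domains": [
            {
                "domain_name": "example.db",
                "instances": ["1.1.1.1#20000", "1.1.1.2#20000"],
                "remark": "created via dbm"
            }
        ]
    }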
diff --git a/dbm-services/common/db-dns/dns-api/internal/handler/domain/query.go b/dbm-services/common/db-dns/dns-api/internal/handler/domain/query.go
new file mode 100644
index 0000000000..7612851a8e
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/internal/handler/domain/query.go
@@ -0,0 +1,127 @@
+package domain
+
+import (
+	"bk-dnsapi/internal/domain/entity"
+	"bk-dnsapi/internal/domain/repo/domain"
+	"bk-dnsapi/pkg/tools"
+	"fmt"
+	"runtime/debug"
+	"strings"
+
+	"github.com/gin-gonic/gin"
+	"github.com/go-mesh/openlogging"
+)
+
+// GetDns queries dns records by domain name and/or instance.
+func (h *Handler) GetDns(c *gin.Context) {
+	defer func() {
+		if r := recover(); r != nil {
+			openlogging.Error(fmt.Sprintf("panic error:%v,stack:%s", r, string(debug.Stack())))
+			SendResponse(c,
+				fmt.Errorf("panic error:%v", r),
+				Data{})
+		}
+	}()
+
+	openlogging.Info("get dns query begin")
+	app := tools.TransZeroStrings(c.QueryArray("app"))
+	insList := tools.TransZeroStrings(c.QueryArray("ip"))
+	domainName := tools.TransZeroStrings(c.QueryArray("domain_name"))
+	bkCloudId := tools.TransZeroString(c.Query("bk_cloud_id"))
+	columns := tools.TransZeroStrings(c.QueryArray("columns"))
+
+	// basic validation
+	if len(insList) == 0 && len(domainName) == 0 {
+		SendResponse(c, fmt.Errorf("param must have one of [domain_name|ip]"), Data{})
+		return
+	}
+
+	params := make(map[string]interface{})
+	for i, d := range domainName {
+		if !strings.HasSuffix(d, ".") {
+			d += "."
+			domainName[i] = d
+		}
+	}
+
+	var ins []string
+	var ip []string
+	var errMsg string
+	for _, t := range insList {
+		// ip#port
+		if strings.Contains(t, "#") {
+			if tt, err := tools.CheckInstance(t); err != nil {
+				errMsg += err.Error() + "\r\n"
+				continue
+			} else {
+				ins = append(ins, strings.TrimSpace(tt))
+			}
+			//	ip
+		} else {
+			if tt, err := tools.CheckIp(t); err != nil {
+				errMsg += err.Error() + "\r\n"
+				continue
+			} else {
+				ip = append(ip, strings.TrimSpace(tt))
+			}
+		}
+	}
+	if errMsg != "" {
+		SendResponse(c, fmt.Errorf("%s", errMsg), Data{})
+		return
+	}
+
+	params["domain_name"] = domainName
+	params["app"] = app
+	params["bk_cloud_id"] = bkCloudId
+	if len(ins) != 0 {
+		params["ins"] = ins
+	}
+	if len(ip) != 0 {
+		params["ip"] = ip
+	}
+
+	if len(columns) == 0 {
+		columns = new(entity.TbDnsBase).Columns()
+	}
+	openlogging.Info(fmt.Sprintf("query exec. params[%+v], columns[%+v]", params, columns))
+	rs, err := domain.DnsDomainResource().Get(params, columns)
+	if err != nil {
+		SendResponse(c, err, Data{})
+		return
+	}
+
+	SendResponse(c, nil, Data{
+		Detail:  rs,
+		RowsNum: int64(len(rs)),
+	})
+}
+
+// GetAllDns returns all domain records, for use by the reload program.
+func (h *Handler) GetAllDns(c *gin.Context) {
+	defer func() {
+		if r := recover(); r != nil {
+			openlogging.Error(fmt.Sprintf("panic error:%v,stack:%s", r, string(debug.Stack())))
+			SendResponse(c,
+				fmt.Errorf("panic error:%v", r),
+				Data{})
+		}
+	}()
+
+	bkCloudId := tools.TransZeroString(c.Query("bk_cloud_id"))
+	columns := []string{"ip", "domain_name"}
+	openlogging.Info(fmt.Sprintf("get all dns query begin. bk_cloud_id is %v", bkCloudId))
+
+	params := make(map[string]interface{})
+	params["bk_cloud_id"] = bkCloudId
+	rs, err := domain.DnsDomainResource().Get(params, columns)
+	if err != nil {
+		SendResponse(c, err, Data{})
+		return
+	}
+	SendResponse(c, nil, Data{
+		Detail:  rs,
+		RowsNum: int64(len(rs)),
+	})
+}
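The handlers above expose dns records over plain HTTP GET with repeated query parameters, requiring at least one of `domain_name` or `ip`. A minimal client sketch follows; the base URL and route path are assumptions for illustration, since the route registration is not part of this hunk.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	params := url.Values{}
	// domain names are normalized server-side to carry a trailing dot
	params.Add("domain_name", "example.test.db")
	params.Add("app", "test-app")
	params.Add("bk_cloud_id", "0")

	// hypothetical host and path, for illustration only
	resp, err := http.Get("http://127.0.0.1:8080/api/v1/dns/domain?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```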
diff --git a/dbm-services/common/db-dns/dns-api/internal/handler/domain/update.go b/dbm-services/common/db-dns/dns-api/internal/handler/domain/update.go
new file mode 100644
index 0000000000..03dcd6c7c9
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/internal/handler/domain/update.go
@@ -0,0 +1,156 @@
+package domain
+
+import (
+	"bk-dnsapi/internal/domain/repo/domain"
+	"bk-dnsapi/pkg/tools"
+	"fmt"
+	"runtime/debug"
+
+	"github.com/gin-gonic/gin"
+	"github.com/go-mesh/openlogging"
+)
+
+// DnsBasePostReqParam is the request body for updating a single dns record
+type DnsBasePostReqParam struct {
+	App        string `json:"app,required"`
+	BkCloudId  int64  `json:"bk_cloud_id"`
+	Instance   string `json:"instance,required"`
+	DomainName string `json:"domain_name,required"`
+	Set        struct {
+		Instance string `json:"instance,required"`
+	} `json:"set,required"`
+}
+
+// DnsBaseBatchPostReqParam is the request body for updating dns records in batch
+type DnsBaseBatchPostReqParam struct {
+	App        string `json:"app,required"`
+	DomainName string `json:"domain_name,required"`
+	BkCloudId  int64  `json:"bk_cloud_id"`
+	Sets       []struct {
+		OldInstance string `json:"old_instance,required"`
+		NewInstance string `json:"new_instance,required"`
+	} `json:"sets,required"`
+}
+
+// UpdateDns updates a single dns record
+func (h *Handler) UpdateDns(c *gin.Context) {
+	defer func() {
+		if r := recover(); r != nil {
+			openlogging.Error(fmt.Sprintf("panic error:%v,stack:%s", r, string(debug.Stack())))
+			SendResponse(c,
+				fmt.Errorf("panic error:%v", r),
+				Data{})
+		}
+	}()
+
+	var updateParam DnsBasePostReqParam
+	err := c.BindJSON(&updateParam)
+	if err != nil {
+		SendResponse(c, err, Data{})
+		return
+	}
+	// TODO: check that the app exists, and validate domain_name / instance format
+	var errMsg string
+	var ip, newIp string
+	var port, newPort int
+
+	if updateParam.DomainName, err = tools.CheckDomain(updateParam.DomainName); err != nil {
+		errMsg += err.Error() + "\r\n"
+	}
+	if ip, port, err = tools.GetIpPortByIns(updateParam.Instance); err != nil {
+		errMsg += err.Error() + "\r\n"
+	}
+	if newIp, newPort, err = tools.GetIpPortByIns(updateParam.Set.Instance); err != nil {
+		errMsg += err.Error() + "\r\n"
+	}
+
+	// if ip,err = models.CheckIp(ip); err != nil{
+	//	errMsg += err.Error() + "\r\n"
+	// }
+	// if newIp,err = models.CheckIp(newIp); err != nil{
+	//	errMsg += err.Error() + "\r\n"
+	// }
+
+	if errMsg != "" {
+		SendResponse(c, fmt.Errorf(errMsg), Data{})
+		return
+	}
+
+	var batchDnsBases []domain.UpdateBatchDnsBase
+	batchDnsBases = append(batchDnsBases, domain.UpdateBatchDnsBase{
+		App: updateParam.App, DomainName: updateParam.DomainName, OIp: ip,
+		OPort: port, NIp: newIp, NPort: newPort, BkCloudId: updateParam.BkCloudId})
+
+	rowsAffected, err := domain.DnsDomainResource().UpdateDomainBatch(batchDnsBases)
+
+	SendResponse(c, err, Data{
+		Detail:  nil,
+		RowsNum: rowsAffected,
+	})
+}
+
+// UpdateBatchDns updates dns records in batch
+func (h *Handler) UpdateBatchDns(c *gin.Context) {
+	defer func() {
+		if r := recover(); r != nil {
+			openlogging.Error(fmt.Sprintf("panic error:%v,stack:%s", r, string(debug.Stack())))
+			SendResponse(c,
+				fmt.Errorf("panic error:%v", r),
+				Data{})
+		}
+	}()
+
+	var updateParam DnsBaseBatchPostReqParam
+	err := c.BindJSON(&updateParam)
+	if err != nil {
+		SendResponse(c, err, Data{})
+		return
+	}
+	// TODO: check that the app exists, and validate domain_name / instance format
+	var errMsg string
+	var ip, newIp string
+	var port, newPort int
+	var batchDnsBases []domain.UpdateBatchDnsBase
+
+	if updateParam.DomainName, err = tools.CheckDomain(updateParam.DomainName); err != nil {
+		errMsg += err.Error() + "\r\n"
+	}
+	for _, s := range updateParam.Sets {
+		if ip, port, err = tools.GetIpPortByIns(s.OldInstance); err != nil {
+			errMsg += err.Error() + "\r\n"
+		}
+		if newIp, newPort, err = tools.GetIpPortByIns(s.NewInstance); err != nil {
+			errMsg += err.Error() + "\r\n"
+		}
+
+		batchDnsBases = append(batchDnsBases, domain.UpdateBatchDnsBase{
+			App: updateParam.App, DomainName: updateParam.DomainName, OIp: ip,
+			OPort: port, NIp: newIp, NPort: newPort, BkCloudId: updateParam.BkCloudId})
+	}
+	if errMsg != "" {
+		SendResponse(c, fmt.Errorf(errMsg), Data{})
+		return
+	}
+
+	rowsAffected, err := domain.DnsDomainResource().UpdateDomainBatch(batchDnsBases)
+	SendResponse(c, err, Data{
+		Detail:  nil,
+		RowsNum: rowsAffected,
+	})
+}
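The shape of the batch-update request mirrors DnsBaseBatchPostReqParam above. A sketch of building such a body (values are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	payload := map[string]interface{}{
		"app":         "test-app",
		"domain_name": "example.test.db.",
		"bk_cloud_id": 0,
		// each set swaps old_instance for new_instance, both in ip#port form
		"sets": []map[string]string{
			{"old_instance": "1.1.1.1#20000", "new_instance": "2.2.2.2#20000"},
		},
	}
	b, _ := json.MarshalIndent(payload, "", "  ")
	fmt.Println(string(b))
}
```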
diff --git a/dbm-services/common/db-dns/dns-api/pkg/README.md b/dbm-services/common/db-dns/dns-api/pkg/README.md
new file mode 100644
index 0000000000..45fc90328a
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/pkg/README.md
@@ -0,0 +1,8 @@
+# Notes on the pkg directory
+This directory is consumed by other services, so it is split out as an independent submodule to avoid pulling in extra dependencies.
+Code placed in this directory must not reference other code in this project; keep it self-contained.
+
+To tag this submodule, use
+```
+git tag pkg/v1.0.0
+```
\ No newline at end of file
diff --git a/dbm-services/common/db-dns/dns-api/pkg/errno/code.go b/dbm-services/common/db-dns/dns-api/pkg/errno/code.go
new file mode 100755
index 0000000000..d3567d5db2
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/pkg/errno/code.go
@@ -0,0 +1,32 @@
+package errno
+
+var (
+	// Common errors
+
+	// OK means the request succeeded
+	OK = &Errno{Code: 0, Message: "OK"}
+	// InternalServerError is an unexpected server-side error
+	InternalServerError = &Errno{Code: 10001, Message: "Internal server error"}
+	// ErrBind is returned when binding the request body to the struct fails
+	ErrBind = &Errno{Code: 10002, Message: "Error occurred while binding the request body to the struct."}
+
+	// ErrValidation means request validation failed
+	ErrValidation = &Errno{Code: 20001, Message: "Validation failed."}
+	// ErrDatabase wraps database errors
+	ErrDatabase = &Errno{Code: 20002, Message: "Database error."}
+	// ErrToken is returned when signing the JSON web token fails
+	ErrToken = &Errno{Code: 20003, Message: "Error occurred while signing the JSON web token."}
+
+	// User errors
+
+	// ErrEncrypt is returned when encrypting the user password fails
+	ErrEncrypt = &Errno{Code: 20101, Message: "Error occurred while encrypting the user password."}
+	// ErrUserNotFound means the user was not found
+	ErrUserNotFound = &Errno{Code: 20102, Message: "The user was not found."}
+	// ErrTokenInvalid means the token was invalid
+	ErrTokenInvalid = &Errno{Code: 20103, Message: "The token was invalid."}
+	// ErrPasswordIncorrect means the password was incorrect
+	ErrPasswordIncorrect = &Errno{Code: 20104, Message: "The password was incorrect."}
+
+	// Role errors
+
+	// ErrRoleNotFound means the role was not found; note it is an *Err, not an *Errno
+	ErrRoleNotFound = &Err{Code: 30000, Message: "The role was not found."}
+)
diff --git a/dbm-services/common/db-dns/dns-api/pkg/errno/errno.go b/dbm-services/common/db-dns/dns-api/pkg/errno/errno.go
new file mode 100755
index 0000000000..09963bb2d8
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/pkg/errno/errno.go
@@ -0,0 +1,67 @@
+// Package errno defines the error codes and error types shared by the dns api
+package errno
+
+import "fmt"
+
+// Errno is a basic error carrying a code and a message
+type Errno struct {
+	Code    int
+	Message string
+}
+
+// Error implements the error interface
+func (err Errno) Error() string {
+	return err.Message
+}
+
+// Err represents an error
+type Err struct {
+	Code    int
+	Message string
+	Err     error
+}
+
+// New wraps err with the code and message of errno
+func New(errno *Errno, err error) *Err {
+	return &Err{Code: errno.Code, Message: errno.Message, Err: err}
+}
+
+// Add appends message to the error message
+func (err *Err) Add(message string) error {
+	err.Message += " " + message
+	return err
+}
+
+// Addf appends a formatted message to the error message
+func (err *Err) Addf(format string, args ...interface{}) error {
+	err.Message += " " + fmt.Sprintf(format, args...)
+	return err
+}
+
+// Error implements the error interface
+func (err *Err) Error() string {
+	return fmt.Sprintf("Err - code: %d, message: %s, error: %s", err.Code, err.Message, err.Err)
+}
+
+// IsErrUserNotFound reports whether err is ErrUserNotFound
+func IsErrUserNotFound(err error) bool {
+	code, _ := DecodeErr(err)
+	return code == ErrUserNotFound.Code
+}
+
+// DecodeErr extracts the error code and message from err
+func DecodeErr(err error) (int, string) {
+	if err == nil {
+		return OK.Code, OK.Message
+	}
+
+	switch typed := err.(type) {
+	case *Err:
+		return typed.Code, typed.Message
+	case *Errno:
+		return typed.Code, typed.Message
+	}
+
+	return InternalServerError.Code, err.Error()
+}
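DecodeErr is the single funnel that turns any error back into a (code, message) pair: *Err and *Errno carry their own codes, anything else maps to InternalServerError. A short usage sketch, using only the functions defined above:

```go
package main

import (
	"errors"
	"fmt"

	"bk-dnsapi/pkg/errno"
)

func main() {
	// wrap a low-level error with a well-known code
	e := errno.New(errno.ErrDatabase, errors.New("connection refused"))
	_ = e.Add("while querying tb_dns_base")

	code, msg := errno.DecodeErr(e)
	fmt.Println(code, msg) // 20002 Database error. while querying tb_dns_base

	// a plain error falls through to InternalServerError's code
	code, msg = errno.DecodeErr(errors.New("boom"))
	fmt.Println(code, msg) // 10001 boom
}
```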
diff --git a/dbm-services/common/db-dns/dns-api/pkg/go.mod b/dbm-services/common/db-dns/dns-api/pkg/go.mod
new file mode 100644
index 0000000000..99d9fe65f1
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/pkg/go.mod
@@ -0,0 +1,3 @@
+module bk-dnsapi/pkg
+
+go 1.19
diff --git a/dbm-services/common/db-dns/dns-api/pkg/go.sum b/dbm-services/common/db-dns/dns-api/pkg/go.sum
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/common/db-dns/dns-api/pkg/tools/tools.go b/dbm-services/common/db-dns/dns-api/pkg/tools/tools.go
new file mode 100644
index 0000000000..851174ec7c
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/pkg/tools/tools.go
@@ -0,0 +1,41 @@
+// Package tools provides small value-conversion and validation helpers
+package tools
+
+import (
+	"errors"
+	"strconv"
+)
+
+// ChangeValueArrayToString converts a slice of interface{} values to strings
+func ChangeValueArrayToString(value []interface{}) ([]string, error) {
+	results := []string{}
+	for _, item := range value {
+		result, err := ChangeValueToString(item)
+		if err != nil {
+			return results, err
+		}
+		results = append(results, result)
+	}
+
+	return results, nil
+}
+
+// ChangeValueToString converts a scalar value (string, int, int64, float64 or bool) to its string form
+func ChangeValueToString(value interface{}) (string, error) {
+	switch v := value.(type) {
+	case string:
+		return v, nil
+	case int:
+		return strconv.Itoa(v), nil
+	case int64:
+		return strconv.FormatInt(v, 10), nil
+	case float64:
+		return strconv.FormatFloat(v, 'f', -1, 64), nil
+	case bool:
+		return strconv.FormatBool(v), nil
+	default:
+		return "", errors.New("[ChangeValueToString]value type unknown, not in (string,int,int64,float64,bool)")
+	}
+}
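A quick sketch of how the two converters behave, including the error path for unsupported types:

```go
package main

import (
	"fmt"

	"bk-dnsapi/pkg/tools"
)

func main() {
	vals := []interface{}{"a", 1, int64(2), 3.5, true}
	ss, err := tools.ChangeValueArrayToString(vals)
	if err != nil {
		panic(err)
	}
	fmt.Println(ss) // [a 1 2 3.5 true]

	// unsupported types return an error instead of a best-effort string
	if _, err := tools.ChangeValueToString([]byte("x")); err != nil {
		fmt.Println(err)
	}
}
```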
diff --git a/dbm-services/common/db-dns/dns-api/pkg/tools/util.go b/dbm-services/common/db-dns/dns-api/pkg/tools/util.go
new file mode 100644
index 0000000000..16d041ec94
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/pkg/tools/util.go
@@ -0,0 +1,97 @@
+package tools
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// CheckDomain validates a domain name and normalizes it to end with a dot
+func CheckDomain(domain string) (string, error) {
+	domain = strings.TrimSpace(domain)
+	if !strings.HasSuffix(domain, ".") {
+		domain += "."
+	}
+
+	pattern := `^[a-zA-Z0-9][-a-zA-Z0-9]{0,62}(\.[a-zA-Z0-9][-a-zA-Z0-9]{0,62}){2,8}\.*(#(\d+))?$`
+	isDns, err := regexp.MatchString(pattern, domain)
+	if err != nil {
+		return "", err
+	}
+	if !isDns {
+		return "", fmt.Errorf("domain_name[%s] format error", domain)
+	}
+	return domain, nil
+}
+
+// CheckIp validates an IPv4-looking address of the form a.b.c.d
+func CheckIp(ip string) (string, error) {
+	ip = strings.TrimSpace(ip)
+	pattern := `^(\d+)\.(\d+)\.(\d+)\.(\d+)$`
+	isIp, err := regexp.MatchString(pattern, ip)
+	if err != nil {
+		return ip, err
+	}
+	if !isIp {
+		return "", fmt.Errorf("ip[%s] format error", ip)
+	}
+	return ip, nil
+}
+
+// CheckInstance validates an instance string of the form ip#port
+func CheckInstance(instance string) (string, error) {
+	instance = strings.TrimSpace(instance)
+	pattern := `^(\d+)\.(\d+)\.(\d+)\.(\d+)#(\d+)$`
+	isInstance, err := regexp.MatchString(pattern, instance)
+	if err != nil {
+		return "", err
+	}
+	if !isInstance {
+		return "", fmt.Errorf("instance[%s] format error", instance)
+	}
+	return instance, nil
+}
+
+// GetIpPortByIns splits an ip#port instance string into its ip and port
+func GetIpPortByIns(ins string) (ip string, port int, err error) {
+	if !strings.Contains(ins, "#") {
+		// the port part is mandatory
+		return "", 0, fmt.Errorf("ins[%s] format not like ip#port", ins)
+	}
+	ins, err = CheckInstance(ins)
+	if err != nil {
+		return "", 0, err
+	}
+	// invalid ip format
+	ip, err = CheckIp(strings.Split(ins, "#")[0])
+	if err != nil {
+		return "", 0, err
+	}
+	// invalid port format
+	port, err = strconv.Atoi(strings.Split(ins, "#")[1])
+	if err != nil {
+		return "", 0, err
+	}
+	return ip, port, nil
+}
+
+// TransZeroStrings converts a nil slice into an empty slice
+func TransZeroStrings(s []string) []string {
+	if s == nil {
+		return []string{}
+	}
+	return s
+}
+
+// TransZeroString maps the empty string to "0"
+func TransZeroString(s string) string {
+	if s == "" {
+		return "0"
+	}
+	return s
+}
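The validators above feed the dns handlers: CheckDomain appends the trailing dot expected by bind, and GetIpPortByIns insists on the ip#port form. A behavior sketch:

```go
package main

import (
	"fmt"

	"bk-dnsapi/pkg/tools"
)

func main() {
	d, err := tools.CheckDomain("test.example.db")
	fmt.Println(d, err) // test.example.db. <nil>

	ip, port, err := tools.GetIpPortByIns("1.1.1.1#20000")
	fmt.Println(ip, port, err) // 1.1.1.1 20000 <nil>

	// a bare ip without a port is rejected
	_, _, err = tools.GetIpPortByIns("1.1.1.1")
	fmt.Println(err) // ins[1.1.1.1] format not like ip#port
}
```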
diff --git a/dbm-services/common/db-dns/dns-api/scripts/ddl/init.sql b/dbm-services/common/db-dns/dns-api/scripts/ddl/init.sql
new file mode 100644
index 0000000000..d8403662f5
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/scripts/ddl/init.sql
@@ -0,0 +1,18 @@
+CREATE TABLE IF NOT EXISTS `users`(
+    `id` INT UNSIGNED AUTO_INCREMENT,
+    `name` VARCHAR(32) NOT NULL,
+    `age` INT UNSIGNED NOT NULL,
+    `address` VARCHAR(64) NOT NULL,
+    `area` VARCHAR(32) NOT NULL,
+    `number` VARCHAR(64) NOT NULL,
+    PRIMARY KEY ( `id` )
+)ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE IF NOT EXISTS `machines`(
+    `id` INT UNSIGNED AUTO_INCREMENT,
+    `type` VARCHAR(32) NOT NULL,
+    `count` INT UNSIGNED NOT NULL,
+    `created_at` timestamp NOT NULL,
+    `updated_at` timestamp NOT NULL,
+    PRIMARY KEY ( `id` )
+)ENGINE=InnoDB DEFAULT CHARSET=utf8;
\ No newline at end of file
diff --git a/dbm-services/common/db-dns/dns-api/scripts/git/pre-commit b/dbm-services/common/db-dns/dns-api/scripts/git/pre-commit
new file mode 100644
index 0000000000..588e532824
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-api/scripts/git/pre-commit
@@ -0,0 +1,31 @@
+#!/bin/sh
+# NOTE: This script does not play well when adding partial changes, such as with
+# git add -p or git commit -p.
+
+test_fmt() {
+    hash gofmt 2>&- || { echo >&2 "gofmt not in PATH."; exit 1; }
+    IFS='
+'
+    exitcode=0
+    for file in `git diff --cached --name-only --diff-filter=ACM | grep '\.go$'`
+    do
+        output=`gofmt -w "$file"`
+        if test -n "$output"
+        then
+            # any output is a syntax error
+            echo >&2 "$output"
+            exitcode=1
+        fi
+        git add "$file"
+    done
+    exit $exitcode
+}
+
+case "$1" in
+    --about )
+        echo "Check Go code formatting"
+        ;;
+    * )
+        test_fmt
+        ;;
+esac
\ No newline at end of file
diff --git a/dbm-services/common/db-dns/dns-reload/Makefile b/dbm-services/common/db-dns/dns-reload/Makefile
new file mode 100644
index 0000000000..1b9832322b
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/Makefile
@@ -0,0 +1,21 @@
+SRV_NAME = bk-dnsreload
+VER = v0.1.0
+CURRENT_VERSION = release-$(VER)
+NAMESPACE = sccmsp
+DH_URL = mirrors.tencent.com
+
+hook:
+	cp ./scripts/git/pre-commit ./.git/hooks/pre-commit && chmod 711 ./.git/hooks/pre-commit
+
+clean:
+	-rm ./$(SRV_NAME)
+
+build:clean
+	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build  -o $(SRV_NAME) -v ./main/main.go
+
+publish:build
+	docker build --build-arg SRV_NAME=$(SRV_NAME) --rm -t $(SRV_NAME):$(CURRENT_VERSION) .
+	docker tag $(SRV_NAME):$(CURRENT_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(CURRENT_VERSION)
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(CURRENT_VERSION)
+
+.PHONY: hook clean build publish
diff --git a/dbm-services/common/db-dns/dns-reload/api/api.go b/dbm-services/common/db-dns/dns-reload/api/api.go
new file mode 100644
index 0000000000..71c44eae7b
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/api/api.go
@@ -0,0 +1,68 @@
+// Package api wraps the dns api http calls used by dns-reload
+package api
+
+import (
+	"bytes"
+	"dnsReload/config"
+	"dnsReload/dao"
+	"dnsReload/logger"
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+)
+
+// QueryForwardIp returns the forward ip configured for this dns server
+// (currently read from config; the ip argument is reserved for lookup by dns ip)
+func QueryForwardIp(ip string) string {
+	return config.GetConfig("forward_ip")
+}
+
+// BkapiAuthor carries the blueking app credentials
+type BkapiAuthor struct {
+	BkAppCode   string `json:"bk_app_code"`
+	BkAppSecret string `json:"bk_app_secret"`
+}
+
+// ApiResp is the common response envelope of the dns api
+type ApiResp struct {
+	Code    int    `json:"code"`
+	Message string `json:"message"`
+	Data    struct {
+		Detail  []dao.TbDnsBase
+		RowsNum int `json:"rowsNum"`
+	}
+}
+
+// QueryAllDomainPost queries all domain records via POST
+func QueryAllDomainPost() ([]dao.TbDnsBase, error) {
+	queryBody := make(map[string]string)
+	queryBody["db_cloud_token"] = config.GetConfig("db_cloud_token")
+	queryBody["bk_cloud_id"] = config.GetConfig("bk_cloud_id")
+	logger.Info.Printf("body query params is ['%+v']", queryBody)
+
+	bodyData, err := json.Marshal(queryBody)
+	if err != nil {
+		return nil, err
+	}
+
+	bkURL := config.GetConfig("bk_dns_api_url")
+	req, err := http.NewRequest("POST", bkURL+"/apis/proxypass/dns/domain/all/", bytes.NewBuffer(bodyData))
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Add("Content-Type", "application/json")
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	// close the response body to avoid leaking the connection
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+	var data ApiResp
+	if err := json.Unmarshal(body, &data); err != nil {
+		return nil, err
+	}
+	return data.Data.Detail, nil
+}
diff --git a/dbm-services/common/db-dns/dns-reload/config/config.conf b/dbm-services/common/db-dns/dns-reload/config/config.conf
new file mode 100644
index 0000000000..a5cd579fcc
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/config/config.conf
@@ -0,0 +1,17 @@
+info_log_path="../log/info.log"
+error_log_path="../log/err.log"
+
+db_cloud_token=""
+bk_dns_api_url=""
+bk_cloud_id="2"
+
+options_named_file="/usr/local/bind/etc/named.conf"
+options_named_file_tpl="/usr/local/bind/etc/named.conf_tpl"
+local_named_file="/usr/local/bind/etc/named.conf.local"
+zone_dir_path="/usr/local/bind/var/run/named/"
+rndc="/usr/local/bind/sbin/rndc"
+rndc_config="/usr/local/bind/etc/rndc.conf"
+
+interval="3"
+flush_switch="true"
+forward_ip="1.1.1.1,2.2.2.2"
\ No newline at end of file
diff --git a/dbm-services/common/db-dns/dns-reload/config/config.go b/dbm-services/common/db-dns/dns-reload/config/config.go
new file mode 100644
index 0000000000..0579a3a16d
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/config/config.go
@@ -0,0 +1,2 @@
+// Package config loads and serves the dns-reload configuration
+package config
diff --git a/dbm-services/common/db-dns/dns-reload/config/init.go b/dbm-services/common/db-dns/dns-reload/config/init.go
new file mode 100644
index 0000000000..30ca268a2c
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/config/init.go
@@ -0,0 +1,59 @@
+package config
+
+import (
+	"bufio"
+	"io"
+	"log"
+	"os"
+	"strings"
+)
+
+// ConfigMap holds the key/value pairs read from the config file
+var ConfigMap map[string]string
+
+// InitConfig reads configFile into ConfigMap
+func InitConfig(configFile string) {
+	ConfigMap = make(map[string]string)
+	f, err := os.Open(configFile)
+	if err != nil {
+		panic(err)
+	}
+	// close the file only after a successful open
+	defer f.Close()
+
+	r := bufio.NewReader(f)
+	for {
+		b, _, err := r.ReadLine()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			// log.Fatalln exits the process, so no explicit os.Exit is needed
+			log.Fatalln("read config error")
+		}
+		s := strings.TrimSpace(strings.ReplaceAll(string(b), "\"", ""))
+		index := strings.Index(s, "=")
+		if index < 0 {
+			continue
+		}
+		key := strings.TrimSpace(s[:index])
+		if len(key) == 0 {
+			continue
+		}
+		value := strings.TrimSpace(s[index+1:])
+		if len(value) == 0 {
+			continue
+		}
+		ConfigMap[key] = value
+	}
+}
+
+// GetConfig returns the value for k, exiting the process if k is absent
+func GetConfig(k string) string {
+	v, ok := ConfigMap[k]
+	if !ok {
+		// log.Fatalf formats the message and exits the process
+		log.Fatalf("unknown parameter %s in config", k)
+	}
+	return v
+}
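The config format is one key="value" pair per line, as in config.conf above: values are split on the first "=", double quotes are stripped, and empty keys or values are skipped. A minimal sketch against a throwaway file:

```go
package main

import (
	"fmt"
	"os"

	"dnsReload/config"
)

func main() {
	// write a temporary config file in the same format as config.conf
	f, _ := os.CreateTemp("", "dnsreload-*.conf")
	defer os.Remove(f.Name())
	f.WriteString("interval=\"3\"\nflush_switch=\"true\"\n")
	f.Close()

	config.InitConfig(f.Name())
	fmt.Println(config.GetConfig("interval"))     // 3
	fmt.Println(config.GetConfig("flush_switch")) // true
}
```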
diff --git a/dbm-services/common/db-dns/dns-reload/dao/dao.go b/dbm-services/common/db-dns/dns-reload/dao/dao.go
new file mode 100644
index 0000000000..36f3ccc4b4
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/dao/dao.go
@@ -0,0 +1,2 @@
+// Package dao defines the table models used by dns-reload
+package dao
diff --git a/dbm-services/common/db-dns/dns-reload/dao/domain.go b/dbm-services/common/db-dns/dns-reload/dao/domain.go
new file mode 100644
index 0000000000..805eeb2485
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/dao/domain.go
@@ -0,0 +1,44 @@
+package dao
+
+// TbDnsBase maps a row of table tb_dns_base
+type TbDnsBase struct {
+	Uid            int64  `gorm:"column:uid" db:"column:uid" json:"uid" form:"uid"`
+	App            string `gorm:"column:app" db:"column:app" json:"app" form:"app"`
+	DomainName     string `gorm:"column:domain_name" db:"column:domain_name" json:"domain_name" form:"domain_name"`
+	Ip             string `gorm:"column:ip" db:"column:ip" json:"ip" form:"ip"`
+	Port           int64  `gorm:"column:port" db:"column:port" json:"port" form:"port"`
+	StartTime      string `gorm:"column:start_time" db:"column:start_time" json:"start_time" form:"start_time"`
+	LastChangeTime string `gorm:"column:last_change_time" db:"column:last_change_time" json:"last_change_time" form:"last_change_time"`
+	Manager        string `gorm:"column:manager" db:"column:manager" json:"manager" form:"manager"`
+	Remark         string `gorm:"column:remark" db:"column:remark" json:"remark" form:"remark"`
+	DnsStr         string `gorm:"column:dns_str" db:"column:dns_str" json:"dns_str" form:"dns_str"`
+	Status         string `gorm:"column:status" db:"column:status" json:"status" form:"status"`
+	DomainType     int64  `gorm:"column:domain_type" db:"column:domain_type" json:"domain_type" form:"domain_type"`
+	BkCloudId      string `gorm:"column:bk_cloud_id" db:"column:bk_cloud_id" json:"bk_cloud_id" form:"bk_cloud_id"`
+}
+
+// TableName returns the table name used by gorm
+func (t *TbDnsBase) TableName() string {
+	return "tb_dns_base"
+}
+
+// TbDnsServer maps a row of table tb_dns_server
+type TbDnsServer struct {
+	Uid            int64  `gorm:"column:uid" db:"column:uid" json:"uid" form:"uid"`
+	Ip             string `gorm:"column:ip" db:"column:ip" json:"ip" form:"ip"`
+	ForwardIp      string `gorm:"column:forward_ip" db:"column:forward_ip" json:"forward_ip" form:"forward_ip"`
+	Idc            string `gorm:"column:idc" db:"column:idc" json:"idc" form:"idc"`
+	StartTime      string `gorm:"column:start_time" db:"column:start_time" json:"start_time" form:"start_time"`
+	LastConfigTime string `gorm:"column:last_config_time" db:"column:last_config_time" json:"last_config_time" form:"last_config_time"`
+	LastAlived     string `gorm:"column:last_alived" db:"column:last_alived" json:"last_alived" form:"last_alived"`
+	Remark         string `gorm:"column:remark" db:"column:remark" json:"remark" form:"remark"`
+	UpdateCounter  int64  `gorm:"column:update_counter" db:"column:update_counter" json:"update_counter" form:"update_counter"`
+	Type           string `gorm:"column:type" db:"column:type" json:"type" form:"type"`
+	Status         int64  `gorm:"column:status" db:"column:status" json:"status" form:"status"`
+	BkCloudId      string `gorm:"column:bk_cloud_id" db:"column:bk_cloud_id" json:"bk_cloud_id" form:"bk_cloud_id"`
+}
+
+// TableName returns the table name used by gorm
+func (t *TbDnsServer) TableName() string {
+	return "tb_dns_server"
+}
diff --git a/dbm-services/common/db-dns/dns-reload/doc/named.conf_tpl b/dbm-services/common/db-dns/dns-reload/doc/named.conf_tpl
new file mode 100644
index 0000000000..0fe4e23994
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/doc/named.conf_tpl
@@ -0,0 +1,78 @@
+options {
+        /*dns file dir, more than one may be set*/
+        directory "/usr/local/bind/var/run/named";
+
+        /*forward dns queries to the configured upstreams*/
+        forwarders {FORWARD_IPS;};
+
+        /*dns pid file*/
+        pid-file "/usr/local/bind/var/run/named/pid.named";
+    
+        /* which clients may query this dns server */
+        allow-query { any; };
+
+        /*allow zone transfer to other dns servers; we do not need it*/
+        allow-transfer { "none"; };
+
+	/* set the rrset to round-robin */
+	rrset-order{
+            class IN type A name "*" order cyclic;
+        };
+        clients-per-query 200;
+	recursive-clients 2000;
+        max-clients-per-query 10000;
+        minimal-responses yes;
+        max-ncache-ttl 6;
+
+};
+
+key "rndc-key" {
+        algorithm hmac-md5;
+        secret "xxxxxx";
+};
+
+controls { 
+      inet 127.0.0.1 port 953 
+              allow { 127.0.0.1; } keys { "rndc-key"; }; 
+};
+
+ZONES_CONFIG
+
+
+zone "172.in-addr.arpa" {
+        type master;
+        file "named.172";
+};
+
+zone "10.in-addr.arpa" {
+        type master;
+        file "named.10";
+};
+
+zone "in-addr.arpa" {
+        type master;
+        file "named.arpa";
+};
+
+
+/*
+ * log option
+ */
+logging {
+    channel default_syslog { syslog local2; severity dynamic; };
+    channel audit_log { file "/usr/local/bind/log/named.log" versions 10 size 100m; severity dynamic; print-time yes; };
+    channel query_log { file "/usr/local/bind/log/dns_query.log" versions 10 size 100m; severity dynamic; print-time yes; };
+    category default { default_syslog; };
+    category general { default_syslog; };
+    category security { audit_log; default_syslog; };
+    category config { default_syslog; };
+    category resolver { audit_log; };
+    category xfer-in { audit_log; };
+    category xfer-out { audit_log; };
+    category notify { audit_log; };
+    category client { audit_log; };
+    category network { audit_log; };
+    category update { audit_log; };
+    category queries { query_log; };
+    category lame-servers { audit_log; };
+};
diff --git a/dbm-services/common/db-dns/dns-reload/go.mod b/dbm-services/common/db-dns/dns-reload/go.mod
new file mode 100644
index 0000000000..2fb6f0a651
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/go.mod
@@ -0,0 +1,3 @@
+module dnsReload
+
+go 1.19
diff --git a/dbm-services/common/db-dns/dns-reload/go.sum b/dbm-services/common/db-dns/dns-reload/go.sum
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/common/db-dns/dns-reload/logger/init.go b/dbm-services/common/db-dns/dns-reload/logger/init.go
new file mode 100644
index 0000000000..3020cbd1d0
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/logger/init.go
@@ -0,0 +1,42 @@
+package logger
+
+import (
+	"dnsReload/config"
+	"io"
+	"io/ioutil"
+	"log"
+	"os"
+)
+
+// package-level loggers
+var (
+	Trace   *log.Logger
+	Info    *log.Logger
+	Warning *log.Logger
+	Error   *log.Logger
+)
+
+// InitLogger initializes the loggers; the error log path is read from config
+func InitLogger() {
+	errFile, err := os.OpenFile(config.GetConfig("error_log_path"),
+		os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+	if err != nil {
+		log.Fatalln("Failed to open error log file:", err)
+	}
+
+	Trace = log.New(ioutil.Discard,
+		"TRACE: ",
+		log.Ldate|log.Ltime|log.Lshortfile)
+
+	Info = log.New(os.Stdout,
+		"INFO: ",
+		log.Ldate|log.Ltime|log.Lshortfile)
+
+	Warning = log.New(os.Stdout,
+		"WARNING: ",
+		log.Ldate|log.Ltime|log.Lshortfile)
+
+	Error = log.New(io.MultiWriter(errFile),
+		"ERROR: ",
+		log.Ldate|log.Ltime|log.Lshortfile)
+}
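Note the initialization order: InitLogger reads "error_log_path" through the config package, so InitConfig must run first (main.go's init below does exactly that). A usage sketch:

```go
package main

import (
	"dnsReload/config"
	"dnsReload/logger"
)

func main() {
	config.InitConfig("../config/config.conf") // path is illustrative
	logger.InitLogger()

	logger.Info.Printf("info and warnings go to stdout")
	logger.Error.Printf("errors go to the file named by error_log_path")
}
```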
diff --git a/dbm-services/common/db-dns/dns-reload/logger/logger.go b/dbm-services/common/db-dns/dns-reload/logger/logger.go
new file mode 100644
index 0000000000..3dc273e398
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/logger/logger.go
@@ -0,0 +1,2 @@
+// Package logger provides the shared loggers for dns-reload
+package logger
diff --git a/dbm-services/common/db-dns/dns-reload/main/main.go b/dbm-services/common/db-dns/dns-reload/main/main.go
new file mode 100644
index 0000000000..bd4f970019
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/main/main.go
@@ -0,0 +1,57 @@
+// Package main is the dns-reload entry point
+package main
+
+import (
+	"dnsReload/config"
+	"dnsReload/logger"
+	"dnsReload/service"
+	"dnsReload/util"
+	"flag"
+	"fmt"
+	"os"
+	"strconv"
+	"time"
+)
+
+func main() {
+
+	localIp, err := util.GetClientIp()
+	if err != nil {
+		logger.Error.Printf("GetClientIp Error[%+v]", err)
+	}
+
+	interval := config.GetConfig("interval")
+	intervalTime, err := strconv.Atoi(interval)
+	if err != nil {
+		intervalTime = 3
+	}
+	for {
+		if err := service.Reload(localIp); err != nil {
+			// TODO: send an alert / notification here
+			logger.Error.Printf("reload failed: %+v", err)
+		}
+		time.Sleep(time.Duration(intervalTime) * time.Second)
+		// re-read the config so that edits to the config file take effect
+		config.InitConfig(configFile)
+	}
+}
+
+func init() {
+	initFlag()
+	config.InitConfig(configFile)
+	logger.InitLogger()
+	// dao.InitDB()
+}
+
+// command-line arguments
+var configFile string
+
+func initFlag() {
+	flag.StringVar(&configFile, "c", "", "config file")
+	flag.Parse()
+
+	if configFile == "" {
+		fmt.Println("arg -c [configFile] is required")
+		os.Exit(2)
+	}
+}
diff --git a/dbm-services/common/db-dns/dns-reload/service/dnsService.go b/dbm-services/common/db-dns/dns-reload/service/dnsService.go
new file mode 100644
index 0000000000..caebb9b51e
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/service/dnsService.go
@@ -0,0 +1,194 @@
+package service
+
+import (
+	"dnsReload/api"
+	"dnsReload/config"
+	"dnsReload/dao"
+	"dnsReload/logger"
+	"fmt"
+	"io/ioutil"
+	"os/exec"
+	"strings"
+	"sync"
+)
+
+var (
+	wg      sync.WaitGroup
+	errChan chan string
+)
+
+func getZoneFileName(d dao.TbDnsBase) string {
+	domain := strings.Trim(strings.TrimSpace(strings.ToLower(d.DomainName)), ".")
+	t := strings.Split(domain, ".")
+	// use the last three labels as the zone name
+	if len(t) <= 3 || !strings.HasSuffix(domain, "db") {
+		return domain
+	}
+	beginIndex := len(t) - 3
+	return strings.Join(t[beginIndex:], ".")
+}
+
+func makeZoneFile(d dao.TbDnsBase, head string) string {
+	if head == "" {
+		head = `$TTL    6
+@       IN      SOA     db.        root.db.   (
+        2012011601      ; Serial
+        600             ; Refresh
+        14400           ; Retry
+        7200            ; Expire
+        900     )       ; minimum`
+	}
+
+	domain := strings.TrimSpace(strings.ToLower(d.DomainName))
+	ip := strings.TrimSpace(d.Ip)
+	return fmt.Sprintf("%s\n@               IN      NS    %s\n%s    IN    A    %s",
+		head, domain, domain, ip)
+}
+
+func replaceForwardIps(forwardIp string) error {
+	namedFileTpl := config.GetConfig("options_named_file_tpl")
+	content, err := ioutil.ReadFile(namedFileTpl)
+	if err != nil {
+		return err
+	}
+	newContent := strings.ReplaceAll(string(content), "FORWARD_IPS", forwardIp)
+
+	namedFile := config.GetConfig("options_named_file")
+	return ioutil.WriteFile(namedFile, []byte(newContent), 0666)
+}
+
+func replaceZoneInfo(zoneNamedInfo string) error {
+	namedFile := config.GetConfig("options_named_file")
+	content, err := ioutil.ReadFile(namedFile)
+	if err != nil {
+		return err
+	}
+	newContent := strings.ReplaceAll(string(content), "ZONES_CONFIG", zoneNamedInfo)
+	return ioutil.WriteFile(namedFile, []byte(newContent), 0666)
+}
+
+func writeNamedConfig(zoneNamedInfo string) error {
+	namedFile := config.GetConfig("local_named_file")
+	return ioutil.WriteFile(namedFile, []byte(zoneNamedInfo), 0666)
+}
+
+func writeZoneName(fileName, fileContent string) {
+	defer wg.Done()
+	if err := ioutil.WriteFile(fileName, []byte(fileContent), 0666); err != nil {
+		errChan <- err.Error()
+	}
+}
+
+func rndcReload() error {
+	rndc := config.GetConfig("rndc")
+	// reload the server configuration and zones
+	cmd := exec.Command(rndc, "reload")
+	if _, err := cmd.Output(); err != nil {
+		return err
+	}
+	logger.Info.Printf("rndc reload success...")
+
+	// flush the server caches
+	cmd = exec.Command(rndc, "flush")
+	if _, err := cmd.Output(); err != nil {
+		return err
+	}
+	logger.Info.Printf("rndc flush success...")
+
+	return nil
+}
+
+// checkReload reports whether a reload should be performed
+func checkReload() bool {
+	flushSwitch := config.GetConfig("flush_switch")
+	return flushSwitch == "true"
+}
+
+// Reload regenerates the bind configuration and zone files, then triggers an rndc reload
+func Reload(localIp string) error {
+	logger.Info.Printf("reload begin...")
+	defer logger.Info.Printf("reload end...")
+	if !checkReload() {
+		logger.Warning.Printf("flush_switch is not true, no reload needed")
+		return nil
+	}
+
+	forwardIp := api.QueryForwardIp(localIp)
+	if forwardIp == "" {
+		logger.Warning.Printf("%s forwardIp is empty, you should set it in table [tb_dns_server]", localIp)
+	} else {
+		err := replaceForwardIps(forwardIp)
+		if err != nil {
+			logger.Error.Printf("replace forward ips error [%+v]", err)
+			return err
+		}
+	}
+
+	domainList, err := api.QueryAllDomainPost()
+	if err != nil {
+		logger.Error.Printf("query domain info error [%+v]", err)
+		return err
+	}
+
+	zoneFileMap := make(map[string]string)
+	zoneNamedInfo := ""
+	zoneDir := config.GetConfig("zone_dir_path")
+	for _, data := range domainList {
+		zoneName := getZoneFileName(data)
+		if zoneName == "" {
+			logger.Error.Printf("%s got an empty zone file name!", data.DomainName)
+		}
+		if _, _ok := zoneFileMap[zoneName]; !_ok {
+			zoneNamedInfo = fmt.Sprintf("%s\n\nzone \"%s\" {\n        type master;\n        file \"%s\";\n};",
+				zoneNamedInfo, zoneName, zoneDir+zoneName)
+		}
+		zoneFileMap[zoneName] = makeZoneFile(data, zoneFileMap[zoneName])
+	}
+
+	// TODO: tb_dns_zone_info is intentionally not used here for now
+
+	// update the named.conf file
+	if err := replaceZoneInfo(zoneNamedInfo); err != nil {
+		logger.Error.Printf("replaceZoneInfo error[%+v]", err)
+		return err
+	}
+
+	wg = sync.WaitGroup{}
+	wg.Add(len(zoneFileMap))
+	logger.Info.Printf("zoneFileMap len is %d", len(zoneFileMap))
+	for fn, fc := range zoneFileMap {
+		go writeZoneName(zoneDir+fn, fc)
+	}
+
+	errMsg := ""
+	errChan = make(chan string, 100)
+	done := make(chan struct{})
+	go func() {
+		for msg := range errChan {
+			logger.Warning.Printf(msg)
+			errMsg += "\n" + msg
+		}
+		close(done)
+	}()
+
+	wg.Wait()
+	close(errChan)
+	// wait for the collector goroutine to drain errChan before reading errMsg
+	<-done
+
+	if errMsg != "" {
+		return fmt.Errorf(errMsg)
+	}
+
+	// trigger an rndc reload
+	if err := rndcReload(); err != nil {
+		logger.Error.Printf("rndc reload error [%+v]", err)
+		return err
+	}
+
+	return nil
+}
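The zone layout hinges on getZoneFileName: domains ending in "db" with more than three labels are collapsed to their last three labels, so sibling domains share one zone file; everything else maps to its own zone. A package-internal test sketch of that rule:

```go
package service

import (
	"testing"

	"dnsReload/dao"
)

func TestGetZoneFileName(t *testing.T) {
	cases := map[string]string{
		"x.example.test.db.": "example.test.db",   // >3 labels with "db" suffix: last 3 labels
		"example.test.db.":   "example.test.db",   // exactly 3 labels: unchanged
		"a.b.c.example.com.": "a.b.c.example.com", // not a "db" domain: returned whole
	}
	for in, want := range cases {
		if got := getZoneFileName(dao.TbDnsBase{DomainName: in}); got != want {
			t.Errorf("getZoneFileName(%q) = %q, want %q", in, got, want)
		}
	}
}
```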
diff --git a/dbm-services/common/db-dns/dns-reload/service/service.go b/dbm-services/common/db-dns/dns-reload/service/service.go
new file mode 100644
index 0000000000..2d680ff1db
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/service/service.go
@@ -0,0 +1,2 @@
+// Package service implements the bind reload logic
+package service
diff --git a/dbm-services/common/db-dns/dns-reload/util/tools.go b/dbm-services/common/db-dns/dns-reload/util/tools.go
new file mode 100644
index 0000000000..0414fbdad6
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/util/tools.go
@@ -0,0 +1,37 @@
+package util
+
+import (
+	"errors"
+	"net"
+	"os"
+)
+
+// GetClientIp returns the first non-loopback IPv4 address of this host
+func GetClientIp() (string, error) {
+	addrs, err := net.InterfaceAddrs()
+
+	if err != nil {
+		return "", err
+	}
+
+	for _, address := range addrs {
+		// skip loopback addresses; return the first IPv4 address found
+		if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
+			if ipnet.IP.To4() != nil {
+				return ipnet.IP.String(), nil
+			}
+		}
+	}
+
+	return "", errors.New("cannot find the client ip address")
+}
+
+// CheckFileIsExist reports whether filename exists
+func CheckFileIsExist(filename string) bool {
+	if _, err := os.Stat(filename); os.IsNotExist(err) {
+		return false
+	}
+	return true
+}
diff --git a/dbm-services/common/db-dns/dns-reload/util/util.go b/dbm-services/common/db-dns/dns-reload/util/util.go
new file mode 100644
index 0000000000..80d62b1ad1
--- /dev/null
+++ b/dbm-services/common/db-dns/dns-reload/util/util.go
@@ -0,0 +1,2 @@
+// Package util provides small host utilities
+package util
diff --git a/dbm-services/common/db-resource/.ci/codecc.yml b/dbm-services/common/db-resource/.ci/codecc.yml
new file mode 100644
index 0000000000..9be59c2114
--- /dev/null
+++ b/dbm-services/common/db-resource/.ci/codecc.yml
@@ -0,0 +1,29 @@
+version: v2.0
+resources:
+  repositories:
+    - repository: ci_templates/public/codecc
+      name: codecc
+on:
+  mr:
+    target-branches:  [ "*" ]
+stages:
+  - name: "Code Check"
+    check-out:
+      gates:
+        - template: commonGate.yml@codecc
+      timeout-hours: 10
+    jobs:
+      codecc:
+        name: "CodeCC Code Check"
+        runs-on:
+          pool-name: docker  # docker-on-devcloud, docker, local, agentless
+          container:
+            image: mirrors.tencent.com/ci/tlinux3_ci:2.0.0
+        steps:
+          - checkout: self
+          - uses: CodeccCheckAtomDebug@4.*
+            name: Tencent Code Analysis
+            with:
+                beAutoLang: true # auto-detect the project language
+                checkerSetType: "openScan" # rule-set type: "normal" for self-configured rule sets, "openScan" for open-source governance rule sets
+                toolScanType: "2" # scan mode: quick full scan [1] | full scan [0] | diff scan [6] | MR/PR scan [2]; default is 1
diff --git a/dbm-services/common/db-resource/.ci/open_source_check.yml b/dbm-services/common/db-resource/.ci/open_source_check.yml
new file mode 100644
index 0000000000..f421f315f3
--- /dev/null
+++ b/dbm-services/common/db-resource/.ci/open_source_check.yml
@@ -0,0 +1,84 @@
+version: "v2.0"
+name: "Open Source Check"
+label: []
+variables: {}
+stages:
+- name: "Open Source Check"
+  label:
+  - "Build"
+  jobs:
+    job_AfK:
+      name: "Build environment - LINUX"
+      runs-on:
+        pool-name: "docker"
+        container:
+          image: "mirrors.tencent.com/ci/tlinux3_ci:2.3.0"
+        needs: {}
+      steps:
+      - checkout: self
+      - name: "Sensitive information check - department RTX"
+        uses: "SensitiveRtxChecker@3.*"
+      - name: "Tencent Code Analysis (official - code analysis working group)"
+        uses: "CodeccCheckAtomDebug@4.*"
+        with:
+          beAutoLang: true
+          languages:
+          - "GOLANG"
+          checkerSetType: "communityOpenScan"
+          tools:
+          - "WOODPECKER_COMMITSCAN"
+          - "SCC"
+          - "PECKER_SECURITY"
+          - "SENSITIVE"
+          - "DUPC"
+          - "IP_CHECK"
+          - "WOODPECKER_SENSITIVE"
+          - "HORUSPY"
+          - "XCHECK"
+          - "CCN"
+          asyncTask: false
+          asyncTaskId: ""
+          scriptType: "SHELL"
+          script: |-
+            # Coverity/Klocwork compile your code via this build script to trace deep defects
+            # write a build script build.sh with the build tool you depend on, e.g. maven/cmake
+            # and make sure build.sh can compile the code
+            # cd path/to/build.sh
+            # sh build.sh
+          languageRuleSetMap: {}
+          checkerSetEnvType: "prod"
+          multiPipelineMark: ""
+          rtxReceiverType: "1"
+          botWebhookUrl: ""
+          botRemindRange: "2"
+          botRemindSeverity: "7"
+          botRemaindTools: []
+          emailReceiverType: "1"
+          emailCCReceiverList: []
+          instantReportStatus: "2"
+          reportDate: []
+          reportTime: ""
+          reportTools: []
+          toolScanType: "1"
+          diffBranch: ""
+          byFile: false
+          mrCommentEnable: true
+          prohibitIgnore: false
+          newDefectJudgeFromDate: ""
+          transferAuthorList: []
+          path: []
+          customPath: []
+          scanTestSource: false
+          openScanPrj: false
+          openScanFilterEnable: false
+          issueSystem: "TAPD"
+          issueSubSystem: ""
+          issueResolvers: []
+          issueReceivers: []
+          issueFindByVersion: ""
+          maxIssue: 1000
+          issueAutoCommit: false
+  check-out:
+    gates:
+      - template: open_source_gate.yml
+    timeout-hours: 10
\ No newline at end of file
diff --git a/dbm-services/common/db-resource/.ci/templates/open_source_gate.yml b/dbm-services/common/db-resource/.ci/templates/open_source_gate.yml
new file mode 100644
index 0000000000..34ff9b0cb8
--- /dev/null
+++ b/dbm-services/common/db-resource/.ci/templates/open_source_gate.yml
@@ -0,0 +1,26 @@
+parameters:
+- name: receivers
+  type: array
+  default: [ "${{ ci.actor }}" ]
+ 
+gates:
+- name: open-source-gate
+  rule:
+    - "CodeccCheckAtomDebug.all_risk <= 0"
+    - "CodeccCheckAtomDebug.high_med_new_issue <= 0"
+    - "CodeccCheckAtomDebug.ccn_new_max_value <= 40"
+    - "CodeccCheckAtomDebug.sensitive_defect <= 0"
+    - "CodeccCheckAtomDebug.dupc_average <= 15"
+    - "CodeccCheckAtomDebug.ccn_average <= 3"
+    - "CodeccCheckAtomDebug.ccn_new_defect <= 0"
+    - "CodeccCheckAtomDebug.ccn_funcmax <= 20"
+    - "CodeccCheckAtomDebug.woodpecker_all_defect <= 0"
+    - "CodeccCheckAtomDebug.horuspy_all_defect <= 0"
+    - "CodeccCheckAtomDebug.go_serious_defect <= 0"
+    - "CodeccCheckAtomDebug.go_all_defect <= 100"
+  notify-on-fail:
+  - type: wework-message
+    receivers: ${{ parameters.receivers }}
+  continue-on-fail:
+    gatekeepers:
+    - "${{ ci.actor }}"
\ No newline at end of file
diff --git a/dbm-services/common/db-resource/.gitignore b/dbm-services/common/db-resource/.gitignore
new file mode 100644
index 0000000000..7591f6ce5e
--- /dev/null
+++ b/dbm-services/common/db-resource/.gitignore
@@ -0,0 +1,13 @@
+vendor/
+log/
+build/
+conf/*
+*exe
+*.log
+.idea/
+.vscode/
+.DS_Store
+logs/
+*.env
+bkdbrms
+db-resource
\ No newline at end of file
diff --git a/dbm-services/common/db-resource/.golangci.yml b/dbm-services/common/db-resource/.golangci.yml
new file mode 100644
index 0000000000..023e934a2f
--- /dev/null
+++ b/dbm-services/common/db-resource/.golangci.yml
@@ -0,0 +1,58 @@
+linters-settings:
+  lll:
+    line-length: 120
+  funlen:
+    lines: 80
+    statements: 80
+  gocritic:
+    enabled-checks:
+      - nestingReduce
+      - commentFormatting
+      
+run:
+  # default concurrency is a available CPU number
+  concurrency: 4
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  timeout: 2m
+  # exit code when at least one issue was found, default is 1
+  issues-exit-code: 1
+  # include test files or not, default is true
+  tests: false
+  # default is true. Enables skipping of directories:
+  #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
+  skip-dirs-use-default: true
+
+  skip-files:
+    - ".*/mock/.*.go"
+    - ".*testing.go"
+
+linters:
+  # enable-all: true
+  # disable-all: true
+  disable:
+    - errcheck
+  enable:
+    - nilerr
+    - nakedret
+    #- lll
+    - gofmt
+    - gocritic
+    - gocyclo
+    - whitespace
+    - sqlclosecheck
+    - deadcode
+    - govet
+    - bodyclose
+    - staticcheck
+    - rowserrcheck
+    # - errorlint
+    # - varcheck
+    # - typecheck
+    # - nestif
+    # - gofumpt
+    # - godox
+    # - wsl
+    - funlen
+    # - golint
+    - cyclop
+  fast: false
\ No newline at end of file
diff --git a/dbm-services/common/db-resource/Dockerfile b/dbm-services/common/db-resource/Dockerfile
new file mode 100644
index 0000000000..10287eb795
--- /dev/null
+++ b/dbm-services/common/db-resource/Dockerfile
@@ -0,0 +1,8 @@
+FROM mirrors.tencent.com/bcs/golang:1.19
+
+RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
+RUN echo "Asia/Shanghai" > /etc/timezone
+
+ADD db-resource /
+WORKDIR /
+CMD /db-resource
\ No newline at end of file
diff --git a/dbm-services/common/db-resource/Makefile b/dbm-services/common/db-resource/Makefile
new file mode 100644
index 0000000000..077d25f2e1
--- /dev/null
+++ b/dbm-services/common/db-resource/Makefile
@@ -0,0 +1,45 @@
+SHELL := /bin/bash
+BASEDIR = $(shell pwd)
+
+SRV_NAME = db-resource
+COMMAND_NAME = db-resource
+VER = v0.0.1
+CURRENT_VERSION = release-$(VER)
+TEST_VERSION = test-$(VER)
+NAMESPACE = sccmsp
+DH_URL = mirrors.tencent.com
+BUILD_FLAG = "-X main.version=${VER} -X main.buildstamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X main.githash=`git rev-parse HEAD` "
+all: build
+api:
+	go build  -gcflags=-trimpath=${PWD} -asmflags=-trimpath=${PWD} -ldflags ${BUILD_FLAG}    -o bkdbrms  -v .
+
+build:clean
+	CGO_ENABLED=0 GOOS=${GOOS} GOARCH=amd64 go build -gcflags=-trimpath=${PWD} -asmflags=-trimpath=${PWD}  -ldflags ${BUILD_FLAG}  -o $(COMMAND_NAME) -v .
+
+publish:build
+	docker build  --build-arg SRV_NAME=$(COMMAND_NAME) --rm -t $(SRV_NAME):$(CURRENT_VERSION) .
+	docker tag $(SRV_NAME):$(CURRENT_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(CURRENT_VERSION)
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(CURRENT_VERSION)
+
+latest:build
+	docker build  --build-arg SRV_NAME=$(COMMAND_NAME) --rm -t $(SRV_NAME):latest .
+	docker tag $(SRV_NAME):latest $(DH_URL)/${NAMESPACE}/$(SRV_NAME):latest
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):latest
+
+test:build
+	docker build --build-arg SRV_NAME=$(COMMAND_NAME) --rm -t $(SRV_NAME):$(TEST_VERSION) .
+	docker tag $(SRV_NAME):$(TEST_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(TEST_VERSION)
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(TEST_VERSION)
+
+gotool:
+	@-gofmt -w .
+	@-go vet ./... 2>&1 | grep -v vendor;true
+clean:
+	# find . -name "[._]*.s[a-w][a-z]" | xargs -i rm -f {}
+	rm -f bkdbrms
+help:
+	@echo "make - compile go source"
+	@echo "make gotool - run gofmt & go vet"
+	@echo "make clean - do some clean job"
+
+.PHONY: all api build publish latest test gotool clean help
diff --git a/dbm-services/common/db-resource/README.md b/dbm-services/common/db-resource/README.md
new file mode 100644
index 0000000000..fc5c3e086e
--- /dev/null
+++ b/dbm-services/common/db-resource/README.md
@@ -0,0 +1 @@
+# bk-dbrms [DB resource pool]
diff --git a/dbm-services/common/db-resource/go.mod b/dbm-services/common/db-resource/go.mod
new file mode 100644
index 0000000000..f15cdae184
--- /dev/null
+++ b/dbm-services/common/db-resource/go.mod
@@ -0,0 +1,58 @@
+module dbm-services/common/db-resource
+
+go 1.19
+
+require (
+	github.com/deckarep/golang-set/v2 v2.3.0
+	github.com/gin-contrib/pprof v1.4.0
+	github.com/gin-contrib/requestid v0.0.6
+	github.com/gin-gonic/gin v1.9.0
+	github.com/go-redis/redis/v8 v8.11.5
+	github.com/spf13/viper v1.15.0
+	github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cbs v1.0.604
+	github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.604
+	gorm.io/driver/mysql v1.5.0
+	gorm.io/gorm v1.25.0
+)
+
+require (
+	github.com/bytedance/sonic v1.8.8 // indirect
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
+	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/gin-contrib/sse v0.1.0 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/go-playground/validator/v10 v10.12.0 // indirect
+	github.com/go-sql-driver/mysql v1.7.1 // indirect
+	github.com/goccy/go-json v0.10.2 // indirect
+	github.com/google/uuid v1.3.0 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/jinzhu/inflection v1.0.0 // indirect
+	github.com/jinzhu/now v1.1.5 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
+	github.com/leodido/go-urn v1.2.3 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/mattn/go-isatty v0.0.18 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.7 // indirect
+	github.com/spf13/afero v1.9.5 // indirect
+	github.com/spf13/cast v1.5.0 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/subosito/gotenv v1.4.2 // indirect
+	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
+	github.com/ugorji/go/codec v1.2.11 // indirect
+	golang.org/x/arch v0.3.0 // indirect
+	golang.org/x/crypto v0.8.0 // indirect
+	golang.org/x/net v0.9.0 // indirect
+	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/dbm-services/common/db-resource/go.sum b/dbm-services/common/db-resource/go.sum
new file mode 100644
index 0000000000..f8d79fa566
--- /dev/null
+++ b/dbm-services/common/db-resource/go.sum
@@ -0,0 +1,597 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
+github.com/bytedance/sonic v1.8.8 h1:Kj4AYbZSeENfyXicsYppYKO0K2YWab+i2UTSY7Ukz9Q=
+github.com/bytedance/sonic v1.8.8/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/deckarep/golang-set/v2 v2.3.0 h1:qs18EKUfHm2X9fA50Mr/M5hccg2tNnVqsiBImnyDs0g=
+github.com/deckarep/golang-set/v2 v2.3.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/gin-contrib/pprof v1.4.0 h1:XxiBSf5jWZ5i16lNOPbMTVdgHBdhfGRD5PZ1LWazzvg=
+github.com/gin-contrib/pprof v1.4.0/go.mod h1:RrehPJasUVBPK6yTUwOl8/NP6i0vbUgmxtis+Z5KE90=
+github.com/gin-contrib/requestid v0.0.6 h1:mGcxTnHQ45F6QU5HQRgQUDsAfHprD3P7g2uZ4cSZo9o=
+github.com/gin-contrib/requestid v0.0.6/go.mod h1:9i4vKATX/CdggbkY252dPVasgVucy/ggBeELXuQztm4=
+github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
+github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8=
+github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
+github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI=
+github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA=
+github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
+github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
+github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
+github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
+github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
+github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
+github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA=
+github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
+github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
+github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
+github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us=
+github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
+github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
+github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
+github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cbs v1.0.604 h1:GTPOz3rke4PitOgPZbQ5DaEK3e7zLzm2+r8XzAQgIF4=
+github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cbs v1.0.604/go.mod h1:UX1a6+JQNYvxr3qg7Wg7ACSDNYP4MeP5Wc2WxyKGYgs=
+github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.604 h1:BiuYcs8fINjZT9omCf8tV+rZkrZdf+Hq4TMWUNqYNgY=
+github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.604/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y=
+github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
+github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
+github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
+github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
+github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
+github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
+golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gorm.io/driver/mysql v1.5.0 h1:6hSAT5QcyIaty0jfnff0z0CLDjyRgZ8mlMHLqSt7uXM=
+gorm.io/driver/mysql v1.5.0/go.mod h1:FFla/fJuCvyTi7rJQd27qlNX2v3L6deTR1GgTjSOLPo=
+gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
+gorm.io/gorm v1.25.0 h1:+KtYtb2roDz14EQe4bla8CbQlmb9dN3VejSai3lprfU=
+gorm.io/gorm v1.25.0/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/dbm-services/common/db-resource/internal/config/config.go b/dbm-services/common/db-resource/internal/config/config.go
new file mode 100644
index 0000000000..1d2fa21c4c
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/config/config.go
@@ -0,0 +1,78 @@
+// Package config loads and exposes the db-resource service configuration.
+package config
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"log"
+
+	"github.com/spf13/viper"
+)
+
+// AppConfig is the global configuration instance, populated at startup.
+var AppConfig Config
+
+// Config describes the top-level layout of the configuration file.
+type Config struct {
+	Gormlog          bool              `yaml:"gormlog"`
+	ListenAddress    string            `yaml:"listenAddress"`
+	Db               Db                `yaml:"db"`
+	CmdbDb           Db                `yaml:"cmdb_db" mapstructure:"cmdb_db"`
+	LoggerConfig     LoggerConfig      `yaml:"loggerConfig"`
+	BkSecretConfig   BkSecretConfig    `yaml:"bkSecretConfig"`
+	RedisDb          RedisDb           `yaml:"redis"`
+	CloudCertificate *CloudCertificate `yaml:"cloudCertificate"`
+	//	dbmeta: http://bk-dbm
+	DbMeta string `json:"dbmeta"`
+}
+
+// Db holds the connection settings for a database.
+type Db struct {
+	Name     string `yaml:"name"`
+	Addr     string `yaml:"addr"`
+	UserName string `yaml:"username"`
+	PassWord string `yaml:"password"`
+}
+
+// LoggerConfig holds the logging configuration.
+type LoggerConfig struct {
+	LogWriters string `yaml:"logWriters"` // file,stdout
+	LogLevel   string `yaml:"logLevel"`
+	LogFile    string `yaml:"logfile"`
+}
+
+// BkSecretConfig holds the BlueKing API credentials.
+type BkSecretConfig struct {
+	BkAppCode   string `yaml:"bk_app_code" mapstructure:"bk_app_code"`
+	BKAppSecret string `yaml:"bk_app_secret" mapstructure:"bk_app_secret"`
+	BkUserName  string `yaml:"bk_username" mapstructure:"bk_username"`
+	BkBaseUrl   string `yaml:"bk_base_url" mapstructure:"bk_base_url"`
+}
+
+// RedisDb holds the redis connection settings.
+type RedisDb struct {
+	Addr string `yaml:"addr"`
+	Pwd  string `yaml:"password"`
+}
+
+// CloudCertificate holds cloud vendor credentials.
+type CloudCertificate struct {
+	// cloud vendor reserved field
+	CloudVendor string `yaml:"cloud_vendor" mapstructure:"cloud_vendor"`
+	SecretId    string `yaml:"secret_id" mapstructure:"secret_id"`
+	SecretKey   string `yaml:"secret_key" mapstructure:"secret_key"`
+}
+
+// init loads the configuration file via viper and unmarshals it into AppConfig.
+func init() {
+	log.Println("init config")
+	viper.SetConfigName("config")
+	viper.SetConfigType("yaml")
+	viper.AddConfigPath("$HOME/conf")
+	viper.AddConfigPath("./conf")
+	viper.AddConfigPath("./")
+	if err := viper.ReadInConfig(); err != nil {
+		logger.Fatal("failed to read configuration file: %v", err)
+	}
+	if err := viper.Unmarshal(&AppConfig); err != nil {
+		logger.Fatal("unmarshal configuration failed: %v", err)
+	}
+}
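
For context, here is a minimal sketch of a `config.yaml` that the struct above would accept; all values are illustrative placeholders, not part of this patch. Note that `viper.Unmarshal` matches keys through mapstructure tags (or case-insensitive field names) rather than the `yaml` tags, which is why fields like `cmdb_db` carry an explicit mapstructure tag.

```yaml
# Illustrative only: example db-resource config, assuming default viper key matching.
gormlog: true
listenAddress: "0.0.0.0:8000"
db:
  name: "db_resource"
  addr: "127.0.0.1:3306"
  username: "root"
  password: "example-password"
cmdb_db:
  name: "cmdb"
  addr: "127.0.0.1:3306"
  username: "root"
  password: "example-password"
loggerConfig:
  logWriters: "file,stdout"
  logLevel: "info"
  logfile: "./logs/db-resource.log"
dbmeta: "http://bk-dbm"
```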
diff --git a/dbm-services/common/db-resource/internal/controller/apply/apply.go b/dbm-services/common/db-resource/internal/controller/apply/apply.go
new file mode 100644
index 0000000000..c729e87d93
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/controller/apply/apply.go
@@ -0,0 +1,162 @@
+// Package apply implements the HTTP handlers for resource application.
+package apply
+
+import (
+	"dbm-services/common/db-resource/internal/controller"
+	"dbm-services/common/db-resource/internal/lock"
+	"dbm-services/common/db-resource/internal/model"
+	"dbm-services/common/db-resource/internal/svr/apply"
+	"dbm-services/common/db-resource/internal/svr/task"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"time"
+
+	"github.com/gin-gonic/gin"
+)
+
+// ApplyHandler handles resource apply, pre-apply and confirm requests.
+type ApplyHandler struct {
+	controller.BaseHandler
+}
+
+// RegisterRouter registers the resource apply routes on the gin engine.
+func (c *ApplyHandler) RegisterRouter(engine *gin.Engine) {
+	r := engine.Group("resource")
+	{
+		r.POST("/apply", c.ApplyResource)
+		r.POST("/pre-apply", c.PreApplyResource)
+		r.POST("/confirm/apply", c.ConfirmApply)
+	}
+}
+
+func newLocker(key string, requestId string) *lock.SpinLock {
+	return lock.NewSpinLock(&lock.RedisLock{Name: key, RandKey: requestId, Expiry: 120 * time.Second}, 60,
+		350*time.Millisecond)
+}
+
+// ConfirmApplyParam is the request body for confirming pre-applied resources.
+type ConfirmApplyParam struct {
+	RequestId string `json:"request_id" binding:"required"`
+	HostIds   []int  `json:"host_ids" binding:"gt=0,dive,required"`
+}
+
+// ConfirmApply marks the pre-occupied hosts of a request as used.
+func (c *ApplyHandler) ConfirmApply(r *gin.Context) {
+	var param ConfirmApplyParam
+	if c.Prepare(r, &param) != nil {
+		return
+	}
+	requestId := r.GetString("request_id")
+	hostIds := cmutil.RemoveDuplicateIntElement(param.HostIds)
+	var cnt int64
+	err := model.DB.Self.Table(model.TbRpApplyDetailLogName()).Where("request_id = ?", param.RequestId).Count(&cnt).Error
+	if err != nil {
+		logger.Error("use request id %s,query apply resouece failed %s", param.RequestId, err.Error())
+		c.SendResponse(r, fmt.Errorf("%w", err), requestId, "use request id search applyed resource failed")
+		return
+	}
+	if len(hostIds) != int(cnt) {
+		c.SendResponse(r, fmt.Errorf("need return resource count is %d,but use request id only found total count %d",
+			len(hostIds), cnt), requestId, "")
+		return
+	}
+	var rs []model.TbRpDetail
+	err = model.DB.Self.Table(model.TbRpDetailName()).Where(" bk_host_id in (?) and status != ? ", hostIds,
+		model.Prepoccupied).Find(&rs).Error
+	if err != nil {
+		c.SendResponse(r, err, nil, requestId)
+		return
+	}
+	if len(rs) > 0 {
+		var errMsg string
+		for _, v := range rs {
+			errMsg += fmt.Sprintf("%s:%s\n", v.IP, v.Status)
+		}
+		c.SendResponse(r, fmt.Errorf("the following example:%s,abnormal state", errMsg), requestId, "")
+		return
+	}
+	// update to used status
+	err = cmutil.Retry(
+		cmutil.RetryConfig{Times: 3, DelayTime: 1 * time.Second},
+		func() error {
+			return model.DB.Self.Table(model.TbRpDetailName()).Where(" bk_host_id in (?) ", hostIds).Update("status",
+				model.Used).Error
+		},
+	)
+	if err != nil {
+		c.SendResponse(r, err, nil, requestId)
+		return
+	}
+	c.SendResponse(r, nil, requestId, "successful")
+}
+
+// ApplyResource applies resources and marks the matched hosts as used.
+func (c *ApplyHandler) ApplyResource(r *gin.Context) {
+	c.ApplyBase(r, model.Used)
+}
+
+// PreApplyResource pre-occupies resources pending a later confirmation.
+func (c *ApplyHandler) PreApplyResource(r *gin.Context) {
+	c.ApplyBase(r, model.Prepoccupied)
+}
+
+// ApplyBase runs the shared apply flow, setting matched hosts to the given status.
+func (c *ApplyHandler) ApplyBase(r *gin.Context, mode string) {
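+	// RuningTask acts as a semaphore: sending acquires a slot and the deferred
+	// receive releases it, capping the number of concurrent apply flows.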
+	task.RuningTask <- struct{}{}
+	defer func() { <-task.RuningTask }()
+	var param apply.ApplyRequestInputParam
+	var pickers []*apply.PickerObject
+	var err error
+	var requestId string
+	if c.Prepare(r, &param) != nil {
+		return
+	}
+	requestId = r.GetString("request_id")
+	if err := param.ParamCheck(); err != nil {
+		c.SendResponse(r, err, nil, requestId)
+		return
+	}
+	// acquire the resource lock; a dry run does not need it
+	if !param.DryRun {
+		lock := newLocker(param.LockKey(), requestId)
+		if err := lock.Lock(); err != nil {
+			c.SendResponse(r, err, nil, requestId)
+			return
+		}
+		defer func() {
+			if err := lock.Unlock(); err != nil {
+				logger.Error(fmt.Sprintf("unlock failed %s", err.Error()))
+				return
+			}
+		}()
+	}
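+	// deferred: roll back instances the pickers still hold if the apply does not complete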
+	defer func() {
+		apply.RollBackAllInstanceUnused(pickers)
+	}()
+	pickers, err = apply.CycleApply(param)
+	if err != nil {
+		logger.Error("apply machine failed %s", err.Error())
+		c.SendResponse(r, err, nil, requestId)
+		return
+	}
+	if !param.DryRun {
+		data, err := apply.LockReturnPickers(pickers, mode)
+		if err != nil {
+			c.SendResponse(r, err, nil, requestId)
+			return
+		}
+		logger.Info(fmt.Sprintf("request %s will return %d machines", requestId, len(data)))
+		task.ApplyResponeLogChan <- task.ApplyResponeLogItem{
+			RequestId: requestId,
+			Data:      data,
+		}
+		task.RecordRsOperatorInfoChan <- param.GetOperationInfo(requestId)
+		c.SendResponse(r, nil, data, requestId)
+		return
+	}
+	c.SendResponse(r, nil, map[string]interface{}{"check_success": true}, requestId)
+}
diff --git a/dbm-services/common/db-resource/internal/controller/controller.go b/dbm-services/common/db-resource/internal/controller/controller.go
new file mode 100644
index 0000000000..6c87537647
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/controller/controller.go
@@ -0,0 +1,53 @@
+// Package controller TODO
+package controller
+
+import (
+	"dbm-services/common/db-resource/pkg/errno"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+)
+
+// BaseHandler provides shared request parsing and response helpers
+type BaseHandler struct{}
+
+// Response is the uniform JSON envelope returned by every endpoint
+type Response struct {
+	Code      int         `json:"code"`
+	Message   string      `json:"message"`
+	Data      interface{} `json:"data"`
+	RequestId string      `json:"request_id"`
+}
+
+// Prepare extracts the request id and binds the request body into schema
+func (c *BaseHandler) Prepare(r *gin.Context, schema interface{}) error {
+	requestId := r.GetString("request_id")
+	if cmutil.IsEmpty(requestId) {
+		err := fmt.Errorf("get request id failed")
+		c.SendResponse(r, err, nil, requestId)
+		return err
+	}
+	if err := r.ShouldBind(&schema); err != nil {
+		logger.Error("ShouldBind Failed %s", err.Error())
+		c.SendResponse(r, err, nil, requestId)
+		return err
+	}
+	logger.Info("param is %v", schema)
+	return nil
+}
+
+// SendResponse decodes err into a code/message pair and writes the JSON envelope
+func (c *BaseHandler) SendResponse(r *gin.Context, err error, data interface{}, requestId string) {
+	code, message := errno.DecodeErr(err)
+	r.JSON(http.StatusOK, Response{
+		Code:      code,
+		Message:   message,
+		Data:      data,
+		RequestId: requestId,
+	})
+}
diff --git a/dbm-services/common/db-resource/internal/controller/manage/manage.go b/dbm-services/common/db-resource/internal/controller/manage/manage.go
new file mode 100644
index 0000000000..fe704b2c50
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/controller/manage/manage.go
@@ -0,0 +1,2 @@
+// Package manage TODO
+package manage
diff --git a/dbm-services/common/db-resource/internal/controller/manage/rs.go b/dbm-services/common/db-resource/internal/controller/manage/rs.go
new file mode 100644
index 0000000000..9ab2ab6899
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/controller/manage/rs.go
@@ -0,0 +1,325 @@
+package manage
+
+import (
+	"dbm-services/common/db-resource/internal/controller"
+	"dbm-services/common/db-resource/internal/model"
+	"dbm-services/common/db-resource/internal/svr/apply"
+	"dbm-services/common/db-resource/internal/svr/bk"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+
+	rf "github.com/gin-gonic/gin"
+	"gorm.io/gorm"
+)
+
+// MachineResourceHandler handles resource management endpoints
+type MachineResourceHandler struct {
+	controller.BaseHandler
+}
+
+// RegisterRouter registers the resource management routes on the gin engine
+func (c *MachineResourceHandler) RegisterRouter(engine *rf.Engine) {
+	r := engine.Group("resource")
+	{
+		r.POST("/list", c.List)
+		r.POST("/update", c.Update)
+		r.POST("/delete", c.Delete)
+		r.POST("/import", c.Import)
+		r.POST("/mountpoints", c.GetMountPoints)
+		r.POST("/disktypes", c.GetDiskTypes)
+		r.POST("/subzones", c.GetSubZones)
+		r.POST("/deviceclass", c.GetDeviceClass)
+		r.POST("/operation/list", c.OperationInfoList)
+	}
+}
+
+// MachineResourceGetterInputParam filters for listing resources
+type MachineResourceGetterInputParam struct {
+	// dedicated business ids
+	ForBizs     []int              `json:"for_bizs"`
+	City        []string           `json:"city"`
+	SubZones    []string           `json:"subzones"`
+	DeviceClass []string           `json:"device_class"`
+	Labels      map[string]string  `json:"labels"`
+	Hosts       []string           `json:"hosts"`
+	BkCloudIds  []int              `json:"bk_cloud_ids"`
+	RsTypes     []string           `json:"resource_types"`
+	MountPoint  string             `json:"mount_point"`
+	Cpu         apply.MeasureRange `json:"cpu"`
+	Mem         apply.MeasureRange `json:"mem"`
+	Disk        apply.MeasureRange `json:"disk"`
+	DiskType    string             `json:"disk_type"`
+	Limit       int                `json:"limit"`
+	Offset      int                `json:"offset"`
+}
+
+// List returns resource details matching the given filters, with a total count
+func (c *MachineResourceHandler) List(r *rf.Context) {
+	var input MachineResourceGetterInputParam
+	if c.Prepare(r, &input) != nil {
+		return
+	}
+	requestId := r.GetString("request_id")
+	db := model.DB.Self.Table(model.TbRpDetailName())
+	input.queryBs(db)
+	var data []model.TbRpDetail
+	if err := db.Scan(&data).Error; err != nil {
+		c.SendResponse(r, err, nil, requestId)
+		return
+	}
+	for index := range data {
+		if cmutil.IsNotEmpty(data[index].Label) {
+			k := make(map[string]string)
+			if err := json.Unmarshal([]byte(data[index].Label), &k); err != nil {
+				logger.Error("Unmarshal Label Failed %s", err.Error())
+				continue
+			}
+			data[index].LabelMap = cmutil.CleanStrMap(k)
+		}
+	}
+	var count int64
+	if err := db.Count(&count).Error; err != nil {
+		c.SendResponse(r, err, nil, requestId)
+		return
+	}
+	c.SendResponse(r, nil, map[string]interface{}{"details": data, "count": count}, requestId)
+}
+
+func (c *MachineResourceGetterInputParam) queryBs(db *gorm.DB) {
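+	// an explicit ip list short-circuits every other filter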
+	if len(c.Hosts) > 0 {
+		db.Where("ip in (?)", c.Hosts)
+		return
+	}
+	if len(c.BkCloudIds) > 0 {
+		db.Where("bk_cloud_id in (?) ", c.BkCloudIds)
+	}
+	if len(c.RsTypes) > 0 {
+		db.Where(model.JSONQuery("rs_types").Contains(c.RsTypes))
+	}
+	if c.Cpu.Iegal() && c.Cpu.IsNotEmpty() {
+		db.Where("cpu_num >= ? and cpu_num <= ?", c.Cpu.Min, c.Cpu.Max)
+	}
+	if c.Mem.Iegal() && c.Mem.IsNotEmpty() {
+		db.Where("dram_cap >= ? and dram_cap <= ?", c.Mem.Min, c.Mem.Max)
+	}
+	if c.Disk.Iegal() && c.Disk.IsNotEmpty() {
+		db.Where("total_storage_cap >= ? and total_storage_cap <= ? ", c.Disk.Min, c.Disk.Max)
+	}
+	if cmutil.IsNotEmpty(c.MountPoint) {
+		if cmutil.IsNotEmpty(c.DiskType) {
+			db.Where(model.JSONQuery("storage_device").Equals(c.DiskType, c.MountPoint, "disk_type"))
+		} else {
+			db.Where(model.JSONQuery("storage_device").KeysContains([]string{c.MountPoint}))
+		}
+	} else {
+		if cmutil.IsNotEmpty(c.DiskType) {
+			db.Where(model.JSONQuery("storage_device").SubValContains(c.DiskType, "disk_type"))
+		}
+	}
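+	// only unused resources are candidates for listing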
+	db.Where("status = ? ", model.Unused)
+	if len(c.City) > 0 {
+		db.Where(" city in (?) ", c.City)
+	}
+	if len(c.SubZones) > 0 {
+		db.Where(" sub_zone in (?) ", c.SubZones)
+	}
+	if len(c.DeviceClass) > 0 {
+		db.Where("device_class in ? ", c.DeviceClass)
+	}
+	if len(c.ForBizs) > 0 {
+		db.Where(model.JSONQuery("dedicated_bizs").Contains(cmutil.IntSliceToStrSlice(c.ForBizs)))
+	}
+	if len(c.Labels) > 0 {
+		for key, v := range c.Labels {
+			db.Where("json_contains(label,json_object(?,?))", key, v)
+		}
+	}
+	db.Offset(c.Offset).Limit(c.Limit)
+}
+
+// Delete removes resource records by bk host id
+func (c *MachineResourceHandler) Delete(r *rf.Context) {
+	var input MachineDeleteInputParam
+	if err := c.Prepare(r, &input); err != nil {
+		logger.Error("Prepare error %s", err.Error())
+		return
+	}
+	requestId := r.GetString("request_id")
+	affectRow, err := model.DeleteTbRpDetail(input.BkHostIds)
+	if err != nil {
+		logger.Error("failed to delete data:%s", err.Error())
+		c.SendResponse(r, err, nil, requestId)
+		return
+	}
+	if affectRow == 0 {
+		c.SendResponse(r, fmt.Errorf("no data was deleted"), nil, requestId)
+		return
+	}
+	c.SendResponse(r, nil, "Delete Success", requestId)
+}
+
+// Update edits labels, dedicated bizs, resource types and storage devices of resources
+func (c *MachineResourceHandler) Update(r *rf.Context) {
+	var input MachineResourceInputParam
+	requestId := r.GetString("request_id")
+	if err := c.Prepare(r, &input); err != nil {
+		logger.Error("Prepare error %s", err.Error())
+		return
+	}
+	logger.Debug(fmt.Sprintf("get params %v", input.Data))
+	tx := model.DB.Self.Begin()
+	for _, v := range input.Data {
+		updateMap := make(map[string]interface{})
+		if len(v.Labels) > 0 {
+			l, err := cmutil.ConverMapToJsonStr(v.Labels)
+			if err != nil {
+				tx.Rollback()
+				logger.Error(fmt.Sprintf("ConverMapToJsonStr failed %s", err.Error()))
+				c.SendResponse(r, err, nil, requestId)
+				return
+			}
+			updateMap["label"] = l
+		}
+		if len(v.ForBizs) > 0 {
+			bizJson, err := json.Marshal(cmutil.IntSliceToStrSlice(v.ForBizs))
+			if err != nil {
+				tx.Rollback()
+				logger.Error(fmt.Sprintf("convert biz json failed, error: %s", err.Error()))
+				c.SendResponse(r, err, nil, requestId)
+				return
+			}
+			updateMap["dedicated_bizs"] = bizJson
+		}
+		if len(v.RsTypes) > 0 {
+			rstypes, err := json.Marshal(v.RsTypes)
+			if err != nil {
+				tx.Rollback()
+				logger.Error(fmt.Sprintf("convert resource types failed, error: %s", err.Error()))
+				c.SendResponse(r, err, nil, requestId)
+				return
+			}
+			updateMap["rs_types"] = rstypes
+		}
+		if len(v.StorageDevice) > 0 {
+			storageJson, err := json.Marshal(v.StorageDevice)
+			if err != nil {
+				tx.Rollback()
+				logger.Error(fmt.Sprintf("convert storage device failed, error: %s", err.Error()))
+				c.SendResponse(r, err, nil, requestId)
+				return
+			}
+			updateMap["storage_device"] = storageJson
+		}
+		err := tx.Model(&model.TbRpDetail{}).Table(model.TbRpDetailName()).Select("dedicated_bizs", "rs_types",
+			"label", "storage_device").Where("bk_host_id=?", v.BkHostID).Updates(updateMap).Error
+		if err != nil {
+			tx.Rollback()
+			logger.Error(fmt.Sprintf("update resource detail failed, error: %s", err.Error()))
+			c.SendResponse(r, err, nil, requestId)
+			return
+		}
+	}
+	if err := tx.Commit().Error; err != nil {
+		c.SendResponse(r, err, nil, requestId)
+		return
+	}
+	c.SendResponse(r, nil, "Save Success", requestId)
+}
+
+// MachineDeleteInputParam is the request body for deleting resources
+type MachineDeleteInputParam struct {
+	BkHostIds []int `json:"bk_host_ids"  binding:"required"`
+}
+
+// MachineResourceInputParam is the request body for updating resources
+type MachineResourceInputParam struct {
+	Data []MachineResource `json:"data" binding:"required,dive,gt=0"`
+}
+
+// MachineResource is a single resource update entry
+type MachineResource struct {
+	BkHostID      int                      `json:"bk_host_id" binding:"required"`
+	Labels        map[string]string        `json:"labels"`
+	ForBizs       []int                    `json:"for_bizs"`
+	RsTypes       []string                 `json:"resource_types"`
+	StorageDevice map[string]bk.DiskDetail `json:"storage_device"`
+}
+
+// GetMountPoints returns the distinct mount points present in the resource pool
+func (c MachineResourceHandler) GetMountPoints(r *rf.Context) {
+	db := model.DB.Self.Table(model.TbRpDetailName())
+	var rs []json.RawMessage
+	if err := db.Select("json_keys(storage_device)").Where("JSON_LENGTH(storage_device) > 0").Find(&rs).Error; err != nil {
+		logger.Error("get mountpoints failed %s", err.Error())
+		c.SendResponse(r, err, nil, r.GetString("request_id"))
+		return
+	}
+	var mountpoints []string
+	for _, v := range rs {
+		var mp []string
+		if err := json.Unmarshal(v, &mp); err != nil {
+			logger.Error("unmarshal failed %s", err.Error())
+			c.SendResponse(r, err, nil, r.GetString("request_id"))
+			return
+		}
+		if len(mp) > 0 {
+			mountpoints = append(mountpoints, mp...)
+		}
+	}
+	c.SendResponse(r, nil, cmutil.RemoveDuplicate(mountpoints), r.GetString("request_id"))
+}
+
+// GetDiskTypes returns the distinct disk types present in the resource pool
+func (c MachineResourceHandler) GetDiskTypes(r *rf.Context) {
+	db := model.DB.Self.Table(model.TbRpDetailName())
+	var rs []json.RawMessage
+	err := db.Select("json_extract(storage_device,'$.*.\"disk_type\"')").Where("JSON_LENGTH(storage_device) > 0").
+		Find(&rs).Error
+	if err != nil {
+		logger.Error("get DiskType failed %s", err.Error())
+		c.SendResponse(r, err, nil, r.GetString("request_id"))
+		return
+	}
+	var diskTypes []string
+	for _, v := range rs {
+		var mp []string
+		if err := json.Unmarshal(v, &mp); err != nil {
+			logger.Error("unmarshal failed %s", err.Error())
+			c.SendResponse(r, err, nil, r.GetString("request_id"))
+			return
+		}
+		if len(mp) > 0 {
+			diskTypes = append(diskTypes, mp...)
+		}
+	}
+	c.SendResponse(r, nil, cmutil.RemoveDuplicate(diskTypes), r.GetString("request_id"))
+}
+
+// GetSubZoneParam is the request body for querying sub zones by city
+type GetSubZoneParam struct {
+	LogicCitys []string `json:"citys"`
+}
+
+// GetSubZones returns the distinct sub zones of the given logic cities
+func (c MachineResourceHandler) GetSubZones(r *rf.Context) {
+	var input GetSubZoneParam
+	if c.Prepare(r, &input) != nil {
+		return
+	}
+	var subZones []string
+	db := model.DB.Self.Table(model.TbRpDetailName())
+	err := db.Distinct("sub_zone").Where("city in ? ", input.LogicCitys).Find(&subZones).Error
+	if err != nil {
+		c.SendResponse(r, err, nil, r.GetString("request_id"))
+		return
+	}
+	c.SendResponse(r, nil, subZones, r.GetString("request_id"))
+}
+
+// GetDeviceClass returns the distinct non-empty device classes
+func (c MachineResourceHandler) GetDeviceClass(r *rf.Context) {
+	var class []string
+	db := model.DB.Self.Table(model.TbRpDetailName())
+	err := db.Distinct("device_class").Where("device_class !=''").Find(&class).Error
+	if err != nil {
+		c.SendResponse(r, err, nil, r.GetString("request_id"))
+		return
+	}
+	c.SendResponse(r, nil, class, r.GetString("request_id"))
+}
diff --git a/dbm-services/common/db-resource/internal/controller/manage/rs_import.go b/dbm-services/common/db-resource/internal/controller/manage/rs_import.go
new file mode 100644
index 0000000000..a937c3a85a
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/controller/manage/rs_import.go
@@ -0,0 +1,248 @@
+package manage
+
+import (
+	"dbm-services/common/db-resource/internal/model"
+	"dbm-services/common/db-resource/internal/svr/apply"
+	"dbm-services/common/db-resource/internal/svr/bk"
+	"dbm-services/common/db-resource/internal/svr/cloud"
+	"dbm-services/common/db-resource/internal/svr/task"
+	"dbm-services/common/go-pubpkg/cc.v3"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+	"sync"
+	"time"
+
+	rf "github.com/gin-gonic/gin"
+)
+
+// ImportMachParam is the request body for importing machines into the pool
+type ImportMachParam struct {
+	BkCloudId int `json:"bk_cloud_id"`
+	// ForBizs business tags: the imported resources are dedicated to these businesses
+	ForBizs []int             `json:"for_bizs"`
+	RsTypes []string          `json:"resource_types"`
+	BkBizId int               `json:"bk_biz_id"  binding:"number"`
+	Hosts   []HostBase        `json:"hosts" binding:"gt=0,dive,required"`
+	Labels  map[string]string `json:"labels"`
+	apply.ActionInfo
+}
+
+func (p ImportMachParam) getOperationInfo(requestId string) model.TbRpOperationInfo {
+	return model.TbRpOperationInfo{
+		RequestID:     requestId,
+		OperationType: model.Imported,
+		TotalCount:    len(p.getIps()),
+		TaskId:        p.TaskId,
+		BillId:        p.BillId,
+		Operator:      p.Operator,
+		CreateTime:    time.Now(),
+		UpdateTime:    time.Now(),
+	}
+}
+
+func (p ImportMachParam) getIps() (ips []string) {
+	for _, v := range p.Hosts {
+		if !cmutil.IsEmpty(v.Ip) {
+			ips = append(ips, v.Ip)
+		}
+	}
+	return
+}
+
+// HostBase identifies one host to import
+type HostBase struct {
+	Ip     string `json:"ip" `
+	HostId int    `json:"host_id" binding:"required"`
+}
+
+func (p *ImportMachParam) existCheck() (err error) {
+	var alreadyExistRs []model.TbRpDetail
+	err = model.DB.Self.Table(model.TbRpDetailName()).Where("bk_cloud_id = ? and ip in (?)", p.BkCloudId, p.getIps()).
+		Scan(&alreadyExistRs).Error
+	if err != nil {
+		return err
+	}
+	if len(alreadyExistRs) > 0 {
+		errMsg := "already exist:\n"
+		for _, r := range alreadyExistRs {
+			errMsg += fmt.Sprintf(" bk_cloud_id:%d,ip:%s\n", r.BkCloudID, r.IP)
+		}
+		return fmt.Errorf("%s", errMsg)
+	}
+	return nil
+}
+
+// Import imports machines into the resource pool after checking they are not already registered
+func (c *MachineResourceHandler) Import(r *rf.Context) {
+	var input ImportMachParam
+	if err := c.Prepare(r, &input); err != nil {
+		logger.Error(fmt.Sprintf("Prepare error %s", err.Error()))
+		return
+	}
+	requestId := r.GetString("request_id")
+	if err := input.existCheck(); err != nil {
+		c.SendResponse(r, err, nil, requestId)
+		return
+	}
+	resp, err := ImportByListHostBiz(input)
+	if err != nil {
+		logger.Error(fmt.Sprintf("ImportByListHostBiz failed %s", err.Error()))
+		c.SendResponse(r, err, nil, requestId)
+		return
+	}
+	if len(resp.NotFoundInCCHosts) == len(input.Hosts) {
+		c.SendResponse(r, fmt.Errorf("all machines failed to query cmdb information"), resp, requestId)
+		return
+	}
+	task.RecordRsOperatorInfoChan <- input.getOperationInfo(requestId)
+	c.SendResponse(r, err, resp, requestId)
+}
+
+// ImportHostResp reports hosts that failed disk probing or cmdb lookup during import
+type ImportHostResp struct {
+	SearchDiskErrInfo map[string]string `json:"search_disk_err_info"`
+	NotFoundInCCHosts []string          `json:"not_found_in_cc_hosts"`
+}
+
+// ImportByListHostBiz queries cmdb and disk info for the hosts and writes them into tb_rp_detail
+func ImportByListHostBiz(param ImportMachParam) (resp *ImportHostResp, err error) {
+	var ccHostsInfo []*cc.Host
+	var berr, derr error
+	var failedHostInfo map[string]string
+	var notFoundHosts []string
+	var elems []model.TbRpDetail
+	resp = &ImportHostResp{}
+	wg := sync.WaitGroup{}
+	diskMap := make(map[string]*bk.ShellResCollection)
+
+	labelJson, err := cmutil.ConverMapToJsonStr(cmutil.CleanStrMap(param.Labels))
+	if err != nil {
+		logger.Error(fmt.Sprintf("ConverMapToJsonStr failed, error: %s", err.Error()))
+		return nil, err
+	}
+	bizJson := []byte("[]")
+	if len(param.ForBizs) > 0 {
+		bizJson, err = json.Marshal(cmutil.IntSliceToStrSlice(param.ForBizs))
+		if err != nil {
+			logger.Error(fmt.Sprintf("convert biz json failed, error: %s", err.Error()))
+			return nil, err
+		}
+	}
+	rstypes := []byte("[]")
+	if len(param.RsTypes) > 0 {
+		rstypes, err = json.Marshal(param.RsTypes)
+		if err != nil {
+			logger.Error(fmt.Sprintf("convert resource types failed, error: %s", err.Error()))
+			return nil, err
+		}
+	}
+	targetHosts := cmutil.RemoveDuplicate(param.getIps())
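+	// query cmdb host info and job-probed disk info concurrently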
+	wg.Add(2)
+	go func() {
+		defer wg.Done()
+		ccHostsInfo, notFoundHosts, berr = bk.BatchQueryHostsInfo(param.BkBizId, targetHosts)
+	}()
+	// get disk information in batch
+	go func() {
+		defer wg.Done()
+		diskMap, failedHostInfo, derr = bk.GetDiskInfo(targetHosts, param.BkCloudId, param.BkBizId)
+	}()
+	wg.Wait()
+	resp.SearchDiskErrInfo = failedHostInfo
+	resp.NotFoundInCCHosts = notFoundHosts
+	if berr != nil {
+		logger.Error("query host cc info failed %s", berr.Error())
+		return resp, berr
+	}
+	if len(notFoundHosts) >= len(param.Hosts) {
+		return resp, fmt.Errorf("all hosts query empty in cc")
+	}
+
+	if derr != nil {
+		// disk probing is best effort: log and continue with cmdb data only
+		logger.Error("search disk info by job failed %s", derr.Error())
+	}
+	hostsMap := make(map[string]struct{})
+	for _, host := range targetHosts {
+		hostsMap[host] = struct{}{}
+	}
+	for _, emptyhost := range notFoundHosts {
+		delete(hostsMap, emptyhost)
+	}
+	// further probe disk specific information
+	probeFromCloud(diskMap)
+	logger.Info("queried cc hosts info %v", ccHostsInfo)
+	for _, h := range ccHostsInfo {
+		delete(hostsMap, h.InnerIP)
+		el := model.TbRpDetail{
+			RsTypes:         rstypes,
+			DedicatedBizs:   bizJson,
+			BkCloudID:       param.BkCloudId,
+			BkBizId:         param.BkBizId,
+			AssetID:         h.AssetID,
+			BkHostID:        h.BKHostId,
+			IP:              h.InnerIP,
+			Label:           labelJson,
+			DeviceClass:     h.DeviceClass,
+			DramCap:         h.BkMem,
+			CPUNum:          h.BkCpu,
+			City:            h.IdcCityName,
+			CityID:          h.IdcCityId,
+			SubZone:         h.SZone,
+			SubZoneID:       h.SZoneID,
+			RackID:          h.Equipment,
+			SvrTypeName:     h.SvrTypeName,
+			Status:          model.Unused,
+			NetDeviceID:     h.LinkNetdeviceId,
+			StorageDevice:   []byte("{}"),
+			TotalStorageCap: h.BkDisk,
+			UpdateTime:      time.Now(),
+			CreateTime:      time.Now(),
+		}
+		el.SetMore(h.InnerIP, diskMap)
+		elems = append(elems, el)
+	}
+
+	if err := model.DB.Self.Table(model.TbRpDetailName()).Create(elems).Error; err != nil {
+		logger.Error("failed to save resource: %s", err.Error())
+		return resp, err
+	}
+	return resp, err
+}
+
+// probeFromCloud re-probes the disk type of each host through the cloud api
+func probeFromCloud(diskMap map[string]*bk.ShellResCollection) {
+	var clouder cloud.Disker
+	var err error
+	if clouder, err = cloud.NewDisker(); err != nil {
+		return
+	}
+	ctr := make(chan struct{}, 5)
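+	// ctr caps concurrent cloud api calls at 5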
+	wg := sync.WaitGroup{}
+	for ip := range diskMap {
+		// skip the cloud api call when the job reported no disk ids or region
+		ctr <- struct{}{}
+		wg.Add(1)
+		go func(ip string) {
+			defer func() { wg.Done(); <-ctr }()
+			dkinfo := diskMap[ip]
+			diskIds := bk.GetAllDiskIds(dkinfo.Disk)
+			if cmutil.IsEmpty(dkinfo.TxRegion) || len(diskIds) == 0 {
+				return
+			}
+			cloudInfo, err := clouder.DescribeDisk(diskIds, dkinfo.TxRegion)
+			if err != nil {
+				logger.Error("call clouder describe disk info failed %s", err.Error())
+				return
+			}
+			for _, dk := range dkinfo.Disk {
+				if v, ok := cloudInfo[dk.DiskId]; ok {
+					dk.DiskType = v
+				}
+			}
+		}(ip)
+	}
+	wg.Wait()
+}
diff --git a/dbm-services/common/db-resource/internal/controller/manage/rs_lable.go b/dbm-services/common/db-resource/internal/controller/manage/rs_lable.go
new file mode 100644
index 0000000000..a7496c24ee
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/controller/manage/rs_lable.go
@@ -0,0 +1,51 @@
+package manage
+
+import (
+	"dbm-services/common/db-resource/internal/controller"
+	"dbm-services/common/db-resource/internal/model"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+
+	rf "github.com/gin-gonic/gin"
+)
+
+// LableHandler handles resource label editing
+type LableHandler struct {
+	controller.BaseHandler
+}
+
+// LableEditInput is the request body for editing labels on hosts
+type LableEditInput struct {
+	BkHostIds []int             `json:"bk_host_ids"  binding:"required"`
+	Labels    map[string]string `json:"labels"`
+}
+
+// Edit replaces the label set of the given hosts
+func (c *LableHandler) Edit(r *rf.Context) {
+	var input LableEditInput
+	if err := c.Prepare(r, &input); err != nil {
+		logger.Error(fmt.Sprintf("Prepare error %s", err.Error()))
+		return
+	}
+	requestId := r.GetString("request_id")
+	labelJson, err := cmutil.ConverMapToJsonStr(cmutil.CleanStrMap(input.Labels))
+	if err != nil {
+		logger.Error(fmt.Sprintf("ConverMapToJsonStr failed, error: %s", err.Error()))
+		c.SendResponse(r, err, nil, requestId)
+		return
+	}
+	if len(input.BkHostIds) <= 0 {
+		c.SendResponse(r, nil, nil, requestId)
+		return
+	}
+	err = model.DB.Self.Table(model.TbRpDetailName()).Where("bk_host_id in ? ", input.BkHostIds).
+		Update("label", labelJson).Error
+	if err != nil {
+		logger.Error(fmt.Sprintf("update label failed %s", err.Error()))
+		c.SendResponse(r, err, nil, requestId)
+		return
+	}
+	c.SendResponse(r, nil, nil, requestId)
+}
diff --git a/dbm-services/common/db-resource/internal/controller/manage/rs_operation_info.go b/dbm-services/common/db-resource/internal/controller/manage/rs_operation_info.go
new file mode 100644
index 0000000000..a3cfe6e04f
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/controller/manage/rs_operation_info.go
@@ -0,0 +1,70 @@
+package manage
+
+import (
+	"dbm-services/common/db-resource/internal/model"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+
+	"github.com/gin-gonic/gin"
+	"gorm.io/gorm"
+)
+
+// GetOperationInfoParam filters for querying resource operation records
+type GetOperationInfoParam struct {
+	OperationType string   `json:"operation_type"`
+	BillIds       []string `json:"bill_ids"`
+	TaskIds       []string `json:"task_ids"`
+	Operator      string   `json:"operator"`
+	BeginTime     string   `json:"begin_time"  binding:"omitempty,datetime=2006-01-02 15:04:05" `
+	EndTime       string   `json:"end_time"  binding:"omitempty,datetime=2006-01-02 15:04:05"`
+	Limit         int      `json:"limit"`
+	Offset        int      `json:"offset"`
+}
+
+// OperationInfoList returns resource operation records matching the filters
+func (o MachineResourceHandler) OperationInfoList(r *gin.Context) {
+	var input GetOperationInfoParam
+	requestId := r.GetString("request_id")
+	if err := o.Prepare(r, &input); err != nil {
+		logger.Error(fmt.Sprintf("Prepare error %s", err.Error()))
+		return
+	}
+	db := model.DB.Self.Table(model.TbRpOperationInfoTableName())
+	input.query(db)
+	var data []model.TbRpOperationInfo
+	if err := db.Scan(&data).Error; err != nil {
+		o.SendResponse(r, err, nil, requestId)
+		return
+	}
+	var count int64
+	if err := db.Count(&count).Error; err != nil {
+		o.SendResponse(r, err, nil, requestId)
+		return
+	}
+	o.SendResponse(r, nil, map[string]interface{}{"details": data, "count": count}, requestId)
+}
+
+func (p GetOperationInfoParam) query(db *gorm.DB) {
+	if len(p.BillIds) > 0 {
+		db.Where("bill_id in (?)", p.BillIds)
+	}
+	if len(p.TaskIds) > 0 {
+		db.Where("task_id in (?)", p.TaskIds)
+	}
+	if cmutil.IsNotEmpty(p.Operator) {
+		db.Where("operator = ?", p.Operator)
+	}
+	if cmutil.IsNotEmpty(p.OperationType) {
+		db.Where("operation_type = ? ", p.OperationType)
+	}
+	if cmutil.IsNotEmpty(p.EndTime) {
+		db.Where("create_time <= ? ", p.EndTime)
+	}
+	if cmutil.IsNotEmpty(p.BeginTime) {
+		db.Where("create_time >= ? ", p.BeginTime)
+	}
+	if p.Limit > 0 {
+		db.Offset(p.Offset).Limit(p.Limit)
+	}
+}
diff --git a/dbm-services/common/db-resource/internal/lock/lock.go b/dbm-services/common/db-resource/internal/lock/lock.go
new file mode 100644
index 0000000000..02804bc982
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/lock/lock.go
@@ -0,0 +1,2 @@
+// Package lock TODO
+package lock
diff --git a/dbm-services/common/db-resource/internal/lock/redis_lock.go b/dbm-services/common/db-resource/internal/lock/redis_lock.go
new file mode 100644
index 0000000000..5958fd55ed
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/lock/redis_lock.go
@@ -0,0 +1,55 @@
+package lock
+
+import (
+	"context"
+	"dbm-services/common/db-resource/internal/config"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"time"
+
+	"github.com/go-redis/redis/v8"
+)
+
+var rdb *redis.Client
+
+// init creates the shared redis client from application config
+func init() {
+	rdb = redis.NewClient(&redis.Options{
+		Addr:     config.AppConfig.RedisDb.Addr,
+		Password: config.AppConfig.RedisDb.Pwd,
+		DB:       0,
+	})
+}
+
+// RedisLock a redis SETNX-based lock; RandKey identifies the holder
+type RedisLock struct {
+	Name    string
+	RandKey string
+	Expiry  time.Duration
+}
+
+// TryLock attempts to take the lock once via SETNX
+func (r *RedisLock) TryLock() (err error) {
+	ok, err := rdb.SetNX(context.TODO(), r.Name, r.RandKey, r.Expiry).Result()
+	if err != nil {
+		return err
+	}
+	if !ok {
+		return fmt.Errorf("setnx %s lock failed", r.Name)
+	}
+	return nil
+}
+
+// Unlock releases the lock only if this holder still owns it
+func (r *RedisLock) Unlock() (err error) {
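+	// compare-and-delete in a single lua script so one holder can never release another's lock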
+	luaScript := `if redis.call('get',KEYS[1]) == ARGV[1] then return redis.call('del',KEYS[1]) else return 0 end`
+	v, err := rdb.Eval(context.TODO(), luaScript, []string{r.Name}, []interface{}{r.RandKey}).Int()
+	if err != nil {
+		logger.Error("del lock failed %s", err.Error())
+		return err
+	}
+	if v != 1 {
+		return fmt.Errorf("unlock failed, key %s, val %s", r.Name, r.RandKey)
+	}
+	return nil
+}
diff --git a/dbm-services/common/db-resource/internal/lock/redis_lock_test.go b/dbm-services/common/db-resource/internal/lock/redis_lock_test.go
new file mode 100644
index 0000000000..942294a960
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/lock/redis_lock_test.go
@@ -0,0 +1,31 @@
+package lock_test
+
+import (
+	"dbm-services/common/db-resource/internal/lock"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"testing"
+	"time"
+)
+
+func TestRedisLock(t *testing.T) {
+	t.Log("start testing...")
+	// lock.InitRedisDb()
+	l := lock.NewSpinLock(&lock.RedisLock{
+		Name:    "Tendb",
+		RandKey: cmutil.RandStr(16),
+		Expiry:  10 * time.Second,
+	}, 30, 1*time.Second)
+	for i := 0; i < 20; i++ {
+		go func(j int) {
+			if err := l.Lock(); err != nil {
+				t.Log(j, "lock failed")
+				return
+			}
+			t.Log(j, "lock success")
+			time.Sleep(100 * time.Millisecond)
+			l.Unlock()
+		}(i)
+	}
+
+	time.Sleep(20 * time.Second)
+}
diff --git a/dbm-services/common/db-resource/internal/lock/spinlock.go b/dbm-services/common/db-resource/internal/lock/spinlock.go
new file mode 100644
index 0000000000..d0dfe63851
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/lock/spinlock.go
@@ -0,0 +1,47 @@
+package lock
+
+import (
+	"fmt"
+	"math/rand"
+	"time"
+)
+
+// TryLocker a lock that can fail fast instead of blocking
+type TryLocker interface {
+	TryLock() error
+	Unlock() error
+}
+
+// NewSpinLock builds a spin lock that retries lock.TryLock up to spinTries times
+func NewSpinLock(lock TryLocker, spinTries int, spinInterval time.Duration) *SpinLock {
+	return &SpinLock{
+		lock:         lock,
+		spinTries:    spinTries,
+		spinInterval: spinInterval,
+	}
+}
+
+// SpinLock retries an underlying TryLocker at fixed intervals plus random jitter
+type SpinLock struct {
+	lock         TryLocker
+	spinTries    int
+	spinInterval time.Duration
+}
+
+// Lock spins until TryLock succeeds or the retry budget is exhausted
+func (l *SpinLock) Lock() error {
+	for i := 0; i < l.spinTries; i++ {
+		var err error
+		if err = l.lock.TryLock(); err == nil {
+			return nil
+		}
+		time.Sleep(l.spinInterval)
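+		// extra random jitter spreads out competing lockers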
+		time.Sleep(time.Duration(rand.Intn(1000)) * time.Millisecond)
+	}
+	return fmt.Errorf("spin lock failed after %d tries", l.spinTries)
+}
+
+// Unlock releases the underlying lock
+func (l *SpinLock) Unlock() error {
+	return l.lock.Unlock()
+}
diff --git a/dbm-services/common/db-resource/internal/middleware/middleware.go b/dbm-services/common/db-resource/internal/middleware/middleware.go
new file mode 100644
index 0000000000..811830aee0
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/middleware/middleware.go
@@ -0,0 +1,80 @@
+// Package middleware TODO
+package middleware
+
+import (
+	"bytes"
+	"dbm-services/common/db-resource/internal/controller"
+	"dbm-services/common/db-resource/internal/model"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/gin-contrib/requestid"
+	"github.com/gin-gonic/gin"
+)
+
+type bodyLogWriter struct {
+	gin.ResponseWriter
+	body *bytes.Buffer
+}
+
+// Write tees the response body into the buffer while writing it to the client
+func (w bodyLogWriter) Write(b []byte) (int, error) {
+	w.body.Write(b)
+	return w.ResponseWriter.Write(b)
+}
+
+// BodyLogMiddleware captures the response body and persists it to the request log
+func BodyLogMiddleware(c *gin.Context) {
+	blw := &bodyLogWriter{body: bytes.NewBufferString(""), ResponseWriter: c.Writer}
+	c.Writer = blw
+	c.Next()
+	statusCode := c.Writer.Status()
+	// record the response envelope so it can be attached to the request log
+	var rp controller.Response
+	if blw.body == nil {
+		rp = controller.Response{}
+	} else {
+		if err := json.Unmarshal(blw.body.Bytes(), &rp); err != nil {
+			logger.Error("unmarshal response body failed %s", err.Error())
+			return
+		}
+	}
+	if err := model.UpdateTbRequestLog(rp.RequestId, map[string]interface{}{"respone_body": blw.body.String(),
+		"respone_code": statusCode, "update_time": time.Now()}); err != nil {
+		logger.Warn("update request respone failed %s", err.Error())
+	}
+}
+
+// ApiLogger assigns a request id and records POST request bodies
+func ApiLogger(c *gin.Context) {
+	rid := requestid.Get(c)
+	c.Set("request_id", rid)
+	if c.Request.Method == http.MethodPost {
+		var bodyBytes []byte
+		// read from the original request body
+		bodyBytes, err := io.ReadAll(c.Request.Body)
+		if err != nil {
+			return
+		}
+		if len(bodyBytes) <= 0 {
+			bodyBytes = []byte("{}")
+		}
+		// create a new buffer and replace the original request body
+		c.Request.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
+		if err := model.CreateTbRequestLog(model.TbRequestLog{
+			RequestID:   rid,
+			RequestUser: "",
+			RequestBody: string(bodyBytes),
+			SourceIP:    c.Request.RemoteAddr,
+			CreateTime:  time.Now(),
+			ResponeBody: "{}",
+		}); err != nil {
+			logger.Warn("record request log failed %s", err.Error())
+		}
+	}
+}
diff --git a/dbm-services/common/db-resource/internal/model/TbDeviceSpec.go b/dbm-services/common/db-resource/internal/model/TbDeviceSpec.go
new file mode 100644
index 0000000000..29fa2e1482
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/model/TbDeviceSpec.go
@@ -0,0 +1,34 @@
+package model
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+)
+
+// TbDeviceSpec device specification table
+type TbDeviceSpec struct {
+	ID            int    `gorm:"primaryKey;column:id;type:int(11);not null" json:"-"`
+	ResourceType  string `gorm:"column:resource_type;type:int(11);not null" json:"resource_type"`
+	DeviceClass   string `gorm:"unique;column:device_class;type:varchar(64);not null" json:"device_class"`
+	CPUNum        int    `gorm:"column:cpu_num;type:int(11);not null" json:"cpu_num"`
+	DramCap       int    `gorm:"column:dram_cap;type:int(11);not null" json:"dram_cap"`
+	SsdCap        int    `gorm:"column:ssd_cap;type:int(11);not null" json:"ssd_cap"`
+	SsdNum        int    `gorm:"column:ssd_num;type:int(11);not null" json:"ssd_num"`
+	HddCap        int    `gorm:"column:hdd_cap;type:int(11);not null" json:"hdd_cap"`
+	IsLocalStorge int    `gorm:"column:is_local_storge;type:int(11);not null" json:"is_local_storge"`
+}
+
+// TbDeviceSpecName returns the device spec table name
+func TbDeviceSpecName() string {
+	return "tb_device_spec"
+}
+
+// GetDeviceSpecFromClass looks up the device spec for a device class
+func GetDeviceSpecFromClass(deviceClass string) (m TbDeviceSpec, err error) {
+	err = DB.Self.Table(TbDeviceSpecName()).Where("device_class = ? ", deviceClass).First(&m).Error
+	if err != nil {
+		logger.Error(fmt.Sprintf("Query DeviceSpec By DeviceClass Failed %s", err.Error()))
+		return
+	}
+	return
+}
diff --git a/dbm-services/common/db-resource/internal/model/TbRequestLog.go b/dbm-services/common/db-resource/internal/model/TbRequestLog.go
new file mode 100644
index 0000000000..4e5c292346
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/model/TbRequestLog.go
@@ -0,0 +1,39 @@
+package model
+
+import "time"
+
+// TbRequestLog api request/response audit log
+type TbRequestLog struct {
+	ID              int       `gorm:"primary_key;auto_increment;not_null" json:"-"`
+	RequestID       string    `gorm:"unique;column:request_id;type:varchar(64);not null" json:"request_id"`             // request id of the response
+	RequestUser     string    `gorm:"column:request_user;type:varchar(32);not null" json:"request_user"`                // requesting user
+	RequestBody     string    `gorm:"column:request_body;type:json" json:"request_body"`                                // request body
+	RequestUrl      string    `gorm:"column:request_url;type:varchar(32);not null" json:"request_url"`                  // request path
+	SourceIP        string    `gorm:"column:source_ip;type:varchar(32);not null" json:"source_ip"`                      // source ip of the request
+	ResponeBody     string    `gorm:"column:respone_body;type:json" json:"respone_body"`                                // respone data message
+	ResponeCode     int       `gorm:"column:respone_code;type:int(11);not null" json:"respone_code"`                    // respone code
+	ResponeMesssage string    `gorm:"column:respone_messsage;type:text" json:"respone_messsage"`                        // respone data message
+	UpdateTime      time.Time `gorm:"column:update_time;type:timestamp;default:CURRENT_TIMESTAMP()" json:"update_time"` // last modified time
+	CreateTime      time.Time `gorm:"column:create_time;type:timestamp;default:CURRENT_TIMESTAMP()" json:"create_time"` // create time
+}
+
+// TableName returns the table name of TbRequestLog
+func (TbRequestLog) TableName() string {
+	return TbRequestLogName()
+}
+
+// TbRequestLogName returns the request log table name
+func TbRequestLogName() string {
+	return "tb_request_log"
+}
+
+// CreateTbRequestLog inserts a request log record
+func CreateTbRequestLog(m TbRequestLog) (err error) {
+	return DB.Self.Table(TbRequestLogName()).Create(&m).Error
+}
+
+// UpdateTbRequestLog updates a request log record by request id
+func UpdateTbRequestLog(requestid string, updatesCols map[string]interface{}) (err error) {
+	return DB.Self.Table(TbRequestLogName()).Where("request_id = ?", requestid).Updates(updatesCols).Error
+}
diff --git a/dbm-services/common/db-resource/internal/model/TbRpApplyDetailLog.go b/dbm-services/common/db-resource/internal/model/TbRpApplyDetailLog.go
new file mode 100644
index 0000000000..0f848f2d8e
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/model/TbRpApplyDetailLog.go
@@ -0,0 +1,37 @@
+package model
+
+import "time"
+
+// TbRpApplyDetailLog per-host apply detail log
+type TbRpApplyDetailLog struct {
+	ID         int       `gorm:"primaryKey;auto_increment;not null" json:"-"`
+	RequestID  string    `gorm:"index:idx_request_id;column:request_id;type:varchar(64);not null" json:"request_id"` // request id of the response
+	Item       string    `gorm:"column:item;type:varchar(64);not null" json:"item"`                                  // apply for item
+	BkCloudID  int       `gorm:"column:bk_cloud_id;type:int(11);not null;comment:'云区域 ID'" json:"bk_cloud_id"`
+	IP         string    `gorm:"column:ip;type:varchar(20);not null" json:"ip"` // svr ip
+	BkHostID   int       `gorm:"column:bk_host_id;type:int(11);not null;comment:'bk主机ID'" json:"bk_host_id"`
+	UpdateTime time.Time `gorm:"column:update_time;type:timestamp" json:"update_time"` // last modified time
+	CreateTime time.Time `gorm:"column:create_time;type:datetime" json:"create_time"`  // create time
+}
+
+// TableName returns the table name of TbRpApplyDetailLog
+func (TbRpApplyDetailLog) TableName() string {
+	return TbRpApplyDetailLogName()
+}
+
+// TbRpApplyDetailLogName returns the apply detail log table name
+func TbRpApplyDetailLogName() string {
+	return "tb_rp_apply_detail_log"
+}
+
+// CreateTbRpOpsAPIDetailLog records one apply detail log entry
+func CreateTbRpOpsAPIDetailLog(m TbRpApplyDetailLog) error {
+	return DB.Self.Table(TbRpApplyDetailLogName()).Create(&m).Error
+}
+
+// CreateBatchTbRpOpsAPIDetailLog records apply detail log entries in batch
+func CreateBatchTbRpOpsAPIDetailLog(m []TbRpApplyDetailLog) error {
+	return DB.Self.Table(TbRpApplyDetailLogName()).CreateInBatches(m, len(m)).Error
+}
diff --git a/dbm-services/common/db-resource/internal/model/TbRpDetail.go b/dbm-services/common/db-resource/internal/model/TbRpDetail.go
new file mode 100644
index 0000000000..5b63f6a0ee
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/model/TbRpDetail.go
@@ -0,0 +1,231 @@
+package model
+
+import (
+	"dbm-services/common/db-resource/internal/svr/bk"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+
+	"gorm.io/gorm"
+)
+
+const (
+	// Unused the resource is free for allocation
+	Unused = "Unused"
+	// Preselected pre-selected by a picker
+	Preselected = "Preselected"
+	// Prepoccupied applied through the api, but not necessarily in actual use yet
+	Prepoccupied = "Prepoccupied"
+	// Used the resource has been sold and is in use
+	Used = "Used"
+)
+
+// TbRpDetail machine resource detail table
+type TbRpDetail struct {
+	ID              int                      `gorm:"primary_key;auto_increment;not_null" json:"-"`
+	BkCloudID       int                      `gorm:"uniqueIndex:ip;column:bk_cloud_id;type:int(11);not null;comment:'云区域 ID'" json:"bk_cloud_id"`
+	BkBizId         int                      `gorm:"column:bk_biz_id;type:int(11);not null;comment:'机器当前所属业务'" json:"bk_biz_id"`
+	DedicatedBizs   json.RawMessage          `gorm:"column:dedicated_bizs;type:json;comment:'专属业务,可属于多个'" json:"for_bizs"`
+	RsTypes         json.RawMessage          `gorm:"column:rs_types;type:json;comment:'资源类型标签'" json:"resource_types"`
+	Bizs            map[string]string        `gorm:"-" json:"-"`
+	BkHostID        int                      `gorm:"column:bk_host_id;type:int(11);not null;comment:'bk主机ID'" json:"bk_host_id"`
+	IP              string                   `gorm:"uniqueIndex:ip;column:ip;type:varchar(20);not null" json:"ip"` //  svr ip
+	AssetID         string                   `gorm:"column:asset_id;type:varchar(64);not null;comment:'固定资产编号'" json:"asset_id"`
+	DeviceClass     string                   `gorm:"column:device_class;type:varchar(64);not null" json:"device_class"` // device model, e.g. A30, D3
+	SvrTypeName     string                   `gorm:"column:svr_type_name;type:varchar(64);not null;comment:'服务器型号,判断是否是云机器'" json:"svr_type_name"`
+	CPUNum          int                      `gorm:"column:cpu_num;type:int(11);not null;comment:'cpu核数'" json:"cpu_num"`
+	DramCap         int                      `gorm:"column:dram_cap;type:int(11);not null;comment:'内存大小'" json:"dram_cap"`
+	StorageDevice   json.RawMessage          `gorm:"column:storage_device;type:json;comment:'磁盘设备'" json:"storage_device"`
+	TotalStorageCap int                      `gorm:"column:total_storage_cap;type:int(11);comment:'磁盘总容量'" json:"total_storage_cap"`
+	Storages        map[string]bk.DiskDetail `gorm:"-" json:"-"`
+	Raid            string                   `gorm:"column:raid;type:varchar(20);not null" json:"raid"`               // disk raid
+	CityID          string                   `gorm:"column:city_id;type:varchar(64);not null" json:"city_id"`         // actual city id
+	City            string                   `gorm:"column:city;type:varchar(128);not null" json:"city"`              // actual city name
+	SubZone         string                   `gorm:"column:sub_zone;type:varchar(32);not null" json:"sub_zone"`       // campus, e.g. Guangming; cc_device_szone
+	SubZoneID       string                   `gorm:"column:sub_zone_id;type:varchar(64);not null" json:"sub_zone_id"` // campus id, cc_device_szone_id
+	RackID          string                   `gorm:"column:rack_id;type:varchar(64);not null" json:"rack_id"`         // rack id, used to tell whether hosts share a rack
+	NetDeviceID     string                   `gorm:"column:net_device_id;type:varchar(128)" json:"net_device_id"`     // network device id, used to tell whether hosts share a switch
+	Label           string                   `gorm:"column:label;type:json" json:"label"`                             // labels
+	LabelMap        map[string]string        `gorm:"-" json:"-"`
+	IsInit          int                      `gorm:"column:is_init;type:int(11);comment:'是否初始化过'" json:"-"`                              // whether initialized
+	IsIdle          int                      `gorm:"column:is_idle;type:int(11);comment:'是否空闲检查过'" json:"-"`                             // whether idle-checked
+	Status          string                   `gorm:"column:status;type:varchar(20);not null" json:"status"`                              // Unused: not in use; Used: sold and in use; Preselected: pre-occupied
+	ConsumeTime     time.Time                `gorm:"column:consume_time;type:timestamp;default:1970-01-01 08:00:01" json:"consume_time"` // consume time
+	UpdateTime      time.Time                `gorm:"column:update_time;type:timestamp;default:CURRENT_TIMESTAMP()" json:"update_time"`   // last modified time
+	CreateTime      time.Time                `gorm:"column:create_time;type:timestamp;default:CURRENT_TIMESTAMP()" json:"create_time"`   // create time
+}
+
+// TableName returns the table name of TbRpDetail
+func (TbRpDetail) TableName() string {
+	return TbRpDetailName()
+}
+
+// TbRpDetailName returns the resource detail table name
+func TbRpDetailName() string {
+	return "tb_rp_detail"
+}
+
+// DeviceClassIsLocalSSD reports whether the device class indicates local SSD storage
+func (t TbRpDetail) DeviceClassIsLocalSSD() bool {
+	if cmutil.IsEmpty(t.DeviceClass) {
+		return false
+	}
+	// local ssd device classes are prefixed with "IT"
+	return strings.HasPrefix(t.DeviceClass, "IT")
+}
+
+// UnmarshalDiskInfo parses StorageDevice json into the Storages map
+func (t *TbRpDetail) UnmarshalDiskInfo() (err error) {
+	t.Storages = make(map[string]bk.DiskDetail)
+	err = json.Unmarshal(t.StorageDevice, &t.Storages)
+	return
+}
+
+// GetTbRpDetailAll runs a raw query and returns the matching resource details
+func GetTbRpDetailAll(sqlstr string) ([]TbRpDetail, error) {
+	var m []TbRpDetail
+	err := DB.Self.Table(TbRpDetailName()).Raw(sqlstr).Scan(&m).Error
+	if err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// SetMore fills in cpu, mem and disk details probed on the host itself
+func (t *TbRpDetail) SetMore(ip string, diskMap map[string]*bk.ShellResCollection) {
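+	// backfill cpu, mem and disk details from the shell probe when cmdb data is missing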
+	if disk, ok := diskMap[ip]; ok {
+		if t.CPUNum <= 0 {
+			t.CPUNum = disk.Cpu
+		}
+		if t.DramCap <= 0 {
+			t.DramCap = disk.Mem
+		}
+		dks := disk.Disk
+		if t.DeviceClassIsLocalSSD() {
+			dks = bk.SetDiskType(disk.Disk, bk.SSD)
+		}
+		if r, err := bk.MarshalDisk(dks); err != nil {
+			logger.Warn("disk marshal failed %s", err.Error())
+		} else {
+			t.StorageDevice = []byte(r)
+		}
+	}
+}
+
+// TbRpDetailGetter placeholder query helper, not yet implemented
+func TbRpDetailGetter() ([]TbRpDetail, error) {
+	return nil, nil
+}
+
+// UpdateTbRpDetail sets the status of the given hosts and returns the affected row count
+func UpdateTbRpDetail(ids []int, status string) (int64, error) {
+	db := DB.Self.Table(TbRpDetailName()).Where("bk_host_id in (?)", ids).Update("status", status)
+	return db.RowsAffected, db.Error
+}
+
+// UpdateTbRpDetailStatusAtSelling moves Preselected hosts to the given status
+func UpdateTbRpDetailStatusAtSelling(ids []int, status string) error {
+	return DB.Self.Table(TbRpDetailName()).Where("bk_host_id in (?) and status = ? ", ids, Preselected).
+		Update("status", status).Error
+}
+
+// DeleteTbRpDetail deletes resource details by bk host id
+func DeleteTbRpDetail(ids []int) (int64, error) {
+	db := DB.Self.Table(TbRpDetailName()).Where("bk_host_id in (?)", ids).Delete(&TbRpDetail{})
+	return db.RowsAffected, db.Error
+}
+
+// BatchGetTbDetail one item of a batch detail query
+type BatchGetTbDetail struct {
+	Item      string `json:"item"`
+	BkHostIds []int  `json:"bk_host_ids"`
+}
+
+// BatchGetTbDetailResult the details returned for one batch item
+type BatchGetTbDetailResult struct {
+	Item string       `json:"item"`
+	Data []TbRpDetail `json:"data"`
+}
+
+// BatchGetSatisfiedByAssetIds locks and re-statuses each item's hosts inside a single transaction
+func BatchGetSatisfiedByAssetIds(elements []BatchGetTbDetail, mode string) (result []BatchGetTbDetailResult,
+	err error) {
+	db := DB.Self.Begin()
+	defer func() {
+		if err != nil {
+			db.Rollback()
+		}
+	}()
+	for _, v := range elements {
+		d, err := SetSatisfiedStatus(db, v.BkHostIds, mode)
+		if err != nil {
+			logger.Error(fmt.Sprintf("Item:%s,failed to obtain resource details!,Error is %s", v.Item, err.Error()))
+			return nil, err
+		}
+		result = append(result, BatchGetTbDetailResult{Item: v.Item, Data: d})
+	}
+	err = db.Commit().Error
+	if err != nil {
+		logger.Error(fmt.Sprintf("transaction commit failed: %s", err.Error()))
+		return nil, err
+	}
+	return
+}
+
+// SetSatisfiedStatus locks the rows, verifies the count and updates their status
+func SetSatisfiedStatus(tx *gorm.DB, bkhostIds []int, status string) (result []TbRpDetail, err error) {
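+	// lock the candidate rows first so concurrent applies cannot double-sell them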
+	err = tx.Exec("select * from tb_rp_detail where bk_host_id in (?) for update", bkhostIds).Error
+	if err != nil {
+		return nil, err
+	}
+	err = tx.Raw("select * from tb_rp_detail where  bk_host_id in ? ", bkhostIds).Scan(&result).Error
+	if err != nil {
+		return nil, err
+	}
+	if len(bkhostIds) != len(result) {
+		logger.Error(fmt.Sprintf("got TbRpDetail %v", result))
+		return nil, fmt.Errorf("required count is %d, but only got %d", len(bkhostIds), len(result))
+	}
+	rdb := tx.Exec("update tb_rp_detail set status=?,consume_time=now() where bk_host_id in ?", status, bkhostIds)
+	if rdb.Error != nil {
+		logger.Error(fmt.Sprintf("update status failed, error %s", rdb.Error.Error()))
+		return nil, rdb.Error
+	}
+	if int(rdb.RowsAffected) != len(bkhostIds) {
+		return nil, fmt.Errorf("required update count is %d, but affected rows count is only %d", len(bkhostIds),
+			rdb.RowsAffected)
+	}
+	return result, nil
+}
+
+// GetLabels returns the distinct label key/value pairs in use
+func GetLabels(applyfor string) (data []map[string]string, err error) {
+	var ls []string
+	db := DB.Self.Table(TbRpDetailName())
+	if cmutil.IsNotEmpty(applyfor) {
+		db.Where("apply_for = ? and label is not null ", applyfor)
+	}
+	if err = db.Select("label").Scan(&ls).Error; err != nil {
+		logger.Error(fmt.Sprintf("get labels failed %s", err.Error()))
+		return
+	}
+	checkExist := make(map[string]struct{})
+	for _, v := range ls {
+		var ldata map[string]string
+		if err = json.Unmarshal([]byte(v), &ldata); err != nil {
+			logger.Error(fmt.Sprintf("Json Unmarshal Failed %s", err.Error()))
+			continue
+		}
+		for key, value := range ldata {
+			exkey := fmt.Sprintf("%s:%s", key, value)
+			if _, ok := checkExist[exkey]; !ok {
+				checkExist[exkey] = struct{}{}
+				data = append(data, map[string]string{key: value})
+			}
+		}
+	}
+	return
+}
diff --git a/dbm-services/common/db-resource/internal/model/TbRpDetailArchive.go b/dbm-services/common/db-resource/internal/model/TbRpDetailArchive.go
new file mode 100644
index 0000000000..2eedcf5dc8
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/model/TbRpDetailArchive.go
@@ -0,0 +1,90 @@
+package model
+
+import (
+	"dbm-services/common/db-resource/internal/svr/bk"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// TbRpDetailArchive resource pool archive table
+type TbRpDetailArchive struct {
+	ID              int                      `gorm:"primary_key;auto_increment;not_null" json:"-"`
+	BkCloudID       int                      `gorm:"column:bk_cloud_id;type:int(11);not null;comment:'云区域 ID'" json:"bk_cloud_id"`
+	BkBizId         int                      `gorm:"column:bk_biz_id;type:int(11);not null;comment:'机器当前所属业务'" json:"bk_biz_id"`
+	DedicatedBizs   json.RawMessage          `gorm:"column:dedicated_bizs;type:json;comment:'专属业务,可属于多个'" json:"dedicated_bizs"`
+	RsTypes         json.RawMessage          `gorm:"column:rs_types;type:json;comment:'资源类型标签'" json:"rs_types"`
+	Bizs            map[string]string        `gorm:"-"`
+	BkHostID        int                      `gorm:"column:bk_host_id;type:int(11);not null;comment:'bk主机ID'" json:"bk_host_id"`
+	IP              string                   `gorm:"column:ip;type:varchar(20);not null" json:"ip"` //  svr ip
+	AssetID         string                   `gorm:"column:asset_id;type:varchar(64);not null;comment:'固定资产编号'" json:"asset_id"`
+	DeviceClass     string                   `gorm:"column:device_class;type:varchar(64);not null" json:"device_class"` // device model, e.g. A30, D3
+	SvrTypeName     string                   `gorm:"column:svr_type_name;type:varchar(64);not null;comment:'服务器型号,判断是否是云机器'" json:"svr_type_name"`
+	CPUNum          int                      `gorm:"column:cpu_num;type:int(11);not null;comment:'cpu核数'" json:"cpu_num"`
+	DramCap         int                      `gorm:"column:dram_cap;type:int(11);not null;comment:'内存大小'" json:"dram_cap"`
+	StorageDevice   json.RawMessage          `gorm:"column:storage_device;type:json;comment:'磁盘设备'" json:"storage_device"`
+	TotalStorageCap int                      `gorm:"column:total_storage_cap;type:int(11);comment:'磁盘总容量'" json:"total_storage_cap"`
+	Storages        map[string]bk.DiskDetail `gorm:"-"`
+	Raid            string                   `gorm:"column:raid;type:varchar(20);not null" json:"raid"`               // disk raid
+	CityID          string                   `gorm:"column:city_id;type:varchar(64);not null" json:"city_id"`         // actual city id
+	City            string                   `gorm:"column:city;type:varchar(128);not null" json:"city"`              // actual city name
+	SubZone         string                   `gorm:"column:sub_zone;type:varchar(32);not null" json:"sub_zone"`       // campus, e.g. Guangming; cc_device_szone
+	SubZoneID       string                   `gorm:"column:sub_zone_id;type:varchar(64);not null" json:"sub_zone_id"` // campus id, cc_device_szone_id
+	RackID          string                   `gorm:"column:rack_id;type:varchar(64);not null" json:"rack_id"`         // rack id, used to tell whether hosts share a rack
+	NetDeviceID     string                   `gorm:"column:net_device_id;type:varchar(128)" json:"net_device_id"`     // network device id, used to tell whether hosts share a switch
+	Label           string                   `gorm:"column:label;type:json" json:"label"`                             // labels
+	LabelMap        map[string]string        `gorm:"-"`
+	IsInit          int                      `gorm:"column:is_init;type:int(11);comment:'是否初始化过'" json:"-"`                              // whether initialized
+	IsIdle          int                      `gorm:"column:is_idle;type:int(11);comment:'是否空闲检查过'" json:"-"`                             // whether idle-checked
+	Status          string                   `gorm:"column:status;type:varchar(20);not null" json:"status"`                              // Unused: not in use; Used: sold and in use; Preselected: pre-occupied
+	ConsumeTime     time.Time                `gorm:"column:consume_time;type:timestamp;default:1970-01-01 08:00:01" json:"consume_time"` // consume time
+	UpdateTime      time.Time                `gorm:"column:update_time;type:timestamp;default:CURRENT_TIMESTAMP()" json:"update_time"`   // last modified time
+	CreateTime      time.Time                `gorm:"column:create_time;type:timestamp;default:CURRENT_TIMESTAMP()" json:"create_time"`   // create time
+}
+
+// initarchive moves Used rows from tb_rp_detail into the archive table at startup
+func initarchive() {
+	tx := DB.Self.Begin()
+	if err := tx.Exec("insert into tb_rp_detail_archive select * from tb_rp_detail where status = ? ", Used).
+		Error; err != nil {
+		logger.Error("insert into tb_rp_detail_archive failed %s", err.Error())
+		tx.Rollback()
+		return
+	}
+	if err := tx.Exec("delete from tb_rp_detail where status = ? ", Used).Error; err != nil {
+		logger.Error("delete from tb_rp_detail failed %s", err.Error())
+		tx.Rollback()
+		return
+	}
+	if err := tx.Commit().Error; err != nil {
+		logger.Error("commit archive transaction failed %s", err.Error())
+	}
+}
+
+// TableName returns the table name of TbRpDetailArchive
+func (TbRpDetailArchive) TableName() string {
+	return TbRpDetailArchiveName()
+}
+
+// TbRpDetailArchiveName returns the archive table name
+func TbRpDetailArchiveName() string {
+	return "tb_rp_detail_archive"
+}
+
+// ArchiverResouce archives the given Used resources and removes them from the live table
+func ArchiverResouce(ids []int) (err error) {
+	tx := DB.Self.Begin()
+	defer func() {
+		if err != nil {
+			if tx.Rollback().Error != nil {
+				logger.Error(fmt.Sprintf("archive resource exception %s, rollback failed", err))
+			}
+		}
+	}()
+	if err = tx.Exec("insert into tb_rp_detail_archive select * from tb_rp_detail where id in ? and status = ? ", ids,
+		Used).Error; err != nil {
+		return err
+	}
+	if err = tx.Exec("delete from tb_rp_detail where  id in ?  and status = ? ", ids, Used).Error; err != nil {
+		return err
+	}
+	if err = tx.Commit().Error; err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/common/db-resource/internal/model/TbRpOperatorInfo.go b/dbm-services/common/db-resource/internal/model/TbRpOperatorInfo.go
new file mode 100644
index 0000000000..1da16c17da
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/model/TbRpOperatorInfo.go
@@ -0,0 +1,34 @@
+package model
+
+import "time"
+
+const (
+	// Consumed resources were consumed by an apply
+	Consumed = "consumed"
+	// Imported resources were imported into the pool
+	Imported = "imported"
+)
+
+// TbRpOperationInfo resource operation audit record
+type TbRpOperationInfo struct {
+	ID            int       `gorm:"primaryKey;auto_increment;not null" json:"-"`
+	RequestID     string    `gorm:"index:idx_request_id;column:request_id;type:varchar(64);not null" json:"request_id"`
+	TotalCount    int       `gorm:"column:total_count;type:int(11);comment:'total count'" json:"total_count"`
+	OperationType string    `gorm:"column:operation_type;type:varchar(64);not null;comment:'operation type'" json:"operation_type"`
+	Operator      string    `gorm:"column:operator;type:varchar(64);not null;comment:'operator user'" json:"operator"`
+	Status        string    `gorm:"column:status;type:varchar(64);not null;comment:'status'" json:"-"`
+	TaskId        string    `gorm:"column:task_id;type:varchar(128);not null;comment:'task Id'" json:"task_id"`
+	BillId        string    `gorm:"column:bill_id;type:varchar(128);not null;comment:'bill Id'" json:"bill_id"`
+	UpdateTime    time.Time `gorm:"column:update_time;type:timestamp" json:"update_time"` // last modified time
+	CreateTime    time.Time `gorm:"column:create_time;type:datetime" json:"create_time"`  // create time
+}
+
+// TableName returns the table name of TbRpOperationInfo
+func (TbRpOperationInfo) TableName() string {
+	return TbRpOperationInfoTableName()
+}
+
+// TbRpOperationInfoTableName returns the operation info table name
+func TbRpOperationInfoTableName() string {
+	return "tb_rp_operation_info"
+}
diff --git a/dbm-services/common/db-resource/internal/model/TbRpReturnDetail.go b/dbm-services/common/db-resource/internal/model/TbRpReturnDetail.go
new file mode 100644
index 0000000000..f682faf3e3
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/model/TbRpReturnDetail.go
@@ -0,0 +1,25 @@
+package model
+
+import "time"
+
+// TbRpReturnDetail record of a returned (recycled) resource
+type TbRpReturnDetail struct {
+	ID         int       `gorm:"primaryKey;column:id;type:int(11);not null" json:"-"`
+	IP         string    `gorm:"column:ip;type:varchar(20);not null" json:"ip"`               // machine ip
+	User       string    `gorm:"column:user;type:varchar(32);not null" json:"user"`           // requesting user
+	FromSys    string    `gorm:"column:from_sys;type:varchar(32);not null" json:"from_sys"`   // originating system
+	ApplyFor   string    `gorm:"column:apply_for;type:varchar(32);not null" json:"apply_for"` // resource type: proxy|tendis|tendb ...
+	Desc       string    `gorm:"column:desc;type:varchar(1024);not null" json:"desc"`         // description
+	UpdateTime time.Time `gorm:"column:update_time;type:timestamp" json:"update_time"`        // last modified time
+	CreateTime time.Time `gorm:"column:create_time;type:datetime" json:"create_time"`         // create time
+}
+
+// TbRpReturnDetailName returns the return detail table name
+func TbRpReturnDetailName() string {
+	return "tb_rp_return_detail"
+}
+
+// BatchCreateTbRpReturnDetail inserts return detail records in batch
+func BatchCreateTbRpReturnDetail(m []TbRpReturnDetail) error {
+	return DB.Self.Table(TbRpReturnDetailName()).CreateInBatches(m, len(m)).Error
+}
diff --git a/dbm-services/common/db-resource/internal/model/model.go b/dbm-services/common/db-resource/internal/model/model.go
new file mode 100644
index 0000000000..6c9ee2ced0
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/model/model.go
@@ -0,0 +1,422 @@
+// Package model TODO
+package model
+
+import (
+	"database/sql"
+	"dbm-services/common/db-resource/internal/config"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"log"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"gorm.io/driver/mysql"
+	"gorm.io/gorm"
+	"gorm.io/gorm/clause"
+	gormlogger "gorm.io/gorm/logger"
+)
+
+// Database wraps a gorm connection and its underlying sql.DB
+type Database struct {
+	Self      *gorm.DB
+	SelfSqlDB *sql.DB
+}
+
+// DB the service's own database
+var DB *Database
+
+// CMDBDB the dbm meta (cmdb) database
+var CMDBDB *Database
+
+func init() {
+	log.Println("init db model..")
+	createSysDb()
+	orm_db := initSelfDB()
+	sqlDB, err := orm_db.DB()
+	if err != nil {
+		logger.Fatal("init db connect failed %s", err.Error())
+		return
+	}
+	DB = &Database{
+		Self:      orm_db,
+		SelfSqlDB: sqlDB,
+	}
+	d2 := initDBMDB()
+	d2sqlDb, err := d2.DB()
+	if err != nil {
+		logger.Fatal("init db connect failed %s", err.Error())
+		return
+	}
+	CMDBDB = &Database{
+		Self:      d2,
+		SelfSqlDB: d2sqlDb,
+	}
+	migration()
+	initarchive()
+}
+
+func createSysDb() {
+	user := config.AppConfig.Db.UserName
+	pwd := config.AppConfig.Db.PassWord
+	addr := config.AppConfig.Db.Addr
+	testConn := openDB(user, pwd, addr, "")
+	err := testConn.Exec(fmt.Sprintf("create database IF NOT EXISTS `%s`;", config.AppConfig.Db.Name)).Error
+	if err != nil {
+		log.Fatalf("init create db failed:%s", err.Error())
+	}
+	sqldb, err := testConn.DB()
+	if err != nil {
+		log.Fatalf("init create db failed:%s", err.Error())
+	}
+	sqldb.Close()
+}
+
+func openDB(username, password, addr, name string) *gorm.DB {
+	dsn := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8mb4&parseTime=%t&loc=%s",
+		username,
+		password,
+		addr,
+		name,
+		true,
+		"Local")
+	newLogger := gormlogger.New(
+		log.New(os.Stdout, "\r\n", log.LstdFlags),
+		gormlogger.Config{
+			SlowThreshold:             time.Second,
+			LogLevel:                  gormlogger.Info,
+			IgnoreRecordNotFoundError: false,
+			Colorful:                  true,
+			ParameterizedQueries:      false,
+		},
+	)
+	db, err := gorm.Open(mysql.New(mysql.Config{
+		DSN: dsn,
+	}), &gorm.Config{
+		Logger: newLogger,
+	})
+	if err != nil {
+		logger.Fatal("Database connection failed. Database name: %s, error: %v", name, err)
+	}
+	return db
+}
+
+// initSelfDB open the service's own database connection
+func initSelfDB() *gorm.DB {
+	return openDB(
+		config.AppConfig.Db.UserName,
+		config.AppConfig.Db.PassWord,
+		config.AppConfig.Db.Addr,
+		config.AppConfig.Db.Name,
+	)
+}
+
+func initDBMDB() *gorm.DB {
+	return openDB(
+		config.AppConfig.CmdbDb.UserName,
+		config.AppConfig.CmdbDb.PassWord,
+		config.AppConfig.CmdbDb.Addr,
+		config.AppConfig.CmdbDb.Name,
+	)
+}
+
+// migration auto-migrate the service's own tables
+func migration() {
+	if err := DB.Self.AutoMigrate(&TbRpDetail{}, &TbRequestLog{}, &TbRpDetailArchive{}, &TbRpApplyDetailLog{},
+		&TbRpOperationInfo{}); err != nil {
+		logger.Fatal("auto migrate tables failed %s", err.Error())
+	}
+}
+
+// QueryCountSelfCommon run a count query and return the scalar result
+func (db Database) QueryCountSelfCommon(sqltext string) (int, error) {
+	var count int
+	c_db := db.Self.Raw(sqltext)
+	if c_db.Error != nil {
+		return 0, c_db.Error
+	}
+	if err := c_db.Row().Scan(&count); err != nil {
+		return 0, err
+	}
+	return count, nil
+}
+
+// QuerySelfCommon run a raw query and return the rows as generic maps
+func (db *Database) QuerySelfCommon(sqltext string) ([]map[string]interface{}, error) {
+	cursor, err := db.SelfSqlDB.Query(sqltext)
+	if err != nil {
+		return nil, fmt.Errorf("db query failed, err: %+v", err)
+	}
+	defer cursor.Close()
+	columns, err := cursor.Columns()
+	if err != nil {
+		return nil, fmt.Errorf("get columns failed, err: %+v", err)
+	}
+
+	count := len(columns)
+	values := make([]interface{}, count)
+	scanArgs := make([]interface{}, count)
+	for i := range values {
+		scanArgs[i] = &values[i]
+	}
+
+	dataRows := make([]map[string]interface{}, 0)
+	for cursor.Next() {
+		row := make(map[string]interface{})
+		err := cursor.Scan(scanArgs...)
+		if err != nil {
+			return nil, fmt.Errorf("scan data failed, err: %+v", err)
+		}
+		for i, col := range columns {
+			var v interface{}
+			val := values[i]
+			b, ok := val.([]byte)
+			if ok {
+				v = string(b)
+			} else {
+				v = val
+			}
+			row[col] = v
+		}
+		dataRows = append(dataRows, row)
+	}
+	return dataRows, cursor.Err()
+}
+
+// JSONQueryExpression json query expression, implements clause.Expression interface to use as querier
+type JSONQueryExpression struct {
+	column         string
+	keys           []string
+	hasKeys        bool
+	equals         bool
+	equalsValue    interface{}
+	extract        bool
+	path           string
+	numranges      bool
+	numRange       NumRange
+	Gtv            int
+	gte            bool
+	Ltv            int
+	lte            bool
+	contains       bool
+	containVals    []string
+	mapcontains    bool
+	mapcontainVals []string
+	subcontains    bool
+	subcontainVal  string
+}
+
+// NumRange closed integer range [Min, Max]
+type NumRange struct {
+	Min int
+	Max int
+}
+
+// JSONQuery query column as json
+func JSONQuery(column string) *JSONQueryExpression {
+	return &JSONQueryExpression{column: column}
+}
+
+// SubValContains matches rows whose JSON sub-value under key contains val
+func (jsonQuery *JSONQueryExpression) SubValContains(val string, key string) *JSONQueryExpression {
+	jsonQuery.subcontains = true
+	jsonQuery.subcontainVal = val
+	jsonQuery.keys = []string{key}
+	return jsonQuery
+}
+
+// KeysContains matches rows whose JSON object keys contain the given keys
+func (jsonQuery *JSONQueryExpression) KeysContains(val []string) *JSONQueryExpression {
+	jsonQuery.mapcontains = true
+	jsonQuery.mapcontainVals = val
+	return jsonQuery
+}
+
+// Contains matches rows whose JSON array contains all the given values
+func (jsonQuery *JSONQueryExpression) Contains(val []string) *JSONQueryExpression {
+	jsonQuery.contains = true
+	jsonQuery.containVals = val
+	return jsonQuery
+}
+
+// Extract extract json with path
+func (jsonQuery *JSONQueryExpression) Extract(path string) *JSONQueryExpression {
+	jsonQuery.extract = true
+	jsonQuery.path = path
+	return jsonQuery
+}
+
+// NumRange restricts the JSON value at keys to the range [min, max]
+func (jsonQuery *JSONQueryExpression) NumRange(min int, max int, keys ...string) *JSONQueryExpression {
+	jsonQuery.keys = keys
+	jsonQuery.numRange = NumRange{
+		Min: min,
+		Max: max,
+	}
+	jsonQuery.numranges = true
+	return jsonQuery
+}
+
+// Gte matches rows whose JSON value at keys is >= val
+func (jsonQuery *JSONQueryExpression) Gte(val int, keys ...string) *JSONQueryExpression {
+	jsonQuery.keys = keys
+	jsonQuery.Gtv = val
+	jsonQuery.gte = true
+	return jsonQuery
+}
+
+// Lte matches rows whose JSON value at keys is <= val
+func (jsonQuery *JSONQueryExpression) Lte(val int, keys ...string) *JSONQueryExpression {
+	jsonQuery.keys = keys
+	jsonQuery.Ltv = val
+	jsonQuery.lte = true
+	return jsonQuery
+}
+
+// HasKey returns clause.Expression
+func (jsonQuery *JSONQueryExpression) HasKey(keys ...string) *JSONQueryExpression {
+	jsonQuery.keys = keys
+	jsonQuery.hasKeys = true
+	return jsonQuery
+}
+
+// Equals matches rows whose JSON value at keys equals value
+func (jsonQuery *JSONQueryExpression) Equals(value interface{}, keys ...string) *JSONQueryExpression {
+	jsonQuery.keys = keys
+	jsonQuery.equals = true
+	jsonQuery.equalsValue = value
+	return jsonQuery
+}
+
+// Build implements clause.Expression
+func (jsonQuery *JSONQueryExpression) Build(builder clause.Builder) {
+	if stmt, ok := builder.(*gorm.Statement); ok {
+		switch stmt.Dialector.Name() {
+		case "mysql", "sqlite":
+			switch {
+			case jsonQuery.extract:
+				builder.WriteString("JSON_EXTRACT(")
+				builder.WriteQuoted(jsonQuery.column)
+				builder.WriteByte(',')
+				builder.AddVar(stmt, jsonQuery.path)
+				builder.WriteString(")")
+			case jsonQuery.hasKeys:
+				if len(jsonQuery.keys) > 0 {
+					builder.WriteString("JSON_EXTRACT(")
+					builder.WriteQuoted(jsonQuery.column)
+					builder.WriteByte(',')
+					builder.AddVar(stmt, jsonQueryJoin(jsonQuery.keys))
+					builder.WriteString(") IS NOT NULL")
+				}
+			case jsonQuery.gte:
+				builder.WriteString("JSON_EXTRACT(")
+				builder.WriteQuoted(jsonQuery.column)
+				builder.WriteByte(',')
+				builder.AddVar(stmt, jsonQueryJoin(jsonQuery.keys))
+				builder.WriteString(") >=")
+				builder.WriteString(strconv.Itoa(jsonQuery.Gtv))
+			case jsonQuery.lte:
+				builder.WriteString("JSON_EXTRACT(")
+				builder.WriteQuoted(jsonQuery.column)
+				builder.WriteByte(',')
+				builder.AddVar(stmt, jsonQueryJoin(jsonQuery.keys))
+				builder.WriteString(") <=")
+				builder.WriteString(strconv.Itoa(jsonQuery.Ltv))
+			case jsonQuery.numranges:
+				builder.WriteString("JSON_EXTRACT(")
+				builder.WriteQuoted(jsonQuery.column)
+				builder.WriteByte(',')
+				builder.AddVar(stmt, jsonQueryJoin(jsonQuery.keys))
+				builder.WriteString(") ")
+				builder.WriteString(" BETWEEN ")
+				builder.WriteString(strconv.Itoa(jsonQuery.numRange.Min))
+				builder.WriteString(" AND ")
+				builder.WriteString(strconv.Itoa(jsonQuery.numRange.Max))
+			case jsonQuery.mapcontains:
+				builder.WriteString("JSON_CONTAINS(JSON_KEYS(")
+				builder.WriteQuoted(jsonQuery.column)
+				builder.WriteString("),'[")
+				builder.WriteString(jsonArryJoin(jsonQuery.mapcontainVals))
+				builder.WriteString("]') ")
+			case jsonQuery.contains:
+				builder.WriteString("JSON_CONTAINS(")
+				builder.WriteQuoted(jsonQuery.column)
+				builder.WriteString(",'")
+				builder.WriteString("[")
+				builder.WriteString(jsonArryJoin(jsonQuery.containVals))
+				builder.WriteString("]') ")
+			case jsonQuery.subcontains:
+				builder.WriteString("JSON_CONTAINS(JSON_EXTRACT(")
+				builder.WriteQuoted(jsonQuery.column)
+				builder.WriteString(",'$.*.\"")
+				builder.WriteString(jsonQuery.keys[0])
+				builder.WriteString("\"'),'[\"")
+				builder.WriteString(jsonQuery.subcontainVal)
+				builder.WriteString("\"]') ")
+			case jsonQuery.equals:
+				if len(jsonQuery.keys) > 0 {
+					builder.WriteString("JSON_EXTRACT(")
+					builder.WriteQuoted(jsonQuery.column)
+					builder.WriteByte(',')
+					builder.AddVar(stmt, jsonQueryJoin(jsonQuery.keys))
+					builder.WriteString(") = ")
+					if value, ok := jsonQuery.equalsValue.(bool); ok {
+						builder.WriteString(strconv.FormatBool(value))
+					} else {
+						stmt.AddVar(builder, jsonQuery.equalsValue)
+					}
+				}
+			}
+		}
+	}
+}
+
+func jsonArryJoin(vals []string) string {
+	n := len(vals) - 1
+	for i := 0; i < len(vals); i++ {
+		n += len(vals[i])
+	}
+	var b strings.Builder
+	b.Grow(n)
+	for idx, val := range vals {
+		b.WriteString("\"")
+		b.WriteString(val)
+		b.WriteString("\"")
+		if idx < len(vals)-1 {
+			b.WriteString(",")
+		}
+	}
+	return b.String()
+}
+
+const prefix = "$."
+
+func jsonQueryJoin(keys []string) string {
+	if len(keys) == 1 {
+		return prefix + keys[0]
+	}
+
+	n := len(prefix)
+	n += len(keys) - 1
+	for i := 0; i < len(keys); i++ {
+		n += len(keys[i])
+	}
+
+	var b strings.Builder
+	b.Grow(n)
+	b.WriteString(prefix)
+	b.WriteString("\"")
+	b.WriteString(keys[0])
+	b.WriteString("\"")
+	for _, key := range keys[1:] {
+		b.WriteString(".")
+		b.WriteString(key)
+	}
+	return b.String()
+}
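+
+// Usage sketch (illustrative only; assumes a *gorm.DB handle named db and
+// JSON columns like the ones used in this package):
+//
+//	// rows whose rs_types JSON array contains "MySQL"
+//	db.Where(JSONQuery("rs_types").Contains([]string{"MySQL"}))
+//	// rows whose storage_device["/data"]["size"] lies in [100, 500]
+//	db.Where(JSONQuery("storage_device").NumRange(100, 500, "/data", "size"))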
diff --git a/dbm-services/common/db-resource/internal/routers/router.go b/dbm-services/common/db-resource/internal/routers/router.go
new file mode 100644
index 0000000000..f71fe1887a
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/routers/router.go
@@ -0,0 +1,18 @@
+package routers
+
+import (
+	"dbm-services/common/db-resource/internal/controller/apply"
+	"dbm-services/common/db-resource/internal/controller/manage"
+
+	"github.com/gin-gonic/gin"
+)
+
+// RegisterRoutes registers all http routes on the gin engine
+func RegisterRoutes(engine *gin.Engine) {
+	// resource apply routes
+	apply := apply.ApplyHandler{}
+	apply.RegisterRouter(engine)
+	// machine resource management routes
+	manage := manage.MachineResourceHandler{}
+	manage.RegisterRouter(engine)
+}
diff --git a/dbm-services/common/db-resource/internal/routers/routers.go b/dbm-services/common/db-resource/internal/routers/routers.go
new file mode 100644
index 0000000000..c0dc2a6696
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/routers/routers.go
@@ -0,0 +1,2 @@
+// Package routers http routers
+package routers
diff --git a/dbm-services/common/db-resource/internal/svr/apply/api.go b/dbm-services/common/db-resource/internal/svr/apply/api.go
new file mode 100644
index 0000000000..ed650878dc
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/apply/api.go
@@ -0,0 +1,201 @@
+package apply
+
+import (
+	"dbm-services/common/db-resource/internal/model"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"fmt"
+	"path"
+	"time"
+)
+
+// ParamCheck validate the apply request parameters
+func (param *ApplyRequestInputParam) ParamCheck() (err error) {
+	for _, a := range param.Details {
+		// a single-machine request needs no affinity check
+		if a.Count <= 1 {
+			continue
+		}
+		switch a.Affinity {
+		case SAME_SUBZONE, SAME_SUBZONE_CROSS_SWTICH:
+			if a.LocationSpec.IsEmpty() {
+				return fmt.Errorf("you need to choose a city")
+			}
+		case CROS_SUBZONE:
+			if a.LocationSpec.IsEmpty() {
+				return fmt.Errorf("you need to choose a city")
+			}
+			if a.LocationSpec.IncludeOrExclude && len(a.LocationSpec.SubZoneIds) < 2 {
+				return fmt.Errorf("cross-subzone affinity requires at least 2 specified subzones")
+			}
+		case NONE:
+			// no affinity handling, nothing to validate here
+		}
+		for _, d := range a.StorageSpecs {
+			if d.MaxSize > 0 && d.MinSize > d.MaxSize {
+				return fmt.Errorf("storage min size %d is greater than max size %d", d.MinSize, d.MaxSize)
+			}
+		}
+	}
+	return
+}
+
+// ActionInfo task, bill and operator info attached to a request
+type ActionInfo struct {
+	TaskId   string `json:"task_id"`
+	BillId   string `json:"bill_id"`
+	Operator string `json:"operator"`
+}
+
+// ApplyRequestInputParam request parameters of the apply API
+type ApplyRequestInputParam struct {
+	ResourceType string              `json:"resource_type"` // intended use of the requested resources: Redis|MySQL|Proxy
+	DryRun       bool                `json:"dry_run"`
+	BkCloudId    int                 `json:"bk_cloud_id"  binding:"number"`
+	ForbizId     int                 `json:"for_biz_id"`
+	Details      []ApplyObjectDetail `json:"details" binding:"required,gt=0,dive"`
+	ActionInfo
+}
+
+// GetOperationInfo build the operation record of this apply request
+func (c ApplyRequestInputParam) GetOperationInfo(requestId string) model.TbRpOperationInfo {
+	var count int
+	for _, v := range c.Details {
+		count += v.Count
+	}
+	return model.TbRpOperationInfo{
+		RequestID:     requestId,
+		TotalCount:    count,
+		OperationType: model.Consumed,
+		BillId:        c.BillId,
+		TaskId:        c.TaskId,
+		Operator:      c.Operator,
+		CreateTime:    time.Now(),
+		UpdateTime:    time.Now(),
+	}
+}
+
+// LockKey the distributed lock key of this request
+func (c ApplyRequestInputParam) LockKey() string {
+	if cmutil.IsEmpty(c.ResourceType) {
+		return fmt.Sprintf("dbrms:lock:%d:bizid.%d", c.BkCloudId, c.ForbizId)
+	}
+	return fmt.Sprintf("dbrms:lock:%d:%s:bizid.%d", c.BkCloudId, c.ResourceType, c.ForbizId)
+}
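+
+// For illustration: a request with BkCloudId 0, ResourceType "MySQL" and
+// ForbizId 100 locks on "dbrms:lock:0:MySQL:bizid.100"; with an empty
+// ResourceType the key is "dbrms:lock:0:bizid.100".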
+
+const (
+	// SAME_SUBZONE_CROSS_SWTICH same city, same subzone, across switches and racks
+	SAME_SUBZONE_CROSS_SWTICH = "SAME_ZONE_CROSS_SWTICH"
+	// SAME_SUBZONE same city, same subzone
+	SAME_SUBZONE = "SAME_SUBZONE"
+	// CROS_SUBZONE same city, across subzones
+	CROS_SUBZONE = "CROS_SUBZONE"
+	// NONE no affinity handling
+	NONE = "NONE"
+)
+
+// ApplyObjectDetail detail of one resource group in an apply request
+type ApplyObjectDetail struct {
+	GroupMark string            `json:"group_mark" binding:"required" ` // resource group mark
+	Labels    map[string]string `json:"labels"`                         // labels
+	// resources are matched either by device class or by spec description;
+	// the two conditions are OR'ed
+	DeviceClass  []string     `json:"device_class"` // device class, e.g. "IT5.8XLARGE128" "SA3.2XLARGE32"
+	Spec         Spec         `json:"spec"`         // spec description
+	StorageSpecs []DiskSpec   `json:"storage_spec"`
+	LocationSpec LocationSpec `json:"location_spec"` // location constraints
+	// anti-affinity; takes effect when campus is empty:
+	// SAME_SUBZONE_CROSS_SWTICH: same city, same subzone, across switches and racks
+	// SAME_SUBZONE: same city, same subzone
+	// CROS_SUBZONE: same city, across subzones
+	// NONE: no affinity handling
+	Affinity string `json:"affinity"`
+	Count    int    `json:"count" binding:"required,min=1"` // requested count
+}
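+
+// A request detail in JSON might look like this (a minimal sketch with
+// illustrative values, not taken from real data):
+//
+//	{
+//	  "group_mark": "backend_group_0",
+//	  "device_class": ["SA3.2XLARGE32"],
+//	  "spec": {"cpu": {"min": 8, "max": 16}, "mem": {"min": 16000, "max": 32000}},
+//	  "storage_spec": [{"mount_point": "/data", "disk_type": "SSD", "min": 100, "max": 500}],
+//	  "location_spec": {"city": "sz", "sub_zone_ids": []},
+//	  "affinity": "CROS_SUBZONE",
+//	  "count": 2
+//	}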
+
+// GetEmptyDiskSpec returns the disk specs without a mount point
+func GetEmptyDiskSpec(ds []DiskSpec) (dms []DiskSpec) {
+	for _, v := range ds {
+		if v.MountPointIsEmpty() {
+			dms = append(dms, v)
+		}
+	}
+	return
+}
+
+// GetDiskSpecMountPoints returns the cleaned mount points of the given disk specs
+func GetDiskSpecMountPoints(ds []DiskSpec) (mountPoints []string) {
+	for _, v := range ds {
+		if v.MountPointIsEmpty() {
+			continue
+		}
+		mountPoints = append(mountPoints, path.Clean(v.MountPoint))
+	}
+	return
+}
+
+// Spec cpu and memory requirement ranges
+type Spec struct {
+	Cpu MeasureRange `json:"cpu"` // cpu range
+	Mem MeasureRange `json:"mem"`
+}
+
+// IsEmpty both cpu and mem ranges are empty
+func (s Spec) IsEmpty() bool {
+	return s.Cpu.IsEmpty() && s.Mem.IsEmpty()
+}
+
+// NotEmpty both cpu and mem ranges are set
+func (s Spec) NotEmpty() bool {
+	return s.Cpu.IsNotEmpty() && s.Mem.IsNotEmpty()
+}
+
+// MeasureRange closed integer range [Min, Max]
+type MeasureRange struct {
+	Min int `json:"min"`
+	Max int `json:"max"`
+}
+
+// Iegal reports whether the range is valid (Max >= Min)
+func (m MeasureRange) Iegal() bool {
+	return m.Max >= m.Min
+}
+
+// IsNotEmpty both bounds are positive
+func (m MeasureRange) IsNotEmpty() bool {
+	return m.Max > 0 && m.Min > 0
+}
+
+// IsEmpty both bounds are zero
+func (m MeasureRange) IsEmpty() bool {
+	return m.Min == 0 && m.Max == 0
+}
+
+// DiskSpec disk requirement description
+type DiskSpec struct {
+	DiskType   string `json:"disk_type"`
+	MinSize    int    `json:"min"`
+	MaxSize    int    `json:"max"`
+	MountPoint string `json:"mount_point"`
+}
+
+// LocationSpec location requirement description
+type LocationSpec struct {
+	City             string   `json:"city" validate:"required"` // city used to derive the region
+	SubZoneIds       []string `json:"sub_zone_ids"`
+	IncludeOrExclude bool     `json:"include_or_exclue"`
+}
+
+// MountPointIsEmpty whether the mount point is empty
+func (d DiskSpec) MountPointIsEmpty() bool {
+	return cmutil.IsEmpty(d.MountPoint)
+}
+
+// IsEmpty whether no city is specified
+func (l LocationSpec) IsEmpty() bool {
+	return cmutil.IsEmpty(l.City)
+}
+
+// SubZoneIsEmpty whether no subzone constraint is specified
+func (l LocationSpec) SubZoneIsEmpty() bool {
+	return l.IsEmpty() || len(l.SubZoneIds) == 0
+}
diff --git a/dbm-services/common/db-resource/internal/svr/apply/apply.go b/dbm-services/common/db-resource/internal/svr/apply/apply.go
new file mode 100644
index 0000000000..6a2f85f57b
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/apply/apply.go
@@ -0,0 +1,331 @@
+// Package apply resource matching and picking logic
+package apply
+
+import (
+	"dbm-services/common/db-resource/internal/model"
+	"dbm-services/common/db-resource/internal/svr/bk"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"path"
+	"strconv"
+
+	"gorm.io/gorm"
+)
+
+// SearchContext the query context of one resource group
+type SearchContext struct {
+	*ApplyObjectDetail
+	BkCloudId       int
+	RsType          string
+	IntetionBkBizId int
+	IdcCitys        []string
+}
+
+func getRealCitys(logicCity string) (realCitys []string, err error) {
+	if cmutil.IsEmpty(logicCity) {
+		return
+	}
+	err = model.CMDBDB.Self.Raw(
+		"select distinct bk_idc_city_name from db_meta_bkcity where  logical_city_id in (select id from db_meta_logicalcity  where name = ?  ) ",
+		logicCity).Scan(&realCitys).Error
+	if err != nil {
+		logger.Error("from region %s find real city failed %s", logicCity, err.Error())
+		return
+	}
+	return
+}
+
+// CycleApply match and pick resources for every requested group
+func CycleApply(param ApplyRequestInputParam) (pickers []*PickerObject, err error) {
+	for _, v := range param.Details {
+		var picker *PickerObject
+		logger.Debug(fmt.Sprintf("input.Detail %v", v))
+		// pre-check whether the resources are sufficient
+		if v.Affinity == "" {
+			v.Affinity = NONE
+		}
+		idcCitys, errx := getRealCitys(v.LocationSpec.City)
+		if errx != nil {
+			return pickers, errx
+		}
+		s := &SearchContext{
+			BkCloudId:         param.BkCloudId,
+			IntetionBkBizId:   param.ForbizId,
+			RsType:            param.ResourceType,
+			ApplyObjectDetail: &v,
+			IdcCitys:          idcCitys,
+		}
+		if err = s.PickCheck(); err != nil {
+			return pickers, err
+		}
+		// pick the resources that satisfy the requirement
+		picker, err = s.PickInstance()
+		if err != nil {
+			return pickers, fmt.Errorf("Picker for %s Failed,Error is %v", v.GroupMark, err)
+		}
+		// debug log of how the picked instances are distributed
+		picker.DebugDistrubuteLog()
+		// mark the picked resources as Preselected
+		if update_err := picker.PreselectedSatisfiedInstance(); update_err != nil {
+			return pickers, fmt.Errorf("update %s Picker Out Satisfied Instance Status In Selling Failed:%v", v.GroupMark,
+				update_err.Error())
+		}
+		// append to the picked groups
+		pickers = append(pickers, picker)
+	}
+	return pickers, nil
+}
+
+// RollBackAllInstanceUnused roll instance status back from Selling to Unused (2 --> 0)
+func RollBackAllInstanceUnused(ms []*PickerObject) {
+	for _, m := range ms {
+		if err := m.RollbackSatisfiedInstanceStatusUnused(); err != nil {
+			logger.Error(fmt.Sprintf("Rollback Satisfied Instance Status NotSelled Failed,Error %s", err.Error()))
+		}
+	}
+}
+
+// Matcher returns the match functions derived from the request
+func (o *SearchContext) Matcher() (fns []func(db *gorm.DB)) {
+	switch {
+	// no device class, spec given: match by spec
+	case len(o.DeviceClass) == 0 && o.Spec.NotEmpty():
+		fns = append(fns, o.MatchSpec)
+	// device class given, no spec: match by device class
+	case len(o.DeviceClass) > 0 && o.Spec.IsEmpty():
+		fns = append(fns, o.MatchDeviceClass)
+	// both given: match by spec first, then by device class
+	case len(o.DeviceClass) > 0 && o.Spec.NotEmpty():
+		fns = append(fns, o.MatchSpec)
+		fns = append(fns, o.MatchDeviceClass)
+	}
+	// always add a no-op matcher so an unconditional pass is made as well
+	fns = append(fns, func(db *gorm.DB) {})
+	return
+}
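+
+// A sketch of how the matchers are consumed (mirrors PickCheck and
+// PickInstance below; names are from this package):
+//
+//	for _, fn := range o.Matcher() {
+//		db := model.DB.Self.Table(model.TbRpDetailName())
+//		_ = o.pickBase(db) // base filters: cloud, status, type, biz, labels...
+//		fn(db)             // one device-class/spec condition set
+//		// ... scan and pick from the narrowed rows ...
+//	}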
+
+func (o *SearchContext) pickBase(db *gorm.DB) (err error) {
+	db.Where(" bk_cloud_id = ? and status = ?  ", o.BkCloudId, model.Unused)
+	// when no resource type is specified, only resources without a resource
+	// type label can be chosen; untyped resources are usable by every type
+	if cmutil.IsEmpty(o.RsType) {
+		db.Where("JSON_LENGTH(rs_types) <= 0")
+	} else {
+		db.Where(model.JSONQuery("rs_types").Contains([]string{o.RsType}))
+	}
+	// when no dedicated business is specified, only public resources can be
+	// chosen; resources labeled with a business must not match
+	if o.IntetionBkBizId <= 0 {
+		db.Where("JSON_LENGTH(dedicated_bizs) <= 0")
+	} else {
+		db.Where(model.JSONQuery("dedicated_bizs").Contains([]string{
+			strconv.Itoa(o.IntetionBkBizId)}))
+	}
+	o.MatchLables(db)
+	if err = o.MatchLocationSpec(db); err != nil {
+		return err
+	}
+	o.MatchStorage(db)
+	// cross-switch placement additionally requires net device id, rack id and so on
+	if o.Affinity == SAME_SUBZONE_CROSS_SWTICH {
+		o.UseNetDeviceIsNotEmpty(db)
+	}
+	return
+}
+
+// PickCheck pre-check whether enough resources match the request
+func (o *SearchContext) PickCheck() (err error) {
+	var count int64
+	db := model.DB.Self.Table(model.TbRpDetailName()).Select("count(*)")
+	if err := o.pickBase(db); err != nil {
+		return err
+	}
+	for _, fn := range o.Matcher() {
+		fn(db)
+		var cnt int64
+		if err := db.Scan(&cnt).Error; err != nil {
+			logger.Error("query pre check count failed %s", err.Error())
+			return err
+		}
+		count += cnt
+	}
+	logger.Info("count is  %d", count)
+	if int(count) < o.Count {
+		return fmt.Errorf("[pre inspection]: total number of resources initially eligible:%d,number of interface requests:%d",
+			count, o.Count)
+	}
+	return nil
+}
+
+// MatchLables matches the label conditions
+func (o *SearchContext) MatchLables(db *gorm.DB) {
+	if len(o.Labels) > 0 {
+		for key, v := range o.Labels {
+			db.Where("json_contains(label, json_object(?, ?))", key, v)
+		}
+		return
+	}
+	db.Where(" JSON_TYPE(label) = 'NULL' OR JSON_LENGTH(label) <= 1 ")
+}
+
+// PickInstance pick the concrete instances that satisfy the request
+func (o *SearchContext) PickInstance() (picker *PickerObject, err error) {
+	picker = NewPicker(o.Count, o.GroupMark)
+	for _, fn := range o.Matcher() {
+		var items []model.TbRpDetail
+		db := model.DB.Self.Table(model.TbRpDetailName())
+		if err = o.pickBase(db); err != nil {
+			return
+		}
+		fn(db)
+		if err = db.Scan(&items).Error; err != nil {
+			logger.Error("query failed %s", err.Error())
+			return
+		}
+		// filter by the disk requirements that carry no mount point
+		esspec := GetEmptyDiskSpec(o.StorageSpecs)
+		if len(esspec) > 0 {
+			ts := []model.TbRpDetail{}
+			for _, ins := range items {
+				if err := ins.UnmarshalDiskInfo(); err != nil {
+					logger.Error("umarshal disk failed %s", err.Error())
+				}
+				logger.Info("%v", ins.Storages)
+				noUseStorages := make(map[string]bk.DiskDetail)
+				smp := GetDiskSpecMountPoints(o.StorageSpecs)
+				for mp, v := range ins.Storages {
+					if cmutil.ElementNotInArry(mp, smp) {
+						noUseStorages[mp] = v
+					}
+				}
+				logger.Info("nouse: %v", noUseStorages)
+				if matchNoMountPointStorage(esspec, noUseStorages) {
+					ts = append(ts, ins)
+				}
+			}
+			if len(ts) <= 0 {
+				return picker, fmt.Errorf("did not match the appropriate resources")
+			}
+			items = ts
+		}
+		o.PickInstanceBase(picker, items)
+		logger.Info("picker now is %v", picker)
+		if picker.PickerDone() {
+			return picker, nil
+		}
+	}
+	return nil, fmt.Errorf("all Instances Cannot Satisfy The Requested Parameters")
+}
+
+func matchNoMountPointStorage(spec []DiskSpec, sinc map[string]bk.DiskDetail) bool {
+	mcount := 0
+	for _, s := range spec {
+		for mp, d := range sinc {
+			if diskDetailMatch(d, s) {
+				delete(sinc, mp)
+				mcount += 1
+				break
+			}
+		}
+	}
+	return mcount == len(spec)
+}
+
+func diskDetailMatch(d bk.DiskDetail, s DiskSpec) bool {
+	logger.Info("spec %v", s)
+	logger.Info("detail %v", d)
+	if d.DiskType != s.DiskType && cmutil.IsNotEmpty(s.DiskType) {
+		logger.Info("disk type not match")
+		return false
+	}
+	if d.Size > s.MaxSize && s.MaxSize > 0 {
+		logger.Info("max size not match")
+		return false
+	}
+	if d.Size < s.MinSize {
+		logger.Info("min size not match")
+		return false
+	}
+	return true
+}
+
+// PickInstanceBase dispatch picking according to the affinity
+func (o *ApplyObjectDetail) PickInstanceBase(picker *PickerObject, items []model.TbRpDetail) {
+	logger.Info("the anti-affinity is %s", o.Affinity)
+	switch o.Affinity {
+	case NONE:
+		data := AnalysisResource(items, true)
+		picker.PickeElements = data
+		picker.PickerSameSubZone(false)
+	case CROS_SUBZONE:
+		data := AnalysisResource(items, false)
+		picker.PickeElements = data
+		picker.Picker(true)
+	case SAME_SUBZONE, SAME_SUBZONE_CROSS_SWTICH:
+		data := AnalysisResource(items, false)
+		picker.PickeElements = data
+		picker.PickerSameSubZone(false)
+	}
+}
+
+// MatchLocationSpec matches the location (city/subzone) conditions
+func (o *SearchContext) MatchLocationSpec(db *gorm.DB) (err error) {
+	if o.LocationSpec.IsEmpty() {
+		return
+	}
+	logger.Info("get real city is %v", o.IdcCitys)
+	if len(o.IdcCitys) > 0 {
+		db = db.Where("city in ? ", o.IdcCitys)
+	} else {
+		db = db.Where("city = ? ", o.LocationSpec.City)
+	}
+	if o.LocationSpec.SubZoneIsEmpty() {
+		return
+	}
+	if o.LocationSpec.IncludeOrExclude {
+		db.Where("sub_zone_id in ?", o.LocationSpec.SubZoneIds)
+	} else {
+		db.Where("sub_zone_id  not in ?", o.LocationSpec.SubZoneIds)
+	}
+	return
+}
+
+// MatchStorage matches the storage (disk) conditions
+func (o *SearchContext) MatchStorage(db *gorm.DB) {
+	if len(o.StorageSpecs) <= 0 {
+		return
+	}
+	for _, d := range o.StorageSpecs {
+		if cmutil.IsNotEmpty(d.MountPoint) {
+			mp := path.Clean(d.MountPoint)
+			if cmutil.IsNotEmpty(d.DiskType) {
+				db.Where(model.JSONQuery("storage_device").Equals(d.DiskType, mp, "disk_type"))
+			}
+			switch {
+			case d.MaxSize > 0:
+				db.Where(model.JSONQuery("storage_device").NumRange(d.MinSize, d.MaxSize, mp, "size"))
+			case d.MaxSize <= 0 && d.MinSize > 0:
+				db.Where(model.JSONQuery("storage_device").Gte(d.MinSize, mp, "size"))
+			}
+		}
+	}
+}
+
+// MatchSpec matches the cpu and memory spec ranges
+func (o *SearchContext) MatchSpec(db *gorm.DB) {
+	db.Where(" ( cpu_num >= ?  and cpu_num <= ? ) and ( dram_cap >= ? and dram_cap <= ? ) ", o.Spec.Cpu.Min,
+		o.Spec.Cpu.Max,
+		o.Spec.Mem.Min, o.Spec.Mem.Max)
+}
+
+// MatchDeviceClass matches the device class condition
+func (o *SearchContext) MatchDeviceClass(db *gorm.DB) {
+	db.Where(" device_class in ? ", o.DeviceClass)
+}
+
+// UseNetDeviceIsNotEmpty requires net device id and rack id to be present
+func (o *SearchContext) UseNetDeviceIsNotEmpty(db *gorm.DB) {
+	db.Where("(net_device_id is not null and net_device_id != '') and (rack_id is not null and rack_id != '')")
+}
diff --git a/dbm-services/common/db-resource/internal/svr/apply/core.go b/dbm-services/common/db-resource/internal/svr/apply/core.go
new file mode 100644
index 0000000000..3e351a047b
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/apply/core.go
@@ -0,0 +1,354 @@
+package apply
+
+import (
+	"dbm-services/common/db-resource/internal/model"
+	"dbm-services/common/db-resource/internal/svr/task"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"sort"
+	"strings"
+
+	mapset "github.com/deckarep/golang-set/v2"
+)
+
+const (
+	// MINDISTRUTE TODO
+	MINDISTRUTE = 20
+)
+
+type subzone = string
+
+// PickerObject state of the resource picker for one group
+type PickerObject struct {
+	Item          string
+	Count         int
+	PickDistrbute map[string]int
+	ExistSubZone  []subzone // subzones already picked from
+	// SatisfiedAssetIds []string                     // satisfied instances already picked
+	SatisfiedHostIds []int
+	PickeElements    map[subzone][]InstanceObject // candidate instances per subzone
+
+	// only takes effect when the request must stay within one subzone
+	ExistEquipmentIds     []string // equipment (rack) ids already picked
+	ExistLinkNetdeviceIds []string // net device ids already picked
+}
+
+// LockReturnPickers query and return the detail info of the matched machines
+//
+//	@param elements
+//	@return []model.BatchGetTbDetailResult
+//	@return error
+func LockReturnPickers(elements []*PickerObject, mode string) ([]model.BatchGetTbDetailResult, error) {
+	var getter []model.BatchGetTbDetail
+	for _, v := range elements {
+		getter = append(getter, model.BatchGetTbDetail{
+			Item:      v.Item,
+			BkHostIds: v.SatisfiedHostIds,
+		})
+	}
+	data, err := model.BatchGetSatisfiedByAssetIds(getter, mode)
+	if err != nil {
+		logger.Error(fmt.Sprintf("选择到合适的实例,获取实例详情失败%s", err.Error()))
+	}
+	if mode == model.Used {
+		sendArchiverTask(data)
+	}
+	return data, err
+}
+
+// sendArchiverTask send consumed resources to the archiver
+//
+//	@param data
+func sendArchiverTask(data []model.BatchGetTbDetailResult) {
+	for _, v := range data {
+		for _, l := range v.Data {
+			task.ArchiverResourceChan <- l.ID
+		}
+	}
+}
+
+// createNice compute the nice score used to rank candidate machines
+//
+//	@param cpu
+//	@param mem
+//	@param sdd
+//	@param hdd
+//	@return rs
+func createNice(cpu int, mem, sdd, hdd int) (rs int64) {
+	rs = int64(cpu*1000000000000 + mem*100000 + sdd + hdd)
+	return
+}
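+
+// For example (illustrative numbers): a machine with 8 cores and 16000 MB
+// of memory scores 8*1000000000000 + 16000*100000 = 8001600000000, so cpu
+// dominates the ordering and memory only breaks ties.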
+
+// AnalysisResource group the candidate resources and sort each group
+//
+//	@param ins
+//	@return map
+func AnalysisResource(ins []model.TbRpDetail, israndom bool) map[string][]InstanceObject {
+	result := make(map[string][]InstanceObject)
+	for _, v := range ins {
+		linkids := strings.Split(v.NetDeviceID, ",")
+		t := InstanceObject{
+			BkHostId:        v.BkHostID,
+			Equipment:       v.RackID,
+			LinkNetdeviceId: linkids,
+			Nice:            createNice(int(v.CPUNum), v.DramCap, 0, 0),
+		}
+		if israndom {
+			result["RANDOM"] = append(result["RANDOM"], t)
+		} else {
+			result[v.SubZone] = append(result[v.SubZone], t)
+		}
+	}
+
+	// sort the machines inside every subzone so later picking takes the best candidate first
+	for key := range result {
+		sort.Sort(Wrapper{result[key], func(p, q *InstanceObject) bool {
+			return q.Nice > p.Nice // Nice 递减排序
+		}})
+	}
+	return result
+}
+
+// NewPicker initialize a resource picker
+//
+//	@param count
+//	@param item
+//	@return *PickerObject
+func NewPicker(count int, item string) *PickerObject {
+	return &PickerObject{
+		Item:                  item,
+		Count:                 count,
+		ExistEquipmentIds:     make([]string, 0),
+		ExistLinkNetdeviceIds: make([]string, 0),
+		SatisfiedHostIds:      make([]int, 0),
+		PickDistrbute:         make(map[string]int),
+	}
+}
+
+// PickerSameSubZone pick all required instances from one subzone
+func (c *PickerObject) PickerSameSubZone(cross_switch bool) {
+	sortSubZones := c.sortSubZone(false)
+	if len(sortSubZones) <= 0 {
+		return
+	}
+	for _, subzone := range sortSubZones {
+		logger.Info("PickerSameSubZone:PickeElements: %v", c.PickeElements[subzone])
+		if len(c.PickeElements[subzone]) < c.Count || len(c.PickeElements[subzone]) <= 0 {
+			continue
+		}
+		logger.Info("dbeug %v", subzone)
+		logger.Info("dbeug %v", c.PickeElements[subzone])
+		c.SatisfiedHostIds = []int{}
+		c.ExistEquipmentIds = []string{}
+		c.ExistLinkNetdeviceIds = []string{}
+		for idx := range c.PickeElements[subzone] {
+			logger.Info("loop %d", idx)
+			c.pickerOne(subzone, cross_switch)
+			// resource matching finished
+			logger.Info(fmt.Sprintf("surplus %s,%d", subzone, len(c.PickeElements[subzone])))
+			logger.Info(fmt.Sprintf("%s,%d,%d", subzone, c.Count, len(c.SatisfiedHostIds)))
+			if c.PickerDone() {
+				return
+			}
+		}
+	}
+}
+
+// Picker filter and match resources
+//
+//	@receiver c
+//	@param cross_subzone whether to pick across subzones
+func (c *PickerObject) Picker(cross_subzone bool) {
+	campKeys := c.sortSubZone(cross_subzone)
+	if len(campKeys) <= 0 {
+		return
+	}
+	subzoneChan := make(chan subzone, len(campKeys))
+	for _, v := range campKeys {
+		subzoneChan <- v
+	}
+	for {
+		select {
+		case subzone := <-subzoneChan:
+			if len(c.PickeElements[subzone]) == 0 {
+				delete(c.PickeElements, subzone)
+			}
+			if len(c.sortSubZone(cross_subzone)) == 0 {
+				logger.Info("go out here")
+				return
+			}
+			logger.Info(fmt.Sprintf("surplus %s,%d", subzone, len(c.PickeElements[subzone])))
+			logger.Info(fmt.Sprintf("%s,%d,%d", subzone, c.Count, len(c.SatisfiedHostIds)))
+			if c.pickerOne(subzone, false) {
+				delete(c.PickeElements, subzone)
+			}
+			// resource matching finished
+			if c.PickerDone() {
+				return
+			}
+			// not cross-subzone: keep cycling through the same subzone
+			if !cross_subzone {
+				subzoneChan <- subzone
+				continue
+			}
+			// cross-subzone: stop once the channel is drained
+			if len(subzoneChan) <= 0 {
+				return
+			}
+		}
+	}
+}
+
+func (c *PickerObject) pickerOne(key string, cross_switch bool) bool {
+	c.ExistSubZone = append(c.ExistSubZone, key)
+	for _, v := range c.PickeElements[key] {
+		if cross_switch {
+			if !c.CrossRackCheck(v) || !c.CrossSwitchCheck(v) {
+				// overlaps with an already picked switch or rack: drop it
+				c.deleteElement(key, v.BkHostId)
+				continue
+			}
+		}
+		c.ExistEquipmentIds = append(c.ExistEquipmentIds, v.Equipment)
+		c.SatisfiedHostIds = append(c.SatisfiedHostIds, v.BkHostId)
+		c.ExistLinkNetdeviceIds = append(c.ExistLinkNetdeviceIds, v.LinkNetdeviceId...)
+		c.PickDistrbute[key]++
+		c.deleteElement(key, v.BkHostId)
+		return true
+	}
+	return len(c.PickeElements) <= 0
+}
+
+// CrossSwitchCheck cross-switch check
+func (c *PickerObject) CrossSwitchCheck(v InstanceObject) bool {
+	if len(v.LinkNetdeviceId) <= 0 {
+		return false
+	}
+	return c.InterSectForLinkNetDevice(v.LinkNetdeviceId) == 0
+}
+
+// CrossRackCheck cross-rack check
+func (c *PickerObject) CrossRackCheck(v InstanceObject) bool {
+	if cmutil.IsEmpty(v.Equipment) {
+		return false
+	}
+	return c.InterSectForEquipment(v.Equipment) == 0
+}
+
+// DebugDistrubuteLog log how many instances were picked per zone
+func (o *PickerObject) DebugDistrubuteLog() {
+	for key, v := range o.PickDistrbute {
+		logger.Debug(fmt.Sprintf("Zone:%s,PickCount:%d", key, v))
+	}
+}
+
+func (c *PickerObject) deleteElement(key string, bkhostId int) {
+	var k []InstanceObject
+	for _, v := range c.PickeElements[key] {
+		if v.BkHostId != bkhostId {
+			k = append(k, v)
+		}
+	}
+	c.PickeElements[key] = k
+}
+
+// PreselectedSatisfiedInstance mark the picked instances as Preselected
+func (c *PickerObject) PreselectedSatisfiedInstance() error {
+	affectRows, err := model.UpdateTbRpDetail(c.SatisfiedHostIds, model.Preselected)
+	if err != nil {
+		return err
+	}
+	if int(affectRows) != len(c.SatisfiedHostIds) {
+		return fmt.Errorf("update %d qualified resouece to preselectd,only %d real update status", len(c.SatisfiedHostIds),
+			affectRows)
+	}
+	return nil
+}
+
+// RollbackSatisfiedInstanceStatusUnused roll the picked instances back to Unused
+func (c *PickerObject) RollbackSatisfiedInstanceStatusUnused() error {
+	return model.UpdateTbRpDetailStatusAtSelling(c.SatisfiedHostIds, model.Unused)
+}
+
+// CampusNice subzone with its candidate count
+type CampusNice struct {
+	Campus string `json:"campus"`
+	Count  int    `json:"count"`
+}
+
+// CampusWrapper sort wrapper for CampusNice
+type CampusWrapper struct {
+	Campus []CampusNice
+	by     func(p, q *CampusNice) bool
+}
+
+// Len for sorting
+func (pw CampusWrapper) Len() int {
+	return len(pw.Campus)
+}
+
+// Swap for sorting
+func (pw CampusWrapper) Swap(i, j int) {
+	pw.Campus[i], pw.Campus[j] = pw.Campus[j], pw.Campus[i]
+}
+
+// Less for sorting
+func (pw CampusWrapper) Less(i, j int) bool {
+	return pw.by(&pw.Campus[i], &pw.Campus[j])
+}
+
+// sortSubZone sort the remaining valid subzones
+func (c *PickerObject) sortSubZone(cross_subzone bool) []string {
+	var keys []string
+	var campusNice []CampusNice
+	for key, campusIntances := range c.PickeElements {
+		//	keys = append(keys, key)
+		if !cross_subzone || cmutil.ElementNotInArry(key, c.ExistSubZone) {
+			campusNice = append(campusNice, CampusNice{
+				Campus: key,
+				Count:  len(campusIntances),
+			})
+		}
+	}
+	// sort subzones by candidate count in descending order
+	sort.Sort(CampusWrapper{campusNice, func(p, q *CampusNice) bool {
+		return q.Count < p.Count
+	}})
+	for _, capmus := range campusNice {
+		keys = append(keys, capmus.Campus)
+	}
+	return keys
+}
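+
+// For example (illustrative counts): with candidates {"sz1": 3, "sz2": 5}
+// and no subzone picked yet, sortSubZone returns ["sz2", "sz1"].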
+
+// PickerDone whether enough instances have been picked
+func (c *PickerObject) PickerDone() bool {
+	return len(c.SatisfiedHostIds) == c.Count
+}
+
+// InterSectForEquipment size of the intersection with the picked equipment ids
+func (c *PickerObject) InterSectForEquipment(equipmentId string) int {
+	baseSet := mapset.NewSet[string]()
+	for _, v := range cmutil.RemoveDuplicate(c.ExistEquipmentIds) {
+		baseSet.Add(v)
+	}
+	myset := mapset.NewSet[string]()
+	myset.Add(equipmentId)
+	return baseSet.Intersect(myset).Cardinality()
+}
+
+// InterSectForLinkNetDevice size of the intersection with the picked net device ids
+func (c *PickerObject) InterSectForLinkNetDevice(linkDeviceIds []string) int {
+	baseSet := mapset.NewSet[string]()
+	for _, v := range cmutil.RemoveDuplicate(c.ExistLinkNetdeviceIds) {
+		baseSet.Add(v)
+	}
+	myset := mapset.NewSet[string]()
+	for _, linkId := range linkDeviceIds {
+		if cmutil.IsNotEmpty(linkId) {
+			myset.Add(linkId)
+		}
+	}
+	return baseSet.Intersect(myset).Cardinality()
+}
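+
+// For example (illustrative ids): with ExistLinkNetdeviceIds = ["nd-1",
+// "nd-2"], InterSectForLinkNetDevice([]string{"nd-2", "nd-9"}) returns 1,
+// so a cross-switch check on that candidate fails.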
diff --git a/dbm-services/common/db-resource/internal/svr/apply/instance.go b/dbm-services/common/db-resource/internal/svr/apply/instance.go
new file mode 100644
index 0000000000..a1af259855
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/apply/instance.go
@@ -0,0 +1,42 @@
+package apply
+
+// InstanceObject a candidate machine
+type InstanceObject struct {
+	BkHostId        int
+	Equipment       string
+	LinkNetdeviceId []string
+	Nice            int64
+}
+
+// GetLinkNetDeviceIdsInterface net device ids as a []interface{}
+func (c *InstanceObject) GetLinkNetDeviceIdsInterface() []interface{} {
+	var k []interface{}
+	for _, v := range c.LinkNetdeviceId {
+		k = append(k, v)
+	}
+	return k
+}
+
+// Wrapper sort wrapper for instance slices
+type Wrapper struct {
+	Instances []InstanceObject
+	by        func(p, q *InstanceObject) bool
+}
+
+// SortBy comparison function used to sort instances
+type SortBy func(p, q *InstanceObject) bool
+
+// Len for sorting
+func (pw Wrapper) Len() int {
+	return len(pw.Instances)
+}
+
+// Swap for sorting
+func (pw Wrapper) Swap(i, j int) {
+	pw.Instances[i], pw.Instances[j] = pw.Instances[j], pw.Instances[i]
+}
+
+// Less for sorting
+func (pw Wrapper) Less(i, j int) bool {
+	return pw.by(&pw.Instances[i], &pw.Instances[j])
+}
diff --git a/dbm-services/common/db-resource/internal/svr/bk/bk.go b/dbm-services/common/db-resource/internal/svr/bk/bk.go
new file mode 100644
index 0000000000..bacbdcba09
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/bk/bk.go
@@ -0,0 +1,2 @@
+// Package bk clients of the BlueKing cmdb and job apis
+package bk
diff --git a/dbm-services/common/db-resource/internal/svr/bk/cc.go b/dbm-services/common/db-resource/internal/svr/bk/cc.go
new file mode 100644
index 0000000000..3eee0e3cfc
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/bk/cc.go
@@ -0,0 +1,98 @@
+package bk
+
+import (
+	"dbm-services/common/db-resource/internal/config"
+	"dbm-services/common/go-pubpkg/cc.v3"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"time"
+)
+
+// EsbClient the shared cmdb esb client
+var EsbClient *cc.Client
+
+// CCModuleFields host fields queried from cmdb
+var CCModuleFields []string
+
+// init initialize the cmdb client and the queried host fields
+func init() {
+	var err error
+	EsbClient, err = NewClient()
+	if err != nil {
+		logger.Fatal("init cmdb client failed %s", err.Error())
+		return
+	}
+	CCModuleFields = []string{
+		"bk_host_id",
+		"bk_cloud_id",
+		"bk_host_innerip",
+		"bk_asset_id",
+		"svr_device_class",
+		"bk_mem",
+		"bk_cpu",
+		"bk_disk",
+		"idc_city_id",
+		"idc_city_name",
+		"sub_zone",
+		"sub_zone_id",
+		"rack_id",
+		"svr_type_name",
+		"net_device_id",
+	}
+}
+
+// NewClient create a cmdb client from the app config
+func NewClient() (*cc.Client, error) {
+	return cc.NewClient(config.AppConfig.BkSecretConfig.BkBaseUrl, cc.Secret{
+		BKAppCode:   config.AppConfig.BkSecretConfig.BkAppCode,
+		BKAppSecret: config.AppConfig.BkSecretConfig.BKAppSecret,
+		BKUsername:  config.AppConfig.BkSecretConfig.BkUserName,
+	})
+}
+
+// BatchQueryHostsInfo query host info from cmdb in batches of 200 and report the ips not found
+func BatchQueryHostsInfo(bizId int, allhosts []string) (ccHosts []*cc.Host, nofoundHosts []string, err error) {
+	for _, hosts := range cmutil.SplitGroup(allhosts, int64(200)) {
+		err = cmutil.Retry(cmutil.RetryConfig{Times: 3, DelayTime: 1 * time.Second}, func() error {
+			data, resp, err := cc.NewListBizHosts(EsbClient).QueryListBizHosts(&cc.ListBizHostsParam{
+				BkBizId: bizId,
+				Fileds:  CCModuleFields,
+				Page: cc.BKPage{
+					Start: 0,
+					Limit: len(hosts),
+				},
+				HostPropertyFilter: cc.HostPropertyFilter{
+					Condition: "AND",
+					Rules: []cc.Rule{
+						{
+							Field:    "bk_host_innerip",
+							Operator: "in",
+							Value:    hosts,
+						},
+					},
+				},
+			})
+			if resp != nil {
+				logger.Info("respone request id is %s,message:%s,code:%d", resp.RequestId, resp.Message, resp.Code)
+			}
+			if err != nil {
+				logger.Error("QueryListBizHosts failed %s", err.Error())
+				return err
+			}
+			ccHosts = append(ccHosts, data.Info...)
+			return nil
+		})
+	}
+	searchMap := make(map[string]struct{})
+	for _, host := range allhosts {
+		searchMap[host] = struct{}{}
+	}
+	for _, hf := range ccHosts {
+		delete(searchMap, hf.InnerIP)
+		logger.Info("cc info %v", hf)
+	}
+	for host := range searchMap {
+		nofoundHosts = append(nofoundHosts, host)
+	}
+	return ccHosts, nofoundHosts, err
+}
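+
+// A minimal usage sketch (illustrative biz id and ip; error handling
+// elided):
+//
+//	found, missing, err := BatchQueryHostsInfo(100443, []string{"127.0.0.1"})
+//	// found holds the cc records cmdb returned, missing the ips it did not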
diff --git a/dbm-services/common/db-resource/internal/svr/bk/cc_test.go b/dbm-services/common/db-resource/internal/svr/bk/cc_test.go
new file mode 100644
index 0000000000..756bb4e4a4
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/bk/cc_test.go
@@ -0,0 +1,73 @@
+package bk_test
+
+import (
+	"dbm-services/common/db-resource/internal/controller/manage"
+	"dbm-services/common/go-pubpkg/cc.v3"
+	"os"
+	"testing"
+)
+
+func TestReserverCC(t *testing.T) {
+	client, err := cc.NewClient(os.Getenv("BK_COMPONENT_API_URL"), cc.Secret{
+		BKAppCode:   os.Getenv("BK_APP_CODE"),
+		BKAppSecret: os.Getenv("BK_APP_SECRET"),
+		BKUsername:  os.Getenv("BK_USERNAME"),
+	})
+	if err != nil {
+		t.Fatalf("new client failed %s", err.Error())
+		return
+	}
+	listBizHosts := cc.NewListBizHosts(client)
+
+	resp, _, err := listBizHosts.QueryListBizHosts(&cc.ListBizHostsParam{
+		BkBizId: 100443,
+		HostPropertyFilter: cc.HostPropertyFilter{
+			Condition: "AND",
+			Rules: []cc.Rule{{
+				Field:    "bk_cloud_id",
+				Operator: "equal",
+				Value:    0,
+			}},
+		},
+		Fileds: []string{
+			"bk_host_id",
+			"bk_cloud_id",
+			"bk_host_innerip",
+			"bk_asset_id",
+			"bk_mem",
+			"bk_cpu",
+			"idc_city_name",
+			"idc_city_id",
+			"sub_zone",
+			"sub_zone_id",
+		},
+		Page: cc.BKPage{
+			Start: 100,
+			Limit: 100,
+		},
+	})
+	if err != nil {
+		t.Fatalf("query list biz hosts failed %s", err.Error())
+	}
+	t.Log(resp.Count)
+	// t.Logf("all count is %d", resp.Count)
+	var hosts []manage.HostBase
+	for _, host := range resp.Info {
+		t.Log(host.BKHostId, host.InnerIP)
+		hosts = append(hosts, manage.HostBase{
+			HostId: host.BKHostId,
+			Ip:     host.InnerIP,
+		})
+	}
+	param := manage.ImportMachParam{
+		ForBizs: []int{1001, 1002},
+		BkBizId: 100443,
+		RsTypes: []string{"MySQL", "Redis"},
+		Hosts:   hosts,
+	}
+	importResp, err := manage.ImportByListHostBiz(param)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log(importResp)
+}
diff --git a/dbm-services/common/db-resource/internal/svr/bk/disk.go b/dbm-services/common/db-resource/internal/svr/bk/disk.go
new file mode 100644
index 0000000000..9d8dd026e1
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/bk/disk.go
@@ -0,0 +1,187 @@
+package bk
+
+import (
+	"dbm-services/common/db-resource/internal/config"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// ShellResCollection info collected by the disk info shell script
+type ShellResCollection struct {
+	Cpu      int        `json:"cpu"`
+	Mem      int        `json:"mem"` // MB
+	TxRegion string     `json:"region"`
+	TxZone   string     `json:"zone"`
+	Disk     []DiskInfo `json:"disk"`
+}
+
+const (
+	// SSD ssd disk type
+	SSD = "SSD"
+)
+
+// DiskInfo disk info of one mount point
+type DiskInfo struct {
+	// mount point
+	MountPoint string `json:"mount_point"`
+	DiskDetail
+}
+
+// DiskDetail disk attributes
+type DiskDetail struct {
+	Size int `json:"size"`
+	// filesystem type: ext4, xfs
+	FileType string `json:"file_type"`
+	// disk type: SSD or non-SSD
+	DiskType string `json:"disk_type"`
+	DiskId   string `json:"disk_id"`
+}
+
+// GetDiskInfoShellContent content of the embedded disk info script
+var GetDiskInfoShellContent []byte
+
+func init() {
+	c, err := GetDiskInfoScript.ReadFile(DiskInfoScriptName)
+	if err != nil {
+		logger.Fatal("read get disk info shell content  failed %s", err.Error())
+	}
+	GetDiskInfoShellContent = c
+}
+
+// GetAllDiskIds collect the non-empty disk ids
+func GetAllDiskIds(c []DiskInfo) (diskIds []string) {
+	for _, v := range c {
+		if cmutil.IsNotEmpty(v.DiskId) {
+			diskIds = append(diskIds, v.DiskId)
+		}
+	}
+	return
+}
+
+// MarshalDisk marshal disk infos into a mount-point keyed json object
+func MarshalDisk(c []DiskInfo) (result string, err error) {
+	var b []byte
+	t := make(map[string]DiskDetail)
+	for idx, v := range c {
+		if cmutil.IsEmpty(v.MountPoint) {
+			v.MountPoint = fmt.Sprintf("NOMOUNT%d", idx)
+		}
+		t[v.MountPoint] = v.DiskDetail
+	}
+	if b, err = json.Marshal(t); err != nil {
+		logger.Error("marshal disk info failed ")
+		return "{}", err
+	}
+	return string(b), nil
+}
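+
+// For example (illustrative input): a single DiskInfo with an empty mount
+// point and size 500 marshals to
+// {"NOMOUNT0":{"size":500,"file_type":"","disk_type":"","disk_id":""}}.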
+
+// SetDiskType set the disk type on every disk info
+func SetDiskType(elems []DiskInfo, t string) (ds []DiskInfo) {
+	for _, v := range elems {
+		d := v
+		d.DiskType = t
+		ds = append(ds, d)
+	}
+	return ds
+}
+
+// GetDiskInfo run the disk info script on the hosts via the job platform
+func GetDiskInfo(hosts []string, bk_cloud_id, bk_biz_id int) (ipLogContentMap map[string]*ShellResCollection,
+	failedipLogInfo map[string]string, err error) {
+	iplist := []IPList{}
+	for _, ip := range hosts {
+		iplist = append(iplist, IPList{
+			IP:        ip,
+			BkCloudID: bk_cloud_id,
+		})
+	}
+	jober := JobV3{
+		Client: EsbClient,
+	}
+	logger.Info("api %s", config.AppConfig.BkSecretConfig.BkBaseUrl)
+	job, err := jober.ExecuteJob(&FastExecuteScriptParam{
+		BkBizID:        bk_biz_id,
+		ScriptContent:  base64.StdEncoding.EncodeToString(GetDiskInfoShellContent),
+		ScriptTimeout:  300,
+		ScriptLanguage: 1,
+		AccountAlias:   "mysql",
+		TargetServer: TargetServer{
+			IPList: iplist,
+		},
+	},
+	)
+	if err != nil {
+		logger.Error("call execute job failed %s", err.Error())
+		return nil, nil, err
+	}
+	// poll the job status
+	var errCnt int
+	var jobStatus GetJobInstanceStatusRpData
+	for i := 0; i < 100; i++ {
+		jobStatus, err = jober.GetJobStatus(&GetJobInstanceStatusParam{
+			BKBizId:       bk_biz_id,
+			JobInstanceID: job.JobInstanceID,
+		})
+		if err != nil {
+			logger.Error("query job %d status failed %s", job.JobInstanceID, err.Error())
+			errCnt++
+		}
+		if jobStatus.Finished {
+			break
+		}
+		if errCnt > 10 {
+			return nil, nil, fmt.Errorf("more than 10 errors when query job %d,some err: %s", job.JobInstanceID, err.Error())
+		}
+		time.Sleep(1 * time.Second)
+	}
+	// query the final status once more
+	jobStatus, err = jober.GetJobStatus(&GetJobInstanceStatusParam{
+		BKBizId:       bk_biz_id,
+		JobInstanceID: job.JobInstanceID,
+	})
+	if err != nil {
+		logger.Error("query job %d status failed %s", job.JobInstanceID, err.Error())
+		return nil, nil, err
+	}
+	failedipLogInfo = make(map[string]string)
+	for _, stepInstance := range jobStatus.StepInstanceList {
+		for _, step_ip_result := range stepInstance.StepIpResultList {
+			switch step_ip_result.Status {
+			case 1:
+				failedipLogInfo[step_ip_result.IP] += "agent error\n"
+			case 12:
+				failedipLogInfo[step_ip_result.IP] += "task dispatch failed\n"
+			case 403:
+				failedipLogInfo[step_ip_result.IP] += "task force-kill succeeded\n"
+			case 404:
+				failedipLogInfo[step_ip_result.IP] += "task force-kill failed\n"
+			case 11:
+				failedipLogInfo[step_ip_result.IP] += "execution failed\n"
+			default:
+				continue
+			}
+		}
+	}
+	// fetch the execution output
+	var ipLogs BatchGetJobInstanceIpLogRpData
+	ipLogs, err = jober.BatchGetJobInstanceIpLog(&BatchGetJobInstanceIpLogParam{
+		BKBizId:        bk_biz_id,
+		JobInstanceID:  job.JobInstanceID,
+		StepInstanceID: job.StepInstanceID,
+		IPList:         iplist,
+	})
+	if err != nil {
+		logger.Error("batch get job instance ip log failed %s", err.Error())
+		return nil, failedipLogInfo, err
+	}
+	ipLogContentMap = make(map[string]*ShellResCollection)
+	for _, d := range ipLogs.ScriptTaskLogs {
+		var dl ShellResCollection
+		if err = json.Unmarshal([]byte(d.LogContent), &dl); err != nil {
+			logger.Error("unmarshal log content failed %s", err.Error())
+			continue
+		}
+		ipLogContentMap[d.Ip] = &dl
+	}
+	return ipLogContentMap, failedipLogInfo, err
+}
diff --git a/dbm-services/common/db-resource/internal/svr/bk/get_block_info.sh b/dbm-services/common/db-resource/internal/svr/bk/get_block_info.sh
new file mode 100644
index 0000000000..6b329a4f95
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/bk/get_block_info.sh
@@ -0,0 +1,147 @@
+#!/bin/bash 
+
+SYSBLOCK_DIR="/sys/block"
+META_DOMAIN='127.0.0.1'
+#META_DOMAIN='metadata.tencentyun.com'
+
+getDiskType(){
+    dname=$1    
+    if [[ $dname =~ ^fd ]]
+    then
+        echo "FDD"
+    elif [[ $dname =~ ^sd ]]
+    then
+        echo "HDD"
+    elif [[ $dname =~ ^hd ]]
+    then
+        echo "HDD"
+    elif [[ $dname =~ ^vd ]]
+    then
+        echo "HDD"
+    elif [[ $dname =~ ^nvme ]]
+    then
+        echo "SSD"
+    elif [[ $dname =~ ^sr ]]
+    then
+        echo "ODD"
+    elif [[ $dname =~ ^xvd ]]
+    then
+        echo "HDD"
+    elif [[ $dname =~ ^mmc ]]
+    then
+        echo "SSD"
+    fi
+}
+
+getMountPoint(){
+    pname="/dev/$1"
+    mp=`df -hT|egrep "${pname} "|awk '{print $NF}'`
+    echo ${mp}
+}
+
+# filesystem type
+getSysFileType(){
+    pname="/dev/$1"
+    sz=`df -hT|grep ${pname}|awk '{print $2}'`
+    echo ${sz}
+}
+
+## get all blocks
+tmp_arr=()
+for dname in `ls ${SYSBLOCK_DIR}/`
+do
+    if [[  ${dname} =~ ^loop ||  ${dname} =~ ^nb ||   ${dname} =~ ^ram  ||  ${dname} =~ ^sr  ]]
+    then
+        continue
+    fi
+    #echo  -n "${dname}: "
+    diskType=`getDiskType ${dname}`
+    #echo "${diskType}"
+    rotational=`cat ${SYSBLOCK_DIR}/${dname}/queue/rotational`
+    #echo ${rotational}
+    if [ ! $rotational == 1 ]
+    then
+        diskType="SSD"
+    fi
+
+    if [[ ${dname} =~ nvme && ${dname} != nvme ]]  || [[ ${dname} =~  ^md[0-9] ]] || [[ ${dname} == "fioa" ]] 
+    then
+        mp=`getMountPoint ${dname}`
+        sz=`cat ${SYSBLOCK_DIR}/${dname}/size`
+        sz=$((sz+0)) 
+        totalSize=`expr $sz \* 512 / 1024 / 1024 / 1024`   
+        sft=`getSysFileType ${dname}`
+        #echo -n "{\"MountPoint\":\"${mp}\",\"Size\":${totalSize},\"FileType\":\"${sft}\",\"DiskType\":\"${diskType}\"}"
+        if [ -f ${SYSBLOCK_DIR}/${dname}/serial ]
+        then
+            diskId=`cat  ${SYSBLOCK_DIR}/${dname}/serial`
+        fi
+        tmp_arr[${#tmp_arr[*]}]="{\"mount_point\":\"${mp}\",\"size\":${totalSize},\"file_type\":\"${sft}\",\"disk_type\":\"${diskType}\",\"disk_id\":\"${diskId}\"}"
+        continue
+   fi
+
+    pt=0
+    for pname in `ls  ${SYSBLOCK_DIR}/${dname}`
+    do
+        if [[  ! ${pname} =~ ^${dname} ]]
+        then
+            continue   
+        fi
+        pt=$((pt+1))
+        mp=`getMountPoint ${pname}`
+        sft=`getSysFileType ${pname}` 
+        sz=`cat ${SYSBLOCK_DIR}/${dname}/${pname}/size`     
+        sz=$((sz+0)) 
+        totalSize=`expr $sz \* 512 / 1024 / 1024 / 1024  `   
+        if [[ ! ${mp} =~ data ]]
+        then
+            continue
+        fi
+        if [ -f ${SYSBLOCK_DIR}/${dname}/serial ]
+        then
+            diskId=`cat  ${SYSBLOCK_DIR}/${dname}/serial`
+        fi
+        tmp_arr[${#tmp_arr[*]}]="{\"mount_point\":\"${mp}\",\"size\":${totalSize},\"file_type\":\"${sft}\",\"disk_type\":\"${diskType}\",\"disk_id\":\"${diskId}\"}"
+    done
+    if [[ $pt == 0 ]];
+    then
+        mp=`getMountPoint ${dname}`
+        sz=`cat ${SYSBLOCK_DIR}/${dname}/size`
+        sz=$((sz+0)) 
+        totalSize=`expr $sz \* 512 / 1024 / 1024 / 1024`   
+        sft=`getSysFileType ${dname}`
+        #echo -n "{\"MountPoint\":\"${mp}\",\"Size\":${totalSize},\"FileType\":\"${sft}\",\"DiskType\":\"${diskType}\"}"
+        if [ -f ${SYSBLOCK_DIR}/${dname}/serial ]
+        then
+            diskId=`cat  ${SYSBLOCK_DIR}/${dname}/serial`
+        fi
+        tmp_arr[${#tmp_arr[*]}]="{\"mount_point\":\"${mp}\",\"size\":${totalSize},\"file_type\":\"${sft}\",\"disk_type\":\"${diskType}\",\"disk_id\":\"${diskId}\"}"
+    fi
+done
+
+
+cpunum=`cat /proc/cpuinfo| grep "processor"| wc -l`
+memsize=`free -m | awk '/Mem/ {print $2}'`
+curl http://${META_DOMAIN}/latest/meta-data/placement/region -s -o /dev/null
+if [ $? -eq 0 ]
+then
+    region=`curl http://${META_DOMAIN}/latest/meta-data/placement/region -s`
+fi
+curl http://${META_DOMAIN}/latest/meta-data/placement/zone -s -o /dev/null
+if [ $? -eq 0 ]
+then
+    zone=`curl http://${META_DOMAIN}/latest/meta-data/placement/zone -s`
+fi
+echo -n "{\"cpu\":${cpunum},\"mem\":${memsize},\"region\":\"${region}\",\"zone\":\"${zone}\","
+length=${#tmp_arr[@]}
+stop=$(($length-1))
+echo -n "\"disk\":["
+for ((i=0; i<$length; i++))
+do
+    echo -n ${tmp_arr[$i]}
+    if [[ ! $i == $stop ]];
+    then
+        echo -n ","
+    fi
+done
+echo -n "]}"
\ No newline at end of file
diff --git a/dbm-services/common/db-resource/internal/svr/bk/job_v3.go b/dbm-services/common/db-resource/internal/svr/bk/job_v3.go
new file mode 100644
index 0000000000..91466aaa55
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/bk/job_v3.go
@@ -0,0 +1,217 @@
+package bk
+
+import (
+	"dbm-services/common/go-pubpkg/cc.v3"
+	"dbm-services/common/go-pubpkg/logger"
+	"embed"
+	"encoding/json"
+	"net/http"
+	"path"
+)
+
+// Jober job executor interface
+type Jober interface {
+	Execute()
+}
+
+// DiskInfoScriptName file name of the embedded disk info script
+var DiskInfoScriptName = "get_block_info.sh"
+
+// GetDiskInfoScript the embedded disk info script fs
+//
+//go:embed get_block_info.sh
+var GetDiskInfoScript embed.FS
+
+const (
+	// ESB_PREFIX esb api path prefix of jobv3
+	ESB_PREFIX = "/api/c/compapi/v2/jobv3/"
+	// fast execute a script
+	fast_execute_script = "fast_execute_script/"
+	// query job execution status
+	get_job_status = "get_job_instance_status/"
+	// query job execution log by job instance id
+	get_job_instance_ip_log = "get_job_instance_ip_log/"
+	// batch query job execution logs by ip list
+	batch_get_job_instance_ip_log = "batch_get_job_instance_ip_log/"
+)
+
+// FastExecuteScriptParam request of fast_execute_script
+type FastExecuteScriptParam struct {
+	cc.BaseSecret
+	BkBizID          int          `json:"bk_biz_id"`
+	ScriptID         int          `json:"script_id,omitempty"`
+	ScriptContent    string       `json:"script_content"`
+	ScriptParam      string       `json:"script_param,omitempty"`
+	ScriptTimeout    int          `json:"script_timeout,omitempty"`
+	AccountAlias     string       `json:"account_alias"`
+	IsParamSensitive int          `json:"is_param_sensitive,omitempty"`
+	ScriptLanguage   int          `json:"script_language"`
+	TargetServer     TargetServer `json:"target_server"`
+}
+
+// FastExecuteScriptRpData response data of fast_execute_script
+type FastExecuteScriptRpData struct {
+	JobInstanceName string `json:"job_instance_name"`
+	JobInstanceID   int64  `json:"job_instance_id"`
+	StepInstanceID  int64  `json:"step_instance_id"`
+}
+
+// TargetServer job target servers
+type TargetServer struct {
+	DynamicGroupIDList []string       `json:"dynamic_group_id_list,omitempty"`
+	IPList             []IPList       `json:"ip_list"`
+	TopoNodeList       []TopoNodeList `json:"topo_node_list,omitempty"`
+}
+
+// IPList host addressed by cloud id and ip
+type IPList struct {
+	BkCloudID int    `json:"bk_cloud_id"`
+	IP        string `json:"ip"`
+}
+
+// TopoNodeList topology node target
+type TopoNodeList struct {
+	ID       int    `json:"id"`
+	NodeType string `json:"node_type"`
+}
+
+// BatchGetJobInstanceIpLogParam request of batch_get_job_instance_ip_log
+type BatchGetJobInstanceIpLogParam struct {
+	cc.BaseSecret  `json:",inline"`
+	BKBizId        int      `json:"bk_biz_id"`
+	JobInstanceID  int64    `json:"job_instance_id"`
+	StepInstanceID int64    `json:"step_instance_id"`
+	IPList         []IPList `json:"ip_list"`
+}
+
+// BatchGetJobInstanceIpLogRpData response data of batch_get_job_instance_ip_log
+type BatchGetJobInstanceIpLogRpData struct {
+	BkCloudID      int             `json:"bk_cloud_id"`
+	LogType        int             `json:"log_type"`
+	ScriptTaskLogs []ScriptTaskLog `json:"script_task_logs"`
+}
+
+// ScriptTaskLog script execution log of one ip
+type ScriptTaskLog struct {
+	BkCloudID  int    `json:"bk_cloud_id"`
+	Ip         string `json:"ip"`
+	LogContent string `json:"log_content"`
+}
+
+// GetJobInstanceStatusParam request of get_job_instance_status
+type GetJobInstanceStatusParam struct {
+	cc.BaseSecret `json:",inline"`
+	BKBizId       int   `json:"bk_biz_id"`
+	JobInstanceID int64 `json:"job_instance_id"`
+	// whether to return per-ip task details in step_ip_result_list; defaults to false
+	ReturnIpResult bool `json:"return_ip_result"`
+}
+
+// GetJobInstanceStatusRpData response data of get_job_instance_status
+type GetJobInstanceStatusRpData struct {
+	Finished         bool           `json:"finished"`
+	JobInstance      JobInstance    `json:"job_instance"`
+	StepInstanceList []StepInstance `json:"step_instance_list"`
+}
+
+// JobInstance job instance summary
+type JobInstance struct {
+	Name          string `json:"name"`
+	Status        int    `json:"status"`
+	CreateTime    int64  `json:"create_time"`
+	StartTime     int64  `json:"start_time"`
+	EndTime       int64  `json:"end_time"`
+	TotalTime     int64  `json:"total_time"`
+	BkBizID       int    `json:"bk_biz_id"`
+	JobInstanceID int    `json:"job_instance_id"`
+}
+
+// StepInstance is one step of a job instance
+type StepInstance struct {
+	StepInstanceID   int            `json:"step_instance_id"`
+	Type             int            `json:"type"`
+	Name             string         `json:"name"`
+	Status           int            `json:"status"`
+	CreateTime       int64          `json:"create_time"`
+	StartTime        int64          `json:"start_time"`
+	EndTime          int64          `json:"end_time"`
+	TotalTime        int64          `json:"total_time"`
+	RetryCount       int            `json:"execute_count"` // retry count of the step
+	StepIpResultList []StepIpResult `json:"step_ip_result_list"`
+}
+
+// StepIpResult is the per-host execution result of a step
+type StepIpResult struct {
+	IP        string `json:"ip"`
+	BkCloudID int    `json:"bk_cloud_id"`
+	// Job execution status: 1 agent abnormal; 5 waiting; 7 running; 9 success; 11 failed; 12 dispatch failed; 403 force-terminate success; 404 force-terminate failed
+	Status    int `json:"status"`
+	ExitCode  int `json:"exit_code"`
+	TotalTime int `json:"total_time"`
+}
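+
+// statusName is an illustrative helper (not part of the original API) that
+// renders the status codes documented on StepIpResult.Status as readable
+// names; codes outside the documented set are reported as unknown.
+func statusName(status int) string {
+	switch status {
+	case 1:
+		return "agent abnormal"
+	case 5:
+		return "waiting"
+	case 7:
+		return "running"
+	case 9:
+		return "success"
+	case 11:
+		return "failed"
+	case 12:
+		return "dispatch failed"
+	case 403:
+		return "force-terminate success"
+	case 404:
+		return "force-terminate failed"
+	default:
+		return "unknown"
+	}
+}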
+
+// JobV3 is a client for the ESB jobv3 API
+type JobV3 struct {
+	Client *cc.Client
+}
+
+// ExecuteJob calls fast_execute_script to run a script on the target servers
+func (g *JobV3) ExecuteJob(param *FastExecuteScriptParam) (data FastExecuteScriptRpData, err error) {
+	logger.Info("will execute job at %v", param.TargetServer.IPList)
+	resp, err := g.Client.Do(http.MethodPost, g.get_fast_execute_script_url(), param)
+	if err != nil {
+		logger.Error("call fast_execute_script failed %s", err.Error())
+		return FastExecuteScriptRpData{}, err
+	}
+	if err = json.Unmarshal(resp.Data, &data); err != nil {
+		logger.Error("unmarshal respone data  failed %s,respone message:%s,code:%d", err.Error(), resp.Message, resp.Code)
+		return
+	}
+	return
+}
+
+// GetJobStatus queries the execution status of a job instance
+func (g *JobV3) GetJobStatus(param *GetJobInstanceStatusParam) (data GetJobInstanceStatusRpData, err error) {
+	resp, err := g.Client.Do(http.MethodPost, g.get_job_status_url(), param)
+	if err != nil {
+		logger.Error("call get_job_status failed %s", err.Error())
+		return GetJobInstanceStatusRpData{}, err
+	}
+	if err = json.Unmarshal(resp.Data, &data); err != nil {
+		logger.Error("unmarshal respone data  failed %s,respone message:%s,code:%d", err.Error(), resp.Message, resp.Code)
+		return
+	}
+	return
+}
+
+// BatchGetJobInstanceIpLog fetches execution logs for a batch of ips
+func (g *JobV3) BatchGetJobInstanceIpLog(param *BatchGetJobInstanceIpLogParam) (data BatchGetJobInstanceIpLogRpData,
+	err error) {
+	resp, err := g.Client.Do(http.MethodPost, g.batch_get_job_instance_ip_log_url(), param)
+	if err != nil {
+		logger.Error("call batch_get_job_instance_ip_log failed %s", err.Error())
+		return BatchGetJobInstanceIpLogRpData{}, err
+	}
+	if err = json.Unmarshal(resp.Data, &data); err != nil {
+		logger.Error("unmarshal respone data  failed %s,respone message:%s,code:%d", err.Error(), resp.Message, resp.Code)
+		return
+	}
+	logger.Info("shell return content %v", data.ScriptTaskLogs)
+	return
+}
+
+func (g *JobV3) get_fast_execute_script_url() string {
+	return path.Join(ESB_PREFIX, fast_execute_script)
+}
+
+func (g *JobV3) get_job_status_url() string {
+	return path.Join(ESB_PREFIX, get_job_status)
+}
+
+// func (g *JobV3) get_job_instance_ip_log_url() string {
+// 	return path.Join(ESB_PREFIX, get_job_instance_ip_log)
+// }
+
+func (g *JobV3) batch_get_job_instance_ip_log_url() string {
+	return path.Join(ESB_PREFIX, batch_get_job_instance_ip_log)
+}
diff --git a/dbm-services/common/db-resource/internal/svr/bk/job_v3_test.go b/dbm-services/common/db-resource/internal/svr/bk/job_v3_test.go
new file mode 100644
index 0000000000..717d31d60e
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/bk/job_v3_test.go
@@ -0,0 +1,87 @@
+package bk_test
+
+import (
+	"dbm-services/common/db-resource/internal/svr/bk"
+	"dbm-services/common/go-pubpkg/cc.v3"
+	"encoding/base64"
+	"os"
+	"testing"
+)
+
+var BKBizId = 9999431
+
+func TestExecuteJob(t *testing.T) {
+	client, err := cc.NewClient(os.Getenv("BK_BASE_URL"), cc.Secret{
+		BKAppCode:   os.Getenv("BK_APP_CODE"),
+		BKAppSecret: os.Getenv("BK_APP_SECRET"),
+		BKUsername:  os.Getenv("BK_USERNAME"),
+	})
+	if err != nil {
+		t.Fatal("new cc client failed", err.Error())
+		return
+	}
+	c, err := bk.GetDiskInfoScript.ReadFile(bk.DiskInfoScriptName)
+	if err != nil {
+		t.Fatal(err)
+	}
+	jober := bk.JobV3{
+		Client: client,
+	}
+	data, err := jober.ExecuteJob(&bk.FastExecuteScriptParam{
+		BkBizID:        BKBizId,
+		ScriptContent:  base64.StdEncoding.EncodeToString(c),
+		ScriptTimeout:  180,
+		ScriptLanguage: 1,
+		AccountAlias:   "mysql",
+		TargetServer: bk.TargetServer{
+			IPList: []bk.IPList{
+				{
+					BkCloudID: 0,
+					IP:        "127.0.0.1",
+				},
+			},
+		},
+	})
+	if err != nil {
+		t.Logf("execute job failed %s\n", err.Error())
+		return
+	}
+	t.Log(data.JobInstanceID)
+}
+
+func TestGetJobInstanceStatus(t *testing.T) {
+	t.Logf("start testing \n")
+	// t.Log(os.Getenv("BK_BASE_URL"))
+	client, err := cc.NewClient(os.Getenv("BK_BASE_URL"), cc.Secret{
+		BKAppCode:   os.Getenv("BK_APP_CODE"),
+		BKAppSecret: os.Getenv("BK_APP_SECRET"),
+		BKUsername:  os.Getenv("BK_USERNAME"),
+	})
+	if err != nil {
+		t.Fatal("new cc client failed", err.Error())
+		return
+	}
+	jober := bk.JobV3{
+		Client: client,
+	}
+	data, err := jober.GetJobStatus(&bk.GetJobInstanceStatusParam{
+		BKBizId:       BKBizId,
+		JobInstanceID: 27936528246,
+	})
+	if err != nil {
+		t.Fatal("get job status failed", err.Error())
+	}
+	t.Logf("%v", data.JobInstance)
+	t.Logf("%v", data.StepInstanceList)
+	data1, err := jober.BatchGetJobInstanceIpLog(&bk.BatchGetJobInstanceIpLogParam{
+		BKBizId:        BKBizId,
+		JobInstanceID:  27936528246,
+		StepInstanceID: 27996200699,
+		IPList:         []bk.IPList{{BkCloudID: 0, IP: "127.0.0.1"}},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log(data1.ScriptTaskLogs[0].LogContent)
+	t.Logf("end testing ...")
+}
diff --git a/dbm-services/common/db-resource/internal/svr/cloud/cloud.go b/dbm-services/common/db-resource/internal/svr/cloud/cloud.go
new file mode 100644
index 0000000000..6cd5eaa28e
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/cloud/cloud.go
@@ -0,0 +1,22 @@
+// Package cloud TODO
+package cloud
+
+import (
+	"dbm-services/common/db-resource/internal/svr/cloud/tencent"
+	"fmt"
+)
+
+// Disker abstracts disk queries against a cloud provider
+type Disker interface {
+	// DescribeDisk returns a disk id -> disk type mapping for the given region
+	DescribeDisk(diskIds []string, region string) (diskTypeDic map[string]string, err error)
+}
+
+// NewDisker returns the first available cloud disker
+func NewDisker() (dr Disker, err error) {
+	if tencent.TencentDisker.IsOk() {
+		dr = tencent.TencentDisker
+		return
+	}
+	return dr, fmt.Errorf("not found available cloud disker")
+}
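+
+// Usage sketch (illustrative; the disk id and region are placeholders):
+//
+//	dr, err := NewDisker()
+//	if err != nil {
+//		return err
+//	}
+//	diskTypes, err := dr.DescribeDisk([]string{"disk-xxxx"}, "ap-shanghai")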
diff --git a/dbm-services/common/db-resource/internal/svr/cloud/tencent/tencent.go b/dbm-services/common/db-resource/internal/svr/cloud/tencent/tencent.go
new file mode 100644
index 0000000000..6da148fbb8
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/cloud/tencent/tencent.go
@@ -0,0 +1,65 @@
+// Package tencent TODO
+package tencent
+
+import (
+	"dbm-services/common/db-resource/internal/config"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+
+	cbs "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cbs/v20170312"
+	"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
+	"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
+	"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile"
+)
+
+var credential *common.Credential
+var cpf *profile.ClientProfile
+
+// TencentDisker is the Tencent Cloud implementation of Disker
+var TencentDisker BcsClient
+
+func init() {
+	if config.AppConfig.CloudCertificate != nil {
+		credential = common.NewCredential(
+			config.AppConfig.CloudCertificate.SecretId,
+			config.AppConfig.CloudCertificate.SecretKey,
+		)
+	}
+	cpf = profile.NewClientProfile()
+	cpf.HttpProfile.ReqTimeout = 30
+	// The SDK picks the endpoint automatically; it only needs to be set manually
+	// for special regions, e.g. the CVM Shanghai finance region: cvm.ap-shanghai-fsi.tencentcloudapi.com
+	cpf.HttpProfile.Endpoint = "cbs.internal.tencentcloudapi.com"
+}
+
+// BcsClient is a thin client of the Tencent Cloud CBS API
+type BcsClient struct{}
+
+// IsOk reports whether a cloud credential has been configured
+func (t BcsClient) IsOk() bool {
+	return credential != nil
+}
+
+// DescribeDisk queries CBS and returns a disk id -> disk type mapping
+func (t BcsClient) DescribeDisk(diskIds []string, region string) (diskTypeDic map[string]string, err error) {
+	client, err := cbs.NewClient(credential, region, cpf)
+	if err != nil {
+		logger.Error("new cbs client failed %s", err.Error())
+		return
+	}
+	request := cbs.NewDescribeDisksRequest()
+	request.DiskIds = common.StringPtrs(diskIds)
+	response, err := client.DescribeDisks(request)
+	// handle SDK errors
+	if _, ok := err.(*errors.TencentCloudSDKError); ok {
+		logger.Error("an API error has returned: %s", err.Error())
+		return
+	}
+	// non-SDK errors: fail directly; real code may add other handling
+	if err != nil {
+		logger.Error("call describe disk failed %s", err.Error())
+		return
+	}
+	logger.Info("disk info %s", response.ToJsonString())
+	diskTypeDic = make(map[string]string)
+	for _, disk := range response.Response.DiskSet {
+		diskTypeDic[*disk.DiskId] = *disk.DiskType
+	}
+	return
+}
diff --git a/dbm-services/common/db-resource/internal/svr/cloud/tencent/tencentcloud_test.go b/dbm-services/common/db-resource/internal/svr/cloud/tencent/tencentcloud_test.go
new file mode 100644
index 0000000000..d86a88ac2c
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/cloud/tencent/tencentcloud_test.go
@@ -0,0 +1,89 @@
+package tencent_test
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	cbs "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cbs/v20170312"
+	"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
+	"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/errors"
+	"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile"
+	"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/regions"
+)
+
+func TestDescribeTencentCloud(t *testing.T) {
+	// Required step:
+	// Instantiate a credential object with the Tencent Cloud account key
+	// pair SecretId and SecretKey.
+	// Hard-coding keys risks leaking them along with the code and is not
+	// recommended; keep them in environment variables or a config file.
+	// credential := common.NewCredential("SecretId", "SecretKey")
+	credential := common.NewCredential(
+		os.Getenv("SecretId"),
+		os.Getenv("SecretKey"),
+	)
+
+	// Optional step:
+	// Instantiate a client profile; it can set the timeout and other options.
+	cpf := profile.NewClientProfile()
+	// The SDK uses POST by default. If you must use GET, set it here; GET
+	// cannot carry some large requests. Keep the default unless necessary.
+	cpf.HttpProfile.ReqMethod = "POST"
+	// The SDK has a default timeout; keep it unless necessary. Check the
+	// SDK code for the latest default value if needed.
+	cpf.HttpProfile.ReqTimeout = 30
+	// The SDK picks the endpoint automatically; it only needs to be set
+	// manually for special regions, e.g. the CVM Shanghai finance region:
+	// cvm.ap-shanghai-fsi.tencentcloudapi.com
+	cpf.HttpProfile.Endpoint = "cbs.internal.tencentcloudapi.com"
+	// The SDK signs with TC3-HMAC-SHA256 by default; it is more secure but
+	// slightly slower. Keep the default unless necessary.
+	cpf.SignMethod = "TC3-HMAC-SHA256"
+	// The SDK returns zh-CN messages by default; en-US returns English, but
+	// most products or APIs do not fully support English responses.
+	cpf.Language = "en-US"
+	// Print debug logs; defaults to false.
+	// cpf.Debug = true
+
+	// Instantiate a client for the product to request (CBS here).
+	// The second parameter is the region, either a string such as
+	// "ap-guangzhou" or one of the predefined constants.
+	client, _ := cbs.NewClient(credential, regions.Shanghai, cpf)
+	// Instantiate a request object and set parameters as the API requires.
+	// Check the SDK source to see which attributes of DescribeDisksRequest
+	// can be set; attributes may be basic types or other data structures.
+	// An IDE makes it easy to jump to the docs of each API and structure.
+	request := cbs.NewDescribeDisksRequest()
+
+	// Basic-type parameters: the SDK passes parameters as pointers, so even
+	// basic types are assigned through pointers; wrapper functions are
+	// provided for taking pointers of basic types.
+
+	// Array parameters: this API allows filtering by id.
+	// request.InstanceIds = common.StringPtrs([]string{"ins-r8hr2upy"})
+	request.DiskIds = common.StringPtrs([]string{"disk-qayi7b9k"})
+	// Complex objects: Filters is an array whose elements are Filter
+	// objects, and a Filter's Values member is a string array.
+	// request.Filters = []*cbs.Filter{
+	// 	&cbs.Filter{
+	// 		Name:   common.StringPtr("zone"),
+	// 		Values: common.StringPtrs([]string{"ap-shanghai-1"}),
+	// 	},
+	// }
+
+	// Call the API through the client, passing in the request object.
+	response, err := client.DescribeDisks(request)
+	// Handle SDK errors.
+	if _, ok := err.(*errors.TencentCloudSDKError); ok {
+		fmt.Printf("An API error has returned: %s", err)
+		return
+	}
+	// Non-SDK errors: fail directly; real code may add other handling.
+	if err != nil {
+		panic(err)
+	}
+	// Print the returned json string.
+	fmt.Printf("%s\n", response.ToJsonString())
+}
diff --git a/dbm-services/common/db-resource/internal/svr/meta/meta.go b/dbm-services/common/db-resource/internal/svr/meta/meta.go
new file mode 100644
index 0000000000..1d8522412c
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/meta/meta.go
@@ -0,0 +1,77 @@
+// Package meta TODO
+package meta
+
+import (
+	"bytes"
+	"dbm-services/common/db-resource/internal/config"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"io"
+	"net/http"
+	"net/url"
+)
+
+// GetIdcCityByLogicCityParam is the request body of the bk_city_name dbmeta API
+type GetIdcCityByLogicCityParam struct {
+	LogicCityName string `json:"logic_city_name"`
+}
+
+// IdcCitysResp is the response of the bk_city_name dbmeta API
+type IdcCitysResp struct {
+	Code      int      `json:"code"`
+	Message   string   `json:"message"`
+	Data      []string `json:"data"`
+	RequestId string   `json:"request_id"`
+}
+
+func getRequestUrl() (string, error) {
+	base := config.AppConfig.DbMeta
+	if cmutil.IsEmpty(config.AppConfig.DbMeta) {
+		base = "http://bk-dbm"
+	}
+	return url.JoinPath(base, "/apis/proxypass/dbmeta/bk_city_name/")
+}
+
+// GetIdcCityByLogicCity resolves a logic city name to its idc cities via dbmeta
+func GetIdcCityByLogicCity(logicCity string) (idcCitys []string, err error) {
+	u, err := getRequestUrl()
+	if err != nil {
+		return
+	}
+	p := GetIdcCityByLogicCityParam{
+		LogicCityName: logicCity,
+	}
+	client := &http.Client{} // 客户端,被Get,Head以及Post使用
+	body, err := json.Marshal(p)
+	if err != nil {
+		logger.Error("marshal GetIdcCityByLogicCityParam body failed %s ", err.Error())
+		return
+	}
+	request, err := http.NewRequest(http.MethodPost, u, bytes.NewBuffer(body))
+	if err != nil {
+		return
+	}
+	request.Header.Set("Content-Type", "application/json;charset=utf-8")
+	request.AddCookie(&http.Cookie{Name: "bk_app_code", Path: "/", Value: config.AppConfig.BkSecretConfig.BkAppCode,
+		MaxAge: 86400})
+	request.AddCookie(&http.Cookie{Name: "bk_app_secret", Path: "/", Value: config.AppConfig.BkSecretConfig.BKAppSecret,
+		MaxAge: 86400})
+	resp, err := client.Do(request)
+	if err != nil {
+		logger.Error("request /apis/proxypass/dbmeta/bk_city_name/ failed %s", err.Error())
+		return
+	}
+	defer resp.Body.Close()
+	content, err := io.ReadAll(resp.Body)
+	if err != nil {
+		logger.Error("read respone body failed %s", err.Error())
+		return
+	}
+	logger.Info("respone %v", string(content))
+	var d IdcCitysResp
+	if err = json.Unmarshal(content, &d); err != nil {
+		return
+	}
+	return d.Data, nil
+}
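+
+// Example (illustrative; the city name is a placeholder):
+//
+//	idcCitys, err := GetIdcCityByLogicCity("default_city")
+//	// idcCitys holds the data field of the dbmeta response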
diff --git a/dbm-services/common/db-resource/internal/svr/task/task.go b/dbm-services/common/db-resource/internal/svr/task/task.go
new file mode 100644
index 0000000000..37f994935d
--- /dev/null
+++ b/dbm-services/common/db-resource/internal/svr/task/task.go
@@ -0,0 +1,114 @@
+// Package task TODO
+package task
+
+import (
+	"dbm-services/common/db-resource/internal/model"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"runtime/debug"
+	"time"
+)
+
+// ApplyResponeLogItem is one apply response to be persisted as detail logs
+type ApplyResponeLogItem struct {
+	RequestId string
+	Data      []model.BatchGetTbDetailResult
+}
+
+// ApplyResponeLogChan receives apply responses to be written as detail logs
+var ApplyResponeLogChan chan ApplyResponeLogItem
+
+// ArchiverResourceChan receives ids of resources waiting to be archived
+var ArchiverResourceChan chan int
+
+// RecordRsOperatorInfoChan receives resource operation records to persist
+var RecordRsOperatorInfoChan chan model.TbRpOperationInfo
+
+// RuningTask tracks apply tasks that are currently running
+var RuningTask chan struct{}
+
+func init() {
+	ApplyResponeLogChan = make(chan ApplyResponeLogItem, 100)
+	ArchiverResourceChan = make(chan int, 200)
+	RecordRsOperatorInfoChan = make(chan model.TbRpOperationInfo, 20)
+	RuningTask = make(chan struct{}, 100)
+}
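+
+// A producer elsewhere in the service hands work to the consumer goroutine
+// below simply by sending on the channels, e.g. (illustrative):
+//
+//	task.ApplyResponeLogChan <- task.ApplyResponeLogItem{RequestId: id, Data: results}
+//	task.ArchiverResourceChan <- hostId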
+
+// init starts the background consumer that writes logs asynchronously
+func init() {
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic error:%v,stack:%s", r, string(debug.Stack()))
+			return
+		}
+	}()
+	go func() {
+		var archIds []int
+		ticker := time.NewTicker(10 * time.Second)
+		defer ticker.Stop()
+		for {
+			select {
+			case d := <-ApplyResponeLogChan:
+				err := recordTask(d)
+				if err != nil {
+					logger.Error("record log failed, %s", err.Error())
+				}
+			case id := <-ArchiverResourceChan:
+				if len(RuningTask) > 0 {
+					archIds = append(archIds, id)
+				} else {
+					archIds = append(archIds, id)
+					if err := archiverResource(archIds); err != nil {
+						logger.Warn("archiver resouce failed %s", err.Error())
+					}
+					archIds = []int{}
+				}
+			case <-ticker.C:
+				if len(RuningTask) <= 0 && len(archIds) > 0 {
+					if err := archiverResource(archIds); err != nil {
+						logger.Warn("archiver resouce failed %s", err.Error())
+					}
+					archIds = []int{}
+				}
+			case info := <-RecordRsOperatorInfoChan:
+				if err := recordRsOperationInfo(info); err != nil {
+					logger.Error("failed to record resource operation log %s", err.Error())
+				}
+			}
+
+		}
+	}()
+}
+
+// archiverResource archives the given resources asynchronously
+func archiverResource(ids []int) (err error) {
+	return model.ArchiverResouce(ids)
+}
+
+func recordTask(data ApplyResponeLogItem) error {
+	if data.Data == nil {
+		return fmt.Errorf("data is nill")
+	}
+	m := []model.TbRpApplyDetailLog{}
+	for _, v := range data.Data {
+		for _, vv := range v.Data {
+			m = append(m, model.TbRpApplyDetailLog{
+				RequestID:  data.RequestId,
+				IP:         vv.IP,
+				BkCloudID:  vv.BkCloudID,
+				Item:       v.Item,
+				BkHostID:   vv.BkHostID,
+				UpdateTime: time.Now(),
+				CreateTime: time.Now(),
+			})
+			logger.Debug("%s -- %s -- %s -- %s", v.Item, vv.IP, vv.RackID, vv.NetDeviceID)
+		}
+	}
+	return model.CreateBatchTbRpOpsAPIDetailLog(m)
+}
+
+func recordRsOperationInfo(data model.TbRpOperationInfo) (err error) {
+	return model.DB.Self.Table(model.TbRpOperationInfoTableName()).Create(&data).Error
+}
diff --git a/dbm-services/common/db-resource/main.go b/dbm-services/common/db-resource/main.go
new file mode 100644
index 0000000000..89a94b79b5
--- /dev/null
+++ b/dbm-services/common/db-resource/main.go
@@ -0,0 +1,53 @@
+package main
+
+import (
+	"dbm-services/common/db-resource/internal/config"
+	"dbm-services/common/db-resource/internal/middleware"
+	"dbm-services/common/db-resource/internal/routers"
+	"dbm-services/common/go-pubpkg/logger"
+	"net/http"
+	"os"
+
+	"github.com/gin-contrib/pprof"
+	"github.com/gin-contrib/requestid"
+	"github.com/gin-gonic/gin"
+)
+
+var buildstamp = ""
+var githash = ""
+var version = ""
+
+func main() {
+	logger.Info("buildstamp:%s,githash:%s,version:%s", buildstamp, githash, version)
+	engine := gin.New()
+	pprof.Register(engine)
+	engine.Use(requestid.New())
+	engine.Use(middleware.ApiLogger)
+	engine.Use(middleware.BodyLogMiddleware)
+	routers.RegisterRoutes(engine)
+	engine.POST("/app", func(ctx *gin.Context) {
+		ctx.SecureJSON(http.StatusOK, map[string]interface{}{"buildstamp": buildstamp, "githash": githash,
+			"version": version})
+	})
+	engine.Run(config.AppConfig.ListenAddress)
+}
+
+// init initializes the logger before main runs
+func init() {
+	if err := initLogger(); err != nil {
+		logger.Fatal("Init Logger Failed %s", err.Error())
+		return
+	}
+}
+
+// initLogger initializes the default logger
+func initLogger() (err error) {
+	var writer *os.File
+	formatJson := true
+	level := logger.InfoLevel
+	writer = os.Stdout
+	l := logger.New(writer, formatJson, level, map[string]string{})
+	logger.ResetDefault(l)
+	defer logger.Sync()
+	return
+}
diff --git a/dbm-services/common/db-resource/pkg/errno/code.go b/dbm-services/common/db-resource/pkg/errno/code.go
new file mode 100644
index 0000000000..fbd1a6f5c0
--- /dev/null
+++ b/dbm-services/common/db-resource/pkg/errno/code.go
@@ -0,0 +1,76 @@
+package errno
+
+var (
+	// Common errors
+
+	// OK means success
+	OK = Errno{Code: 0, Message: "", CNMessage: ""}
+
+	// InternalServerError is an unexpected server-side error
+	InternalServerError = Errno{Code: 10001, Message: "Internal server error", CNMessage: "服务器内部错误。"}
+	// ErrBind is returned when binding the request body to a struct fails
+	ErrBind = Errno{Code: 10002, Message: "Error occurred while binding the request body to the struct.",
+		CNMessage: "请求参数发生错误。"}
+	// ErrString2Int is returned when converting a string to an int fails
+	ErrString2Int = Errno{Code: 10010, Message: "Error occurred while converting string to int."}
+	// ErrorJsonToMap is returned when converting json to a map fails
+	ErrorJsonToMap = Errno{Code: 10030, Message: "Error occurred while converting json to Map.",
+		CNMessage: "Json 转为 Map 出现错误!"}
+	// ErrorUIDBeZero is returned when uid is 0
+	ErrorUIDBeZero = Errno{Code: 10035, Message: "uid can not be 0!", CNMessage: "uid 不能为 0!"}
+
+	// ErrTypeAssertion is returned when a type assertion fails
+	ErrTypeAssertion = Errno{Code: 10040, Message: "Error occurred while doing type assertion."}
+	// ErrParameterRequired is returned when a required input parameter is missing
+	ErrParameterRequired = Errno{Code: 10050, Message: "Input parameter required"}
+	// StartBiggerThanEndTime is returned when the start time is later than the end time
+	StartBiggerThanEndTime = Errno{Code: 10060, Message: "Start time is later than end time."}
+
+	// ErrInputParameter is returned on invalid input parameters
+	ErrInputParameter = Errno{Code: 10201, Message: "input parameter error.", CNMessage: "输入参数错误"}
+
+	// call other service error
+
+	// ErrInvokeAPI is returned when invoking another service's API fails
+	ErrInvokeAPI = Errno{Code: 15000, Message: "Error occurred while invoking API", CNMessage: "调用 API 发生错误!"}
+
+	// InvalidHttpStatusCode is returned on an unexpected http status code
+	InvalidHttpStatusCode = Errno{Code: 15015, Message: "Invalid http status code", CNMessage: "无效的 http 状态码!"}
+
+	// user errors
+
+	// ErrDoNotHavePrivs is returned when the user lacks privileges
+	ErrDoNotHavePrivs = Errno{Code: 20106, Message: "User doesn't have privileges."}
+	// ErrUserIsEmpty TODO
+	ErrUserIsEmpty = Errno{Code: 20110, Message: "User can't be empty.", CNMessage: "user 不能为空!"}
+
+	// dbrms
+
+	// model operation errors
+
+	// ErrDBQuery is returned when a DB query fails
+	ErrDBQuery = Errno{Code: 50200, Message: "DB Query error.", CNMessage: "查询DB错误!"}
+	// ErrModelFunction is returned when invoking a model function fails
+	ErrModelFunction = Err{Errno: Errno{Code: 50201, Message: "Error occurred while invoking model function.",
+		CNMessage: "调用 DB model 方法发生错误!"}, Err: nil}
+
+	// data handling errors
+
+	// ErrGetJSONArray is returned when getting a simplejson array fails
+	ErrGetJSONArray = Errno{Code: 50300, Message: "Get simplejson Array error.", CNMessage: ""}
+	// ErrConvert2Map is returned when converting data to a map fails
+	ErrConvert2Map = Errno{Code: 50301, Message: "Error occurred while converting the data to Map.",
+		CNMessage: "Error occurred while converting the data to Map."}
+	// ErrJSONMarshal is returned when marshaling data to JSON fails
+	ErrJSONMarshal = Errno{Code: 50302, Message: "Error occurred while marshaling the data to JSON.",
+		CNMessage: "Error occurred while marshaling the data to JSON."}
+	// ErrReadEntity is returned when parsing the request parameter fails
+	ErrReadEntity = Errno{Code: 50303, Message: "Error occurred while parsing the request parameter.",
+		CNMessage: "Error occurred while parsing the request parameter."}
+	// ErrJSONUnmarshal is returned when unmarshaling JSON into a data model fails
+	ErrJSONUnmarshal = Errno{Code: 50304, Message: "Error occurred while Unmarshaling the JSON to data model.",
+		CNMessage: "Error occurred while Unmarshaling the JSON to data model."}
+	// ErrBytesToMap is returned when converting bytes to a map fails
+	ErrBytesToMap = Errno{Code: 50307, Message: "Error occurred while converting bytes to map.",
+		CNMessage: "Error occurred while converting bytes to map."}
+	// dbrms
+
+	// ErrorResourceinsufficient is returned when resources are insufficient
+	ErrorResourceinsufficient = Errno{Code: 60001, Message: "resource insufficient", CNMessage: "资源不足"}
+)
diff --git a/dbm-services/common/db-resource/pkg/errno/errno.go b/dbm-services/common/db-resource/pkg/errno/errno.go
new file mode 100644
index 0000000000..1486264ce9
--- /dev/null
+++ b/dbm-services/common/db-resource/pkg/errno/errno.go
@@ -0,0 +1,115 @@
+// Package errno TODO
+package errno
+
+import (
+	"fmt"
+)
+
+// Errno is an error carrying a code and bilingual messages
+type Errno struct {
+	Code      int
+	Message   string
+	CNMessage string
+}
+
+const lang = "zh_CN"
+
+// Error implements the error interface, returning the message in the configured language
+func (err Errno) Error() string {
+	switch lang {
+	case "zh_CN":
+		return err.CNMessage
+	case "en_US":
+		return err.Message
+	default:
+		return err.CNMessage
+	}
+}
+
+// Addf appends a formatted message to the error
+func (err Errno) Addf(format string, args ...interface{}) error {
+	return err.Add(fmt.Sprintf(format, args...))
+}
+
+// Add appends a message to the error in the configured language
+func (err Errno) Add(message string) error {
+	switch lang {
+	case "zh_CN":
+		err.CNMessage += message
+	case "en_US":
+		err.Message += message
+	default:
+		err.CNMessage += message
+	}
+	return err
+}
+
+// Err represents an error
+type Err struct {
+	Errno
+	Err error
+}
+
+// New wraps an underlying error with an Errno
+func New(errno Errno, err error) *Err {
+	return &Err{Errno: errno, Err: err}
+}
+
+// Add appends a message to the wrapped error
+func (err Err) Add(message string) error {
+	switch lang {
+	case "zh_CN":
+		err.CNMessage += message
+		return err
+	case "en_US":
+		err.Message += message
+		return err
+	default:
+		err.CNMessage += message
+		return err
+	}
+}
+
+// SetMsg replaces the English message
+func (err Err) SetMsg(message string) error {
+	err.Message = message
+	return err
+}
+
+// SetCNMsg replaces the Chinese message
+func (err Err) SetCNMsg(cnMessage string) error {
+	err.CNMessage = cnMessage
+	return err
+}
+
+// Addf appends a formatted message to the wrapped error
+func (err Err) Addf(format string, args ...interface{}) error {
+	return err.Add(fmt.Sprintf(format, args...))
+}
+
+// DecodeErr resolves an error into a (code, message) pair
+func DecodeErr(err error) (int, string) {
+	CN := true
+
+	if err == nil {
+		return OK.Code, OK.Message
+	}
+
+	switch typed := err.(type) {
+	case Err:
+		if CN {
+			return typed.Code, typed.CNMessage
+		} else {
+			return typed.Code, typed.Message
+		}
+	case Errno:
+		if CN {
+			return typed.Code, typed.CNMessage
+		} else {
+			return typed.Code, typed.Message
+		}
+	default:
+	}
+	return InternalServerError.Code, err.Error()
+}
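+
+// A handler would typically end with (illustrative, gin-style; assumes the
+// caller imports gin and net/http):
+//
+//	code, msg := errno.DecodeErr(err)
+//	ctx.JSON(http.StatusOK, gin.H{"code": code, "message": msg, "data": data})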
diff --git a/dbm-services/common/db-resource/pkg/util/util.go b/dbm-services/common/db-resource/pkg/util/util.go
new file mode 100644
index 0000000000..80d62b1ad1
--- /dev/null
+++ b/dbm-services/common/db-resource/pkg/util/util.go
@@ -0,0 +1,2 @@
+// Package util TODO
+package util
diff --git a/dbm-services/common/db-resource/scripts/.gitkeep b/dbm-services/common/db-resource/scripts/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/common/dbha/ha-module/.gitignore b/dbm-services/common/dbha/ha-module/.gitignore
new file mode 100644
index 0000000000..6f4273640c
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/.gitignore
@@ -0,0 +1,7 @@
+main.exe
+main
+dbha
+vendor/*
+.idea/*
+proxytest.go
+testmysql.go
\ No newline at end of file
diff --git a/dbm-services/common/dbha/ha-module/Dockerfile b/dbm-services/common/dbha/ha-module/Dockerfile
new file mode 100644
index 0000000000..15659576cb
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/Dockerfile
@@ -0,0 +1,14 @@
+FROM centos:7
+LABEL maintainer="TenDB-New-DBHA Development"
+
+ARG BASEDIR=/home/mysql/dbha
+
+COPY dbha /usr/bin/
+
+RUN groupadd -r mysql && useradd -r -g mysql mysql \
+    && /usr/bin/install -m 0775 -o mysql -g root -d ${BASEDIR}\
+    && chown -R mysql ${BAKDIR} ${BASEDIR} \
+    && chmod +x /usr/bin/dbha
+
+USER mysql
+ENTRYPOINT ["/usr/bin/dbha"]
diff --git a/dbm-services/common/dbha/ha-module/README.md b/dbm-services/common/dbha/ha-module/README.md
new file mode 100644
index 0000000000..e18473c281
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/README.md
@@ -0,0 +1,176 @@
+# bk-dbha
+DBHA is the high-availability solution for Tencent IEG databases. The original DBHA component was written in Perl and suffered from complex deployment and decommissioning, among other problems. This project rewrites DBHA in Go, fixing those problems and improving extensibility; it currently serves mainly the bk-dbm project.
+
+## Features
+- High performance and lightweight
+- Easy to deploy
+- Dynamic scaling
+- High availability of the component itself
+- Multiple DB types supported
+- Machine-level switchover
+
+
+## Build
+Requires Go 1.14+.
+```
+$ go build dbha.go
+```
+
+## Deployment
+
+DBHA consists of two components: the Agent, which detects instances and reports results, and the GM, which aggregates detection reports, makes decisions and performs switchovers.
+
+Deployment also requires a HADB service, used to record and operate DBHA metadata.
+
+### Agent
+The Agent detects all DB instances in its own city and reports detection results across cities. The number of Agent instances can be scaled freely with the number of DB instances in the city: simply deploy one more instance, or remove one.
+```
+./dbha -type=agent -config_file=/conf/agent.yaml -log_file=/log/dbha.log
+```
+
+
+### GM
+The GM receives reports from Agents in all regions. Because Agents report across cities, at least two GM instances are recommended, deployed in any two different cities.
+```
+$ ./dbha -type=gm -config_file=/conf/gm.yaml -log_file=/log/dbha.log
+```
+
+## Configuration
+Configuration files use YAML syntax; there is one set for the Agent and one for the GM.
+
+### Agent
+```
+type: "agent"
+active_cluster_type: [
+  "tendbha:backend"
+]
+id: "12345"
+city: "123"
+campus: "坪山"
+instance_timeout: 900
+db:
+  reporter_interval: 60
+mysql:
+  user: "root"
+  pass: "123"
+  timeout: 10
+ssh:
+  port: 36000
+  user: "root"
+  pass: "xxx"
+  dest: "agent"
+  timeout: 10
+HADB:
+  host: "xxx"
+  port: 40000
+  timeout: 10
+CMDB:
+  host: "127.0.0.1"
+  port: 3306
+  timeout: 10
+```
+- type: DBHA type, agent or gm
+- active_cluster_type: DB types to detect, as an array; multiple types can be detected at once. A type is the pair `(cluster_type, machine_type)`, written as `cluster_type:machine_type`
+- id: unique identifier
+- city: city id in cc
+- campus: campus id in cc
+- instance_timeout: interval at which the agent refreshes gm and db information
+- db.reporter_interval: interval at which instance detection results are reported to hadb
+- mysql.user: mysql user used for detection
+- mysql.pass: password of the mysql detection user
+- mysql.timeout: timeout of mysql detection
+- ssh.port: port used for ssh detection
+- ssh.user: user used for ssh detection
+- ssh.pass: password of the ssh user
+- ssh.dest: role that performs ssh detection
+- ssh.timeout: timeout of ssh detection
+- HADB.host: domain of the HADB service
+- HADB.port: port of the HADB service
+- HADB.timeout: timeout for accessing HADB
+- CMDB.host: domain of the CMDB service
+- CMDB.port: port of the CMDB service
+- CMDB.timeout: timeout for accessing CMDB
+
+### GM
+```
+type: "gm"
+id: "12345"
+city: "123"
+campus: "浦东"
+db:
+  reporter_interval: 60
+mysql:
+  user: "dbha"
+  pass: "xxx"
+  proxy_user: "proxy"
+  proxy_pass: "xxx"
+  timeout: 10
+ssh:
+  port: 36000
+  user: "dba"
+  pass: "xxx"
+  dest: "agent"
+  timeout: 10
+HADB:
+  host: "127.0.0.1"
+  port: 3306
+  timeout: 10
+CMDB:
+  host: "127.0.0.1"
+  port: 3306
+  timeout: 10
+GDM:
+  liston_port: 50000
+  dup_expire: 600
+  scan_interval: 1
+GMM:
+GQA:
+  idc_cache_expire: 300
+  single_switch_idc: 50
+  single_switch_interval: 86400
+  single_switch_limit:  48
+  all_host_switch_limit:  150
+  all_switch_interval:  7200
+GCM:
+  allowed_checksum_max_offset: 2
+  allowed_slave_delay_max: 600
+  allowed_time_delay_max: 300
+  exec_slow_kbytes: 0
+```
+Parameters with the same name as Agent parameters have the same meaning.
+- mysql.proxy_user: user of the proxy admin port used during mysql switchover
+- mysql.proxy_pass: password of the proxy admin user
+- GDM.liston_port: port the GM listens on
+- GDM.dup_expire: how long GDM caches an instance
+- GDM.scan_interval: interval at which GDM scans instances
+- GQA.idc_cache_expire: cache TTL of the IDC information GQA queries
+- GQA.single_switch_idc: switch threshold of a single IDC within one minute
+- GQA.single_switch_interval: time window over which GQA counts an instance's switches
+- GQA.single_switch_limit: switch count threshold of a single instance
+- GQA.all_host_switch_limit: switch count threshold of DBHA overall
+- GQA.all_switch_interval: time window over which GQA counts DBHA switches
+- GCM.allowed_checksum_max_offset: how many tables may have mismatched crc32 values
+- GCM.allowed_slave_delay_max: slave delay threshold for updating master_slave_check
+- GCM.allowed_time_delay_max: replication time delay threshold between master and slave
+- GCM.exec_slow_kbytes: threshold of how much data the slave may lag behind the master
+
+## Image deployment
+### Build the image
+
+```bash
+docker build . -t mirrors.tencent.com/sccmsp/bkm-dbha:${version}
+```
+
+### Test
+
+```bash
+docker run -it --name dbha -d mirrors.tencent.com/sccmsp/bkm-dbha:${version}  bash -c "sleep 3600"
+```
+
+## Helm deployment
+```bash
+cd bk-dbha
+helm install . -g
+helm list
+```
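+
+To check that the release is running (a sketch, assuming the chart's default labels):
+
+```bash
+kubectl get pods -l app.kubernetes.io/name=bk-dbha
+kubectl logs -l app.kubernetes.io/name=bk-dbha --tail=100
+```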
diff --git a/dbm-services/common/dbha/ha-module/agent/agent.go b/dbm-services/common/dbha/ha-module/agent/agent.go
new file mode 100644
index 0000000000..552b7ef261
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/agent/agent.go
@@ -0,0 +1,2 @@
+// Package agent TODO
+package agent
diff --git a/dbm-services/common/dbha/ha-module/agent/connection.go b/dbm-services/common/dbha/ha-module/agent/connection.go
new file mode 100644
index 0000000000..283a686953
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/agent/connection.go
@@ -0,0 +1,71 @@
+package agent
+
+import (
+	"dbm-services/common/dbha/ha-module/log"
+	"fmt"
+	"net"
+	"strconv"
+	"sync"
+	"time"
+)
+
+// GMConnection maintains the agent-side tcp connection to a gm
+type GMConnection struct {
+	Ip            string
+	Port          int
+	NetConnection net.Conn
+	IsConnection  bool
+	LastFetchTime time.Time
+	IsClose       bool
+	Mutex         sync.Mutex
+}
+
+// HEADER is the magic header prefixing every report message
+const HEADER string = "HEADER"
+
+// Init dials the gm and marks the connection ready
+func (gm *GMConnection) Init() error {
+	address := gm.Ip + ":" + strconv.Itoa(gm.Port)
+	conn, err := net.Dial("tcp", address)
+	if err != nil {
+		log.Logger.Errorf("gm connection init failed. address:%s, err:%s", address, err.Error())
+		return err
+	}
+	gm.NetConnection = conn
+	gm.IsConnection = true
+	return nil
+}
+
+// ReportInstance reports instance detect info to the gm
+func (gm *GMConnection) ReportInstance(dbType string, jsonInfo []byte) error {
+	var writeBuf string
+	writeBuf += HEADER
+	writeBuf += "\r\n"
+	writeBuf += dbType
+	writeBuf += "\r\n"
+	writeBuf += strconv.Itoa(len(jsonInfo))
+	writeBuf += "\r\n"
+	writeBuf += string(jsonInfo)
+	n, err := gm.NetConnection.Write([]byte(writeBuf))
+	if err != nil {
+		log.Logger.Error("GMConf write failed. gm ip:", gm.Ip, " port:", gm.Port, " err:", err.Error())
+		return err
+	}
+	if n != len(writeBuf) {
+		err = fmt.Errorf("repoter GMConf length not equal, buf size:%d,real send buf size:%d", len(writeBuf), n)
+		log.Logger.Errorf(err.Error())
+		return err
+	}
+	readBuf := make([]byte, 2)
+	n, err = gm.NetConnection.Read(readBuf)
+	if err != nil {
+		log.Logger.Error("GMConf read failed. gm ip:", gm.Ip, " port:", gm.Port, " err:", err.Error())
+		return err
+	}
+	if n != 2 || string(readBuf) != "OK" {
+		err = fmt.Errorf("GMConf read failed, return:%s, expect: OK", string(readBuf))
+		log.Logger.Errorf(err.Error())
+		return err
+	}
+	return nil
+}
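+
+// On the wire, a report carrying the 2-byte payload "{}" looks like
+// (illustrative):
+//
+//	HEADER\r\nmysql\r\n2\r\n{}
+//
+// and the gm acknowledges with the two bytes "OK".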
diff --git a/dbm-services/common/dbha/ha-module/agent/monitor_agent.go b/dbm-services/common/dbha/ha-module/agent/monitor_agent.go
new file mode 100644
index 0000000000..7d6fca22d9
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/agent/monitor_agent.go
@@ -0,0 +1,448 @@
+package agent
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbmodule"
+	"dbm-services/common/dbha/ha-module/dbmodule/redis"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/monitor"
+	"dbm-services/common/dbha/ha-module/types"
+	"dbm-services/common/dbha/ha-module/util"
+	"fmt"
+	"net"
+	"strconv"
+	"sync"
+	"time"
+)
+
+// MonitorAgent agent work struct
+type MonitorAgent struct {
+	City   string
+	Campus string
+	Type   string
+	// agent ip
+	MonIp            string
+	LastFetchInsTime time.Time
+	LastFetchGMTime  time.Time
+	DBInstance       map[string]dbutil.DataBaseDetect
+	GMInstance       map[string]*GMConnection
+	// config file
+	Conf *config.Config
+	// API client to access cmdb metadata
+	CmDBClient *client.CmDBClient
+	// API client to access hadb
+	HaDBClient *client.HaDBClient
+	heartbeat  time.Time
+}
+
+// NewMonitorAgent creates a new agent to do detection
+func NewMonitorAgent(conf *config.Config, dbType string) (*MonitorAgent, error) {
+	var err error
+	agent := &MonitorAgent{
+		City:             conf.AgentConf.City,
+		Campus:           conf.AgentConf.Campus,
+		Type:             dbType,
+		LastFetchInsTime: time.Now(),
+		LastFetchGMTime:  time.Now(),
+		GMInstance:       map[string]*GMConnection{},
+		heartbeat:        time.Now(),
+		Conf:             conf,
+	}
+	agent.CmDBClient, err = client.NewCmDBClient(&conf.DBConf.CMDB, conf.GetCloudId())
+	if err != nil {
+		return nil, err
+	}
+
+	agent.HaDBClient, err = client.NewHaDBClient(&conf.DBConf.HADB, conf.GetCloudId())
+	if err != nil {
+		return nil, err
+	}
+
+	agent.MonIp = util.LocalIp
+
+	// register agent info into hadb
+	err = agent.registerAgentInfoToHaDB()
+	if err != nil {
+		return nil, err
+	}
+
+	// fetch alive GMConf instance
+	err = agent.FetchGMInstance()
+	if err != nil {
+		return nil, err
+	}
+
+	err = agent.FetchDBInstance()
+	if err != nil {
+		return nil, err
+	}
+
+	return agent, nil
+}
+
+// Process detects all instances in parallel periodically; after every
+// completed round it reports the agent's heartbeat info.
+func (a *MonitorAgent) Process(instances map[string]dbutil.DataBaseDetect) {
+	var wg sync.WaitGroup
+	log.Logger.Debugf("need to detect instances number:%d", len(a.DBInstance))
+	for _, ins := range instances {
+		wg.Add(1)
+		go func(ins dbutil.DataBaseDetect) {
+			defer wg.Done()
+			a.DoDetectSingle(ins)
+		}(ins)
+	}
+	wg.Wait()
+	a.DetectPostProcess()
+	time.Sleep(time.Second)
+}
+
+// Run agent main entry
+func (a *MonitorAgent) Run() error {
+	for {
+		a.RefreshInstanceCache()
+		a.RefreshGMCache()
+		a.Process(a.DBInstance)
+	}
+}
+
+// RefreshInstanceCache re-fetches the instance list when the cache has expired
+func (a *MonitorAgent) RefreshInstanceCache() {
+	if a.NeedRefreshInsCache() {
+		err := a.FetchDBInstance()
+		if err != nil {
+			log.Logger.Errorf("fetch %s instance failed. err:%s",
+				a.Type, err.Error())
+		}
+		a.flushInsFetchTime()
+	}
+}
+
+// DoDetectSingle do single instance detect
+func (a *MonitorAgent) DoDetectSingle(ins dbutil.DataBaseDetect) {
+	ip, port := ins.GetAddress()
+	log.Logger.Debugf("begin to detect instance:%s#%d", ip, port)
+	err := ins.Detection()
+	if err != nil {
+		log.Logger.Warnf("Detect db instance failed. ins:[%s:%d],dbType:%s status:%s,DeteckErr=%s",
+			ip, port, ins.GetType(), ins.GetStatus(), err.Error())
+	}
+
+	a.reportMonitor(ins, err)
+	if ins.NeedReporter() {
+		err = a.ReporterGM(ins)
+		if err != nil {
+			log.Logger.Errorf("reporter gm failed. err:%s", err.Error())
+		}
+
+		// reporter HADB
+		ip, port := ins.GetAddress()
+		err = a.HaDBClient.ReportDBStatus(a.MonIp, ip, port,
+			string(ins.GetType()), string(ins.GetStatus()))
+		if err != nil {
+			log.Logger.Errorf(
+				"reporter hadb instance status failed. err:%s, ip:%s, port:%d, db_type:%s, status:%s",
+				err.Error(), ip, port, ins.GetType(), ins.GetStatus())
+		}
+		ins.UpdateReporterTime()
+	}
+}
+
+// DetectPostProcess post agent heartbeat
+func (a *MonitorAgent) DetectPostProcess() {
+	err := a.reporterHeartbeat()
+	if err != nil {
+		log.Logger.Errorf("reporter heartbeat failed. err:%s", err.Error())
+	}
+	log.Logger.Infof("report agent heartbeat success.")
+}
+
+// RefreshGMCache refreshes the gm cache and deletes expired gm entries
+func (a *MonitorAgent) RefreshGMCache() {
+	if a.NeedRefreshGMCache() {
+		if err := a.FetchGMInstance(); err != nil {
+			log.Logger.Errorf("fetch gm failed. err:%s", err.Error())
+		}
+		a.flushGmFetchTime()
+	}
+
+	for ip, ins := range a.GMInstance {
+		ins.Mutex.Lock()
+		anHour := time.Now().Add(-60 * time.Minute)
+		// connect leak?
+		if ins.LastFetchTime.Before(anHour) {
+			ins.IsClose = true
+			log.Logger.Infof("gm:%s de-cached", ip)
+			delete(a.GMInstance, ip)
+		}
+		ins.Mutex.Unlock()
+	}
+
+	// don't return an error here; newly added gm may become available on the next refresh.
+	if len(a.GMInstance) == 0 {
+		log.Logger.Errorf("after refresh, no gm available")
+	}
+}
+
+// FetchDBInstance fetch instance list by city info
+func (a *MonitorAgent) FetchDBInstance() error {
+	rawInfo, err := a.CmDBClient.GetDBInstanceInfoByCity(a.City)
+
+	if err != nil {
+		log.Logger.Errorf("get instance info from cmdb failed. err:%s", err.Error())
+		return err
+	}
+
+	log.Logger.Debugf("fetch db instance info len:%d", len(rawInfo))
+	cb, ok := dbmodule.DBCallbackMap[types.DBType(a.Type)]
+	if !ok {
+		err = fmt.Errorf("can't find fetch %s instance callback", a.Type)
+		log.Logger.Error(err.Error())
+		return err
+	}
+	AllDbInstance, err := cb.FetchDBCallback(rawInfo, a.Conf)
+	if err != nil {
+		log.Logger.Errorf("fetch db instance failed. err:%s", err.Error())
+		return err
+	}
+
+	err = a.FetchInstancePass(types.DBType(a.Type), AllDbInstance)
+	if err != nil {
+		log.Logger.Errorf("fetch db instance pass failed,err:%s", err.Error())
+		return err
+	}
+
+	a.DBInstance, err = a.moduloHashSharding(AllDbInstance)
+	if err != nil {
+		log.Logger.Errorf("fetch modulo hash sharding failed. err:%s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// FetchGMInstance fetches an appropriate gm for the current agent (different city)
+func (a *MonitorAgent) FetchGMInstance() error {
+	gmInfo, err := a.HaDBClient.GetAliveGMInfo(a.Conf.AgentConf.FetchInterval)
+	if err != nil {
+		log.Logger.Errorf("get gm info failed. err:%s", err.Error())
+		return err
+	}
+
+	for _, info := range gmInfo {
+		if info.City == a.City || info.Cloud != a.Conf.AgentConf.Cloud {
+			continue
+		}
+		// needn't lock
+		_, ok := a.GMInstance[info.Ip]
+		if ok {
+			a.GMInstance[info.Ip].LastFetchTime = time.Now()
+		} else {
+			a.GMInstance[info.Ip] = &GMConnection{
+				Ip:            info.Ip,
+				Port:          info.Port,
+				LastFetchTime: time.Now(),
+				IsClose:       false,
+			}
+			err = a.GMInstance[info.Ip].Init()
+			if err != nil {
+				log.Logger.Errorf("init gm failed. gm_ip:%s, gm_port:%d, err:%s",
+					info.Ip, info.Port, err.Error())
+				return err
+			}
+		}
+	}
+	log.Logger.Infof("agent get aliveGmInfo:%d, GmInstance:%d",
+		len(gmInfo), len(a.GMInstance))
+	return nil
+}
+
+// FetchInstancePass gets instance passwords from the dbconfig center
+func (a *MonitorAgent) FetchInstancePass(dbType types.DBType,
+	insArr []dbutil.DataBaseDetect) error {
+	if len(insArr) == 0 {
+		return nil
+	}
+
+	count, err := redis.GetInstancePass(dbType, insArr, a.Conf)
+	if err != nil {
+		log.Logger.Errorf("get instance passwords failed[%s], fetched:%d, all:%d",
+			err.Error(), count, len(insArr))
+	}
+	return nil
+}
+
+// ReporterGM report detect info to gm
+func (a *MonitorAgent) ReporterGM(reporterInstance dbutil.DataBaseDetect) error {
+	if reporterInstance.GetStatus() == constvar.DBCheckSuccess ||
+		reporterInstance.GetStatus() == constvar.SSHCheckSuccess {
+		// if db is normal, needn't reporter gm
+		return nil
+	}
+	var err error
+	isReporter := false
+	ip, port := reporterInstance.GetAddress()
+
+	for _, gmIns := range a.GMInstance {
+		gmIns.Mutex.Lock()
+		if !gmIns.IsConnection {
+			gmIns.Mutex.Unlock()
+			continue
+		}
+		jsonInfo, err := reporterInstance.Serialization()
+		if err != nil {
+			gmIns.Mutex.Unlock()
+			log.Logger.Errorf("instance Serialization failed. err:%s", err.Error())
+			return err
+		}
+		err = gmIns.ReportInstance(string(reporterInstance.GetType()), jsonInfo)
+		if err != nil {
+			log.Logger.Warnf("reporter gm failed. gm_ip:%s, gm_port:%d, err:%s", ip, port, err.Error())
+			gmIns.IsConnection = false
+			err = a.RepairGM(gmIns)
+			if err != nil {
+				log.Logger.Errorf("repair gm failed:%s", err.Error())
+				gmIns.Mutex.Unlock()
+				return err
+			}
+		} else {
+			isReporter = true
+			gmIns.Mutex.Unlock()
+			break
+		}
+		gmIns.Mutex.Unlock()
+	}
+
+	if !isReporter {
+		err = fmt.Errorf("all gm disconnect")
+		log.Logger.Error(err.Error())
+		return err
+	}
+	return nil
+}
+
+// NeedRefreshInsCache whether needed to refresh instance's cache
+func (a *MonitorAgent) NeedRefreshInsCache() bool {
+	return time.Now().After(a.LastFetchInsTime.Add(time.Second * time.Duration(a.Conf.AgentConf.FetchInterval)))
+}
+
+// NeedRefreshGMCache whether needed to refresh gm's cache
+func (a *MonitorAgent) NeedRefreshGMCache() bool {
+	return time.Now().After(a.LastFetchGMTime.Add(time.Second * time.Duration(a.Conf.AgentConf.FetchInterval)))
+}
+
+// flushInsFetchTime flush the instance time
+func (a *MonitorAgent) flushInsFetchTime() {
+	a.LastFetchInsTime = time.Now()
+}
+
+// flushGmFetchTime flush the gm time
+func (a *MonitorAgent) flushGmFetchTime() {
+	a.LastFetchGMTime = time.Now()
+}
+
+// RepairGM reconnects to a gm in the background when its connection breaks
+func (a *MonitorAgent) RepairGM(gmIns *GMConnection) error {
+	go func(gmIns *GMConnection) {
+		for {
+			gmIns.Mutex.Lock()
+			if gmIns.IsClose {
+				gmIns.Mutex.Unlock()
+				return
+			}
+			address := gmIns.Ip + ":" + strconv.Itoa(gmIns.Port)
+			conn, err := net.Dial("tcp", address)
+			if err != nil {
+				log.Logger.Warn(
+					"RepairGM: ip:", gmIns.Ip, " port:", gmIns.Port, " connect failed, err:", err.Error())
+			} else {
+				gmIns.NetConnection = conn
+				gmIns.IsConnection = true
+				log.Logger.Info("RepairGM: ip:", gmIns.Ip, " port:", gmIns.Port, " connect success.")
+				gmIns.Mutex.Unlock()
+				return
+			}
+			gmIns.Mutex.Unlock()
+			time.Sleep(10 * time.Second)
+		}
+	}(gmIns)
+	return nil
+}
+
+// registerAgentInfoToHaDB registers the current agent's info to hadb
+func (a *MonitorAgent) registerAgentInfoToHaDB() error {
+	err := a.HaDBClient.RegisterDBHAInfo(
+		a.MonIp,
+		0,
+		"agent",
+		a.City,
+		a.Campus,
+		a.Type)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// moduloHashSharding rehashes all instances into the detect map; for each
+// ip only the instance with the minimum port is detected, others are ignored.
+func (a *MonitorAgent) moduloHashSharding(allDbInstance []dbutil.DataBaseDetect) (map[string]dbutil.DataBaseDetect,
+	error) {
+	mod, modValue, err := a.HaDBClient.AgentGetHashValue(a.MonIp, a.Type, a.Conf.AgentConf.FetchInterval)
+	if err != nil {
+		log.Logger.Errorf("get Modulo failed and wait next refresh time. err:%s", err.Error())
+		return nil, err
+	}
+
+	result := make(map[string]dbutil.DataBaseDetect)
+	for _, rawIns := range allDbInstance {
+		rawIp, rawPort := rawIns.GetAddress()
+		if ins, ok := result[rawIp]; !ok {
+			if util.CRC32(rawIp)%mod == modValue {
+				result[rawIp] = rawIns
+			}
+		} else {
+			_, port := ins.GetAddress()
+			if rawPort < port {
+				result[rawIp] = rawIns
+			}
+		}
+	}
+	return result, nil
+}
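+
+// For example (illustrative): if HADB hands out mod=3 and this agent draws
+// modValue=1, the agent keeps only the ips whose util.CRC32(ip)%3 == 1, so
+// each ip is probed by exactly one of the three agents, and only the lowest
+// port instance on that ip is detected.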
+
+// reporterHeartbeat send heartbeat to hadb
+func (a *MonitorAgent) reporterHeartbeat() error {
+	interval := time.Since(a.heartbeat).Seconds()
+	err := a.HaDBClient.ReporterAgentHeartbeat(a.Type, int(interval))
+	a.heartbeat = time.Now()
+	return err
+}
+
+// reportMonitor report monitor
+func (a *MonitorAgent) reportMonitor(ins dbutil.DataBaseDetect, err error) {
+	var errInfo string
+	if err != nil {
+		errInfo = err.Error()
+	} else {
+		errInfo = "no err information"
+	}
+
+	switch ins.GetStatus() {
+	case constvar.SSHCheckFailed:
+		content := "agent detect failed by ssh check, err:" + errInfo
+		monitor.MonitorSendDetect(ins, constvar.DBHA_EVENT_DETECT_SSH, content)
+	case constvar.AUTHCheckFailed:
+		content := "agent detect failed by auth check, err:" + errInfo
+		monitor.MonitorSendDetect(ins, constvar.DBHA_EVENT_DETECT_AUTH, content)
+	case constvar.DBCheckFailed:
+		content := "agent detect failed by db check, err" + errInfo
+		monitor.MonitorSendDetect(ins, constvar.DBHA_EVENT_DETECT_DB, content)
+	default:
+		break
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/bk-dbha/.helmignore b/dbm-services/common/dbha/ha-module/bk-dbha/.helmignore
new file mode 100644
index 0000000000..0e8a0eb36f
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/bk-dbha/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/dbm-services/common/dbha/ha-module/bk-dbha/Chart.yaml b/dbm-services/common/dbha/ha-module/bk-dbha/Chart.yaml
new file mode 100644
index 0000000000..a446d6fe9e
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/bk-dbha/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: bk-dbha
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "1.0.0"
diff --git a/dbm-services/common/dbha/ha-module/bk-dbha/templates/NOTES.txt b/dbm-services/common/dbha/ha-module/bk-dbha/templates/NOTES.txt
new file mode 100644
index 0000000000..504886fc10
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/bk-dbha/templates/NOTES.txt
@@ -0,0 +1,22 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "bk-dbha.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "bk-dbha.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "bk-dbha.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "bk-dbha.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
diff --git a/dbm-services/common/dbha/ha-module/bk-dbha/templates/_helpers.tpl b/dbm-services/common/dbha/ha-module/bk-dbha/templates/_helpers.tpl
new file mode 100644
index 0000000000..5e5d522de5
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/bk-dbha/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "bk-dbha.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "bk-dbha.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "bk-dbha.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "bk-dbha.labels" -}}
+helm.sh/chart: {{ include "bk-dbha.chart" . }}
+{{ include "bk-dbha.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "bk-dbha.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "bk-dbha.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "bk-dbha.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "bk-dbha.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
diff --git a/dbm-services/common/dbha/ha-module/bk-dbha/templates/configmap.yaml b/dbm-services/common/dbha/ha-module/bk-dbha/templates/configmap.yaml
new file mode 100644
index 0000000000..8d620650ff
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/bk-dbha/templates/configmap.yaml
@@ -0,0 +1,76 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "bk-dbha.fullname" . }}
+  labels:
+    {{- include "bk-dbha.labels" . | nindent 4 }}
+data:
+  config.yaml: |-
+    agent_conf:
+      active_db_type: [
+        "tendbha:backend",
+        "tendbha:proxy",
+      ]
+      city: "3"
+      campus: "深圳"
+      fetch_interval: 60
+      reporter_interval: 120
+    gm_conf:
+      city: "4"
+      campus: "上海"
+      liston_port: 50000
+      GDM:
+        dup_expire: 600
+        scan_interval: 1
+      GMM:
+      GQA:
+        idc_cache_expire: 300
+        single_switch_idc: 50
+        single_switch_interval: 86400
+        single_switch_limit:  48
+        all_host_switch_limit:  150
+        all_switch_interval:  7200
+      GCM:
+        allowed_checksum_max_offset: 2
+        allowed_slave_delay_max: 600
+        allowed_time_delay_max: 300
+        exec_slow_kbytes: 0
+    db_conf:
+      hadb:
+        host: "hadb-api-host"
+        port: 8080
+        timeout: 30
+        bk_conf:
+          bk_app_code: "xxxx"
+          bk_app_secret: "xxxx"
+      cmdb:
+        host: "cmdb-api-host"
+        port: 80
+        timeout: 10
+        bk_conf:
+          bk_app_code: "xxxx"
+          bk_app_secret: "xxxx"
+      mysql: 
+        user: "mysql-conn-user"
+        pass: "mysql-conn-pass"
+        proxy_user: "proxy-conn-user"
+        proxy_pass: "proxy-conn-pass"
+        timeout: 10
+      redis:
+    dns:
+      bind_conf:
+        host: "bind-api-host"
+        port: 80
+        user: "xxxx"
+        pass: "xxxx"
+        timeout: 10
+        bk_conf:
+          bk_app_code: "xxxx"
+          bk_app_secret: "xxxx"
+      polaris_conf:
+    ssh:
+      port: 36000
+      user: "mysql"
+      pass: "mysql-user-pass"
+      dest: "agent"
+      timeout: 10
diff --git a/dbm-services/common/dbha/ha-module/bk-dbha/templates/deployment.yaml b/dbm-services/common/dbha/ha-module/bk-dbha/templates/deployment.yaml
new file mode 100644
index 0000000000..632bd0ec2d
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/bk-dbha/templates/deployment.yaml
@@ -0,0 +1,75 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "bk-dbha.fullname" . }}
+  labels:
+    {{- include "bk-dbha.labels" . | nindent 4 }}
+spec:
+  {{- if not .Values.autoscaling.enabled }}
+  replicas: {{ .Values.replicaCount }}
+  {{- end }}
+  selector:
+    matchLabels:
+      {{- include "bk-dbha.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      {{- with .Values.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      labels:
+        {{- include "bk-dbha.selectorLabels" . | nindent 8 }}
+    spec:
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "bk-dbha.serviceAccountName" . }}
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      volumes:
+        -
+          name: config-volume
+          configMap:
+            name:  {{ include "bk-dbha.fullname" . }}
+            items:
+              -
+                key: config.yaml
+                path: config.yaml 
+                mode: 420
+            defaultMode: 272
+      containers:
+        #- command:
+        #  - bash 
+        #  - -c
+        #  - "sleep 3600"
+        - args:
+          - -config_file=/etc/config/config.yaml
+          - -type=gm
+          name: {{ .Chart.Name }}
+          securityContext:
+            {{- toYaml .Values.securityContext | nindent 12 }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          ports:
+            - name: http
+              containerPort: 80
+              protocol: TCP
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          volumeMounts:
+            -
+              name: config-volume
+              mountPath: /etc/config
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
diff --git a/dbm-services/common/dbha/ha-module/bk-dbha/templates/hpa.yaml b/dbm-services/common/dbha/ha-module/bk-dbha/templates/hpa.yaml
new file mode 100644
index 0000000000..ff0789158a
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/bk-dbha/templates/hpa.yaml
@@ -0,0 +1,28 @@
+{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "bk-dbha.fullname" . }}
+  labels:
+    {{- include "bk-dbha.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ include "bk-dbha.fullname" . }}
+  minReplicas: {{ .Values.autoscaling.minReplicas }}
+  maxReplicas: {{ .Values.autoscaling.maxReplicas }}
+  metrics:
+    {{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: cpu
+        targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
+    {{- end }}
+    {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    - type: Resource
+      resource:
+        name: memory
+        targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
+    {{- end }}
+{{- end }}
diff --git a/dbm-services/common/dbha/ha-module/bk-dbha/templates/ingress.yaml b/dbm-services/common/dbha/ha-module/bk-dbha/templates/ingress.yaml
new file mode 100644
index 0000000000..cc8caa093f
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/bk-dbha/templates/ingress.yaml
@@ -0,0 +1,61 @@
+{{- if .Values.ingress.enabled -}}
+{{- $fullName := include "bk-dbha.fullname" . -}}
+{{- $svcPort := .Values.service.port -}}
+{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
+  {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
+  {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
+  {{- end }}
+{{- end }}
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1beta1
+{{- else -}}
+apiVersion: extensions/v1beta1
+{{- end }}
+kind: Ingress
+metadata:
+  name: {{ $fullName }}
+  labels:
+    {{- include "bk-dbha.labels" . | nindent 4 }}
+  {{- with .Values.ingress.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+spec:
+  {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
+  ingressClassName: {{ .Values.ingress.className }}
+  {{- end }}
+  {{- if .Values.ingress.tls }}
+  tls:
+    {{- range .Values.ingress.tls }}
+    - hosts:
+        {{- range .hosts }}
+        - {{ . | quote }}
+        {{- end }}
+      secretName: {{ .secretName }}
+    {{- end }}
+  {{- end }}
+  rules:
+    {{- range .Values.ingress.hosts }}
+    - host: {{ .host | quote }}
+      http:
+        paths:
+          {{- range .paths }}
+          - path: {{ .path }}
+            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
+            pathType: {{ .pathType }}
+            {{- end }}
+            backend:
+              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
+              service:
+                name: {{ $fullName }}
+                port:
+                  number: {{ $svcPort }}
+              {{- else }}
+              serviceName: {{ $fullName }}
+              servicePort: {{ $svcPort }}
+              {{- end }}
+          {{- end }}
+    {{- end }}
+{{- end }}
diff --git a/dbm-services/common/dbha/ha-module/bk-dbha/templates/service.yaml b/dbm-services/common/dbha/ha-module/bk-dbha/templates/service.yaml
new file mode 100644
index 0000000000..1f8b72b12e
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/bk-dbha/templates/service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "bk-dbha.fullname" . }}
+  labels:
+    {{- include "bk-dbha.labels" . | nindent 4 }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - port: {{ .Values.service.port }}
+      targetPort: http
+      protocol: TCP
+      name: http
+  selector:
+    {{- include "bk-dbha.selectorLabels" . | nindent 4 }}
diff --git a/dbm-services/common/dbha/ha-module/bk-dbha/templates/serviceaccount.yaml b/dbm-services/common/dbha/ha-module/bk-dbha/templates/serviceaccount.yaml
new file mode 100644
index 0000000000..13e4e4d57a
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/bk-dbha/templates/serviceaccount.yaml
@@ -0,0 +1,12 @@
+{{- if .Values.serviceAccount.create -}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ include "bk-dbha.serviceAccountName" . }}
+  labels:
+    {{- include "bk-dbha.labels" . | nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+{{- end }}
diff --git a/dbm-services/common/dbha/ha-module/bk-dbha/templates/tests/test-connection.yaml b/dbm-services/common/dbha/ha-module/bk-dbha/templates/tests/test-connection.yaml
new file mode 100644
index 0000000000..1c065c14f1
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/bk-dbha/templates/tests/test-connection.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{ include "bk-dbha.fullname" . }}-test-connection"
+  labels:
+    {{- include "bk-dbha.labels" . | nindent 4 }}
+  annotations:
+    "helm.sh/hook": test
+spec:
+  containers:
+    - name: wget
+      image: busybox
+      command: ['wget']
+      args: ['{{ include "bk-dbha.fullname" . }}:{{ .Values.service.port }}']
+  restartPolicy: Never
diff --git a/dbm-services/common/dbha/ha-module/bk-dbha/values.yaml b/dbm-services/common/dbha/ha-module/bk-dbha/values.yaml
new file mode 100644
index 0000000000..12c3277a22
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/bk-dbha/values.yaml
@@ -0,0 +1,82 @@
+# Default values for bk-dbha.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+replicaCount: 1
+
+image:
+  repository: mirrors.tencent.com/sccmsp/bkm-dbha
+  pullPolicy: IfNotPresent
+  # Overrides the image tag whose default is the chart appVersion.
+  tag: "1.0.1"
+
+imagePullSecrets: []
+nameOverride: "bk-dbha"
+fullnameOverride: "bk-dbha"
+
+serviceAccount:
+  # Specifies whether a service account should be created
+  create: true
+  # Annotations to add to the service account
+  annotations: {}
+  # The name of the service account to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name: ""
+
+podAnnotations: {}
+
+podSecurityContext: {}
+  # fsGroup: 2000
+
+securityContext: {}
+  # capabilities:
+  #   drop:
+  #   - ALL
+  # readOnlyRootFilesystem: true
+  # runAsNonRoot: true
+  # runAsUser: 1000
+
+service:
+  type: ClusterIP
+  port: 80
+
+ingress:
+  enabled: false
+  className: ""
+  annotations: {}
+    # kubernetes.io/ingress.class: nginx
+    # kubernetes.io/tls-acme: "true"
+  hosts:
+    - host: chart-example.local
+      paths:
+        - path: /
+          pathType: ImplementationSpecific
+  tls: []
+  #  - secretName: chart-example-tls
+  #    hosts:
+  #      - chart-example.local
+
+resources: {}
+  # We usually recommend not to specify default resources and to leave this as a conscious
+  # choice for the user. This also increases chances charts run on environments with little
+  # resources, such as Minikube. If you do want to specify resources, uncomment the following
+  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 100
+  targetCPUUtilizationPercentage: 80
+  # targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/dbm-services/common/dbha/ha-module/build.sh b/dbm-services/common/dbha/ha-module/build.sh
new file mode 100755
index 0000000000..2622eef8da
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/build.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+go build dbha.go
diff --git a/dbm-services/common/dbha/ha-module/client/client.go b/dbm-services/common/dbha/ha-module/client/client.go
new file mode 100644
index 0000000000..9b995ef437
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/client/client.go
@@ -0,0 +1,342 @@
+// Package client http clients used by dbha to access the cmdb, hadb and name service apis
+package client
+
+import (
+	"bytes"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/util"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"math/rand"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"strings"
+	"time"
+)
+
+const (
+	// apiServer response code
+	statusSuccess int = 0
+
+	// job execute user
+	jobExecuteUser = "mysql"
+)
+
+// HttpBodyParseCB callback type used to parse an http response body
+type HttpBodyParseCB func([]byte) (interface{}, error)
+
+// APIServerResponse response info from remote api server
+type APIServerResponse struct {
+	Code int             `json:"code"`
+	Msg  string          `json:"msg"`
+	Data json.RawMessage `json:"data"`
+}
+
+// Client use to request api server
+type Client struct {
+	Type       string
+	CloudId    int
+	Conf       *config.APIConfig
+	apiServers []string
+
+	// client for apiServers
+	client *http.Client
+}
+
+// SetHttpClient set http client info
+func (c *Client) SetHttpClient(h *http.Client) {
+	c.client = h
+}
+
+// SetApiServers set one or multi api server address
+func (c *Client) SetApiServers(s []string) {
+	for _, host := range s {
+		c.apiServers = append(c.apiServers, host)
+	}
+}
+
+// NewClientByAddrs init a new client to request api server
+func NewClientByAddrs(addrs []string, apiType string) (*Client, error) {
+	cli := &Client{}
+
+	cli.Type = apiType
+	for _, host := range addrs {
+		cli.apiServers = append(cli.apiServers, host)
+	}
+
+	cli.client = &http.Client{
+		Transport: &http.Transport{},
+	}
+
+	return cli, nil
+}
+
+// DoNew send http request and receive response
+func (c *Client) DoNew(method, url string, params interface{}, headers map[string]string) (*APIServerResponse, error) {
+	resp, err := c.DoNewForCB(method, url, params, headers, APIBodyParseCB)
+	if err != nil {
+		return nil, err
+	} else if resp == nil {
+		return nil, fmt.Errorf("url %s return nil response", url)
+	} else {
+		return resp.(*APIServerResponse), nil
+	}
+}
+
+// DoNewForCB process http body by callback, and support retry
+func (c *Client) DoNewForCB(
+	method, url string, params interface{}, headers map[string]string, bodyCB HttpBodyParseCB,
+) (interface{}, error) {
+	if headers == nil {
+		headers = map[string]string{}
+	}
+
+	var retryErr error
+	for retryIdx := 0; retryIdx < 5; retryIdx++ {
+		response, err := c.doNewInner(method, url, params, headers, bodyCB)
+		if err == nil {
+			return response, nil
+		}
+		// assign instead of shadowing retryErr, so the last error survives the loop
+		retryErr = err
+	}
+	return nil, retryErr
+}
+
+// APIBodyParseCB callback to parse api response body
+func APIBodyParseCB(b []byte) (interface{}, error) {
+	result := &APIServerResponse{}
+	err := json.Unmarshal(b, result)
+	if err != nil {
+		log.Logger.Errorf("unmarshal %s to %+v get an error:%s", string(b), *result, err.Error())
+		return nil, fmt.Errorf("json unmarshal failed, err: %+v", err)
+	}
+
+	// check response and data is nil
+	if result.Code != statusSuccess {
+		log.Logger.Errorf("result.Code is %d not equal to %d,message:%s,data:%s",
+			result.Code, statusSuccess, result.Msg, string(result.Data))
+		if len(result.Data) != 0 {
+			return nil, fmt.Errorf("[%v - %v - %s]", result.Code, result.Msg, string(result.Data))
+		}
+		return nil, fmt.Errorf("%v - %v", result.Code, result.Msg)
+	}
+	return result, nil
+}
+
+// doNewInner execute the request once and process the response via bodyCB
+func (c *Client) doNewInner(method, url string, params interface{},
+	headers map[string]string, bodyCB HttpBodyParseCB) (interface{}, error) {
+	host, err := c.nextTarget()
+	if err != nil {
+		log.Logger.Errorf("nextTarget get an error:%s", err.Error())
+		return nil, fmt.Errorf("get target host failed, err: %+v", err)
+	}
+	log.Logger.Debugf("host:%s\n", host)
+
+	body, err := json.Marshal(params)
+	if err != nil {
+		log.Logger.Errorf("marshal %+v get an error:%s", params, err.Error())
+		return nil, fmt.Errorf("json marshal param failed, err: %+v", err)
+	}
+	req, err := http.NewRequest(method, host+url, bytes.NewReader(body))
+
+	if err != nil {
+		log.Logger.Errorf("create a new request(%s,%s,%+v) get an error:%s", method, host+url, params, err.Error())
+		return nil, fmt.Errorf("new request failed, err: %+v", err)
+	}
+
+	// TODO set auth...
+	c.setHeader(req, headers)
+
+	dump, _ := httputil.DumpRequest(req, true)
+	log.Logger.Debugf("begin http request: %s", dump)
+
+	resp, err := c.client.Do(req)
+	if err != nil {
+		log.Logger.Errorf("invoking http request failed, url: %s, error:%s", req.URL.String(), err.Error())
+		return nil, fmt.Errorf("do http request failed, err: %+v", err)
+	}
+	defer func() {
+		if resp == nil {
+			return
+		}
+		if err := resp.Body.Close(); err != nil {
+			log.Logger.Errorf("close response body failed, err:%s", err.Error())
+		}
+	}()
+
+	// occasional gateway timeouts have been seen recently; retry to check whether they reproduce within the window
+	for i := 1; i <= 5; i++ {
+		// 500: a release may be in progress
+		// 429: occasional rate limiting under heavy concurrency
+		// 504: exact cause unknown, retry for now
+		if !util.HasElem(resp.StatusCode, []int{http.StatusInternalServerError, http.StatusTooManyRequests,
+			http.StatusGatewayTimeout}) {
+			break
+		}
+
+		wait := i*i*1000 + rand.Intn(1000)
+		time.Sleep(time.Duration(wait) * time.Millisecond)
+		log.Logger.Warnf("client.Do result with %s, wait %d milliSeconds and retry, url: %s",
+			resp.Status, wait, req.URL.String())
+		// re-arm the request body before retrying: the previous attempt drained it
+		if req.GetBody != nil {
+			req.Body, _ = req.GetBody()
+		}
+		resp, err = c.client.Do(req)
+		if err != nil {
+			log.Logger.Errorf("an error occur while invoking client.Do, url: %s, error:%s",
+				req.URL.String(), err.Error())
+			return nil, fmt.Errorf("do http request failed, err: %+v", err)
+		}
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		bodyBytes, err := httputil.DumpResponse(resp, true)
+		if err != nil {
+			log.Logger.Errorf("read resp.body failed, err: %s", err.Error())
+			return nil, fmt.Errorf("http response: %s, status code: %d", string(bodyBytes), resp.StatusCode)
+		}
+		log.Logger.Debugf("http response: %s", string(bodyBytes))
+		return nil, fmt.Errorf("http response: %s, status code: %d", string(bodyBytes), resp.StatusCode)
+	}
+
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		err = fmt.Errorf("read resp.body error:%s", err.Error())
+		log.Logger.Error(err.Error())
+		return nil, err
+	}
+
+	result, err := bodyCB(b)
+	if err != nil {
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	return result, nil
+}
+
+// Do request main entry
+func (c *Client) Do(method, url string, params interface{}) (*APIServerResponse, error) {
+	return c.DoNew(method, url, params, map[string]string{})
+}
+
+// nextTarget pick a random api server and probe until a reachable one is found
+func (c *Client) nextTarget() (string, error) {
+	rand.Seed(time.Now().UnixNano())
+	startPos := rand.Intn(len(c.apiServers))
+	pos := startPos
+	for {
+		gotHost := c.apiServers[pos]
+		u, err := url.Parse(gotHost)
+		if err != nil {
+			if pos = (pos + 1) % len(c.apiServers); pos == startPos {
+				return "", fmt.Errorf("all hosts are down, uptime tests are failing. err:%s", err.Error())
+			}
+			continue
+		}
+		if util.HostCheck(u.Host) {
+			return gotHost, nil
+		}
+		log.Logger.Errorf("host %s is down", gotHost)
+		if pos = (pos + 1) % len(c.apiServers); pos == startPos {
+			return "", fmt.Errorf("all hosts are down, uptime tests are failing")
+		}
+	}
+}
+
+func (c *Client) setHeader(req *http.Request, others map[string]string) {
+	user := jobExecuteUser
+	if _, ok := others["user"]; ok {
+		user = strings.TrimSpace(others["user"])
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("user", user)
+
+	if auth, ok := others[constvar.BkApiAuthorization]; ok {
+		req.Header.Set(constvar.BkApiAuthorization, auth)
+	}
+	// // Set JWT token
+	// if token, err := auth.Sign(user); err == nil {
+	//  req.Header.Set("Authorization", "Bearer "+token)
+	// }
+}
+
+// ConvertParamForGetRequest convert a param map for a GET request:
+// the values are encoded into "URL encoded" form
+// ("bar=baz&foo=qux"), sorted by key.
+func (c *Client) ConvertParamForGetRequest(rawParam map[string]string) string {
+	values := url.Values{}
+	for k, v := range rawParam {
+		values.Add(k, v)
+	}
+	param := values.Encode()
+
+	return param
+}
+
+// SpliceUrlByPrefix assemble url
+func (c *Client) SpliceUrlByPrefix(prefix string, name string, param string) string {
+	var prefixUrl string
+	pu, err := url.Parse(prefix)
+	if err != nil {
+		log.Logger.Errorf("parse prefix url is invalid, err:%s", err.Error())
+		return "/"
+	}
+	prefixUrl = pu.String()
+
+	nu, err := url.Parse(name)
+	if err != nil {
+		log.Logger.Errorf("parse name url is invalid, err:%s", err.Error())
+		return "/"
+	}
+	nameUrl := nu.String()
+
+	if param == "" {
+		return prefixUrl + "/" + nameUrl
+	} else {
+		return prefixUrl + "/" + nameUrl + "?" + param
+	}
+}
+
+// SpliceUrl assemble url by param
+func (c *Client) SpliceUrl(name string, param string) string {
+	nu, err := url.Parse(name)
+	if err != nil {
+		log.Logger.Errorf("parse name url is invalid, err:%s", err.Error())
+		return ""
+	}
+	nameUrl := nu.String()
+	if param == "" {
+		return "/" + nameUrl
+	} else {
+		return "/" + nameUrl + "?" + param
+	}
+}
+
+// NewAPIClient create new api http client
+func NewAPIClient(c *config.APIConfig, apiType string, cloudId int) (Client, error) {
+	cli := Client{
+		Type:    apiType,
+		CloudId: cloudId,
+		Conf:    c,
+	}
+
+	cli.SetHttpClient(&http.Client{
+		Transport: &http.Transport{},
+		Timeout:   time.Second * time.Duration(c.Timeout),
+	})
+
+	// use http request at present
+	cli.SetApiServers([]string{
+		fmt.Sprintf("http://%s:%d", c.Host, c.Port),
+	})
+
+	return cli, nil
+}
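A minimal usage sketch of the client above. Only NewAPIClient, Do and APIServerResponse come from this file; the host value and the /ping endpoint are placeholders, not real dbm apis:

package main

import (
	"fmt"
	"net/http"

	"dbm-services/common/dbha/ha-module/client"
	"dbm-services/common/dbha/ha-module/config"
)

func main() {
	// Host/Port/Timeout are the APIConfig fields NewAPIClient reads; the values are placeholders.
	conf := &config.APIConfig{Host: "cmdb-api-host", Port: 80, Timeout: 10}
	cli, err := client.NewAPIClient(conf, "cmdb", 0)
	if err != nil {
		panic(err)
	}
	// "/ping" is a hypothetical endpoint used only for illustration.
	resp, err := cli.Do(http.MethodPost, "/ping", map[string]interface{}{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("code=%d msg=%s\n", resp.Code, resp.Msg)
}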
diff --git a/dbm-services/common/dbha/ha-module/client/cmdb.go b/dbm-services/common/dbha/ha-module/client/cmdb.go
new file mode 100644
index 0000000000..d220a7a248
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/client/cmdb.go
@@ -0,0 +1,208 @@
+package client
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/util"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strconv"
+)
+
+// CmDBClient client to request cmdb
+type CmDBClient struct {
+	Client
+}
+
+// NewCmDBClient init a new cmdb client to request
+func NewCmDBClient(conf *config.APIConfig, cloudId int) (*CmDBClient, error) {
+	c, err := NewAPIClient(conf, constvar.CmDBName, cloudId)
+	return &CmDBClient{c}, err
+}
+
+// GetDBInstanceInfoByIp fetch instance info from cmdb by ip
+func (c *CmDBClient) GetDBInstanceInfoByIp(ip string) ([]interface{}, error) {
+	var res []interface{}
+	req := map[string]interface{}{
+		"db_cloud_token": c.Conf.BKConf.BkToken,
+		"bk_cloud_id":    c.CloudId,
+		"addresses":      []string{ip},
+	}
+
+	response, err := c.DoNew(
+		http.MethodPost, c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.CmDBInstanceUrl, ""), req, nil)
+	if err != nil {
+		return nil, err
+	}
+	if response.Code != 0 {
+		return nil, fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &res)
+	if err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+// GetDBInstanceInfoByCity fetch instances in running or available status by logical city
+func (c *CmDBClient) GetDBInstanceInfoByCity(area string) ([]interface{}, error) {
+	areaId, err := strconv.Atoi(area)
+	if err != nil {
+		log.Logger.Errorf("city is invalid, city:%s", area)
+		return nil, err
+	}
+
+	req := map[string]interface{}{
+		"db_cloud_token":   c.Conf.BKConf.BkToken,
+		"bk_cloud_id":      c.CloudId,
+		"logical_city_ids": []int{areaId},
+		"statuses":         []string{constvar.RUNNING, constvar.AVAILABLE},
+	}
+
+	response, err := c.DoNew(
+		http.MethodPost, c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.CmDBInstanceUrl, ""), req, nil)
+	if err != nil {
+		return nil, err
+	}
+	if response.Code != 0 {
+		return nil, fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+
+	var res []interface{}
+	err = json.Unmarshal(response.Data, &res)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+// SwapMySQLRole swap mysql master and slave's cmdb info
+func (c *CmDBClient) SwapMySQLRole(masterIp string, masterPort int, slaveIp string, slavePort int) error {
+	payloads := []map[string]interface{}{
+		{
+			"instance1": map[string]interface{}{
+				"ip":   masterIp,
+				"port": masterPort,
+			},
+			"instance2": map[string]interface{}{
+				"ip":   slaveIp,
+				"port": slavePort,
+			},
+		},
+	}
+
+	req := map[string]interface{}{
+		"db_cloud_token": c.Conf.BKConf.BkToken,
+		"bk_cloud_id":    c.CloudId,
+		"payloads":       payloads,
+	}
+	log.Logger.Debugf("SwapMySQLRole param:%v", req)
+
+	response, err := c.DoNew(
+		http.MethodPost, c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.CmDBSwapRoleUrl, ""), req, nil)
+	if err != nil {
+		return err
+	}
+	if response.Code != 0 {
+		return fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	return nil
+}
+
+// SwapRedisRole swap redis master and slave's role info
+func (c *CmDBClient) SwapRedisRole(domain string, masterIp string,
+	masterPort int, slaveIp string, slavePort int) error {
+	payload := map[string]interface{}{
+		"master": map[string]interface{}{
+			"ip":   masterIp,
+			"port": masterPort,
+		},
+		"slave": map[string]interface{}{
+			"ip":   slaveIp,
+			"port": slavePort,
+		},
+		"domain": domain,
+	}
+
+	req := map[string]interface{}{
+		"db_cloud_token": c.Conf.BKConf.BkToken,
+		"bk_cloud_id":    c.CloudId,
+		"payload":        payload,
+	}
+
+	log.Logger.Debugf("SwapRedisRole param:%v", req)
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.CmDBRedisSwapUrl, ""), req, nil)
+	if err != nil {
+		return err
+	}
+	if response.Code != 0 {
+		return fmt.Errorf("%s failed,return code:%d,msg:%s",
+			util.AtWhere(), response.Code, response.Msg)
+	}
+	return nil
+}
+
+// UpdateDBStatus update instance's status
+func (c *CmDBClient) UpdateDBStatus(ip string, port int, status string) error {
+	req := map[string]interface{}{
+		"db_cloud_token": c.Conf.BKConf.BkToken,
+		"bk_cloud_id":    c.CloudId,
+		"payloads": []map[string]interface{}{
+			{
+				"ip":     ip,
+				"port":   port,
+				"status": status,
+			},
+		},
+	}
+
+	log.Logger.Debugf("UpdateDBStatus param:%v", req)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.CmDBUpdateStatusUrl, ""), req, nil)
+	if err != nil {
+		return err
+	}
+	if response.Code != 0 {
+		return fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	return nil
+}
+
+// GetEntryDetail get cluster's entry(domain) info
+func (c *CmDBClient) GetEntryDetail(
+	cluster string,
+) (map[string]interface{}, error) {
+	res := make(map[string]interface{})
+	req := map[string]interface{}{
+		"db_cloud_token": c.Conf.BKConf.BkToken,
+		"bk_cloud_id":    c.CloudId,
+		"domains":        []string{cluster},
+	}
+
+	log.Logger.Debugf("GetEntryDetail param:%v", req)
+	response, err := c.DoNew(
+		http.MethodPost, c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.CmDBEntryDetailUrl, ""), req, nil,
+	)
+
+	if err != nil {
+		return nil, err
+	}
+
+	if response.Code != 0 {
+		cmdbErr := fmt.Errorf("%s failed, return code:%d, msg:%s",
+			util.AtWhere(), response.Code, response.Msg)
+		return nil, cmdbErr
+	}
+
+	err = json.Unmarshal(response.Data, &res)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
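Together these endpoints cover the cmdb side of a mysql failover: mark the dead master's status, then swap roles with its slave. A hedged sketch of such a flow; all addresses and the status string are placeholder values:

package main

import (
	"fmt"

	"dbm-services/common/dbha/ha-module/client"
	"dbm-services/common/dbha/ha-module/config"
)

func main() {
	conf := &config.APIConfig{Host: "cmdb-api-host", Port: 80, Timeout: 10}
	cmdb, err := client.NewCmDBClient(conf, 0)
	if err != nil {
		panic(err)
	}
	// "unavailable" is a placeholder status value, not taken from constvar.
	if err := cmdb.UpdateDBStatus("1.1.1.1", 20000, "unavailable"); err != nil {
		fmt.Println("update status failed:", err)
	}
	if err := cmdb.SwapMySQLRole("1.1.1.1", 20000, "2.2.2.2", 20000); err != nil {
		fmt.Println("swap role failed:", err)
	}
}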
diff --git a/dbm-services/common/dbha/ha-module/client/hadb.go b/dbm-services/common/dbha/ha-module/client/hadb.go
new file mode 100644
index 0000000000..14204f599d
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/client/hadb.go
@@ -0,0 +1,860 @@
+package client
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/util"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strconv"
+	"time"
+)
+
+// HaDBClient client use to request hadb api
+type HaDBClient struct {
+	Client
+}
+
+// CommonApiResponse common api response struct
+type CommonApiResponse struct {
+	Code int             `json:"code"`
+	Msg  string          `json:"msg"`
+	Data json.RawMessage `json:"data"`
+}
+
+// GMInfo gm base info, use to report
+type GMInfo struct {
+	Ip    string `json:"ip"`
+	Port  int    `json:"port"`
+	City  string `json:"city"`
+	Cloud string `json:"cloud"`
+}
+
+// HaStatus api for ha_status table
+type HaStatus struct {
+	Uid            uint       `json:"uid,omitempty"`
+	IP             string     `json:"ip,omitempty"`
+	Port           int        `json:"port,omitempty"`
+	Module         string     `json:"module,omitempty"`
+	City           string     `json:"city,omitempty"`
+	Campus         string     `json:"campus,omitempty"`
+	Cloud          string     `json:"cloud,omitempty"`
+	DbType         string     `json:"db_type,omitempty"`
+	StartTime      *time.Time `json:"start_time,omitempty"`
+	LastTime       *time.Time `json:"last_time,omitempty"`
+	Status         string     `json:"status,omitempty"`
+	TakeOverGm     string     `json:"take_over_gm,omitempty"`
+	ReportInterval int        `json:"report_interval,omitempty"`
+}
+
+// HaStatusRequest request body for the ha_status api
+type HaStatusRequest struct {
+	DBCloudToken string    `json:"db_cloud_token"`
+	BKCloudID    int       `json:"bk_cloud_id"`
+	Name         string    `json:"name"`
+	QueryArgs    *HaStatus `json:"query_args,omitempty"`
+	SetArgs      *HaStatus `json:"set_args,omitempty"`
+}
+
+// HaStatusResponse response body of the ha_status api
+type HaStatusResponse struct {
+	RowsAffected int `json:"rowsAffected"`
+}
+
+// DbStatus api for db_status table
+type DbStatus struct {
+	Uid      uint       `json:"uid,omitempty"`
+	AgentIP  string     `json:"agent_ip,omitempty"`
+	IP       string     `json:"ip,omitempty"`
+	Port     int        `json:"port,omitempty"`
+	DbType   string     `json:"db_type,omitempty"`
+	Status   string     `json:"status,omitempty"`
+	Cloud    string     `json:"cloud,omitempty"`
+	LastTime *time.Time `json:"last_time,omitempty"`
+}
+
+// DbStatusRequest request body for the db_status api
+type DbStatusRequest struct {
+	DBCloudToken string    `json:"db_cloud_token"`
+	BKCloudID    int       `json:"bk_cloud_id"`
+	Name         string    `json:"name"`
+	QueryArgs    *DbStatus `json:"query_args,omitempty"`
+	SetArgs      *DbStatus `json:"set_args,omitempty"`
+}
+
+// DbStatusResponse response body of the db_status api
+type DbStatusResponse struct {
+	RowsAffected int `json:"rowsAffected"`
+	Uid          int `json:"uid"`
+}
+
+// SwitchQueue api for tb_mon_switch_queue table
+type SwitchQueue struct {
+	Uid                uint       `json:"uid,omitempty"`
+	IP                 string     `json:"ip,omitempty"`
+	Port               int        `json:"port,omitempty"`
+	ConfirmCheckTime   *time.Time `json:"confirm_check_time,omitempty"`
+	DbRole             string     `json:"db_role,omitempty"`
+	SlaveIP            string     `json:"slave_ip,omitempty"`
+	SlavePort          int        `json:"slave_port,omitempty"`
+	Status             string     `json:"status,omitempty"`
+	ConfirmResult      string     `json:"confirm_result,omitempty"`
+	SwitchStartTime    *time.Time `json:"switch_start_time,omitempty"`
+	SwitchFinishedTime *time.Time `json:"switch_finished_time,omitempty"`
+	SwitchResult       string     `json:"switch_result,omitempty"`
+	Remark             string     `json:"remark,omitempty"`
+	App                string     `json:"app,omitempty"`
+	DbType             string     `json:"db_type,omitempty"`
+	Idc                string     `json:"idc,omitempty"`
+	Cloud              string     `json:"cloud,omitempty"`
+	Cluster            string     `json:"cluster,omitempty"`
+}
+
+// SwitchQueueRequest request body for the switch queue api
+type SwitchQueueRequest struct {
+	DBCloudToken string       `json:"db_cloud_token"`
+	BKCloudID    int          `json:"bk_cloud_id"`
+	Name         string       `json:"name"`
+	QueryArgs    *SwitchQueue `json:"query_args,omitempty"`
+	SetArgs      *SwitchQueue `json:"set_args,omitempty"`
+}
+
+// SwitchQueueResponse response body of the switch queue api
+type SwitchQueueResponse struct {
+	RowsAffected int  `json:"rowsAffected"`
+	Uid          uint `json:"uid"`
+}
+
+// HaLogs api for ha_logs table
+type HaLogs struct {
+	Uid      uint       `json:"uid,omitempty"`
+	IP       string     `json:"ip,omitempty"`
+	Port     int        `json:"port,omitempty"`
+	MonIP    string     `json:"mon_ip,omitempty"`
+	Module   string     `json:"module,omitempty"`
+	Cloud    string     `json:"cloud,omitempty"`
+	DateTime *time.Time `json:"date_time,omitempty"`
+	Comment  string     `json:"comment,omitempty"`
+}
+
+// HaLogsRequest request body for the ha_logs api
+type HaLogsRequest struct {
+	DBCloudToken string  `json:"db_cloud_token"`
+	BKCloudID    int     `json:"bk_cloud_id"`
+	Name         string  `json:"name"`
+	QueryArgs    *HaLogs `json:"query_args,omitempty"`
+	SetArgs      *HaLogs `json:"set_args,omitempty"`
+}
+
+// HaLogsResponse response body of the ha_logs api
+type HaLogsResponse struct {
+	RowsAffected int `json:"rowsAffected"`
+}
+
+// SwitchLogs api for switch_logs table
+type SwitchLogs struct {
+	UID      uint       `json:"uid,omitempty"`
+	SwitchID uint       `json:"sw_id,omitempty"`
+	IP       string     `json:"ip,omitempty"`
+	Result   string     `json:"result,omitempty"`
+	Datetime *time.Time `json:"datetime,omitempty"`
+	Comment  string     `json:"comment,omitempty"`
+	Port     int        `json:"port,omitempty"`
+}
+
+// SwitchLogRequest request body for the switch_logs api
+type SwitchLogRequest struct {
+	DBCloudToken string      `json:"db_cloud_token"`
+	BKCloudID    int         `json:"bk_cloud_id"`
+	Name         string      `json:"name"`
+	QueryArgs    *SwitchLogs `json:"query_args,omitempty"`
+	SetArgs      *SwitchLogs `json:"set_args,omitempty"`
+}
+
+// SwitchLogResponse response body of the switch_logs api
+type SwitchLogResponse struct {
+	RowsAffected int `json:"rowsAffected"`
+}
+
+// AgentIp agent ip info
+type AgentIp struct {
+	Ip string `json:"ip"`
+}
+
+// NewHaDBClient init hadb client object
+func NewHaDBClient(conf *config.APIConfig, cloudId int) (*HaDBClient, error) {
+	c, err := NewAPIClient(conf, constvar.HaDBName, cloudId)
+	return &HaDBClient{c}, err
+}
+
+// AgentGetGMInfo get gm info from hadb
+func (c *HaDBClient) AgentGetGMInfo() ([]GMInfo, error) {
+	req := HaStatusRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.AgentGetGMInfo,
+		QueryArgs: &HaStatus{
+			Module: "gm",
+			Cloud:  strconv.Itoa(c.CloudId),
+		},
+	}
+
+	log.Logger.Debugf("AgentGetGMInfo param:%v", req)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.HaStatusUrl, ""), req, nil)
+	if err != nil {
+		return nil, err
+	}
+	if response.Code != 0 {
+		return nil, fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	var result []GMInfo
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		return nil, err
+	}
+	if len(result) == 0 {
+		return nil, fmt.Errorf("no gm available")
+	}
+	return result, nil
+}
+
+// ReportDBStatus report detected instance's status
+func (c *HaDBClient) ReportDBStatus(
+	agentIp string, ip string, port int, dbType string, status string,
+) error {
+	var result DbStatusResponse
+	currentTime := time.Now()
+
+	updateReq := DbStatusRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.UpdateInstanceStatus,
+		QueryArgs: &DbStatus{
+			AgentIP: agentIp,
+			IP:      ip,
+			Port:    port,
+		},
+		SetArgs: &DbStatus{
+			DbType:   dbType,
+			Status:   status,
+			Cloud:    strconv.Itoa(c.CloudId),
+			LastTime: &currentTime,
+		},
+	}
+
+	log.Logger.Debugf("update instance status param:%v", updateReq)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.DbStatusUrl, ""), updateReq, nil)
+	if err != nil {
+		return err
+	}
+	if response.Code != 0 {
+		return fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		return err
+	}
+	if result.RowsAffected == 1 {
+		return nil
+	}
+	if result.RowsAffected > 1 {
+		log.Logger.Fatalf("bug: update instance status affect rows %d", result.RowsAffected)
+	}
+
+	insertReq := DbStatusRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.InsertInstanceStatus,
+		SetArgs: &DbStatus{
+			AgentIP:  agentIp,
+			IP:       ip,
+			Port:     port,
+			DbType:   dbType,
+			Status:   status,
+			Cloud:    strconv.Itoa(c.CloudId),
+			LastTime: &currentTime,
+		},
+	}
+
+	log.Logger.Debugf("insert instance status param:%v", insertReq)
+
+	response, err = c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.DbStatusUrl, ""), insertReq, nil)
+	if err != nil {
+		return err
+	}
+	if response.Code != 0 {
+		return fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ReportHaLog report ha logs
+func (c *HaDBClient) ReportHaLog(ip string, port int, module string, comment string) {
+	var result HaLogsResponse
+	log.Logger.Infof("reporter log. ip:%s, port:%d, module:%s, comment:%s",
+		ip, port, module, comment)
+
+	req := HaLogsRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.ReporterHALog,
+		SetArgs: &HaLogs{
+			IP:      ip,
+			Port:    port,
+			MonIP:   util.LocalIp,
+			Module:  module,
+			Cloud:   strconv.Itoa(c.CloudId),
+			Comment: comment,
+		},
+	}
+
+	log.Logger.Debugf("ReportHaLog param:%#v", req)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.HaLogsUrl, ""), req, nil)
+	if err != nil {
+		log.Logger.Errorf("reporter log failed. err:%s", err.Error())
+		return
+	}
+	if response.Code != 0 {
+		err = fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+		log.Logger.Errorf("reporter log failed. err:%s", err.Error())
+		return
+	}
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		log.Logger.Errorf("reporter log failed. err:%s", err.Error())
+	}
+}
+
+// RegisterDBHAInfo register agent info to ha_status table
+func (c *HaDBClient) RegisterDBHAInfo(
+	ip string, port int, module string, city string, campus string, dbType string,
+) error {
+	var result HaStatusResponse
+
+	req := HaStatusRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.RegisterDBHAInfo,
+		QueryArgs: &HaStatus{
+			IP:     ip,
+			Module: module,
+			DbType: dbType,
+		},
+		SetArgs: &HaStatus{
+			IP:     ip,
+			Port:   port,
+			Module: module,
+			City:   city,
+			Campus: campus,
+			Cloud:  strconv.Itoa(c.CloudId),
+			DbType: dbType,
+			Status: constvar.RUNNING,
+		},
+	}
+
+	log.Logger.Debugf("RegisterDBHAInfo param:%v", req)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.HaStatusUrl, ""), req, nil)
+	if err != nil {
+		return err
+	}
+	if response.Code != 0 {
+		return fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// GetAliveAgentInfo fetch alive agent info from ha_status table
+func (c *HaDBClient) GetAliveAgentInfo(ip string, dbType string, interval int) ([]string, error) {
+	var result []string
+
+	currentTime := time.Now().Add(-time.Second * time.Duration(interval))
+	req := HaStatusRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.GetAliveAgentInfo,
+		QueryArgs: &HaStatus{
+			IP:       ip,
+			DbType:   dbType,
+			Module:   constvar.Agent,
+			Status:   constvar.RUNNING,
+			Cloud:    strconv.Itoa(c.CloudId),
+			LastTime: &currentTime,
+		},
+	}
+
+	log.Logger.Debugf("GetAliveAgentInfo param:%v", req)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.HaStatusUrl, ""), req, nil)
+	if err != nil {
+		return nil, err
+	}
+	if response.Code != 0 {
+		return nil, fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// GetAliveGMInfo get alive gm instance from ha_status table
+func (c *HaDBClient) GetAliveGMInfo(interval int) ([]GMInfo, error) {
+	currentTime := time.Now().Add(-time.Second * time.Duration(interval))
+	req := HaStatusRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.GetAliveGMInfo,
+		QueryArgs: &HaStatus{
+			Module:   constvar.GM,
+			Cloud:    strconv.Itoa(c.CloudId),
+			LastTime: &currentTime,
+		},
+	}
+
+	log.Logger.Debugf("GetAliveGMInfo param:%v", req)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.HaStatusUrl, ""), req, nil)
+	if err != nil {
+		log.Logger.Errorf("GetAliveGMInfo failed, do http fail,err:%s", err.Error())
+		return nil, err
+	}
+	if response.Code != 0 {
+		return nil, fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+
+	result := make([]GMInfo, 0)
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		log.Logger.Errorf("GetAliveGMInfo failed, unmarshal failed, err:%s, data:%s", err.Error(), response.Data)
+		return nil, err
+	}
+	if len(result) == 0 {
+		return nil, fmt.Errorf("no gm available")
+	}
+	return result, nil
+}
+
+// ReporterAgentHeartbeat report agent heartbeat to ha_status table
+func (c *HaDBClient) ReporterAgentHeartbeat(dbType string, interval int) error {
+	var result HaStatusResponse
+
+	currentTime := time.Now()
+	req := HaStatusRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.ReporterAgentHeartbeat,
+		QueryArgs: &HaStatus{
+			IP:     util.LocalIp,
+			DbType: dbType,
+		},
+		SetArgs: &HaStatus{
+			ReportInterval: interval,
+			LastTime:       &currentTime,
+		},
+	}
+
+	log.Logger.Debugf("ReporterAgentHeartbeat param:%v", req)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.HaStatusUrl, ""), req, nil)
+	if err != nil {
+		return err
+	}
+	if response.Code != 0 {
+		return fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ReporterGMHeartbeat report gm heartbeat to ha_status
+func (c *HaDBClient) ReporterGMHeartbeat(module string, interval int) error {
+	var result HaStatusResponse
+
+	currentTime := time.Now()
+	req := HaStatusRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.ReporterGMHeartbeat,
+		QueryArgs: &HaStatus{
+			IP:     util.LocalIp,
+			Module: module,
+		},
+		SetArgs: &HaStatus{
+			ReportInterval: interval,
+			LastTime:       &currentTime,
+		},
+	}
+
+	log.Logger.Debugf("ReporterGMHeartbeat param:%v", req)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.HaStatusUrl, ""), req, nil)
+	if err != nil {
+		return err
+	}
+	if response.Code != 0 {
+		return fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// QuerySingleTotal check same instance's switch number in a given time period
+func (c *HaDBClient) QuerySingleTotal(ip string, port int, interval int) (int, error) {
+	var result struct {
+		Count int `json:"count"`
+	}
+	confirmTime := time.Now().Add(-time.Second * time.Duration(interval))
+	req := SwitchQueueRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.QuerySingleTotal,
+		QueryArgs: &SwitchQueue{
+			IP:               ip,
+			Port:             port,
+			ConfirmCheckTime: &confirmTime,
+		},
+	}
+
+	log.Logger.Debugf("QuerySingleTotal param:%v", req)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.SwitchQueueUrl, ""), req, nil)
+	if err != nil {
+		return 0, err
+	}
+	if response.Code != 0 {
+		return 0, fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		return 0, err
+	}
+	return result.Count, nil
+}
+
+// QueryIntervalTotal get total switch number in a given time period
+func (c *HaDBClient) QueryIntervalTotal(interval int) (int, error) {
+	var result struct {
+		Count int `json:"count"`
+	}
+
+	confirmTime := time.Now().Add(-time.Second * time.Duration(interval))
+	req := SwitchQueueRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.QueryIntervalTotal,
+		QueryArgs: &SwitchQueue{
+			ConfirmCheckTime: &confirmTime,
+		},
+	}
+
+	log.Logger.Debugf("QueryIntervalTotal param:%v", req)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.SwitchQueueUrl, ""), req, nil)
+	if err != nil {
+		return 0, err
+	}
+	if response.Code != 0 {
+		return 0, fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		return 0, err
+	}
+	return result.Count, nil
+}
+
+// QuerySingleIDC get current idc total switch number in a given time period
+func (c *HaDBClient) QuerySingleIDC(ip string, idc string) (int, error) {
+	var result struct {
+		Count int `json:"count"`
+	}
+
+	confirmTime := time.Now().Add(-time.Minute)
+	req := SwitchQueueRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.QuerySingleIDC,
+		QueryArgs: &SwitchQueue{
+			IP:               ip,
+			Idc:              idc,
+			ConfirmCheckTime: &confirmTime,
+		},
+	}
+
+	log.Logger.Debugf("QuerySingleIDC param:%v", req)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.SwitchQueueUrl, ""), req, nil)
+	if err != nil {
+		return 0, err
+	}
+	if response.Code != 0 {
+		return 0, fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		return 0, err
+	}
+	return result.Count, nil
+}
+
+// UpdateTimeDelay update time delay for delay switch
+func (c *HaDBClient) UpdateTimeDelay(ip string, port int, app string) error {
+	var result struct {
+		RowsNum int `json:"rowsAffected"`
+	}
+
+	req := SwitchQueueRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.QuerySingleIDC,
+		QueryArgs: &SwitchQueue{
+			IP:   ip,
+			Port: port,
+			App:  app,
+		},
+	}
+
+	log.Logger.Debugf("UpdateTimeDelay param:%v", req)
+
+	response, err := c.DoNew(http.MethodPost, c.SpliceUrl(constvar.UpdateTimeDelay, ""), req, nil)
+	if err != nil {
+		return err
+	}
+	if response.Code != 0 {
+		return fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		return err
+	}
+	// if result.RowsNum != 1 {
+	// 	log.Logger.Fatalf("bug: ReporterAgentHeartbeat affect rows %d", result.RowsNum)
+	// }
+	return nil
+}
+
+// InsertSwitchQueue insert pre-switch instance to switch queue
+func (c *HaDBClient) InsertSwitchQueue(ip string, port int, idc string, confirmCheckTime time.Time,
+	app string, dbType string, cluster string) (uint, error) {
+	var result SwitchQueueResponse
+
+	currentTime := time.Now()
+	req := SwitchQueueRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.InsertSwitchQueue,
+		SetArgs: &SwitchQueue{
+			IP:               ip,
+			Port:             port,
+			Idc:              idc,
+			App:              app,
+			ConfirmCheckTime: &confirmCheckTime,
+			DbType:           dbType,
+			Cloud:            strconv.Itoa(c.CloudId),
+			Cluster:          cluster,
+			SwitchStartTime:  &currentTime,
+		},
+	}
+
+	log.Logger.Debugf("InsertSwitchQueue param:%v", req)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.SwitchQueueUrl, ""), req, nil)
+	if err != nil {
+		return 0, err
+	}
+	if response.Code != 0 {
+		return 0, fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		return 0, err
+	}
+	return result.Uid, err
+}
+
+// QuerySlaveCheckConfig query slave check configure from hadb
+// whether to do checksum, whether omit delay
+func (c *HaDBClient) QuerySlaveCheckConfig(ip string, port int, app string) (int, int, error) {
+	var result struct {
+		DoChecksum  int `json:"do_checksum"`
+		DoTimeDelay int `json:"do_timedelay"`
+	}
+
+	req := c.ConvertParamForGetRequest(map[string]string{
+		"ip":   ip,
+		"port": strconv.Itoa(port),
+		"app":  app,
+	})
+
+	log.Logger.Debugf("QuerySlaveCheckConfig param:%v", req)
+
+	response, err := c.DoNew(http.MethodGet, c.SpliceUrl(constvar.QuerySlaveCheckConfig, req), nil, nil)
+	if err != nil {
+		return 0, 0, err
+	}
+	if response.Code != 0 {
+		return 0, 0, fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		return 0, 0, err
+	}
+	return result.DoChecksum, result.DoTimeDelay, err
+}
+
+// UpdateSwitchQueue update a switch queue entry's status and result fields by uid
+func (c *HaDBClient) UpdateSwitchQueue(uid uint, ip string, port int, status string,
+	slaveIp string, slavePort int, confirmResult string, switchResult string, dbRole string) error {
+	var result SwitchQueueResponse
+
+	currentTime := time.Now()
+	req := SwitchQueueRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.UpdateSwitchQueue,
+		QueryArgs: &SwitchQueue{
+			Uid: uid,
+		},
+		SetArgs: &SwitchQueue{
+			IP:                 ip,
+			Port:               port,
+			Status:             status,
+			ConfirmResult:      confirmResult,
+			SwitchResult:       switchResult,
+			DbRole:             dbRole,
+			SlaveIP:            slaveIp,
+			SlavePort:          slavePort,
+			SwitchFinishedTime: &currentTime,
+		},
+	}
+
+	log.Logger.Debugf("UpdateSwitchQueue param:%v", req)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.SwitchQueueUrl, ""), req, nil)
+	if err != nil {
+		return err
+	}
+	if response.Code != 0 {
+		return fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &result)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// InsertSwitchLog insert switch log to hadb
+func (c *HaDBClient) InsertSwitchLog(swId uint, ip string, port int, result string,
+	comment string, switchFinishTime time.Time) error {
+	var res SwitchLogResponse
+	req := SwitchLogRequest{
+		DBCloudToken: c.Conf.BKConf.BkToken,
+		BKCloudID:    c.CloudId,
+		Name:         constvar.InsertSwitchLog,
+		SetArgs: &SwitchLogs{
+			SwitchID: swId,
+			IP:       ip,
+			Port:     port,
+			Result:   result,
+			Comment:  comment,
+			Datetime: &switchFinishTime,
+		},
+	}
+
+	log.Logger.Debugf("InsertSwitchLog param:%v", req)
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.SwitchLogUrl, ""), req, nil)
+	if err != nil {
+		return err
+	}
+
+	if response.Code != 0 {
+		return fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &res)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// AgentGetHashValue get agent's module value and hash value.
+// fetch all agents by current agent's city, db_type
+//
+//	mod value  : agent number
+//	hash value : agent's index
+func (c *HaDBClient) AgentGetHashValue(agentIP string, dbType string, interval int) (uint32, uint32, error) {
+	//	select * from ha_status
+	//		where city in (
+	//		select city from ha_status where
+	//			agentIP = ? and db_type = ?
+	//		)
+	//	and module = "agent" and status = "RUNNING"
+	//	and last_time > DATE_SUB(now(), interval 5 minute)
+	//	order by uid;
+	agents, err := c.GetAliveAgentInfo(agentIP, dbType, interval)
+	if err != nil {
+		log.Logger.Errorf("get agent list failed. err:%s", err.Error())
+		return 0, 0, err
+	}
+	var mod uint32
+	var modValue uint32
+	var find bool
+	mod = uint32(len(agents))
+	for index, agentIp := range agents {
+		if agentIp == agentIP {
+			if find {
+				log.Logger.Errorf("multi agent with same agentIP:%s", agentIP)
+				return 0, 0, fmt.Errorf("multi agent with same agentIP:%s", agentIP)
+			}
+			find = true
+			modValue = uint32(index)
+		}
+	}
+	if !find {
+		err = fmt.Errorf("bug: can't find in agent list. agentIP:%s, dbType:%s", agentIP, dbType)
+		log.Logger.Fatalf(err.Error())
+		return mod, modValue, err
+	}
+	return mod, modValue, nil
+}
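AgentGetHashValue hands back (mod, modValue): the number of alive agents and this agent's slot among them. This file does not define how instances are then partitioned across agents; the sketch below is one plausible illustration of the contract, with crc32 as an assumed hash:

package main

import (
	"fmt"
	"hash/crc32"
)

// mine reports whether the agent occupying slot modValue (out of mod alive
// agents) should monitor the given instance. crc32 is an assumption here,
// chosen only to illustrate how (mod, modValue) shard the instance set.
func mine(instanceAddr string, mod, modValue uint32) bool {
	return crc32.ChecksumIEEE([]byte(instanceAddr))%mod == modValue
}

func main() {
	mod, modValue := uint32(3), uint32(1) // e.g. 3 alive agents, we are index 1
	for _, addr := range []string{"1.1.1.1:20000", "2.2.2.2:20000", "3.3.3.3:20000"} {
		fmt.Println(addr, mine(addr, mod, modValue))
	}
}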
diff --git a/dbm-services/common/dbha/ha-module/client/name_service.go b/dbm-services/common/dbha/ha-module/client/name_service.go
new file mode 100644
index 0000000000..9f45c4351b
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/client/name_service.go
@@ -0,0 +1,266 @@
+package client
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/util"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"time"
+)
+
+// NameServiceClient client to request name service
+type NameServiceClient struct {
+	Client
+}
+
+// DomainRes api response result
+type DomainRes struct {
+	Detail  []DomainInfo `json:"detail"`
+	RowsNum int          `json:"rowsNum"`
+}
+
+// DomainInfo domain detail info struct
+type DomainInfo struct {
+	App            string    `json:"app"`
+	DnsStr         string    `json:"dns_str"`
+	DomainName     string    `json:"domain_name"`
+	DomainType     int       `json:"domain_type"`
+	Ip             string    `json:"ip"`
+	LastChangeTime time.Time `json:"last_change_time"`
+	Manager        string    `json:"manager"`
+	Port           int       `json:"port"`
+	Remark         string    `json:"remark"`
+	StartTime      time.Time `json:"start_time"`
+	Status         string    `json:"status"`
+	Uid            int       `json:"uid"`
+}
+
+// NewNameServiceClient create a new NameServiceClient instance
+func NewNameServiceClient(conf *config.APIConfig, cloudId int) (*NameServiceClient, error) {
+	c, err := NewAPIClient(conf, constvar.DnsName, cloudId)
+	return &NameServiceClient{c}, err
+}
+
+// GetDomainInfoByIp get domain info from dns by ip
+func (c *NameServiceClient) GetDomainInfoByIp(ip string) ([]DomainInfo, error) {
+	var res DomainRes
+	req := map[string]interface{}{
+		"db_cloud_token": c.Conf.BKConf.BkToken,
+		"bk_cloud_id":    c.CloudId,
+		"ip":             []string{ip},
+	}
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.GetDomainInfoUrl, ""), req, nil)
+	if err != nil {
+		return nil, err
+	}
+	if response.Code != 0 {
+		return nil, fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &res)
+	if err != nil {
+		return nil, err
+	}
+	return res.Detail, nil
+}
+
+// GetDomainInfoByDomain get address info from dns by domain
+func (c *NameServiceClient) GetDomainInfoByDomain(domainName string) ([]DomainInfo, error) {
+	var res DomainRes
+	req := map[string]interface{}{
+		"db_cloud_token": c.Conf.BKConf.BkToken,
+		"bk_cloud_id":    c.CloudId,
+		"domain_name":    []string{domainName},
+	}
+
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.GetDomainInfoUrl, ""), req, nil)
+	if err != nil {
+		return nil, err
+	}
+	if response.Code != 0 {
+		return nil, fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &res)
+	if err != nil {
+		return nil, err
+	}
+	return res.Detail, nil
+}
+
+// DeleteDomain delete address from domain for dns
+func (c *NameServiceClient) DeleteDomain(domainName string, app string, ip string, port int) error {
+	var data DomainRes
+	addr := fmt.Sprintf("%s#%d", ip, port)
+	req := map[string]interface{}{
+		"db_cloud_token": c.Conf.BKConf.BkToken,
+		"bk_cloud_id":    c.CloudId,
+		"app":            app,
+		"domains": []map[string]interface{}{
+			{
+				"domain_name": domainName,
+				"instances": []string{
+					addr,
+				},
+			},
+		},
+	}
+
+	log.Logger.Debugf("DeleteDomain param:%v", req)
+
+	response, err := c.DoNew(http.MethodDelete,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.DeleteDomainUrl, ""), req, nil)
+	if err != nil {
+		return err
+	}
+	if response.Code != 0 {
+		return fmt.Errorf("%s failed, return code:%d, msg:%s", util.AtWhere(), response.Code, response.Msg)
+	}
+	err = json.Unmarshal(response.Data, &data)
+	if err != nil {
+		return err
+	}
+	if data.RowsNum != 1 {
+		return fmt.Errorf("rowsAffected = %d, delete domain %s failed. ip:%s, port:%d, app:%s",
+			data.RowsNum, domainName, ip, port, app)
+	}
+	return nil
+}
+
+// PolarisClbGWResp the response format for polaris and clb
+type PolarisClbGWResp struct {
+	Message string   `json:"message"`
+	Status  int      `json:"status"`
+	Ips     []string `json:"ips,omitempty"`
+}
+
+// PolarisClbBodyParseCB the http body process callback for polaris and clb api
+func PolarisClbBodyParseCB(b []byte) (interface{}, error) {
+	result := &PolarisClbGWResp{}
+	err := json.Unmarshal(b, result)
+	if err != nil {
+		log.Logger.Errorf("unmarshal %s to %+v get an error:%s", string(b), *result, err.Error())
+		return nil, fmt.Errorf("json unmarshal failed, err: %+v", err)
+	}
+
+	// check response and data is nil
+	if result.Status != statusSuccess {
+		log.Logger.Errorf("result.Status is %d not equal to %d,message:%s",
+			result.Status, statusSuccess, result.Message)
+		return nil, fmt.Errorf("%v - %v", result.Status, result.Message)
+	}
+	return result, nil
+}
+
+// ClbDeRegister un-register address to clb
+func (c *NameServiceClient) ClbDeRegister(region string, lbid string, listenid string, addr string) error {
+	req := map[string]interface{}{
+		"db_cloud_token": c.Conf.BKConf.BkToken,
+		"bk_cloud_id":    c.CloudId,
+		"region":         region,
+		"loadbalancerid": lbid,
+		"listenerid":     listenid,
+		"ips":            []string{addr},
+	}
+
+	log.Logger.Debugf("ClbDeRegister param:%v", req)
+	response, err := c.DoNewForCB(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.CLBDeRegisterUrl, ""),
+		req, nil, PolarisClbBodyParseCB)
+	if err != nil {
+		return err
+	}
+
+	gwRsp := response.(*PolarisClbGWResp)
+	if gwRsp.Status != 0 {
+		return fmt.Errorf("%s failed, return code:%d, msg:%s",
+			util.AtWhere(), gwRsp.Status, gwRsp.Message)
+	}
+	return nil
+}
+
+// ClbGetTargets  get target address from clb
+func (c *NameServiceClient) ClbGetTargets(
+	region string, lbid string, listenid string,
+) ([]string, error) {
+	req := map[string]interface{}{
+		"db_cloud_token": c.Conf.BKConf.BkToken,
+		"bk_cloud_id":    c.CloudId,
+		"region":         region,
+		"loadbalancerid": lbid,
+		"listenerid":     listenid,
+	}
+
+	log.Logger.Debugf("ClbGetTargets param:%v", req)
+	response, err := c.DoNewForCB(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.CLBGetTargetsUrl, ""),
+		req, nil, PolarisClbBodyParseCB)
+	if err != nil {
+		return nil, err
+	}
+
+	gwRsp := response.(*PolarisClbGWResp)
+	if gwRsp.Status != 0 {
+		gwErr := fmt.Errorf("%s failed, return code:%d, msg:%s",
+			util.AtWhere(), gwRsp.Status, gwRsp.Message)
+		return nil, gwErr
+	}
+
+	return gwRsp.Ips, nil
+}
+
+// GetPolarisTargets get target address from polaris
+func (c *NameServiceClient) GetPolarisTargets(servicename string) ([]string, error) {
+	req := map[string]interface{}{
+		"db_cloud_token": c.Conf.BKConf.BkToken,
+		"bk_cloud_id":    c.CloudId,
+		"servicename":    servicename,
+	}
+
+	log.Logger.Debugf("GetPolarisTargets param:%v", req)
+	response, err := c.DoNewForCB(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.PolarisTargetsUrl, ""),
+		req, nil, PolarisClbBodyParseCB)
+	if err != nil {
+		return nil, err
+	}
+
+	gwRsp := response.(*PolarisClbGWResp)
+	if gwRsp.Status != 0 {
+		gwErr := fmt.Errorf("%s failed, return code:%d, msg:%s",
+			util.AtWhere(), gwRsp.Status, gwRsp.Message)
+		return nil, gwErr
+	}
+
+	return gwRsp.Ips, nil
+}
+
+// PolarisUnBindTarget unbind address from polaris
+func (c *NameServiceClient) PolarisUnBindTarget(servicename string, servertoken string, addr string) error {
+	req := map[string]interface{}{
+		"db_cloud_token": c.Conf.BKConf.BkToken,
+		"bk_cloud_id":    c.CloudId,
+		"servicename":    servicename,
+		"servicetoken":   servertoken,
+		"ips":            []string{addr},
+	}
+
+	log.Logger.Debugf("PolarisUnBindTarget param:%v", req)
+	response, err := c.DoNewForCB(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.PolarisUnBindUrl, ""),
+		req, nil, PolarisClbBodyParseCB)
+	if err != nil {
+		return err
+	}
+
+	gwRsp := response.(*PolarisClbGWResp)
+	if gwRsp.Status != 0 {
+		return fmt.Errorf("%s failed, return code:%d, msg:%s",
+			util.AtWhere(), gwRsp.Status, gwRsp.Message)
+	}
+
+	return nil
+}
diff --git a/dbm-services/common/dbha/ha-module/client/nc.go b/dbm-services/common/dbha/ha-module/client/nc.go
new file mode 100644
index 0000000000..a3947de9a1
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/client/nc.go
@@ -0,0 +1,83 @@
+package client
+
+import (
+	"dbm-services/common/dbha/ha-module/log"
+	"fmt"
+	"net"
+	"time"
+)
+
+// NcClient a simple netcat-like tcp client
+type NcClient struct {
+	timeout    int
+	addr       string
+	connection net.Conn
+	init       bool
+}
+
+// DoConn dial the target address with the given timeout in seconds
+func (nc *NcClient) DoConn(addr string, timeout int) error {
+	conn, err := net.DialTimeout("tcp", addr, time.Duration(timeout)*time.Second)
+	if err != nil {
+		log.Logger.Errorf("ncclient dial addr{%s} failed,timeout=%d,err:%s",
+			addr, timeout, err.Error())
+		return err
+	}
+
+	nc.timeout = timeout
+	nc.addr = addr
+	nc.connection = conn
+	nc.init = true
+	return nil
+}
+
+// WriteText write text to the established connection
+func (nc *NcClient) WriteText(text string) error {
+	if !nc.init {
+		ncErr := fmt.Errorf("Connection uninit while Write Text")
+		return ncErr
+	}
+
+	n, err := nc.connection.Write([]byte(text))
+	if err != nil {
+		ncErr := fmt.Errorf("connection write failed,err:%s", err.Error())
+		return ncErr
+	}
+
+	if n != len(text) {
+		ncErr := fmt.Errorf("connection write part,sendLen:%d,dataLen:%d",
+			n, len(text))
+		return ncErr
+	}
+	return nil
+}
+
+// Read read from the connection into buf, used for common IO
+func (nc *NcClient) Read(buf []byte) (int, error) {
+	if !nc.init {
+		ncErr := fmt.Errorf("Connection uninit while Read")
+		return 0, ncErr
+	}
+
+	n, err := nc.connection.Read(buf)
+	if err != nil {
+		log.Logger.Errorf("Connection read failed,err:%s", err.Error())
+		return 0, err
+	}
+	return n, nil
+}
+
+// Close close the underlying connection
+func (nc *NcClient) Close() error {
+	if !nc.init {
+		ncErr := fmt.Errorf("Connection uninit while close")
+		return ncErr
+	}
+
+	err := nc.connection.Close()
+	if err != nil {
+		log.Logger.Errorf("Connection close err:%s", err.Error())
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/common/dbha/ha-module/client/redis_client.go b/dbm-services/common/dbha/ha-module/client/redis_client.go
new file mode 100644
index 0000000000..22b96c2bf0
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/client/redis_client.go
@@ -0,0 +1,175 @@
+package client
+
+import (
+	"context"
+	"dbm-services/common/dbha/ha-module/log"
+	"time"
+
+	"github.com/go-redis/redis/v8"
+)
+
+// RedisClientType the mode of redis client: standalone instance or cluster
+type RedisClientType int
+
+const (
+	// RedisInstance standalone instance mode
+	RedisInstance = 0
+	// RedisCluster cluster mode
+	RedisCluster = 1
+)
+
+// RedisClient wrap both standalone and cluster redis clients
+type RedisClient struct {
+	rdb  *redis.Client
+	crdb *redis.ClusterClient
+	mode RedisClientType
+}
+
+// InitCluster init a cluster-mode client
+func (r *RedisClient) InitCluster(addr string, passwd string, timeout int) {
+	timeoutVal := time.Duration(timeout) * time.Second
+	r.crdb = redis.NewClusterClient(&redis.ClusterOptions{
+		Addrs:        []string{addr},
+		Password:     passwd,
+		DialTimeout:  timeoutVal,
+		ReadTimeout:  timeoutVal,
+		WriteTimeout: timeoutVal,
+	})
+	r.mode = RedisCluster
+}
+
+// Init init a standalone-instance client on the given db number
+func (r *RedisClient) Init(addr string, passwd string, timeout int, dbnum int) {
+	timeoutVal := time.Duration(timeout) * time.Second
+	r.rdb = redis.NewClient(&redis.Options{
+		Addr:         addr,
+		Password:     passwd,
+		DB:           dbnum,
+		DialTimeout:  timeoutVal,
+		ReadTimeout:  timeoutVal,
+		WriteTimeout: timeoutVal,
+	})
+	r.mode = RedisInstance
+}
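+
+// Construction sketch (illustrative only; address, password and timeout are
+// made up):
+//
+//	r := &RedisClient{}
+//	r.Init("127.0.0.1:30000", "xxxx", 5, 0)           // standalone instance, db 0
+//	// or: r.InitCluster("127.0.0.1:30000", "xxxx", 5) // cluster entry point
+//	defer r.Close()
+//	_, err := r.Ping()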
+
+// Ping check whether redis is alive, routed by client mode
+func (r *RedisClient) Ping() (interface{}, error) {
+	var (
+		ret interface{}
+		err error
+	)
+	// route by mode: in cluster mode only crdb is initialized
+	if r.mode == RedisInstance {
+		ret, err = r.rdb.Ping(context.TODO()).Result()
+	} else {
+		ret, err = r.crdb.Ping(context.TODO()).Result()
+	}
+	if err != nil {
+		log.Logger.Errorf("redisClient ping err[%s]", err.Error())
+		return nil, err
+	}
+	return ret, nil
+}
+
+// DoCommand execute an arbitrary redis command and return its raw result
+func (r *RedisClient) DoCommand(cmdArgv []string) (interface{}, error) {
+	cmds := make([]interface{}, 0)
+	for _, cmd := range cmdArgv {
+		cmds = append(cmds, cmd)
+	}
+
+	var (
+		ret interface{}
+		err error
+	)
+	if r.mode == RedisInstance {
+		ret, err = r.rdb.Do(context.TODO(), cmds...).Result()
+	} else {
+		ret, err = r.crdb.Do(context.TODO(), cmds...).Result()
+	}
+	if err != nil {
+		log.Logger.Errorf("redisClient DoCommand err[%s]", err.Error())
+		return nil, err
+	}
+	return ret, nil
+}
+
+// Info run the INFO command
+func (r *RedisClient) Info() (interface{}, error) {
+	var (
+		ret interface{}
+		err error
+	)
+
+	if r.mode == RedisInstance {
+		ret, err = r.rdb.Info(context.TODO()).Result()
+	} else {
+		ret, err = r.crdb.Info(context.TODO()).Result()
+	}
+	if err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// SlaveOf run SLAVEOF host port
+func (r *RedisClient) SlaveOf(host, port string) (interface{}, error) {
+	var (
+		ret interface{}
+		err error
+	)
+
+	if r.mode == RedisInstance {
+		ret, err = r.rdb.SlaveOf(context.TODO(), host, port).Result()
+	} else {
+		ret, err = r.crdb.SlaveOf(context.TODO(), host, port).Result()
+	}
+	if err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// Type return the type of the given key
+func (r *RedisClient) Type(key string) (interface{}, error) {
+	var (
+		ret interface{}
+		err error
+	)
+
+	if r.mode == RedisInstance {
+		ret, err = r.rdb.Type(context.TODO(), key).Result()
+	} else {
+		ret, err = r.crdb.Type(context.TODO(), key).Result()
+	}
+	if err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// ClusterFailover run CLUSTER FAILOVER
+func (r *RedisClient) ClusterFailover() (interface{}, error) {
+	var (
+		ret interface{}
+		err error
+	)
+
+	if r.mode == RedisInstance {
+		ret, err = r.rdb.ClusterFailover(context.TODO()).Result()
+	} else {
+		ret, err = r.crdb.ClusterFailover(context.TODO()).Result()
+	}
+	if err != nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// Close close the underlying redis client
+func (r *RedisClient) Close() {
+	if r.mode == RedisInstance {
+		r.rdb.Close()
+	} else {
+		r.crdb.Close()
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/client/remote_config.go b/dbm-services/common/dbha/ha-module/client/remote_config.go
new file mode 100644
index 0000000000..fc76718253
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/client/remote_config.go
@@ -0,0 +1,105 @@
+package client
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/util"
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+// RemoteConfigClient client to access the dbconfig service
+type RemoteConfigClient struct {
+	Client
+}
+
+// NewRemoteConfigClient create new RemoteConfigClient instance
+func NewRemoteConfigClient(conf *config.APIConfig, cloudId int) (*RemoteConfigClient, error) {
+	c, err := NewAPIClient(conf, constvar.DBConfigName, cloudId)
+	return &RemoteConfigClient{c}, err
+}
+
+// BatchGetConfigItem the batch api for getting configure items
+func (c *RemoteConfigClient) BatchGetConfigItem(
+	confFile string, confType string, confNames string,
+	levelName string, levelValues []string, namespace string,
+) (map[string]interface{}, error) {
+	res := make(map[string]interface{})
+	req := map[string]interface{}{
+		"db_cloud_token": c.Conf.BKConf.BkToken,
+		"bk_cloud_id":    c.CloudId,
+		"conf_file":      confFile,
+		"conf_name":      confNames,
+		"conf_type":      confType,
+		"level_name":     levelName,
+		"level_values":   levelValues,
+		"namespace":      namespace,
+	}
+
+	log.Logger.Debugf("BatchGetConfigItem param:%v", req)
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.BKConfigBatchUrl, ""), req, nil)
+
+	if err != nil {
+		return nil, err
+	}
+
+	if response.Code != 0 {
+		cmdbErr := fmt.Errorf("%s failed, return code:%d, msg:%s",
+			util.AtWhere(), response.Code, response.Msg)
+		return nil, cmdbErr
+	}
+
+	err = json.Unmarshal(response.Data, &res)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
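+
+// Call sketch (illustrative only; the level name and level values are made
+// up), using the mysql conf constants defined in constvar:
+//
+//	items, err := c.BatchGetConfigItem(constvar.ConfMysqlFile, constvar.ConfMysqlType,
+//		constvar.ConfMysqlName, constvar.ConfOSApp, []string{"3"}, constvar.ConfMysqlNamespace)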
+
+// GetConfigItem get a single configure item from the dbconfig server
+func (c *RemoteConfigClient) GetConfigItem(
+	app string, confFile string, confType string,
+	confName string, levelName string, levelValue string, namespace string,
+) ([]map[string]interface{}, error) {
+	res := make([]map[string]interface{}, 0)
+	req := map[string]interface{}{
+		"db_cloud_token": c.Conf.BKConf.BkToken,
+		"bk_cloud_id":    c.CloudId,
+		"bk_biz_id":      app,
+		"conf_file":      confFile,
+		"conf_type":      confType,
+		"conf_name":      confName,
+		"level_name":     levelName,
+		"level_value":    levelValue,
+		"namespace":      namespace,
+		"format":         "map",
+	}
+
+	log.Logger.Debugf("BatchGetConfigItem param:%v", req)
+	response, err := c.DoNew(http.MethodPost,
+		c.SpliceUrlByPrefix(c.Conf.UrlPre, constvar.BKConfigQueryUrl, ""), req, nil)
+
+	if err != nil {
+		return nil, err
+	}
+
+	if response.Code != 0 {
+		cmdbErr := fmt.Errorf("%s failed, return code:%d, msg:%s",
+			util.AtWhere(), response.Code, response.Msg)
+		log.Logger.Errorf(cmdbErr.Error())
+		return nil, cmdbErr
+	}
+
+	err = json.Unmarshal(response.Data, &res)
+	if err != nil {
+		cmdbErr := fmt.Errorf("%s unmarshal failed,err:%s,response:%v",
+			util.AtWhere(), err.Error(), response)
+		log.Logger.Errorf(cmdbErr.Error())
+		return nil, cmdbErr
+	}
+	return res, nil
+}
diff --git a/dbm-services/common/dbha/ha-module/config/config.go b/dbm-services/common/dbha/ha-module/config/config.go
new file mode 100644
index 0000000000..3eada1ac23
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/config/config.go
@@ -0,0 +1,282 @@
+// Package config defines the configure structures and parsing for agent/gm
+package config
+
+import (
+	"fmt"
+	"io/ioutil"
+	"strconv"
+
+	"github.com/go-playground/validator/v10"
+	"gopkg.in/yaml.v2"
+)
+
+// Config configure for agent/gm
+type Config struct {
+	// configure for Log File
+	LogConf LogConfig `yaml:"log_conf"`
+	// configure for AgentConf component
+	AgentConf AgentConfig `yaml:"agent_conf"`
+	// configure for GMConf component
+	GMConf GMConfig `yaml:"gm_conf"`
+	// configure for DB detect
+	DBConf DBConfig `yaml:"db_conf"`
+	// configure for SSH detect
+	SSH SSHConfig `yaml:"ssh"`
+	// configure for DNS API
+	DNS      DNSConfig      `yaml:"dns"`
+	Monitor  MonitorConfig  `yaml:"monitor"`
+	Timezone TimezoneConfig `yaml:"timezone"`
+}
+
+// LogConfig configure for log
+type LogConfig struct {
+	// the path of log file
+	LogPath string `yaml:"log_path"`
+	// the level of log
+	LogLevel string `yaml:"log_level"`
+	// maximum size of each log file, unit: MB
+	LogMaxSize int `yaml:"log_maxsize"`
+	// maximum number of backup files
+	LogMaxBackups int `yaml:"log_maxbackups"`
+	// maximum saving age
+	LogMaxAge int `yaml:"log_maxage"`
+	// support compress or not
+	LogCompress bool `yaml:"log_compress"`
+}
+
+// AgentConfig configure for agent component
+type AgentConfig struct {
+	// active type list for db detect, valid type in constant.go
+	ActiveDBType []string `yaml:"active_db_type"`
+	// instance city for detect
+	City string `yaml:"city"`
+	// instance campus for detect
+	Campus string `yaml:"campus"`
+	// cloud id for agent
+	Cloud string `yaml:"cloud"`
+	// fetch cmdb instance's interval(second)
+	FetchInterval  int `yaml:"fetch_interval"`
+	ReportInterval int `yaml:"reporter_interval"`
+}
+
+// GMConfig configure for gm component
+type GMConfig struct {
+	City           string    `yaml:"city" validate:"required"`
+	Campus         string    `yaml:"campus" validate:"required"`
+	Cloud          string    `yaml:"cloud" validate:"required"`
+	ListenPort     int       `yaml:"liston_port" validate:"required"`
+	ReportInterval int       `yaml:"report_interval" validate:"required"`
+	GDM            GDMConfig `yaml:"GDM"`
+	GMM            GMMConfig `yaml:"GMM"`
+	GQA            GQAConfig `yaml:"GQA"`
+	GCM            GCMConfig `yaml:"GCM"`
+}
+
+// GDMConfig configure for GDM component
+type GDMConfig struct {
+	DupExpire    int `yaml:"dup_expire"`
+	ScanInterval int `yaml:"scan_interval"`
+}
+
+// GMMConfig configure for GMM component
+type GMMConfig struct {
+}
+
+// GQAConfig configure for GQA component
+type GQAConfig struct {
+	IDCCacheExpire       int `yaml:"idc_cache_expire"`
+	SingleSwitchIDC      int `yaml:"single_switch_idc"`
+	SingleSwitchInterval int `yaml:"single_switch_interval"`
+	SingleSwitchLimit    int `yaml:"single_switch_limit"`
+	AllHostSwitchLimit   int `yaml:"all_host_switch_limit"`
+	AllSwitchInterval    int `yaml:"all_switch_interval"`
+}
+
+// GCMConfig configure for GCM component
+type GCMConfig struct {
+	AllowedChecksumMaxOffset int `yaml:"allowed_checksum_max_offset"`
+	AllowedSlaveDelayMax     int `yaml:"allowed_slave_delay_max"`
+	AllowedTimeDelayMax      int `yaml:"allowed_time_delay_max"`
+	ExecSlowKBytes           int `yaml:"exec_slow_kbytes"`
+}
+
+// DBConfig configure for database component
+type DBConfig struct {
+	// HADB for agent/GMConf report log, heartbeat
+	HADB APIConfig `yaml:"hadb"`
+	// CMDB for agent/GMConf fetch instance metadata
+	CMDB APIConfig `yaml:"cmdb"`
+	// MySQL instance detect info
+	MySQL MySQLConfig `yaml:"mysql"`
+	// Redis instance detect info
+	Redis RedisConfig `yaml:"redis"`
+}
+
+// MySQLConfig mysql instance connect info
+type MySQLConfig struct {
+	User      string `yaml:"user"`
+	Pass      string `yaml:"pass"`
+	ProxyUser string `yaml:"proxy_user"`
+	ProxyPass string `yaml:"proxy_pass"`
+	Timeout   int    `yaml:"timeout"`
+}
+
+// RedisConfig redis detect configure
+type RedisConfig struct {
+	Timeout int `yaml:"timeout"`
+}
+
+// SSHConfig ssh detect configure
+type SSHConfig struct {
+	Port    int    `yaml:"port"`
+	User    string `yaml:"user"`
+	Pass    string `yaml:"pass"`
+	Dest    string `yaml:"dest"`
+	Timeout int    `yaml:"timeout"`
+}
+
+// DNSConfig dns api configure info
+type DNSConfig struct {
+	BindConf    APIConfig `yaml:"bind_conf"`
+	PolarisConf APIConfig `yaml:"polaris_conf"`
+	ClbConf     APIConfig `yaml:"clb_conf"`
+	// TODO need remove from this struct
+	RemoteConf APIConfig `yaml:"remote_conf"`
+}
+
+// APIConfig api request info
+type APIConfig struct {
+	Host    string   `yaml:"host"`
+	Port    int      `yaml:"port"`
+	UrlPre  string   `yaml:"url_pre"`
+	User    string   `yaml:"user"`
+	Pass    string   `yaml:"pass"`
+	Timeout int      `yaml:"timeout"`
+	BKConf  BKConfig `yaml:"bk_conf"`
+}
+
+// BKConfig BK API authenticate configure
+type BKConfig struct {
+	BkToken string `yaml:"bk_token"`
+}
+
+// MonitorConfig monitor configure
+type MonitorConfig struct {
+	BkDataId     int    `yaml:"bk_data_id"`
+	AccessToken  string `yaml:"access_token"`
+	BeatPath     string `yaml:"beat_path"`
+	AgentAddress string `yaml:"agent_address"`
+}
+
+// TimezoneConfig support config timezone
+type TimezoneConfig struct {
+	Local string `yaml:"local"`
+}
+
+// ParseConfigureFile parse and validate the configure file
+func ParseConfigureFile(fileName string) (*Config, error) {
+	valid := validator.New()
+	cfg := Config{}
+	yamlFile, err := os.ReadFile(fileName)
+	if err != nil {
+		fmt.Printf("yamlFile.Get err    #%v", err)
+		return nil, err
+	}
+
+	if err = yaml.Unmarshal(yamlFile, &cfg); err != nil {
+		fmt.Printf("yamlFile Unmarshal: #%v", err)
+		return nil, err
+	}
+
+	if err = valid.Struct(&cfg); err != nil {
+		return nil, err
+	}
+
+	return &cfg, err
+}
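+
+// A minimal configure file sketch (illustrative only; values are made up,
+// field names follow the yaml tags declared above):
+//
+//	log_conf:
+//	  log_path: ./dbha.log
+//	  log_level: LOG_DEBUG
+//	agent_conf:
+//	  active_db_type: ["tendbha:backend"]
+//	  cloud: "0"
+//	  fetch_interval: 60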
+
+// GetAPIAddress return host:port
+func (c *Config) GetAPIAddress(apiInfo APIConfig) string {
+	return fmt.Sprintf("%s:%d", apiInfo.Host, apiInfo.Port)
+}
+
+// GetBKToken return bktoken
+func (c *Config) GetBKToken(apiInfo APIConfig) string {
+	return apiInfo.BKConf.BkToken
+}
+
+// CheckConfig check whether the required config fields are valid
+func (c *Config) CheckConfig() error {
+	var err error
+	var hasAgent bool
+	var agentCid int
+	if len(c.AgentConf.Cloud) != 0 {
+		hasAgent = true
+		agentCid, err = strconv.Atoi(c.AgentConf.Cloud)
+		if err != nil {
+			fmt.Printf("cloud field convert to integer failed, %s", c.AgentConf.Cloud)
+			return err
+		}
+	}
+
+	var hasGm bool
+	var gmCid int
+	if len(c.GMConf.Cloud) != 0 {
+		hasGm = true
+		gmCid, err = strconv.Atoi(c.GMConf.Cloud)
+		if err != nil {
+			fmt.Printf("gm field convert to integer failed, %s", c.GMConf.Cloud)
+			return err
+		}
+	}
+
+	if hasAgent && hasGm && agentCid != gmCid {
+		fmt.Printf("the cloud id of agent and gm is not equal")
+		return fmt.Errorf("the cloud id of agent and gm is not equal")
+	}
+
+	if !hasAgent && !hasGm {
+		return fmt.Errorf("the cloud id of agent and gm is not set")
+	}
+	return nil
+}
+
+// GetCloudId convert the string of Cloud to integer
+func (c *Config) GetCloudId() int {
+	if len(c.AgentConf.Cloud) > 0 {
+		cloudId, err := strconv.Atoi(c.AgentConf.Cloud)
+		if err != nil {
+			fmt.Printf("convert cloud to integer failed, err:%s", err.Error())
+			return 0
+		}
+		return cloudId
+	}
+
+	if len(c.GMConf.Cloud) > 0 {
+		cloudId, err := strconv.Atoi(c.GMConf.Cloud)
+		if err != nil {
+			fmt.Printf("convert cloud to integer failed, err:%s", err.Error())
+			return 0
+		}
+		return cloudId
+	}
+
+	fmt.Printf("gm and agent lack cloud field")
+	return 0
+}
+
+// GetCloud return the string of Cloud
+func (c *Config) GetCloud() string {
+	if len(c.AgentConf.Cloud) > 0 {
+		return c.AgentConf.Cloud
+	}
+
+	if len(c.GMConf.Cloud) > 0 {
+		return c.GMConf.Cloud
+	}
+
+	fmt.Printf("gm and agent lack cloud field")
+	return ""
+}
diff --git a/dbm-services/common/dbha/ha-module/constvar/constant.go b/dbm-services/common/dbha/ha-module/constvar/constant.go
new file mode 100644
index 0000000000..de1f8411e9
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/constvar/constant.go
@@ -0,0 +1,321 @@
+package constvar
+
+const (
+	// Agent TODO
+	Agent = "agent"
+	// GM TODO
+	GM = "gm"
+	// GCM TODO
+	GCM = "gcm"
+	// GMM TODO
+	GMM = "gmm"
+	// GQA TODO
+	GQA = "gqa"
+	// GDM TODO
+	GDM = "gdm"
+)
+
+const (
+	// MySQLClusterType TODO
+	MySQLClusterType = "tendbha"
+	// MySQLMetaType TODO
+	MySQLMetaType = "backend"
+	// MySQLProxyMetaType TODO
+	MySQLProxyMetaType = "proxy"
+	// MySQL TODO
+	MySQL = "tendbha:backend"
+	// MySQLProxy TODO
+	MySQLProxy = "tendbha:proxy"
+	// MySQLMaster TODO
+	MySQLMaster = "backend_master"
+	// MySQLSlave TODO
+	MySQLSlave = "backend_slave"
+	// MySQLRepeater TODO
+	MySQLRepeater = "backend_repeater"
+	// RedisClusterType TODO
+	RedisClusterType = "TwemproxyRedisInstance"
+	// TendisplusClusterType TODO
+	TendisplusClusterType = "PredixyTendisplusCluster"
+	// RedisMetaType TODO
+	RedisMetaType = "tendiscache"
+	// PredixyMetaType TODO
+	PredixyMetaType = "predixy"
+	// TwemproxyMetaType TODO
+	TwemproxyMetaType = "twemproxy"
+	// TendisplusMetaType TODO
+	TendisplusMetaType = "tendisplus"
+	// TendisCache TODO
+	TendisCache = "Rediscache"
+	// Twemproxy TODO
+	Twemproxy = "Twemproxy"
+	// Predixy TODO
+	Predixy = "Predixy"
+	// Tendisplus TODO
+	Tendisplus = "Tendisplus"
+)
+
+const (
+	// AutoSwitch TODO
+	AutoSwitch = "AutoSwitch"
+	// HandSwitch TODO
+	HandSwitch = "HandSwitch"
+	// NoSwitch TODO
+	NoSwitch = "NoSwitch"
+)
+
+const (
+	// DBCheckSuccess TODO
+	DBCheckSuccess = "DB_check_success"
+	// DBCheckFailed TODO
+	DBCheckFailed = "DB_check_failed"
+	// SSHCheckFailed TODO
+	SSHCheckFailed = "SSH_check_failed"
+	// SSHCheckSuccess TODO
+	SSHCheckSuccess = "SSH_check_success"
+	// AUTHCheckFailed TODO
+	AUTHCheckFailed = "AUTH_check_failed"
+)
+
+const (
+	// RUNNING TODO
+	RUNNING = "running"
+	// UNAVAILABLE TODO
+	UNAVAILABLE = "unavailable"
+	// AVAILABLE TODO
+	AVAILABLE = "available"
+)
+
+const (
+	// AgentGetGMInfo TODO
+	AgentGetGMInfo = "agent_get_GM_info"
+	// UpdateInstanceStatus TODO
+	UpdateInstanceStatus = "update_instance_status"
+	// InsertInstanceStatus TODO
+	InsertInstanceStatus = "insert_instance_status"
+	// ReporterHALog TODO
+	ReporterHALog = "reporter_log"
+	// RegisterDBHAInfo TODO
+	RegisterDBHAInfo = "register_dbha_info"
+	// GetAliveAgentInfo TODO
+	GetAliveAgentInfo = "get_alive_agent_info"
+	// GetAliveGMInfo TODO
+	GetAliveGMInfo = "get_alive_gm_info"
+	// ReporterAgentHeartbeat TODO
+	ReporterAgentHeartbeat = "reporter_agent_heartbeat"
+	// ReporterGMHeartbeat TODO
+	ReporterGMHeartbeat = "reporter_gm_heartbeat"
+	// QuerySingleTotal TODO
+	QuerySingleTotal = "query_single_total"
+	// QueryIntervalTotal TODO
+	QueryIntervalTotal = "query_interval_total"
+	// QuerySingleIDC TODO
+	QuerySingleIDC = "query_single_idc"
+	// UpdateTimeDelay TODO
+	UpdateTimeDelay = "update_time_delay"
+	// InsertSwitchQueue TODO
+	InsertSwitchQueue = "insert_switch_queue"
+	// QuerySlaveCheckConfig TODO
+	QuerySlaveCheckConfig = "query_slave_check_config"
+	// UpdateSwitchQueue TODO
+	UpdateSwitchQueue = "update_switch_queue"
+	// InsertSwitchLog TODO
+	InsertSwitchLog = "insert_switch_log"
+
+	// HaStatusUrl TODO
+	HaStatusUrl = "hastatus/"
+	// DbStatusUrl TODO
+	DbStatusUrl = "dbstatus/"
+	// HaLogsUrl TODO
+	HaLogsUrl = "halogs/"
+	// SwitchQueueUrl TODO
+	SwitchQueueUrl = "switchqueue/"
+	// SwitchLogUrl TODO
+	SwitchLogUrl = "switchlogs/"
+)
+
+const (
+	// CmDBCityUrl TODO
+	CmDBCityUrl = "dbmeta/dbha/cities/"
+	// CmDBInstanceUrl TODO
+	CmDBInstanceUrl = "dbmeta/dbha/instances/"
+	// CmDBSwapRoleUrl TODO
+	CmDBSwapRoleUrl = "dbmeta/dbha/swap_role/"
+	// CmDBUpdateStatusUrl TODO
+	CmDBUpdateStatusUrl = "dbmeta/dbha/update_status/"
+	// GetDomainInfoUrl TODO
+	GetDomainInfoUrl = "dns/domain/get/"
+	// DeleteDomainUrl TODO
+	DeleteDomainUrl = "dns/domain/delete/"
+	// CmDBRedisSwapUrl TODO
+	CmDBRedisSwapUrl = "dbmeta/dbha/tendis_cluster_swap/"
+	// CmDBEntryDetailUrl TODO
+	CmDBEntryDetailUrl = "dbmeta/dbha/entry_detail/"
+	// CLBDeRegisterUrl TODO
+	CLBDeRegisterUrl = "clb_deregister_part_target/"
+	// CLBGetTargetsUrl TODO
+	CLBGetTargetsUrl = "clb_get_target_private_ips/"
+	// PolarisTargetsUrl TODO
+	PolarisTargetsUrl = "polaris_describe_targets/"
+	// PolarisUnBindUrl TODO
+	PolarisUnBindUrl = "polaris_unbind_part_targets/"
+	// BKConfigBatchUrl TODO
+	BKConfigBatchUrl = "bkconfig/v1/confitem/batchget/"
+	// BKConfigQueryUrl TODO
+	BKConfigQueryUrl = "bkconfig/v1/confitem/query/"
+)
+
+const (
+	// CmDBName TODO
+	CmDBName = "cmdb"
+	// HaDBName TODO
+	HaDBName = "hadb"
+	// DnsName TODO
+	DnsName = "dns"
+	// ApiGWName TODO
+	ApiGWName = "apigw"
+	// DBConfigName TODO
+	DBConfigName = "db_config"
+
+	// BkApiAuthorization TODO
+	BkApiAuthorization = "x-bkapi-authorization"
+	// BkToken TODO
+	BkToken = "bk_token"
+
+	// ConfMysqlFile TODO
+	ConfMysqlFile = "mysql#user"
+	// ConfMysqlType TODO
+	ConfMysqlType = "init_user"
+	// ConfMysqlNamespace TODO
+	ConfMysqlNamespace = "tendb"
+	// ConfMysqlName TODO
+	ConfMysqlName = "os_mysql_pwd,os_mysql_user"
+
+	// ConfOSFile TODO
+	ConfOSFile = "os"
+	// ConfOSType TODO
+	ConfOSType = "osconf"
+	// ConfCommon TODO
+	ConfCommon = "common"
+	// ConfOSPlat TODO
+	ConfOSPlat = "plat"
+	// ConfOSApp TODO
+	ConfOSApp = "app"
+	// ConfUserPasswd TODO
+	ConfUserPasswd = "user_pwd"
+)
+
+const (
+	// LOG_DEBUG TODO
+	LOG_DEBUG = "LOG_DEBUG"
+	// LOG_INFO TODO
+	LOG_INFO = "LOG_INFO"
+	// LOG_WARN TODO
+	LOG_WARN = "LOG_WARN"
+	// LOG_ERROR TODO
+	LOG_ERROR = "LOG_ERROR"
+	// LOG_PANIC TODO
+	LOG_PANIC = "LOG_PANIC"
+	// LOG_FATAL TODO
+	LOG_FATAL = "LOG_FATAL"
+
+	// LOG_DEF_PATH TODO
+	LOG_DEF_PATH = "./dbha.log"
+	// LOG_DEF_BACKUPS TODO
+	LOG_DEF_BACKUPS = 5
+	// LOG_DEF_AGE TODO
+	LOG_DEF_AGE = 30
+	// LOG_DEF_SIZE TODO
+	LOG_DEF_SIZE = 1024
+	// LOG_MIN_SIZE TODO
+	LOG_MIN_SIZE = 1
+)
+
+const (
+	// REDIS_MAX_DIE_TIME TODO
+	REDIS_MAX_DIE_TIME = 600
+	// REDIS_DEF_AUTH TODO
+	REDIS_DEF_AUTH = "tendis+test"
+)
+
+const (
+	// REDIS_PASSWORD_LACK TODO
+	REDIS_PASSWORD_LACK = "NOAUTH Authentication required"
+	// REDIS_PASSWORD_INVALID TODO
+	REDIS_PASSWORD_INVALID = "invalid password"
+	// PREDIXY_PASSWORD_LACK TODO
+	PREDIXY_PASSWORD_LACK = "auth permission deny"
+
+	// SSH_PASSWORD_LACK_OR_INVALID TODO
+	SSH_PASSWORD_LACK_OR_INVALID = "unable to authenticate"
+)
+
+const (
+	// DBHA_EVENT_NAME TODO
+	DBHA_EVENT_NAME = "dbha_event"
+	// DBHA_EVENT_REDIS_SWITCH_SUCC TODO
+	DBHA_EVENT_REDIS_SWITCH_SUCC = "dbha_redis_switch_succ"
+	// DBHA_EVENT_REDIS_SWITCH_ERR TODO
+	DBHA_EVENT_REDIS_SWITCH_ERR = "dbha_redis_switch_err"
+	// DBHA_EVENT_MYSQL_SWITCH_SUCC TODO
+	DBHA_EVENT_MYSQL_SWITCH_SUCC = "dbha_mysql_switch_ok"
+	// DBHA_EVENT_MYSQL_SWITCH_ERR TODO
+	DBHA_EVENT_MYSQL_SWITCH_ERR = "dbha_mysql_switch_err"
+	// DBHA_EVENT_DETECT_AUTH TODO
+	DBHA_EVENT_DETECT_AUTH = "dbha_detect_auth_fail"
+	// DBHA_EVENT_DETECT_SSH TODO
+	DBHA_EVENT_DETECT_SSH = "dbha_detect_ssh_fail"
+	// DBHA_EVENT_DETECT_DB TODO
+	DBHA_EVENT_DETECT_DB = "dbha_detect_db_fail"
+	// DBHA_EVENT_DOUBLE_CHECK_SSH TODO
+	DBHA_EVENT_DOUBLE_CHECK_SSH = "dbha_doublecheck_ssh_fail"
+	// DBHA_EVENT_DOUBLE_CHECK_AUTH TODO
+	DBHA_EVENT_DOUBLE_CHECK_AUTH = "dbha_doublecheck_auth_fail"
+	// DBHA_EVENT_SYSTEM TODO
+	DBHA_EVENT_SYSTEM = "dbha_system"
+
+	// MONITOR_INFO_SWITCH TODO
+	MONITOR_INFO_SWITCH = 0
+	// MONITOR_INFO_DETECT TODO
+	MONITOR_INFO_DETECT = 1
+	// MONITOR_INFO_SYSTEM TODO
+	MONITOR_INFO_SYSTEM = 2
+
+	// MonitorReportType TODO
+	MonitorReportType = "agent"
+	// MonitorMessageKind TODO
+	MonitorMessageKind = "event"
+)
+
+// status in switch_logs(result field)
+// NB: any adjustment here must be synced with the front-end developers
+const (
+	CHECK_SWITCH_INFO = "info"
+	CHECK_SWITCH_FAIL = "failed"
+	SWITCH_INFO       = "info"
+	SWITCH_SUCC       = "success"
+	SWITCH_FAIL       = "failed"
+	UPDATEMETA_INFO   = "info"
+	UPDATEMETA_FAIL   = "failed"
+
+	SWITCH_INFO_DOUBLECHECK = "info"
+	SWITCH_INFO_SLAVE_IP    = "slave_ip"
+	SWITCH_INFO_SLAVE_PORT  = "slave_port"
+)
+
+// checksum sql
+const (
+	// CheckSumSql checksum number
+	CheckSumSql = "select count(distinct `db`, tbl) from infodba_schema.checksum where ts > date_sub(now(), interval 7 day)"
+	// CheckSumFailSql inconsistent checksum number
+	CheckSumFailSql = "select count(distinct `db`, tbl,chunk) from infodba_schema.checksum where " +
+		"(this_crc <> master_crc or this_cnt <> master_cnt) and ts > date_sub(now(), interval 7 day)"
+	// CheckDelaySql master and slave's time delay
+	CheckDelaySql = "select unix_timestamp(now())-unix_timestamp(master_time) as time_delay, delay_sec as slave_delay " +
+		"from infodba_schema.master_slave_heartbeat where master_server_id = ? and slave_server_id != master_server_id"
+)
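+
+// Query sketch for CheckDelaySql (illustrative only): the single placeholder
+// binds the master's server id, e.g. with gorm:
+//
+//	db.Raw(CheckDelaySql, masterServerId).Scan(&delay)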
+
+// timezone
+const (
+	TZ_UTC = "UTC"
+	TZ_CST = "CST"
+)
diff --git a/dbm-services/common/dbha/ha-module/constvar/constvar.go b/dbm-services/common/dbha/ha-module/constvar/constvar.go
new file mode 100644
index 0000000000..47405fe6eb
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/constvar/constvar.go
@@ -0,0 +1,2 @@
+// Package constvar TODO
+package constvar
diff --git a/dbm-services/common/dbha/ha-module/dbha.go b/dbm-services/common/dbha/ha-module/dbha.go
new file mode 100644
index 0000000000..2fa4c97005
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbha.go
@@ -0,0 +1,98 @@
+package main
+
+import (
+	"dbm-services/common/dbha/ha-module/agent"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/gm"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/monitor"
+	"dbm-services/common/dbha/ha-module/util"
+	"flag"
+	"fmt"
+	"os"
+)
+
+var dbhaType string
+var configFile string
+
+// Init register the command line flags
+func Init() {
+	flag.StringVar(&dbhaType, "type", "", `Input dbha type, "agent" or "gm"`)
+	flag.StringVar(&configFile, "config_file", "", "Input config file path")
+}
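+
+// Typical invocation (illustrative paths): both flags are required, e.g.
+//
+//	./dbha -type=agent -config_file=/conf/agent.yaml
+//	./dbha -type=gm -config_file=/conf/gm.yaml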
+
+func main() {
+	Init()
+	flag.Parse()
+	if flag.NFlag() != 2 {
+		fmt.Println("args wrong.")
+		os.Exit(1)
+	}
+
+	conf, err := config.ParseConfigureFile(configFile)
+	if err != nil {
+		fmt.Printf("parse configure file failed:%s\n", err.Error())
+		os.Exit(1)
+	}
+
+	err = conf.CheckConfig()
+	if err != nil {
+		fmt.Printf("check configure file failed:%s\n", err.Error())
+		os.Exit(1)
+	}
+	util.InitTimezone(conf.Timezone)
+
+	err = log.Init(conf.LogConf)
+	if err != nil {
+		fmt.Printf("init log file failed:%s\n", err.Error())
+		os.Exit(1)
+	}
+
+	err = monitor.MonitorInit(conf)
+	if err != nil {
+		fmt.Printf("init monitor failed:%s\n", err.Error())
+		os.Exit(1)
+	}
+
+	if util.LocalIp, err = util.GetMonIp(); err != nil {
+		log.Logger.Fatal("get component machine ip failed.")
+		os.Exit(1)
+	}
+	log.Logger.Debugf("local ip address:%s", util.LocalIp)
+
+	switch dbhaType {
+	case constvar.Agent:
+		// new agent for each db type
+		for _, dbType := range conf.AgentConf.ActiveDBType {
+			go func(dbType string) {
+				Agent, err := agent.NewMonitorAgent(conf, dbType)
+				if err != nil {
+					log.Logger.Fatalf("agent init failed. dbtype:%s err:%s", dbType, err.Error())
+				}
+
+				err = Agent.Run()
+				if err != nil {
+					log.Logger.Fatalf("agent run failed. dbtype:%s err:%s", dbType, err.Error())
+				}
+			}(dbType)
+		}
+		// block forever; the per-dbtype agents run in the goroutines above
+		select {}
+	case constvar.GM:
+		GM, err := gm.NewGM(conf)
+		if err != nil {
+			log.Logger.Fatalf("GM init failed. err:%s", err.Error())
+			os.Exit(1)
+		}
+
+		if err = GM.Run(); err != nil {
+			log.Logger.Fatalf("GM run failed. err:%s", err.Error())
+			os.Exit(1)
+		}
+
+	default:
+		log.Logger.Fatalf("unknow dbha type")
+		os.Exit(1)
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/dbmodule.go b/dbm-services/common/dbha/ha-module/dbmodule/dbmodule.go
new file mode 100644
index 0000000000..0b36018dcd
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/dbmodule.go
@@ -0,0 +1,2 @@
+// Package dbmodule defines the callbacks of each database module
+package dbmodule
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQLProxy_callback.go b/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQLProxy_callback.go
new file mode 100644
index 0000000000..a1b34c3df9
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQLProxy_callback.go
@@ -0,0 +1,182 @@
+package mysql
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+// NewMySQLProxyInstanceByCmDB generate the instances to detect from the info the agent fetched from CMDB
+func NewMySQLProxyInstanceByCmDB(instances []interface{},
+	conf *config.Config) ([]dbutil.DataBaseDetect, error) {
+	var (
+		err          error
+		unmarshalIns []*MySQLDetectInstanceInfoFromCmDB
+		ret          []dbutil.DataBaseDetect
+	)
+
+	unmarshalIns, err = UnMarshalMySQLInstanceByCmdb(instances, constvar.MySQLClusterType,
+		constvar.MySQLProxyMetaType)
+
+	if err != nil {
+		return nil, err
+	}
+
+	for _, uIns := range unmarshalIns {
+		pIns := &MySQLProxyDetectInstanceInfoFromCmDB{
+			MySQLDetectInstanceInfoFromCmDB: *uIns,
+		}
+		ret = append(ret, NewMySQLProxyDetectInstance1(pIns, conf))
+	}
+
+	return ret, err
+}
+
+// DeserializeMySQLProxy deserialize the failed instance reported by the agent
+func DeserializeMySQLProxy(jsonInfo []byte, conf *config.Config) (dbutil.DataBaseDetect, error) {
+	response := MySQLProxyDetectResponse{}
+	err := json.Unmarshal(jsonInfo, &response)
+	if err != nil {
+		log.Logger.Errorf("json unmarshal failed. jsoninfo:\n%s\n, err:%s", string(jsonInfo), err.Error())
+		return nil, err
+	}
+	return NewMySQLProxyDetectInstance2(&response, constvar.MySQLProxy, conf), nil
+}
+
+// NewMySQLProxySwitchInstance get instance switch info
+func NewMySQLProxySwitchInstance(instances []interface{}, conf *config.Config) ([]dbutil.DataBaseSwitch, error) {
+	var err error
+	var ret []dbutil.DataBaseSwitch
+	for _, v := range instances {
+		ins := v.(map[string]interface{})
+		inf, ok := ins["ip"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. ip not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		ip := inf.(string)
+
+		inf, ok = ins["port"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. port not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		port := int(inf.(float64))
+
+		inf, ok = ins["bk_idc_city_id"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. role not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		idc := strconv.Itoa(int(inf.(float64)))
+
+		inf, ok = ins["status"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. ip not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		status := inf.(string)
+
+		inf, ok = ins["cluster"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. cluster not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		cluster := inf.(string)
+
+		inf, ok = ins["bk_biz_id"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. app not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		app := strconv.Itoa(int(inf.(float64)))
+
+		inf, ok = ins["cluster_type"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. cluster_type not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		clusterType := inf.(string)
+
+		inf, ok = ins["machine_type"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. machine_type not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		metaType := inf.(string)
+
+		inf, ok = ins["admin_port"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. admin_port not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		adminPort := int(inf.(float64))
+
+		inf, ok = ins["bind_entry"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. proxyinstance_set not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		bindEntry := inf.(map[string]interface{})
+
+		cmdbClient, err := client.NewCmDBClient(&conf.DBConf.CMDB, conf.GetCloudId())
+		if err != nil {
+			return nil, err
+		}
+
+		hadbClient, err := client.NewHaDBClient(&conf.DBConf.HADB, conf.GetCloudId())
+		if err != nil {
+			return nil, err
+		}
+
+		dnsClient, err := client.NewNameServiceClient(&conf.DNS.BindConf, conf.GetCloudId())
+		if err != nil {
+			return nil, err
+		}
+
+		swIns := MySQLProxySwitch{
+			BaseSwitch: dbutil.BaseSwitch{
+				Ip:          ip,
+				Port:        port,
+				IDC:         idc,
+				Status:      status,
+				App:         app,
+				ClusterType: clusterType,
+				MetaType:    metaType,
+				Cluster:     cluster,
+				CmDBClient:  cmdbClient,
+				HaDBClient:  hadbClient,
+			},
+			AdminPort: adminPort,
+			DnsClient: dnsClient,
+		}
+
+		inf, ok = bindEntry["dns"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. dns not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		swIns.Entry.Dns = inf.([]interface{})
+
+		ret = append(ret, &swIns)
+	}
+	return ret, nil
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQLProxy_detect.go b/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQLProxy_detect.go
new file mode 100644
index 0000000000..a271f1ff08
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQLProxy_detect.go
@@ -0,0 +1,37 @@
+package mysql
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+)
+
+// MySQLProxyDetectInstance defined proxy detect info
+type MySQLProxyDetectInstance struct {
+	MySQLDetectInstance
+}
+
+// MySQLProxyDetectResponse defined proxy response struct
+type MySQLProxyDetectResponse struct {
+	MySQLDetectResponse
+}
+
+// MySQLProxyDetectInstanceInfoFromCmDB defined proxy detect info in cmdb
+type MySQLProxyDetectInstanceInfoFromCmDB struct {
+	MySQLDetectInstanceInfoFromCmDB
+}
+
+// NewMySQLProxyDetectInstance1 return detect info in cmdb
+func NewMySQLProxyDetectInstance1(ins *MySQLProxyDetectInstanceInfoFromCmDB,
+	conf *config.Config) *MySQLProxyDetectInstance {
+	return &MySQLProxyDetectInstance{
+		MySQLDetectInstance: *NewMySQLDetectInstance1(&ins.MySQLDetectInstanceInfoFromCmDB,
+			conf),
+	}
+}
+
+// NewMySQLProxyDetectInstance2 return detect info by agent report
+func NewMySQLProxyDetectInstance2(ins *MySQLProxyDetectResponse, dbType string,
+	conf *config.Config) *MySQLProxyDetectInstance {
+	return &MySQLProxyDetectInstance{
+		MySQLDetectInstance: *NewMySQLDetectInstance2(&ins.MySQLDetectResponse, dbType, conf),
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQLProxy_switch.go b/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQLProxy_switch.go
new file mode 100644
index 0000000000..9259038bb9
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQLProxy_switch.go
@@ -0,0 +1,89 @@
+package mysql
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"fmt"
+)
+
+// MySQLProxySwitch define proxy switch detail info
+type MySQLProxySwitch struct {
+	dbutil.BaseSwitch
+	AdminPort int
+	Entry     dbutil.BindEntry
+	DnsClient *client.NameServiceClient
+}
+
+// CheckSwitch check whether the proxy is allowed to switch, always true at present
+func (ins *MySQLProxySwitch) CheckSwitch() (bool, error) {
+	return true, nil
+}
+
+// DoSwitch proxy do switch
+//  1. get domain info
+//  2. delete ip under the domain
+func (ins *MySQLProxySwitch) DoSwitch() error {
+	ins.ReportLogs(constvar.SWITCH_FAIL, fmt.Sprintf("get domain info by ip:%s", ins.Ip))
+	dnsInfos, err := ins.DnsClient.GetDomainInfoByIp(ins.Ip)
+	log.Logger.Debugf("dnsInfos:%v", dnsInfos)
+	if err != nil {
+		switchErrLog := fmt.Sprintf("get domain info by ip failed: %s", err.Error())
+		ins.ReportLogs(constvar.SWITCH_FAIL, switchErrLog)
+		return err
+	}
+	if len(dnsInfos) == 0 {
+		switchErrLog := "mysql proxy without domain info."
+		ins.ReportLogs(constvar.SWITCH_FAIL, switchErrLog)
+		return fmt.Errorf("no domain info found for mysql-proxy")
+	}
+
+	ins.ReportLogs(constvar.SWITCH_INFO, fmt.Sprintf("start release ip[%s] from domain", ins.Ip))
+	for _, dnsInfo := range dnsInfos {
+		ipInfos, err := ins.DnsClient.GetDomainInfoByDomain(dnsInfo.DomainName)
+		if err != nil {
+			switchErrLog := fmt.Sprintf("get domain info by domain name failed. err:%s", err.Error())
+			ins.ReportLogs(constvar.SWITCH_FAIL, switchErrLog)
+			return err
+		}
+		if len(ipInfos) == 0 {
+			switchErrLog := fmt.Sprintf("domain name: %s without ip.", dnsInfo.DomainName)
+			ins.ReportLogs(constvar.SWITCH_FAIL, switchErrLog)
+			return fmt.Errorf("domain name: %s without ip", dnsInfo.DomainName)
+		}
+		if len(ipInfos) == 1 {
+			switchOkLog := fmt.Sprintf("domain name: %s only one ip. so we skip it.",
+				dnsInfo.DomainName)
+			ins.ReportLogs(constvar.SWITCH_INFO, switchOkLog)
+		} else {
+			err = ins.DnsClient.DeleteDomain(dnsInfo.DomainName, dnsInfo.App, ins.Ip, ins.Port)
+			if err != nil {
+				switchErrLog := fmt.Sprintf("delete domain %s failed:%s", dnsInfo.DomainName, err.Error())
+				ins.ReportLogs(constvar.SWITCH_FAIL, switchErrLog)
+				return err
+			}
+			switchOkLog := fmt.Sprintf("delete domain %s success.", dnsInfo.DomainName)
+			log.Logger.Infof("%s, info:{%s}", switchOkLog, ins.ShowSwitchInstanceInfo())
+			ins.ReportLogs(constvar.SWITCH_INFO, switchOkLog)
+		}
+	}
+	return nil
+}
+
+// ShowSwitchInstanceInfo display switch proxy info
+func (ins *MySQLProxySwitch) ShowSwitchInstanceInfo() string {
+	str := fmt.Sprintf("<%s#%d IDC:%s Status:%s Bzid:%s ClusterType:%s MachineType:%s> switch",
+		ins.Ip, ins.Port, ins.IDC, ins.Status, ins.App, ins.ClusterType, ins.MetaType)
+	return str
+}
+
+// RollBack proxy do rollback
+func (ins *MySQLProxySwitch) RollBack() error {
+	return nil
+}
+
+// UpdateMetaInfo update cmdb meta info, do nothing at present
+func (ins *MySQLProxySwitch) UpdateMetaInfo() error {
+	return nil
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_callback.go b/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_callback.go
new file mode 100644
index 0000000000..0d7d0dd814
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_callback.go
@@ -0,0 +1,342 @@
+package mysql
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+// UnMarshalMySQLInstanceByCmdb convert cmdb instance info to MySQLDetectInstanceInfoFromCmDB
+func UnMarshalMySQLInstanceByCmdb(instances []interface{},
+	uClusterType string, uMetaType string) ([]*MySQLDetectInstanceInfoFromCmDB, error) {
+	var (
+		err error
+		ret []*MySQLDetectInstanceInfoFromCmDB
+	)
+	cache := map[string]*MySQLDetectInstanceInfoFromCmDB{}
+
+	for _, v := range instances {
+		ins := v.(map[string]interface{})
+		inf, ok := ins["cluster_type"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. cluster_type not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		clusterType := inf.(string)
+		if clusterType != uClusterType {
+			continue
+		}
+		inf, ok = ins["machine_type"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. machine_type not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		metaType := inf.(string)
+		if metaType != uMetaType {
+			continue
+		}
+		inf, ok = ins["status"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. status not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		status := inf.(string)
+		if status != constvar.RUNNING && status != constvar.AVAILABLE {
+			continue
+		}
+		inf, ok = ins["cluster"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. cluster not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		cluster := inf.(string)
+
+		inf, ok = ins["ip"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. ip not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		ip := inf.(string)
+		inf, ok = ins["port"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. port not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		port := int(inf.(float64))
+		inf, ok = ins["bk_biz_id"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. app not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		app := strconv.Itoa(int(inf.(float64)))
+		cacheIns, ok := cache[ip]
+		if ok {
+			if port < cacheIns.Port {
+				cache[ip] = &MySQLDetectInstanceInfoFromCmDB{
+					Ip:          ip,
+					Port:        port,
+					App:         app,
+					ClusterType: clusterType,
+					MetaType:    metaType,
+					Cluster:     cluster,
+				}
+			}
+		} else {
+			cache[ip] = &MySQLDetectInstanceInfoFromCmDB{
+				Ip:          ip,
+				Port:        port,
+				App:         app,
+				ClusterType: clusterType,
+				MetaType:    metaType,
+				Cluster:     cluster,
+			}
+		}
+	}
+
+	for _, cacheIns := range cache {
+		ret = append(ret, cacheIns)
+	}
+
+	return ret, nil
+}
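+
+// Note: the cache above de-duplicates instances by ip and keeps only the
+// lowest port, so e.g. ports 20000 and 20001 on one host (ports illustrative)
+// yield a single detect instance on 20000, one probe per machine.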
+
+// NewMySQLInstanceByCmDB unmarshal cmdb instances to detect instance struct
+func NewMySQLInstanceByCmDB(instances []interface{}, Conf *config.Config) ([]dbutil.DataBaseDetect, error) {
+	var (
+		err          error
+		unmarshalIns []*MySQLDetectInstanceInfoFromCmDB
+		ret          []dbutil.DataBaseDetect
+	)
+
+	unmarshalIns, err = UnMarshalMySQLInstanceByCmdb(instances, constvar.MySQLClusterType,
+		constvar.MySQLMetaType)
+
+	if err != nil {
+		return nil, err
+	}
+
+	for _, uIns := range unmarshalIns {
+		ret = append(ret, NewMySQLDetectInstance1(uIns, Conf))
+	}
+
+	return ret, err
+}
+
+// NewMySQLSwitchInstance unmarshal cmdb instances to switch instance struct
+func NewMySQLSwitchInstance(instances []interface{}, conf *config.Config) ([]dbutil.DataBaseSwitch, error) {
+	var err error
+	var ret []dbutil.DataBaseSwitch
+	for _, v := range instances {
+		ins := v.(map[string]interface{})
+		inf, ok := ins["ip"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. ip not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		ip := inf.(string)
+
+		inf, ok = ins["port"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. port not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		port := int(inf.(float64))
+
+		inf, ok = ins["bk_idc_city_id"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. role not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		idc := strconv.Itoa(int(inf.(float64)))
+
+		inf, ok = ins["instance_role"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. role not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		role := inf.(string)
+
+		inf, ok = ins["status"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. ip not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		status := inf.(string)
+
+		inf, ok = ins["cluster"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. cluster not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		cluster := inf.(string)
+
+		inf, ok = ins["bk_biz_id"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. app not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		app := strconv.Itoa(int(inf.(float64)))
+
+		inf, ok = ins["cluster_type"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. cluster_type not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		clusterType := inf.(string)
+
+		inf, ok = ins["machine_type"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. machine_type not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		metaType := inf.(string)
+
+		inf, ok = ins["receiver"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. receiver not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		slave := inf.([]interface{})
+
+		inf, ok = ins["proxyinstance_set"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. proxyinstance_set not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		proxy := inf.([]interface{})
+
+		cmdbClient, err := client.NewCmDBClient(&conf.DBConf.CMDB, conf.GetCloudId())
+		if err != nil {
+			return nil, err
+		}
+
+		hadbClient, err := client.NewHaDBClient(&conf.DBConf.HADB, conf.GetCloudId())
+		if err != nil {
+			return nil, err
+		}
+
+		swIns := MySQLSwitch{
+			BaseSwitch: dbutil.BaseSwitch{
+				Ip:          ip,
+				Port:        port,
+				IDC:         idc,
+				Status:      status,
+				App:         app,
+				ClusterType: clusterType,
+				MetaType:    metaType,
+				Cluster:     cluster,
+				CmDBClient:  cmdbClient,
+				HaDBClient:  hadbClient,
+			},
+			Role:                     role,
+			AllowedChecksumMaxOffset: conf.GMConf.GCM.AllowedChecksumMaxOffset,
+			AllowedSlaveDelayMax:     conf.GMConf.GCM.AllowedSlaveDelayMax,
+			AllowedTimeDelayMax:      conf.GMConf.GCM.AllowedTimeDelayMax,
+			ExecSlowKBytes:           conf.GMConf.GCM.ExecSlowKBytes,
+			MySQLUser:                conf.DBConf.MySQL.User,
+			MySQLPass:                conf.DBConf.MySQL.Pass,
+			ProxyUser:                conf.DBConf.MySQL.ProxyUser,
+			ProxyPass:                conf.DBConf.MySQL.ProxyPass,
+			Timeout:                  conf.DBConf.MySQL.Timeout,
+		}
+
+		for _, rawInfo := range slave {
+			mapInfo := rawInfo.(map[string]interface{})
+			inf, ok = mapInfo["ip"]
+			if !ok {
+				err = fmt.Errorf("umarshal failed. slave ip not exist")
+				log.Logger.Errorf(err.Error())
+				return nil, err
+			}
+			slaveIp := inf.(string)
+			inf, ok = mapInfo["port"]
+			if !ok {
+				err = fmt.Errorf("umarshal failed. slave port not exist")
+				log.Logger.Errorf(err.Error())
+				return nil, err
+			}
+			slavePort := inf.(float64)
+			swIns.Slave = append(swIns.Slave, MySQLSlaveInfo{
+				Ip:   slaveIp,
+				Port: int(slavePort),
+			})
+		}
+
+		for _, rawInfo := range proxy {
+			mapInfo := rawInfo.(map[string]interface{})
+			inf, ok = mapInfo["ip"]
+			if !ok {
+				err = fmt.Errorf("umarshal failed. proxy ip not exist")
+				log.Logger.Errorf(err.Error())
+				return nil, err
+			}
+			proxyIp := inf.(string)
+			inf, ok = mapInfo["port"]
+			if !ok {
+				err = fmt.Errorf("umarshal failed. proxy port not exist")
+				log.Logger.Errorf(err.Error())
+				return nil, err
+			}
+			proxyPort := inf.(float64)
+			inf, ok = mapInfo["admin_port"]
+			if !ok {
+				err = fmt.Errorf("umarshal failed. proxy port not exist")
+				log.Logger.Errorf(err.Error())
+				return nil, err
+			}
+			proxyAdminPort := inf.(float64)
+			var status string
+			inf, ok = mapInfo["status"]
+			if !ok {
+				status = ""
+			} else {
+				status = inf.(string)
+			}
+			swIns.Proxy = append(swIns.Proxy, dbutil.ProxyInfo{
+				Ip:        proxyIp,
+				Port:      int(proxyPort),
+				AdminPort: int(proxyAdminPort),
+				Status:    status,
+			})
+		}
+
+		ret = append(ret, &swIns)
+	}
+	return ret, nil
+}
+
+// DeserializeMySQL convert response info to detect info
+func DeserializeMySQL(jsonInfo []byte, Conf *config.Config) (dbutil.DataBaseDetect, error) {
+	response := MySQLDetectResponse{}
+	err := json.Unmarshal(jsonInfo, &response)
+	if err != nil {
+		log.Logger.Errorf("json unmarshal failed. jsoninfo:\n%s\n, err:%s", string(jsonInfo), err.Error())
+		return nil, err
+	}
+
+	ret := NewMySQLDetectInstance2(&response, constvar.MySQL, Conf)
+	return ret, nil
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_detect.go b/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_detect.go
new file mode 100644
index 0000000000..4c8da66d68
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_detect.go
@@ -0,0 +1,217 @@
+package mysql
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/types"
+	"dbm-services/common/dbha/ha-module/util"
+	"encoding/json"
+	"fmt"
+	"math/rand"
+	"time"
+
+	"gorm.io/driver/mysql"
+	"gorm.io/gorm"
+)
+
+const (
+	replaceSql = "replace into infodba_schema.check_heartbeat(uid) values(1)"
+)
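+
+// Note: detection probes by writing rather than pinging; replacing a row in
+// infodba_schema.check_heartbeat verifies the instance is writable, which a
+// plain connection check would not.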
+
+// MySQLDetectInstance mysql instance detect struct
+type MySQLDetectInstance struct {
+	dbutil.BaseDetectDB
+	User    string
+	Pass    string
+	Timeout int
+	realDB  *gorm.DB
+}
+
+// MySQLDetectResponse mysql instance response struct
+type MySQLDetectResponse struct {
+	dbutil.BaseDetectDBResponse
+}
+
+// MySQLDetectInstanceInfoFromCmDB mysql instance detect struct in cmdb
+type MySQLDetectInstanceInfoFromCmDB struct {
+	Ip          string
+	Port        int
+	App         string
+	ClusterType string
+	MetaType    string
+	Cluster     string
+}
+
+// NewMySQLDetectInstance1 convert cmdb info to detect info
+func NewMySQLDetectInstance1(ins *MySQLDetectInstanceInfoFromCmDB, conf *config.Config) *MySQLDetectInstance {
+	return &MySQLDetectInstance{
+		BaseDetectDB: dbutil.BaseDetectDB{
+			Ip:             ins.Ip,
+			Port:           ins.Port,
+			App:            ins.App,
+			DBType:         types.DBType(fmt.Sprintf("%s:%s", ins.ClusterType, ins.MetaType)),
+			ReporterTime:   time.Unix(0, 0),
+			ReportInterval: conf.AgentConf.ReportInterval + rand.Intn(20),
+			Status:         constvar.DBCheckSuccess,
+			Cluster:        ins.Cluster,
+			SshInfo: dbutil.Ssh{
+				Port:    conf.SSH.Port,
+				User:    conf.SSH.User,
+				Pass:    conf.SSH.Pass,
+				Dest:    conf.SSH.Dest,
+				Timeout: conf.SSH.Timeout,
+			},
+		},
+		User:    conf.DBConf.MySQL.User,
+		Pass:    conf.DBConf.MySQL.Pass,
+		Timeout: conf.DBConf.MySQL.Timeout,
+	}
+}
+
+// NewMySQLDetectInstance2 convert api response info into detect info
+func NewMySQLDetectInstance2(ins *MySQLDetectResponse, dbType string, conf *config.Config) *MySQLDetectInstance {
+	return &MySQLDetectInstance{
+		BaseDetectDB: dbutil.BaseDetectDB{
+			Ip:             ins.DBIp,
+			Port:           ins.DBPort,
+			App:            ins.App,
+			DBType:         types.DBType(dbType),
+			ReporterTime:   time.Unix(0, 0),
+			ReportInterval: conf.AgentConf.ReportInterval + rand.Intn(20),
+			Status:         types.CheckStatus(ins.Status),
+			Cluster:        ins.Cluster,
+			SshInfo: dbutil.Ssh{
+				Port:    conf.SSH.Port,
+				User:    conf.SSH.User,
+				Pass:    conf.SSH.Pass,
+				Dest:    conf.SSH.Dest,
+				Timeout: conf.SSH.Timeout,
+			},
+		},
+		User:    conf.DBConf.MySQL.User,
+		Pass:    conf.DBConf.MySQL.Pass,
+		Timeout: conf.DBConf.MySQL.Timeout,
+	}
+}
+
+// Detection detect whether the instance is alive, with ssh as fallback check
+// return error:
+//
+//	not nil: check db failed or do ssh failed
+//	nil:     check db success
+func (m *MySQLDetectInstance) Detection() error {
+	recheck := 1
+	var mysqlErr error
+	needRecheck := true
+	for i := 0; i <= recheck && needRecheck; i++ {
+		// buffer the channel so the send never blocks when there is no
+		// receiver, i.e. when Detection has already timed out and returned
+		errChan := make(chan error, 2)
+		// a goroutine may leak here: it cannot be killed actively, so if it is
+		// still blocked connecting to mysql after this function has timed out
+		// and returned, it stays alive until the mysql connection times out.
+		// If that blocking time exceeds the detect interval of this instance,
+		// goroutines are created faster than they are released, which may lead
+		// to OOM. Setting a connect timeout on the mysql client could prevent this.
+		go m.CheckMySQL(errChan)
+		select {
+		case mysqlErr = <-errChan:
+			if mysqlErr != nil {
+				log.Logger.Warnf("check mysql failed. ip:%s, port:%d, app:%s", m.Ip, m.Port, m.App)
+				m.Status = constvar.DBCheckFailed
+				needRecheck = false
+			} else {
+				m.Status = constvar.DBCheckSuccess
+				return nil
+			}
+		case <-time.After(time.Second * time.Duration(m.Timeout)):
+			mysqlErr = fmt.Errorf("connect MySQL timeout recheck:%d", recheck)
+			log.Logger.Warnf(mysqlErr.Error())
+			m.Status = constvar.DBCheckFailed
+		}
+	}
+
+	sshErr := m.CheckSSH()
+	if sshErr != nil {
+		if util.CheckSSHErrIsAuthFail(sshErr) {
+			m.Status = constvar.AUTHCheckFailed
+			log.Logger.Warnf("check ssh auth failed. ip:%s, port:%d, app:%s, status:%s",
+				m.Ip, m.Port, m.App, m.Status)
+		} else {
+			m.Status = constvar.SSHCheckFailed
+			log.Logger.Warnf("check ssh failed. ip:%s, port:%d, app:%s, status:%s",
+				m.Ip, m.Port, m.App, m.Status)
+		}
+		return sshErr
+	} else {
+		log.Logger.Infof("check ssh success. ip:%s, port:%d, app:%s", m.Ip, m.Port, m.App)
+		m.Status = constvar.SSHCheckSuccess
+	}
+	return mysqlErr
+}
+
+// CheckMySQL check whether mysql is alive by replacing a heartbeat row
+func (m *MySQLDetectInstance) CheckMySQL(errChan chan error) {
+	if m.realDB == nil {
+		connParam := fmt.Sprintf("%s:%s@(%s:%d)/%s", m.User, m.Pass, m.Ip, m.Port, "infodba_schema")
+		db, err := gorm.Open(mysql.Open(connParam), &gorm.Config{
+			Logger: log.GormLogger,
+		})
+		if err != nil {
+			log.Logger.Warnf("open mysql failed. ip:%s, port:%d, err:%s", m.Ip, m.Port, err.Error())
+			errChan <- err
+			return
+		}
+		// set connect timeout
+		db.Set("gorm:connect_timeout", m.Timeout)
+		m.realDB = db
+	}
+
+	defer func() {
+		db, _ := m.realDB.DB()
+		if err := db.Close(); err != nil {
+			log.Logger.Warnf("close connect[%s#%d] failed:%s", m.Ip, m.Port, err.Error())
+		}
+		// need set to nil, otherwise agent would cache connection
+		// and may cause connection leak
+		m.realDB = nil
+	}()
+
+	err := m.realDB.Exec(replaceSql).Error
+	if err != nil {
+		log.Logger.Warnf("mysql replace heartbeat failed. ip:%s, port:%d, err:%s", m.Ip, m.Port, err.Error())
+		errChan <- err
+		return
+	}
+
+	errChan <- nil
+}
+
+// CheckSSH use ssh to check whether the machine is alive
+func (m *MySQLDetectInstance) CheckSSH() error {
+	touchFile := fmt.Sprintf("%s_%s_%d", m.SshInfo.Dest, util.LocalIp, m.Port)
+
+	touchStr := fmt.Sprintf("touch %s && if [ -d \"/data1/dbha/\" ]; then touch /data1/dbha/%s ; fi "+
+		"&& if [ -d \"/data/dbha/\" ]; then touch /data/dbha/%s ; fi", touchFile, touchFile, touchFile)
+
+	if err := m.DoSSH(touchStr); err != nil {
+		log.Logger.Warnf("do ssh failed. err:%s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// Serialization serialize mysql instance info
+func (m *MySQLDetectInstance) Serialization() ([]byte, error) {
+	response := MySQLDetectResponse{
+		BaseDetectDBResponse: m.NewDBResponse(),
+	}
+
+	resByte, err := json.Marshal(&response)
+
+	if err != nil {
+		log.Logger.Errorf("mysql serialization failed. err:%s", err.Error())
+		return []byte{}, err
+	}
+
+	return resByte, nil
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_proxy_handle.go b/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_proxy_handle.go
new file mode 100644
index 0000000000..0550bd1ebb
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_proxy_handle.go
@@ -0,0 +1,86 @@
+package mysql
+
+import (
+	"database/sql"
+	"dbm-services/common/dbha/ha-module/log"
+	"fmt"
+
+	_ "github.com/go-sql-driver/mysql" // mysql TODO
+)
+
+// ConnectAdminProxy use admin port to connect proxy
+func ConnectAdminProxy(user, password, address string) (*sql.DB, error) {
+	config := fmt.Sprintf("%s:%s@tcp(%s)/?timeout=5s&maxAllowedPacket=%s",
+		user,
+		password,
+		address,
+		"4194304")
+	db, err := sql.Open("mysql", config)
+	if err != nil {
+		log.Logger.Errorf("Database connection failed. user: %s, address: %v,err:%s.", user,
+			address, err.Error())
+		return nil, err
+	}
+	rows, err := db.Query("select version();")
+	if err != nil {
+		log.Logger.Errorf("Check Database connection failed. user: %s, address: %v, err:%s.", user,
+			address, err.Error())
+		return nil, err
+	}
+	// close the probe result set to avoid leaking the connection
+	_ = rows.Close()
+
+	return db, nil
+}
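+
+// Usage sketch (address and credentials are illustrative only):
+//
+//	db, err := ConnectAdminProxy("proxyuser", "proxypass", "127.0.0.1:11000")
+//	if err == nil {
+//		defer db.Close()
+//		// the admin port speaks a sql-like dialect, e.g. "select * from backends"
+//	}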
+
+// SwitchProxyBackendAddress connect the proxy admin port and refresh its backends
+func SwitchProxyBackendAddress(proxyIp string, proxyAdminPort int, proxyUser string, proxyPass string,
+	slaveIp string, slavePort int) error {
+	addr := fmt.Sprintf("%s:%d", proxyIp, proxyAdminPort)
+	db, err := ConnectAdminProxy(proxyUser, proxyPass, addr)
+	if err != nil {
+		log.Logger.Errorf("connect admin proxy failed. addr:%s, err:%s", addr, err.Error())
+		return fmt.Errorf("connect admin proxy failed")
+	}
+
+	switchSql := fmt.Sprintf("refresh_backends('%s:%d',1)", slaveIp, slavePort)
+	querySql := "select * from backends"
+
+	_, err = db.Exec(switchSql)
+	if err != nil {
+		log.Logger.Errorf("exec switch sql failed. err:%s", err.Error())
+		return fmt.Errorf("exec switch sql failed")
+	}
+
+	var (
+		backendIndex    int
+		address         string
+		state           string
+		backendType     string
+		uuid            []uint8
+		connectedClient int
+	)
+
+	rows, err := db.Query(querySql)
+	if err != nil {
+		log.Logger.Errorf("query backend failed. err:%s", err.Error())
+		return fmt.Errorf("query backends failed")
+	}
+	defer rows.Close()
+	for rows.Next() {
+		err = rows.Scan(&backendIndex, &address, &state, &backendType, &uuid, &connectedClient)
+		if err != nil {
+			log.Logger.Errorf("scan rows failed. err:%s", err.Error())
+			return fmt.Errorf("scan rows failed")
+		}
+		if address == fmt.Sprintf("%s:%d", slaveIp, slavePort) {
+			log.Logger.Infof("%s:%d refresh backend to %s is working", proxyIp, proxyAdminPort, slaveIp)
+			if address != "1.1.1.1:3306" {
+				if state == "up" || state == "unknown" {
+					// update cmdb backend
+					// update binlog format
+					return nil
+				}
+			}
+			return nil
+		}
+	}
+	log.Logger.Errorf("%s:%d refresh backend to %s failed", proxyIp, proxyAdminPort, slaveIp)
+	return fmt.Errorf("%s:%d refresh backend to %s failed", proxyIp, proxyAdminPort, slaveIp)
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_switch.go b/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_switch.go
new file mode 100644
index 0000000000..6d1ac4bc47
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/mysql/MySQL_switch.go
@@ -0,0 +1,597 @@
+package mysql
+
+import (
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"gorm.io/driver/mysql"
+	"gorm.io/gorm"
+)
+
+// MySQLSwitch defined mysql switch struct
+type MySQLSwitch struct {
+	dbutil.BaseSwitch
+	Role                     string
+	Slave                    []MySQLSlaveInfo
+	Proxy                    []dbutil.ProxyInfo
+	Entry                    dbutil.BindEntry
+	AllowedChecksumMaxOffset int
+	AllowedSlaveDelayMax     int
+	AllowedTimeDelayMax      int
+	ExecSlowKBytes           int
+	MySQLUser                string
+	MySQLPass                string
+	ProxyUser                string
+	ProxyPass                string
+	Timeout                  int
+}
+
+// MySQLSlaveInfo defined slave switch info
+type MySQLSlaveInfo struct {
+	Ip             string
+	Port           int
+	BinlogFile     string
+	BinlogPosition string
+}
+
+// DelayInfo defined slave delay info
+type DelayInfo struct {
+	// check whether SQL_Thread hang
+	SlaveDelay float64 `gorm:"column:slave_delay"`
+	// check whether IO_Thread hang
+	TimeDelay float64 `gorm:"column:time_delay"`
+}
+
+// MySQLVariableInfo show variable's result struct
+// not appropriate for string value
+type MySQLVariableInfo struct {
+	VariableName  string `gorm:"column:Variable_name"`
+	VariableValue uint64 `gorm:"column:Value"`
+}
+
+// BinlogStatus binlog status info struct
+type BinlogStatus struct {
+	MasterLogFileIndex      int
+	RelayMasterLogFileIndex int
+	ReadMasterLogPos        uint64
+	ExecMasterLogPos        uint64
+	// RetrievedGtidSet		string
+	// ExecutedGtidSet			string
+	// MasterUuid				string
+}
+
+// SlaveStatus show slave status info struct
+type SlaveStatus struct {
+	SlaveIoState               string `gorm:"column:Slave_IO_State"`
+	MasterHost                 string `gorm:"column:Master_Host"`
+	MasterUser                 string `gorm:"column:Master_User"`
+	MasterPort                 int    `gorm:"column:Master_Port"`
+	ConnectRetry               int    `gorm:"column:Connect_Retry"`
+	MasterLogFile              string `gorm:"column:Master_Log_File"`
+	ReadMasterLogPos           uint64 `gorm:"column:Read_Master_Log_Pos"`
+	RelayLogFile               string `gorm:"column:Relay_Log_File"`
+	RelayLogPos                uint64 `gorm:"column:Relay_Log_Pos"`
+	RelayMasterLogFile         string `gorm:"column:Relay_Master_Log_File"`
+	SlaveIoRunning             string `gorm:"column:Slave_IO_Running"`
+	SlaveSqlRunning            string `gorm:"column:Slave_SQL_Running"`
+	ReplicateDoDb              string `gorm:"column:Replicate_Do_DB"`
+	ReplicateIgnoreDb          string `gorm:"column:Replicate_Ignore_DB"`
+	ReplicateDoTable           string `gorm:"column:Replicate_Do_Table"`
+	ReplicateIgnoreTable       string `gorm:"column:Replicate_Ignore_Table"`
+	ReplicateWildDoTable       string `gorm:"column:Replicate_Wild_Do_Table"`
+	ReplicateWildIgnoreTable   string `gorm:"column:Replicate_Wild_Ignore_Table"`
+	LastErrno                  int    `gorm:"column:Last_Errno"`
+	LastError                  string `gorm:"column:Last_Error"`
+	SkipCounter                int    `gorm:"column:Skip_Counter"`
+	ExecMasterLogPos           uint64 `gorm:"column:Exec_Master_Log_Pos"`
+	RelayLogSpace              uint64 `gorm:"column:Relay_Log_Space"`
+	UntilCondition             string `gorm:"column:Until_Condition"`
+	UntilLogFile               string `gorm:"column:Until_Log_File"`
+	UntilLogPos                uint64 `gorm:"column:Until_Log_Pos"`
+	MasterSslAllowed           string `gorm:"column:Master_SSL_Allowed"`
+	MasterSslCaFile            string `gorm:"column:Master_SSL_CA_File"`
+	MasterSslCaPath            string `gorm:"column:Master_SSL_CA_Path"`
+	MasterSslCert              string `gorm:"column:Master_SSL_Cert"`
+	MasterSslCipher            string `gorm:"column:Master_SSL_Cipher"`
+	MasterSslKey               string `gorm:"column:Master_SSL_Key"`
+	SecondsBehindMaster        int    `gorm:"column:Seconds_Behind_Master"`
+	MasterSslVerifyServerCert  string `gorm:"column:Master_SSL_Verify_Server_Cert"`
+	LastIoErrno                int    `gorm:"column:Last_IO_Errno"`
+	LastIoError                string `gorm:"column:Last_IO_Error"`
+	LastSqlErrno               int    `gorm:"column:Last_SQL_Errno"`
+	LastSqlError               string `gorm:"column:Last_SQL_Error"`
+	ReplicateIgnoreServerIds   string `gorm:"column:Replicate_Ignore_Server_Ids"`
+	MasterServerId             uint64 `gorm:"column:Master_Server_Id"`
+	MasterUuid                 string `gorm:"column:Master_UUID"`
+	MasterInfoFile             string `gorm:"column:Master_Info_File"`
+	SqlDelay                   uint64 `gorm:"column:SQL_Delay"`
+	SqlRemainingDelay          string `gorm:"column:SQL_Remaining_Delay"`
+	SlaveSqlRunningState       string `gorm:"column:Slave_SQL_Running_State"`
+	MasterRetryCount           int    `gorm:"column:Master_Retry_Count"`
+	MasterBind                 string `gorm:"column:Master_Bind"`
+	LastIoErrorTimestamp       string `gorm:"column:Last_IO_Error_Timestamp"`
+	LastSqlErrorTimestamp      string `gorm:"column:Last_SQL_Error_Timestamp"`
+	MasterSslCrl               string `gorm:"column:Master_SSL_Crl"`
+	MasterSslCrlpath           string `gorm:"column:Master_SSL_Crlpath"`
+	RetrievedGtidSet           string `gorm:"column:Retrieved_Gtid_Set"`
+	ExecutedGtidSet            string `gorm:"column:Executed_Gtid_Set"`
+	AutoPosition               string `gorm:"column:Auto_Position"`
+	ReplicateWildParallelTable string `gorm:"column:Replicate_Wild_Parallel_Table"`
+}
+
+// GetRole get mysql role type
+func (ins *MySQLSwitch) GetRole() string {
+	return ins.Role
+}
+
+// ShowSwitchInstanceInfo show mysql instance's switch info
+func (ins *MySQLSwitch) ShowSwitchInstanceInfo() string {
+	str := fmt.Sprintf("<%s#%d IDC:%s Role:%s Status:%s Bzid:%s ClusterType:%s MachineType:%s>",
+		ins.Ip, ins.Port, ins.IDC, ins.Role, ins.Status, ins.App, ins.ClusterType,
+		ins.MetaType)
+	if len(ins.Slave) > 0 {
+		str = fmt.Sprintf("%s Switch from MASTER:<%s#%d> to SLAVE:<%s#%d>",
+			str, ins.Ip, ins.Port, ins.Slave[0].Ip, ins.Slave[0].Port)
+	}
+	return str
+}
+
+// CheckSwitch check slave before switch
+func (ins *MySQLSwitch) CheckSwitch() (bool, error) {
+	var err error
+	if ins.Role == constvar.MySQLSlave {
+		ins.ReportLogs(constvar.CHECK_SWITCH_INFO, "instance is slave, needn't check")
+		return false, nil
+	} else if ins.Role == constvar.MySQLRepeater {
+		ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, "instance is repeater, dbha not support")
+		return false, err
+	} else if ins.Role == constvar.MySQLMaster {
+		log.Logger.Infof("info:{%s} is master", ins.ShowSwitchInstanceInfo())
+
+		log.Logger.Infof("check slave status. info{%s}", ins.ShowSwitchInstanceInfo())
+		if len(ins.Slave) == 0 {
+			ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, "no slave info found")
+			return false, err
+		}
+		ins.SetInfo(constvar.SWITCH_INFO_SLAVE_IP, ins.Slave[0].Ip)
+		ins.SetInfo(constvar.SWITCH_INFO_SLAVE_PORT, ins.Slave[0].Port)
+		err = ins.CheckSlaveStatus()
+		if err != nil {
+			ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, err.Error())
+			return false, err
+		}
+
+		log.Logger.Infof("start to switch. info{%s}", ins.ShowSwitchInstanceInfo())
+
+		if len(ins.Proxy) == 0 {
+			// available instance usual without proxy
+			log.Logger.Infof("without proxy! info:{%s}", ins.ShowSwitchInstanceInfo())
+			ins.ReportLogs(constvar.CHECK_SWITCH_INFO, "without proxy!")
+			return false, nil
+		}
+	} else {
+		err = fmt.Errorf("info:{%s} unknown role", ins.ShowSwitchInstanceInfo())
+		log.Logger.Error(err)
+		ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, "instance unknown role")
+		return false, err
+	}
+
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO, "mysql check switch ok")
+	return true, nil
+}
+
+// DoSwitch do switch from master to slave
+//  1. refresh all proxies' backends to 1.1.1.1
+//  2. reset slave
+//  3. get the slave's consistent binlog position
+//  4. refresh backends to the alive (slave) mysql
+func (ins *MySQLSwitch) DoSwitch() error {
+	successFlag := true
+	ins.ReportLogs(constvar.SWITCH_INFO, "one phase:update all proxy's backend to 1.1.1.1 first")
+	for _, proxyIns := range ins.Proxy {
+		ins.ReportLogs(constvar.SWITCH_INFO, fmt.Sprintf("try to flush proxy:[%s:%d]'s backends to 1.1.1.1",
+			proxyIns.Ip, proxyIns.Port))
+		err := SwitchProxyBackendAddress(proxyIns.Ip, proxyIns.AdminPort, ins.ProxyUser,
+			ins.ProxyPass, "1.1.1.1", 3306)
+		if err != nil {
+			ins.ReportLogs(constvar.SWITCH_FAIL, fmt.Sprintf("flush proxy's backend failed: %s", err.Error()))
+			return fmt.Errorf("flush proxy's backend to 1.1.1.1 failed")
+		}
+		ins.ReportLogs(constvar.SWITCH_INFO, fmt.Sprintf("flush proxy:[%s:%d]'s backends to 1.1.1.1 success",
+			proxyIns.Ip, proxyIns.Port))
+	}
+	ins.ReportLogs(constvar.SWITCH_INFO, "all proxy flush backends to 1.1.1.1 success")
+
+	ins.ReportLogs(constvar.SWITCH_INFO, "try to reset slave")
+	binlogFile, binlogPosition, err := ins.ResetSlave()
+	if err != nil {
+		ins.ReportLogs(constvar.SWITCH_FAIL, fmt.Sprintf("reset slave failed:%s", err.Error()))
+		return fmt.Errorf("reset slave failed")
+	}
+	ins.ReportLogs(constvar.SWITCH_INFO, "reset slave success")
+	ins.Slave[0].BinlogFile = binlogFile
+	ins.Slave[0].BinlogPosition = strconv.Itoa(int(binlogPosition))
+
+	ins.ReportLogs(constvar.SWITCH_INFO, "two phase: update all proxy's backend to new master")
+	for _, proxyIns := range ins.Proxy {
+		ins.ReportLogs(constvar.SWITCH_INFO, fmt.Sprintf("try to flush proxy[%s:%d]'s backend to [%s:%d]",
+			proxyIns.Ip, proxyIns.Port, ins.Slave[0].Ip, ins.Slave[0].Port))
+		err = SwitchProxyBackendAddress(proxyIns.Ip, proxyIns.AdminPort, ins.ProxyUser,
+			ins.ProxyPass, ins.Slave[0].Ip, ins.Slave[0].Port)
+		if err != nil {
+			ins.ReportLogs(constvar.SWITCH_FAIL, fmt.Sprintf("flush proxy[%s:%d]'s backend to new master failed:%s",
+				proxyIns.Ip, proxyIns.Port, err.Error()))
+			successFlag = false
+			continue
+		}
+		ins.ReportLogs(constvar.SWITCH_INFO, "flush proxy's backend to new master success")
+	}
+
+	if !successFlag {
+		return fmt.Errorf("not all proxy's backend switch to new master")
+	}
+
+	ins.ReportLogs(constvar.SWITCH_INFO, "all proxy flush backends to new master success")
+	return nil
+}
+
+// RollBack do switch rollback
+func (ins *MySQLSwitch) RollBack() error {
+	return nil
+}
+
+// UpdateMetaInfo swap master and slave meta info in cmdb
+func (ins *MySQLSwitch) UpdateMetaInfo() error {
+	// TODO: defaults to 1 master 1 slave; to support multiple slaves, each slave
+	// needs a switch_weight for choosing which slave to failover to.
+	err := ins.CmDBClient.SwapMySQLRole(ins.Ip, ins.Port,
+		ins.Slave[0].Ip, ins.Slave[0].Port)
+	if err != nil {
+		updateErrLog := fmt.Sprintf("swap mysql role failed. err:%s", err.Error())
+		log.Logger.Errorf("%s, info:{%s}", updateErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.UPDATEMETA_FAIL, updateErrLog)
+		return err
+	}
+	ins.ReportLogs(constvar.UPDATEMETA_INFO, "update meta info success")
+	return nil
+}
+
+// CheckSlaveStatus check whether the slave satisfies the switch requirements
+func (ins *MySQLSwitch) CheckSlaveStatus() error {
+	// check_slave_status
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO, "try to check slave status info.")
+	if err := ins.CheckSlaveSlow(); err != nil {
+		return fmt.Errorf("slave delay too much. err:%s", err.Error())
+	}
+
+	needCheck, err := ins.FindUsefulDatabase()
+	if err != nil {
+		log.Logger.Errorf("found user-created database failed. err:%s, info:{%s}", err.Error(),
+			ins.ShowSwitchInstanceInfo())
+	}
+
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO, "try to check slave checksum info.")
+	checksumCnt, checksumFail, slaveDelay, timeDelay, err := ins.GetMySQLSlaveCheckSum()
+	if err != nil {
+		log.Logger.Errorf("check slave checksum info failed. err:%s, info:{%s}", err.Error(),
+			ins.ShowSwitchInstanceInfo())
+		return err
+	}
+
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO, fmt.Sprintf("checksumCnt:%d, checksumFail:%d, slaveDelay:%d, timeDelay:%d",
+		checksumCnt, checksumFail, slaveDelay, timeDelay))
+
+	if needCheck {
+		if ins.Status == constvar.AVAILABLE {
+			checksumCnt = 1
+			checksumFail = 0
+			slaveDelay = 0
+			timeDelay = 0
+			ins.ReportLogs(constvar.SWITCH_INFO, "instance is available, skip check delay and checksum")
+		}
+
+		if checksumCnt == 0 {
+			return fmt.Errorf("none checksum done on this db")
+		}
+
+		log.Logger.Debugf("checksum have done on slave. info:{%s}", ins.ShowSwitchInstanceInfo())
+
+		if checksumFail > ins.AllowedChecksumMaxOffset {
+			return fmt.Errorf("too many fail on tables checksum(%d > %d)", checksumFail,
+				ins.AllowedChecksumMaxOffset)
+		}
+		ins.ReportLogs(constvar.CHECK_SWITCH_INFO, fmt.Sprintf("checksum failedCnt[%d] in allowed range[%d]",
+			checksumFail, ins.AllowedChecksumMaxOffset))
+
+	} else {
+		ins.ReportLogs(constvar.CHECK_SWITCH_INFO, "none user-created database, skip check checksum")
+		return nil
+	}
+
+	if slaveDelay >= ins.AllowedSlaveDelayMax {
+		return fmt.Errorf("SQL_Thread delay on slave exceeds the allowed range(%d >= %d)", slaveDelay,
+			ins.AllowedSlaveDelayMax)
+	}
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO, fmt.Sprintf("SQL_Thread delay [%d] in allowed range[%d]",
+		slaveDelay, ins.AllowedSlaveDelayMax))
+
+	if timeDelay >= ins.AllowedTimeDelayMax {
+		return fmt.Errorf("IO_Thread delay on slave exceeds the allowed range(%d >= %d)", timeDelay,
+			ins.AllowedTimeDelayMax)
+	}
+	}
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO, fmt.Sprintf("IO_Thread delay [%d] in allowed range[%d]",
+		timeDelay, ins.AllowedTimeDelayMax))
+
+	return nil
+}
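+
+// In short, a switch is refused when checksumCnt == 0, checksumFail >
+// AllowedChecksumMaxOffset, slaveDelay >= AllowedSlaveDelayMax, or
+// timeDelay >= AllowedTimeDelayMax; the thresholds come from the instance
+// configuration.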
+
+// GetMySQLSlaveCheckSum return values: checksum_cnt, checksum_fail_cnt, slave_delay, time_delay
+func (ins *MySQLSwitch) GetMySQLSlaveCheckSum() (int, int, int, int, error) {
+	var (
+		checksumCnt, checksumFailCnt int
+		delayInfo                    DelayInfo
+	)
+	ip := ins.Slave[0].Ip
+	port := ins.Slave[0].Port
+	connParam := fmt.Sprintf("%s:%s@(%s:%d)/%s", ins.MySQLUser, ins.MySQLPass,
+		ip, port, "infodba_schema")
+	db, err := gorm.Open(mysql.Open(connParam), &gorm.Config{
+		Logger: log.GormLogger,
+	})
+	if err != nil {
+		log.Logger.Errorf("open mysql failed. ip:%s, port:%d, err:%s", ip, port, err.Error())
+		return 0, 0, 0, 0, err
+	}
+	defer func() {
+		con, _ := db.DB()
+		if err = con.Close(); err != nil {
+			log.Logger.Warnf("close connect[%s#%d] failed:%s", ip, port, err.Error())
+		}
+	}()
+
+	slaveStatus := SlaveStatus{}
+	err = db.Raw("show slave status").Scan(&slaveStatus).Error
+	if err != nil {
+		log.Logger.Errorf("show slave status failed. err:%s", err.Error())
+		return 0, 0, 0, 0, err
+	}
+	log.Logger.Debugf("slave status info:%v", slaveStatus)
+
+	err = db.Raw(constvar.CheckSumSql).Scan(&checksumCnt).Error
+	if err != nil {
+		log.Logger.Errorf("mysql get checksumCnt failed. ip:%s, port:%d, err:%s", ip, port, err.Error())
+		return 0, 0, 0, 0, err
+	}
+
+	err = db.Raw(constvar.CheckSumFailSql).Scan(&checksumFailCnt).Error
+	if err != nil {
+		log.Logger.Errorf("mysql get checksumFailCnt failed. ip:%s, port:%d, err:%s", ip, port, err.Error())
+		return 0, 0, 0, 0, err
+	}
+
+	err = db.Raw(constvar.CheckDelaySql, slaveStatus.MasterServerId).Scan(&delayInfo).Error
+	if err != nil {
+		log.Logger.Errorf("mysql get delay info failed. ip:%s, port:%d, err:%s", ip, port, err.Error())
+		return 0, 0, 0, 0, err
+	}
+
+	return checksumCnt, checksumFailCnt, int(delayInfo.SlaveDelay), int(delayInfo.TimeDelay), nil
+}
+
+// FindUsefulDatabase check whether any user-created database exists (system databases excluded)
+// return val:
+//
+//	true: found
+//	false: not found
+func (ins *MySQLSwitch) FindUsefulDatabase() (bool, error) {
+	var systemDbs = map[string]bool{
+		"mysql":              true,
+		"information_schema": true,
+		"performance_schema": true,
+		"test":               true,
+		"infodba_schema":     true,
+		"sys":                true,
+	}
+	ip := ins.Slave[0].Ip
+	port := ins.Slave[0].Port
+	connParam := fmt.Sprintf("%s:%s@(%s:%d)/%s", ins.MySQLUser, ins.MySQLPass,
+		ip, port, "infodba_schema")
+	db, err := gorm.Open(mysql.Open(connParam), &gorm.Config{
+		Logger: log.GormLogger,
+	})
+	if err != nil {
+		log.Logger.Errorf("open mysql failed. ip:%s, port:%d, err:%s", ip, port, err.Error())
+		return false, err
+	}
+	defer func() {
+		con, _ := db.DB()
+		if err = con.Close(); err != nil {
+			log.Logger.Warnf("close connect[%s#%d] failed:%s", ip, port, err.Error())
+		}
+	}()
+	var databases []string
+
+	showDatabaseSql := "show databases"
+	err = db.Raw(showDatabaseSql).Scan(&databases).Error
+	if err != nil {
+		log.Logger.Errorf("show databases faled. ip:%s, port:%d, err:%s", ip, port, err.Error())
+		return false, err
+	}
+
+	for _, database := range databases {
+		if _, ok := systemDbs[database]; !ok {
+			return true, nil
+		}
+	}
+	log.Logger.Infof("no user-created database found")
+
+	return false, nil
+}
+
+// CheckSlaveSlow check whether slave replication lags too far behind
+func (ins *MySQLSwitch) CheckSlaveSlow() error {
+	ip := ins.Slave[0].Ip
+	port := ins.Slave[0].Port
+	connParam := fmt.Sprintf("%s:%s@(%s:%d)/%s", ins.MySQLUser, ins.MySQLPass, ip, port, "infodba_schema")
+	db, err := gorm.Open(mysql.Open(connParam), &gorm.Config{
+		Logger: log.GormLogger,
+	})
+	if err != nil {
+		log.Logger.Errorf("open mysql failed. ip:%s, port:%d, err:%s", ip, port, err.Error())
+		return err
+	}
+	defer func() {
+		con, _ := db.DB()
+		if err = con.Close(); err != nil {
+			log.Logger.Warnf("close connect[%s#%d] failed:%s", ip, port, err.Error())
+		}
+	}()
+
+	var maxBinlogSize MySQLVariableInfo
+	err = db.Raw("show variables like 'max_binlog_size'").Scan(&maxBinlogSize).Error
+	if err != nil {
+		log.Logger.Errorf("get mas_binlog_size failed. ip:%s, port:%d, err:%s", ip, port, err.Error())
+		return err
+	}
+
+	binlogSizeMByte := maxBinlogSize.VariableValue / (1024 * 1024)
+	log.Logger.Infof("the slave max_binlog_size value is %d M!", binlogSizeMByte)
+
+	status, err := GetSlaveStatus(db)
+	if err != nil {
+		log.Logger.Errorf("get slave status failed. err:%s", err.Error())
+		return err
+	}
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO, fmt.Sprintf("Relay_Master_Log_File_Index:%d, Exec_Master_Log_Pos:%d",
+		status.RelayMasterLogFileIndex, status.ReadMasterLogPos))
+
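+	// estimate how many KBytes the SQL thread still has to execute: number of
+	// binlog files between Master_Log_File and Relay_Master_Log_File times
+	// max_binlog_size, minus the executed offset, plus the read offset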
+	execSlowKBytes := binlogSizeMByte*1024*uint64(status.MasterLogFileIndex-status.RelayMasterLogFileIndex) -
+		status.ExecMasterLogPos/1024 + status.ReadMasterLogPos/1024
+
+	loop := 10
+	if execSlowKBytes > uint64(ins.ExecSlowKBytes) {
+		ins.ReportLogs(constvar.CHECK_SWITCH_INFO, fmt.Sprintf("slave delay kbytes[%d] large than allowed[%d],"+
+			"try to loop wait", execSlowKBytes, ins.ExecSlowKBytes))
+		var i int
+		for i = 0; i < loop; i++ {
+			time.Sleep(3 * time.Second)
+			tmpStatus, err := GetSlaveStatus(db)
+			if err != nil {
+				log.Logger.Errorf("get slave status failed. err:%s", err.Error())
+				return err
+			}
+			execSlowKBytes = binlogSizeMByte*1024*
+				uint64(tmpStatus.MasterLogFileIndex-tmpStatus.RelayMasterLogFileIndex) -
+				tmpStatus.ExecMasterLogPos/1024 + tmpStatus.ReadMasterLogPos/1024
+			if execSlowKBytes <= uint64(ins.ExecSlowKBytes) {
+				// todo: for GTID
+				break
+			}
+			log.Logger.Warnf("loop[%d],slave slower too much: Execute %dK,default value is:%d",
+				i, execSlowKBytes, ins.ExecSlowKBytes)
+		}
+		if i == loop {
+			return fmt.Errorf("after loop wait, slave still slower too much: Execute %dK, default value is:%d",
+				execSlowKBytes, ins.ExecSlowKBytes)
+		}
+	}
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO, fmt.Sprintf("check slave[%s:%d] status success", ip, port))
+	return nil
+}
+
+// GetSlaveStatus get slave status info
+func GetSlaveStatus(db *gorm.DB) (BinlogStatus, error) {
+	slaveStatus := SlaveStatus{}
+	ret := BinlogStatus{}
+	err := db.Raw("show slave status").Scan(&slaveStatus).Error
+	if err != nil {
+		log.Logger.Errorf("show slave status failed. err:%s", err.Error())
+		return BinlogStatus{}, err
+	}
+
+	if slaveStatus.SlaveIoRunning != "Yes" || slaveStatus.SlaveSqlRunning != "Yes" {
+		return BinlogStatus{}, fmt.Errorf("slave's SQL_thread[%s], IO_Thread[%s] is abnormal",
+			slaveStatus.SlaveSqlRunning, slaveStatus.SlaveIoRunning)
+	}
+
+	if !strings.Contains(slaveStatus.MasterLogFile, ".") {
+		log.Logger.Errorf("can't find master log file. master_log_file:%s",
+			slaveStatus.MasterLogFile)
+		return BinlogStatus{}, fmt.Errorf("can't find master log file")
+	}
+
+	ret.MasterLogFileIndex, err = strconv.Atoi(strings.Split(slaveStatus.MasterLogFile, ".")[1])
+	if err != nil {
+		log.Logger.Errorf("split master log file failed. err:%s, master_log_file:%s", err.Error(),
+			slaveStatus.MasterLogFile)
+		return BinlogStatus{}, err
+	}
+
+	if !strings.Contains(slaveStatus.RelayMasterLogFile, ".") {
+		log.Logger.Errorf("can't find relay master log file. relay_master_log_file:%s",
+			slaveStatus.RelayMasterLogFile)
+		return BinlogStatus{}, fmt.Errorf("can't find relay master log file")
+	}
+
+	ret.RelayMasterLogFileIndex, err = strconv.Atoi(strings.Split(slaveStatus.RelayMasterLogFile, ".")[1])
+	if err != nil {
+		log.Logger.Errorf("split master log file failed. relay_master_log_file:%s",
+			slaveStatus.RelayMasterLogFile)
+		return BinlogStatus{}, err
+	}
+
+	ret.ReadMasterLogPos = slaveStatus.ReadMasterLogPos
+	ret.ExecMasterLogPos = slaveStatus.ExecMasterLogPos
+	// ret.RetrievedGtidSet = slaveStatus.RetrievedGtidSet
+	// ret.ExecutedGtidSet = slaveStatus.ExecutedGtidSet
+	// ret.MasterUuid = slaveStatus.MasterUuid
+	return ret, nil
+}
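+
+// For reference, the index parsing above assumes binlog file names of the
+// usual "<basename>.<numeric index>" form, e.g.:
+//
+//	idx, _ := strconv.Atoi(strings.Split("mysql-bin.000123", ".")[1]) // 123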
+
+// MasterStatus master status struct
+type MasterStatus struct {
+	File            string
+	Position        uint64
+	BinlogDoDB      string
+	BinlogIgnoreDB  string
+	ExecutedGtidSet string
+}
+
+// ResetSlave do reset slave
+func (ins *MySQLSwitch) ResetSlave() (string, uint64, error) {
+	slaveIp := ins.Slave[0].Ip
+	slavePort := ins.Slave[0].Port
+	log.Logger.Infof("gonna RESET SLAVE on %s:%d", slaveIp, slavePort)
+
+	connParam := fmt.Sprintf("%s:%s@(%s:%d)/%s", ins.MySQLUser, ins.MySQLPass, slaveIp, slavePort, "infodba_schema")
+	db, err := gorm.Open(mysql.Open(connParam), &gorm.Config{
+		Logger: log.GormLogger,
+	})
+	if err != nil {
+		log.Logger.Errorf("open mysql failed. ip:%s, port:%d, err:%s", slaveIp, slavePort, err.Error())
+		return "", 0, err
+	}
+	defer func() {
+		con, _ := db.DB()
+		if err = con.Close(); err != nil {
+			log.Logger.Warnf("close connect[%s#%d] failed:%s", slaveIp, slavePort, err.Error())
+		}
+	}()
+
+	stopSql := "stop slave"
+	masterSql := "show master status"
+	resetSql := "reset slave /*!50516 all */"
+
+	err = db.Exec(stopSql).Error
+	if err != nil {
+		return "", 0, fmt.Errorf("stop slave failed. err:%s", err.Error())
+	}
+	log.Logger.Infof("execute %s success", stopSql)
+
+	var masterStatus MasterStatus
+	err = db.Raw(masterSql).Scan(&masterStatus).Error
+	if err != nil {
+		return "", 0, fmt.Errorf("show master status failed, err:%s", err.Error())
+	}
+	log.Logger.Infof("get new master binlog info succeed. binlog_file:%s, binlog_pos:%d", masterStatus.File,
+		masterStatus.Position)
+
+	err = db.Exec(resetSql).Error
+	if err != nil {
+		return "", 0, fmt.Errorf("reset slave failed. err:%s", err.Error())
+	}
+	log.Logger.Infof("executed %s on %s:%d successd", resetSql, slaveIp, slavePort)
+
+	return masterStatus.File, masterStatus.Position, nil
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/mysql/mysql.go b/dbm-services/common/dbha/ha-module/dbmodule/mysql/mysql.go
new file mode 100644
index 0000000000..b65d989eef
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/mysql/mysql.go
@@ -0,0 +1,2 @@
+// Package mysql implements mysql and proxy detect/switch logic for dbha
+package mysql
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/lru_cache.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/lru_cache.go
new file mode 100644
index 0000000000..06b06678b2
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/lru_cache.go
@@ -0,0 +1,186 @@
+package redis
+
+import (
+	"container/list"
+	"sync"
+	"time"
+)
+
+// DEFAULT_LRU_CACHE_SIZE default cache capacity, used when the given size <= 0
+const DEFAULT_LRU_CACHE_SIZE int = 1024
+
+// Cache the interface of cache
+type Cache interface {
+	Add(key, value interface{}, expiresTime time.Duration)
+	Get(key interface{}) (interface{}, bool)
+	GetWithTTL(key interface{}) (interface{}, time.Duration, bool)
+	Remove(key interface{})
+	CacheLen() int
+}
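+
+// A minimal usage sketch (key, value and TTL are illustrative only):
+//
+//	c := NewLocked(128)
+//	c.Add("instance-1", "ok", 30*time.Second)
+//	if v, ok := c.Get("instance-1"); ok {
+//		_ = v // "ok", as long as the 30s TTL has not expired
+//	}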
+
+type cache struct {
+	size  int
+	lru   *list.List
+	items map[interface{}]*list.Element
+}
+
+// entry is a single cache item
+type entry struct {
+	key       interface{}
+	value     interface{}
+	expiresAt time.Time
+}
+
+// NewUnlocked creates a cache without locking; a size <= 0 is forced to DEFAULT_LRU_CACHE_SIZE
+func NewUnlocked(size int) Cache {
+	if size <= 0 {
+		size = DEFAULT_LRU_CACHE_SIZE
+	}
+
+	return &cache{
+		size:  size,
+		lru:   list.New(),
+		items: make(map[interface{}]*list.Element),
+	}
+}
+
+// Add adds a key/value pair with a TTL; the expiry is stored as an absolute time point
+func (c *cache) Add(key, value interface{}, expireTime time.Duration) {
+
+	expiresAt := time.Now().Add(expireTime)
+
+	if ent, ok := c.items[key]; ok {
+		// update existing entry
+		c.lru.MoveToFront(ent)
+		v := ent.Value.(*entry)
+		v.value = value
+		v.expiresAt = expiresAt
+		return
+	}
+
+	// add new entry
+	c.items[key] = c.lru.PushFront(&entry{
+		key:       key,
+		value:     value,
+		expiresAt: expiresAt,
+	})
+
+	// remove oldest
+	if c.lru.Len() > c.size {
+		ent := c.lru.Back()
+		if ent != nil {
+			c.removeElement(ent)
+		}
+	}
+}
+
+// Get get value by key from cache
+func (c *cache) Get(key interface{}) (interface{}, bool) {
+	if ent, ok := c.items[key]; ok {
+		v := ent.Value.(*entry)
+
+		if v.expiresAt.After(time.Now()) {
+			// found good entry
+			c.lru.MoveToFront(ent)
+			return v.value, true
+		}
+
+		// ttl expired
+		c.removeElement(ent)
+	}
+	return nil, false
+}
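+
+// Note the lazy expiry design: entries are evicted only when a lookup
+// observes an expired TTL or when Add overflows the capacity; there is no
+// background sweeper goroutine.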
+
+// GetWithTTL the get api and support ttl
+func (c *cache) GetWithTTL(key interface{}) (interface{}, time.Duration, bool) {
+	if ent, ok := c.items[key]; ok {
+		v := ent.Value.(*entry)
+
+		if v.expiresAt.After(time.Now()) {
+			// found good entry
+			c.lru.MoveToFront(ent)
+			return v.value, time.Until(v.expiresAt), true
+		}
+
+		// ttl expired
+		c.removeElement(ent)
+	}
+	return nil, 0, false
+}
+
+// Remove remove item from cache by key
+func (c *cache) Remove(key interface{}) {
+	if ent, ok := c.items[key]; ok {
+		c.removeElement(ent)
+	}
+}
+
+// CacheLen get the length of cache
+func (c *cache) CacheLen() int {
+	return c.lru.Len()
+}
+
+func (c *cache) removeElement(e *list.Element) {
+	c.lru.Remove(e)
+	kv := e.Value.(*entry)
+	delete(c.items, kv.key)
+}
+
+type lockedCache struct {
+	c cache
+	m sync.Mutex
+}
+
+// NewLocked create new instance of lockedCache
+func NewLocked(size int) Cache {
+
+	if size <= 0 {
+		size = DEFAULT_LRU_CACHE_SIZE
+	}
+	return &lockedCache{
+		c: cache{
+			size:  size,
+			lru:   list.New(),
+			items: make(map[interface{}]*list.Element),
+		},
+	}
+}
+
+// Add add kv to lockedCache
+func (l *lockedCache) Add(key, value interface{}, expireTime time.Duration) {
+	l.m.Lock()
+	l.c.Add(key, value, expireTime)
+	l.m.Unlock()
+}
+
+// Get get value from lockedCache by key
+func (l *lockedCache) Get(key interface{}) (interface{}, bool) {
+	l.m.Lock()
+	v, f := l.c.Get(key)
+	l.m.Unlock()
+	return v, f
+}
+
+// GetWithTTL the get api support ttl
+func (l *lockedCache) GetWithTTL(key interface{}) (interface{}, time.Duration, bool) {
+	l.m.Lock()
+	defer l.m.Unlock()
+	return l.c.GetWithTTL(key)
+}
+
+// Remove remove item from lockedCache by key
+func (l *lockedCache) Remove(key interface{}) {
+	l.m.Lock()
+	l.c.Remove(key)
+	l.m.Unlock()
+}
+
+// CacheLen get the length of lockedCache
+func (l *lockedCache) CacheLen() int {
+	l.m.Lock()
+	c := l.c.CacheLen()
+	l.m.Unlock()
+	return c
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/predixy_callback.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/predixy_callback.go
new file mode 100644
index 0000000000..d71a34753f
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/predixy_callback.go
@@ -0,0 +1,93 @@
+package redis
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"encoding/json"
+)
+
+// NewPredixyInstanceByCmdb build the predixy instances to detect from the info the agent fetched from cmdb
+func NewPredixyInstanceByCmdb(instances []interface{},
+	conf *config.Config) ([]dbutil.DataBaseDetect, error) {
+	var (
+		err          error
+		unmarshalIns []*RedisDetectInfoFromCmDB
+		ret          []dbutil.DataBaseDetect
+	)
+
+	unmarshalIns, err = UnMarshalRedisInstanceByCmdb(
+		instances, constvar.TendisplusClusterType,
+		constvar.PredixyMetaType)
+
+	if err != nil {
+		return nil, err
+	}
+
+	for _, uIns := range unmarshalIns {
+		ret = append(ret, NewPredixyDetectInstance(uIns, conf))
+	}
+
+	return ret, err
+}
+
+// DeserializePredixy deserialize a failed instance reported by the agent
+func DeserializePredixy(jsonInfo []byte, conf *config.Config) (dbutil.DataBaseDetect, error) {
+	response := RedisDetectResponse{}
+	err := json.Unmarshal(jsonInfo, &response)
+	if err != nil {
+		log.Logger.Errorf("json unmarshal failed. jsoninfo:\n%s\n, err:%s",
+			string(jsonInfo), err.Error())
+		return nil, err
+	}
+	ret := NewPredixyDetectInstanceFromRsp(&response, conf)
+	return ret, nil
+}
+
+// NewPredixySwitchInstance build predixy switch instances from raw cmdb instances
+func NewPredixySwitchInstance(instances []interface{},
+	conf *config.Config) ([]dbutil.DataBaseSwitch, error) {
+	var err error
+	var ret []dbutil.DataBaseSwitch
+	for _, v := range instances {
+		swIns, err := CreateRedisProxySwitchInfo(v, conf)
+		if err != nil {
+			log.Logger.Errorf("parse predixy switch instance failed,err:%s",
+				err.Error())
+			continue
+		}
+
+		if swIns.MetaType != constvar.PredixyMetaType {
+			log.Logger.Errorf("Create predixy switch while the metaType[%s] != %s",
+				swIns.MetaType, constvar.PredixyMetaType)
+			continue
+		}
+
+		if swIns.CheckFetchEntryDetail() {
+			edErr := swIns.GetEntryDetailInfo()
+			if edErr != nil {
+				log.Logger.Errorf("GetEntryDetail failed in NewPredixySwitch,err:%s",
+					edErr.Error())
+			}
+		}
+
+		pw := PredixySwitch{
+			RedisProxySwitchInfo: *swIns,
+		}
+
+		passwd, err := GetInstancePassByCluster(
+			constvar.Predixy, pw.Cluster, conf,
+		)
+		if err != nil {
+			log.Logger.Errorf("get predixy switch passwd failed,err:%s,info:%s",
+				err.Error(), pw.ShowSwitchInstanceInfo())
+		} else {
+			log.Logger.Infof("get predixy switch passwd[%s]", passwd)
+			pw.Pass = passwd
+		}
+		ret = append(ret, &pw)
+	}
+
+	return ret, err
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/predixy_detect.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/predixy_detect.go
new file mode 100644
index 0000000000..fd9ed243e5
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/predixy_detect.go
@@ -0,0 +1,130 @@
+package redis
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/util"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// PredixyDetectInstance predixy detect instance
+type PredixyDetectInstance struct {
+	RedisDetectBase
+}
+
+// Detection do predixy detection, fall back to ssh check when the db check fails
+func (ins *PredixyDetectInstance) Detection() error {
+	err := ins.DoPredixyDetection()
+	if err == nil && ins.Status == constvar.DBCheckSuccess {
+		log.Logger.Debugf("Predixy check ok and return")
+		return nil
+	}
+
+	if err != nil && ins.Status == constvar.AUTHCheckFailed {
+		log.Logger.Errorf("Predixy auth failed,status:%s", ins.Status)
+		return err
+	}
+
+	sshErr := ins.CheckSSH()
+	if sshErr != nil {
+		if util.CheckSSHErrIsAuthFail(sshErr) {
+			ins.Status = constvar.AUTHCheckFailed
+			log.Logger.Errorf("Predixy check ssh auth failed.ip:%s,port:%d,app:%s,status:%s",
+				ins.Ip, ins.Port, ins.App, ins.Status)
+		} else {
+			ins.Status = constvar.SSHCheckFailed
+			log.Logger.Errorf("Predixy check ssh failed.ip:%s,port:%d,app:%s,status:%s",
+				ins.Ip, ins.Port, ins.App, ins.Status)
+		}
+		return sshErr
+	} else {
+		log.Logger.Debugf("Predixy check ssh success. ip:%s, port:%d, app:%s",
+			ins.Ip, ins.Port, ins.App)
+		ins.Status = constvar.SSHCheckSuccess
+		return nil
+	}
+}
+
+// DoPredixyDetection do predixy detection
+func (ins *PredixyDetectInstance) DoPredixyDetection() error {
+	r := &client.RedisClient{}
+	addr := fmt.Sprintf("%s:%d", ins.Ip, ins.Port)
+	r.Init(addr, ins.Pass, ins.Timeout, 0)
+	defer r.Close()
+
+	rsp, err := r.Ping()
+	if err != nil {
+		predixyErr := fmt.Errorf("do predixy cmd err,err:%s", err.Error())
+		if util.CheckRedisErrIsAuthFail(err) {
+			ins.Status = constvar.AUTHCheckFailed
+			log.Logger.Errorf("predixy detect auth failed,err:%s,status:%s",
+				predixyErr.Error(), ins.Status)
+		} else {
+			ins.Status = constvar.DBCheckFailed
+			log.Logger.Errorf("predixy detect failed,err:%s,status:%s",
+				predixyErr.Error(), ins.Status)
+		}
+		return predixyErr
+	}
+
+	rspInfo, ok := rsp.(string)
+	if !ok {
+		predixyErr := fmt.Errorf("predixy ping response type is not string")
+		log.Logger.Errorf(predixyErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return predixyErr
+	}
+
+	if strings.Contains(rspInfo, "PONG") || strings.Contains(rspInfo, "pong") {
+		ins.Status = constvar.DBCheckSuccess
+		return nil
+	} else {
+		predixyErr := fmt.Errorf("do predixy cmd err,rsp;%s", rspInfo)
+		log.Logger.Errorf(predixyErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return predixyErr
+	}
+}
+
+// Serialization serialize predixy detect info
+func (ins *PredixyDetectInstance) Serialization() ([]byte, error) {
+	response := RedisDetectResponse{
+		BaseDetectDBResponse: ins.NewDBResponse(),
+		Pass:                 ins.Pass,
+	}
+
+	resByte, err := json.Marshal(&response)
+	if err != nil {
+		log.Logger.Errorf("Predixy serialization failed. err:%s", err.Error())
+		return []byte{}, err
+	}
+	return resByte, nil
+}
+
+// ShowDetectionInfo show predixy detect instance info
+func (ins *PredixyDetectInstance) ShowDetectionInfo() string {
+	str := fmt.Sprintf("ip:%s, port:%d, status:%s, DBType:%s",
+		ins.Ip, ins.Port, ins.Status, ins.DBType)
+	return str
+}
+
+// NewPredixyDetectInstance create predixy detect instance by cmdb info
+func NewPredixyDetectInstance(ins *RedisDetectInfoFromCmDB,
+	conf *config.Config) *PredixyDetectInstance {
+	return &PredixyDetectInstance{
+		RedisDetectBase: *GetDetectBaseByInfo(ins, constvar.Predixy, conf),
+	}
+}
+
+// NewPredixyDetectInstanceFromRsp create predixy detect instance by agent response
+func NewPredixyDetectInstanceFromRsp(ins *RedisDetectResponse,
+	conf *config.Config) *PredixyDetectInstance {
+	return &PredixyDetectInstance{
+		RedisDetectBase: *GetDetectBaseByRsp(ins, constvar.Predixy, conf),
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/predixy_switch.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/predixy_switch.go
new file mode 100644
index 0000000000..325159b554
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/predixy_switch.go
@@ -0,0 +1,69 @@
+package redis
+
+import (
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"fmt"
+)
+
+// PredixySwitch predixy switch instance
+type PredixySwitch struct {
+	RedisProxySwitchInfo
+}
+
+// CheckSwitch predixy needs no extra check before switch
+func (ins *PredixySwitch) CheckSwitch() (bool, error) {
+	return true, nil
+}
+
+// DoSwitch kick the failed predixy off dns, clb and polaris
+func (ins *PredixySwitch) DoSwitch() error {
+	ins.ReportLogs(constvar.SWITCH_INFO,
+		fmt.Sprintf("handle predixy switch[%s:%d]", ins.Ip, ins.Port))
+	err := ins.KickOffDns()
+	cErr := ins.KickOffClb()
+	pErr := ins.KickOffPolaris()
+	if err != nil {
+		predixyErrLog := fmt.Sprintf("Predixy kick dns failed,err:%s", err.Error())
+		log.Logger.Errorf("%s info:%s", predixyErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.SWITCH_FAIL, predixyErrLog)
+		return err
+	}
+	if cErr != nil {
+		predixyErrLog := fmt.Sprintf("Predixy kick clb failed,err:%s", cErr.Error())
+		log.Logger.Errorf("%s info:%s", predixyErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.SWITCH_FAIL, predixyErrLog)
+		return cErr
+	}
+	if pErr != nil {
+		predixyErrLog := fmt.Sprintf("Predixy kick polaris failed,err:%s", pErr.Error())
+		log.Logger.Errorf("%s info:%s", predixyErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.SWITCH_FAIL, predixyErrLog)
+		return pErr
+	}
+
+	succLog := fmt.Sprintf("Predixy do switch ok,dns[%t] clb[%t] polaris[%t]",
+		ins.ApiGw.DNSFlag, ins.ApiGw.CLBFlag, ins.ApiGw.PolarisFlag)
+	ins.ReportLogs(constvar.SWITCH_INFO, succLog)
+	return nil
+}
+
+// RollBack nothing to roll back for predixy switch
+func (ins *PredixySwitch) RollBack() error {
+	return nil
+}
+
+// UpdateMetaInfo no meta info to update for predixy switch
+func (ins *PredixySwitch) UpdateMetaInfo() error {
+	return nil
+}
+
+// ShowSwitchInstanceInfo show predixy switch instance info
+func (ins *PredixySwitch) ShowSwitchInstanceInfo() string {
+	format := `<%s#%d IDC:%s Status:%s App:%s ClusterType:%s MachineType:%s Cluster:%s> switch`
+	str := fmt.Sprintf(
+		format, ins.Ip, ins.Port, ins.IDC, ins.Status, ins.App,
+		ins.ClusterType, ins.MetaType, ins.Cluster,
+	)
+	return str
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/redis.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/redis.go
new file mode 100644
index 0000000000..d4f2ff9288
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/redis.go
@@ -0,0 +1,2 @@
+// Package redis implements redis and proxy detect/switch logic for dbha
+package redis
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/redis_base.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/redis_base.go
new file mode 100644
index 0000000000..54801b571c
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/redis_base.go
@@ -0,0 +1,832 @@
+package redis
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/types"
+	"dbm-services/common/dbha/ha-module/util"
+	"fmt"
+	"math/rand"
+	"strconv"
+	"time"
+)
+
+// PolarisInfo polaris gateway info
+type PolarisInfo struct {
+	Namespace string
+	Service   string
+	Token     string
+}
+
+// CLBInfo clb gateway info
+type CLBInfo struct {
+	Region        string
+	LoadBalanceId string
+	ListenId      string
+}
+
+// DNSInfo dns gateway info
+type DNSInfo struct {
+	Domain string
+}
+
+// GWInfo api gateway info: polaris, clb and dns
+type GWInfo struct {
+	PolarisFlag bool
+	Polaris     []PolarisInfo
+	CLBFlag     bool
+	CLB         []CLBInfo
+	DNSFlag     bool
+	DNS         []DNSInfo
+}
+
+// RedisSwitchInfo redis switch instance info
+type RedisSwitchInfo struct {
+	dbutil.BaseSwitch
+	AdminPort       int
+	ApiGw           GWInfo
+	DnsClient       *client.NameServiceClient
+	PolarisGWClient *client.NameServiceClient
+	MasterConf      string
+	SlaveConf       string
+	Proxy           []dbutil.ProxyInfo
+	Slave           []RedisSlaveInfo
+	Pass            string
+	Timeout         int
+}
+
+// RedisSlaveInfo redis slave info
+type RedisSlaveInfo struct {
+	Ip   string `json:"ip"`
+	Port int    `json:"port"`
+}
+
+// RedisDetectBase base struct of redis detect instances
+type RedisDetectBase struct {
+	dbutil.BaseDetectDB
+	Pass    string
+	Timeout int
+}
+
+// RedisDetectResponse detect response reported by the agent
+type RedisDetectResponse struct {
+	dbutil.BaseDetectDBResponse
+	Pass string `json:"pass"`
+}
+
+// RedisDetectInfoFromCmDB detect info fetched from cmdb
+type RedisDetectInfoFromCmDB struct {
+	Ip          string
+	Port        int
+	App         string
+	ClusterType string
+	MetaType    string
+	Pass        string
+	Cluster     string
+}
+
+// RedisProxySwitchInfo redis proxy switch instance info
+type RedisProxySwitchInfo struct {
+	dbutil.BaseSwitch
+	AdminPort       int
+	ApiGw           GWInfo
+	DnsClient       *client.NameServiceClient
+	PolarisGWClient *client.NameServiceClient
+	Pass            string
+}
+
+// CheckSSH redis do ssh check
+func (ins *RedisDetectBase) CheckSSH() error {
+	touchFile := fmt.Sprintf("%s_%s_%d", ins.SshInfo.Dest, util.LocalIp, ins.Port)
+
+	touchStr := fmt.Sprintf("touch %s && if [ -d \"/data1/dbha\" ]; then touch /data1/dbha/%s ; fi "+
+		"&& if [ -d \"/data/dbha\" ]; then touch /data/dbha/%s ; fi", touchFile, touchFile, touchFile)
+
+	if err := ins.DoSSH(touchStr); err != nil {
+		log.Logger.Errorf("RedisDetection do ssh failed. err:%s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// ShowSwitchInstanceInfo show redis proxy switch instance info
+func (ins *RedisProxySwitchInfo) ShowSwitchInstanceInfo() string {
+	str := fmt.Sprintf("ip:%s, port:%d, IDC:%s, status:%s, app:%s, cluster_type:%s, machine_type:%s",
+		ins.Ip, ins.Port, ins.IDC, ins.Status, ins.App, ins.ClusterType, ins.MetaType)
+	return str
+}
+
+// KickOffDns remove the failed proxy from dns
+func (ins *RedisProxySwitchInfo) KickOffDns() error {
+	if !ins.ApiGw.DNSFlag {
+		log.Logger.Infof("no need kickDNS,info:%s",
+			ins.ShowSwitchInstanceInfo())
+		return nil
+	}
+
+	for _, dnsInfo := range ins.ApiGw.DNS {
+		ipInfos, err := ins.DnsClient.GetDomainInfoByDomain(dnsInfo.Domain)
+		if err != nil {
+			log.Logger.Errorf("get domain info by domain name failed. err:%s, info:{%s}",
+				err.Error(), ins.ShowSwitchInstanceInfo())
+			return err
+		}
+
+		if len(ipInfos) == 0 {
+			log.Logger.Errorf("domain name: %s without ip. info:{%s}",
+				dnsInfo.Domain, ins.ShowSwitchInstanceInfo())
+			return fmt.Errorf("domain name: %s without ip", dnsInfo.Domain)
+		} else if len(ipInfos) == 1 {
+			log.Logger.Warnf("domain name: %s only one ip. so we skip it. info:{%s}",
+				dnsInfo.Domain, ins.ShowSwitchInstanceInfo())
+		} else {
+			err = ins.DnsClient.DeleteDomain(
+				dnsInfo.Domain, ins.App, ins.Ip, ins.Port,
+			)
+			if err != nil {
+				log.Logger.Errorf("delete domain %s failed. err:%s, info:{%s}",
+					dnsInfo.Domain, err.Error(), ins.ShowSwitchInstanceInfo())
+				return err
+			}
+			log.Logger.Infof("delete domain %s success. info:{%s}",
+				dnsInfo.Domain, ins.ShowSwitchInstanceInfo())
+		}
+	}
+	return nil
+}
+
+// KickOffClb remove the failed proxy from clb
+func (ins *RedisProxySwitchInfo) KickOffClb() error {
+	if !ins.ApiGw.CLBFlag {
+		log.Logger.Infof("switch proxy no need to kickoff CLB,info:%s",
+			ins.ShowSwitchInstanceInfo())
+		return nil
+	}
+
+	for _, clbInfo := range ins.ApiGw.CLB {
+		ips, err := ins.PolarisGWClient.ClbGetTargets(
+			clbInfo.Region, clbInfo.LoadBalanceId, clbInfo.ListenId,
+		)
+		if err != nil {
+			log.Logger.Errorf("call ClbGetTargets failed,info:%s",
+				ins.ShowSwitchInstanceInfo())
+			return err
+		}
+
+		addr := fmt.Sprintf("%s:%d", ins.Ip, ins.Port)
+		if len(ips) > 1 {
+			err := ins.PolarisGWClient.ClbDeRegister(
+				clbInfo.Region, clbInfo.LoadBalanceId, clbInfo.ListenId, addr,
+			)
+			if err != nil {
+				log.Logger.Errorf("Kickoff %s from clb failed,info:%s",
+					addr, ins.ShowSwitchInstanceInfo())
+				return err
+			}
+		} else {
+			log.Logger.Infof("CLB only left one ip, and no need to kickoff,info:%s",
+				ins.ShowSwitchInstanceInfo())
+		}
+	}
+	return nil
+}
+
+// KickOffPolaris remove the failed proxy from polaris
+func (ins *RedisProxySwitchInfo) KickOffPolaris() error {
+	if !ins.ApiGw.PolarisFlag {
+		log.Logger.Infof("switch proxy no need to kickoff Polaris,info:%s",
+			ins.ShowSwitchInstanceInfo())
+		return nil
+	}
+
+	for _, pinfo := range ins.ApiGw.Polaris {
+		ips, err := ins.PolarisGWClient.GetPolarisTargets(pinfo.Service)
+		if err != nil {
+			log.Logger.Errorf("call GetPolarisTargets failed,info:%s,err:%s",
+				ins.ShowSwitchInstanceInfo(), err.Error())
+			return err
+		}
+
+		addr := fmt.Sprintf("%s:%d", ins.Ip, ins.Port)
+		if len(ips) > 1 {
+			err := ins.PolarisGWClient.PolarisUnBindTarget(
+				pinfo.Service, pinfo.Token, addr)
+			if err != nil {
+				log.Logger.Errorf("Kickoff %s from polaris failed,info:%s,err=%s",
+					addr, ins.ShowSwitchInstanceInfo(), err.Error())
+				return err
+			}
+		} else {
+			log.Logger.Infof("Polaris only left one ip, and no need to kickoff,info:%s",
+				ins.ShowSwitchInstanceInfo())
+		}
+	}
+	return nil
+}
+
+// CheckFetchEntryDetail whether the cluster entry detail needs to be fetched
+func (ins *RedisProxySwitchInfo) CheckFetchEntryDetail() bool {
+	return ins.ApiGw.PolarisFlag || ins.ApiGw.CLBFlag || ins.ApiGw.DNSFlag
+}
+
+// GetEntryDetailInfo fetch the cluster entry detail and parse dns/clb/polaris info
+func (ins *RedisProxySwitchInfo) GetEntryDetailInfo() error {
+	entry, err := ins.CmDBClient.GetEntryDetail(ins.Cluster)
+	if err != nil {
+		log.Logger.Errorf("GetEntryDetail failed, info:%s,err:%s",
+			ins.ShowSwitchInstanceInfo(), err.Error())
+		return err
+	}
+
+	clusterEntryInfo, ok := entry[ins.Cluster]
+	if !ok {
+		entryErr := fmt.Errorf("GetEntryDetail can not find [%s] in [%v]",
+			ins.Cluster, entry)
+		log.Logger.Errorf(entryErr.Error())
+		return entryErr
+	}
+
+	entryInfo, ok := clusterEntryInfo.(map[string]interface{})
+	if !ok {
+		entryErr := fmt.Errorf("GetEntryDetail transfer type fail,[%v]",
+			clusterEntryInfo)
+		log.Logger.Errorf(entryErr.Error())
+		return entryErr
+	}
+	err = ParseAPIGWInfo(entryInfo, &ins.ApiGw)
+	if err != nil {
+		log.Logger.Errorf("Parse APIGW failed, info:%s,err:%s",
+			ins.ShowSwitchInstanceInfo(), err.Error())
+		return err
+	}
+	return nil
+}
+
+// UnMarshalRedisInstanceByCmdb parse redis detect info from raw cmdb instances
+func UnMarshalRedisInstanceByCmdb(instances []interface{},
+	uClusterType string, uMetaType string) ([]*RedisDetectInfoFromCmDB, error) {
+	var (
+		err error
+		ret []*RedisDetectInfoFromCmDB
+	)
+
+	for _, v := range instances {
+		ins := v.(map[string]interface{})
+		inf, ok := ins["cluster_type"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. cluster_type not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		clusterType := inf.(string)
+		if clusterType != uClusterType {
+			continue
+		}
+		inf, ok = ins["machine_type"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. machine_type not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		metaType := inf.(string)
+		if metaType != uMetaType {
+			continue
+		}
+		inf, ok = ins["status"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. status not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		status := inf.(string)
+		if status != constvar.RUNNING && status != constvar.AVAILABLE {
+			continue
+		}
+		inf, ok = ins["ip"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. ip not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		ip := inf.(string)
+		inf, ok = ins["port"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. port not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		port := int(inf.(float64))
+		inf, ok = ins["bk_biz_id"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. app not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		app := strconv.Itoa(int(inf.(float64)))
+
+		inf, ok = ins["cluster"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. cluster not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		cluster := inf.(string)
+
+		detectInfo := &RedisDetectInfoFromCmDB{
+			Ip:          ip,
+			Port:        port,
+			App:         app,
+			ClusterType: clusterType,
+			MetaType:    metaType,
+			Cluster:     cluster,
+		}
+
+		ret = append(ret, detectInfo)
+	}
+	return ret, nil
+}
+
+// CreateRedisProxySwitchInfo build redis proxy switch info from a raw cmdb instance
+func CreateRedisProxySwitchInfo(
+	instance interface{}, conf *config.Config,
+) (*RedisProxySwitchInfo, error) {
+	var err error
+
+	ins := instance.(map[string]interface{})
+	inf, ok := ins["ip"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. ip not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	ip := inf.(string)
+
+	inf, ok = ins["port"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. port not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	port := int(inf.(float64))
+
+	inf, ok = ins["bk_idc_city_id"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. role not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	idc := strconv.Itoa(int(inf.(float64)))
+
+	inf, ok = ins["status"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. ip not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	status := inf.(string)
+
+	inf, ok = ins["bk_biz_id"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. app not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	app := strconv.Itoa(int(inf.(float64)))
+
+	inf, ok = ins["cluster_type"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. cluster_type not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	clusterType := inf.(string)
+
+	inf, ok = ins["machine_type"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. machine_type not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	metaType := inf.(string)
+
+	inf, ok = ins["cluster"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. cluster not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	cluster := inf.(string)
+
+	inf, ok = ins["admin_port"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. admin_port not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	adminPort := int(inf.(float64))
+
+	inf, ok = ins["bind_entry"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. bind_entry not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	bindEntry := inf.(map[string]interface{})
+
+	cmdbClient, err := client.NewCmDBClient(&conf.DBConf.CMDB, conf.GetCloudId())
+	if err != nil {
+		return nil, err
+	}
+
+	hadbClient, err := client.NewHaDBClient(&conf.DBConf.HADB, conf.GetCloudId())
+	if err != nil {
+		return nil, err
+	}
+
+	swIns := RedisProxySwitchInfo{
+		BaseSwitch: dbutil.BaseSwitch{
+			Ip:          ip,
+			Port:        port,
+			IDC:         idc,
+			Status:      status,
+			App:         app,
+			ClusterType: clusterType,
+			MetaType:    metaType,
+			Cluster:     cluster,
+			CmDBClient:  cmdbClient,
+			HaDBClient:  hadbClient,
+		},
+		AdminPort: adminPort,
+	}
+
+	_, ok = bindEntry["dns"]
+	if !ok {
+		log.Logger.Infof("switch info not contain dns")
+		swIns.ApiGw.DNSFlag = false
+	} else {
+		swIns.ApiGw.DNSFlag = true
+		swIns.DnsClient, err = client.NewNameServiceClient(&conf.DNS.BindConf, conf.GetCloudId())
+		if err != nil {
+			log.Logger.Errorf("Create dns client failed,conf:%v",
+				conf.DNS.BindConf)
+			return nil, err
+		}
+	}
+
+	_, ok = bindEntry["polaris"]
+	if !ok {
+		log.Logger.Infof("switch info not contain polaris")
+		swIns.ApiGw.PolarisFlag = false
+	} else {
+		swIns.ApiGw.PolarisFlag = true
+	}
+
+	_, ok = bindEntry["clb"]
+	if !ok {
+		log.Logger.Infof("switch info not contain clb")
+		swIns.ApiGw.CLBFlag = false
+	} else {
+		swIns.ApiGw.CLBFlag = true
+	}
+	if swIns.ApiGw.CLBFlag || swIns.ApiGw.PolarisFlag {
+		swIns.PolarisGWClient, err = client.NewNameServiceClient(&conf.DNS.PolarisConf, conf.GetCloudId())
+		if err != nil {
+			log.Logger.Errorf("create polaris client failed,conf:%v",
+				conf.DNS.PolarisConf)
+			return nil, err
+		}
+	}
+
+	return &swIns, nil
+}
+
+// ParseAPIGWInfo parse polaris, clb and dns gateway info from the entry detail
+func ParseAPIGWInfo(entryDetail map[string]interface{}, apiGW *GWInfo) error {
+	if nil == apiGW {
+		return fmt.Errorf("input apiGW is nil")
+	}
+
+	log.Logger.Infof("input entryDetail:%v", entryDetail)
+	if apiGW.PolarisFlag {
+		pVal, ok := entryDetail["polaris"]
+		if !ok {
+			err := fmt.Errorf("have PolarisFlag ture but entryDetail lack polaris")
+			log.Logger.Errorf(err.Error())
+			return err
+		} else {
+			pArr, ok := pVal.([]interface{})
+			if !ok {
+				return fmt.Errorf("type assertion failed while parsing polaris")
+			}
+			for _, polaris := range pArr {
+				var pIns PolarisInfo
+				pInfo := polaris.(map[string]interface{})
+				pname, pok := pInfo["polaris_name"]
+				if pok {
+					pIns.Service = pname.(string)
+				}
+				ptoken, pok := pInfo["polaris_token"]
+				if pok {
+					pIns.Token = ptoken.(string)
+				}
+				apiGW.Polaris = append(apiGW.Polaris, pIns)
+			}
+		}
+	}
+
+	if apiGW.CLBFlag {
+		cVal, ok := entryDetail["clb"]
+		if !ok {
+			err := fmt.Errorf("have CLBFlag ture but entryDetail lack clb")
+			log.Logger.Errorf(err.Error())
+			return err
+		} else {
+			cArr, ok := cVal.([]interface{})
+			if !ok {
+				return fmt.Errorf("type assertion failed while parsing clb")
+			}
+			for _, clb := range cArr {
+				var cins CLBInfo
+				cinfo := clb.(map[string]interface{})
+				clbid, cok := cinfo["clb_id"]
+				if cok {
+					cins.LoadBalanceId = clbid.(string)
+				}
+				listenId, cok := cinfo["listener_id"]
+				if cok {
+					cins.ListenId = listenId.(string)
+				}
+				domain, cok := cinfo["clb_domain"]
+				if cok {
+					cins.Region = domain.(string)
+				}
+				apiGW.CLB = append(apiGW.CLB, cins)
+			}
+		}
+	}
+
+	if apiGW.DNSFlag {
+		dVal, ok := entryDetail["dns"]
+		if !ok {
+			err := fmt.Errorf("have DNSFlag ture but entryDetail lack dns")
+			log.Logger.Errorf(err.Error())
+			return err
+		} else {
+			dArr, ok := dVal.([]interface{})
+			if !ok {
+				return fmt.Errorf("type assertion failed while parsing dns")
+			}
+			for _, dns := range dArr {
+				var dnsIns DNSInfo
+				dinfo := dns.(map[string]interface{})
+				domain, dok := dinfo["domain"]
+				if dok {
+					dnsIns.Domain = domain.(string)
+				}
+				apiGW.DNS = append(apiGW.DNS, dnsIns)
+			}
+		}
+	}
+
+	return nil
+}
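+
+// For reference, ParseAPIGWInfo expects entryDetail shaped roughly like the
+// following (keys inferred from the parsing above; values illustrative):
+//
+//	{
+//	  "dns":     [{"domain": "example.db"}],
+//	  "clb":     [{"clb_id": "lb-1", "listener_id": "lsn-1", "clb_domain": "region"}],
+//	  "polaris": [{"polaris_name": "svc", "polaris_token": "token"}]
+//	}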
+
+// CreateRedisSwitchInfo build redis switch info from a raw cmdb instance
+func CreateRedisSwitchInfo(instance interface{}, conf *config.Config) (*RedisSwitchInfo, error) {
+	var err error
+
+	ins := instance.(map[string]interface{})
+	inf, ok := ins["ip"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. ip not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	ip := inf.(string)
+
+	inf, ok = ins["port"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. port not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	port := int(inf.(float64))
+
+	inf, ok = ins["bk_idc_city_id"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. role not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	idc := strconv.Itoa(int(inf.(float64)))
+
+	inf, ok = ins["status"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. ip not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	status := inf.(string)
+
+	inf, ok = ins["bk_biz_id"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. app not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	app := strconv.Itoa(int(inf.(float64)))
+
+	inf, ok = ins["cluster_type"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. cluster_type not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	clusterType := inf.(string)
+
+	inf, ok = ins["machine_type"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. machine_type not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	metaType := inf.(string)
+
+	inf, ok = ins["cluster"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. cluster not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	cluster := inf.(string)
+
+	_, ok = ins["bind_entry"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. bind_entry not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+
+	cmdbClient, err := client.NewCmDBClient(&conf.DBConf.CMDB, conf.GetCloudId())
+	if err != nil {
+		return nil, err
+	}
+
+	hadbClient, err := client.NewHaDBClient(&conf.DBConf.HADB, conf.GetCloudId())
+	if err != nil {
+		return nil, err
+	}
+
+	inf, ok = ins["receiver"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. receiver not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	slave := inf.([]interface{})
+
+	inf, ok = ins["proxyinstance_set"]
+	if !ok {
+		err = fmt.Errorf("umarshal failed. proxyinstance_set not exist")
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	proxy := inf.([]interface{})
+
+	swIns := RedisSwitchInfo{
+		BaseSwitch: dbutil.BaseSwitch{
+			Ip:          ip,
+			Port:        port,
+			IDC:         idc,
+			Status:      status,
+			App:         app,
+			ClusterType: clusterType,
+			MetaType:    metaType,
+			Cluster:     cluster,
+			CmDBClient:  cmdbClient,
+			HaDBClient:  hadbClient,
+		},
+		Timeout: conf.DBConf.Redis.Timeout,
+	}
+
+	for _, rawInfo := range slave {
+		mapInfo := rawInfo.(map[string]interface{})
+		inf, ok = mapInfo["ip"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. slave ip not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		slaveIp := inf.(string)
+		inf, ok = mapInfo["port"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. slave port not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		slavePort := inf.(float64)
+		swIns.Slave = append(swIns.Slave, RedisSlaveInfo{
+			Ip:   slaveIp,
+			Port: int(slavePort),
+		})
+	}
+
+	for _, rawInfo := range proxy {
+		mapInfo := rawInfo.(map[string]interface{})
+		inf, ok = mapInfo["ip"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. proxy ip not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		proxyIp := inf.(string)
+		inf, ok = mapInfo["port"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. proxy port not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		proxyPort := inf.(float64)
+		inf, ok = mapInfo["admin_port"]
+		if !ok {
+			err = fmt.Errorf("umarshal failed. proxy port not exist")
+			log.Logger.Errorf(err.Error())
+			return nil, err
+		}
+		proxyAdminPort := inf.(float64)
+		var status string
+		inf, ok = mapInfo["status"]
+		if !ok {
+			status = ""
+		} else {
+			status = inf.(string)
+		}
+		swIns.Proxy = append(swIns.Proxy, dbutil.ProxyInfo{
+			Ip:        proxyIp,
+			Port:      int(proxyPort),
+			AdminPort: int(proxyAdminPort),
+			Status:    status,
+		})
+	}
+	return &swIns, nil
+}
+
+// GetDetectBaseByInfo build a RedisDetectBase from CMDB detect info
+func GetDetectBaseByInfo(ins *RedisDetectInfoFromCmDB,
+	dbType string, conf *config.Config) *RedisDetectBase {
+	passwd := GetRedisMachinePasswd(ins.App, conf)
+	return &RedisDetectBase{
+		BaseDetectDB: dbutil.BaseDetectDB{
+			Ip:             ins.Ip,
+			Port:           ins.Port,
+			App:            ins.App,
+			DBType:         types.DBType(dbType),
+			ReporterTime:   time.Unix(0, 0),
+			ReportInterval: conf.AgentConf.ReportInterval + rand.Intn(20),
+			Status:         constvar.DBCheckSuccess,
+			Cluster:        ins.Cluster,
+			SshInfo: dbutil.Ssh{
+				Port:    conf.SSH.Port,
+				User:    conf.SSH.User,
+				Pass:    passwd,
+				Dest:    conf.SSH.Dest,
+				Timeout: conf.SSH.Timeout,
+			},
+		},
+		Pass:    ins.Pass,
+		Timeout: conf.DBConf.Redis.Timeout,
+	}
+}
+
+// GetDetectBaseByRsp build a RedisDetectBase from an agent-reported detect response
+func GetDetectBaseByRsp(ins *RedisDetectResponse,
+	dbType string, conf *config.Config) *RedisDetectBase {
+	passwd := GetRedisMachinePasswd(ins.App, conf)
+	return &RedisDetectBase{
+		BaseDetectDB: dbutil.BaseDetectDB{
+			Ip:             ins.DBIp,
+			Port:           ins.DBPort,
+			App:            ins.App,
+			DBType:         types.DBType(dbType),
+			ReporterTime:   time.Unix(0, 0),
+			ReportInterval: conf.AgentConf.ReportInterval + rand.Intn(20),
+			Status:         types.CheckStatus(ins.Status),
+			Cluster:        ins.Cluster,
+			SshInfo: dbutil.Ssh{
+				Port:    conf.SSH.Port,
+				User:    conf.SSH.User,
+				Pass:    passwd,
+				Dest:    conf.SSH.Dest,
+				Timeout: conf.SSH.Timeout,
+			},
+		},
+		Pass:    ins.Pass,
+		Timeout: conf.DBConf.Redis.Timeout,
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/redis_callback.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/redis_callback.go
new file mode 100644
index 0000000000..230ed56797
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/redis_callback.go
@@ -0,0 +1,85 @@
+package redis
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"encoding/json"
+)
+
+// NewRedisInstanceByCmdb builds the redis instances to detect from the info the agent fetched from CMDB
+func NewRedisInstanceByCmdb(instances []interface{},
+	conf *config.Config) ([]dbutil.DataBaseDetect, error) {
+	var (
+		err          error
+		unmarshalIns []*RedisDetectInfoFromCmDB
+		ret          []dbutil.DataBaseDetect
+	)
+
+	unmarshalIns, err = UnMarshalRedisInstanceByCmdb(instances,
+		constvar.RedisClusterType, constvar.RedisMetaType)
+
+	if err != nil {
+		return nil, err
+	}
+
+	for _, uIns := range unmarshalIns {
+		ret = append(ret, NewRedisDetectInstance(uIns, conf))
+	}
+
+	return ret, err
+}
+
+// DeserializeRedis deserializes a faulty instance reported by the agent
+func DeserializeRedis(jsonInfo []byte,
+	conf *config.Config) (dbutil.DataBaseDetect, error) {
+	response := RedisDetectResponse{}
+	err := json.Unmarshal(jsonInfo, &response)
+	if err != nil {
+		log.Logger.Errorf("json unmarshal failed. jsoninfo:\n%s\n, err:%s",
+			string(jsonInfo), err.Error())
+		return nil, err
+	}
+	ret := NewRedisDetectInstanceFromRsp(&response, conf)
+	return ret, nil
+}
+
+// NewRedisSwitchInstance build TendisCache switch instances from CMDB data
+func NewRedisSwitchInstance(instances []interface{},
+	conf *config.Config) ([]dbutil.DataBaseSwitch, error) {
+	var err error
+	var ret []dbutil.DataBaseSwitch
+	for _, v := range instances {
+		swIns, err := CreateRedisSwitchInfo(v, conf)
+		if err != nil {
+			log.Logger.Errorf("parse redis switch instance failed,err:%s",
+				err.Error())
+			continue
+		}
+
+		if swIns.MetaType != constvar.RedisMetaType {
+			log.Logger.Errorf("Create redis switch while the metaType[%s] != %s",
+				swIns.MetaType, constvar.RedisMetaType)
+			continue
+		}
+
+		pw := RedisSwitch{
+			RedisSwitchInfo: *swIns,
+			Config:          conf,
+		}
+
+		passwd, err := GetInstancePassByCluster(
+			constvar.TendisCache, pw.Cluster, conf,
+		)
+		if err != nil {
+			log.Logger.Errorf("get redis switch passwd failed,err:%s,info:%s",
+				err.Error(), pw.ShowSwitchInstanceInfo())
+		} else {
+			pw.Pass = passwd
+		}
+		ret = append(ret, &pw)
+	}
+
+	return ret, err
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/redis_detect.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/redis_detect.go
new file mode 100644
index 0000000000..5059d9a038
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/redis_detect.go
@@ -0,0 +1,202 @@
+package redis
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/util"
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// RedisDetectInstance detect instance for a TendisCache node
+type RedisDetectInstance struct {
+	RedisDetectBase
+}
+
+// Detection probe the redis instance, falling back to an SSH check when the db check fails
+func (ins *RedisDetectInstance) Detection() error {
+	err := ins.DoRedisDetection()
+	if err == nil && ins.Status == constvar.DBCheckSuccess {
+		log.Logger.Debugf("Redis check ok and return")
+		return nil
+	}
+
+	if err != nil && ins.Status == constvar.AUTHCheckFailed {
+		log.Logger.Errorf("Redis auth failed,pass:%s,status:%s",
+			ins.Pass, ins.Status)
+		return err
+	}
+
+	sshErr := ins.CheckSSH()
+	if sshErr != nil {
+		if util.CheckSSHErrIsAuthFail(sshErr) {
+			ins.Status = constvar.AUTHCheckFailed
+			log.Logger.Errorf("Redis check ssh auth failed.ip:%s,port:%d,app:%s,status:%s",
+				ins.Ip, ins.Port, ins.App, ins.Status)
+		} else {
+			ins.Status = constvar.SSHCheckFailed
+			log.Logger.Errorf("Redis check ssh failed.ip:%s,port:%d,app:%s,status:%s",
+				ins.Ip, ins.Port, ins.App, ins.Status)
+		}
+		return sshErr
+	} else {
+		log.Logger.Debugf("Redis check ssh success. ip:%s, port:%d, app:%s",
+			ins.Ip, ins.Port, ins.App)
+		ins.Status = constvar.SSHCheckSuccess
+		return nil
+	}
+}
+
+// DoRedisDetection send INFO to the instance, validate the reply, and write-check masters
+func (ins *RedisDetectInstance) DoRedisDetection() error {
+	r := &client.RedisClient{}
+	addr := fmt.Sprintf("%s:%d", ins.Ip, ins.Port)
+	r.Init(addr, ins.Pass, ins.Timeout, 0)
+	defer r.Close()
+
+	rsp, err := r.Info()
+	if err != nil {
+		redisErr := fmt.Errorf("redis do cmd err,err: %s", err.Error())
+		if util.CheckRedisErrIsAuthFail(err) {
+			ins.Status = constvar.AUTHCheckFailed
+			log.Logger.Errorf("redis detect auth failed,err:%s,status:%s",
+				redisErr.Error(), ins.Status)
+		} else {
+			ins.Status = constvar.DBCheckFailed
+			log.Logger.Errorf("redis detect failed,err:%s,status:%s",
+				redisErr.Error(), ins.Status)
+		}
+		return redisErr
+	}
+
+	rspInfo, ok := rsp.(string)
+	if !ok {
+		redisErr := fmt.Errorf("redis info response type is not string")
+		log.Logger.Errorf(redisErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return redisErr
+	}
+
+	if !strings.Contains(rspInfo, "redis_version:") {
+		redisErr := fmt.Errorf("response un-find redis_version, rsp:%s", rspInfo)
+		log.Logger.Errorf(redisErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return redisErr
+	}
+
+	role, err := ins.GetRole(rspInfo)
+	if nil != err {
+		redisErr := fmt.Errorf("response un-find role, rsp:%s", rspInfo)
+		log.Logger.Errorf(redisErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return redisErr
+	}
+
+	if role == "master" {
+		return ins.DoSetCheck(r)
+	}
+
+	ins.Status = constvar.DBCheckSuccess
+	return nil
+}
+
+// Serialization marshal the detect result that is reported to the agent
+func (ins *RedisDetectInstance) Serialization() ([]byte, error) {
+	response := RedisDetectResponse{
+		BaseDetectDBResponse: ins.NewDBResponse(),
+		Pass:                 ins.Pass,
+	}
+
+	resByte, err := json.Marshal(&response)
+	if err != nil {
+		log.Logger.Errorf("redis serialization failed. err:%s", err.Error())
+		return []byte{}, err
+	}
+	return resByte, nil
+}
+
+// GetRole extract the role field from an INFO reply
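+// e.g. an INFO payload containing "role:master\r\n" (illustrative) yields
+// "master".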
+func (ins *RedisDetectInstance) GetRole(info string) (string, error) {
+	beginPos := strings.Index(info, "role:")
+	if beginPos < 0 {
+		roleErr := fmt.Errorf("RedisCache rsp not contains role")
+		log.Logger.Errorf(roleErr.Error())
+		return "", roleErr
+	}
+
+	endPos := strings.Index(info[beginPos:], "\r\n")
+	if endPos < 0 {
+		roleErr := fmt.Errorf("RedisCache the substr is invalid,%s",
+			info[beginPos:])
+		log.Logger.Errorf(roleErr.Error())
+		return "", roleErr
+	}
+
+	roleInfo := info[beginPos+len("role:") : beginPos+endPos]
+	return roleInfo, nil
+}
+
+// DoSetCheck verify a master is writable by setting a probe key
+func (ins *RedisDetectInstance) DoSetCheck(r *client.RedisClient) error {
+	keyFormat := "dbha:agent:%s"
+	checkKey := fmt.Sprintf(keyFormat, ins.Ip)
+	checkTime := time.Now().Format("2006-01-02 15:04:05")
+
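+	// e.g. SET dbha:agent:10.0.0.1 "2023-05-29 15:14:30" (illustrative IP);
+	// a MOVED reply is also accepted below since a cluster master may redirect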
+	cmdArgv := []string{"SET", checkKey, checkTime}
+	rsp, err := r.DoCommand(cmdArgv)
+	if err != nil {
+		redisErr := fmt.Errorf("response un-find role, rsp:%s", rsp)
+		log.Logger.Errorf(redisErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return redisErr
+	}
+
+	rspInfo, ok := rsp.(string)
+	if !ok {
+		redisErr := fmt.Errorf("redis info response type is not string")
+		log.Logger.Errorf(redisErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return redisErr
+	}
+
+	if strings.Contains(rspInfo, "OK") || strings.Contains(rspInfo, "MOVED") {
+		ins.Status = constvar.DBCheckSuccess
+		return nil
+	} else {
+		redisErr := fmt.Errorf("set check failed,rsp:%s", rspInfo)
+		log.Logger.Errorf(redisErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return redisErr
+	}
+}
+
+// ShowDetectionInfo return a printable summary of the detect instance
+func (ins *RedisDetectInstance) ShowDetectionInfo() string {
+	str := fmt.Sprintf("ip:%s, port:%d, status:%s, DBType:%s",
+		ins.Ip, ins.Port, ins.Status, ins.DBType)
+	return str
+}
+
+// NewRedisDetectInstance build a detect instance from CMDB info
+func NewRedisDetectInstance(ins *RedisDetectInfoFromCmDB,
+	conf *config.Config) *RedisDetectInstance {
+	return &RedisDetectInstance{
+		RedisDetectBase: *GetDetectBaseByInfo(ins, constvar.TendisCache, conf),
+	}
+}
+
+// NewRedisDetectInstanceFromRsp build a detect instance from an agent response
+func NewRedisDetectInstanceFromRsp(ins *RedisDetectResponse,
+	conf *config.Config) *RedisDetectInstance {
+	return &RedisDetectInstance{
+		RedisDetectBase: *GetDetectBaseByRsp(ins, constvar.TendisCache, conf),
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/redis_switch.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/redis_switch.go
new file mode 100644
index 0000000000..7d28ba2731
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/redis_switch.go
@@ -0,0 +1,594 @@
+package redis
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/util"
+	"fmt"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+// RedisSwitch switch instance for a TendisCache master
+type RedisSwitch struct {
+	RedisSwitchInfo
+	Config *config.Config
+	FLock  *util.FileLock
+}
+
+// CheckSwitch check switch preconditions: exactly one slave, the file lock, and healthy twemproxy
+func (ins *RedisSwitch) CheckSwitch() (bool, error) {
+	ins.ReportLogs(
+		constvar.CHECK_SWITCH_INFO, fmt.Sprintf("handle instance[%s:%d]", ins.Ip, ins.Port),
+	)
+	if len(ins.Slave) != 1 {
+		redisErr := fmt.Errorf("redis have invald slave[%d]", len(ins.Slave))
+		log.Logger.Errorf("%s info:%s", redisErr.Error(), ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, redisErr.Error())
+		return false, redisErr
+	}
+
+	ins.SetInfo(constvar.SWITCH_INFO_SLAVE_IP, ins.Slave[0].Ip)
+	ins.SetInfo(constvar.SWITCH_INFO_SLAVE_PORT, ins.Slave[0].Port)
+	err := ins.DoLockByFile()
+	if err != nil {
+		redisErrLog := fmt.Sprintf("RedisSwitch lockfile failed,err:%s", err.Error())
+		log.Logger.Errorf("%s info:%s", redisErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, redisErrLog)
+		return false, err
+	}
+
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO, fmt.Sprintf("twemproxy infos:%v", ins.Proxy))
+	_, err = ins.CheckTwemproxyPing()
+	if err != nil {
+		ins.DoUnLockByFile()
+		redisErrLog := fmt.Sprintf("RedisSwitch check twemproxy failed,err:%s", err.Error())
+		log.Logger.Errorf("%s info:%s", redisErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, redisErrLog)
+		return false, err
+	}
+
+	ins.ReportLogs(
+		constvar.CHECK_SWITCH_INFO, "RedisSwitch lock file and check twemproxy ok",
+	)
+	return true, nil
+}
+
+// DoSwitch promote the slave via SLAVEOF NO ONE, then repoint every twemproxy
+func (ins *RedisSwitch) DoSwitch() error {
+	log.Logger.Infof("redis do switch.info:{%s}", ins.ShowSwitchInstanceInfo())
+	r := &client.RedisClient{}
+	defer r.Close()
+
+	slave := ins.Slave[0]
+	addr := fmt.Sprintf("%s:%d", slave.Ip, slave.Port)
+	r.Init(addr, ins.Pass, ins.Timeout, 0)
+
+	rsp, err := r.SlaveOf("no", "one")
+	if err != nil {
+		ins.DoUnLockByFile()
+		redisErrLog := fmt.Sprintf("Slave[%s] exec slaveOf no one failed,%s", addr, err.Error())
+		log.Logger.Errorf("%s info:%s", redisErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.SWITCH_FAIL, redisErrLog)
+		return err
+	}
+
+	rspInfo, ok := rsp.(string)
+	if !ok {
+		ins.DoUnLockByFile()
+		redisErr := fmt.Errorf("redis info response type is not string")
+		log.Logger.Errorf("%s info:%s", redisErr.Error(), ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.SWITCH_FAIL, redisErr.Error())
+		return redisErr
+	}
+
+	log.Logger.Infof("redis switch slaveof addr:%s,rsp:%s", addr, rspInfo)
+	if !strings.Contains(rspInfo, "OK") {
+		ins.DoUnLockByFile()
+		redisErr := fmt.Errorf("redis do slaveof failed,slave:%d,rsp:%s",
+			len(ins.Slave), rspInfo)
+		log.Logger.Errorf("%s info:%s", redisErr.Error(), ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.SWITCH_FAIL, redisErr.Error())
+		return redisErr
+	}
+
+	slaveOfOk := fmt.Sprintf("do slaveof no one ok,mark slave[%s] as master", addr)
+	log.Logger.Infof("RedisSwitch  %s info:%s", slaveOfOk, ins.ShowSwitchInstanceInfo())
+	ins.ReportLogs(constvar.SWITCH_INFO, slaveOfOk)
+
+	ok, switchNum := ins.TwemproxySwitchM2S(ins.Ip, ins.Port, slave.Ip, slave.Port)
+	if !ok {
+		switchPart := fmt.Sprintf("redis switch proxy part success,succ:{%d},fail[%d],total:{%d}",
+			switchNum, len(ins.Proxy)-switchNum, len(ins.Proxy))
+		log.Logger.Infof("%s info:%s", switchPart, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.SWITCH_FAIL, switchPart)
+		return nil
+	}
+
+	switchAllOk := fmt.Sprintf("redis switch twemproxy ok,succ[%d],fail[%d],total[%d]",
+		switchNum, len(ins.Proxy)-switchNum, len(ins.Proxy))
+	log.Logger.Infof("%s info:%s", switchAllOk, ins.ShowSwitchInstanceInfo())
+	ins.ReportLogs(constvar.SWITCH_INFO, switchAllOk)
+	return nil
+}
+
+// ShowSwitchInstanceInfo return a printable summary of the switch instance
+func (ins *RedisSwitch) ShowSwitchInstanceInfo() string {
+	format := `<%s#%d IDC:%s Status:%s App:%s ClusterType:%s MachineType:%s Cluster:%s>`
+	str := fmt.Sprintf(
+		format, ins.Ip, ins.Port, ins.IDC, ins.Status, ins.App,
+		ins.ClusterType, ins.MetaType, ins.Cluster,
+	)
+	if len(ins.Slave) > 0 {
+		str = fmt.Sprintf("%s Switch from MASTER:<%s#%d> to SLAVE:<%s#%d>",
+			str, ins.Ip, ins.Port, ins.Slave[0].Ip, ins.Slave[0].Port)
+	}
+	return str
+}
+
+// RollBack nothing to roll back for the redis switch
+func (ins *RedisSwitch) RollBack() error {
+	return nil
+}
+
+// UpdateMetaInfo swap the master/slave roles in CMDB
+func (ins *RedisSwitch) UpdateMetaInfo() error {
+	defer ins.DoUnLockByFile()
+	ins.ReportLogs(constvar.UPDATEMETA_INFO, "handle swap_role for cmdb")
+	if len(ins.Slave) != 1 {
+		redisErr := fmt.Errorf("redis have invald slave[%d]", len(ins.Slave))
+		log.Logger.Errorf("%s info:%s", redisErr.Error(), ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.UPDATEMETA_FAIL, redisErr.Error())
+		return redisErr
+	}
+
+	err := ins.CmDBClient.SwapRedisRole(ins.Cluster, ins.Ip, ins.Port,
+		ins.Slave[0].Ip, ins.Slave[0].Port)
+	if err != nil {
+		redisErrLog := fmt.Sprintf("swap redis role failed. err:%s", err.Error())
+		log.Logger.Errorf("%s info:%s", redisErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.UPDATEMETA_FAIL, redisErrLog)
+		return err
+	}
+	swapOk := fmt.Sprintf("cluster[%s] swap_role slave[%s#%d] master[%s#%d] ok",
+		ins.Cluster, ins.Ip, ins.Port, ins.Slave[0].Ip, ins.Slave[0].Port)
+	ins.ReportLogs(constvar.UPDATEMETA_INFO, swapOk)
+	return nil
+}
+
+// DoLockByFile do file lock by cluster and ip
+func (ins *RedisSwitch) DoLockByFile() error {
+	format := "/tmp/tendis-cluster-switch-%s.%s.lock"
+	path := fmt.Sprintf(format, ins.Cluster, ins.Ip)
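+	// e.g. /tmp/tendis-cluster-switch-testcluster.10.0.0.1.lock (illustrative values)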
+	fl := util.NewFileLock(path)
+
+	err := fl.Lock()
+	if err != nil {
+		lockErrLog := fmt.Sprintf("lockfile failed,path:%s,err:%s", path, err.Error())
+		log.Logger.Errorf("%s info:%s", lockErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, lockErrLog)
+		return err
+	} else {
+		log.Logger.Infof("RedisSwitch lockfile ok,path:%s,info:%s",
+			path, ins.ShowSwitchInstanceInfo())
+		ins.FLock = fl
+		ins.ReportLogs(
+			constvar.CHECK_SWITCH_INFO, fmt.Sprintf("instance lock file %s ok", path),
+		)
+		return nil
+	}
+}
+
+// DoUnLockByFile release the file lock
+func (ins *RedisSwitch) DoUnLockByFile() {
+	if nil == ins.FLock {
+		log.Logger.Errorf("RedisSwitch filelock uninit and nil,info:%s",
+			ins.ShowSwitchInstanceInfo())
+		return
+	}
+
+	err := ins.FLock.UnLock()
+	if err != nil {
+		lockErrLog := fmt.Sprintf("RedisSwitch unlock failed,path:%s,err:%s",
+			ins.FLock.Path, err.Error())
+		log.Logger.Errorf("%s info:%s", lockErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, lockErrLog)
+		return
+	} else {
+		log.Logger.Infof("RedisSwitch unlock ok,path:%s,info:%s",
+			ins.FLock.Path, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(
+			constvar.CHECK_SWITCH_INFO, fmt.Sprintf("instance unlock file %s ok", ins.FLock.Path),
+		)
+		return
+	}
+}
+
+// CheckTwemproxyPing ping all twemproxy concurrently, kick bad ones, and verify the rest agree on their server lists
+func (ins *RedisSwitch) CheckTwemproxyPing() ([]dbutil.ProxyInfo, error) {
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO,
+		fmt.Sprintf("twemproxy ping:start check_ping, with [%d] twemproxy",
+			len(ins.Proxy)),
+	)
+	var wg sync.WaitGroup
+	var proxyLock sync.Mutex
+	kickProxys := make([]dbutil.ProxyInfo, 0)
+	proxyServers := make([]map[string]string, 0)
+	for _, proxy := range ins.Proxy {
+		wg.Add(1)
+		go func(proxyInfo dbutil.ProxyInfo) {
+			defer wg.Done()
+			psvrs, err := ins.DoTwemproxyPing(proxyInfo)
+			if err != nil {
+				if psvrs != nil && ins.ProxyStatusIsRunning(proxy) {
+					proxyLock.Lock()
+					kickProxys = append(kickProxys, proxyInfo)
+					proxyServers = append(proxyServers, psvrs)
+					proxyLock.Unlock()
+				}
+			} else {
+				if psvrs != nil && ins.ProxyStatusIsRunning(proxy) {
+					proxyLock.Lock()
+					proxyServers = append(proxyServers, psvrs)
+					proxyLock.Unlock()
+				}
+			}
+		}(proxy)
+	}
+
+	wg.Wait()
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO,
+		fmt.Sprintf("twemproxy ping:[%d] check ping,with [%d] ok,[%d] kickoff",
+			len(ins.Proxy), len(proxyServers)-len(kickProxys), len(kickProxys)))
+	ins.KickOffTwemproxy(kickProxys)
+
+	err := CheckInstancesEqual(proxyServers)
+	if err != nil {
+		redisErrLog := fmt.Sprintf("twemproxy conf not equal,err:%s, info:%s",
+			err.Error(), ins.ShowSwitchInstanceInfo())
+		log.Logger.Errorf(redisErrLog)
+		ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, redisErrLog)
+		return nil, err
+	}
+
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO,
+		"all twemproxy nosqlproxy servers is equal")
+	return kickProxys, nil
+}
+
+// KickOffTwemproxy kick the twemproxy instances that failed the ping check
+func (ins *RedisSwitch) KickOffTwemproxy(kickProxys []dbutil.ProxyInfo) {
+	if len(kickProxys) == 0 {
+		ins.ReportLogs(constvar.CHECK_SWITCH_INFO,
+			"all twemproxy sames to be ok,ignore kickOff...")
+		return
+	}
+
+	kickLog := fmt.Sprintf("need to kickoff twemproxy [%d]", len(kickProxys))
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO, kickLog)
+	log.Logger.Infof("RedisSwitch %s", kickLog)
+	for _, proxy := range kickProxys {
+		ins.ReportLogs(constvar.CHECK_SWITCH_INFO,
+			fmt.Sprintf("do kickoff bad ping twemproxys,twemproxy:%v", proxy))
+		ins.DoKickTwemproxy(proxy)
+	}
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO, "kickoff bad ping twemproxy done")
+}
+
+// ProxyStatusIsRunning report whether the proxy status is RUNNING (a missing status counts as running)
+func (ins *RedisSwitch) ProxyStatusIsRunning(proxy dbutil.ProxyInfo) bool {
+	if len(proxy.Status) == 0 {
+		log.Logger.Infof("RedisSwitch proxy has no status and skip")
+		return true
+	}
+
+	if proxy.Status != constvar.RUNNING {
+		log.Logger.Infof("RedisSwitch proxy status[%s] is not RUNNING", proxy.Status)
+		return false
+	} else {
+		log.Logger.Infof("RedisSwitch proxy status[%s] is RUNNING", proxy.Status)
+		return true
+	}
+}
+
+// ParseTwemproxyResponse parse the reponse of twemproxy
+func ParseTwemproxyResponse(rsp string) (map[string]string, error) {
+	proxyIns := make(map[string]string)
+	lines := strings.Split(rsp, "\n")
+	if len(lines) == 0 {
+		redisErr := fmt.Errorf("twemproxy nosqlproxy servers return none")
+		return proxyIns, redisErr
+	}
+
+	for _, line := range lines {
+		eles := strings.Split(line, " ")
+		if len(eles) < 3 {
+			continue
+		}
+
+		proxyIns[eles[0]] = eles[2]
+	}
+	return proxyIns, nil
+}
+
+// CheckInstancesEqual check whether the server maps of all twemproxy are equivalent
+func CheckInstancesEqual(proxysSvrs []map[string]string) error {
+	if len(proxysSvrs) <= 1 {
+		return nil
+	}
+
+	filterProxys := make([]map[string]string, 0)
+	for _, p := range proxysSvrs {
+		if len(p) > 0 {
+			filterProxys = append(filterProxys, p)
+		}
+	}
+
+	log.Logger.Debugf("RedisSwitch compare proxys, proxySvrs:%d filterProxySvrs:%d",
+		len(proxysSvrs), len(filterProxys))
+	if len(filterProxys) <= 1 {
+		return nil
+	}
+
+	first := filterProxys[0]
+	for i := 1; i < len(filterProxys); i++ {
+		cmpOne := filterProxys[i]
+		for k, v := range first {
+			val, ok := cmpOne[k]
+			if !ok {
+				err := fmt.Errorf("compare twemproxy server failed,%s-%s not find, twemproxyConf:%v",
+					k, v, filterProxys)
+				log.Logger.Errorf(err.Error())
+				return err
+			}
+
+			if val != v {
+				err := fmt.Errorf("compare twemproxy server failed,[%s-%s] vs [%s-%s], twemproxyConf:%v",
+					k, v, k, val, filterProxys)
+				log.Logger.Errorf(err.Error())
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// DoTwemproxyPing get redis instance information from twemproxy by netcat
+func (ins *RedisSwitch) DoTwemproxyPing(proxy dbutil.ProxyInfo) (map[string]string, error) {
+	rsp, err := ins.CommunicateTwemproxy(proxy.Ip, proxy.AdminPort,
+		"get nosqlproxy servers")
+	if err != nil {
+		checkErrLog := fmt.Sprintf("twemproxy ping: communicate failed,proxy[%s:%d],err=%s",
+			proxy.Ip, proxy.Port, err.Error())
+		log.Logger.Errorf("RedisSwitch %s info:%s", checkErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, checkErrLog)
+		return nil, nil
+	}
+
+	proxyIns, err := ParseTwemproxyResponse(rsp)
+	if err != nil {
+		checkErrLog := fmt.Sprintf("twemproxy ping: parse rsp[%s] failed,proxy[%s:%d]",
+			rsp, proxy.Ip, proxy.Port)
+		log.Logger.Errorf("RedisSwitch %s info:%s", checkErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, checkErrLog)
+		return nil, nil
+	}
+
+	masterInfo := fmt.Sprintf("%s:%d", ins.Ip, ins.Port)
+	if _, ok := proxyIns[masterInfo]; !ok {
+		format := "twemproxy[%s:%d:%d] not have %s and need to kick,addr[%s:%d],twemproxyConf:%s"
+		redisErr := fmt.Errorf(format, proxy.Ip, proxy.Port,
+			proxy.AdminPort, masterInfo, proxy.Ip, proxy.Port, rsp)
+		log.Logger.Errorf("RedisSwitch %s info:%s", redisErr.Error(), ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, redisErr.Error())
+		return proxyIns, redisErr
+	}
+	log.Logger.Debugf("RedisSwitch do ping twemproxy proxy[%s:%d] ok, twemproxyConf:%v",
+		proxy.Ip, proxy.Port, rsp)
+	return proxyIns, nil
+}
+
+// DoKickTwemproxy kick a bad twemproxy out of all of its access entries (dns/polaris/clb)
+func (ins *RedisSwitch) DoKickTwemproxy(proxy dbutil.ProxyInfo) error {
+	log.Logger.Infof("RedisSwitch kick twemproxy[%s:%d-%d]",
+		proxy.Ip, proxy.Port, proxy.AdminPort)
+	infos, err := ins.CmDBClient.GetDBInstanceInfoByIp(proxy.Ip)
+	if err != nil {
+		redisErr := fmt.Errorf("get twemproxy[%s:%d:%d] from cmdb failed",
+			proxy.Ip, proxy.Port, proxy.AdminPort)
+		log.Logger.Errorf("%s info:%s", redisErr.Error(), ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, redisErr.Error())
+		return redisErr
+	}
+
+	if len(infos) == 0 {
+		redisErr := fmt.Errorf("the number of proxy[%d] is invalid", len(infos))
+		log.Logger.Errorf("%s info:%s", redisErr.Error(), ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, redisErr.Error())
+		return redisErr
+	}
+
+	log.Logger.Infof("RedisSwitch kick debug 2,infoLen=%d", len(infos))
+	for _, info := range infos {
+		proxyIns, err := CreateRedisProxySwitchInfo(info, ins.Config)
+		if err != nil {
+			log.Logger.Errorf(err.Error())
+			continue
+		}
+
+		if proxyIns.Ip != proxy.Ip || proxyIns.Port != proxy.Port ||
+			proxyIns.MetaType != constvar.TwemproxyMetaType {
+			log.Logger.Infof("RedisSwitch skip kick[%s:%d-%s],proxy[%s:%d-%s]",
+				proxy.Ip, proxy.Port, constvar.TwemproxyMetaType,
+				proxyIns.Ip, proxyIns.Port, proxyIns.MetaType,
+			)
+			continue
+		}
+
+		if proxyIns.Status != constvar.RUNNING &&
+			proxyIns.Status != constvar.AVAILABLE {
+			kickSkipLog := fmt.Sprintf("skip while status is [%s],twemproxy[%s:%d]",
+				proxyIns.Status, proxyIns.Ip, proxyIns.Port)
+			log.Logger.Infof("RedisSwitch %s", kickSkipLog)
+			ins.ReportLogs(constvar.CHECK_SWITCH_INFO, kickSkipLog)
+			continue
+		}
+
+		if proxyIns.CheckFetchEntryDetail() {
+			edErr := proxyIns.GetEntryDetailInfo()
+			if edErr != nil {
+				kickErrLog := fmt.Sprintf("GetEntryDetail failed while Kick Twemproxy:%s,err:%s",
+					proxyIns.ShowSwitchInstanceInfo(), edErr.Error())
+				log.Logger.Errorf("RedisSwitch %s", kickErrLog)
+				return edErr
+			}
+		}
+
+		err = proxyIns.KickOffDns()
+		if err != nil {
+			kickErrLog := fmt.Sprintf("kick twemproxy failed by dns,proxy=%s,err=%s",
+				proxyIns.ShowSwitchInstanceInfo(), err.Error())
+			log.Logger.Errorf("RedisSwitch %s", kickErrLog)
+			ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, kickErrLog)
+			return err
+		}
+		err = proxyIns.KickOffPolaris()
+		if err != nil {
+			kickErrLog := fmt.Sprintf("kick twemproxy failed by polaris,proxy=%s,err=%s",
+				proxyIns.ShowSwitchInstanceInfo(), err.Error())
+			log.Logger.Errorf("RedisSwitch %s", kickErrLog)
+			ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, kickErrLog)
+			return err
+		}
+		err = proxyIns.KickOffClb()
+		if err != nil {
+			kickErrLog := fmt.Sprintf("kick twemproxy failed by clb,proxy=%s,err=%s",
+				proxyIns.ShowSwitchInstanceInfo(), err.Error())
+			log.Logger.Errorf("RedisSwitch %s", kickErrLog)
+			ins.ReportLogs(constvar.CHECK_SWITCH_FAIL, kickErrLog)
+			return err
+		}
+	}
+
+	kickOkLog := fmt.Sprintf(" kick twemproxy[%s:%d-%d] ok",
+		proxy.Ip, proxy.Port, proxy.AdminPort)
+	log.Logger.Infof("RedisSwitch %s", kickOkLog)
+	ins.ReportLogs(constvar.CHECK_SWITCH_INFO, kickOkLog)
+	return nil
+}
+
+// TwemproxySwitchM2S repoint all twemproxy from the old master to the new master concurrently
+func (ins *RedisSwitch) TwemproxySwitchM2S(masterIp string, masterPort int,
+	slaveIp string, slavePort int) (bool, int) {
+	var successSwitchNum int64 = 0
+	var wg sync.WaitGroup
+	for _, proxy := range ins.Proxy {
+		wg.Add(1)
+		go func(proxyInfo dbutil.ProxyInfo) {
+			defer wg.Done()
+			log.Logger.Infof("RedisCache twemproxy[%s:%d:%d] switch,master[%s:%d]->slave[%s:%d]",
+				proxyInfo.Ip, proxyInfo.Port, proxyInfo.AdminPort,
+				masterIp, masterPort, slaveIp, slavePort)
+			pok, err := ins.TwemproxySwitchSingle(
+				proxyInfo, masterIp, masterPort, slaveIp, slavePort,
+			)
+			if err != nil {
+				log.Logger.Infof("redisCache twemproxy switch failed,err:%s,info:%s",
+					err.Error(), ins.ShowSwitchInstanceInfo())
+				return
+			}
+
+			if !pok {
+				log.Logger.Infof("redisCache twemproxy switch failed,info:%s",
+					ins.ShowSwitchInstanceInfo())
+			} else {
+				log.Logger.Infof("RedisCache twemproxy switch M2S ok,proxy[%s:%d-%d],info:%s",
+					proxyInfo.Ip, proxyInfo.Port, proxyInfo.AdminPort, ins.ShowSwitchInstanceInfo())
+				atomic.AddInt64(&successSwitchNum, 1)
+			}
+		}(proxy)
+	}
+
+	wg.Wait()
+	switchSucc := int(successSwitchNum)
+	if switchSucc == len(ins.Proxy) {
+		log.Logger.Infof("RedisCache twemproxy switch M2S,all succ[%d]", len(ins.Proxy))
+		return true, switchSucc
+	} else {
+		log.Logger.Infof("RedisCache twemproxy switch M2S,part succ[%d],all[%d]",
+			switchSucc, len(ins.Proxy))
+		return false, switchSucc
+	}
+}
+
+// TwemproxySwitchSingle repoint a single twemproxy from the old master to the new master
+func (ins *RedisSwitch) TwemproxySwitchSingle(proxy dbutil.ProxyInfo,
+	masterIp string, masterPort int,
+	slaveIp string, slavePort int) (bool, error) {
+	format := "change nosqlproxy %s %s"
+	masterAddr := fmt.Sprintf("%s:%d", masterIp, masterPort)
+	slaveAddr := fmt.Sprintf("%s:%d", slaveIp, slavePort)
+	cmdInfo := fmt.Sprintf(format, masterAddr, slaveAddr)
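+	// e.g. "change nosqlproxy 10.0.0.1:30000 10.0.0.2:30000" (illustrative
+	// addresses), sent to the twemproxy admin port below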
+
+	rsp, err := ins.CommunicateTwemproxy(proxy.Ip, proxy.AdminPort, cmdInfo)
+	if err != nil {
+		redisErr := fmt.Errorf("twemproxy[%s:%d:%d] switch %s to %s failed,cmd:%s,err:%s",
+			proxy.Ip, proxy.Port, proxy.AdminPort, masterAddr, slaveAddr, cmdInfo, err.Error())
+		log.Logger.Errorf("RedisSwitch %s info:%s", redisErr.Error(), ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.SWITCH_FAIL, redisErr.Error())
+		return false, redisErr
+	}
+
+	if strings.Contains(rsp, "success") {
+		return true, nil
+	} else {
+		redisErr := fmt.Errorf("switch twemproxy[%s:%d:%d] from %s to %s failed",
+			proxy.Ip, proxy.Port, proxy.AdminPort, masterAddr, slaveAddr)
+		log.Logger.Errorf("RedisSwitch %s info:%s", redisErr.Error(), ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.SWITCH_FAIL, redisErr.Error())
+		return false, redisErr
+	}
+}
+
+// CommunicateTwemproxy connect to twemproxy and send command by tcp
+func (ins *RedisSwitch) CommunicateTwemproxy(
+	ip string, port int, text string,
+) (string, error) {
+	nc := &client.NcClient{}
+	addr := fmt.Sprintf("%s:%d", ip, port)
+	defer nc.Close()
+
+	err := nc.DoConn(addr, ins.Timeout)
+	if err != nil {
+		log.Logger.Errorf("RedisSwitch nc conn failed,addr:%s,timeout:%d,err:%s",
+			addr, ins.Timeout, err.Error())
+		return "", err
+	}
+
+	err = nc.WriteText(text)
+	if err != nil {
+		log.Logger.Errorf("RedisSwitch nc write failed,addr:%s,timeout:%d,err:%s",
+			addr, ins.Timeout, err.Error())
+		return "", err
+	}
+
+	rsp := make([]byte, 1024*10)
+	n, err := nc.Read(rsp)
+	if err != nil {
+		log.Logger.Errorf("RedisSwitch nc read failed,addr:%s,timeout:%d,err:%s",
+			addr, ins.Timeout, err.Error())
+		return "", err
+	}
+	return string(rsp[:n]), nil
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/svr_password.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/svr_password.go
new file mode 100644
index 0000000000..dc45ba55c2
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/svr_password.go
@@ -0,0 +1,370 @@
+package redis
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/types"
+	"fmt"
+	"time"
+)
+
+var passwdCache Cache
+var (
+	passwdCacheSize = 3000
+	passwdCacheTime = 10 * time.Minute
+)
+
+func init() {
+	passwdCache = NewLocked(passwdCacheSize)
+}
+
+// GetRedisMachinePasswd get redis machine password from remote dbconfig server
+func GetRedisMachinePasswd(
+	app string, conf *config.Config,
+) string {
+	format := "%s-%s-%s-%s-%s-%s-%s"
+	key := fmt.Sprintf(format, app, constvar.ConfOSFile, constvar.ConfOSType,
+		"", constvar.ConfOSApp, app, constvar.ConfCommon)
+
+	cachePasswd, ok := passwdCache.Get(key)
+	if ok {
+		log.Logger.Debugf("RedisSSHPWD get cache ok, key:%s,passwd:%s",
+			key, cachePasswd)
+		return cachePasswd.(string)
+	}
+
+	passwdMap, err := GetMachinePasswordRemote(app, constvar.ConfOSFile,
+		constvar.ConfOSType, "", constvar.ConfOSApp, app,
+		constvar.ConfCommon, conf.DNS.RemoteConf, conf.GetCloudId())
+	if err != nil {
+		log.Logger.Errorf("RedisSSHPWD fetch remote err[%s],return conf,pass:%s",
+			err.Error(), conf.SSH.Pass)
+		return conf.SSH.Pass
+	}
+
+	passwdVal, ok := passwdMap[constvar.ConfUserPasswd]
+	if !ok {
+		log.Logger.Errorf("RedisSSHPWD not find [%s] in map[%v]",
+			constvar.ConfUserPasswd, passwdMap)
+		return conf.SSH.Pass
+	}
+
+	passwd := passwdVal.(string)
+	passwdCache.Add(key, passwd, passwdCacheTime)
+	log.Logger.Debugf("RedisSSHPWD %s get passwd[%s] ok", key, passwd)
+	return passwd
+}
+
+// GetMysqlMachinePasswd get mysql machine password from remote dbconfig server
+func GetMysqlMachinePasswd(
+	conf *config.Config,
+) string {
+	format := "%s-%s-%s-%s-%s-%s-%s"
+	key := fmt.Sprintf(format, "0", constvar.ConfMysqlFile, constvar.ConfMysqlType,
+		constvar.ConfMysqlName, constvar.ConfOSPlat, "0", constvar.ConfMysqlNamespace)
+
+	cachePasswd, ok := passwdCache.Get(key)
+	if ok {
+		log.Logger.Debugf("MysqlSSHPWD get cache ok, key:%s,passwd:%s",
+			key, cachePasswd)
+		return cachePasswd.(string)
+	}
+
+	passwdMap, err := GetMachinePasswordRemote("0", constvar.ConfMysqlFile,
+		constvar.ConfMysqlType, constvar.ConfMysqlName, constvar.ConfOSPlat,
+		"0", constvar.ConfMysqlNamespace, conf.DNS.RemoteConf, conf.GetCloudId())
+	if err != nil {
+		log.Logger.Errorf("MysqlSSHPWD fetch remote err[%s],return conf,pass:%s",
+			err.Error(), conf.SSH.Pass)
+		return conf.SSH.Pass
+	}
+
+	passwdVal, ok := passwdMap["os_mysql_pwd"]
+	if !ok {
+		log.Logger.Errorf("MysqlSSHPWD not find [%s] in map[%v]",
+			constvar.ConfUserPasswd, passwdMap)
+		return conf.SSH.Pass
+	}
+
+	passwd := passwdVal.(string)
+	passwdCache.Add(key, passwd, passwdCacheTime)
+	log.Logger.Debugf("MysqlSSHPWD %s get passwd[%s] ok", key, passwd)
+	return passwd
+}
+
+// GetMachinePasswordRemote get machine password from remote dbconfig server
+func GetMachinePasswordRemote(app string, confFile string, confType string,
+	confName string, levelName string, levelValue string, namespace string,
+	remoteconf config.APIConfig, cloudId int) (map[string]interface{}, error) {
+	remoteConfigClient, err := client.NewRemoteConfigClient(&remoteconf, cloudId)
+	if err != nil {
+		log.Logger.Errorf("SSHPWD new dbconfig client failed,err:%s",
+			err.Error())
+		return make(map[string]interface{}), err
+	}
+
+	configData, err := remoteConfigClient.GetConfigItem(
+		app, confFile, confType, confName,
+		levelName, levelValue, namespace,
+	)
+	if err != nil {
+		log.Logger.Errorf("SSHPWD call failed,err:%s", err.Error())
+		return make(map[string]interface{}), err
+	}
+
+	if len(configData) == 0 {
+		passErr := fmt.Errorf("SSHPWD no config data")
+		return make(map[string]interface{}), passErr
+	}
+
+	content, cok := configData[0]["content"]
+	if !cok {
+		passErr := fmt.Errorf("SSHPWD content not exist")
+		log.Logger.Errorf(passErr.Error())
+		return make(map[string]interface{}), passErr
+	}
+
+	passwdMap, pok := content.(map[string]interface{})
+	if !pok {
+		passErr := fmt.Errorf("SSHPWD transfer type failed")
+		log.Logger.Errorf(passErr.Error())
+		return make(map[string]interface{}), passErr
+	}
+
+	return passwdMap, nil
+}
+
+// GetInstancePass get redis instances cluster password by batch api
+func GetInstancePass(dbType types.DBType,
+	insArr []dbutil.DataBaseDetect, conf *config.Config) (int, error) {
+	if dbType == constvar.MySQLProxy || dbType == constvar.MySQL {
+		return 0, nil
+	}
+
+	cType, cFile, cName, lName, namespace, err := GetConfigParamByDbType(dbType)
+	if err != nil {
+		passErr := fmt.Errorf("PassWDClusters get passwd by dbtype[%s] failed", dbType)
+		log.Logger.Errorf(passErr.Error())
+		return 0, passErr
+	}
+
+	clusterPasswd := make(map[string]string)
+	clusters := make([]string, 0)
+	clusterExist := make(map[string]string)
+	for _, ins := range insArr {
+		_, ok := clusterExist[ins.GetCluster()]
+		if ok {
+			continue
+		}
+		clusterExist[ins.GetCluster()] = ""
+
+		key := fmt.Sprintf("%s-%s-%s-%s-%s-%s",
+			cFile, cType, cName, lName, namespace, ins.GetCluster())
+		passwdVal, ok := passwdCache.Get(key)
+		if ok {
+			passwdStr := passwdVal.(string)
+			clusterPasswd[ins.GetCluster()] = passwdStr
+		} else {
+			clusters = append(clusters, ins.GetCluster())
+		}
+	}
+	log.Logger.Debugf("PassWDClusters cachePasswd:%v,NeedQuery:%v",
+		clusterPasswd, clusters)
+
+	remoteConfigClient, err := client.NewRemoteConfigClient(&conf.DNS.RemoteConf, conf.GetCloudId())
+	if err != nil {
+		log.Logger.Errorf("PassWDClusters new db config client failed,err:%s",
+			err.Error())
+		return 0, err
+	}
+
+	newPasswds := QueryPasswords(remoteConfigClient, cFile,
+		cType, cName, lName, clusters, namespace)
+	for k, v := range newPasswds {
+		clusterPasswd[k] = v
+		key := fmt.Sprintf("%s-%s-%s-%s-%s-%s",
+			cFile, cType, cName, lName, namespace, k)
+		passwdCache.Add(key, v, passwdCacheTime)
+	}
+
+	succCount := 0
+	for _, ins := range insArr {
+		host, port := ins.GetAddress()
+		passwd, find := clusterPasswd[ins.GetCluster()]
+		if !find {
+			log.Logger.Errorf("PassWDClusters ins[%s:%d] db[%s] not find cluster[%s] in passwds",
+				host, port, dbType, ins.GetCluster())
+		} else {
+			err := SetPasswordToInstance(dbType, passwd, ins)
+			if err != nil {
+				log.Logger.Errorf("PassWDClusters ins[%s:%d] db[%s] cluster[%s] set passwd[%s] fail",
+					host, port, dbType, ins.GetCluster(), passwd)
+			} else {
+				succCount++
+			}
+		}
+	}
+
+	if succCount != len(insArr) {
+		passErr := fmt.Errorf("PassWDClusters not all instance get passwd,succ:%d,all:%d",
+			succCount, len(insArr))
+		log.Logger.Errorf(passErr.Error())
+		return succCount, passErr
+	}
+	return succCount, nil
+}
+
+// GetInstancePassByCluster get single redis cluster password
+func GetInstancePassByCluster(dbType types.DBType,
+	cluster string, conf *config.Config) (string, error) {
+	if dbType == constvar.MySQLProxy || dbType == constvar.MySQL {
+		return "", nil
+	}
+
+	cType, cFile, cName, lName, namespace, err := GetConfigParamByDbType(dbType)
+	if err != nil {
+		passErr := fmt.Errorf("PassWDCluster get passwd by dbtype[%s] failed", dbType)
+		log.Logger.Errorf(passErr.Error())
+		return "", passErr
+	}
+
+	remoteConfigClient, err := client.NewRemoteConfigClient(&conf.DNS.RemoteConf, conf.GetCloudId())
+	if err != nil {
+		log.Logger.Errorf("PassWDCluster new dbconfig client failed,err:%s", err.Error())
+		return "", err
+	}
+
+	key := fmt.Sprintf("%s-%s-%s-%s-%s-%s",
+		cFile, cType, cName, lName, namespace, cluster)
+	cachePasswd, ok := passwdCache.Get(key)
+	if ok {
+		return cachePasswd.(string), nil
+	}
+
+	clusterPasswds := QueryPasswords(remoteConfigClient, cFile,
+		cType, cName, lName, []string{cluster}, namespace)
+
+	passwdStr, ok := clusterPasswds[cluster]
+	if ok {
+		passwdCache.Add(key, passwdStr, passwdCacheTime)
+		return passwdStr, nil
+	} else {
+		passErr := fmt.Errorf("PassWDCluster key[%s] unfind passwd", key)
+		log.Logger.Errorf(passErr.Error())
+		return "", passErr
+	}
+}
+
+// QueryPasswords batch-query passwords for the given clusters from dbconfig
+func QueryPasswords(remoteConfigClient *client.RemoteConfigClient, cFile string,
+	cType string, cName string, lName string,
+	clusters []string, namespace string) map[string]string {
+	clusterPasswd := make(map[string]string)
+	configData, err := remoteConfigClient.BatchGetConfigItem(
+		cFile, cType, cName,
+		lName, clusters, namespace,
+	)
+	if err != nil {
+		log.Logger.Errorf(err.Error())
+		return clusterPasswd
+	}
+
+	content, cok := configData["content"]
+	if !cok {
+		passErr := fmt.Errorf("PassWDQuery content not exist")
+		log.Logger.Errorf(passErr.Error())
+		return clusterPasswd
+	}
+
+	passwdMap, pok := content.(map[string]interface{})
+	if !pok {
+		passErr := fmt.Errorf("PassWDQuery transfer type failed")
+		log.Logger.Errorf(passErr.Error())
+		return clusterPasswd
+	}
+
+	for _, c := range clusters {
+		passwdInfo, find := passwdMap[c]
+		if !find {
+			log.Logger.Errorf("PassWDQuery can not find passwd for cluster[%s]", c)
+			continue
+		}
+
+		cname2passwd, ok := passwdInfo.(map[string]interface{})
+		if !ok {
+			log.Logger.Errorf("PassWDQuery [%v] trans to map[string]interface{} failed",
+				passwdInfo)
+			continue
+		}
+
+		passwd, ok := cname2passwd[cName]
+		if !ok {
+			log.Logger.Errorf("PassWDQuery not find [%s] in cname2passwd[%v]",
+				cName, cname2passwd)
+			continue
+		}
+		clusterPasswd[c] = passwd.(string)
+	}
+	return clusterPasswd
+}
+
+// SetPasswordToInstance set password to redis detection instance
+func SetPasswordToInstance(dbType types.DBType,
+	passwd string, ins dbutil.DataBaseDetect) error {
+	switch dbType {
+	case constvar.TendisCache:
+		cacheP, isCache := ins.(*RedisDetectInstance)
+		if !isCache {
+			return fmt.Errorf("the type[%s] of instance transfer type failed", dbType)
+		}
+		cacheP.Pass = passwd
+	case constvar.Twemproxy:
+		twemP, isTwem := ins.(*TwemproxyDetectInstance)
+		if !isTwem {
+			return fmt.Errorf("the type[%s] of instance transfer type failed", dbType)
+		}
+		twemP.Pass = passwd
+	case constvar.Predixy:
+		predixyP, isPredixy := ins.(*PredixyDetectInstance)
+		if !isPredixy {
+			return fmt.Errorf("the type[%s] of instance transfer type failed", dbType)
+		}
+		predixyP.Pass = passwd
+	case constvar.Tendisplus:
+		tendisP, isTendis := ins.(*TendisplusDetectInstance)
+		if !isTendis {
+			return fmt.Errorf("the type[%s] of instance transfer type failed", dbType)
+		}
+		tendisP.Pass = passwd
+	default:
+		return fmt.Errorf("the type[%s] of instance is invalid", dbType)
+	}
+	return nil
+}
+
+// GetConfigParamByDbType the return value:
+//
+//	conf_type conf_file conf_name level_name namespace
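+// e.g. constvar.Twemproxy maps to ("proxyconf", "Twemproxy-latest",
+// "password", "cluster", constvar.RedisClusterType).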
+func GetConfigParamByDbType(dbType types.DBType,
+) (string, string, string, string, string, error) {
+	switch dbType {
+	case constvar.TendisCache:
+		return "proxyconf", "Twemproxy-latest", "redis_password", "cluster", constvar.RedisClusterType, nil
+	case constvar.Twemproxy:
+		return "proxyconf", "Twemproxy-latest", "password", "cluster", constvar.RedisClusterType, nil
+	case constvar.Predixy:
+		return "proxyconf", "Predixy-latest", "password", "cluster", constvar.TendisplusClusterType, nil
+	case constvar.Tendisplus:
+		return "proxyconf", "Predixy-latest", "redis_password", "cluster", constvar.TendisplusClusterType, nil
+	default:
+		// an unknown db type cannot be mapped to a config item
+		return "", "", "", "", "", fmt.Errorf("unsupported dbType[%s]", dbType)
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/tendisplus_callback.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/tendisplus_callback.go
new file mode 100644
index 0000000000..53842eeb89
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/tendisplus_callback.go
@@ -0,0 +1,85 @@
+package redis
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"encoding/json"
+)
+
+// NewTendisplusInstanceByCmdb builds the tendisplus instances to detect from the info the agent fetched from CMDB
+func NewTendisplusInstanceByCmdb(instances []interface{},
+	conf *config.Config) ([]dbutil.DataBaseDetect, error) {
+	var (
+		err          error
+		unmarshalIns []*RedisDetectInfoFromCmDB
+		ret          []dbutil.DataBaseDetect
+	)
+
+	unmarshalIns, err = UnMarshalRedisInstanceByCmdb(instances,
+		constvar.TendisplusClusterType, constvar.TendisplusMetaType)
+
+	if err != nil {
+		return nil, err
+	}
+
+	for _, uIns := range unmarshalIns {
+		ret = append(ret, NewTendisplusDetectInstance(uIns, conf))
+	}
+
+	return ret, err
+}
+
+// DeserializeTendisplus deserializes a faulty instance reported by the agent
+func DeserializeTendisplus(jsonInfo []byte,
+	conf *config.Config) (dbutil.DataBaseDetect, error) {
+	response := RedisDetectResponse{}
+	err := json.Unmarshal(jsonInfo, &response)
+	if err != nil {
+		log.Logger.Errorf("json unmarshal failed. jsoninfo:\n%s\n, err:%s",
+			string(jsonInfo), err.Error())
+		return nil, err
+	}
+	ret := NewTendisplusDetectInstanceFromRsp(&response, conf)
+	return ret, nil
+}
+
+// NewTendisplusSwitchInstance build tendisplus switch instances from CMDB data
+func NewTendisplusSwitchInstance(instances []interface{},
+	conf *config.Config) ([]dbutil.DataBaseSwitch, error) {
+	var err error
+	var ret []dbutil.DataBaseSwitch
+	for _, v := range instances {
+		swIns, err := CreateRedisSwitchInfo(v, conf)
+		if err != nil {
+			log.Logger.Errorf("parse tendisplus switch instance failed,err:%s",
+				err.Error())
+			continue
+		}
+
+		if swIns.MetaType != constvar.TendisplusMetaType {
+			log.Logger.Errorf("Create tendisplus switch while the metaType[%s] != %s",
+				swIns.MetaType, constvar.TendisplusMetaType)
+			continue
+		}
+
+		pw := TendisplusSwitch{
+			RedisSwitchInfo: *swIns,
+		}
+
+		passwd, err := GetInstancePassByCluster(
+			constvar.Tendisplus, pw.Cluster, conf,
+		)
+		if err != nil {
+			log.Logger.Errorf("get tendisplus switch passwd failed,err:%s,info:%s",
+				err.Error(), pw.ShowSwitchInstanceInfo())
+		} else {
+			pw.Pass = passwd
+		}
+
+		ret = append(ret, &pw)
+	}
+
+	return ret, err
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/tendisplus_detect.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/tendisplus_detect.go
new file mode 100644
index 0000000000..5790fb0b18
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/tendisplus_detect.go
@@ -0,0 +1,203 @@
+package redis
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/util"
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// TendisplusDetectInstance detect instance for a tendisplus node
+type TendisplusDetectInstance struct {
+	RedisDetectBase
+}
+
+// Detection probe the tendisplus instance, falling back to an SSH check when the db check fails
+func (ins *TendisplusDetectInstance) Detection() error {
+	err := ins.DoTendisDetection()
+	if err == nil && ins.Status == constvar.DBCheckSuccess {
+		log.Logger.Debugf("Tendis check ok and return")
+		return nil
+	}
+
+	if err != nil && ins.Status == constvar.AUTHCheckFailed {
+		log.Logger.Errorf("Tendis auth failed,pass:%s,status:%s",
+			ins.Pass, ins.Status)
+		return err
+	}
+
+	sshErr := ins.CheckSSH()
+	if sshErr != nil {
+		if util.CheckSSHErrIsAuthFail(sshErr) {
+			ins.Status = constvar.AUTHCheckFailed
+			log.Logger.Errorf("Tendis check ssh auth failed.ip:%s,port:%d,app:%s,status:%s",
+				ins.Ip, ins.Port, ins.App, ins.Status)
+		} else {
+			ins.Status = constvar.SSHCheckFailed
+			log.Logger.Errorf("Tendis check ssh failed.ip:%s,port:%d,app:%s,status:%s",
+				ins.Ip, ins.Port, ins.App, ins.Status)
+		}
+		return sshErr
+	} else {
+		log.Logger.Debugf("Tendis check ssh success. ip:%s, port:%d, app:%s",
+			ins.Ip, ins.Port, ins.App)
+		ins.Status = constvar.SSHCheckSuccess
+		return nil
+	}
+}
+
+// DoTendisDetection execute detection for tendisplus instance
+func (ins *TendisplusDetectInstance) DoTendisDetection() error {
+	r := &client.RedisClient{}
+	addr := fmt.Sprintf("%s:%d", ins.Ip, ins.Port)
+	r.Init(addr, ins.Pass, ins.Timeout, 0)
+	defer r.Close()
+
+	rsp, err := r.Info()
+	if err != nil {
+		redisErr := fmt.Errorf("tendisplus exec detection failed,info:%s, err:%s",
+			ins.ShowDetectionInfo(), err.Error())
+		if util.CheckRedisErrIsAuthFail(err) {
+			ins.Status = constvar.AUTHCheckFailed
+			log.Logger.Errorf("tendisplus detect auth failed,err:%s,status:%s",
+				redisErr.Error(), ins.Status)
+		} else {
+			ins.Status = constvar.DBCheckFailed
+			log.Logger.Errorf("tendisplus detect failed,err:%s,status:%s",
+				redisErr.Error(), ins.Status)
+		}
+		return redisErr
+	}
+
+	rspInfo, ok := rsp.(string)
+	if !ok {
+		redisErr := fmt.Errorf("tendisplus info response type is not string")
+		log.Logger.Errorf(redisErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return redisErr
+	}
+
+	if !strings.Contains(rspInfo, "redis_version:") {
+		redisErr := fmt.Errorf("response un-find redis_version, rsp:%s", rspInfo)
+		log.Logger.Errorf(redisErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return redisErr
+	}
+
+	role, err := ins.GetRole(rspInfo)
+	if nil != err {
+		redisErr := fmt.Errorf("response un-find role, rsp:%s", rspInfo)
+		log.Logger.Errorf(redisErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return redisErr
+	}
+
+	if role == "master" {
+		return ins.DoSetCheck()
+	}
+	ins.Status = constvar.DBCheckSuccess
+	return nil
+}
+
+// Serialization marshal the detect result that is reported to the agent
+func (ins *TendisplusDetectInstance) Serialization() ([]byte, error) {
+	response := RedisDetectResponse{
+		BaseDetectDBResponse: ins.NewDBResponse(),
+		Pass:                 ins.Pass,
+	}
+
+	resByte, err := json.Marshal(&response)
+	if err != nil {
+		log.Logger.Errorf("Tendisplus serialization failed. err:%s", err.Error())
+		return []byte{}, err
+	}
+	return resByte, nil
+}
+
+// GetRole extract the role field from an INFO reply
+func (ins *TendisplusDetectInstance) GetRole(info string) (string, error) {
+	beginPos := strings.Index(info, "role:")
+	if beginPos < 0 {
+		roleErr := fmt.Errorf("tendisplus rsp not contains role")
+		log.Logger.Errorf(roleErr.Error())
+		return "", roleErr
+	}
+
+	endPos := strings.Index(info[beginPos:], "\r\n")
+	if endPos < 0 {
+		roleErr := fmt.Errorf("tendisplus the substr is invalid,%s",
+			info[beginPos:])
+		log.Logger.Errorf(roleErr.Error())
+		return "", roleErr
+	}
+
+	roleInfo := info[beginPos+len("role:") : beginPos+endPos]
+	return roleInfo, nil
+}
+
+// DoSetCheck verify a master is writable by setting a probe key in cluster mode
+func (ins *TendisplusDetectInstance) DoSetCheck() error {
+	r := &client.RedisClient{}
+	addr := fmt.Sprintf("%s:%d", ins.Ip, ins.Port)
+	r.InitCluster(addr, ins.Pass, ins.Timeout)
+	defer r.Close()
+
+	keyFormat := "dbha:agent:%s"
+	checkKey := fmt.Sprintf(keyFormat, ins.Ip)
+	checkTime := time.Now().Format("2006-01-02 15:04:05")
+	cmdArgv := []string{"SET", checkKey, checkTime}
+
+	rsp, err := r.DoCommand(cmdArgv)
+	if err != nil {
+		redisErr := fmt.Errorf("tendisplus set value failed,err:%s", err.Error())
+		log.Logger.Errorf(redisErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return redisErr
+	}
+
+	rspInfo, ok := rsp.(string)
+	if !ok {
+		redisErr := fmt.Errorf("tendisplus info response type is not string")
+		log.Logger.Errorf(redisErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return redisErr
+	}
+
+	if strings.Contains(rspInfo, "OK") || strings.Contains(rspInfo, "MOVED") {
+		ins.Status = constvar.DBCheckSuccess
+		return nil
+	} else {
+		redisErr := fmt.Errorf("set check failed,rsp:%s", rspInfo)
+		log.Logger.Errorf(redisErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return redisErr
+	}
+}
+
+// ShowDetectionInfo return a printable summary of the detect instance
+func (ins *TendisplusDetectInstance) ShowDetectionInfo() string {
+	str := fmt.Sprintf("ip:%s, port:%d, status:%s, DBType:%s",
+		ins.Ip, ins.Port, ins.Status, ins.DBType)
+	return str
+}
+
+// NewTendisplusDetectInstance build a detect instance from CMDB info
+func NewTendisplusDetectInstance(ins *RedisDetectInfoFromCmDB,
+	conf *config.Config) *TendisplusDetectInstance {
+	return &TendisplusDetectInstance{
+		RedisDetectBase: *GetDetectBaseByInfo(ins, constvar.Tendisplus, conf),
+	}
+}
+
+// NewTendisplusDetectInstanceFromRsp build a detect instance from an agent response
+func NewTendisplusDetectInstanceFromRsp(ins *RedisDetectResponse,
+	conf *config.Config) *TendisplusDetectInstance {
+	return &TendisplusDetectInstance{
+		RedisDetectBase: *GetDetectBaseByRsp(ins, constvar.Tendisplus, conf),
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/tendisplus_switch.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/tendisplus_switch.go
new file mode 100644
index 0000000000..3e5eb9777a
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/tendisplus_switch.go
@@ -0,0 +1,140 @@
+package redis
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"fmt"
+	"strings"
+)
+
+// TendisplusSwitch switch instance for a tendisplus master
+type TendisplusSwitch struct {
+	RedisSwitchInfo
+	slave2M *RedisSlaveInfo
+}
+
+// CheckSwitch no extra precondition check: tendisplus relies on cluster auto-failover
+func (ins *TendisplusSwitch) CheckSwitch() (bool, error) {
+	return true, nil
+}
+
+// DoSwitch check whether one of the slaves has already been promoted to master by the cluster
+func (ins *TendisplusSwitch) DoSwitch() error {
+	log.Logger.Infof("redis do switch. info:{%s}", ins.ShowSwitchInstanceInfo())
+	slave2Master := false
+	for i := range ins.Slave {
+		// take the address of the slice element rather than of the loop
+		// variable, so slave2M does not alias a reused iteration variable
+		slave := &ins.Slave[i]
+		isMaster, err := ins.CheckSlaveMaster(slave)
+		if err != nil {
+			log.Logger.Infof("Tendisplus check whether slave is master err[%s]", err.Error())
+			continue
+		}
+
+		if isMaster {
+			slave2Master = true
+			ins.slave2M = slave
+			log.Logger.Infof("Tendisplus find slave[%s:%d] is master",
+				slave.Ip, slave.Port)
+			break
+		}
+	}
+
+	if slave2Master {
+		log.Logger.Infof("Tendisplus slave[%s:%d] has switched to master",
+			ins.slave2M.Ip, ins.slave2M.Port)
+		return nil
+	}
+
+	switchInfoLog := fmt.Sprintf("no slave has changed to master yet, info:%s",
+		ins.ShowSwitchInstanceInfo())
+	ins.ReportLogs(constvar.SWITCH_INFO, switchInfoLog)
+	log.Logger.Infof(switchInfoLog)
+	return nil
+}
+
+// ShowSwitchInstanceInfo return switch instance info as a printable string
+func (ins *TendisplusSwitch) ShowSwitchInstanceInfo() string {
+	format := `<%s#%d IDC:%s Status:%s App:%s ClusterType:%s MachineType:%s Cluster:%s> switch`
+	str := fmt.Sprintf(
+		format, ins.Ip, ins.Port, ins.IDC, ins.Status, ins.App,
+		ins.ClusterType, ins.MetaType, ins.Cluster,
+	)
+	return str
+}
+
+// RollBack nothing to roll back for tendisplus
+func (ins *TendisplusSwitch) RollBack() error {
+	return nil
+}
+
+// UpdateMetaInfo nothing to update: the cluster maintains its own topology
+func (ins *TendisplusSwitch) UpdateMetaInfo() error {
+	return nil
+}
+
+// CheckConfig no extra config check needed
+func (ins *TendisplusSwitch) CheckConfig() bool {
+	return true
+}
+
+// CheckSlaveMaster check if the slave of this instance is change to master role
+func (ins *TendisplusSwitch) CheckSlaveMaster(slave *RedisSlaveInfo) (bool, error) {
+	r := &client.RedisClient{}
+	addr := fmt.Sprintf("%s:%d", slave.Ip, slave.Port)
+	r.Init(addr, ins.Pass, ins.Timeout, 0)
+	defer r.Close()
+
+	rsp, err := r.Info()
+	if err != nil {
+		log.Logger.Errorf("slave exec redis info failed,addr=%s,err=%s",
+			addr, err.Error())
+		return false, err
+	}
+
+	rspInfo, ok := rsp.(string)
+	if !ok {
+		redisErr := fmt.Errorf("slave exec redis info response type is not string")
+		log.Logger.Errorf(redisErr.Error())
+		return false, redisErr
+	}
+
+	if !strings.Contains(rspInfo, "cluster_enabled:1") {
+		redisErr := fmt.Errorf("slave not support cluster,addr:%s,info:%s,rsp:%s",
+			addr, ins.ShowSwitchInstanceInfo(), rspInfo)
+		log.Logger.Errorf(redisErr.Error())
+		return false, redisErr
+	}
+
+	log.Logger.Debugf("Tendisplus switch slaveCheckMaster slave[%s] rsp:%s",
+		addr, rspInfo)
+	if strings.Contains(rspInfo, "role:master") {
+		log.Logger.Infof("Slave already begin master,info:{%s},slave:{%s:%d}",
+			ins.ShowSwitchInstanceInfo(), slave.Ip, slave.Port)
+		return true, nil
+	}
+	return false, nil
+}
+
+// ParseRole parse tendisplus role by the response of info command
+func (ins *TendisplusSwitch) ParseRole(info string) (string, error) {
+	beginPos := strings.Index(info, "role:")
+	if beginPos < 0 {
+		roleErr := fmt.Errorf("tendisplus rsp not contains role")
+		log.Logger.Errorf(roleErr.Error())
+		return "", roleErr
+	}
+
+	endPos := strings.Index(info[beginPos:], "\r\n")
+	if endPos < 0 {
+		roleErr := fmt.Errorf("tendisplus the substr is invalid,%s",
+			info[beginPos:])
+		log.Logger.Errorf(roleErr.Error())
+		return "", roleErr
+	}
+
+	roleInfo := info[beginPos+len("role:") : beginPos+endPos]
+	return roleInfo, nil
+}
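
For reference, a minimal standalone sketch of the slicing ParseRole performs on a typical INFO reply. The reply text below is a made-up sample, not output captured from a real tendisplus instance:

```go
package main

import (
	"fmt"
	"strings"
)

// parseRole mirrors TendisplusSwitch.ParseRole: locate "role:" and cut up to the next CRLF.
func parseRole(info string) (string, error) {
	beginPos := strings.Index(info, "role:")
	if beginPos < 0 {
		return "", fmt.Errorf("rsp does not contain role")
	}
	// endPos is relative to beginPos, which is why both offsets are added below.
	endPos := strings.Index(info[beginPos:], "\r\n")
	if endPos < 0 {
		return "", fmt.Errorf("role substring is invalid")
	}
	return info[beginPos+len("role:") : beginPos+endPos], nil
}

func main() {
	info := "# Replication\r\nrole:master\r\nconnected_slaves:1\r\n"
	role, _ := parseRole(info)
	fmt.Println(role) // master
}
```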
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/twemproxy_callback.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/twemproxy_callback.go
new file mode 100644
index 0000000000..a14d0ed322
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/twemproxy_callback.go
@@ -0,0 +1,91 @@
+package redis
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"encoding/json"
+)
+
+// NewTwemproxyInstanceByCmdb build the instances to detect from the info the Agent fetched from CMDB
+func NewTwemproxyInstanceByCmdb(instances []interface{},
+	conf *config.Config) ([]dbutil.DataBaseDetect, error) {
+	var (
+		err          error
+		unmarshalIns []*RedisDetectInfoFromCmDB
+		ret          []dbutil.DataBaseDetect
+	)
+
+	unmarshalIns, err = UnMarshalRedisInstanceByCmdb(instances,
+		constvar.RedisClusterType, constvar.TwemproxyMetaType)
+
+	if err != nil {
+		return nil, err
+	}
+
+	for _, uIns := range unmarshalIns {
+		ret = append(ret, NewTwemproxyDetectInstance(uIns, conf))
+	}
+
+	return ret, nil
+}
+
+// DeserializeTwemproxy deserialize a faulty instance reported by the Agent
+func DeserializeTwemproxy(jsonInfo []byte,
+	conf *config.Config) (dbutil.DataBaseDetect, error) {
+	response := RedisDetectResponse{}
+	err := json.Unmarshal(jsonInfo, &response)
+	if err != nil {
+		log.Logger.Errorf("json unmarshal failed. jsoninfo:\n%s\n, err:%s",
+			string(jsonInfo), err.Error())
+		return nil, err
+	}
+	ret := NewTwemproxyDetectInstanceFromRsp(&response, conf)
+	return ret, nil
+}
+
+// NewTwemproxySwitchInstance build switch instances from CMDB info
+func NewTwemproxySwitchInstance(instances []interface{},
+	conf *config.Config) ([]dbutil.DataBaseSwitch, error) {
+	var ret []dbutil.DataBaseSwitch
+	for _, v := range instances {
+		swIns, err := CreateRedisProxySwitchInfo(v, conf)
+		if err != nil {
+			log.Logger.Errorf("parse twemproxy switch instance failed,err:%s",
+				err.Error())
+			continue
+		}
+
+		if swIns.MetaType != constvar.TwemproxyMetaType {
+			log.Logger.Errorf("Create Twemproxy switch while the metaType[%s] != %s",
+				swIns.MetaType, constvar.TwemproxyMetaType)
+			continue
+		}
+		if swIns.CheckFetchEntryDetail() {
+			edErr := swIns.GetEntryDetailInfo()
+			if edErr != nil {
+				log.Logger.Errorf("GetEntryDetail failed in NewTwemproxySwitch,err:%s",
+					edErr.Error())
+			}
+		}
+
+		pw := TwemproxySwitch{
+			RedisProxySwitchInfo: *swIns,
+		}
+
+		passwd, err := GetInstancePassByCluster(
+			constvar.Twemproxy, pw.Cluster, conf,
+		)
+		if err != nil {
+			log.Logger.Errorf("get twemproxy switch passwd failed,err:%s,info:%s",
+				err.Error(), pw.ShowSwitchInstanceInfo())
+		} else {
+			pw.Pass = passwd
+		}
+		ret = append(ret, &pw)
+	}
+
+	return ret, nil
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/twemproxy_detect.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/twemproxy_detect.go
new file mode 100644
index 0000000000..46d772087f
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/twemproxy_detect.go
@@ -0,0 +1,134 @@
+package redis
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/util"
+	"encoding/json"
+	"fmt"
+	"strings"
+)
+
+// TwemproxyDetectInstance twemproxy detect instance
+type TwemproxyDetectInstance struct {
+	RedisDetectBase
+}
+
+// Detection detect twemproxy; fall back to an ssh check when the db check fails
+func (ins *TwemproxyDetectInstance) Detection() error {
+	err := ins.DoTwemproxyDetection()
+	if err == nil && ins.Status == constvar.DBCheckSuccess {
+		log.Logger.Debugf("Twemproxy check ok and return")
+		return nil
+	}
+
+	if err != nil && ins.Status == constvar.AUTHCheckFailed {
+		log.Logger.Errorf("Twemproxy auth failed,pass:%s,status:%s",
+			ins.Pass, ins.Status)
+		return err
+	}
+
+	sshErr := ins.CheckSSH()
+	if sshErr != nil {
+		if util.CheckSSHErrIsAuthFail(sshErr) {
+			ins.Status = constvar.AUTHCheckFailed
+			log.Logger.Errorf("Twemproxy check ssh auth failed.ip:%s,port:%d,app:%s,status:%s",
+				ins.Ip, ins.Port, ins.App, ins.Status)
+		} else {
+			ins.Status = constvar.SSHCheckFailed
+			log.Logger.Errorf("Twemproxy check ssh failed.ip:%s,port:%d,app:%s,status:%s",
+				ins.Ip, ins.Port, ins.App, ins.Status)
+		}
+		return sshErr
+	} else {
+		log.Logger.Debugf("Twemproxy check ssh success. ip:%s, port:%d, app:%s",
+			ins.Ip, ins.Port, ins.App)
+		ins.Status = constvar.SSHCheckSuccess
+		return nil
+	}
+}
+
+// DoTwemproxyDetection execute detection for twemproxy instance
+func (ins *TwemproxyDetectInstance) DoTwemproxyDetection() error {
+	var twemproxyErr error
+	r := &client.RedisClient{}
+	addr := fmt.Sprintf("%s:%d", ins.Ip, ins.Port)
+	r.Init(addr, ins.Pass, ins.Timeout, 0)
+	defer r.Close()
+
+	rsp, err := r.Type("twemproxy_mon")
+	if err != nil {
+		twemproxyErr = fmt.Errorf("do twemproxy cmd err,err: %s,info;%s",
+			err.Error(), ins.ShowDetectionInfo())
+		if util.CheckRedisErrIsAuthFail(err) {
+			ins.Status = constvar.AUTHCheckFailed
+			log.Logger.Errorf("tendisplus detect auth failed,err:%s,status:%s",
+				twemproxyErr.Error(), ins.Status)
+		} else {
+			ins.Status = constvar.DBCheckFailed
+			log.Logger.Errorf("tendisplus detect failed,err:%s,status:%s",
+				twemproxyErr.Error(), ins.Status)
+		}
+		return twemproxyErr
+	}
+
+	rspInfo, ok := rsp.(string)
+	if !ok {
+		twemproxyErr := fmt.Errorf("redis info response type is not string")
+		log.Logger.Errorf(twemproxyErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return twemproxyErr
+	}
+
+	log.Logger.Infof("Twemproxy detection response:%s", rspInfo)
+	if strings.Contains(rspInfo, "none") {
+		ins.Status = constvar.DBCheckSuccess
+		return nil
+	} else {
+		twemproxyErr = fmt.Errorf("twemproxy exec detection failed,rsp:%s,info:%s",
+			rspInfo, ins.ShowDetectionInfo())
+		log.Logger.Errorf(twemproxyErr.Error())
+		ins.Status = constvar.DBCheckFailed
+		return twemproxyErr
+	}
+}
+
+// Serialization serialize the detect info to JSON bytes
+func (ins *TwemproxyDetectInstance) Serialization() ([]byte, error) {
+	response := RedisDetectResponse{
+		BaseDetectDBResponse: ins.NewDBResponse(),
+		Pass:                 ins.Pass,
+	}
+
+	resByte, err := json.Marshal(&response)
+	if err != nil {
+		log.Logger.Errorf("twemproxy serialization failed. err:%s", err.Error())
+		return []byte{}, err
+	}
+	return resByte, nil
+}
+
+// ShowDetectionInfo return the detection info as a printable string
+func (ins *TwemproxyDetectInstance) ShowDetectionInfo() string {
+	str := fmt.Sprintf("ip:%s, port:%d, status:%s, DBType:%s",
+		ins.Ip, ins.Port, ins.Status, ins.DBType)
+	return str
+}
+
+// NewTwemproxyDetectInstance build a detect instance from CMDB info
+func NewTwemproxyDetectInstance(ins *RedisDetectInfoFromCmDB,
+	conf *config.Config) *TwemproxyDetectInstance {
+	return &TwemproxyDetectInstance{
+		RedisDetectBase: *GetDetectBaseByInfo(ins, constvar.Twemproxy, conf),
+	}
+}
+
+// NewTwemproxyDetectInstanceFromRsp build a detect instance from an agent response
+func NewTwemproxyDetectInstanceFromRsp(ins *RedisDetectResponse,
+	conf *config.Config) *TwemproxyDetectInstance {
+	return &TwemproxyDetectInstance{
+		RedisDetectBase: *GetDetectBaseByRsp(ins, constvar.Twemproxy, conf),
+	}
+}
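
DoTwemproxyDetection above leans on a Redis convention: TYPE on a key that nobody writes returns "none", so any "none" reply proves the proxy forwarded the command to a live backend. A hedged sketch of the same probe with the public go-redis v8 client (client.RedisClient is assumed to behave comparably; the address and password are placeholders):

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	rdb := redis.NewClient(&redis.Options{
		Addr:     "127.0.0.1:50000", // hypothetical twemproxy address
		Password: "xxx",             // placeholder
	})
	defer rdb.Close()

	// TYPE on an absent key: "none" means proxy and backend both answered.
	t, err := rdb.Type(context.Background(), "twemproxy_mon").Result()
	if err != nil {
		fmt.Println("detect failed:", err)
		return
	}
	fmt.Println("key type:", t) // expect "none"
}
```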
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/redis/twemproxy_switch.go b/dbm-services/common/dbha/ha-module/dbmodule/redis/twemproxy_switch.go
new file mode 100644
index 0000000000..1ed6ddb6a5
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/redis/twemproxy_switch.go
@@ -0,0 +1,68 @@
+package redis
+
+import (
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"fmt"
+)
+
+// TwemproxySwitch twemproxy switch instance
+type TwemproxySwitch struct {
+	RedisProxySwitchInfo
+}
+
+// CheckSwitch nothing to pre-check for twemproxy
+func (ins *TwemproxySwitch) CheckSwitch() (bool, error) {
+	return true, nil
+}
+
+// DoSwitch kick the faulty twemproxy out of its dns, clb and polaris entries
+func (ins *TwemproxySwitch) DoSwitch() error {
+	ins.ReportLogs(constvar.SWITCH_INFO,
+		fmt.Sprintf("handle twemproxy switch[%s:%d]", ins.Ip, ins.Port))
+	err := ins.KickOffDns()
+	cErr := ins.KickOffClb()
+	pErr := ins.KickOffPolaris()
+	if err != nil {
+		tpErrLog := fmt.Sprintf("Twemproxy kick dns failed,err:%s", err.Error())
+		log.Logger.Errorf("%s info:%s", tpErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.SWITCH_FAIL, tpErrLog)
+		return err
+	}
+	if cErr != nil {
+		tpErrLog := fmt.Sprintf("Twemproxy kick clb failed,err:%s", cErr.Error())
+		log.Logger.Errorf("%s info:%s", tpErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.SWITCH_FAIL, tpErrLog)
+		return cErr
+	}
+	if pErr != nil {
+		tpErrLog := fmt.Sprintf("Twemproxy kick polaris failed,err:%s", pErr.Error())
+		log.Logger.Errorf("%s info:%s", tpErrLog, ins.ShowSwitchInstanceInfo())
+		ins.ReportLogs(constvar.SWITCH_FAIL, tpErrLog)
+		return pErr
+	}
+	succLog := fmt.Sprintf("Twemproxy do switch ok,dns[%t] clb[%t], polaris[%t]",
+		ins.ApiGw.DNSFlag, ins.ApiGw.CLBFlag, ins.ApiGw.PolarisFlag)
+	ins.ReportLogs(constvar.SWITCH_INFO, succLog)
+	return nil
+}
+
+// RollBack nothing to roll back
+func (ins *TwemproxySwitch) RollBack() error {
+	return nil
+}
+
+// UpdateMetaInfo nothing to update
+func (ins *TwemproxySwitch) UpdateMetaInfo() error {
+	return nil
+}
+
+// ShowSwitchInstanceInfo return switch instance info as a printable string
+func (ins *TwemproxySwitch) ShowSwitchInstanceInfo() string {
+	format := `<%s#%d IDC:%s Status:%s App:%s ClusterType:%s MachineType:%s Cluster:%s> switch`
+	str := fmt.Sprintf(
+		format, ins.Ip, ins.Port, ins.IDC, ins.Status, ins.App,
+		ins.ClusterType, ins.MetaType, ins.Cluster,
+	)
+	return str
+}
diff --git a/dbm-services/common/dbha/ha-module/dbmodule/register.go b/dbm-services/common/dbha/ha-module/dbmodule/register.go
new file mode 100644
index 0000000000..580ff8805d
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbmodule/register.go
@@ -0,0 +1,64 @@
+package dbmodule
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbmodule/mysql"
+	"dbm-services/common/dbha/ha-module/dbmodule/redis"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/types"
+)
+
+// FetchDBCallback convert db instance info fetched from CMDB by the Agent into DataBaseDetect for detection
+type FetchDBCallback func(instances []interface{}, conf *config.Config) ([]dbutil.DataBaseDetect, error)
+
+// DeserializeCallback convert the JSON sent by the Agent into DataBaseDetect for the GM's second-pass detection
+type DeserializeCallback func(jsonInfo []byte, conf *config.Config) (dbutil.DataBaseDetect, error)
+
+// GetSwitchInstanceInformation convert instance info fetched by GQA into DataBaseSwitch for switching
+type GetSwitchInstanceInformation func(instance []interface{}, conf *config.Config) ([]dbutil.DataBaseSwitch, error)
+
+// Callback the three per-DB-type callbacks bundled together
+type Callback struct {
+	FetchDBCallback              FetchDBCallback
+	DeserializeCallback          DeserializeCallback
+	GetSwitchInstanceInformation GetSwitchInstanceInformation
+}
+
+// DBCallbackMap registry mapping each DB type to its callbacks
+var DBCallbackMap map[types.DBType]Callback
+
+func init() {
+	DBCallbackMap = map[types.DBType]Callback{}
+	DBCallbackMap[constvar.MySQL] = Callback{
+		FetchDBCallback:              mysql.NewMySQLInstanceByCmDB,
+		DeserializeCallback:          mysql.DeserializeMySQL,
+		GetSwitchInstanceInformation: mysql.NewMySQLSwitchInstance,
+	}
+	DBCallbackMap[constvar.MySQLProxy] = Callback{
+		FetchDBCallback:              mysql.NewMySQLProxyInstanceByCmDB,
+		DeserializeCallback:          mysql.DeserializeMySQLProxy,
+		GetSwitchInstanceInformation: mysql.NewMySQLProxySwitchInstance,
+	}
+
+	DBCallbackMap[constvar.TendisCache] = Callback{
+		FetchDBCallback:              redis.NewRedisInstanceByCmdb,
+		DeserializeCallback:          redis.DeserializeRedis,
+		GetSwitchInstanceInformation: redis.NewRedisSwitchInstance,
+	}
+	DBCallbackMap[constvar.Twemproxy] = Callback{
+		FetchDBCallback:              redis.NewTwemproxyInstanceByCmdb,
+		DeserializeCallback:          redis.DeserializeTwemproxy,
+		GetSwitchInstanceInformation: redis.NewTwemproxySwitchInstance,
+	}
+	DBCallbackMap[constvar.Predixy] = Callback{
+		FetchDBCallback:              redis.NewPredixyInstanceByCmdb,
+		DeserializeCallback:          redis.DeserializePredixy,
+		GetSwitchInstanceInformation: redis.NewPredixySwitchInstance,
+	}
+	DBCallbackMap[constvar.Tendisplus] = Callback{
+		FetchDBCallback:              redis.NewTendisplusInstanceByCmdb,
+		DeserializeCallback:          redis.DeserializeTendisplus,
+		GetSwitchInstanceInformation: redis.NewTendisplusSwitchInstance,
+	}
+}
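
As a usage sketch, this is how a caller dispatches through DBCallbackMap. The empty config and the JSON payload are illustrative only: a real config would be loaded first, and the field names merely follow the BaseDetectDBResponse tags in dbutil, with the pass field and status value being assumptions:

```go
package main

import (
	"fmt"

	"dbm-services/common/dbha/ha-module/config"
	"dbm-services/common/dbha/ha-module/constvar"
	"dbm-services/common/dbha/ha-module/dbmodule"
)

func main() {
	conf := &config.Config{} // placeholder; load a real config in practice
	cb, ok := dbmodule.DBCallbackMap[constvar.Tendisplus]
	if !ok {
		fmt.Println("no callback registered for this DB type")
		return
	}
	jsonInfo := []byte(`{"db_ip":"127.0.0.1","db_port":30000,"app":"demo","status":"SSH_FAILED","cluster":"c1","pass":""}`)
	ins, err := cb.DeserializeCallback(jsonInfo, conf)
	if err != nil {
		fmt.Println("deserialize failed:", err)
		return
	}
	ip, port := ins.GetAddress()
	fmt.Printf("double check %s at %s:%d\n", ins.GetType(), ip, port)
}
```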
diff --git a/dbm-services/common/dbha/ha-module/dbutil/db_detect.go b/dbm-services/common/dbha/ha-module/dbutil/db_detect.go
new file mode 100644
index 0000000000..812d722dc2
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbutil/db_detect.go
@@ -0,0 +1,168 @@
+package dbutil
+
+import (
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/types"
+	"dbm-services/common/dbha/ha-module/util"
+	"fmt"
+	"time"
+
+	"golang.org/x/crypto/ssh"
+)
+
+// DataBaseDetect interface
+type DataBaseDetect interface {
+	Detection() error
+	Serialization() ([]byte, error)
+
+	NeedReporter() bool
+	GetType() types.DBType
+	GetStatus() types.CheckStatus
+	GetAddress() (string, int)
+	GetApp() string
+	GetCluster() string
+	UpdateReporterTime()
+}
+
+// BaseDetectDB db detect base struct
+type BaseDetectDB struct {
+	Ip             string
+	Port           int
+	App            string
+	DBType         types.DBType
+	ReporterTime   time.Time
+	ReportInterval int
+	Status         types.CheckStatus
+	Cluster        string
+	SshInfo        Ssh
+}
+
+// BaseDetectDBResponse detect response struct
+type BaseDetectDBResponse struct {
+	AgentIp string `json:"agent_ip"`
+	DBIp    string `json:"db_ip"`
+	DBPort  int    `json:"db_port"`
+	App     string `json:"app"`
+	Status  string `json:"status"`
+	Cluster string `json:"cluster"`
+}
+
+// Ssh detect configure
+type Ssh struct {
+	Port    int
+	User    string
+	Pass    string
+	Dest    string
+	Timeout int
+}
+
+// DoSSH do ssh detect
+func (b *BaseDetectDB) DoSSH(shellStr string) error {
+	conf := &ssh.ClientConfig{
+		Timeout:         time.Second * time.Duration(b.SshInfo.Timeout), // ssh connect timeout; an auth error also returns within this window
+		User:            b.SshInfo.User,
+		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // works, but not secure enough
+		// HostKeyCallback: hostKeyCallBackFunc(h.Host),
+	}
+	conf.Auth = []ssh.AuthMethod{
+		ssh.KeyboardInteractive(b.ReturnSshInteractive()),
+		ssh.Password(b.SshInfo.Pass),
+	}
+	addr := fmt.Sprintf("%s:%d", b.Ip, b.SshInfo.Port)
+	sshClient, err := ssh.Dial("tcp", addr, conf)
+	if err != nil {
+		log.Logger.Warnf("ssh connect failed. ip:%s, port:%d, err:%s", b.Ip, b.Port, err.Error())
+		return err
+	}
+	defer sshClient.Close()
+
+	session, err := sshClient.NewSession()
+	if err != nil {
+		log.Logger.Warnf("ssh new session failed. ip:%s, port:%d, err:%s", b.Ip, b.Port, err.Error())
+		return err
+	}
+	defer session.Close()
+
+	_, err = session.CombinedOutput(shellStr)
+
+	if err != nil {
+		log.Logger.Warnf("ssh run command failed. ip:%s, port:%d, err:%s", b.Ip, b.Port, err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// NeedReporter decide whether need report detect result
+func (b *BaseDetectDB) NeedReporter() bool {
+	var need bool
+	if b.Status == constvar.DBCheckSuccess {
+		now := time.Now()
+		if now.After(b.ReporterTime.Add(time.Second * time.Duration(b.ReportInterval))) {
+			need = true
+		} else {
+			need = false
+		}
+		// log.Logger.Debugf("now time:%s, reporter time:%s, reporter interval:%d, need:%s",
+		// 	now.String(), b.ReporterTime.String(), b.ReportInterval, need)
+	} else {
+		need = true
+	}
+	return need
+}
+
+// GetAddress return instance's ip, port
+func (b *BaseDetectDB) GetAddress() (ip string, port int) {
+	return b.Ip, b.Port
+}
+
+// GetType return dbType
+func (b *BaseDetectDB) GetType() types.DBType {
+	return b.DBType
+}
+
+// GetStatus return status
+func (b *BaseDetectDB) GetStatus() types.CheckStatus {
+	return b.Status
+}
+
+// GetApp return app info
+func (b *BaseDetectDB) GetApp() string {
+	return b.App
+}
+
+// GetCluster return cluster info
+func (b *BaseDetectDB) GetCluster() string {
+	return b.Cluster
+}
+
+// UpdateReporterTime update report info
+func (b *BaseDetectDB) UpdateReporterTime() {
+	b.ReporterTime = time.Now()
+}
+
+// ReturnSshInteractive return ssh interactive info
+func (b *BaseDetectDB) ReturnSshInteractive() ssh.KeyboardInteractiveChallenge {
+	return func(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
+		answers = make([]string, len(questions))
+		// The second parameter is unused
+		for n := range questions {
+			answers[n] = b.SshInfo.Pass
+		}
+
+		return answers, nil
+	}
+}
+
+// NewDBResponse init db response struct, use to unmarshal
+func (b *BaseDetectDB) NewDBResponse() BaseDetectDBResponse {
+	return BaseDetectDBResponse{
+		AgentIp: util.LocalIp,
+		DBIp:    b.Ip,
+		DBPort:  b.Port,
+		App:     b.App,
+		Status:  string(b.Status),
+		Cluster: b.Cluster,
+	}
+}
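
To show how the interface methods cooperate, a hedged sketch of a single agent-side pass: detect, throttle with NeedReporter, serialize, then stamp the report time. The driver function and the send callback are stand-ins, not code from this patch:

```go
package main

import (
	"fmt"

	"dbm-services/common/dbha/ha-module/dbutil"
)

// detectOnce runs one detection pass over the given instances.
func detectOnce(instances []dbutil.DataBaseDetect, send func([]byte) error) {
	for _, ins := range instances {
		if err := ins.Detection(); err != nil {
			ip, port := ins.GetAddress()
			fmt.Printf("detect %s:%d failed: %s\n", ip, port, err)
		}
		// NeedReporter throttles success reports to one per ReportInterval;
		// failed instances are always reported.
		if !ins.NeedReporter() {
			continue
		}
		payload, err := ins.Serialization()
		if err != nil {
			continue
		}
		if err := send(payload); err == nil {
			ins.UpdateReporterTime()
		}
	}
}

func main() {
	// With a real config the slice would come from a FetchDBCallback.
	detectOnce(nil, func(b []byte) error {
		fmt.Printf("would send %d bytes\n", len(b))
		return nil
	})
}
```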
diff --git a/dbm-services/common/dbha/ha-module/dbutil/db_switch.go b/dbm-services/common/dbha/ha-module/dbutil/db_switch.go
new file mode 100644
index 0000000000..0c4541ef27
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbutil/db_switch.go
@@ -0,0 +1,153 @@
+package dbutil
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/log"
+	"time"
+)
+
+// DataBaseSwitch interface every switch instance implements
+type DataBaseSwitch interface {
+	CheckSwitch() (bool, error)
+	DoSwitch() error
+	ShowSwitchInstanceInfo() string
+	RollBack() error
+	UpdateMetaInfo() error
+
+	GetAddress() (string, int)
+	GetIDC() string
+	GetStatus() string
+	GetApp() string
+	GetClusterType() string
+	GetMetaType() string
+	GetSwitchUid() uint
+	GetRole() string // proxies have no role
+	GetCluster() string
+
+	SetSwitchUid(uint)
+	SetInfo(infoKey string, infoValue interface{})
+	GetInfo(infoKey string) (bool, interface{})
+	ReportLogs(result string, comment string) bool
+}
+
+// BindEntry access entries (dns, polaris, clb) bound to a cluster
+type BindEntry struct {
+	Dns     []interface{}
+	Polaris []interface{}
+	CLB     []interface{}
+}
+
+// ProxyInfo proxy instance info
+type ProxyInfo struct {
+	Ip        string `json:"ip"`
+	Port      int    `json:"port"`
+	AdminPort int    `json:"admin_port"`
+	Status    string `json:"status"`
+}
+
+// BaseSwitch switch base struct
+type BaseSwitch struct {
+	Ip          string
+	Port        int
+	IDC         string
+	Status      string
+	App         string
+	ClusterType string
+	MetaType    string
+	SwitchUid   uint
+	Cluster     string
+	CmDBClient  *client.CmDBClient
+	HaDBClient  *client.HaDBClient
+	Infos       map[string]interface{}
+}
+
+// GetAddress return instance ip and port
+func (ins *BaseSwitch) GetAddress() (string, int) {
+	return ins.Ip, ins.Port
+}
+
+// GetIDC return instance idc
+func (ins *BaseSwitch) GetIDC() string {
+	return ins.IDC
+}
+
+// GetStatus return instance status
+func (ins *BaseSwitch) GetStatus() string {
+	return ins.Status
+}
+
+// GetApp return instance app
+func (ins *BaseSwitch) GetApp() string {
+	return ins.App
+}
+
+// GetClusterType return instance cluster type
+func (ins *BaseSwitch) GetClusterType() string {
+	return ins.ClusterType
+}
+
+// GetMetaType return instance meta type
+func (ins *BaseSwitch) GetMetaType() string {
+	return ins.MetaType
+}
+
+// GetSwitchUid return the switch queue uid
+func (ins *BaseSwitch) GetSwitchUid() uint {
+	return ins.SwitchUid
+}
+
+// SetSwitchUid set the switch queue uid
+func (ins *BaseSwitch) SetSwitchUid(uid uint) {
+	ins.SwitchUid = uid
+}
+
+// GetRole return the instance role; override in concrete types if needed
+func (ins *BaseSwitch) GetRole() string {
+	return "N/A"
+}
+
+// GetCluster return the cluster info
+func (ins *BaseSwitch) GetCluster() string {
+	return ins.Cluster
+}
+
+// SetInfo set information to switch instance
+func (ins *BaseSwitch) SetInfo(infoKey string, infoValue interface{}) {
+	if nil == ins.Infos {
+		ins.Infos = make(map[string]interface{})
+	}
+
+	ins.Infos[infoKey] = infoValue
+}
+
+// GetInfo get information by key from switch instance
+func (ins *BaseSwitch) GetInfo(infoKey string) (bool, interface{}) {
+	if nil == ins.Infos {
+		return false, nil
+	}
+
+	v, ok := ins.Infos[infoKey]
+	if ok {
+		return true, v
+	} else {
+		return false, nil
+	}
+}
+
+// ReportLogs write a switch log entry to HADB; return false on failure
+func (ins *BaseSwitch) ReportLogs(result string, comment string) bool {
+	log.Logger.Infof(comment)
+	if nil == ins.HaDBClient {
+		return false
+	}
+
+	err := ins.HaDBClient.InsertSwitchLog(
+		ins.SwitchUid, ins.Ip, ins.Port, result, comment, time.Now(),
+	)
+	if err != nil {
+		return false
+	} else {
+		return true
+	}
+}
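
A small usage sketch for the Infos bag. The keys follow the constvar names used later in gcm.go, and the type assertions mirror what UpdateSwitchQueue does when it reads the values back:

```go
package main

import (
	"fmt"

	"dbm-services/common/dbha/ha-module/constvar"
	"dbm-services/common/dbha/ha-module/dbutil"
)

func main() {
	ins := &dbutil.BaseSwitch{Ip: "127.0.0.1", Port: 30000}

	// SetInfo lazily allocates the map, so a zero-value BaseSwitch is safe.
	ins.SetInfo(constvar.SWITCH_INFO_SLAVE_IP, "127.0.0.2")
	ins.SetInfo(constvar.SWITCH_INFO_SLAVE_PORT, 30001)

	if ok, v := ins.GetInfo(constvar.SWITCH_INFO_SLAVE_IP); ok {
		fmt.Println("new master candidate:", v.(string))
	}
	if ok, v := ins.GetInfo(constvar.SWITCH_INFO_SLAVE_PORT); ok {
		fmt.Println("port:", v.(int))
	}
}
```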
diff --git a/dbm-services/common/dbha/ha-module/dbutil/dbutil.go b/dbm-services/common/dbha/ha-module/dbutil/dbutil.go
new file mode 100644
index 0000000000..d060e68705
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/dbutil/dbutil.go
@@ -0,0 +1,2 @@
+// Package dbutil TODO
+package dbutil
diff --git a/dbm-services/common/dbha/ha-module/errno/code.go b/dbm-services/common/dbha/ha-module/errno/code.go
new file mode 100644
index 0000000000..313c555d7e
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/errno/code.go
@@ -0,0 +1,47 @@
+package errno
+
+var (
+	// OK success
+	OK = &Errno{Code: 0, Message: "OK"}
+
+	// ErrInterval internal server error (types error, prefix is 100)
+	ErrInterval = &Errno{Code: 10001, Message: "Internal server error", CNMessage: "内部未知错误"}
+	// ErrHttpStatusCode unexpected http status code
+	ErrHttpStatusCode = &Errno{Code: 10002, Message: "Invalid http status code", CNMessage: "http请求状态码不对"}
+	// ErrHttpResponse invalid http response
+	ErrHttpResponse = &Errno{Code: 10003, Message: "Invalid http response", CNMessage: "http请求返回异常"}
+	// ErrInvokeApi api invocation failed
+	ErrInvokeApi = &Errno{Code: 10004, Message: "Invoke api failed", CNMessage: "调用api出错"}
+	// ErrJSONMarshal JSON marshal failed
+	ErrJSONMarshal = &Errno{Code: 10005, Message: "Error occurred while marshal the data to JSON.",
+		CNMessage: "序列化JSON数据出错"}
+	// ErrJSONUnmarshal JSON unmarshal failed
+	ErrJSONUnmarshal = &Errno{Code: 10006, Message: "Error occurred while unmarshal the JSON to data model.",
+		CNMessage: "反序列化JSON数据出错"}
+	// ErrGetInstanceInfo get instance info failed
+	ErrGetInstanceInfo = &Errno{Code: 10007, Message: "Get instance info failed", CNMessage: "获取实例信息失败"}
+	// ErrAppNotFound get app info failed
+	ErrAppNotFound = &Errno{Code: 10008, Message: "Get app info failed", CNMessage: "获取业务信息失败"}
+
+	// api error, prefix is 200
+
+	// ErrMultiMaster multiple master instances found on one host (mysql error, prefix is 300)
+	ErrMultiMaster = &Errno{Code: 30001, Message: "Multi master found", CNMessage: "同一主机同时存在实例角色master和slave"}
+	// ErrSwitchNumUnMatched dead host's instance number does not equal its switch number
+	ErrSwitchNumUnMatched = &Errno{Code: 30002, Message: "instances number is %d, switch number is %d, unmatched",
+		CNMessage: "实例个数%d与切换个数%d不匹配"}
+	// ErrRemoteQuery remote query failed
+	ErrRemoteQuery = &Errno{Code: 30003, Message: "do remote query failed", CNMessage: "调用remoteQuery失败"}
+	// ErrRemoteExecute remote execute failed
+	ErrRemoteExecute = &Errno{Code: 30004, Message: "do remote execute failed", CNMessage: "调用remoteExecute失败"}
+	// ErrIOTreadState slave IO_THREAD abnormal
+	ErrIOTreadState = &Errno{Code: 30005, Message: "slave IO_THREAD is not ok", CNMessage: "IO_THREAD异常"}
+	// ErrSQLTreadState slave SQL_THREAD abnormal
+	ErrSQLTreadState = &Errno{Code: 30006, Message: "slave SQL_THREAD is not ok", CNMessage: "SQL_THREAD异常"}
+	// ErrSlaveStatus get slave status abnormal
+	ErrSlaveStatus = &Errno{Code: 30007, Message: "get slave status abnormal", CNMessage: "获取slave status异常"}
+	// proxy error, prefix is 400
+)
diff --git a/dbm-services/common/dbha/ha-module/errno/errno.go b/dbm-services/common/dbha/ha-module/errno/errno.go
new file mode 100644
index 0000000000..b61565926a
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/errno/errno.go
@@ -0,0 +1,102 @@
+// Package errno TODO
+package errno
+
+import (
+	"fmt"
+)
+
+// Errno struct
+type Errno struct {
+	Code      int
+	Message   string
+	CNMessage string
+}
+
+// Lang language used for error messages: "en_US" or "zh_CN"
+var Lang = "en_US"
+
+// Error return the message in the language selected by Lang
+func (err Errno) Error() string {
+	switch Lang {
+	case "zh_CN":
+		return err.CNMessage
+	case "en_US":
+		return err.Message
+	default:
+		return err.Message
+	}
+}
+
+// Addf append formatted text to the error message
+func (err Errno) Addf(format string, args ...interface{}) error {
+	return err.Add(fmt.Sprintf(format, args...))
+}
+
+// Errorf format the parameterized message in the selected language
+func (err Errno) Errorf(args ...interface{}) error {
+	switch Lang {
+	case "zh_CN":
+		err.CNMessage = fmt.Sprintf(err.CNMessage, args...)
+	case "en_US":
+		err.Message = fmt.Sprintf(err.Message, args...)
+	default:
+		err.Message = fmt.Sprintf(err.Message, args...)
+	}
+
+	return err
+}
+
+// Add append text to the error message
+func (err Errno) Add(message string) error {
+	switch Lang {
+	case "zh_CN":
+		err.CNMessage += message
+	case "en_US":
+		err.Message += message
+	default:
+		err.Message += message
+	}
+	return err
+}
+
+// Err an Errno wrapping an underlying error
+type Err struct {
+	Errno
+	Err error
+}
+
+// New wrap err with the given Errno
+func New(errno *Errno, err error) *Err {
+	return &Err{Errno: *errno, Err: err}
+}
+
+// SetMsg set the English error message
+func (err Err) SetMsg(message string) error {
+	err.Message = message
+	return err
+}
+
+// SetCNMsg set the Chinese error message
+func (err Err) SetCNMsg(cnMessage string) error {
+	err.CNMessage = cnMessage
+	return err
+}
+
+// Error return code, localized message and the wrapped error
+func (err Err) Error() string {
+	message := err.Message
+	switch Lang {
+	case "zh_CN":
+		message = err.CNMessage
+	case "en_US":
+		message = err.Message
+	default:
+		message = err.Message
+	}
+	return fmt.Sprintf("Err - code: %d, message: %s, error: %s", err.Code, message, err.Err.Error())
+}
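
A usage sketch. Because every Errno method uses a value receiver, Errorf and Add return modified copies and never mutate the package-level sentinels in code.go:

```go
package main

import (
	"fmt"

	"dbm-services/common/dbha/ha-module/errno"
)

func main() {
	// Format the parameterized message; the sentinel itself stays untouched.
	fmt.Println(errno.ErrSwitchNumUnMatched.Errorf(3, 2))

	// Switch to Chinese messages.
	errno.Lang = "zh_CN"
	fmt.Println(errno.ErrSwitchNumUnMatched.Errorf(3, 2))

	// Wrap a lower-level error with an error code.
	wrapped := errno.New(errno.ErrInvokeApi, fmt.Errorf("connection refused"))
	fmt.Println(wrapped.Error())
}
```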
diff --git a/dbm-services/common/dbha/ha-module/gm/connection.go b/dbm-services/common/dbha/ha-module/gm/connection.go
new file mode 100644
index 0000000000..4de2364b1d
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/gm/connection.go
@@ -0,0 +1,236 @@
+package gm
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/dbmodule"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/types"
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type parseStatus int
+
+const (
+	// Idle waiting for the start of the next package
+	Idle parseStatus = 0
+	// ParseHeader reading the "HEADER" literal
+	ParseHeader parseStatus = 1
+	// ParseHeaderLF expecting the '\n' after the header
+	ParseHeaderLF parseStatus = 2
+	// ParseType reading the DB type field
+	ParseType parseStatus = 3
+	// ParseTypeLF expecting the '\n' after the DB type
+	ParseTypeLF parseStatus = 4
+	// ParseLength reading the decimal body length
+	ParseLength parseStatus = 5
+	// ParseLengthLF expecting the '\n' after the length
+	ParseLengthLF parseStatus = 6
+	// ParseBody reading BodyLength bytes of body
+	ParseBody parseStatus = 7
+)
+
+// HEADER magic literal that leads every package
+const HEADER string = "HEADER"
+
+// MaxDBTypeLength max allowed length of the DB type field
+const MaxDBTypeLength int = 64
+
+// MaxBodyLength max allowed body length (128 KB)
+const MaxBodyLength int = 128 * 1024
+
+// Package one parsed network package
+type Package struct {
+	Header     string
+	DBType     string
+	BodyLength int
+	Body       []byte
+}
+
+// AgentConnection one accepted agent connection plus its parse state
+type AgentConnection struct {
+	Ip            string
+	Port          int
+	NetConnection net.Conn
+	Buffer        []byte
+	status        parseStatus
+	netPackage    Package
+	GDMChan       chan DoubleCheckInstanceInfo
+	Conf          *config.Config
+}
+
+// Init record the remote address and reset the parse state
+func (conn *AgentConnection) Init() {
+	addr := strings.Split(conn.NetConnection.RemoteAddr().String(), ":")
+	conn.Ip = addr[0]
+	conn.Port, _ = strconv.Atoi(addr[1])
+	conn.status = Idle
+}
+
+// Read read from the connection in a loop and feed bytes to the parser
+func (conn *AgentConnection) Read() error {
+	defer conn.NetConnection.Close()
+	for {
+		conn.Buffer = make([]byte, 1024)
+		n, err := conn.NetConnection.Read(conn.Buffer)
+		if err != nil {
+			log.Logger.Warnf("client exit.ip:%s port:%d err: %s", conn.Ip, conn.Port, err.Error())
+			return err
+		}
+		err = conn.parse(n)
+		if err != nil {
+			log.Logger.Errorf("parse net package failed.buf:\n%s\n err:%s", conn.Buffer, err.Error())
+			return err
+		}
+	}
+}
+
+// parse run readLen buffered bytes through the unpacking state machine
+func (conn *AgentConnection) parse(readLen int) error {
+	var err error
+	var i int
+	for i = 0; i < readLen; i++ {
+		switch conn.status {
+		case Idle:
+			if conn.Buffer[i] != HEADER[0] {
+				err = fmt.Errorf("parse failed, status Idle, index %d", i)
+				log.Logger.Errorf(err.Error())
+				break
+			}
+			conn.resetPackage()
+			conn.netPackage.Header += string(conn.Buffer[i])
+			conn.status = ParseHeader
+		case ParseHeader:
+			if (conn.Buffer[i] == '\r' && conn.netPackage.Header != HEADER) ||
+				(conn.Buffer[i] != '\r' && len(conn.netPackage.Header) == len(HEADER)) {
+				err = fmt.Errorf("parse failed, status ParseHeader, index %d", i)
+				log.Logger.Errorf(err.Error())
+				break
+			} else if conn.Buffer[i] == '\r' && conn.netPackage.Header == HEADER {
+				conn.status = ParseHeaderLF
+			} else {
+				conn.netPackage.Header += string(conn.Buffer[i])
+			}
+		case ParseHeaderLF:
+			if conn.Buffer[i] != '\n' {
+				err = fmt.Errorf("parse failed, status ParseHeaderLF, index %d", i)
+				log.Logger.Errorf(err.Error())
+				break
+			}
+			conn.status = ParseType
+		case ParseType:
+			if conn.Buffer[i] == '\r' {
+				_, ok := dbmodule.DBCallbackMap[types.DBType(conn.netPackage.DBType)]
+				if !ok {
+					err = fmt.Errorf("parse failed, can't find dbtype, status ParseType, index %d", i)
+					log.Logger.Errorf(err.Error())
+					break
+				}
+				conn.status = ParseTypeLF
+			} else if conn.Buffer[i] != '\r' && len(conn.netPackage.DBType) > MaxDBTypeLength {
+				err = fmt.Errorf("parse failed, len(DBType) > MaxDBtypeLen, status ParseType, index %d", i)
+				log.Logger.Errorf(err.Error())
+				break
+			} else {
+				conn.netPackage.DBType += string(conn.Buffer[i])
+			}
+		case ParseTypeLF:
+			if conn.Buffer[i] != '\n' {
+				err = fmt.Errorf("parse failed, status ParseTypeLF, index %d", i)
+				log.Logger.Errorf(err.Error())
+				break
+			}
+			conn.status = ParseLength
+		case ParseLength:
+			if conn.Buffer[i] == '\r' {
+				conn.status = ParseLengthLF
+			} else {
+				num, atoiErr := strconv.Atoi(string(conn.Buffer[i]))
+				if atoiErr != nil {
+					// assign to the outer err (":=" would shadow it and skip the reset below)
+					err = fmt.Errorf("parse failed, invalid length byte, err:%s", atoiErr.Error())
+					log.Logger.Errorf(err.Error())
+					break
+				}
+				// int overflow?
+				conn.netPackage.BodyLength = conn.netPackage.BodyLength*10 + num
+				if conn.netPackage.BodyLength > MaxBodyLength {
+					err = fmt.Errorf("parse failed, bodylength > MaxBodyLength, status ParseLength, index %d", i)
+					log.Logger.Errorf(err.Error())
+					break
+				}
+			}
+		case ParseLengthLF:
+			if conn.Buffer[i] != '\n' {
+				err = fmt.Errorf("parse failed, status ParseLengthLF, index %d", i)
+				log.Logger.Errorf(err.Error())
+				break
+			}
+			conn.status = ParseBody
+		case ParseBody:
+			conn.netPackage.Body = append(conn.netPackage.Body, conn.Buffer[i])
+			if len(conn.netPackage.Body) == conn.netPackage.BodyLength {
+				err = conn.processPackage()
+				if err != nil {
+					log.Logger.Errorf("process net package failed. err:%s", err.Error())
+					break
+				}
+
+				// unpack succeeded; reply OK to the agent
+				log.Logger.Infof("process net package success. Type:%s, Body:%s",
+					conn.netPackage.DBType, conn.netPackage.Body)
+				n, err := conn.NetConnection.Write([]byte("OK"))
+				if err != nil {
+					log.Logger.Error("write failed. agent ip:", conn.Ip, " port:", conn.Port)
+					return err
+				}
+				if n != len("OK") {
+					err = fmt.Errorf(
+						"repoter GMConf length not equal, buf size:%d,real send buf size:%d", len("OK"), n)
+					log.Logger.Errorf(err.Error())
+					return err
+				}
+				conn.resetPackage()
+				conn.status = Idle
+			}
+		}
+		if err != nil {
+			conn.resetPackage()
+		}
+	}
+	return nil
+}
+
+func (conn *AgentConnection) resetPackage() {
+	conn.netPackage.Header = ""
+	conn.netPackage.DBType = ""
+	conn.netPackage.BodyLength = 0
+	conn.netPackage.Body = []byte{}
+	conn.status = Idle
+}
+
+// processPackage deserialize a complete package and hand it to GDM
+func (conn *AgentConnection) processPackage() error {
+	var err error
+	cb, ok := dbmodule.DBCallbackMap[types.DBType(conn.netPackage.DBType)]
+	if !ok {
+		err = fmt.Errorf("can't find %s instance callback", conn.netPackage.DBType)
+		log.Logger.Errorf(err.Error())
+		return err
+	}
+	retDB, err := cb.DeserializeCallback(conn.netPackage.Body, conn.Conf)
+	if err != nil {
+		log.Logger.Errorf("deserialize failed. err:%s", err.Error())
+		return err
+	}
+	conn.GDMChan <- DoubleCheckInstanceInfo{
+		AgentIp:      conn.Ip,
+		AgentPort:    conn.Port,
+		db:           retDB,
+		ReceivedTime: time.Now(),
+		ConfirmTime:  time.Now(),
+	}
+	return nil
+}
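
The state machine above implies a simple CRLF-delimited frame: the literal HEADER, the DB type, the decimal body length, then exactly that many body bytes, with no trailing CRLF after the body. A hedged sketch of the agent side building and sending one frame; the address is a placeholder, and the DB type string must match a value registered in dbmodule.DBCallbackMap:

```go
package main

import (
	"fmt"
	"net"
)

// buildFrame assembles the package format parsed by AgentConnection:
// HEADER\r\n<dbtype>\r\n<body length>\r\n<body>
func buildFrame(dbType string, body []byte) []byte {
	return []byte(fmt.Sprintf("HEADER\r\n%s\r\n%d\r\n%s", dbType, len(body), body))
}

func main() {
	frame := buildFrame("tendisplus", []byte(`{"db_ip":"127.0.0.1"}`))

	conn, err := net.Dial("tcp", "127.0.0.1:50000") // hypothetical gdm listener
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	if _, err := conn.Write(frame); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	ack := make([]byte, 2)
	if _, err := conn.Read(ack); err == nil {
		fmt.Println("gdm replied:", string(ack)) // the parser answers "OK"
	}
}
```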
diff --git a/dbm-services/common/dbha/ha-module/gm/gcm.go b/dbm-services/common/dbha/ha-module/gm/gcm.go
new file mode 100644
index 0000000000..5773e77117
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/gm/gcm.go
@@ -0,0 +1,261 @@
+package gm
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/monitor"
+	"fmt"
+	"time"
+)
+
+// GCM gcm work struct
+type GCM struct {
+	GQAChan                  chan dbutil.DataBaseSwitch
+	CmDBClient               *client.CmDBClient
+	HaDBClient               *client.HaDBClient
+	Conf                     *config.Config
+	AllowedChecksumMaxOffset int
+	AllowedSlaveDelayMax     int
+	AllowedTimeDelayMax      int
+	ExecSlowKBytes           int
+	reporter                 *HAReporter
+}
+
+// NewGCM init new gcm
+func NewGCM(conf *config.Config, ch chan dbutil.DataBaseSwitch, reporter *HAReporter) (*GCM, error) {
+	var err error
+	gcm := &GCM{
+		GQAChan:                  ch,
+		Conf:                     conf,
+		AllowedChecksumMaxOffset: conf.GMConf.GCM.AllowedChecksumMaxOffset,
+		AllowedTimeDelayMax:      conf.GMConf.GCM.AllowedTimeDelayMax,
+		AllowedSlaveDelayMax:     conf.GMConf.GCM.AllowedSlaveDelayMax,
+		ExecSlowKBytes:           conf.GMConf.GCM.ExecSlowKBytes,
+		reporter:                 reporter,
+	}
+	gcm.CmDBClient, err = client.NewCmDBClient(&conf.DBConf.CMDB, conf.GetCloudId())
+	if err != nil {
+		return nil, err
+	}
+	gcm.HaDBClient, err = client.NewHaDBClient(&conf.DBConf.HADB, conf.GetCloudId())
+	if err != nil {
+		return nil, err
+	}
+	return gcm, nil
+}
+
+// Run gcm run main entry
+func (gcm *GCM) Run() {
+	for {
+		select {
+		case ins := <-gcm.GQAChan:
+			gcm.Process(ins)
+		case <-time.After(time.Duration(gcm.Conf.GMConf.ReportInterval) * time.Second):
+		}
+		gcm.reporter.DoReport(ModuleReportInfo{
+			Module: constvar.GCM,
+		})
+	}
+}
+
+// PopInstance pop instance from GQA chan
+func (gcm *GCM) PopInstance() dbutil.DataBaseSwitch {
+	switchInstance := <-gcm.GQAChan
+	return switchInstance
+}
+
+// Process gcm process instance switch
+func (gcm *GCM) Process(switchInstance dbutil.DataBaseSwitch) {
+	go func(switchInstance dbutil.DataBaseSwitch) {
+		gcm.DoSwitchSingle(switchInstance)
+	}(switchInstance)
+}
+
+// DoSwitchSingle gcm do instance switch
+func (gcm *GCM) DoSwitchSingle(switchInstance dbutil.DataBaseSwitch) {
+	var err error
+
+	// Set the instance to unavailable (taking the lock) before inserting into switch_queue.
+	// If switch_queue were inserted first and another gm updated concurrently, there would be
+	// multiple switch_queue records, and updating switch_queue would touch all of them, since
+	// we cannot tell which record was inserted by which gm.
+	log.Logger.Infof("get instance lock and set unavailable")
+	err = gcm.SetUnavailableAndLockInstance(switchInstance)
+	if err != nil {
+		switchFail := "set instance to unavailable failed:" + err.Error()
+		switchInstance.ReportLogs(constvar.SWITCH_FAIL, switchFail)
+		monitor.MonitorSendSwitch(switchInstance, switchFail, false)
+		return
+	}
+
+	log.Logger.Infof("insert tb_mon_switch_queue. info:{%s}", switchInstance.ShowSwitchInstanceInfo())
+	err = gcm.InsertSwitchQueue(switchInstance)
+	if err != nil {
+		log.Logger.Errorf("insert switch queue failed. err:%s, info{%s}", err.Error(),
+			switchInstance.ShowSwitchInstanceInfo())
+		switchFail := "insert switch queue failed. err:" + err.Error()
+		monitor.MonitorSendSwitch(switchInstance, switchFail, false)
+		return
+	}
+	switchInstance.ReportLogs(constvar.CHECK_SWITCH_INFO, "set instance unavailable success")
+
+	for i := 0; i < 1; i++ {
+		switchInstance.ReportLogs(constvar.CHECK_SWITCH_INFO, "start check switch")
+
+		var needContinue bool
+		needContinue, err = switchInstance.CheckSwitch()
+
+		if err != nil {
+			log.Logger.Errorf("check switch failed. err:%s, info{%s}", err.Error(),
+				switchInstance.ShowSwitchInstanceInfo())
+			err = fmt.Errorf("check switch failed:%s", err.Error())
+			break
+		}
+
+		if !needContinue {
+			break
+		}
+
+		switchInstance.ReportLogs(constvar.SWITCH_INFO, "start do switch")
+		err = switchInstance.DoSwitch()
+		if err != nil {
+			log.Logger.Errorf("do switch failed. err:%s, info{%s}", err.Error(),
+				switchInstance.ShowSwitchInstanceInfo())
+			err = fmt.Errorf("do switch failed:%s", err.Error())
+			break
+		}
+		switchInstance.ReportLogs(constvar.SWITCH_INFO, "do switch success. try to update meta info")
+
+		log.Logger.Infof("do update meta info. info{%s}", switchInstance.ShowSwitchInstanceInfo())
+		err = switchInstance.UpdateMetaInfo()
+		if err != nil {
+			log.Logger.Errorf("do update meta info failed. err:%s, info{%s}", err.Error(),
+				switchInstance.ShowSwitchInstanceInfo())
+			err = fmt.Errorf("do update meta info failed:%s", err.Error())
+			break
+		}
+		switchInstance.ReportLogs(constvar.SWITCH_INFO, "update meta info success")
+	}
+	if err != nil {
+		monitor.MonitorSendSwitch(switchInstance, err.Error(), false)
+		log.Logger.Errorf("switch instance failed. info:{%s}", switchInstance.ShowSwitchInstanceInfo())
+
+		updateErr := gcm.UpdateSwitchQueue(switchInstance, err.Error(), constvar.SWITCH_FAIL)
+		if updateErr != nil {
+			log.Logger.Errorf("update switch queue failed. err:%s, info{%s}", updateErr.Error(),
+				switchInstance.ShowSwitchInstanceInfo())
+		}
+		gcm.InsertSwitchLogs(switchInstance, false, err.Error())
+
+		rollbackErr := switchInstance.RollBack()
+		if rollbackErr != nil {
+			log.Logger.Errorf("instance rollback failed. err:%s, info{%s}", rollbackErr.Error(),
+				switchInstance.ShowSwitchInstanceInfo())
+		}
+	} else {
+		log.Logger.Infof("switch instance success. info:{%s}", switchInstance.ShowSwitchInstanceInfo())
+		switchOk := "switch success"
+		monitor.MonitorSendSwitch(switchInstance, switchOk, true)
+		gcm.InsertSwitchLogs(switchInstance, true, switchOk)
+
+		updateErr := gcm.UpdateSwitchQueue(switchInstance, "switch_done", constvar.SWITCH_SUCC)
+		if updateErr != nil {
+			log.Logger.Errorf("update Switch queue failed. err:%s", updateErr.Error())
+			return
+		}
+	}
+}
+
+// InsertSwitchQueue insert switch info to tb_mon_switch_queue
+func (gcm *GCM) InsertSwitchQueue(instance dbutil.DataBaseSwitch) error {
+	ip, port := instance.GetAddress()
+	uid, err := gcm.HaDBClient.InsertSwitchQueue(
+		ip, port, instance.GetIDC(), time.Now(), instance.GetApp(),
+		instance.GetClusterType(), instance.GetCluster(),
+	)
+	if err != nil {
+		log.Logger.Errorf("insert switch queue failed. err:%s", err.Error())
+		return err
+	}
+	instance.SetSwitchUid(uid)
+	return nil
+}
+
+// InsertSwitchLogs insert switch logs to switchLogs table
+func (gcm *GCM) InsertSwitchLogs(instance dbutil.DataBaseSwitch, result bool, resultInfo string) {
+	var resultDetail string
+	var comment string
+	curr := time.Now()
+	info := instance.ShowSwitchInstanceInfo()
+	if result {
+		resultDetail = constvar.SWITCH_SUCC
+		comment = fmt.Sprintf("%s %s success", curr.Format("2006-01-02 15:04:05"), info)
+	} else {
+		resultDetail = constvar.SWITCH_FAIL
+		comment = fmt.Sprintf(
+			"%s %s failed,err:%s", curr.Format("2006-01-02 15:04:05"), info, resultInfo,
+		)
+	}
+
+	ip, port := instance.GetAddress()
+	err := gcm.HaDBClient.InsertSwitchLog(
+		instance.GetSwitchUid(), ip, port, resultDetail, comment, time.Now(),
+	)
+	if err != nil {
+		log.Logger.Errorf("insert switch logs failed. err:%s", err.Error())
+	}
+}
+
+// SetUnavailableAndLockInstance update instance status to unavailable
+func (gcm *GCM) SetUnavailableAndLockInstance(instance dbutil.DataBaseSwitch) error {
+	// no lock
+	ip, port := instance.GetAddress()
+	err := gcm.CmDBClient.UpdateDBStatus(ip, port, constvar.UNAVAILABLE)
+	if err != nil {
+		log.Logger.Errorf("set instance unavailable failed. err:%s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// UpdateSwitchQueue update switch result
+func (gcm *GCM) UpdateSwitchQueue(instance dbutil.DataBaseSwitch, confirmResult string, switchResult string) error {
+	var (
+		confirmStr string
+		slaveIp    string
+		slavePort  int
+	)
+	if ok, dcInfo := instance.GetInfo(constvar.SWITCH_INFO_DOUBLECHECK); ok {
+		confirmStr = dcInfo.(string)
+	} else {
+		confirmStr = confirmResult
+	}
+
+	if ok, slaveIpInfo := instance.GetInfo(constvar.SWITCH_INFO_SLAVE_IP); ok {
+		slaveIp = slaveIpInfo.(string)
+	} else {
+		slaveIp = "N/A"
+	}
+
+	if ok, slavePortInfo := instance.GetInfo(constvar.SWITCH_INFO_SLAVE_PORT); ok {
+		slavePort = slavePortInfo.(int)
+	} else {
+		slavePort = 0
+	}
+
+	ip, port := instance.GetAddress()
+	if err := gcm.HaDBClient.UpdateSwitchQueue(
+		instance.GetSwitchUid(), ip, port,
+		constvar.UNAVAILABLE,
+		slaveIp,
+		slavePort,
+		confirmStr,
+		switchResult,
+		instance.GetRole(),
+	); err != nil {
+		log.Logger.Errorf("update switch queue failed. err:%s", err.Error())
+		return err
+	}
+	return nil
+}
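
The `for i := 0; i < 1; i++` wrapper in DoSwitchSingle is a breakable-block idiom: any failing step can break out past the remaining steps while the shared success/failure handling after the loop runs exactly once. A stripped-down sketch of the same control flow with stand-in steps:

```go
package main

import (
	"errors"
	"fmt"
)

func run(check, doSwitch, updateMeta func() error) {
	var err error
	for i := 0; i < 1; i++ { // single pass: "break" skips the remaining steps
		if err = check(); err != nil {
			break
		}
		if err = doSwitch(); err != nil {
			break
		}
		err = updateMeta()
	}
	// One shared branch, as in DoSwitchSingle.
	if err != nil {
		fmt.Println("switch failed:", err)
		return
	}
	fmt.Println("switch ok")
}

func main() {
	ok := func() error { return nil }
	fail := func() error { return errors.New("do switch failed") }
	run(ok, fail, ok) // prints: switch failed: do switch failed
}
```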
diff --git a/dbm-services/common/dbha/ha-module/gm/gdm.go b/dbm-services/common/dbha/ha-module/gm/gdm.go
new file mode 100644
index 0000000000..1d001421ef
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/gm/gdm.go
@@ -0,0 +1,174 @@
+package gm
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"net"
+	"strconv"
+	"sync"
+	"time"
+)
+
+// GDM work struct
+type GDM struct {
+	AgentChan     chan DoubleCheckInstanceInfo
+	GMMChan       chan DoubleCheckInstanceInfo
+	ListenPort    int
+	ReporterCache map[InstanceKey]*DoubleCheckInstanceInfo
+	cacheMutex    sync.Mutex
+	DupExpire     int
+	ScanInterval  int
+	Conf          *config.Config
+	reporter      *HAReporter
+}
+
+// NewGDM init gdm
+func NewGDM(conf *config.Config, ch chan DoubleCheckInstanceInfo,
+	reporter *HAReporter) (*GDM, error) {
+	gdm := &GDM{
+		AgentChan:     make(chan DoubleCheckInstanceInfo, 10),
+		GMMChan:       ch,
+		ListenPort:    conf.GMConf.ListenPort,
+		ReporterCache: map[InstanceKey]*DoubleCheckInstanceInfo{},
+		cacheMutex:    sync.Mutex{},
+		DupExpire:     conf.GMConf.GDM.DupExpire,
+		ScanInterval:  conf.GMConf.GDM.ScanInterval,
+		Conf:          conf,
+		reporter:      reporter,
+	}
+	return gdm, nil
+}
+
+// Run gdm main entry
+func (gdm *GDM) Run() {
+	gdm.Init()
+	for {
+		select {
+		case ins := <-gdm.AgentChan:
+			gdm.Process(ins)
+		case <-time.After(time.Duration(gdm.Conf.GMConf.ReportInterval) * time.Second):
+		}
+
+		gdm.reporter.DoReport(ModuleReportInfo{
+			Module: constvar.GDM,
+		})
+		gdm.PostProcess()
+	}
+}
+
+// Init gdm do init
+func (gdm *GDM) Init() {
+	go func() {
+		gdm.listenAndDoAccept()
+	}()
+}
+
+// Process gdm process instance
+func (gdm *GDM) Process(ins DoubleCheckInstanceInfo) {
+	if !gdm.isReporterRecently(&ins) {
+		gdm.PushInstance2Next(ins)
+	}
+}
+
+// PostProcess gdm post instance info
+func (gdm *GDM) PostProcess() {
+	gdm.flushCache()
+	return
+}
+
+// PushInstance2Next gdm push instance to gmm chan
+func (gdm *GDM) PushInstance2Next(ins DoubleCheckInstanceInfo) {
+	gdm.GMMChan <- ins
+	return
+}
+
+// listenAndDoAccept listen for agent connections and spawn a reader goroutine per connection
+func (gdm *GDM) listenAndDoAccept() {
+	addr := "0.0.0.0:" + strconv.Itoa(gdm.ListenPort)
+	log.Logger.Infof("gdm start listen %s\n", addr)
+	listen, err := net.Listen("tcp", addr)
+	if err != nil {
+		log.Logger.Fatalf("gdm listen failed. err:%s", err.Error())
+	}
+	defer func() {
+		err = listen.Close()
+		if err != nil {
+			log.Logger.Errorf("close socket failed. err:%s", err.Error())
+		}
+	}()
+
+	for {
+		conn, err := listen.Accept()
+		if err != nil {
+			log.Logger.Errorf("accept socket failed. err:%s", err.Error())
+			continue
+		} else {
+			log.Logger.Infof("gdm accept success con: %v agent ip: %v\n", conn, conn.RemoteAddr().String())
+		}
+		agentConn := AgentConnection{
+			NetConnection: conn,
+			GDMChan:       gdm.AgentChan,
+			Conf:          gdm.Conf,
+		}
+		go func(agentConn AgentConnection) {
+			agentConn.Init()
+			err = agentConn.Read()
+			if err != nil {
+				log.Logger.Warnf("agentConn close. err:%s\n", err.Error())
+				return
+			}
+		}(agentConn)
+	}
+}
+
+func (gdm *GDM) isReporterRecently(ins *DoubleCheckInstanceInfo) bool {
+	ip, port := ins.db.GetAddress()
+	gdm.cacheMutex.Lock()
+	defer gdm.cacheMutex.Unlock()
+	cache, ok := gdm.ReporterCache[InstanceKey{
+		ip,
+		port,
+	}]
+	if ok && cache.db.GetStatus() == ins.db.GetStatus() {
+		log.Logger.Infof("instance[%s#%d] cached, skip report", ip, port)
+		return true
+	}
+	// refresh the cache entry
+	gdm.ReporterCache[InstanceKey{
+		ip,
+		port,
+	}] = ins
+	return false
+}
+
+func (gdm *GDM) flushCache() {
+	now := time.Now()
+	gdm.cacheMutex.Lock()
+	defer gdm.cacheMutex.Unlock()
+	// evict cache entries older than DupExpire
+	for key, val := range gdm.ReporterCache {
+		if now.After(val.ReceivedTime.Add(time.Second * time.Duration(gdm.DupExpire))) {
+			delete(gdm.ReporterCache, key)
+		}
+	}
+}
+
+// InstanceSwitchDone evict the cache entry about one minute after a switch finishes
+// (including non-switch outcomes such as a successful double check or a delayed switch):
+// rewind ReceivedTime by DupExpire - 1 minute so flushCache's normal expiry removes it
+func (gdm *GDM) InstanceSwitchDone(ip string, port int, dbType string) {
+	gdm.cacheMutex.Lock()
+	defer gdm.cacheMutex.Unlock()
+	cache, ok := gdm.ReporterCache[InstanceKey{
+		ip,
+		port,
+	}]
+	if !ok {
+		log.Logger.Warnf(
+			"ip:%s, port:%d, dbtype:%s switch done, but cache not exist", ip, port, dbType)
+		return
+	}
+	log.Logger.Infof("ip:%s, port:%d, dbtype:%s switch done", ip, port, dbType)
+	// Time.Add returns a new value, so the result must be assigned back
+	cache.ReceivedTime = cache.ReceivedTime.Add(time.Minute - time.Duration(gdm.DupExpire)*time.Second)
+}
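
To make the eviction arithmetic concrete: flushCache drops an entry once now is later than ReceivedTime + DupExpire, and InstanceSwitchDone rewinds ReceivedTime by DupExpire - 1 minute so the entry expires about one minute after the switch finishes. A minimal sketch of that timing; the DupExpire value is a placeholder for GMConf.GDM.DupExpire:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	dupExpire := 600 * time.Second // placeholder
	received := time.Now()

	// Normal path: the entry lives until ReceivedTime + DupExpire.
	normalEviction := received.Add(dupExpire)

	// After a finished switch: shift ReceivedTime back by (DupExpire - 1m),
	// so the same ReceivedTime+DupExpire rule fires ~1 minute from now.
	adjusted := received.Add(time.Minute - dupExpire)
	earlyEviction := adjusted.Add(dupExpire)

	fmt.Println("normal eviction in:", time.Until(normalEviction).Round(time.Second))
	fmt.Println("post-switch eviction in:", time.Until(earlyEviction).Round(time.Second))
}
```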
diff --git a/dbm-services/common/dbha/ha-module/gm/gm.go b/dbm-services/common/dbha/ha-module/gm/gm.go
new file mode 100644
index 0000000000..7ea5143072
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/gm/gm.go
@@ -0,0 +1,238 @@
+// Package gm TODO
+package gm
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/util"
+	"time"
+)
+
+// InstanceKey instance key info
+type InstanceKey struct {
+	Ip   string
+	Port int
+}
+
+// DoubleCheckInstanceInfo double check instance info
+type DoubleCheckInstanceInfo struct {
+	AgentIp      string
+	AgentPort    int
+	db           dbutil.DataBaseDetect
+	ReceivedTime time.Time
+	ConfirmTime  time.Time
+	ResultInfo   string
+}
+
+// ModuleReportInfo module info
+type ModuleReportInfo struct {
+	Module string
+}
+
+// HAReporter ha reporter
+type HAReporter struct {
+	gm             *GM
+	lastReportTime time.Time
+}
+
+// GM work struct
+type GM struct {
+	gdm            *GDM
+	gmm            *GMM
+	gqa            *GQA
+	gcm            *GCM
+	HaDBClient     *client.HaDBClient
+	Conf           *config.Config
+	reportChan     chan ModuleReportInfo
+	lastReportTime time.Time
+}
+
+// NewGM init new gm
+func NewGM(conf *config.Config) (*GM, error) {
+	var err error
+	gdmToGmmChan := make(chan DoubleCheckInstanceInfo, 100)
+	gmmToGqaChan := make(chan DoubleCheckInstanceInfo, 100)
+	gqaToGcmChan := make(chan dbutil.DataBaseSwitch, 100)
+	gm := &GM{
+		Conf:           conf,
+		reportChan:     make(chan ModuleReportInfo, 100),
+		lastReportTime: time.Now(),
+	}
+	gm.gdm, err = NewGDM(conf, gdmToGmmChan, &HAReporter{
+		gm:             gm,
+		lastReportTime: time.Now(),
+	})
+	if err != nil {
+		log.Logger.Errorf("gdm init failed. err:%s", err.Error())
+		return nil, err
+	}
+	gm.gmm, err = NewGMM(gm.gdm, conf, gdmToGmmChan, gmmToGqaChan, &HAReporter{
+		gm:             gm,
+		lastReportTime: time.Now(),
+	})
+	if err != nil {
+		log.Logger.Errorf("gmm init failed. err:%s", err.Error())
+		return nil, err
+	}
+	gm.gqa, err = NewGQA(gm.gdm, conf, gmmToGqaChan, gqaToGcmChan, &HAReporter{
+		gm:             gm,
+		lastReportTime: time.Now(),
+	})
+	if err != nil {
+		log.Logger.Errorf("gqa init failed. err:%s", err.Error())
+		return nil, err
+	}
+	gm.gcm, err = NewGCM(conf, gqaToGcmChan, &HAReporter{
+		gm:             gm,
+		lastReportTime: time.Now(),
+	})
+	if err != nil {
+		log.Logger.Errorf("gcm init failed. err:%s", err.Error())
+		return nil, err
+	}
+	gm.HaDBClient, err = client.NewHaDBClient(&conf.DBConf.HADB, conf.GetCloudId())
+	if err != nil {
+		return nil, err
+	}
+	return gm, nil
+}
+
+// Run gm work main entry
+func (gm *GM) Run() error {
+	if err := gm.HaDBClient.RegisterDBHAInfo(util.LocalIp, gm.Conf.GMConf.ListenPort, "gm",
+		gm.Conf.GMConf.City, gm.Conf.GMConf.Campus, ""); err != nil {
+		return err
+	}
+
+	if err := gm.HaDBClient.RegisterDBHAInfo(util.LocalIp, gm.Conf.GMConf.ListenPort, constvar.GDM,
+		gm.Conf.GMConf.City, gm.Conf.GMConf.Campus, ""); err != nil {
+		log.Logger.Errorf("GM register gcm module failed,err:%s", err.Error())
+		return err
+	}
+
+	go func() {
+		gm.gdm.Run()
+	}()
+
+	if err := gm.HaDBClient.RegisterDBHAInfo(util.LocalIp, gm.Conf.GMConf.ListenPort, constvar.GMM,
+		gm.Conf.GMConf.City, gm.Conf.GMConf.Campus, ""); err != nil {
+		log.Logger.Errorf("GM register gcm module failed,err:%s", err.Error())
+		return err
+	}
+
+	go func() {
+		gm.gmm.Run()
+	}()
+
+	if err := gm.HaDBClient.RegisterDBHAInfo(util.LocalIp, gm.Conf.GMConf.ListenPort, constvar.GQA,
+		gm.Conf.GMConf.City, gm.Conf.GMConf.Campus, ""); err != nil {
+		log.Logger.Errorf("GM register gcm module failed,err:%s", err.Error())
+		return err
+	}
+
+	go func() {
+		gm.gqa.Run()
+	}()
+
+	if err := gm.HaDBClient.RegisterDBHAInfo(util.LocalIp, gm.Conf.GMConf.ListenPort, constvar.GCM,
+		gm.Conf.GMConf.City, gm.Conf.GMConf.Campus, ""); err != nil {
+		log.Logger.Errorf("GM register gcm module failed,err:%s", err.Error())
+		return err
+	}
+
+	go func() {
+		gm.gcm.Run()
+	}()
+
+	gm.TimerRun()
+	return nil
+}
+
+// GetGDM return gdm object
+func (gm *GM) GetGDM() *GDM {
+	return gm.gdm
+}
+
+// GetGMM return gmm object
+func (gm *GM) GetGMM() *GMM {
+	return gm.gmm
+}
+
+// GetGQA return gqa object
+func (gm *GM) GetGQA() *GQA {
+	return gm.gqa
+}
+
+// GetGCM return gcm object
+func (gm *GM) GetGCM() *GCM {
+	return gm.gcm
+}
+
+// TimerRun gm report heartbeat
+func (gm *GM) TimerRun() {
+	for {
+		select {
+		case ins := <-gm.reportChan:
+			gm.ProcessModuleReport(ins)
+		case <-time.After(time.Duration(gm.Conf.GMConf.ReportInterval) * time.Second):
+		}
+		gm.CheckReportMyself()
+	}
+}
+
+// ProcessModuleReport do module report
+func (gm *GM) ProcessModuleReport(reportInfo ModuleReportInfo) {
+	log.Logger.Infof("GM process module[%s] report", reportInfo.Module)
+	gm.DoDBHAReport(reportInfo.Module)
+}
+
+// CheckReportMyself gm report itself heartbeat
+func (gm *GM) CheckReportMyself() {
+	now := time.Now()
+	nextReport := gm.lastReportTime.Add(time.Duration(gm.Conf.GMConf.ReportInterval) * time.Second)
+	if now.After(nextReport) {
+		log.Logger.Debugf("GM report myself by check, lastReport:%s, now:%s, nextReport:%s",
+			gm.lastReportTime.Format("2006-01-02 15:04:05"),
+			now.Format("2006-01-02 15:04:05"),
+			nextReport.Format("2006-01-02 15:04:05"))
+		gm.lastReportTime = now
+		gm.DoDBHAReport(constvar.GM)
+	}
+}
+
+// DoDBHAReport do heartbeat report through api
+func (gm *GM) DoDBHAReport(module string) {
+	err := gm.HaDBClient.ReporterGMHeartbeat(module, gm.Conf.GMConf.ReportInterval)
+	if err != nil {
+		log.Logger.Errorf("report module[%s] heartbeat to dbha failed", module)
+	} else {
+		log.Logger.Infof("report module[%s] heartbeat to dbha ok", module)
+	}
+}
+
+// GetDBDetect return instance detect info
+func (dc *DoubleCheckInstanceInfo) GetDBDetect() *dbutil.DataBaseDetect {
+	return &dc.db
+}
+
+// SetDBDetect set db detect info
+func (dc *DoubleCheckInstanceInfo) SetDBDetect(detect dbutil.DataBaseDetect) {
+	dc.db = detect
+}
+
+// DoReport gm do heartbeat report
+func (reporter *HAReporter) DoReport(reportInfo ModuleReportInfo) {
+	now := time.Now()
+	nextReport := reporter.lastReportTime.Add(
+		time.Duration(reporter.gm.Conf.GMConf.ReportInterval) * time.Second)
+	if now.After(nextReport) {
+		log.Logger.Debugf("report module[%s], lastReport:%s, now:%s, nextReport:%s",
+			reportInfo.Module, reporter.lastReportTime.Format("2006-01-02 15:04:05"),
+			now.Format("2006-01-02 15:04:05"), nextReport.Format("2006-01-02 15:04:05"))
+		reporter.lastReportTime = now
+		reporter.gm.DoDBHAReport(reportInfo.Module)
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/gm/gmm.go b/dbm-services/common/dbha/ha-module/gm/gmm.go
new file mode 100644
index 0000000000..3926a245da
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/gm/gmm.go
@@ -0,0 +1,147 @@
+package gm
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/monitor"
+	"fmt"
+	"time"
+)
+
+// GMM work struct
+type GMM struct {
+	GDMChan    chan DoubleCheckInstanceInfo
+	GQAChan    chan DoubleCheckInstanceInfo
+	HaDBClient *client.HaDBClient
+	gdm        *GDM
+	Conf       *config.Config
+	reporter   *HAReporter
+}
+
+// NewGMM new gmm object
+func NewGMM(gdm *GDM, conf *config.Config, gdmCh,
+	gqaCh chan DoubleCheckInstanceInfo, reporter *HAReporter) (*GMM, error) {
+	var err error
+	gmm := &GMM{
+		GDMChan:  gdmCh,
+		GQAChan:  gqaCh,
+		gdm:      gdm,
+		Conf:     conf,
+		reporter: reporter,
+	}
+	gmm.HaDBClient, err = client.NewHaDBClient(&conf.DBConf.HADB, conf.GetCloudId())
+	if err != nil {
+		return nil, err
+	}
+	return gmm, nil
+}
+
+// Run gmm main entry
+func (gmm *GMM) Run() {
+	for {
+		select {
+		case instance := <-gmm.GDMChan:
+			gmm.Process(instance)
+		case <-time.After(time.Duration(gmm.Conf.GMConf.ReportInterval) * time.Second):
+		}
+		gmm.reporter.DoReport(ModuleReportInfo{
+			Module: constvar.GMM,
+		})
+	}
+}
+
+// PopInstance pop instance from gdm chan
+func (gmm *GMM) PopInstance() DoubleCheckInstanceInfo {
+	instance := <-gmm.GDMChan
+	return instance
+}
+
+// PushInstance2Next push instance to gqa chan
+func (gmm *GMM) PushInstance2Next(instance DoubleCheckInstanceInfo) {
+	gmm.GQAChan <- instance
+	return
+}
+
+// Process double checks a detected instance and routes it by the check result
+func (gmm *GMM) Process(instance DoubleCheckInstanceInfo) {
+	checkStatus := instance.db.GetStatus()
+	switch checkStatus {
+	case constvar.SSHCheckSuccess:
+		{ // machine-level switch never satisfies this condition: the agent only reports ssh-failed instances.
+			ip, port := instance.db.GetAddress()
+			// no switch in machine level switch
+			gmm.HaDBClient.ReportHaLog(
+				ip,
+				port,
+				"gmm",
+				"db check failed. no need to switch in machine level",
+			)
+		}
+	// AUTHCheckFailed also needs a double check; processing is based on its result.
+	case constvar.SSHCheckFailed, constvar.AUTHCheckFailed:
+		{ // double check
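+			// re-run detection in a goroutine so a slow double check does not block the loop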
+			go func(doubleCheckInstance DoubleCheckInstanceInfo) {
+				ip, port := doubleCheckInstance.db.GetAddress()
+				err := doubleCheckInstance.db.Detection()
+				switch doubleCheckInstance.db.GetStatus() {
+				case constvar.DBCheckSuccess:
+					gmm.HaDBClient.ReportHaLog(
+						ip,
+						port,
+						"gmm",
+						"double check success: db check success.",
+					)
+				case constvar.SSHCheckSuccess:
+					{
+						// no switch in machine level switch
+						gmm.HaDBClient.ReportHaLog(
+							ip,
+							port,
+							"gmm",
+							fmt.Sprintf("double check success: db check failed, ssh check success. dbcheck err:%s", err),
+						)
+					}
+				case constvar.SSHCheckFailed:
+					{
+						gmm.HaDBClient.ReportHaLog(
+							ip,
+							port,
+							"gmm",
+							fmt.Sprintf("double check failed: ssh check failed. sshcheck err:%s", err),
+						)
+						content := fmt.Sprintf("double check failed: ssh check failed. sshcheck err:%s", err)
+						monitor.MonitorSendDetect(
+							doubleCheckInstance.db, constvar.DBHA_EVENT_DOUBLE_CHECK_SSH, content,
+						)
+						doubleCheckInstance.ResultInfo = content
+						// reporter GQA
+						doubleCheckInstance.ConfirmTime = time.Now()
+						gmm.GQAChan <- doubleCheckInstance
+						return
+					}
+				case constvar.AUTHCheckFailed:
+					{
+						log.Logger.Errorf("double check failed: ssh authentication failed, err:%s", err)
+						gmm.HaDBClient.ReportHaLog(
+							ip,
+							port,
+							"gmm",
+							fmt.Sprintf("double check failed: ssh authentication failed, sshcheck err:%s", err),
+						)
+						content := fmt.Sprintf("double check failed: ssh authentication failed. sshcheck err:%s", err)
+						monitor.MonitorSendDetect(
+							doubleCheckInstance.db, constvar.DBHA_EVENT_DOUBLE_CHECK_AUTH, content,
+						)
+					}
+				default:
+					log.Logger.Fatalf("unknown check status:%s", doubleCheckInstance.db.GetStatus())
+				}
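+				// non-switch outcomes end here: tell GDM handling of this instance is done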
+				gmm.gdm.InstanceSwitchDone(ip, port, string(doubleCheckInstance.db.GetType()))
+			}(instance)
+		}
+	default:
+		log.Logger.Errorf("unknown check status received: %s", checkStatus)
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/gm/gqa.go b/dbm-services/common/dbha/ha-module/gm/gqa.go
new file mode 100644
index 0000000000..5f652c71b4
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/gm/gqa.go
@@ -0,0 +1,260 @@
+package gm
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbmodule"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"fmt"
+	"time"
+)
+
+// GQA work struct
+type GQA struct {
+	GMMChan              chan DoubleCheckInstanceInfo
+	GCMChan              chan dbutil.DataBaseSwitch
+	CmDBClient           *client.CmDBClient
+	HaDBClient           *client.HaDBClient
+	gdm                  *GDM
+	Conf                 *config.Config
+	IDCCache             map[string]time.Time
+	IDCCacheExpire       int
+	SingleSwitchInterval int
+	SingleSwitchLimit    int
+	AllSwitchInterval    int
+	AllSwitchLimit       int
+	SingleSwitchIDCLimit int
+	reporter             *HAReporter
+}
+
+// NewGQA initializes a GQA object
+func NewGQA(gdm *GDM, conf *config.Config,
+	gmmCh chan DoubleCheckInstanceInfo,
+	gcmCh chan dbutil.DataBaseSwitch, reporter *HAReporter) (*GQA, error) {
+	var err error
+	gqa := &GQA{
+		GMMChan:              gmmCh,
+		GCMChan:              gcmCh,
+		gdm:                  gdm,
+		Conf:                 conf,
+		IDCCache:             map[string]time.Time{},
+		IDCCacheExpire:       conf.GMConf.GQA.IDCCacheExpire,
+		SingleSwitchInterval: conf.GMConf.GQA.SingleSwitchInterval,
+		SingleSwitchLimit:    conf.GMConf.GQA.SingleSwitchLimit,
+		AllSwitchInterval:    conf.GMConf.GQA.AllSwitchInterval,
+		AllSwitchLimit:       conf.GMConf.GQA.AllHostSwitchLimit,
+		SingleSwitchIDCLimit: conf.GMConf.GQA.SingleSwitchIDC,
+		reporter:             reporter,
+	}
+	gqa.CmDBClient, err = client.NewCmDBClient(&conf.DBConf.CMDB, conf.GetCloudId())
+	if err != nil {
+		return nil, err
+	}
+	gqa.HaDBClient, err = client.NewHaDBClient(&conf.DBConf.HADB, conf.GetCloudId())
+	if err != nil {
+		return nil, err
+	}
+	return gqa, nil
+}
+
+// Run is the main loop of GQA
+func (gqa *GQA) Run() {
+	for {
+		select {
+		case ins := <-gqa.GMMChan:
+			instances := gqa.PreProcess(ins)
+			gqa.Process(instances)
+		case <-time.After(time.Duration(gqa.Conf.GMConf.ReportInterval) * time.Second):
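+			// idle tick: keep the heartbeat report below firing while the channel is quiet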
+		}
+
+		gqa.reporter.DoReport(ModuleReportInfo{
+			Module: constvar.GQA,
+		})
+	}
+}
+
+// PopInstance pops an instance from the GMM channel and pre-processes it
+func (gqa *GQA) PopInstance() []dbutil.DataBaseSwitch {
+	instance := <-gqa.GMMChan
+	return gqa.PreProcess(instance)
+}
+
+// PreProcess fetches instance detail info for processing
+func (gqa *GQA) PreProcess(instance DoubleCheckInstanceInfo) []dbutil.DataBaseSwitch {
+	ip, port := instance.db.GetAddress()
+	log.Logger.Infof("gqa get instance. ip:%s, port:%d", ip, port)
+
+	cmdbInfos, err := gqa.getAllInstanceFromCMDB(&instance)
+	if err != nil {
+		errInfo := fmt.Sprintf("get idc failed. err:%s", err.Error())
+		log.Logger.Errorf(errInfo)
+		gqa.HaDBClient.ReportHaLog(ip, port, "gqa", errInfo)
+		return nil
+	}
+	return cmdbInfos
+}
+
+// PushInstance2Next pushes an instance to the GCM channel
+func (gqa *GQA) PushInstance2Next(ins dbutil.DataBaseSwitch) {
+	gqa.GCMChan <- ins
+}
+
+// Process decides whether each instance is allowed to switch
+func (gqa *GQA) Process(cmdbInfos []dbutil.DataBaseSwitch) {
+	if nil == cmdbInfos {
+		return
+	}
+
+	for _, instanceInfo := range cmdbInfos {
+		ip, port := instanceInfo.GetAddress()
+		log.Logger.Infof("gqa handle instance. ip:%s, port:%d", ip, port)
+
+		// check single IDC
+		lastCacheTime, ok := gqa.IDCCache[instanceInfo.GetIDC()]
+		if ok {
+			if time.Now().After(lastCacheTime.Add(time.Duration(gqa.IDCCacheExpire) * time.Second)) {
+				delete(gqa.IDCCache, instanceInfo.GetIDC())
+			} else {
+				err := gqa.delaySwitch(instanceInfo)
+				if err != nil {
+					errInfo := fmt.Sprintf("delay switch failed. err:%s", err.Error())
+					log.Logger.Errorf(errInfo)
+					gqa.HaDBClient.ReportHaLog(ip, port, "gqa", errInfo)
+				} else {
+					gqa.HaDBClient.ReportHaLog(ip, port, "gqa",
+						"single IDC switch too much, delay switch")
+				}
+				continue
+			}
+		}
+
+		// check status
+		if instanceInfo.GetStatus() != constvar.RUNNING && instanceInfo.GetStatus() != constvar.AVAILABLE {
+			gqa.HaDBClient.ReportHaLog(ip, port, "gqa",
+				fmt.Sprintf("status:%s not equal RUNNING or AVAILABLE", instanceInfo.GetStatus()))
+			continue
+		}
+
+		// query single instance total
+		singleTotal, err := gqa.HaDBClient.QuerySingleTotal(ip, port, gqa.SingleSwitchInterval)
+		if err != nil {
+			errInfo := fmt.Sprintf("query single total failed. err:%s", err.Error())
+			log.Logger.Errorf(errInfo)
+			gqa.HaDBClient.ReportHaLog(ip, port, "gqa", errInfo)
+			continue
+		}
+		if singleTotal >= gqa.SingleSwitchLimit {
+			gqa.HaDBClient.ReportHaLog(ip, port, "gqa", "single instance switch limit reached.")
+			continue
+		}
+
+		// query all machines max total
+		intervalTotal, err := gqa.HaDBClient.QueryIntervalTotal(gqa.AllSwitchInterval)
+		if err != nil {
+			errInfo := fmt.Sprintf("query interval total failed. err:%s", err.Error())
+			log.Logger.Errorf(errInfo)
+			gqa.HaDBClient.ReportHaLog(ip, port, "gqa", errInfo)
+			continue
+		}
+		if intervalTotal >= gqa.AllSwitchLimit {
+			err = gqa.delaySwitch(instanceInfo)
+			if err != nil {
+				errInfo := fmt.Sprintf("delay switch failed. err:%s", err.Error())
+				log.Logger.Errorf(errInfo)
+				gqa.HaDBClient.ReportHaLog(ip, port, "gqa", errInfo)
+			} else {
+				gqa.HaDBClient.ReportHaLog(ip, port, "gqa",
+					"dbha switch too much, delay switch")
+			}
+			continue
+		}
+
+		// query running jobs on the machine
+
+		// query switches of this idc (machine level) within one minute
+		idcTotal, err := gqa.HaDBClient.QuerySingleIDC(ip, instanceInfo.GetIDC())
+		if err != nil {
+			errInfo := fmt.Sprintf("query single idc failed. err:%s", err.Error())
+			log.Logger.Errorf(errInfo)
+			gqa.HaDBClient.ReportHaLog(ip, port, "gqa", errInfo)
+			continue
+		}
+		if idcTotal >= gqa.SingleSwitchIDCLimit {
+			_, ok = gqa.IDCCache[instanceInfo.GetIDC()]
+			if !ok {
+				gqa.IDCCache[instanceInfo.GetIDC()] = time.Now()
+			}
+			err = gqa.delaySwitch(instanceInfo)
+			if err != nil {
+				errInfo := fmt.Sprintf("delay switch failed. err:%s", err.Error())
+				log.Logger.Errorf(errInfo)
+				gqa.HaDBClient.ReportHaLog(ip, port, "gqa", errInfo)
+			} else {
+				gqa.HaDBClient.ReportHaLog(ip, port, "gqa",
+					"single IDC switch too much, delay switch")
+			}
+			continue
+		}
+
+		// query instance and proxy info
+
+		log.Logger.Infof("start switch. ip:%s, port:%d, cluster_type:%s, app:%s",
+			ip, port, instanceInfo.GetClusterType(), instanceInfo.GetApp())
+		gqa.PushInstance2Next(instanceInfo)
+	}
+}
+
+func (gqa *GQA) getAllInstanceFromCMDB(
+	instance *DoubleCheckInstanceInfo) ([]dbutil.DataBaseSwitch, error) {
+	ip, _ := instance.db.GetAddress()
+	instances, err := gqa.CmDBClient.GetDBInstanceInfoByIp(ip)
+	if err != nil {
+		log.Logger.Errorf("get mysql instance failed. err:%s", err.Error())
+		return nil, err
+	}
+
+	if nil == instances {
+		log.Logger.Errorf("gqa get mysql instances nil")
+	} else {
+		log.Logger.Infof("gqa get mysql instance number:%d", len(instances))
+	}
+
+	cb, ok := dbmodule.DBCallbackMap[instance.db.GetType()]
+	if !ok {
+		err = fmt.Errorf("can't find %s instance callback", instance.db.GetType())
+		log.Logger.Errorf(err.Error())
+		return nil, err
+	}
+	ret, err := cb.GetSwitchInstanceInformation(instances, gqa.Conf)
+	if err != nil {
+		log.Logger.Errorf("get switch instance info failed. err:%s", err.Error())
+		return nil, err
+	}
+
+	if ret == nil {
+		log.Logger.Errorf("gqa get switch instance is nil")
+	} else {
+		log.Logger.Infof("gqa get switch instance num:%d", len(ret))
+	}
+
+	for _, sins := range ret {
+		sins.SetInfo(constvar.SWITCH_INFO_DOUBLECHECK, instance.ResultInfo)
+	}
+	return ret, nil
+}
+
+func (gqa *GQA) delaySwitch(instance dbutil.DataBaseSwitch) error {
+	ip, port := instance.GetAddress()
+	log.Logger.Infof("start delay switch. ip:%s, port:%d, app:%s",
+		ip, port, instance.GetApp())
+	// err := gqa.HaDBClient.UpdateTimeDelay(instance.Ip, instance.Port, instance.App)
+	// if err != nil {
+	// 	log.Logger.Errorf("update timedelay failed. err:%s", err.Error())
+	// 	return err
+	// }
+	gqa.gdm.InstanceSwitchDone(ip, port, instance.GetClusterType())
+	return nil
+}
diff --git a/dbm-services/common/dbha/ha-module/go.mod b/dbm-services/common/dbha/ha-module/go.mod
new file mode 100644
index 0000000000..786ee88636
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/go.mod
@@ -0,0 +1,37 @@
+module dbm-services/common/dbha/ha-module
+
+go 1.19
+
+require (
+	github.com/go-playground/validator/v10 v10.12.0
+	github.com/go-redis/redis/v8 v8.11.5
+	github.com/go-sql-driver/mysql v1.7.1
+	github.com/natefinch/lumberjack v2.0.0+incompatible
+	go.uber.org/zap v1.24.0
+	golang.org/x/crypto v0.8.0
+	gopkg.in/yaml.v2 v2.4.0
+	gorm.io/driver/mysql v1.5.0
+	gorm.io/gorm v1.25.0
+)
+
+require (
+	github.com/BurntSushi/toml v1.2.1 // indirect
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/jinzhu/inflection v1.0.0 // indirect
+	github.com/jinzhu/now v1.1.5 // indirect
+	github.com/kr/pretty v0.3.0 // indirect
+	github.com/leodido/go-urn v1.2.3 // indirect
+	github.com/pkg/errors v0.9.1 // indirect
+	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	go.uber.org/atomic v1.9.0 // indirect
+	go.uber.org/goleak v1.1.12 // indirect
+	go.uber.org/multierr v1.8.0 // indirect
+	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
+)
diff --git a/dbm-services/common/dbha/ha-module/go.sum b/dbm-services/common/dbha/ha-module/go.sum
new file mode 100644
index 0000000000..ffd3882646
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/go.sum
@@ -0,0 +1,124 @@
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI=
+github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA=
+github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
+github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
+github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
+github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA=
+github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
+github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM=
+github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
+go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gorm.io/driver/mysql v1.5.0 h1:6hSAT5QcyIaty0jfnff0z0CLDjyRgZ8mlMHLqSt7uXM=
+gorm.io/driver/mysql v1.5.0/go.mod h1:FFla/fJuCvyTi7rJQd27qlNX2v3L6deTR1GgTjSOLPo=
+gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
+gorm.io/gorm v1.25.0 h1:+KtYtb2roDz14EQe4bla8CbQlmb9dN3VejSai3lprfU=
+gorm.io/gorm v1.25.0/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
diff --git a/dbm-services/common/dbha/ha-module/ha.yaml b/dbm-services/common/dbha/ha-module/ha.yaml
new file mode 100644
index 0000000000..3578e34f51
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/ha.yaml
@@ -0,0 +1,92 @@
+log_conf:
+  log_path: "./log"
+  log_level: "LOG_DEBUG"
+  log_maxsize: 1024
+  log_maxbackups: 5
+  log_maxage: 30
+  log_compress: true
+agent_conf:
+  active_db_type: [
+    "tendbha:backend",
+    "tendbha:proxy",
+  ]
+  city: "3"
+  campus: "深圳"
+  cloud: "5"
+  fetch_interval: 60
+  reporter_interval: 120
+gm_conf:
+  city: "4"
+  campus: "上海"
+  cloud: "6"
+  liston_port: 50000
+  GDM:
+    dup_expire: 600
+    scan_interval: 1
+  GMM:
+  GQA:
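+    # switch throttling: per-instance, per-idc and global limits, each within its own interval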
+    idc_cache_expire: 300
+    single_switch_idc: 50
+    single_switch_interval: 86400
+    single_switch_limit: 48
+    all_host_switch_limit: 150
+    all_switch_interval: 7200
+  GCM:
+    allowed_checksum_max_offset: 2
+    allowed_slave_delay_max: 600
+    allowed_time_delay_max: 300
+    exec_slow_kbytes: 0
+db_conf:
+  hadb:
+    host: "hadb-api-host"
+    port: 8080
+    url_pre: "/apis/proxypass/hadb"
+    timeout: 30
+    bk_conf:
+      bk_token: "xxxx"
+  cmdb:
+    host: "cmdb-api-host"
+    port: 80
+    url_pre: "/apis/proxypass"
+    timeout: 10
+    bk_conf:
+      bk_token: "xxxx"
+  mysql:
+    user: "mysql-conn-user"
+    pass: "mysql-conn-pass"
+    proxy_user: "proxy-conn-user"
+    proxy_pass: "proxy-conn-pass"
+    timeout: 10
+  redis:
+dns:
+  bind_conf:
+    host: "bind-api-host"
+    port: 80
+    url_pre: "/apis/proxypass"
+    user: "xxxx"
+    pass: "xxxx"
+    timeout: 10
+    bk_conf:
+      bk_token: "xxxx"
+  remote_conf:
+    host: "dbm-host"
+    port: 80
+    url_pre: "/apis/proxypass"
+    user: "xxxx"
+    pass: "xxxx"
+    timeout: 10
+    bk_conf:
+      bk_token: "xxxx"
+ssh:
+  port: 36000
+  user: "mysql"
+  pass: "mysql-user-pass"
+  dest: "agent"
+  timeout: 10
+monitor:
+  bk_data_id: 0
+  access_token: "xxx"
+  beat_path: "xxx"
+  agent_address: "xxxx"
+timezone:
+  local: "CST"
diff --git a/dbm-services/common/dbha/ha-module/log/log.go b/dbm-services/common/dbha/ha-module/log/log.go
new file mode 100644
index 0000000000..02619354da
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/log/log.go
@@ -0,0 +1,143 @@
+// Package log wraps zap logging for the DBHA module
+package log
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"time"
+
+	"github.com/natefinch/lumberjack"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"gorm.io/gorm/logger"
+)
+
+// Logger is the global sugared logger
+var Logger *zap.SugaredLogger
+
+// GormLogger is the logger passed to gorm for its internal errors
+var GormLogger logger.Interface
+
+// Init initializes the loggers from the log configuration
+func Init(logConf config.LogConfig) error {
+	// use ioutil.Discard here to discard gorm's internal errors.
+	GormLogger = logger.New(log.New(ioutil.Discard, "\r\n", log.LstdFlags), logger.Config{
+		SlowThreshold:             200 * time.Millisecond,
+		LogLevel:                  logger.Warn,
+		IgnoreRecordNotFoundError: false,
+		Colorful:                  true,
+	})
+
+	// initialize the normal logger
+	level := getLogLevel(logConf.LogLevel)
+	isFile, filepath := checkLogFilepath(logConf.LogPath)
+	var writeSyncer zapcore.WriteSyncer
+	if isFile {
+		logMaxSize, logMaxAge, logMaxBackups := getLogFileConfig(logConf)
+		fmt.Printf("log FILE parameter: filePath=%s,maxSize=%d,maxAge=%d,maxBackups=%d,compress=%v\n",
+			filepath, logMaxSize, logMaxAge, logMaxBackups, logConf.LogCompress)
+		writeSyncer = getLogFileWriter(filepath, logMaxSize,
+			logMaxBackups, logMaxAge, logConf.LogCompress)
+	} else {
+		fmt.Printf("log stdout\n")
+		writeSyncer = getStdoutWriter()
+	}
+
+	encoder := getEncoder()
+	core := zapcore.NewCore(encoder, writeSyncer, level)
+	rawLogger := zap.New(core, zap.AddCaller())
+	Logger = rawLogger.Sugar()
+
+	Logger.Info("Logger init ok")
+	if isFile {
+		err := checkLogFileExist(filepath)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func getEncoder() zapcore.Encoder {
+	encoderConfig := zap.NewProductionEncoderConfig()
+	encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+	encoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
+	return zapcore.NewConsoleEncoder(encoderConfig)
+}
+
+func getLogFileWriter(filename string, logMaxSize int,
+	logMaxBackups int, logMaxAge int, compress bool) zapcore.WriteSyncer {
+	lumberJackLogger := &lumberjack.Logger{
+		Filename:   filename,
+		MaxSize:    logMaxSize,
+		MaxBackups: logMaxBackups,
+		MaxAge:     logMaxAge,
+		Compress:   compress,
+	}
+	return zapcore.AddSync(lumberJackLogger)
+}
+
+func getStdoutWriter() zapcore.WriteSyncer {
+	return zapcore.AddSync(os.Stdout)
+}
+
+func getLogLevel(logLevel string) zapcore.Level {
+	switch logLevel {
+	case constvar.LOG_DEBUG:
+		return zapcore.DebugLevel
+	case constvar.LOG_INFO:
+		return zapcore.InfoLevel
+	case constvar.LOG_ERROR:
+		return zapcore.ErrorLevel
+	case constvar.LOG_PANIC:
+		return zapcore.PanicLevel
+	case constvar.LOG_FATAL:
+		return zapcore.FatalLevel
+	default:
+		return zapcore.DebugLevel
+	}
+}
+
+func getLogFileConfig(logConf config.LogConfig) (int, int, int) {
+	logMaxSize := logConf.LogMaxSize
+	if logConf.LogMaxSize == 0 {
+		logMaxSize = constvar.LOG_DEF_SIZE
+	}
+
+	logMaxAge := logConf.LogMaxAge
+	if logMaxAge == 0 {
+		logMaxAge = constvar.LOG_DEF_AGE
+	}
+
+	logMaxBackups := logConf.LogMaxBackups
+	if logMaxBackups == 0 {
+		logMaxBackups = constvar.LOG_DEF_BACKUPS
+	}
+
+	return logMaxSize, logMaxAge, logMaxBackups
+}
+
+func checkLogFilepath(logpath string) (bool, string) {
+	if len(logpath) == 0 {
+		fmt.Printf("log file path not set, logging to stdout\n")
+		return false, ""
+	}
+
+	return true, logpath
+}
+
+func checkLogFileExist(logpath string) error {
+	_, err := os.Stat(logpath)
+	if err != nil {
+		// report a missing log file distinctly from other stat errors
+		if os.IsNotExist(err) {
+			fmt.Printf("Log %s does not exist, err:%s\n", logpath, err.Error())
+			return err
+		}
+		fmt.Printf("Log %s check err, err:%s\n", logpath, err.Error())
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/common/dbha/ha-module/monitor/monitor.go b/dbm-services/common/dbha/ha-module/monitor/monitor.go
new file mode 100644
index 0000000000..2530d402b1
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/monitor/monitor.go
@@ -0,0 +1,159 @@
+// Package monitor TODO
+package monitor
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/log"
+	"dbm-services/common/dbha/ha-module/util"
+)
+
+// SwitchMonitor switch monitor information
+type SwitchMonitor struct {
+	ServerIp    string
+	ServerPort  int
+	Bzid        string
+	MachineType string
+	Role        string
+	Status      string
+	Cluster     string
+	IDC         string
+}
+
+// DetectMonitor detect monitor information
+type DetectMonitor struct {
+	ServerIp    string
+	ServerPort  int
+	Bzid        string
+	MachineType string
+	Status      string
+	Cluster     string
+}
+
+// MonitorInfo the struct of monitor information
+type MonitorInfo struct {
+	EventName       string
+	MonitorInfoType int
+	Switch          SwitchMonitor
+	Detect          DetectMonitor
+}
+
+// MonitorInit initializes the monitor module from config
+func MonitorInit(conf *config.Config) error {
+	targetIp, err := util.GetMonIp()
+	if err != nil {
+		return err
+	}
+
+	RuntimeConfigInit(
+		targetIp, conf.Monitor.BkDataId, conf.Monitor.AccessToken,
+		conf.GetCloud(), constvar.MonitorReportType, constvar.MonitorMessageKind,
+		conf.Monitor.BeatPath, conf.Monitor.AgentAddress,
+	)
+
+	return nil
+}
+
+// MonitorSendSwitch sends switch monitor information
+func MonitorSendSwitch(ins dbutil.DataBaseSwitch, content string, succ bool) {
+	minfo := GetMonitorInfoBySwitch(ins, succ)
+	err := MonitorSend(content, minfo)
+	if err != nil {
+		log.Logger.Errorf(
+			"monitor send switch failed,err:%s,info:%v, content:%s", err.Error(), minfo, content,
+		)
+	}
+}
+
+// MonitorSendDetect sends detect monitor information
+func MonitorSendDetect(ins dbutil.DataBaseDetect, eventName string, content string) {
+	minfo := GetMonitorInfoByDetect(ins, eventName)
+	err := MonitorSend(content, minfo)
+	if err != nil {
+		log.Logger.Errorf(
+			"monitor send detect failed,err:%s,info:%v, content:%s", err.Error(), minfo, content,
+		)
+	}
+}
+
+// MonitorSend sends dbha monitor information
+func MonitorSend(content string, info MonitorInfo) error {
+	addDimension := make(map[string]interface{})
+	if info.MonitorInfoType == constvar.MONITOR_INFO_SWITCH {
+		addDimension["role"] = info.Switch.Role
+		addDimension["bzid"] = info.Switch.Bzid
+		addDimension["server_ip"] = info.Switch.ServerIp
+		addDimension["server_port"] = info.Switch.ServerPort
+		addDimension["status"] = info.Switch.Status
+		addDimension["cluster"] = info.Switch.Cluster
+		addDimension["machine_type"] = info.Switch.MachineType
+		addDimension["idc"] = info.Switch.IDC
+	}
+
+	return SendEvent(info.EventName, content, addDimension)
+}
+
+// GetMonitorInfoBySwitch get MonitorInfo by switch instance
+func GetMonitorInfoBySwitch(ins dbutil.DataBaseSwitch, succ bool) MonitorInfo {
+	var eventName string
+	switch ins.GetMetaType() {
+	case constvar.RedisMetaType, constvar.TwemproxyMetaType,
+		constvar.PredixyMetaType, constvar.TendisplusMetaType:
+		if succ {
+			eventName = constvar.DBHA_EVENT_REDIS_SWITCH_SUCC
+		} else {
+			eventName = constvar.DBHA_EVENT_REDIS_SWITCH_ERR
+		}
+	case constvar.MySQLMetaType, constvar.MySQLProxyMetaType:
+		if succ {
+			eventName = constvar.DBHA_EVENT_MYSQL_SWITCH_SUCC
+		} else {
+			eventName = constvar.DBHA_EVENT_MYSQL_SWITCH_ERR
+		}
+	default:
+		if succ {
+			eventName = constvar.DBHA_EVENT_MYSQL_SWITCH_SUCC
+		} else {
+			eventName = constvar.DBHA_EVENT_MYSQL_SWITCH_ERR
+		}
+	}
+
+	addr, port := ins.GetAddress()
+	return MonitorInfo{
+		EventName:       eventName,
+		MonitorInfoType: constvar.MONITOR_INFO_SWITCH,
+		Switch: SwitchMonitor{
+			ServerIp:    addr,
+			ServerPort:  port,
+			Bzid:        ins.GetApp(),
+			MachineType: ins.GetMetaType(),
+			Role:        ins.GetRole(),
+			Status:      ins.GetStatus(),
+			Cluster:     ins.GetCluster(),
+			IDC:         ins.GetIDC(),
+		},
+	}
+}
+
+// GetMonitorInfoByDetect get MonitorInfo by detect instance
+func GetMonitorInfoByDetect(ins dbutil.DataBaseDetect, eventName string) MonitorInfo {
+	addr, port := ins.GetAddress()
+	return MonitorInfo{
+		EventName:       eventName,
+		MonitorInfoType: constvar.MONITOR_INFO_DETECT,
+		Detect: DetectMonitor{
+			ServerIp:    addr,
+			ServerPort:  port,
+			Bzid:        ins.GetApp(),
+			MachineType: string(ins.GetType()),
+			Status:      string(ins.GetStatus()),
+			Cluster:     ins.GetCluster(),
+		},
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/monitor/monitor_api.go b/dbm-services/common/dbha/ha-module/monitor/monitor_api.go
new file mode 100644
index 0000000000..1ac7999298
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/monitor/monitor_api.go
@@ -0,0 +1,175 @@
+package monitor
+
+import (
+	"bytes"
+	"dbm-services/common/dbha/ha-module/log"
+	"encoding/json"
+	"fmt"
+	"os/exec"
+	"strconv"
+	"time"
+)
+
+// bkCustom struct of bk monitor information
+type bkCustom struct {
+	BkDataId    int    `yaml:"bk_data_id" validate:"required"`
+	AccessToken string `yaml:"access_token" validate:"required"`
+	ReportType  string `yaml:"report_type" validate:"required"`
+	MessageKind string `yaml:"message_kind" validate:"required"`
+}
+
+// BkMonitorBeat information of bkmonitorbeat tool
+type BkMonitorBeat struct {
+	CustomMetrics    bkCustom `yaml:"custom_metrics" validate:"required"`
+	CustomEvent      bkCustom `yaml:"custom_event" validate:"required"`
+	InnerEventName   string   `yaml:"inner_event_name" validate:"required"`
+	InnerMetricsName string   `yaml:"inner_metrics_name" validate:"required"`
+	BeatPath         string   `yaml:"beat_path" validate:"required,file"`
+	AgentAddress     string   `yaml:"agent_address" validate:"required,file"`
+}
+
+// runtimeConfig the runtime struct of monitor
+type runtimeConfig struct {
+	Ip            string        `yaml:"ip" validate:"required,ipv4"`
+	BkCloudID     int           `yaml:"bk_cloud_id" validate:"required,gte=0"`
+	BkMonitorBeat BkMonitorBeat `yaml:"bk_monitor_beat" validate:"required"`
+}
+
+// commonData the common data of bk monitor message
+type commonData struct {
+	Target    string                 `json:"target"`
+	Timestamp int64                  `json:"timestamp"`
+	Dimension map[string]interface{} `json:"dimension"`
+	Metrics   map[string]int         `json:"metrics"`
+}
+
+// eventData the event data of bk monitor message
+type eventData struct {
+	EventName string                 `json:"event_name"`
+	Event     map[string]interface{} `json:"event"`
+	commonData
+}
+
+// commonBody the common body of bk monitor message
+type commonBody struct {
+	DataId      int    `json:"bk_data_id"`
+	AccessToken string `json:"access_token"`
+}
+
+// eventBody the event body of bk monitor message
+type eventBody struct {
+	commonBody
+	Data []eventData `json:"data"`
+}
+
+// buildDimension assembles the dimension of a monitor message
+func buildDimension(addition map[string]interface{}) map[string]interface{} {
+	dimension := make(map[string]interface{})
+	dimension["bk_cloud_id"] = RuntimeConfig.BkCloudID
+
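+	// caller-supplied dimensions are merged on top of the default cloud id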
+	for k, v := range addition {
+		dimension[k] = v
+	}
+
+	return dimension
+}
+
+// SendBkMonitorBeat sends a bk monitor message via the bkmonitorbeat tool
+func SendBkMonitorBeat(
+	dataId int, reportType string,
+	messageKind string, body interface{},
+) error {
+	output, err := json.Marshal(body)
+	if err != nil {
+		log.Logger.Errorf("send bk monitor beat: encode body failed, err:%s,body:%v",
+			err.Error(), body)
+		return err
+	}
+
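+	// shell out to bkmonitorbeat, which hands the message to the bk agent at AgentAddress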
+	cmd := exec.Command(
+		RuntimeConfig.BkMonitorBeat.BeatPath, []string{
+			"-report",
+			"-report.bk_data_id", fmt.Sprintf("%d", dataId),
+			"-report.type", reportType,
+			"-report.message.kind", messageKind,
+			"-report.agent.address", RuntimeConfig.BkMonitorBeat.AgentAddress,
+			"-report.message.body", string(output),
+		}...,
+	)
+	log.Logger.Infof("send bk monitor, command=%s", cmd.String())
+	var stdout, stderr bytes.Buffer
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+
+	err = cmd.Run()
+	if err != nil {
+		log.Logger.Errorf("send bk monitor beat failed, err:%s, stdout:%s, stderr:%s",
+			err.Error(), stdout.String(), stderr.String())
+		return err
+	}
+
+	return nil
+}
+
+// SendEvent sends a bk monitor event
+func SendEvent(name string, content string, additionDimension map[string]interface{}) error {
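+	// event timestamp in milliseconds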
+	ts := time.Now().UnixNano() / (1000 * 1000)
+	body := eventBody{
+		commonBody: commonBody{
+			DataId:      RuntimeConfig.BkMonitorBeat.CustomEvent.BkDataId,
+			AccessToken: RuntimeConfig.BkMonitorBeat.CustomEvent.AccessToken,
+		},
+		Data: []eventData{
+			{
+				EventName: name, // RuntimeConfig.BkMonitorBeat.CustomEvent.Name,
+				Event: map[string]interface{}{
+					"content": content,
+				},
+				commonData: commonData{
+					Target:    RuntimeConfig.Ip,
+					Timestamp: ts,
+					Dimension: buildDimension(additionDimension),
+					Metrics:   nil,
+				},
+			},
+		},
+	}
+
+	err := SendBkMonitorBeat(
+		RuntimeConfig.BkMonitorBeat.CustomEvent.BkDataId,
+		RuntimeConfig.BkMonitorBeat.CustomEvent.ReportType,
+		RuntimeConfig.BkMonitorBeat.CustomEvent.MessageKind,
+		body,
+	)
+	if err != nil {
+		log.Logger.Errorf("send event failed, err:%s", err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// RuntimeConfig is the global runtime config of the monitor module
+var RuntimeConfig *runtimeConfig
+
+// RuntimeConfigInit initializes the monitor runtime config
+func RuntimeConfigInit(
+	targetIp string, bkDataId int, accessToken string, bkCloudId string,
+	reportType string, msgKind string, beatPath string, agentAddress string,
+) {
+	RuntimeConfig = &runtimeConfig{}
+	RuntimeConfig.Ip = targetIp
+	// bkCloudId arrives as a string from config; convert it so events carry the real cloud id
+	if id, convErr := strconv.Atoi(bkCloudId); convErr == nil {
+		RuntimeConfig.BkCloudID = id
+	}
+
+	bkMonitorBeat := BkMonitorBeat{
+		CustomEvent: bkCustom{
+			BkDataId:    bkDataId,
+			AccessToken: accessToken,
+			ReportType:  reportType,
+			MessageKind: msgKind,
+		},
+		BeatPath:     beatPath,
+		AgentAddress: agentAddress,
+	}
+
+	RuntimeConfig.BkMonitorBeat = bkMonitorBeat
+}
diff --git a/dbm-services/common/dbha/ha-module/test/MySQL_test.go b/dbm-services/common/dbha/ha-module/test/MySQL_test.go
new file mode 100644
index 0000000000..5d0eb71d77
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/test/MySQL_test.go
@@ -0,0 +1,62 @@
+package test
+
+import (
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbmodule/mysql"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"fmt"
+	"testing"
+	"time"
+)
+
+func newTestInstance() *mysql.MySQLDetectInstance {
+	return &mysql.MySQLDetectInstance{
+		BaseDetectDB: dbutil.BaseDetectDB{
+			Ip:             "xxxx",
+			Port:           40000,
+			App:            "test",
+			DBType:         constvar.MySQL,
+			ReporterTime:   time.Unix(0, 0),
+			ReportInterval: 10,
+			Status:         constvar.DBCheckSuccess,
+			SshInfo: dbutil.Ssh{
+				Port:    36000,
+				User:    "xxxx",
+				Pass:    "xxxx",
+				Dest:    "mysql",
+				Timeout: 5,
+			},
+		},
+		User:    "root",
+		Pass:    "xxxx",
+		Timeout: 10,
+	}
+}
+
+func TestSSH(t *testing.T) {
+	ins := newTestInstance()
+	err := ins.CheckSSH()
+	if err != nil {
+		t.Errorf("detection failed.err:%s", err.Error())
+	}
+}
+
+func TestDetectionSuccess(t *testing.T) {
+	var d dbutil.DataBaseDetect
+	d = newTestInstance()
+	for i := 0; i <= 20; i++ {
+		err := d.Detection()
+		if err != nil {
+			fmt.Println("detection failed.err:" + err.Error())
+			t.Errorf("detection failed.err:%s", err.Error())
+		}
+		fmt.Printf("status: %s\n", d.GetStatus())
+		if d.NeedReporter() {
+			fmt.Println("need to report")
+			d.UpdateReporterTime()
+		} else {
+			fmt.Println("no need to report")
+		}
+		time.Sleep(time.Second)
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/test/agent_test.go b/dbm-services/common/dbha/ha-module/test/agent_test.go
new file mode 100644
index 0000000000..1aeca0e4d0
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/test/agent_test.go
@@ -0,0 +1,77 @@
+package test
+
+import (
+	"dbm-services/common/dbha/ha-module/agent"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbutil"
+	"dbm-services/common/dbha/ha-module/gm"
+	"log"
+	"testing"
+	"time"
+)
+
+func TestAgentNetTransfer(t *testing.T) {
+	GlobalConfig, err := config.ParseConfigureFile("../gmInfo.yaml")
+	if err != nil {
+		log.Println("gmInfo get config failed.")
+		return
+	}
+	ch := make(chan gm.DoubleCheckInstanceInfo, 0)
+	gdm, _ := gm.NewGDM(GlobalConfig, ch, nil)
+	go func() {
+		gdm.Run()
+	}()
+
+	time.Sleep(10 * time.Second)
+
+	var d dbutil.DataBaseDetect
+	dbIns := newTestInstance()
+	d = dbIns
+	agentIns := agent.MonitorAgent{
+		City:             "sz",
+		Type:             "M",
+		LastFetchInsTime: time.Unix(0, 0),
+		// initialize the map: assigning to a nil map panics
+		DBInstance: make(map[string]dbutil.DataBaseDetect),
+	}
+	ip, _ := d.GetAddress()
+	agentIns.DBInstance[ip] = d
+	gmInfo := agent.GMConnection{
+		Ip:            "0.0.0.0",
+		Port:          50000,
+		LastFetchTime: time.Now(),
+	}
+	err = gmInfo.Init()
+	agentIns.GMInstance = map[string]*agent.GMConnection{
+		"0.0.0.0": &gmInfo,
+	}
+	if err != nil {
+		t.Errorf("gmInfo init failed.err:%s", err.Error())
+		return
+	}
+
+	for i := 0; i < 100; i++ {
+		switch i % 3 {
+		case 0:
+			dbIns.Status = constvar.DBCheckFailed
+		case 1:
+			dbIns.Status = constvar.SSHCheckFailed
+		case 2:
+			dbIns.Status = constvar.DBCheckSuccess
+		}
+		switch i % 4 {
+		case 0:
+			dbIns.App = "APP1"
+		case 1:
+			dbIns.App = "APP22"
+		case 2:
+			dbIns.App = "APP333"
+		case 3:
+			dbIns.App = "APP4444"
+		}
+		err = agentIns.ReporterGM(d)
+		if err != nil {
+			t.Errorf("reporter gmInfo failed.err:%s", err.Error())
+			return
+		}
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/test/client_test.go b/dbm-services/common/dbha/ha-module/test/client_test.go
new file mode 100644
index 0000000000..df2f305a23
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/test/client_test.go
@@ -0,0 +1,84 @@
+package test
+
+import (
+	"dbm-services/common/dbha/ha-module/client"
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/dbmodule"
+	"fmt"
+	"net/http"
+	"testing"
+)
+
+func TestNewClientByAddrs(t *testing.T) {
+	addr := "http://127.0.0.1:8080"
+	c, _ := client.NewClientByAddrs([]string{addr}, constvar.CmDBName)
+	param := c.ConvertParamForGetRequest(map[string]string{
+		"apps": "test1",
+	})
+	result, err := c.DoNew(http.MethodGet, "/cmdb/cluster/query?"+param, nil, nil)
+	if err != nil {
+		fmt.Printf("request failed:%s", err.Error())
+	}
+	fmt.Printf("%s", string(result.Data))
+}
+
+func TestGetInstanceByCity(t *testing.T) {
+	GlobalConfig, err := config.ParseConfigureFile("../monitor_agent.yaml")
+	if err != nil {
+		fmt.Printf("get config failed. err:%s", err.Error())
+		t.FailNow()
+	}
+	addr := "http://127.0.0.1:8080"
+	c, _ := client.NewClientByAddrs([]string{addr}, constvar.CmDBName)
+	cmdbC := client.CmDBClient{
+		Client: *c,
+	}
+	rawList, err := cmdbC.GetDBInstanceInfoByCity("2")
+	if err != nil {
+		fmt.Printf("get instance failed. err:%s", err.Error())
+		t.FailNow()
+	}
+	dbs, err := dbmodule.DBCallbackMap["tendbha"].FetchDBCallback(rawList, GlobalConfig)
+	if err != nil {
+		fmt.Printf("fetch db callback failed. err:%s", err.Error())
+		t.FailNow()
+	}
+	for _, info := range dbs {
+		ip, port := info.GetAddress()
+		fmt.Printf("%s, %d, %s, %s, %s\n", ip, port, info.GetType(), info.GetStatus(), info.GetApp())
+	}
+}
+
+func TestGetInstanceByIp(t *testing.T) {
+	addr := "http://127.0.0.1:8080"
+	c, _ := client.NewClientByAddrs([]string{addr}, constvar.CmDBName)
+	cmdbC := client.CmDBClient{
+		Client: *c,
+	}
+	inf, err := cmdbC.GetDBInstanceInfoByIp("6.6.6.6")
+	if err != nil {
+		fmt.Printf("get instance failed. err:%s", err.Error())
+		t.FailNow()
+	}
+	list, err := dbmodule.DBCallbackMap["tendbha"].GetSwitchInstanceInformation(inf, nil)
+	if err != nil {
+		fmt.Printf("get switch instance failed. err:%s", err.Error())
+		t.FailNow()
+	}
+	for _, info := range list {
+		fmt.Printf("%v\n", info)
+	}
+}
+
+func TestHaDBAgentGetGMInfo(t *testing.T) {
+	addr := "http://127.0.0.1:8080"
+	c, _ := client.NewClientByAddrs([]string{addr}, constvar.HaDBName)
+	hadb := client.HaDBClient{
+		Client: *c,
+	}
+	gmInfo, err := hadb.AgentGetGMInfo()
+	if err != nil {
+		fmt.Printf("get gm failed. err:%s", err.Error())
+		t.FailNow()
+	}
+	for _, info := range gmInfo {
+		fmt.Printf("%v\n", info)
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/test/log_test.go b/dbm-services/common/dbha/ha-module/test/log_test.go
new file mode 100644
index 0000000000..0321501aed
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/test/log_test.go
@@ -0,0 +1,17 @@
+package test
+
+import (
+	"dbm-services/common/dbha/ha-module/log"
+	"testing"
+)
+
+func TestLog(t *testing.T) {
+	// bounded loop: an endless loop would never let the test finish
+	for i := 0; i < 100; i++ {
+		log.Logger.Debugf("debug %d", i)
+		log.Logger.Infof("info %d", i)
+		log.Logger.Warnf("warn %d", i)
+		log.Logger.Errorf("error %d", i)
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/test/util_test.go b/dbm-services/common/dbha/ha-module/test/util_test.go
new file mode 100644
index 0000000000..8920a495a6
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/test/util_test.go
@@ -0,0 +1,20 @@
+package test
+
+import (
+	"dbm-services/common/dbha/ha-module/util"
+	"testing"
+)
+
+func TestMonIp(t *testing.T) {
+	for i := 0; i < 100000; i++ {
+		ip, err := util.GetMonIp()
+		if err != nil {
+			t.Errorf("get mon ip failed.err:%s", err.Error())
+			return
+		}
+		if ip != "127.0.0.1" {
+			t.Errorf("get mon ip error.ip:%s", ip)
+			return
+		}
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/types/types.go b/dbm-services/common/dbha/ha-module/types/types.go
new file mode 100644
index 0000000000..51d082f6dd
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/types/types.go
@@ -0,0 +1,8 @@
+// Package types defines shared types for the DBHA module
+package types
+
+// DBType is the type of a database instance (e.g. constvar.MySQL)
+type DBType string
+
+// CheckStatus is the status of a detect check (e.g. constvar.SSHCheckFailed)
+type CheckStatus string
diff --git a/dbm-services/common/dbha/ha-module/util/file_lock.go b/dbm-services/common/dbha/ha-module/util/file_lock.go
new file mode 100644
index 0000000000..2a852d920e
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/util/file_lock.go
@@ -0,0 +1,59 @@
+package util
+
+import (
+	"dbm-services/common/dbha/ha-module/log"
+	"os"
+	"syscall"
+)
+
+// FileLock file lock struct
+type FileLock struct {
+	Path string
+	Fd   *os.File
+}
+
+// NewFileLock creates a FileLock for the given path
+func NewFileLock(path string) *FileLock {
+	return &FileLock{
+		Path: path,
+	}
+}
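+
+// Usage sketch (lock path is illustrative):
+//
+//	l := NewFileLock("/tmp/dbha.lock")
+//	if err := l.Lock(); err == nil {
+//		defer l.UnLock()
+//		// ... critical section ...
+//	}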
+
+// Lock acquires an exclusive lock on Path, blocking until it is granted
+func (l *FileLock) Lock() error {
+	f, err := os.OpenFile(l.Path, os.O_RDWR|os.O_CREATE, os.ModePerm)
+	if err != nil {
+		log.Logger.Errorf("FileLock open file failed,path:%s,err:%s",
+			l.Path, err.Error())
+		return err
+	}
+
+	l.Fd = f
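+	// LOCK_EX blocks until the exclusive lock is granted; LOCK_EX|LOCK_NB would fail fast instead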
+	err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX)
+	if err != nil {
+		log.Logger.Errorf("FileLock lock failed,path:%s,fd:%d,err:%s",
+			l.Path, int(f.Fd()), err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// UnLock releases the file lock and closes the underlying file
+func (l *FileLock) UnLock() error {
+	if nil == l.Fd {
+		log.Logger.Warnf("FileLock fd is nil")
+		return nil
+	}
+
+	err := syscall.Flock(int(l.Fd.Fd()), syscall.LOCK_UN)
+	if err != nil {
+		log.Logger.Errorf("FileLock unlock failed,path:%s,fd:%d,err:%s",
+			l.Path, int(l.Fd.Fd()), err.Error())
+	}
+	l.Fd.Close()
+	return err
+}
diff --git a/dbm-services/common/dbha/ha-module/util/timezone.go b/dbm-services/common/dbha/ha-module/util/timezone.go
new file mode 100644
index 0000000000..dc0b8b88da
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/util/timezone.go
@@ -0,0 +1,36 @@
+package util
+
+import (
+	"dbm-services/common/dbha/ha-module/config"
+	"dbm-services/common/dbha/ha-module/constvar"
+	"fmt"
+	"time"
+)
+
+// InitTimezone sets the process-local timezone from config
+func InitTimezone(tzConf config.TimezoneConfig) {
+	switch tzConf.Local {
+	case constvar.TZ_UTC:
+		SetTimezoneToUTC()
+	case constvar.TZ_CST:
+		SetTimezoneToCST()
+	default:
+		SetTimezoneToCST()
+	}
+}
+
+// SetTimezoneToUTC sets the local timezone to UTC
+func SetTimezoneToUTC() {
+	time.Local = time.UTC
+}
+
+// SetTimezoneToCST sets the local timezone to Asia/Shanghai (CST)
+func SetTimezoneToCST() {
+	loc, err := time.LoadLocation("Asia/Shanghai")
+	if err != nil {
+		fmt.Println("load time location failed")
+	} else {
+		time.Local = loc
+		fmt.Printf("timezone is set to CST Asia/Shanghai\n")
+	}
+}
diff --git a/dbm-services/common/dbha/ha-module/util/util.go b/dbm-services/common/dbha/ha-module/util/util.go
new file mode 100644
index 0000000000..8a3a757e3a
--- /dev/null
+++ b/dbm-services/common/dbha/ha-module/util/util.go
@@ -0,0 +1,136 @@
+// Package util provides common helpers for the DBHA module
+package util
+
+import (
+	"dbm-services/common/dbha/ha-module/constvar"
+	"dbm-services/common/dbha/ha-module/log"
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"net"
+	"reflect"
+	"runtime"
+	"strings"
+	"time"
+)
+
+// LocalIp is the component's local ip
+var LocalIp string
+
+const (
+	tcpDialTimeout = 3 * time.Second
+)
+
+// AtWhere returns the caller's file name and line number.
+func AtWhere() string {
+	pc, _, _, ok := runtime.Caller(1)
+	if ok {
+		fileName, line := runtime.FuncForPC(pc).FileLine(pc)
+		result := strings.Index(fileName, "/tenjob/")
+		if result > 1 {
+			preStr := fileName[0:result]
+			fileName = strings.Replace(fileName, preStr, "", 1)
+		}
+		//		method := runtime.FuncForPC(pc).Name()
+		//		return fmt.Sprintf("%s [%s] line:%d", fileName, method, line)
+
+		return fmt.Sprintf("%s:%d", fileName, line)
+	} else {
+		return "Method not Found!"
+	}
+}
+
+// HasElem reports whether slice (a slice or array) contains elem
+func HasElem(elem interface{}, slice interface{}) bool {
+	defer func() {
+		if err := recover(); err != nil {
+			log.Logger.Errorf("HasElem error %s at  %s", err, AtWhere())
+		}
+	}()
+	arrV := reflect.ValueOf(slice)
+	if arrV.Kind() == reflect.Slice || arrV.Kind() == reflect.Array {
+		for i := 0; i < arrV.Len(); i++ {
+			// XXX - panics if slice element points to an unexported struct field
+			// see https://golang.org/pkg/reflect/#Value.Interface
+			if reflect.DeepEqual(arrV.Index(i).Interface(), elem) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// HostCheck checks whether host ("ip:port") is reachable over TCP
+func HostCheck(host string) bool {
+	conn, err := net.DialTimeout("tcp", host, tcpDialTimeout)
+	if err != nil {
+		log.Logger.Error(err.Error())
+		return false
+	}
+	// close the probe connection so it does not leak
+	_ = conn.Close()
+	return true
+}
+
+// GetMonIp returns the first non-loopback IPv4 address of the host
+func GetMonIp() (string, error) {
+	addr, err := net.InterfaceAddrs()
+
+	if err != nil {
+		return "", err
+	}
+
+	for _, address := range addr {
+		// skip loopback addresses and return the first IPv4 address found
+		if inet, ok := address.(*net.IPNet); ok && !inet.IP.IsLoopback() {
+			if inet.IP.To4() != nil {
+				return inet.IP.String(), nil
+			}
+		}
+	}
+
+	return "", errors.New("can not find the client ip address")
+}
+
+// CRC32 returns the IEEE CRC-32 checksum of str
+func CRC32(str string) uint32 {
+	return crc32.ChecksumIEEE([]byte(str))
+}
+
+// CheckRedisErrIsAuthFail checks whether the error returned by the redis
+// api is an authentication failure. It covers four server types
+// (rediscache, tendisplus, twemproxy, predixy) and two states: the request
+// lacks a password, or the password is invalid.
+func CheckRedisErrIsAuthFail(err error) bool {
+	errInfo := err.Error()
+	switch {
+	case strings.Contains(errInfo, constvar.REDIS_PASSWORD_INVALID):
+		// invalid password: rediscache, tendisplus, twemproxy and predixy all match
+		return true
+	case strings.Contains(errInfo, constvar.REDIS_PASSWORD_LACK):
+		// missing password: rediscache, tendisplus and twemproxy match, predixy does not
+		return true
+	case strings.Contains(errInfo, constvar.PREDIXY_PASSWORD_LACK):
+		// missing password: predixy only
+		return true
+	default:
+		return false
+	}
+}
+
+// CheckSSHErrIsAuthFail checks whether the error returned by the ssh api
+// is an authentication failure.
+func CheckSSHErrIsAuthFail(err error) bool {
+	// ssh returns the same error whether the password is missing or invalid
+	return strings.Contains(err.Error(), constvar.SSH_PASSWORD_LACK_OR_INVALID)
+}
diff --git a/dbm-services/common/dbha/hadb-api/Dockerfile b/dbm-services/common/dbha/hadb-api/Dockerfile
new file mode 100644
index 0000000000..7b492a6b21
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/Dockerfile
@@ -0,0 +1,20 @@
+FROM mirrors.tencent.com/sccmsp/golang:1.16
+LABEL maintainer="hadb-api Development"
+
+ARG BASEDIR=/home/hadb
+
+RUN mkdir ${BASEDIR}
+
+COPY hadb-api ${BASEDIR}/
+
+RUN groupadd -r mysql && useradd -r -g mysql mysql \
+    && /usr/bin/install -m 0777 -o mysql -g root -d ${BASEDIR}\
+    && chown -R mysql ${BASEDIR} \
+    && chmod a+x ${BASEDIR}/hadb-api \
+    && chmod a+x ${BASEDIR}
+
+USER mysql
+
+WORKDIR ${BASEDIR}
+
+ENTRYPOINT ["/home/hadb/hadb-api"]
diff --git a/dbm-services/common/dbha/hadb-api/LICENSE b/dbm-services/common/dbha/hadb-api/LICENSE
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/common/dbha/hadb-api/README.md b/dbm-services/common/dbha/hadb-api/README.md
new file mode 100644
index 0000000000..58d2a8c692
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/README.md
@@ -0,0 +1,13 @@
+# bk-dbha-api
+bk-dbha-api provides a set of APIs for the bk-dbha component to access the HA-related databases, covering daily switch status queries, heartbeat reporting, switch logs and more.
+
+## Requirements
+go1.14+
+
+## Build
+```bash
+go build -o hadb main.go
+```
+
+## Run
+```bash
+./hadb run port:8090
+```
\ No newline at end of file
diff --git a/dbm-services/common/dbha/hadb-api/cmd/add.go b/dbm-services/common/dbha/hadb-api/cmd/add.go
new file mode 100644
index 0000000000..bd957ba7dd
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/cmd/add.go
@@ -0,0 +1,42 @@
+// Package cmd TODO
+/*
+Copyright © 2022 NAME HERE 
+
+*/
+package cmd
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// addCmd represents the add command
+var addCmd = &cobra.Command{
+	Use:   "add",
+	Short: "A brief description of your command",
+	Long: `A longer description that spans multiple lines and likely contains examples
+and usage of using your command. For example:
+
+Cobra is a CLI library for Go that empowers applications.
+This application is a tool to generate the needed files
+to quickly create a Cobra application.`,
+	Run: func(cmd *cobra.Command, args []string) {
+		fmt.Println("add called")
+	},
+}
+
+func init() {
+	rootCmd.AddCommand(addCmd)
+
+	// Here you will define your flags and configuration settings.
+
+	// Cobra supports Persistent Flags which will work for this command
+	// and all subcommands, e.g.:
+	// addCmd.PersistentFlags().String("foo", "", "A help for foo")
+
+	// Cobra supports local flags which will only run when this command
+	// is called directly, e.g.:
+	// addCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
+}
diff --git a/dbm-services/common/dbha/hadb-api/cmd/root.go b/dbm-services/common/dbha/hadb-api/cmd/root.go
new file mode 100644
index 0000000000..cbdf6fbae5
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/cmd/root.go
@@ -0,0 +1,48 @@
+// Package cmd implements the hadb-api command line interface.
+/*
+Copyright © 2022 NAME HERE 
+
+*/
+package cmd
+
+import (
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
+// rootCmd represents the base command when called without any subcommands
+var rootCmd = &cobra.Command{
+	Use:   "model-api",
+	Short: "A brief description of your application",
+	Long: `A longer description that spans multiple lines and likely contains
+examples and usage of using your application. For example:
+
+Cobra is a CLI library for Go that empowers applications.
+This application is a tool to generate the needed files
+to quickly create a Cobra application.`,
+	// Uncomment the following line if your bare application
+	// has an action associated with it:
+	// Run: func(cmd *cobra.Command, args []string) { },
+}
+
+// Execute adds all child commands to the root command and sets flags appropriately.
+// This is called by main.main(). It only needs to happen once to the rootCmd.
+func Execute() {
+	err := rootCmd.Execute()
+	if err != nil {
+		os.Exit(1)
+	}
+}
+
+func init() {
+	// Here you will define your flags and configuration settings.
+	// Cobra supports persistent flags, which, if defined here,
+	// will be global for your application.
+
+	// rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.model-api.yaml)")
+
+	// Cobra also supports local flags, which will only run
+	// when this action is called directly.
+	rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
+}
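
Execute is the only exported entrypoint of this package, and its doc comment says it is called from main.main(). The main.go file is not part of this hunk, but given the module path declared in go.mod it presumably looks something like this minimal sketch:

```go
package main

import "dbm-services/common/dbha/hadb-api/cmd"

// main hands control to cobra, which parses the subcommand
// (e.g. "run") and dispatches to it.
func main() {
	cmd.Execute()
}
```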
diff --git a/dbm-services/common/dbha/hadb-api/cmd/run.go b/dbm-services/common/dbha/hadb-api/cmd/run.go
new file mode 100644
index 0000000000..427f2838d7
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/cmd/run.go
@@ -0,0 +1,68 @@
+// Package cmd implements the hadb-api command line interface.
+/*
+Copyright © 2022 NAME HERE 
+
+*/
+package cmd
+
+import (
+	"dbm-services/common/dbha/hadb-api/initc"
+	"dbm-services/common/dbha/hadb-api/log"
+	"dbm-services/common/dbha/hadb-api/pkg/handler"
+	"dbm-services/common/dbha/hadb-api/util"
+
+	"github.com/spf13/cobra"
+	"github.com/valyala/fasthttp"
+)
+
+// runCmd represents the run command
+var runCmd = &cobra.Command{
+	Use:   "run",
+	Short: "A brief description of your command",
+	Long: `A longer description that spans multiple lines and likely contains examples
+and usage of using your command. For example:
+
+Cobra is a CLI library for Go that empowers applications.
+This application is a tool to generate the needed files
+to quickly create a Cobra application.`,
+	Run: StartApiServer,
+}
+
+func init() {
+	rootCmd.AddCommand(runCmd)
+
+	// Here you will define your flags and configuration settings.
+
+	// Cobra supports Persistent Flags which will work for this command
+	// and all subcommands, e.g.:
+	// runCmd.PersistentFlags().String("foo", "", "A help for foo")
+
+	// Cobra supports local flags which will only run when this command
+	// is called directly, e.g.:
+	// runCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
+}
+
+// StartApiServer starts the fasthttp server and routes requests to the
+// registered url handlers.
+func StartApiServer(cmd *cobra.Command, args []string) {
+	log.Logger.Info("start run api server..., args:", args)
+	serverPort := util.DefaultServerPort
+	if len(initc.GlobalConfig.NetInfo.Port) > 0 {
+		serverPort = ":" + initc.GlobalConfig.NetInfo.Port
+		log.Logger.Infof("set port by config.yaml, port:%s", serverPort)
+	}
+
+	log.Logger.Info("the port of http server ", serverPort)
+
+	router := func(ctx *fasthttp.RequestCtx) {
+		url := string(ctx.Path())
+		log.Logger.Debugf("url info:%s", url)
+		if h, ok := handler.AddToHandlers[url]; ok {
+			h(ctx)
+		} else {
+			ctx.Error("Not found", fasthttp.StatusNotFound)
+		}
+	}
+	if err := fasthttp.ListenAndServe(serverPort, router); err != nil {
+		log.Logger.Errorf("run api server failed:%s", err.Error())
+	}
+}
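
The router dispatches on the raw request path via the handler.AddToHandlers map, so each endpoint registers itself under its url. A minimal sketch of what such a registration might look like; the map type is inferred from how run.go indexes and calls it, and the "/ha/status" url and response body are assumptions, not part of this patch:

```go
package handler

import "github.com/valyala/fasthttp"

// AddToHandlers maps a url path to its fasthttp handler; the type is
// inferred from handler.AddToHandlers[url](ctx) in run.go.
var AddToHandlers = map[string]func(ctx *fasthttp.RequestCtx){}

// init registers a hypothetical status endpoint so the router in run.go
// can dispatch to it.
func init() {
	AddToHandlers["/ha/status"] = func(ctx *fasthttp.RequestCtx) {
		ctx.SetStatusCode(fasthttp.StatusOK)
		ctx.SetBodyString(`{"code": 0, "msg": "ok"}`)
	}
}
```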
diff --git a/dbm-services/common/dbha/hadb-api/conf/config.yaml b/dbm-services/common/dbha/hadb-api/conf/config.yaml
new file mode 100644
index 0000000000..e1d79a76e1
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/conf/config.yaml
@@ -0,0 +1,20 @@
+hadbInfo:
+  host: "127.0.0.1"
+  port: 3306
+  db: "hadb"
+  user: "test"
+  password: "test"
+  charset: "utf8"
+serverInfo:
+  name: "model-api"
+netInfo:
+  port: "8080"
+logInfo:
+  logPath: "./log"
+  logLevel: "LOG_DEBUG"
+  logMaxsize: 1024
+  logMaxbackups: 5
+  logMaxage: 30
+  logCompress: true
+timezone:
+  local: "CST"
\ No newline at end of file
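
run.go reads this file through initc.GlobalConfig (NetInfo.Port in the code maps to netInfo.port above). The initc package itself is not shown in this hunk; since go.mod pulls in github.com/spf13/viper, a plausible minimal sketch of loading the file into typed config structs looks like the following, with all struct and field names here being assumptions rather than the actual initc implementation:

```go
package initc

import "github.com/spf13/viper"

// NetInfo mirrors the netInfo block of conf/config.yaml.
type NetInfo struct {
	Port string `mapstructure:"port"`
}

// Config mirrors the top-level layout of conf/config.yaml; only the
// block needed for this sketch is listed.
type Config struct {
	NetInfo NetInfo `mapstructure:"netInfo"`
}

// GlobalConfig holds the parsed configuration used across the service.
var GlobalConfig Config

// InitConfig reads conf/config.yaml with viper and unmarshals it into
// GlobalConfig.
func InitConfig() error {
	viper.SetConfigFile("conf/config.yaml")
	if err := viper.ReadInConfig(); err != nil {
		return err
	}
	return viper.Unmarshal(&GlobalConfig)
}
```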
diff --git a/dbm-services/common/dbha/hadb-api/go.mod b/dbm-services/common/dbha/hadb-api/go.mod
new file mode 100644
index 0000000000..3b99f8dca5
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/go.mod
@@ -0,0 +1,46 @@
+module dbm-services/common/dbha/hadb-api
+
+go 1.19
+
+require (
+	github.com/natefinch/lumberjack v2.0.0+incompatible
+	github.com/spf13/cobra v1.7.0
+	github.com/spf13/viper v1.15.0
+	github.com/valyala/fasthttp v1.35.0
+	go.uber.org/zap v1.24.0
+	gorm.io/driver/mysql v1.5.0
+	gorm.io/gorm v1.25.0
+)
+
+require (
+	github.com/BurntSushi/toml v1.2.1 // indirect
+	github.com/andybalholm/brotli v1.0.4 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/go-sql-driver/mysql v1.7.1 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/jinzhu/inflection v1.0.0 // indirect
+	github.com/jinzhu/now v1.1.5 // indirect
+	github.com/klauspost/compress v1.15.11 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.7 // indirect
+	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	github.com/spf13/afero v1.9.5 // indirect
+	github.com/spf13/cast v1.5.0 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/stretchr/testify v1.8.2 // indirect
+	github.com/subosito/gotenv v1.4.2 // indirect
+	github.com/valyala/bytebufferpool v1.0.0 // indirect
+	go.uber.org/atomic v1.9.0 // indirect
+	go.uber.org/goleak v1.1.12 // indirect
+	go.uber.org/multierr v1.8.0 // indirect
+	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/dbm-services/common/dbha/hadb-api/go.sum b/dbm-services/common/dbha/hadb-api/go.sum
new file mode 100644
index 0000000000..a8b76c9470
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/go.sum
@@ -0,0 +1,547 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
+github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
+github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c=
+github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/natefinch/lumberjack v2.0.0+incompatible h1:4QJd3OLAMgj7ph+yZTuX13Ld4UpgHp07nNdFX7mqFfM=
+github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk=
+github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us=
+github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
+github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
+github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
+github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
+github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+github.com/valyala/fasthttp v1.35.0 h1:wwkR8mZn2NbigFsaw2Zj5r+xkmzjbrA/lyTmiSlal/Y=
+github.com/valyala/fasthttp v1.35.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I=
+github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
+go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gorm.io/driver/mysql v1.5.0 h1:6hSAT5QcyIaty0jfnff0z0CLDjyRgZ8mlMHLqSt7uXM=
+gorm.io/driver/mysql v1.5.0/go.mod h1:FFla/fJuCvyTi7rJQd27qlNX2v3L6deTR1GgTjSOLPo=
+gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
+gorm.io/gorm v1.25.0 h1:+KtYtb2roDz14EQe4bla8CbQlmb9dN3VejSai3lprfU=
+gorm.io/gorm v1.25.0/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/dbm-services/common/dbha/hadb-api/initc/initc.go b/dbm-services/common/dbha/hadb-api/initc/initc.go
new file mode 100644
index 0000000000..67605a5f06
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/initc/initc.go
@@ -0,0 +1,2 @@
+// Package initc holds the service's runtime configuration
+package initc
diff --git a/dbm-services/common/dbha/hadb-api/initc/initconfig.go b/dbm-services/common/dbha/hadb-api/initc/initconfig.go
new file mode 100644
index 0000000000..93c2f1df49
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/initc/initconfig.go
@@ -0,0 +1,48 @@
+package initc
+
+// GlobalConfig is the process-wide configuration, populated in main
+var GlobalConfig *Config
+
+// Config struct generated by https://zhwt.github.io/yaml-to-go/
+type Config struct {
+	HadbInfo     HadbInfo     `yaml:"hadbInfo"`
+	ServerInfo   ServerInfo   `yaml:"serverInfo"`
+	NetInfo      NetInfo      `yaml:"netInfo"`
+	LogInfo      LogInfo      `yaml:"logInfo"`
+	TimezoneInfo TimezoneInfo `yaml:"timezone" mapstructure:"timezone"` // viper.Unmarshal matches mapstructure tags; "timezone" differs from the field name
+}
+
+// HadbInfo describes the backing HADB MySQL instance
+type HadbInfo struct {
+	Host     string `yaml:"host"`
+	Port     int    `yaml:"port"`
+	Db       string `yaml:"db"`
+	User     string `yaml:"user"`
+	Password string `yaml:"password"`
+	Charset  string `yaml:"charset"`
+}
+
+// ServerInfo names this service instance
+type ServerInfo struct {
+	Name string `yaml:"name"`
+}
+
+// NetInfo holds the service listen port
+type NetInfo struct {
+	Port string `yaml:"port"`
+}
+
+// LogInfo configures the log package
+type LogInfo struct {
+	LogPath       string `yaml:"logPath"`
+	LogLevel      string `yaml:"logLevel"`
+	LogMaxSize    int    `yaml:"logMaxsize"`
+	LogMaxBackups int    `yaml:"logMaxbackups"`
+	LogMaxAge     int    `yaml:"logMaxage"`
+	LogCompress   bool   `yaml:"logCompress"`
+}
+
+// TimezoneInfo support timezone configure
+type TimezoneInfo struct {
+	Local string `yaml:"local"`
+}
diff --git a/dbm-services/common/dbha/hadb-api/log/log.go b/dbm-services/common/dbha/hadb-api/log/log.go
new file mode 100644
index 0000000000..57f5f251e6
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/log/log.go
@@ -0,0 +1,121 @@
+// Package log wraps zap with optional lumberjack-based file rotation
+package log
+
+import (
+	"dbm-services/common/dbha/hadb-api/initc"
+	"dbm-services/common/dbha/hadb-api/util"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/natefinch/lumberjack"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// Logger is the package-level sugared logger, set by InitLog
+var Logger *zap.SugaredLogger
+
+// InitLog initializes the log module from the given LogInfo
+func InitLog(logConf initc.LogInfo) {
+	// initialize the normal logger
+	level := getLogLevel(logConf.LogLevel)
+	isFile, filepath := checkLogFilepath(logConf.LogPath)
+	var writeSyncer zapcore.WriteSyncer
+	if isFile {
+		logMaxSize, logMaxAge, logMaxBackups := getLogFileConfig(logConf)
+		fmt.Printf("log FILE parameter: filePath=%s,maxSize=%d,maxAge=%d,maxBackups=%d,compress=%v\n",
+			filepath, logMaxSize, logMaxAge, logMaxBackups, logConf.LogCompress)
+		writeSyncer = getLogFileWriter(filepath, logMaxSize,
+			logMaxBackups, logMaxAge, logConf.LogCompress)
+	} else {
+		fmt.Printf("log stdout\n")
+		writeSyncer = getStdoutWriter()
+	}
+
+	encoder := getEncoder()
+	core := zapcore.NewCore(encoder, writeSyncer, level)
+	rowLogger := zap.New(core, zap.AddCaller())
+	Logger = rowLogger.Sugar()
+}
+
+// getEncoder get log encoder
+func getEncoder() zapcore.Encoder {
+	encoderConfig := zap.NewProductionEncoderConfig()
+	encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
+	encoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
+	return zapcore.NewConsoleEncoder(encoderConfig)
+}
+
+// getLogFileWriter support log write to file
+func getLogFileWriter(filename string, logMaxSize int,
+	logMaxBackups int, logMaxAge int, compress bool) zapcore.WriteSyncer {
+	lumberJackLogger := &lumberjack.Logger{
+		Filename:   filename,
+		MaxSize:    logMaxSize,
+		MaxBackups: logMaxBackups,
+		MaxAge:     logMaxAge,
+		Compress:   compress,
+	}
+	return zapcore.AddSync(lumberJackLogger)
+}
+
+// getStdoutWriter support log write to stdout
+func getStdoutWriter() zapcore.WriteSyncer {
+	return zapcore.AddSync(os.Stdout)
+}
+
+// getLogLevel get the value of log level
+func getLogLevel(logLevel string) zapcore.Level {
+	switch logLevel {
+	case util.LOG_DEBUG:
+		return zapcore.DebugLevel
+	case util.LOG_INFO:
+		return zapcore.InfoLevel
+	case util.LOG_ERROR:
+		return zapcore.ErrorLevel
+	case util.LOG_PANIC:
+		return zapcore.PanicLevel
+	case util.LOG_FATAL:
+		return zapcore.FatalLevel
+	default:
+		return zapcore.DebugLevel
+	}
+}
+
+// getLogFileConfig get the value of log parameter
+func getLogFileConfig(logConf initc.LogInfo) (int, int, int) {
+	logMaxSize := logConf.LogMaxSize
+	if logConf.LogMaxSize == 0 {
+		logMaxSize = util.LOG_DEF_SIZE
+	}
+
+	logMaxAge := logConf.LogMaxAge
+	if logMaxAge == 0 {
+		logMaxAge = util.LOG_DEF_AGE
+	}
+
+	logMaxBackups := logConf.LogMaxBackups
+	if logMaxBackups == 0 {
+		logMaxBackups = util.LOG_DEF_BACKUPS
+	}
+
+	return logMaxSize, logMaxAge, logMaxBackups
+}
+
+// checkLogFilepath reports whether the log file's parent directory exists
+func checkLogFilepath(logpath string) (bool, string) {
+	if len(logpath) == 0 {
+		fmt.Printf("the logfile is not set and log switch to stdout\n")
+		return false, ""
+	}
+
+	fpath := filepath.Dir(logpath)
+	_, err := os.Stat(fpath)
+	if err != nil && os.IsNotExist(err) {
+		fmt.Printf("the father path:%s is not exist\n", fpath)
+		return false, ""
+	}
+	return true, logpath
+}
diff --git a/dbm-services/common/dbha/hadb-api/main.go b/dbm-services/common/dbha/hadb-api/main.go
new file mode 100644
index 0000000000..67d002ea04
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/main.go
@@ -0,0 +1,40 @@
+// Package main boots the hadb-api service
+/*
+Copyright © 2022 NAME HERE
+*/
+package main
+
+import (
+	"dbm-services/common/dbha/hadb-api/cmd"
+	"dbm-services/common/dbha/hadb-api/initc"
+	"dbm-services/common/dbha/hadb-api/log"
+	"dbm-services/common/dbha/hadb-api/model"
+	"dbm-services/common/dbha/hadb-api/util"
+	"fmt"
+
+	"github.com/spf13/viper"
+)
+
+func main() {
+	fmt.Printf("try to start service...")
+	viper.SetConfigName("config")
+	viper.SetConfigType("yaml")
+	viper.AddConfigPath("./conf")
+	if err := viper.ReadInConfig(); err != nil {
+		fmt.Printf("read config file failed:%s", err.Error())
+		return
+	}
+	initc.GlobalConfig = &initc.Config{}
+	if err := viper.Unmarshal(initc.GlobalConfig); err != nil {
+		// log.Logger is not initialized yet, so report to stdout and abort
+		fmt.Printf("unmarshal configure failed:%s\n", err.Error())
+		return
+	}
+	// NB: this echoes the full config, including DB credentials, to stdout
+	fmt.Printf("%+v\n", initc.GlobalConfig)
+
+	util.InitTimezone(initc.GlobalConfig.TimezoneInfo.Local)
+
+	log.InitLog(initc.GlobalConfig.LogInfo)
+	model.HADB.Init()
+	defer model.HADB.Close()
+
+	cmd.Execute()
+}
diff --git a/dbm-services/common/dbha/hadb-api/model/DBStatus.go b/dbm-services/common/dbha/hadb-api/model/DBStatus.go
new file mode 100644
index 0000000000..0a755ce1d6
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/model/DBStatus.go
@@ -0,0 +1,22 @@
+package model
+
+import (
+	"time"
+)
+
+// DbStatus maps the db_status table
+type DbStatus struct {
+	Uid      uint      `gorm:"column:uid;primary_key;AUTO_INCREMENT" json:"uid"`
+	AgentIP  string    `gorm:"column:agent_ip;NOT NULL" json:"agent_ip"`
+	IP       string    `gorm:"column:ip;NOT NULL" json:"ip"`
+	Port     uint      `gorm:"column:port;NOT NULL" json:"port"`
+	DbType   string    `gorm:"column:db_type;NOT NULL" json:"db_type"`
+	Status   string    `gorm:"column:status;NOT NULL" json:"status"`
+	Cloud    string    `gorm:"column:cloud;NOT NULL" json:"cloud"`
+	LastTime time.Time `gorm:"column:last_time;type:datetime;default:CURRENT_TIMESTAMP;NOT NULL"`
+}
+
+// TableName returns the gorm table name for DbStatus
+func (m *DbStatus) TableName() string {
+	return "db_status"
+}
diff --git a/dbm-services/common/dbha/hadb-api/model/HALogs.go b/dbm-services/common/dbha/hadb-api/model/HALogs.go
new file mode 100644
index 0000000000..ab3a21dc51
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/model/HALogs.go
@@ -0,0 +1,21 @@
+// Package model Code generated by sql2gorm. DO NOT EDIT.
+package model
+
+import (
+	"time"
+)
+
+type HaLogs struct {
+	Uid      uint      `gorm:"column:uid;primary_key;AUTO_INCREMENT" json:"uid"`
+	IP       string    `gorm:"column:ip;NOT NULL" json:"ip"`
+	Port     uint      `gorm:"column:port;NOT NULL" json:"port"`
+	MonIP    string    `gorm:"column:mon_ip;NOT NULL" json:"mon_ip"`
+	Module   string    `gorm:"column:module;NOT NULL" json:"module"`
+	Cloud    string    `gorm:"column:cloud;NOT NULL" json:"cloud"`
+	DateTime time.Time `gorm:"column:date_time;type:datetime;default:CURRENT_TIMESTAMP;NOT NULL" json:"date_time"`
+	Comment  string    `gorm:"column:comment;NOT NULL" json:"comment"`
+}
+
+func (m *HaLogs) TableName() string {
+	return "ha_logs"
+}
diff --git a/dbm-services/common/dbha/hadb-api/model/HAStatus.go b/dbm-services/common/dbha/hadb-api/model/HAStatus.go
new file mode 100644
index 0000000000..8790337082
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/model/HAStatus.go
@@ -0,0 +1,27 @@
+package model
+
+import (
+	"time"
+)
+
+// HaStatus struct generated by https://sql2gorm.mccode.info/
+type HaStatus struct {
+	Uid            uint      `gorm:"column:uid;primary_key;AUTO_INCREMENT" json:"uid"`
+	IP             string    `gorm:"column:ip;NOT NULL" json:"ip"`
+	Port           int       `gorm:"column:port" json:"port"`
+	Module         string    `gorm:"column:module;NOT NULL" json:"module"`
+	City           string    `gorm:"column:city;NOT NULL" json:"city"`
+	Campus         string    `gorm:"column:campus;NOT NULL" json:"campus"`
+	Cloud          string    `gorm:"column:cloud;NOT NULL" json:"cloud"`
+	DbType         string    `gorm:"column:db_type" json:"db_type"`
+	StartTime      time.Time `gorm:"column:start_time;type:datetime;default:CURRENT_TIMESTAMP;NOT NULL" json:"start_time"`
+	LastTime       time.Time `gorm:"column:last_time;type:datetime;default:CURRENT_TIMESTAMP;NOT NULL" json:"last_time"`
+	Status         string    `gorm:"column:status;NOT NULL" json:"status"`
+	TakeOverGm     string    `gorm:"column:take_over_gm" json:"take_over_gm"`
+	ReportInterval int       `gorm:"column:report_interval" json:"report_interval"`
+}
+
+// TableName returns the gorm table name for HaStatus
+func (m *HaStatus) TableName() string {
+	return "ha_status"
+}
diff --git a/dbm-services/common/dbha/hadb-api/model/SwitchLogs.go b/dbm-services/common/dbha/hadb-api/model/SwitchLogs.go
new file mode 100644
index 0000000000..e5d2ecce21
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/model/SwitchLogs.go
@@ -0,0 +1,19 @@
+package model
+
+import "time"
+
+// SwitchLogs maps the switch_logs table
+type SwitchLogs struct {
+	UID      int       `gorm:"column:uid;primaryKey;autoIncrement" json:"uid"`
+	SwitchID int       `gorm:"column:sw_id" json:"sw_id"`
+	IP       string    `gorm:"column:ip" json:"ip"`
+	Result   string    `gorm:"column:result" json:"result"`
+	Datetime time.Time `gorm:"column:datetime" json:"datetime"`
+	Comment  string    `gorm:"column:comment" json:"comment"`
+	Port     int       `gorm:"column:port" json:"port"`
+}
+
+// TableName returns the gorm table name for SwitchLogs
+func (s *SwitchLogs) TableName() string {
+	return "switch_logs"
+}
diff --git a/dbm-services/common/dbha/hadb-api/model/TbMonSwitchQueue.go b/dbm-services/common/dbha/hadb-api/model/TbMonSwitchQueue.go
new file mode 100644
index 0000000000..14dfdb0fe1
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/model/TbMonSwitchQueue.go
@@ -0,0 +1,32 @@
+package model
+
+import (
+	"time"
+)
+
+// TbMonSwitchQueue maps the tb_mon_switch_queue table
+type TbMonSwitchQueue struct {
+	Uid                uint      `gorm:"column:uid;primary_key;AUTO_INCREMENT" json:"uid"`
+	IP                 string    `gorm:"column:ip;NOT NULL" json:"ip"`
+	Port               int       `gorm:"column:port;NOT NULL" json:"port"`
+	ConfirmCheckTime   time.Time `gorm:"column:confirm_check_time;type:datetime;default:CURRENT_TIMESTAMP" json:"confirm_check_time"`
+	DbRole             string    `gorm:"column:db_role;NOT NULL" json:"db_role"`
+	SlaveIP            string    `gorm:"column:slave_ip" json:"slave_ip"`
+	SlavePort          int       `gorm:"column:slave_port" json:"slave_port"`
+	Status             string    `gorm:"column:status" json:"status"`
+	ConfirmResult      string    `gorm:"column:confirm_result" json:"confirm_result"`
+	SwitchStartTime    time.Time `gorm:"column:switch_start_time" json:"switch_start_time"`
+	SwitchFinishedTime time.Time `gorm:"column:switch_finished_time" json:"switch_finished_time"`
+	SwitchResult       string    `gorm:"column:switch_result" json:"switch_result"`
+	Remark             string    `gorm:"column:remark" json:"remark"`
+	App                string    `gorm:"column:app" json:"app"`
+	DbType             string    `gorm:"column:db_type" json:"db_type"`
+	Idc                string    `gorm:"column:idc" json:"idc"`
+	Cloud              string    `gorm:"column:cloud" json:"cloud"`
+	Cluster            string    `gorm:"column:cluster" json:"cluster"`
+}
+
+// TableName returns the gorm table name for TbMonSwitchQueue
+func (m *TbMonSwitchQueue) TableName() string {
+	return "tb_mon_switch_queue"
+}
diff --git a/dbm-services/common/dbha/hadb-api/model/init.go b/dbm-services/common/dbha/hadb-api/model/init.go
new file mode 100644
index 0000000000..948b6cca75
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/model/init.go
@@ -0,0 +1,127 @@
+package model
+
+import (
+	"database/sql"
+	"dbm-services/common/dbha/hadb-api/initc"
+	"dbm-services/common/dbha/hadb-api/log"
+	"dbm-services/common/dbha/hadb-api/util"
+	"fmt"
+	"time"
+
+	"gorm.io/driver/mysql"
+	"gorm.io/gorm"
+)
+
+// Database wraps the shared gorm handle
+type Database struct {
+	Self *gorm.DB
+}
+
+// HADB is the global database handle; Init populates it
+var HADB *Database
+
+// InitHaDB opens HADB, creating the database if needed and running AutoMigrate
+func InitHaDB() *gorm.DB {
+	err := DoCreateDBIfNotExist()
+	if err != nil {
+		log.Logger.Errorf("init hadb failed,%s", err.Error())
+	}
+
+	haDBInfo := initc.GlobalConfig.HadbInfo
+	haDBDsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=%s&parseTime=True&loc=Local",
+		haDBInfo.User, haDBInfo.Password, haDBInfo.Host, haDBInfo.Port, haDBInfo.Db, haDBInfo.Charset)
+	hadb, err := gorm.Open(mysql.Open(haDBDsn), GenerateGormConfig())
+	if err != nil {
+		log.Logger.Errorf("connect to %s%d failed:%s", haDBInfo.Host, haDBInfo.Port, err.Error())
+	}
+
+	err = DoAutoMigrate(hadb)
+	if err != nil {
+		log.Logger.Errorf("hadb auto migrate failed, err:%s", err.Error())
+	}
+	return hadb
+}
+
+func (db *Database) setupDB() {
+	d, err := db.Self.DB()
+	if err != nil {
+		log.Logger.Error("get db for setup failed:%s", err.Error())
+	}
+	d.SetMaxIdleConns(0)
+}
+
+func (db *Database) closeDB() {
+	d, err := db.Self.DB()
+	if err != nil {
+		log.Logger.Error("get db for close failed:%s", err.Error())
+		return
+	}
+	if err := d.Close(); err != nil {
+		log.Logger.Error("close db failed:%s", err.Error())
+	}
+}
+
+// Init builds the global HADB handle
+func (db *Database) Init() {
+	HADB = &Database{
+		Self: InitHaDB(),
+	}
+}
+
+// Close closes the underlying connection pool
+func (db *Database) Close() {
+	HADB.closeDB()
+}
+
+// DoCreateDBIfNotExist creates the configured database if it is missing
+func DoCreateDBIfNotExist() error {
+	haDBInfo := initc.GlobalConfig.HadbInfo
+	connStr := fmt.Sprintf("%s:%s@tcp(%s:%d)/",
+		haDBInfo.User, haDBInfo.Password, haDBInfo.Host, haDBInfo.Port)
+	log.Logger.Infof("connect sql:%s", connStr)
+	haDB, err := sql.Open("mysql", connStr)
+	if err != nil {
+		log.Logger.Infof("exec database/sql failed, err:%s", err.Error())
+		return err
+	}
+
+	defer haDB.Close()
+
+	databaseStr := fmt.Sprintf("CREATE DATABASE IF NOT EXISTS %s", haDBInfo.Db)
+	log.Logger.Infof("database sql:%s", databaseStr)
+	_, err = haDB.Exec(databaseStr)
+	if err != nil {
+		log.Logger.Infof("exec database failed, err:%s", err.Error())
+		return err
+	}
+	log.Logger.Infof("Hadb init db success")
+	return nil
+}
+
+// DoAutoMigrate do gorm auto migrate
+func DoAutoMigrate(db *gorm.DB) error {
+	return db.AutoMigrate(&DbStatus{}, &HaLogs{}, &HaStatus{}, &SwitchLogs{}, &TbMonSwitchQueue{})
+}
+
+// GenerateGormConfig builds the gorm.Config with a timezone-aware NowFunc
+func GenerateGormConfig() *gorm.Config {
+	var nowFunc func() time.Time
+	switch initc.GlobalConfig.TimezoneInfo.Local {
+	case util.TZ_UTC:
+		nowFunc = func() time.Time {
+			return time.Now().UTC()
+		}
+	case util.TZ_CST:
+		nowFunc = func() time.Time {
+			return time.Now().In(time.FixedZone("CST", 8*3600))
+		}
+	default:
+		nowFunc = func() time.Time {
+			return time.Now().In(time.FixedZone("CST", 8*3600))
+		}
+	}
+
+	return &gorm.Config{
+		NowFunc: nowFunc,
+	}
+}
diff --git a/dbm-services/common/dbha/hadb-api/pkg/api/api.go b/dbm-services/common/dbha/hadb-api/pkg/api/api.go
new file mode 100644
index 0000000000..7849549300
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/api/api.go
@@ -0,0 +1,62 @@
+// Package api TODO
+package api
+
+import (
+	"encoding/json"
+
+	"github.com/valyala/fasthttp"
+)
+
+const (
+	// RespOK is the code for a successful request
+	RespOK = 0
+	// RespErr is the code for a failed request
+	RespErr = 1
+)
+
+const (
+	// RowsAffect is the response key carrying gorm's RowsAffected
+	RowsAffect = "rowsAffected"
+)
+
+// QueryPage carries limit/offset paging arguments
+type QueryPage struct {
+	Limit  int `json:"limit"`
+	Offset int `json:"offset"`
+}
+
+// RequestInfo is the JSON request envelope shared by all handlers
+type RequestInfo struct {
+	// bk_cloud_id is needed by proxypass
+	BkCloudId int `json:"bk_cloud_id"`
+	// bk_token is needed by proxypass
+	BkToken string `json:"bk_token"`
+	// api name
+	Name string `json:"name"`
+	// query args from request.body
+	QueryArgs interface{} `json:"query_args"`
+	// set args from request.body
+	SetArgs interface{} `json:"set_args"`
+	// query limit
+	PageArgs QueryPage `json:"page_args"`
+}
+
+// ResponseInfo is the uniform JSON response envelope
+type ResponseInfo struct {
+	Code    int         `json:"code"`
+	Message string      `json:"msg"`
+	Data    interface{} `json:"data"`
+}
+
+// SendResponse marshals responseInfo and writes it as the JSON response body
+func SendResponse(ctx *fasthttp.RequestCtx, responseInfo ResponseInfo) {
+	body, err := json.Marshal(responseInfo)
+	if err != nil {
+		// fall back to an error envelope, and marshal again so the body
+		// actually reflects the failure
+		responseInfo.Data = ""
+		responseInfo.Message = err.Error()
+		responseInfo.Code = RespErr
+		body, _ = json.Marshal(responseInfo)
+	}
+	ctx.Response.SetBody(body)
+	ctx.Response.Header.SetContentType("application/json")
+	ctx.Response.SetStatusCode(fasthttp.StatusOK)
+}
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/add_dbstatus.go b/dbm-services/common/dbha/hadb-api/pkg/handler/add_dbstatus.go
new file mode 100644
index 0000000000..31b456bb86
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/add_dbstatus.go
@@ -0,0 +1,12 @@
+package handler
+
+import (
+	"dbm-services/common/dbha/hadb-api/pkg/handler/dbstatus"
+)
+
+func init() {
+	AddToApiManager(ApiHandler{
+		Url:     "/dbstatus/",
+		Handler: dbstatus.Handler,
+	})
+}
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/add_halogs.go b/dbm-services/common/dbha/hadb-api/pkg/handler/add_halogs.go
new file mode 100644
index 0000000000..81c36aeda4
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/add_halogs.go
@@ -0,0 +1,12 @@
+package handler
+
+import (
+	"dbm-services/common/dbha/hadb-api/pkg/handler/halogs"
+)
+
+func init() {
+	AddToApiManager(ApiHandler{
+		Url:     "/halogs/",
+		Handler: halogs.Handler,
+	})
+}
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/add_hastatus.go b/dbm-services/common/dbha/hadb-api/pkg/handler/add_hastatus.go
new file mode 100644
index 0000000000..157460df43
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/add_hastatus.go
@@ -0,0 +1,12 @@
+package handler
+
+import (
+	"dbm-services/common/dbha/hadb-api/pkg/handler/hastatus"
+)
+
+func init() {
+	AddToApiManager(ApiHandler{
+		Url:     "/hastatus/",
+		Handler: hastatus.Handler,
+	})
+}
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/add_switchlogs.go b/dbm-services/common/dbha/hadb-api/pkg/handler/add_switchlogs.go
new file mode 100644
index 0000000000..12b86d5ec2
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/add_switchlogs.go
@@ -0,0 +1,10 @@
+package handler
+
+import "dbm-services/common/dbha/hadb-api/pkg/handler/switchlog"
+
+func init() {
+	AddToApiManager(ApiHandler{
+		Url:     "/switchlogs/",
+		Handler: switchlog.Handler,
+	})
+}
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/add_switchqueue.go b/dbm-services/common/dbha/hadb-api/pkg/handler/add_switchqueue.go
new file mode 100644
index 0000000000..e279b7e28a
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/add_switchqueue.go
@@ -0,0 +1,12 @@
+package handler
+
+import (
+	"dbm-services/common/dbha/hadb-api/pkg/handler/switchqueue"
+)
+
+func init() {
+	AddToApiManager(ApiHandler{
+		Url:     "/switchqueue/",
+		Handler: switchqueue.Handler,
+	})
+}
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/dbstatus/dbstatus.go b/dbm-services/common/dbha/hadb-api/pkg/handler/dbstatus/dbstatus.go
new file mode 100644
index 0000000000..679738bde4
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/dbstatus/dbstatus.go
@@ -0,0 +1,2 @@
+// Package dbstatus TODO
+package dbstatus
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/dbstatus/dbstatus_handler.go b/dbm-services/common/dbha/hadb-api/pkg/handler/dbstatus/dbstatus_handler.go
new file mode 100644
index 0000000000..922546e089
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/dbstatus/dbstatus_handler.go
@@ -0,0 +1,202 @@
+package dbstatus
+
+import (
+	"dbm-services/common/dbha/hadb-api/log"
+	"dbm-services/common/dbha/hadb-api/model"
+	"dbm-services/common/dbha/hadb-api/pkg/api"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/valyala/fasthttp"
+)
+
+const (
+	// GetStatus queries db_status rows
+	GetStatus = "get_instance_status"
+	// UpdateStatus updates matched db_status rows
+	UpdateStatus = "update_instance_status"
+	// PutStatus inserts a db_status row
+	PutStatus = "insert_instance_status"
+)
+
+// Handler dispatches /dbstatus/ requests by api name
+func Handler(ctx *fasthttp.RequestCtx) {
+	param := &api.RequestInfo{}
+	if err := json.Unmarshal(ctx.PostBody(), param); err != nil {
+		log.Logger.Errorf("parse request body failed:%s", err.Error())
+		api.SendResponse(ctx, api.ResponseInfo{
+			Data:    nil,
+			Code:    api.RespErr,
+			Message: err.Error(),
+		})
+		return
+	}
+	switch param.Name {
+	case GetStatus:
+		GetDBStatus(ctx, param.QueryArgs)
+	case UpdateStatus:
+		UpdateDBStatus(ctx, param.QueryArgs, param.SetArgs)
+	case PutStatus:
+		PutDBStatus(ctx, param.SetArgs)
+	default:
+		api.SendResponse(ctx, api.ResponseInfo{
+			Data:    nil,
+			Code:    api.RespErr,
+			Message: fmt.Sprintf("unknown api name[%s]", param.Name),
+		})
+	}
+}
+
+// GetDBStatus returns db_status rows matching the query args
+func GetDBStatus(ctx *fasthttp.RequestCtx, param interface{}) {
+	var (
+		result    = []model.DbStatus{}
+		whereCond = &model.DbStatus{}
+		response  = api.ResponseInfo{
+			Data:    &result,
+			Code:    api.RespOK,
+			Message: "",
+		}
+	)
+	// NB: don't call api.SendResponse(ctx, response) inline here; response
+	// is still mutated below, so send it via defer (or deep-copy it first)
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Message = "must be Post request"
+		response.Code = api.RespErr
+		log.Logger.Errorf("must by post request, param:%+v", param)
+		return
+	}
+
+	if bytes, err := json.Marshal(param); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, whereCond); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+	log.Logger.Debugf("%+v", whereCond)
+
+	if err := model.HADB.Self.Table(whereCond.TableName()).Where(whereCond).Find(&result).Error; err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("query table failed:%s", err.Error())
+	}
+	log.Logger.Debugf("%+v", result)
+}
+
+// UpdateDBStatus applies the set args to rows matching the query args
+func UpdateDBStatus(ctx *fasthttp.RequestCtx, queryParam interface{}, setParam interface{}) {
+	var (
+		result    = map[string]int64{}
+		whereCond = struct {
+			query model.DbStatus
+			set   model.DbStatus
+		}{}
+		response = api.ResponseInfo{
+			Data:    &result,
+			Code:    api.RespOK,
+			Message: "",
+		}
+	)
+
+	// NB: don't call api.SendResponse(ctx, response) inline here; response
+	// is still mutated below, so send it via defer (or deep-copy it first)
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Message = "must be POST request"
+		response.Code = api.RespErr
+		return
+	}
+
+	// convert queryParam
+	if bytes, err := json.Marshal(queryParam); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, &whereCond.query); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+
+	// convert setParam
+	if bytes, err := json.Marshal(setParam); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, &whereCond.set); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+	whereCond.set.LastTime = time.Now()
+	log.Logger.Debugf("%+v", whereCond)
+
+	db := model.HADB.Self.Table(whereCond.query.TableName()).Where(whereCond.query).Updates(whereCond.set)
+	if err := db.Error; err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("query table failed:%s", err.Error())
+	}
+	result[api.RowsAffect] = db.RowsAffected
+	log.Logger.Debugf("%+v", result)
+}
+
+// PutDBStatus inserts one db_status row
+func PutDBStatus(ctx *fasthttp.RequestCtx, setParam interface{}) {
+	input := &model.DbStatus{}
+	response := api.ResponseInfo{
+		Data:    nil,
+		Code:    api.RespOK,
+		Message: "",
+	}
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Code = api.RespErr
+		response.Message = "must be POST method"
+		return
+	}
+	// convert setParam
+	if bytes, err := json.Marshal(setParam); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, input); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+
+	db := model.HADB.Self.Table(input.TableName()).Create(input)
+	if err := db.Error; err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("insert table failed:%s", err.Error())
+		return
+	}
+
+	response.Data = map[string]interface{}{api.RowsAffect: db.RowsAffected}
+}
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/halogs/halogs.go b/dbm-services/common/dbha/hadb-api/pkg/handler/halogs/halogs.go
new file mode 100644
index 0000000000..732f561fa4
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/halogs/halogs.go
@@ -0,0 +1,2 @@
+// Package halogs TODO
+package halogs
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/halogs/halogs_handler.go b/dbm-services/common/dbha/hadb-api/pkg/handler/halogs/halogs_handler.go
new file mode 100644
index 0000000000..7b8c2ba8fa
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/halogs/halogs_handler.go
@@ -0,0 +1,81 @@
+package halogs
+
+import (
+	"dbm-services/common/dbha/hadb-api/log"
+	"dbm-services/common/dbha/hadb-api/model"
+	"dbm-services/common/dbha/hadb-api/pkg/api"
+	"encoding/json"
+	"fmt"
+
+	"github.com/valyala/fasthttp"
+)
+
+const (
+	// PutLogs inserts one ha_logs row
+	PutLogs = "reporter_log"
+)
+
+// Handler dispatches /halogs/ requests by api name
+func Handler(ctx *fasthttp.RequestCtx) {
+	param := &api.RequestInfo{}
+	if err := json.Unmarshal(ctx.PostBody(), param); err != nil {
+		log.Logger.Errorf("parse request body failed:%s", err.Error())
+		api.SendResponse(ctx, api.ResponseInfo{
+			Data:    nil,
+			Code:    api.RespErr,
+			Message: err.Error(),
+		})
+		return
+	}
+	switch param.Name {
+	case PutLogs:
+		PutHALogs(ctx, param.SetArgs)
+	default:
+		api.SendResponse(ctx, api.ResponseInfo{
+			Data:    nil,
+			Code:    api.RespErr,
+			Message: fmt.Sprintf("unknown api name[%s]", param.Name),
+		})
+	}
+}
+
+// PutHALogs inserts the reported ha_logs row
+func PutHALogs(ctx *fasthttp.RequestCtx, setParam interface{}) {
+	input := &model.HaLogs{}
+	response := api.ResponseInfo{
+		Data:    nil,
+		Code:    api.RespOK,
+		Message: "",
+	}
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Code = api.RespErr
+		response.Message = "must be POST method"
+		return
+	}
+	// convert setParam
+	if bytes, err := json.Marshal(setParam); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, input); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+
+	db := model.HADB.Self.Table(input.TableName()).Create(input)
+	if err := db.Error; err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("insert table failed:%s", err.Error())
+		return
+	}
+
+	response.Data = map[string]interface{}{api.RowsAffect: db.RowsAffected}
+}
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/handler.go b/dbm-services/common/dbha/hadb-api/pkg/handler/handler.go
new file mode 100644
index 0000000000..bc8ef36b53
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/handler.go
@@ -0,0 +1,21 @@
+// Package handler TODO
+package handler
+
+import "github.com/valyala/fasthttp"
+
+// AddToHandlers maps URL prefixes to their fasthttp handlers
+var AddToHandlers map[string]func(ctx *fasthttp.RequestCtx)
+
+// ApiHandler binds a URL to its handler func
+type ApiHandler struct {
+	Url     string
+	Handler func(ctx *fasthttp.RequestCtx)
+}
+
+// AddToApiManager registers one handler; called from init() in the add_*.go files
+func AddToApiManager(m ApiHandler) {
+	if AddToHandlers == nil {
+		AddToHandlers = make(map[string]func(ctx *fasthttp.RequestCtx))
+	}
+	AddToHandlers[m.Url] = m.Handler
+}
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/hastatus/hastatus.go b/dbm-services/common/dbha/hadb-api/pkg/handler/hastatus/hastatus.go
new file mode 100644
index 0000000000..42e0f40d24
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/hastatus/hastatus.go
@@ -0,0 +1,2 @@
+// Package hastatus TODO
+package hastatus
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/hastatus/hastatus_handler.go b/dbm-services/common/dbha/hadb-api/pkg/handler/hastatus/hastatus_handler.go
new file mode 100644
index 0000000000..a8cf16f43a
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/hastatus/hastatus_handler.go
@@ -0,0 +1,404 @@
+package hastatus
+
+import (
+	"dbm-services/common/dbha/hadb-api/log"
+	"dbm-services/common/dbha/hadb-api/model"
+	"dbm-services/common/dbha/hadb-api/pkg/api"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/valyala/fasthttp"
+	"gorm.io/gorm"
+)
+
+const (
+	haTableName = "ha_status"
+)
+
+/*
+func Handler(ctx *fasthttp.RequestCtx) {
+  result := &model.HaStatus{}
+  response := api.ResponseInfo{
+    Data:    result,
+    Code:    api.RespOK,
+    Message: "",
+  }
+  structName := reflect.TypeOf(model.HaStatus{})
+  whereCondMap := make(map[string]interface{})
+  ctx.QueryArgs().VisitAll(func(key, value []byte) {
+    if _, ok := util.FieldByNameCaseIgnore(structName, string(key)); ok {
+      whereCondMap[string(key)] = string(value)
+    } else {
+      log.Logger.Warnf("ignore invalid request argument:%s", string(key))
+    }
+  })
+  if err := model.HADB.Self.Table(result.TableName()).Where(whereCondMap).Find(result).Error; err != nil {
+    response.Code = api.RespErr
+    response.Message = err.Error()
+    response.Data = nil
+    log.Logger.Errorf("query table failed:%s", err.Error())
+  }
+  log.Logger.Debugf("query result:%+v", result)
+  api.SendResponse(ctx, response)
+}
+*/
+
+const (
+	// GetGmInfo lets an agent fetch GM rows
+	GetGmInfo = "agent_get_GM_info"
+	// GetAgentInfo lets an agent fetch agent rows
+	GetAgentInfo = "agent_get_agent_info"
+	// UpdateAgentInfo is the agent heartbeat report
+	UpdateAgentInfo = "reporter_agent_heartbeat"
+	// UpdateGMInfo is the GM heartbeat report
+	UpdateGMInfo = "reporter_gm_heartbeat"
+	// GetAliveAgentInfo fetches agents with a recent heartbeat
+	GetAliveAgentInfo = "get_alive_agent_info"
+	// GetAliveGMInfo fetches GMs with a recent heartbeat
+	GetAliveGMInfo = "get_alive_gm_info"
+	// RegisterHaInfo registers (upserts) a DBHA instance row
+	RegisterHaInfo = "register_dbha_info"
+)
+
+// Handler dispatches /hastatus/ requests by api name
+func Handler(ctx *fasthttp.RequestCtx) {
+	param := &api.RequestInfo{}
+	if err := json.Unmarshal(ctx.PostBody(), param); err != nil {
+		log.Logger.Errorf("parse request body failed:%s", err.Error())
+		api.SendResponse(ctx, api.ResponseInfo{
+			Data:    nil,
+			Code:    api.RespErr,
+			Message: err.Error(),
+		})
+		return
+	}
+	switch param.Name {
+	case GetGmInfo, GetAgentInfo:
+		GetHaInfo(ctx, param.QueryArgs)
+	case UpdateAgentInfo, UpdateGMInfo:
+		UpdateHaInfo(ctx, param.QueryArgs, param.SetArgs)
+	case GetAliveGMInfo:
+		GetAliveGmInfo(ctx, param.QueryArgs)
+	case GetAliveAgentInfo:
+		GetAliveHaInfo(ctx, param.QueryArgs)
+	case RegisterHaInfo:
+		ReplaceHaInfo(ctx, param.QueryArgs, param.SetArgs)
+	default:
+		api.SendResponse(ctx, api.ResponseInfo{
+			Data:    nil,
+			Code:    api.RespErr,
+			Message: fmt.Sprintf("unknown api name[%s]", param.Name),
+		})
+	}
+}
+
+// GetHaInfo returns ha_status rows matching the query args
+func GetHaInfo(ctx *fasthttp.RequestCtx, param interface{}) {
+	var (
+		result    = []model.HaStatus{}
+		whereCond = &model.HaStatus{}
+		response  = api.ResponseInfo{
+			Data:    &result,
+			Code:    api.RespOK,
+			Message: "",
+		}
+	)
+	// NB: don't call api.SendResponse(ctx, response) inline here; response
+	// is still mutated below, so send it via defer (or deep-copy it first)
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Message = "must be POST request"
+		response.Code = api.RespErr
+		log.Logger.Errorf("must by post request, param:%+v", param)
+		return
+	}
+
+	if bytes, err := json.Marshal(param); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, whereCond); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+	log.Logger.Debugf("%+v", whereCond)
+
+	if err := model.HADB.Self.Table(whereCond.TableName()).Where(whereCond).Find(&result).Error; err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("query table failed:%s", err.Error())
+	}
+	log.Logger.Debugf("%+v", result)
+}
+
+// UpdateHaInfo applies the set args (the heartbeat) to rows matching the query args
+func UpdateHaInfo(ctx *fasthttp.RequestCtx, queryParam interface{}, setParam interface{}) {
+	var (
+		result    = map[string]int64{}
+		whereCond = struct {
+			query model.HaStatus
+			set   model.HaStatus
+		}{}
+		response = api.ResponseInfo{
+			Data:    &result,
+			Code:    api.RespOK,
+			Message: "",
+		}
+	)
+
+	// NB: don't call api.SendResponse(ctx, response) inline here; response
+	// is still mutated below, so send it via defer (or deep-copy it first)
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Message = "must be POST request"
+		response.Code = api.RespErr
+		return
+	}
+
+	// convert queryParam
+	if bytes, err := json.Marshal(queryParam); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, &whereCond.query); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+
+	// convert setParam
+	if bytes, err := json.Marshal(setParam); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, &whereCond.set); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+	log.Logger.Debugf("%+v", whereCond)
+
+	db := model.HADB.Self.Table(whereCond.query.TableName()).Where(whereCond.query).Updates(whereCond.set)
+	if err := db.Error; err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("query table failed:%s", err.Error())
+	}
+	result[api.RowsAffect] = db.RowsAffected
+	log.Logger.Debugf("%+v", result)
+}
+
+// GetAliveGmInfo returns GM rows whose last_time is newer than the given bound
+func GetAliveGmInfo(ctx *fasthttp.RequestCtx, param interface{}) {
+	var (
+		result    = []model.HaStatus{}
+		whereCond = &model.HaStatus{}
+		response  = api.ResponseInfo{
+			Data:    &result,
+			Code:    api.RespOK,
+			Message: "",
+		}
+	)
+	// NB: don't call api.SendResponse(ctx, response) inline here; response
+	// is still mutated below, so send it via defer (or deep-copy it first)
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Message = "must be POST request"
+		response.Code = api.RespErr
+		log.Logger.Errorf("must by post request, param:%+v", param)
+		return
+	}
+
+	if bytes, err := json.Marshal(param); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, whereCond); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+	log.Logger.Debugf("alive gm: %+v", whereCond)
+
+	if err := model.HADB.Self.Table(whereCond.TableName()).
+		Where("module = ? and cloud= ? and last_time > ?", whereCond.Module, whereCond.Cloud, whereCond.LastTime).
+		Find(&result).Error; err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("query table failed:%s", err.Error())
+	}
+	log.Logger.Debugf("%+v", result)
+}
+
+// GetAliveHaInfo returns the IPs of alive agents in the same city as the given instance
+func GetAliveHaInfo(ctx *fasthttp.RequestCtx, param interface{}) {
+	var (
+		result    = []string{}
+		whereCond = &model.HaStatus{}
+		response  = api.ResponseInfo{
+			Data:    &result,
+			Code:    api.RespOK,
+			Message: "",
+		}
+	)
+	// NB: don't call api.SendResponse(ctx, response) inline here; response
+	// is still mutated below, so send it via defer (or deep-copy it first)
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Message = "must be POST request"
+		response.Code = api.RespErr
+		log.Logger.Errorf("must by post request, param:%+v", param)
+		return
+	}
+
+	if bytes, err := json.Marshal(param); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, whereCond); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+	log.Logger.Debugf("%+v", whereCond)
+
+	// select ip from ha_status
+	//   where city in (select city from ha_status where ip = ?)
+	//   and module = ? and status = ? and db_type = ? and cloud = ?
+	//   and last_time > ?  -- callers pass e.g. now() - interval 5 minute
+	//   order by uid;
+	db := model.HADB.Self
+	subQuery := db.Table(haTableName).
+		Where("ip = ? ", whereCond.IP).Select("city")
+	// keep the chained statement: gorm clones on each call, so its error
+	// never propagates back to the base db handle
+	err := db.Table(haTableName).Where("city in (?)", subQuery).Select("ip").
+		Where("module = ? and status = ? and last_time > ? and db_type= ? and cloud= ?",
+			whereCond.Module, whereCond.Status, whereCond.LastTime, whereCond.DbType, whereCond.Cloud).
+		Order("uid").Find(&result).Error
+
+	if err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("query table failed:%s", err.Error())
+	}
+	log.Logger.Debugf("%+v", result)
+}
+
+// ReplaceHaInfo upserts one ha_status row: update when the query matches, insert otherwise
+func ReplaceHaInfo(ctx *fasthttp.RequestCtx, queryParam interface{}, setParam interface{}) {
+	var (
+		result    = map[string]int64{}
+		whereCond = struct {
+			query model.HaStatus
+			set   model.HaStatus
+		}{}
+		response = api.ResponseInfo{
+			Code:    api.RespOK,
+			Message: "",
+			Data:    &result,
+		}
+	)
+
+	// NB: don't call api.SendResponse(ctx, response) inline here; response
+	// is still mutated below, so send it via defer (or deep-copy it first)
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Message = "must be POST request"
+		response.Code = api.RespErr
+		return
+	}
+
+	// convert queryParam
+	if bytes, err := json.Marshal(queryParam); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, &whereCond.query); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+
+	// convert setParam
+	if bytes, err := json.Marshal(setParam); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, &whereCond.set); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+	whereCond.set.LastTime = time.Now()
+
+	log.Logger.Debugf("%+v", whereCond)
+
+	if err := model.HADB.Self.Transaction(func(tx *gorm.DB) error {
+		row := &model.HaStatus{}
+		rt := tx.Table(whereCond.query.TableName()).Where(whereCond.query).First(row)
+		switch {
+		case rt.Error == gorm.ErrRecordNotFound:
+			// no matching row yet: insert the decoded struct so the
+			// LastTime stamped above is persisted as well
+			rt = tx.Table(whereCond.set.TableName()).Create(&whereCond.set)
+		case rt.Error != nil:
+			return rt.Error
+		default:
+			rt = tx.Table(whereCond.set.TableName()).Where(whereCond.query).Updates(whereCond.set)
+		}
+		if rt.Error != nil {
+			return rt.Error
+		}
+		log.Logger.Debugf("rowsaffected:%d", rt.RowsAffected)
+		result[api.RowsAffect] = rt.RowsAffected
+		return nil
+	}); err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("query table failed:%s", err.Error())
+	}
+}
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/switchlog/switchlog.go b/dbm-services/common/dbha/hadb-api/pkg/handler/switchlog/switchlog.go
new file mode 100644
index 0000000000..0f62b0f7e1
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/switchlog/switchlog.go
@@ -0,0 +1,2 @@
+// Package switchlog TODO
+package switchlog
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/switchlog/switchlogs_handler.go b/dbm-services/common/dbha/hadb-api/pkg/handler/switchlog/switchlogs_handler.go
new file mode 100644
index 0000000000..768611e3f2
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/switchlog/switchlogs_handler.go
@@ -0,0 +1,166 @@
+package switchlog
+
+import (
+	"dbm-services/common/dbha/hadb-api/log"
+	"dbm-services/common/dbha/hadb-api/model"
+	"dbm-services/common/dbha/hadb-api/pkg/api"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/valyala/fasthttp"
+)
+
+const (
+	// PutLog inserts one switch log row
+	PutLog = "insert_switch_log"
+	// GetLog queries switch logs by switch id
+	GetLog = "query_switch_log"
+)
+
+// SwitchLogsApi is the wire form of model.SwitchLogs, with Datetime rendered as a string
+type SwitchLogsApi struct {
+	UID      int    `json:"uid"`
+	SwitchID int    `json:"sw_id"`
+	IP       string `json:"ip"`
+	Result   string `json:"result"`
+	Datetime string `json:"datetime"`
+	Comment  string `json:"comment"`
+	Port     int    `json:"port"`
+}
+
+// Handler dispatches /switchlogs/ requests by api name
+func Handler(ctx *fasthttp.RequestCtx) {
+	param := &api.RequestInfo{}
+	if err := json.Unmarshal(ctx.PostBody(), param); err != nil {
+		log.Logger.Errorf("parse request body failed:%s", err.Error())
+		api.SendResponse(ctx, api.ResponseInfo{
+			Data:    nil,
+			Code:    api.RespErr,
+			Message: err.Error(),
+		})
+		return
+	}
+	switch param.Name {
+	case GetLog:
+		GetSwitchLogs(ctx, param.QueryArgs)
+	case PutLog:
+		PutSwitchLogs(ctx, param.SetArgs)
+	default:
+		api.SendResponse(ctx, api.ResponseInfo{
+			Data:    nil,
+			Code:    api.RespErr,
+			Message: fmt.Sprintf("unknown api name[%s]", param.Name),
+		})
+	}
+}
+
+// GetSwitchLogs returns the logs of one switch, newest first
+func GetSwitchLogs(ctx *fasthttp.RequestCtx, param interface{}) {
+	var (
+		result    = []model.SwitchLogs{}
+		whereCond = &model.SwitchLogs{}
+		response  = api.ResponseInfo{
+			Data:    nil,
+			Code:    api.RespOK,
+			Message: "",
+		}
+	)
+	// NB: don't call api.SendResponse(ctx, response) inline here; response
+	// is still mutated below, so send it via defer (or deep-copy it first)
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Message = "must be POST request"
+		response.Code = api.RespErr
+		log.Logger.Errorf("must by post request, param:%+v", param)
+		return
+	}
+
+	if bytes, err := json.Marshal(param); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, whereCond); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+	log.Logger.Debugf("%+v", whereCond)
+
+	if err := model.HADB.Self.Table(whereCond.TableName()).
+		Where("sw_id = ?", whereCond.SwitchID).
+		Order("uid DESC").Find(&result).Error; err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("query table failed:%s", err.Error())
+	}
+
+	response.Data = TransSwitchLogsToApi(result)
+	log.Logger.Debugf("%+v", result)
+	log.Logger.Debugf("apiResult:%v", response.Data)
+}
+
+// PutSwitchLogs inserts one switch log row
+func PutSwitchLogs(ctx *fasthttp.RequestCtx, setParam interface{}) {
+	input := &model.SwitchLogs{}
+	response := api.ResponseInfo{
+		Data:    nil,
+		Code:    api.RespOK,
+		Message: "",
+	}
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Code = api.RespErr
+		response.Message = "must be post method"
+		return
+	}
+	// convert setParam
+	if bytes, err := json.Marshal(setParam); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, input); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+
+	db := model.HADB.Self.Table(input.TableName()).Create(input)
+	if err := db.Error; err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("insert table failed:%s", err.Error())
+		return
+	}
+
+	response.Data = map[string]interface{}{api.RowsAffect: db.RowsAffected}
+}
+
+// TransSwitchLogsToApi converts rows to their API form, rendering datetimes in Asia/Shanghai
+func TransSwitchLogsToApi(result []model.SwitchLogs) []SwitchLogsApi {
+	apiResult := make([]SwitchLogsApi, 0)
+	loc, err := time.LoadLocation("Asia/Shanghai")
+	if err != nil {
+		// fall back to a fixed +08:00 zone when tzdata is unavailable
+		loc = time.FixedZone("CST", 8*3600)
+	}
+	for _, row := range result { // "row", not "log", to avoid shadowing the log package
+		logApi := SwitchLogsApi{
+			UID:      row.UID,
+			SwitchID: row.SwitchID,
+			IP:       row.IP,
+			Result:   row.Result,
+			Datetime: row.Datetime.In(loc).Format("2006-01-02T15:04:05-07:00"),
+			Comment:  row.Comment,
+			Port:     row.Port,
+		}
+		apiResult = append(apiResult, logApi)
+	}
+	return apiResult
+}
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/switchqueue/switchqueue.go b/dbm-services/common/dbha/hadb-api/pkg/handler/switchqueue/switchqueue.go
new file mode 100644
index 0000000000..b6e185255a
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/switchqueue/switchqueue.go
@@ -0,0 +1,2 @@
+// Package switchqueue TODO
+package switchqueue
diff --git a/dbm-services/common/dbha/hadb-api/pkg/handler/switchqueue/switchqueue_handler.go b/dbm-services/common/dbha/hadb-api/pkg/handler/switchqueue/switchqueue_handler.go
new file mode 100644
index 0000000000..b1d3f2e068
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/pkg/handler/switchqueue/switchqueue_handler.go
@@ -0,0 +1,442 @@
+package switchqueue
+
+import (
+	"dbm-services/common/dbha/hadb-api/log"
+	"dbm-services/common/dbha/hadb-api/model"
+	"dbm-services/common/dbha/hadb-api/pkg/api"
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"github.com/valyala/fasthttp"
+)
+
+const (
+	// GetInsTotalSwitch query single ins switch total
+	GetInsTotalSwitch = "query_single_total"
+	// GetIpTotalSwitch query single ip switch total
+	GetIpTotalSwitch = "query_interval_total"
+	// GetIdcTotalSwitch query single idc switch total
+	GetIdcTotalSwitch = "query_single_idc"
+	// UpdateQueue updates matched queue rows
+	UpdateQueue = "update_switch_queue"
+	// PutQueue inserts a queue row
+	PutQueue = "insert_switch_queue"
+	// GetQueue queries queue rows with optional paging
+	GetQueue = "query_switch_queue"
+)
+
+// TbMonSwitchQueueApi is the wire form of model.TbMonSwitchQueue, with string datetimes
+type TbMonSwitchQueueApi struct {
+	Uid                uint   `json:"uid"`
+	IP                 string `json:"ip"`
+	Port               int    `json:"port"`
+	ConfirmCheckTime   string `json:"confirm_check_time"`
+	DbRole             string `json:"db_role"`
+	SlaveIP            string `json:"slave_ip"`
+	SlavePort          int    `json:"slave_port"`
+	Status             string `json:"status"`
+	ConfirmResult      string `json:"confirm_result"`
+	SwitchStartTime    string `json:"switch_start_time"`
+	SwitchFinishedTime string `json:"switch_finished_time"`
+	SwitchResult       string `json:"switch_result"`
+	Remark             string `json:"remark"`
+	App                string `json:"app"`
+	DbType             string `json:"db_type"`
+	Idc                string `json:"idc"`
+	Cloud              string `json:"cloud"`
+	Cluster            string `json:"cluster"`
+}
+
+// Handler dispatches /switchqueue/ requests by api name
+func Handler(ctx *fasthttp.RequestCtx) {
+	param := &api.RequestInfo{}
+	if err := json.Unmarshal(ctx.PostBody(), param); err != nil {
+		log.Logger.Errorf("parse request body failed:%s", err.Error())
+		api.SendResponse(ctx, api.ResponseInfo{
+			Data:    nil,
+			Code:    api.RespErr,
+			Message: err.Error(),
+		})
+		return
+	}
+	switch param.Name {
+	case GetInsTotalSwitch:
+		GetSingleInsTotal(ctx, param.QueryArgs)
+	case GetIpTotalSwitch:
+		GetSingleIpTotal(ctx, param.QueryArgs)
+	case GetIdcTotalSwitch:
+		GetSingleIdcTotal(ctx, param.QueryArgs)
+	case UpdateQueue:
+		UpdateSwitchQueue(ctx, param.QueryArgs, param.SetArgs)
+	case PutQueue:
+		PutSwitchQueue(ctx, param.SetArgs)
+	case GetQueue:
+		GetSwitchQueue(ctx, param.QueryArgs, param.PageArgs)
+	default:
+		api.SendResponse(ctx, api.ResponseInfo{
+			Data:    nil,
+			Code:    api.RespErr,
+			Message: fmt.Sprintf("unknown api name[%s]", param.Name),
+		})
+	}
+}
+
+// GetSwitchQueue returns queue rows, optionally filtered by app and switch time window, newest first
+func GetSwitchQueue(ctx *fasthttp.RequestCtx, param interface{}, page api.QueryPage) {
+	var (
+		result    = []model.TbMonSwitchQueue{}
+		whereCond = &model.TbMonSwitchQueue{}
+		response  = api.ResponseInfo{
+			Data:    nil,
+			Code:    api.RespOK,
+			Message: "",
+		}
+	)
+	// NB: don't call api.SendResponse(ctx, response) inline here; response
+	// is still mutated below, so send it via defer (or deep-copy it first)
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Message = "must be Post request"
+		response.Code = api.RespErr
+		log.Logger.Errorf("must by post request, param:%+v", param)
+		return
+	}
+
+	if bytes, err := json.Marshal(param); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, whereCond); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+	log.Logger.Debugf("%+v", whereCond)
+
+	db := model.HADB.Self.Table(whereCond.TableName())
+	if whereCond.App != "" {
+		db = db.Where("app = ?", whereCond.App)
+	}
+	if !whereCond.SwitchStartTime.IsZero() && !whereCond.SwitchFinishedTime.IsZero() {
+		db = db.Where("switch_start_time > ?", whereCond.SwitchStartTime).
+			Where("switch_finished_time < ?", whereCond.SwitchFinishedTime)
+	}
+
+	if page.Limit > 0 {
+		if err := db.Limit(page.Limit).Offset(page.Offset).Order("uid DESC").Find(&result).Error; err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			response.Data = nil
+			log.Logger.Errorf("query table failed:%s", err.Error())
+		}
+	} else {
+		log.Logger.Debugf("no page_args")
+		if err := db.Order("uid DESC").Find(&result).Error; err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			response.Data = nil
+			log.Logger.Errorf("query table failed:%s", err.Error())
+		}
+	}
+
+	response.Data = TransSwitchQueueToApi(result)
+	log.Logger.Debugf("%+v", result)
+}
+
+// GetSingleInsTotal counts switch events for one instance (ip:port) after confirm_check_time.
+func GetSingleInsTotal(ctx *fasthttp.RequestCtx, param interface{}) {
+	var (
+		count  int64
+		result = map[string]*int64{
+			"count": &count,
+		}
+		whereCond = &model.TbMonSwitchQueue{}
+		response  = api.ResponseInfo{
+			Data:    &result,
+			Code:    api.RespOK,
+			Message: "",
+		}
+	)
+	// NB: don't call api.SendResponse(ctx, response) directly here; the deferred
+	// closure sends the final value of response (deep-copy response first otherwise)
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Message = "must be POST request"
+		response.Code = api.RespErr
+		log.Logger.Errorf("must by post request, param:%+v", param)
+		return
+	}
+
+	if bytes, err := json.Marshal(param); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, whereCond); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+	log.Logger.Debugf("%+v", whereCond)
+
+	if err := model.HADB.Self.Table(whereCond.TableName()).
+		Where("confirm_check_time > ?", whereCond.ConfirmCheckTime).
+		Where("ip = ? and port = ?", whereCond.IP, whereCond.Port).
+		Count(&count).Error; err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("query table failed:%s", err.Error())
+	}
+	log.Logger.Debugf("%+v", count)
+}
+
+// GetSingleIpTotal counts distinct IPs with switch events after confirm_check_time.
+func GetSingleIpTotal(ctx *fasthttp.RequestCtx, param interface{}) {
+	var (
+		count  int64
+		result = map[string]*int64{
+			"count": &count,
+		}
+		whereCond = &model.TbMonSwitchQueue{}
+		response  = api.ResponseInfo{
+			Data:    &result,
+			Code:    api.RespOK,
+			Message: "",
+		}
+	)
+	// NB: don't call api.SendResponse(ctx, response) directly here; the deferred
+	// closure sends the final value of response (deep-copy response first otherwise)
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Message = "must be POST request"
+		response.Code = api.RespErr
+		log.Logger.Errorf("must by post request, param:%+v", param)
+		return
+	}
+
+	if bytes, err := json.Marshal(param); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, whereCond); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+	log.Logger.Debugf("%+v", whereCond)
+
+	if err := model.HADB.Self.Table(whereCond.TableName()).
+		Where("confirm_check_time > ?", whereCond.ConfirmCheckTime).
+		Distinct("ip").Count(&count).Error; err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("query table failed:%s", err.Error())
+	}
+	log.Logger.Debugf("%+v", result)
+}
+
+// GetSingleIdcTotal counts distinct IPs (other than the given one) with switch events in the same IDC after confirm_check_time.
+func GetSingleIdcTotal(ctx *fasthttp.RequestCtx, param interface{}) {
+	var (
+		count  int64
+		result = map[string]*int64{
+			"count": &count,
+		}
+		whereCond = &model.TbMonSwitchQueue{}
+		response  = api.ResponseInfo{
+			Data:    &result,
+			Code:    api.RespOK,
+			Message: "",
+		}
+	)
+	// NB: don't call api.SendResponse(ctx, response) directly here; the deferred
+	// closure sends the final value of response (deep-copy response first otherwise)
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Message = "must be POST request"
+		response.Code = api.RespErr
+		log.Logger.Errorf("must by post request, param:%+v", param)
+		return
+	}
+
+	if bytes, err := json.Marshal(param); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, whereCond); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+	log.Logger.Debugf("%+v", whereCond)
+
+	if err := model.HADB.Self.Table(whereCond.TableName()).
+		Where("confirm_check_time > ?", whereCond.ConfirmCheckTime).
+		Where("idc = ? and ip <> ?", whereCond.Idc, whereCond.IP).
+		Distinct("ip").Count(&count).Error; err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("query table failed:%s", err.Error())
+	}
+	log.Logger.Debugf("%+v", result)
+}
+
+// UpdateSwitchQueue updates switch-queue rows matching queryParam with the values in setParam.
+func UpdateSwitchQueue(ctx *fasthttp.RequestCtx, queryParam interface{}, setParam interface{}) {
+	var (
+		result    = map[string]int64{}
+		whereCond = struct {
+			query model.TbMonSwitchQueue
+			set   model.TbMonSwitchQueue
+		}{}
+		response = api.ResponseInfo{
+			Data:    &result,
+			Code:    api.RespOK,
+			Message: "",
+		}
+	)
+
+	// NB: don't call api.SendResponse(ctx, response) directly here; the deferred
+	// closure sends the final value of response (deep-copy response first otherwise)
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Message = "must be POST request"
+		response.Code = api.RespErr
+		return
+	}
+
+	// convert queryParam
+	if bytes, err := json.Marshal(queryParam); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, &whereCond.query); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+
+	// convert setParam
+	if bytes, err := json.Marshal(setParam); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, &whereCond.set); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+	log.Logger.Debugf("%+v", whereCond)
+
+	db := model.HADB.Self.Table(whereCond.query.TableName()).Where(whereCond.query).Updates(whereCond.set)
+	if err := db.Error; err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("query table failed:%s", err.Error())
+	}
+	result[api.RowsAffect] = db.RowsAffected
+	log.Logger.Debugf("%+v", result)
+}
+
+// PutSwitchQueue inserts a new switch-queue record and returns its uid.
+func PutSwitchQueue(ctx *fasthttp.RequestCtx, setParam interface{}) {
+	input := &model.TbMonSwitchQueue{}
+	response := api.ResponseInfo{
+		Data:    nil,
+		Code:    api.RespOK,
+		Message: "",
+	}
+	defer func() { api.SendResponse(ctx, response) }()
+
+	if !ctx.IsPost() {
+		response.Code = api.RespErr
+		response.Message = "must be POST method"
+		return
+	}
+	// convert setParam
+	if bytes, err := json.Marshal(setParam); err != nil {
+		log.Logger.Errorf("convert param failed:%s", err.Error())
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		return
+	} else {
+		if err = json.Unmarshal(bytes, input); err != nil {
+			response.Code = api.RespErr
+			response.Message = err.Error()
+			return
+		}
+	}
+
+	db := model.HADB.Self.Table(input.TableName()).Create(input)
+	if err := db.Error; err != nil {
+		response.Code = api.RespErr
+		response.Message = err.Error()
+		response.Data = nil
+		log.Logger.Errorf("insert table failed:%s", err.Error())
+		return
+	}
+
+	response.Data = map[string]interface{}{
+		api.RowsAffect: db.RowsAffected,
+		"uid":          input.Uid,
+	}
+}
+
+// TransSwitchQueueToApi converts DB rows to their API representation,
+// formatting times in Asia/Shanghai.
+func TransSwitchQueueToApi(result []model.TbMonSwitchQueue) []TbMonSwitchQueueApi {
+	loc, err := time.LoadLocation("Asia/Shanghai")
+	if err != nil {
+		loc = time.Local // fall back to the process-local timezone rather than panic in In()
+	}
+	ApiResults := make([]TbMonSwitchQueueApi, 0)
+	for _, switchQueue := range result {
+		switchQueueApi := TbMonSwitchQueueApi{
+			Uid:                switchQueue.Uid,
+			IP:                 switchQueue.IP,
+			Port:               switchQueue.Port,
+			ConfirmCheckTime:   switchQueue.ConfirmCheckTime.In(loc).Format("2006-01-02T15:04:05-07:00"),
+			DbRole:             switchQueue.DbRole,
+			SlaveIP:            switchQueue.SlaveIP,
+			SlavePort:          switchQueue.SlavePort,
+			Status:             switchQueue.Status,
+			ConfirmResult:      switchQueue.ConfirmResult,
+			SwitchStartTime:    switchQueue.SwitchStartTime.In(loc).Format("2006-01-02T15:04:05-07:00"),
+			SwitchFinishedTime: switchQueue.SwitchFinishedTime.In(loc).Format("2006-01-02T15:04:05-07:00"),
+			SwitchResult:       switchQueue.SwitchResult,
+			Remark:             switchQueue.Remark,
+			App:                switchQueue.App,
+			DbType:             switchQueue.DbType,
+			Idc:                switchQueue.Idc,
+			Cloud:              switchQueue.Cloud,
+			Cluster:            switchQueue.Cluster,
+		}
+
+		ApiResults = append(ApiResults, switchQueueApi)
+	}
+	return ApiResults
+}
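+
+// Note on the time layout above: "2006-01-02T15:04:05-07:00" is RFC3339-like
+// but always renders a numeric UTC offset (e.g. "2023-05-29T15:14:30+08:00"),
+// whereas time.RFC3339 would render UTC as "Z"; the literal layout preserves
+// the existing wire format.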
diff --git a/dbm-services/common/dbha/hadb-api/util/constants.go b/dbm-services/common/dbha/hadb-api/util/constants.go
new file mode 100644
index 0000000000..724641932f
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/util/constants.go
@@ -0,0 +1,38 @@
+package util
+
+const (
+	// LogFileName defines the hadb-api's log filename
+	LogFileName = "run.log"
+	// DefaultServerPort is the default listen address
+	DefaultServerPort = ":8080"
+
+	// LOG_DEBUG is the debug log level name
+	LOG_DEBUG = "LOG_DEBUG"
+	// LOG_INFO is the info log level name
+	LOG_INFO = "LOG_INFO"
+	// LOG_WARN is the warn log level name
+	LOG_WARN = "LOG_WARN"
+	// LOG_ERROR is the error log level name
+	LOG_ERROR = "LOG_ERROR"
+	// LOG_PANIC is the panic log level name
+	LOG_PANIC = "LOG_PANIC"
+	// LOG_FATAL is the fatal log level name
+	LOG_FATAL = "LOG_FATAL"
+
+	// LOG_DEF_PATH is the default log file path
+	LOG_DEF_PATH = "./dbha.log"
+	// LOG_DEF_BACKUPS is the default number of rotated log files to keep
+	LOG_DEF_BACKUPS = 5
+	// LOG_DEF_AGE is the default log retention age
+	LOG_DEF_AGE = 30
+	// LOG_DEF_SIZE is the default log rotation size
+	LOG_DEF_SIZE = 1024
+	// LOG_MIN_SIZE is the minimum allowed log rotation size
+	LOG_MIN_SIZE = 1
+)
+
+// supported timezone names
+const (
+	TZ_UTC = "UTC"
+	TZ_CST = "CST"
+)
diff --git a/dbm-services/common/dbha/hadb-api/util/timezone.go b/dbm-services/common/dbha/hadb-api/util/timezone.go
new file mode 100644
index 0000000000..c8caa1e0c0
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/util/timezone.go
@@ -0,0 +1,34 @@
+package util
+
+import (
+	"fmt"
+	"time"
+)
+
+// InitTimezone initializes the local timezone
+func InitTimezone(tzInfo string) {
+	switch tzInfo {
+	case TZ_UTC:
+		SetTimezoneToUTC()
+	case TZ_CST:
+		SetTimezoneToCST()
+	default:
+		SetTimezoneToCST()
+	}
+}
+
+// SetTimezoneToUTC sets the local timezone to UTC
+func SetTimezoneToUTC() {
+	time.Local = time.UTC
+}
+
+// SetTimezoneToCST sets the local timezone to CST (Asia/Shanghai)
+func SetTimezoneToCST() {
+	loc, err := time.LoadLocation("Asia/Shanghai")
+	if err != nil {
+		fmt.Printf("load time location failed: %v\n", err)
+	} else {
+		time.Local = loc
+		fmt.Printf("timezone is set to CST Asia/Shanghai\n")
+	}
+	}
+}
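+
+// Usage sketch (hypothetical caller; TZ_UTC and TZ_CST are defined in
+// constants.go of this package):
+//
+//	util.InitTimezone(util.TZ_CST) // subsequent time.Now() calls use CST
+//	fmt.Println(time.Now())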
diff --git a/dbm-services/common/dbha/hadb-api/util/util.go b/dbm-services/common/dbha/hadb-api/util/util.go
new file mode 100644
index 0000000000..17c4b16166
--- /dev/null
+++ b/dbm-services/common/dbha/hadb-api/util/util.go
@@ -0,0 +1,26 @@
+// Package util TODO
+package util
+
+import (
+	"reflect"
+	"strings"
+)
+
+// FieldByNameCaseIgnore returns the struct field whose name matches the given
+// name case-insensitively; it falls back to FieldByNameFunc, which also walks
+// embedded fields.
+func FieldByNameCaseIgnore(v reflect.Type, name string) (reflect.StructField, bool) {
+	name = strings.ToLower(name)
+	if name != "" {
+		for i := 0; i < v.NumField(); i++ {
+			tf := v.Field(i)
+			if strings.ToLower(tf.Name) == name {
+				return tf, true
+			}
+		}
+	}
+
+	return v.FieldByNameFunc(func(s string) bool {
+		return strings.ToLower(s) == name
+	})
+}
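+
+// Usage sketch (the demo type is illustrative only):
+//
+//	type demo struct{ HostName string }
+//	f, ok := FieldByNameCaseIgnore(reflect.TypeOf(demo{}), "hostname")
+//	// ok == true, f.Name == "HostName"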
diff --git a/dbm-services/common/go-pubpkg/.gitignore b/dbm-services/common/go-pubpkg/.gitignore
new file mode 100644
index 0000000000..1d74e21965
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/.gitignore
@@ -0,0 +1 @@
+.vscode/
diff --git a/dbm-services/common/go-pubpkg/README.md b/dbm-services/common/go-pubpkg/README.md
new file mode 100644
index 0000000000..24a3ab301d
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/README.md
@@ -0,0 +1 @@
+# Go public packages for the DB-ON-BK project
\ No newline at end of file
diff --git a/dbm-services/common/go-pubpkg/cc.v3/README.md b/dbm-services/common/go-pubpkg/cc.v3/README.md
new file mode 100644
index 0000000000..52cf34ecf6
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/README.md
@@ -0,0 +1,2 @@
+# CC.v3
+Bindings for the CC 3.0 API
diff --git a/dbm-services/common/go-pubpkg/cc.v3/add_host_from_cmpy.go b/dbm-services/common/go-pubpkg/cc.v3/add_host_from_cmpy.go
new file mode 100644
index 0000000000..e4e80e4af7
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/add_host_from_cmpy.go
@@ -0,0 +1,27 @@
+package cc
+
+import (
+	"net/http"
+)
+
+// AddHostInfoFromCmpy wraps the CC add_host_from_cmpy API
+type AddHostInfoFromCmpy struct {
+	client *Client
+	url    string
+}
+
+// NewAddHostInfoFromCmpy returns a new AddHostInfoFromCmpy
+func NewAddHostInfoFromCmpy(client *Client) *AddHostInfoFromCmpy {
+	return &AddHostInfoFromCmpy{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/add_host_from_cmpy/",
+	}
+}
+
+// Query syncs newly added host info to CC by server ID
+func (s *AddHostInfoFromCmpy) Query(svrIds []int) (*Response, error) {
+	param := &AddHostInfoFromCmpyParam{
+		SvrIds: svrIds,
+	}
+	return s.client.Do(http.MethodPost, s.url, param)
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/add_host_from_cmpy_test.go b/dbm-services/common/go-pubpkg/cc.v3/add_host_from_cmpy_test.go
new file mode 100644
index 0000000000..1f4caad32c
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/add_host_from_cmpy_test.go
@@ -0,0 +1,26 @@
+package cc
+
+import (
+	"fmt"
+	"log"
+	"testing"
+)
+
+// TestAddHostInfoFromCmpy_Query
+func TestAddHostInfoFromCmpy_Query(t *testing.T) {
+	address := "http://127.0.0.1"
+
+	client, err := NewClient(address, TestSecret)
+	if err != nil {
+		log.Fatal(err.Error())
+	}
+
+	h := NewAddHostInfoFromCmpy(client)
+	param := []int{489462239}
+	result, err := h.Query(param)
+	if err != nil {
+		log.Fatal(err.Error())
+	}
+
+	fmt.Printf("result: %#v", result)
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_internal_module.go b/dbm-services/common/go-pubpkg/cc.v3/biz_internal_module.go
new file mode 100644
index 0000000000..32f71929d3
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_internal_module.go
@@ -0,0 +1,37 @@
+package cc
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// BizInternalModule is the BizInternalModule server
+type BizInternalModule struct {
+	client *Client
+	url    string
+}
+
+// NewBizInternalModule returns a new BizInternalModule server
+func NewBizInternalModule(client *Client) *BizInternalModule {
+	return &BizInternalModule{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/get_biz_internal_module/",
+	}
+}
+
+// Query fetches the built-in modules of a business by business ID
+func (h *BizInternalModule) Query(bizID int) (*BizInternalModuleResponse, error) {
+	param := &BizInternalModulesParam{
+		BKBizId: bizID,
+	}
+	resp, err := h.client.Do(http.MethodGet, h.url, param)
+	if err != nil {
+		return nil, err
+	}
+	var result BizInternalModuleResponse
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, err
+	}
+	return &result, nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_internal_module_test.go b/dbm-services/common/go-pubpkg/cc.v3/biz_internal_module_test.go
new file mode 100644
index 0000000000..fa76abb095
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_internal_module_test.go
@@ -0,0 +1,19 @@
+package cc
+
+import (
+	"testing"
+)
+
+// Test BizInternalModule
+func TestBizInternalModule(t *testing.T) {
+	client, err := NewClient("http://127.0.0.1", TestSecret)
+	if err != nil {
+		t.Fatal(err)
+	}
+	w := NewBizInternalModule(client)
+	response, err := w.Query(295)
+	if err != nil {
+		t.Fatalf("query biz internal modules failed, err: %+v", err)
+	}
+	t.Logf("query biz internal modules output: %+v", response)
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_list.go b/dbm-services/common/go-pubpkg/cc.v3/biz_list.go
new file mode 100644
index 0000000000..dd5192f4fd
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_list.go
@@ -0,0 +1,47 @@
+package cc
+
+import (
+	"dbm-services/common/go-pubpkg/cc.v3/utils"
+	"encoding/json"
+	"net/http"
+	"reflect"
+)
+
+// BizList is the BizList server
+type BizList struct {
+	client *Client
+	url    string
+	fields []string
+}
+
+// NewBizList returns a new BizList server
+func NewBizList(client *Client) *BizList {
+	fields := utils.GetStructTagName(reflect.TypeOf(&Biz{}))
+	return &BizList{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/search_business",
+		fields: fields,
+	}
+}
+
+// Query handler
+func (h *BizList) Query(condition map[string]interface{}, fields []string, page BKPage) (*BizResponse, error) {
+	param := &BizParam{
+		Fields:    fields,
+		Page:      page,
+		Condition: condition,
+	}
+	if len(fields) == 0 {
+		param.Fields = h.fields
+	}
+	resp, err := h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return nil, err
+	}
+	var result BizResponse
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, err
+	}
+	return &result, nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_list_test.go b/dbm-services/common/go-pubpkg/cc.v3/biz_list_test.go
new file mode 100644
index 0000000000..a3c4e7ade1
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_list_test.go
@@ -0,0 +1,21 @@
+package cc
+
+//
+// import (
+//	"testing"
+// )
+//
+// func TestBizQuery(t *testing.T) {
+//	client, err := NewClient("http://127.0.0.1", TestSecret)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	h := NewBizList(client)
+//	result, err := h.Query(TestPage)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	if len(result.Info) != 1 {
+//		t.Fatalf("Biz query result not eq 1[%d]", len(result.Info))
+//	}
+// }
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_location.go b/dbm-services/common/go-pubpkg/cc.v3/biz_location.go
new file mode 100644
index 0000000000..8e537b4bd9
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_location.go
@@ -0,0 +1,38 @@
+package cc
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+// BizLocation is the BizLocation server
+type BizLocation struct {
+	client *Client
+	url    string
+}
+
+// NewBizLocation returns a new BizLocation server
+func NewBizLocation(client *Client) *BizLocation {
+	return &BizLocation{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/get_biz_location",
+	}
+}
+
+// Query looks up the CC version location of businesses by business ID
+func (h *BizLocation) Query(bizIDs []int) ([]BizLocationInfo, error) {
+	param := &BizLocationParam{
+		BKBizIds: bizIDs,
+	}
+	resp, err := h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return nil, fmt.Errorf("do http request failed, err: %+v", err)
+	}
+	var result []BizLocationInfo
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, fmt.Errorf("json unmarshal failed, responseb body: %s, err: %+v", string(resp.Data), err)
+	}
+	return result, nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_location_test.go b/dbm-services/common/go-pubpkg/cc.v3/biz_location_test.go
new file mode 100644
index 0000000000..3d079e8d89
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_location_test.go
@@ -0,0 +1,41 @@
+package cc
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// TestBizLocationQuery
+func TestBizLocationQuery(t *testing.T) {
+	cases := struct {
+		param        BizLocationParam
+		expectResult []BizLocationInfo
+	}{
+		param: BizLocationParam{
+			BKBizIds: []int{100605, 690},
+		},
+		expectResult: []BizLocationInfo{
+			{
+				BkBizID:    10065,
+				BkLocation: "v3.0",
+			},
+			{
+				BkBizID:    690,
+				BkLocation: "v1.0",
+			},
+		},
+	}
+
+	client, err := NewClient("http://127.0.0.1", TestSecret)
+	if err != nil {
+		t.Fatal(err)
+	}
+	h := NewBizLocation(client)
+	result, err := h.Query([]int{100605, 690})
+	if err != nil {
+		t.Fatal(err)
+	}
+	assert.Equal(t, cases.expectResult, result, fmt.Sprintf("case: %+v", cases.param))
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_module.go b/dbm-services/common/go-pubpkg/cc.v3/biz_module.go
new file mode 100644
index 0000000000..e53cce35a1
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_module.go
@@ -0,0 +1,25 @@
+package cc
+
+import (
+	"net/http"
+)
+
+// BizModule wraps the CC create_module API
+type BizModule struct {
+	client *Client
+	url    string
+}
+
+// NewBizModule returns a new BizModule server
+func NewBizModule(client *Client) *BizModule {
+	return &BizModule{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/create_module/",
+	}
+}
+
+// Create creates a module under the given set
+func (h *BizModule) Create(param CreateModuleParam) (err error) {
+	_, err = h.client.Do(http.MethodPost, h.url, param)
+	return
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_module_list.go b/dbm-services/common/go-pubpkg/cc.v3/biz_module_list.go
new file mode 100644
index 0000000000..31f2685b22
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_module_list.go
@@ -0,0 +1,45 @@
+package cc
+
+import (
+	"dbm-services/common/go-pubpkg/cc.v3/utils"
+	"encoding/json"
+	"net/http"
+	"reflect"
+)
+
+// BizModuleList is the BizModuleList server
+type BizModuleList struct {
+	client *Client
+	url    string
+	fields []string
+}
+
+// NewBizModuleList returns a new BizModuleList server
+func NewBizModuleList(client *Client) *BizModuleList {
+	fields := utils.GetStructTagName(reflect.TypeOf(&Module{}))
+	return &BizModuleList{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/search_module",
+		fields: fields,
+	}
+}
+
+// Query handler
+func (h *BizModuleList) Query(bizId int, setId int, page BKPage) (*BizModuleResponse, error) {
+	param := &BizModuleParam{
+		BKBizId: bizId,
+		BKSetId: setId,
+		Fields:  h.fields,
+		Page:    page,
+	}
+	resp, err := h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return nil, err
+	}
+	var result BizModuleResponse
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, err
+	}
+	return &result, nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_module_list_test.go b/dbm-services/common/go-pubpkg/cc.v3/biz_module_list_test.go
new file mode 100644
index 0000000000..7562eed5dc
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_module_list_test.go
@@ -0,0 +1,21 @@
+package cc
+
+import (
+	"testing"
+)
+
+// TestBizModuleQuery
+func TestBizModuleQuery(t *testing.T) {
+	client, err := NewClient("http://127.0.0.1", TestSecret)
+	if err != nil {
+		t.Fatal(err)
+	}
+	h := NewBizModuleList(client)
+	result, err := h.Query(312, 7823, TestPage)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(result.Info) != 1 {
+		t.Fatalf("Biz module query result not eq 1[%d]", len(result.Info))
+	}
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_sensitive.go b/dbm-services/common/go-pubpkg/cc.v3/biz_sensitive.go
new file mode 100644
index 0000000000..106c423644
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_sensitive.go
@@ -0,0 +1,44 @@
+package cc
+
+import (
+	"dbm-services/common/go-pubpkg/cc.v3/utils"
+	"encoding/json"
+	"net/http"
+	"reflect"
+)
+
+// BizSensitiveList is the BizSensitiveList server
+type BizSensitiveList struct {
+	client *Client
+	url    string
+	fields []string
+}
+
+// NewBizSensitiveList returns a new BizSensitiveList server
+func NewBizSensitiveList(client *Client) *BizSensitiveList {
+	fields := utils.GetStructTagName(reflect.TypeOf(&BizSensitive{}))
+	return &BizSensitiveList{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/find_biz_sensitive_batch",
+		fields: fields,
+	}
+}
+
+// Query handler
+func (h *BizSensitiveList) Query(bizIds []int, page BKPage) (*BizSensitiveResponse, error) {
+	param := &BizSensitiveParam{
+		Fields:   h.fields,
+		Page:     page,
+		BkBizIds: bizIds,
+	}
+	resp, err := h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return nil, err
+	}
+	var result BizSensitiveResponse
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, err
+	}
+	return &result, nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_set.go b/dbm-services/common/go-pubpkg/cc.v3/biz_set.go
new file mode 100644
index 0000000000..d13c5386fe
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_set.go
@@ -0,0 +1,61 @@
+package cc
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+// BizSet is the BizSet server
+type BizSet struct {
+	client *Client
+	url    string
+}
+
+// NewBizSet returns a new BizSet server
+func NewBizSet(client *Client) *BizSet {
+	return &BizSet{
+		client: client,
+		url:    "/api/c/compapi/v2/cc",
+	}
+}
+
+// Create handler
+func (h *BizSet) Create(bizId int, setName string, setTemplateID int) (*BizCreateSetResponse, error) {
+	param := &BizCreateSetParam{
+		BkBizID: bizId,
+	}
+	param.Data.BkParentID = bizId
+	param.Data.BkSetName = setName
+	param.Data.SetTemplateID = setTemplateID
+
+	resp, err := h.client.Do(http.MethodPost, fmt.Sprintf("%s/create_set", h.url), param)
+	if err != nil {
+		return nil, err
+	}
+	var result BizCreateSetResponse
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, err
+	}
+	return &result, nil
+}
+
+// Delete handler
+func (h *BizSet) Delete(bizId int, bizSetId int) error {
+	param := &BizDeleteSetParam{
+		BkBizID:    bizId,
+		BkBizSetID: bizSetId,
+	}
+
+	resp, err := h.client.Do(http.MethodPost, fmt.Sprintf("%s/delete_set", h.url), param)
+	if err != nil {
+		return err
+	}
+
+	if resp.Code != 0 || !resp.Result {
+		return fmt.Errorf("delete bkset failed: code: %v result: %v message: %s", resp.Code, resp.Result, resp.Message)
+	}
+
+	return nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_set_list.go b/dbm-services/common/go-pubpkg/cc.v3/biz_set_list.go
new file mode 100644
index 0000000000..95d36d79ad
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_set_list.go
@@ -0,0 +1,44 @@
+package cc
+
+import (
+	"dbm-services/common/go-pubpkg/cc.v3/utils"
+	"encoding/json"
+	"net/http"
+	"reflect"
+)
+
+// BizSetList is the BizSetList server
+type BizSetList struct {
+	client *Client
+	url    string
+	fields []string
+}
+
+// NewBizSetList returns a new BizSetList server
+func NewBizSetList(client *Client) *BizSetList {
+	fields := utils.GetStructTagName(reflect.TypeOf(&Set{}))
+	return &BizSetList{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/search_set",
+		fields: fields,
+	}
+}
+
+// Query handler
+func (h *BizSetList) Query(bizId int, page BKPage) (*BizSetResponse, error) {
+	param := &BizSetParam{
+		BKBizId: bizId,
+		Fields:  h.fields,
+		Page:    page,
+	}
+	resp, err := h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return nil, err
+	}
+	var result BizSetResponse
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, err
+	}
+	return &result, nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_set_test.go b/dbm-services/common/go-pubpkg/cc.v3/biz_set_test.go
new file mode 100644
index 0000000000..8dff8a4abc
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_set_test.go
@@ -0,0 +1,34 @@
+package cc
+
+import (
+	"testing"
+)
+
+// TestBizCreateSet
+func TestBizCreateSet(t *testing.T) {
+	client, err := NewClient("http://127.0.0.1", TestSecret)
+	if err != nil {
+		t.Fatal(err)
+	}
+	h := NewBizSet(client)
+	result, err := h.Create(100781, "cephoooooxx", 5000235)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if result.BkSetName != "cephoooooxx" {
+		t.Fatalf("Biz module create set %+v", result)
+	}
+}
+
+// TestBizDeleteSet
+func TestBizDeleteSet(t *testing.T) {
+	client, err := NewClient("http://127.0.0.1", TestSecret)
+	if err != nil {
+		t.Fatal(err)
+	}
+	h := NewBizSet(client)
+	err = h.Delete(100781, 5004495)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_topo_tree.go b/dbm-services/common/go-pubpkg/cc.v3/biz_topo_tree.go
new file mode 100644
index 0000000000..82946ce16c
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_topo_tree.go
@@ -0,0 +1,38 @@
+package cc
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+// BizTopoTree is the BizTopoTree server
+type BizTopoTree struct {
+	client *Client
+	url    string
+}
+
+// NewBizTopoTree returns a new BizTopoTree server
+func NewBizTopoTree(client *Client) *BizTopoTree {
+	return &BizTopoTree{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/search_biz_inst_topo/",
+	}
+}
+
+// Query fetches the business instance topology by business ID
+func (h *BizTopoTree) Query(bizID int) ([]TopoTreeNode, error) {
+	param := &BizTopoTreeParam{
+		BKBizId: bizID,
+	}
+	resp, err := h.client.Do(http.MethodGet, h.url, param)
+	if err != nil {
+		return nil, fmt.Errorf("do http request failed, err: %+v", err)
+	}
+	var result []TopoTreeNode
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, fmt.Errorf("json unmarshal failed, responseb body: %s, err: %+v", string(resp.Data), err)
+	}
+	return result, nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/biz_watch.go b/dbm-services/common/go-pubpkg/cc.v3/biz_watch.go
new file mode 100644
index 0000000000..8b30854a42
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/biz_watch.go
@@ -0,0 +1,99 @@
+package cc
+
+import (
+	"dbm-services/common/go-pubpkg/cc.v3/utils"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+	"sync"
+
+	"github.com/golang/glog"
+)
+
+// BizWatcher watches CC business events
+type BizWatcher struct {
+	client *Client
+	// fields of the CC business object to watch
+	fields   []string
+	lock     sync.Mutex
+	stopped  bool
+	outgoing chan Event
+	emit     func(Event)
+}
+
+func newBizWatcher(client *Client) *BizWatcher {
+	w := &BizWatcher{
+		client:   client,
+		fields:   utils.GetStructTagName(reflect.TypeOf(&Biz{})),
+		outgoing: make(chan Event),
+	}
+	w.emit = func(e Event) { w.outgoing <- e }
+	return w
+}
+
+// BizWatchList returns a new BizWatcher server
+func BizWatchList(client *Client) (Interface, error) {
+	w := newBizWatcher(client)
+	go w.sync()
+	return w, nil
+}
+
+func (w *BizWatcher) sync() {
+	var cursor string
+	for {
+		result, err := resourceWatch(w.client, BizResource, cursor, w.fields)
+		if err != nil {
+			w.emit(Event{
+				Type:   Error,
+				Object: fmt.Sprintf("BizWatcher failed: %v", err),
+			})
+			// on error, reset the cursor
+			cursor = ""
+			continue
+		}
+		glog.Infof("BizWatcher - Cursor: %s, RequestId: %s, EventCount: %d", cursor, result.RequestId, len(result.BKEvents))
+		// if BKEvents is empty the cursor must be reset,
+		// otherwise watching from the current cursor keeps failing
+		if len(result.BKEvents) == 0 {
+			cursor = ""
+			continue
+		}
+		for _, item := range result.BKEvents {
+			cursor = item.BKCursor
+			if string(item.BKDetail) == "null" {
+				continue
+			}
+			var biz Biz
+			if err := json.Unmarshal(item.BKDetail, &biz); err != nil {
+				w.emit(Event{
+					Type:   Error,
+					Object: fmt.Sprintf("TypeErr biz: Detail: %v - Err: %s", string(item.BKDetail), err),
+				})
+				continue
+			}
+			w.emit(Event{
+				Key:    strconv.Itoa(biz.ApplicationID),
+				Object: &biz,
+				Type:   EventType(item.BKEventType),
+			})
+		}
+	}
+}
+
+// ResultChan returns the event channel
+func (w *BizWatcher) ResultChan() <-chan Event {
+	return w.outgoing
+}
+
+// Stop watcher
+func (w *BizWatcher) Stop() {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+	// Prevent double channel closes.
+	if !w.stopped {
+		w.stopped = true
+		close(w.outgoing)
+	}
+}
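+
+// Consumption sketch (assumes a configured *Client; Error, Event and Biz are
+// defined in this package):
+//
+//	w, _ := BizWatchList(client)
+//	defer w.Stop()
+//	for ev := range w.ResultChan() {
+//	    if ev.Type == Error {
+//	        continue // the watcher resets its cursor and keeps running
+//	    }
+//	    biz := ev.Object.(*Biz)
+//	    _ = biz // handle create/update/delete by ev.Type
+//	}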
diff --git a/dbm-services/common/go-pubpkg/cc.v3/client.go b/dbm-services/common/go-pubpkg/cc.v3/client.go
new file mode 100644
index 0000000000..44b03e0e44
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/client.go
@@ -0,0 +1,128 @@
+package cc
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"time"
+
+	"github.com/google/go-querystring/query"
+)
+
+const (
+	// apiserver response code
+	statusSuccess int = 0
+)
+
+// Response is the generic CC API response envelope
+type Response struct {
+	Code       int             `json:"code"`
+	Permission json.RawMessage `json:"permission"`
+	Result     bool            `json:"result"`
+	RequestId  string          `json:"request_id"`
+	Message    string          `json:"message"`
+	Data       json.RawMessage `json:"data"`
+	Error      json.RawMessage `json:"error"`
+}
+
+// Client is a CC API client
+type Client struct {
+	apiserver string
+	// client for apiservers
+	client *http.Client
+	// Blueking secret
+	secret Secret
+
+	timeout time.Duration
+}
+
+// Secret holds the Blueking app credentials
+type Secret struct {
+	BKAppCode   string
+	BKAppSecret string
+	BKUsername  string
+}
+
+// NewClient return new client
+func NewClient(apiserver string, secret Secret) (*Client, error) {
+	cli := &Client{
+		apiserver: apiserver,
+		secret:    secret,
+	}
+	tr := &http.Transport{}
+	cli.client = &http.Client{
+		Transport: tr,
+	}
+	return cli, nil
+}
+
+// Timeout sets the per-request timeout
+func (c *Client) Timeout(duration time.Duration) {
+	c.timeout = duration
+}
+
+// Do main handler
+func (c *Client) Do(method, url string, params interface{}) (*Response, error) {
+	object, err := Accessor(params)
+	if err != nil {
+		return nil, err
+	}
+	// set auth...
+	object.SetSecret(c.secret)
+
+	body, err := json.Marshal(object)
+	if err != nil {
+		return nil, fmt.Errorf("RequestErr - %v", err)
+	}
+	fullURL := c.apiserver + url
+	req, err := http.NewRequest(method, fullURL, bytes.NewReader(body))
+	if err != nil {
+		return nil, fmt.Errorf("RequestErr - new request failed: %v", err)
+	}
+	if c.timeout != 0 {
+		ctx, cancel := context.WithTimeout(req.Context(), c.timeout)
+		defer cancel()
+
+		req = req.WithContext(ctx)
+	}
+	// Set Header
+	req.Header.Set("X-Bkapi-Accept-Code-Type", "int")
+
+	if method == "GET" {
+		q, _ := query.Values(object)
+		log.Println("encode: ", q.Encode())
+		req.URL.RawQuery = q.Encode()
+	}
+
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("HttpCodeErr - Code: %v, Response: %+v", resp.StatusCode, resp)
+	}
+	b, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("HttpCodeErr - Code: %v, io read all failed %s", resp.StatusCode, err.Error())
+	}
+	result := &Response{}
+	err = json.Unmarshal(b, result)
+	if err != nil {
+		return nil, err
+	}
+	// check response and data is nil
+	if result.Code != statusSuccess {
+		return nil, fmt.Errorf("RequestErr - RequestId: %s, Code: %v,  Messag: %v, Error: %v",
+			result.RequestId,
+			result.Code,
+			result.Message,
+			string(result.Error))
+	}
+	return result, nil
+}
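+
+// Usage sketch (endpoint and credentials are placeholders; param must satisfy
+// the SecretMeta interface from schema.go):
+//
+//	client, _ := NewClient("http://bk-apiserver.example", Secret{
+//	    BKAppCode:   "app",
+//	    BKAppSecret: "secret",
+//	    BKUsername:  "admin",
+//	})
+//	client.Timeout(10 * time.Second)
+//	resp, err := client.Do(http.MethodPost, "/api/c/compapi/v2/cc/search_business", param)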
diff --git a/dbm-services/common/go-pubpkg/cc.v3/clone_host_property.go b/dbm-services/common/go-pubpkg/cc.v3/clone_host_property.go
new file mode 100644
index 0000000000..251a4c674c
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/clone_host_property.go
@@ -0,0 +1,29 @@
+package cc
+
+import (
+	"net/http"
+)
+
+// HostProperty wraps the CC clone_host_property API
+type HostProperty struct {
+	client *Client
+	url    string
+}
+
+// NewHostProperty returns a new HostProperty server
+func NewHostProperty(client *Client) *HostProperty {
+	return &HostProperty{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/clone_host_property",
+	}
+}
+
+// Clone clones host properties
+func (h *HostProperty) Clone(param *CloneHostPropertyParam) error {
+	_, err := h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/clont_host_service_instance_proc.go b/dbm-services/common/go-pubpkg/cc.v3/clont_host_service_instance_proc.go
new file mode 100644
index 0000000000..b535f4832d
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/clont_host_service_instance_proc.go
@@ -0,0 +1,29 @@
+package cc
+
+import (
+	"net/http"
+)
+
+// HostServiceInstance wraps the CC clone_host_service_instance_proc API
+type HostServiceInstance struct {
+	client *Client
+	url    string
+}
+
+// NewHostServiceInstance returns a new HostServiceInstance server
+func NewHostServiceInstance(client *Client) *HostServiceInstance {
+	return &HostServiceInstance{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/clone_host_service_instance_proc",
+	}
+}
+
+// Clone clones host service instance process info
+func (h *HostServiceInstance) Clone(param *CloneHostSvcInsParam) error {
+	_, err := h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/dept_list.go b/dbm-services/common/go-pubpkg/cc.v3/dept_list.go
new file mode 100644
index 0000000000..f8bce9196d
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/dept_list.go
@@ -0,0 +1,37 @@
+package cc
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// DeptList is the DeptList server
+type DeptList struct {
+	client *Client
+	url    string
+}
+
+// NewDeptList returns a new DeptList server
+func NewDeptList(client *Client) *DeptList {
+	return &DeptList{
+		client: client,
+		url:    "/component/compapi/tof/get_dept_info",
+	}
+}
+
+// Query handler
+func (h *DeptList) Query(deptId string) (*DeptResponse, error) {
+	param := &DeptParam{
+		DeptId: deptId,
+	}
+	resp, err := h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return nil, err
+	}
+	var result DeptResponse
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, err
+	}
+	return &result, nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/host_base_info.go b/dbm-services/common/go-pubpkg/cc.v3/host_base_info.go
new file mode 100644
index 0000000000..70ba861258
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/host_base_info.go
@@ -0,0 +1,38 @@
+package cc
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// HostBaseInfo wraps the CC get_host_base_info API
+type HostBaseInfo struct {
+	client *Client
+	url    string
+}
+
+// NewHostBaseInfo returns a new HostBaseInfo server
+func NewHostBaseInfo(client *Client) *HostBaseInfo {
+	return &HostBaseInfo{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/get_host_base_info/",
+	}
+}
+
+// Query handler
+func (h *HostBaseInfo) Query(hostId int) ([]HostPropertyInfo, error) {
+	param := &GetHostBaseInfoParam{
+		BkHostID: hostId,
+	}
+	resp, err := h.client.Do(http.MethodGet, h.url, param)
+	if err != nil {
+		return nil, err
+	}
+	var result []HostPropertyInfo
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/host_base_info_test.go b/dbm-services/common/go-pubpkg/cc.v3/host_base_info_test.go
new file mode 100644
index 0000000000..e095906ea4
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/host_base_info_test.go
@@ -0,0 +1,30 @@
+package cc_test
+
+import (
+	"dbm-services/common/go-pubpkg/cc.v3"
+	"testing"
+)
+
+func TestQueryHostBaseInfo(t *testing.T) {
+	t.Log("start testing...")
+	client, err := cc.NewClient("", cc.Secret{
+		BKAppCode:   "",
+		BKAppSecret: "",
+		BKUsername:  "",
+	})
+	if err != nil {
+		t.Fatal(err)
+		return
+	}
+	t.Log("1111")
+	// 2000026095
+	data, err := cc.NewHostBaseInfo(client).Query(2000026095)
+	if err != nil {
+		t.Fatal(err)
+		return
+	}
+	for _, v := range data {
+		t.Log(v)
+	}
+
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/host_biz_relations.go b/dbm-services/common/go-pubpkg/cc.v3/host_biz_relations.go
new file mode 100644
index 0000000000..67f016d836
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/host_biz_relations.go
@@ -0,0 +1,49 @@
+package cc
+
+import (
+	"dbm-services/common/go-pubpkg/cc.v3/utils"
+	"encoding/json"
+	"net/http"
+	"reflect"
+)
+
+// HostBizRelation is the HostBizRelation server
+type HostBizRelation struct {
+	client     *Client
+	url        string
+	hostFields []string
+}
+
+// NewHostBizRelation returns a new HostBizRelation server
+func NewHostBizRelation(client *Client) *HostBizRelation {
+	hostFields := utils.GetStructTagName(reflect.TypeOf(&Host{}))
+	return &HostBizRelation{
+		client:     client,
+		url:        "/api/c/compapi/v2/cc/find_host_biz_relations/",
+		hostFields: hostFields,
+	}
+}
+
+// Query handler
+func (h *HostBizRelation) Query(hostIds []int, page BKPage) ([]FindHostBizRelationResp, error) {
+	param := &FindHostBizRelationParam{
+		BKHostFields:   h.hostFields,
+		BKBizFields:    []string{"bk_biz_id", "bk_biz_name"},
+		BKSetFields:    []string{"bk_set_id", "bk_set_name"},
+		BKModuleFields: []string{"bk_module_id", "bk_module_name"},
+		Page:           page,
+	}
+	if len(hostIds) > 0 {
+		param.BKHostIds = hostIds
+	}
+	resp, err := h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return nil, err
+	}
+	result := make([]FindHostBizRelationResp, 0)
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/host_id_query.go b/dbm-services/common/go-pubpkg/cc.v3/host_id_query.go
new file mode 100644
index 0000000000..25b41e9408
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/host_id_query.go
@@ -0,0 +1,69 @@
+package cc
+
+// QueryHostId resolves host IDs from inner IPs and asset IDs
+func QueryHostId(client *Client, innerIPs, assetIds []string) ([]int, error) {
+	var (
+		hostIds []int
+		lister  = NewHostWithoutBizList(client)
+	)
+	// resolve host IDs by inner IP
+	if len(innerIPs) > 0 {
+		ret, err := lister.QueryWithFilter(HostPropertyFilter{
+			Condition: "AND",
+			Rules: []Rule{
+				{
+					Field:    "bk_host_innerip",
+					Operator: "in",
+					Value:    innerIPs,
+				},
+			},
+		}, BKPage{
+			Start: 0,
+			Limit: Limit,
+		})
+		if err != nil {
+			return nil, err
+		}
+		for _, item := range ret.Info {
+			hostIds = append(hostIds, item.BKHostId)
+		}
+	}
+	// resolve host IDs by asset ID
+	if len(assetIds) > 0 {
+		ret, err := lister.QueryWithFilter(HostPropertyFilter{
+			Condition: "AND",
+			Rules: []Rule{
+				{
+					Field:    "bk_asset_id",
+					Operator: "in",
+					Value:    assetIds,
+				},
+			},
+		}, BKPage{
+			Start: 0,
+			Limit: Limit,
+		})
+		if err != nil {
+			return nil, err
+		}
+		for _, item := range ret.Info {
+			hostIds = append(hostIds, item.BKHostId)
+		}
+	}
+	return hostIds, nil
+}
+
+// RemoveRepeatedHostId deduplicates host IDs while preserving order
+func RemoveRepeatedHostId(hostIds []int) []int {
+	var (
+		result  []int
+		hostIdM = make(map[int]struct{})
+	)
+	for _, id := range hostIds {
+		if _, ok := hostIdM[id]; !ok {
+			hostIdM[id] = struct{}{}
+			result = append(result, id)
+		}
+	}
+	return result
+}
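+
+// Usage sketch (IPs and asset IDs are illustrative):
+//
+//	ids, err := QueryHostId(client, []string{"127.0.0.1"}, []string{"TC12345"})
+//	if err == nil {
+//	    ids = RemoveRepeatedHostId(ids) // an IP and an asset ID may hit the same host
+//	}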
diff --git a/dbm-services/common/go-pubpkg/cc.v3/host_location.go b/dbm-services/common/go-pubpkg/cc.v3/host_location.go
new file mode 100644
index 0000000000..e483abbd1f
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/host_location.go
@@ -0,0 +1,44 @@
+package cc
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+// HostLocation queries the CC version location of hosts
+type HostLocation struct {
+	client *Client
+	url    string
+}
+
+// NewHostLocation returns a new HostLocation server
+func NewHostLocation(client *Client) *HostLocation {
+	return &HostLocation{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/get_host_location",
+	}
+}
+
+// Query looks up the CC version location for the given host IPs
+func (h *HostLocation) Query(host []string) ([]HostLocationInfo, error) {
+	list := make([]BkHostList, len(host))
+	for i := 0; i < len(host); i++ {
+		list[i].BkCloudID = 0
+		list[i].BkHostInnerip = host[i]
+	}
+	param := HostLocationParam{
+		BkHostList: list,
+	}
+
+	resp, err := h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return nil, fmt.Errorf("do http request failed, err: %s", err.Error())
+	}
+	var result []HostLocationInfo
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, fmt.Errorf("json unmarshal failed, responseb body: %s, err: %+v", string(resp.Data), err)
+	}
+	return result, nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/host_location_test.go b/dbm-services/common/go-pubpkg/cc.v3/host_location_test.go
new file mode 100644
index 0000000000..09d5b8cbd0
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/host_location_test.go
@@ -0,0 +1,26 @@
+package cc
+
+import (
+	"fmt"
+	"log"
+	"testing"
+)
+
+// TestHostLocation_Query
+func TestHostLocation_Query(t *testing.T) {
+	address := "http://127.0.0.1"
+
+	client, err := NewClient(address, TestSecret)
+	if err != nil {
+		log.Fatal(err.Error())
+	}
+
+	h := NewHostLocation(client)
+	param := []string{""}
+	result, err := h.Query(param)
+	if err != nil {
+		log.Fatal(err.Error())
+	}
+
+	fmt.Println(result)
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/host_relation_info_test.go b/dbm-services/common/go-pubpkg/cc.v3/host_relation_info_test.go
new file mode 100644
index 0000000000..d3403fd1c6
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/host_relation_info_test.go
@@ -0,0 +1,26 @@
+package cc_test
+
+import (
+	"dbm-services/common/go-pubpkg/cc.v3"
+	"testing"
+)
+
+func TestQueryHostRelationList(t *testing.T) {
+	t.Log("start testing...")
+	client, err := cc.NewClient("", cc.Secret{
+		BKAppCode:   "",
+		BKAppSecret: "",
+		BKUsername:  "",
+	})
+	if err != nil {
+		t.Fatal(err)
+		return
+	}
+	cc.NewListBizHosts(client).QueryListBizHosts(&cc.ListBizHostsParam{})
+	data, err := cc.NewHostRelationList(client).Query(&cc.HostMetaData{InnerIPs: []string{"127.0.0.1"}}, cc.BKPage{Start: 0, Limit: 100})
+	if err != nil {
+		t.Fatal(err)
+		return
+	}
+	t.Log(data)
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/host_relation_list.go b/dbm-services/common/go-pubpkg/cc.v3/host_relation_list.go
new file mode 100644
index 0000000000..160d4857b1
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/host_relation_list.go
@@ -0,0 +1,57 @@
+package cc
+
+import (
+	"dbm-services/common/go-pubpkg/cc.v3/utils"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"reflect"
+)
+
+// HostRelationList queries host-related info (host, biz, set, module)
+type HostRelationList struct {
+	client     *Client
+	url        string
+	hostFields []string
+}
+
+// NewHostRelationList returns a new HostRelationList server
+func NewHostRelationList(client *Client) *HostRelationList {
+	hostFields := utils.GetStructTagName(reflect.TypeOf(&Host{}))
+	return &HostRelationList{
+		client:     client,
+		url:        "/api/c/compapi/v2/cc/list_host_related_info",
+		hostFields: hostFields,
+	}
+}
+
+// Query handler
+func (h *HostRelationList) Query(data *HostMetaData, page BKPage) (*ListHostRelationResponse, error) {
+	param := &ListHostRelationParam{
+		BKHostFields:   h.hostFields,
+		BKBizFields:    []string{"bk_biz_id", "bk_biz_name"},
+		BKSetFields:    []string{"bk_set_id", "bk_set_name"},
+		BKModuleFields: []string{"bk_module_id", "bk_module_name"},
+		Page:           page,
+	}
+	if data != nil {
+		hostIds, err := QueryHostId(h.client, data.InnerIPs, data.AssetIds)
+		if err != nil {
+			return nil, fmt.Errorf("Query BKHostId from innerip or assetid failed: %v", err)
+		}
+		hostIds = append(hostIds, data.BKHostIds...)
+		if len(hostIds) > 0 {
+			param.BKHostIds = RemoveRepeatedHostId(hostIds)
+		}
+	}
+	resp, err := h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return nil, err
+	}
+	var result ListHostRelationResponse
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, err
+	}
+	return &result, nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/host_relation_watch.go b/dbm-services/common/go-pubpkg/cc.v3/host_relation_watch.go
new file mode 100644
index 0000000000..3b12bb5023
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/host_relation_watch.go
@@ -0,0 +1,103 @@
+package cc
+
+import (
+	"dbm-services/common/go-pubpkg/cc.v3/utils"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+	"sync"
+
+	"github.com/golang/glog"
+)
+
+// HostRelationWatcher watches CC host-biz-module relation events
+type HostRelationWatcher struct {
+	client *Client
+	// fields of the CC host-biz-module relation to watch
+	fields   []string
+	lock     sync.Mutex
+	stopped  bool
+	outgoing chan Event
+	emit     func(Event)
+}
+
+func newHostRelationWatcher(client *Client) *HostRelationWatcher {
+	w := &HostRelationWatcher{
+		client:   client,
+		fields:   utils.GetStructTagName(reflect.TypeOf(&HostBizModule{})),
+		outgoing: make(chan Event),
+	}
+	w.emit = func(e Event) { w.outgoing <- e }
+	return w
+}
+
+// HostRelationWatchList starts a watcher for host-biz relation events
+func HostRelationWatchList(client *Client) (Interface, error) {
+	w := newHostRelationWatcher(client)
+	go w.sync()
+	return w, nil
+}
+
+func (w *HostRelationWatcher) sync() {
+	var cursor string
+	for {
+		result, err := resourceWatch(w.client, HostRelationResource, cursor, w.fields)
+		if err != nil {
+			w.emit(Event{
+				Type:   Error,
+				Object: fmt.Sprintf("HostRelationWatcher failed: %v", err),
+			})
+			// on error, reset the cursor
+			cursor = ""
+			continue
+		}
+		glog.Infof("HostRelationWatcher - Cursor: %s, RequestId: %s, EventCount: %d",
+			cursor,
+			result.RequestId,
+			len(result.BKEvents))
+		// if BKEvents is empty the cursor must be reset,
+		// otherwise watching from the current cursor keeps failing
+		if len(result.BKEvents) == 0 {
+			cursor = ""
+			continue
+		}
+		for _, item := range result.BKEvents {
+			cursor = item.BKCursor
+			if string(item.BKDetail) == "null" {
+				continue
+			}
+			var relation HostBizModule
+			if err := json.Unmarshal(item.BKDetail, &relation); err != nil {
+				w.emit(Event{
+					Type:   Error,
+					Object: fmt.Sprintf("TypeErr host_relation: Detail: %v - Err: %s", string(item.BKDetail), err),
+				})
+				continue
+			}
+			w.emit(Event{
+				Key:    strconv.Itoa(relation.BKHostId),
+				Object: &relation,
+				Type:   EventType(item.BKEventType),
+				Cursor: item.BKCursor,
+			})
+		}
+	}
+}
+
+// ResultChan returns the event channel
+func (w *HostRelationWatcher) ResultChan() <-chan Event {
+	return w.outgoing
+}
+
+// Stop watcher
+func (w *HostRelationWatcher) Stop() {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+	// Prevent double channel closes.
+	if !w.stopped {
+		w.stopped = true
+		close(w.outgoing)
+	}
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/host_watch.go b/dbm-services/common/go-pubpkg/cc.v3/host_watch.go
new file mode 100644
index 0000000000..91e8a83d8c
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/host_watch.go
@@ -0,0 +1,100 @@
+package cc
+
+import (
+	"dbm-services/common/go-pubpkg/cc.v3/utils"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+	"sync"
+
+	"github.com/golang/glog"
+)
+
+// HostWatcher watches CC host events
+type HostWatcher struct {
+	client *Client
+	// fields of the CC host object to watch
+	fields   []string
+	lock     sync.Mutex
+	stopped  bool
+	outgoing chan Event
+	emit     func(Event)
+}
+
+func newHostWatcher(client *Client) *HostWatcher {
+	w := &HostWatcher{
+		client:   client,
+		fields:   utils.GetStructTagName(reflect.TypeOf(&Host{})),
+		outgoing: make(chan Event),
+	}
+	w.emit = func(e Event) { w.outgoing <- e }
+	return w
+}
+
+// HostWatchList starts a watcher for host events
+func HostWatchList(client *Client) (Interface, error) {
+	w := newHostWatcher(client)
+	go w.sync()
+	return w, nil
+}
+
+func (w *HostWatcher) sync() {
+	var cursor string
+	for {
+		result, err := resourceWatch(w.client, HostResource, cursor, w.fields)
+		if err != nil {
+			w.emit(Event{
+				Type:   Error,
+				Object: fmt.Sprintf("HostWatcher failed, cursor:%s - err: %v", cursor, err),
+			})
+			// on error, reset the cursor
+			cursor = ""
+			continue
+		}
+		glog.Infof("HostWatcher - Cursor: %s, RequestId: %s, EventCount: %d", cursor, result.RequestId, len(result.BKEvents))
+		// if BKEvents is empty the cursor must be reset,
+		// otherwise watching from the current cursor keeps failing
+		if len(result.BKEvents) == 0 {
+			cursor = ""
+			continue
+		}
+		for _, item := range result.BKEvents {
+			cursor = item.BKCursor
+			if string(item.BKDetail) == "null" {
+				continue
+			}
+			var host Host
+			if err := json.Unmarshal(item.BKDetail, &host); err != nil {
+				w.emit(Event{
+					Type:   Error,
+					Object: fmt.Sprintf("TypeErr host: Detail: %v - Err: %s", string(item.BKDetail), err),
+				})
+				continue
+			}
+			w.emit(Event{
+				Key:    strconv.Itoa(host.BKHostId),
+				Object: &host,
+				Type:   EventType(item.BKEventType),
+				Cursor: item.BKCursor,
+			})
+		}
+	}
+}
+
+// ResultChan returns the event channel
+func (w *HostWatcher) ResultChan() <-chan Event {
+	return w.outgoing
+}
+
+// Stop watcher
+func (w *HostWatcher) Stop() {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+	// Prevent double channel closes.
+	if !w.stopped {
+		w.stopped = true
+		close(w.outgoing)
+	}
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/host_watch_test.go b/dbm-services/common/go-pubpkg/cc.v3/host_watch_test.go
new file mode 100644
index 0000000000..80d2372c97
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/host_watch_test.go
@@ -0,0 +1,19 @@
+package cc
+
+import (
+	"testing"
+)
+
+// TestHostWatchList
+func TestHostWatchList(t *testing.T) {
+	client, err := NewClient("http://127.0.0.1", TestSecret)
+	if err != nil {
+		t.Fatal(err)
+	}
+	w, _ := HostWatchList(client)
+	event := <-w.ResultChan()
+	_, ok := event.Object.(*Host)
+	if !ok {
+		t.Fatalf("Object not Host: %+v", event.Object)
+	}
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/host_without_biz_list.go b/dbm-services/common/go-pubpkg/cc.v3/host_without_biz_list.go
new file mode 100644
index 0000000000..5bd2f08c1b
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/host_without_biz_list.go
@@ -0,0 +1,39 @@
+package cc
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// HostWithoutBizList is the HostWithoutBizList server
+type HostWithoutBizList struct {
+	client *Client
+	url    string
+}
+
+// NewHostWithoutBizList returns a new HostWithoutBizList server
+func NewHostWithoutBizList(client *Client) *HostWithoutBizList {
+	return &HostWithoutBizList{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/list_hosts_without_biz",
+	}
+}
+
+// QueryWithFilter queries hosts matching the given property filter
+func (h *HostWithoutBizList) QueryWithFilter(filter HostPropertyFilter, page BKPage) (*HostsWithoutBizListResponse,
+	error) {
+	param := &HostsWithoutBizListParam{
+		HostPropertyFilter: filter,
+		Page:               page,
+	}
+	resp, err := h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return nil, err
+	}
+	var result HostsWithoutBizListResponse
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, err
+	}
+	return &result, nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/host_without_biz_list_test.go b/dbm-services/common/go-pubpkg/cc.v3/host_without_biz_list_test.go
new file mode 100644
index 0000000000..f8680c32fb
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/host_without_biz_list_test.go
@@ -0,0 +1,55 @@
+package cc
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// TestQueryWithFilter
+func TestQueryWithFilter(t *testing.T) {
+	cases := []struct {
+		filter         HostPropertyFilter
+		expectedResult int
+	}{
+		{
+			filter: HostPropertyFilter{
+				Condition: "AND",
+				Rules: []Rule{
+					{
+						Field:    "bk_host_innerip",
+						Operator: "in",
+						Value:    []string{"x.x.x.x"},
+					},
+				},
+			},
+			expectedResult: 0,
+		},
+		{
+			filter: HostPropertyFilter{
+				Condition: "AND",
+				Rules: []Rule{
+					{
+						Field:    "bk_host_innerip",
+						Operator: "in",
+						Value:    []string{"1.1.1.1"},
+					},
+				},
+			},
+			expectedResult: 1,
+		},
+	}
+	client, err := NewClient("http://127.0.0.1", TestSecret)
+	if err != nil {
+		t.Fatal(err)
+	}
+	h := NewHostWithoutBizList(client)
+	for _, tc := range cases {
+		result, err := h.QueryWithFilter(tc.filter, TestPage)
+		if err != nil {
+			t.Fatal(err)
+		}
+		assert.Equal(t, tc.expectedResult, len(result.Info), fmt.Sprintf("case: %+v", tc.filter))
+	}
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/list_biz_hosts.go b/dbm-services/common/go-pubpkg/cc.v3/list_biz_hosts.go
new file mode 100644
index 0000000000..2a05cd9ffb
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/list_biz_hosts.go
@@ -0,0 +1,37 @@
+package cc
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// ListBizHosts queries hosts under a business by business ID, with optional
+// extra filters such as set ID and module ID
+type ListBizHosts struct {
+	client *Client
+	url    string
+}
+
+// NewListBizHosts returns a new ListBizHosts server
+func NewListBizHosts(client *Client) *ListBizHosts {
+	return &ListBizHosts{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/list_biz_hosts/",
+	}
+}
+
+// QueryListBizHosts queries hosts under the business
+func (h *ListBizHosts) QueryListBizHosts(param *ListBizHostsParam) (*ListBizHostsResponse, *Response, error) {
+	resp, err := h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return nil, resp, err
+	}
+	var result ListBizHostsResponse
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, resp, err
+	}
+	return &result, resp, nil
+}
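+
+// Usage sketch (param fields come from ListBizHostsParam in schema.go):
+//
+//	result, raw, err := NewListBizHosts(client).QueryListBizHosts(param)
+//	// raw carries the full envelope (request_id etc.) for troubleshooting;
+//	// note it may be nil when the HTTP request itself fails.
+//	_, _, _ = result, raw, err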
diff --git a/dbm-services/common/go-pubpkg/cc.v3/module_watch.go b/dbm-services/common/go-pubpkg/cc.v3/module_watch.go
new file mode 100644
index 0000000000..2ef87994ec
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/module_watch.go
@@ -0,0 +1,101 @@
+package cc
+
+import (
+	"dbm-services/common/go-pubpkg/cc.v3/utils"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+	"sync"
+
+	"github.com/golang/glog"
+)
+
+// ModuleWatcher watches CC module events
+type ModuleWatcher struct {
+	client   *Client
+	fields   []string
+	lock     sync.Mutex
+	stopped  bool
+	outgoing chan Event
+	emit     func(Event)
+}
+
+func newModuleWatcher(client *Client) *ModuleWatcher {
+	w := &ModuleWatcher{
+		client:   client,
+		fields:   utils.GetStructTagName(reflect.TypeOf(&Module{})),
+		outgoing: make(chan Event),
+	}
+	w.emit = func(e Event) { w.outgoing <- e }
+	return w
+}
+
+// ModuleWatchList starts a goroutine that watches Module resource events
+func ModuleWatchList(client *Client) (Interface, error) {
+	w := newModuleWatcher(client)
+	go w.sync()
+	return w, nil
+}
+
+func (w *ModuleWatcher) sync() {
+	var cursor string
+	for {
+		result, err := resourceWatch(w.client, ModuleResource, cursor, w.fields)
+		if err != nil {
+			w.emit(Event{
+				Type:   Error,
+				Object: fmt.Sprintf("ModuleWatcher failed: %v", err),
+			})
+			// on error, reset the cursor and retry
+			cursor = ""
+			continue
+		}
+		glog.Infof("ModuleWatcher - Cursor: %s, RequestId: %s, EventCount: %d",
+			cursor,
+			result.RequestId,
+			len(result.BKEvents))
+		// if BKEvents is empty the cursor must be reset,
+		// otherwise watching from the current cursor keeps failing
+		if len(result.BKEvents) == 0 {
+			cursor = ""
+			continue
+		}
+		for _, item := range result.BKEvents {
+			cursor = item.BKCursor
+			if string(item.BKDetail) == "null" {
+				continue
+			}
+			var module Module
+			if err := json.Unmarshal(item.BKDetail, &module); err != nil {
+				w.emit(Event{
+					Type:   Error,
+					Object: fmt.Sprintf("TypeErr module: Detail: %v - Err: %s", string(item.BKDetail), err),
+				})
+				continue
+			}
+			w.emit(Event{
+				Key:    strconv.Itoa(module.BKModuleId),
+				Object: &module,
+				Type:   EventType(item.BKEventType),
+			})
+		}
+	}
+}
+
+// ResultChan returns the event channel
+func (w *ModuleWatcher) ResultChan() <-chan Event {
+	return w.outgoing
+}
+
+// Stop watcher
+func (w *ModuleWatcher) Stop() {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+	// Prevent double channel closes.
+	if !w.stopped {
+		w.stopped = true
+		close(w.outgoing)
+	}
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/schema.go b/dbm-services/common/go-pubpkg/cc.v3/schema.go
new file mode 100644
index 0000000000..3d5f0b1679
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/schema.go
@@ -0,0 +1,729 @@
+package cc
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// SecretMeta defines the SetSecret interface.
+// Every struct that requests a CC API embeds BaseSecret,
+// so the secret is set uniformly at call time.
+type SecretMeta interface {
+	SetSecret(secret Secret)
+}
+
+// Accessor applies the Secret information uniformly
+func Accessor(obj interface{}) (SecretMeta, error) {
+	switch t := obj.(type) {
+	case SecretMeta:
+		return t, nil
+	default:
+		return nil, fmt.Errorf("TypeErr: %v", t)
+	}
+}
+
+// BaseSecret is the CC API authentication information
+type BaseSecret struct {
+	BKAppCode         string `json:"bk_app_code" url:"bk_app_code"`
+	BKAppSecret       string `json:"bk_app_secret" url:"bk_app_secret"`
+	BKUsername        string `json:"bk_username" url:"bk_username"`
+	BKSupplierAccount string `json:"bk_supplier_account" url:"bk_supplier_account"`
+}
+
+// SetSecret sets the Secret information
+func (s *BaseSecret) SetSecret(secret Secret) {
+	s.BKAppCode = secret.BKAppCode
+	s.BKAppSecret = secret.BKAppSecret
+	s.BKUsername = secret.BKUsername
+	s.BKSupplierAccount = "tencent"
+}
+
+// Host describes the CC host properties.
+// Field names match those of CCDeviceInfo, mainly to ease mapping.
+type Host struct {
+	AssetID            string      `json:"bk_asset_id,omitempty"`
+	HostID             int         `json:"svr_id,omitempty"`
+	BKHostId           int         `json:"bk_host_id,omitempty"`
+	SN                 string      `json:"bk_sn,omitempty"`
+	InnerIP            string      `json:"bk_host_innerip,omitempty"`
+	OuterIP            string      `json:"bk_host_outerip,omitempty"`
+	InnerSegmentName   string      `json:"inner_network_segment,omitempty"`
+	OuterSegmentName   string      `json:"outer_network_segment,omitempty"`
+	Dep                string      `json:"dept_name,omitempty"`
+	HostName           string      `json:"bk_host_name,omitempty"`
+	GroupName          string      `json:"group_name,omitempty"`
+	DeviceTypeName     string      `json:"svr_device_type_name,omitempty"`
+	DeviceClass        string      `json:"svr_device_class,omitempty"`
+	Raid               string      `json:"raid_name,omitempty"`
+	OSName             string      `json:"bk_os_name,omitempty"`
+	OSVersion          string      `json:"bk_os_version,omitempty"`
+	HardMemo           string      `json:"hard_memo,omitempty"`
+	InputTime          string      `json:"svr_input_time,omitempty"`
+	Operator           string      `json:"operator,omitempty"`
+	BakOperator        string      `json:"bk_bak_operator,omitempty"`
+	ClassifyLevelName  string      `json:"classify_level_name,omitempty"`
+	Alarmlevel         string      `json:"bk_sla,omitempty"`
+	ImportantLevel     string      `json:"srv_important_level,omitempty"`
+	ZoneName           string      `json:"bk_zone_name,omitempty"`
+	SZone              string      `json:"sub_zone,omitempty"`
+	SZoneID            string      `json:"sub_zone_id,omitempty"`
+	ModuleName         string      `json:"module_name,omitempty"`
+	IdcArea            string      `json:"bk_idc_area,omitempty"`
+	IDC                string      `json:"idc_name,omitempty"`
+	IDCID              int         `json:"idc_id,omitempty"`
+	IDCUnit            string      `json:"idc_unit_name,omitempty"`
+	IDCUnitID          int         `json:"idc_unit_id,omitempty"`
+	IdcOperationName   string      `json:"bk_isp_name,omitempty"`
+	IdcLogicArea       string      `json:"logic_domain,omitempty"`
+	LogicZone          string      `json:"bk_logic_zone,omitempty"`
+	LogicZoneID        string      `json:"bk_logic_zone_id,omitempty"`
+	State              string      `json:"srv_status,omitempty"`
+	ServerRack         string      `json:"rack,omitempty"`
+	Equipment          string      `json:"rack_id,omitempty"`
+	LinkNetdeviceId    string      `json:"net_device_id,omitempty"`
+	InnerSwitchPort    string      `json:"inner_switch_port,omitempty"`
+	OuterSwitchPort    string      `json:"outer_switch_port,omitempty"`
+	InnerSwitchIp      string      `json:"bk_inner_switch_ip,omitempty"`
+	OuterSwitchIp      string      `json:"bk_outer_switch_ip,omitempty"`
+	NetStructVer       interface{} `json:"net_struct_id,omitempty"`
+	NetStructVerName   string      `json:"net_struct_name,omitempty"`
+	IdcAreaId          int         `json:"bk_idc_area_id,omitempty"`
+	IPOperName         string      `json:"bk_ip_oper_name,omitempty"`
+	SvrDeviceClassName string      `json:"bk_svr_device_cls_name,omitempty"`
+	StrVersion         string      `json:"bk_str_version,omitempty"`
+	IdcCityName        string      `json:"idc_city_name,omitempty"`
+	IdcCityId          string      `json:"idc_city_id,omitempty"`
+	BkCpu              int         `json:"bk_cpu,omitempty"`
+	BkMem              int         `json:"bk_mem,omitempty"`
+	BkDisk             int         `json:"bk_disk"`
+	SvrTypeName        string      `json:"svr_type_name"`
+	BKBSInfos          []*CMDBInfo `json:"bk_bs_info"`
+}
+
+// CMDBInfo is the business/module information in the company CMDB
+type CMDBInfo struct {
+	// CMDB business-set ID
+	Bs1NameId int `json:"bs1_name_id"`
+	// CMDB business ID
+	Bs2NameId int `json:"bs2_name_id"`
+	// CMDB module ID
+	Bs3NameId int `json:"bs3_name_id"`
+	// CMDB business-set name
+	Bs1Name string `json:"bs1_name"`
+	// CMDB business name
+	Bs2Name string `json:"bs2_name"`
+	// CMDB module name
+	Bs3Name string `json:"bs3_name"`
+}
+
+// Set is the cluster (set) information
+type Set struct {
+	BKSetId   int    `json:"bk_set_id"`
+	BKSetName string `json:"bk_set_name"`
+}
+
+// Module is the business module information
+type Module struct {
+	BKModuleId   int    `json:"bk_module_id"`
+	BKModuleName string `json:"bk_module_name"`
+	// module type
+	// 1: normal, 2: database
+	ModuleCategory string `json:"bk_module_type"`
+	// default value
+	// 0: normal module; 1: idle machine; 2: faulty machine; 3: to be recycled; >3: other
+	Default int `json:"default"`
+}
+
+// Relation is the mapping between a set and its modules
+type Relation struct {
+	BKSetId   int    `json:"bk_set_id"`
+	BKSetName string `json:"bk_set_name"`
+	// corresponds to the former TopoModuleID
+	BKModuleId int `json:"bk_module_id"`
+	// corresponds to the former TopoModule
+	BKModuleName string   `json:"bk_module_name"`
+	BKBSInfo     CMDBInfo `json:"bk_bs_info"`
+	// corresponds to the former AppModuleId
+	ServiceTemplateId int `json:"service_template_id"`
+	// corresponds to the former AppModule
+	ServiceTemplateName string `json:"service_template_name"`
+}
+
+// HostBizModule is the association between a host and its business modules
+type HostBizModule struct {
+	BKHostId   int `json:"bk_host_id"`
+	BKBizId    int `json:"bk_biz_id"`
+	BKSetId    int `json:"bk_set_id"`
+	BKModuleId int `json:"bk_module_id"`
+}
+
+// ResourceWatchType is the resource type for event watching
+type ResourceWatchType string
+
+// resource types supported by the CC watch API
+const (
+	HostResource         ResourceWatchType = "host"
+	HostRelationResource ResourceWatchType = "host_relation"
+	BizResource          ResourceWatchType = "biz"
+	SetResource          ResourceWatchType = "set"
+	ModuleResource       ResourceWatchType = "module"
+)
+
+// ResourceWatchParam is the input of the resource watch API
+type ResourceWatchParam struct {
+	BaseSecret `json:",inline"`
+	// event types: create / update / delete
+	BKEventTypes []string `json:"bk_event_types"`
+	// fields to return in the events; must not be empty
+	BKFields []string `json:"bk_fields"`
+	// start time of the watch, in unix seconds, i.e. the number of seconds
+	// from 1970-01-01T00:00:00 UTC to the point you want to watch from.
+	BKStartFrom int64 `json:"bk_start_from"`
+	// watch cursor, the position to start or resume watching from;
+	// the system returns the next event, or batch of events, after it.
+	BKCursor string `json:"bk_cursor,omitempty"`
+	// resource type to watch; one of: host, host_relation, biz, set, module
+	BKResource ResourceWatchType `json:"bk_resource"`
+}
+
+// ResourceWatchResponse is the output of the resource watch API
+type ResourceWatchResponse struct {
+	// RequestId is kept only for logging, to ease troubleshooting
+	RequestId string    `json:"-"`
+	BKEvents  []BKEvent `json:"bk_events"`
+	BKWatched bool      `json:"bk_watched"`
+}
+
+// BKEvent is the detail of a watched event
+type BKEvent struct {
+	BKEventType string            `json:"bk_event_type"`
+	BKResource  ResourceWatchType `json:"bk_resource"`
+	BKCursor    string            `json:"bk_cursor"`
+	BKDetail    json.RawMessage   `json:"bk_detail"`
+}
+
+// ListHostRelationParam is the input for querying host topology information
+type ListHostRelationParam struct {
+	BaseSecret `json:",inline"`
+	// hosts to query, at most 500; when not empty, the page field is ignored
+	BKHostIds []int `json:"bk_host_ids"`
+	// host fields to return; must not be empty
+	BKHostFields []string `json:"bk_host_fields"`
+	// business fields to return; must not be empty
+	BKBizFields []string `json:"bk_biz_fields"`
+	// set fields to return; must not be empty
+	BKSetFields []string `json:"bk_set_fields"`
+	// module fields to return; must not be empty
+	BKModuleFields []string `json:"bk_module_fields"`
+	// paging information
+	Page BKPage `json:"page"`
+}
+
+// FindHostBizRelationParam is the input of the host topology query API; the json key of BKHostIds is singular
+type FindHostBizRelationParam struct {
+	BaseSecret `json:",inline"`
+	// hosts to query, at most 500; when not empty, the page field is ignored
+	BKHostIds []int `json:"bk_host_id"`
+	// host fields to return; must not be empty
+	BKHostFields []string `json:"bk_host_fields"`
+	// business fields to return; must not be empty
+	BKBizFields []string `json:"bk_biz_fields"`
+	// set fields to return; must not be empty
+	BKSetFields []string `json:"bk_set_fields"`
+	// module fields to return; must not be empty
+	BKModuleFields []string `json:"bk_module_fields"`
+	// paging information
+	Page BKPage `json:"page"`
+}
+
+// FindHostBizRelationResp is the response structure of the API
+type FindHostBizRelationResp struct {
+	BkBizID           int    `json:"bk_biz_id"`
+	BkHostID          int    `json:"bk_host_id"`
+	BkModuleID        int    `json:"bk_module_id"`
+	BkSetID           int    `json:"bk_set_id"`
+	BkSupplierAccount string `json:"bk_supplier_account"`
+}
+
+// GetHostBaseInfoParam is the input for querying host base information
+type GetHostBaseInfoParam struct {
+	BaseSecret        `json:",inline"`
+	BkHostID          int    `json:"bk_host_id" url:"bk_host_id"`
+	BkSupplierAccount string `json:"bk_supplier_account" url:"bk_supplier_account"`
+}
+
+// HostPropertyInfo is a single host property entry
+type HostPropertyInfo struct {
+	BkPropertyId    string      `json:"bk_property_id"`
+	BkPropertyName  string      `json:"bk_property_name"`
+	BkpropertyValue interface{} `json:"bk_property_value"`
+}
+
+// HostMetaData identifies a host device
+type HostMetaData struct {
+	// host IPs
+	InnerIPs []string
+	// host asset numbers
+	AssetIds []string
+	// host IDs
+	BKHostIds []int
+}
+
+// BKPage is the paging parameter
+type BKPage struct {
+	// start offset of the records
+	Start int `json:"start"`
+	// max records per page, at most 500
+	Limit int `json:"limit"`
+}
+
+// ListHostRelationResponse is the output of the host topology query
+type ListHostRelationResponse struct {
+	Count int                 `json:"count"`
+	Info  []*HostRelationInfo `json:"info"`
+}
+
+// HostRelationInfo associates a host with its business modules
+type HostRelationInfo struct {
+	Host      Host       `json:"host"`
+	Biz       Biz        `json:"biz"`
+	Sets      []Set      `json:"set"`
+	Modules   []Module   `json:"module"`
+	Relations []Relation `json:"relations"`
+}
+
+// BizSetParam is the input for querying a business and its sets
+type BizSetParam struct {
+	BaseSecret `json:",inline"`
+	BKBizId    int      `json:"bk_biz_id"`
+	Fields     []string `json:"fields"`
+	// paging information
+	Page BKPage `json:"page"`
+}
+
+// BizSetResponse is the output of the business/set query
+type BizSetResponse struct {
+	Count int    `json:"count"`
+	Info  []*Set `json:"info"`
+}
+
+// BizModuleParam is the input for querying a business and its modules
+type BizModuleParam struct {
+	BaseSecret `json:",inline"`
+	BKBizId    int `json:"bk_biz_id"`
+	BKSetId    int `json:"bk_set_id"`
+	// module property list, controlling which fields the returned module info carries
+	Fields []string `json:"fields"`
+	// paging information
+	Page BKPage `json:"page"`
+}
+
+// BizModuleResponse is the output of the business/module query
+type BizModuleResponse struct {
+	Count int       `json:"count"`
+	Info  []*Module `json:"info"`
+}
+
+// Biz is the business information
+type Biz struct {
+	// unique business ID
+	ApplicationID int `json:"bk_biz_id"`
+	// product name
+	DisplayName string `json:"bk_biz_name"`
+	// operations group name
+	GroupName string `json:"bk_oper_grp_name"`
+	// English abbreviation
+	Abbreviation string `json:"bk_app_abbr"`
+	// department name
+	DeptName string `json:"bk_dept_name_id"`
+	IsBip    string `json:"bk_is_bip"`
+	// business operations staff
+	Maintainers string `json:"bk_biz_maintainer"`
+	// operation planning PM
+	OperationPlanning string `json:"bk_oper_plan"`
+	// primary DBA
+	PmpDBAMajor string `json:"bk_pmp_dba_major"`
+	// backup DBA
+	PmpDBABackup string `json:"bk_dba_bak"`
+	// product director
+	AppDirector string `json:"bk_app_director"`
+	// product URL
+	AppUrl string `json:"bk_app_url"`
+	// development team
+	AppDevTeam string `json:"bk_app_devteam"`
+	// developers
+	AppDevMan string `json:"bk_biz_developer"`
+	// backup developers
+	AppDevBackup string `json:"bk_app_dev_bak"`
+	// architecture document link
+	AppDoc string `json:"bk_arc_doc"`
+	// user manual link
+	AppUserManual string `json:"bk_app_user_manual"`
+	// operations manual link
+	AppOpeManual string `json:"bk_app_oper_manual"`
+	// product English name
+	BipID string `json:"bk_bip_id"`
+	// SA
+	PmpSA string `json:"bk_pmp_sa"`
+	// PMP-related roles
+	PmpSafeMan    string `json:"bk_pmp_safe_man"`
+	PmpOpeDevMan  string `json:"bk_pmp_oper_dev_man"`
+	PmpOssMan     string `json:"bk_pmp_oss_man"`
+	PmpCmMan      string `json:"bk_pmp_cm_man"`
+	PmpPortalMan  string `json:"bk_pmp_potl_man"`
+	PmpIdipMan    string `json:"bk_pmp_idip_man"`
+	PmpTlogMan    string `json:"bk_tlog_man"`
+	PmpServicePM  string `json:"bk_pmp_svc_pm"`
+	PmpCmReqMan   string `json:"bk_pmp_cmreqman"`
+	PmpComPlot    string `json:"bk_pmp_com_plot"`
+	PmpProductMan string `json:"bk_biz_productor"`
+	PmpQA         string `json:"bk_pmp_qa"`
+	PmpQC         string `json:"bk_pmp_qc"`
+	// fields for which CC stores only the ID
+	AppGameTypeID string `json:"bk_app_game_typeid"`
+	OperState     string `json:"life_cycle"`
+	AppType       string `json:"bk_app_type"`
+	SourceID      string `json:"bk_source_id"`
+	// OBS product ID
+	ProductId int `json:"bk_product_id"`
+	// business department ID; 3 means IEG
+	BusinessDeptId int `json:"bk_business_dept_id"`
+	// initial operations department ID; 3 means IEG
+	OperateDeptId int `json:"bk_operate_dept_id"`
+}
+
+// BizParam is the input for querying businesses
+type BizParam struct {
+	BaseSecret `json:",inline"`
+	Fields     []string `json:"fields"`
+	// paging information
+	Page      BKPage                 `json:"page"`
+	Condition map[string]interface{} `json:"condition"`
+}
+
+// HostOri is the raw CC host record
+type HostOri struct {
+	BkIspName      string `json:"bk_isp_name"`      // ISP: 0: other; 1: Telecom; 2: Unicom; 3: Mobile
+	BkSn           string `json:"bk_sn"`            // device SN
+	Operator       string `json:"operator"`         // primary maintainer
+	BkOuterMac     string `json:"bk_outer_mac"`     // outer network MAC
+	BkStateName    string `json:"bk_state_name"`    // country, e.g. CN: China; see the CMDB page for details
+	BkProvinceName string `json:"bk_province_name"` // province
+	ImportFrom     string `json:"import_from"`      // import source: 1: excel; 2: agent; 3: api
+	BkSla          string `json:"bk_sla"`           // SLA level: 1: L1; 2: L2; 3: L3
+	BkServiceTerm  int    `json:"bk_service_term"`  // warranty term, 1-10 years
+	BkOsType       string `json:"bk_os_type"`       // OS type: 1: Linux; 2: Windows; 3: AIX
+	BkOsVersion    string `json:"bk_os_version"`    // OS version
+	BkOsBit        int    `json:"bk_os_bit"`        // OS bits
+	BkMem          string `json:"bk_mem"`           // memory capacity
+	BkMac          string `json:"bk_mac"`           // inner network MAC address
+	BkHostOuterip  string `json:"bk_host_outerip"`  // outer IP
+	BkHostName     string `json:"bk_host_name"`     // host name
+	BkHostInnerip  string `json:"bk_host_innerip"`  // inner IP
+	BkHosId        int    `json:"bk_host_id"`       // host ID
+	BkDisk         int    `json:"bk_disk"`          // disk capacity
+	BkCpuModule    string `json:"bk_cpu_module"`    // CPU model
+	BkCpuMhz       int    `json:"bk_cpu_mhz"`       // CPU frequency
+	BkCpu          int    `json:"bk_cpu"`           // CPU logical cores, 1-1000000
+	BkComment      string `json:"bk_comment"`       // comment
+	BkCloudId      int    `json:"bk_cloud_id"`      // cloud area
+	BkBakOperator  string `json:"bk_bak_operator"`  // backup maintainer
+	BkAssetId      string `json:"bk_asset_id"`      // asset number
+}
+
+// ListBizHostsParam is the input of the list_biz_hosts API
+type ListBizHostsParam struct {
+	BaseSecret         `json:",inline"`
+	Page               BKPage             `json:"page"`
+	BkBizId            int                `json:"bk_biz_id"`
+	BkSetIds           []int              `json:"bk_set_ids"`
+	HostPropertyFilter HostPropertyFilter `json:"host_property_filter"`
+	Fileds             []string           `json:"fields"`
+}
+
+// ListBizHostsResponse is the output of the list_biz_hosts API
+type ListBizHostsResponse struct {
+	Count int     `json:"count"`
+	Info  []*Host `json:"info"`
+}
+
+// BizResponse is the output of the business query
+type BizResponse struct {
+	Count int    `json:"count"`
+	Info  []*Biz `json:"info"`
+}
+
+// TransferHostParam is the input for transferring hosts to another business
+type TransferHostParam struct {
+	BaseSecret `json:",inline"`
+	From       BKFrom `json:"bk_from"`
+	To         BKTo   `json:"bk_to"`
+}
+
+// TransferHostModuleParam transfers hosts across modules within one business; multiple target modules are supported
+type TransferHostModuleParam struct {
+	BaseSecret  `json:",inline"`
+	BkBizID     int   `json:"bk_biz_id"`
+	BkHostID    []int `json:"bk_host_id"`
+	BkModuleID  []int `json:"bk_module_id"`
+	IsIncrement bool  `json:"is_increment"`
+}
+
+// CloneHostPropertyParam is the input for cloning host properties
+type CloneHostPropertyParam struct {
+	BaseSecret  `json:",inline"`
+	BkBizId     int `json:"bk_biz_id"`
+	BkOrgHostId int `json:"bk_org_id"`
+	BkDstHostId int `json:"bk_dst_id"`
+}
+
+// CloneHostSvcInsParam is the input for cloning service instance information
+type CloneHostSvcInsParam struct {
+	BaseSecret  `json:",inline"`
+	BkBizId     int   `json:"bk_biz_id"`
+	BkModuleIds []int `json:"bk_module_ids"`
+	SrcHostId   int   `json:"src_host_id"`
+	DstHostId   int   `json:"dst_host_id"`
+}
+
+// BKFrom is the source host (business) part of the host transfer input
+type BKFrom struct {
+	// source business ID
+	BKBizId int `json:"bk_biz_id"`
+	// host IPs
+	InnerIPs []string `json:"-"`
+	// host asset numbers
+	AssetIds []string `json:"-"`
+	// host IDs
+	BKHostIds []int `json:"bk_host_ids"`
+}
+
+// BKTo is the target business part of the host transfer input
+type BKTo struct {
+	// target business ID
+	BKBizId int `json:"bk_biz_id"`
+	// module ID the hosts go to; if omitted, hosts go to the business's idle machine module
+	BKModuleId int `json:"bk_module_id,omitempty"`
+}
+
+// UpdateHostParam is the input for updating host properties (e.g. primary/backup maintainers)
+type UpdateHostParam struct {
+	BaseSecret `json:",inline"`
+	// host IPs
+	InnerIPs []string `json:"-"`
+	// host asset numbers
+	AssetIds []string `json:"-"`
+	// host IDs, comma separated
+	BKHostId string `json:"bk_host_id"`
+	// host data
+	Data Host `json:"data"`
+}
+
+// HostsWithoutBizListParam queries hosts by filter conditions
+type HostsWithoutBizListParam struct {
+	BaseSecret `json:",inline"`
+	// query conditions
+	HostPropertyFilter HostPropertyFilter `json:"host_property_filter"`
+	// paging
+	Page BKPage `json:"page"`
+}
+
+// HostPropertyFilter is a combination of query rules
+type HostPropertyFilter struct {
+	// logical condition: AND | OR
+	Condition string `json:"condition"`
+	// rules
+	Rules []Rule `json:"rules"`
+}
+
+// Rule is a single query predicate
+type Rule struct {
+	// field name
+	Field string `json:"field"`
+	// operator; one of equal, not_equal, in, not_in, less, less_or_equal,
+	// greater, greater_or_equal, between, not_between
+	Operator string `json:"operator"`
+	// operand value
+	Value interface{} `json:"value"`
+}
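+
+// Example filter (illustrative): select hosts whose inner IP is in a given
+// list, combining Condition and Rules as documented above.
+//
+//	filter := HostPropertyFilter{
+//		Condition: "AND",
+//		Rules: []Rule{
+//			{Field: "bk_host_innerip", Operator: "in", Value: []string{"127.0.0.1"}},
+//		},
+//	}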
+
+// HostsWithoutBizListResponse is the output of the host (without business info) query
+type HostsWithoutBizListResponse struct {
+	Count int     `json:"count"`
+	Info  []*Host `json:"info"`
+}
+
+const (
+	// Limit is the maximum number of records per page
+	Limit int = 500
+)
+
+// BizInternalModulesParam is the input for querying a business's built-in modules
+type BizInternalModulesParam struct {
+	BaseSecret `json:",inline" url:",inline"`
+	BKBizId    int `json:"bk_biz_id" url:"bk_biz_id"`
+}
+
+// BizInternalModuleResponse is the output of the built-in module query
+type BizInternalModuleResponse struct {
+	BkSetID int `json:"bk_set_id"`
+	Module  []struct {
+		Default          int    `json:"default"`
+		BkModuleName     string `json:"bk_module_name"`
+		BkModuleID       int    `json:"bk_module_id"`
+		HostApplyEnabled bool   `json:"host_apply_enabled"`
+	} `json:"module"`
+	BkSetName string `json:"bk_set_name"`
+}
+
+// BizTopoTreeParam is the input for querying the business topology
+type BizTopoTreeParam struct {
+	BaseSecret `json:",inline" url:",inline"`
+	BKBizId    int `json:"bk_biz_id" url:"bk_biz_id"`
+}
+
+// TopoTreeNode is a node of the CC topology tree
+type TopoTreeNode struct {
+	HostCount  int            `json:"host_count,omitempty"`
+	Default    int            `json:"default,omitempty"`
+	BkObjName  string         `json:"bk_obj_name,omitempty"`
+	BkObjID    string         `json:"bk_obj_id,omitempty"`
+	Child      []TopoTreeNode `json:"child,omitempty"`
+	BkInstID   int            `json:"bk_inst_id"`
+	BkInstName string         `json:"bk_inst_name"`
+}
+
+// BizLocationParam is the input for querying business locations
+type BizLocationParam struct {
+	BaseSecret `json:",inline" url:",inline"`
+	BKBizIds   []int `json:"bk_biz_ids" url:"bk_biz_ids"`
+}
+
+// HostLocationParam is the input for querying host locations
+type HostLocationParam struct {
+	BaseSecret `json:",inline" url:",inline"`
+	BkHostList []BkHostList `json:"bk_host_list"`
+}
+
+// BkHostList identifies a host by inner IP and cloud area
+type BkHostList struct {
+	BkHostInnerip string `json:"bk_host_innerip"`
+	BkCloudID     int    `json:"bk_cloud_id"`
+}
+
+// HostLocationInfo is the location information of a host
+type HostLocationInfo struct {
+	BkHostInnerip string `json:"bk_host_innerip"`
+	BkCloudID     int    `json:"bk_cloud_id"`
+	BkLocation    string `json:"bk_location"`
+}
+
+// BizLocationInfo is the CC business location
+type BizLocationInfo struct {
+	BkBizID    int    `json:"bk_biz_id"`
+	BkLocation string `json:"bk_location"`
+}
+
+// FindHostBizRelationResponse is the output of the host-business relation query
+type FindHostBizRelationResponse struct {
+	Result  bool   `json:"result"`
+	Code    int    `json:"code"`
+	Message string `json:"message"`
+	Data    []struct {
+		BkBizID           int    `json:"bk_biz_id"`
+		BkHostID          int    `json:"bk_host_id"`
+		BkModuleID        int    `json:"bk_module_id"`
+		BkSetID           int    `json:"bk_set_id"`
+		BkSupplierAccount string `json:"bk_supplier_account"`
+	} `json:"data"`
+}
+
+// DeptParam is the department query input
+type DeptParam struct {
+	BaseSecret `json:",inline"`
+	// department ID
+	DeptId string `json:"dept_id"`
+}
+
+// DeptResponse is the department query output
+type DeptResponse struct {
+	// department ID
+	ID string `json:"ID"`
+	// department name
+	Name string `json:"Name"`
+	// parent department ID
+	ParentId string `json:"ParentId"`
+}
+
+// BizCreateSetParam is the input for creating a set under a business
+type BizCreateSetParam struct {
+	BaseSecret `json:",inline"`
+	BkBizID    int `json:"bk_biz_id"`
+
+	Data struct {
+		BkParentID    int    `json:"bk_parent_id"`
+		BkSetName     string `json:"bk_set_name"`
+		SetTemplateID int    `json:"set_template_id"`
+	} `json:"data"`
+}
+
+// BizDeleteSetParam is the input for deleting a set under a business
+type BizDeleteSetParam struct {
+	BaseSecret `json:",inline"`
+	BkBizID    int `json:"bk_biz_id"`
+	BkBizSetID int `json:"bk_set_id"`
+}
+
+// BizCreateSetResponse is the output of the set creation
+type BizCreateSetResponse struct {
+	BkSetName string `json:"bk_set_name"`
+	BkSetId   int    `json:"bk_set_id"`
+}
+
+// BizSensitive is the business sensitive information
+type BizSensitive struct {
+	BKBizId     int `json:"bk_biz_id"`
+	BkProductId int `json:"bk_product_id"`
+}
+
+// BizSensitiveParam is the input for querying business sensitive information
+type BizSensitiveParam struct {
+	BaseSecret `json:",inline"`
+	Fields     []string `json:"fields"`
+	// paging information
+	Page     BKPage `json:"page"`
+	BkBizIds []int  `json:"bk_biz_ids"`
+}
+
+// BizSensitiveResponse is the output of the OBS product information query
+type BizSensitiveResponse struct {
+	Count int             `json:"count"`
+	Info  []*BizSensitive `json:"info"`
+}
+
+// SyncHostInfoFromCmpyParam is the input for syncing host updates from the company CMDB
+type SyncHostInfoFromCmpyParam struct {
+	BaseSecret `json:",inline"`
+	BkHostIds  []int `json:"bk_host_ids"`
+}
+
+// AddHostInfoFromCmpyParam is the input for syncing newly added hosts from the company CMDB
+type AddHostInfoFromCmpyParam struct {
+	BaseSecret `json:",inline"`
+	SvrIds     []int `json:"svr_ids"`
+}
+
+// CreateModuleParam is the input for creating a module
+type CreateModuleParam struct {
+	BKBizId int `json:"bk_biz_id"`
+	BkSetId int `json:"bk_set_id"`
+	Data    struct {
+		BkParentID   int    `json:"bk_parent_id"`
+		BkModuleName string `json:"bk_module_name"`
+	} `json:"data"`
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/set_watch.go b/dbm-services/common/go-pubpkg/cc.v3/set_watch.go
new file mode 100644
index 0000000000..65c67ed567
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/set_watch.go
@@ -0,0 +1,98 @@
+package cc
+
+import (
+	"dbm-services/common/go-pubpkg/cc.v3/utils"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+	"sync"
+
+	"github.com/golang/glog"
+)
+
+// SetWatcher is the Set resource watcher
+type SetWatcher struct {
+	client   *Client
+	fields   []string
+	lock     sync.Mutex
+	stopped  bool
+	outgoing chan Event
+	emit     func(Event)
+}
+
+func newSetWatcher(client *Client) *SetWatcher {
+	w := &SetWatcher{
+		client:   client,
+		fields:   utils.GetStructTagName(reflect.TypeOf(&Set{})),
+		outgoing: make(chan Event),
+	}
+	w.emit = func(e Event) { w.outgoing <- e }
+	return w
+}
+
+// SetWatchList starts a goroutine that watches Set resource events
+func SetWatchList(client *Client) (Interface, error) {
+	w := newSetWatcher(client)
+	go w.sync()
+	return w, nil
+}
+
+func (w *SetWatcher) sync() {
+	var cursor string
+	for {
+		result, err := resourceWatch(w.client, SetResource, cursor, w.fields)
+		if err != nil {
+			w.emit(Event{
+				Type:   Error,
+				Object: fmt.Sprintf("SetWatcher failed: %v", err),
+			})
+			// on error, reset the cursor and retry
+			cursor = ""
+			continue
+		}
+		glog.Infof("SetWatcher - Cursor: %s, RequestId: %s, EventCount: %d", cursor, result.RequestId, len(result.BKEvents))
+		// if BKEvents is empty the cursor must be reset,
+		// otherwise watching from the current cursor keeps failing
+		if len(result.BKEvents) == 0 {
+			cursor = ""
+			continue
+		}
+		for _, item := range result.BKEvents {
+			cursor = item.BKCursor
+			if string(item.BKDetail) == "null" {
+				continue
+			}
+			var set Set
+			if err := json.Unmarshal(item.BKDetail, &set); err != nil {
+				w.emit(Event{
+					Type:   Error,
+					Object: fmt.Sprintf("TypeErr set: Err: %v - Detail: %s", err, string(item.BKDetail)),
+				})
+				continue
+			}
+			w.emit(Event{
+				Key:    strconv.Itoa(set.BKSetId),
+				Object: &set,
+				Type:   EventType(item.BKEventType),
+			})
+		}
+	}
+}
+
+// ResultChan returns the event channel
+func (w *SetWatcher) ResultChan() <-chan Event {
+	return w.outgoing
+}
+
+// Stop watcher
+func (w *SetWatcher) Stop() {
+	w.lock.Lock()
+	defer w.lock.Unlock()
+	// Prevent double channel closes.
+	if !w.stopped {
+		w.stopped = true
+		close(w.outgoing)
+	}
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/sync_host_info_from_cmpy.go b/dbm-services/common/go-pubpkg/cc.v3/sync_host_info_from_cmpy.go
new file mode 100644
index 0000000000..5d9e80f77c
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/sync_host_info_from_cmpy.go
@@ -0,0 +1,27 @@
+package cc
+
+import (
+	"net/http"
+)
+
+// SyncHostInfoFromCmpy sync host from cmdb
+type SyncHostInfoFromCmpy struct {
+	client *Client
+	url    string
+}
+
+// NewSyncHostInfoFromCmpy returns a new SyncHostInfoFromCmpy
+func NewSyncHostInfoFromCmpy(client *Client) *SyncHostInfoFromCmpy {
+	return &SyncHostInfoFromCmpy{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/sync_host_info_from_cmpy/",
+	}
+}
+
+// Query syncs updated host information to CC
+func (s *SyncHostInfoFromCmpy) Query(bizHostIds []int) (*Response, error) {
+	param := &SyncHostInfoFromCmpyParam{
+		BkHostIds: bizHostIds,
+	}
+	return s.client.Do(http.MethodPost, s.url, param)
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/sync_host_info_from_comy_test.go b/dbm-services/common/go-pubpkg/cc.v3/sync_host_info_from_comy_test.go
new file mode 100644
index 0000000000..260df3d986
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/sync_host_info_from_comy_test.go
@@ -0,0 +1,26 @@
+package cc
+
+import (
+	"fmt"
+	"log"
+	"testing"
+)
+
+// TestSyncHostInfoFromCmpy_Query
+func TestSyncHostInfoFromCmpy_Query(t *testing.T) {
+	address := "http://127.0.0.1"
+
+	client, err := NewClient(address, TestSecret)
+	if err != nil {
+		log.Fatal(err.Error())
+	}
+
+	h := NewSyncHostInfoFromCmpy(client)
+	param := []int{1147527}
+	result, err := h.Query(param)
+	fmt.Printf("result: %#v\n", result)
+	if err != nil {
+		fmt.Printf("err: %s\n", err)
+		log.Fatal(err.Error())
+	}
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/test_config.go b/dbm-services/common/go-pubpkg/cc.v3/test_config.go
new file mode 100644
index 0000000000..10062d8a7e
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/test_config.go
@@ -0,0 +1,14 @@
+package cc
+
+// TestSecret is the secret used by the unit tests (the app secret is redacted)
+var TestSecret Secret = Secret{
+	BKAppCode:   "scr2020",
+	BKAppSecret: "xxxxx",
+	BKUsername:  "scr",
+}
+
+// TestPage is the default page used by the unit tests
+var TestPage BKPage = BKPage{
+	Start: 0,
+	Limit: 1,
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/transfer_host.go b/dbm-services/common/go-pubpkg/cc.v3/transfer_host.go
new file mode 100644
index 0000000000..0ce721f8e1
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/transfer_host.go
@@ -0,0 +1,36 @@
+package cc
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// TransferHost is the TransferHost server
+type TransferHost struct {
+	client *Client
+	url    string
+}
+
+// NewTransferHost returns a new TransferHost server
+func NewTransferHost(client *Client) *TransferHost {
+	return &TransferHost{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/transfer_host_to_another_biz",
+	}
+}
+
+// Transfer moves hosts to a module in another business
+func (h *TransferHost) Transfer(param *TransferHostParam) error {
+	// collect all host IDs
+	hostIds, err := QueryHostId(h.client, param.From.InnerIPs, param.From.AssetIds)
+	if err != nil {
+		return fmt.Errorf("Query BKHostId from innerip or assetid failed: %v", err)
+	}
+	hostIds = append(hostIds, param.From.BKHostIds...)
+	param.From.BKHostIds = RemoveRepeatedHostId(hostIds)
+	_, err = h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/transfer_host_module.go b/dbm-services/common/go-pubpkg/cc.v3/transfer_host_module.go
new file mode 100644
index 0000000000..08f6296237
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/transfer_host_module.go
@@ -0,0 +1,28 @@
+package cc
+
+import (
+	"net/http"
+)
+
+// TransferHostModule is the TransferHostModule server
+type TransferHostModule struct {
+	client *Client
+	url    string
+}
+
+// NewTransferHostModule returns a new TransferHostModule server
+func NewTransferHostModule(client *Client) *TransferHostModule {
+	return &TransferHostModule{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/transfer_host_module",
+	}
+}
+
+// Transfer moves hosts across modules within the same business; multiple target modules are supported
+func (h *TransferHostModule) Transfer(param *TransferHostModuleParam) error {
+	_, err := h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/transfer_host_test.go b/dbm-services/common/go-pubpkg/cc.v3/transfer_host_test.go
new file mode 100644
index 0000000000..c42d04c293
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/transfer_host_test.go
@@ -0,0 +1,27 @@
+package cc
+
+import (
+	"testing"
+)
+
+// TestTransfer
+func TestTransfer(t *testing.T) {
+	client, err := NewClient("http://127.0.0.1", TestSecret)
+	if err != nil {
+		t.Fatal(err)
+	}
+	h := NewTransferHost(client)
+	err = h.Transfer(&TransferHostParam{
+		From: BKFrom{
+			BKBizId:  310,
+			InnerIPs: []string{"1.1.1.1"},
+		},
+		To: BKTo{
+			BKBizId:    100605,
+			BKModuleId: 548622,
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/update_host.go b/dbm-services/common/go-pubpkg/cc.v3/update_host.go
new file mode 100644
index 0000000000..1cadb61fd4
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/update_host.go
@@ -0,0 +1,45 @@
+package cc
+
+import (
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+// UpdateHost is the UpdateHost server
+type UpdateHost struct {
+	client *Client
+	url    string
+}
+
+// NewUpdateHost returns a new UpdateHost server
+func NewUpdateHost(client *Client) *UpdateHost {
+	return &UpdateHost{
+		client: client,
+		url:    "/api/c/compapi/v2/cc/update_host",
+	}
+}
+
+// Update updates host properties
+func (h *UpdateHost) Update(param *UpdateHostParam) error {
+	// collect all host IDs
+	hostIds, err := QueryHostId(h.client, param.InnerIPs, param.AssetIds)
+	if err != nil {
+		return fmt.Errorf("Query BKHostId from innerip or assetid failed: %v", err)
+	}
+	for _, item := range strings.Split(param.BKHostId, ",") {
+		if item == "" {
+			continue
+		}
+		id, err := strconv.Atoi(item)
+		if err != nil {
+			// skip entries that are not valid integers
+			continue
+		}
+		hostIds = append(hostIds, id)
+	}
+	// join the IDs back into a comma-separated string
+	param.BKHostId = strings.Replace(strings.Trim(fmt.Sprint(RemoveRepeatedHostId(hostIds)), "[]"), " ", ",", -1)
+	_, err = h.client.Do(http.MethodPost, h.url, param)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/update_host_test.go b/dbm-services/common/go-pubpkg/cc.v3/update_host_test.go
new file mode 100644
index 0000000000..04c63d043b
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/update_host_test.go
@@ -0,0 +1,24 @@
+package cc
+
+import (
+	"testing"
+)
+
+// TestUpdate
+func TestUpdate(t *testing.T) {
+	client, err := NewClient("http://127.0.0.1", TestSecret)
+	if err != nil {
+		t.Fatal(err)
+	}
+	h := NewUpdateHost(client)
+	err = h.Update(&UpdateHostParam{
+		InnerIPs: []string{"1.1.1.1"},
+		Data: Host{
+			Operator:    "abc",
+			BakOperator: "ddd",
+		},
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/utils/utils.go b/dbm-services/common/go-pubpkg/cc.v3/utils/utils.go
new file mode 100644
index 0000000000..8e95215f1a
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/utils/utils.go
@@ -0,0 +1,40 @@
+// Package utils provides reflection helpers for the cc.v3 client
+package utils
+
+import (
+	"reflect"
+	"strings"
+)
+
+// GetStructTagName collects the json tag names of t's fields, recursing into
+// pointer, slice, array, chan and map element types
+func GetStructTagName(t reflect.Type) []string {
+	var tags []string
+	switch t.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Ptr, reflect.Slice:
+		for _, item := range GetStructTagName(t.Elem()) {
+			tags = append(tags, toTagName(item))
+		}
+	case reflect.Struct:
+		for i := 0; i < t.NumField(); i++ {
+			f := t.Field(i)
+			switch f.Type.Kind() {
+			case reflect.Array, reflect.Chan, reflect.Map, reflect.Ptr, reflect.Slice:
+				for _, item := range GetStructTagName(f.Type) {
+					tags = append(tags, toTagName(item))
+				}
+			}
+			if f.Tag != "" {
+				tags = append(tags, toTagName(f.Tag.Get("json")))
+			}
+		}
+	}
+	return tags
+}
+
+func toTagName(val string) string {
+	parts := strings.Split(val, ",")
+	if len(parts) > 0 {
+		return parts[0]
+	}
+	return val
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/utils/utils_test.go b/dbm-services/common/go-pubpkg/cc.v3/utils/utils_test.go
new file mode 100644
index 0000000000..1357fe10f2
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/utils/utils_test.go
@@ -0,0 +1,52 @@
+package utils
+
+import (
+	"fmt"
+	"reflect"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+type testObject struct {
+	c1 string            `json:"c1,omitempty"`
+	c2 int               `json:"c2"`
+	c3 *c3               `json:"c3,omitempty"`
+	c4 map[string]string `json:"c4"`
+	c5 []int             `json:"c5"`
+}
+
+type c3 struct {
+	x1 string   `json:"x1"`
+	x2 []x2     `json:"x2,omitempty"`
+	x3 []string `json:"x3"`
+}
+
+type x2 struct {
+	y1 string         `json:"y1"`
+	y2 map[string]int `json:"y2,omitempty"`
+	y3 *int           `json:"y3"`
+}
+
+func TestGetStructTagName(t *testing.T) {
+	cases := []struct {
+		name           string
+		object         interface{}
+		expectedResult []string
+	}{
+		{
+			name:           "ptr",
+			object:         &testObject{},
+			expectedResult: []string{"c1", "c2", "x1", "y1", "y2", "y3", "x2", "x3", "c3", "c4", "c5"},
+		},
+		{
+			name:           "struct",
+			object:         testObject{},
+			expectedResult: []string{"c1", "c2", "x1", "y1", "y2", "y3", "x2", "x3", "c3", "c4", "c5"},
+		},
+	}
+	for _, tc := range cases {
+		fields := GetStructTagName(reflect.TypeOf(tc.object))
+		assert.Equal(t, tc.expectedResult, fields, fmt.Sprintf("case: %+v", tc.name))
+	}
+}
diff --git a/dbm-services/common/go-pubpkg/cc.v3/watch.go b/dbm-services/common/go-pubpkg/cc.v3/watch.go
new file mode 100644
index 0000000000..6ddd34b570
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cc.v3/watch.go
@@ -0,0 +1,86 @@
+package cc
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/golang/glog"
+)
+
+// Interface is the resource watch interface
+type Interface interface {
+	// Stop stops watching and closes the channel returned by ResultChan()
+	Stop()
+
+	// ResultChan returns a channel which will receive all the events.
+	ResultChan() <-chan Event
+}
+
+// EventType is the type of a watch event
+type EventType string
+
+const (
+	// Added is a create event
+	Added EventType = "create"
+	// Modified is an update event
+	Modified EventType = "update"
+	// Deleted is a delete event
+	Deleted EventType = "delete"
+	// Error is an error event
+	Error EventType = "error"
+)
+
+// Event carries the watch event information
+type Event struct {
+	// unique key of the event
+	Key string
+	// the concrete event object
+	Object interface{}
+	// resource kind
+	Kind string
+	// event type
+	Type EventType
+	// CC cursor
+	Cursor string
+}
+
+const (
+	// WatcherURL is the CC resource watch API path
+	WatcherURL = "/api/c/compapi/v2/cc/resource_watch"
+)
+
+// resourceWatch performs a watch on a resource
+// resourceWatchType: the resource type to watch
+// cursor: the position to start watching from
+// fields: the fields to return; different resources return different results
+func resourceWatch(client *Client, resourceWatchType ResourceWatchType, cursor string,
+	fields []string) (*ResourceWatchResponse, error) {
+	param := &ResourceWatchParam{
+		BKFields:   fields,
+		BKResource: resourceWatchType,
+	}
+	if cursor != "" {
+		param.BKCursor = cursor
+	} else {
+		// when the cursor is empty (first service start, or after a watch error),
+		// fetch the events of the previous minute
+		param.BKStartFrom = time.Now().Add(time.Second * -60).Unix()
+	}
+	resp, err := client.Do(http.MethodPost, WatcherURL, param)
+	if err != nil {
+		return nil, err
+	}
+	glog.V(5).Infof("Watch response: %+v", resp)
+	var result ResourceWatchResponse
+	err = json.Unmarshal(resp.Data, &result)
+	if err != nil {
+		return nil, fmt.Errorf("RequestId: %s, err: %v", resp.RequestId, err)
+	}
+	result.RequestId = resp.RequestId
+	return &result, nil
+}
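+
+// Usage sketch (illustrative; client construction omitted): consuming one of
+// the watchers built on resourceWatch.
+//
+//	w, _ := ModuleWatchList(client)
+//	defer w.Stop()
+//	for ev := range w.ResultChan() {
+//		if ev.Type == Error {
+//			continue // Object is an error string here
+//		}
+//		m := ev.Object.(*Module)
+//		_ = m
+//	}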
diff --git a/dbm-services/common/go-pubpkg/cmutil/command.go b/dbm-services/common/go-pubpkg/cmutil/command.go
new file mode 100644
index 0000000000..a03c8961c0
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cmutil/command.go
@@ -0,0 +1,34 @@
+package cmutil
+
+import (
+	"bytes"
+	"fmt"
+	"os/exec"
+
+	"github.com/pkg/errors"
+)
+
+// ExecShellCommand runs a shell command.
+// On error the returned string is stderr; otherwise it is stdout.
+// Avoid this helper in new code: judging failure by stderr content is unreliable.
+func ExecShellCommand(isSudo bool, param string) (stdoutStr string, err error) {
+	if isSudo {
+		param = "sudo " + param
+	}
+	cmd := exec.Command("bash", "-c", param)
+	var stdout, stderr bytes.Buffer
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+	err = cmd.Run()
+	if err != nil {
+		return stderr.String(), errors.WithMessage(err, stderr.String())
+	}
+
+	if len(stderr.String()) > 0 {
+		err = fmt.Errorf("execute shell command(%s) error:%s", param, stderr.String())
+		return stderr.String(), err
+	}
+
+	return stdout.String(), nil
+}
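+
+// Usage sketch (illustrative):
+//
+//	out, err := ExecShellCommand(false, "echo hello")
+//	// on success out is the command's stdout ("hello\n")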
diff --git a/dbm-services/common/go-pubpkg/cmutil/db.go b/dbm-services/common/go-pubpkg/cmutil/db.go
new file mode 100644
index 0000000000..b6c2257f22
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cmutil/db.go
@@ -0,0 +1,72 @@
+package cmutil
+
+import (
+	"context"
+	"database/sql"
+	"dbm-services/common/go-pubpkg/logger"
+	"fmt"
+	"time"
+
+	_ "github.com/go-sql-driver/mysql" // mysql TODO
+	"github.com/jmoiron/sqlx"
+)
+
+// DbWorker wraps a DSN and its opened *sql.DB handle
+type DbWorker struct {
+	Dsn string
+	Db  *sql.DB
+}
+
+// NewDbWorker opens a MySQL connection from dsn and verifies it with a 10s ping timeout
+func NewDbWorker(dsn string) (*DbWorker, error) {
+	var err error
+	dbw := &DbWorker{
+		Dsn: dsn,
+	}
+	dbw.Db, err = sql.Open("mysql", dbw.Dsn)
+	if err != nil {
+		return nil, err
+	}
+	// check connect with timeout
+	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+	defer cancel()
+	if err := dbw.Db.PingContext(ctx); err != nil {
+		return nil, fmt.Errorf("ping context failed, err:%w", err)
+	}
+	return dbw, nil
+}
+
+// ShowDatabases runs `show databases` and returns all database names
+func (h *DbWorker) ShowDatabases() (databases []string, err error) {
+	err = h.Queryx(&databases, "show databases")
+	return
+}
+
+// Queryx execute query use sqlx
+func (h *DbWorker) Queryx(data interface{}, query string, args ...interface{}) error {
+	logger.Info("query:%s", query)
+	logger.Info("args:%v", args)
+	db := sqlx.NewDb(h.Db, "mysql")
+	udb := db.Unsafe()
+	if err := udb.Select(data, query, args...); err != nil {
+		return fmt.Errorf("sqlx select failed, err:%w", err)
+	}
+	return nil
+}
+
+// Queryxs execute query use sqlx return Single column
+func (h *DbWorker) Queryxs(data interface{}, query string) error {
+	db := sqlx.NewDb(h.Db, "mysql")
+	udb := db.Unsafe()
+	if err := udb.Get(data, query); err != nil {
+		return err
+	}
+	return nil
+}
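+
+// Usage sketch (illustrative; the DSN is an assumption):
+//
+//	dbw, err := NewDbWorker("user:pass@tcp(127.0.0.1:3306)/")
+//	if err == nil {
+//		var ver string
+//		_ = dbw.Queryxs(&ver, "select version()")
+//		dbs, _ := dbw.ShowDatabases()
+//		_ = dbs
+//	}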
diff --git a/dbm-services/common/go-pubpkg/cmutil/file.go b/dbm-services/common/go-pubpkg/cmutil/file.go
new file mode 100644
index 0000000000..65041b8a06
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cmutil/file.go
@@ -0,0 +1,72 @@
+package cmutil
+
+import (
+	"crypto/md5"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+// GetFileMd5 returns the MD5 checksum of a file
+func GetFileMd5(fileAbPath string) (md5sum string, err error) {
+	f, err := filepath.Abs(fileAbPath)
+	if err != nil {
+		return
+	}
+	rFile, err := os.Open(f)
+	if err != nil {
+		return "", err
+	}
+	defer rFile.Close()
+	h := md5.New()
+	if _, err := io.Copy(h, rFile); err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%x", h.Sum(nil)), nil
+}
+
+// FileExists checks whether the path already exists
+func FileExists(path string) bool {
+	_, err := os.Stat(path)
+	if err != nil {
+		return os.IsExist(err)
+	}
+	return true
+}
+
+// FileExistsErr returns an error if the file does not exist
+func FileExistsErr(path string) error {
+	_, err := os.Stat(path)
+	if err != nil {
+		return errors.WithStack(err)
+	}
+	return nil
+}
+
+// IsDirectory checks whether the local path is a directory
+func IsDirectory(path string) bool {
+	fileInfo, err := os.Stat(path)
+	if err != nil {
+		return false
+	}
+	return fileInfo.IsDir()
+}
+
+// GetFileSize returns the file size in bytes from the OS, or a negative
+// code on error: -1 not exist, -2 permission denied, -3 other stat error
+func GetFileSize(path string) int64 {
+	f, err := os.Stat(path)
+	if err != nil {
+		// the file may not exist, or we may lack permission
+		if os.IsNotExist(err) {
+			return -1
+		} else if os.IsPermission(err) {
+			return -2
+		} else {
+			return -3
+		}
+	}
+	return f.Size()
+}
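+
+// Example (illustrative): telling the negative return codes apart.
+//
+//	switch size := GetFileSize("/tmp/example"); size {
+//	case -1: // does not exist
+//	case -2: // permission denied
+//	case -3: // other stat error
+//	default: // actual size in bytes
+//	}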
diff --git a/dbm-services/common/go-pubpkg/cmutil/map.go b/dbm-services/common/go-pubpkg/cmutil/map.go
new file mode 100644
index 0000000000..7c5c3ba7bf
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cmutil/map.go
@@ -0,0 +1,22 @@
+package cmutil
+
+import "encoding/json"
+
+// CleanStrMap removes entries whose keys are empty or whitespace-only
+func CleanStrMap(data map[string]string) map[string]string {
+	for key := range data {
+		if IsEmpty(key) {
+			delete(data, key)
+		}
+	}
+	return data
+}
+
+// ConverMapToJsonStr marshals a string map into a JSON string
+func ConverMapToJsonStr(m map[string]string) (string, error) {
+	b, err := json.Marshal(m)
+	if err != nil {
+		return "{}", err
+	}
+	return string(b), nil
+}
diff --git a/dbm-services/common/go-pubpkg/cmutil/mysql.go b/dbm-services/common/go-pubpkg/cmutil/mysql.go
new file mode 100644
index 0000000000..d923819deb
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cmutil/mysql.go
@@ -0,0 +1,110 @@
+// Package cmutil provides common utility helpers for the dbm services
+package cmutil
+
+import (
+	"regexp"
+	"strconv"
+)
+
+// GetMysqlSystemDatabases returns the MySQL system databases for the given version
+func GetMysqlSystemDatabases(version string) []string {
+	DBs := []string{"information_schema", "mysql", "performance_schema"}
+
+	if MySQLVersionParse(version) > MySQLVersionParse("5.7.0") {
+		DBs = append(DBs, "sys")
+	} else if MySQLVersionParse(version) < MySQLVersionParse("5.0.0") {
+		DBs = []string{"mysql"}
+	} else if MySQLVersionParse(version) < MySQLVersionParse("5.5.0") {
+		DBs = []string{"information_schema", "mysql"}
+	}
+	return DBs
+}
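+
+// Example (illustrative):
+//
+//	GetMysqlSystemDatabases("5.7.20-tmysql-3.1.5-log")
+//	// => ["information_schema", "mysql", "performance_schema", "sys"]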
+
+// GetGcsSystemDatabases returns the MySQL system databases plus the GCS monitoring/management databases
+// < 5.0: "mysql", "db_infobase", "test"
+// < 5.5: "information_schema", "mysql", "db_infobase", "test"
+// < 5.7: "information_schema", "mysql", "performance_schema", "db_infobase", "test"
+// > 5.7: "information_schema", "mysql", "performance_schema", "sys", "db_infobase", "test"
+func GetGcsSystemDatabases(version string) []string {
+	DBs := GetMysqlSystemDatabases(version)
+	DBs = append(DBs, "db_infobase")
+	DBs = append(DBs, "test")
+	return DBs
+}
+
+// GetGcsSystemDatabasesIgnoreTest is like GetGcsSystemDatabases but without the "test" database
+func GetGcsSystemDatabasesIgnoreTest(version string) []string {
+	DBs := GetMysqlSystemDatabases(version)
+	DBs = append(DBs, "db_infobase")
+	return DBs
+}
+
+// MySQLVersionParse parses a version string into a comparable number
+// input: the string returned by `select version()`
+// output: the numeric MySQL version prefix (tmysql suffixes are ignored)
+// examples:
+// 5.7.20-tmysql-3.1.5-log ==> 5*1000000 + 7*1000 + 20 ==> 5007020
+// MySQL5.1.13 ==> 5*1000000 + 1*1000 + 13 ==> 5001013
+func MySQLVersionParse(version string) uint64 {
+	re := regexp.MustCompile(`([\d]+).?([\d]+)?.?([\d]+)?`)
+	return mysqlVersionParse(re, version)
+}
+
+func mysqlVersionParse(re *regexp.Regexp, mysqlVersion string) uint64 {
+	result := re.FindStringSubmatch(mysqlVersion)
+	var (
+		total    uint64
+		billion  string
+		thousand string
+		single   string
+		// 2.1.5  => 2 * 1000000 + 1 * 1000 + 5
+	)
+	switch len(result) {
+	case 0:
+		return 0
+	case 4:
+		billion = result[1]
+		thousand = result[2]
+		single = result[3]
+		if billion != "" {
+			b, err := strconv.ParseUint(billion, 10, 64)
+			if err != nil {
+				// log.Printf("%s", err)
+				b = 0
+			}
+			total += b * 1000000
+		}
+		if thousand != "" {
+			t, err := strconv.ParseUint(thousand, 10, 64)
+			if err != nil {
+				// log.Printf("%s", err)
+				t = 0
+			}
+			total += t * 1000
+		}
+		if single != "" {
+			s, err := strconv.ParseUint(single, 10, 64)
+			if err != nil {
+				s = 0
+			}
+			total += s
+		}
+	default:
+		return 0
+	}
+	return total
+}
+
+var (
+	userPasswordRegex  = regexp.MustCompile(`\s-u\w+.*\s-p(\w+).*`)
+	mysqlPasswordRegex = regexp.MustCompile(`\s-p[^\s]+`)
+)
+
+// RemovePassword replaces the password in a "-u<user> ... -p<password>"
+// command line pattern so the command can be logged safely.
+// input:  abcd -uADMIN -pabcd -h127.0.0.1 -P20000
+// output: abcd -uADMIN -pxxxx -h127.0.0.1 -P20000
+func RemovePassword(input string) string {
+	// reconstruction sketch: mask the -p<password> token matched by
+	// mysqlPasswordRegex (the original body of this hunk was garbled)
+	if userPasswordRegex.MatchString(input) {
+		return mysqlPasswordRegex.ReplaceAllString(input, " -pxxxx")
+	}
+	return input
+}
+
+// IOLimitRateWithChunk copies up to chunkSize bytes from src to dst at a rate
+// of at most bwlimitMB MB/s; bwlimitMB == 0 copies everything unthrottled.
+// When src holds fewer than chunkSize bytes, written is the src size and err
+// is io.EOF. Requires the io and github.com/juju/ratelimit imports (see go.mod).
+func IOLimitRateWithChunk(dst io.Writer, src io.Reader, bwlimitMB int64, chunkSize int64) (written int64, err error) {
+	if bwlimitMB == 0 {
+		return io.Copy(dst, src)
+	}
+	bwlimit := bwlimitMB * 1024 * 1024
+	srcBucket := ratelimit.NewBucketWithRate(float64(bwlimit), bwlimit)
+	return io.CopyN(dst, ratelimit.Reader(src, srcBucket), chunkSize)
+}
diff --git a/dbm-services/common/go-pubpkg/cmutil/remove_file_limit.go b/dbm-services/common/go-pubpkg/cmutil/remove_file_limit.go
new file mode 100644
index 0000000000..5bd6058aff
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cmutil/remove_file_limit.go
@@ -0,0 +1,94 @@
+package cmutil
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"time"
+)
+
+// TruncateFile removes a file under an IO rate limit by truncating it chunk by chunk
+func TruncateFile(file string, bwlimitMB int) error {
+	if bwlimitMB == 0 {
+		if err := os.Remove(file); err != nil {
+			return err
+		}
+		return nil
+	}
+	f, err := os.OpenFile(file, os.O_RDWR, 0666)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	fi, err := os.Stat(file)
+	if err != nil {
+		return err
+	}
+	totalSize := fi.Size()
+	chunkSizeEverySec := bwlimitMB * 1024 * 1024
+	// number of truncations per second, >= 1, <= 1000
+	batchEverySec := 10
+
+	// bytes truncated per batch
+	chunkSize := chunkSizeEverySec / batchEverySec
+	// interval between batches, in milliseconds
+	chunkInterval := 1000 / batchEverySec
+
+	var endOffset int64 = totalSize
+	for {
+		endOffset -= int64(chunkSize)
+		if endOffset <= 0 {
+			break
+		}
+		if err := f.Truncate(endOffset); err != nil {
+			return err
+		}
+		time.Sleep(time.Duration(chunkInterval) * time.Millisecond)
+	}
+	f.Seek(0, 0)
+	f.Sync()
+	if err := os.Remove(file); err != nil {
+		return err
+	}
+	return nil
+}
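+
+// Example (illustrative; the path is an assumption): with bwlimitMB=100 the
+// loop above truncates ~10MB every 100ms, i.e. about 100MB/s of deletion bandwidth.
+//
+//	if err := TruncateFile("/data/dbbak/big_file.dat", 100); err != nil {
+//		fmt.Println(err)
+//	}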
+
+// TruncateDir removes the files under a directory with an IO rate limit, then removes the directory itself
+func TruncateDir(dirName string, bwlimitMB int) error {
+	LargeFile := int64(500 * 1024 * 1024) // files above 500MB are treated as large and removed via truncation
+	fs, err := ioutil.ReadDir(dirName)
+	if err != nil {
+		return err
+	}
+	for _, filePath := range fs {
+		fullFile := filepath.Join(dirName, filePath.Name())
+		if filePath.IsDir() {
+			fmt.Printf("path %s is dir, ignore\n", fullFile)
+			continue
+		} else {
+			f, e := os.Stat(fullFile)
+			if e != nil {
+				return e
+			}
+			if f.Size() > LargeFile {
+				// TruncateFile removes the file itself once truncation finishes,
+				// so a second os.Remove here would fail with "no such file"
+				if err := TruncateFile(fullFile, bwlimitMB); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	// remove the directory tree, including any remaining small files
+	return os.RemoveAll(dirName)
+}
diff --git a/dbm-services/common/go-pubpkg/cmutil/sizebytes.go b/dbm-services/common/go-pubpkg/cmutil/sizebytes.go
new file mode 100644
index 0000000000..31d7d7be0a
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cmutil/sizebytes.go
@@ -0,0 +1,81 @@
+package cmutil
+
+import (
+	"strings"
+	"unicode"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/cast"
+	"github.com/spf13/viper"
+)
+
+// ViperGetSizeInBytes reads the viper key and parses it as a byte size
+func ViperGetSizeInBytes(key string) int64 {
+	return ParseSizeInBytes(viper.GetString(key))
+}
+
+// ViperGetSizeInBytesE is like ViperGetSizeInBytes but returns the parse error
+func ViperGetSizeInBytesE(key string) (int64, error) {
+	return ParseSizeInBytesE(viper.GetString(key))
+}
+
+// ParseSizeInBytesE converts strings like 1GB or 12 mb into a number of bytes.
+// A missing b/B suffix is tolerated: it is appended before parsing.
+func ParseSizeInBytesE(sizeStr string) (int64, error) {
+	sizeStr = strings.TrimSpace(sizeStr)
+	if sizeStr == "" {
+		return -1, errors.New("empty size string")
+	}
+	if unicode.ToLower(rune(sizeStr[len(sizeStr)-1])) != 'b' {
+		sizeStr += "b"
+	}
+	lastChar := len(sizeStr) - 1
+	multiplier := uint(1)
+	if lastChar > 0 {
+		if sizeStr[lastChar] == 'b' || sizeStr[lastChar] == 'B' {
+			if lastChar > 1 {
+				switch unicode.ToLower(rune(sizeStr[lastChar-1])) {
+				case 'k':
+					multiplier = 1 << 10
+					sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+				case 'm':
+					multiplier = 1 << 20
+					sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+				case 'g':
+					multiplier = 1 << 30
+					sizeStr = strings.TrimSpace(sizeStr[:lastChar-1])
+				default:
+					multiplier = 1
+					sizeStr = strings.TrimSpace(strings.TrimSuffix(sizeStr, "b"))
+				}
+			} else if lastChar == 1 {
+				multiplier = 1
+				sizeStr = strings.TrimSpace(strings.TrimSuffix(sizeStr, "b"))
+			}
+		}
+	}
+	size, err := cast.ToInt64E(sizeStr)
+	if err != nil {
+		return -1, errors.Errorf("parse failed to bytes: %s", sizeStr)
+	} else if size < 0 {
+		return -2, errors.Errorf("bytes canot be negative: %s", sizeStr)
+	}
+	return safeMul(size, int64(multiplier)), nil
+}
+
+func safeMul(a, b int64) int64 {
+	c := a * b
+	if a > 1 && b > 1 && c/b != a {
+		return 0
+	}
+	return c
+}
+
+// ParseSizeInBytes converts sizes like gb or MB into a number of bytes; the
+// b/B suffix is case-insensitive and stands for a single byte.
+// Parse errors are ignored and yield 0.
+func ParseSizeInBytes(sizeStr string) int64 {
+	sizeBytes, err := ParseSizeInBytesE(sizeStr)
+	if err != nil {
+		sizeBytes = 0
+	} else if sizeBytes < 0 {
+		sizeBytes = 0
+	}
+	return sizeBytes
+}
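+
+// Examples (illustrative):
+//
+//	ParseSizeInBytes("1GB")  // 1 << 30 == 1073741824
+//	ParseSizeInBytes("12mb") // 12 << 20 == 12582912
+//	ParseSizeInBytes("500")  // bare numbers are taken as bytes: 500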
diff --git a/dbm-services/common/go-pubpkg/cmutil/slice.go b/dbm-services/common/go-pubpkg/cmutil/slice.go
new file mode 100644
index 0000000000..2955e4c0fb
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cmutil/slice.go
@@ -0,0 +1,190 @@
+package cmutil
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"github.com/spf13/cast"
+)
+
+// FilterOutStringSlice returns src with every element contained in filters removed
+func FilterOutStringSlice(src []string, filters []string) (dst []string) {
+	for _, v := range src {
+		if !StringsHas(filters, v) {
+			dst = append(dst, v)
+		}
+	}
+	return
+}
+
+// StringsHas check the []string contains the given element
+func StringsHas(ss []string, val string) bool {
+	for _, ele := range ss {
+		if ele == val {
+			return true
+		}
+	}
+	return false
+}
+
+// RemoveDuplicate removes duplicate elements using map key uniqueness
+func RemoveDuplicate(arr []string) []string {
+	resArr := make([]string, 0)
+	tmpMap := make(map[string]struct{})
+	for _, val := range arr {
+		// check whether val has been seen already
+		if _, ok := tmpMap[val]; !ok {
+			resArr = append(resArr, val)
+			tmpMap[val] = struct{}{}
+		}
+	}
+
+	return resArr
+}
+
+// IntSliceToStrSlice converts an int slice into a string slice
+func IntSliceToStrSlice(elems []int) (dst []string) {
+	for _, v := range elems {
+		dst = append(dst, strconv.Itoa(v))
+	}
+	return dst
+}
+
+// IntsJoin join int slice to string
+func IntsJoin(intList []int, sep string) string {
+	strList := make([]string, len(intList))
+	for i, e := range intList {
+		strList[i] = cast.ToString(e)
+	}
+	fmt.Println("intsjoin: ", strList)
+	return strings.Join(strList, sep)
+}
+
+// SplitGroup splits laxiconid into groups of subGroupLength elements; when
+// fewer than two full groups fit, the whole slice is returned as one group
+func SplitGroup(laxiconid []string, subGroupLength int64) [][]string {
+	max := int64(len(laxiconid))
+	var segmens = make([][]string, 0)
+	quantity := max / subGroupLength
+	remainder := max % subGroupLength
+	if quantity <= 1 {
+		segmens = append(segmens, laxiconid)
+		return segmens
+	}
+	i := int64(0)
+	for i = int64(0); i < quantity; i++ {
+		segmens = append(segmens, laxiconid[i*subGroupLength:(i+1)*subGroupLength])
+	}
+	if quantity == 0 || remainder != 0 {
+		segmens = append(segmens, laxiconid[i*subGroupLength:i*subGroupLength+remainder])
+	}
+	return segmens
+}
+
+// RemoveEmpty filters out blank strings
+func RemoveEmpty(input []string) []string {
+	var result []string
+	for _, item := range input {
+		if strings.TrimSpace(item) != "" {
+			result = append(result, item)
+		}
+	}
+	return result
+}
+
+// RemoveDuplicateIntElement removes duplicate ints, keeping first-seen order
+func RemoveDuplicateIntElement(arry []int) []int {
+	result := make([]int, 0, len(arry))
+	temp := map[int]struct{}{}
+	for _, item := range arry {
+		if _, ok := temp[item]; !ok {
+			temp[item] = struct{}{}
+			result = append(result, item)
+		}
+	}
+	return result
+}
+
+// ArryToInterfaceArry converts the given values into an []interface{}
+func ArryToInterfaceArry(arrys ...interface{}) []interface{} {
+	var result []interface{}
+	result = append(result, arrys...)
+	return result
+}
+
+// ElementNotInArry reports whether ele is absent from arry (blank entries are ignored)
+func ElementNotInArry(ele string, arry []string) bool {
+	if len(arry) <= 0 {
+		return true
+	}
+	for _, v := range arry {
+		if strings.TrimSpace(v) == "" {
+			continue
+		}
+		if strings.TrimSpace(v) == ele {
+			return false
+		}
+	}
+	return true
+}
+
+// ArrayInGroupsOf splits arr into groups of num elements each.
+/*
+Example 1:
+array: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10], positive integer: 2
+expected result: [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]]
+call: res := ArrayInGroupsOf(arr, 2)
+*/
+func ArrayInGroupsOf(arr []string, num int64) [][]string {
+	max := int64(len(arr))
+	// if the array is no larger than the group size, return it as a single group
+	if max <= num {
+		return [][]string{arr}
+	}
+	// compute how many groups the array should be split into
+	var quantity int64
+	if max%num == 0 {
+		quantity = max / num
+	} else {
+		quantity = (max / num) + 1
+	}
+	// the resulting two-dimensional slice
+	var segments = make([][]string, 0)
+	// start and end indexes used to slice each group
+	var start, end, i int64
+	for i = 1; i <= quantity; i++ {
+		end = i * num
+		if i != quantity {
+			segments = append(segments, arr[start:end])
+		} else {
+			segments = append(segments, arr[start:])
+		}
+		start = i * num
+	}
+	return segments
+}
+
+// HasElem reports whether slice (a slice or array) contains elem, compared with reflect.DeepEqual
+func HasElem(elem interface{}, slice interface{}) bool {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Println("HasElem error", err)
+		}
+	}()
+	arrV := reflect.ValueOf(slice)
+	if arrV.Kind() == reflect.Slice || arrV.Kind() == reflect.Array {
+		for i := 0; i < arrV.Len(); i++ {
+			// XXX - panics if slice element points to an unexported struct field
+			// see https://golang.org/pkg/reflect/#Value.Interface
+			if reflect.DeepEqual(arrV.Index(i).Interface(), elem) {
+				return true
+			}
+		}
+	}
+	return false
+}
diff --git a/dbm-services/common/go-pubpkg/cmutil/str.go b/dbm-services/common/go-pubpkg/cmutil/str.go
new file mode 100644
index 0000000000..ac88f1393b
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cmutil/str.go
@@ -0,0 +1,49 @@
+package cmutil
+
+import (
+	"math/rand"
+	"strings"
+)
+
+// IsEmpty reports whether str is blank after trimming whitespace
+func IsEmpty(str string) bool {
+	return strings.TrimSpace(str) == ""
+}
+
+// IsNotEmpty is the negation of IsEmpty
+func IsNotEmpty(str string) bool {
+	return !IsEmpty(str)
+}
+
+var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
+// RandStr returns a random string of n ASCII letters; it uses math/rand and is not suitable for secrets
+func RandStr(n int) string {
+	b := make([]rune, n)
+	for i := range b {
+		b[i] = letters[rand.Intn(len(letters))]
+	}
+	return string(b)
+}
+
+// SplitAnyRune splits s by any single rune contained in seps, e.g.
+// util.SplitAnyRune("a,b c", ", ")
+// if s is empty, return [], not [""]
+func SplitAnyRune(s string, seps string) []string {
+	splitter := func(r rune) bool {
+		return strings.ContainsRune(seps, r)
+	}
+	return strings.FieldsFunc(s, splitter)
+}
+
+// SplitAnyRuneTrim splits s by any rune in seps, trims each element and drops empty ones
+func SplitAnyRuneTrim(s string, seps string) []string {
+	ss := SplitAnyRune(s, seps)
+	result := make([]string, 0, len(ss))
+	for _, el := range ss {
+		if el = strings.TrimSpace(el); el != "" {
+			result = append(result, el)
+		}
+		// whitespace-only elements are skipped
+	}
+	return result
+}
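
A short usage sketch for these string helpers (assuming the same cmutil import path as above; expected outputs shown as comments):

package main

import (
	"fmt"

	"dbm-services/common/go-pubpkg/cmutil"
)

func main() {
	fmt.Println(cmutil.SplitAnyRune("a,b c", ", "))       // [a b c]
	fmt.Println(cmutil.SplitAnyRuneTrim(" a ;b; ", "; ")) // [a b]
	fmt.Println(cmutil.IsEmpty("   "))                    // true
	fmt.Println(len(cmutil.RandStr(8)))                   // 8
}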
diff --git a/dbm-services/common/go-pubpkg/cmutil/util.go b/dbm-services/common/go-pubpkg/cmutil/util.go
new file mode 100644
index 0000000000..87d62d0866
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/cmutil/util.go
@@ -0,0 +1,35 @@
+// Package cmutil provides common utility helpers shared across dbm-services
+package cmutil
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// RetryConfig controls retry behaviour
+type RetryConfig struct {
+	Times     int           // number of attempts
+	DelayTime time.Duration // delay before each attempt
+}
+
+// RetriesExceeded is the message wrapped around the last error once all attempts fail
+const RetriesExceeded = "retries exceeded"
+
+// Retry runs f up to r.Times times and returns nil on the first success;
+// it sleeps r.DelayTime before every attempt, including the first one
+func Retry(r RetryConfig, f func() error) (err error) {
+	for i := 0; i < r.Times; i++ {
+		time.Sleep(r.DelayTime)
+		if err = f(); err == nil {
+			return nil
+		}
+		logger.Warn("retry %d failed: %s", i, err.Error())
+	}
+	if err != nil {
+		return errors.Wrap(err, RetriesExceeded)
+	}
+	return
+}
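
A minimal usage sketch for Retry (pingDB is a hypothetical probe standing in for a real health check, and the timing values are illustrative):

package main

import (
	"fmt"
	"time"

	"dbm-services/common/go-pubpkg/cmutil"
)

// pingDB stands in for a real health check
func pingDB() error { return nil }

func main() {
	err := cmutil.Retry(
		cmutil.RetryConfig{Times: 3, DelayTime: 2 * time.Second},
		pingDB,
	)
	if err != nil {
		fmt.Println("giving up:", err) // wrapped with "retries exceeded"
	}
}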
diff --git a/dbm-services/common/go-pubpkg/go.mod b/dbm-services/common/go-pubpkg/go.mod
new file mode 100644
index 0000000000..8f6bfba3d0
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/go.mod
@@ -0,0 +1,48 @@
+module dbm-services/common/go-pubpkg
+
+go 1.19
+
+require (
+	github.com/go-playground/locales v0.14.1
+	github.com/go-playground/universal-translator v0.18.1
+	github.com/go-playground/validator/v10 v10.12.0
+	github.com/go-sql-driver/mysql v1.7.1
+	github.com/golang/glog v1.1.1
+	github.com/google/go-querystring v1.1.0
+	github.com/jmoiron/sqlx v1.3.5
+	github.com/juju/ratelimit v1.0.2
+	github.com/pkg/errors v0.9.1
+	github.com/robfig/cron/v3 v3.0.1
+	github.com/spf13/cast v1.5.0
+	github.com/spf13/viper v1.15.0
+	github.com/stretchr/testify v1.8.2
+	go.uber.org/zap v1.24.0
+	gopkg.in/natefinch/lumberjack.v2 v2.2.1
+)
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/leodido/go-urn v1.2.3 // indirect
+	github.com/lib/pq v1.10.0 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/mattn/go-sqlite3 v1.14.16 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.7 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	github.com/spf13/afero v1.9.5 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/subosito/gotenv v1.4.2 // indirect
+	go.uber.org/atomic v1.9.0 // indirect
+	go.uber.org/goleak v1.1.12 // indirect
+	go.uber.org/multierr v1.8.0 // indirect
+	golang.org/x/crypto v0.8.0 // indirect
+	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/dbm-services/common/go-pubpkg/go.sum b/dbm-services/common/go-pubpkg/go.sum
new file mode 100644
index 0000000000..9dbbe34a8d
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/go.sum
@@ -0,0 +1,538 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI=
+github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.1.1 h1:jxpi2eWoU84wbX9iIEyAeeoac3FLuifZpY9tcNUD9kw=
+github.com/golang/glog v1.1.1/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
+github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI=
+github.com/juju/ratelimit v1.0.2/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA=
+github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E=
+github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
+github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us=
+github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
+github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
+github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
+github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
+go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/dbm-services/common/go-pubpkg/logger/cst.go b/dbm-services/common/go-pubpkg/logger/cst.go
new file mode 100644
index 0000000000..265c9512a0
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/logger/cst.go
@@ -0,0 +1,29 @@
+package logger
+
+import (
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// Level TODO
+type Level = zapcore.Level
+
+const (
+	// InfoLevel TODO
+	InfoLevel = zap.InfoLevel
+	// WarnLevel TODO
+	WarnLevel = zap.WarnLevel
+	// ErrorLevel TODO
+	ErrorLevel = zap.ErrorLevel
+	// DPanicLevel TODO
+	DPanicLevel = zap.DPanicLevel
+	// PanicLevel TODO
+	PanicLevel = zap.PanicLevel
+	// FatalLevel TODO
+	FatalLevel = zap.FatalLevel
+	// DebugLevel TODO
+	DebugLevel = zap.DebugLevel
+)
+
+// DatetimeUnion is a compact datetime layout built on Go's reference time
+// 2006-01-02 15:04:05
+const DatetimeUnion = "20060102150405"
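
A minimal usage sketch for the layout constant above (assuming the go.work
setup later in this patch so the import path resolves):

    package main

    import (
        "fmt"
        "time"

        "dbm-services/common/go-pubpkg/logger"
    )

    func main() {
        // Format follows Go's reference time 2006-01-02 15:04:05,
        // so this prints e.g. 20230529151430
        fmt.Println(time.Now().Format(logger.DatetimeUnion))
    }
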
diff --git a/dbm-services/common/go-pubpkg/logger/custom_field_test.go b/dbm-services/common/go-pubpkg/logger/custom_field_test.go
new file mode 100644
index 0000000000..56f59e2818
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/logger/custom_field_test.go
@@ -0,0 +1,66 @@
+package logger
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+func TestCustomField(t *testing.T) {
+	var ops = []TreeOption{
+		{
+			FileName: "access.log",
+			Rpt: RotateOptions{
+				MaxSize:    1,
+				MaxAge:     1,
+				MaxBackups: 3,
+				Compress:   true,
+			},
+			Lef: func(level zapcore.Level) bool {
+				return level <= zap.InfoLevel
+			},
+		},
+		{
+			FileName: "error.log",
+			Rpt: RotateOptions{
+				MaxSize:    1,
+				MaxAge:     1,
+				MaxBackups: 3,
+				Compress:   true,
+			},
+			Lef: func(level zapcore.Level) bool {
+				return level > zap.InfoLevel
+			},
+		},
+	}
+
+	logger := NewRotate(ops)
+	ResetDefault(logger)
+	for i := 0; i < 2000000; i++ {
+		field := &CustomField{
+			UID:    fmt.Sprintf("%d", i),
+			NodeID: fmt.Sprintf("node_id_%d", i),
+			IP:     "127.0.0.1",
+		}
+		// go through the zap logger directly: the package-level Warn is
+		// printf-style and would mangle a structured field argument
+		Default().Zap.Warn("testing warn", zap.Inline(field))
+	}
+
+	assert.FileExists(t, "access.log")
+	assert.FileExists(t, "error.log")
+}
+
+type CustomField struct {
+	UID    string
+	NodeID string
+	IP     string
+}
+
+func (f CustomField) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+	enc.AddString("uid", f.UID)
+	enc.AddString("node_id", f.NodeID)
+	enc.AddString("ip", f.IP)
+	return nil
+}
diff --git a/dbm-services/common/go-pubpkg/logger/default.go b/dbm-services/common/go-pubpkg/logger/default.go
new file mode 100644
index 0000000000..25fc93552c
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/logger/default.go
@@ -0,0 +1,63 @@
+package logger
+
+import (
+	"io"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// New builds a Logger that writes to writer at the given level.
+// format selects JSON output when true and console output otherwise;
+// every extMap entry is attached to each log entry as a default field.
+func New(writer io.Writer, format bool, level Level, extMap ...map[string]string) *Logger {
+	if writer == nil {
+		panic("the writer is nil")
+	}
+
+	cfg := zap.NewProductionConfig()
+	cfg.EncoderConfig = zapcore.EncoderConfig{
+		MessageKey:     "msg",
+		LevelKey:       "levelname",
+		TimeKey:        "time",
+		NameKey:        "name",
+		FunctionKey:    "funcName",
+		StacktraceKey:  "stacktrace",
+		CallerKey:      "caller",
+		SkipLineEnding: false,
+		LineEnding:     zapcore.DefaultLineEnding,
+		EncodeLevel:    zapcore.LowercaseLevelEncoder,
+		EncodeTime:     zapcore.RFC3339TimeEncoder,
+		EncodeDuration: zapcore.SecondsDurationEncoder,
+		EncodeName:     zapcore.FullNameEncoder,
+		EncodeCaller:   zapcore.ShortCallerEncoder,
+	}
+
+	encoder := NewConsoleEncoder(cfg.EncoderConfig)
+	if format {
+		encoder = NewEncoder(cfg.EncoderConfig)
+	}
+	core := zapcore.NewCore(
+		encoder,
+		zapcore.AddSync(writer),
+		level,
+	)
+	logger := &Logger{
+		Zap:   zap.New(core),
+		Level: level,
+	}
+
+	// attach the default fields passed in via extMap
+	fs := make([]zap.Field, 0)
+	for _, ext := range extMap {
+		for key, value := range ext {
+			fs = append(fs, zap.String(key, value))
+		}
+	}
+	logger = logger.With(fs...)
+	return logger
+}
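
A hedged usage sketch for New; the field values are illustrative:

    package main

    import (
        "os"

        "dbm-services/common/go-pubpkg/logger"
    )

    func main() {
        // JSON output at info level, with a default field on every entry
        l := logger.New(os.Stdout, true, logger.InfoLevel,
            map[string]string{"uid": "demo-uid"})
        l.Info("instance %s installed", "es-node-1")
        _ = l.Sync()
    }
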
diff --git a/dbm-services/common/go-pubpkg/logger/default_test.go b/dbm-services/common/go-pubpkg/logger/default_test.go
new file mode 100644
index 0000000000..36fe2b330f
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/logger/default_test.go
@@ -0,0 +1,19 @@
+package logger
+
+import (
+	"os"
+	"testing"
+)
+
+func TestDefault(t *testing.T) {
+	file, err := os.OpenFile("./access.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
+	if err != nil {
+		panic(err)
+	}
+
+	logger := New(file, false, InfoLevel)
+	ResetDefault(logger)
+	defer Sync()
+
+	Info("testing default info")
+}
diff --git a/dbm-services/common/go-pubpkg/logger/encoder.go b/dbm-services/common/go-pubpkg/logger/encoder.go
new file mode 100644
index 0000000000..7ae3de7c12
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/logger/encoder.go
@@ -0,0 +1,46 @@
+package logger
+
+import (
+	"runtime"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/buffer"
+	"go.uber.org/zap/zapcore"
+)
+
+// CallerEncoder decorates a zapcore.Encoder with caller metadata fields
+type CallerEncoder struct {
+	zapcore.Encoder
+}
+
+// NewEncoder returns a JSON-encoding CallerEncoder
+func NewEncoder(cfg zapcore.EncoderConfig) zapcore.Encoder {
+	return CallerEncoder{
+		Encoder: zapcore.NewJSONEncoder(cfg),
+	}
+}
+
+// NewConsoleEncoder returns a console-encoding CallerEncoder
+func NewConsoleEncoder(cfg zapcore.EncoderConfig) zapcore.Encoder {
+	return CallerEncoder{
+		Encoder: zapcore.NewConsoleEncoder(cfg),
+	}
+}
+
+// Clone implements zapcore.Encoder
+func (enc CallerEncoder) Clone() zapcore.Encoder {
+	return CallerEncoder{
+		enc.Encoder.Clone(),
+	}
+}
+
+// EncodeEntry appends caller fields (funcName, lineno, pathname) resolved at
+// encode time, then delegates to the wrapped encoder.
+func (enc CallerEncoder) EncodeEntry(entry zapcore.Entry, fields []zapcore.Field) (*buffer.Buffer, error) {
+	// skip 5 frames to reach the call site of the package-level helpers;
+	// guard against an unresolvable frame instead of dereferencing nil
+	funcName := "unknown"
+	pc, file, line, ok := runtime.Caller(5)
+	if ok {
+		if fn := runtime.FuncForPC(pc); fn != nil {
+			funcName = fn.Name()
+		}
+	}
+	fields = append(fields, zap.String("funcName", funcName))
+	fields = append(fields, zap.Int("lineno", line))
+	fields = append(fields, zap.String("pathname", file))
+	fields = append(fields, zap.String("tag", "actuator"))
+	return enc.Encoder.EncodeEntry(entry, fields)
+}
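
Note that the fixed skip depth assumes entries flow through the package-level
helpers (Info/Warn/... in field.go); calling Logger.Zap directly shortens the
chain and the reported caller drifts. A minimal sketch of the intended path:

    package main

    import (
        "os"

        "dbm-services/common/go-pubpkg/logger"
    )

    func main() {
        logger.ResetDefault(logger.New(os.Stdout, true, logger.InfoLevel))
        // funcName/lineno/pathname should point at this call site
        logger.Info("caller metadata check")
    }
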
diff --git a/dbm-services/common/go-pubpkg/logger/field.go b/dbm-services/common/go-pubpkg/logger/field.go
new file mode 100644
index 0000000000..0ce0d221c7
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/logger/field.go
@@ -0,0 +1,146 @@
+package logger
+
+import (
+	"os"
+
+	"go.uber.org/zap"
+)
+
+var (
+	// Skip TODO
+	Skip = zap.Skip
+	// Binary TODO
+	Binary = zap.Binary
+	// Bool TODO
+	Bool = zap.Bool
+	// Boolp TODO
+	Boolp = zap.Boolp
+	// ByteString TODO
+	ByteString = zap.ByteString
+	// Complex128 TODO
+	Complex128 = zap.Complex128
+	// Complex128p TODO
+	Complex128p = zap.Complex128p
+	// Complex64 TODO
+	Complex64 = zap.Complex64
+	// Complex64p TODO
+	Complex64p = zap.Complex64p
+	// Float64 TODO
+	Float64 = zap.Float64
+	// Float64p TODO
+	Float64p = zap.Float64p
+	// Float32 TODO
+	Float32 = zap.Float32
+	// Float32p TODO
+	Float32p = zap.Float32p
+	// Int TODO
+	Int = zap.Int
+	// Intp TODO
+	Intp = zap.Intp
+	// Int64 TODO
+	Int64 = zap.Int64
+	// Int64p TODO
+	Int64p = zap.Int64p
+	// Int32 TODO
+	Int32 = zap.Int32
+	// Int32p TODO
+	Int32p = zap.Int32p
+	// Int16 TODO
+	Int16 = zap.Int16
+	// Int16p TODO
+	Int16p = zap.Int16p
+	// Int8 TODO
+	Int8 = zap.Int8
+	// Int8p TODO
+	Int8p = zap.Int8p
+	// String TODO
+	String = zap.String
+	// Stringp TODO
+	Stringp = zap.Stringp
+	// Uint TODO
+	Uint = zap.Uint
+	// Uintp TODO
+	Uintp = zap.Uintp
+	// Uint64 TODO
+	Uint64 = zap.Uint64
+	// Uint64p TODO
+	Uint64p = zap.Uint64p
+	// Uint32 TODO
+	Uint32 = zap.Uint32
+	// Uint32p TODO
+	Uint32p = zap.Uint32p
+	// Uint16 TODO
+	Uint16 = zap.Uint16
+	// Uint16p TODO
+	Uint16p = zap.Uint16p
+	// Uint8 TODO
+	Uint8 = zap.Uint8
+	// Uint8p TODO
+	Uint8p = zap.Uint8p
+	// Uintptr TODO
+	Uintptr = zap.Uintptr
+	// Uintptrp TODO
+	Uintptrp = zap.Uintptrp
+	// Reflect TODO
+	Reflect = zap.Reflect
+	// Namespace TODO
+	Namespace = zap.Namespace
+	// Stringer TODO
+	Stringer = zap.Stringer
+	// Time TODO
+	Time = zap.Time
+	// Timep TODO
+	Timep = zap.Timep
+	// Stack TODO
+	Stack = zap.Stack
+	// StackSkip TODO
+	StackSkip = zap.StackSkip
+	// Duration TODO
+	Duration = zap.Duration
+	// Durationp TODO
+	Durationp = zap.Durationp
+	// Any TODO
+	Any = zap.Any
+
+	// Info TODO
+	Info = std.Info
+	// Warn TODO
+	Warn = std.Warn
+	// Error TODO
+	Error = std.Error
+	// DPanic TODO
+	DPanic = std.DPanic
+	// Panic TODO
+	Panic = std.Panic
+	// Fatal TODO
+	Fatal = std.Fatal
+	// Debug TODO
+	Debug = std.Debug
+)
+
+var std = New(os.Stderr, false, InfoLevel)
+
+// Default returns the package-level default logger
+func Default() *Logger {
+	return std
+}
+
+// ResetDefault swaps the default logger and rebinds the package-level helpers
+func ResetDefault(l *Logger) {
+	std = l
+	Info = std.Info
+	Warn = std.Warn
+	Error = std.Error
+	DPanic = std.DPanic
+	Panic = std.Panic
+	Fatal = std.Fatal
+	Debug = std.Debug
+}
+
+// Sync flushes the default logger, if any
+func Sync() error {
+	if std != nil {
+		return std.Sync()
+	}
+	return nil
+}
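
The re-exported field constructors are meant for Logger.With or zap.Inline;
the package-level Info/Warn helpers are printf-style and do not take fields.
A sketch:

    package main

    import (
        "os"

        "dbm-services/common/go-pubpkg/logger"
    )

    func main() {
        l := logger.New(os.Stderr, true, logger.InfoLevel)
        // attach structured fields once; every subsequent entry carries them
        l = l.With(logger.String("service", "dbactuator"), logger.Int("shard", 3))
        l.Info("start job %s", "backup")
    }
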
diff --git a/dbm-services/common/go-pubpkg/logger/log.go b/dbm-services/common/go-pubpkg/logger/log.go
new file mode 100644
index 0000000000..afe6ee949d
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/logger/log.go
@@ -0,0 +1,69 @@
+package logger
+
+import (
+	"fmt"
+
+	"go.uber.org/zap"
+)
+
+// Field is an alias of zap.Field
+type Field = zap.Field
+
+// Logger wraps a zap.Logger with printf-style level methods and optional default fields
+type Logger struct {
+	Zap    *zap.Logger
+	Level  Level
+	Fields []zap.Field
+}
+
+// Debug logs a formatted message at debug level
+func (l *Logger) Debug(format string, args ...interface{}) {
+	l.Zap.Debug(fmt.Sprintf(format, args...), l.Fields...)
+}
+
+// Info logs a formatted message at info level
+func (l *Logger) Info(format string, args ...interface{}) {
+	l.Zap.Info(fmt.Sprintf(format, args...), l.Fields...)
+}
+
+// Warn logs a formatted message at warn level
+func (l *Logger) Warn(format string, args ...interface{}) {
+	l.Zap.Warn(fmt.Sprintf(format, args...), l.Fields...)
+}
+
+// Error logs a formatted message at error level
+func (l *Logger) Error(format string, args ...interface{}) {
+	l.Zap.Error(fmt.Sprintf(format, args...), l.Fields...)
+}
+
+// DPanic logs a formatted message at dpanic level
+func (l *Logger) DPanic(format string, args ...interface{}) {
+	l.Zap.DPanic(fmt.Sprintf(format, args...), l.Fields...)
+}
+
+// Panic logs a formatted message, then panics
+func (l *Logger) Panic(format string, args ...interface{}) {
+	l.Zap.Panic(fmt.Sprintf(format, args...), l.Fields...)
+}
+
+// Fatal logs a formatted message, then calls os.Exit(1)
+func (l *Logger) Fatal(format string, args ...interface{}) {
+	l.Zap.Fatal(fmt.Sprintf(format, args...), l.Fields...)
+}
+
+// With returns a child logger that adds the given fields to every entry,
+// leaving the receiver untouched (matching zap's With contract)
+func (l *Logger) With(fields ...Field) *Logger {
+	return &Logger{
+		Zap:    l.Zap.With(fields...),
+		Level:  l.Level,
+		Fields: l.Fields,
+	}
+}
+
+// Sync flushes any buffered log entries
+func (l *Logger) Sync() error {
+	return l.Zap.Sync()
+}
+
+// GetLogger returns zap's process-global logger (zap.L()), which is
+// independent of this package's default logger
+func GetLogger() *zap.Logger {
+	return zap.L()
+}
diff --git a/dbm-services/common/go-pubpkg/logger/logger.go b/dbm-services/common/go-pubpkg/logger/logger.go
new file mode 100644
index 0000000000..3dc273e398
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/logger/logger.go
@@ -0,0 +1,2 @@
+// Package logger TODO
+package logger
diff --git a/dbm-services/common/go-pubpkg/logger/rotate.go b/dbm-services/common/go-pubpkg/logger/rotate.go
new file mode 100644
index 0000000000..1a7a34c809
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/logger/rotate.go
@@ -0,0 +1,65 @@
+package logger
+
+import (
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"gopkg.in/natefinch/lumberjack.v2"
+)
+
+// RotateOptions configures lumberjack rotation: MaxSize is in MB, MaxAge in days
+type RotateOptions struct {
+	MaxSize    int
+	MaxAge     int
+	MaxBackups int
+	Compress   bool
+}
+
+// TreeOption binds one output file to a rotation policy and a level filter
+type TreeOption struct {
+	FileName string
+	Rpt      RotateOptions
+	Lef      zap.LevelEnablerFunc
+}
+
+// NewRotate builds a tee Logger that writes each level range to its own rotating file
+func NewRotate(ops []TreeOption, opts ...zap.Option) *Logger {
+	var cores []zapcore.Core
+	cfg := zap.NewProductionConfig()
+	cfg.EncoderConfig = zapcore.EncoderConfig{
+		MessageKey:     "msg",
+		LevelKey:       "level",
+		TimeKey:        "time",
+		NameKey:        "name",
+		CallerKey:      "caller",
+		FunctionKey:    "func",
+		StacktraceKey:  "stacktrace",
+		SkipLineEnding: false,
+		LineEnding:     zapcore.DefaultLineEnding,
+		EncodeLevel:    zapcore.LowercaseLevelEncoder,
+		EncodeTime:     zapcore.ISO8601TimeEncoder,
+		EncodeDuration: zapcore.SecondsDurationEncoder,
+		EncodeCaller:   zapcore.ShortCallerEncoder,
+		EncodeName:     zapcore.FullNameEncoder,
+	}
+	for _, op := range ops {
+		op := op // pin the loop variable: each closure must capture its own op (Go < 1.22)
+		lv := zap.LevelEnablerFunc(func(level zapcore.Level) bool {
+			return op.Lef(Level(level))
+		})
+		lj := zapcore.AddSync(&lumberjack.Logger{
+			Filename:   op.FileName,
+			MaxSize:    op.Rpt.MaxSize,
+			MaxBackups: op.Rpt.MaxBackups,
+			MaxAge:     op.Rpt.MaxAge,
+			LocalTime:  true,
+			Compress:   op.Rpt.Compress, // honor the per-file option instead of hardcoding
+		})
+
+		core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg.EncoderConfig), zapcore.AddSync(lj), lv)
+		cores = append(cores, core)
+	}
+	logger := &Logger{
+		Zap: zap.New(zapcore.NewTee(cores...), opts...),
+	}
+
+	return logger
+}
diff --git a/dbm-services/common/go-pubpkg/logger/rotate_test.go b/dbm-services/common/go-pubpkg/logger/rotate_test.go
new file mode 100644
index 0000000000..3561d9af9d
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/logger/rotate_test.go
@@ -0,0 +1,48 @@
+package logger
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+func TestRotate(t *testing.T) {
+	var ops = []TreeOption{
+		{
+			FileName: "access.log",
+			Rpt: RotateOptions{
+				MaxSize:    1,
+				MaxAge:     1,
+				MaxBackups: 3,
+				Compress:   true,
+			},
+			Lef: func(level zapcore.Level) bool {
+				return level <= InfoLevel
+			},
+		},
+		{
+			FileName: "error.log",
+			Rpt: RotateOptions{
+				MaxSize:    1,
+				MaxAge:     1,
+				MaxBackups: 3,
+				Compress:   true,
+			},
+			Lef: func(level zapcore.Level) bool {
+				return level > zap.InfoLevel
+			},
+		},
+	}
+
+	logger := NewRotate(ops)
+	ResetDefault(logger)
+	for i := 0; i < 2000000; i++ {
+		// the package-level helpers are printf-style; use the zap logger for structured fields
+		Default().Zap.Info("testing ok", zap.String("tag", "test"), zap.Int("major version", 1))
+		Default().Zap.Error("testing crash", zap.String("tag", "test"), zap.Int("major version", 1))
+	}
+
+	assert.FileExists(t, "access.log")
+	assert.FileExists(t, "error.log")
+}
diff --git a/dbm-services/common/go-pubpkg/reportlog/report.go b/dbm-services/common/go-pubpkg/reportlog/report.go
new file mode 100644
index 0000000000..1ba7cb62cd
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/reportlog/report.go
@@ -0,0 +1,91 @@
+package reportlog
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"encoding/json"
+	"log"
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+	"gopkg.in/natefinch/lumberjack.v2"
+)
+
+// Reporter writes JSON-line report records to a rotating log file
+type Reporter struct {
+	ReportPath string `json:"report_path"`
+	Filename   string `json:"filename"`
+	LogOpt     *LoggerOption
+	Disable    bool
+	log        *log.Logger
+}
+
+// LoggerOption controls rotation of the report file
+type LoggerOption struct {
+	MaxSize    int
+	MaxBackups int
+	MaxAge     int
+	Compress   bool
+}
+
+func defaultLoggerOpt() *LoggerOption {
+	return &LoggerOption{
+		MaxSize:    5,  // MB
+		MaxBackups: 10, // num
+		MaxAge:     30, // days
+		Compress:   false,
+	}
+}
+
+// Println marshals v as JSON and writes it, regardless of the Disable flag
+func (r *Reporter) Println(v interface{}) {
+	bs, _ := json.Marshal(v)
+	r.log.Println(string(bs))
+}
+
+// NewReporter creates a Reporter that writes JSON lines to reportDir/filename,
+// creating reportDir when it does not exist yet
+func NewReporter(reportDir, filename string, logOpt *LoggerOption) (*Reporter, error) {
+	reporter := &Reporter{
+		log: &log.Logger{},
+	}
+	if reportDir == "" {
+		return nil, errors.Errorf("invalid reportDir:%s", reportDir)
+	} else if !cmutil.IsDirectory(reportDir) {
+		if err := os.MkdirAll(reportDir, 0755); err != nil {
+			return nil, errors.Wrap(err, "create report path")
+		}
+	}
+	/*
+		statusFile := "binlog_status.log"
+		statusLogger := &lumberjack.Logger{
+			Filename:   filepath.Join(viper.GetString("report.filepath"), statusFile),
+			MaxSize:    5, // MB
+			MaxBackups: 10,
+			MaxAge:     30,   // days
+			Compress:   true, // disabled by default
+		}
+		statusReporter := new(log.Logger)
+		statusReporter.SetOutput(statusLogger)
+		reporter.Status = &logPrint{log: statusReporter}
+	*/
+	if logOpt == nil {
+		logOpt = defaultLoggerOpt()
+	}
+	resultLogger := &lumberjack.Logger{
+		Filename:   filepath.Join(reportDir, filename),
+		MaxSize:    logOpt.MaxSize,
+		MaxBackups: logOpt.MaxBackups,
+		MaxAge:     logOpt.MaxAge,
+		Compress:   logOpt.Compress,
+	}
+	reporter.log.SetOutput(resultLogger)
+	return reporter, nil
+}
+
+// Print marshals v as JSON and writes it unless the reporter is disabled
+func (r *Reporter) Print(v interface{}) {
+	bs, _ := json.Marshal(v)
+	if !r.Disable {
+		r.log.Println(string(bs))
+	}
+}
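
A usage sketch for the reporter; the directory and file names are illustrative:

    package main

    import (
        "log"

        "dbm-services/common/go-pubpkg/reportlog"
    )

    func main() {
        // a nil LoggerOption falls back to defaultLoggerOpt (5 MB, 10 backups, 30 days)
        r, err := reportlog.NewReporter("/tmp/dbm-report", "binlog_result.log", nil)
        if err != nil {
            log.Fatal(err)
        }
        r.Print(map[string]string{"event": "rotate", "status": "ok"})
    }
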
diff --git a/dbm-services/common/go-pubpkg/reportlog/reportlog.go b/dbm-services/common/go-pubpkg/reportlog/reportlog.go
new file mode 100644
index 0000000000..aa55b2b146
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/reportlog/reportlog.go
@@ -0,0 +1,2 @@
+// Package reportlog TODO
+package reportlog
diff --git a/dbm-services/common/go-pubpkg/timeutil/duration.go b/dbm-services/common/go-pubpkg/timeutil/duration.go
new file mode 100644
index 0000000000..0652a21eca
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/timeutil/duration.go
@@ -0,0 +1,67 @@
+package timeutil
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+)
+
+// Duration wraps time.Duration to support flexible JSON (un)marshalling
+type Duration struct {
+	time.Duration
+}
+
+// UnmarshalJSON accepts either a JSON number (nanoseconds) or a duration string like "1h30m"
+func (d *Duration) UnmarshalJSON(b []byte) error {
+	var unmarshalledJson interface{}
+
+	err := json.Unmarshal(b, &unmarshalledJson)
+	if err != nil {
+		return err
+	}
+
+	switch value := unmarshalledJson.(type) {
+	case float64:
+		d.Duration = time.Duration(value)
+	case string:
+		d.Duration, err = time.ParseDuration(value)
+		if err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("invalid duration: %#v", unmarshalledJson)
+	}
+
+	return nil
+}
+
+// String implements fmt.Stringer
+func (d *Duration) String() string {
+	return d.Duration.String()
+}
+
+// IsZeroDuration reports whether the duration is zero
+func (d *Duration) IsZeroDuration() bool {
+	return d.Duration == 0
+}
+
+// Return unwraps to the underlying time.Duration
+func (d *Duration) Return() time.Duration {
+	return d.Duration
+}
+
+// NewDuration wraps a time.Duration
+func NewDuration(t time.Duration) Duration {
+	return Duration{t}
+}
+
+// CompareDuration returns 1 if t1 > t2, -1 if t1 < t2, and 0 if they are equal
+func CompareDuration(t1, t2 Duration) int {
+	if t1.Duration > t2.Duration {
+		return 1
+	} else if t1.Duration < t2.Duration {
+		return -1
+	} else {
+		return 0
+	}
+}
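
UnmarshalJSON accepts either a JSON number (nanoseconds) or a duration string,
which keeps config files flexible. A minimal sketch:

    package main

    import (
        "encoding/json"
        "fmt"

        "dbm-services/common/go-pubpkg/timeutil"
    )

    type conf struct {
        Timeout timeutil.Duration `json:"timeout"`
    }

    func main() {
        var a, b conf
        _ = json.Unmarshal([]byte(`{"timeout": "1h30m"}`), &a)
        _ = json.Unmarshal([]byte(`{"timeout": 5000000000}`), &b) // nanoseconds
        fmt.Println(a.Timeout.Duration, b.Timeout.Duration)      // 1h30m0s 5s
    }
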
diff --git a/dbm-services/common/go-pubpkg/timeutil/duration_ext.go b/dbm-services/common/go-pubpkg/timeutil/duration_ext.go
new file mode 100644
index 0000000000..9c3067989e
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/timeutil/duration_ext.go
@@ -0,0 +1,216 @@
+package timeutil
+
+import (
+	"errors"
+	"time"
+
+	"github.com/spf13/cast"
+	"github.com/spf13/viper"
+)
+
+// modified from: https://gist.github.com/xhit/79c9e137e1cfe332076cdda9f5e24699
+
+// ViperGetDuration reads the config key s via viper and parses it as an extended duration
+func ViperGetDuration(s string) time.Duration {
+	return ToDurationExt(viper.GetString(s))
+}
+
+// ViperGetDurationE is like ViperGetDuration but also returns the parse error
+func ViperGetDurationE(s string) (time.Duration, error) {
+	return ToDurationExtE(viper.GetString(s))
+}
+
+// ToDurationExt parses an extended duration that additionally accepts 1d / 1w style units
+func ToDurationExt(s string) time.Duration {
+	d, _ := parseDuration(s)
+	return d
+}
+
+// ToDurationExtE is like ToDurationExt but also returns the parse error
+func ToDurationExtE(s string) (time.Duration, error) {
+	return parseDuration(s)
+}
+
+// ToDuration uses the built-in duration parsing, which does not accept 1d style units
+func ToDuration(s string) time.Duration {
+	return cast.ToDuration(s)
+}
+
+// ToDurationE is like ToDuration but also returns the parse error
+func ToDurationE(s string) (time.Duration, error) {
+	return cast.ToDurationE(s)
+}
+
+var unitMap = map[string]int64{
+	"ns": int64(time.Nanosecond),
+	"us": int64(time.Microsecond),
+	"µs": int64(time.Microsecond), // U+00B5 = micro symbol
+	"μs": int64(time.Microsecond), // U+03BC = Greek letter mu
+	"ms": int64(time.Millisecond),
+	"s":  int64(time.Second),
+	"m":  int64(time.Minute),
+	"h":  int64(time.Hour),
+	"d":  int64(time.Hour) * 24,
+	"w":  int64(time.Hour) * 168,
+}
+
+// parseDuration parses a duration string.
+// A duration string is a possibly signed sequence of
+// decimal numbers, each with optional fraction and a unit suffix,
+// such as "300ms", "-1.5h" or "2h45m".
+// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h", "d", "w".
+func parseDuration(s string) (time.Duration, error) {
+	// [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
+	orig := s
+	var d int64
+	neg := false
+
+	// Consume [-+]?
+	if s != "" {
+		c := s[0]
+		if c == '-' || c == '+' {
+			neg = c == '-'
+			s = s[1:]
+		}
+	}
+	// Special case: if all that is left is "0", this is zero.
+	if s == "0" {
+		return 0, nil
+	}
+	if s == "" {
+		return 0, errors.New("time: invalid duration " + quote(orig))
+	}
+	for s != "" {
+		var (
+			v, f  int64       // integers before, after decimal point
+			scale float64 = 1 // value = v + f/scale
+		)
+
+		var err error
+
+		// The next character must be [0-9.]
+		if !(s[0] == '.' || '0' <= s[0] && s[0] <= '9') {
+			return 0, errors.New("time: invalid duration " + quote(orig))
+		}
+		// Consume [0-9]*
+		pl := len(s)
+		v, s, err = leadingInt(s)
+		if err != nil {
+			return 0, errors.New("time: invalid duration " + quote(orig))
+		}
+		pre := pl != len(s) // whether we consumed anything before a period
+
+		// Consume (\.[0-9]*)?
+		post := false
+		if s != "" && s[0] == '.' {
+			s = s[1:]
+			pl := len(s)
+			f, scale, s = leadingFraction(s)
+			post = pl != len(s)
+		}
+		if !pre && !post {
+			// no digits (e.g. ".s" or "-.s")
+			return 0, errors.New("time: invalid duration " + quote(orig))
+		}
+
+		// Consume unit.
+		i := 0
+		for ; i < len(s); i++ {
+			c := s[i]
+			if c == '.' || '0' <= c && c <= '9' {
+				break
+			}
+		}
+		if i == 0 {
+			return 0, errors.New("time: missing unit in duration " + quote(orig))
+		}
+		u := s[:i]
+		s = s[i:]
+		unit, ok := unitMap[u]
+		if !ok {
+			return 0, errors.New("time: unknown unit " + quote(u) + " in duration " + quote(orig))
+		}
+		if v > (1<<63-1)/unit {
+			// overflow
+			return 0, errors.New("time: invalid duration " + quote(orig))
+		}
+		v *= unit
+		if f > 0 {
+			// float64 is needed to be nanosecond accurate for fractions of hours.
+			// v >= 0 && (f*unit/scale) <= 3.6e+12 (ns/h, h is the largest unit)
+			v += int64(float64(f) * (float64(unit) / scale))
+			if v < 0 {
+				// overflow
+				return 0, errors.New("time: invalid duration " + quote(orig))
+			}
+		}
+		d += v
+		if d < 0 {
+			// overflow
+			return 0, errors.New("time: invalid duration " + quote(orig))
+		}
+	}
+
+	if neg {
+		d = -d
+	}
+	return time.Duration(d), nil
+}
+
+func quote(s string) string {
+	return "\"" + s + "\""
+}
+
+var errLeadingInt = errors.New("time: bad [0-9]*") // never printed
+
+// leadingInt consumes the leading [0-9]* from s.
+func leadingInt(s string) (x int64, rem string, err error) {
+	i := 0
+	for ; i < len(s); i++ {
+		c := s[i]
+		if c < '0' || c > '9' {
+			break
+		}
+		if x > (1<<63-1)/10 {
+			// overflow
+			return 0, "", errLeadingInt
+		}
+		x = x*10 + int64(c) - '0'
+		if x < 0 {
+			// overflow
+			return 0, "", errLeadingInt
+		}
+	}
+	return x, s[i:], nil
+}
+
+// leadingFraction consumes the leading [0-9]* from s.
+// It is used only for fractions, so does not return an error on overflow,
+// it just stops accumulating precision.
+func leadingFraction(s string) (x int64, scale float64, rem string) {
+	i := 0
+	scale = 1
+	overflow := false
+	for ; i < len(s); i++ {
+		c := s[i]
+		if c < '0' || c > '9' {
+			break
+		}
+		if overflow {
+			continue
+		}
+		if x > (1<<63-1)/10 {
+			// It's possible for overflow to give a positive number, so take care.
+			overflow = true
+			continue
+		}
+		y := x*10 + int64(c) - '0'
+		if y < 0 {
+			overflow = true
+			continue
+		}
+		x = y
+		scale *= 10
+	}
+	return x, scale, s[i:]
+}
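
A quick sketch of the extended units in action:

    package main

    import (
        "fmt"

        "dbm-services/common/go-pubpkg/timeutil"
    )

    func main() {
        fmt.Println(timeutil.ToDurationExt("1d"))     // 24h0m0s
        fmt.Println(timeutil.ToDurationExt("1w2d3h")) // 219h0m0s
        if _, err := timeutil.ToDurationExtE("3x"); err != nil {
            fmt.Println(err) // time: unknown unit "x" in duration "3x"
        }
    }
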
diff --git a/dbm-services/common/go-pubpkg/timeutil/timeutil.go b/dbm-services/common/go-pubpkg/timeutil/timeutil.go
new file mode 100644
index 0000000000..27407bf41e
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/timeutil/timeutil.go
@@ -0,0 +1,2 @@
+// Package timeutil TODO
+package timeutil
diff --git a/dbm-services/common/go-pubpkg/validate/validate.go b/dbm-services/common/go-pubpkg/validate/validate.go
new file mode 100644
index 0000000000..3d936ecffb
--- /dev/null
+++ b/dbm-services/common/go-pubpkg/validate/validate.go
@@ -0,0 +1,189 @@
+// Package validate TODO
+package validate
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+	"time"
+
+	"github.com/go-playground/locales/en"
+	ut "github.com/go-playground/universal-translator"
+	"github.com/go-playground/validator/v10"
+	en_translations "github.com/go-playground/validator/v10/translations/en"
+	"github.com/pkg/errors"
+	"github.com/robfig/cron/v3"
+)
+
+// ValidateEnums makes the validate tag work with the enums tag, so the allowed
+// values are not written twice for validate oneof and swagger enums.
+// example: Method string `validate:"required,enums" enums:"post,get" json:"method"`
+func ValidateEnums(f validator.FieldLevel) bool {
+	fieldValue := f.Field().String()
+	fieldName := f.StructFieldName()
+	// look up the StructField to read its enums tag
+	sf, _ := f.Parent().Type().FieldByName(fieldName)
+	tagValue := sf.Tag.Get(TagEnum)
+	enumsValues := strings.Split(tagValue, ",")
+	return cmutil.StringsHas(enumsValues, fieldValue)
+}
+
+// GoValidateStructSimple performs a plain struct validation with no extra logic.
+// If the struct carries a validate:"enums" tag, enum must be set to true.
+func GoValidateStructSimple(v interface{}, enum bool) error {
+	validate := validator.New()
+	if enum {
+		_ = validate.RegisterValidation("enums", ValidateEnums)
+	}
+	if err := validate.Struct(v); err != nil {
+		return err
+	}
+	return nil
+}
+
+// TagEnum TODO
+const TagEnum = "enums"
+
+// GoValidateStruct validates v with optional enum and charset support; v must not be a pointer
+func GoValidateStruct(v interface{}, enum bool, charset bool) error {
+	validate := validator.New()
+	uni := ut.New(en.New())
+	trans, _ := uni.GetTranslator("en")
+	// report the json field name in error messages
+	validate.RegisterTagNameFunc(
+		func(fld reflect.StructField) string {
+			// name := fld.Tag.Get("json")
+			name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0]
+			if name == "-" {
+				return ""
+			}
+			return name
+		},
+	)
+	if err := en_translations.RegisterDefaultTranslations(validate, trans); err != nil {
+		return err
+	}
+
+	if enum {
+		_ = validate.RegisterValidation(TagEnum, ValidateEnums)
+	}
+	if charset {
+		_ = validate.RegisterValidation("checkCharset", validCharSet)
+	}
+	_ = validate.RegisterValidation("crontabexpr", validateCrontabExpr)
+	_ = validate.RegisterValidation("time", validateTimeStr)
+	if err := validate.Struct(v); err != nil {
+		return translateErr2Msg(v, trans, err)
+	}
+	return nil
+}
+
+// translateErr2Msg joins translated validation errors into one message; v must not be a pointer
+func translateErr2Msg(v interface{}, trans ut.Translator, err error) error {
+	var errStr []string
+	_, ok := err.(*validator.InvalidValidationError)
+	if ok {
+		return fmt.Errorf("param error:%s", err.Error())
+	}
+	for _, vErr := range err.(validator.ValidationErrors) {
+		if vErr.Tag() == TagEnum {
+			errmsg := ""
+			// errmsg := customEnumTransFunc(vErr, v)
+			if vErr.Param() == "" {
+				sf, _ := reflect.TypeOf(v).FieldByName(vErr.StructField())
+				tagValue := sf.Tag.Get(TagEnum)
+				errmsg = fmt.Sprintf("%s must be one of [%s]", vErr.Field(), tagValue)
+			} else {
+				errmsg = vErr.Param()
+			}
+			errStr = append(errStr, errmsg)
+			continue
+		}
+		errStr = append(errStr, vErr.Translate(trans))
+	}
+	return errors.New(strings.Join(errStr, " || "))
+}
+
+func customEnumTransFunc(fe validator.FieldError, v interface{}) string {
+	if fe.Param() == "" {
+		sf, _ := reflect.TypeOf(v).FieldByName(fe.StructField())
+		tagValue := sf.Tag.Get(TagEnum)
+		errmsg := fmt.Sprintf("%s must be one of [%s]", fe.Field(), tagValue)
+		return errmsg
+	} else {
+		return fe.Param()
+	}
+}
+
+// registerTranslator adds a translation message for a custom validation tag
+func registerTranslator(tag string, msg string) validator.RegisterTranslationsFunc {
+	return func(trans ut.Translator) error {
+		if err := trans.Add(tag, msg, false); err != nil {
+			return err
+		}
+		return nil
+	}
+}
+
+// customTransFunc translates a custom validation tag error using the registered message
+func customTransFunc(trans ut.Translator, fe validator.FieldError) string {
+	msg, err := trans.T(fe.Tag(), fe.Field())
+	if err != nil {
+		panic(fe.(error).Error())
+	}
+	return msg
+}
+
+func translate(ut ut.Translator, fe validator.FieldError) string {
+	s, err := ut.T(fe.Tag(), fe.Field(), fe.Param()) // pass the actual param, not a literal string
+	if err != nil {
+		log.Printf("warning: error translating FieldError: %#v", fe)
+		return fe.(error).Error()
+	}
+	return s
+}
+
+func validCharSet(f validator.FieldLevel) bool {
+	v := f.Field().String()
+	return cmutil.HasElem(v, []string{"default", "utf8mb4", "utf8", "latin1", "gb2312", "gbk", "binary", "gb18030"})
+}
+
+// validateCrontabExpr validates a Linux crontab expression
+func validateCrontabExpr(f validator.FieldLevel) bool {
+	v := f.Field().String()
+	err := validateCronExpr(v)
+	return err == nil
+}
+
+// validateCronExpr checks a crontab expression such as "* * * 3 5";
+// a non-nil error means the expression is invalid
+func validateCronExpr(cronstr string) (err error) {
+	specParser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow)
+	_, err = specParser.Parse(cronstr)
+	return
+}
+
+// validateTimeStr validates a time-of-day string such as "09:00:00";
+// an empty or blank string is accepted
+func validateTimeStr(f validator.FieldLevel) bool {
+	v := f.Field().String()
+	if strings.TrimSpace(v) == "" {
+		return true
+	}
+	_, err := time.Parse("15:04:05", v)
+	return err == nil
+}
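
A hedged sketch of the custom tags wired up above; the struct and values are
illustrative:

    package main

    import (
        "fmt"

        "dbm-services/common/go-pubpkg/validate"
    )

    type Job struct {
        Method   string `json:"method" validate:"required,enums" enums:"post,get"`
        Schedule string `json:"schedule" validate:"crontabexpr"`
        RunAt    string `json:"run_at" validate:"time"`
    }

    func main() {
        ok := Job{Method: "post", Schedule: "*/5 * * * *", RunAt: "09:00:00"}
        fmt.Println(validate.GoValidateStruct(ok, true, false)) // <nil>

        bad := Job{Method: "delete", Schedule: "not-a-cron", RunAt: "25:00:00"}
        // messages use the json field names and are joined with " || "
        fmt.Println(validate.GoValidateStruct(bad, true, false))
    }
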
diff --git a/dbm-services/go.work b/dbm-services/go.work
new file mode 100644
index 0000000000..accef6f9a3
--- /dev/null
+++ b/dbm-services/go.work
@@ -0,0 +1,23 @@
+go 1.19
+
+use (
+	bigdata/db-tools/dbactuator
+	common/db-resource
+	common/dbha/ha-module
+	common/dbha/hadb-api
+	common/go-pubpkg
+	common/db-config
+	mysql/db-partition
+	mysql/db-priv
+	mysql/db-remote-service
+	mysql/db-simulation
+	mysql/db-tools/dbactuator
+	mysql/db-tools/mysql-crond
+	mysql/db-tools/mysql-monitor
+	mysql/db-tools/mysql-rotatebinlog
+	mysql/db-tools/mysql-table-checksum
+	mysql/slow-query-parser-service
+	redis/db-tools/dbactuator
+	redis/db-tools/dbmon
+	redis/redis-dts
+)
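
On Go 1.18+, a new module under dbm-services can be added to this workspace
with the toolchain rather than by editing the file by hand (path illustrative):

    go work use ./common/new-module
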
diff --git a/dbm-services/go.work.sum b/dbm-services/go.work.sum
new file mode 100644
index 0000000000..74a79ac786
--- /dev/null
+++ b/dbm-services/go.work.sum
@@ -0,0 +1,995 @@
+bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512 h1:SRsZGA7aFnCZETmov57jwPrWuTmaZK6+4R4v5FUe1/c=
+cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
+cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
+cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA=
+cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y=
+cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
+cloud.google.com/go/accessapproval v1.5.0 h1:/nTivgnV/n1CaAeo+ekGexTYUsKEU9jUVkoY5359+3Q=
+cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw=
+cloud.google.com/go/accesscontextmanager v1.4.0 h1:CFhNhU7pcD11cuDkQdrE6PQJgv0EXNKNv06jIzbLlCU=
+cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE=
+cloud.google.com/go/aiplatform v1.27.0 h1:DBi3Jk9XjCJ4pkkLM4NqKgj3ozUL1wq4l+d3/jTGXAI=
+cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg=
+cloud.google.com/go/analytics v0.12.0 h1:NKw6PpQi6V1O+KsjuTd+bhip9d0REYu4NevC45vtGp8=
+cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4=
+cloud.google.com/go/apigateway v1.4.0 h1:IIoXKR7FKrEAQhMTz5hK2wiDz2WNFHS7eVr/L1lE/rM=
+cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc=
+cloud.google.com/go/apigeeconnect v1.4.0 h1:AONoTYJviyv1vS4IkvWzq69gEVdvHx35wKXc+e6wjZQ=
+cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04=
+cloud.google.com/go/appengine v1.5.0 h1:lmG+O5oaR9xNwaRBwE2XoMhwQHsHql5IoiGr1ptdDwU=
+cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak=
+cloud.google.com/go/area120 v0.6.0 h1:TCMhwWEWhCn8d44/Zs7UCICTWje9j3HuV6nVGMjdpYw=
+cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0=
+cloud.google.com/go/artifactregistry v1.9.0 h1:3d0LRAU1K6vfqCahhl9fx2oGHcq+s5gftdix4v8Ibrc=
+cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc=
+cloud.google.com/go/asset v1.10.0 h1:aCrlaLGJWTODJX4G56ZYzJefITKEWNfbjjtHSzWpxW0=
+cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY=
+cloud.google.com/go/assuredworkloads v1.9.0 h1:hhIdCOowsT1GG5eMCIA0OwK6USRuYTou/1ZeNxCSRtA=
+cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
+cloud.google.com/go/automl v1.8.0 h1:BMioyXSbg7d7xLibn47cs0elW6RT780IUWr42W8rp2Q=
+cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM=
+cloud.google.com/go/baremetalsolution v0.4.0 h1:g9KO6SkakcYPcc/XjAzeuUrEOXlYPnMpuiaywYaGrmQ=
+cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI=
+cloud.google.com/go/batch v0.4.0 h1:1jvEBY55OH4Sd2FxEXQfxGExFWov1A/IaRe+Z5Z71Fw=
+cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE=
+cloud.google.com/go/beyondcorp v0.3.0 h1:w+4kThysgl0JiKshi2MKDCg2NZgOyqOI0wq2eBZyrzA=
+cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8=
+cloud.google.com/go/bigquery v1.44.0 h1:Wi4dITi+cf9VYp4VH2T9O41w0kCW0uQTELq2Z6tukN0=
+cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc=
+cloud.google.com/go/billing v1.7.0 h1:Xkii76HWELHwBtkQVZvqmSo9GTr0O+tIbRNnMcGdlg4=
+cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y=
+cloud.google.com/go/binaryauthorization v1.4.0 h1:pL70vXWn9TitQYXBWTK2abHl2JHLwkFRjYw6VflRqEA=
+cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk=
+cloud.google.com/go/certificatemanager v1.4.0 h1:tzbR4UHBbgsewMWUD93JHi8EBi/gHBoSAcY1/sThFGk=
+cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590=
+cloud.google.com/go/channel v1.9.0 h1:pNuUlZx0Jb0Ts9P312bmNMuH5IiFWIR4RUtLb70Ke5s=
+cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk=
+cloud.google.com/go/cloudbuild v1.4.0 h1:TAAmCmAlOJ4uNBu6zwAjwhyl/7fLHHxIEazVhr3QBbQ=
+cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA=
+cloud.google.com/go/clouddms v1.4.0 h1:UhzHIlgFfMr6luVYVNydw/pl9/U5kgtjCMJHnSvoVws=
+cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk=
+cloud.google.com/go/cloudtasks v1.8.0 h1:faUiUgXjW8yVZ7XMnKHKm1WE4OldPBUWWfIRN/3z1dc=
+cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI=
+cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
+cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
+cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
+cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
+cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
+cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
+cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU=
+cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
+cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
+cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0=
+cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
+cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
+cloud.google.com/go/contactcenterinsights v1.4.0 h1:tTQLI/ZvguUf9Hv+36BkG2+/PeC8Ol1q4pBW+tgCx0A=
+cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
+cloud.google.com/go/container v1.7.0 h1:nbEK/59GyDRKKlo1SqpohY1TK8LmJ2XNcvS9Gyom2A0=
+cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo=
+cloud.google.com/go/containeranalysis v0.6.0 h1:2824iym832ljKdVpCBnpqm5K94YT/uHTVhNF+dRTXPI=
+cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=
+cloud.google.com/go/datacatalog v1.8.0 h1:6kZ4RIOW/uT7QWC5SfPfq/G8sYzr/v+UOmOAxy4Z1TE=
+cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM=
+cloud.google.com/go/dataflow v0.7.0 h1:CW3541Fm7KPTyZjJdnX6NtaGXYFn5XbFC5UcjgALKvU=
+cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ=
+cloud.google.com/go/dataform v0.5.0 h1:vLwowLF2ZB5J5gqiZCzv076lDI/Rd7zYQQFu5XO1PSg=
+cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0=
+cloud.google.com/go/datafusion v1.5.0 h1:j5m2hjWovTZDTQak4MJeXAR9yN7O+zMfULnjGw/OOLg=
+cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w=
+cloud.google.com/go/datalabeling v0.6.0 h1:dp8jOF21n/7jwgo/uuA0RN8hvLcKO4q6s/yvwevs2ZM=
+cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ=
+cloud.google.com/go/dataplex v1.4.0 h1:cNxeA2DiWliQGi21kPRqnVeQ5xFhNoEjPRt1400Pm8Y=
+cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
+cloud.google.com/go/dataproc v1.8.0 h1:gVOqNmElfa6n/ccG/QDlfurMWwrK3ezvy2b2eDoCmS0=
+cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
+cloud.google.com/go/dataqna v0.6.0 h1:gx9jr41ytcA3dXkbbd409euEaWtofCVXYBvJz3iYm18=
+cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA=
+cloud.google.com/go/datastore v1.10.0 h1:4siQRf4zTiAVt/oeH4GureGkApgb2vtPQAtOmhpqQwE=
+cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM=
+cloud.google.com/go/datastream v1.5.0 h1:PgIgbhedBtYBU6POGXFMn2uSl9vpqubc3ewTNdcU8Mk=
+cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4=
+cloud.google.com/go/deploy v1.5.0 h1:kI6dxt8Ml0is/x7YZjLveTvR7YPzXAUD/8wQZ2nH5zA=
+cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s=
+cloud.google.com/go/dialogflow v1.19.0 h1:HYHVOkoxQ9bSfNIelSZYNAtUi4CeSrCnROyOsbOqPq8=
+cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0=
+cloud.google.com/go/dlp v1.7.0 h1:9I4BYeJSVKoSKgjr70fLdRDumqcUeVmHV4fd5f9LR6Y=
+cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q=
+cloud.google.com/go/documentai v1.10.0 h1:jfq09Fdjtnpnmt/MLyf6A3DM3ynb8B2na0K+vSXvpFM=
+cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4=
+cloud.google.com/go/domains v0.7.0 h1:pu3JIgC1rswIqi5romW0JgNO6CTUydLYX8zyjiAvO1c=
+cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg=
+cloud.google.com/go/edgecontainer v0.2.0 h1:hd6J2n5dBBRuAqnNUEsKWrp6XNPKsaxwwIyzOPZTokk=
+cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w=
+cloud.google.com/go/errorreporting v0.3.0 h1:kj1XEWMu8P0qlLhm3FwcaFsUvXChV/OraZwA70trRR0=
+cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
+cloud.google.com/go/essentialcontacts v1.4.0 h1:b6csrQXCHKQmfo9h3dG/pHyoEh+fQG1Yg78a53LAviY=
+cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8=
+cloud.google.com/go/eventarc v1.8.0 h1:AgCqrmMMIcel5WWKkzz5EkCUKC3Rl5LNMMYsS+LvsI0=
+cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw=
+cloud.google.com/go/filestore v1.4.0 h1:yjKOpzvqtDmL5AXbKttLc8j0hL20kuC1qPdy5HPcxp0=
+cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI=
+cloud.google.com/go/firestore v1.9.0 h1:IBlRyxgGySXu5VuW0RgGFlTtLukSnNkpDiEOMkQkmpA=
+cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
+cloud.google.com/go/functions v1.9.0 h1:35tgv1fQOtvKqH/uxJMzX3w6usneJ0zXpsFr9KAVhNE=
+cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08=
+cloud.google.com/go/gaming v1.8.0 h1:97OAEQtDazAJD7yh/kvQdSCQuTKdR0O+qWAJBZJ4xiA=
+cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM=
+cloud.google.com/go/gkebackup v0.3.0 h1:4K+jiv4ocqt1niN8q5Imd8imRoXBHTrdnJVt/uFFxF4=
+cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo=
+cloud.google.com/go/gkeconnect v0.6.0 h1:zAcvDa04tTnGdu6TEZewaLN2tdMtUOJJ7fEceULjguA=
+cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A=
+cloud.google.com/go/gkehub v0.10.0 h1:JTcTaYQRGsVm+qkah7WzHb6e9sf1C0laYdRPn9aN+vg=
+cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=
+cloud.google.com/go/gkemulticloud v0.4.0 h1:8F1NhJj8ucNj7lK51UZMtAjSWTgP1zO18XF6vkfiPPU=
+cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI=
+cloud.google.com/go/grafeas v0.2.0 h1:CYjC+xzdPvbV65gi6Dr4YowKcmLo045pm18L0DhdELM=
+cloud.google.com/go/gsuiteaddons v1.4.0 h1:TGT2oGmO5q3VH6SjcrlgPUWI0njhYv4kywLm6jag0to=
+cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o=
+cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
+cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk=
+cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
+cloud.google.com/go/iap v1.5.0 h1:BGEXovwejOCt1zDk8hXq0bOhhRu9haXKWXXXp2B4wBM=
+cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
+cloud.google.com/go/ids v1.2.0 h1:LncHK4HHucb5Du310X8XH9/ICtMwZ2PCfK0ScjWiJoY=
+cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY=
+cloud.google.com/go/iot v1.4.0 h1:Y9+oZT9jD4GUZzORXTU45XsnQrhxmDT+TFbPil6pRVQ=
+cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g=
+cloud.google.com/go/kms v1.6.0 h1:OWRZzrPmOZUzurjI2FBGtgY2mB1WaJkqhw6oIwSj0Yg=
+cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
+cloud.google.com/go/language v1.8.0 h1:3Wa+IUMamL4JH3Zd3cDZUHpwyqplTACt6UZKRD2eCL4=
+cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8=
+cloud.google.com/go/lifesciences v0.6.0 h1:tIqhivE2LMVYkX0BLgG7xL64oNpDaFFI7teunglt1tI=
+cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=
+cloud.google.com/go/logging v1.6.1 h1:ZBsZK+JG+oCDT+vaxwqF2egKNRjz8soXiS6Xv79benI=
+cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw=
+cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
+cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc=
+cloud.google.com/go/managedidentities v1.4.0 h1:3Kdajn6X25yWQFhFCErmKSYTSvkEd3chJROny//F1A0=
+cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM=
+cloud.google.com/go/maps v0.1.0 h1:kLReRbclTgJefw2fcCbdLPLhPj0U6UUWN10ldG8sdOU=
+cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI=
+cloud.google.com/go/mediatranslation v0.6.0 h1:qAJzpxmEX+SeND10Y/4868L5wfZpo4Y3BIEnIieP4dk=
+cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
+cloud.google.com/go/memcache v1.7.0 h1:yLxUzJkZVSH2kPaHut7k+7sbIBFpvSh1LW9qjM2JDjA=
+cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY=
+cloud.google.com/go/metastore v1.8.0 h1:3KcShzqWdqxrDEXIBWpYJpOOrgpDj+HlBi07Grot49Y=
+cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI=
+cloud.google.com/go/monitoring v1.8.0 h1:c9riaGSPQ4dUKWB+M1Fl0N+iLxstMbCktdEwYSPGDvA=
+cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
+cloud.google.com/go/networkconnectivity v1.7.0 h1:BVdIKaI68bihnXGdCVL89Jsg9kq2kg+II30fjVqo62E=
+cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
+cloud.google.com/go/networkmanagement v1.5.0 h1:mDHA3CDW00imTvC5RW6aMGsD1bH+FtKwZm/52BxaiMg=
+cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
+cloud.google.com/go/networksecurity v0.6.0 h1:qDEX/3sipg9dS5JYsAY+YvgTjPR63cozzAWop8oZS94=
+cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
+cloud.google.com/go/notebooks v1.5.0 h1:AC8RPjNvel3ExgXjO1YOAz+teg9+j+89TNxa7pIZfww=
+cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
+cloud.google.com/go/optimization v1.2.0 h1:7PxOq9VTT7TMib/6dMoWpMvWS2E4dJEvtYzjvBreaec=
+cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
+cloud.google.com/go/orchestration v1.4.0 h1:39d6tqvNjd/wsSub1Bn4cEmrYcet5Ur6xpaN+SxOxtY=
+cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
+cloud.google.com/go/orgpolicy v1.5.0 h1:erF5PHqDZb6FeFrUHiYj2JK2BMhsk8CyAg4V4amJ3rE=
+cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
+cloud.google.com/go/osconfig v1.10.0 h1:NO0RouqCOM7M2S85Eal6urMSSipWwHU8evzwS+siqUI=
+cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
+cloud.google.com/go/oslogin v1.7.0 h1:pKGDPfeZHDybtw48WsnVLjoIPMi9Kw62kUE5TXCLCN4=
+cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
+cloud.google.com/go/phishingprotection v0.6.0 h1:OrwHLSRSZyaiOt3tnY33dsKSedxbMzsXvqB21okItNQ=
+cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
+cloud.google.com/go/policytroubleshooter v1.4.0 h1:NQklJuOUoz1BPP+Epjw81COx7IISWslkZubz/1i0UN8=
+cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
+cloud.google.com/go/privatecatalog v0.6.0 h1:Vz86uiHCtNGm1DeC32HeG2VXmOq5JRYA3VRPf8ZEcSg=
+cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
+cloud.google.com/go/pubsub v1.27.1 h1:q+J/Nfr6Qx4RQeu3rJcnN48SNC0qzlYzSeqkPq93VHs=
+cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
+cloud.google.com/go/pubsublite v1.5.0 h1:iqrD8vp3giTb7hI1q4TQQGj77cj8zzgmMPsTZtLnprM=
+cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
+cloud.google.com/go/recaptchaenterprise v1.3.1 h1:u6EznTGzIdsyOsvm+Xkw0aSuKFXQlyjGE9a4exk6iNQ=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0 h1:UqzFfb/WvhwXGDF1eQtdHLrmni+iByZXY4h3w9Kdyv8=
+cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
+cloud.google.com/go/recommendationengine v0.6.0 h1:6w+WxPf2LmUEqX0YyvfCoYb8aBYOcbIV25Vg6R0FLGw=
+cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
+cloud.google.com/go/recommender v1.8.0 h1:9kMZQGeYfcOD/RtZfcNKGKtoex3DdoB4zRgYU/WaIwE=
+cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
+cloud.google.com/go/redis v1.10.0 h1:/zTwwBKIAD2DEWTrXZp8WD9yD/gntReF/HkPssVYd0U=
+cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
+cloud.google.com/go/resourcemanager v1.4.0 h1:NDao6CHMwEZIaNsdWy+tuvHaavNeGP06o1tgrR0kLvU=
+cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
+cloud.google.com/go/resourcesettings v1.4.0 h1:eTzOwB13WrfF0kuzG2ZXCfB3TLunSHBur4s+HFU6uSM=
+cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
+cloud.google.com/go/retail v1.11.0 h1:N9fa//ecFUOEPsW/6mJHfcapPV0wBSwIUwpVZB7MQ3o=
+cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
+cloud.google.com/go/run v0.3.0 h1:AWPuzU7Xtaj3Jf+QarDWIs6AJ5hM1VFQ+F6Q+VZ6OT4=
+cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
+cloud.google.com/go/scheduler v1.7.0 h1:K/mxOewgHGeKuATUJNGylT75Mhtjmx1TOkKukATqMT8=
+cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
+cloud.google.com/go/secretmanager v1.9.0 h1:xE6uXljAC1kCR8iadt9+/blg1fvSbmenlsDN4fT9gqw=
+cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
+cloud.google.com/go/security v1.10.0 h1:KSKzzJMyUoMRQzcz7azIgqAUqxo7rmQ5rYvimMhikqg=
+cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
+cloud.google.com/go/securitycenter v1.16.0 h1:QTVtk/Reqnx2bVIZtJKm1+mpfmwRwymmNvlaFez7fQY=
+cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
+cloud.google.com/go/servicecontrol v1.5.0 h1:ImIzbOu6y4jL6ob65I++QzvqgFaoAKgHOG+RU9/c4y8=
+cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicedirectory v1.7.0 h1:f7M8IMcVzO3T425AqlZbP3yLzeipsBHtRza8vVFYMhQ=
+cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
+cloud.google.com/go/servicemanagement v1.5.0 h1:TpkCO5M7dhKSy1bKUD9o/sSEW/U1Gtx7opA1fsiMx0c=
+cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
+cloud.google.com/go/serviceusage v1.4.0 h1:b0EwJxPJLpavSljMQh0RcdHsUrr5DQ+Nelt/3BAs5ro=
+cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
+cloud.google.com/go/shell v1.4.0 h1:b1LFhFBgKsG252inyhtmsUUZwchqSz3WTvAIf3JFo4g=
+cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
+cloud.google.com/go/spanner v1.41.0 h1:NvdTpRwf7DTegbfFdPjAWyD7bOVu0VeMqcvR9aCQCAc=
+cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
+cloud.google.com/go/speech v1.9.0 h1:yK0ocnFH4Wsf0cMdUyndJQ/hPv02oTJOxzi6AgpBy4s=
+cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
+cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
+cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ=
+cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
+cloud.google.com/go/storagetransfer v1.6.0 h1:fUe3OydbbvHcAYp07xY+2UpH4AermGbmnm7qdEj3tGE=
+cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
+cloud.google.com/go/talent v1.4.0 h1:MrekAGxLqAeAol4Sc0allOVqUGO8j+Iim8NMvpiD7tM=
+cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
+cloud.google.com/go/texttospeech v1.5.0 h1:ccPiHgTewxgyAeCWgQWvZvrLmbfQSFABTMAfrSPLPyY=
+cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
+cloud.google.com/go/tpu v1.4.0 h1:ztIdKoma1Xob2qm6QwNh4Xi9/e7N3IfvtwG5AcNsj1g=
+cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
+cloud.google.com/go/trace v1.4.0 h1:qO9eLn2esajC9sxpqp1YKX37nXC3L4BfGnPS0Cx9dYo=
+cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
+cloud.google.com/go/translate v1.4.0 h1:AOYOH3MspzJ/bH1YXzB+xTE8fMpn3mwhLjugwGXvMPI=
+cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/video v1.9.0 h1:ttlvO4J5c1VGq6FkHqWPD/aH6PfdxujHt+muTJlW1Zk=
+cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/videointelligence v1.9.0 h1:RPFgVVXbI2b5vnrciZjtsUgpNKVtHO/WIyXUhEfuMhA=
+cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
+cloud.google.com/go/vision v1.2.0 h1:/CsSTkbmO9HC8iQpxbK8ATms3OQaX3YQUeTMGCxlaK4=
+cloud.google.com/go/vision/v2 v2.5.0 h1:TQHxRqvLMi19azwm3qYuDbEzZWmiKJNTpGbkNsfRCik=
+cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
+cloud.google.com/go/vmmigration v1.3.0 h1:A2Tl2ZmwMRpvEmhV2ibISY85fmQR+Y5w9a0PlRz5P3s=
+cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
+cloud.google.com/go/vmwareengine v0.1.0 h1:JMPZaOT/gIUxVlTqSl/QQ32Y2k+r0stNeM1NSqhVP9o=
+cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
+cloud.google.com/go/vpcaccess v1.5.0 h1:woHXXtnW8b9gLFdWO9HLPalAddBQ9V4LT+1vjKwR3W8=
+cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
+cloud.google.com/go/webrisk v1.7.0 h1:ypSnpGlJnZSXbN9a13PDmAYvVekBLnGKxQ3Q9SMwnYY=
+cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
+cloud.google.com/go/websecurityscanner v1.4.0 h1:y7yIFg/h/mO+5Y5aCOtVAnpGUOgqCH5rXQ2Oc8Oq2+g=
+cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
+cloud.google.com/go/workflows v1.9.0 h1:7Chpin9p50NTU8Tb7qk+I11U/IwVXmDhEoSsdccvInE=
+cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6 h1:K72hopUosKG3ntOPNG4OzzbuhxGuVf06fa2la1/H/Ho=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8 h1:V8krnnfGj4pV65YLUm3C0/8bl7V5Nry2Pwvy3ru/wLc=
+github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible h1:KnPIugL51v3N3WwvaSmZbxukD1WuWXOiE9fRdu32f2I=
+github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM=
+github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
+github.com/Azure/go-autorest/autorest v0.11.18 h1:90Y4srNYrwOtAgVo3ndrQkTYn6kf1Eg/AjTFJ8Is2aM=
+github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc=
+github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk=
+github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
+github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc=
+github.com/ClickHouse/clickhouse-go v1.4.3 h1:iAFMa2UrQdR5bHJ2/yaSLffZkxpcOYQMCUuKeNXGdqc=
+github.com/DATA-DOG/go-sqlmock v1.3.3 h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08=
+github.com/DataDog/datadog-go v3.2.0+incompatible h1:qSG2N4FghB1He/r2mFrWKCaL7dXCilEuNEeAn20fdD4=
+github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
+github.com/HuKeping/rbtree v0.0.0-20200208030951-29f0b79e84ed h1:YKqpA6qf8Bh73vj8Rv9SBB5OU558f2c1A889nCVUSLE=
+github.com/Microsoft/hcsshim v0.9.2 h1:wB06W5aYFfUB3IvootYAY2WnOmIdgPGfqSI6tufQNnY=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3 h1:4FA+QBaydEHlwxg0lMN3rhwoDaQy6LKhVWR4qvq4BuA=
+github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
+github.com/PuerkitoBio/goquery v1.5.1 h1:PSPBGne8NIUWw+/7vFBV+kG2J/5MOjbzc7154OaKCSE=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
+github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af h1:wVe6/Ea46ZMeNkQjjBW6xcqyQA/j5e0D6GytH95g0gQ=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
+github.com/alexflint/go-filemutex v1.1.0 h1:IAWuUuRYL2hETx5b8vCgwnD+xSdlsTQY6s2JjBsqLdg=
+github.com/andybalholm/cascadia v1.1.0 h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5zzsLTo=
+github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg=
+github.com/apache/arrow/go/arrow v0.0.0-20211013220434-5962184e7a30 h1:HGREIyk0QRPt70R69Gm1JFHDgoiyYpCyuGE8E9k/nf0=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e h1:QEF07wC0T1rKkctt1RINW/+RMTVmiwxETico2l3gxJA=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA=
+github.com/armon/go-metrics v0.4.0 h1:yCQqn7dwca4ITXb+CbubHmedzaQYHhNhrEXLYUeEe8Q=
+github.com/armon/go-metrics v0.4.0/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
+github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI=
+github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/aws/aws-sdk-go v1.17.7 h1:/4+rDPe0W95KBmNGYCG+NUvdL8ssPYBMxL+aSCg6nIA=
+github.com/aws/aws-sdk-go-v2 v1.9.2 h1:dUFQcMNZMLON4BOe273pl0filK9RqyQMhCK/6xssL6s=
+github.com/aws/aws-sdk-go-v2/config v1.8.3 h1:o5583X4qUfuRrOGOgmOcDgvr5gJVSu57NK08cWAhIDk=
+github.com/aws/aws-sdk-go-v2/credentials v1.4.3 h1:LTdD5QhK073MpElh9umLLP97wxphkgVC/OjQaEbBwZA=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0 h1:9tfxW/icbSu98C2pcNynm5jmDwU3/741F11688B6QnU=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.4 h1:TnU1cY51027j/MQeFy7DIgk1UuzJY+wLFYqXceY/fiE=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4 h1:leSJ6vCqtPpTmBIgE7044B1wql1E4n//McF+mEgNrYg=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0 h1:gceOysEWNNwLd6cki65IMBZ4WAM0MwgBQq2n7kejoT8=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2 h1:r7jel2aa4d9Duys7wEmWqDd5ebpC9w6Kxu6wIjjp18E=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.2 h1:RnZjLgtCGLsF2xYYksy0yrx6xPvKG9BYv29VfK4p/J8=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.16.1 h1:z+P3r4LrwdudLKBoEVWxIORrk4sVg4/iqpG3+CS53AY=
+github.com/aws/aws-sdk-go-v2/service/sso v1.4.2 h1:pZwkxZbspdqRGzddDB92bkZBoB7lg85sMRE7OqdB3V0=
+github.com/aws/aws-sdk-go-v2/service/sts v1.7.2 h1:ol2Y5DWqnJeKqNd8th7JWzBtqu63xpOfs1Is+n1t8/4=
+github.com/aws/smithy-go v1.8.0 h1:AEwwwXQZtUwP5Mz506FeXXrKBe0jA8gVM+1gEcSRooc=
+github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
+github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkNEM+Y=
+github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA=
+github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c h1:+0HFd5KSZ/mm3JmhmrDukiId5iR6w4+BdFtfSy4yWIc=
+github.com/blacktear23/go-proxyprotocol v0.0.0-20171102103907-62e368e1c470 h1:AAFU1eDJHimRQvJGBBnhO0Cm4oe7V2GG3CLtiQk/6wg=
+github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
+github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
+github.com/boombuler/barcode v1.0.0 h1:s1TvRnXwL2xJRaccrdcBQMZxq6X7DvsMogtmJeHDdrc=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1 h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA=
+github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=
+github.com/bytedance/sonic v1.8.0/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
+github.com/cenkalti/backoff/v4 v4.1.2 h1:6Yo7N8UP2K6LWZnW94DLVSSrbobcWdVzAYOisuDPIFo=
+github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0 h1:WW2B2uxx9KWF6bGlHqhm8Okiafwwx7Y2kcpn8lCpjgo=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0 h1:wpFFOoomK3389ue2lAb0Boag6XPht5QYpipxmSNL4d8=
+github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
+github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
+github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8=
+github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.7.0 h1:1k/q3ATgxSXRdrmPfH8d7YK0GfqVsEKZAX9dQZvs56k=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
+github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
+github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
+github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
+github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI=
+github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 h1:KwaoQzs/WeUxxJqiJsZ4euOly1Az/IgZXXSxlD/UBNk=
+github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
+github.com/cockroachdb/cockroach-go/v2 v2.1.1 h1:3XzfSMuUT0wBe1a3o5C0eOTcArhmmFAg2Jzh/7hhKqo=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5 h1:xD/lrqdvwsc+O2bjSSi3YqY73Ke3LAiSCx49aCesA0E=
+github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcKDqs=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY=
+github.com/containerd/aufs v1.0.0 h1:2oeJiwX5HstO7shSrPZjrohJZLzK36wvpdmzDRkL/LY=
+github.com/containerd/btrfs v1.0.0 h1:osn1exbzdub9L5SouXO5swW4ea/xVdJZ3wokxN5GrnA=
+github.com/containerd/cgroups v1.0.3 h1:ADZftAkglvCiD44c77s5YmMqaP2pzVCFZvBmAlBdAP4=
+github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
+github.com/containerd/continuity v0.2.2 h1:QSqfxcn8c+12slxwu00AtzXrsami0MJb/MQs9lOLHLA=
+github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU=
+github.com/containerd/go-cni v1.1.3 h1:t0MQwrtM96SH71Md8tH0uKrVE9v+jxkDTbvFSm3B9VE=
+github.com/containerd/go-runc v1.0.0 h1:oU+lLv1ULm5taqgV/CJivypVODI4SUz1znWjv3nNYS0=
+github.com/containerd/imgcrypt v1.1.3 h1:69UKRsA3Q/lAwo2eDzWshdjimqhmprrWXfNtBeO0fBc=
+github.com/containerd/nri v0.1.0 h1:6QioHRlThlKh2RkRTR4kIT3PKAcrLo3gIWnjkM4dQmQ=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1 h1:5e7heayhB7CcgdTkqfZqrNaNv15gABwr3Q2jBTbLlt4=
+github.com/containerd/ttrpc v1.1.0 h1:GbtyLRxb0gOLR0TYQWt3O6B0NvT8tMdorEHqIQo/lWI=
+github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
+github.com/containerd/zfs v1.0.0 h1:cXLJbx+4Jj7rNsTiqVfm6i+RNLx6FFA2fMmDlEf+Wm8=
+github.com/containernetworking/cni v1.0.1 h1:9OIL/sZmMYDBe+G8svzILAlulUpaDTUjeAbtH/JNLBo=
+github.com/containernetworking/plugins v1.0.1 h1:wwCfYbTCj5FC0EJgyzyjTXmqysOiJE9r712Z+2KVZAk=
+github.com/containers/ocicrypt v1.1.2 h1:Ez+GAMP/4GLix5Ywo/fL7O0nY771gsBIigiqUm1aXz0=
+github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s=
+github.com/coreos/go-iptables v0.6.0 h1:is9qnZMPYjLd8LYqmm/qlE+wwEgJIkTYdhV3rfZo4jk=
+github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM=
+github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c=
+github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
+github.com/cpuguy83/go-md2man v1.0.7 h1:DVS0EPFHUiaJSaX2EKlaf65HUmk9PXhOl/Xa3Go242Q=
+github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
+github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
+github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
+github.com/cznic/golex v0.0.0-20181122101858-9c343928389c h1:G8zTsaqyVfIHpgMFcGgdbhHSFhlNc77rAKkhVbQ9kQg=
+github.com/cznic/parser v0.0.0-20160622100904-31edd927e5b1 h1:uWcWCkSP+E1w1z8r082miT+c+9vzg+5UdrgGCo15lMo=
+github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186 h1:0rkFMAbn5KBKNpJyHQ6Prb95vIKanmAe62KxsrN+sqA=
+github.com/cznic/y v0.0.0-20170802143616-045f81c6662a h1:N2rDAvHuM46OGscJkGX4Dw4BBqZgg6mGNGLYs5utVVo=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c h1:Xo2rK1pzOm0jO6abTPIQwbAmqBIOj132otexc1mmzFc=
+github.com/d2g/dhcp4client v1.0.0 h1:suYBsYZIkSlUMEz4TAYCczKf62IA2UWC+O8+KtdOhCo=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5 h1:+CpLbZIeUn94m02LdEKPcgErLJ347NUwxPKs5u8ieiY=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4 h1:itqmmf1PFpC4n5JW+j4BU7X4MTfVurhYRTjODoPb2Y8=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954 h1:RMLoZVzv4GliuWafOuPuQDKSm1SJph7uCRnnS61JAn4=
+github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017 h1:2HQmlpI3yI9deH18Q6xiSOIjXD4sLI55Y/gfpa8/558=
+github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
+github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ=
+github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 h1:aaQcKT9WumO6JEJcRyTqFVq4XUZiUcKR2/GI31TOcz8=
+github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385 h1:clC1lXBpe2kTj2VHdaIu9ajZQe4kcEY9j0NsnDDBZ3o=
+github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
+github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/protoc-gen-validate v0.6.2 h1:JiO+kJTpmYGjEodY7O1Zk8oZcNz1+f30UtwtXoFUPzE=
+github.com/etcd-io/gofail v0.0.0-20180808172546-51ce9a71510a h1:QNEenQIsGDEEfFNSnN+h6hE1OwnHqTg7Dl9gEk1Cko4=
+github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
+github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=
+github.com/fogleman/gg v1.3.0 h1:/7zJX8F6AaYQc57WQCyN9cAIz+4bCJGO9B+dyW29am8=
+github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8=
+github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
+github.com/fsouza/fake-gcs-server v1.17.0 h1:OeH75kBZcZa3ZE+zz/mFdJ2btt9FgqfjI7gIh9+5fvk=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU=
+github.com/gabriel-vasile/mimetype v1.4.0 h1:Cn9dkdYsMIu56tGho+fqzh7XmvY2YyGU0FnbhiOsEro=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7 h1:LofdAjjjqCSXMwLGgOgnE+rdPuvX9DxCqaHwKy7i/ko=
+github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=
+github.com/gdamore/tcell v1.3.0 h1:r35w0JBADPZCVQijYebl6YMWWtHRqVEGt7kL2eBADRM=
+github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
+github.com/go-fonts/dejavu v0.1.0 h1:JSajPXURYqpr+Cu8U9bt8K+XcACIHWqWrvWCKyeFmVQ=
+github.com/go-fonts/latin-modern v0.2.0 h1:5/Tv1Ek/QCr20C6ZOz15vw3g7GELYL98KWr8Hgo+3vk=
+github.com/go-fonts/liberation v0.1.1 h1:wBrPaMkrXFBW3qXpXAjiKljdVUMxn9bX2ia3XjPHoik=
+github.com/go-fonts/stix v0.1.0 h1:UlZlgrvvmT/58o573ot7NFw0vZasZ5I6bcIft/oMdgg=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1 h1:QbL/5oDUmRBzO9/Z7Seo6zf912W/a6Sr4Eu0G/3Jho0=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4 h1:WtGNWLvXpe6ZudgnXrq0barxBImvnnJoMEhXAzcbM0I=
+github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo=
+github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
+github.com/go-kit/log v0.1.0 h1:DGJh0Sm43HbOeYDNnVZFl8BvcYVvjD5bqYJvp0REbwQ=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07 h1:OTlfMvwR1rLyf9goVmXfuS5AJn80+Vmj4rTf4n46SOs=
+github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
+github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
+github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
+github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
+github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
+github.com/go-openapi/runtime v0.0.0-20170901133030-bf2ff8f71507 h1:3O6ZxJ3kV/XnAbTK6rEySxfm9S64Qtj/3FhbVcRSecY=
+github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=
+github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU=
+github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
+github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
+github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
+github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
+github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
+github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
+github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/validator/v10 v10.11.2/go.mod h1:NieE624vt4SCTJtD87arVLvdmjPAeV8BQlHtMnw9D7s=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd h1:hSkbZ9XSyjyBirMeqSqUrK+9HboWrweVlzRNqoBi2d4=
+github.com/gobuffalo/depgen v0.1.0 h1:31atYa/UW9V5q8vMJ+W6wd64OaaTHUrCUXER358zLM4=
+github.com/gobuffalo/envy v1.7.0 h1:GlXgaiBkmrYMHco6t4j7SacKO4XUjvh5pwXh0f4uxXU=
+github.com/gobuffalo/flect v0.1.3 h1:3GQ53z7E3o00C/yy7Ko8VXqQXoJGLkrTQCLTF1EjoXU=
+github.com/gobuffalo/genny v0.1.1 h1:iQ0D6SpNXIxu52WESsD+KoQ7af2e3nCfnSBoSF/hKe0=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211 h1:mSVZ4vj4khv+oThUfS+SQU3UuFIZ5Zo6UNcvK8E8Mz8=
+github.com/gobuffalo/gogen v0.1.1 h1:dLg+zb+uOyd/mKeQUYIbwbNmfRsr9hd/WtYWepmayhI=
+github.com/gobuffalo/here v0.6.0 h1:hYrd0a6gDmWxBM4TnrGw8mQg24iSVoIkHEk7FodQcBI=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2 h1:8thhT+kUJMTMy3HlX4+y9Da+BNJck+p109tqqKp7WDs=
+github.com/gobuffalo/mapi v1.0.2 h1:fq9WcL1BYrm36SzK6+aAnZ8hcp+SrmnDyAxhNx8dvJk=
+github.com/gobuffalo/packd v0.1.0 h1:4sGKOD8yaYJ+dek1FDkwcxCHA40M4kfKgFHx8N2kwbU=
+github.com/gobuffalo/packr/v2 v2.2.0 h1:Ir9W9XIm9j7bhhkKE9cokvtTl1vBm62A/fene/ZCj6A=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754 h1:tpom+2CJmpzAWj5/VEHync2rJGi+epHNIeRSWjzGA+4=
+github.com/goccy/go-json v0.10.0/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556 h1:N/MD/sr6o61X+iZBAT2qEUF023s4KbA8RWfKzl0L6MQ=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
+github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro=
+github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
+github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI=
+github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7 h1:2hRPrmiwPrp3fQX967rNJIhQPtiGXdlQWAxKbKw3VHA=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-containerregistry v0.5.1 h1:/+mFTs4AlwsJ/mJe8NDtKb7BxLtbZFpcn8vDsneEkwQ=
+github.com/google/go-github/v39 v39.2.0 h1:rNNM311XtPOz5rDdsJXAp2o8F67X9FnROXTvto3aSnQ=
+github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
+github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
+github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
+github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
+github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
+github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg=
+github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
+github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
+github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
+github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
+github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
+github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
+github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
+github.com/googleapis/go-type-adapters v1.0.0 h1:9XdMn+d/G57qq1s8dNc5IesGCXHf6V2HZ2JwRxfA2tA=
+github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8 h1:tlyzajkF3030q6M8SvmJSemC9DTHL/xaMa18b65+JM4=
+github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o=
+github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
+github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg=
+github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
+github.com/gorilla/schema v1.1.0 h1:CamqUDOFUBqzrvxuz2vEwo8+SUdwsluFh7IlzJh30LY=
+github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=
+github.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=
+github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
+github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g=
+github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4=
+github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU=
+github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
+github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM=
+github.com/hashicorp/go-hclog v1.2.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
+github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
+github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
+github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
+github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
+github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
+github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
+github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
+github.com/hashicorp/go-version v1.2.1 h1:zEfKbn2+PDgroKdiOzqiE8rsmLqU2uwi5PB5pBJ3TkI=
+github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go.net v0.0.1 h1:sNCoNyDEvN1xa+X0baata4RdcpKwcMS6DH+xwfqPgjw=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
+github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ=
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
+github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
+github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
+github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
+github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639 h1:mV02weKRL81bEnm8A0HT1/CAelMQDBuQIfLw8n+d6xI=
+github.com/ianlancetaylor/demangle v0.0.0-20220319035150-800ac71e25c2/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
+github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
+github.com/intel/goresctrl v0.2.0 h1:JyZjdMQu9Kl/wLXe9xA6s1X+tF6BWsQPFGJMEeCfWzE=
+github.com/j-keck/arping v1.0.2 h1:hlLhuXgQkzIJTZuhMigvG/CuSkaspeaD9hRDk2zuiMI=
+github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
+github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
+github.com/jackc/pgconn v1.8.0 h1:FmjZ0rOyXTr1wfWs45i4a9vjnjWUAGpMuQLD9OSs+lw=
+github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451 h1:WAvSpGf7MsFuzAtK4Vk7R4EVe+liW4x83r4oWu0WHKw=
+github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye4717ITLaNwV9mWbJx0dLCpcRzdA=
+github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
+github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=
+github.com/jackc/pgproto3/v2 v2.0.7 h1:6Pwi1b3QdY65cuv6SyVO0FgPd5J3Bl7wf/nQQjinHMA=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b h1:C8S2+VttkHFdOOCXJe+YGfa4vHYwlt4Zx+IVXQ97jYg=
+github.com/jackc/pgtype v1.6.2 h1:b3pDeuhbbzBYcg5kwNmNDun4pFUD/0AAr1kLXZLeNt8=
+github.com/jackc/pgx/v4 v4.10.1 h1:/6Q3ye4myIj6AaplUm+eRcz4OhK9HAvFf4ePsG40LJY=
+github.com/jackc/puddle v1.1.3 h1:JnPg/5Q9xVJGfjsO5CPUOjnJps1JaRUm8I9FXVCFK94=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/joefitzgerald/rainbow-reporter v0.1.0 h1:AuMG652zjdzI0YCCnXAqATtRBpGXMcAnrajcaTrSeuo=
+github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
+github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
+github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
+github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
+github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5 h1:PJr+ZMXIecYc1Ey2zucXdR73SMBtgjPgwa31099IMv0=
+github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM=
+github.com/k0kubun/pp v2.3.0+incompatible h1:EKhKbi34VQDWJtq+zpsKSEhkHHs9w2P8Izbq8IhLVSo=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA=
+github.com/karrick/godirwalk v1.10.3 h1:lOpSw2vJP0y5eLBW906QwKsUK/fe/QDyoqM5rnnuPDY=
+github.com/kisielk/errcheck v1.5.0 h1:e8esj/e4R+SAOwFwN+n3zr0nYeCyeweozKfO23MvHzY=
+github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
+github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
+github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e h1:+lIPJOWl+jSiJOc70QXJ07+2eg2Jy2EC7Mi11BWujeM=
+github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
+github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI=
+github.com/ktrysmt/go-bitbucket v0.6.4 h1:C8dUGp0qkwncKtAnozHCbbqhptefzEd1I0sfnuy9rYQ=
+github.com/leodido/go-urn v1.2.2/go.mod h1:kUaIbLZWttglzwNuG0pgsh5vuV6u2YcGBYz1hIPjtOQ=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 h1:jUp75lepDg0phMUJBCmvaeFDldD2N3S1lBuPwUTszio=
+github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac=
+github.com/lyft/protoc-gen-star v0.5.3 h1:zSGLzsUew8RT+ZKPHc3jnf8XLaVyHzTcAFBzHtCNR20=
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
+github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2 h1:JgVTCPf0uBVcUSWpyXmGpgOc62nK5HWUBKAGc3Qqa5k=
+github.com/markbates/pkger v0.15.1 h1:3MPelV53RnGSW07izx5xGxl4e/sdRD6zqseIk0rMASY=
+github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
+github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=
+github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI=
+github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
+github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o=
+github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0=
+github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
+github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.12/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE=
+github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
+github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible h1:aKW/4cBs+yK6gpqU3K/oIwk9Q/XICqd3zOX/UFuvqmk=
+github.com/mitchellh/cli v1.1.0 h1:tEElEatulEHDeedTxwckzyYMA5c86fbmNIUL1hBIiTg=
+github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
+github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/gox v0.4.0 h1:lfGJxY7ToLJQjHHwi0EX6uYBdK78egf954SQl13PQJc=
+github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY=
+github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ=
+github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
+github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI=
+github.com/moby/sys/signal v0.6.0 h1:aDpY94H8VlhTGa9sNYUFCFsMZIUh5wm0B6XkIoJj/iY=
+github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc=
+github.com/mrunalp/fileutils v0.5.0 h1:NKzVxiH7eSk+OQ4M+ZYW1K6h27RUV3MI6NUTsHhU6Z4=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
+github.com/mutecomm/go-sqlcipher/v4 v4.4.0 h1:sV1tWCWGAVlPhNGT95Q+z/txFxuhAYWwHD1afF5bMZg=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
+github.com/myesui/uuid v1.0.0 h1:xCBmH4l5KuvLYc5L7AS7SZg9/jKdIFubM7OVoLqaQUI=
+github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8 h1:P48LjvUQpTReR3TQRbxSeSBsMXzfK0uol7eRcr7VBYQ=
+github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=
+github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba h1:fhFP5RliM2HW/8XdcO5QngSfFli9GcRIpMXvypTQt6E=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5 h1:58+kh9C6jJVXYjt8IE48G2eWl6BjwU5Gj0gqY84fy78=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
+github.com/onsi/ginkgo/v2 v2.9.1/go.mod h1:FEcmzVcCHl+4o9bQZVab+4dC9+j+91t2FHSzmGAPfuo=
+github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ=
+github.com/openacid/errors v0.8.1 h1:Hrj9WENDoj5jP27ZfF60SY5LShbxei+sxKZa0EP+oDw=
+github.com/openacid/low v0.1.10 h1:rKpmB5CHtKoPq9tFiqUvRk8vtWaPympL2D2dNfw3PvI=
+github.com/openacid/must v0.1.3 h1:deanGZVyVwV+ozfwNFbRU5YF7czXeQ67s8GVyZxzKW4=
+github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39 h1:H7DMc6FAjgwZZi8BRqjrAAHWoqEr5e5L6pS4V0ezet4=
+github.com/opencontainers/selinux v1.10.0 h1:rAiKF8hTcgLI3w0DHm6i0ylVVcOrlgR1kK99DRLDhyU=
+github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pborman/uuid v0.0.0-20180122190007-c65b2f87fee3 h1:9J0mOv1rXIBlRjQCiAGyx9C3dZZh5uIa3HU0oTV8v1E=
+github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
+github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
+github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
+github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ=
+github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
+github.com/phpdave11/gofpdf v1.4.2 h1:KPKiIbfwbvC/wOncwhrpRdXVj2CZTCFlw4wnoyjtHfQ=
+github.com/phpdave11/gofpdi v1.0.12 h1:RZb9NG62cw/RW0rHAduVRo+98R8o/G1krcg2ns7DakQ=
+github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
+github.com/pierrec/lz4/v4 v4.1.8 h1:ieHkV+i2BRzngO4Wd/3HGowuZStgq6QkPsD1eolNAO4=
+github.com/pingcap/gofail v0.0.0-20181217135706-6a951c1e42c3 h1:04yuCf5NMvLU8rB2m4Qs3rynH7EYpMno3lHkewIOdMo=
+github.com/pingcap/log v0.0.0-20210317133921-96f4fcab92a4 h1:ERrF0fTuIOnwfGbt71Ji3DKbOEaP189tjym50u8gpC8=
+github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7 h1:k2BbABz9+TNpYRwsCCFS8pEEnFVOdbgEjL/kTlLuzZQ=
+github.com/pingcap/tidb/parser v0.0.0-20221126021158-6b02a5d8ba7d h1:1DyyRrgYeNjqPkgjrdEsaIbX+kHpuTTk5ZOCtrcRFcQ=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e h1:aoZm08cpOy4WuID//EZDgcC4zIxODThtZNPirFr42+A=
+github.com/pkg/sftp v1.13.1 h1:I2qBYMChEhIjOgazfJmV3/mZM256btk6wkCDRmW7JYs=
+github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo=
+github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021 h1:0XM1XL/OFFJjXsYXlG30spTkV/E9+gmd5GD1w2HE8xM=
+github.com/prashantv/gostub v1.0.0 h1:wTzvgO04xSS3gHuz6Vhuo0/kvWelyJxwNS0IRBPAwGY=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
+github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
+github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
+github.com/rivo/tview v0.0.0-20200219210816-cd38d7432498 h1:4CFNy7/q7P06AsIONZzuWy7jcdqEmYQvOZ9FAFZdbls=
+github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY=
+github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc=
+github.com/rs/zerolog v1.15.0 h1:uPRuwkWF4J6fGsJ2R0Gn2jB1EQiav9k3S6CSdygQJXY=
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58 h1:nlG4Wa5+minh3S9LVFtNoY+GVRiudA2e3EVfcCi3RCA=
+github.com/rwtodd/Go.Sed v0.0.0-20210816025313-55464686f9ef h1:UD99BBEz19F21KhOFHLNAI6KodDWUvXaPr4Oqu8yMV8=
+github.com/rwtodd/Go.Sed v0.0.0-20210816025313-55464686f9ef/go.mod h1:8AEUvGVi2uQ5b24BIhcr0GCcpd/RNAFWaN2CJFrWIIQ=
+github.com/ryanuber/columnize v2.1.0+incompatible h1:j1Wcmh8OrK4Q7GXY+V7SVSY8nUWQxHW5TkBe7YUl+2s=
+github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1 h1:ZFfeKAhIQiiOrQaI3/znw0gOmYpO28Tcu1YaqMa/jtQ=
+github.com/sagikazarmark/crypt v0.9.0 h1:fipzMFW34hFUEc4D7fsLQFtE7yElkpgyS2zruedRdZk=
+github.com/sagikazarmark/crypt v0.9.0/go.mod h1:RnH7sEhxfdnPm1z+XMgSLjWTEIjyK4z2dw6+4vHTMuo=
+github.com/sanity-io/litter v1.2.0 h1:DGJO0bxH/+C2EukzOSBmAlxmkhVMGqzvcx/rvySYw9M=
+github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
+github.com/sclevine/agouti v3.0.0+incompatible h1:8IBJS6PWz3uTlMP3YBIR5f+KAldcGuOeFkFbUWfBgK4=
+github.com/sclevine/spec v1.2.0 h1:1Jwdf9jSfDl9NVmt8ndHqbTZ7XCCPbh1jI3hkDBHVYA=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921 h1:58EBmR2dMNL2n/FnbQewK3D14nXr0V9CObDSvMJLq+Y=
+github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371 h1:SWV2fHctRpRrp49VXJ6UZja7gU9QLHwRpIPBN89SKEo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca h1:3fECS8atRjByijiI8yYiuwLwQ2ZxXobW7ua/8GRB3pI=
+github.com/snowflakedb/gosnowflake v1.6.3 h1:EJDdDi74YbYt1ty164ge3fMZ0eVZ6KA7b1zmAa/wnRo=
+github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
+github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
+github.com/sqs/goreturns v0.0.0-20181028201513-538ac6014518/go.mod h1:CKI4AZ4XmGV240rTHfO0hfE83S6/a3/Q1siZJ/vXf7A=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 h1:lIOOHPEbXzO3vnmx2gok1Tfs31Q8GQqKLc8vVqyQq/I=
+github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU=
+github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
+github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/struCoder/pidusage v0.1.2 h1:fFPTThlcWFQyizv3xKs5Lyq1lpG5lZ36arEGNhWz2Vs=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
+github.com/tchap/go-patricia v2.2.6+incompatible h1:JvoDL7JSoIP2HDE8AbDH3zC8QBPxmzYe32HHy5yQ+Ck=
+github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJfDRtkanvQPiooDH8HvJ2FBh+iKT/OmiQQ=
+github.com/tinylib/msgp v1.1.5 h1:2gXmtWueD2HefZHQe1QOy9HVzmFrLOVvsXwXBQ0ayy0=
+github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
+github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31 h1:OXcKh35JaYsGMRzpvFkLv/MEyPuL49CThT1pZ8aSml4=
+github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c h1:u6SKchux2yDvFQnDHS3lPnIRmfVJ5Sxy3ao2SIdysLQ=
+github.com/uber-go/atomic v1.3.2 h1:Azu9lPBWRNKzYXSIwRfgRuDuS0YKsK4NFhiQv98gkxo=
+github.com/ugorji/go v1.2.7 h1:qYhyWUUd6WbiM+C6JZAUkIJt/1WrjzNHY9+KCIjVqTo=
+github.com/ugorji/go/codec v1.2.9/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
+github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d h1:ggUgChAeyge4NZ4QUw6lhHsVymzwSDJOZcE0s2X8S20=
+github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
+github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8=
+github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5 h1:+UB2BJA852UkGH42H+Oee69djmxS3ANzl2b/JtT1YiA=
+github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
+github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=
+github.com/xanzy/go-gitlab v0.15.0 h1:rWtwKTgEnXyNUGrOArN7yyc3THRkpYcKXIXia9abywQ=
+github.com/xdg-go/scram v1.0.2 h1:akYIkZ28e6A96dkWNJQu3nmCzH3YfwMPQExUYDaRv7w=
+github.com/xdg-go/stringprep v1.0.2 h1:6iq84/ryjjeRmMJwxutI51F2GIPlP5BfTvXHeYjyhBc=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f h1:mvXjJIHRZyhNuGassLTcXTwjiWq7NmjdavZsUnmFybQ=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow=
+github.com/yuin/goldmark v1.4.13 h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY=
+github.com/zenazn/goji v0.9.0 h1:RSQQAbXGArQ0dIDEq+PI6WqN6if+5KHu6x2Cx/GXLTQ=
+gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b h1:7gd+rd8P3bqcn/96gOZa3F5dpJr/vEiDQYlNb/y2uNs=
+go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489 h1:1JFLBqwIgdyHN1ZtgjTBwO+blA6gVOmZurpiMEsETKo=
+go.etcd.io/etcd/api/v3 v3.5.6 h1:Cy2qx3npLcYqTKqGJzMypnMv2tiRyifZJ17BlWIWA7A=
+go.etcd.io/etcd/api/v3 v3.5.6/go.mod h1:KFtNaxGDw4Yx/BA4iPPwevUTAuqcsPxzyX8PHydchN8=
+go.etcd.io/etcd/client/pkg/v3 v3.5.6 h1:TXQWYceBKqLp4sa87rcPs11SXxUA/mHwH975v+BDvLU=
+go.etcd.io/etcd/client/pkg/v3 v3.5.6/go.mod h1:ggrwbk069qxpKPq8/FKkQ3Xq9y39kbFR4LnKszpRXeQ=
+go.etcd.io/etcd/client/v2 v2.305.6 h1:fIDR0p4KMjw01MJMfUIDWdQbjo06PD6CeYM5z4EHLi0=
+go.etcd.io/etcd/client/v2 v2.305.6/go.mod h1:BHha8XJGe8vCIBfWBpbBLVZ4QjOIlfoouvOwydu63E0=
+go.etcd.io/etcd/client/v3 v3.5.6 h1:coLs69PWCXE9G4FKquzNaSHrRyMCAXwF+IX1tAPVO8E=
+go.etcd.io/etcd/client/v3 v3.5.6/go.mod h1:f6GRinRMCsFVv9Ht42EyY7nfsVGwrNO0WEoS2pRKzQk=
+go.etcd.io/etcd/pkg/v3 v3.5.0 h1:ntrg6vvKRW26JRmHTE0iNlDgYK6JX3hg/4cD62X0ixk=
+go.etcd.io/etcd/raft/v3 v3.5.0 h1:kw2TmO3yFTgE+F0mdKkG7xMxkit2duBDa2Hu6D/HMlw=
+go.etcd.io/etcd/server/v3 v3.5.0 h1:jk8D/lwGEDlQU9kZXUFMSANkE22Sg5+mW27ip8xcF9E=
+go.mongodb.org/mongo-driver v1.7.0 h1:hHrvOBWlWB2c7+8Gh/Xi5jj82AgidK/t7KVXBZ+IyUA=
+go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
+go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1 h1:A/5uWzF44DlIgdm/PQFwfMkW0JX+cIcQi/SwLAmZP5M=
+go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
+go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
+go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0 h1:Ky1MObd188aGbgb5OgNnwGuEEwI9MVIcc7rBW6zk5Ak=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 h1:Q3C9yzW6I9jqEc8sawxzxZmY48fs9u220KXq6d5s3XU=
+go.opentelemetry.io/otel v1.3.0 h1:APxLf0eiBwLl+SOXiJJCVYzA1OOJNyAoV8C5RNRyy7Y=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0 h1:R/OBkMoGgfy2fLhs2QhkCI1w4HLEQX92GCcJB6SSdNk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0 h1:giGm8w67Ja7amYNfYMdme7xSp2pIxThWopw8+QP51Yk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0 h1:VQbUHoJqytHHSJ1OZodPH9tvZZSVzUHjPHpkO85sT6k=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0 h1:Ydage/P0fRrSPpZeCVxzjqGcI6iVmG2xb43+IR8cjqM=
+go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8=
+go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw=
+go.opentelemetry.io/otel/sdk v1.3.0 h1:3278edCoH89MEJ0Ky8WQXVmDQv3FX4ZJ3Pp+9fJreAI=
+go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g=
+go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8=
+go.opentelemetry.io/otel/trace v1.3.0 h1:doy8Hzb1RJ+I3yFhtDmwNc7tIyw1tNMOIsyPzp1NOGY=
+go.opentelemetry.io/proto/otlp v0.11.0 h1:cLDgIBTf4lLOlztkhzAEdQsJ4Lj+i5Wc9k6Nn0K1VyU=
+go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
+go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
+golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
+golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/image v0.0.0-20210216034530-4410531fe030 h1:lP9pYkih3DUSC641giIXa2XqfTIbbbRr0w2EOTA7wHA=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs=
+golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
+golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
+golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
+golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
+golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
+golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
+golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
+golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
+golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
+golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
+gonum.org/v1/gonum v0.9.3 h1:DnoIG+QAMaF5NvxnGe/oKsgKcAc6PcUyl8q0VetfQ8s=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
+gonum.org/v1/plot v0.9.0 h1:3sEo36Uopv1/SA/dMFFaxXoL5XyikJ9Sf2Vll/k6+2E=
+google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
+google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
+google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
+google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
+google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
+google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
+google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
+google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
+google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
+google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s=
+google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70=
+google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo=
+google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0=
+google.golang.org/api v0.107.0 h1:I2SlFjD8ZWabaIFOfeEDg3pf0BHJDh6iYQ1ic3Yu/UU=
+google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8 h1:Cpp2P6TPjujNoC5M2KHY6g7wfyLYfIWRZaSdIKfDasA=
+google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
+google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
+google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
+google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
+google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw=
+google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s=
+google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg=
+google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE=
+google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
+google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
+google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
+google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
+google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE=
+google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
+gopkg.in/cheggaaa/pb.v1 v1.0.25 h1:Ev7yu1/f6+d+b3pi5vPdRPc6nNtP1umSfcWiEfRqv6I=
+gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec h1:RlWgLqCMMIYYEVcAR5MDsuHlVkaIPDAF+5Dehzg8L5A=
+gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 h1:/saqWwm73dLmuzbNhe92F0QsZ/KiFND+esHco2v1hiY=
+gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=
+gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
+gopkg.in/stretchr/testify.v1 v1.2.2 h1:yhQC6Uy5CqibAIlk1wlusa/MJ3iAN49/BsR/dCCKz3M=
+gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
+gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gorm.io/driver/mysql v1.4.3/go.mod h1:sSIebwZAVPiT+27jK9HIwvsqOGKx3YMPmrA3mBJR10c=
+gorm.io/driver/postgres v1.0.8 h1:PAgM+PaHOSAeroTjHkCHCBIHHoBIf9RgPWGo8dF2DA8=
+gorm.io/driver/sqlite v1.3.1/go.mod h1:wJx0hJspfycZ6myN38x1O/AqLtNS6c5o9TndewFbELg=
+gorm.io/gorm v1.24.0/go.mod h1:DVrVomtaYTbqs7gB/x2uVvqnXzv0nqjB396B8cG4dBA=
+gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
+gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk=
+honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8=
+k8s.io/apiserver v0.22.5 h1:71krQxCUz218ecb+nPhfDsNB6QgP1/4EMvi1a2uYBlg=
+k8s.io/code-generator v0.19.7 h1:kM/68Y26Z/u//TFc1ggVVcg62te8A2yQh57jBfD0FWQ=
+k8s.io/component-base v0.22.5 h1:U0eHqZm7mAFE42hFwYhY6ze/MmVaW00JpMrzVsQmzYE=
+k8s.io/cri-api v0.23.1 h1:0DHL/hpTf4Fp+QkUXFefWcp1fhjXr9OlNdY9X99c+O8=
+k8s.io/gengo v0.0.0-20201113003025-83324d819ded h1:JApXBKYyB7l9xx+DK7/+mFjC7A9Bt5A93FPvFD0HIFE=
+k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
+k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80=
+k8s.io/kubernetes v1.13.0 h1:qTfB+u5M92k2fCCCVP2iuhgwwSOv1EkAkvQY1tQODD8=
+k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs=
+modernc.org/b v1.0.0 h1:vpvqeyp17ddcQWF29Czawql4lDdABCDRbXRAS4+aF2o=
+modernc.org/cc/v3 v3.32.4 h1:1ScT6MCQRWwvwVdERhGPsPq0f55J1/pFEOCiqM7zc78=
+modernc.org/ccgo/v3 v3.9.2 h1:mOLFgduk60HFuPmxSix3AluTEh7zhozkby+e1VDo/ro=
+modernc.org/db v1.0.0 h1:2c6NdCfaLnshSvY7OU09cyAY0gYXUZj4lmg5ItHyucg=
+modernc.org/file v1.0.0 h1:9/PdvjVxd5+LcWUQIfapAWRGOkDLK90rloa8s/au06A=
+modernc.org/fileutil v1.0.0 h1:Z1AFLZwl6BO8A5NldQg/xTSjGLetp+1Ubvl4alfGx8w=
+modernc.org/golex v1.0.0 h1:wWpDlbK8ejRfSyi0frMyhilD3JBvtcx2AdGDnU+JtsE=
+modernc.org/golex v1.0.1 h1:EYKY1a3wStt0RzHaH8mdSRNg78Ub0OHxYfCRWw35YtM=
+modernc.org/internal v1.0.0 h1:XMDsFDcBDsibbBnHB2xzljZ+B1yrOVLEFkKL2u15Glw=
+modernc.org/lex v1.0.0 h1:w0dxp18i1q+aSE7GkepvwzvVWTLoCIQ2oDgTFAV2JZU=
+modernc.org/lexer v1.0.0 h1:D2xE6YTaH7aiEC7o/+rbx6qTAEr1uY83peKwkamIdQ0=
+modernc.org/libc v1.9.5 h1:zv111ldxmP7DJ5mOIqzRbza7ZDl3kh4ncKfASB2jIYY=
+modernc.org/lldb v1.0.0 h1:6vjDJxQEfhlOLwl4bhpwIz00uyFK4EmSYcbwqwbynsc=
+modernc.org/mathutil v1.2.2 h1:+yFk8hBprV+4c0U9GjFtL+dV3N8hOJ8JCituQcMShFY=
+modernc.org/memory v1.0.4 h1:utMBrFcpnQDdNsmM6asmyH/FM9TqLPS7XF7otpJmrwM=
+modernc.org/opt v0.1.1 h1:/0RX92k9vwVeDXj+Xn23DKp2VJubL7k8qNffND6qn3A=
+modernc.org/parser v1.0.2 h1:/qHLDn1ezrcRk9/XbErYp84bPPM4+w0kIDuvMdRk6Vc=
+modernc.org/ql v1.0.0 h1:bIQ/trWNVjQPlinI6jdOQsi195SIturGo3mp5hsDqVU=
+modernc.org/scanner v1.0.1 h1:rmWBTztgQKLM2CYx0uTQGhAxgnrILDEOVXJsEq/I4Js=
+modernc.org/sortutil v1.1.0 h1:oP3U4uM+NT/qBQcbg/K2iqAX0Nx7B1b6YZtq3Gk/PjM=
+modernc.org/sqlite v1.10.6 h1:iNDTQbULcm0IJAqrzCm2JcCqxaKRS94rJ5/clBMRmc8=
+modernc.org/strutil v1.1.0 h1:+1/yCzZxY2pZwwrsbH+4T7BQMoLQ9QiBshRC9eicYsc=
+modernc.org/tcl v1.5.2 h1:sYNjGr4zK6cDH74USl8wVJRrvDX6UOLpG0j4lFvR0W0=
+modernc.org/tcl v1.15.2/go.mod h1:3+k/ZaEbKrC8ePv8zJWPtBSW0V7Gg9g8rkmhI1Kfs3c=
+modernc.org/token v1.0.0 h1:a0jaWiNMDhDUtqOj09wvjWWAqd3q7WpBulmL9H2egsk=
+modernc.org/y v1.0.1 h1:+QT+MtLkwkvLkh3fYQq+YD5vw2s5paVE73jdl5R/Py8=
+modernc.org/z v1.0.1 h1:WyIDpEpAIx4Hel6q/Pcgj/VhaQV5XPJ2I6ryIYbjnpc=
+modernc.org/z v1.7.3/go.mod h1:Ipv4tsdxZRbQyLq9Q1M6gdbkxYzdlrciF2Hi/lS7nWE=
+modernc.org/zappy v1.0.0 h1:dPVaP+3ueIUv4guk8PuZ2wiUGcJ1WUVvIheeSSTD0yk=
+rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE=
+rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4=
+rsc.io/quote/v3 v3.1.0 h1:9JKUTTIUgS6kzR9mK1YuGKv6Nl+DijDNIc0ghT58FaY=
+rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22 h1:fmRfl9WJ4ApJn7LxNuED4m0t18qivVQOxP6aAYG9J6c=
+sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
+sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67 h1:e1sMhtVq9AfcEy8AXNb8eSg6gbzfdpYhoNqnPJa+GzI=
diff --git a/dbm-services/mysql/db-partition/.ci/codecc.yml b/dbm-services/mysql/db-partition/.ci/codecc.yml
new file mode 100644
index 0000000000..c824dddd90
--- /dev/null
+++ b/dbm-services/mysql/db-partition/.ci/codecc.yml
@@ -0,0 +1,29 @@
+version: v2.0
+resources:
+  repositories:
+    - repository: ci_templates/public/codecc
+      name: codecc
+on:
+  mr:
+    target-branches:  [ "*" ]
+stages:
+  - name: "Code Check"
+    check-out:
+      gates:
+        - template: commonGate.yml@codecc
+      timeout-hours: 10
+    jobs:
+      codecc:
+        name: "CodeCC Code Check"
+        runs-on:
+          pool-name: docker  # options: docker-on-devcloud, docker, local, agentless
+          container:
+            image: mirrors.tencent.com/ci/tlinux3_ci:2.0.0
+        steps:
+          - checkout: self
+          - uses: CodeccCheckAtomDebug@4.*
+            name: Tencent Code Analysis
+            with:
+              beAutoLang: true # auto-detect the project language
+              checkerSetType: "openScan" # rule-set type: "normal" uses a self-configured rule set, "openScan" follows open-source governance requirements
+              toolScanType: "2" # scan mode: quick full scan [1] | full scan [0] | diff scan [6] | MR/PR scan [2]; default is 1
diff --git a/dbm-services/mysql/db-partition/.ci/open_source_check.yml b/dbm-services/mysql/db-partition/.ci/open_source_check.yml
new file mode 100644
index 0000000000..f421f315f3
--- /dev/null
+++ b/dbm-services/mysql/db-partition/.ci/open_source_check.yml
@@ -0,0 +1,84 @@
+version: "v2.0"
+name: "Open Source Check"
+label: []
+variables: {}
+stages:
+- name: "Open Source Check"
+  label:
+  - "Build"
+  jobs:
+    job_AfK:
+      name: "Build environment - LINUX"
+      runs-on:
+        pool-name: "docker"
+        container:
+          image: "mirrors.tencent.com/ci/tlinux3_ci:2.3.0"
+        needs: {}
+      steps:
+      - checkout: self
+      - name: "Sensitive information check - department RTX"
+        uses: "SensitiveRtxChecker@3.*"
+      - name: "Tencent Code Analysis (official - code analysis working group)"
+        uses: "CodeccCheckAtomDebug@4.*"
+        with:
+          beAutoLang: true
+          languages:
+          - "GOLANG"
+          checkerSetType: "communityOpenScan"
+          tools:
+          - "WOODPECKER_COMMITSCAN"
+          - "SCC"
+          - "PECKER_SECURITY"
+          - "SENSITIVE"
+          - "DUPC"
+          - "IP_CHECK"
+          - "WOODPECKER_SENSITIVE"
+          - "HORUSPY"
+          - "XCHECK"
+          - "CCN"
+          asyncTask: false
+          asyncTaskId: ""
+          scriptType: "SHELL"
+          script: |-
+            # Coverity/Klocwork will compile your code by invoking a build script, to trace deep defects
+            # write a build script build.sh using your build tool (e.g. maven/cmake)
+            # and make sure build.sh can compile the code
+            # cd path/to/build.sh
+            # sh build.sh
+          languageRuleSetMap: {}
+          checkerSetEnvType: "prod"
+          multiPipelineMark: ""
+          rtxReceiverType: "1"
+          botWebhookUrl: ""
+          botRemindRange: "2"
+          botRemindSeverity: "7"
+          botRemaindTools: []
+          emailReceiverType: "1"
+          emailCCReceiverList: []
+          instantReportStatus: "2"
+          reportDate: []
+          reportTime: ""
+          reportTools: []
+          toolScanType: "1"
+          diffBranch: ""
+          byFile: false
+          mrCommentEnable: true
+          prohibitIgnore: false
+          newDefectJudgeFromDate: ""
+          transferAuthorList: []
+          path: []
+          customPath: []
+          scanTestSource: false
+          openScanPrj: false
+          openScanFilterEnable: false
+          issueSystem: "TAPD"
+          issueSubSystem: ""
+          issueResolvers: []
+          issueReceivers: []
+          issueFindByVersion: ""
+          maxIssue: 1000
+          issueAutoCommit: false
+  check-out:
+    gates:
+      - template: open_source_gate.yml
+    timeout-hours: 10
\ No newline at end of file
diff --git a/dbm-services/mysql/db-partition/.ci/templates/open_source_gate.yml b/dbm-services/mysql/db-partition/.ci/templates/open_source_gate.yml
new file mode 100644
index 0000000000..d14127e08c
--- /dev/null
+++ b/dbm-services/mysql/db-partition/.ci/templates/open_source_gate.yml
@@ -0,0 +1,26 @@
+parameters:
+  - name: receivers
+    type: array
+    default: [ "${{ ci.actor }}" ]
+
+gates:
+  - name: open-source-gate
+    rule:
+      - "CodeccCheckAtomDebug.all_risk <= 0"
+      - "CodeccCheckAtomDebug.high_med_new_issue <= 0"
+      - "CodeccCheckAtomDebug.ccn_new_max_value <= 40"
+      - "CodeccCheckAtomDebug.sensitive_defect <= 0"
+      - "CodeccCheckAtomDebug.dupc_average <= 15"
+      - "CodeccCheckAtomDebug.ccn_average <= 3"
+      - "CodeccCheckAtomDebug.ccn_new_defect <= 0"
+      - "CodeccCheckAtomDebug.ccn_funcmax <= 20"
+      - "CodeccCheckAtomDebug.woodpecker_all_defect <= 0"
+      - "CodeccCheckAtomDebug.horuspy_all_defect <= 0"
+      - "CodeccCheckAtomDebug.go_serious_defect <= 0"
+      - "CodeccCheckAtomDebug.go_all_defect <= 100"
+    notify-on-fail:
+      - type: wework-message
+        receivers: ${{ parameters.receivers }}
+    continue-on-fail:
+      gatekeepers:
+        - "${{ ci.actor }}"
\ No newline at end of file
diff --git a/dbm-services/mysql/db-partition/.gitignore b/dbm-services/mysql/db-partition/.gitignore
new file mode 100644
index 0000000000..dee084b6f6
--- /dev/null
+++ b/dbm-services/mysql/db-partition/.gitignore
@@ -0,0 +1,14 @@
+vendor/
+log/
+nohup.out
+Brewfile
+.idea/
+.vscode/
+.code.yml
+.golangci.yml
+service/service.go
+test.log
+test*
+main
+partition
+
diff --git a/dbm-services/mysql/db-partition/Dockerfile b/dbm-services/mysql/db-partition/Dockerfile
new file mode 100644
index 0000000000..e718e23fe1
--- /dev/null
+++ b/dbm-services/mysql/db-partition/Dockerfile
@@ -0,0 +1,11 @@
+FROM mirrors.tencent.com/sccmsp/golang:1.16
+MAINTAINER tencent
+RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
+RUN echo "Asia/Shanghai" > /etc/timezone
+
+ADD partition /
+
+WORKDIR /
+CMD /partition --migrate
+
+
diff --git a/dbm-services/mysql/db-partition/Makefile b/dbm-services/mysql/db-partition/Makefile
new file mode 100644
index 0000000000..f145f936cc
--- /dev/null
+++ b/dbm-services/mysql/db-partition/Makefile
@@ -0,0 +1,48 @@
+SHELL := /bin/bash
+BASEDIR = $(shell pwd)
+SRV_NAME = dbpartition
+COMMAND_NAME = partition
+VER = latest
+CURRENT_VERSION = $(VER)
+TEST_VERSION = test-$(VER)
+NAMESPACE = sccmsp
+DH_USER = Ex_vincixu
+# never commit registry credentials; supply DH_PASS via the environment instead
+DH_PASS ?=
+DH_URL = mirrors.tencent.com
+export GOOS = linux
+#export GOOS = darwin
+BUILD_PATH = .
+
+all: build
+api:
+	CGO_ENABLED=0 GOOS=${GOOS} GOARCH=amd64 go build -o $(COMMAND_NAME)  -v ${BUILD_PATH}
+build:clean
+	CGO_ENABLED=0 GOOS=${GOOS} GOARCH=amd64 go build -o $(COMMAND_NAME) -v ${BUILD_PATH}
+
+publish:build
+	docker build --build-arg SRV_NAME=$(COMMAND_NAME) --rm -t $(SRV_NAME):$(CURRENT_VERSION) .
+	docker tag $(SRV_NAME):$(CURRENT_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(CURRENT_VERSION)
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(CURRENT_VERSION)
+
+test:build
+	docker build --build-arg SRV_NAME=$(COMMAND_NAME) --rm -t $(SRV_NAME):$(TEST_VERSION) .
+	docker tag $(SRV_NAME):$(TEST_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(TEST_VERSION)
+	#docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(TEST_VERSION)
+
+test_mac:build
+	docker build --build-arg SRV_NAME=$(COMMAND_NAME) --rm -t $(SRV_NAME):$(TEST_VERSION) .
+	docker tag $(SRV_NAME):$(TEST_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(TEST_VERSION)
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(TEST_VERSION)
+
+curl:
+	@echo curl -XGET -H "Content-Type: application/json" http://127.0.0.1:8080/user
+help:
+	@echo "make - compile go source"
+	@echo "make gotool - run gofmt & go vet"
+	@echo "make clean - do some clean job"
+
+clean:
+	rm -f $(COMMAND_NAME)
+
+.PHONY: all gotool clean help api curl
\ No newline at end of file
diff --git a/dbm-services/mysql/db-partition/README.md b/dbm-services/mysql/db-partition/README.md
new file mode 100644
index 0000000000..d72a24b3e2
--- /dev/null
+++ b/dbm-services/mysql/db-partition/README.md
@@ -0,0 +1,124 @@
+dbm-services/mysql/db-partition provides partition-table management and configuration for MySQL/Spider.
+Six partition types are supported, as follows:
+
+(1) type0
+RANGE partitioning using to_days(); table structure:  
+CREATE TABLE `tb0` (  
+`a` int(11) NOT NULL,  
+`b` datetime NOT NULL,  
+PRIMARY KEY (`a`,`b`)  
+) ENGINE=InnoDB DEFAULT CHARSET=utf8  
+PARTITION BY RANGE (to_days(b))  
+(PARTITION p20200607 VALUES LESS THAN (737949) ENGINE = InnoDB,  
+PARTITION p20200608 VALUES LESS THAN (737950) ENGINE = InnoDB,  
+PARTITION p20200609 VALUES LESS THAN (737951) ENGINE = InnoDB,  
+PARTITION p20200610 VALUES LESS THAN (737952) ENGINE = InnoDB,  
+PARTITION p20200611 VALUES LESS THAN (737953) ENGINE = InnoDB,  
+PARTITION p20200612 VALUES LESS THAN (737954) ENGINE = InnoDB,  
+PARTITION p20200613 VALUES LESS THAN (737955) ENGINE = InnoDB,  
+PARTITION p20200614 VALUES LESS THAN (737956) ENGINE = InnoDB)  
+
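+Note: a partition named pYYYYMMDD covers exactly that day, because its upper bound is
+TO_DAYS() of the following day. For example, SELECT TO_DAYS('2020-06-08') returns 737949,
+the bound of partition p20200607 above.
+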
+(2) type1
+LIST partitioning using to_days(); table structure:
+CREATE TABLE `tb1` (  
+`a` int(11) NOT NULL,  
+`b` datetime NOT NULL,  
+PRIMARY KEY (`a`,`b`)  
+) ENGINE=InnoDB DEFAULT CHARSET=utf8
+PARTITION BY LIST (to_days(b))  
+(PARTITION p20200528 VALUES IN (737938) ENGINE = InnoDB,  
+PARTITION p20200529 VALUES IN (737939) ENGINE = InnoDB)
+
+(3) type3
+LIST partitioning on an integer column; table structure:
+CREATE TABLE `tb3` (  
+`a` int(11) NOT NULL,  
+`b` int(11) NOT NULL,  
+PRIMARY KEY (`a`,`b`)  
+) ENGINE=InnoDB DEFAULT CHARSET=utf8
+PARTITION BY LIST (b)  
+(PARTITION p20200528 VALUES IN (20200528) ENGINE = InnoDB,  
+PARTITION p20200529 VALUES IN (20200529) ENGINE = InnoDB,  
+PARTITION p20200530 VALUES IN (20200530) ENGINE = InnoDB,  
+PARTITION p20200531 VALUES IN (20200531) ENGINE = InnoDB,  
+PARTITION p20200601 VALUES IN (20200601) ENGINE = InnoDB,  
+PARTITION p20200602 VALUES IN (20200602) ENGINE = InnoDB,  
+PARTITION p20200603 VALUES IN (20200603) ENGINE = InnoDB,  
+PARTITION p20200604 VALUES IN (20200604) ENGINE = InnoDB,  
+PARTITION p20200605 VALUES IN (20200605) ENGINE = InnoDB,  
+PARTITION p20200606 VALUES IN (20200606) ENGINE = InnoDB,  
+PARTITION p20200607 VALUES IN (20200607) ENGINE = InnoDB,  
+PARTITION p20200608 VALUES IN (20200608) ENGINE = InnoDB,  
+PARTITION p20200609 VALUES IN (20200609) ENGINE = InnoDB)  
+
+(4) type4
+RANGE COLUMNS partitioning; table structure:
+CREATE TABLE `tb4` (  
+`a` int(11) NOT NULL,  
+`b` datetime NOT NULL,  
+PRIMARY KEY (`a`,`b`)  
+) ENGINE=InnoDB DEFAULT CHARSET=utf8
+PARTITION BY RANGE COLUMNS(b)  
+(PARTITION p20200601 VALUES LESS THAN ('2020-06-02') ENGINE = InnoDB,  
+PARTITION p20200602 VALUES LESS THAN ('2020-06-03') ENGINE = InnoDB,  
+PARTITION p20200603 VALUES LESS THAN ('2020-06-04') ENGINE = InnoDB,  
+PARTITION p20200604 VALUES LESS THAN ('2020-06-05') ENGINE = InnoDB,  
+PARTITION p20200605 VALUES LESS THAN ('2020-06-06') ENGINE = InnoDB,  
+PARTITION p20200606 VALUES LESS THAN ('2020-06-07') ENGINE = InnoDB,  
+PARTITION p20200607 VALUES LESS THAN ('2020-06-08') ENGINE = InnoDB,  
+PARTITION p20200608 VALUES LESS THAN ('2020-06-09') ENGINE = InnoDB,  
+PARTITION p20200609 VALUES LESS THAN ('2020-06-10') ENGINE = InnoDB,  
+PARTITION p20200610 VALUES LESS THAN ('2020-06-11') ENGINE = InnoDB,  
+PARTITION p20200611 VALUES LESS THAN ('2020-06-12') ENGINE = InnoDB,  
+PARTITION p20200612 VALUES LESS THAN ('2020-06-13') ENGINE = InnoDB,  
+PARTITION p20200613 VALUES LESS THAN ('2020-06-14') ENGINE = InnoDB,  
+PARTITION p20200614 VALUES LESS THAN ('2020-06-15') ENGINE = InnoDB)  
+
+(5) type5
+RANGE partitioning using UNIX_TIMESTAMP(); table structure:
+CREATE TABLE `tb5` (  
+`a` int(11) NOT NULL,  
+`b` timestamp NOT NULL,  
+PRIMARY KEY (`a`,`b`)  
+) ENGINE=InnoDB DEFAULT CHARSET=utf8
+PARTITION BY RANGE (UNIX_TIMESTAMP(b))  
+(PARTITION p20200601 VALUES LESS THAN (UNIX_TIMESTAMP('2020-06-02')) ENGINE = InnoDB,  
+PARTITION p20200602 VALUES LESS THAN (UNIX_TIMESTAMP('2020-06-03')) ENGINE = InnoDB,  
+PARTITION p20200603 VALUES LESS THAN (UNIX_TIMESTAMP('2020-06-04')) ENGINE = InnoDB,  
+PARTITION p20200604 VALUES LESS THAN (UNIX_TIMESTAMP('2020-06-05')) ENGINE = InnoDB,  
+PARTITION p20200605 VALUES LESS THAN (UNIX_TIMESTAMP('2020-06-06')) ENGINE = InnoDB,  
+PARTITION p20200606 VALUES LESS THAN (UNIX_TIMESTAMP('2020-06-07')) ENGINE = InnoDB,  
+PARTITION p20200607 VALUES LESS THAN (UNIX_TIMESTAMP('2020-06-08')) ENGINE = InnoDB,  
+PARTITION p20200608 VALUES LESS THAN (UNIX_TIMESTAMP('2020-06-09')) ENGINE = InnoDB,  
+PARTITION p20200609 VALUES LESS THAN (UNIX_TIMESTAMP('2020-06-10')) ENGINE = InnoDB,  
+PARTITION p20200610 VALUES LESS THAN (UNIX_TIMESTAMP('2020-06-11')) ENGINE = InnoDB)  
+
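+Note: MySQL only permits UNIX_TIMESTAMP() as the partitioning function on a TIMESTAMP
+column, which is why type5 exists alongside the to_days() based types. The partition
+layout of any of these tables can be inspected with:
+SELECT PARTITION_NAME, PARTITION_DESCRIPTION FROM information_schema.PARTITIONS
+WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'tb5';
+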
+(6) type101
+RANGE or RANGE COLUMNS partitioning, compatible with legacy mapleleaf partitioned tables; table structure:
+CREATE TABLE `tb101` (  
+`a` int(11) NOT NULL,  
+`b` int(11) NOT NULL,  
+PRIMARY KEY (`a`,`b`)  
+) ENGINE=TokuDB DEFAULT CHARSET=utf8
+PARTITION BY RANGE (b)  
+(PARTITION p20200601 VALUES LESS THAN (20200601) ENGINE = TokuDB,  
+PARTITION p20200602 VALUES LESS THAN (20200602) ENGINE = TokuDB,  
+PARTITION p20200603 VALUES LESS THAN (20200603) ENGINE = TokuDB,  
+PARTITION p20200604 VALUES LESS THAN (20200604) ENGINE = TokuDB,  
+PARTITION p20200605 VALUES LESS THAN (20200605) ENGINE = TokuDB,  
+PARTITION p20200606 VALUES LESS THAN (20200606) ENGINE = TokuDB)  
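+
+For every type above, routine maintenance reduces to adding partitions ahead of time and
+dropping expired ones, e.g. (a sketch; partition names and bounds depend on the table's configuration):
+ALTER TABLE tb0 ADD PARTITION (PARTITION p20200615 VALUES LESS THAN (TO_DAYS('2020-06-16')));
+ALTER TABLE tb0 DROP PARTITION p20200607;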
\ No newline at end of file
diff --git a/dbm-services/mysql/db-partition/assests/assests.go b/dbm-services/mysql/db-partition/assests/assests.go
new file mode 100644
index 0000000000..1c4d3bda77
--- /dev/null
+++ b/dbm-services/mysql/db-partition/assests/assests.go
@@ -0,0 +1,2 @@
+// Package assests embeds and applies the service's database schema migrations.
+package assests
diff --git a/dbm-services/mysql/db-partition/assests/migrate.go b/dbm-services/mysql/db-partition/assests/migrate.go
new file mode 100644
index 0000000000..513bf50154
--- /dev/null
+++ b/dbm-services/mysql/db-partition/assests/migrate.go
@@ -0,0 +1,51 @@
+package assests
+
+import (
+	"embed"
+	"fmt"
+
+	"github.com/golang-migrate/migrate/v4"
+	_ "github.com/golang-migrate/migrate/v4/database/mysql" // mysql TODO
+	"github.com/golang-migrate/migrate/v4/source/iofs"
+	"github.com/pkg/errors"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
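+// fs embeds the SQL migration scripts into the binary.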
+//go:embed migrations/*.sql
+var fs embed.FS
+
+// DoMigrateFromEmbed applies the migrations found in the embedded go:embed filesystem.
+// A migrate.ErrNoChange result counts as success and returns nil.
+func DoMigrateFromEmbed() error {
+	var mig *migrate.Migrate
+	if d, err := iofs.New(fs, "migrations"); err != nil {
+		return err
+	} else {
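+		// The DSN built below looks like (hypothetical values): mysql://user:pass@tcp(127.0.0.1:3306)/dbpartition?charset=utf8&parseTime=true&loc=Local&...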
+		dbURL := fmt.Sprintf(
+			"mysql://%s:%s@tcp(%s:%d)/%s?charset=%s&parseTime=true&loc=Local&multiStatements=true&interpolateParams=true",
+			viper.GetString("db.user"),
+			viper.GetString("db.password"),
+			viper.GetString("db.host"),
+			viper.GetInt("db.port"),
+			viper.GetString("db.name"),
+			"utf8",
+		)
+		mig, err = migrate.NewWithSourceInstance("iofs", d, dbURL)
+		if err != nil {
+			return errors.WithMessage(err, "migrate from embed")
+		}
+		defer mig.Close()
+		err = mig.Up()
+		if err == nil {
+			slog.Info("migrate source from embed success")
+			return nil
+		} else if err == migrate.ErrNoChange {
+			slog.Info("migrate source from embed success with", "msg", err.Error())
+			return nil
+		} else {
+			slog.Error("migrate source from embed failed", err)
+			return err
+		}
+	}
+}
diff --git a/dbm-services/mysql/db-partition/assests/migrations/000001_init.down.sql.sql b/dbm-services/mysql/db-partition/assests/migrations/000001_init.down.sql.sql
new file mode 100644
index 0000000000..a4f40086f9
--- /dev/null
+++ b/dbm-services/mysql/db-partition/assests/migrations/000001_init.down.sql.sql
@@ -0,0 +1 @@
+SET NAMES utf8;
diff --git a/dbm-services/mysql/db-partition/assests/migrations/000001_init.up.sql b/dbm-services/mysql/db-partition/assests/migrations/000001_init.up.sql
new file mode 100644
index 0000000000..bcaf95e5d0
--- /dev/null
+++ b/dbm-services/mysql/db-partition/assests/migrations/000001_init.up.sql
@@ -0,0 +1 @@
+SET NAMES utf8;
\ No newline at end of file
diff --git a/dbm-services/mysql/db-partition/assests/migrations/000002_create_table.down.sql b/dbm-services/mysql/db-partition/assests/migrations/000002_create_table.down.sql
new file mode 100644
index 0000000000..577cd03bc7
--- /dev/null
+++ b/dbm-services/mysql/db-partition/assests/migrations/000002_create_table.down.sql
@@ -0,0 +1,2 @@
+DROP TABLE IF EXISTS schema_migrations;
+
diff --git a/dbm-services/mysql/db-partition/assests/migrations/000002_create_table.up.sql b/dbm-services/mysql/db-partition/assests/migrations/000002_create_table.up.sql
new file mode 100644
index 0000000000..a4c9838035
--- /dev/null
+++ b/dbm-services/mysql/db-partition/assests/migrations/000002_create_table.up.sql
@@ -0,0 +1,184 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost  Database: dbpartition
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3.2-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Table structure for table `mysql_partition_config`
+--
+
+/*!40101 SET @saved_cs_client   = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `mysql_partition_config` (
+    `id` bigint NOT NULL AUTO_INCREMENT,
+    `bk_biz_id` int NOT NULL,
+    `immute_domain` varchar(200) NOT NULL,
+    `port` int NOT NULL,
+    `bk_cloud_id` int NOT NULL,
+    `cluster_id` int NOT NULL,
+    `dblike` varchar(100) NOT NULL,
+    `tblike` varchar(100) NOT NULL,
+    `partition_column` varchar(100) DEFAULT NULL,
+    `partition_column_type` varchar(100) DEFAULT NULL,
+    `reserved_partition` int NOT NULL,
+    `extra_partition` int NOT NULL,
+    `partition_time_interval` int NOT NULL,
+    `partition_type` int NOT NULL,
+    `expire_time` int NOT NULL,
+    `creator` varchar(100) DEFAULT NULL,
+    `updator` varchar(100) DEFAULT NULL,
+    `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    `update_time` timestamp NOT NULL DEFAULT '2000-01-01 00:00:00',
+    `phase` varchar(100) NOT NULL,
+    PRIMARY KEY (`id`),
+    UNIQUE KEY `uniq` (`bk_biz_id`,`immute_domain`,`cluster_id`,`dblike`,`tblike`),
+    KEY `idx_cluster_id_phase` (`cluster_id`,`phase`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+
+--
+-- Table structure for table `spider_partition_config`
+--
+
+/*!40101 SET @saved_cs_client   = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `spider_partition_config` (
+  `id` bigint NOT NULL AUTO_INCREMENT,
+  `bk_biz_id` int NOT NULL,
+  `immute_domain` varchar(200) NOT NULL,
+  `port` int NOT NULL,
+  `bk_cloud_id` int NOT NULL,
+  `cluster_id` int NOT NULL,
+  `dblike` varchar(100) NOT NULL,
+  `tblike` varchar(100) NOT NULL,
+  `partition_column` varchar(100) DEFAULT NULL,
+  `partition_column_type` varchar(100) DEFAULT NULL,
+  `reserved_partition` int NOT NULL,
+  `extra_partition` int NOT NULL,
+  `partition_time_interval` int NOT NULL,
+  `partition_type` int NOT NULL,
+  `expire_time` int NOT NULL,
+  `creator` varchar(100) DEFAULT NULL,
+  `updator` varchar(100) DEFAULT NULL,
+  `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+  `update_time` timestamp NOT NULL DEFAULT '2000-01-01 00:00:00',
+  `phase` varchar(100) NOT NULL,
+  PRIMARY KEY (`id`),
+  UNIQUE KEY `uniq` (`bk_biz_id`,`immute_domain`,`cluster_id`,`dblike`,`tblike`),
+  KEY `idx_cluster_id_phase` (`cluster_id`,`phase`)
+  ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+
+--
+-- Table structure for table `mysql_partition_cron_log`
+--
+
+/*!40101 SET @saved_cs_client   = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `mysql_partition_cron_log` (
+  `id` bigint NOT NULL AUTO_INCREMENT,
+  `config_id` int NOT NULL,
+  `bk_biz_id` int NOT NULL,
+  `cluster_id` int NOT NULL,
+  `ticket_id` int NOT NULL,
+  `immute_domain` varchar(200) NOT NULL,
+  `scheduler` varchar(100) NOT NULL,
+  `bk_cloud_id` int NOT NULL,
+  `time_zone` varchar(100) NOT NULL,
+  `cron_date` varchar(100) NOT NULL,
+  `ticket_detail` json DEFAULT NULL,
+  `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
+  `check_info` text,
+  `status` varchar(100) NOT NULL,
+  PRIMARY KEY (`id`),
+  KEY `idx_create_time` (`create_time`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `spider_partition_cron_log`
+--
+
+/*!40101 SET @saved_cs_client   = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `spider_partition_cron_log` (
+  `id` bigint NOT NULL AUTO_INCREMENT,
+  `config_id` int NOT NULL,
+  `bk_biz_id` int NOT NULL,
+  `cluster_id` int NOT NULL,
+  `ticket_id` int NOT NULL,
+  `immute_domain` varchar(200) NOT NULL,
+  `scheduler` varchar(100) NOT NULL,
+  `bk_cloud_id` int NOT NULL,
+  `time_zone` varchar(100) NOT NULL,
+  `cron_date` varchar(100) NOT NULL,
+  `ticket_detail` json DEFAULT NULL,
+  `create_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
+  `check_info` text,
+  `status` varchar(100) NOT NULL,
+  PRIMARY KEY (`id`),
+  KEY `idx_create_time` (`create_time`)
+  ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `partition_logs`
+--
+
+/*!40101 SET @saved_cs_client   = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `partition_logs` (
+  `id` int(11) NOT NULL AUTO_INCREMENT,
+  `bk_biz_id` int(11) NOT NULL COMMENT '业务的 cmdb id',
+  `operator` varchar(800) NOT NULL COMMENT '操作者',
+  `para` longtext NOT NULL COMMENT '参数',
+  `execute_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '执行时间',
+  PRIMARY KEY (`id`),
+  KEY `bk_biz_id` (`bk_biz_id`,`operator`(10),`execute_time`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `schema_migrations`
+--
+
+/*!40101 SET @saved_cs_client   = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `schema_migrations` (
+  `version` bigint(20) NOT NULL,
+  `dirty` tinyint(1) NOT NULL,
+  PRIMARY KEY (`version`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+
+/*!40101 SET character_set_client = @saved_cs_client */;
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-03-22 20:55:00
diff --git a/dbm-services/mysql/db-partition/cron/cron.go b/dbm-services/mysql/db-partition/cron/cron.go
new file mode 100644
index 0000000000..71b2ff12d0
--- /dev/null
+++ b/dbm-services/mysql/db-partition/cron/cron.go
@@ -0,0 +1,71 @@
+// Package cron schedules the daily and retry partition jobs across time zones.
+package cron
+
+import (
+	"dbm-services/mysql/db-partition/model"
+	"errors"
+	"fmt"
+	"log"
+	"strings"
+	"time"
+
+	"github.com/robfig/cron/v3"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+// RegisterCron registers the partition jobs: one cron scheduler per supported UTC offset.
+func RegisterCron() ([]*cron.Cron, error) {
+	cronList := make([]*cron.Cron, 0, 24)
+	timingHour := viper.GetString("cron.timing_hour")
+	retryHour := viper.GetString("cron.retry_hour")
+	if timingHour == "" || retryHour == "" {
+		err := errors.New("cron.timing_hour or cron.retry_hour was not set")
+		slog.Error("msg", "cron error", err)
+		return cronList, err
+	}
+	timing := fmt.Sprintf("2 %s * * * ", timingHour)
+	retry := fmt.Sprintf("2 %s * * * ", retryHour)
+	var debug bool
+	if strings.ToLower(strings.TrimSpace(viper.GetString("log.level"))) == "debug" {
+		debug = true
+	}
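+	// One scheduler per UTC offset, so each business's partition run fires at the configured local hour.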
+	timezone := map[string]int{
+		"UTC": 0, "UTC+1": 1, "UTC+2": 2, "UTC+3": 3, "UTC+4": 4, "UTC+5": 5, "UTC+6": 6, "UTC+7": 7, "UTC+8": 8,
+		"UTC+9": 9, "UTC+10": 10, "UTC+11": 11, "UTC+12": 12, "UTC-11": -11, "UTC-10": -10, "UTC-9": -9,
+		"UTC-8": -8, "UTC-7": -7, "UTC-6": -6, "UTC-5": -5, "UTC-4": -4, "UTC-3": -3, "UTC-2": -2, "UTC-1": -1,
+	}
+	for name, offset := range timezone {
+		offsetSeconds := offset * 60 * 60
+		zone := time.FixedZone(name, offsetSeconds)
+		date := time.Now().In(zone).Format("20060102")
+		var c *cron.Cron
+		if debug {
+			c = cron.New(cron.WithLocation(zone), cron.WithLogger(cron.VerbosePrintfLogger(log.New(model.NewWriter(
+				"log/cron.log"), fmt.Sprintf("timezone: %+03d:00  ", offset), log.LstdFlags))))
+		} else {
+			c = cron.New(cron.WithLocation(zone))
+		}
+		_, err := c.AddJob(timing, PartitionJob{CronType: Daily, ZoneOffset: offset, CronDate: date})
+		if err != nil {
+			slog.Error("msg", "cron add daily job error", err)
+			return cronList, err
+		}
+		_, err = c.AddJob(retry, PartitionJob{CronType: Retry, ZoneOffset: offset, CronDate: date})
+		if err != nil {
+			slog.Error("msg", "cron add retry job error", err)
+			return cronList, err
+		}
+		if offset == 0 {
+			_, err = c.AddJob("@every 1s", PartitionJob{CronType: Heartbeat, ZoneOffset: offset, CronDate: date})
+			if err != nil {
+				slog.Error("msg", "cron add heartbeat job error", err)
+				return cronList, err
+			}
+		}
+		cronList = append(cronList, c)
+		c.Start()
+	}
+	return cronList, nil
+}
diff --git a/dbm-services/mysql/db-partition/cron/cron_basic_func.go b/dbm-services/mysql/db-partition/cron/cron_basic_func.go
new file mode 100644
index 0000000000..9717ab9c73
--- /dev/null
+++ b/dbm-services/mysql/db-partition/cron/cron_basic_func.go
@@ -0,0 +1,79 @@
+package cron
+
+import (
+	"dbm-services/mysql/db-partition/errno"
+	"dbm-services/mysql/db-partition/model"
+	"dbm-services/mysql/db-partition/monitor"
+	"dbm-services/mysql/db-partition/service"
+	"dbm-services/mysql/db-partition/util"
+	"fmt"
+
+	"golang.org/x/exp/slog"
+)
+
+// Scheduler holds the IP of the host running this service; it identifies the reporter in monitoring events.
+var Scheduler string
+
+// Run implements cron.Job: heartbeat jobs report liveness; daily and retry jobs run the partition pass under a redis lock.
+func (m PartitionJob) Run() {
+	var err error
+	Scheduler, err = util.ExecShellCommand(false, "hostname -I")
+	if err != nil {
+		Scheduler = "0.0.0.0"
+	}
+	if m.CronType == Heartbeat {
+		monitor.SendMetric(Scheduler)
+		return
+	}
+	key := fmt.Sprintf("%s_%d_%s", m.CronType, m.ZoneOffset, m.CronDate)
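+	// Try to take the redis lock for this (cron type, zone offset, date); only the winner executes.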
+	flag, err := model.Lock(key)
+	if err != nil {
+		dimension := monitor.NewDeveloperEventDimension(Scheduler)
+		content := fmt.Sprintf("partition error. set redis mutual exclusion error: %s", err.Error())
+		monitor.SendEvent(monitor.PartitionDeveloperEvent, dimension, content, Scheduler)
+		slog.Error("msg", "model.Lock err", err)
+	} else if flag {
+		m.ExecutePartitionCron(service.Tendbha)
+		m.ExecutePartitionCron(service.Tendbcluster)
+	} else {
+		slog.Warn("set redis mutual exclusion fail, do nothing", "key", key)
+	}
+}
+
+// ExecutePartitionCron runs the partition pass for every business of the given cluster type.
+func (m PartitionJob) ExecutePartitionCron(clusterType string) {
+	zone := fmt.Sprintf("%+03d:00", m.ZoneOffset)
+	needMysql, errOuter := service.NeedPartition(m.CronType, clusterType, m.ZoneOffset, m.CronDate)
+	if errOuter != nil {
+		dimension := monitor.NewDeveloperEventDimension(Scheduler)
+		content := fmt.Sprintf("partition error. get need partition list fail: %s", errOuter.Error())
+		monitor.SendEvent(monitor.PartitionDeveloperEvent, dimension, content, Scheduler)
+		slog.Error("msg", "get need partition list fail", errOuter)
+		return
+	}
+	for _, item := range needMysql {
+		objects, err := (*item).DryRun()
+		if err != nil {
+			code, _ := errno.DecodeErr(err)
+			if code == errno.NothingToDo.Code {
+				service.AddLog(item.ConfigId, item.BkBizId, item.ClusterId, *item.BkCloudId, 0,
+					item.ImmuteDomain, zone, m.CronDate, Scheduler, "{}",
+					errno.NothingToDo.Message, service.CheckSucceeded, item.ClusterType)
+				continue
+			} else {
+				dimension := monitor.NewPartitionEventDimension(item.BkBizId, *item.BkCloudId, item.ImmuteDomain)
+				content := fmt.Sprintf("partition error. get partition sql fail: %s", err.Error())
+				monitor.SendEvent(monitor.PartitionEvent, dimension, content, "0.0.0.0")
+				service.AddLog(item.ConfigId, item.BkBizId, item.ClusterId, *item.BkCloudId, 0,
+					item.ImmuteDomain, zone, m.CronDate, Scheduler, "{}",
+					content, service.CheckFailed, item.ClusterType)
+				slog.Error(fmt.Sprintf("%v", *item), "get partition sql fail", err)
+				continue
+			}
+		}
+		service.CreatePartitionTicket(*item, objects, m.ZoneOffset, m.CronDate, Scheduler)
+	}
+}
diff --git a/dbm-services/mysql/db-partition/cron/cron_object.go b/dbm-services/mysql/db-partition/cron/cron_object.go
new file mode 100644
index 0000000000..365a317e9c
--- /dev/null
+++ b/dbm-services/mysql/db-partition/cron/cron_object.go
@@ -0,0 +1,17 @@
+package cron
+
+// Daily is the cron type for the regular daily partition run.
+const Daily = "daily"
+
+// Retry is the cron type for the pass that retries failed partition runs.
+const Retry = "retry"
+
+// Heartbeat is the cron type used to report scheduler liveness.
+const Heartbeat = "heartbeat"
+
+// PartitionJob describes one scheduled partition run: its type, time zone offset and date.
+type PartitionJob struct {
+	CronType   string
+	ZoneOffset int
+	CronDate   string
+}
diff --git a/dbm-services/mysql/db-partition/errno/code.go b/dbm-services/mysql/db-partition/errno/code.go
new file mode 100644
index 0000000000..abaa19d931
--- /dev/null
+++ b/dbm-services/mysql/db-partition/errno/code.go
@@ -0,0 +1,361 @@
+package errno
+
+var (
+	// OK TODO
+	// Common errors
+	// OK = Errno{Code: 0, Message: ""}
+	OK = Errno{Code: 0, Message: "", CNMessage: ""}
+	// SaveOK TODO
+	SaveOK = Errno{Code: 0, Message: "Bill save success!", CNMessage: "单据保存成功!"}
+	// CommitOK TODO
+	CommitOK = Errno{Code: 0, Message: "Bill commit success!", CNMessage: "单据提交成功!"}
+	// AuditOK TODO
+	AuditOK = Errno{Code: 0, Message: "Bill audit success!", CNMessage: "单据审核成功!"}
+	// RollbackOK TODO
+	RollbackOK = Errno{Code: 0, Message: "Bill rollback success!", CNMessage: "单据驳回成功!"}
+	// StopOK TODO
+	StopOK = Errno{Code: 0, Message: "Bill stop success!", CNMessage: "单据终止成功!"}
+	// ExecuteOK TODO
+	ExecuteOK = Errno{Code: 0, Message: "Bill execute success!", CNMessage: "单据执行成功!"}
+	// CommonOK TODO
+	CommonOK = Errno{Code: 0, Message: "", CNMessage: "通用成功描述"}
+	// JobUpdateOK TODO
+	JobUpdateOK = Errno{Code: 0, Message: "Job update success!", CNMessage: "Job 更新成功!"}
+	// SubjobUpdateOK TODO
+	SubjobUpdateOK = Errno{Code: 0, Message: "Subjob update success!", CNMessage: "Subjob 更新成功!"}
+
+	// ErrRecordNotFound TODO
+	ErrRecordNotFound = Errno{Code: 404, Message: "There are no records in db.", CNMessage: "数据库未找到对应的记录!"}
+
+	// CommonErr TODO
+	CommonErr = Errno{Code: 10000, Message: "common error!", CNMessage: "通用错误!"}
+
+	// InternalServerError TODO
+	InternalServerError = Errno{Code: 10001, Message: "Internal server error", CNMessage: "服务器内部错误。"}
+	// ErrBind TODO
+	ErrBind = Errno{Code: 10002, Message: "Error occurred while binding the request body to the struct.",
+		CNMessage: "参数处理发生错误。"}
+	// ErrString2Int TODO
+	ErrString2Int = Errno{Code: 10010, Message: "Error occurred while convert string to int.",
+		CNMessage: "string 转化为 int 出错!"}
+	// ErrorJsonToMap TODO
+	ErrorJsonToMap = Errno{Code: 10030, Message: "Error occurred while converting json to Map.",
+		CNMessage: "Json 转为 Map 出现错误!"}
+	// ErrorUIDBeZero TODO
+	ErrorUIDBeZero = Errno{Code: 10035, Message: "uid can not be 0!", CNMessage: "uid 不能为 0.!"}
+	// ErrRequestParam TODO
+	ErrRequestParam = Errno{Code: 10036, Message: "request parameter error!", CNMessage: "请求参数错误!"}
+
+	// ErrTypeAssertion TODO
+	ErrTypeAssertion = Errno{Code: 10040, Message: "Error occurred while doing type assertion."}
+	// ErrParameterRequired TODO
+	ErrParameterRequired = Errno{Code: 10050, Message: "Input parameter required"}
+	// StartBiggerThanEndTime TODO
+	StartBiggerThanEndTime = Errno{Code: 10060, Message: "Start time is bigger than end time."}
+
+	// ErrValidation TODO
+	ErrValidation = Errno{Code: 20001, Message: "Validation failed."}
+	// ErrDatabase TODO
+	ErrDatabase = Errno{Code: 20002, Message: "Database error."}
+	// ErrToken TODO
+	ErrToken = Errno{Code: 20003, Message: "Error occurred while signing the JSON web token."}
+
+	// ErrEncrypt TODO
+	// user errors
+	ErrEncrypt = Errno{Code: 20101, Message: "Error occurred while encrypting the user password."}
+	// ErrUserNotFound TODO
+	ErrUserNotFound = Errno{Code: 20102, Message: "The user was not found."}
+	// ErrTokenInvalid TODO
+	ErrTokenInvalid = Errno{Code: 20103, Message: "The token was invalid."}
+	// ErrPasswordIncorrect TODO
+	ErrPasswordIncorrect = Errno{Code: 20104, Message: "The password was incorrect."}
+	// ErrDoNotHavePrivs TODO
+	ErrDoNotHavePrivs = Errno{Code: 20106, Message: "User doesn't have privs."}
+	// ErrUserIsEmpty TODO
+	ErrUserIsEmpty = Errno{Code: 20110, Message: "User can't be empty.", CNMessage: "user 不能为空!"}
+	// ErrAppNameIsEmpty TODO
+	ErrAppNameIsEmpty = Errno{Code: 20115, Message: "App name can't be empty.", CNMessage: "业务名不能为空!"}
+
+	// ErrCommonExecute TODO
+	ErrCommonExecute = Errno{Code: 20200, Message: "Error occurred while invoking execute method.",
+		CNMessage: "调用 execute 出错!"}
+
+	// ErrUserHaveNoProjectPriv TODO
+	ErrUserHaveNoProjectPriv = Errno{Code: 30000, Message: "User doesn't have project priv.", CNMessage: "没有 project 权限!"}
+
+	// ErrGcsBillNotFound TODO
+	// gcsbill errors
+	ErrGcsBillNotFound = Errno{Code: 40000, Message: "Gcs bill was not found.", CNMessage: "单据不存在!"}
+	// ErrGCSBillTypeEmpty TODO
+	ErrGCSBillTypeEmpty = Errno{Code: 40001, Message: "Gcs bill type can not be empty.", CNMessage: "单据类型不能为空!"}
+	// InvalidGCSBillType TODO
+	InvalidGCSBillType = Errno{Code: 40002, Message: "Invalid Gcs bill type.", CNMessage: "无效的 GCS 单据类型!"}
+	// InvalidAuditLevel TODO
+	InvalidAuditLevel = Errno{Code: 40003, Message: "Invalid Bill Audit level.", CNMessage: "无效的单据审核级别!"}
+
+	// CannotGetBillStatus TODO
+	CannotGetBillStatus = Errno{Code: 40004, Message: "Can't get bill status.", CNMessage: `无法获取单据状态`}
+	// ErrGCSBillnotAuditable TODO
+	ErrGCSBillnotAuditable = Errno{Code: 40005, Message: "Current GCS bill is not in audit status now.",
+		CNMessage: `当前单据不在“待审核”状态!`}
+	// ErrGCSBillNotInExecute TODO
+	ErrGCSBillNotInExecute = Errno{Code: 40006, Message: "Bill is not in execute status.", CNMessage: `当前单据不在“待执行”状态!`}
+	// ErrGCSBillAudit TODO
+	ErrGCSBillAudit = Errno{Code: 40007, Message: "Audit bill error.", CNMessage: `审核单据出错。`}
+
+	// ErrNotHaveBillCommitPriv TODO
+	ErrNotHaveBillCommitPriv = Errno{Code: 40008, Message: "User doesn't have bill commit priv", CNMessage: "用户没有提单权限!"}
+
+	// ErrGetGCSDoneBills TODO
+	ErrGetGCSDoneBills = Errno{Code: 40009, Message: "Error occurred while getting done bills.",
+		CNMessage: "获取个人已办事项出错!"}
+	// ErrBillAppIsEmpty TODO
+	ErrBillAppIsEmpty = Errno{Code: 40010, Message: "Gcs bill app can not be empty.", CNMessage: "单据的业务名不能为空!"}
+	// ErrGCSBillNoExecutePriv TODO
+	ErrGCSBillNoExecutePriv = Errno{Code: 40011, Message: "Only the applicant or a follower can execute the bill!",
+		CNMessage: "只有申请人或者关注人可以执行单据!"}
+	// ErrGetGCSBillModel TODO
+	ErrGetGCSBillModel = Errno{Code: 40012, Message: "Error occurred while getting bill info",
+		CNMessage: "获取 Bill 详情出错"}
+	// ErrGetGCSBillTypes TODO
+	ErrGetGCSBillTypes = Errno{Code: 40014, Message: "Error occurred while getting bill types",
+		CNMessage: "获取所有单据类型失败!"}
+	// ErrGCSBillCommit TODO
+	ErrGCSBillCommit = Err{Errno: Errno{Code: 40015, Message: "The bill can not be committed repeatedly!",
+		CNMessage: "单据不能被重复提交!"}}
+	// ErrInvokeBillCommit TODO
+	ErrInvokeBillCommit = Err{Errno: Errno{Code: 40016, Message: "Error occurred while committing gcs bills",
+		CNMessage: "单据提交时发生错误!"}}
+	// ErrInvokeBillExecute TODO
+	ErrInvokeBillExecute = Err{Errno: Errno{Code: 40017, Message: "Error occurred while executing gcs bills",
+		CNMessage: "单据执行时发生错误!"}}
+
+	// ErrGCSBillnotRollback TODO
+	ErrGCSBillnotRollback = Errno{Code: 40019, Message: "Bill is not auditable, it can not be rolled back.",
+		CNMessage: `非“待审核”单据不能被驳回!`}
+	// ErrGetGCSBills TODO
+	ErrGetGCSBills = Errno{Code: 40020, Message: "Error occurred while getting gcs bills", CNMessage: "获取单据失败!"}
+	// ErrCloneUnfinishedBills TODO
+	ErrCloneUnfinishedBills = Errno{Code: 40022, Message: "Error occurred while cloning unfinished gcs bills",
+		CNMessage: "不能克隆没有结束的单据!"}
+	// ErrFinishedBills TODO
+	ErrFinishedBills = Errno{Code: 40027, Message: "Error occurred while finishing gcs bills",
+		CNMessage: `设置单据为“完成”状态时失败!`}
+	// ErrBillHaveTerminated TODO
+	ErrBillHaveTerminated = Errno{Code: 40028, Message: "Bill have terminated!", CNMessage: `单据已“终止”!`}
+
+	// ErrNoStopPriv TODO
+	ErrNoStopPriv = Errno{Code: 40037, Message: "Don't have stop bill priv!", CNMessage: `用户没有“终止”单据权限!`}
+	// ErrGCSBillSave TODO
+	ErrGCSBillSave = Err{Errno: Errno{Code: 40042, Message: "Error occurred while saving gcs bills!",
+		CNMessage: "单据保存失败!"}}
+	// ErrBillIsNotUncommit TODO
+	ErrBillIsNotUncommit = Err{Errno: Errno{Code: 40043,
+		Message: "Bill phase is not v_uncommit before committing the bill!", CNMessage: "单据提交之前,单据状态不是\"未提交\"!"}}
+	// ErrBillPreCommit TODO
+	ErrBillPreCommit = Err{Errno: Errno{Code: 40046, Message: "Error occurred while invoking bill pre commit api:",
+		CNMessage: "调用单据的 PreCommit API 失败:"}}
+	// ErrBillAfterExecute TODO
+	ErrBillAfterExecute = Err{Errno: Errno{Code: 40050, Message: "Error occurred while invoking after execute api!",
+		CNMessage: "调用单据的 AfterExecute API 失败!"}}
+
+	// ErrTbBillInfoToBill TODO
+	ErrTbBillInfoToBill = Err{Errno: Errno{Code: 40055, Message: "Error occurred while transferring TbBillInfo to Bill!",
+		CNMessage: "转换 Bill Model 失败"}}
+
+	// ErrCreateGCSJob TODO
+	// job errors
+	ErrCreateGCSJob = Errno{Code: 40100, Message: "Error occurred while creating the gcs job.",
+		CNMessage: "创建 GCS Job 失败!"}
+	// ErrGetJobQueue TODO
+	ErrGetJobQueue = Errno{Code: 40101, Message: "Error occurred while getting the gcs job queue.",
+		CNMessage: "获取 job 失败 !"}
+	// ErrGetJobQueueNotFound TODO
+	ErrGetJobQueueNotFound = Errno{Code: 40102, Message: "Job Queue Not Found.", CNMessage: "Job 不存在!"}
+	// ErrDeleteJobQueue TODO
+	ErrDeleteJobQueue = Errno{Code: 40103, Message: "Error occurred while setting the jobQueue to be deleted.",
+		CNMessage: "删除 Job 失败!"}
+	// ErrJobIDConvert2Int TODO
+	ErrJobIDConvert2Int = Errno{Code: 40104, Message: "Error occurred while converting the jobID to int.",
+		CNMessage: "jobID 转换为int 出错!"}
+	// ErrSubjobIDConvert2Int TODO
+	ErrSubjobIDConvert2Int = Errno{Code: 40105, Message: "Error occurred while converting the subjob_id to int.",
+		CNMessage: "subjobID 转换为int 出错!"}
+
+	// ErrPutJobQueueParam TODO
+	ErrPutJobQueueParam = Errno{Code: 40106, Message: "Param errors while putting a new JobQueue.",
+		CNMessage: "创建 Job 时参数错误!"}
+	// ErrJobQueueInputParam TODO
+	ErrJobQueueInputParam = Errno{Code: 40107,
+		Message:   "Some parameters is required in EnJobQueue: app,name,input,tag_id",
+		CNMessage: "创建Job 时缺少下列参数:[app,name,input,tag_id]!"}
+	// ErrJobQueueV1InputParam TODO
+	ErrJobQueueV1InputParam = Errno{Code: 40107,
+		Message:   "Some parameters is required in puting JobQueue: [app,name,distributions,payload,user]",
+		CNMessage: "创建/修改 Job 时缺少下列参数:[app,name,distributions,payload,user]!"}
+	// ErrJobQueueDistribution TODO
+	ErrJobQueueDistribution = Errno{Code: 40108, Message: "JobQueue distributions format is wrong.",
+		CNMessage: "创建 JobQueue 时 distributions 格式不正确!"}
+	// ErrCheckJobQueue TODO
+	ErrCheckJobQueue = Errno{Code: 40109, Message: "Error occurred while checking JobQueue.",
+		CNMessage: "检查 JobQueue 出错!"}
+	// ErrJoqQueueIsNil TODO
+	ErrJoqQueueIsNil = Errno{Code: 40110, Message: "JobQueue is Nil", CNMessage: "返回的Job 内容为空!"}
+	// ErrCloneJoqQueues TODO
+	ErrCloneJoqQueues = Errno{Code: 40113, Message: "Error occurred while cloning jobQueues",
+		CNMessage: "克隆 jobQueues 出错!"}
+
+	// JobResultSuccess TODO
+	JobResultSuccess = Errno{Code: 0, Message: "success", CNMessage: "success"}
+	// JobResultRunning TODO
+	JobResultRunning = Errno{Code: 40114, Message: "running", CNMessage: "running"}
+	// JobResultFailed TODO
+	JobResultFailed = Errno{Code: 40115, Message: "fail", CNMessage: "fail"}
+	// JobResultOthers TODO
+	JobResultOthers = Errno{Code: 40116, Message: "other job status", CNMessage: "other job status"}
+
+	// ErrGetJobFeedbacks TODO
+	// JobFeedback
+	ErrGetJobFeedbacks = Errno{Code: 40210, Message: "Error occurred while getting the gcs job feedback.",
+		CNMessage: "获取 job feedback 信息失败!"}
+	// ErrCreateGCSJobFeedback TODO
+	ErrCreateGCSJobFeedback = Errno{Code: 40215, Message: "Error occurred while creating the gcs jobFeedback.",
+		CNMessage: "创建 GCS jobFeedback 失败!"}
+
+	// InvalidJobIDorSubjobID TODO
+	InvalidJobIDorSubjobID = Errno{Code: 40220, Message: "Invalid jobID or subJobID while getting the gcs job feedback.",
+		CNMessage: "jobID or subJobID 无效!"}
+
+	// ErrorJobNameBeEmpty TODO
+	// JobDef errors
+	ErrorJobNameBeEmpty = Errno{Code: 40300, Message: "JobName can not be empty.", CNMessage: "JobName 不能为空!"}
+	// ErrorGetJobDef TODO
+	ErrorGetJobDef = Errno{Code: 40302, Message: "Error occurred while getting the gcs job_def",
+		CNMessage: "获取 job_def 出现错误!"}
+
+	// ErrorGetJobBlob TODO
+	// JobBlob errors
+	ErrorGetJobBlob = Errno{Code: 40302, Message: "Error occurred while getting the gcs job_blob",
+		CNMessage: "获取 job_blob 出现错误!"}
+
+	// ErrorGetSubJobQueue TODO
+	// subjob errors
+	ErrorGetSubJobQueue = Errno{Code: 40800, Message: "Error occurred while getting the gcs subjob",
+		CNMessage: "获取 subjob 出现错误!"}
+	// ErrCreateSubJobQueue TODO
+	ErrCreateSubJobQueue = Errno{Code: 40801, Message: "Error occurred while creating the gcs subjobQueue.",
+		CNMessage: "创建 GCS subjobQueue 失败!"}
+	// ErrUpdateSubJobQueue TODO
+	ErrUpdateSubJobQueue = Errno{Code: 40802, Message: "Error occurred while updating the gcs subjobQueue.",
+		CNMessage: "更新 GCS subjobQueue 失败!"}
+
+	// SubJobUIDRequied TODO
+	SubJobUIDRequied = Errno{Code: 40804, Message: "Subjob uid is required!", CNMessage: "Subjob uid 是必填项.!"}
+	// ErrorUIDMustBeInt TODO
+	ErrorUIDMustBeInt = Errno{Code: 40808, Message: "Subjob uid must be int!", CNMessage: "Subjob uid 必须是 int 类型.!"}
+	// ErrSubjobQueueInputParam TODO
+	ErrSubjobQueueInputParam = Errno{Code: 40812,
+		Message: "Some parameters [JobID,Username,JobName,AtomjobList,JobInput] do not meet the demands in saving SubjobQueue", CNMessage: "保存 SubjobQueue 时缺少下列参数:[JobID,Username,JobName,AtomjobList,JobInput]!"}
+	// ErrJobFeedbackInputParam TODO
+	ErrJobFeedbackInputParam = Errno{Code: 40815,
+		Message: "Some parameters do not meet the demands in saving JobFeedback", CNMessage: "保存 JobFeedback 时参数不满足要求。"}
+	// ErrGetGCSApps TODO
+	// gcs app errors
+	ErrGetGCSApps = Errno{Code: 40900, Message: "Error occurred while getting gcs apps", CNMessage: "获取 GCS App 出现错误!"}
+	// ErrGetCCApps TODO
+	ErrGetCCApps = Errno{Code: 40902, Message: "Error occurred while getting cc apps", CNMessage: "获取 App 出现错误!"}
+	// ErrGetProjects TODO
+	ErrGetProjects = Errno{Code: 40905, Message: "Error occurred while getting projects", CNMessage: "获取 projects 出现错误!"}
+
+	// ErrDBTransaction TODO
+	// model operation errors
+	ErrDBTransaction = Errno{Code: 50200, Message: "DB Transaction error.", CNMessage: "DB 事务发生错误!"}
+	// ErrModelFunction TODO
+	ErrModelFunction = Err{Errno: Errno{Code: 50201, Message: "Error occurred while invoking model function.",
+		CNMessage: "调用 DB model 方法发生错误!"}, Err: nil}
+
+	// ErrSaveFlowAuditLog TODO
+	ErrSaveFlowAuditLog = Errno{Code: 50203, Message: "Error occurred while saving Flow Audit Log.",
+		CNMessage: "存储单据审核日志记录出错!"}
+
+	// ErrGetJSONArray TODO
+	// data handle error
+	ErrGetJSONArray = Errno{Code: 50300, Message: "Get simplejson Array error.", CNMessage: ""}
+	// ErrConvert2Map TODO
+	ErrConvert2Map = Errno{Code: 50301, Message: "Error occurred while converting the data to Map.",
+		CNMessage: "Error occurred while converting the data to Map."}
+	// ErrJSONMarshal TODO
+	ErrJSONMarshal = Errno{Code: 50302, Message: "Error occurred while marshaling the data to JSON.",
+		CNMessage: "Error occurred while marshaling the data to JSON."}
+	// ErrReadEntity TODO
+	ErrReadEntity = Errno{Code: 50303, Message: "Error occurred while parsing the request parameter.",
+		CNMessage: "Error occurred while parsing the request parameter."}
+	// ErrJSONUnmarshal TODO
+	ErrJSONUnmarshal = Errno{Code: 50304, Message: "Error occurred while Unmarshaling the JSON to data model.",
+		CNMessage: "Error occurred while Unmarshaling the JSON to data model."}
+	// ErrBytesToMap TODO
+	ErrBytesToMap = Errno{Code: 50307, Message: "Error occurred while converting bytes to map.",
+		CNMessage: "Error occurred while converting bytes to map."}
+
+	// ErrUserIsNotDBA TODO
+	// user login and permission errors
+	ErrUserIsNotDBA = Errno{Code: 50500, Message: "User is not dba."}
+	// ErrNoSaveAndCommitPriv TODO
+	ErrNoSaveAndCommitPriv = Errno{Code: 50502,
+		Message: "User doesn't have gcs bill save and commit privs in this app.", CNMessage: "用户在当前 APP 上没有单据的保存和提交权限!"}
+	// ErrNoBillAduitPriv TODO
+	ErrNoBillAduitPriv = Errno{Code: 50504, Message: "User doesn't have gcs audit privs in this app.",
+		CNMessage: "用户在当前 APP 上没有单据的审核权限!"}
+	// ErrUserNotHaveBillRollbackPriv TODO
+	ErrUserNotHaveBillRollbackPriv = Errno{Code: 50506, Message: "User doesn't have gcs rollback privs in this app.",
+		CNMessage: "用户在当前 APP 上没有单据的驳回权限!"}
+	// ErrUserHasNoPermission TODO
+	ErrUserHasNoPermission = Errno{Code: 50508, Message: "User has no permission.", CNMessage: "当前用户没有权限!"}
+	// ErrUserNotHaveBillClonePriv TODO
+	ErrUserNotHaveBillClonePriv = Errno{Code: 50510, Message: "User doesn't have gcs bill clone privs in this app.",
+		CNMessage: "用户没有当前单据的克隆权限!"}
+	// ErrViewAppPriv TODO
+	ErrViewAppPriv = Errno{Code: 50515, Message: "User has no priv to view this app!",
+		CNMessage: "用户没有查看当前 APP 的权限!"}
+
+	// ErrInvokeAPI means calling an API failed
+	ErrInvokeAPI = Errno{Code: 50601, Message: "Error occurred while invoking API", CNMessage: "调用 API 发生错误!"}
+
+	// alarm errors
+
+	// ErrSnedRTX means sending an RTX message failed
+	ErrSnedRTX = Errno{Code: 50800, Message: "Error occurred while sending RTX message to user.",
+		CNMessage: "发送 RTX 消息出现错误!"}
+
+	// BkBizIdIsEmpty means bk_biz_id is empty
+	BkBizIdIsEmpty = Errno{Code: 51012, Message: "bk_biz_id can't be empty", CNMessage: "bk_biz_id不能为空"}
+	// InstanceNotExists means the instance does not exist
+	InstanceNotExists = Errno{Code: 51018, Message: "instance does not exist", CNMessage: "实例不存在"}
+	// NoTableMatched means no table matched
+	NoTableMatched = Errno{Code: 51019, Message: "no table matched", CNMessage: "找不到匹配的表"}
+	// ClusterIdIsEmpty means cluster_id is empty
+	ClusterIdIsEmpty = Errno{Code: 51020, Message: "cluster_id can't be empty",
+		CNMessage: "cluster_id不能为空"}
+	// CheckPartitionFailed means the partition check failed
+	CheckPartitionFailed = Errno{Code: 51021, Message: "partition check failed", CNMessage: "分区检查失败"}
+	// PartitionConfigNotExisted means the partition config does not exist
+	PartitionConfigNotExisted = Errno{Code: 51022, Message: "partition config does not exist", CNMessage: "分区配置不存在"}
+	// PartOfPartitionConfigsNotExisted means some partition configs do not exist
+	PartOfPartitionConfigsNotExisted = Errno{Code: 51023, Message: "some partition configs do not exist",
+		CNMessage: "部分分区配置不存在"}
+	// NotSupportedClusterType means this instance type does not support partition
+	NotSupportedClusterType = Errno{Code: 51024, Message: "this instance type is not supported by partition",
+		CNMessage: "不支持的实例类型"}
+	// ConfigIdIsEmpty means the partition config id is empty
+	ConfigIdIsEmpty = Errno{Code: 51025, Message: "partition config id can't be empty",
+		CNMessage: "partition config id 不能为空"}
+	// CloudIdRequired means bk_cloud_id is required
+	CloudIdRequired = Errno{Code: 51026, Message: "bk_cloud_id is required", CNMessage: "bk_cloud_id不能为空"}
+	// GetPartitionSqlFail means generating the partition sql failed
+	GetPartitionSqlFail = Errno{Code: 51027, Message: "get partition sql failed", CNMessage: "获取分区语句失败"}
+	// ExecutePartitionFail means executing the partition failed
+	ExecutePartitionFail = Errno{Code: 51028, Message: "execute partition failed", CNMessage: "执行分区失败"}
+	// NothingToDo means there is nothing to execute
+	NothingToDo = Errno{Code: 51029, Message: "nothing to do", CNMessage: "没有需要执行的操作"}
+	// DomainNotExists means the domain does not exist
+	DomainNotExists = Errno{Code: 51030, Message: "domain does not exist", CNMessage: "域名不存在"}
+)
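+
+// Adding a new code only requires appending an Errno with a unique Code and
+// both messages to the block above; a hypothetical entry (not part of the
+// catalog) would look like:
+//
+//	// PartitionJobNotExists means the partition job does not exist
+//	PartitionJobNotExists = Errno{Code: 51031, Message: "partition job does not exist", CNMessage: "分区任务不存在"}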
diff --git a/dbm-services/mysql/db-partition/errno/errno.go b/dbm-services/mysql/db-partition/errno/errno.go
new file mode 100644
index 0000000000..fe2cf6d791
--- /dev/null
+++ b/dbm-services/mysql/db-partition/errno/errno.go
@@ -0,0 +1,133 @@
+// Package errno defines coded errors with English and Chinese messages and helpers to decode them.
+package errno
+
+import (
+	"fmt"
+
+	"github.com/spf13/viper"
+)
+
+// Errno is a coded error carrying an English and a Chinese message.
+type Errno struct {
+	Code      int
+	Message   string
+	CNMessage string
+}
+
+// lang is read once at package init, so viper must have loaded the "lang"
+// key before this package is used; an unset value falls back to Chinese.
+var lang = viper.GetString("lang")
+
+// Error returns the message in the language selected by lang, defaulting to the Chinese message.
+func (err Errno) Error() string {
+	switch lang {
+	case "zh_CN":
+		return err.CNMessage
+	case "en_US":
+		return err.Message
+	default:
+		return err.CNMessage
+	}
+}
+
+// Addf appends a formatted message to the error text in the active language.
+func (err Errno) Addf(format string, args ...interface{}) error {
+	return err.Add(fmt.Sprintf(format, args...))
+}
+
+// Add appends message to the error text in the active language.
+func (err Errno) Add(message string) error {
+	switch lang {
+	case "en_US":
+		err.Message += message
+	default: // zh_CN and any unset value append to the Chinese message
+		err.CNMessage += message
+	}
+	return err
+}
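+
+// Because the receiver is a value, Add and Addf return a modified copy and
+// the predefined errnos stay untouched. Illustrative use:
+//
+//	e := ErrGetJSONArray.Addf(" key: %s", "items")
+//	// e.Error() now ends with "key: items"; ErrGetJSONArray is unchanged.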
+
+// Err represents an error
+type Err struct {
+	Errno
+	Err error
+}
+
+// New wraps the underlying error err with the given errno.
+func New(errno Errno, err error) *Err {
+	return &Err{Errno: errno, Err: err}
+}
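+
+// Typical wrapping at a call site (the gorm call is illustrative):
+//
+//	if e := db.First(&row).Error; e != nil {
+//		return New(ErrDBTransaction, e)
+//	}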
+
+// Add appends message to the error text in the active language.
+func (err Err) Add(message string) error {
+	switch lang {
+	case "en_US":
+		err.Message += message
+	default: // zh_CN and any unset value append to the Chinese message
+		err.CNMessage += message
+	}
+	return err
+}
+
+// SetMsg replaces the English message.
+func (err Err) SetMsg(message string) error {
+	err.Message = message
+	return err
+}
+
+// SetCNMsg replaces the Chinese message.
+func (err Err) SetCNMsg(cnMessage string) error {
+	err.CNMessage = cnMessage
+	return err
+}
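+
+// Unlike Add, SetMsg and SetCNMsg replace the message rather than append to
+// it; for example (the status text is made up):
+//
+//	e := New(ErrInvokeAPI, nil).SetMsg("remote API returned status 500")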
+
+// Addf appends a formatted message to the error text in the active language.
+func (err Err) Addf(format string, args ...interface{}) error {
+	return err.Add(fmt.Sprintf(format, args...))
+}
+
+/*
+	func (err *Err) Error() string {
+		return fmt.Sprintf("Err - code: %d, message: %s, error: %s", err.Code, err.Message, err.Err)
+	}
+*/
+
+// IsErrUserNotFound reports whether err carries the ErrUserNotFound code.
+func IsErrUserNotFound(err error) bool {
+	code, _ := DecodeErr(err)
+	return code == ErrUserNotFound.Code
+}
+
+// DecodeErr maps err to a (code, message) pair: Errno and Err values yield
+// their own code and message, nil yields OK, and anything else yields
+// InternalServerError with err's text.
+func DecodeErr(err error) (int, string) {
+	// CN is hardcoded for now, so DecodeErr always prefers the Chinese message.
+	CN := true
+
+	if err == nil {
+		return OK.Code, OK.Message
+	}
+
+	switch typed := err.(type) {
+	case Err:
+		if CN {
+			return typed.Code, typed.CNMessage
+		} else {
+			return typed.Code, typed.Message
+		}
+	case Errno:
+		if CN {
+			return typed.Code, typed.CNMessage
+		} else {
+			return typed.Code, typed.Message
+		}
+	default:
+	}
+	// lager.Logger.Errorf("%s", err)
+	return InternalServerError.Code, err.Error()
+}
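+
+// End-to-end sketch of how a gin handler might use this package (the handler
+// and response shape are assumptions, not part of this file):
+//
+//	func respond(c *gin.Context, err error) {
+//		code, msg := errno.DecodeErr(err)
+//		c.JSON(http.StatusOK, gin.H{"code": code, "message": msg})
+//	}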
diff --git a/dbm-services/mysql/db-partition/go.mod b/dbm-services/mysql/db-partition/go.mod
new file mode 100644
index 0000000000..9f03530011
--- /dev/null
+++ b/dbm-services/mysql/db-partition/go.mod
@@ -0,0 +1,64 @@
+module dbm-services/mysql/db-partition
+
+go 1.19
+
+require (
+	github.com/gin-gonic/gin v1.9.0
+	github.com/go-redis/redis v6.15.9+incompatible
+	github.com/golang-migrate/migrate/v4 v4.15.2
+	github.com/google/go-querystring v1.1.0
+	github.com/pkg/errors v0.9.1
+	github.com/robfig/cron/v3 v3.0.1
+	github.com/spf13/pflag v1.0.5
+	github.com/spf13/viper v1.15.0
+	golang.org/x/exp v0.0.0-20230418202329-0354be287a23
+	gopkg.in/natefinch/lumberjack.v2 v2.2.1
+	gorm.io/driver/mysql v1.5.0
+	gorm.io/gorm v1.25.0
+)
+
+require (
+	github.com/bytedance/sonic v1.8.8 // indirect
+	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/gin-contrib/sse v0.1.0 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/go-playground/validator/v10 v10.12.0 // indirect
+	github.com/go-sql-driver/mysql v1.7.1 // indirect
+	github.com/goccy/go-json v0.10.2 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/jinzhu/inflection v1.0.0 // indirect
+	github.com/jinzhu/now v1.1.5 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
+	github.com/leodido/go-urn v1.2.3 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/mattn/go-isatty v0.0.18 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/onsi/ginkgo v1.16.5 // indirect
+	github.com/onsi/gomega v1.18.1 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.7 // indirect
+	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	github.com/sirupsen/logrus v1.9.0 // indirect
+	github.com/spf13/afero v1.9.5 // indirect
+	github.com/spf13/cast v1.5.0 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/subosito/gotenv v1.4.2 // indirect
+	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
+	github.com/ugorji/go/codec v1.2.11 // indirect
+	go.uber.org/atomic v1.9.0 // indirect
+	golang.org/x/arch v0.3.0 // indirect
+	golang.org/x/crypto v0.8.0 // indirect
+	golang.org/x/net v0.9.0 // indirect
+	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/dbm-services/mysql/db-partition/go.sum b/dbm-services/mysql/db-partition/go.sum
new file mode 100644
index 0000000000..6d9f1497f0
--- /dev/null
+++ b/dbm-services/mysql/db-partition/go.sum
@@ -0,0 +1,1944 @@
+bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/spanner v1.28.0/go.mod h1:7m6mtQZn/hMbMfx62ct5EWrGND4DNqkXyrmBPRS+OJo=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
+github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
+github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY=
+github.com/apache/arrow/go/arrow v0.0.0-20211013220434-5962184e7a30/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0=
+github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
+github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU=
+github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw=
+github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM=
+github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.4/go.mod h1:Ex7XQmbFmgFHrjUX6TN3mApKW5Hglyga+F7wZHTtYhA=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.2/go.mod h1:np7TMuJNT83O0oDOSF8i4dF3dvGqA6hPYYo6YYkzgRA=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.16.1/go.mod h1:CQe/KvWV1AqRc65KqeJjrLzr5X2ijnFTTVzJW0VBRCI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo=
+github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk=
+github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs=
+github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g=
+github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
+github.com/bytedance/sonic v1.8.8 h1:Kj4AYbZSeENfyXicsYppYKO0K2YWab+i2UTSY7Ukz9Q=
+github.com/bytedance/sonic v1.8.8/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
+github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
+github.com/containerd/containerd v1.6.1 h1:oa2uY0/0G+JX4X7hpGCYvkp9FjUancz56kSNnb1sG3o=
+github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dhui/dktest v0.3.10 h1:0frpeeoM9pHouHjhLeZDuDTJ0PqjDTrycaHaMmkJAo8=
+github.com/dhui/dktest v0.3.10/go.mod h1:h5Enh0nG3Qbo9WjNFRrwmKUaePEBhXMOygbz3Ww7Sz0=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
+github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.13+incompatible h1:5s7uxnKZG+b8hYWlPYUi6x1Sjpq2MSt96d15eLZeHyw=
+github.com/docker/docker v20.10.13+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
+github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8=
+github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI=
+github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA=
+github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
+github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
+github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-migrate/migrate/v4 v4.15.2 h1:vU+M05vs6jWHKDdmE1Ecwj0BznygFc4QsdRe2E/L7kc=
+github.com/golang-migrate/migrate/v4 v4.15.2/go.mod h1:f2toGLkYqD3JH+Todi4aZ2ZdbeUNx4sIwiOK96rE9Lw=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
+github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk=
+github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
+github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.7/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
+github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po=
+github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ=
+github.com/jackc/pgtype v1.6.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA=
+github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o=
+github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg=
+github.com/jackc/pgx/v4 v4.10.1/go.mod h1:QlrWebbs3kqEZPHCTGyxecvzG6tvIsYu+A5b1raylkA=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
+github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
+github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
+github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
+github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4=
+github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA=
+github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E=
+github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
+github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
+github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us=
+github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
+github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
+github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
+github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
+github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
+github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
+github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
+github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
+github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
+github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
+github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
+github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
+gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
+go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
+go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
+go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
+go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
+go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
+go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE=
+go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
+go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
+go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
+go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
+go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
+go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
+go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
+golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230418202329-0354be287a23 h1:4NKENAGIctmZYLK9W+X1kDK8ObBFqOSCJM6WE7CvkJY=
+golang.org/x/exp v0.0.0-20230418202329-0354be287a23/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190225153610-fe579d43d832/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220111093109-d55c255bac03/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
+google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
+google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY=
+google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gorm.io/driver/mysql v1.5.0 h1:6hSAT5QcyIaty0jfnff0z0CLDjyRgZ8mlMHLqSt7uXM=
+gorm.io/driver/mysql v1.5.0/go.mod h1:FFla/fJuCvyTi7rJQd27qlNX2v3L6deTR1GgTjSOLPo=
+gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg=
+gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
+gorm.io/gorm v1.21.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
+gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
+gorm.io/gorm v1.25.0 h1:+KtYtb2roDz14EQe4bla8CbQlmb9dN3VejSai3lprfU=
+gorm.io/gorm v1.25.0/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
+gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
+gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
+k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
+k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
+k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs=
+k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
+k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U=
+k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
+k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
+k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
+k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ=
+k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
+k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
+k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
+k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y=
+k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
+k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
+k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
+k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
+k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI=
+k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
+k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
+k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
+k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
+k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg=
+modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878=
+modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo=
+modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8=
+modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw=
+modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8=
+modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
+modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
+modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM=
+modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
+modernc.org/libc v1.9.5/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
+modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8=
+modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
+modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc=
+modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY=
+modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k=
+modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs=
+modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
+modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo=
+modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
+modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
+modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/dbm-services/mysql/db-partition/handler/handler.go b/dbm-services/mysql/db-partition/handler/handler.go
new file mode 100644
index 0000000000..efe5e5acbd
--- /dev/null
+++ b/dbm-services/mysql/db-partition/handler/handler.go
@@ -0,0 +1,205 @@
+// Package handler exposes the HTTP handlers of the partition service.
+package handler
+
+import (
+	"dbm-services/mysql/db-partition/errno"
+	"dbm-services/mysql/db-partition/service"
+	"errors"
+	"fmt"
+	"net/http"
+	_ "runtime/debug" // debug TODO
+
+	"github.com/gin-gonic/gin"
+	"golang.org/x/exp/slog"
+)
+
+// DryRun previews the partition statements without executing them.
+func DryRun(r *gin.Context) {
+	slog.Info("do DryRun")
+	var input service.Checker
+	if err := r.ShouldBind(&input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(r, errno.ErrBind, nil)
+		return
+	}
+	sqls, err := input.DryRun()
+	SendResponse(r, err, sqls)
+}
+
+// GetPartitionsConfig returns the partition configs of a business together with a total count.
+func GetPartitionsConfig(r *gin.Context) {
+	var input service.QueryParititionsInput
+	if err := r.ShouldBind(&input); err != nil {
+		slog.Error(err.Error())
+		SendResponse(r, errno.ErrBind, nil)
+		return
+	}
+	slog.Info(fmt.Sprintf("bk_biz_id: %d, immute_domains: %s", input.BkBizId, input.ImmuteDomains))
+	lists, count, err := input.GetPartitionsConfig()
+	type ListResponse struct {
+		Count int64       `json:"count"`
+		Items interface{} `json:"items"`
+	}
+	if err != nil {
+		slog.Error(err.Error())
+		SendResponse(r, err, nil)
+		return
+	}
+	SendResponse(r, err, ListResponse{
+		Count: count,
+		Items: lists,
+	})
+}
+
+// GetPartitionLog returns the execution logs of partition jobs.
+func GetPartitionLog(r *gin.Context) {
+	var input service.QueryLogInput
+	if err := r.ShouldBind(&input); err != nil {
+		slog.Error(err.Error())
+		SendResponse(r, errno.ErrBind, nil)
+		return
+	}
+	lists, count, err := input.GetPartitionLog()
+	type ListResponse struct {
+		Count int64       `json:"count"`
+		Items interface{} `json:"items"`
+	}
+	if err != nil {
+		slog.Error(err.Error())
+		SendResponse(r, err, nil)
+		return
+	}
+	SendResponse(r, err, ListResponse{
+		Count: count,
+		Items: lists,
+	})
+}
+
+// DeletePartitionsConfig deletes partition configs by their ids.
+func DeletePartitionsConfig(r *gin.Context) {
+	var input service.DeletePartitionConfigByIds
+	if err := r.ShouldBind(&input); err != nil {
+		err = errno.ErrReadEntity.Add(err.Error())
+		slog.Error(err.Error())
+		SendResponse(r, err, nil)
+		return
+	}
+	slog.Info(fmt.Sprintf("bk_biz_id: %d, ids: %v", input.BkBizId, input.Ids))
+	err := input.DeletePartitionsConfig()
+	if err != nil {
+		slog.Error(err.Error())
+		SendResponse(r, err, nil)
+		return
+	}
+	SendResponse(r, err, "分区配置信息删除成功!")
+	return
+}
+
+// CreatePartitionsConfig creates a partition config for a cluster.
+func CreatePartitionsConfig(r *gin.Context) {
+	var input service.CreatePartitionsInput
+	if err := r.ShouldBind(&input); err != nil {
+		err = errno.ErrReadEntity.Add(err.Error())
+		slog.Error(err.Error())
+		SendResponse(r, err, nil)
+		return
+	}
+	slog.Info(fmt.Sprintf("bk_biz_id: %d, immute_domain: %s, creator: %s", input.BkBizId, input.ImmuteDomain,
+		input.Creator))
+	err := input.CreatePartitionsConfig()
+	if err != nil {
+		slog.Error(err.Error())
+		SendResponse(r, fmt.Errorf("failed to create partition config: %s", err.Error()), nil)
+		return
+	}
+	SendResponse(r, nil, "分区配置信息创建成功!")
+	return
+}
+
+// DisablePartition disables partition rules by their config ids.
+func DisablePartition(r *gin.Context) {
+	var input service.DisablePartitionInput
+	if err := r.ShouldBind(&input); err != nil {
+		err = errno.ErrReadEntity.Add(err.Error())
+		slog.Error(err.Error())
+		SendResponse(r, err, nil)
+		return
+	}
+	slog.Info(fmt.Sprintf("ids: %v, operator: %s", input.Ids, input.Operator))
+	err := input.DisablePartitionConfig()
+	if err != nil {
+		slog.Error(err.Error())
+		SendResponse(r, fmt.Errorf("failed to disable partition: %s", err.Error()), nil)
+		return
+	}
+	SendResponse(r, nil, "分区禁用成功!")
+	return
+}
+
+// EnablePartition enables partition rules by their config ids.
+func EnablePartition(r *gin.Context) {
+	var input service.EnablePartitionInput
+	if err := r.ShouldBind(&input); err != nil {
+		err = errno.ErrReadEntity.Add(err.Error())
+		slog.Error(err.Error())
+		SendResponse(r, err, nil)
+		return
+	}
+	slog.Info(fmt.Sprintf("ids: %v, operator: %s", input.Ids, input.Operator))
+	err := input.EnablePartitionConfig()
+	if err != nil {
+		slog.Error(err.Error())
+		SendResponse(r, fmt.Errorf("failed to enable partition: %s", err.Error()), nil)
+		return
+	}
+	SendResponse(r, nil, "分区启用成功!")
+	return
+}
+
+// UpdatePartitionsConfig updates an existing partition config.
+func UpdatePartitionsConfig(r *gin.Context) {
+	var input service.CreatePartitionsInput
+	if err := r.ShouldBind(&input); err != nil {
+		err = errno.ErrReadEntity.Add(err.Error())
+		slog.Error(err.Error())
+		SendResponse(r, err, nil)
+		return
+	}
+	slog.Info(fmt.Sprintf("bk_biz_id: %d, immute_domain: %s, creator: %s", input.BkBizId, input.ImmuteDomain,
+		input.Creator))
+	err := input.UpdatePartitionsConfig()
+	if err != nil {
+		slog.Error(err.Error())
+		SendResponse(r, fmt.Errorf("failed to update partition config: %s", err.Error()), nil)
+		return
+	}
+	SendResponse(r, nil, "更新分区配置信息创建成功!")
+	return
+}
+
+// Response is the uniform JSON envelope returned by every endpoint.
+type Response struct {
+	Code    int         `json:"code"`
+	Message string      `json:"message"`
+	Data    interface{} `json:"data"`
+}
+
+// SendResponse writes the uniform response envelope; errors are encoded into code and message.
+func SendResponse(r *gin.Context, err error, data interface{}) {
+	code, message := errno.DecodeErr(err)
+	dataErr, ok := data.(error)
+	if ok {
+		message += dataErr.Error()
+	}
+
+	// always answer HTTP 200; the business result travels in the code/message fields
+	r.JSON(http.StatusOK, Response{
+		Code:    code,
+		Message: message,
+		Data:    data,
+	})
+}
diff --git a/dbm-services/mysql/db-partition/main.go b/dbm-services/mysql/db-partition/main.go
new file mode 100644
index 0000000000..1f7e3adfad
--- /dev/null
+++ b/dbm-services/mysql/db-partition/main.go
@@ -0,0 +1,56 @@
+package main
+
+import (
+	"dbm-services/mysql/db-partition/assests"
+	"dbm-services/mysql/db-partition/cron"
+	"dbm-services/mysql/db-partition/model"
+	"dbm-services/mysql/db-partition/router"
+	"os"
+
+	"github.com/gin-gonic/gin"
+	"github.com/golang-migrate/migrate/v4"
+	flag "github.com/spf13/pflag"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+func main() {
+	flag.Parse()
+	// run meta-database migrations when --migrate is set
+	if viper.GetBool("migrate") {
+		if err := assests.DoMigrateFromEmbed(); err != nil && err != migrate.ErrNoChange {
+			slog.Error("migrate fail", err)
+			os.Exit(0)
+		}
+	}
+
+	// register the cron jobs
+	cronList, err := cron.RegisterCron()
+	if err != nil {
+		slog.Error("register cron jobs fail", "error", err)
+		os.Exit(1)
+	}
+
+	defer func() {
+		for _, c := range cronList {
+			c.Stop()
+		}
+		slog.Info("stop all cron jobs")
+	}()
+
+	// start the HTTP service
+	gin.SetMode(gin.ReleaseMode)
+	r := gin.New()
+	r.Use(gin.Recovery())
+	router.RegisterRouter(r)
+	if err = r.Run(viper.GetString("listen_address")); err != nil {
+		slog.Error("register router fail:", err)
+		os.Exit(0)
+	}
+}
+
+func init() {
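+	// initialization order matters: environment first, then logging, the meta database and redis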
+	model.InitEnv()
+	model.InitLog()
+	model.DB.Init()
+	model.InitClient()
+}
diff --git a/dbm-services/mysql/db-partition/model/init_db.go b/dbm-services/mysql/db-partition/model/init_db.go
new file mode 100644
index 0000000000..979018181e
--- /dev/null
+++ b/dbm-services/mysql/db-partition/model/init_db.go
@@ -0,0 +1,49 @@
+package model
+
+import (
+	"database/sql"
+	"fmt"
+	"strings"
+
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+	"gorm.io/driver/mysql"
+	"gorm.io/gorm"
+)
+
+// Database wraps the gorm connection to the meta database.
+type Database struct {
+	Self *gorm.DB
+}
+
+// DB is the global handle to the meta database.
+var DB *Database
+
+func openDB(user, password, addr, name string) *gorm.DB {
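+	// DSN shape: user:password@tcp(host:port)/dbname?charset=utf8&parseTime=true&loc=Local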
+	dsn := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=%t&loc=%s",
+		user, password, addr, name, true, "Local")
+	sqlDB, err := sql.Open("mysql", dsn)
+	if err != nil {
+		slog.Error("connect to mysql failed", err)
+		return nil
+	}
+	db, err := gorm.Open(mysql.New(mysql.Config{Conn: sqlDB}), &gorm.Config{})
+	if err != nil {
+		slog.Error(strings.Replace(dsn, password, "", -1))
+		slog.Error(fmt.Sprintf("Database %s connection failed", name), err)
+	}
+	return db
+}
+
+// Init opens the meta database using the settings bound in viper.
+func (db *Database) Init() {
+	user := viper.GetString("db.user")
+	password := viper.GetString("db.password")
+	host := viper.GetString("db.host")
+	port := viper.GetInt("db.port")
+	name := viper.GetString("db.name")
+	addr := fmt.Sprintf("%s:%d", host, port)
+	DB = &Database{
+		Self: openDB(user, password, addr, name),
+	}
+}
diff --git a/dbm-services/mysql/db-partition/model/init_env.go b/dbm-services/mysql/db-partition/model/init_env.go
new file mode 100644
index 0000000000..500d8de46a
--- /dev/null
+++ b/dbm-services/mysql/db-partition/model/init_env.go
@@ -0,0 +1,60 @@
+package model
+
+import (
+	flag "github.com/spf13/pflag"
+	"github.com/spf13/viper"
+)
+
+// InitEnv binds configuration keys to environment variables.
+func InitEnv() {
+	// meta database settings
+	viper.BindEnv("db.user", "DB_USER")
+	viper.BindEnv("db.password", "DB_PASSWORD")
+	viper.BindEnv("db.name", "DB_NAME")
+	viper.BindEnv("db.host", "DB_HOST")
+	viper.BindEnv("db.port", "DB_PORT")
+
+	viper.BindEnv("redis.password", "REDIS_PASSWORD")
+	viper.BindEnv("redis.host", "REDIS_HOST")
+	viper.BindEnv("redis.port", "REDIS_PORT")
+
+	// partition service settings
+	viper.BindEnv("listen_address", "LISTEN_ADDRESS")
+	viper.BindEnv("cron.timing_hour", "CRON_TIMING_HOUR")
+	viper.BindEnv("cron.retry_hour", "CRON_RETRY_HOUR")
+
+	viper.BindEnv("dbm_db_name", "DBM_DB_NAME")
+	viper.BindEnv("db_remote_service", "DB_REMOTE_SERVICE")
+	viper.BindEnv("db_meta_service", "DB_META_SERVICE")
+	viper.BindEnv("dbm_ticket_service", "DBM_TICKET_SERVICE")
+	viper.BindEnv("bk_app_code", "BK_APP_CODE")
+	viper.BindEnv("bk_app_secret", "BK_APP_SECRET")
+
+	// pt-osc (pt-online-schema-change) parameters
+	viper.BindEnv("pt.max_load.threads_running", "PT_MAX_LOAD_THREADS_RUNNING")
+	viper.BindEnv("pt.critical_load.threads_running", "PT_CRITICAL_LOAD_THREADs_RUNNING")
+	viper.BindEnv("pt.lock_wait_timeout", "PT_LOCK_WAIT_TIMEOUT")
+	viper.BindEnv("pt.max_size", "PT_MAX_SIZE")
+	viper.BindEnv("pt.max_rows", "PT_MAX_ROWS")
+
+	viper.BindEnv("monitor.service", "MONITOR_SERVICE")
+	// BlueKing monitoring custom events
+	viper.BindEnv("monitor.event.data_id", "MONITOR_EVENT_DATA_ID")
+	viper.BindEnv("monitor.event.access_token", "MONITOR_EVENT_ACCESS_TOKEN")
+	// BlueKing monitoring custom metrics
+	viper.BindEnv("monitor.metric.data_id", "MONITOR_METRIC_DATA_ID")
+	viper.BindEnv("monitor.metric.access_token", "MONITOR_METRIC_ACCESS_TOKEN")
+
+	viper.BindEnv("dba.bk_biz_id", "DBA_BK_BIZ_ID")
+
+	// logging settings (optional)
+	viper.BindEnv("log.path", "LOG_PATH")
+	viper.BindEnv("log.level", "LOG_LEVEL")
+	viper.BindEnv("log.max_size", "LOG_MAX_SIZE")
+	viper.BindEnv("log.max_age", "LOG_MAX_AGE")
+	viper.BindEnv("log.max_backups", "LOG_MAX_BACKUPS")
+
+	flag.Bool("migrate", false,
+		"run migrate to databases, not exit.")
+	viper.BindPFlags(flag.CommandLine)
+}
diff --git a/dbm-services/mysql/db-partition/model/init_logger.go b/dbm-services/mysql/db-partition/model/init_logger.go
new file mode 100644
index 0000000000..129e6fb537
--- /dev/null
+++ b/dbm-services/mysql/db-partition/model/init_logger.go
@@ -0,0 +1,38 @@
+package model
+
+import (
+	"io"
+	"os"
+	"strings"
+
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+	"gopkg.in/natefinch/lumberjack.v2"
+)
+
+// InitLog initializes the default slog logger.
+func InitLog() {
+	var logLevel = new(slog.LevelVar)
+	logLevel.Set(slog.LevelInfo)
+	if strings.ToLower(strings.TrimSpace(viper.GetString("log.level"))) == "debug" {
+		logLevel.Set(slog.LevelDebug)
+	}
+	logger := slog.HandlerOptions{Level: logLevel, AddSource: true}.NewTextHandler(os.Stdout)
+	logPath := strings.TrimSpace(viper.GetString("log.path"))
+	if logPath != "" {
+		logger = slog.HandlerOptions{Level: logLevel, AddSource: true}.NewTextHandler(NewWriter(logPath))
+	}
+	slog.SetDefault(slog.New(logger))
+}
+
+// NewWriter builds a writer that tees output to stdout and a rotating log file.
+func NewWriter(path string) io.Writer {
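+	// every log line goes both to stdout and to a size-rotated file managed by lumberjack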
+	return io.MultiWriter(os.Stdout, &lumberjack.Logger{
+		Filename:   path,
+		MaxSize:    viper.GetInt("log.max_size"),
+		MaxAge:     viper.GetInt("log.max_age"),
+		MaxBackups: viper.GetInt("log.max_backups"),
+		LocalTime:  true,
+	})
+}
diff --git a/dbm-services/mysql/db-partition/model/init_redis.go b/dbm-services/mysql/db-partition/model/init_redis.go
new file mode 100644
index 0000000000..31e1667edd
--- /dev/null
+++ b/dbm-services/mysql/db-partition/model/init_redis.go
@@ -0,0 +1,46 @@
+package model
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/go-redis/redis"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+var rdb *redis.Client
+
+// InitClient initializes the global redis client.
+func InitClient() (err error) {
+	rdb = redis.NewClient(&redis.Options{
+		Addr:     fmt.Sprintf("%s:%d", viper.GetString("redis.host"), viper.GetInt("redis.port")),
+		Password: viper.GetString("redis.password"),
+		DB:       0,
+	})
+
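+	// fail fast: verify connectivity before the service starts serving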
+	_, err = rdb.Ping().Result()
+	if err != nil {
+		slog.Error("redis db", "ping err", err)
+		return err
+	}
+	return nil
+}
+
+// Lock takes a distributed lock via SETNX; the 30-hour TTL guards against locks leaked by a dead holder.
+func Lock(key string) (bool, error) {
+	slog.Info("msg", "key", key)
+	return rdb.SetNX(key, `{"lock":1}`, 30*time.Hour).Result()
+}
+
+/*
+UnLock releases the lock
+func UnLock(key string) int64 {
+	nums, err := rdb.Del(key).Result()
+	if err != nil {
+		log.Println(err.Error())
+		return 0
+	}
+	return nums
+}
+*/
diff --git a/dbm-services/mysql/db-partition/model/model.go b/dbm-services/mysql/db-partition/model/model.go
new file mode 100644
index 0000000000..9b7b5afda7
--- /dev/null
+++ b/dbm-services/mysql/db-partition/model/model.go
@@ -0,0 +1,2 @@
+// Package model initializes configuration, logging, the meta database and redis.
+package model
diff --git a/dbm-services/mysql/db-partition/monitor/monitor.go b/dbm-services/mysql/db-partition/monitor/monitor.go
new file mode 100644
index 0000000000..20908c0de6
--- /dev/null
+++ b/dbm-services/mysql/db-partition/monitor/monitor.go
@@ -0,0 +1,110 @@
+// Package monitor pushes custom metrics and events to BlueKing monitoring.
+package monitor
+
+import (
+	"dbm-services/mysql/db-partition/util"
+	"fmt"
+	"net/http"
+	"time"
+
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+const cronHeartBeat = "partition_cron_beat"
+
+// PartitionEvent is the custom event name for partition operations.
+const PartitionEvent = "partition"
+
+// PartitionDeveloperEvent is the custom event name for developer-oriented alerts.
+const PartitionDeveloperEvent = "partition_dev"
+
+// PartitionCron identifies the partition cron in monitoring dimensions.
+const PartitionCron = "partition_cron"
+
+// SendMetric reports a heartbeat metric for the partition cron.
+func SendMetric(serverIp string) {
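+	// pushes a constant heartbeat metric (partition_cron_beat = 1) tagged with the cron identity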
+	l, _ := time.LoadLocation("Local")
+	dimension := make(map[string]interface{})
+	dimension["bk_cloud_id"] = 0
+	dimension["immute_domain"] = PartitionCron
+	dimension["server_ip"] = serverIp
+	dimension["machine_type"] = PartitionCron
+
+	body := metricsBody{
+		commonBody: commonBody{
+			DataId:      viper.GetInt("monitor.metric.data_id"),
+			AccessToken: viper.GetString("monitor.metric.access_token"),
+		},
+		Data: []metricsData{
+			{
+				commonData: commonData{
+					Target:    serverIp,
+					Timestamp: time.Now().In(l).UnixMilli(),
+					Dimension: dimension,
+					Metrics: map[string]int{
+						cronHeartBeat: 1,
+					},
+				},
+			},
+		},
+	}
+	c := util.NewClientByHosts(viper.GetString("monitor.service"))
+	_, err := c.Do(http.MethodPost, "v2/push/", body)
+	if err != nil {
+		slog.Error("msg", "send partition cron heatbeat metric error", err)
+	}
+}
+
+// SendEvent pushes one custom event to the monitoring platform.
+func SendEvent(eventName string, dimension map[string]interface{}, content string, serverIp string) {
+	l, _ := time.LoadLocation("Local")
+
+	body := eventBody{
+		commonBody: commonBody{
+			DataId:      viper.GetInt("monitor.event.data_id"),
+			AccessToken: viper.GetString("monitor.event.access_token"),
+		},
+		Data: []eventData{
+			{
+				EventName: eventName,
+				Event: map[string]interface{}{
+					"content": content,
+				},
+				commonData: commonData{
+					Target:    serverIp,
+					Timestamp: time.Now().In(l).UnixMilli(),
+					Dimension: dimension,
+					Metrics:   nil,
+				},
+			},
+		},
+	}
+	c := util.NewClientByHosts(viper.GetString("monitor.service"))
+	_, err := c.Do(http.MethodPost, "v2/push/", body)
+	if err != nil {
+		slog.Info(fmt.Sprintf("%v", body))
+		slog.Error("msg", "send partition event error", err)
+	}
+}
+
+// NewDeveloperEventDimension builds the dimensions for developer-oriented events.
+func NewDeveloperEventDimension(serverIp string) map[string]interface{} {
+	dimension := make(map[string]interface{})
+	dimension["bk_biz_id"] = viper.GetString("dba.bk_biz_id")
+	dimension["bk_cloud_id"] = 0
+	dimension["immute_domain"] = PartitionCron
+	dimension["server_ip"] = serverIp
+	dimension["machine_type"] = PartitionCron
+	return dimension
+}
+
+// NewPartitionEventDimension builds the dimensions for partition events of one cluster.
+func NewPartitionEventDimension(bkBizId int, bkCloudId int, domain string) map[string]interface{} {
+	dimension := make(map[string]interface{})
+	dimension["bk_biz_id"] = bkBizId
+	dimension["bk_biz_id"] = bkBizId
+	dimension["bk_cloud_id"] = bkCloudId
+	dimension["immute_domain"] = domain
+	return dimension
+}
diff --git a/dbm-services/mysql/db-partition/monitor/monitor_object.go b/dbm-services/mysql/db-partition/monitor/monitor_object.go
new file mode 100644
index 0000000000..01f6f75b2d
--- /dev/null
+++ b/dbm-services/mysql/db-partition/monitor/monitor_object.go
@@ -0,0 +1,33 @@
+package monitor
+
+type eventBody struct {
+	commonBody
+	Data []eventData `json:"data"`
+}
+
+type metricsBody struct {
+	commonBody
+	Data []metricsData `json:"data"`
+}
+
+type commonBody struct {
+	DataId      int    `json:"data_id"`
+	AccessToken string `json:"access_token"`
+}
+
+type eventData struct {
+	EventName string                 `json:"event_name"`
+	Event     map[string]interface{} `json:"event"`
+	commonData
+}
+
+type metricsData struct {
+	commonData
+}
+
+type commonData struct {
+	Target    string                 `json:"target"`
+	Timestamp int64                  `json:"timestamp"`
+	Dimension map[string]interface{} `json:"dimension"`
+	Metrics   map[string]int         `json:"metrics"`
+}
diff --git a/dbm-services/mysql/db-partition/router/router.go b/dbm-services/mysql/db-partition/router/router.go
new file mode 100644
index 0000000000..84f537a779
--- /dev/null
+++ b/dbm-services/mysql/db-partition/router/router.go
@@ -0,0 +1,21 @@
+// Package router wires HTTP routes to handlers.
+package router
+
+import (
+	"dbm-services/mysql/db-partition/handler"
+
+	"github.com/gin-gonic/gin"
+)
+
+// RegisterRouter registers the partition endpoints.
+func RegisterRouter(engine *gin.Engine) {
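+	// every endpoint is a POST grouped under /partition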
+	p := engine.Group("/partition")
+	p.POST("/query_conf", handler.GetPartitionsConfig)
+	p.POST("/query_log", handler.GetPartitionLog)
+	p.POST("/create_conf", handler.CreatePartitionsConfig)
+	p.POST("/del_conf", handler.DeletePartitionsConfig)
+	p.POST("/dry_run", handler.DryRun)
+	p.POST("/disable_partition", handler.DisablePartition)
+	p.POST("/enable_partition", handler.EnablePartition)
+	p.POST("/update_conf", handler.UpdatePartitionsConfig)
+}
diff --git a/dbm-services/mysql/db-partition/service/check_partition.go b/dbm-services/mysql/db-partition/service/check_partition.go
new file mode 100644
index 0000000000..0fb4d8ec98
--- /dev/null
+++ b/dbm-services/mysql/db-partition/service/check_partition.go
@@ -0,0 +1,210 @@
+package service
+
+import (
+	"context"
+	"dbm-services/mysql/db-partition/errno"
+	"dbm-services/mysql/db-partition/model"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/exp/slog"
+)
+
+// DryRun generates the partition statements a cluster needs without executing them.
+func (m *Checker) DryRun() ([]PartitionObject, error) {
+	slog.Info("do service DryRun")
+	var objects []PartitionObject
+	var sqls []PartitionSql
+	var err error
+	var needPartition bool
+	if m.BkBizId == 0 {
+		return objects, errno.BkBizIdIsEmpty
+	}
+	if m.ClusterId == 0 {
+		return objects, errno.ClusterIdIsEmpty
+	}
+	if m.BkCloudId == nil {
+		return objects, errno.CloudIdRequired
+	}
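+	// a zero ConfigId selects every rule of the cluster, otherwise only the named rule is checked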
+	var configs []*PartitionConfig
+	var tbName string
+	switch m.ClusterType {
+	case Tendbha, Tendbsingle:
+		tbName = MysqlPartitionConfig
+	case Tendbcluster:
+		tbName = SpiderPartitionConfig
+	default:
+		slog.Error(m.ClusterType, "error", errors.New("not supported db type"))
+		return objects, errno.NotSupportedClusterType
+	}
+	if m.ConfigId == 0 {
+		err = model.DB.Self.Table(tbName).Where("bk_biz_id = ? and cluster_id = ?", m.BkBizId, m.ClusterId).Scan(&configs).
+			Error
+		if err != nil {
+			slog.Error("msg", fmt.Sprintf("query %s err", tbName), err)
+			return objects, err
+		}
+	} else {
+		err = model.DB.Self.Table(tbName).Where("bk_biz_id = ? and cluster_id = ? and id = ?", m.BkBizId, m.ClusterId,
+			m.ConfigId).Scan(&configs).Error
+		if err != nil {
+			slog.Error("msg", fmt.Sprintf("query %s err", tbName), err)
+			return objects, err
+		}
+	}
+	if len(configs) == 0 {
+		return objects, errno.PartitionConfigNotExisted
+	}
+
+	slog.Info(fmt.Sprintf("configs:%v", configs))
+	switch m.ClusterType {
+	case Tendbha, Tendbsingle:
+		newConfigs, err := GetMaster(configs, m.ImmuteDomain, m.ClusterType)
+		if err != nil {
+			slog.Error("msg", "GetClusterMasterError", err)
+			return objects, err
+		}
+		sqls, err = m.CheckPartitionConfigs(newConfigs, "mysql", 1)
+		if err != nil {
+			slog.Error("msg", "CheckPartitionConfigs", err)
+			return objects, err
+		}
+		objects = []PartitionObject{{"0.0.0.0", 0, "null", sqls}}
+	case Tendbcluster:
+		objects, err = m.CheckSpiderPartitionConfigs(configs)
+		if err != nil {
+			slog.Error("msg", "CheckSpiderPartitionConfigs", err)
+			return objects, err
+		}
+	default:
+		slog.Error(m.ClusterType, "error", errors.New("not supported db type"))
+		return objects, errno.NotSupportedClusterType
+	}
+
+	for _, item := range objects {
+		for _, execute := range item.ExecuteObjects {
+			// when the cluster has no partition statements to execute and generation raised no error, no ticket is created
+			if len(execute.AddPartition) != 0 || len(execute.DropPartition) != 0 || len(execute.InitPartition) != 0 {
+				needPartition = true
+				break
+			}
+		}
+	}
+	if !needPartition {
+		return objects, errno.NothingToDo
+	}
+	return objects, nil
+}
+
+// CheckSpiderPartitionConfigs generates partition statements for every backend of a spider cluster.
+func (m *Checker) CheckSpiderPartitionConfigs(configs []*PartitionConfig) ([]PartitionObject, error) {
+	fmt.Printf("do CheckSpiderPartitionConfigs")
+
+	address := fmt.Sprintf("%s:%d", m.ImmuteDomain, m.Port)
+	backends, splitCnt, err := GetSpiderBackends(address, *m.BkCloudId)
+	if err != nil {
+		return nil, err
+	}
+	var all []PartitionObject
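+	// clone each rule per backend; remote mysql nodes hold shards, so the db pattern gains a _SPLIT_NUM suffix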
+	for _, item := range backends {
+		newconfigs := make([]*PartitionConfig, len(configs))
+		host := item["HOST"].(string)
+		port, _ := strconv.Atoi(item["PORT"].(string))
+		for k, v := range configs {
+			newconfig := *v
+			newconfig.ImmuteDomain = host
+			newconfig.Port = port
+			if item["WRAPPER"] == "mysql" {
+				newconfig.DbLike = fmt.Sprintf("%s_%s", newconfig.DbLike, item["SPLIT_NUM"].(string))
+			}
+			newconfigs[k] = &newconfig
+		}
+		execute, err := m.CheckPartitionConfigs(newconfigs, item["WRAPPER"].(string), splitCnt)
+		if err != nil {
+			slog.Error("msg", "CheckPartitionConfigs", err)
+			return all, errno.GetPartitionSqlFail.Add(fmt.Sprintf("split%s %s:%s\n%s", item["SPLIT_NUM"], item["HOST"],
+				item["PORT"], err.Error()))
+		}
+		all = append(all, PartitionObject{host, port, item["SERVER_NAME"].(string), execute})
+	}
+
+	return all, nil
+}
+
+// CheckPartitionConfigs checks the given rules concurrently and collects the generated statements.
+func (m *Checker) CheckPartitionConfigs(configs []*PartitionConfig, dbtype string, splitCnt int) ([]PartitionSql,
+	error) {
+	fmt.Printf("do CheckPartitionConfigs")
+	var errMsg Messages
+	sqlSet := PartitionSqlSet{}
+	wg := sync.WaitGroup{}
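+	// the buffered channel acts as a counting semaphore: at most 10 rules are checked concurrently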
+	tokenBucket := make(chan int, 10)
+	for _, config := range configs {
+		wg.Add(1)
+		tokenBucket <- 0
+		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+		go func(config *PartitionConfig) {
+			slog.Info(fmt.Sprintf("%s:%v", "CheckOnePartitionConfig", config))
+			err := m.CheckOnePartitionConfig(ctx, cancel, *config, &wg, &tokenBucket, &sqlSet, dbtype, splitCnt)
+			if err != nil {
+				errMsg.mu.Lock()
+				errMsg.list = append(errMsg.list, err.Error())
+				errMsg.mu.Unlock()
+			}
+		}(config)
+	}
+	wg.Wait()
+	close(tokenBucket)
+	if len(errMsg.list) > 0 {
+		return sqlSet.PartitionSqls, fmt.Errorf(strings.Join(errMsg.list, "\n"))
+	}
+	return sqlSet.PartitionSqls, nil
+}
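+
+// A minimal sketch of the bounded-concurrency pattern used above (illustrative,
+// not part of the original patch; Job, jobs and process are hypothetical names):
+// the buffered channel acts as a semaphore capping the number of in-flight workers.
+//
+//	var wg sync.WaitGroup
+//	tokens := make(chan struct{}, 10)
+//	for _, job := range jobs {
+//		wg.Add(1)
+//		tokens <- struct{}{} // blocks once 10 workers are running
+//		go func(j Job) {
+//			defer func() { <-tokens; wg.Done() }()
+//			process(j) // do the per-job work
+//		}(job)
+//	}
+//	wg.Wait()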
+
+// CheckOnePartitionConfig generates partition SQL for a single rule, bounded by the caller's timeout
+func (m *Checker) CheckOnePartitionConfig(ctx context.Context, cancel context.CancelFunc, config PartitionConfig,
+	wg *sync.WaitGroup, tokenBucket *chan int, sqlSet *PartitionSqlSet, dbtype string, splitCnt int) error {
+	fmt.Printf("do CheckOnePartitionConfig")
+	var addSql, dropSql []string
+	var err error
+	var initSql []InitSql
+	defer func() {
+		<-*tokenBucket
+		wg.Done()
+		cancel()
+	}()
+
+	finish := make(chan int, 1)
+	errorChan := make(chan error, 1)
+	go func() {
+		initSql, addSql, dropSql, err = config.GetPartitionDbLikeTbLike(dbtype, splitCnt)
+		if err != nil {
+			// signal only the error channel; signalling finish as well would let
+			// the outer select race and occasionally swallow the error
+			errorChan <- err
+			return
+		}
+		sqlSet.Mu.Lock()
+		sqlSet.PartitionSqls = append(sqlSet.PartitionSqls, PartitionSql{config.ID, config.DbLike, config.TbLike, initSql,
+			addSql, dropSql})
+		sqlSet.Mu.Unlock()
+		finish <- 1
+	}()
+
+	select {
+	case <-finish:
+		return nil
+	case errOuter := <-errorChan:
+		return errOuter
+	case <-ctx.Done():
+		errOuter := fmt.Errorf("partition rule: [dblike:`%s` tblike:`%s`] get partition sql timeout",
+			config.DbLike, config.TbLike)
+		return errOuter
+	}
+}
diff --git a/dbm-services/mysql/db-partition/service/check_partition_base_func.go b/dbm-services/mysql/db-partition/service/check_partition_base_func.go
new file mode 100644
index 0000000000..668c3dc61c
--- /dev/null
+++ b/dbm-services/mysql/db-partition/service/check_partition_base_func.go
@@ -0,0 +1,635 @@
+package service
+
+import (
+	"dbm-services/mysql/db-partition/errno"
+	"dbm-services/mysql/db-partition/model"
+	"dbm-services/mysql/db-partition/monitor"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+// GetPartitionDbLikeTbLike generates init/add/drop partition SQL for every table matched by the rule
+func (config *PartitionConfig) GetPartitionDbLikeTbLike(dbtype string, splitCnt int) ([]InitSql, []string, []string,
+	error) {
+	var addSqls, dropSqls, errs Messages
+	var initSqls InitMessages
+	var err error
+	initSqls.list = []InitSql{}
+	addSqls.list = []string{}
+	dropSqls.list = []string{}
+	errs.list = []string{}
+
+	tbs, errOuter := config.GetDbTableInfo()
+	if errOuter != nil {
+		slog.Error("GetDbTableInfo error", errOuter)
+		return nil, nil, nil, fmt.Errorf("get database and table info failed:%s", errOuter.Error())
+	}
+	var sql string
+	var needSize int
+	wg := sync.WaitGroup{}
+	tokenBucket := make(chan int, 10)
+	for _, tb := range tbs {
+		wg.Add(1)
+		tokenBucket <- 0
+		slog.Info(fmt.Sprintf("get init/add/drop partition for (domain:%s, dbname:%s, table_name:%s)", tb.ImmuteDomain,
+			tb.DbName, tb.TbName))
+		go func(tb ConfigDetail) {
+			defer func() {
+				<-tokenBucket
+				wg.Done()
+			}()
+			if tb.Partitioned {
+				sql, err = tb.GetAddPartitionSql()
+				if err != nil {
+					slog.Error("msg", "GetAddPartitionSql error", err)
+					AddString(&errs, err.Error())
+					return
+				}
+				AddString(&addSqls, sql)
+				sql, err = tb.GetDropPartitionSql()
+				if err != nil {
+					slog.Error("msg", "GetDropPartitionSql error", err)
+					AddString(&errs, err.Error())
+					return
+				}
+				AddString(&dropSqls, sql)
+			} else {
+				sql, needSize, err = tb.GetInitPartitionSql(dbtype, splitCnt)
+				if err != nil {
+					slog.Error("msg", "GetInitPartitionSql error", err)
+					AddString(&errs, err.Error())
+					return
+				}
+				AddInit(&initSqls, InitSql{sql, needSize})
+			}
+		}(tb)
+	}
+	wg.Wait()
+	close(tokenBucket)
+	if len(errs.list) > 0 {
+		err := fmt.Errorf("partition rule: [dblike:`%s` tblike:`%s`] get partition sql error\n%s",
+			config.DbLike, config.TbLike, strings.Join(errs.list, "\n"))
+		slog.Error("msg", "GetPartitionDbLikeTbLike", err)
+		return nil, nil, nil, err
+	}
+	return initSqls.list, addSqls.list, dropSqls.list, nil
+}
+
+// GetDbTableInfo lists the databases and tables matched by the rule and whether each table is already partitioned
+func (config *PartitionConfig) GetDbTableInfo() (ptlist []ConfigDetail, err error) {
+	address := fmt.Sprintf("%s:%d", config.ImmuteDomain, config.Port)
+	slog.Info(fmt.Sprintf("get real partition info from (%s/%s,%s)", address, config.DbLike, config.TbLike))
+
+	var output oneAddressResult
+	sql := fmt.Sprintf(
+		`select TABLE_SCHEMA,TABLE_NAME,CREATE_OPTIONS from information_schema.tables where TABLE_SCHEMA like '%s' and TABLE_NAME like '%s';`, config.DbLike, config.TbLike)
+	var queryRequest = QueryRequest{[]string{address}, []string{sql}, true, 30, config.BkCloudId}
+	output, err = OneAddressExecuteSql(queryRequest)
+	if err != nil {
+		return nil, err
+	}
+	if len(output.CmdResults[0].TableData) == 0 {
+		return nil, errno.NoTableMatched.Add(fmt.Sprintf("db like: [%s] and table like: [%s]", config.DbLike, config.TbLike))
+	}
+	fmt.Printf("output.CmdResults[0].TableData:%v\n", output.CmdResults[0].TableData)
+	for _, row := range output.CmdResults[0].TableData {
+		var partitioned bool
+		if strings.Contains(row["CREATE_OPTIONS"].(string), "partitioned") {
+			partitioned = true
+		}
+
+		partitionTable := ConfigDetail{PartitionConfig: *config, DbName: row["TABLE_SCHEMA"].(string),
+			TbName: row["TABLE_NAME"].(string), Partitioned: partitioned}
+		ptlist = append(ptlist, partitionTable)
+	}
+	slog.Info("finish getting all partition info")
+	return ptlist, nil
+}
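+
+// Illustrative query and result (assumed values, not part of the original
+// patch): information_schema reports "partitioned" in CREATE_OPTIONS for
+// partitioned tables, which is what the check above relies on.
+//
+//	select TABLE_SCHEMA,TABLE_NAME,CREATE_OPTIONS from information_schema.tables
+//	    where TABLE_SCHEMA like 'db%' and TABLE_NAME like 'tb%';
+//	-- db1 | tb1 | partitioned
+//	-- db1 | tb2 |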
+
+// GetDropPartitionSql generates the SQL that drops expired partitions
+func (m *ConfigDetail) GetDropPartitionSql() (string, error) {
+	var sql, dropSql, fx string
+	reserve := m.ReservedPartition * m.PartitionTimeInterval
+	address := fmt.Sprintf("%s:%d", m.ImmuteDomain, m.Port)
+	base0 := fmt.Sprintf(`select partition_name from INFORMATION_SCHEMA.PARTITIONS `+
+		`where TABLE_SCHEMA='%s' and TABLE_NAME='%s' and PARTITION_DESCRIPTION<`, m.DbName, m.TbName)
+	base1 := "order by PARTITION_DESCRIPTION asc;"
+	switch m.PartitionType {
+	case 0:
+		fx = fmt.Sprintf(`(TO_DAYS(now())-%d) `, reserve-DiffOneDay)
+	case 1:
+		fx = fmt.Sprintf(`(TO_DAYS(now())-%d) `, reserve)
+	case 3:
+		fx = fmt.Sprintf(`DATE_FORMAT(date_sub(now(),interval %d day),'%%Y%%m%%d')`, reserve)
+	case 101:
+		fx = fmt.Sprintf(`DATE_FORMAT(date_sub(now(),interval %d day),'%%Y%%m%%d')`, reserve-DiffOneDay)
+	case 4:
+		fx = fmt.Sprintf(`DATE_FORMAT(date_sub(now(),interval %d day),'\'%%Y-%%m-%%d\'')`, reserve-DiffOneDay)
+	case 5:
+		fx = fmt.Sprintf(`UNIX_TIMESTAMP(date_sub(curdate(),INTERVAL %d DAY))`, reserve-DiffOneDay)
+	default:
+		return dropSql, errors.New("not supported partition type")
+	}
+	sql = fmt.Sprintf("%s %s %s", base0, fx, base1)
+	var queryRequest = QueryRequest{Addresses: []string{address}, Cmds: []string{sql}, Force: true, QueryTimeout: 30,
+		BkCloudId: m.BkCloudId}
+	output, err := OneAddressExecuteSql(queryRequest)
+	if err != nil {
+		return dropSql, err
+	}
+	reg := regexp.MustCompile("^p[0-9]{8}$")
+
+	var expired []string
+	for _, row := range output.CmdResults[0].TableData {
+		name := row["partition_name"].(string)
+		if reg.MatchString(name) {
+			expired = append(expired, name)
+		} else {
+			return dropSql, fmt.Errorf("partition_name [%s] not like 'p20130101', "+
+				"not created by partition system, can't be dropped", name)
+		}
+	}
+	if len(expired) != 0 {
+		dropSql = fmt.Sprintf("alter table `%s`.`%s` drop partition %s", m.DbName, m.TbName, strings.Join(expired, ","))
+	}
+	return dropSql, nil
+}
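+
+// Illustrative result (assumed values, not part of the original patch): with
+// expired partitions p20230101 and p20230102 the generated statement is
+//
+//	alter table `db1`.`tb1` drop partition p20230101,p20230102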
+
+// GetInitPartitionSql generates the SQL that partitions a table for the first time (automatic initial partitioning)
+func (m *ConfigDetail) GetInitPartitionSql(dbtype string, splitCnt int) (string, int, error) {
+	var sqlPartitionDesc []string
+	var pkey, descKey, descFormat, initSql string
+	var needSize, diff int
+	var err error
+	slog.Info(fmt.Sprintf("GetInitPartitionSql ConfigDetail: %v", m))
+	switch m.PartitionType {
+	case 0:
+		pkey = fmt.Sprintf("RANGE (TO_DAYS(%s))", m.PartitionColumn)
+		descKey = "less than"
+		descFormat = "to_days('2006-01-02')"
+		diff = DiffOneDay
+	case 1:
+		pkey = fmt.Sprintf("LIST (TO_DAYS(%s))", m.PartitionColumn)
+		descKey = "in"
+		descFormat = "to_days('2006-01-02')"
+		diff = 0
+	case 3:
+		pkey = fmt.Sprintf("LIST (%s)", m.PartitionColumn)
+		descKey = "in"
+		descFormat = "20060102"
+		diff = 0
+	case 4:
+		pkey = fmt.Sprintf("RANGE COLUMNS(%s)", m.PartitionColumn)
+		descKey = "less than"
+		descFormat = "'2006-01-02'"
+		diff = DiffOneDay
+	case 5:
+		pkey = fmt.Sprintf("RANGE (UNIX_TIMESTAMP(%s))", m.PartitionColumn)
+		descKey = "less than"
+		descFormat = "UNIX_TIMESTAMP('2006-01-02')"
+		diff = DiffOneDay
+	case 101:
+		pkey = fmt.Sprintf("RANGE (%s)", m.PartitionColumn)
+		descKey = "less than"
+		descFormat = "20060102"
+		diff = 0
+	default:
+		return initSql, needSize, errors.New("not supported partition type")
+	}
+
+	for i := -m.ReservedPartition; i < 15; i++ {
+		pname := time.Now().AddDate(0, 0, i*m.PartitionTimeInterval).Format("p20060102")
+		pdesc := time.Now().AddDate(0, 0, i*m.PartitionTimeInterval+diff).Format(descFormat)
+		palter := fmt.Sprintf(" partition %s values %s (%s)", pname, descKey, pdesc)
+		sqlPartitionDesc = append(sqlPartitionDesc, palter)
+	}
+	// nohup /usr/bin/perl /data/dbbak/percona-toolkit-3.2.0/bin/pt-online-schema-change -uxxx -pxxx -S /data1/mysqldata/mysql.sock
+	// --charset=utf8 --recursion-method=NONE --alter-foreign-keys-method=auto --alter "partition by xxx"
+	// D=leagues_server_HN1,t=league_audit --max-load Threads_running=100 --critical-load=Threads_running:80 --no-drop-old-table
+	// --pause-file=/tmp/partition_osc_pause_xxxx --set-vars lock_wait_timeout=5 --execute >> /data/dbbak/xxx.out 2>&1 &
+
+	if dbtype == "TDBCTL" {
+		initSql = fmt.Sprintf("alter table `%s`.`%s` partition by %s (%s)", m.DbName, m.TbName, pkey,
+			strings.Join(sqlPartitionDesc, ","))
+	} else {
+		needSize, err = m.CheckTableSize(splitCnt)
+		if err != nil {
+			return initSql, needSize, err
+		}
+		options := fmt.Sprintf(
+			"--charset=utf8 --recursion-method=NONE --alter-foreign-keys-method=auto --max-load Threads_running=%d "+
+				"--critical-load=Threads_running=%d --set-vars lock_wait_timeout=%d --print --pause-file=/tmp/partition_osc_pause_%s_%s --execute ",
+			viper.GetInt("pt.max_load.threads_running"),
+			viper.GetInt("pt.critical_load.threads_running"), viper.GetInt("pt.lock_wait_timeout"), m.DbName, m.TbName)
+		initSql = fmt.Sprintf(` D=%s,t=%s --alter "partition by %s (%s)" %s`, m.DbName, m.TbName, pkey,
+			strings.Join(sqlPartitionDesc, ","), options)
+	}
+	return initSql, needSize, nil
+}
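+
+// Illustrative result (assumed values, not part of the original patch): for a
+// spider control node (TDBCTL) the fragment is a plain ALTER statement, while
+// for mysql backends it is the pt-osc argument list, e.g.
+//
+//	 D=db1,t=tb1 --alter "partition by RANGE (TO_DAYS(create_time)) (...)" --charset=utf8 --recursion-method=NONE ... --execute
+//
+// which the executor completes with the pt-online-schema-change binary and
+// connection options (see the sample command above).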
+
+// CheckTableSize checks the table is small enough for automatic initial partitioning and returns the space pt-osc needs
+func (m *ConfigDetail) CheckTableSize(splitCnt int) (int, error) {
+	var needSize int
+	address := fmt.Sprintf("%s:%d", m.ImmuteDomain, m.Port)
+	sql := fmt.Sprintf(
+		"select TABLE_ROWS,(DATA_LENGTH+INDEX_LENGTH) as BYTES from information_schema.tables where TABLE_SCHEMA='%s' and TABLE_NAME='%s'", m.DbName, m.TbName)
+	var queryRequest = QueryRequest{Addresses: []string{address}, Cmds: []string{sql}, Force: true, QueryTimeout: 30,
+		BkCloudId: m.BkCloudId}
+	output, err := OneAddressExecuteSql(queryRequest)
+	if err != nil {
+		return needSize, err
+	}
+	rows, _ := strconv.Atoi(output.CmdResults[0].TableData[0]["TABLE_ROWS"].(string))
+	bytes, _ := strconv.Atoi(output.CmdResults[0].TableData[0]["BYTES"].(string))
+	if bytes < viper.GetInt("pt.max_size") && rows < viper.GetInt("pt.max_rows") {
+		needSize = 3 * bytes * splitCnt // reserve 3x the table size for pt-osc; on a spider remote, multiply by the shard count on the machine
+		return needSize, nil
+	} else {
+		return needSize, fmt.Errorf(
+			"table %s.%s is not a partitioned table and cannot be auto partitioned, "+
+				"because it is larger than 100MB or has more than 1000000 rows", m.DbName, m.TbName)
+	}
+}
+
+// GetAddPartitionSql generates the SQL that adds new partitions
+func (m *ConfigDetail) GetAddPartitionSql() (string, error) {
+	var vsql, addSql, descKey, name, fx string
+	var wantedDesc, wantedName, wantedDescIfOld, wantedNameIfOld string
+	var diff, desc int
+	var begin int
+	address := fmt.Sprintf("%s:%d", m.ImmuteDomain, m.Port)
+	switch m.PartitionType {
+	case 0:
+		diff = DiffOneDay
+		descKey = "less than"
+		fx = fmt.Sprintf(`(TO_DAYS(now())+%d) `, diff)
+		wantedDesc = "partition_description as WANTED_DESC,"
+		wantedName = fmt.Sprintf(`DATE_FORMAT(from_days(PARTITION_DESCRIPTION-%d),'%%Y%%m%%d')  as WANTED_NAME`, diff)
+		wantedDescIfOld = fmt.Sprintf(`(TO_DAYS(now())+%d) as WANTED_DESC,`, diff)
+		wantedNameIfOld = "DATE_FORMAT(now(),'%Y%m%d')  as wanted_name"
+	case 1:
+		descKey = "in"
+		fx = "TO_DAYS(now())"
+		wantedDesc = "partition_description as WANTED_DESC,"
+		wantedName = "DATE_FORMAT(from_days(PARTITION_DESCRIPTION),'%Y%m%d')  as WANTED_NAME"
+		wantedDescIfOld = "(TO_DAYS(now())) as WANTED_DESC,`"
+		wantedNameIfOld = "DATE_FORMAT(now(),'%Y%m%d')  as WANTED_NAME"
+	case 3:
+		descKey = "in"
+		fx = "DATE_FORMAT(now(),'%Y%m%d')"
+		wantedName = "partition_description as WANTED_NAME"
+		wantedNameIfOld = "DATE_FORMAT(now(),'%Y%m%d')  as WANTED_NAME"
+	case 101:
+		diff = DiffOneDay
+		descKey = "less than"
+		fx = fmt.Sprintf(`DATE_FORMAT(date_add(now(),interval %d day),'%%Y%%m%%d')`, diff)
+		wantedName = "partition_description as WANTED_NAME"
+		wantedNameIfOld = "DATE_FORMAT(now(),'%Y%m%d')  as WANTED_NAME"
+	case 4:
+		diff = DiffOneDay
+		descKey = "less than"
+		fx = fmt.Sprintf(`DATE_FORMAT(date_add(now(),interval %d day),'\'%%Y-%%m-%%d\'')`, diff)
+		wantedName = fmt.Sprintf(
+			`DATE_FORMAT(date_sub(replace(partition_description,'\'',''),interval %d day),'%%Y%%m%%d') as WANTED_NAME`, diff)
+		wantedNameIfOld = "DATE_FORMAT(now(),'%Y%m%d')  as WANTED_NAME"
+	case 5:
+		diff = DiffOneDay
+		descKey = "less than"
+		fx = fmt.Sprintf(`UNIX_TIMESTAMP(date_add(curdate(),INTERVAL %d DAY))`, diff)
+		wantedDesc = "partition_description as WANTED_DESC,"
+		wantedName = fmt.Sprintf(
+			`DATE_FORMAT(date_sub(from_unixtime(partition_description),interval %d day),'%%Y%%m%%d') as WANTED_NAME`, diff)
+		wantedDescIfOld = fmt.Sprintf(`UNIX_TIMESTAMP(DATE_ADD(curdate(),INTERVAL %d DAY)) as WANTED_DESC,`, diff)
+		wantedNameIfOld = "DATE_FORMAT(now(),'%Y%m%d')  as WANTED_NAME"
+	default:
+		return addSql, errors.New("not supported partition type")
+	}
+
+	vsql = fmt.Sprintf(
+		"select count(*) as COUNT from INFORMATION_SCHEMA.PARTITIONS where TABLE_SCHEMA='%s' and TABLE_NAME='%s' "+
+			"and PARTITION_DESCRIPTION> %s", m.DbName, m.TbName, fx)
+	var queryRequest = QueryRequest{Addresses: []string{address}, Cmds: []string{vsql}, Force: true, QueryTimeout: 30,
+		BkCloudId: m.BkCloudId}
+	output, err := OneAddressExecuteSql(queryRequest)
+	if err != nil {
+		return addSql, err
+	}
+	cnt, _ := strconv.Atoi(output.CmdResults[0].TableData[0]["COUNT"].(string))
+	// decide whether new partitions are needed
+	if cnt >= m.ExtraPartition {
+		return addSql, nil
+	}
+	need := m.ExtraPartition - cnt
+	// first fetch the current largest PARTITION_DESCRIPTION and PARTITION_NAME
+	vsql = fmt.Sprintf(`select %s %s from INFORMATION_SCHEMA.PARTITIONS where TABLE_SCHEMA ='%s' and TABLE_NAME='%s' `+
+		`and partition_description >= %s `+
+		`order by PARTITION_DESCRIPTION desc limit 1;`, wantedDesc, wantedName, m.DbName, m.TbName, fx)
+	queryRequest = QueryRequest{Addresses: []string{address}, Cmds: []string{vsql}, Force: true, QueryTimeout: 30,
+		BkCloudId: m.BkCloudId}
+	output, err = OneAddressExecuteSql(queryRequest)
+	if err != nil {
+		return addSql, err
+	}
+	// the table is partitioned, but its partitions are too old to cover today or the future; add partitions that cover today's data
+	if len(output.CmdResults[0].TableData) == 0 {
+		begin = -1
+		vsql = fmt.Sprintf(`select %s %s from INFORMATION_SCHEMA.PARTITIONS limit 1;`, wantedDescIfOld, wantedNameIfOld)
+		queryRequest = QueryRequest{Addresses: []string{address}, Cmds: []string{vsql}, Force: true, QueryTimeout: 30,
+			BkCloudId: m.BkCloudId}
+		output, err = OneAddressExecuteSql(queryRequest)
+		if err != nil {
+			return addSql, err
+		}
+		slog.Warn(fmt.Sprintf(
+			"%s.%s is a partitioned table, but existed partitions are too old, do not contain today's data.", m.DbName,
+			m.TbName))
+	}
+	name = output.CmdResults[0].TableData[0]["WANTED_NAME"].(string)
+	switch m.PartitionType {
+	case 0, 1, 5:
+		desc, _ = strconv.Atoi(output.CmdResults[0].TableData[0]["WANTED_DESC"].(string))
+		addSql, err = m.NewPartitionNameDescType0Type1Type5(begin, need, name, desc, descKey)
+	case 3, 101:
+		addSql, err = m.NewPartitionNameDescType3Type101(begin, need, name, descKey)
+	case 4:
+		addSql, err = m.NewPartitionNameDescType4(begin, need, name, descKey)
+	default:
+		return addSql, errors.New("not supported partition type")
+	}
+	if err != nil {
+		return addSql, err
+	}
+	addSql = fmt.Sprintf("alter table `%s`.`%s`  add partition( %s", m.DbName, m.TbName, addSql)
+	return addSql, nil
+}
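+
+// Illustrative result (assumed values, not part of the original patch): for a
+// type-0 rule on `db1`.`tb1` that needs two more daily partitions, the
+// generated statement has the shape
+//
+//	alter table `db1`.`tb1`  add partition( partition `p20230102`  values less than (738888), partition `p20230103`  values less than (738889))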
+
+// NewPartitionNameDescType0Type1Type5 builds partition clauses for types 0, 1 and 5 (numeric descriptions)
+func (m *ConfigDetail) NewPartitionNameDescType0Type1Type5(begin int, need int, name string, desc int,
+	descKey string) (string, error) {
+	var newdesc, ratio int
+	var newname, sql string
+	ratio = 1
+	if m.PartitionType == 5 {
+		ratio = 86400
+	}
+	for i := begin; i < need; i++ {
+		// build the partition description
+		newdesc = desc + (i+1)*m.PartitionTimeInterval*ratio
+		// build the partition name
+		formatDate, err := time.Parse("20060102", name)
+		if err != nil {
+			return sql, errors.New("err partition name: " + name)
+		}
+		newname = formatDate.AddDate(0, 0, (i+1)*m.PartitionTimeInterval).Format("20060102")
+		sql = fmt.Sprintf("%s partition `p%s`  values %s (%d),", sql, newname, descKey, newdesc)
+	}
+	sql = sql[0:len(sql)-1] + ")"
+	return sql, nil
+}
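+
+// Worked example of the loop arithmetic (assumed values, not from the original
+// patch): name="20230101", desc=738887 (to_days('2023-01-02')), interval=1,
+// type 0, begin=0, need=2 yields p20230102 less than (738888) and p20230103
+// less than (738889). When the newest partition is older than today, begin is
+// -1 so the first generated partition reuses today's name and description.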
+
+// NewPartitionNameDescType3Type101 builds partition clauses for types 3 and 101 (yyyymmdd integer descriptions)
+func (m *ConfigDetail) NewPartitionNameDescType3Type101(begin int, need int, name string, descKey string) (string,
+	error) {
+	var newname, sql string
+	for i := begin; i < need; i++ {
+		formatDate, err := time.Parse("20060102", name)
+		if err != nil {
+			return sql, errors.New("err partition name: " + name)
+		}
+		newname = formatDate.AddDate(0, 0, (i+1)*m.PartitionTimeInterval).Format("20060102")
+		sql = fmt.Sprintf("%s partition `p%s` values %s (%s),", sql, newname, descKey, newname)
+	}
+	sql = sql[0:len(sql)-1] + ")"
+	return sql, nil
+}
+
+// NewPartitionNameDescType4 builds partition clauses for type 4 (date-string descriptions)
+func (m *ConfigDetail) NewPartitionNameDescType4(begin int, need int, name string, descKey string) (string, error) {
+	var newname, newdesc, sql string
+	for i := begin; i < need; i++ {
+		formatDate, err := time.Parse("20060102", name)
+		if err != nil {
+			return sql, errors.New("err partition name: " + name)
+		}
+		newname = formatDate.AddDate(0, 0, (i+1)*m.PartitionTimeInterval).Format("20060102")
+		newdesc = formatDate.AddDate(0, 0, (i+2)*m.PartitionTimeInterval).Format("'2006-01-02'")
+		sql = fmt.Sprintf("%s partition `p%s`  values %s (%s),", sql, newname, descKey, newdesc)
+	}
+	sql = sql[0:len(sql)-1] + ")"
+	return sql, nil
+}
+
+// GetSpiderBackends lists the spider remote and control nodes from mysql.servers, plus the max shard count per host
+func GetSpiderBackends(address string, bkCloudId int) (tableDataType, int, error) {
+	var splitCnt int
+	vsql := "select HOST,PORT,replace(server_name,'SPT','') as SPLIT_NUM, SERVER_NAME, WRAPPER from mysql.servers " +
+		"where wrapper in ('mysql','TDBCTL') and (server_name like 'SPT%' or server_name like 'TDBCTL%') ;"
+	queryRequest := QueryRequest{Addresses: []string{address}, Cmds: []string{vsql}, Force: true, QueryTimeout: 30,
+		BkCloudId: bkCloudId}
+	output, err := OneAddressExecuteSql(queryRequest)
+	if err != nil {
+		return nil, splitCnt, fmt.Errorf("get spider info error: %s", err.Error())
+	} else if len(output.CmdResults[0].TableData) == 0 {
+		return nil, splitCnt, fmt.Errorf("no spider remote db or control spider found")
+	}
+	vsql =
+		"select count(*) as COUNT from mysql.servers where WRAPPER='mysql' and SERVER_NAME like 'SPT%' group by host order by 1 desc limit 1;"
+	queryRequest = QueryRequest{Addresses: []string{address}, Cmds: []string{vsql}, Force: true, QueryTimeout: 30,
+		BkCloudId: bkCloudId}
+	output1, err := OneAddressExecuteSql(queryRequest)
+	if err != nil {
+		return nil, splitCnt, fmt.Errorf("get spider split count error: %s", err.Error())
+	}
+	splitCnt, _ = strconv.Atoi(output1.CmdResults[0].TableData[0]["COUNT"].(string))
+	return output.CmdResults[0].TableData, splitCnt, nil
+}
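+
+// Illustrative shape of the returned rows (assumed values, not from the
+// original patch); SPLIT_NUM is server_name with the SPT prefix stripped:
+//
+//	HOST     PORT   SPLIT_NUM  SERVER_NAME  WRAPPER
+//	1.1.1.1  20000  0          SPT0         mysql
+//	2.2.2.2  26000  TDBCTL0    TDBCTL0      TDBCTL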
+
+// CreatePartitionTicket creates a bk-dbm ticket for the generated partition SQL and logs the outcome
+func CreatePartitionTicket(check Checker, objects []PartitionObject, zoneOffset int, date string, scheduler string) {
+	zone := fmt.Sprintf("%+03d:00", zoneOffset)
+	ticketType := "MYSQL_PARTITION"
+	if check.ClusterType == Tendbcluster {
+		ticketType = "SPIDER_PARTITION"
+	}
+	ticket := Ticket{BkBizId: check.BkBizId, TicketType: ticketType, Remark: "auto partition",
+		Details: Detail{Infos: []Info{{check.ConfigId, check.ClusterId, check.ImmuteDomain, *check.BkCloudId, objects}}}}
+	id, err := CreateDbmTicket(ticket)
+	if err != nil {
+		dimension := monitor.NewPartitionEventDimension(check.BkBizId, *check.BkCloudId, check.ImmuteDomain)
+		content := fmt.Sprintf("partition error. create ticket fail: %s", err.Error())
+		monitor.SendEvent(monitor.PartitionEvent, dimension, content, "0.0.0.0")
+		slog.Error("msg", fmt.Sprintf("create ticket fail: %v", ticket), err)
+		AddLog(check.ConfigId, check.BkBizId, check.ClusterId, *check.BkCloudId, 0, check.ImmuteDomain, zone, date, scheduler,
+			"{}",
+			content, CheckFailed, check.ClusterType)
+		return
+	}
+	bytes, err := json.Marshal(ticket)
+	if err != nil {
+		bytes = []byte("{}")
+		slog.Error("msg", "ticket marshal failed", err)
+	}
+	AddLog(check.ConfigId, check.BkBizId, check.ClusterId, *check.BkCloudId, id, check.ImmuteDomain,
+		zone, date, scheduler, string(bytes), "", ExecuteAsynchronous, check.ClusterType)
+}
+
+// NeedPartition returns the partition rules that still need to run for the given cron type, cluster type and time zone
+func NeedPartition(cronType string, clusterType string, zoneOffset int, cronDate string) ([]*Checker, error) {
+	var configTb, logTb, ticket string
+	var all, succeeded, doNothing []*Checker
+	switch clusterType {
+	case Tendbha, Tendbsingle:
+		configTb = MysqlPartitionConfig
+		logTb = MysqlPartitionCronLogTable
+		ticket = MysqlPartition
+	case Tendbcluster:
+		configTb = SpiderPartitionConfig
+		logTb = SpiderPartitionCronLogTable
+		ticket = SpiderPartition
+	default:
+		return nil, errors.New("not supported db type")
+	}
+	vzone := fmt.Sprintf("%+03d:00", zoneOffset)
+	vsql := fmt.Sprintf(
+		"select conf.id as config_id, conf.bk_biz_id as bk_biz_id, conf.cluster_id as cluster_id,"+
+			"conf.immute_domain as immute_domain, conf.port as port, conf.bk_cloud_id as bk_cloud_id,"+
+			"cluster.cluster_type as cluster_type from `%s`.`%s` as conf,`%s`.db_meta_cluster "+
+			"as cluster where conf.cluster_id=cluster.id and cluster.time_zone='%s' and "+
+			"conf.phase='online' order by 2,3;",
+		viper.GetString("db.name"), configTb, viper.GetString("dbm_db_name"), vzone)
+	slog.Info(vsql)
+	err := model.DB.Self.Raw(vsql).Scan(&all).Error
+	if err != nil {
+		slog.Error(vsql, "execute err", err)
+		return nil, err
+	}
+	slog.Info("all", all)
+	if cronType == "daily" {
+		return all, nil
+	}
+	vsql = fmt.Sprintf(
+		"select conf.id as config_id from `%s`.`%s` as conf,`%s`.db_meta_cluster as cluster, "+
+			"`%s`.`%s` as log,`%s`.ticket_ticket as ticket "+
+			"where conf.cluster_id=cluster.id and conf.id=log.config_id and ticket.id=log.ticket_id "+
+			"and cluster.time_zone='%s' and log.cron_date='%s' "+
+			"and ticket.remark='auto partition' and ticket.ticket_type='%s' "+
+			"and (ticket.status='SUCCEEDED' or ticket.status='RUNNING')",
+		viper.GetString("db.name"), configTb, viper.GetString("dbm_db_name"),
+		viper.GetString("db.name"), logTb, viper.GetString("dbm_db_name"), vzone, cronDate, ticket)
+	slog.Info(vsql)
+	err = model.DB.Self.Raw(vsql).Scan(&successed).Error
+	if err != nil {
+		slog.Error(vsql, "execute err", err)
+		return nil, err
+	}
+	slog.Info("successed", successed)
+	vsql = fmt.Sprintf("select conf.id as config_id from `%s`.`%s` as conf,`%s`.db_meta_cluster as cluster, "+
+		"`%s`.`%s` as log where conf.cluster_id=cluster.id and conf.id=log.config_id "+
+		"and cluster.time_zone='%s' and log.cron_date='%s' and log.status like '%s'",
+		viper.GetString("db.name"), configTb, viper.GetString("dbm_db_name"),
+		viper.GetString("db.name"), logTb, vzone, cronDate, CheckSucceeded)
+	slog.Info(vsql)
+	err = model.DB.Self.Raw(vsql).Scan(&doNothing).Error
+	if err != nil {
+		slog.Error(vsql, "execute err", err)
+		return nil, err
+	}
+	slog.Info("doNothing", doNothing)
+	var need []*Checker
+	for _, item := range all {
+		retryFlag := true
+		for _, ok := range succeeded {
+			if item.ConfigId == ok.ConfigId {
+				retryFlag = false
+				break
+			}
+		}
+		if !retryFlag {
+			continue
+		}
+		for _, ok := range doNothing {
+			if item.ConfigId == ok.ConfigId {
+				retryFlag = false
+				break
+			}
+		}
+		if retryFlag {
+			need = append(need, item)
+		}
+	}
+	slog.Info("need", need)
+	return need, nil
+}
+
+// GetMaster rewrites the configs to point at the cluster's master (or orphan) storage instance
+func GetMaster(configs []*PartitionConfig, immuteDomain, clusterType string) ([]*PartitionConfig, error) {
+	newconfigs := make([]*PartitionConfig, len(configs))
+	clusterInfo, err := GetCluster(Domain{immuteDomain}, clusterType)
+	if err != nil {
+		slog.Error("msg", "GetCluster err", err)
+		return nil, fmt.Errorf("GetCluster err: %s", err.Error())
+	}
+	var masterIp string
+	var masterPort int
+	for _, storage := range clusterInfo.Storages {
+		if storage.InstanceRole == Orphan || storage.InstanceRole == BackendMaster {
+			masterIp = storage.IP
+			masterPort = storage.Port
+			break
+		}
+	}
+
+	for k, v := range configs {
+		newconfig := *v
+		newconfig.ImmuteDomain = masterIp
+		newconfig.Port = masterPort
+		newconfigs[k] = &newconfig
+	}
+	return newconfigs, nil
+}
+
+// AddLog writes one partition cron log record
+func AddLog(configId, bkBizId, clusterId, bkCloudId, ticketId int, immuteDomain, zone, date, scheduler, detailJson,
+	info, checkStatus, clusterType string) {
+	tx := model.DB.Self.Begin()
+	tb := MysqlPartitionCronLogTable
+	if clusterType == Tendbcluster {
+		tb = SpiderPartitionCronLogTable
+	}
+	log := &PartitionCronLog{ConfigId: configId, BkBizId: bkBizId, ClusterId: clusterId, TicketId: ticketId,
+		ImmuteDomain: immuteDomain, BkCloudId: bkCloudId, TimeZone: zone, CronDate: date, Scheduler: scheduler,
+		TicketDetail: detailJson, CheckInfo: info, Status: checkStatus}
+	err := tx.Debug().Table(tb).Create(log).Error
+	if err != nil {
+		tx.Rollback()
+		slog.Error("msg", "add cron log failed", err)
+		return
+	}
+	tx.Commit()
+}
+
+// AddInit appends a non-empty init sql to the list, guarded by the mutex
+func AddInit(m *InitMessages, s InitSql) {
+	if s.Sql != "" {
+		(*m).mu.Lock()
+		(*m).list = append((*m).list, s)
+		(*m).mu.Unlock()
+	}
+}
+
+// AddString appends a non-empty string to the list, guarded by the mutex
+func AddString(m *Messages, s string) {
+	if s != "" {
+		(*m).mu.Lock()
+		(*m).list = append((*m).list, s)
+		(*m).mu.Unlock()
+	}
+}
diff --git a/dbm-services/mysql/db-partition/service/check_partition_object.go b/dbm-services/mysql/db-partition/service/check_partition_object.go
new file mode 100644
index 0000000000..40ff1cd207
--- /dev/null
+++ b/dbm-services/mysql/db-partition/service/check_partition_object.go
@@ -0,0 +1,81 @@
+package service
+
+import (
+	"sync"
+	"time"
+)
+
+// DiffOneDay the number of days between a partition's name and its description
+const DiffOneDay = 1
+
+// Checker identifies one partition rule to check
+type Checker struct {
+	ClusterType  string `json:"cluster_type"`
+	BkBizId      int    `json:"bk_biz_id"`
+	ConfigId     int    `json:"config_id"`
+	ClusterId    int    `json:"cluster_id"`
+	ImmuteDomain string `json:"immute_domain"`
+	Port         int    `json:"port"`
+	BkCloudId    *int   `json:"bk_cloud_id"`
+}
+
+// PartitionSqlSet mutex-guarded collection of partition SQL
+type PartitionSqlSet struct {
+	Mu            sync.RWMutex
+	PartitionSqls []PartitionSql
+}
+
+// PartitionSql partition statements generated for one rule on an instance (ip:port)
+type PartitionSql struct {
+	ConfigId      int       `json:"config_id"`
+	DbLike        string    `json:"dblike"`
+	TbLike        string    `json:"tblike"`
+	InitPartition []InitSql `json:"init_partition"`
+	AddPartition  []string  `json:"add_partition"`
+	DropPartition []string  `json:"drop_partition"`
+}
+
+// PartitionCronLog one execution record of the partition cron
+type PartitionCronLog struct {
+	Id           int    `json:"id" gorm:"column:id;primary_key;auto_increment"`
+	BkBizId      int    `json:"bk_biz_id" gorm:"column:bk_biz_id"`
+	ClusterId    int    `json:"cluster_id" gorm:"column:cluster_id"`
+	ConfigId     int    `json:"config_id" gorm:"column:config_id"`
+	TicketId     int    `json:"ticket_id" gorm:"column:ticket_id"`
+	ImmuteDomain string `json:"immute_domain" gorm:"column:immute_domain"`
+	Scheduler    string `json:"scheduler" gorm:"column:scheduler"`
+	BkCloudId    int    `json:"bk_cloud_id" gorm:"column:bk_cloud_id"`
+	TimeZone     string `json:"time_zone" gorm:"column:time_zone"`
+	CronDate     string `json:"cron_date" gorm:"column:cron_date"`
+	TicketDetail string `json:"ticket_detail" gorm:"column:ticket_detail"`
+	CheckInfo    string `json:"check_info" gorm:"column:check_info"`
+	Status       string `json:"status" gorm:"column:status"`
+}
+
+// PartitionLog one partition cron log entry joined with its ticket status
+type PartitionLog struct {
+	Id           int       `json:"id"`
+	TicketId     int       `json:"ticket_id" gorm:"column:ticket_id"`
+	TicketStatus string    `json:"ticket_status" gorm:"column:ticket_status"`
+	ExecuteTime  time.Time `json:"execute_time" gorm:"column:execute_time"`
+	CheckInfo    string    `json:"check_info" gorm:"column:check_info"`
+	Status       string    `json:"status" gorm:"column:status"`
+}
+
+// InitMessages mutex-guarded list of init partition sql
+type InitMessages struct {
+	mu   sync.RWMutex
+	list []InitSql
+}
+
+// InitSql one init partition statement plus the disk space it needs
+type InitSql struct {
+	Sql      string `json:"sql"`
+	NeedSize int    `json:"need_size"`
+}
+
+// Messages mutex-guarded list of strings
+type Messages struct {
+	mu   sync.RWMutex
+	list []string
+}
diff --git a/dbm-services/mysql/db-partition/service/db_meta_service.go b/dbm-services/mysql/db-partition/service/db_meta_service.go
new file mode 100644
index 0000000000..9d735bcaf7
--- /dev/null
+++ b/dbm-services/mysql/db-partition/service/db_meta_service.go
@@ -0,0 +1,144 @@
+package service
+
+import (
+	"dbm-services/mysql/db-partition/errno"
+	"dbm-services/mysql/db-partition/util"
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+// Tendbha mysql high-availability cluster type
+const Tendbha string = "tendbha"
+
+// Tendbsingle mysql single-node cluster type
+const Tendbsingle string = "tendbsingle"
+
+// Tendbcluster spider cluster type
+const Tendbcluster string = "tendbcluster"
+
+// CheckFailed partition check status: failed
+const CheckFailed string = "FAILED"
+
+// CheckSucceeded partition check status: succeeded
+const CheckSucceeded string = "SUCCEEDED"
+
+// ExecuteAsynchronous partition check status: executed asynchronously, result not known yet
+const ExecuteAsynchronous string = "UNKNOWN"
+
+// BackendMaster instance role of a mysql backend master
+const BackendMaster string = "backend_master"
+
+// Orphan instance role of the single storage node in a tendbsingle cluster
+const Orphan string = "orphan"
+
+// CreateDbmTicket calls the bk-dbm API to create a ticket
+func CreateDbmTicket(config Ticket) (int, error) {
+	var ticketId int
+	type Data struct {
+		Id                int     `json:"id"`
+		Creator           string  `json:"creator"`
+		CreateAt          string  `json:"create_at"`
+		Updater           string  `json:"updater"`
+		UpdateAt          string  `json:"update_at"`
+		TicketType        string  `json:"ticket_type"`
+		Status            string  `json:"status"`
+		Remark            string  `json:"remark"`
+		Group             string  `json:"group"`
+		Details           Details `json:"details"`
+		TicketTypeDisplay string  `json:"ticket_type_display"`
+		StatusDisplay     string  `json:"status_display"`
+		CostTime          int     `json:"cost_time"`
+		BkBizName         string  `json:"bk_biz_name"`
+		BkAppAbbr         string  `json:"bk_app_abbr"`
+		IgnoreDuplication bool    `json:"ignore_duplication"`
+		BkBizId           int     `json:"bk_biz_id"`
+		IsReviewed        bool    `json:"is_reviewed"`
+	}
+
+	var resp Data
+	c := util.NewClientByHosts(viper.GetString("dbm_ticket_service"))
+	slog.Info(viper.GetString("dbm_ticket_service"))
+	slog.Info(fmt.Sprintf("config:%v", config))
+	result, err := c.Do(http.MethodPost, "tickets/", config)
+	if err != nil {
+		slog.Error("msg", err)
+		return ticketId, err
+	}
+	slog.Info(fmt.Sprintf("data:%v", string(result.Data)))
+	if err := json.Unmarshal(result.Data, &resp); err != nil {
+		return ticketId, err
+	}
+	slog.Info(fmt.Sprintf("resp:%v", resp))
+	return resp.Id, nil
+}
+
+// Domain input of the GetCluster function
+type Domain struct {
+	EntryName string `json:"entry_name" url:"entry_name"`
+}
+
+// Instance struct returned by GetCluster
+type Instance struct {
+	Proxies      []Proxy   `json:"proxies"`
+	Storages     []Storage `json:"storages"`
+	SpiderMaster []Proxy   `json:"spider_master"`
+	SpiderSlave  []Proxy   `json:"spider_slave"`
+	ClusterType  string    `json:"cluster_type"`
+	BkBizId      int64     `json:"bk_biz_id"`
+	DbModuleId   int64     `json:"db_module_id"`
+	BindTo       string    `json:"bind_to"`
+	EntryRole    string    `json:"entry_role"`
+	BkCloudId    int64     `json:"bk_cloud_id"`
+	ImmuteDomain string    `json:"immute_domain"`
+}
+
+// Proxy a proxy instance
+type Proxy struct {
+	IP        string `json:"ip"`
+	Port      int    `json:"port"`
+	AdminPort int    `json:"admin_port"`
+	Status    string `json:"status"`
+}
+
+// Storage a mysql backend node
+type Storage struct {
+	IP           string `json:"ip"`
+	Port         int    `json:"port"`
+	InstanceRole string `json:"instance_role"`
+	Status       string `json:"status"`
+}
+
+// Cluster element of the array returned by GetAllClustersInfo
+type Cluster struct {
+	DbModuleId   int64     `json:"db_module_id"`
+	BkBizId      string    `json:"bk_biz_id"`
+	Proxies      []Proxy   `json:"proxies"`
+	Storages     []Storage `json:"storages"`
+	ClusterType  string    `json:"cluster_type"`
+	ImmuteDomain string    `json:"immute_domain"`
+}
+
+// BkBizId business id; input of QueryAccountRule and GetAllClustersInfo
+type BkBizId struct {
+	BkBizId int64 `json:"bk_biz_id" url:"bk_biz_id"`
+}
+
+// GetCluster fetches cluster info by immutable domain
+func GetCluster(dns Domain, ClusterType string) (Instance, error) {
+	c := util.NewClientByHosts(viper.GetString("db_meta_service"))
+	var resp Instance
+	url := fmt.Sprintf("/db_meta/priv_manager/%s/cluster_instances", ClusterType)
+	result, err := c.Do(http.MethodGet, url, dns)
+	if err != nil {
+		slog.Error(url, err)
+		return resp, errno.DomainNotExists.Add(fmt.Sprintf(" %s: %s", dns.EntryName, err.Error()))
+	}
+	if err := json.Unmarshal(result.Data, &resp); err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
diff --git a/dbm-services/mysql/db-partition/service/db_remote_service.go b/dbm-services/mysql/db-partition/service/db_remote_service.go
new file mode 100644
index 0000000000..29c25872dd
--- /dev/null
+++ b/dbm-services/mysql/db-partition/service/db_remote_service.go
@@ -0,0 +1,118 @@
+package service
+
+import (
+	"dbm-services/mysql/db-partition/util"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+// OneAddressExecuteSqlBasic executes SQL on one mysql or proxy instance through the db-remote-service API
+func OneAddressExecuteSqlBasic(vtype string, queryRequest QueryRequest) (oneAddressResult, error) {
+	slog.Info("msg", "OneAddressExecuteSqlBasic", "begin")
+	var errMsg []string
+	var result oneAddressResult
+	var temp []oneAddressResult
+	c := util.NewClientByHosts(viper.GetString("db_remote_service"))
+	var url string
+	if vtype == "mysql" {
+		url = "mysql/rpc/"
+	} else if vtype == "proxy" {
+		url = "proxy-admin/rpc/"
+	}
+	apiResp, err := c.Do(http.MethodPost, url, queryRequest)
+	if err != nil {
+		slog.Error("drs err", err)
+		return result, err
+	}
+	if apiResp.Code != 0 {
+		slog.Error("remote service api", fmt.Errorf("%s", apiResp.Message))
+		return result, fmt.Errorf("%s", apiResp.Message)
+	} else {
+		if err := json.Unmarshal(apiResp.Data, &temp); err != nil {
+			return result, err
+		}
+		if temp[0].ErrorMsg != "" {
+			errMsg = append(errMsg, fmt.Sprintf("instance: %s err: %s", queryRequest.Addresses[0], temp[0].ErrorMsg))
+		}
+		for _, res := range temp[0].CmdResults {
+			if res.ErrorMsg != "" {
+				errMsg = append(errMsg, fmt.Sprintf("instance: %s execute: `%s` error:`%s`;",
+					queryRequest.Addresses[0], strings.Replace(res.Cmd, "%", "%%", -1),
+					strings.Replace(res.ErrorMsg, "%", "%%", -1)))
+			}
+		}
+	}
+
+	if len(errMsg) > 0 {
+		slog.Error("msg", fmt.Errorf(strings.Join(errMsg, "\n")))
+		return result, fmt.Errorf(strings.Join(errMsg, "\n"))
+	}
+	slog.Info("msg", "OneAddressExecuteSqlBasic", "end")
+	return temp[0], nil
+
+}
+
+// OneAddressExecuteSql executes SQL on one mysql instance through the db-remote-service API
+func OneAddressExecuteSql(queryRequest QueryRequest) (oneAddressResult, error) {
+	slog.Info("msg", "queryRequest", queryRequest)
+	result, err := OneAddressExecuteSqlBasic("mysql", queryRequest)
+	if err != nil {
+		return result, err
+	}
+	return result, nil
+}
+
+// QueryRequest OneAddressExecuteSql函数的入参
+type QueryRequest struct {
+	Addresses []string `form:"addresses" json:"addresses" url:"addresses"` // mysql instances, as an ip:port array
+	Cmds      []string `form:"cmds" json:"cmds" url:"cmds"`                // sql statements to run
+	Force     bool     `form:"force" json:"force" url:"force"`             // force: when one statement fails, keep executing the rest instead of aborting
+	/*
+		QueryTimeout is the per-statement execution timeout; the default is 30 seconds:
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(timeout))
+		defer cancel()
+		rows, err := db.QueryxContext(ctx, cmd)
+	*/
+	QueryTimeout int `form:"query_timeout" json:"query_timeout" url:"query_timeout"` // sql execution timeout in seconds
+	BkCloudId    int `form:"bk_cloud_id" json:"bk_cloud_id" url:"bk_cloud_id"`       // cloud area the mysql service lives in
+}
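+
+// A minimal usage sketch (illustrative, not part of the original patch):
+//
+//	req := QueryRequest{
+//		Addresses:    []string{"127.0.0.1:20000"},
+//		Cmds:         []string{"select 1;"},
+//		Force:        true,
+//		QueryTimeout: 30,
+//	}
+//	res, err := OneAddressExecuteSql(req)
+//	// res.CmdResults[0].TableData holds the rows of the first statement.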
+
+// queryResponse response structure of the db-remote-service /mysql/rpc endpoint
+type queryResponse struct {
+	Code      int               `json:"code"`
+	Data      queryResponseData `json:"data"`
+	Msg       string            `json:"message"`
+	RequestId string            `json:"request_id"`
+}
+
+// queryResponseData results of executing sql on multiple ip:port instances
+type queryResponseData []oneAddressResult
+
+// oneAddressResult result of executing sql on one ip:port instance
+type oneAddressResult struct {
+	Address    string      `json:"address"`
+	CmdResults []cmdResult `json:"cmd_results"`
+	ErrorMsg   string      `json:"error_msg"`
+}
+
+// cmdResult result of one sql statement
+type cmdResult struct {
+	Cmd          string        `json:"cmd"`
+	TableData    tableDataType `json:"table_data"`
+	RowsAffected int           `json:"rows_affected"`
+	ErrorMsg     string        `json:"error_msg"`
+}
+
+// tableDataType rows returned by a query
+type tableDataType []map[string]interface{}
+
+// PasswordResp password and encryption type of user@host on a mysql instance
+type PasswordResp struct {
+	Psw     string `gorm:"column:psw;not_null;" json:"psw"`
+	PwdType string `gorm:"column:psw_type;not_null;" json:"psw_type"`
+}
diff --git a/dbm-services/mysql/db-partition/service/execute_partition_object.go b/dbm-services/mysql/db-partition/service/execute_partition_object.go
new file mode 100644
index 0000000000..5ee4d0adb9
--- /dev/null
+++ b/dbm-services/mysql/db-partition/service/execute_partition_object.go
@@ -0,0 +1,187 @@
+package service
+
+import (
+	"time"
+)
+
+// PartitionConfig one partition rule
+type PartitionConfig struct {
+	ID                    int       `json:"id" gorm:"column:id;primary_key;auto_increment"`
+	BkBizId               int       `json:"bk_biz_id" gorm:"column:bk_biz_id"`
+	ImmuteDomain          string    `json:"immute_domain" gorm:"column:immute_domain"`
+	Port                  int       `json:"port" gorm:"column:port"`
+	BkCloudId             int       `json:"bk_cloud_id" gorm:"column:bk_cloud_id"`
+	ClusterId             int       `json:"cluster_id" gorm:"column:cluster_id"`
+	DbLike                string    `json:"dblike" gorm:"column:dblike"`
+	TbLike                string    `json:"tblike" gorm:"column:tblike"`
+	PartitionColumn       string    `json:"partition_columns" gorm:"column:partition_column"`
+	PartitionColumnType   string    `json:"partition_column_type" gorm:"column:partition_column_type"`
+	ReservedPartition     int       `json:"reserved_partition" gorm:"column:reserved_partition"`
+	ExtraPartition        int       `json:"extra_partition" gorm:"column:extra_partition"`
+	PartitionTimeInterval int       `json:"partition_time_interval" gorm:"column:partition_time_interval"`
+	PartitionType         int       `json:"partition_type" gorm:"column:partition_type"`
+	ExpireTime            int       `json:"expire_time"`
+	Phase                 string    `json:"phase" gorm:"column:phase"`
+	Creator               string    `json:"creator" gorm:"column:creator"`
+	Updator               string    `json:"updator" gorm:"column:updator"`
+	CreateTime            time.Time `json:"create_time" gorm:"column:create_time"`
+	UpdateTime            time.Time `json:"update_time" gorm:"column:update_time"`
+}
+
+// PartitionConfigWithLog a partition rule joined with its latest cron log
+type PartitionConfigWithLog struct {
+	PartitionConfig
+	ExecuteTime  time.Time `json:"execute_time" gorm:"column:execute_time"`
+	TicketId     int       `json:"ticket_id" gorm:"column:ticket_id"`
+	Status       string    `json:"status" gorm:"column:status"`
+	TicketStatus string    `json:"ticket_status" gorm:"column:ticket_status"`
+	CheckInfo    string    `json:"check_info" gorm:"column:check_info"`
+}
+
+// ConfigDetail a partition rule resolved to a concrete database and table
+type ConfigDetail struct {
+	PartitionConfig
+	DbName      string `json:"dbname"`
+	TbName      string `json:"tbname"`
+	Partitioned bool   `json:"partitioned"`
+}
+
+// Ticket request body for creating a bk-dbm ticket
+type Ticket struct {
+	BkBizId    int    `json:"bk_biz_id"`
+	TicketType string `json:"ticket_type"`
+	Remark     string `json:"remark"`
+	Details    Detail `json:"details"`
+}
+
+// Details ticket details together with cluster info
+type Details struct {
+	Infos    []Info           `json:"infos"`
+	Clusters ClustersResponse `json:"clusters"`
+}
+
+// ClustersResponse cluster info keyed by cluster id
+type ClustersResponse struct {
+	ClusterResponse map[string]ClusterResponse `json:"cluster_response"`
+}
+
+// ClusterResponse cluster metadata returned by bk-dbm
+type ClusterResponse struct {
+	Id              int    `json:"id"`
+	Creator         string `json:"creator"`
+	Updater         string `json:"updater"`
+	Name            string `json:"name"`
+	Alias           string `json:"alias"`
+	BkBizId         int    `json:"bk_biz_id"`
+	ClusterType     string `json:"cluster_type"`
+	DbModuleId      int    `json:"db_module_id"`
+	ImmuteDomain    string `json:"immute_domain"`
+	MajorVersion    string `json:"major_version"`
+	Phase           string `json:"phase"`
+	Status          string `json:"status"`
+	BkCloudId       int    `json:"bk_cloud_id"`
+	Region          string `json:"region"`
+	TimeZone        string `json:"time_zone"`
+	ClusterTypeName string `json:"cluster_type_name"`
+}
+
+// Detail partition infos carried by a ticket
+type Detail struct {
+	Infos []Info `json:"infos"`
+}
+
+// Info partition objects of one cluster
+type Info struct {
+	ConfigId         int               `json:"config_id"`
+	ClusterId        int               `json:"cluster_id"`
+	ImmuteDomain     string            `json:"immute_domain"`
+	BkCloudId        int               `json:"bk_cloud_id"`
+	PartitionObjects []PartitionObject `json:"partition_objects"`
+}
+
+// PartitionObject partition SQL to execute on one instance
+type PartitionObject struct {
+	Ip             string         `json:"ip"`
+	Port           int            `json:"port"`
+	ShardName      string         `json:"shard_name"`
+	ExecuteObjects []PartitionSql `json:"execute_objects"`
+}
+
+// ExecResult result of executing one sql statement
+type ExecResult struct {
+	IsSuccess bool
+	Sql       string
+	Msg       string
+}
+
+// Result ExecResult plus the database and action it belongs to
+type Result struct {
+	ExecResult
+	DbName string
+	Action string
+}
+
+/*
+show create table mysql_partition_conf\G
+*************************** 1. row ***************************
+       Table: mysql_partition_conf
+Create Table: CREATE TABLE `mysql_partition_conf` (
+  `ID` bigint(20) NOT NULL AUTO_INCREMENT,
+  `App` varchar(100) NOT NULL,
+  `Module` varchar(100) NOT NULL,
+  `Ip` varchar(100) NOT NULL,
+  `Port` int(11) NOT NULL,
+  `DbLike` varchar(100) NOT NULL,
+  `PartitionTableName` varchar(100) NOT NULL,
+  `PartitionColumn` varchar(100) DEFAULT NULL,
+  `PartitionColumnType` varchar(100) DEFAULT NULL,
+  `ReservedPartition` int(11) NOT NULL,
+  `ExtraPartition` int(11) NOT NULL,
+  `PartitionTimeInterval` int(11) NOT NULL,
+  `PartitionTimeWay` enum('DAY','MONTH') DEFAULT NULL,
+  `PartitionType` int(11) NOT NULL,
+  `IsExchange` int(11) NOT NULL DEFAULT '0',
+  `HeartBeat` datetime NOT NULL DEFAULT '2000-01-01 00:00:00',
+  `Alive` int(11) NOT NULL DEFAULT '3' COMMENT '1:success,2:failed,3:not execute,4+:others',
+  `DoSuccess` int(11) DEFAULT NULL,
+  `DoFailed` int(11) DEFAULT NULL,
+  `Creator` varchar(100) DEFAULT NULL,
+  `Updator` varchar(100) DEFAULT NULL,
+  `CreateTime` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
+  `UpdateTime` timestamp NOT NULL DEFAULT '2000-01-01 00:00:00',
+  PRIMARY KEY (`ID`),
+  UNIQUE KEY `UK_MPT` (`App`,`Module`,`Ip`,`Port`,`DbLike`,`PartitionTableName`),
+  UNIQUE KEY `uk_IPDT` (`Ip`,`Port`,`DbLike`,`PartitionTableName`),
+  KEY `idx_app_alive` (`App`,`Alive`),
+  KEY `IDX_DT` (`DbLike`,`PartitionTableName`),
+  KEY `IDX_IDT` (`Ip`,`DbLike`,`PartitionTableName`)
+) ENGINE=InnoDB AUTO_INCREMENT=74533 DEFAULT CHARSET=utf8mb4
+1 row in set (0.00 sec)
+
+select * from mysql_partition_conf limit 1\G
+*************************** 1. row ***************************
+                   ID: 25
+                  App: web
+               Module: web
+                   Ip: gamedb.amspoint16.web.db
+                 Port: 10000
+               DbLike: dbcaccts
+   PartitionTableName: t_acct_water_0
+      PartitionColumn: Fcreate_time
+  PartitionColumnType: timestamp
+    ReservedPartition: 30
+       ExtraPartition: 14
+PartitionTimeInterval: 1
+     PartitionTimeWay: DAY
+        PartitionType: 5
+           IsExchange: 0
+            HeartBeat: 2023-02-12 01:30:08
+                Alive: 1
+            DoSuccess: 2
+             DoFailed: 0
+              Creator: NULL
+              Updator: NULL
+           CreateTime: 0000-00-00 00:00:00
+           UpdateTime: 2000-01-01 00:00:00
+1 row in set (0.00 sec)
+*/
diff --git a/dbm-services/mysql/db-partition/service/manage_config.go b/dbm-services/mysql/db-partition/service/manage_config.go
new file mode 100644
index 0000000000..d6d28f599b
--- /dev/null
+++ b/dbm-services/mysql/db-partition/service/manage_config.go
@@ -0,0 +1,450 @@
+package service
+
+import (
+	"dbm-services/mysql/db-partition/errno"
+	"dbm-services/mysql/db-partition/model"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+// GetPartitionsConfig returns partition rules with their latest execution log and the total count
+func (m *QueryParititionsInput) GetPartitionsConfig() ([]*PartitionConfigWithLog, int64, error) {
+	allResults := make([]*PartitionConfigWithLog, 0)
+	var tbName string
+	type Cnt struct {
+		Count int64 `gorm:"column:cnt"`
+	}
+	// decide whether this is a mysql cluster or a spider cluster
+	switch strings.ToLower(m.ClusterType) {
+	case Tendbha, Tendbsingle:
+		tbName = MysqlPartitionConfig
+	case Tendbcluster:
+		tbName = SpiderPartitionConfig
+	default:
+		return nil, 0, errors.New("not supported db type")
+	}
+	where := " 1=1 "
+	if m.BkBizId > 0 {
+		where = fmt.Sprintf("%s and config.bk_biz_id=%d", where, m.BkBizId)
+	}
+	if len(m.ImmuteDomains) != 0 {
+		dns := " and config.immute_domain in ('" + strings.Join(m.ImmuteDomains, "','") + "') "
+		where = where + dns
+	}
+	if len(m.DbLikes) != 0 {
+		dblike := "and config.dblike in ('" + strings.Join(m.DbLikes, "','") + "') "
+		where = where + dblike
+	}
+	if len(m.TbLikes) != 0 {
+		tblike := "and config.tblike in ('" + strings.Join(m.TbLikes, "','") + "') "
+		where = where + tblike
+	}
+	cnt := Cnt{}
+	vsql := fmt.Sprintf("select count(*) as cnt from `%s`", tbName)
+	err := model.DB.Self.Debug().Table(tbName).Raw(vsql).Scan(&cnt).Error
+	if err != nil {
+		slog.Error(vsql, "execute error", err)
+		return nil, 0, err
+	}
+	limitCondition := fmt.Sprintf("limit %d offset %d", m.Limit, m.Offset)
+	condition := fmt.Sprintf("%s %s", where, limitCondition)
+	// ticket_id is NULL when a rule has never been executed
+	vsql = fmt.Sprintf("SELECT config.*, d.create_time as execute_time, "+
+		"d.ticket_id as ticket_id, d.ticket_status as ticket_status, d.check_info as check_info, "+
+		"d.status as status FROM "+
+		"%s AS config LEFT JOIN (SELECT c.*, ticket.status as ticket_status  FROM "+
+		"(SELECT a.* FROM partition_cron_log AS a, "+
+		"(SELECT inner_config.id AS config_id, MAX(inner_log.id) AS log_id FROM "+
+		"%s AS inner_config LEFT JOIN "+
+		"partition_cron_log AS inner_log ON inner_config.id = inner_log.config_id where "+
+		"inner_log.create_time > DATE_SUB(now(),interval 100 day) GROUP BY inner_config.id) "+
+		"AS b WHERE a.id = b.log_id) AS c LEFT JOIN `%s`.ticket_ticket AS ticket "+
+		"ON ticket.id = c.ticket_id) AS d ON config.id = d.config_id where %s;", tbName, tbName,
+		viper.GetString("dbm_db_name"), condition)
+	err = model.DB.Self.Debug().Table(tbName).Raw(vsql).Scan(&allResults).Error
+	if err != nil {
+		slog.Error(vsql, "execute error", err)
+		return nil, 0, err
+	}
+	return allResults, cnt.Count, nil
+}
+
+// GetPartitionLog returns the execution logs of one partition rule from the last 100 days
+func (m *QueryLogInput) GetPartitionLog() ([]*PartitionLog, int64, error) {
+	allResults := make([]*PartitionLog, 0)
+	var tbName string
+	switch strings.ToLower(m.ClusterType) {
+	case Tendbha, Tendbsingle:
+		tbName = MysqlPartitionConfig
+	case Tendbcluster:
+		tbName = SpiderPartitionConfig
+	default:
+		return nil, 0, errors.New("not supported db type")
+	}
+	type Cnt struct {
+		Count int64 `gorm:"column:cnt"`
+	}
+	vsql := fmt.Sprintf("select logs.*,ticket.status as ticket_status from "+
+		"(select config.id as id, log.ticket_id as ticket_id, log.create_time as execute_time, "+
+		"log.check_info as check_info, log.status as status "+
+		"from %s as config join partition_cron_log as log "+
+		"where log.config_id=config.id and config.id=%d and log.create_time>"+
+		"DATE_SUB(now(),interval 100 day)) as logs left join "+
+		"`%s`.ticket_ticket as ticket on ticket.id=logs.ticket_id where "+
+		"ticket.create_at > DATE_SUB(now(),interval 100 day)) "+
+		"order by execute_time desc ", tbName, m.ConfigId, viper.GetString("dbm_db_name"))
+	err := model.DB.Self.Debug().Table(tbName).Raw(vsql).Scan(&allResults).Error
+	if err != nil {
+		return nil, 0, err
+	}
+	cnt := Cnt{}
+	countSQL := fmt.Sprintf("select count(*) as cnt from (%s) c", vsql)
+	err = model.DB.Self.Debug().Table(tbName).Raw(countSQL).Scan(&cnt).Error
+	if err != nil {
+		return nil, 0, err
+	}
+	return allResults, cnt.Count, nil
+}
+
+// DeletePartitionsConfig deletes partition rules by id within a business
+func (m *DeletePartitionConfigByIds) DeletePartitionsConfig() error {
+	if m.BkBizId == 0 {
+		return errno.BkBizIdIsEmpty
+	}
+	if len(m.Ids) == 0 {
+		return errno.ConfigIdIsEmpty
+	}
+
+	var tbName string
+	switch strings.ToLower(m.ClusterType) {
+	case Tendbha, Tendbsingle:
+		tbName = MysqlPartitionConfig
+	case Tendbcluster:
+		tbName = SpiderPartitionConfig
+	default:
+		return errors.New("not supported db type")
+	}
+	var list []string
+	for _, item := range m.Ids {
+		list = append(list, strconv.FormatInt(item, 10))
+	}
+	sql := fmt.Sprintf("delete from `%s` where id in (%s) and bk_biz_id = %d", tbName, strings.Join(list, ","),
+		m.BkBizId)
+	result := model.DB.Self.Debug().Exec(sql)
+	if result.Error != nil {
+		return result.Error
+	}
+	if result.RowsAffected == 0 {
+		return errno.PartitionConfigNotExisted
+	}
+	return nil
+}
+
+// CreatePartitionsConfig validates and creates partition rules
+func (m *CreatePartitionsInput) CreatePartitionsConfig() error {
+	var tbName string
+	switch strings.ToLower(m.ClusterType) {
+	case Tendbha, Tendbsingle:
+		tbName = MysqlPartitionConfig
+	case Tendbcluster:
+		tbName = SpiderPartitionConfig
+	default:
+		return errors.New("not supported db type")
+	}
+
+	if len(m.PartitionColumn) == 0 {
+		return errors.New("the partition column must not be empty")
+	}
+
+	if len(m.DbLikes) == 0 || len(m.TbLikes) == 0 {
+		return errors.New("database and table patterns must not be empty")
+	}
+
+	if m.PartitionTimeInterval < 1 {
+		return errors.New("the partition interval must be at least 1")
+	}
+
+	if m.ExpireTime < m.PartitionTimeInterval {
+		return errors.New("the expire time must be no less than the partition interval")
+	}
+	if m.ExpireTime%m.PartitionTimeInterval != 0 {
+		return errors.New("the expire time must be an integer multiple of the partition interval")
+	}
+
+	reservedPartition := m.ExpireTime / m.PartitionTimeInterval
+	partitionType := 0
+	switch m.PartitionColumnType {
+	case "datetime":
+		partitionType = 0
+	case "timestamp":
+		partitionType = 5
+	case "int":
+		partitionType = 101
+	default:
+		return errors.New("the partition column type must be datetime, timestamp or int")
+	}
+	var errs []string
+	warnings1, err := m.compareWithSameArray()
+	if err != nil {
+		fmt.Println(err)
+		return err
+	}
+	fmt.Println(warnings1)
+	warnings2, err := m.compareWithExistDB(tbName)
+	if err != nil {
+		fmt.Println(err)
+		return err
+	}
+
+	warnings := append(warnings1, warnings2...)
+	if len(warnings) > 0 {
+		return errors.New(strings.Join(warnings, "\n"))
+	}
+
+	for _, dblike := range m.DbLikes {
+		for _, tblike := range m.TbLikes {
+			partitionConfig := PartitionConfig{
+				BkBizId:               m.BkBizId,
+				ImmuteDomain:          m.ImmuteDomain,
+				Port:                  m.Port,
+				BkCloudId:             m.BkCloudId,
+				ClusterId:             m.ClusterId,
+				DbLike:                dblike,
+				TbLike:                tblike,
+				PartitionColumn:       m.PartitionColumn,
+				PartitionColumnType:   m.PartitionColumnType,
+				ReservedPartition:     reservedPartition,
+				ExtraPartition:        extraTime,
+				PartitionTimeInterval: m.PartitionTimeInterval,
+				PartitionType:         partitionType,
+				ExpireTime:            m.ExpireTime,
+				Creator:               m.Creator,
+				Updator:               m.Updator,
+				Phase:                 online,
+				CreateTime:            time.Now(),
+				UpdateTime:            time.Now(),
+			}
+			result := model.DB.Self.Debug().Table(tbName).Create(&partitionConfig)
+			if result.Error != nil {
+				errs = append(errs, result.Error.Error())
+			}
+		}
+	}
+	if len(errs) > 0 {
+		return fmt.Errorf("errors: %s", strings.Join(errs, "\n"))
+	}
+	return nil
+}
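+
+// Worked example (assumed values): ExpireTime=30 with PartitionTimeInterval=1
+// yields reservedPartition=30, i.e. 30 daily partitions are retained; the
+// column type picks the partition type (datetime->0, timestamp->5, int->101).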
+
+// UpdatePartitionsConfig validates and updates partition rules
+func (m *CreatePartitionsInput) UpdatePartitionsConfig() error {
+	var tbName string
+	switch strings.ToLower(m.ClusterType) {
+	case Tendbha, Tendbsingle:
+		tbName = MysqlPartitionConfig
+	case Tendbcluster:
+		tbName = SpiderPartitionConfig
+	default:
+		return errors.New("not supported db type")
+	}
+
+	if len(m.PartitionColumn) == 0 {
+		return errors.New("the partition column must not be empty")
+	}
+
+	if len(m.DbLikes) == 0 || len(m.TbLikes) == 0 {
+		return errors.New("database and table patterns must not be empty")
+	}
+
+	if m.PartitionTimeInterval < 1 {
+		return errors.New("the partition interval must be at least 1")
+	}
+
+	if m.ExpireTime < m.PartitionTimeInterval {
+		return errors.New("the expire time must be no less than the partition interval")
+	}
+	if m.ExpireTime%m.PartitionTimeInterval != 0 {
+		return errors.New("the expire time must be an integer multiple of the partition interval")
+	}
+
+	reservedPartition := m.ExpireTime / m.PartitionTimeInterval
+	partitionType := 0
+	switch m.PartitionColumnType {
+	case "datetime":
+		partitionType = 0
+	case "timestamp":
+		partitionType = 5
+	case "int":
+		partitionType = 101
+	default:
+		return errors.New("the partition column type must be datetime, timestamp or int")
+	}
+	var errs []string
+	for _, dblike := range m.DbLikes {
+		for _, tblike := range m.TbLikes {
+			updateCondition := fmt.Sprintf("bk_biz_id=%d and immute_domain='%s' and dblike='%s' and tblike='%s'",
+				m.BkBizId, m.ImmuteDomain, dblike, tblike)
+			var updateColumn struct {
+				PartitionColumn       string
+				PartitionColumnType   string
+				ReservedPartition     int
+				ExtraPartition        int
+				PartitionTimeInterval int
+				PartitionType         int
+				ExpireTime            int
+				Creator               string
+				Updator               string
+			}
+			updateColumn.PartitionColumn = m.PartitionColumn
+			updateColumn.PartitionColumnType = m.PartitionColumnType
+			updateColumn.ReservedPartition = reservedPartition
+			updateColumn.ExtraPartition = extraTime
+			updateColumn.PartitionTimeInterval = m.PartitionTimeInterval
+			updateColumn.PartitionType = partitionType
+			updateColumn.ExpireTime = m.ExpireTime
+			updateColumn.Creator = m.Creator
+			updateColumn.Updator = m.Updator
+			result := model.DB.Self.Debug().Table(tbName).Where(updateCondition).Updates(&updateColumn)
+			if result.Error != nil {
+				errs = append(errs, result.Error.Error())
+			}
+		}
+	}
+	if len(errs) > 0 {
+		return fmt.Errorf("errors: %s", strings.Join(errs, "\n"))
+	}
+	return nil
+}
+
+// DisablePartitionConfig sets the given rules' phase to offline
+func (m *DisablePartitionInput) DisablePartitionConfig() error {
+	if len(m.Ids) == 0 {
+		return errno.ConfigIdIsEmpty
+	}
+	var tbName string
+	// decide whether this is a mysql cluster or a spider cluster
+	switch strings.ToLower(m.ClusterType) {
+	case Tendbha, Tendbsingle:
+		tbName = MysqlPartitionConfig
+	case Tendbcluster:
+		tbName = SpiderPartitionConfig
+	default:
+		return errors.New("not supported db type")
+	}
+	var list []string
+	for _, item := range m.Ids {
+		list = append(list, strconv.FormatInt(item, 10))
+	}
+	db := model.DB.Self.Debug().Table(tbName)
+	result := db.
+		Where(fmt.Sprintf("id in (%s)", strings.Join(list, ","))).
+		Update("phase", offline)
+	if result.Error != nil {
+		return result.Error
+	}
+	return nil
+}
+
+// EnablePartitionConfig marks the given partition config ids as online.
+func (m *EnablePartitionInput) EnablePartitionConfig() error {
+	if len(m.Ids) == 0 {
+		return errno.ConfigIdIsEmpty
+	}
+	var tbName string
+	// decide whether this is a mysql cluster or a spider cluster
+	switch strings.ToLower(m.ClusterType) {
+	case Tendbha, Tendbsingle:
+		tbName = MysqlPartitionConfig
+	case Tendbcluster:
+		tbName = SpiderPartitionConfig
+	default:
+		return errors.New("不支持的db类型")
+	}
+	var list []string
+	for _, item := range m.Ids {
+		list = append(list, strconv.FormatInt(item, 10))
+	}
+	db := model.DB.Self.Debug().Table(tbName)
+	result := db.
+		Where(fmt.Sprintf("id in (%s)", strings.Join(list, ","))).
+		Update("phase", online)
+	if result.Error != nil {
+		return result.Error
+	}
+	return nil
+}
+
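+// compareWithSameArray checks the submitted DbLikes against each other; two
+// patterns conflict when one matches the other once "%" is translated to ".*".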
+func (m *CreatePartitionsInput) compareWithSameArray() (warnings []string, err error) {
+	l := len(m.DbLikes)
+	for i := 0; i < l; i++ {
+		dbi := m.DbLikes[i]
+		for j := i + 1; j < l; j++ {
+			dbj := m.DbLikes[j]
+			dbiReg, err := regexp.Compile(strings.Replace(dbi+"$", "%", ".*", -1))
+			if err != nil {
+				return warnings, err
+			}
+			dbjReg, err := regexp.Compile(strings.Replace(dbj+"$", "%", ".*", -1))
+			if err != nil {
+				return warnings, err
+			}
+			if dbiReg.MatchString(dbj) || dbjReg.MatchString(dbi) {
+				warning := fmt.Sprintf("本次提交中,规则%s与规则%s存在冲突,请修改后再次提交!", dbi, dbj)
+				warnings = append(warnings, warning)
+			}
+		}
+
+	}
+	return warnings, nil
+}
+
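+// compareWithExistDB checks the submitted db/table patterns against the rules
+// already stored in tbName and collects a warning for every overlap.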
+func (m *CreatePartitionsInput) compareWithExistDB(tbName string) (warnings []string, err error) {
+	l := len(m.DbLikes)
+	for i := 0; i < l; i++ {
+		db := m.DbLikes[i]
+		existRules, err := m.checkExistRules(tbName)
+		if err != nil {
+			return warnings, err
+		}
+		for _, existRule := range existRules {
+			dbReg, err := regexp.Compile(strings.Replace(db+"$", "%", ".*", -1))
+			if err != nil {
+				return warnings, err
+			}
+			dbExistReg, err := regexp.Compile(strings.Replace(existRule.DbLike+"$", "%", ".*", -1))
+			if err != nil {
+				return warnings, err
+			}
+			if dbReg.MatchString(existRule.DbLike) || dbExistReg.MatchString(db) {
+				for _, tb := range m.TbLikes {
+					if tb == existRule.TbLike {
+						warning := fmt.Sprintf("本次提交中,规则%s.%s与已有规则%s.%s存在冲突,请修改后再次提交!", db, tb, existRule.DbLike, existRule.TbLike)
+						warnings = append(warnings, warning)
+					}
+				}
+			}
+		}
+	}
+	return warnings, nil
+}
+
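+// checkExistRules loads the existing dblike/tblike rules for this business,
+// immute domain and cloud id from tbName.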
+func (m *CreatePartitionsInput) checkExistRules(tbName string) (existRules []ExistRule, err error) {
+	condition := fmt.Sprintf("bk_biz_id=%d and immute_domain='%s' and bk_cloud_id=%d", m.BkBizId, m.ImmuteDomain,
+		m.BkCloudId)
+	err = model.DB.Self.Debug().Table(tbName).Select("dblike", "tblike").Where(condition).Find(&existRules).Error
+	if err != nil {
+		return existRules, err
+	}
+	return existRules, nil
+}
diff --git a/dbm-services/mysql/db-partition/service/manage_config_object.go b/dbm-services/mysql/db-partition/service/manage_config_object.go
new file mode 100644
index 0000000000..4ec72c2803
--- /dev/null
+++ b/dbm-services/mysql/db-partition/service/manage_config_object.go
@@ -0,0 +1,84 @@
+package service
+
+// MysqlPartitionConfig is the partition config table for tendbha/tendbsingle clusters.
+const MysqlPartitionConfig = "mysql_partition_config"
+
+// SpiderPartitionConfig is the partition config table for tendbcluster (spider) clusters.
+const SpiderPartitionConfig = "spider_partition_config"
+
+// MysqlPartitionCronLogTable stores the mysql partition cron execution logs.
+const MysqlPartitionCronLogTable = "mysql_partition_cron_log"
+
+// SpiderPartitionCronLogTable stores the spider partition cron execution logs.
+const SpiderPartitionCronLogTable = "spider_partition_cron_log"
+
+// MysqlPartition identifies the mysql partition task type.
+const MysqlPartition = "MYSQL_PARTITION"
+
+// SpiderPartition identifies the spider partition task type.
+const SpiderPartition = "SPIDER_PARTITION"
+const online = "online"   // partition config is in effect
+const offline = "offline" // partition config is disabled
+// extraTime is written to ExtraPartition for every new rule: partitions reserved in advance.
+const extraTime = 15
+
+// ExistRule is a dblike/tblike pair from an existing partition config.
+type ExistRule struct {
+	DbLike string `gorm:"column:dblike"`
+	TbLike string `gorm:"column:tblike"`
+}
+
+// QueryParititionsInput is the request body for querying partition configs.
+type QueryParititionsInput struct {
+	ClusterType   string   `json:"cluster_type"`
+	BkBizId       int64    `json:"bk_biz_id"`
+	ImmuteDomains []string `json:"immute_domains"`
+	DbLikes       []string `json:"dblikes"`
+	TbLikes       []string `json:"tblikes"`
+	Limit         int      `json:"limit"`
+	Offset        int      `json:"offset"`
+}
+
+// QueryLogInput is the request body for querying the cron logs of one partition config.
+type QueryLogInput struct {
+	ClusterType string `json:"cluster_type"`
+	ConfigId    int64  `json:"config_id"`
+}
+
+// CreatePartitionsInput is the request body for creating or updating partition configs.
+type CreatePartitionsInput struct {
+	BkBizId               int      `json:"bk_biz_id"`
+	ClusterType           string   `json:"cluster_type"`
+	ImmuteDomain          string   `json:"immute_domain"`
+	Port                  int      `gorm:"column:port"`
+	BkCloudId             int      `gorm:"column:bk_cloud_id"`
+	ClusterId             int      `json:"cluster_id"`
+	DbLikes               []string `json:"dblikes"`
+	TbLikes               []string `json:"tblikes"`
+	PartitionColumn       string   `json:"partition_column"`
+	PartitionColumnType   string   `json:"partition_column_type"`
+	ExpireTime            int      `json:"expire_time"`             // 分区过期时间
+	PartitionTimeInterval int      `json:"partition_time_interval"` // 分区间隔
+	Creator               string   `json:"creator"`
+	Updator               string   `json:"updator"`
+}
+
+// DeletePartitionConfigByIds is the request body for deleting partition configs by id.
+type DeletePartitionConfigByIds struct {
+	ClusterType string  `json:"cluster_type"`
+	BkBizId     int64   `json:"bk_biz_id"`
+	Ids         []int64 `json:"ids"`
+}
+
+// DisablePartitionInput is the request body for disabling partition configs.
+type DisablePartitionInput struct {
+	ClusterType string  `json:"cluster_type"`
+	Operator    string  `json:"operator"`
+	Ids         []int64 `json:"ids"`
+}
+
+// EnablePartitionInput is the request body for enabling partition configs.
+type EnablePartitionInput struct {
+	ClusterType string  `json:"cluster_type"`
+	Operator    string  `json:"operator"`
+	Ids         []int64 `json:"ids"`
+}
diff --git a/dbm-services/mysql/db-partition/util/client.go b/dbm-services/mysql/db-partition/util/client.go
new file mode 100644
index 0000000000..e4aa55289c
--- /dev/null
+++ b/dbm-services/mysql/db-partition/util/client.go
@@ -0,0 +1,226 @@
+package util
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"math/rand"
+	"net/http"
+	"net/http/httputil"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/google/go-querystring/query"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+const (
+	// apiserver response code
+	statusSuccess int = 0
+)
+
+// APIServerResponse is the default payload shape returned by the apiserver.
+type APIServerResponse struct {
+	Code    int             `json:"code"`
+	Message string          `json:"message"`
+	Data    json.RawMessage `json:"data"` // json.RawMessage is equivalent to []byte
+}
+
+// APIServerResponseCompatible matches endpoints (e.g. v2/push) that return the code as a string.
+type APIServerResponseCompatible struct {
+	Code    string          `json:"code"`
+	Message string          `json:"message"`
+	Data    json.RawMessage `json:"data"` // json.RawMessage is equivalent to []byte
+}
+
+// APIServerResponseCompatible2 matches endpoints (e.g. priv_manager) that use "msg" as the message key.
+type APIServerResponseCompatible2 struct {
+	Code    int             `json:"code"`
+	Message string          `json:"msg"`
+	Data    json.RawMessage `json:"data"` // json.RawMessage is equivalent to []byte
+}
+
+// Client is a simple HTTP client for the apiserver.
+type Client struct {
+	apiserver string
+
+	// JWT token
+	token string
+
+	// client for apiservers
+	client *http.Client
+}
+
+// NewClientByHosts creates a Client for the given apiserver host.
+func NewClientByHosts(host string) *Client {
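+	// NOTE: this disables TLS certificate verification process-wide via http.DefaultTransport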
+	http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+	cli := &Client{}
+	cli.apiserver = host
+	cli.client = &http.Client{
+		Transport: &http.Transport{},
+	}
+	return cli
+}
+
+// DoNew sends the request and decodes the apiserver response.
+// others: other parameters that may be used later, e.g. other->{"user"} for the gateway.
+// Retries automatically when the returned error contains the gateway
+// flow-control signature (cse.flowcontrol.Consumer.qps.limit).
+func (c *Client) DoNew(method, url string, params interface{}, headers map[string]string) (*APIServerResponse, error) {
+	var response *APIServerResponse
+	var err error
+	for retryIdx := 0; retryIdx < 5; retryIdx++ {
+		response, err = c.doNewInner(method, url, params, headers)
+		if err == nil {
+			break
+		}
+		if strings.Contains(err.Error(), "cse.flowcontrol.Consumer.qps.limit") {
+			slog.Error(fmt.Sprintf("DoNew failed, retryIdx:%d", retryIdx), err)
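+			// quadratic backoff with jitter: retryIdx*retryIdx seconds plus up to 1s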
+			wait := retryIdx*retryIdx*1000 + rand.Intn(1000)
+			time.Sleep(time.Duration(wait) * time.Millisecond)
+			continue
+		}
+		break
+	}
+	return response, err
+}
+
+func (c *Client) doNewInner(method, url string, params interface{}, headers map[string]string) (*APIServerResponse,
+	error) {
+	host := c.apiserver
+	body, err := json.Marshal(params)
+	if err != nil {
+		slog.Error("marshal got an error", err)
+		return nil, fmt.Errorf("json marshal param failed, err: %+v", err)
+	}
+
+	if method == "GET" && !strings.Contains(url, "cc3") {
+		body = nil
+		// some GET endpoints splice params into the URL (e.g. /thirdpartyapi/cc3/query-from-shell);
+		// for the rest the params live in the struct and are encoded into the query string here
+		vals, err := query.Values(params)
+		if err != nil {
+			return nil, fmt.Errorf("get querystring param failed, err: %+v", err)
+		}
+		url = url + "?" + vals.Encode()
+	}
+	req, err := http.NewRequest(method, host+url, bytes.NewBuffer(body))
+
+	if err != nil {
+		slog.Error(fmt.Sprintf("create a new request(%s,%s,%+v) got an error", method, host+url, params), err)
+		return nil, fmt.Errorf("new request failed, err: %+v", err)
+	}
+	req.Header.Set("Content-Type", "application/json")
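+	// the blueking API gateway reads the app credentials from the x-bkapi-authorization header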
+	bkAuth := fmt.Sprintf(`{"bk_app_code": %s, "bk_app_secret": %s}`, viper.GetString("bk_app_code"),
+		viper.GetString("bk_app_secret"))
+	req.Header.Set("x-bkapi-authorization", bkAuth)
+
+	cookieAppCode := http.Cookie{Name: "bk_app_code", Path: "/", Value: viper.GetString("bk_app_code"), MaxAge: 86400}
+	cookieAppSecret := http.Cookie{Name: "bk_app_secret", Path: "/", Value: viper.GetString("bk_app_secret"),
+		MaxAge: 86400}
+	req.AddCookie(&cookieAppCode)
+	req.AddCookie(&cookieAppSecret)
+
+	resp, err := c.client.Do(req)
+	if err != nil {
+		slog.Error(fmt.Sprintf("invoking http request failed, url: %s", req.URL.String()), err)
+		return nil, fmt.Errorf("do http request failed, err: %+v", err)
+	}
+	defer func() {
+		if resp == nil {
+			return
+		}
+		if err := resp.Body.Close(); err != nil {
+			slog.Warn("close response body failed", "err", err.Error())
+		}
+	}()
+
+	// The gateway occasionally times out; retry a few times to see whether the
+	// failure persists in this window:
+	//   500 - the service may be mid-release
+	//   429 - occasional rate-limit overruns under high concurrency
+	//   504 - cause unknown for now, so just retry
+	for i := 1; i <= 5; i++ {
+		if !HasElem(resp.StatusCode, []int{http.StatusInternalServerError, http.StatusTooManyRequests,
+			http.StatusGatewayTimeout}) {
+			break
+		}
+
+		wait := i*i*1000 + rand.Intn(1000)
+		time.Sleep(time.Duration(wait) * time.Millisecond)
+		slog.Warn(fmt.Sprintf("client.Do result with %s, wait %d milliSeconds and retry, url: %s", resp.Status, wait,
+			req.URL.String()))
+		_ = resp.Body.Close() // release the previous response before retrying
+		resp, err = c.client.Do(req)
+		if err != nil {
+			slog.Error(fmt.Sprintf("an error occurred while invoking client.Do, url: %s", req.URL.String()), err)
+			return nil, fmt.Errorf("do http request failed, err: %+v", err)
+		}
+	}
+
+	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
+		bodyBytes, dumpErr := httputil.DumpResponse(resp, true)
+		if dumpErr != nil {
+			slog.Error("dump response failed", dumpErr)
+			return nil, fmt.Errorf("dump response failed: %+v, status code: %d", dumpErr, resp.StatusCode)
+		}
+		return nil, fmt.Errorf("http response: %s, status code: %d", string(bodyBytes), resp.StatusCode)
+	}
+
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		err = fmt.Errorf("read resp.body error:%s", err.Error())
+		slog.Error("msg", err)
+		return nil, err
+	}
+	result := &APIServerResponse{}
+	if strings.Contains(url, "v2/push") {
+		temp := &APIServerResponseCompatible{}
+		err = json.Unmarshal(b, temp)
+		if err != nil {
+			slog.Error(fmt.Sprintf("unmarshal %s to %+v got an error", string(b), *result), err)
+			return nil, fmt.Errorf("json unmarshal failed, err: %+v", err)
+		}
+		code, _ := strconv.Atoi(temp.Code)
+		result = &APIServerResponse{code, temp.Message, temp.Data}
+	} else if strings.Contains(url, "priv_manager") {
+		temp := &APIServerResponseCompatible2{}
+		err = json.Unmarshal(b, temp)
+		if err != nil {
+			slog.Error(fmt.Sprintf("unmarshal %s to %+v got an error", string(b), *result), err)
+			return nil, fmt.Errorf("json unmarshal failed, err: %+v", err)
+		}
+		result = &APIServerResponse{temp.Code, temp.Message, temp.Data}
+	} else {
+		err = json.Unmarshal(b, result)
+		if err != nil {
+			slog.Error(fmt.Sprintf("unmarshal %s to %+v got an error", string(b), *result), err)
+			return nil, fmt.Errorf("json unmarshal failed, err: %+v", err)
+		}
+	}
+	// check the response code and whether data is empty
+	// check response and data is nil
+	if result.Code != statusSuccess && result.Code != http.StatusOK {
+		slog.Warn(fmt.Sprintf("result.Code is %d not equal to %d,message:%s,data:%s,param:%+v", result.Code, statusSuccess,
+			result.Message, string(result.Data), params))
+		if len(result.Data) != 0 {
+			return nil, fmt.Errorf("[%v - %v - %s]", result.Code, result.Message, string(result.Data))
+		}
+		return nil, fmt.Errorf("%v - %v", result.Code, result.Message)
+	}
+	return result, nil
+}
+
+// Do calls DoNew with no extra headers.
+func (c *Client) Do(method, url string, params interface{}) (*APIServerResponse, error) {
+	return c.DoNew(method, url, params, map[string]string{})
+}
diff --git a/dbm-services/mysql/db-partition/util/time.go b/dbm-services/mysql/db-partition/util/time.go
new file mode 100644
index 0000000000..eb9ac222cf
--- /dev/null
+++ b/dbm-services/mysql/db-partition/util/time.go
@@ -0,0 +1,96 @@
+package util
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	"golang.org/x/exp/slog"
+)
+
+// TimeFormat stores a time as a "2006-01-02 15:04:05" string so it scans from
+// and writes to MySQL DATETIME columns while marshalling to JSON verbatim.
+type TimeFormat string
+
+// Value implements driver.Valuer: gorm calls it on save/update, converting the
+// string back into a time.Time.
+func (t TimeFormat) Value() (driver.Value, error) {
+	if t.IsNull() {
+		return nil, nil
+	}
+	localTimezone, err := time.LoadLocation("Local") // the timezone configured on the server
+	if err != nil {
+		slog.Error("time.LoadLocation", err)
+		localTimezone, _ = time.LoadLocation("Asia/Shanghai") // on failure, default to the Shanghai timezone
+	}
+	ti, err := time.ParseInLocation("2006-01-02 15:04:05", string(t), localTimezone)
+	if err != nil {
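+		// fall back to the current time instead of failing the whole save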
+		slog.Error("TimeFormat Value", err)
+		return time.Now(), nil
+	}
+	return ti.In(localTimezone), nil
+}
+
+// Scan implements sql.Scanner: gorm calls it on find-style queries to convert
+// the column value into a TimeFormat.
+// Note that Scan is not invoked when the value is nil; when that happens,
+// MarshalJSON must return []byte("\"\"") or marshalling fails with
+// "json: error calling MarshalJSON for type model.TimeFormat: unexpected end of JSON input".
+func (t *TimeFormat) Scan(value interface{}) error {
+	localTimezone, err := time.LoadLocation("Local") // the timezone configured on the server
+	if err != nil {
+		slog.Error("time.LoadLocation error", err)
+		localTimezone, _ = time.LoadLocation("Asia/Shanghai") // on failure, default to the Shanghai timezone
+	}
+	if value == nil {
+		*t = "\"2006-01-02 00:00:00\""
+		return nil
+	}
+	s, ok := value.(time.Time)
+	if !ok {
+		return errors.New("Invalid Scan Source")
+	}
+	*t = TimeFormat(s.In(localTimezone).Format("2006-01-02 15:04:05"))
+	return nil
+}
+
+// MarshalJSON is called when handler.go's SendResponse invokes c.WriteHeaderAndJSON.
+func (t TimeFormat) MarshalJSON() ([]byte, error) {
+	if t == "" {
+		return []byte("\"\""), nil
+	}
+	return []byte(fmt.Sprintf("\"%s\"", string(t))), nil
+}
+
+// UnmarshalJSON parses the JSON string into a TimeFormat.
+func (t *TimeFormat) UnmarshalJSON(data []byte) error {
+	var str string
+	err := json.Unmarshal(data, &str)
+	*t = TimeFormat(str)
+	return err
+}
+
+// IsNull reports whether the time string is empty.
+func (t TimeFormat) IsNull() bool {
+	return len(t) == 0
+}
+
+// NowTimeFormat returns the current time as a TimeFormat.
+func NowTimeFormat() TimeFormat {
+	return TimeFormat(time.Now().Format("2006-01-02 15:04:05"))
+}
diff --git a/dbm-services/mysql/db-partition/util/util.go b/dbm-services/mysql/db-partition/util/util.go
new file mode 100644
index 0000000000..30304dcb0e
--- /dev/null
+++ b/dbm-services/mysql/db-partition/util/util.go
@@ -0,0 +1,84 @@
+// Package util provides small shared helpers: slice lookups, string splitting and shell execution.
+package util
+
+import (
+	"bytes"
+	"fmt"
+	"os/exec"
+	"reflect"
+	"regexp"
+	"strings"
+
+	"github.com/pkg/errors"
+	"golang.org/x/exp/slog"
+)
+
+// HasElem reports whether elem is present in the given slice or array.
+func HasElem(elem interface{}, slice interface{}) bool {
+	defer func() {
+		if err := recover(); err != nil {
+			slog.Error("HasElem error", err)
+		}
+	}()
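+	// reflect's Interface() below can panic (e.g. on unexported fields); the
+	// recover above turns that into a "not found" result instead of a crash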
+	arrV := reflect.ValueOf(slice)
+	if arrV.Kind() == reflect.Slice || arrV.Kind() == reflect.Array {
+		for i := 0; i < arrV.Len(); i++ {
+			// XXX - panics if slice element points to an unexported struct field
+			// see https://golang.org/pkg/reflect/#Value.Interface
+			if arrV.Index(i).Interface() == elem {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// SplitName splits a user-supplied list string (IPs, names, ...).
+// Split rule: runs of \r, whitespace, ";", "\n" or "," act as separators.
+// Returns the resulting string slice.
+func SplitName(input string) ([]string, error) {
+	result := []string{}
+	if reg, err := regexp.Compile(`\r+|\s+|;+|\n+`); err != nil {
+		return result, err
+	} else {
+		// if the regexp compiles, normalize every separator to ","
+		input = reg.ReplaceAllString(input, ",")
+	}
+	if reg, err := regexp.Compile(`^,+|,+$`); err != nil {
+		return result, err
+	} else {
+		input = reg.ReplaceAllString(input, "")
+	}
+	if reg, err := regexp.Compile(`,+`); err != nil {
+		return result, err
+	} else {
+		input = reg.ReplaceAllString(input, ",")
+	}
+	result = strings.Split(input, ",")
+	return result, nil
+}
+
+// ExecShellCommand executes a shell command via bash -c.
+// If an error occurs the returned string is stderr; otherwise it is stdout.
+func ExecShellCommand(isSudo bool, param string) (stdoutStr string, err error) {
+	if isSudo {
+		param = "sudo " + param
+	}
+	cmd := exec.Command("bash", "-c", param)
+	var stdout, stderr bytes.Buffer
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+	err = cmd.Run()
+	if err != nil {
+		return stderr.String(), errors.WithMessage(err, stderr.String())
+	}
+
+	if len(stderr.String()) > 0 {
+		err = fmt.Errorf("execute shell command(%s) error:%s", param, stderr.String())
+		return stderr.String(), err
+	}
+
+	return stdout.String(), nil
+}
diff --git a/dbm-services/mysql/db-priv/.ci/codecc.yml b/dbm-services/mysql/db-priv/.ci/codecc.yml
new file mode 100644
index 0000000000..c824dddd90
--- /dev/null
+++ b/dbm-services/mysql/db-priv/.ci/codecc.yml
@@ -0,0 +1,29 @@
+version: v2.0
+resources:
+  repositories:
+    - repository: ci_templates/public/codecc
+      name: codecc
+on:
+  mr:
+    target-branches:  [ "*" ]
+stages:
+  - name: "代码检查"
+    check-out:
+      gates:
+        - template: commonGate.yml@codecc
+      timeout-hours: 10
+    jobs:
+      codecc:
+        name: "CodeCC代码检查"
+        runs-on:
+          pool-name: docker  # docker-on-devcloud | docker | local | agentless
+          container:
+            image: mirrors.tencent.com/ci/tlinux3_ci:2.0.0
+        steps:
+          - checkout: self
+          - uses: CodeccCheckAtomDebug@4.*
+            name: 腾讯代码分析
+            with:
+              beAutoLang: true # auto-detect project languages
+              checkerSetType: "openScan" # rule-set type: "normal" = self-managed rule sets, "openScan" = open-source governance rules
+              toolScanType: "2" # scan mode: quick full scan [1] | full scan [0] | diff scan [6] | MR/PR scan [2]; default 1
diff --git a/dbm-services/mysql/db-priv/.ci/open_source_check.yml b/dbm-services/mysql/db-priv/.ci/open_source_check.yml
new file mode 100644
index 0000000000..f421f315f3
--- /dev/null
+++ b/dbm-services/mysql/db-priv/.ci/open_source_check.yml
@@ -0,0 +1,84 @@
+version: "v2.0"
+name: "开源检查"
+label: []
+variables: {}
+stages:
+- name: "开源检查"
+  label:
+  - "Build"
+  jobs:
+    job_AfK:
+      name: "构建环境-LINUX"
+      runs-on:
+        pool-name: "docker"
+        container:
+          image: "mirrors.tencent.com/ci/tlinux3_ci:2.3.0"
+        needs: {}
+      steps:
+      - checkout: self
+      - name: "敏感信息检查-部门RTX"
+        uses: "SensitiveRtxChecker@3.*"
+      - name: "腾讯代码分析(官方-代码分析工作组)"
+        uses: "CodeccCheckAtomDebug@4.*"
+        with:
+          beAutoLang: true
+          languages:
+          - "GOLANG"
+          checkerSetType: "communityOpenScan"
+          tools:
+          - "WOODPECKER_COMMITSCAN"
+          - "SCC"
+          - "PECKER_SECURITY"
+          - "SENSITIVE"
+          - "DUPC"
+          - "IP_CHECK"
+          - "WOODPECKER_SENSITIVE"
+          - "HORUSPY"
+          - "XCHECK"
+          - "CCN"
+          asyncTask: false
+          asyncTaskId: ""
+          scriptType: "SHELL"
+          script: |-
+            # Coverity/Klocwork compile your code through a build script to trace deep defects.
+            # Write a build.sh using your build tool (maven/cmake/...) and make sure
+            # build.sh actually compiles the code, e.g.:
+            # cd path/to/build.sh
+            # sh build.sh
+          languageRuleSetMap: {}
+          checkerSetEnvType: "prod"
+          multiPipelineMark: ""
+          rtxReceiverType: "1"
+          botWebhookUrl: ""
+          botRemindRange: "2"
+          botRemindSeverity: "7"
+          botRemaindTools: []
+          emailReceiverType: "1"
+          emailCCReceiverList: []
+          instantReportStatus: "2"
+          reportDate: []
+          reportTime: ""
+          reportTools: []
+          toolScanType: "1"
+          diffBranch: ""
+          byFile: false
+          mrCommentEnable: true
+          prohibitIgnore: false
+          newDefectJudgeFromDate: ""
+          transferAuthorList: []
+          path: []
+          customPath: []
+          scanTestSource: false
+          openScanPrj: false
+          openScanFilterEnable: false
+          issueSystem: "TAPD"
+          issueSubSystem: ""
+          issueResolvers: []
+          issueReceivers: []
+          issueFindByVersion: ""
+          maxIssue: 1000
+          issueAutoCommit: false
+  check-out:
+    gates:
+      - template: open_source_gate.yml
+    timeout-hours: 10
\ No newline at end of file
diff --git a/dbm-services/mysql/db-priv/.ci/templates/open_source_gate.yml b/dbm-services/mysql/db-priv/.ci/templates/open_source_gate.yml
new file mode 100644
index 0000000000..d14127e08c
--- /dev/null
+++ b/dbm-services/mysql/db-priv/.ci/templates/open_source_gate.yml
@@ -0,0 +1,26 @@
+parameters:
+  - name: receivers
+    type: array
+    default: [ "${{ ci.actor }}" ]
+
+gates:
+  - name: open-source-gate
+    rule:
+      - "CodeccCheckAtomDebug.all_risk <= 0"
+      - "CodeccCheckAtomDebug.high_med_new_issue <= 0"
+      - "CodeccCheckAtomDebug.ccn_new_max_value <= 40"
+      - "CodeccCheckAtomDebug.sensitive_defect <= 0"
+      - "CodeccCheckAtomDebug.dupc_average <= 15"
+      - "CodeccCheckAtomDebug.ccn_average <= 3"
+      - "CodeccCheckAtomDebug.ccn_new_defect <= 0"
+      - "CodeccCheckAtomDebug.ccn_funcmax <= 20"
+      - "CodeccCheckAtomDebug.woodpecker_all_defect <= 0"
+      - "CodeccCheckAtomDebug.horuspy_all_defect <= 0"
+      - "CodeccCheckAtomDebug.go_serious_defect <= 0"
+      - "CodeccCheckAtomDebug.go_all_defect <= 100"
+    notify-on-fail:
+      - type: wework-message
+        receivers: ${{ parameters.receivers }}
+    continue-on-fail:
+      gatekeepers:
+        - "${{ ci.actor }}"
\ No newline at end of file
diff --git a/dbm-services/mysql/db-priv/.gitignore b/dbm-services/mysql/db-priv/.gitignore
new file mode 100644
index 0000000000..ecc836345a
--- /dev/null
+++ b/dbm-services/mysql/db-priv/.gitignore
@@ -0,0 +1,16 @@
+vendor/
+log/
+conf/
+nohup.out
+privkey.txt
+privService
+privservice
+pubkey.txt
+Brewfile
+.idea/
+.vscode/
+pubkey.pem
+privkey.pem
+infile
+outfile
+.code.yml
\ No newline at end of file
diff --git a/dbm-services/mysql/db-priv/.golangci.yml b/dbm-services/mysql/db-priv/.golangci.yml
new file mode 100644
index 0000000000..b165022e4c
--- /dev/null
+++ b/dbm-services/mysql/db-priv/.golangci.yml
@@ -0,0 +1,121 @@
+# Full reference: https://golangci-lint.run/usage/configuration/
+linters-settings:
+  funlen:
+    lines: 80
+    statements: 80
+  govet:
+    check-shadowing: true
+  lll:
+    line-length: 120
+  errcheck:
+    check-type-assertions: true
+  goconst:
+    min-len: 2
+    min-occurrences: 2
+  gocyclo:
+    min-complexity: 20
+  goimports:
+  revive:
+    confidence: 0
+    rules:
+      - name: var-declaration
+      - name: package-comments
+      - name: dot-imports
+      - name: blank-imports
+      - name: exported
+      - name: var-naming
+      - name: indent-error-flow
+      - name: range
+      - name: errorf
+      - name: error-naming
+      - name: error-strings
+      - name: receiver-naming
+      - name: increment-decrement
+      - name: error-return
+      #- name: unexported-return
+      - name: time-naming
+      - name: context-keys-type
+      - name: context-as-argument
+      - name: argument-limit
+        severity: warning
+        disabled: false
+        arguments: [ 5 ]
+  gocritic:
+    enabled-checks:
+      - nestingReduce
+      - commentFormatting
+    settings:
+      nestingReduce:
+        bodyWidth: 5
+
+linters:
+  disable-all: true
+  enable:
+    - deadcode
+    - funlen
+    - goconst
+    - gocyclo
+    - gofmt
+    - ineffassign
+    - staticcheck
+    - structcheck # when an unexported struct is embedded in another struct, use of the outer struct is not detected; each project must suppress this itself
+    - typecheck
+    - goimports
+    - revive
+    - gosimple
+    - govet
+    - lll
+    - rowserrcheck
+    - errcheck
+    - unused
+    - varcheck
+    - sqlclosecheck
+    - gocritic
+    # - bodyclose: too many issues (https://github.com/timakin/bodyclose/issues) to suppress one by one, so it is explicitly left disabled
+
+run:
+  # default concurrency is the number of available CPUs
+  concurrency: 4
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  timeout: 2m
+  # exit code when at least one issue was found, default is 1
+  issues-exit-code: 1
+  # include test files or not, default is true
+  tests: false
+  # default is true. Enables skipping of directories:
+  #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
+  skip-dirs-use-default: true
+  skip-files:
+    - ".*/mock/.*.go"
+    - ".*testing.go"
+    - "docs/*.go"
+
+issues:
+  exclude-use-default: true
+  # The list of ids of default excludes to include or disable. By default it's empty.
+  # golangci-lint would exclude the rules below by default, but we want them enforced:
+  # `exclude-use-default: true` applies the default ignore list, and the entries here are
+  # pulled back out of it. A whitelist beats a blacklist here: as golangci-lint adds more
+  # tools we keep benefiting automatically instead of maintaining our own list.
+  include:
+    - EXC0004 # govet (possible misuse of unsafe.Pointer|should have signature)
+    - EXC0005 # staticcheck ineffective break statement. Did you mean to break out of the outer loop
+    - EXC0012 # revive exported (method|function|type|const) (.+) should have comment or be unexported
+    - EXC0013 # revive package comment should be of the form "(.+)...
+    - EXC0014 # revive comment on exported (.+) should be of the form "(.+)..."
+    - EXC0015 # revive should have a package comment, unless it's in another file for this package
+  exclude-rules:
+    - path: _test\.go
+      linters:
+        - funlen # the style guide allows unit-test functions up to 160 lines; the tool cannot special-case tests, so skip the length check for them
+    - linters:
+        - staticcheck
+      text: "SA6002: argument should be pointer-like to avoid allocations" # sync.pool.Put(buf) with a slice declared as var buf []byte will trigger this
+    - linters:
+        - lll
+      source: "^//go:generate " # Exclude lll issues for long lines with go:generate
+  max-same-issues: 0
+  new: false
+  max-issues-per-linter: 0
+output:
+  sort-results: true
+service:
+  golangci-lint-version: 1.28.x
diff --git a/dbm-services/mysql/db-priv/Dockerfile b/dbm-services/mysql/db-priv/Dockerfile
new file mode 100644
index 0000000000..fc4fe81e23
--- /dev/null
+++ b/dbm-services/mysql/db-priv/Dockerfile
@@ -0,0 +1,12 @@
+FROM mirrors.tencent.com/sccmsp/golang:1.16
+LABEL maintainer="tencent"
+RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
+RUN echo "Asia/Shanghai" > /etc/timezone
+
+ADD privservice /
+RUN mkdir /conf
+
+WORKDIR /
+CMD /privservice --migrate
+
+
diff --git a/dbm-services/mysql/db-priv/Makefile b/dbm-services/mysql/db-priv/Makefile
new file mode 100644
index 0000000000..2638d81142
--- /dev/null
+++ b/dbm-services/mysql/db-priv/Makefile
@@ -0,0 +1,44 @@
+SHELL := /bin/bash
+BASEDIR = $(shell pwd)
+SRV_NAME = privservice
+COMMAND_NAME = privservice
+VER = latest
+CURRENT_VERSION = release-$(VER)
+TEST_VERSION = test-$(VER)
+NAMESPACE = sccmsp
+DH_USER = Ex_vincixu
+DH_PASS = aA456123
+DH_URL = mirrors.tencent.com
+export GOOS = linux
+#export GOOS = darwin
+BUILD_PATH = .
+
+all: build
+api:
+	CGO_ENABLED=0 GOOS=${GOOS} GOARCH=amd64 go build -o $(COMMAND_NAME)  -v ${BUILD_PATH}
+build:clean
+	CGO_ENABLED=0 GOOS=${GOOS} GOARCH=amd64 go build -o $(COMMAND_NAME) -v ${BUILD_PATH}
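+
+# build depends on clean, so define it: remove any previously built binary
+clean:
+	rm -f $(COMMAND_NAME)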
+
+publish:build
+	docker build --build-arg SRV_NAME=$(COMMAND_NAME) --rm -t $(SRV_NAME):$(CURRENT_VERSION) .
+	docker tag $(SRV_NAME):$(CURRENT_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(CURRENT_VERSION)
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(CURRENT_VERSION)
+
+test:build
+	docker build --build-arg SRV_NAME=$(COMMAND_NAME) --rm -t $(SRV_NAME):$(TEST_VERSION) .
+	docker tag $(SRV_NAME):$(TEST_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(TEST_VERSION)
+	# docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(TEST_VERSION)
+
+test_mac:build
+	docker build --build-arg SRV_NAME=$(COMMAND_NAME) --rm -t $(SRV_NAME):$(TEST_VERSION) .
+	docker tag $(SRV_NAME):$(TEST_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(TEST_VERSION)
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(TEST_VERSION)
+
+curl:
+	@echo curl -XGET -H "Content-Type: application/json" http://127.0.0.1:8080/user
+help:
+	@echo "make - compile go source"
+	@echo "make gotool - run gofmt & go tool vet"
+	@echo "make clean - do some clean job"
+
+.PHONY: all gotool clean help api curl
diff --git a/dbm-services/mysql/db-priv/README.md b/dbm-services/mysql/db-priv/README.md
new file mode 100644
index 0000000000..9930197630
--- /dev/null
+++ b/dbm-services/mysql/db-priv/README.md
@@ -0,0 +1 @@
+MySQL privilege management backend for the BlueKing platform.
diff --git a/dbm-services/mysql/db-priv/admin.sh b/dbm-services/mysql/db-priv/admin.sh
new file mode 100644
index 0000000000..baf4982d05
--- /dev/null
+++ b/dbm-services/mysql/db-priv/admin.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
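+# Simple start/stop/status/restart wrapper for the privService binary.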
+export PATH="/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin:/root/bin:"
+
+SERVER="privService"
+BASE_DIR=$PWD
+INTERVAL=2
+
+# command-line arguments; specify them manually if needed
+ARGS=""
+
+function start()
+{
+        if [ "`pgrep $SERVER -u $UID`" != "" ];then
+                echo "$SERVER already running"
+                exit 1
+        fi
+
+        #nohup $BASE_DIR/$SERVER $ARGS &>/dev/null &
+        nohup $BASE_DIR/$SERVER $ARGS  &
+
+        echo "sleeping..." &&  sleep $INTERVAL
+
+        # check status
+        if [ "`pgrep $SERVER -u $UID`" == "" ];then
+                echo "$SERVER start failed"
+                exit 1
+        fi
+}
+
+function status() 
+{
+        if [ "`pgrep $SERVER -u $UID`" != "" ];then
+                echo $SERVER is running
+        else
+                echo $SERVER is not running
+        fi
+}
+
+function stop() 
+{
+        if [ "`pgrep $SERVER -u $UID`" != "" ];then
+                kill -9 `pgrep $SERVER -u $UID`
+        fi
+
+        echo "sleeping..." &&  sleep $INTERVAL
+
+        if [ "`pgrep $SERVER -u $UID`" != "" ];then
+                echo "$SERVER stop failed"
+                exit 1
+        fi
+}
+
+case "$1" in
+        'start')
+        start
+        ;;  
+        'stop')
+        stop
+        ;;  
+        'status')
+        status
+        ;;  
+        'restart')
+        stop && start
+        ;;  
+        *)  
+        echo "usage: $0 {start|stop|restart|status}"
+        exit 1
+        ;;  
+esac
diff --git a/dbm-services/mysql/db-priv/assests/assests.go b/dbm-services/mysql/db-priv/assests/assests.go
new file mode 100644
index 0000000000..1c4d3bda77
--- /dev/null
+++ b/dbm-services/mysql/db-priv/assests/assests.go
@@ -0,0 +1,2 @@
+// Package assests embeds static assets such as the SQL migrations.
+package assests
diff --git a/dbm-services/mysql/db-priv/assests/migrate.go b/dbm-services/mysql/db-priv/assests/migrate.go
new file mode 100644
index 0000000000..1230d4e40b
--- /dev/null
+++ b/dbm-services/mysql/db-priv/assests/migrate.go
@@ -0,0 +1,50 @@
+package assests
+
+import (
+	"embed"
+	"fmt"
+
+	"github.com/golang-migrate/migrate/v4"
+	_ "github.com/golang-migrate/migrate/v4/database/mysql" // register the mysql driver
+	"github.com/golang-migrate/migrate/v4/source/iofs"
+	"github.com/pkg/errors"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+//go:embed migrations/*.sql
+var fs embed.FS
+
+// DoMigrateFromEmbed looks for migrations on the embedded go:embed filesystem first
+// and applies them; if there are no changes it returns nil.
+func DoMigrateFromEmbed() error {
+	var mig *migrate.Migrate
+	if d, err := iofs.New(fs, "migrations"); err != nil {
+		return err
+	} else {
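+		// multiStatements=true allows one migration file to contain several SQL statements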
+		dbURL := fmt.Sprintf(
+			"mysql://%s:%s@tcp(%s)/%s?charset=%s&parseTime=true&loc=Local&multiStatements=true&interpolateParams=true",
+			viper.GetString("db.username"),
+			viper.GetString("db.password"),
+			viper.GetString("db.addr"),
+			viper.GetString("db.name"),
+			"utf8",
+		)
+		mig, err = migrate.NewWithSourceInstance("iofs", d, dbURL)
+		if err != nil {
+			return errors.WithMessage(err, "migrate from embed")
+		}
+		defer mig.Close()
+		err = mig.Up()
+		if err == nil {
+			slog.Info("migrate source from embed success")
+			return nil
+		} else if err == migrate.ErrNoChange {
+			slog.Info("migrate source from embed success with", "msg", err.Error())
+			return nil
+		} else {
+			slog.Error("migrate source from embed failed", err)
+			return err
+		}
+	}
+}
diff --git a/dbm-services/mysql/db-priv/assests/migrations/000001_init.down.sql.sql b/dbm-services/mysql/db-priv/assests/migrations/000001_init.down.sql.sql
new file mode 100644
index 0000000000..a4f40086f9
--- /dev/null
+++ b/dbm-services/mysql/db-priv/assests/migrations/000001_init.down.sql.sql
@@ -0,0 +1 @@
+SET NAMES utf8;
diff --git a/dbm-services/mysql/db-priv/assests/migrations/000001_init.up.sql b/dbm-services/mysql/db-priv/assests/migrations/000001_init.up.sql
new file mode 100644
index 0000000000..bcaf95e5d0
--- /dev/null
+++ b/dbm-services/mysql/db-priv/assests/migrations/000001_init.up.sql
@@ -0,0 +1 @@
+SET NAMES utf8;
\ No newline at end of file
diff --git a/dbm-services/mysql/db-priv/assests/migrations/000002_init.down.sql b/dbm-services/mysql/db-priv/assests/migrations/000002_init.down.sql
new file mode 100644
index 0000000000..2c54ef2119
--- /dev/null
+++ b/dbm-services/mysql/db-priv/assests/migrations/000002_init.down.sql
@@ -0,0 +1,4 @@
+DROP TABLE IF EXISTS schema_migrations;
+DROP TABLE IF EXISTS priv_logs;
+DROP TABLE IF EXISTS tb_account_rules;
+DROP TABLE IF EXISTS tb_accounts;
diff --git a/dbm-services/mysql/db-priv/assests/migrations/000002_init.up.sql b/dbm-services/mysql/db-priv/assests/migrations/000002_init.up.sql
new file mode 100644
index 0000000000..b9ea5c1661
--- /dev/null
+++ b/dbm-services/mysql/db-priv/assests/migrations/000002_init.up.sql
@@ -0,0 +1,123 @@
+-- MySQL dump 10.13  Distrib 5.7.20, for Linux (x86_64)
+--
+-- Host: localhost    Database: bk_dbpriv
+-- ------------------------------------------------------
+-- Server version	5.7.20-tmysql-3.3.2-log
+/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+/*!40103 SET TIME_ZONE='+00:00' */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+/*!50717 SELECT COUNT(*) INTO @rocksdb_has_p_s_session_variables FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'performance_schema' AND TABLE_NAME = 'session_variables' */;
+/*!50717 SET @rocksdb_get_is_supported = IF (@rocksdb_has_p_s_session_variables, 'SELECT COUNT(*) INTO @rocksdb_is_supported FROM performance_schema.session_variables WHERE VARIABLE_NAME=\'rocksdb_bulk_load\'', 'SELECT 0') */;
+/*!50717 PREPARE s FROM @rocksdb_get_is_supported */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+/*!50717 SET @rocksdb_enable_bulk_load = IF (@rocksdb_is_supported, 'SET SESSION rocksdb_bulk_load = 1', 'SET @rocksdb_dummy_bulk_load = 0') */;
+/*!50717 PREPARE s FROM @rocksdb_enable_bulk_load */;
+/*!50717 EXECUTE s */;
+/*!50717 DEALLOCATE PREPARE s */;
+
+--
+-- Table structure for table `schema_migrations`
+--
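+-- golang-migrate tracks the applied migration version and dirty flag in this table.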
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `schema_migrations` (
+    `version` bigint(20) NOT NULL,
+    `dirty` tinyint(1) NOT NULL,
+    PRIMARY KEY (`version`)
+    ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Table structure for table `priv_logs`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `priv_logs` (
+                             `id` int(11) NOT NULL AUTO_INCREMENT,
+                             `bk_biz_id` int(11) NOT NULL COMMENT '业务的 cmdb id',
+                             `operator` varchar(800) NOT NULL COMMENT '操作者',
+                             `para` longtext NOT NULL COMMENT '参数',
+                             `execute_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '执行时间',
+                             PRIMARY KEY (`id`),
+                             KEY `bk_biz_id` (`bk_biz_id`,`operator`(10),`execute_time`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Dumping data for table `priv_logs`
+--
+
+
+--
+-- Table structure for table `tb_account_rules`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `tb_account_rules` (
+                                    `id` int(11) NOT NULL AUTO_INCREMENT,
+                                    `bk_biz_id` int(11) NOT NULL COMMENT '业务的 cmdb id',
+                                    `account_id` int(11) NOT NULL COMMENT 'tb_accounts表的id',
+                                    `dbname` varchar(800) NOT NULL COMMENT '访问db',
+                                    `priv` varchar(800) NOT NULL COMMENT '访问权限',
+                                    `dml_ddl_priv` varchar(800) NOT NULL COMMENT 'DML,DDL访问权限',
+                                    `global_priv` varchar(800) NOT NULL COMMENT 'GLOBAL访问权限',
+                                    `creator` varchar(800) NOT NULL COMMENT '创建者',
+                                    `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+                                    `operator` varchar(800) DEFAULT NULL COMMENT '最后一次变更者',
+                                    `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '最后一次变更时间',
+                                    `priv_type` varchar(3) DEFAULT NULL,
+                                    PRIMARY KEY (`id`),
+                                    UNIQUE KEY `bk_biz_id` (`bk_biz_id`,`account_id`,`dbname`),
+                                    KEY `account_id` (`account_id`),
+                                    CONSTRAINT `tb_account_rules_ibfk_1` FOREIGN KEY (`account_id`) REFERENCES `tb_accounts` (`id`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COMMENT='账号规则配置';
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Dumping data for table `tb_account_rules`
+--
+
+
+--
+-- Table structure for table `tb_accounts`
+--
+
+/*!40101 SET @saved_cs_client     = @@character_set_client */;
+/*!40101 SET character_set_client = utf8 */;
+CREATE TABLE IF NOT EXISTS `tb_accounts` (
+                               `id` int(11) NOT NULL AUTO_INCREMENT,
+                               `bk_biz_id` int(11) NOT NULL COMMENT '业务的 cmdb id',
+                               `user` varchar(200) NOT NULL COMMENT '用户名',
+                               `psw` json NOT NULL COMMENT '密码',
+                               `creator` varchar(800) NOT NULL COMMENT '创建者',
+                               `create_time` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP COMMENT '创建时间',
+                               `operator` varchar(800) DEFAULT NULL COMMENT '最后一次变更者',
+                               `update_time` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT '最后一次变更时间',
+                               PRIMARY KEY (`id`),
+                               UNIQUE KEY `bk_biz_id` (`bk_biz_id`,`user`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+/*!40101 SET character_set_client = @saved_cs_client */;
+
+--
+-- Dumping data for table `tb_accounts`
+--
+
+/*!50112 SET @disable_bulk_load = IF (@is_rocksdb_supported, 'SET SESSION rocksdb_bulk_load = @old_rocksdb_bulk_load', 'SET @dummy_rocksdb_bulk_load = 0') */;
+/*!50112 PREPARE s FROM @disable_bulk_load */;
+/*!50112 EXECUTE s */;
+/*!50112 DEALLOCATE PREPARE s */;
+/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+-- Dump completed on 2023-01-04 10:12:39
diff --git a/dbm-services/mysql/db-priv/errno/code.go b/dbm-services/mysql/db-priv/errno/code.go
new file mode 100644
index 0000000000..adc14088b6
--- /dev/null
+++ b/dbm-services/mysql/db-priv/errno/code.go
@@ -0,0 +1,403 @@
+package errno
+
+var (
+	// OK TODO
+	// Common errors
+	// OK = Errno{Code: 0, Message: ""}
+	OK = Errno{Code: 0, Message: "", CNMessage: ""}
+	// SaveOK TODO
+	SaveOK = Errno{Code: 0, Message: "Bill save success!", CNMessage: "单据保存成功!"}
+	// CommitOK TODO
+	CommitOK = Errno{Code: 0, Message: "Bill commit success!", CNMessage: "单据提交成功!"}
+	// AuditOK TODO
+	AuditOK = Errno{Code: 0, Message: "Bill audit success!", CNMessage: "单据审核成功!"}
+	// RollbackOK TODO
+	RollbackOK = Errno{Code: 0, Message: "Bill rollback success!", CNMessage: "单据驳回成功!"}
+	// StopOK TODO
+	StopOK = Errno{Code: 0, Message: "Bill stop success!", CNMessage: "单据终止成功!"}
+	// ExecuteOK TODO
+	ExecuteOK = Errno{Code: 0, Message: "Bill execute success!", CNMessage: "单据执行成功!"}
+	// CommonOK TODO
+	CommonOK = Errno{Code: 0, Message: "", CNMessage: "通用成功描述"}
+	// JobUpdateOK TODO
+	JobUpdateOK = Errno{Code: 0, Message: "Job update success!", CNMessage: "Job 更新成功!"}
+	// SubjobUpdateOK TODO
+	SubjobUpdateOK = Errno{Code: 0, Message: "Subjob update success!", CNMessage: "Subjob 更新成功!"}
+
+	// ErrRecordNotFound TODO
+	ErrRecordNotFound = Errno{Code: 404, Message: "There are no records in db.", CNMessage: "数据库未找到对应的记录!"}
+
+	// CommonErr TODO
+	CommonErr = Errno{Code: 10000, Message: "common error!", CNMessage: "通用错误!"}
+
+	// InternalServerError TODO
+	InternalServerError = Errno{Code: 10001, Message: "Internal server error", CNMessage: "服务器内部错误。"}
+	// ErrBind TODO
+	ErrBind = Errno{Code: 10002, Message: "Error occurred while binding the request body to the struct.",
+		CNMessage: "参数处理发生错误。"}
+	// ErrString2Int TODO
+	ErrString2Int = Errno{Code: 10010, Message: "Error occurred while convert string to int.",
+		CNMessage: "string 转化为 int 出错!"}
+	// ErrorJsonToMap TODO
+	ErrorJsonToMap = Errno{Code: 10030, Message: "Error occurred while converting json to Map.",
+		CNMessage: "Json 转为 Map 出现错误!"}
+	// ErrorUIDBeZero TODO
+	ErrorUIDBeZero = Errno{Code: 10035, Message: "uid can not be 0!", CNMessage: "uid 不能为 0.!"}
+	// ErrRequestParam TODO
+	ErrRequestParam = Errno{Code: 10036, Message: "request parameter error!", CNMessage: "请求参数错误!"}
+
+	// ErrTypeAssertion TODO
+	ErrTypeAssertion = Errno{Code: 10040, Message: "Error occurred while doing type assertion."}
+	// ErrParameterRequired TODO
+	ErrParameterRequired = Errno{Code: 10050, Message: "Input parameter required"}
+	// StartBiggerThanEndTime TODO
+	StartBiggerThanEndTime = Errno{Code: 10060, Message: "Start time is bigger than end time."}
+
+	// ErrValidation TODO
+	ErrValidation = Errno{Code: 20001, Message: "Validation failed."}
+	// ErrDatabase TODO
+	ErrDatabase = Errno{Code: 20002, Message: "Database error."}
+	// ErrToken TODO
+	ErrToken = Errno{Code: 20003, Message: "Error occurred while signing the JSON web token."}
+
+	// ErrEncrypt TODO
+	// user errors
+	ErrEncrypt = Errno{Code: 20101, Message: "Error occurred while encrypting the user password."}
+	// ErrUserNotFound TODO
+	ErrUserNotFound = Errno{Code: 20102, Message: "The user was not found."}
+	// ErrTokenInvalid TODO
+	ErrTokenInvalid = Errno{Code: 20103, Message: "The token was invalid."}
+	// ErrPasswordIncorrect TODO
+	ErrPasswordIncorrect = Errno{Code: 20104, Message: "The password was incorrect."}
+	// ErrDoNotHavePrivs TODO
+	ErrDoNotHavePrivs = Errno{Code: 20106, Message: "User doesn't have privs."}
+	// ErrUserIsEmpty TODO
+	ErrUserIsEmpty = Errno{Code: 20110, Message: "User can't be empty.", CNMessage: "user 不能为空!"}
+	// ErrAppNameIsEmpty TODO
+	ErrAppNameIsEmpty = Errno{Code: 20115, Message: "App name can't be empty.", CNMessage: "业务名不能为空!"}
+
+	// ErrCommonExecute TODO
+	ErrCommonExecute = Errno{Code: 20200, Message: "Error occurred while invoking execute method.",
+		CNMessage: "调用 execute 出错!"}
+
+	// ErrUserHaveNoProjectPriv TODO
+	ErrUserHaveNoProjectPriv = Errno{Code: 30000, Message: "User doesn't have project priv.", CNMessage: "没有 project 权限!"}
+
+	// ErrGcsBillNotFound TODO
+	// gcsbill errors
+	ErrGcsBillNotFound = Errno{Code: 40000, Message: "Gcs bill was not found.", CNMessage: "单据不存在!"}
+	// ErrGCSBillTypeEmpty TODO
+	ErrGCSBillTypeEmpty = Errno{Code: 40001, Message: "Gcs bill type can not be empty.", CNMessage: "单据类型不能为空!"}
+	// InvalidGCSBillType TODO
+	InvalidGCSBillType = Errno{Code: 40002, Message: "Invalid Gcs bill type.", CNMessage: "无效的 GCS 单据类型!"}
+	// InvalidAuditLevel TODO
+	InvalidAuditLevel = Errno{Code: 40003, Message: "Invalid Bill Audit level.", CNMessage: "无效的单据审核级别!"}
+
+	// CannotGetBillStatus TODO
+	CannotGetBillStatus = Errno{Code: 40004, Message: "Can't get bill status.", CNMessage: `无法获取单据状态`}
+	// ErrGCSBillnotAuditable TODO
+	ErrGCSBillnotAuditable = Errno{Code: 40005, Message: "Current GCS bill is not in audit status now.",
+		CNMessage: `当前单据不在“待审核”状态!`}
+	// ErrGCSBillNotInExecute TODO
+	ErrGCSBillNotInExecute = Errno{Code: 40006, Message: "Bill is not in execute status.", CNMessage: `当前单据不在“待执行”状态!`}
+	// ErrGCSBillAudit TODO
+	ErrGCSBillAudit = Errno{Code: 40007, Message: "Audit bill error.", CNMessage: `审核单据出错。`}
+
+	// ErrNotHaveBillCommitPriv TODO
+	ErrNotHaveBillCommitPriv = Errno{Code: 40008, Message: "User doesn't have bill commit priv", CNMessage: "用户没有提单权限!"}
+
+	// ErrGetGCSDoneBills TODO
+	ErrGetGCSDoneBills = Errno{Code: 40009, Message: "Error occurred while getting done bills.",
+		CNMessage: "获取个人已办事项出错!"}
+	// ErrBillAppIsEmpty TODO
+	ErrBillAppIsEmpty = Errno{Code: 40010, Message: "Gcs bill app can not be empty.", CNMessage: "单据的业务名不能为空!"}
+	// ErrGCSBillNoExecutePriv TODO
+	ErrGCSBillNoExecutePriv = Errno{Code: 40011, Message: "Only the applicant or a follower can execute the bill!",
+		CNMessage: "只有申请人或者关注人可以执行单据!"}
+	// ErrGetGCSBillModel TODO
+	ErrGetGCSBillModel = Errno{Code: 40012, Message: "Error occurred while getting bill info",
+		CNMessage: "获取 Bill 详情出错"}
+	// ErrGetGCSBillTypes TODO
+	ErrGetGCSBillTypes = Errno{Code: 40014, Message: "Error occurred while getting bill types",
+		CNMessage: "获取所有单据类型失败!"}
+	// ErrGCSBillCommit TODO
+	ErrGCSBillCommit = Err{Errno: Errno{Code: 40015, Message: "The bill can not be committed repeatedly!",
+		CNMessage: "单据不能被重复提交!"}}
+	// ErrInvokeBillCommit TODO
+	ErrInvokeBillCommit = Err{Errno: Errno{Code: 40016, Message: "Error occurred while committing gcs bills",
+		CNMessage: "单据提交时发生错误!"}}
+	// ErrInvokeBillExecute TODO
+	ErrInvokeBillExecute = Err{Errno: Errno{Code: 40017, Message: "Error occurred while executing gcs bills",
+		CNMessage: "单据执行时发生错误!"}}
+
+	// ErrGCSBillnotRollback TODO
+	ErrGCSBillnotRollback = Errno{Code: 40019, Message: "Bill is not auditable, it can not be rolled back.",
+		CNMessage: `非“待审核”单据不能被驳回!`}
+	// ErrGetGCSBills TODO
+	ErrGetGCSBills = Errno{Code: 40020, Message: "Error occurred while getting gcs bills", CNMessage: "获取单据失败!"}
+	// ErrCloneUnfinishedBills TODO
+	ErrCloneUnfinishedBills = Errno{Code: 40022, Message: "Error occurred while cloning unfinished gcs bills",
+		CNMessage: "不能克隆没有结束的单据!"}
+	// ErrFinishedBills TODO
+	ErrFinishedBills = Errno{Code: 40027, Message: "Error occurred while finishing gcs bills",
+		CNMessage: `设置单据为“完成”状态时失败!`}
+	// ErrBillHaveTerminated TODO
+	ErrBillHaveTerminated = Errno{Code: 40028, Message: "Bill have terminated!", CNMessage: `单据已“终止”!`}
+
+	// ErrNoStopPriv TODO
+	ErrNoStopPriv = Errno{Code: 40037, Message: "Don't have stop bill priv!", CNMessage: `用户没有“终止”单据权限!`}
+	// ErrGCSBillSave TODO
+	ErrGCSBillSave = Err{Errno: Errno{Code: 40042, Message: "Error occurred while saving gcs bills!",
+		CNMessage: "单据保存失败!"}}
+	// ErrBillIsNotUncommit TODO
+	ErrBillIsNotUncommit = Err{Errno: Errno{Code: 40043,
+		Message: "Bill phase is not v_uncommit before committing the bill!", CNMessage: "单据提交之前,单据状态不是\"未提交\"!"}}
+	// ErrBillPreCommit TODO
+	ErrBillPreCommit = Err{Errno: Errno{Code: 40046, Message: "Error occurred while invoking bill pre commit api:",
+		CNMessage: "调用单据的 PreCommit API 失败:"}}
+	// ErrBillAfterExecute TODO
+	ErrBillAfterExecute = Err{Errno: Errno{Code: 40050, Message: "Error occurred while invoking after execute api!",
+		CNMessage: "调用单据的 AfterExecute API 失败!"}}
+
+	// ErrTbBillInfoToBill TODO
+	ErrTbBillInfoToBill = Err{Errno: Errno{Code: 40055, Message: "Error occurred while transferring TbBillInfo to Bill!",
+		CNMessage: "转换 Bill Model 失败"}}
+
+	// ErrCreateGCSJob TODO
+	// job errors
+	ErrCreateGCSJob = Errno{Code: 40100, Message: "Error occurred while creating the gcs job.",
+		CNMessage: "创建 GCS Job 失败!"}
+	// ErrGetJobQueue TODO
+	ErrGetJobQueue = Errno{Code: 40101, Message: "Error occurred while getting the gcs job queue.",
+		CNMessage: "获取 job 失败 !"}
+	// ErrGetJobQueueNotFound TODO
+	ErrGetJobQueueNotFound = Errno{Code: 40102, Message: "Job Queue Not Found.", CNMessage: "Job 不存在!"}
+	// ErrDeleteJobQueue TODO
+	ErrDeleteJobQueue = Errno{Code: 40103, Message: "Error occurred while setting the jobQueue to be deleted.",
+		CNMessage: "删除 Job 失败!"}
+	// ErrJobIDConvert2Int TODO
+	ErrJobIDConvert2Int = Errno{Code: 40104, Message: "Error occurred while converting the jobID to int.",
+		CNMessage: "jobID 转换为int 出错!"}
+	// ErrSubjobIDConvert2Int TODO
+	ErrSubjobIDConvert2Int = Errno{Code: 40105, Message: "Error occurred while converting the subjob_id to int.",
+		CNMessage: "subjobID 转换为int 出错!"}
+
+	// ErrPutJobQueueParam TODO
+	ErrPutJobQueueParam = Errno{Code: 40106, Message: "Param errors while putting a new JobQueue.",
+		CNMessage: "创建 Job 时参数错误!"}
+	// ErrJobQueueInputParam TODO
+	ErrJobQueueInputParam = Errno{Code: 40107,
+		Message:   "Some parameters are required in EnJobQueue: app,name,input,tag_id",
+		CNMessage: "创建Job 时缺少下列参数:[app,name,input,tag_id]!"}
+	// ErrJobQueueV1InputParam TODO
+	ErrJobQueueV1InputParam = Errno{Code: 40107,
+		Message:   "Some parameters are required in putting JobQueue: [app,name,distributions,payload,user]",
+		CNMessage: "创建/修改 Job 时缺少下列参数:[app,name,distributions,payload,user]!"}
+	// ErrJobQueueDistribution TODO
+	ErrJobQueueDistribution = Errno{Code: 40108, Message: "JobQueue distributions format is wrong.",
+		CNMessage: "创建 JobQueue 时 distributions 格式不正确!"}
+	// ErrCheckJobQueue TODO
+	ErrCheckJobQueue = Errno{Code: 40109, Message: "Error occurred while checking JobQueue.",
+		CNMessage: "检查 JobQueue 出错!"}
+	// ErrJoqQueueIsNil TODO
+	ErrJoqQueueIsNil = Errno{Code: 40110, Message: "JobQueue is Nil", CNMessage: "返回的Job 内容为空!"}
+	// ErrCloneJoqQueues TODO
+	ErrCloneJoqQueues = Errno{Code: 40113, Message: "Error occurred while cloning jobQueues",
+		CNMessage: "克隆 jobQueues 出错!"}
+
+	// JobResultSuccess TODO
+	JobResultSuccess = Errno{Code: 0, Message: "success", CNMessage: "success"}
+	// JobResultRunning TODO
+	JobResultRunning = Errno{Code: 40114, Message: "running", CNMessage: "running"}
+	// JobResultFailed TODO
+	JobResultFailed = Errno{Code: 40115, Message: "fail", CNMessage: "fail"}
+	// JobResultOthers TODO
+	JobResultOthers = Errno{Code: 40116, Message: "other job status", CNMessage: "other job status"}
+
+	// ErrGetJobFeedbacks TODO
+	// JobFeedback
+	ErrGetJobFeedbacks = Errno{Code: 40210, Message: "Error occurred while getting the gcs job feedback.",
+		CNMessage: "获取 job feedback 信息失败!"}
+	// ErrCreateGCSJobFeedback TODO
+	ErrCreateGCSJobFeedback = Errno{Code: 40215, Message: "Error occurred while creating the gcs jobFeedback.",
+		CNMessage: "创建 GCS jobFeedback 失败!"}
+
+	// InvalidJobIDorSubjobID TODO
+	InvalidJobIDorSubjobID = Errno{Code: 40220, Message: "Invalid jobID or subJobID while getting the gcs job feedback.",
+		CNMessage: "jobID or subJobID 无效!"}
+
+	// ErrorJobNameBeEmpty TODO
+	// JobDef errors
+	ErrorJobNameBeEmpty = Errno{Code: 40300, Message: "JobName can not be empty.", CNMessage: "JobName 不能为空!"}
+	// ErrorGetJobDef TODO
+	ErrorGetJobDef = Errno{Code: 40302, Message: "Error occurred while getting the gcs job_def",
+		CNMessage: "获取 job_def 出现错误!"}
+
+	// ErrorGetJobBlob TODO
+	// JobBlob errors
+	ErrorGetJobBlob = Errno{Code: 40302, Message: "Error occurred while getting the gcs job_blob",
+		CNMessage: "获取 job_blob 出现错误!"}
+
+	// ErrorGetSubJobQueue TODO
+	// subjob errors
+	ErrorGetSubJobQueue = Errno{Code: 40800, Message: "Error occurred while getting the gcs subjob",
+		CNMessage: "获取 subjob 出现错误!"}
+	// ErrCreateSubJobQueue TODO
+	ErrCreateSubJobQueue = Errno{Code: 40801, Message: "Error occurred while creating the gcs subjobQueue.",
+		CNMessage: "创建 GCS subjobQueue 失败!"}
+	// ErrUpdateSubJobQueue TODO
+	ErrUpdateSubJobQueue = Errno{Code: 40802, Message: "Error occurred while updating the gcs subjobQueue.",
+		CNMessage: "更新 GCS subjobQueue 失败!"}
+
+	// SubJobUIDRequied TODO
+	SubJobUIDRequied = Errno{Code: 40804, Message: "Subjob uid is required!", CNMessage: "Subjob uid 是必填项.!"}
+	// ErrorUIDMustBeInt TODO
+	ErrorUIDMustBeInt = Errno{Code: 40808, Message: "Subjob uid must be int!", CNMessage: "Subjob uid 必须是 int 类型.!"}
+	// ErrSubjobQueueInputParam TODO
+	ErrSubjobQueueInputParam = Errno{Code: 40812,
+		Message: "Some parameters [JobID,Username,JobName,AtomjobList,JobInput] " +
+			"do not meet the demands in saving SubjobQueue",
+		CNMessage: "保存 SubjobQueue 时缺少下列参数:[JobID,Username,JobName,AtomjobList,JobInput]!"}
+	// ErrJobFeedbackInputParam TODO
+	ErrJobFeedbackInputParam = Errno{Code: 40815,
+		Message: "Some parameters do not meet the demands in saving JobFeedback", CNMessage: "保存 JobFeedback 时参数不满足要求。"}
+	// ErrGetGCSApps TODO
+	// gcs app errors
+	ErrGetGCSApps = Errno{Code: 40900, Message: "Error occurred while getting gcs apps", CNMessage: "获取 GCS App 出现错误!"}
+	// ErrGetCCApps TODO
+	ErrGetCCApps = Errno{Code: 40902, Message: "Error occurred while getting cc apps", CNMessage: "获取 App 出现错误!"}
+	// ErrGetProjects TODO
+	ErrGetProjects = Errno{Code: 40905, Message: "Error occurred while getting projects", CNMessage: "获取 projects 出现错误!"}
+
+	// ErrDBTransaction TODO
+	// model operation errors
+	ErrDBTransaction = Errno{Code: 50200, Message: "DB Transaction error.", CNMessage: "DB 事务发生错误!"}
+	// ErrModelFunction TODO
+	ErrModelFunction = Err{Errno: Errno{Code: 50201, Message: "Error occurred while invoking model function.",
+		CNMessage: "调用 DB model 方法发生错误!"}, Err: nil}
+
+	// ErrSaveFlowAuditLog TODO
+	ErrSaveFlowAuditLog = Errno{Code: 50203, Message: "Error occurred while saving Flow Audit Log.",
+		CNMessage: "存储单据审核日志记录出错!"}
+
+	// data handling errors
+
+	// ErrGetJSONArray is returned when getting a simplejson array fails.
+	ErrGetJSONArray = Errno{Code: 50300, Message: "Get simplejson Array error.", CNMessage: ""}
+	// ErrConvert2Map is returned when converting data to a map fails.
+	ErrConvert2Map = Errno{Code: 50301, Message: "Error occurred while converting the data to Map.",
+		CNMessage: "Error occurred while converting the data to Map."}
+	// ErrJSONMarshal is returned when marshaling data to JSON fails.
+	ErrJSONMarshal = Errno{Code: 50302, Message: "Error occurred while marshaling the data to JSON.",
+		CNMessage: "Error occurred while marshaling the data to JSON."}
+	// ErrReadEntity is returned when parsing the request parameters fails.
+	ErrReadEntity = Errno{Code: 50303, Message: "Error occurred while parsing the request parameter.",
+		CNMessage: "Error occurred while parsing the request parameter."}
+	// ErrJSONUnmarshal is returned when unmarshaling JSON into the data model fails.
+	ErrJSONUnmarshal = Errno{Code: 50304, Message: "Error occurred while unmarshaling the JSON to data model.",
+		CNMessage: "Error occurred while unmarshaling the JSON to data model."}
+	// ErrBytesToMap is returned when converting bytes to a map fails.
+	ErrBytesToMap = Errno{Code: 50307, Message: "Error occurred while converting bytes to map.",
+		CNMessage: "Error occurred while converting bytes to map."}
+
+	// user login and permission errors
+
+	// ErrUserIsNotDBA is returned when the user is not a DBA.
+	ErrUserIsNotDBA = Errno{Code: 50500, Message: "User is not a DBA."}
+	// ErrNoSaveAndCommitPriv is returned when the user lacks bill save and commit privileges.
+	ErrNoSaveAndCommitPriv = Errno{Code: 50502,
+		Message: "User doesn't have gcs bill save and commit privileges in this app.", CNMessage: "用户在当前 APP 上没有单据的保存和提交权限!"}
+	// ErrNoBillAduitPriv is returned when the user lacks bill audit privileges.
+	ErrNoBillAduitPriv = Errno{Code: 50504, Message: "User doesn't have gcs audit privileges in this app.",
+		CNMessage: "用户在当前 APP 上没有单据的审核权限!"}
+	// ErrUserNotHaveBillRollbackPriv is returned when the user lacks bill rollback privileges.
+	ErrUserNotHaveBillRollbackPriv = Errno{Code: 50506, Message: "User doesn't have gcs rollback privileges in this app.",
+		CNMessage: "用户在当前 APP 上没有单据的驳回权限!"}
+	// ErrUserHasNoPermission is returned when the user has no permission.
+	ErrUserHasNoPermission = Errno{Code: 50508, Message: "User has no permission.", CNMessage: "当前用户没有权限!"}
+	// ErrUserNotHaveBillClonePriv is returned when the user lacks bill clone privileges.
+	ErrUserNotHaveBillClonePriv = Errno{Code: 50510, Message: "User doesn't have gcs bill clone privileges in this app.",
+		CNMessage: "用户没有当前单据的克隆权限!"}
+	// ErrViewAppPriv is returned when the user lacks the privilege to view this app.
+	ErrViewAppPriv = Errno{Code: 50515, Message: "User has no privilege to view this app!",
+		CNMessage: "用户没有查看当前 APP 的权限!"}
+
+	// ErrInvokeAPI is returned when an API call fails.
+	ErrInvokeAPI = Errno{Code: 50601, Message: "Error occurred while invoking API", CNMessage: "调用 API 发生错误!"}
+
+	// alarm errors
+
+	// ErrSnedRTX is returned when sending an RTX message to the user fails.
+	ErrSnedRTX = Errno{Code: 50800, Message: "Error occurred while sending RTX message to user.",
+		CNMessage: "发送 RTX 消息出现错误!"}
+
+	// grant privileges errors
+
+	// ErrPswNotIdentical is returned when the password differs from existing account rules.
+	ErrPswNotIdentical = Errno{Code: 51000,
+		Message: "Password is not identical to the password of existing account rules; " +
+			"the same account should use the same password.",
+		CNMessage: "密码与已存在的账号规则中的密码不同,相同账号的密码需要保持一致!"}
+	// AccountRuleExisted is returned when the account rule already exists.
+	AccountRuleExisted = Errno{Code: 51001, Message: "Account rule of user on this db already exists",
+		CNMessage: "用户对此DB授权的账号规则已存在"}
+	// AccountExisted is returned when the account already exists.
+	AccountExisted = Errno{Code: 51002, Message: "Account already exists", CNMessage: "账号已存在"}
+	// AccountNotExisted is returned when the account does not exist.
+	AccountNotExisted = Errno{Code: 51003, Message: "Account does not exist", CNMessage: "账号不存在"}
+	// PasswordConsistentWithAccountName is returned when the password equals the account name.
+	PasswordConsistentWithAccountName = Errno{Code: 51019, Message: "Password should be different from account name",
+		CNMessage: "账号与密码不能相同"}
+	// PasswordOrAccountNameNull is returned when the password or account name is empty.
+	PasswordOrAccountNameNull = Errno{Code: 51020, Message: "Password or account name should not be empty",
+		CNMessage: "账号与密码不能为空"}
+	// AccountIdNull is returned when the account ID is empty.
+	AccountIdNull = Errno{Code: 51021, Message: "Account ID should not be empty",
+		CNMessage: "账号ID不能为空"}
+	// DbNameNull is returned when the database name is empty.
+	DbNameNull = Errno{Code: 51022, Message: "Database name should not be empty",
+		CNMessage: "数据库名称不能为空"}
+	// AccountRuleIdNull is returned when the account rule ID is empty.
+	AccountRuleIdNull = Errno{Code: 51022, Message: "Account rule should not be empty",
+		CNMessage: "账号规则ID不能为空"}
+	// PrivNull is returned when no privilege was chosen.
+	PrivNull = Errno{Code: 51022, Message: "No privilege was chosen", CNMessage: "未选择权限"}
+	// AccountRuleNotExisted is returned when the account rule does not exist.
+	AccountRuleNotExisted = Errno{Code: 51004, Message: "Account rule does not exist", CNMessage: "账号规则不存在"}
+	// OnlyOneDatabaseAllowed is returned when more than one database is supplied.
+	OnlyOneDatabaseAllowed = Errno{Code: 51005,
+		Message: "Only one database allowed, database name should not contain space", CNMessage: "只允许填写一个数据库,数据库名称不能包含空格"}
+	// ErrMysqlInstanceStruct is returned when the cluster is neither tendbha nor orphan.
+	ErrMysqlInstanceStruct = Errno{Code: 51006, Message: "Neither tendbha nor orphan cluster structure",
+		CNMessage: "不符合tendbha或者orphan的集群结构"}
+	// GenerateEncryptedPasswordErr is returned when generating the encrypted password fails.
+	GenerateEncryptedPasswordErr = Errno{Code: 51007, Message: "Error occurred while generating the encrypted password",
+		CNMessage: "创建账号,生成加密的密码时发生错误"}
+	// PasswordNotConsistent is returned when the new password differs from the stored one.
+	PasswordNotConsistent = Errno{Code: 51008,
+		Message:   "user already exists, but the new password is not consistent with the old password; they should be consistent",
+		CNMessage: "账号已存在,但是新密码与旧密码不一致,需要保持一致"}
+	// GrantPrivilegesFail is returned when granting privileges fails.
+	GrantPrivilegesFail = Errno{Code: 51009, Message: "Grant privileges failed", CNMessage: "授权执行失败"}
+	// GrantPrivilegesSuccess is returned when privileges are granted successfully.
+	GrantPrivilegesSuccess = Errno{Code: 0, Message: "Grant privileges succeeded", CNMessage: "授权执行成功"}
+	// GrantPrivilegesParameterCheckFail is returned when the grant-privileges parameter check fails.
+	GrantPrivilegesParameterCheckFail = Errno{Code: 51010, Message: "Parameter check of grant privileges failed",
+		CNMessage: "授权单据的参数检查失败"}
+	// ClonePrivilegesParameterCheckFail is returned when the clone-privileges parameter check fails.
+	ClonePrivilegesParameterCheckFail = Errno{Code: 51011, Message: "Parameter check of clone privileges failed",
+		CNMessage: "克隆权限单据的参数检查失败"}
+	// BkBizIdIsEmpty is returned when bk_biz_id is empty.
+	BkBizIdIsEmpty = Errno{Code: 51012, Message: "bk_biz_id can't be empty", CNMessage: "bk_biz_id不能为空"}
+	// ClonePrivilegesFail is returned when cloning privileges fails.
+	ClonePrivilegesFail = Errno{Code: 51013, Message: "Clone privileges failed", CNMessage: "克隆权限失败"}
+	// ClonePrivilegesCheckFail is returned when the clone-privileges check fails.
+	ClonePrivilegesCheckFail = Errno{Code: 51014, Message: "Clone privileges check failed", CNMessage: "克隆权限检查失败"}
+	// NoPrivilegesNothingToDo is returned when there are no privileges to clone.
+	NoPrivilegesNothingToDo = Errno{Code: 51015, Message: "no privileges, nothing to do", CNMessage: "没有权限需要克隆"}
+	// DomainNotExists is returned when the domain does not exist.
+	DomainNotExists = Errno{Code: 51016, Message: "domain does not exist", CNMessage: "域名不存在"}
+	// IpPortFormatError is returned when an address is not in 'ip:port' format.
+	IpPortFormatError = Errno{Code: 51017, Message: "address is not in 'ip:port' format",
+		CNMessage: "格式不是ip:port的格式"}
+	// InstanceNotExists is returned when the instance does not exist.
+	InstanceNotExists = Errno{Code: 51018, Message: "instance does not exist", CNMessage: "实例不存在"}
+	// CloudIdRequired is returned when bk_cloud_id is missing.
+	CloudIdRequired = Errno{Code: 51019, Message: "bk_cloud_id is required", CNMessage: "bk_cloud_id不能为空"}
+)
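+
+// A minimal usage sketch (assumption — the validator function and its
+// variables are hypothetical, not part of this package): handlers return the
+// predefined Errno values directly and let the HTTP layer translate them
+// into response codes and messages.
+//
+//	func checkGrantParams(dbName string) error {
+//		if dbName == "" {
+//			return errno.DbNameNull
+//		}
+//		if strings.Contains(dbName, " ") {
+//			return errno.OnlyOneDatabaseAllowed
+//		}
+//		return nil
+//	}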
diff --git a/dbm-services/mysql/db-priv/errno/errno.go b/dbm-services/mysql/db-priv/errno/errno.go
new file mode 100644
index 0000000000..d5d107291f
--- /dev/null
+++ b/dbm-services/mysql/db-priv/errno/errno.go
@@ -0,0 +1,133 @@
+// Package errno defines the error codes and bilingual (en_US/zh_CN) error
+// messages used by the db-priv service.
+package errno
+
+import (
+	"fmt"
+
+	"github.com/spf13/viper"
+)
+
+// Errno carries an error code together with an English and a Chinese message.
+type Errno struct {
+	Code      int
+	Message   string
+	CNMessage string
+}
+
+// lang is read once at package initialisation, so the configuration must be
+// loaded before this package is first used for the setting to take effect.
+var lang = viper.GetString("lang")
+
+// Error implements the error interface, returning the message in the
+// configured language (Chinese by default).
+func (err Errno) Error() string {
+	switch lang {
+	case "zh_CN":
+		return err.CNMessage
+	case "en_US":
+		return err.Message
+	default:
+		return err.CNMessage
+	}
+}
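+
+// Language selection sketch (assumption — the config key and values are the
+// ones consumed above): with `lang: en_US` in the service configuration,
+// Error() yields the English text; otherwise the Chinese text is returned.
+//
+//	var e error = Errno{Code: 50508, Message: "User has no permission.", CNMessage: "当前用户没有权限!"}
+//	fmt.Println(e.Error()) // "当前用户没有权限!" unless lang is "en_US"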
+
+// Addf appends a formatted message to the error text in the configured language.
+func (err Errno) Addf(format string, args ...interface{}) error {
+	return err.Add(fmt.Sprintf(format, args...))
+}
+
+// Add appends message to the error text in the configured language. The
+// receiver is a value, so the predefined Errno variables stay unchanged.
+func (err Errno) Add(message string) error {
+	switch lang {
+	case "en_US":
+		err.Message += message
+	default: // zh_CN and any unset value fall back to Chinese
+		err.CNMessage += message
+	}
+	return err
+}
+
+// AddBefore prepends message to the error text in the configured language.
+func (err Errno) AddBefore(message string) error {
+	switch lang {
+	case "en_US":
+		err.Message = message + err.Message
+	default: // zh_CN and any unset value fall back to Chinese
+		err.CNMessage = message + err.CNMessage
+	}
+	return err
+}
+
+// Err wraps an Errno together with the underlying low-level error.
+type Err struct {
+	Errno
+	Err error
+}
+
+// Add appends message to the error text in the configured language.
+func (err Err) Add(message string) error {
+	switch lang {
+	case "en_US":
+		err.Message += message
+	default: // zh_CN and any unset value fall back to Chinese
+		err.CNMessage += message
+	}
+	return err
+}
+
+// SetMsg replaces the English message and returns the modified copy.
+func (err Err) SetMsg(message string) error {
+	err.Message = message
+	return err
+}
+
+// SetCNMsg replaces the Chinese message and returns the modified copy.
+func (err Err) SetCNMsg(cnMessage string) error {
+	err.CNMessage = cnMessage
+	return err
+}
+
+// Addf appends a formatted message to the error text in the configured language.
+func (err Err) Addf(format string, args ...interface{}) error {
+	return err.Add(fmt.Sprintf(format, args...))
+}
+
+// DecodeErr extracts the error code and message from an error. A nil error
+// maps to OK; unknown error types map to InternalServerError.
+func DecodeErr(err error) (int, string) {
+	if err == nil {
+		return OK.Code, OK.Message
+	}
+
+	CN := true // respond with the Chinese message by default
+	switch typed := err.(type) {
+	case Err:
+		if CN {
+			return typed.Code, typed.CNMessage
+		}
+		return typed.Code, typed.Message
+	case Errno:
+		if CN {
+			return typed.Code, typed.CNMessage
+		}
+		return typed.Code, typed.Message
+	}
+	fmt.Printf("%s", err.Error())
+	return InternalServerError.Code, err.Error()
+}
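+
+// DecodeErr usage sketch (assumption — the gin handler shape below is
+// hypothetical; gin itself is a declared dependency of this module):
+//
+//	func respond(c *gin.Context, data interface{}, err error) {
+//		code, msg := errno.DecodeErr(err)
+//		c.JSON(http.StatusOK, gin.H{"code": code, "msg": msg, "data": data})
+//	}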
diff --git a/dbm-services/mysql/db-priv/go.mod b/dbm-services/mysql/db-priv/go.mod
new file mode 100644
index 0000000000..8f572ee593
--- /dev/null
+++ b/dbm-services/mysql/db-priv/go.mod
@@ -0,0 +1,74 @@
+module dbm-services/mysql/priv-service
+
+go 1.19
+
+require (
+	github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
+	github.com/gin-gonic/gin v1.9.0
+	github.com/golang-migrate/migrate/v4 v4.15.2
+	github.com/google/go-querystring v1.1.0
+	github.com/jinzhu/gorm v1.9.16
+	github.com/jmoiron/sqlx v1.3.5
+	github.com/pingcap/parser v3.1.2+incompatible
+	github.com/pingcap/tidb v0.0.0-20190108123336-c68ee7318319
+	github.com/pkg/errors v0.9.1
+	github.com/spf13/pflag v1.0.5
+	github.com/spf13/viper v1.15.0
+	golang.org/x/exp v0.0.0-20230418202329-0354be287a23
+	gopkg.in/natefinch/lumberjack.v2 v2.2.1
+)
+
+require (
+	github.com/BurntSushi/toml v1.2.1 // indirect
+	github.com/bytedance/sonic v1.8.8 // indirect
+	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
+	github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
+	github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/gin-contrib/sse v0.1.0 // indirect
+	github.com/go-ole/go-ole v1.2.6 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/go-playground/validator/v10 v10.12.0 // indirect
+	github.com/go-sql-driver/mysql v1.7.1 // indirect
+	github.com/goccy/go-json v0.10.2 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/jinzhu/inflection v1.0.0 // indirect
+	github.com/jinzhu/now v1.1.5 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
+	github.com/leodido/go-urn v1.2.3 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/mattn/go-isatty v0.0.18 // indirect
+	github.com/mattn/go-sqlite3 v1.14.16 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.7 // indirect
+	github.com/pingcap/errors v0.11.5-0.20210425183316-da1aaba5fb63 // indirect
+	github.com/pingcap/tipb v0.0.0-20181012112600-11e33c750323 // indirect
+	github.com/prometheus/client_golang v1.14.0 // indirect
+	github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
+	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
+	github.com/sirupsen/logrus v1.9.0 // indirect
+	github.com/spf13/afero v1.9.5 // indirect
+	github.com/spf13/cast v1.5.0 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/subosito/gotenv v1.4.2 // indirect
+	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
+	github.com/ugorji/go/codec v1.2.11 // indirect
+	github.com/yusufpapurcu/wmi v1.2.2 // indirect
+	go.uber.org/atomic v1.9.0 // indirect
+	golang.org/x/arch v0.3.0 // indirect
+	golang.org/x/crypto v0.8.0 // indirect
+	golang.org/x/net v0.9.0 // indirect
+	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/dbm-services/mysql/db-priv/go.sum b/dbm-services/mysql/db-priv/go.sum
new file mode 100644
index 0000000000..42005ff57a
--- /dev/null
+++ b/dbm-services/mysql/db-priv/go.sum
@@ -0,0 +1,2063 @@
+bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
+cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
+cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
+cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
+cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
+cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
+cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/spanner v1.28.0/go.mod h1:7m6mtQZn/hMbMfx62ct5EWrGND4DNqkXyrmBPRS+OJo=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
+github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
+github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
+github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
+github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
+github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY=
+github.com/apache/arrow/go/arrow v0.0.0-20211013220434-5962184e7a30/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ=
+github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0=
+github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
+github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU=
+github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw=
+github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM=
+github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.4/go.mod h1:Ex7XQmbFmgFHrjUX6TN3mApKW5Hglyga+F7wZHTtYhA=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.2/go.mod h1:np7TMuJNT83O0oDOSF8i4dF3dvGqA6hPYYo6YYkzgRA=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.16.1/go.mod h1:CQe/KvWV1AqRc65KqeJjrLzr5X2ijnFTTVzJW0VBRCI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo=
+github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk=
+github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs=
+github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g=
+github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/blacktear23/go-proxyprotocol v0.0.0-20171102103907-62e368e1c470/go.mod h1:VKt7CNAQxpFpSDz3sXyj9hY/GbVsQCr0sB3w59nE7lU=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
+github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
+github.com/bytedance/sonic v1.8.8 h1:Kj4AYbZSeENfyXicsYppYKO0K2YWab+i2UTSY7Ukz9Q=
+github.com/bytedance/sonic v1.8.8/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
+github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w=
+github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
+github.com/containerd/containerd v1.6.1 h1:oa2uY0/0G+JX4X7hpGCYvkp9FjUancz56kSNnb1sG3o=
+github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/coreos/bbolt v1.3.0/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
+github.com/cznic/mathutil v0.0.0-20181021201202-eba54fb065b7/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
+github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso=
+github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
+github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
+github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8 h1:LpMLYGyy67BoAFGda1NeOBQwqlv7nUXpm+rIVHGxZZ4=
+github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8=
+github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dhui/dktest v0.3.10 h1:0frpeeoM9pHouHjhLeZDuDTJ0PqjDTrycaHaMmkJAo8=
+github.com/dhui/dktest v0.3.10/go.mod h1:h5Enh0nG3Qbo9WjNFRrwmKUaePEBhXMOygbz3Ww7Sz0=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
+github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.13+incompatible h1:5s7uxnKZG+b8hYWlPYUi6x1Sjpq2MSt96d15eLZeHyw=
+github.com/docker/docker v20.10.13+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
+github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y=
+github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
+github.com/etcd-io/gofail v0.0.0-20180808172546-51ce9a71510a/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
+github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8=
+github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI=
+github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA=
+github.com/go-sql-driver/mysql v0.0.0-20170715192408-3955978caca4/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
+github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
+github.com/gogo/protobuf v0.0.0-20180717141946-636bf0302bc9/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-migrate/migrate/v4 v4.15.2 h1:vU+M05vs6jWHKDdmE1Ecwj0BznygFc4QsdRe2E/L7kc=
+github.com/golang-migrate/migrate/v4 v4.15.2/go.mod h1:f2toGLkYqD3JH+Todi4aZ2ZdbeUNx4sIwiOK96rE9Lw=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v0.0.0-20180814211427-aa810b61a9c7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
+github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.5.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk=
+github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
+github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.7/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
+github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po=
+github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ=
+github.com/jackc/pgtype v1.6.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA=
+github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o=
+github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg=
+github.com/jackc/pgx/v4 v4.10.1/go.mod h1:QlrWebbs3kqEZPHCTGyxecvzG6tvIsYu+A5b1raylkA=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jinzhu/gorm v1.9.16 h1:+IyIjPEABKRpsu/F8OvDPy9fyQlgsg2luMV2ZIH5i5o=
+github.com/jinzhu/gorm v1.9.16/go.mod h1:G3LB3wezTOWM2ITLzPxEXgSkOXAntiLHS7UdBefADcs=
+github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
+github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
+github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
+github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
+github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
+github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5 h1:2U0HzY8BJ8hVwDKIzp7y4voR9CX/nvcfymLmg2UiOio=
+github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
+github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4=
+github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA=
+github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E=
+github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
+github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
+github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/myesui/uuid v1.0.0/go.mod h1:2CDfNgU0LR8mIdO8vdWd8i9gWWxLlcoIGGpSNgafq84=
+github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI=
+github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7 h1:7KAv7KMGTTqSmYZtNdcNTgsos+vFzULLwyElndwn+5c=
+github.com/ngaut/pools v0.0.0-20180318154953-b7bc8c42aac7/go.mod h1:iWMfgwqYW+e8n5lC/jjNEhwcjbRDpl5NT7n2h+4UNcI=
+github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef h1:K0Fn+DoFqNqktdZtdV3bPQ/0cuYh2H4rkg0tytX/07k=
+github.com/ngaut/sync2 v0.0.0-20141008032647-7a24ed77b2ef/go.mod h1:7WjlapSfwQyo6LNmIvEWzsW1hbBQfpUO4JWnuQRmva8=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opentracing/basictracer-go v1.0.0 h1:YyUAhaEfjoWXclZVJ9sGoNct7j4TVk7lZWlQw5UXuoo=
+github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
+github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us=
+github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
+github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
+github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
+github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg=
+github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ=
+github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
+github.com/pingcap/errors v0.11.5-0.20210425183316-da1aaba5fb63 h1:+FZIDR/D97YOPik4N4lPDaUcLDF/EQPogxtlHB2ZZRM=
+github.com/pingcap/errors v0.11.5-0.20210425183316-da1aaba5fb63/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg=
+github.com/pingcap/gofail v0.0.0-20181217135706-6a951c1e42c3/go.mod h1:DazNTg0PTldtpsQiT9I5tVJwV1onHMKBBgXzmJUlMns=
+github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e h1:P73/4dPCL96rGrobssy1nVy2VaVpNCuLpCbr+FEaTA8=
+github.com/pingcap/goleveldb v0.0.0-20171020122428-b9ff6c35079e/go.mod h1:O17XtbryoCJhkKGbT62+L2OlrniwqiGLSqrmdHCMzZw=
+github.com/pingcap/kvproto v0.0.0-20181203065228-c14302da291c h1:Qf5St5XGwKgKQLar9lEXoeO0hJMVaFBj3JqvFguWtVg=
+github.com/pingcap/kvproto v0.0.0-20181203065228-c14302da291c/go.mod h1:Ja9XPjot9q4/3JyCZodnWDGNXt4pKemhIYCvVJM7P24=
+github.com/pingcap/parser v0.0.0-20190108044100-02812c3c22e7/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA=
+github.com/pingcap/parser v3.1.2+incompatible h1:ZAtv2VBZitECpaHshSIp1bkBhEqJYerw7nO/HYsn8MM=
+github.com/pingcap/parser v3.1.2+incompatible/go.mod h1:1FNvfp9+J0wvc4kl8eGNh7Rqrxveg15jJoWo/a0uHwA=
+github.com/pingcap/pd v2.1.0-rc.4+incompatible h1:/buwGk04aHO5odk/+O8ZOXGs4qkUjYTJ2UpCJXna8NE=
+github.com/pingcap/pd v2.1.0-rc.4+incompatible/go.mod h1:nD3+EoYes4+aNNODO99ES59V83MZSI+dFbhyr667a0E=
+github.com/pingcap/tidb v0.0.0-20190108123336-c68ee7318319 h1:ltRU5YUxYpW29ywVKnFXIRRTnY6r2cYxauB79L5gU2E=
+github.com/pingcap/tidb v0.0.0-20190108123336-c68ee7318319/go.mod h1:qXpdYNt83vgSegvc/TNcxKGiAo4Pa4EtIJl0ka7yGXE=
+github.com/pingcap/tidb-tools v2.1.3-0.20190104033906-883b07a04a73+incompatible h1:Ba48wwPwPq5hd1kkQpgua49dqB5cthC2zXVo7fUUDec=
+github.com/pingcap/tidb-tools v2.1.3-0.20190104033906-883b07a04a73+incompatible/go.mod h1:XGdcy9+yqlDSEMTpOXnwf3hiTeqrV6MN/u1se9N8yIM=
+github.com/pingcap/tipb v0.0.0-20170310053819-1043caee48da/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI=
+github.com/pingcap/tipb v0.0.0-20181012112600-11e33c750323 h1:mRKKzRjDNaUNPnAkPAHnRqpNmwNWBX1iA+hxlmvQ93I=
+github.com/pingcap/tipb v0.0.0-20181012112600-11e33c750323/go.mod h1:RtkHW8WbcNxj8lsbzjaILci01CtYnYbIkQhjyZWrWVI=
+github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
+github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
+github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
+github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
+github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
+github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
+github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/shirou/gopsutil v2.18.10+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
+github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
+github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
+github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/shurcooL/vfsgen v0.0.0-20181020040650-a97a25d856ca/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
+github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
+github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
+github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/struCoder/pidusage v0.1.2/go.mod h1:pWBlW3YuSwRl6h7R5KbvA4N8oOqe9LjaKW5CwT1SPjI=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
+github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU=
+github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
+github.com/twinj/uuid v1.0.0 h1:fzz7COZnDrXGTAOHGuUGYd6sG+JMq+AoE7+Jlu0przk=
+github.com/twinj/uuid v1.0.0/go.mod h1:mMgcE1RHFUFqe5AfiwlINXisXfDGro23fWdPUfOMjRY=
+github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
+github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
+github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
+github.com/uber/jaeger-client-go v2.15.0+incompatible h1:NP3qsSqNxh8VYr956ur1N/1C1PjvOJnJykCzcD5QHbk=
+github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-lib v1.5.0 h1:OHbgr8l656Ub3Fw5k9SWnBfIEwvoHQ+W2y+Aa9D1Uyo=
+github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/ugorji/go/codec v0.0.0-20181127175209-856da096dbdf/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
+github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
+github.com/unrolled/render v0.0.0-20180914162206-b9786414de4d/go.mod h1:tu82oB5W2ykJRVioYsB+IQKcft7ryBr7w12qMBUPyXg=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs=
+github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
+github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
+github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
+github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
+gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
+go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
+go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
+go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
+go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
+go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
+go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE=
+go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
+go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
+go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
+go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
+go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
+go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
+go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
+golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230418202329-0354be287a23 h1:4NKENAGIctmZYLK9W+X1kDK8ObBFqOSCJM6WE7CvkJY=
+golang.org/x/exp v0.0.0-20230418202329-0354be287a23/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190225153610-fe579d43d832/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220111093109-d55c255bac03/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.0.0-20171214130843-f21a4dfb5e38/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181008205924-a2b3f7f249e9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
+gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
+google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
+google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
+google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
+google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
+google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
+google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20181004005441-af9cb2a35e7f/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
+google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U=
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
+google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
+google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
+google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
+google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
+google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY=
+google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
+google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/stretchr/testify.v1 v1.2.2/go.mod h1:QI5V/q6UbPmuhtm10CaFZxED9NreB8PnFYN9JcR6TxU=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg=
+gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
+gorm.io/gorm v1.21.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
+gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
+gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ=
+honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
+k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
+k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
+k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs=
+k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
+k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
+k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U=
+k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
+k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
+k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
+k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ=
+k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
+k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
+k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
+k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y=
+k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
+k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
+k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
+k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
+k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI=
+k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
+k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
+k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
+k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
+k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg=
+modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878=
+modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo=
+modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8=
+modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw=
+modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8=
+modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
+modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
+modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM=
+modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
+modernc.org/libc v1.9.5/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w=
+modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8=
+modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
+modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
+modernc.org/memory v1.0.4/go.mod h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc=
+modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
+modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY=
+modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k=
+modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs=
+modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
+modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo=
+modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
+modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA=
+modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4 h1:VO9oZbbkvTwqLimlQt15QNdOOBArT2dw/bvzsMZBiqQ=
+sourcegraph.com/sourcegraph/appdash v0.0.0-20180531100431-4c381bd170b4/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
+sourcegraph.com/sourcegraph/appdash-data v0.0.0-20151005221446-73f23eafcf67/go.mod h1:L5q+DGLGOQFpo1snNEkLOJT2d1YTW66rWNzatr3He1k=
diff --git a/dbm-services/mysql/db-priv/handler/account.go b/dbm-services/mysql/db-priv/handler/account.go
new file mode 100644
index 0000000000..09bba0c342
--- /dev/null
+++ b/dbm-services/mysql/db-priv/handler/account.go
@@ -0,0 +1,117 @@
+package handler
+
+import (
+	"dbm-services/mysql/priv-service/errno"
+	"dbm-services/mysql/priv-service/service"
+	"encoding/json"
+	"io/ioutil"
+
+	"github.com/gin-gonic/gin"
+	"golang.org/x/exp/slog"
+)
+
+// GetAccount queries accounts
+func (m *PrivService) GetAccount(c *gin.Context) {
+	slog.Info("do GetAccount!")
+	var input service.AccountPara
+
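+	// c.Request.Body is an io.ReadCloser and can only be consumed once, so read it in full up front.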
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err := json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	accounts, count, err := input.GetAccount()
+	SendResponse(c, err, ListResponse{
+		Count: count,
+		Items: accounts,
+	})
+	return
+}
+
+// AddAccount creates a new account
+func (m *PrivService) AddAccount(c *gin.Context) {
+	slog.Info("do AddAccount!")
+	var input service.AccountPara
+
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err := json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
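+	// The raw JSON body is passed through to the service layer unchanged, in addition to the parsed input.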
+	err = input.AddAccount(string(body))
+	if err != nil {
+		SendResponse(c, err, nil)
+		return
+	}
+	SendResponse(c, nil, nil)
+	return
+}
+
+// DeleteAccount deletes an account
+func (m *PrivService) DeleteAccount(c *gin.Context) {
+	slog.Info("do DeleteAccount!")
+
+	var input service.AccountPara
+
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err := json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	err = input.DeleteAccount(string(body))
+	SendResponse(c, err, nil)
+	return
+}
+
+// ModifyAccount changes an account's password
+func (m *PrivService) ModifyAccount(c *gin.Context) {
+
+	slog.Info("do ModifyAccount!")
+	var input service.AccountPara
+
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err := json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	err = input.ModifyAccountPassword(string(body))
+
+	if err != nil {
+		SendResponse(c, err, nil)
+		return
+	}
+	SendResponse(c, nil, nil)
+	return
+}
diff --git a/dbm-services/mysql/db-priv/handler/account_rule.go b/dbm-services/mysql/db-priv/handler/account_rule.go
new file mode 100644
index 0000000000..3392b1bdcd
--- /dev/null
+++ b/dbm-services/mysql/db-priv/handler/account_rule.go
@@ -0,0 +1,117 @@
+package handler
+
+import (
+	"dbm-services/mysql/priv-service/errno"
+	"dbm-services/mysql/priv-service/service"
+	"encoding/json"
+	"io/ioutil"
+
+	"github.com/gin-gonic/gin"
+	"golang.org/x/exp/slog"
+)
+
+// GetAccountRuleList queries account rules
+func (m *PrivService) GetAccountRuleList(c *gin.Context) {
+	slog.Info("do GetAccountRuleList!")
+
+	var input service.BkBizId
+
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err := json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	accountRuleList, count, err := input.QueryAccountRule()
+	SendResponse(c, err, ListResponse{
+		Count: count,
+		Items: accountRuleList,
+	})
+	return
+}
+
+// AddAccountRule adds an account rule
+func (m *PrivService) AddAccountRule(c *gin.Context) {
+
+	slog.Info("do AddAccountRule!")
+	var input service.AccountRulePara
+
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err := json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	err = input.AddAccountRule(string(body))
+	if err != nil {
+		SendResponse(c, err, nil)
+		return
+	}
+	SendResponse(c, nil, nil)
+	return
+}
+
+// DeleteAccountRule deletes an account rule
+func (m *PrivService) DeleteAccountRule(c *gin.Context) {
+	slog.Info("do DeleteAccountRule!")
+
+	var input service.DeleteAccountRuleById
+
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err := json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	err = input.DeleteAccountRule(string(body))
+	SendResponse(c, err, nil)
+	return
+}
+
+// ModifyAccountRule modifies an account rule, updating its db name and privileges
+func (m *PrivService) ModifyAccountRule(c *gin.Context) {
+	slog.Info("do ModifyAccountRule!")
+	var input service.AccountRulePara
+
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err := json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	err = input.ModifyAccountRule(string(body))
+	if err != nil {
+		SendResponse(c, err, nil)
+		return
+	}
+	SendResponse(c, nil, nil)
+	return
+}
diff --git a/dbm-services/mysql/db-priv/handler/add_priv.go b/dbm-services/mysql/db-priv/handler/add_priv.go
new file mode 100644
index 0000000000..31c241ea13
--- /dev/null
+++ b/dbm-services/mysql/db-priv/handler/add_priv.go
@@ -0,0 +1,90 @@
+package handler
+
+import (
+	"dbm-services/mysql/priv-service/errno"
+	"dbm-services/mysql/priv-service/service"
+	"encoding/json"
+	"io/ioutil"
+
+	"github.com/gin-gonic/gin"
+	"golang.org/x/exp/slog"
+)
+
+// AddPrivDryRun pre-checks a privilege grant that uses account rules, without applying it
+func (m *PrivService) AddPrivDryRun(c *gin.Context) {
+	slog.Info("do AddPrivDryRun!")
+
+	var (
+		input    service.PrivTaskPara
+		taskPara service.PrivTaskPara
+	)
+
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err := json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	taskpara, err = input.AddPrivDryRun()
+	if err != nil {
+		SendResponse(c, err, nil)
+		return
+	}
+	SendResponse(c, err, taskpara)
+}
+
+// AddPriv grants privileges based on account rules
+func (m *PrivService) AddPriv(c *gin.Context) {
+	slog.Info("do AddPriv!")
+
+	var input service.PrivTaskPara
+
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err = json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	err = input.AddPriv(string(body))
+	SendResponse(c, err, nil)
+}
+
+// AddPrivWithoutAccountRule grants on a mysql instance without an account-rule template. The endpoint is not called by the frontend; it is designed for backend services, and granting through it is discouraged.
+func (m *PrivService) AddPrivWithoutAccountRule(c *gin.Context) {
+	slog.Info("do AddPrivWithoutAccountRule!")
+
+	var input service.AddPrivWithoutAccountRule
+
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err = json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	err = input.AddPrivWithoutAccountRule(string(body))
+	SendResponse(c, err, nil)
+}
diff --git a/dbm-services/mysql/db-priv/handler/clone_client_priv.go b/dbm-services/mysql/db-priv/handler/clone_client_priv.go
new file mode 100644
index 0000000000..1336143727
--- /dev/null
+++ b/dbm-services/mysql/db-priv/handler/clone_client_priv.go
@@ -0,0 +1,59 @@
+package handler
+
+import (
+	"dbm-services/mysql/priv-service/errno"
+	"dbm-services/mysql/priv-service/service"
+	"encoding/json"
+	"io/ioutil"
+
+	"github.com/gin-gonic/gin"
+	"golang.org/x/exp/slog"
+)
+
+// CloneClientPrivDryRun pre-checks a client privilege clone
+func (m *PrivService) CloneClientPrivDryRun(c *gin.Context) {
+	slog.Info("do CloneClientPrivDryRun!")
+
+	var input service.CloneClientPrivParaList
+
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err = json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	err = input.CloneClientPrivDryRun()
+	SendResponse(c, err, nil)
+}
+
+// CloneClientPriv clones the privileges of a client
+func (m *PrivService) CloneClientPriv(c *gin.Context) {
+	slog.Info("do CloneClientPriv!")
+
+	var input service.CloneClientPrivPara
+
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err = json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	err = input.CloneClientPriv(string(body))
+	SendResponse(c, err, nil)
+}
diff --git a/dbm-services/mysql/db-priv/handler/clone_instance_priv.go b/dbm-services/mysql/db-priv/handler/clone_instance_priv.go
new file mode 100644
index 0000000000..51670a7f33
--- /dev/null
+++ b/dbm-services/mysql/db-priv/handler/clone_instance_priv.go
@@ -0,0 +1,59 @@
+package handler
+
+import (
+	"dbm-services/mysql/priv-service/errno"
+	"dbm-services/mysql/priv-service/service"
+	"encoding/json"
+	"io/ioutil"
+
+	"github.com/gin-gonic/gin"
+	"golang.org/x/exp/slog"
+)
+
+// CloneInstancePrivDryRun pre-checks an instance privilege clone
+func (m *PrivService) CloneInstancePrivDryRun(c *gin.Context) {
+	slog.Info("do CloneInstancePrivDryRun!")
+
+	var input service.CloneInstancePrivParaList
+
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err = json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	err = input.CloneInstancePrivDryRun()
+	SendResponse(c, err, nil)
+}
+
+// CloneInstancePriv clones the privileges of an instance
+func (m *PrivService) CloneInstancePriv(c *gin.Context) {
+	slog.Info("do CloneInstancePriv!")
+
+	var input service.CloneInstancePrivPara
+
+	body, err := ioutil.ReadAll(c.Request.Body)
+	if err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	if err = json.Unmarshal(body, &input); err != nil {
+		slog.Error("msg", err)
+		SendResponse(c, errno.ErrBind, err)
+		return
+	}
+
+	err = input.CloneInstancePriv(string(body))
+	SendResponse(c, err, nil)
+}
diff --git a/dbm-services/mysql/db-priv/handler/handler.go b/dbm-services/mysql/db-priv/handler/handler.go
new file mode 100644
index 0000000000..ec7fe05acc
--- /dev/null
+++ b/dbm-services/mysql/db-priv/handler/handler.go
@@ -0,0 +1,2 @@
+// Package handler implements the HTTP handlers of the mysql privilege service
+package handler
diff --git a/dbm-services/mysql/db-priv/handler/public_key.go b/dbm-services/mysql/db-priv/handler/public_key.go
new file mode 100644
index 0000000000..e93d0d5d5e
--- /dev/null
+++ b/dbm-services/mysql/db-priv/handler/public_key.go
@@ -0,0 +1,53 @@
+package handler
+
+import (
+	"dbm-services/mysql/priv-service/errno"
+	"io/ioutil"
+	"net/http"
+	"os"
+
+	"github.com/gin-gonic/gin"
+	"golang.org/x/exp/slog"
+)
+
+// GetPubKey returns the public key used to encrypt an account's plaintext password, so that the password is not exposed in transit.
+func (m *PrivService) GetPubKey(c *gin.Context) {
+	slog.Info("do GetPubKey!")
+	file, err := os.Open("./pubkey.pem")
+	if err != nil {
+		SendResponse(c, err, nil)
+		return
+	}
+	defer file.Close()
+	content, err := ioutil.ReadAll(file)
+	if err != nil {
+		SendResponse(c, err, nil)
+		return
+	}
+	SendResponse(c, err, string(content))
+}
+
+// SendResponse writes the unified JSON response of code, message and data
+func SendResponse(c *gin.Context, err error, data interface{}) {
+	code, message := errno.DecodeErr(err)
+
+	c.JSON(http.StatusOK, Response{
+		Code:    code,
+		Message: message,
+		Data:    data,
+	})
+}
+
+// Response is the unified API response body
+type Response struct {
+	Code    int         `json:"code"`
+	Message string      `json:"message"`
+	Data    interface{} `json:"data"`
+}
+
+// ListResponse is the unified response body for list queries
+type ListResponse struct {
+	Count int64       `json:"count"`
+	Items interface{} `json:"items"`
+}
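+
+// A response serialized by SendResponse has the shape sketched below. The
+// concrete code values come from errno.DecodeErr; 0 as the success code is
+// an assumption for illustration:
+//
+//	{
+//	    "code": 0,
+//	    "message": "OK",
+//	    "data": {"count": 1, "items": [...]}
+//	}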
diff --git a/dbm-services/mysql/db-priv/handler/register_routes.go b/dbm-services/mysql/db-priv/handler/register_routes.go
new file mode 100644
index 0000000000..a2a39ba302
--- /dev/null
+++ b/dbm-services/mysql/db-priv/handler/register_routes.go
@@ -0,0 +1,59 @@
+package handler
+
+import (
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+)
+
+// PrivService is the MySQL privilege service
+type PrivService struct{}
+
+/*
+	a. An account rule consists of (1) an account plus its password and (2) an accessible db plus privileges.
+	b. "business + account + accessible db" must be unique.
+	c. The database of a rule may be a pattern such as db% or a literal name such as db1; multiple names joined by punctuation, e.g. "db%,test", are not allowed.
+	d. Account rules can be created, deleted, modified and queried without affecting grants already applied on mysql instances.
+	e. Applying an account rule (executing the grant) must not change the db scope or the password of existing accounts. At submission time it is checked that, if the instance already has this account (whether single-node or master/slave), the rule's password matches the existing password and the rule's db scope has no containment relationship with the existing scope; privileges may differ.
+*/
+
+// Routes is the list of service endpoints
+func (m *PrivService) Routes() []*gin.RouteInfo {
+	return []*gin.RouteInfo{
+		// accounts
+		{Method: http.MethodPost, Path: "add_account", HandlerFunc: m.AddAccount},
+		{Method: http.MethodPost, Path: "get_account", HandlerFunc: m.GetAccount},
+		{Method: http.MethodPost, Path: "modify_account", HandlerFunc: m.ModifyAccount},
+		{Method: http.MethodPost, Path: "delete_account", HandlerFunc: m.DeleteAccount},
+
+		// account rules
+		{Method: http.MethodPost, Path: "get_account_rule_list", HandlerFunc: m.GetAccountRuleList},
+		{Method: http.MethodPost, Path: "add_account_rule", HandlerFunc: m.AddAccountRule},
+		{Method: http.MethodPost, Path: "delete_account_rule", HandlerFunc: m.DeleteAccountRule},
+		{Method: http.MethodPost, Path: "modify_account_rule", HandlerFunc: m.ModifyAccountRule},
+
+		// grants
+		{Method: http.MethodPost, Path: "add_priv_dry_run", HandlerFunc: m.AddPrivDryRun},
+		{Method: http.MethodPost, Path: "add_priv", HandlerFunc: m.AddPriv},
+		{Method: http.MethodPost, Path: "add_priv_without_account_rule", HandlerFunc: m.AddPrivWithoutAccountRule},
+
+		// privilege cloning between instances
+		{Method: http.MethodPost, Path: "clone_instance_priv_dry_run", HandlerFunc: m.CloneInstancePrivDryRun},
+		{Method: http.MethodPost, Path: "clone_instance_priv", HandlerFunc: m.CloneInstancePriv},
+
+		// privilege cloning between clients
+		{Method: http.MethodPost, Path: "clone_client_priv_dry_run", HandlerFunc: m.CloneClientPrivDryRun},
+		{Method: http.MethodPost, Path: "clone_client_priv", HandlerFunc: m.CloneClientPriv},
+
+		// fetch the public key used to encrypt passwords in transit
+		{Method: http.MethodPost, Path: "pub_key", HandlerFunc: m.GetPubKey},
+	}
+}
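+
+// A minimal sketch of calling one of the endpoints above; all endpoints are
+// POST with a JSON body, and the listen address here is an assumption taken
+// from the http.listenAddress config key:
+//
+//	curl -s -X POST http://127.0.0.1:8000/priv/get_account \
+//	    -H 'Content-Type: application/json' \
+//	    -d '{"bk_biz_id": 1}'
+//
+// The reply is the unified Response, e.g. {"code":0,"message":"OK","data":[...]}.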
+
+// RegisterRoutes mounts the given routes under the group prefix
+func RegisterRoutes(router *gin.Engine, group string, routesInfo []*gin.RouteInfo) {
+	r := router.Group(group)
+	for _, route := range routesInfo {
+		r.Handle(route.Method, route.Path, route.HandlerFunc)
+	}
+}
diff --git a/dbm-services/mysql/db-priv/main.go b/dbm-services/mysql/db-priv/main.go
new file mode 100644
index 0000000000..f168abca4b
--- /dev/null
+++ b/dbm-services/mysql/db-priv/main.go
@@ -0,0 +1,109 @@
+package main
+
+import (
+	"dbm-services/mysql/priv-service/assests"
+	"dbm-services/mysql/priv-service/handler"
+	"dbm-services/mysql/priv-service/service"
+	"dbm-services/mysql/priv-service/util"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/gin-gonic/gin"
+	"github.com/golang-migrate/migrate/v4"
+	_ "github.com/golang-migrate/migrate/v4/source/file"
+	flag "github.com/spf13/pflag"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+	"gopkg.in/natefinch/lumberjack.v2"
+)
+
+func main() {
+	// parse command-line flags into their bound variables
+	flag.Parse()
+	// run the metadata database migrations
+	if viper.GetBool("migrate") {
+		if err := dbMigrate(); err != nil && err != migrate.ErrNoChange {
+			slog.Error("migrate failed", err)
+			// exit non-zero so a failed migration is visible to the caller
+			os.Exit(1)
+		}
+	}
+
+	// generate the public/private key pair
+	if err := util.CreateKeyFile(); err != nil {
+		slog.Error("create key files failed", err)
+	}
+
+	// initialize the database connections
+	service.DB.Init()
+	defer service.DB.Close()
+
+	// register and start the HTTP service
+	gin.SetMode(gin.ReleaseMode)
+	engine := gin.New()
+	engine.Use(gin.Recovery())
+	handler.RegisterRoutes(engine, "/priv", (&handler.PrivService{}).Routes())
+	if err := engine.Run(viper.GetString("http.listenAddress")); err != nil {
+		slog.Error("注册服务失败", err)
+	}
+}
+
+// init loads the config file, environment variables and command-line flags
+func init() {
+	viper.AddConfigPath("conf")
+	viper.SetConfigType("yaml")
+	viper.SetConfigName("config")
+	viper.AutomaticEnv()
+
+	replacer := strings.NewReplacer(".", "_")
+	viper.SetEnvKeyReplacer(replacer)
+	if err := viper.ReadInConfig(); err != nil {
+		slog.Error("读取配置文件失败", err)
+	}
+
+	flag.Bool(
+		"migrate", false,
+		"run migrate to databases, not exit.",
+	)
+	viper.BindPFlags(flag.CommandLine)
+	InitLog()
+}
+
+// dbMigrate runs the metadata database migrations
+//
+//	1. For the first migration, create the metadata database first: CREATE DATABASE IF NOT EXISTS `bk_dbpriv` DEFAULT CHARACTER SET utf8;
+//	2. Then run: ./bk_dbpriv --migrate
+func dbMigrate() error {
+	slog.Info("run db migrations...")
+	return assests.DoMigrateFromEmbed()
+}
+
+// InitLog initializes the process logger
+func InitLog() {
+	var logLevel = new(slog.LevelVar)
+	logLevel.Set(slog.LevelInfo)
+	if strings.ToLower(strings.TrimSpace(viper.GetString("log.level"))) == "debug" {
+		logLevel.Set(slog.LevelDebug)
+	}
+	logger := slog.HandlerOptions{Level: logLevel, AddSource: true}.NewTextHandler(os.Stdout)
+	logPath := strings.TrimSpace(viper.GetString("log.path"))
+	if logPath != "" {
+		logger = slog.HandlerOptions{Level: logLevel, AddSource: true}.NewTextHandler(
+			io.MultiWriter(
+				os.Stdout, &lumberjack.Logger{
+					Filename:   logPath,
+					MaxSize:    viper.GetInt("log.max_size"),
+					MaxAge:     viper.GetInt("log.max_age"),
+					MaxBackups: viper.GetInt("log.max_backups"),
+					LocalTime:  true,
+				},
+			),
+		)
+	}
+	slog.SetDefault(slog.New(logger))
+}
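+
+// The viper keys read across this service suggest a conf/config.yaml of
+// roughly the following shape; this is a sketch assembled from the Get*
+// calls in this patch, and the sample values are assumptions:
+//
+//	http:
+//	  listenAddress: "0.0.0.0:8000"
+//	log:
+//	  level: "info"          # "debug" enables debug logging
+//	  path: "logs/priv.log"  # empty logs to stdout only
+//	  max_size: 512          # rotation settings passed to lumberjack
+//	  max_age: 30
+//	  max_backups: 10
+//	dbmeta: "http://bk-dbm"  # dbmeta host used when resolving clusters
+//	bk_app_code: "xxx"       # stripped from logged request parameters
+//	bk_app_secret: "xxx"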
diff --git a/dbm-services/mysql/db-priv/service/account.go b/dbm-services/mysql/db-priv/service/account.go
new file mode 100644
index 0000000000..2cf6df5e39
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/account.go
@@ -0,0 +1,214 @@
+package service
+
+import (
+	"dbm-services/mysql/priv-service/errno"
+	"dbm-services/mysql/priv-service/util"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+
+	"github.com/jinzhu/gorm"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+// AddAccount creates an account
+func (m *AccountPara) AddAccount(jsonPara string) error {
+	var (
+		account    *TbAccounts
+		insertTime util.TimeFormat
+		psw        string
+		count      uint64
+		err        error
+	)
+
+	if m.BkBizId == 0 {
+		return errno.BkBizIdIsEmpty
+	}
+	if m.User == "" || m.Psw == "" {
+		return errno.PasswordOrAccountNameNull
+	}
+
+	err = DB.Self.Model(&TbAccounts{}).Where(&TbAccounts{BkBizId: m.BkBizId, User: m.User}).Count(&count).Error
+	if err != nil {
+		return err
+	}
+	if count != 0 {
+		return errno.AccountExisted.AddBefore(m.User)
+	}
+	psw, err = DecryptPsw(m.Psw)
+	if err != nil {
+		return err
+	}
+
+	if psw == m.User {
+		return errno.PasswordConsistentWithAccountName
+	}
+
+	psw, err = EncryptPswInDb(psw)
+	if err != nil {
+		return err
+	}
+	insertTime = util.NowTimeFormat()
+	account = &TbAccounts{BkBizId: m.BkBizId, User: m.User, Psw: psw, Creator: m.Operator, CreateTime: insertTime}
+	err = DB.Self.Model(&TbAccounts{}).Create(&account).Error
+	if err != nil {
+		return err
+	}
+
+	log := PrivLog{BkBizId: m.BkBizId, Operator: m.Operator, Para: jsonPara, Time: insertTime}
+	AddPrivLog(log)
+
+	return nil
+}
+
+// ModifyAccountPassword changes the password of an account
+func (m *AccountPara) ModifyAccountPassword(jsonPara string) error {
+	var (
+		account    TbAccounts
+		id         TbAccounts
+		updateTime util.TimeFormat
+		psw        string
+		err        error
+	)
+
+	if m.BkBizId == 0 {
+		return errno.BkBizIdIsEmpty
+	}
+	if m.Psw == "" {
+		return errno.PasswordOrAccountNameNull
+	}
+	if m.Id == 0 {
+		return errno.AccountIdNull
+	}
+
+	psw, err = DecryptPsw(m.Psw)
+	if err != nil {
+		return err
+	}
+
+	if psw == m.User {
+		return errno.PasswordConsistentWithAccountName
+	}
+
+	psw, err = EncryptPswInDb(psw)
+	if err != nil {
+		return err
+	}
+	updateTime = util.NowTimeFormat()
+
+	account = TbAccounts{Psw: psw, Operator: m.Operator, UpdateTime: updateTime}
+	id = TbAccounts{Id: m.Id}
+	result := DB.Self.Model(&id).Update(&account)
+
+	if result.Error != nil {
+		return result.Error
+	}
+	if result.RowsAffected == 0 {
+		return errno.AccountNotExisted
+	}
+
+	log := PrivLog{BkBizId: m.BkBizId, Operator: m.Operator, Para: jsonPara, Time: updateTime}
+	AddPrivLog(log)
+
+	return nil
+}
+
+// DeleteAccount deletes an account
+func (m *AccountPara) DeleteAccount(jsonPara string) error {
+	if m.BkBizId == 0 {
+		return errno.BkBizIdIsEmpty
+	}
+	if m.Id == 0 {
+		return errno.AccountIdNull
+	}
+	// result := DB.Self.Delete(&TbAccounts{}, m.Id)
+	sql := fmt.Sprintf("delete from tb_accounts where id=%d and bk_biz_id = %d", m.Id, m.BkBizId)
+	result := DB.Self.Exec(sql)
+	if result.Error != nil {
+		return result.Error
+	}
+	if result.RowsAffected == 0 {
+		return errno.AccountNotExisted
+	}
+
+	log := PrivLog{BkBizId: m.BkBizId, Operator: m.Operator, Para: jsonPara, Time: util.NowTimeFormat()}
+	AddPrivLog(log)
+	return nil
+}
+
+// GetAccount fetches accounts
+func (m *AccountPara) GetAccount() ([]*TbAccounts, int64, error) {
+	var (
+		count    int64
+		accounts []*TbAccounts
+		result   *gorm.DB
+	)
+	if m.BkBizId == 0 {
+		return nil, count, errno.BkBizIdIsEmpty
+	}
+	result = DB.Self.Table("tb_accounts").Where(&TbAccounts{BkBizId: m.BkBizId, Id: m.Id, User: m.User}).
+		Select("id,bk_biz_id,user,creator,create_time").Scan(&accounts)
+	if result.Error != nil {
+		return nil, count, result.Error
+	}
+	count = result.RowsAffected
+	return accounts, count, nil
+}
+
+// DecryptPsw decrypts, with the private key, ciphertext that was encrypted with the public key
+func DecryptPsw(psw string) (string, error) {
+	var (
+		xrsa       *util.XRsa
+		decryptPsw string
+	)
+	file, err := os.Open("./privkey.pem")
+	if err != nil {
+		return decryptPsw, err
+	}
+	defer file.Close()
+	content, err := ioutil.ReadAll(file)
+	if err != nil {
+		return decryptPsw, err
+	}
+	xrsa, err = util.NewXRsa(nil, content)
+	if err != nil {
+		return decryptPsw, err
+	}
+	decryptPsw, err = xrsa.PrivateDecrypt(psw)
+	if err != nil {
+		return decryptPsw, err
+	}
+	return decryptPsw, nil
+}
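+
+// The encrypting side mirrors the flow above: a caller fetches pubkey.pem via
+// the pub_key endpoint, encrypts the plaintext with the public key, and this
+// service decrypts it with privkey.pem. A sketch; the PublicEncrypt method is
+// hypothetical, since only NewXRsa and PrivateDecrypt appear in this patch:
+//
+//	pubPem, _ := ioutil.ReadFile("pubkey.pem")
+//	xrsa, _ := util.NewXRsa(pubPem, nil)
+//	cipherText, _ := xrsa.PublicEncrypt("plaintext-password") // hypothetical helper
+//	// send cipherText as the "psw" field; the service runs DecryptPsw(cipherText)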
+
+// EncryptPswInDb encrypts the plaintext password into the hashes stored in the db
+func EncryptPswInDb(psw string) (string, error) {
+	// Psw stores both the mysql_old_password and the mysql_native_password hash
+	type Psw struct {
+		OldPsw string `gorm:"column:old_psw;not_null" json:"old_psw"`
+		Psw    string `gorm:"column:psw;not_null" json:"psw"`
+	}
+	var result Psw
+	// produce both hash flavors, mysql_old_password and mysql_native_password; the password is stored as json so new hash schemes can be added later
+	err := DBVersion56.Self.Table("user").Select("OLD_PASSWORD(?) AS old_psw,PASSWORD(?) AS psw", psw, psw).Take(&result).
+		Error
+
+	if err != nil {
+		slog.Error("msg", err)
+		return "", errno.GenerateEncryptedPasswordErr
+	}
+	jsonString := fmt.Sprintf(`{"old_psw":"%s","psw":"%s"}`, result.OldPsw, result.Psw)
+	return jsonString, nil
+}
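+
+// The stored value is a json document holding both hash flavors, e.g.
+// (hash values below are illustrative placeholders; OLD_PASSWORD yields a
+// 16-hex-char hash, PASSWORD a 41-char '*'-prefixed hash):
+//
+//	{"old_psw":"6f8c114b58f2ce9e","psw":"*6BB4837EB74329105EE4568DDA7DC67ED2CA2AD9"}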
+
+// AddPrivLog records an operation log entry; the log is internal only
+func AddPrivLog(log PrivLog) {
+	log.Para = strings.Replace(log.Para, viper.GetString("bk_app_code"), "", -1)
+	log.Para = strings.Replace(log.Para, viper.GetString("bk_app_secret"), "", -1)
+	err := DB.Self.Create(&log).Error
+	if err != nil {
+		slog.Error("add log err", err)
+	}
+}
diff --git a/dbm-services/mysql/db-priv/service/account_object.go b/dbm-services/mysql/db-priv/service/account_object.go
new file mode 100644
index 0000000000..37ff4083cc
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/account_object.go
@@ -0,0 +1,53 @@
+package service
+
+import "dbm-services/mysql/priv-service/util"
+
+// AccountName is the account name
+type AccountName struct {
+	User string `gorm:"column:user;" json:"user"`
+}
+
+// TbAccounts maps the accounts table
+type TbAccounts struct {
+	Id         int64           `gorm:"column:id;primary_key;auto_increment" json:"id"`
+	BkBizId    int64           `gorm:"column:bk_biz_id;not_null" json:"bk_biz_id"`
+	User       string          `gorm:"column:user;not_null" json:"user"`
+	Psw        string          `gorm:"column:psw;not_null" json:"psw"`
+	Creator    string          `gorm:"column:creator;not_null;" json:"creator"`
+	CreateTime util.TimeFormat `gorm:"column:create_time" json:"create_time"`
+	Operator   string          `gorm:"column:operator" json:"operator"`
+	UpdateTime util.TimeFormat `gorm:"column:update_time" json:"update_time"`
+}
+
+// Account holds the account fields shown on the frontend
+type Account struct {
+	Id         int64           `gorm:"column:id;not_null" json:"id"`
+	BkBizId    int64           `gorm:"column:bk_biz_id;not_null" json:"bk_biz_id"`
+	User       string          `gorm:"column:user;not_null" json:"user"`
+	Creator    string          `gorm:"column:creator;not_null;" json:"creator"`
+	CreateTime util.TimeFormat `gorm:"column:create_time" json:"create_time"`
+}
+
+// MultiPsw holds the passwords produced by the two mysql auth plugins, mysql_old_password and mysql_native_password
+type MultiPsw struct {
+	OldPsw string `json:"old_psw"`
+	Psw    string `json:"psw"`
+}
+
+// AccountPara is the input of GetAccount, AddAccount, ModifyAccountPassword and DeleteAccount
+type AccountPara struct {
+	Id       int64  `json:"id"`
+	BkBizId  int64  `json:"bk_biz_id"`
+	User     string `json:"user"`
+	Psw      string `json:"psw"`
+	Operator string `json:"operator"`
+}
+
+// PrivLog records calls to the privilege-related endpoints
+type PrivLog struct {
+	Id       int64           `gorm:"column:id;primary_key;auto_increment" json:"id"`
+	BkBizId  int64           `gorm:"column:bk_biz_id;not_null" json:"bk_biz_id"`
+	Operator string          `gorm:"column:operator" json:"operator"`
+	Para     string          `gorm:"column:para" json:"para"`
+	Time     util.TimeFormat `gorm:"column:execute_time" json:"execute_time"`
+}
diff --git a/dbm-services/mysql/db-priv/service/accout_rule.go b/dbm-services/mysql/db-priv/service/accout_rule.go
new file mode 100644
index 0000000000..411af61f7f
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/accout_rule.go
@@ -0,0 +1,298 @@
+package service
+
+import (
+	"dbm-services/mysql/priv-service/errno"
+	"dbm-services/mysql/priv-service/util"
+	errors2 "errors"
+	"fmt"
+	"strings"
+
+	"github.com/jinzhu/gorm"
+)
+
+// QueryAccountRule fetches the account rules of a business
+func (m *BkBizId) QueryAccountRule() ([]*AccountRuleSplitUser, int64, error) {
+	var (
+		rules                []*Rule
+		accounts             []*Account
+		accountRuleSplitUser []*AccountRuleSplitUser
+		count                int64
+		result               *gorm.DB
+		err                  error
+	)
+	if m.BkBizId == 0 {
+		return nil, count, errno.BkBizIdIsEmpty
+	}
+	err = DB.Self.Model(&TbAccounts{}).Where(&TbAccounts{BkBizId: m.BkBizId}).Select(
+		"id,bk_biz_id,user,creator,create_time").Scan(&accounts).Error
+	if err != nil {
+		return nil, count, err
+	}
+	accountRuleSplitUser = make([]*AccountRuleSplitUser, len(accounts))
+	for k, v := range accounts {
+		result = DB.Self.Model(&TbAccountRules{}).Where(&TbAccountRules{BkBizId: m.BkBizId, AccountId: (*v).Id}).
+			Select("id,account_id,bk_biz_id,dbname,priv,creator,create_time").Scan(&rules)
+		accountRuleSplitUser[k] = &AccountRuleSplitUser{Account: v, Rules: rules}
+		// check this query's own error, not the stale err from the accounts query
+		if result.Error != nil {
+			return nil, count, result.Error
+		}
+		count += result.RowsAffected
+	}
+	// count is the number of account rules, not the number of accounts
+	return accountRuleSplitUser, count, nil
+}
+
+// AddAccountRule adds account rules
+func (m *AccountRulePara) AddAccountRule(jsonPara string) error {
+	var (
+		accountRule TbAccountRules
+		insertTime  util.TimeFormat
+		dbs         []string
+		allTypePriv string
+		dmlDdlPriv  string
+		globalPriv  string
+		err         error
+	)
+	ConstPrivType := []string{"dml", "ddl", "global"}
+
+	err = m.ParaPreCheck()
+	if err != nil {
+		return err
+	}
+
+	dbs, err = util.String2Slice(m.Dbname)
+	if err != nil {
+		return err
+	}
+
+	err = AccountRuleExistedPreCheck(m.BkBizId, m.AccountId, dbs)
+	if err != nil {
+		return err
+	}
+
+	for _, _type := range ConstPrivType {
+		value, exists := m.Priv[_type]
+		if exists {
+			if _type == "dml" || _type == "ddl" {
+				dmlDdlPriv = fmt.Sprintf("%s,%s", dmlDdlPriv, value)
+			} else {
+				globalPriv = value
+			}
+			allTypePriv = fmt.Sprintf("%s,%s", allTypePriv, value)
+		}
+	}
+
+	dmlDdlPriv = strings.Trim(dmlDdlPriv, ",")
+	allTypePriv = strings.Trim(allTypePriv, ",")
+
+	tx := DB.Self.Begin()
+	insertTime = util.NowTimeFormat()
+	for _, db := range dbs {
+		accountRule = TbAccountRules{BkBizId: m.BkBizId, AccountId: m.AccountId, Dbname: db, Priv: allTypePriv,
+			DmlDdlPriv: dmlDdlPriv, GlobalPriv: globalPriv, Creator: m.Operator, CreateTime: insertTime}
+		err = tx.Debug().Model(&TbAccountRules{}).Create(&accountRule).Error
+		if err != nil {
+			tx.Rollback()
+			return err
+		}
+	}
+	tx.Commit()
+
+	log := PrivLog{BkBizId: m.BkBizId, Operator: m.Operator, Para: jsonPara, Time: insertTime}
+	AddPrivLog(log)
+
+	return nil
+}
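+
+// A worked example of the privilege assembly above: with
+// Priv = {"dml": "select,update", "ddl": "create", "global": ""},
+// the loop yields dmlDdlPriv = "select,update,create", globalPriv = "" and
+// allTypePriv = "select,update,create" after the commas are trimmed; one
+// TbAccountRules row is then inserted per db parsed from Dbname.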
+
+// ModifyAccountRule modifies an account rule
+func (m *AccountRulePara) ModifyAccountRule(jsonPara string) error {
+	var (
+		accountRule TbAccountRules
+		updateTime  util.TimeFormat
+		dbname      string
+		allTypePriv string
+		dmlDdlPriv  string
+		globalPriv  string
+		err         error
+	)
+
+	ConstPrivType := []string{"dml", "ddl", "global"}
+
+	err = m.ParaPreCheck()
+	if err != nil {
+		return err
+	}
+	if m.Id == 0 {
+		return errno.AccountRuleIdNull
+	}
+
+	// the rule's db name and privileges may be modified
+	// the result must not clash with an existing rule
+	updateTime = util.NowTimeFormat()
+	dbname = strings.TrimSpace(m.Dbname)
+	if strings.Contains(dbname, " ") {
+		return errno.OnlyOneDatabaseAllowed
+	}
+
+	err = DB.Self.Model(&TbAccountRules{}).Where(&TbAccountRules{BkBizId: m.BkBizId, AccountId: m.AccountId,
+		Dbname: dbname}).Take(&accountRule).Error
+	/*
+		After the change, would the new "bk_biz_id+account_id+dbname" clash with an existing rule?
+		Before updating, look up "bk_biz_id+account_id+dbname", excluding this rule itself.
+		Two outcomes pass the check: 1. the lookup returns this very rule, meaning dbname is unchanged and only the privileges are modified; 2. no record is found, meaning dbname changed but the new rule does not clash with existing rules.
+	*/
+
+	// the modified rule clashes with an existing rule
+	if err == nil && accountRule.Id != m.Id {
+		return errno.AccountRuleExisted
+	}
+
+	if err != nil && !errors2.Is(err, gorm.ErrRecordNotFound) {
+		return err
+	}
+
+	for _, _type := range ConstPrivType {
+		value, exists := m.Priv[_type]
+		if exists {
+			if _type == "dml" || _type == "ddl" {
+				dmlDdlPriv = fmt.Sprintf("%s,%s", dmlDdlPriv, value)
+			} else {
+				globalPriv = value
+			}
+			allTypePriv = fmt.Sprintf("%s,%s", allTypePriv, value)
+		}
+	}
+
+	dmlDdlPriv = strings.Trim(dmlDdlPriv, ",")
+	allTypePriv = strings.Trim(allTypePriv, ",")
+
+	/*
+		When updating through a struct, gorm ignores zero-value fields (0, nil, "", false), so they would not be written.
+		Here global_priv may actually need to be set to "", so a map is used instead of a struct:
+		accountRule = TbAccountRules{Dbname: dbname, Priv:
+		allTypePriv, DmlDdlPriv:dmlDdlPriv,GlobalPriv: globalPriv,
+		Operator: m.Operator, UpdateTime: updateTime}
+		err = DB.Self.Model(&TbAccountRules{Id: m.Id}).Update(&accountRule).Error
+	*/
+	accountRuleMap := map[string]interface{}{"dbname": dbname, "priv": allTypePriv, "dml_ddl_priv": dmlDdlPriv,
+		"global_priv": globalPriv, "operator": m.Operator, "update_time": updateTime}
+	result := DB.Self.Model(&TbAccountRules{Id: m.Id}).Update(accountRuleMap)
+	if result.Error != nil {
+		return result.Error
+	}
+	if result.RowsAffected == 0 {
+		return errno.AccountRuleNotExisted
+	}
+
+	log := PrivLog{BkBizId: m.BkBizId, Operator: m.Operator, Para: jsonPara, Time: updateTime}
+	AddPrivLog(log)
+
+	return nil
+}
+
+// DeleteAccountRule deletes account rules
+func (m *DeleteAccountRuleById) DeleteAccountRule(jsonPara string) error {
+	if m.BkBizId == 0 {
+		return errno.BkBizIdIsEmpty
+	}
+	if len(m.Id) == 0 {
+		return errno.AccountRuleIdNull
+	}
+
+	/*
+		Batch deletion is done with a raw sql statement instead of gorm calls:
+			(1) when a primary key appears among several conditions, gorm silently drops the non-primary-key conditions, losing part of the filter:
+			result := DB.Self.Delete(&TbAccountRules{}, m.Id, m.BkBizId)
+			result := DB.Self.Delete(&TbAccountRules{}, m.Id).Where("bk_biz_id=?", m.BkBizId)
+			(2) delete with multiple where conditions is not supported:
+			result := DB.Self.Delete(&TbAccountRules{}).Where("id IN (?) AND bk_biz_id = ?", strings.Join(temp, ","), m.BkBizId)
+	*/
+
+	var temp = make([]string, len(m.Id))
+	for k, v := range m.Id {
+		temp[k] = fmt.Sprintf("%d", v)
+	}
+
+	sql := fmt.Sprintf("delete from tb_account_rules where id in (%s) and bk_biz_id = %d", strings.Join(temp, ","),
+		m.BkBizId)
+	result := DB.Self.Exec(sql)
+	if result.Error != nil {
+		return result.Error
+	}
+	if result.RowsAffected == 0 {
+		return errno.AccountRuleNotExisted
+	}
+	log := PrivLog{BkBizId: m.BkBizId, Operator: m.Operator, Para: jsonPara, Time: util.NowTimeFormat()}
+	AddPrivLog(log)
+	return nil
+}
+
+// AccountRuleExistedPreCheck checks whether the account rules already exist
+func AccountRuleExistedPreCheck(bkBizId, accountId int64, dbs []string) error {
+	var (
+		err         error
+		count       uint64
+		existedRule []string
+	)
+
+	// the account must exist before rules can be created for it
+	err = DB.Self.Model(&TbAccounts{}).Where(&TbAccounts{BkBizId: bkBizId, Id: accountId}).Count(&count).Error
+	if err != nil {
+		return err
+	}
+	if count == 0 {
+		return errno.AccountNotExisted
+	}
+
+	// check whether "business + account + db" already exists; existing rules are not created again
+	for _, db := range dbs {
+		err = DB.Self.Model(&TbAccountRules{}).Where(&TbAccountRules{BkBizId: bkBizId, AccountId: accountId, Dbname: db}).
+			Count(&count).Error
+		if err != nil {
+			return err
+		}
+		if count != 0 {
+			existedRule = append(existedRule, db)
+		}
+	}
+
+	if len(existedRule) > 0 {
+		return errno.Errno{Code: 51001, Message: fmt.Sprintf("account rule of the user on database(%s) already exists",
+			strings.Join(existedRule, ",")), CNMessage: fmt.Sprintf("用户对数据库(%s)授权的账号规则已存在", strings.Join(existedRule, ","))}
+	}
+	return nil
+}
+
+// ParaPreCheck validates the AccountRulePara input
+func (m *AccountRulePara) ParaPreCheck() error {
+	ConstPrivType := []string{"dml", "ddl", "global"}
+	if m.BkBizId == 0 {
+		return errno.BkBizIdIsEmpty
+	}
+	if m.AccountId == 0 {
+		return errno.AccountIdNull
+	}
+	if m.Dbname == "" {
+		return errno.DbNameNull
+	}
+
+	// cases where priv is empty:
+	// 1. "priv": {}
+	// 2. "priv": {"dml":"","ddl":"","global":""}  or  "priv": {"dml":""} or ...
+
+	nullFlag := true
+	for _, _type := range ConstPrivType {
+		value, exists := m.Priv[_type]
+		if exists {
+			if value != "" {
+				nullFlag = false
+				break
+			}
+		}
+	}
+
+	if len(m.Priv) == 0 || nullFlag {
+		return errno.PrivNull
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-priv/service/accout_rule_object.go b/dbm-services/mysql/db-priv/service/accout_rule_object.go
new file mode 100644
index 0000000000..717d542658
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/accout_rule_object.go
@@ -0,0 +1,55 @@
+package service
+
+import (
+	"dbm-services/mysql/priv-service/util"
+)
+
+// TbAccountRules maps the account rules table
+type TbAccountRules struct {
+	Id         int64           `gorm:"column:id;primary_key;auto_increment" json:"id"`
+	BkBizId    int64           `gorm:"column:bk_biz_id;not_null" json:"bk_biz_id"`
+	AccountId  int64           `gorm:"column:account_id;not_null" json:"account_id"`
+	Dbname     string          `gorm:"column:dbname;not_null" json:"dbname"`
+	Priv       string          `gorm:"column:priv;not_null" json:"priv"`
+	DmlDdlPriv string          `gorm:"column:dml_ddl_priv;not_null" json:"dml_ddl_priv"`
+	GlobalPriv string          `gorm:"column:global_priv;not_null" json:"global_priv"`
+	Creator    string          `gorm:"column:creator;not_null;" json:"creator"`
+	CreateTime util.TimeFormat `gorm:"column:create_time" json:"create_time"`
+	Operator   string          `gorm:"column:operator" json:"operator"`
+	UpdateTime util.TimeFormat `gorm:"column:update_time" json:"update_time"`
+}
+
+// Rule holds the account-rule fields shown on the frontend
+type Rule struct {
+	Id         int64           `gorm:"column:id;primary_key;auto_increment" json:"id"`
+	AccountId  int64           `gorm:"column:account_id;not_null" json:"account_id"`
+	BkBizId    int64           `gorm:"column:bk_biz_id;not_null" json:"bk_biz_id"`
+	Dbname     string          `gorm:"column:dbname;not_null" json:"dbname"`
+	Priv       string          `gorm:"column:priv;not_null" json:"priv"`
+	Creator    string          `gorm:"column:creator;not_null;" json:"creator"`
+	CreateTime util.TimeFormat `gorm:"column:create_time" json:"create_time"`
+}
+
+// AccountRuleSplitUser groups, for frontend display, an account with its rules
+type AccountRuleSplitUser struct {
+	Account *Account `json:"account"`
+	Rules   []*Rule  `json:"rules"`
+}
+
+// DeleteAccountRuleById deletes account rules by their ids
+type DeleteAccountRuleById struct {
+	BkBizId  int64  `json:"bk_biz_id"`
+	Operator string `json:"operator"`
+	Id       []int  `json:"id"`
+}
+
+// AccountRulePara is the input of AddAccountRule, ModifyAccountRule and ParaPreCheck
+type AccountRulePara struct {
+	BkBizId   int64  `json:"bk_biz_id"`
+	Id        int64  `json:"id"`         // id of the account rule
+	AccountId int64  `json:"account_id"` // id of the account
+	Dbname    string `json:"dbname"`
+	// key is dml, ddl or global; value is a comma-separated privilege list; e.g. {"dml":"select,update","ddl":"create","global":"REPLICATION SLAVE"}
+	Priv     map[string]string `json:"priv"`
+	Operator string            `json:"operator"`
+}
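+
+// An add_account_rule request body matching AccountRulePara would look like
+// the sketch below (field values are illustrative):
+//
+//	{
+//	    "bk_biz_id": 1,
+//	    "account_id": 2,
+//	    "dbname": "db%",
+//	    "priv": {"dml": "select,update", "ddl": "create"},
+//	    "operator": "admin"
+//	}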
diff --git a/dbm-services/mysql/db-priv/service/add_priv.go b/dbm-services/mysql/db-priv/service/add_priv.go
new file mode 100644
index 0000000000..39b3bdc59a
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/add_priv.go
@@ -0,0 +1,204 @@
+package service
+
+import (
+	"dbm-services/mysql/priv-service/errno"
+	"dbm-services/mysql/priv-service/util"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+// AddPrivDryRun pre-checks a privilege grant that uses account rules
+func (m *PrivTaskPara) AddPrivDryRun() (PrivTaskPara, error) {
+	var taskPara PrivTaskPara
+	var errMsg []string
+	var errMsgTemp []string
+
+	taskPara.SourceIPs, errMsgTemp = DeduplicationIP(m.SourceIPs)
+	if len(errMsgTemp) > 0 {
+		errMsg = append(errMsg, errMsgTemp...)
+	}
+
+	taskPara.TargetInstances, errMsgTemp = DeduplicationTargetInstance(m.TargetInstances, m.ClusterType)
+	if len(errMsgTemp) > 0 {
+		errMsg = append(errMsg, errMsgTemp...)
+	}
+
+	for _, rule := range m.AccoutRules {
+		_, _, err := GetAccountRuleInfo(m.BkBizId, m.User, rule.Dbname)
+		if err != nil {
+			errMsg = append(errMsg, err.Error())
+		}
+	}
+
+	if len(errMsg) > 0 {
+		return taskPara, errno.GrantPrivilegesParameterCheckFail.Add("\n" + strings.Join(errMsg, "\n"))
+	}
+
+	taskPara.BkBizId = m.BkBizId
+	taskPara.Operator = m.Operator
+	taskPara.AccoutRules = m.AccoutRules
+	taskPara.ClusterType = m.ClusterType
+	taskPara.User = m.User
+
+	return taskPara, nil
+}
+
+// AddPriv grants privileges based on account rules
+func (m *PrivTaskPara) AddPriv(jsonPara string) error {
+	slog.Info(fmt.Sprintf("PrivTaskPara:%v", m))
+	var errMsg, successMsg Err
+	var wg sync.WaitGroup
+	// AddPriv can be called via the api directly, without AddPrivDryRun validating the parameters first, so AddPriv runs AddPrivDryRun itself
+	if _, outerErr := m.AddPrivDryRun(); outerErr != nil {
+		return outerErr
+	}
+	AddPrivLog(PrivLog{BkBizId: m.BkBizId, Operator: m.Operator, Para: jsonPara, Time: util.NowTimeFormat()})
+	tokenBucket := make(chan int, 10)
+	client := util.NewClientByHosts(viper.GetString("dbmeta"))
+	for _, rule := range m.AccoutRules { // grant privileges: for each account rule, for each target instance, spawn a goroutine
+		account, accountRule, outerErr := GetAccountRuleInfo(m.BkBizId, m.User, rule.Dbname)
+		if outerErr != nil {
+			AddErrorOnly(&errMsg, outerErr)
+			continue
+		}
+		for _, dns := range m.TargetInstances {
+			wg.Add(1)
+			tokenBucket <- 0
+			go func(dns string) {
+				defer func() {
+					<-tokenBucket
+					wg.Done()
+				}()
+				var (
+					instance                                      Instance
+					proxySQL, proxyIPs, errMsgInner               []string
+					err                                           error
+					tendbhaMasterDomain                           bool // whether the target is the cluster's master domain
+					successInfo, failInfo, baseInfo, ips, address string
+				)
+				dns = strings.Trim(strings.TrimSpace(dns), ".")
+				ips = strings.Join(m.SourceIPs, " ")
+				baseInfo = fmt.Sprintf(`account rule:"%s-%s", source ips:"%s", account:"%s", target cluster:"%s", database:"%s"`,
+					account.User, accountRule.Dbname, ips, account.User, dns, accountRule.Dbname)
+				successInfo = fmt.Sprintf(`%s, granted successfully.`, baseInfo)
+				failInfo = fmt.Sprintf(`%s, grant failed:`, baseInfo)
+
+				instance, err = GetCluster(client, m.ClusterType, Domain{EntryName: dns})
+				if err != nil {
+					AddErrorOnly(&errMsg, errors.New(failInfo+sep+err.Error()))
+					return
+				}
+				if m.ClusterType == tendbha || m.ClusterType == tendbsingle {
+					// 当"cluster_type": "tendbha", "bind_to": "proxy" tendbha的主域名, "bind_to": "storage" tendbha的备域名
+					if instance.ClusterType == tendbha && instance.BindTo == machineTypeProxy {
+						tendbhaMasterDomain = true
+						for _, proxy := range instance.Proxies {
+							proxyIPs = append(proxyIPs, proxy.IP)
+						}
+					}
+					for _, storage := range instance.Storages {
+						if tendbhaMasterDomain && storage.InstanceRole == backendSlave && storage.Status != running {
+							slog.Warn(baseInfo, "slave instance not running state, skipped",
+								fmt.Sprintf("%s:%d", storage.IP, storage.Port))
+							continue
+						}
+						address = fmt.Sprintf("%s:%d", storage.IP, storage.Port)
+						err = ImportBackendPrivilege(account, accountRule, address, proxyIPs, m.SourceIPs, instance.ClusterType,
+							tendbhaMasterDomain, instance.BkCloudId)
+						if err != nil {
+							errMsgInner = append(errMsgInner, err.Error())
+						}
+					}
+					if len(errMsgInner) > 0 {
+						AddErrorOnly(&errMsg, errors.New(failInfo+sep+strings.Join(errMsgInner, sep)))
+						return
+					}
+					if tendbhaMasterDomain { // proxy whitelisting runs after the mysql grants, and only once they have succeeded
+						proxySQL = GenerateProxyPrivilege(account.User, m.SourceIPs)
+						var runningNum int
+						for _, proxy := range instance.Proxies {
+							if proxy.Status == running {
+								runningNum = runningNum + 1
+							}
+						}
+						for _, proxy := range instance.Proxies {
+							if runningNum > 0 && proxy.Status != running {
+								slog.Warn(baseInfo, "proxy instance not running state, skipped", fmt.Sprintf("%s:%d", proxy.IP, proxy.Port))
+								continue
+							}
+							err = ImportProxyPrivilege(proxy, proxySQL, instance.BkCloudId)
+							if err != nil {
+								errMsgInner = append(errMsgInner, err.Error())
+							}
+						}
+					}
+					if len(errMsgInner) > 0 {
+						AddErrorOnly(&errMsg, errors.New(failInfo+sep+strings.Join(errMsgInner, sep)))
+						return
+					}
+				} else if m.ClusterType == tendbcluster {
+					for _, spider := range instance.SpiderMaster {
+						address = fmt.Sprintf("%s:%d", spider.IP, spider.Port)
+						err = ImportBackendPrivilege(account, accountRule, address, proxyIPs, m.SourceIPs, instance.ClusterType,
+							tendbhaMasterDomain, instance.BkCloudId)
+						if err != nil {
+							errMsgInner = append(errMsgInner, err.Error())
+						}
+					}
+					if len(errMsgInner) > 0 {
+						AddErrorOnly(&errMsg, errors.New(failInfo+sep+strings.Join(errMsgInner, sep)))
+						return
+					}
+				} else {
+					AddErrorOnly(&errMsg, errors.New(fmt.Sprintf("%s%scluster type is %s, wrong type", failInfo, sep,
+						instance.ClusterType)))
+					return
+				}
+				AddErrorOnly(&successMsg, errors.New(successInfo))
+			}(dns)
+		}
+	}
+	wg.Wait() // when a goroutine fails, its error message is appended to errMsg.errs; the main goroutine blocks on wg.Wait() until every goroutine has finished
+	close(tokenBucket)
+	return AddPrivResult(errMsg, successMsg)
+}
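+
+// The bounded-concurrency pattern used above, reduced to a standalone sketch:
+// a buffered channel acts as a counting semaphore next to a WaitGroup. Names
+// here are illustrative and not part of the service:
+//
+//	var wg sync.WaitGroup
+//	tokens := make(chan struct{}, 10) // at most 10 workers in flight
+//	for _, task := range tasks {
+//		wg.Add(1)
+//		tokens <- struct{}{} // acquire before spawning to cap goroutine count
+//		go func(t Task) {
+//			defer func() { <-tokens; wg.Done() }() // release on exit
+//			handle(t)
+//		}(task)
+//	}
+//	wg.Wait()
+//	close(tokens)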
+
+// AddPrivWithoutAccountRule grants on a mysql instance without an account-rule template. The endpoint is not called by the frontend; it is designed for backend services, and granting through it is discouraged.
+func (m *AddPrivWithoutAccountRule) AddPrivWithoutAccountRule(jsonPara string) error {
+	var clusterType string
+	psw, err := DecryptPsw(m.Psw)
+	if err != nil {
+		return err
+	}
+
+	if psw == m.User {
+		return errno.PasswordConsistentWithAccountName
+	}
+
+	psw, err = EncryptPswInDb(psw)
+	if err != nil {
+		return err
+	}
+	ts := util.NowTimeFormat()
+	tmpAccount := TbAccounts{0, 0, m.User, psw, "", ts, "", ts}
+	tmpAccountRule := TbAccountRules{0, 0, 0, m.Dbname, m.Priv, m.DmlDdlPriv, m.GlobalPriv, "", ts, "", ts}
+	if m.BkCloudId == nil {
+		return errno.CloudIdRequired
+	}
+	if m.SpiderFlag {
+		clusterType = tendbcluster
+	} else {
+		clusterType = tendbsingle
+	}
+	err = ImportBackendPrivilege(tmpAccount, tmpAccountRule, m.Address, nil, m.Hosts, clusterType, false, *m.BkCloudId)
+	if err != nil {
+		return errno.GrantPrivilegesFail.Add(err.Error())
+	}
+	AddPrivLog(PrivLog{BkBizId: m.BkBizId, Operator: m.Operator, Para: jsonPara, Time: util.NowTimeFormat()})
+	return nil
+}
diff --git a/dbm-services/mysql/db-priv/service/add_priv_base_func.go b/dbm-services/mysql/db-priv/service/add_priv_base_func.go
new file mode 100644
index 0000000000..99f27ee038
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/add_priv_base_func.go
@@ -0,0 +1,562 @@
+package service
+
+import (
+	"dbm-services/mysql/priv-service/errno"
+	"dbm-services/mysql/priv-service/util"
+	"encoding/json"
+	errors2 "errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/asaskevich/govalidator"
+	"github.com/jinzhu/gorm"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+// GetAccountRuleInfo fetches the account by name, then the account rule by account id and the database to be granted
+func GetAccountRuleInfo(bkBizId int64, user, dbname string) (TbAccounts, TbAccountRules, error) {
+	var account TbAccounts
+	var accountRule TbAccountRules
+	err := DB.Self.Table("tb_accounts").Where(&TbAccounts{BkBizId: bkBizId, User: user}).Take(&account).Error
+	if errors2.Is(err, gorm.ErrRecordNotFound) {
+		return account, accountRule, fmt.Errorf("账号%s不存在", user)
+	} else if err != nil {
+		return account, accountRule, err
+	}
+	err = DB.Self.Model(&TbAccountRules{}).Where(
+		&TbAccountRules{BkBizId: bkBizId, AccountId: account.Id, Dbname: dbname}).Take(&accountRule).Error
+	if errors2.Is(err, gorm.ErrRecordNotFound) {
+		return account, accountRule, fmt.Errorf("账号规则(账号:%s,数据库:%s)不存在", user, dbname)
+	} else if err != nil {
+		return account, accountRule, err
+	}
+	return account, accountRule, nil
+}
+
+// ImportBackendPrivilege generates the mysql grant statements and executes them on the instance
+func ImportBackendPrivilege(account TbAccounts, accountRule TbAccountRules, address string, proxyIPs []string,
+	sourceIps []string, clusterType string, tendbhaMasterDomain bool, bkCloudId int64) error {
+	var backendSQL []string
+	mysqlVersion, err := GetMySQLVersion(address, bkCloudId)
+	if err != nil {
+		slog.Error("mysqlVersion", err)
+		return err
+	}
+	if tendbhaMasterDomain {
+		backendSQL, err = GenerateBackendSQL(account, accountRule, proxyIPs, mysqlVersion, address, clusterType,
+			tendbhaMasterDomain, bkCloudId)
+		if err != nil {
+			slog.Error("backendSQL", err)
+			return err
+		}
+	} else {
+		backendSQL, err = GenerateBackendSQL(account, accountRule, sourceIps, mysqlVersion, address, clusterType,
+			tendbhaMasterDomain, bkCloudId)
+		if err != nil {
+			slog.Error("backendSQL", err)
+			return err
+		}
+	}
+	var queryRequest = QueryRequest{[]string{address}, backendSQL, true, 60, bkCloudId}
+	_, err = OneAddressExecuteSql(queryRequest)
+	if err != nil {
+		slog.Error("OneAddressExecuteSql", err)
+		return err
+	}
+	return nil
+}
+
+// GenerateBackendSQL generates the mysql grant statements
+func GenerateBackendSQL(account TbAccounts, rule TbAccountRules, ips []string, mysqlVersion string, address string,
+	clusterType string, tendbhaMasterDomain bool, bkCloudId int64) ([]string, error) {
+	var multiPsw MultiPsw
+	var wg sync.WaitGroup
+	var errOuter error
+	// ResultTemp collects the generated grant statements
+	type ResultTemp struct {
+		mu         sync.RWMutex
+		backendSQL []string
+	}
+	var result ResultTemp
+	finishChan := make(chan bool, 1)
+	errorChan := make(chan error, 1)
+	tokenBucket := make(chan int, 10)
+	if errOuter = json.Unmarshal([]byte(account.Psw), &multiPsw); errOuter != nil {
+		return nil, errOuter
+	}
+
+	// For almost all cases, we do not need to binlog grants.
+	// Meanwhile, binlogging grants can cause problems, eg. when starting a MySQL 8.0 replica for MySQL 5.7,
+	// because MySQL 8.0 does not recognize "GRANT ... IDENTIFIED BY ...".
+
+	if clusterType == tendbcluster {
+		result.backendSQL = append(result.backendSQL, flushPriv, setBinlogOff, setDdlByCtlOFF)
+	} else {
+		result.backendSQL = append(result.backendSQL, flushPriv, setBinlogOff)
+	}
+
+	containConnLogDBFlag := ContainConnLogDB(rule.Dbname)
+	needInsertConnLogFlag := containConnLogDBFlag && !strings.Contains(strings.ToLower(rule.DmlDdlPriv), "insert")
+	/*
+		Logins of non-admin users are audited through init_connect:
+		mysql> show global variables like 'init_connect'
+		Variable_name: init_connect
+		Value: set @user=user(),@cur_user=current_user();insert into
+		test.conn_log values(connection_id(),now(),@user,@cur_user,'127.0.0.1');
+
+		If a user's grants do not cover test.conn_log, login still succeeds, because every
+		user has insert/update/delete/select on the test database by default; instances
+		grant all users access to test at install time, as mysql.db shows.
+
+		BUG: if the granted db scope covers test.conn_log but lacks the insert privilege,
+		for example:
+			GRANT SELECT ON `%`.* TO 'temp'@'localhost' identified by 'temp';
+			user-level privileges take precedence over mysql.db, so the mysql.db row is
+			"shadowed"; the user cannot insert into test.conn_log and login fails with ERROR 1184.
+
+			mysql> show databases;
+			ERROR 2006 (HY000): MySQL server has gone away
+			No connection. Trying to reconnect...
+			Connection id:    2817282
+			Current database: *** NONE ***
+
+			ERROR 1184 (08S01): Aborted connection 2817282 to db: 'unconnected' user: 'temp' host: 'localhost' (
+			init_connect command failed)
+
+		Fix: granting insert on test.conn_log resolves the problem.
+	*/
+
+	for _, ip := range ips {
+		wg.Add(1)
+		tokenBucket <- 0 // 在这里操作 token 可以防止过多的协程启动但处于等待 token 的阻塞状态
+		go func(ip string) {
+			defer func() {
+				<-tokenBucket
+				wg.Done()
+			}()
+			defer func() {
+				if r := recover(); r != nil {
+					errorChan <- fmt.Errorf("inner panic,error:%v", r)
+					return
+				}
+			}()
+
+			var (
+				identifiedByPassword string
+				CreateUserVersion8   string
+				sql                  string
+				pswResp              PasswordResp
+				err                  error
+				sqlTemp              []string
+			)
+
+			identifiedByPassword = fmt.Sprintf("IDENTIFIED BY PASSWORD '%s'", multiPsw.Psw)
+			CreateUserVersion8 = fmt.Sprintf(`CREATE USER IF NOT EXISTS '%s'@'%s' %s;`, account.User, ip,
+				fmt.Sprintf("IDENTIFIED WITH mysql_native_password AS '%s'", multiPsw.Psw))
+			// a nil err means the account does not exist, or the new password matches the existing one
+			pswResp, err = GetPassword(account.User, multiPsw, mysqlVersion, ip, address, tendbhaMasterDomain, bkCloudId)
+			if err != nil {
+				slog.Error("GetPassword", err)
+				errorChan <- err
+				return
+			}
+
+			// 存在此账号,新旧密码相同
+			if pswResp.PwdType != "" {
+				err = CheckDbCross(account.User, rule.Dbname, ip, address, tendbhaMasterDomain, bkCloudId)
+				if err != nil {
+					errorChan <- err
+					return
+				}
+				identifiedByPassword = fmt.Sprintf("IDENTIFIED BY PASSWORD '%s'", pswResp.Psw)
+				CreateUserVersion8 = fmt.Sprintf(`CREATE USER IF NOT EXISTS '%s'@'%s' %s;`,
+					account.User, ip, fmt.Sprintf("IDENTIFIED WITH %s AS '%s'", pswResp.PwdType, pswResp.Psw))
+			}
+			if (clusterType == tendbha || clusterType == tendbsingle) && MySQLVersionParse(mysqlVersion, "") >=
+				MySQLVersionParse("8.0.0", "") {
+				sqlTemp = append(sqlTemp, CreateUserVersion8)
+				identifiedByPassword = ""
+			}
+			// the slave domain is only granted read privileges
+			if clusterType == tendbha && !tendbhaMasterDomain {
+				// `show databases` lists the dbs granted select; the global show databases privilege is not granted, since it would expose every db
+				sql = fmt.Sprintf("GRANT SELECT, SHOW VIEW ON `%s`.* TO '%s'@'%s' %s;",
+					rule.Dbname, account.User, ip, identifiedByPassword)
+				sqlTemp = append(sqlTemp, sql)
+				if containConnLogDBFlag {
+					sql = fmt.Sprintf("%s '%s'@'%s' %s;", insertConnLogPriv, account.User, ip, identifiedByPassword)
+					sqlTemp = append(sqlTemp, sql)
+				}
+				result.mu.Lock()
+				result.backendSQL = append(result.backendSQL, sqlTemp...)
+				result.mu.Unlock()
+				return
+			}
+
+			if rule.DmlDdlPriv != "" {
+				sql = fmt.Sprintf("GRANT %s ON `%s`.* TO '%s'@'%s' %s;",
+					rule.DmlDdlPriv, rule.Dbname, account.User, ip, identifiedByPassword)
+				sqlTemp = append(sqlTemp, sql)
+				if needInsertConnLogFlag {
+					sql = fmt.Sprintf("%s '%s'@'%s' %s;", insertConnLogPriv, account.User, ip, identifiedByPassword)
+					sqlTemp = append(sqlTemp, sql)
+				}
+			}
+			if rule.GlobalPriv != "" {
+				sql = fmt.Sprintf(`GRANT %s ON *.* TO '%s'@'%s' %s;`,
+					rule.GlobalPriv, account.User, ip, identifiedByPassword)
+				// "all privileges" is granted with grant option; for internal callers only
+				if strings.Contains(strings.ToLower(rule.GlobalPriv), "all privileges") {
+					sql = fmt.Sprintf(`GRANT %s ON *.* TO '%s'@'%s' %s with grant option;`,
+						rule.GlobalPriv, account.User, ip, identifiedByPassword)
+				}
+				sqlTemp = append(sqlTemp, sql)
+			}
+
+			result.mu.Lock()
+			result.backendSQL = append(result.backendSQL, sqlTemp...)
+			result.mu.Unlock()
+		}(ip)
+	}
+
+	/*
+		wg.Wait runs in its own goroutine, so blocking there does not keep the main goroutine from reaching the select.
+		If errorChan receives a value, the main goroutine returns, and the child goroutines end with it.
+		Once all child goroutines above finish without writing to errorChan, wg.Wait returns and close(finishChan) runs; after finishChan is closed, the select's read of finishChan no longer blocks.
+	*/
+	go func() {
+		wg.Wait()
+		close(finishChan)
+		close(tokenBucket)
+	}()
+
+	select {
+	case <-finishChan:
+	case err := <-errorChan:
+		return nil, err
+	}
+	result.backendSQL = append(result.backendSQL, setBinlogOn, flushPriv)
+	return result.backendSQL, nil
+}
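+
+// For a tendbha master domain on mysql < 8.0, the statements assembled above
+// come out roughly in this order (user, ip and hash are illustrative):
+//
+//	flush privileges;
+//	SET SESSION sql_log_bin=0;
+//	GRANT select,update ON `db1`.* TO 'u1'@'1.1.1.1' IDENTIFIED BY PASSWORD '*ABCD...';
+//	SET SESSION sql_log_bin=1;
+//	flush privileges;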
+
+// GenerateProxyPrivilege generates the proxy whitelist statements
+func GenerateProxyPrivilege(user string, ips []string) []string {
+	var (
+		sql      string
+		proxySQL []string
+	)
+	for _, ip := range ips {
+		sql = fmt.Sprintf("refresh_users('%s@%s','+');", user, ip)
+		proxySQL = append(proxySQL, sql)
+	}
+	return proxySQL
+}
+
+// ImportProxyPrivilege executes the whitelist statements on the proxy
+func ImportProxyPrivilege(proxy Proxy, proxySQL []string, bkCloudId int64) error {
+	var errMsg []string
+	address := fmt.Sprintf("%s:%d", proxy.IP, proxy.AdminPort)
+	for _, grantSQL := range proxySQL {
+		queryRequest := QueryRequest{[]string{address}, []string{grantSQL}, true, 30, bkCloudId}
+		_, err := OneAddressExecuteProxySql(queryRequest)
+		if err != nil {
+			errMsg = append(errMsg, fmt.Sprintf("execute(%s) in bk_cloud_id(%d) mysqld(%s:%d) error:%s",
+				grantSQL, bkCloudId, proxy.IP, proxy.Port, err.Error()))
+		}
+	}
+	if len(errMsg) > 0 {
+		// errMsg may contain % (e.g. ip patterns), so avoid fmt's format parsing
+		return errors2.New(strings.Join(errMsg, "\n"))
+	}
+	return nil
+}
+
+// GetPassword checks whether user@host already exists on the instance: if absent, the grant may proceed; if present with an identical password, the grant may proceed; if the passwords differ, the grant is refused.
+func GetPassword(user string, multiPsw MultiPsw, mysqlVersion, ip string, address string,
+	masterDomain bool, bkCloudId int64) (PasswordResp, error) {
+	var pswResp PasswordResp
+	var passwdColName = "password"
+	var pswLen int
+	var result oneAddressResult
+	var err error
+	var tipsForProxyIP string
+
+	if MySQLVersionParse(mysqlVersion, "") > MySQLVersionParse("5.7.5", "") {
+		passwdColName = "authentication_string"
+	}
+
+	queryPwdSQL := fmt.Sprintf("SELECT %s AS psw,plugin AS pswType FROM mysql.user WHERE user='%s' AND host='%s'",
+		passwdColName, user, ip)
+	var queryRequest = QueryRequest{[]string{address}, []string{queryPwdSQL}, true, 60, bkCloudId}
+
+	result, err = OneAddressExecuteSql(queryRequest)
+	if err != nil {
+		return pswResp, err
+	}
+
+	// the account does not exist on the instance
+	if len(result.CmdResults[0].TableData) == 0 {
+		return pswResp, nil
+	}
+
+	// the account exists; capture the stored hash so its plugin type can be determined
+	pswResp.Psw = result.CmdResults[0].TableData[0]["psw"].(string)
+	pswLen = len(pswResp.Psw)
+	if masterDomain {
+		tipsForProxyIP = fmt.Sprintf("%s is the proxy's ip.", ip)
+	}
+
+	switch {
+	case pswLen == 41:
+		if pswResp.Psw == multiPsw.Psw {
+			pswResp.PwdType = "mysql_native_password"
+			return pswResp, nil
+		}
+	case pswLen == 16:
+		if pswResp.Psw == multiPsw.OldPsw {
+			pswResp.PwdType = "mysql_old_password"
+			return pswResp, nil
+		}
+	case pswLen == 70:
+		// caching_sha2_password hashes are salted and differ on every generation, so they cannot be compared
+		return pswResp, fmt.Errorf("account (%s@%s) already exists on %s; use mysql_native_password instead of caching_sha2_password. %s",
+			user, ip, address, tipsForProxyIP)
+	default:
+		return pswResp, fmt.Errorf("account (%s@%s) already exists on %s, but the new password differs from the old one; they must be identical. %s",
+			user, ip, address, tipsForProxyIP)
+	}
+	return pswResp, fmt.Errorf("account (%s@%s) already exists on %s, but the new password differs from the old one; they must be identical. %s",
+		user, ip, address, tipsForProxyIP)
+}
+
+// CheckDbCross checks, when user@host already exists, whether any database already granted overlaps the database about to be granted.
+// Overlapping scopes are refused: authentication could match any one of several privilege rows, which makes the effective privileges indeterminate
+func CheckDbCross(user string, dbname string, ip string, address string, masterDomain bool, bkCloudId int64) error {
+	QueryGrantDbSQL := fmt.Sprintf("select db from mysql.db where user='%s' and host = '%s';", user, ip)
+	var result oneAddressResult
+	var errMsg []string
+	var err error
+	var queryRequest = QueryRequest{[]string{address}, []string{QueryGrantDbSQL}, true, 60, bkCloudId}
+	var tipsForProxyIP string
+
+	result, err = OneAddressExecuteSql(queryRequest)
+	if err != nil {
+		slog.Error("QueryGrantDbSQL", err)
+		return err
+	}
+
+	if len(result.CmdResults[0].TableData) == 0 {
+		slog.Info("if dbname cross, check ok")
+		return nil
+	}
+
+	newDBRegexp := mysqlGrantDBToReg(dbname)
+	percentrBegin := regexp.MustCompile("^%.*$")
+	percentrEnd := regexp.MustCompile("^.*%$")
+
+	for _, grantDb := range result.CmdResults[0].TableData {
+		existDb := grantDb["db"].(string)
+		if dbname == existDb {
+			continue
+		}
+		existDbRegexp := mysqlGrantDBToReg(existDb)
+		/*
+			bug: patterns such as (%d%, %h%) or (d%, %h) intersect, yet newDBRegexp.MatchString(existDb) and existDbRegexp.MatchString(dbname) both fail to detect it
+			fix: if the existing and the new db pattern both contain %, one starting with % and the other ending with %, treat them as intersecting
+		*/
+		if newDBRegexp.MatchString(existDb) || existDbRegexp.MatchString(dbname) ||
+			(percentrBegin.MatchString(existDb) && percentrEnd.MatchString(dbname)) ||
+			(percentrBegin.MatchString(dbname) && percentrEnd.MatchString(existDb)) {
+			if masterDomain {
+				tipsForProxyIP = fmt.Sprintf("%s是proxy的IP。", ip)
+			}
+			// a database already granted overlaps the database about to be granted
+			// a lone % is doubled to %% so the later fmt.Errorf does not print %!(MISSING)
+			msg := fmt.Sprintf("account (%s@%s) on %s already has a grant on database [`%s`]; database [`%s`] in the new grant intersects [`%s`], so it cannot be granted. %s",
+				user, ip, address, strings.Replace(existDb, "%", "%%", -1),
+				strings.Replace(dbname, "%", "%%", -1),
+				strings.Replace(existDb, "%", "%%", -1), tipsForProxyIP)
+			errMsg = append(errMsg, msg)
+			continue
+		}
+	}
+	if len(errMsg) > 0 {
+		return fmt.Errorf(strings.Join(errMsg, "\n"))
+	}
+	return nil
+}
+
+// mysqlGrantDBToReg converts a grant db pattern into a regular expression
+func mysqlGrantDBToReg(dbName string) *regexp.Regexp {
+	dbNameRegStr := strings.Replace(dbName, "%", ".*", -1)
+	return regexp.MustCompile(fmt.Sprintf("^%s$", dbNameRegStr))
+}
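+
+// Worked examples of the overlap detection built on this helper:
+//
+//	mysqlGrantDBToReg("db%") => ^db.*$, which matches "db1", so "db%" and "db1" overlap;
+//	mysqlGrantDBToReg("db1") => ^db1$, which does not match "db%", but the reverse
+//	check in CheckDbCross catches that pair;
+//	"%log" vs "db%": neither regexp matches the other, but both contain %, one
+//	leading and one trailing, so the percent-begin/percent-end rule flags them.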
+
+// GetMySQLVersion fetches the mysql version of an instance
+func GetMySQLVersion(address string, bkCloudId int64) (version string, err error) {
+	var output oneAddressResult
+	var queryRequest = QueryRequest{[]string{address}, []string{"select version() as version;"}, true, 30, bkCloudId}
+	output, err = OneAddressExecuteSql(queryRequest)
+	if err != nil {
+		slog.Error("msg", err)
+		return "", err
+	}
+	return output.CmdResults[0].TableData[0]["version"].(string), nil
+}
+
+// DeduplicationIP validates the format of the grant target hosts and removes duplicates
+func DeduplicationIP(sourceIPs []string) ([]string, []string) {
+	var (
+		errMsg  []string
+		ips     []string
+		UniqMap = make(map[string]struct{})
+	)
+
+	// identical ips granted to the same user must be deduplicated (plain ips such as 1.1.1.1 as well as %-patterns)
+	reg := regexp.MustCompile(`(\d+\.)+\%`)
+	for _, ip := range sourceIPs {
+		ip = strings.TrimSpace(ip)
+		// validate the source host: it must be an ip, "localhost", "%", or an ip prefix pattern ending in %
+		if !(govalidator.IsIP(ip) || ip == "localhost" || ip == "%" || reg.MatchString(ip)) {
+			errMsg = append(errMsg, fmt.Sprintf("%s is not a valid ip", ip))
+			continue
+		}
+		if _, isExists := UniqMap[ip]; isExists {
+			continue
+		}
+		UniqMap[ip] = struct{}{}
+		ips = append(ips, ip)
+	}
+	if len(errMsg) > 0 {
+		return nil, errMsg
+	}
+
+	return ips, nil
+}
+
+// DeduplicationTargetInstance checks that each instance exists and that the ticket type matches the cluster type, and removes duplicates
+func DeduplicationTargetInstance(instances []string, clusterType string) ([]string, []string) {
+	var (
+		errMsg  []string
+		dnsList []string
+		dns     Domain
+		UniqMap = make(map[string]struct{})
+		err     error
+	)
+
+	client := util.NewClientByHosts(viper.GetString("dbmeta"))
+	for _, instance := range instances {
+		instance = strings.Trim(strings.TrimSpace(instance), ".")
+		if !govalidator.IsDNSName(instance) {
+			err = fmt.Errorf("%s is not an valid domain name", instance)
+			errMsg = append(errMsg, err.Error())
+			continue
+		}
+		dns = Domain{EntryName: instance}
+		_, err = GetCluster(client, clusterType, dns)
+		if err != nil {
+			errMsg = append(errMsg, err.Error())
+			continue
+		}
+
+		/* the cluster type reported by dbmeta must use the same names as the cluster type parameter: tendbsingle for single nodes, tendbha for HA clusters
+		if instanceInfo.ClusterType != clusterType {
+			errMsg = append(errMsg, fmt.Sprintf("%s is a %s cluster, not a %s cluster; use a ticket matching the cluster type", instance, instanceInfo.ClusterType, clusterType))
+			continue
+		}
+		*/
+		if _, isExists := UniqMap[instance]; isExists {
+			continue
+		}
+		UniqMap[instance] = struct{}{}
+		dnsList = append(dnsList, instance)
+	}
+	if len(errMsg) > 0 {
+		return nil, errMsg
+	}
+	return dnsList, nil
+}
+
+// MySQLVersionParse parses a mysql version string into a comparable number
+func MySQLVersionParse(mysqlVersion, prefix string) uint64 {
+	var matchExp = ""
+	if prefix == "" {
+		matchExp = "([\\d]+).?([\\d]+)?.?([\\d]+)?"
+	} else {
+		matchExp = fmt.Sprintf("%s-([\\d]+).?([\\d]+)?.?([\\d]+)?", prefix)
+	}
+	re := regexp.MustCompile(matchExp)
+	result := re.FindStringSubmatch(mysqlVersion)
+	// [tmysql-2.10.3 2 10 3]
+	var (
+		total    uint64
+		billion  string
+		thousand string
+		single   string
+		// 2.1.5  => 2 * 1000000 + 1 * 1000 + 5
+	)
+	if len(result) == 0 {
+		return 0
+	} else if len(result) == 4 {
+		billion = result[1]
+		thousand = result[2]
+		single = result[3]
+		if billion != "" {
+			b, err := strconv.ParseUint(billion, 10, 64)
+			if err != nil {
+				slog.Error("msg", err)
+				b = 0
+			}
+			total += b * 1000000
+		}
+		if thousand != "" {
+			t, err := strconv.ParseUint(thousand, 10, 64)
+			if err != nil {
+				slog.Error("msg", err)
+				t = 0
+			}
+			total += t * 1000
+		}
+		if single != "" {
+			s, err := strconv.ParseUint(single, 10, 64)
+			if err != nil {
+				slog.Error("msg", err)
+				s = 0
+			}
+			total += s
+		}
+	} else {
+		// impossible condition,just for safe.
+		return 0
+	}
+	return total
+}
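+
+// Worked examples of the encoding above:
+//
+//	MySQLVersionParse("5.7.20", "")              == 5007020
+//	MySQLVersionParse("8.0.32", "")              == 8000032
+//	MySQLVersionParse("tmysql-2.10.3", "tmysql") == 2010003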
+
+// ContainConnLogDB reports whether the db pattern of a rule covers the connection-log database
+func ContainConnLogDB(dbname string) bool {
+	return mysqlGrantDBToReg(dbname).MatchString(connLogDB)
+}
+
+// AddPrivResult renders the grant result
+func AddPrivResult(errMsg, successMsg Err) error {
+	// everything failed
+	fail := errno.GrantPrivilegesFail.Add("\n" + strings.Join(errMsg.errs, "\n"))
+	// everything succeeded
+	success := errno.GrantPrivilegesSuccess.Add("\n" + strings.Join(successMsg.errs, "\n"))
+	// partially failed
+	subFail := errno.GrantPrivilegesFail.Add("\n" + strings.Join(errMsg.errs, "\n") + "\n\n\n\n" + success.Error())
+
+	if len(errMsg.errs) > 0 {
+		if len(successMsg.errs) > 0 {
+			return subFail
+		}
+		return fail
+	}
+	return success
+}
diff --git a/dbm-services/mysql/db-priv/service/add_priv_object.go b/dbm-services/mysql/db-priv/service/add_priv_object.go
new file mode 100644
index 0000000000..ae932b4c10
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/add_priv_object.go
@@ -0,0 +1,106 @@
+package service
+
+const connLogDB = "infodba_schema"
+const insertConnLogPriv = "grant insert on infodba_schema.conn_log to"
+const setBinlogOff = "SET SESSION sql_log_bin=0;"
+const setBinlogOn = "SET SESSION sql_log_bin=1;"
+const setDdlByCtlOFF = "SET SESSION ddl_execute_by_ctl=off;"
+const setDdlByCtlON = "SET SESSION ddl_execute_by_ctl=on;"
+const flushPriv = "flush privileges;"
+const sep string = "\n        "
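+// Illustrative composition (an assumed convention, not enforced by these constants):
+// privilege statements are typically framed by the session switches above, e.g.
+//   SET SESSION sql_log_bin=0; GRANT ...; flush privileges; SET SESSION sql_log_bin=1;
+// so the grants apply locally without being written to the binlog.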
+
+// PrivTaskPara is the input parameter of AddPrivDryRun and AddPriv.
+type PrivTaskPara struct {
+	BkBizId int64 `json:"bk_biz_id"`
+	// The ticket records the account rules as of submission time; execution uses those recorded rules.
+	ClusterType     string           `json:"cluster_type"`
+	User            string           `json:"user"`
+	Operator        string           `json:"operator"`
+	AccoutRules     []TbAccountRules `json:"account_rules"`
+	SourceIPs       []string         `json:"source_ips"`
+	TargetInstances []string         `json:"target_instances"`
+}
+
+/*
+// Instance is the struct returned by GetClusterInfo
+type Instance struct {
+	Proxies     []Proxy   `json:"proxies"`
+	Storages    []Storage `json:"storages"`
+	ClusterType string    `json:"cluster_type"`
+	BkBizId     int64     `json:"bk_biz_id"`
+	DbModuleId  int64     `json:"db_module_id"`
+	BindTo      string    `json:"bind_to"`
+	BkCloudId   int64     `json:"bk_cloud_id"`
+}
+*/
+
+// Instance is the struct returned by GetCluster.
+type Instance struct {
+	Proxies      []Proxy   `json:"proxies"`
+	Storages     []Storage `json:"storages"`
+	SpiderMaster []Proxy   `json:"spider_master"`
+	SpiderSlave  []Proxy   `json:"spider_slave"`
+	ClusterType  string    `json:"cluster_type"`
+	BkBizId      int64     `json:"bk_biz_id"`
+	DbModuleId   int64     `json:"db_module_id"`
+	BindTo       string    `json:"bind_to"`
+	EntryRole    string    `json:"entry_role"`
+	BkCloudId    int64     `json:"bk_cloud_id"`
+	ImmuteDomain string    `json:"immute_domain"`
+}
+
+// Cluster is the element type of the array returned by GetAllClustersInfo.
+type Cluster struct {
+	DbModuleId   int64     `json:"db_module_id"`
+	BkBizId      string    `json:"bk_biz_id"`
+	SpiderMaster []Proxy   `json:"spider_master"`
+	SpiderSlave  []Proxy   `json:"spider_slave"`
+	SpiderMnt    []Proxy   `json:"spider_mnt"`
+	Proxies      []Proxy   `json:"proxies"`
+	Storages     []Storage `json:"storages"`
+	ClusterType  string    `json:"cluster_type"`
+	ImmuteDomain string    `json:"immute_domain"`
+	BkCloudId    int64     `json:"bk_cloud_id"`
+}
+
+// Proxy describes a proxy instance.
+type Proxy struct {
+	IP        string `json:"ip"`
+	Port      int64  `json:"port"`
+	AdminPort int64  `json:"admin_port"`
+	Status    string `json:"status"`
+}
+
+// Storage describes a mysql backend node.
+type Storage struct {
+	IP           string `json:"ip"`
+	Port         int64  `json:"port"`
+	InstanceRole string `json:"instance_role"`
+	Status       string `json:"status"`
+}
+
+// Domain is the input parameter of GetClusterInfo.
+type Domain struct {
+	EntryName string `json:"entry_name" url:"entry_name"`
+}
+
+// BkBizId is the business id, input parameter of QueryAccountRule and GetAllClustersInfo.
+type BkBizId struct {
+	BkBizId int64 `json:"bk_biz_id" url:"bk_biz_id"`
+}
+
+// AddPrivWithoutAccountRule is the input parameter of the AddPrivWithoutAccountRule function.
+type AddPrivWithoutAccountRule struct {
+	BkBizId    int64    `json:"bk_biz_id"`
+	User       string   `json:"user"`
+	Hosts      []string `json:"hosts"`
+	Psw        string   `json:"psw"`
+	Dbname     string   `json:"dbname"`
+	Priv       string   `json:"priv"`
+	DmlDdlPriv string   `json:"dml_ddl_priv"`
+	GlobalPriv string   `json:"global_priv"`
+	Operator   string   `json:"operator"`
+	Address    string   `json:"address"`
+	BkCloudId  *int64   `json:"bk_cloud_id"`
+	SpiderFlag bool     `json:"spider_flag"`
+}
diff --git a/dbm-services/mysql/db-priv/service/clone_client_priv.go b/dbm-services/mysql/db-priv/service/clone_client_priv.go
new file mode 100644
index 0000000000..76becf1728
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/clone_client_priv.go
@@ -0,0 +1,144 @@
+package service
+
+import (
+	"dbm-services/mysql/priv-service/errno"
+	"dbm-services/mysql/priv-service/util"
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/spf13/viper"
+)
+
+// CloneClientPrivDryRun pre-checks a client privilege clone request.
+func (m *CloneClientPrivParaList) CloneClientPrivDryRun() error {
+
+	var errMsg []string
+	var errMsgTemp []string
+	if m.BkBizId == 0 {
+		return errno.BkBizIdIsEmpty
+	}
+
+	var UniqMap = make(map[string]struct{})
+	for index, slaveRecord := range m.CloneClientPrivRecords {
+		errMsgTemp = validateIP(slaveRecord.SourceIp, slaveRecord.TargetIp, slaveRecord.BkCloudId)
+		if len(errMsgTemp) > 0 {
+			msg := fmt.Sprintf("line %d: input is invalid, reason: %s", index+1, strings.Join(errMsgTemp, ", "))
+			errMsg = append(errMsg, msg)
+		}
+
+		tempStr := slaveRecord.String()
+		if _, isExists := UniqMap[tempStr]; isExists {
+			msg := fmt.Sprintf("line %d: record is duplicate", index+1)
+			errMsg = append(errMsg, msg)
+			continue
+		}
+		UniqMap[tempStr] = struct{}{}
+	}
+
+	if len(errMsg) > 0 {
+		return errno.ClonePrivilegesCheckFail.Add("\n" + strings.Join(errMsg, "\n"))
+	}
+
+	return nil
+}
+
+// CloneClientPriv clones client privileges to new client ips.
+func (m *CloneClientPrivPara) CloneClientPriv(jsonPara string) error {
+	var errMsg Err
+	wg := sync.WaitGroup{}
+	tokenBucket := make(chan int, 10)
+
+	if m.BkBizId == 0 {
+		return errno.BkBizIdIsEmpty
+	}
+	if m.BkCloudId == nil {
+		return errno.CloudIdRequired
+	}
+
+	AddPrivLog(PrivLog{BkBizId: m.BkBizId, Operator: m.Operator, Para: jsonPara, Time: util.NowTimeFormat()})
+
+	client := util.NewClientByHosts(viper.GetString("dbmeta"))
+	resp, errOuter := GetAllClustersInfo(client, BkBizId{m.BkBizId})
+	if errOuter != nil {
+		return errOuter
+	}
+	var clusters []Cluster
+	for _, item := range resp {
+		if (item.ClusterType == tendbha || item.ClusterType == tendbsingle || item.ClusterType == tendbcluster) &&
+			item.BkCloudId == *m.BkCloudId {
+			clusters = append(clusters, item)
+		}
+	}
+	errMsg.errs = validateIP(m.SourceIp, m.TargetIp, m.BkCloudId)
+	if len(errMsg.errs) > 0 {
+		return errno.ClonePrivilegesFail.Add("\n" + strings.Join(errMsg.errs, "\n"))
+	}
+	// Fetch all clusters under the business; in parallel, fetch the grant statements for
+	// the old client, replace the old client ip with the new ones, and import the result.
+	// When a goroutine fails, its error is appended to errMsg.errs. The main goroutine
+	// calls wg.Wait() and returns only after every goroutine has finished.
+
+	// one goroutine per cluster
+	for _, item := range clusters {
+		wg.Add(1)
+		tokenBucket <- 0
+		go func(item Cluster) {
+			defer func() {
+				<-tokenBucket
+				wg.Done()
+			}()
+
+			if item.ClusterType == tendbha || item.ClusterType == tendbsingle {
+				for _, storage := range item.Storages {
+					address := fmt.Sprintf("%s:%d", storage.IP, storage.Port)
+					userGrants, err := GetRemotePrivilege(address, m.SourceIp, item.BkCloudId, machineTypeBackend)
+					if err != nil {
+						AddError(&errMsg, address, err)
+						return
+					}
+					userGrants = ReplaceHostInMysqlGrants(userGrants, m.SourceIp, m.TargetIp)
+					err = ImportMysqlPrivileges(userGrants, address, item.BkCloudId)
+					if err != nil {
+						AddError(&errMsg, address, err)
+						return
+					}
+				}
+			} else {
+				spiders := append(append(item.SpiderMaster, item.SpiderSlave...), item.SpiderMnt...)
+				for _, spider := range spiders {
+					address := fmt.Sprintf("%s:%d", spider.IP, spider.Port)
+					userGrants, err := GetRemotePrivilege(address, m.SourceIp, item.BkCloudId, machineTypeSpider)
+					if err != nil {
+						AddError(&errMsg, address, err)
+						return
+					}
+					userGrants = ReplaceHostInMysqlGrants(userGrants, m.SourceIp, m.TargetIp)
+					err = ImportMysqlPrivileges(userGrants, address, item.BkCloudId)
+					if err != nil {
+						AddError(&errMsg, address, err)
+						return
+					}
+				}
+			}
+			if item.ClusterType == tendbha {
+				for _, proxy := range item.Proxies {
+					address := fmt.Sprintf("%s:%d", proxy.IP, proxy.AdminPort)
+					proxyGrants, err := GetProxyPrivilege(address, m.SourceIp, item.BkCloudId)
+					if err != nil {
+						AddError(&errMsg, address, err)
+					}
+					proxyGrants = ReplaceHostInProxyGrants(proxyGrants, m.SourceIp, m.TargetIp)
+					err = ImportProxyPrivileges(proxyGrants, address, item.BkCloudId)
+					if err != nil {
+						AddError(&errMsg, address, err)
+					}
+				}
+			}
+		}(item)
+	}
+	wg.Wait()
+	close(tokenBucket)
+	if len(errMsg.errs) > 0 {
+		return errno.ClonePrivilegesFail.Add("\n" + strings.Join(errMsg.errs, "\n"))
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-priv/service/clone_client_priv_base_func.go b/dbm-services/mysql/db-priv/service/clone_client_priv_base_func.go
new file mode 100644
index 0000000000..7c2b537112
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/clone_client_priv_base_func.go
@@ -0,0 +1,162 @@
+package service
+
+import (
+	"dbm-services/mysql/priv-service/errno"
+	"fmt"
+	"regexp"
+	"strings"
+	"sync"
+
+	"github.com/asaskevich/govalidator"
+)
+
+// ReplaceHostInMysqlGrants replaces the host in mysql grant statements.
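+// Example (illustrative): with sourceIp "1.1.1.1" and targetIp ["2.2.2.2", "3.3.3.3"],
+// "GRANT SELECT ON db.* TO 'u'@'1.1.1.1'" expands to two statements, one per target ip.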
+func ReplaceHostInMysqlGrants(userGrants []UserGrant, sourceIp string, targetIp []string) []UserGrant {
+	newUserGrants := NewUserGrants{}
+	wg := sync.WaitGroup{}
+
+	regForCreateUser := regexp.MustCompile(`(?i)^\s*CREATE USER `) // rewrite CREATE USER as CREATE USER IF NOT EXISTS
+
+	for _, row := range userGrants {
+		wg.Add(1)
+		go func(row UserGrant) {
+			defer wg.Done()
+			var tmp []string
+			var grantTmp string
+			for _, grant := range row.Grants {
+				if regForCreateUser.MatchString(grant) {
+					grant = regForCreateUser.ReplaceAllString(grant, `CREATE USER /*!50706 IF NOT EXISTS */ `)
+				}
+				for _, ip := range targetIp {
+					grantTmp = strings.ReplaceAll(grant, sourceIp, ip)
+					tmp = append(tmp, grantTmp)
+				}
+			}
+			row.Grants = tmp
+			newUserGrants.mu.Lock()
+			newUserGrants.Data = append(newUserGrants.Data, row)
+			newUserGrants.mu.Unlock()
+		}(row)
+	}
+	wg.Wait()
+	return newUserGrants.Data
+}
+
+// ReplaceHostInProxyGrants replaces the host in proxy whitelist statements.
+func ReplaceHostInProxyGrants(grants []string, sourceIp string, targetIp []string) []string {
+	var newGrants []string
+	var grantTmp string
+	for _, item := range grants {
+		for _, ip := range targetIp {
+			grantTmp = strings.ReplaceAll(item, sourceIp, ip)
+			newGrants = append(newGrants, grantTmp)
+		}
+	}
+	return newGrants
+}
+
+// GetProxyPrivilege fetches the proxy whitelist.
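+// Example (illustrative): a user row "u1@1.1.1.1" becomes the proxy admin command
+// refresh_users('u1@1.1.1.1','+'), which re-adds the entry to the whitelist.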
+func GetProxyPrivilege(address string, host string, bkCloudId int64) ([]string, error) {
+	var grants []string
+	sql := "select * from user;"
+	var queryRequest = QueryRequest{[]string{address}, []string{sql}, true, 30, bkCloudId}
+	output, err := OneAddressExecuteProxySql(queryRequest)
+	if err != nil {
+		return nil, errno.ClonePrivilegesFail.Add(fmt.Sprintf(
+			"execute (%s) in bk_cloud_id (%d) instance (%s) get an error:%s", sql, bkCloudId, address,
+			err.Error()))
+	}
+	usersResult := output.CmdResults[0].TableData
+	if host == "" {
+		for _, user := range usersResult {
+			addUserSQL := fmt.Sprintf("refresh_users('%s','+')", user["user@ip"].(string))
+			grants = append(grants, addUserSQL)
+		}
+	} else {
+		re := regexp.MustCompile(fmt.Sprintf(".*@%s$", strings.ReplaceAll(host, ".", "\\.")))
+		for _, user := range usersResult {
+			tmpUser := user["user@ip"].(string)
+			if re.MatchString(tmpUser) {
+				addUserSQL := fmt.Sprintf("refresh_users('%s','+')", tmpUser)
+				grants = append(grants, addUserSQL)
+			}
+		}
+
+	}
+	return grants, nil
+}
+
+// ImportProxyPrivileges imports proxy whitelist entries.
+func ImportProxyPrivileges(grants []string, address string, bkCloudId int64) error {
+	var errMsg Err
+	wg := sync.WaitGroup{}
+	tokenBucket := make(chan int, 10)
+
+	for _, item := range grants {
+		wg.Add(1)
+		tokenBucket <- 0
+		go func(item string) {
+			defer func() {
+				<-tokenBucket
+				wg.Done()
+			}()
+			queryRequest := QueryRequest{[]string{address}, []string{item}, true, 30, bkCloudId}
+			_, err := OneAddressExecuteProxySql(queryRequest)
+			if err != nil {
+				AddError(&errMsg, address, err)
+				return
+			}
+		}(item)
+	}
+	wg.Wait()
+	close(tokenBucket)
+	if len(errMsg.errs) > 0 {
+		return errno.ClonePrivilegesFail.Add(strings.Join(errMsg.errs, "\n"))
+	}
+	return nil
+}
+
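+// validateIP checks that the source ip and target ips are valid and distinct,
+// and that bk_cloud_id is provided.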
+func validateIP(sourceIp string, targetIp []string, bkCloudId *int64) []string {
+	var errMsg []string
+	sourceIp = strings.TrimSpace(sourceIp)
+
+	if bkCloudId == nil {
+		errMsg = append(errMsg, errno.CloudIdRequired.Error())
+	}
+
+	// check that the ips are well-formed
+	result := govalidator.IsIP(sourceIp)
+	if !result {
+		errMsg = append(errMsg, fmt.Sprintf("Source ip (%s) is not a valid ip", sourceIp))
+	}
+
+	for _, ip := range targetIp {
+		ip = strings.TrimSpace(ip)
+		result = govalidator.IsIP(ip)
+		if !result {
+			errMsg = append(errMsg, fmt.Sprintf("Target ip (%s) is not a valid ip", targetIp))
+		}
+		if sourceIp == ip {
+			errMsg = append(errMsg, "Source ip and target ip are the same one")
+		}
+	}
+
+	if len(errMsg) > 0 {
+		return errMsg
+	}
+	return nil
+}
+
+// AddError appends an error message together with the instance address.
+func AddError(errMsg *Err, address string, err error) {
+	errMsg.mu.Lock()
+	errMsg.errs = append(errMsg.errs, address, err.Error())
+	errMsg.mu.Unlock()
+}
+
+// AddErrorOnly appends an error message.
+func AddErrorOnly(errMsg *Err, err error) {
+	errMsg.mu.Lock()
+	errMsg.errs = append(errMsg.errs, err.Error())
+	errMsg.mu.Unlock()
+}
diff --git a/dbm-services/mysql/db-priv/service/clone_client_priv_object.go b/dbm-services/mysql/db-priv/service/clone_client_priv_object.go
new file mode 100644
index 0000000000..cdfed60412
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/clone_client_priv_object.go
@@ -0,0 +1,38 @@
+package service
+
+import (
+	"strings"
+	"sync"
+)
+
+// CloneClientPrivParaList is the input parameter of CloneClientPrivDryRun.
+type CloneClientPrivParaList struct {
+	BkBizId                int64                 `json:"bk_biz_id"`
+	CloneClientPrivRecords []CloneClientPrivPara `json:"clone_client_priv_records"`
+}
+
+// CloneClientPrivPara is the input parameter of CloneClientPriv.
+type CloneClientPrivPara struct {
+	BkBizId   int64    `json:"bk_biz_id"`
+	Operator  string   `json:"operator"`
+	SourceIp  string   `json:"source_ip"`
+	TargetIp  []string `json:"target_ip"`
+	BkCloudId *int64   `json:"bk_cloud_id"`
+}
+
+// String renders CloneClientPrivPara for deduplication and logging.
+func (m CloneClientPrivPara) String() string {
+	return m.SourceIp + "|||" + strings.Join(m.TargetIp, "|||")
+}
+
+// UserGrant holds a user@host account and its grant statements.
+type UserGrant struct {
+	UserHost string   `json:"user_host"`
+	Grants   []string `json:"grants"`
+}
+
+// Err is a concurrency-safe list of error messages.
+type Err struct {
+	mu   sync.RWMutex
+	errs []string
+}
diff --git a/dbm-services/mysql/db-priv/service/clone_instance_priv.go b/dbm-services/mysql/db-priv/service/clone_instance_priv.go
new file mode 100644
index 0000000000..44c989978b
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/clone_instance_priv.go
@@ -0,0 +1,106 @@
+package service
+
+import (
+	"dbm-services/mysql/priv-service/errno"
+	"dbm-services/mysql/priv-service/util"
+	"fmt"
+	"strings"
+)
+
+// CloneInstancePrivDryRun pre-checks an instance privilege clone request.
+func (m *CloneInstancePrivParaList) CloneInstancePrivDryRun() error {
+
+	var errMsg []string
+	var UniqMap = make(map[string]struct{})
+
+	if m.BkBizId == 0 {
+		return errno.BkBizIdIsEmpty
+	}
+	for index, slaveRecord := range m.CloneInstancePrivRecords {
+		_, err := ValidateInstancePair(slaveRecord.Source, slaveRecord.Target)
+		if err != nil {
+			msg := fmt.Sprintf("line %d: input is invalid, reason: %s", index+1, err)
+			errMsg = append(errMsg, msg)
+		}
+		tempStr := slaveRecord.String()
+		if _, isExists := UniqMap[tempStr]; isExists {
+			msg := fmt.Sprintf("line %d: record is duplicate", index+1)
+			errMsg = append(errMsg, msg)
+			continue
+		}
+		UniqMap[tempStr] = struct{}{}
+	}
+
+	if len(errMsg) > 0 {
+		return errno.ClonePrivilegesCheckFail.Add("\n" + strings.Join(errMsg, "\n"))
+	}
+
+	return nil
+}
+
+// CloneInstancePriv clones privileges from one instance to another.
+func (m *CloneInstancePrivPara) CloneInstancePriv(jsonPara string) error {
+
+	AddPrivLog(PrivLog{BkBizId: m.BkBizId, Operator: m.Operator, Para: jsonPara, Time: util.NowTimeFormat()})
+
+	if m.BkBizId == 0 {
+		return errno.BkBizIdIsEmpty
+	}
+	if m.BkCloudId == nil {
+		return errno.CloudIdRequired
+	}
+	m.Source.Address = strings.TrimSpace(m.Source.Address)
+	m.Target.Address = strings.TrimSpace(m.Target.Address)
+
+	instanceType, errOuter := ValidateInstancePair(m.Source, m.Target)
+	if errOuter != nil {
+		return errno.ClonePrivilegesFail.Add("\n" + errOuter.Error())
+	}
+
+	// for a standalone cluster the instanceType here is "single"
+	if instanceType == machineTypeSingle || instanceType == machineTypeBackend || instanceType == machineTypeSpider {
+		userGrants, err := GetRemotePrivilege(m.Source.Address, "", *m.BkCloudId, instanceType)
+		if err != nil {
+			return err
+		} else if len(userGrants) == 0 {
+			return errno.NoPrivilegesNothingToDo
+		}
+		userGrants, err = m.DealWithPrivileges(userGrants, instanceType)
+		if err != nil {
+			return err
+		} else if len(userGrants) == 0 {
+			return errno.NoPrivilegesNothingToDo
+		}
+		if instanceType != machineTypeSpider {
+			err = CheckGrantInMySqlVersion(userGrants, m.Target.Address, *m.BkCloudId)
+			if err != nil {
+				return err
+			}
+		}
+		err = ImportMysqlPrivileges(userGrants, m.Target.Address, *m.BkCloudId)
+		if err != nil {
+			return err
+		}
+	} else if instanceType == machineTypeProxy {
+		var err error
+		m.Source.Address, err = changeToProxyAdminPort(m.Source.Address)
+		if err != nil {
+			return errno.ClonePrivilegesFail.Add(err.Error())
+		}
+		m.Target.Address, err = changeToProxyAdminPort(m.Target.Address)
+		if err != nil {
+			return errno.ClonePrivilegesFail.Add(err.Error())
+		}
+		proxyGrants, err := GetProxyPrivilege(m.Source.Address, "", *m.BkCloudId)
+		if err != nil {
+			return err
+		} else if len(proxyGrants) == 0 {
+			return errno.NoPrivilegesNothingToDo
+		}
+		err = ImportProxyPrivileges(proxyGrants, m.Target.Address, *m.BkCloudId)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-priv/service/clone_instance_priv_base_func.go b/dbm-services/mysql/db-priv/service/clone_instance_priv_base_func.go
new file mode 100644
index 0000000000..7dde3f7880
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/clone_instance_priv_base_func.go
@@ -0,0 +1,555 @@
+package service
+
+import (
+	"dbm-services/mysql/priv-service/util"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/pingcap/parser"
+	"github.com/pingcap/parser/ast"
+	_ "github.com/pingcap/tidb/types/parser_driver" // parser_driver TODO
+	"golang.org/x/exp/slog"
+)
+
+// Enter implements ast.Visitor; it extracts user, host, password and grant text from a GrantStmt.
+func (v *visitor) Enter(node ast.Node) (out ast.Node, skipChildren bool) {
+	if grantstmt, ok := node.(*ast.GrantStmt); ok {
+		v.secText = grantstmt.SecureText()
+		v.username = grantstmt.Users[0].User.Username
+		v.secPassword, v.legal = grantstmt.Users[0].EncodedPassword()
+		v.withgrant = grantstmt.WithGrant
+		v.hostname = grantstmt.Users[0].User.Hostname
+	}
+	return node, false
+}
+
+// Leave implements ast.Visitor.
+func (v *visitor) Leave(node ast.Node) (out ast.Node, ok bool) {
+	return node, true
+}
+
+// GetRemotePrivilege fetches grant statements from a mysql instance.
+func GetRemotePrivilege(address string, host string, bkCloudId int64, instanceType string) ([]UserGrant, error) {
+	var version string
+	var errOuter error
+	var repsOuter oneAddressResult
+	// ResultTemp collects grant statements concurrently
+	type ResultTemp struct {
+		mu         sync.RWMutex
+		userGrants []UserGrant
+	}
+	var resultTemp ResultTemp
+	needShowCreateUser := false
+	wg := sync.WaitGroup{}
+	finishChan := make(chan bool, 1)
+	errorChan := make(chan error, 1)
+	tokenBucket := make(chan int, 10)
+
+	version, errOuter = GetMySQLVersion(address, bkCloudId)
+	if errOuter != nil {
+		return nil, errOuter
+	}
+	if MySQLVersionParse(version, "") > MySQLVersionParse("5.7.8", "") &&
+		(instanceType == machineTypeBackend || instanceType == machineTypeSingle) {
+		needShowCreateUser = true
+	}
+	selectUser := `select user,host from mysql.user`
+	if host != "" {
+		selectUser += fmt.Sprintf(` where host='%s'`, host)
+	}
+	queryRequestOuter := QueryRequest{[]string{address}, []string{selectUser}, true, 30, bkCloudId}
+	repsOuter, errOuter = OneAddressExecuteSql(queryRequestOuter)
+	if errOuter != nil {
+		return nil, errOuter
+	}
+	flush := UserGrant{"", []string{flushPriv}}
+	resultTemp.userGrants = append([]UserGrant{flush}, resultTemp.userGrants...)
+
+	for _, row := range repsOuter.CmdResults[0].TableData {
+		if row["user"] == "" || row["host"] == "" {
+			return nil, fmt.Errorf("execute %s in %s ,user or host is null", selectUser, address)
+		}
+		userHost := fmt.Sprintf(`'%s'@'%s'`, row["user"], row["host"])
+		wg.Add(1)
+		tokenBucket <- 0 // 在这里操作 token 可以防止过多的协程启动但处于等待 token 的阻塞状态
+		go func(userHost string, needShowCreateUser bool) {
+			defer func() {
+				<-tokenBucket
+				wg.Done()
+			}()
+			var Grants []string
+			var err error
+			err = GetUserGantSql(needShowCreateUser, userHost, address, &Grants, bkCloudId)
+			if err != nil {
+				errorChan <- err
+				return
+			}
+			resultTemp.mu.Lock()
+			resultTemp.userGrants = append(resultTemp.userGrants, UserGrant{userHost, Grants})
+			resultTemp.mu.Unlock()
+		}(userHost, needShowCreateUser)
+	}
+	go func() {
+		wg.Wait()
+		close(finishChan)
+		close(tokenBucket)
+	}()
+
+	select {
+	case <-finishChan:
+	case err := <-errorChan:
+		return nil, err
+	}
+	return append(resultTemp.userGrants, flush), nil
+}
+
+// GetUserGantSql queries the user creation and grant statements.
+func GetUserGantSql(needShowCreateUser bool, userHost, address string, grants *[]string, bkCloudId int64) error {
+	var (
+		sql      string
+		err      error
+		hasValue bool
+	)
+	if needShowCreateUser {
+		sql = fmt.Sprintf("show create user %s;", userHost)
+		err, hasValue = GetGrantResponse(sql, address, grants, bkCloudId)
+		if err != nil {
+			return err
+		} else if !hasValue {
+			return fmt.Errorf("execute (%s) return nothing", sql)
+		}
+	}
+	sql = fmt.Sprintf("show grants for %s ", userHost)
+	err, _ = GetGrantResponse(sql, address, grants, bkCloudId)
+	if err != nil {
+		return err
+	}
+	if len(*grants) == 0 {
+		return fmt.Errorf("show grants in %s fail,query return nothing", userHost)
+	}
+	return nil
+}
+
+// GetGrantResponse executes a sql statement and collects the result.
+func GetGrantResponse(sql, address string, grants *[]string, bkCloudId int64) (error, bool) {
+	hasValue := false
+	queryRequest := QueryRequest{[]string{address}, []string{sql}, true, 60, bkCloudId}
+	reps, err := OneAddressExecuteSql(queryRequest)
+	if err != nil {
+		return fmt.Errorf("execute (%s) fail, error:%s", sql, err.Error()), hasValue
+	}
+
+	if len(reps.CmdResults[0].TableData) > 0 {
+		for _, item := range reps.CmdResults[0].TableData {
+			for _, grant := range item {
+				if grant != nil {
+					*grants = append(*grants, grant.(string))
+				} else {
+					return fmt.Errorf("execute (%s), content of return is null", sql), hasValue
+				}
+			}
+		}
+	} else {
+		return nil, hasValue
+	}
+	hasValue = true
+	return nil, hasValue
+}
+
+// DealWithPrivileges rewrites grant statements for cross-version compatibility.
+func (m *CloneInstancePrivPara) DealWithPrivileges(userGrants []UserGrant, instanceType string) ([]UserGrant, error) {
+	newUserGrants := NewUserGrants{}
+	m.Source.Address = strings.TrimSpace(m.Source.Address)
+	m.Target.Address = strings.TrimSpace(m.Target.Address)
+	sourceVersion, err := GetMySQLVersion(m.Source.Address, *m.BkCloudId)
+	if err != nil {
+		return newUserGrants.Data, err
+	}
+	targetVersion, err := GetMySQLVersion(m.Target.Address, *m.BkCloudId)
+	if err != nil {
+		return newUserGrants.Data, err
+	}
+	sourceIp := strings.Split(m.Source.Address, ":")[0]
+	targetIp := strings.Split(m.Target.Address, ":")[0]
+	var mysql5Tomysql8, mysql80Tomysql57, mysql57Tomysql56 bool
+	// mysql 8.0 cloned to mysql 5.7; once newer versions such as mysql 8.1 are validated, change 8000 to 8001 accordingly
+
+	if instanceType == machineTypeBackend || instanceType == machineTypeSingle {
+		if MySQLVersionParse(sourceVersion, "")/1000 == 8000 && MySQLVersionParse(targetVersion, "")/1000 == 5007 {
+			mysql80Tomysql57 = true
+		} else if MySQLVersionParse(sourceVersion, "")/1000 == 5007 && MySQLVersionParse(targetVersion, "")/1000 == 5006 {
+			mysql57Tomysql56 = true
+		} else if MySQLVersionParse(sourceVersion, "")/1000 < 8000 && MySQLVersionParse(targetVersion, "")/1000 >= 8000 {
+			mysql5Tomysql8 = true
+		}
+	}
+
+	wg := sync.WaitGroup{}
+	errorChan := make(chan error, 1)
+	finishChan := make(chan bool, 1)
+	var userExcluded = []string{"ADMIN", "mysql.session", "mysql.sys", "mysql.infoschema"} // Delete system user
+	for _, row := range userGrants {
+		wg.Add(1)
+		go func(row UserGrant, targetIp, sourceIp string) {
+			defer wg.Done()
+			defer func() {
+				if r := recover(); r != nil {
+					slog.Info("(Merge tree)panic error:%v", r)
+					errorChan <- fmt.Errorf("(Merge tree)panic error:%v", r)
+					return
+				}
+			}()
+			for _, user := range userExcluded { // delete system users
+				if regexp.MustCompile(user).MatchString(row.UserHost) {
+					return
+				}
+			}
+			reg := regexp.MustCompile(fmt.Sprintf(`'%s'`, targetIp)) // delete the local ip user
+			if reg.MatchString(row.UserHost) {
+				return
+			}
+			reg = regexp.MustCompile(sourceIp) // rewrite the source ip user to the local ip
+			if reg.MatchString(row.UserHost) {
+				row.UserHost = reg.ReplaceAllString(row.UserHost, targetIp)
+				var tmp []string
+				for _, str := range row.Grants {
+					tmp = append(tmp, reg.ReplaceAllString(str, targetIp))
+				}
+				row.Grants = tmp
+			}
+			errInner := DiffVersionConvert(&row.Grants, mysql80Tomysql57, mysql57Tomysql56, mysql5Tomysql8)
+			if errInner != nil {
+				errorChan <- errInner
+				return
+			}
+			newUserGrants.mu.Lock()
+			newUserGrants.Data = append(newUserGrants.Data, row)
+			newUserGrants.mu.Unlock()
+			return
+		}(row, targetIp, sourceIp)
+	}
+	go func() {
+		wg.Wait()
+		close(finishChan)
+	}()
+	select {
+	case <-finishChan:
+	case err := <-errorChan:
+		return nil, err
+	}
+	return newUserGrants.Data, nil
+}
+
+// DiffVersionConvert rewrites grant statements when cloning privileges across versions.
+func DiffVersionConvert(grants *[]string, mysql80Tomysql57, mysql57Tomysql56, mysql5Tomysql8 bool) error {
+	var err error
+	var tmp []string
+	regForCreateUser := regexp.MustCompile(
+		`(?i)^\s*CREATE USER `,
+	) // rewrite CREATE USER as CREATE USER IF NOT EXISTS
+	regForPasswordExpired := regexp.MustCompile(
+		`(?i)\s*REQUIRE NONE PASSWORD EXPIRE DEFAULT ACCOUNT UNLOCK`,
+	) // 5.7->5.6去掉
+
+	switch {
+	case mysql80Tomysql57:
+		err = PrivMysql80ToMysql57(grants)
+		if err != nil {
+			return err
+		}
+	case mysql57Tomysql56:
+		for _, str := range *grants {
+			if regForPasswordExpired.MatchString(str) {
+				str = regForPasswordExpired.ReplaceAllString(str, ``)
+			}
+			tmp = append(tmp, str)
+		}
+		*grants = tmp
+	case mysql5Tomysql8:
+		err = PrivMysql5ToMysql8(grants)
+		if err != nil {
+			return err
+		}
+	default:
+		for _, str := range *grants {
+			if regForCreateUser.MatchString(str) {
+				str = regForCreateUser.ReplaceAllString(str, `CREATE USER /*!50706 IF NOT EXISTS */ `)
+			}
+			tmp = append(tmp, str)
+		}
+		*grants = tmp
+	}
+	return nil
+}
+
+// PrivMysql5ToMysql8 makes MySQL 5.x grant statements compatible with MySQL 8.0.
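+// Example (illustrative): a 5.x statement such as
+//   GRANT SELECT ON db.* TO 'u'@'%' IDENTIFIED BY PASSWORD '*HASH'
+// is split into CREATE USER IF NOT EXISTS ... IDENTIFIED WITH mysql_native_password
+// AS '*HASH' plus the bare GRANT, since 8.0 no longer accepts IDENTIFIED BY in GRANT.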
+func PrivMysql5ToMysql8(grants *[]string) error {
+	var tmp []string
+	regForCreateUser := regexp.MustCompile(
+		`(?i)^\s*CREATE USER `,
+	) // rewrite CREATE USER as CREATE USER IF NOT EXISTS
+	regForPlainText := regexp.MustCompile(`(?i)\s+IDENTIFIED\s+BY\s+`)
+
+	for _, item := range *grants {
+		if regForCreateUser.MatchString(item) {
+			item = regForCreateUser.ReplaceAllString(item, `CREATE USER /*!50706 IF NOT EXISTS */ `)
+		}
+		if regForPlainText.MatchString(item) {
+			sqlParser := parser.New()
+			stmtNodes, warns, err := sqlParser.Parse(item, "", "")
+			if err != nil {
+				return fmt.Errorf("parse sql failed, sql:%s, error:%s", item, err.Error())
+			}
+			if len(warns) > 0 {
+				slog.Warn("some warnings happend", warns)
+			}
+			for _, stmtNode := range stmtNodes {
+				v := visitor{}
+				stmtNode.Accept(&v)
+				if !v.legal {
+					return fmt.Errorf("parse pass,but sql format error,sql:%s", item)
+				}
+				// statements that carry a password need a separate CREATE USER
+				if v.legal && len(v.secPassword) > 0 {
+					tmp = append(
+						tmp, fmt.Sprintf(
+							"CREATE USER IF NOT EXISTS '%s'@'%s' IDENTIFIED WITH mysql_native_password AS '%s';",
+							v.username,
+							v.hostname,
+							v.secPassword,
+						),
+					)
+				}
+
+				if v.withgrant {
+					v.secText += " WITH GRANT OPTION"
+				}
+				tmp = append(tmp, v.secText+";")
+			}
+		} else {
+			tmp = append(tmp, item)
+		}
+	}
+	*grants = tmp
+	return nil
+}
+
+// PrivMysql80ToMysql57 makes MySQL 8.0 grant statements compatible with MySQL 5.7.
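+// Example (illustrative): in "GRANT ... CREATE ROLE, DROP ROLE ON *.* TO 'u'@'%'",
+// CREATE ROLE and DROP ROLE are each rewritten to CREATE USER, which subsumes them in
+// 5.7; statements granting only 8.0 dynamic privileges (BACKUP_ADMIN and the like) are
+// skipped, since 5.7 has no equivalent.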
+func PrivMysql80ToMysql57(grants *[]string) error {
+	var tmp []string
+	var dynamicGrantsForMySQL8 = []string{
+		"APPLICATION_PASSWORD_ADMIN",
+		"AUDIT_ADMIN",
+		"BACKUP_ADMIN",
+		"BINLOG_ADMIN",
+		"BINLOG_ENCRYPTION_ADMIN",
+		"CLONE_ADMIN",
+		"SERVICE_CONNECTION_ADMIN",
+		"CONNECTION_ADMIN",
+		"ENCRYPTION_KEY_ADMIN",
+		"GROUP_REPLICATION_ADMIN",
+		"INNODB_REDO_LOG_ARCHIVE",
+		"PERSIST_RO_VARIABLES_ADMIN",
+		"REPLICATION_APPLIER",
+		"REPLICATION_SLAVE_ADMIN",
+		"RESOURCE_GROUP_ADMIN",
+		"RESOURCE_GROUP_USER",
+		"ROLE_ADMIN",
+		"SESSION_VARIABLES_ADMIN",
+		"SET_USER_ID",
+		"SYSTEM_USER",
+		"SYSTEM_VARIABLES_ADMIN",
+		"TABLE_ENCRYPTION_ADMIN",
+		"XA_RECOVER_ADMIN",
+	}
+	var staticGrantsForMySQL8 = []string{"CREATE ROLE", "DROP ROLE"}
+
+	regForPasswordOption := regexp.MustCompile(
+		`(?i)\s*PASSWORD HISTORY DEFAULT PASSWORD REUSE INTERVAL DEFAULT PASSWORD REQUIRE CURRENT DEFAULT`,
+	) // 5.8->5.7去掉
+	regForCreateUser := regexp.MustCompile(
+		`(?i)^\s*CREATE USER `,
+	) // rewrite CREATE USER as CREATE USER IF NOT EXISTS
+	regForPasswordPlugin := regexp.MustCompile(
+		`'caching_sha2_password'`,
+	) // reject 8.0 accounts that use caching_sha2_password as the authentication plugin
+
+	for _, item := range *grants {
+		if regForPasswordOption.MatchString(item) {
+			item = regForPasswordOption.ReplaceAllString(item, ``)
+		}
+
+		dynamicGrantsFlag := false
+		for _, dynamic := range dynamicGrantsForMySQL8 {
+			/* Exclude 8.0 dynamic privileges.
+			In 8.0, after "grant all privileges on *.* to xxx", "show grants for xxx" returns:
+			(1) GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE,
+			REFERENCES, INDEX, ALTER, SHOW DATABASES, SUPER, CREATE TEMPORARY TABLES, LOCK TABLES,
+			EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE,
+			ALTER ROUTINE, CREATE USER, EVENT, TRIGGER, CREATE TABLESPACE, CREATE ROLE,
+			DROP ROLE ON *.* TO xxx
+			(2) GRANT APPLICATION_PASSWORD_ADMIN,AUDIT_ADMIN,BACKUP_ADMIN,BINLOG_ADMIN,
+			BINLOG_ENCRYPTION_ADMIN,CLONE_ADMIN,CONNECTION_ADMIN,ENCRYPTION_KEY_ADMIN,
+			GROUP_REPLICATION_ADMIN,INNODB_REDO_LOG_ARCHIVE,PERSIST_RO_VARIABLES_ADMIN,
+			REPLICATION_APPLIER,REPLICATION_SLAVE_ADMIN,RESOURCE_GROUP_ADMIN,RESOURCE_GROUP_USER,
+			ROLE_ADMIN,SERVICE_CONNECTION_ADMIN,SESSION_VARIABLES_ADMIN,SET_USER_ID,SYSTEM_USER,
+			SYSTEM_VARIABLES_ADMIN,TABLE_ENCRYPTION_ADMIN,XA_RECOVER_ADMIN ON *.* TO xxx;
+			Among the static privileges in (1), 8.0 adds CREATE ROLE and DROP ROLE over 5.7; 8.0 has dynamic privileges, 5.7 has none.
+			*/
+			if regexp.MustCompile(dynamic).MatchString(item) {
+				slog.Info("dynamicGrantExist", "sql", item)
+				dynamicGrantsFlag = true
+				break
+			}
+		}
+		if dynamicGrantsFlag {
+			// skip only this dynamic-privilege statement and keep converting the rest
+			continue
+		}
+
+		for _, _static := range staticGrantsForMySQL8 {
+			if regexp.MustCompile(_static).MatchString(item) {
+				// 8.0 CREATE ROLE, DROP ROLE 替换为CREATE USER
+
+				// 5.7 CREATE USER: Enables use of the ALTER USER, CREATE USER, DROP USER, RENAME USER,
+				// and REVOKE ALL PRIVILEGES
+
+				// 8.0 CREATE USER: Enables use of the ALTER USER, CREATE ROLE, CREATE USER, DROP ROLE,
+				// DROP USER, RENAME USER, and REVOKE ALL PRIVILEGES statements.
+				item = regexp.MustCompile(_static).ReplaceAllString(item, "CREATE USER")
+			}
+		}
+		if regForPasswordPlugin.MatchString(item) {
+			return fmt.Errorf("using caching_sha2_password, sql: %s", item)
+		}
+		if regForCreateUser.MatchString(item) {
+			item = regForCreateUser.ReplaceAllString(item, `CREATE USER /*!50706 IF NOT EXISTS */ `)
+		}
+		tmp = append(tmp, item)
+	}
+	*grants = tmp
+	return nil
+}
+
+// ValidateInstancePair validates the source/target instance pair and resolves the instance type.
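+// Example (illustrative): a backend source paired with a single target is accepted and
+// resolves to instanceType "backend"; pairing a proxy with a backend is rejected.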
+func ValidateInstancePair(source, target InstancePara) (string, error) {
+	var errMsg []string
+	var instanceType string
+	// check the address is in ip:port format
+	if !util.IsIPPortFormat(source.Address) {
+		errMsg = append(errMsg, fmt.Sprintf("sourceDBInstance(%s) is not a valid instance", source))
+	}
+
+	if !util.IsIPPortFormat(target.Address) {
+		errMsg = append(errMsg, fmt.Sprintf("targetDBInstance(%s) is not a valid instance", target))
+	}
+
+	// source and target must not be the same instance
+	if source.Address == target.Address && source.Address != "" {
+		errMsg = append(errMsg, "Source instance and target instance are the same one")
+	}
+
+	if len(errMsg) > 0 {
+		return instanceType, fmt.Errorf(strings.Join(errMsg, "\n"))
+	}
+
+	// check the machine types match
+	instanceType = source.MachineType
+	if source.MachineType != target.MachineType {
+		if !((source.MachineType == machineTypeBackend || source.MachineType == machineTypeSingle) &&
+			(target.MachineType == machineTypeBackend || target.MachineType == machineTypeSingle)) {
+			errMsg = append(
+				errMsg, fmt.Sprintf(
+					"instance type not same, %s is %s,but %s is %s ", source.Address,
+					source.MachineType, target.Address, target.MachineType,
+				),
+			)
+		} else {
+			instanceType = machineTypeBackend
+		}
+	}
+	if len(errMsg) > 0 {
+		return instanceType, fmt.Errorf(strings.Join(errMsg, "\n"))
+	}
+	return instanceType, nil
+}
+
+// CheckGrantInMySqlVersion rejects grants that use mysql_native_password when the target instance is below mysql 5.
+func CheckGrantInMySqlVersion(userGrants []UserGrant, address string, bkCloudId int64) error {
+	localBigVersion := ""
+	if version, err := GetMySQLVersion(address, bkCloudId); err != nil {
+		return err
+	} else {
+		reg := regexp.MustCompile(`\.+`)
+		array := reg.Split(version, -1)
+		if len(array) == 0 {
+			return fmt.Errorf("获取%s的mysql大版本失败", address)
+		}
+		localBigVersion = array[0]
+	}
+
+	if localBigVersion >= "5" {
+		return nil
+	}
+
+	for _, row := range userGrants {
+		for _, str := range row.Grants {
+			reg := regexp.MustCompile(`BY PASSWORD '\*`)
+			if reg.MatchString(str) {
+				return fmt.Errorf("目标实例%s的大版本是%s,低于5.0,不支持mysql_native_password加密方式", localBigVersion)
+			}
+		}
+	}
+	return nil
+}
+
+// ImportMysqlPrivileges executes grant statements on the target mysql instance.
+func ImportMysqlPrivileges(userGrants []UserGrant, address string, bkCloudId int64) error {
+	// Err is a concurrency-safe list of error messages
+	type Err struct {
+		mu   sync.RWMutex
+		errs []string
+	}
+	var errMsg Err
+	wg := sync.WaitGroup{}
+	tokenBucket := make(chan int, 10)
+
+	for _, row := range userGrants {
+		wg.Add(1)
+		tokenBucket <- 0
+		go func(row UserGrant) {
+			defer func() {
+				<-tokenBucket
+				wg.Done()
+			}()
+			queryRequest := QueryRequest{[]string{address}, row.Grants, true, 60, bkCloudId}
+			_, err := OneAddressExecuteSql(queryRequest)
+			if err != nil {
+				errMsg.mu.Lock()
+				errMsg.errs = append(errMsg.errs, err.Error())
+				errMsg.mu.Unlock()
+				return
+			}
+		}(row)
+	}
+
+	wg.Wait()
+	close(tokenBucket)
+
+	if len(errMsg.errs) > 0 {
+		return fmt.Errorf(strings.Join(errMsg.errs, "\n"))
+	}
+	return nil
+}
+
+// changeToProxyAdminPort converts a proxy address to its admin-port address.
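+// Example (illustrative; the code below assumes the admin port is the service port
+// plus 1000): "1.1.1.1:10000" -> "1.1.1.1:11000".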
+func changeToProxyAdminPort(address string) (string, error) {
+	tmp := strings.Split(address, ":")
+	port, err := strconv.ParseInt(tmp[1], 10, 64)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%s:%s", tmp[0], strconv.FormatInt(port+1000, 10)), nil
+}
diff --git a/dbm-services/mysql/db-priv/service/clone_instance_priv_object.go b/dbm-services/mysql/db-priv/service/clone_instance_priv_object.go
new file mode 100644
index 0000000000..5d38d15d2f
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/clone_instance_priv_object.go
@@ -0,0 +1,80 @@
+package service
+
+import "sync"
+
+// CloneInstancePrivParaList is the input parameter of CloneInstancePrivDryRun.
+type CloneInstancePrivParaList struct {
+	BkBizId                  int64                   `json:"bk_biz_id"`
+	CloneInstancePrivRecords []CloneInstancePrivPara `json:"clone_instance_priv_records"`
+}
+
+// visitor walks the parsed sql ast and captures grant details.
+type visitor struct {
+	username    string
+	hostname    string
+	secText     string
+	secPassword string
+	withgrant   bool
+	legal       bool
+}
+
+// CloneInstancePrivPara is the input parameter of CloneInstancePriv and DealWithPrivileges.
+type CloneInstancePrivPara struct {
+	BkBizId   int64        `json:"bk_biz_id"`
+	Operator  string       `json:"operator"`
+	Source    InstancePara `json:"source"`
+	Target    InstancePara `json:"target"`
+	BkCloudId *int64       `json:"bk_cloud_id"`
+}
+
+// InstancePara identifies an instance by address and machine type.
+type InstancePara struct {
+	Address     string `json:"address"`
+	MachineType string `json:"machine_type"`
+}
+
+/*
+// InstanceDetail is the struct returned by GetInstanceInfo
+type InstanceDetail struct {
+	BkBizId      int64    `json:"bk_biz_id"`
+	ImmuteDomain string   `json:"immute_domain"`
+	BindEntry    []string `json:"bind_entry"`
+	InstanceRole string   `json:"instance_role"`
+	MachineType  string   `json:"machine_type"`
+	Port         int64    `json:"port"`
+	Ip           string   `json:"ip"`
+	BkCloudId    int64    `json:"bk_cloud_id"`
+}
+*/
+
+// InstanceDetail is the struct returned by GetInstanceInfo.
+type InstanceDetail struct {
+	BkBizId      int64       `json:"bk_biz_id"`
+	ImmuteDomain string      `json:"immute_domain"`
+	BindEntry    []BindEntry `json:"bind_entry"`
+	InstanceRole string      `json:"instance_role"`
+	SpiderRole   string      `json:"spider_role"`
+	MachineType  string      `json:"machine_type"`
+	Port         int64       `json:"port"`
+	Ip           string      `json:"ip"`
+	BkCloudId    int64       `json:"bk_cloud_id"`
+	ClusterType  string      `json:"cluster_type"`
+}
+
+// BindEntry describes a cluster access entry.
+type BindEntry struct {
+	Entry     string `json:"entry"`
+	EntryType string `json:"entry_type"`
+	EntryRole string `json:"entry_role"`
+}
+
+// String renders CloneInstancePrivPara for deduplication and logging.
+func (m CloneInstancePrivPara) String() string {
+	return m.Source.Address + "|||" + m.Target.Address
+}
+
+// NewUserGrants is a concurrency-safe list of grant statements.
+type NewUserGrants struct {
+	mu   sync.RWMutex
+	Data []UserGrant `json:"data"`
+}
diff --git a/dbm-services/mysql/db-priv/service/db_meta_service.go b/dbm-services/mysql/db-priv/service/db_meta_service.go
new file mode 100644
index 0000000000..b2ccdb6072
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/db_meta_service.go
@@ -0,0 +1,99 @@
+package service
+
+import (
+	"dbm-services/mysql/priv-service/errno"
+	"dbm-services/mysql/priv-service/util"
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"golang.org/x/exp/slog"
+)
+
+const tendbha string = "tendbha"
+const tendbsingle string = "tendbsingle"
+const tendbcluster string = "tendbcluster"
+const machineTypeBackend string = "backend"
+const machineTypeSingle string = "single"
+const machineTypeProxy string = "proxy"
+const machineTypeSpider string = "spider"
+const backendSlave string = "backend_slave"
+const running string = "running"
+
+// GetAllClustersInfo fetches all clusters under a business.
+/*
+GetAllClustersInfo returns data in the following shape:
+
+	[{
+		  "db_module_id": 126,
+		  "bk_biz_id": "3",
+		  "cluster_type": "tendbsingle",
+		  "proxies": [],
+		  "storages": [
+		    {
+		      "ip": "1.1.1.1.",
+		      "instance_role": "orphan",
+		      "port": 30000
+		    }
+		  ],
+		  "immute_domain": "singledb.1.hayley.db"
+		},
+		{
+		  "db_module_id": 500,
+		  "bk_biz_id": "3",
+		  "cluster_type": "tendbha",
+		  "proxies": [
+		    {
+		      "ip": "1.1.1.1",
+		      "admin_port": 41000,
+		      "port": 40000
+		    },
+		    {
+		      "ip": "2.2.2.2",
+		      "admin_port": 41000,
+		      "port": 40000
+		    }
+		  ],
+		  "storages": [
+		    {
+		      "ip": "3.3.3.3",
+		      "instance_role": "backend_slave",
+		      "port": 30000
+		    },
+		    {
+		      "ip": "4.4.4.4",
+		      "instance_role": "backend_master",
+		      "port": 40000
+		    }
+		  ],
+		  "immute_domain": "gamedb.2.hayley.db"
+		}]
+*/
+func GetAllClustersInfo(c *util.Client, id BkBizId) ([]Cluster, error) {
+	var resp []Cluster
+	result, err := c.Do(http.MethodGet, "/db_meta/priv_manager/biz_clusters", id)
+	if err != nil {
+		slog.Error("priv_manager/biz_clusters", err)
+		return resp, err
+	}
+	if err := json.Unmarshal(result.Data, &resp); err != nil {
+		slog.Error("/db_meta/priv_manager/biz_clusters", err)
+		return resp, err
+	}
+	return resp, nil
+}
+
+// GetCluster fetches cluster information by domain name.
+func GetCluster(c *util.Client, ClusterType string, dns Domain) (Instance, error) {
+	var resp Instance
+	url := fmt.Sprintf("/db_meta/priv_manager/%s/cluster_instances", ClusterType)
+	result, err := c.Do(http.MethodGet, url, dns)
+	if err != nil {
+		slog.Error(url, err)
+		return resp, errno.DomainNotExists.Add(fmt.Sprintf(" %s: %s", dns.EntryName, err.Error()))
+	}
+	if err := json.Unmarshal(result.Data, &resp); err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
diff --git a/dbm-services/mysql/db-priv/service/db_remote_service.go b/dbm-services/mysql/db-priv/service/db_remote_service.go
new file mode 100644
index 0000000000..f6050971f7
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/db_remote_service.go
@@ -0,0 +1,125 @@
+package service
+
+import (
+	"dbm-services/mysql/priv-service/util"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+// OneAddressExecuteSqlBasic executes sql on a mysql or proxy instance through the db-remote-service.
+func OneAddressExecuteSqlBasic(vtype string, queryRequest QueryRequest) (oneAddressResult, error) {
+	var errMsg []string
+	var result oneAddressResult
+	var temp []oneAddressResult
+	c := util.NewClientByHosts(viper.GetString("dbRemoteService"))
+
+	var url string
+	if vtype == "mysql" {
+		url = "mysql/rpc/"
+	} else if vtype == "proxy" {
+		url = "proxy-admin/rpc/"
+	}
+
+	apiResp, err := c.Do(http.MethodPost, url, queryRequest)
+	if err != nil {
+		slog.Error("drs err", err)
+		return result, err
+	}
+	if apiResp.Code != 0 {
+		slog.Error("remote service api", fmt.Errorf(apiResp.Message))
+		return result, fmt.Errorf(apiResp.Message)
+	} else {
+		if err := json.Unmarshal(apiResp.Data, &temp); err != nil {
+			return result, err
+		}
+		if temp[0].ErrorMsg != "" {
+			errMsg = append(errMsg, fmt.Sprintf("instance: %s err: %s", queryRequest.Addresses[0], temp[0].ErrorMsg))
+		}
+		for _, res := range temp[0].CmdResults {
+			if res.ErrorMsg != "" {
+				errMsg = append(errMsg, fmt.Sprintf("instance: %s execute: `%s` error:`%s`;", queryRequest.Addresses[0],
+					strings.Replace(res.Cmd, "%", "%%", -1),
+					strings.Replace(res.ErrorMsg, "%", "%%", -1)))
+			}
+		}
+	}
+
+	if len(errMsg) > 0 {
+		slog.Error("msg", fmt.Errorf(strings.Join(errMsg, "\n")))
+		return result, fmt.Errorf(strings.Join(errMsg, "\n"))
+	}
+	return temp[0], nil
+}
+
+// OneAddressExecuteSql executes sql on a mysql instance.
+func OneAddressExecuteSql(queryRequest QueryRequest) (oneAddressResult, error) {
+	result, err := OneAddressExecuteSqlBasic("mysql", queryRequest)
+	if err != nil {
+		return result, err
+	}
+	return result, nil
+}
+
+// OneAddressExecuteProxySql executes sql on a proxy admin interface.
+func OneAddressExecuteProxySql(queryRequest QueryRequest) (oneAddressResult, error) {
+	result, err := OneAddressExecuteSqlBasic("proxy", queryRequest)
+	if err != nil {
+		return result, err
+	}
+	return result, nil
+}
+
+// QueryRequest is the input parameter of OneAddressExecuteSql.
+type QueryRequest struct {
+	Addresses []string `form:"addresses" json:"addresses" url:"addresses"` // mysql instances, an array of ip:port
+	Cmds      []string `form:"cmds" json:"cmds" url:"cmds"`                // sql statements
+	Force     bool     `form:"force" json:"force" url:"force"`             // force execution: if one statement fails, continue with the remaining statements
+	/*
+		QueryTimeout is the sql execution timeout; the default is 30 seconds:
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(timeout))
+		defer cancel()
+		rows, err := db.QueryxContext(ctx, cmd)
+	*/
+	QueryTimeout int64 `form:"query_timeout" json:"query_timeout" url:"query_timeout"` // sql execution timeout in seconds
+	BkCloudId    int64 `form:"bk_cloud_id" json:"bk_cloud_id" url:"bk_cloud_id"`       // cloud area where the mysql service lives
+}
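+// An illustrative payload (field names follow the json tags above):
+//   {"addresses": ["1.1.1.1:20000"], "cmds": ["show databases;"], "force": true,
+//    "query_timeout": 30, "bk_cloud_id": 0}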
+
+// queryResponse is the structure returned by the db-remote-service /mysql/rpc endpoint.
+type queryResponse struct {
+	Code      int               `json:"code"`
+	Data      queryResponseData `json:"data"`
+	Msg       string            `json:"message"`
+	RequestId string            `json:"request_id"`
+}
+
+// queryResponseData holds the results of executing sql on multiple ip:port addresses.
+type queryResponseData []oneAddressResult
+
+// oneAddressResult is the result of executing sql on a single ip:port.
+type oneAddressResult struct {
+	Address    string      `json:"address"`
+	CmdResults []cmdResult `json:"cmd_results"`
+	ErrorMsg   string      `json:"error_msg"`
+}
+
+// cmdResult is the result of a single sql command.
+type cmdResult struct {
+	Cmd          string        `json:"cmd"`
+	TableData    tableDataType `json:"table_data"`
+	RowsAffected int64         `json:"rows_affected"`
+	ErrorMsg     string        `json:"error_msg"`
+}
+
+// tableDataType holds the rows returned by a query.
+type tableDataType []map[string]interface{}
+
+// PasswordResp holds the password of user@host on a mysql instance and its encryption type.
+type PasswordResp struct {
+	Psw     string `gorm:"column:psw;not_null;" json:"psw"`
+	PwdType string `gorm:"column:psw_type;not_null;" json:"psw_type"`
+}
diff --git a/dbm-services/mysql/db-priv/service/init_db.go b/dbm-services/mysql/db-priv/service/init_db.go
new file mode 100644
index 0000000000..86db4b2c14
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/init_db.go
@@ -0,0 +1,90 @@
+package service
+
+import (
+	"fmt"
+	"log"
+	"time"
+
+	"github.com/jinzhu/gorm"
+	_ "github.com/jinzhu/gorm/dialects/mysql" // mysql TODO
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+// Database wraps a gorm DB handle.
+type Database struct {
+	Self *gorm.DB
+}
+
+// DB is the default database handle.
+var DB *Database
+
+// DBVersion56 is the handle to the database configured as generatePswDBVersion56.
+var DBVersion56 *Database
+
+// setupDatabase initialize the database tables.
+func setupDatabase(db *gorm.DB) {
+}
+
+func openDB(username, password, addr, name string) *gorm.DB {
+	config := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=%t&loc=%s",
+		username,
+		password,
+		addr,
+		name,
+		true,
+		// "Asia/Shanghai"),
+		"Local")
+	db, err := gorm.Open("mysql", config)
+	if err != nil {
+		log.Fatalf("Database connection failed. Database name: %s, error: %v", name, err)
+	}
+
+	// set for db connection
+	setupDB(db)
+	return db
+}
+
+func setupDB(db *gorm.DB) {
+	// setup tables
+	setupDatabase(db)
+
+	db.LogMode(viper.GetBool("gormlog"))
+	db.DB().SetMaxIdleConns(60) // idle connections stay in the pool and are reused once released
+	db.DB().SetMaxOpenConns(200)
+	/*
+		First of all, you should use DB.SetConnMaxLifetime() instead of wait_timeout.
+		Closing connection from client is always better than closing from server,
+		because client may send query just when server start closing the connection.
+		In such case, client can't know sent query is received or not.
+	*/
+	db.DB().SetConnMaxLifetime(3600 * time.Second)
+}
+
+// initSelfDB opens the database described by the given config section; used for cli.
+func initSelfDB(dbconfig string) *gorm.DB {
+	slog.Info(fmt.Sprintf("%s.%s", dbconfig, "addr"))
+	return openDB(viper.GetString(fmt.Sprintf("%s.%s", dbconfig, "username")),
+		viper.GetString(fmt.Sprintf("%s.%s", dbconfig, "password")),
+		viper.GetString(fmt.Sprintf("%s.%s", dbconfig, "addr")),
+		viper.GetString(fmt.Sprintf("%s.%s", dbconfig, "name")))
+}
+
+// Init opens the configured database connections.
+func (db *Database) Init() {
+	DB = &Database{
+		Self: initSelfDB("db"),
+	}
+
+	DBVersion56 = &Database{
+		Self: initSelfDB("generatePswDBVersion56"),
+	}
+
+}
+
+// Close closes the database connections.
+func (db *Database) Close() {
+	DB.Self.Close()
+	DBVersion56.Self.Close()
+}
diff --git a/dbm-services/mysql/db-priv/service/service.go b/dbm-services/mysql/db-priv/service/service.go
new file mode 100644
index 0000000000..2d680ff1db
--- /dev/null
+++ b/dbm-services/mysql/db-priv/service/service.go
@@ -0,0 +1,2 @@
+// Package service implements mysql privilege management operations.
+package service
diff --git a/dbm-services/mysql/db-priv/util/base_func.go b/dbm-services/mysql/db-priv/util/base_func.go
new file mode 100644
index 0000000000..5df9f0e592
--- /dev/null
+++ b/dbm-services/mysql/db-priv/util/base_func.go
@@ -0,0 +1,72 @@
+package util
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strings"
+
+	"github.com/asaskevich/govalidator"
+)
+
+// AtWhere returns the parent function name.
+func AtWhere() string {
+	pc, _, _, ok := runtime.Caller(1)
+	if ok {
+		return runtime.FuncForPC(pc).Name()
+	} else {
+		return "Method not Found!"
+	}
+}
+
+// String2Slice splits a string on whitespace, commas, semicolons and newlines: string -> []string.
+func String2Slice(input string) (result []string, err error) {
+	reg, err := regexp.Compile(`\r+|\s+|;+|\n+|,+`)
+	if err != nil {
+		return result, err
+	}
+
+	tmp := reg.Split(input, -1)
+	for _, s := range tmp {
+		s = strings.TrimSpace(s)
+		if s == "" {
+			continue
+		}
+		result = append(result, s)
+	}
+	return result, nil
+}
+
+// IsIPPortFormat checks whether the input is in ip:port format.
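+// Example: IsIPPortFormat("1.1.1.1:3306") == true, IsIPPortFormat("1.1.1.1") == false.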
+func IsIPPortFormat(input string) bool {
+	tmp := strings.Split(input, ":")
+	if len(tmp) != 2 {
+		return false
+	}
+	ip, port := tmp[0], tmp[1]
+	return govalidator.IsIP(ip) && govalidator.IsPort(port)
+}
+
+// HasElem reports whether elem exists in the given slice or array.
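+// Example: HasElem(504, []int{500, 429, 504}) == true.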
+func HasElem(elem interface{}, slice interface{}) bool {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Printf("HasElem error %s at  %s", err, AtWhere())
+		}
+	}()
+	arrV := reflect.ValueOf(slice)
+	if arrV.Kind() == reflect.Slice || arrV.Kind() == reflect.Array {
+		for i := 0; i < arrV.Len(); i++ {
+			// XXX - panics if slice element points to an unexported struct field
+			// see https://golang.org/pkg/reflect/#Value.Interface
+			if arrV.Index(i).Interface() == elem {
+				return true
+			}
+		}
+	}
+	return false
+}
diff --git a/dbm-services/mysql/db-priv/util/client.go b/dbm-services/mysql/db-priv/util/client.go
new file mode 100644
index 0000000000..5cefda94c0
--- /dev/null
+++ b/dbm-services/mysql/db-priv/util/client.go
@@ -0,0 +1,210 @@
+package util
+
+import (
+	"bytes"
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"math/rand"
+	"net/http"
+	"net/http/httputil"
+	"strings"
+	"time"
+
+	"github.com/google/go-querystring/query"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+const (
+	// apiserver response code
+	statusSuccess int = 0
+)
+
+// APIServerResponse is the standard apiserver response envelope.
+type APIServerResponse struct {
+	Code    int             `json:"code"`
+	Message string          `json:"message"`
+	Data    json.RawMessage `json:"data"`
+}
+
+// APIServerResponseCompatible matches responses that use "msg" instead of "message".
+type APIServerResponseCompatible struct {
+	Code    int             `json:"code"`
+	Message string          `json:"msg"`
+	Data    json.RawMessage `json:"data"`
+}
+
+// Client is an http client for the apiserver.
+type Client struct {
+	apiserver string
+
+	// JWT token
+	token string
+
+	// client for apiservers
+	client *http.Client
+}
+
+// NewClientByHosts creates a Client for the given apiserver host.
+func NewClientByHosts(host string) *Client {
+	http.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+	cli := &Client{}
+	cli.apiserver = host
+	cli.client = &http.Client{
+		Transport: &http.Transport{},
+	}
+	return cli
+}
+
+// DoNew sends a request to the apiserver.
+// others: other parameters that may be used
+//
+//	other->{"user"}  : for gateway
+//
+// Automatically retries when the response contains known rate-limit markers.
+func (c *Client) DoNew(method, url string, params interface{}, headers map[string]string) (*APIServerResponse, error) {
+	var response *APIServerResponse
+	var err error
+	for retryIdx := 0; retryIdx < 5; retryIdx++ {
+		response, err = c.doNewInner(method, url, params, headers)
+		if err == nil {
+			break
+		}
+		if strings.Contains(err.Error(), "cse.flowcontrol.Consumer.qps.limit") {
+			slog.Error(fmt.Sprintf("DoNew failed, retryIdx:%d", retryIdx), err)
+			wait := retryIdx*retryIdx*1000 + rand.Intn(1000)
+			time.Sleep(time.Duration(wait) * time.Millisecond)
+			continue
+		}
+		break
+	}
+	return response, err
+}
+
+func (c *Client) doNewInner(method, url string, params interface{}, headers map[string]string) (*APIServerResponse,
+	error) {
+	host := c.apiserver
+	body, err := json.Marshal(params)
+	if err != nil {
+		slog.Error("marshal get an error", err)
+		return nil, fmt.Errorf("json marshal param failed, err: %+v", err)
+	}
+
+	if method == "GET" && !strings.Contains(url, "cc3") {
+		body = nil
+		// some GET parameters are appended to the URL, e.g. /thirdpartyapi/cc3/query-from-shell; others are carried in the struct
+		vals, err := query.Values(params)
+		if err != nil {
+			return nil, fmt.Errorf("get querystring param failed, err: %+v", err)
+		}
+		url = url + "?" + vals.Encode()
+	}
+
+	req, err := http.NewRequest(method, host+url, bytes.NewBuffer(body))
+
+	if err != nil {
+		slog.Error(fmt.Sprintf("create a new request(%s,%s,%+v) get an error", method, host+url, params), err)
+		return nil, fmt.Errorf("new request failed, err: %+v", err)
+	}
+	req.Header.Set("Content-Type", "application/json")
+	bkAuth := fmt.Sprintf(`{"bk_app_code": %s, "bk_app_secret": %s}`, viper.GetString("bk_app_code"),
+		viper.GetString("bk_app_secret"))
+	req.Header.Set("x-bkapi-authorization", bkAuth)
+
+	cookieAppCode := http.Cookie{Name: "bk_app_code", Path: "/", Value: viper.GetString("bk_app_code"), MaxAge: 86400}
+	cookieAppSecret := http.Cookie{Name: "bk_app_secret", Path: "/", Value: viper.GetString("bk_app_secret"),
+		MaxAge: 86400}
+	req.AddCookie(&cookieAppCode)
+	req.AddCookie(&cookieAppSecret)
+
+	resp, err := c.client.Do(req)
+	// slog.Info(fmt.Sprintf("req:%v", req))
+	if err != nil {
+		slog.Error(fmt.Sprintf("invoking http request failed, url: %s", req.URL.String()), err)
+		return nil, fmt.Errorf("do http request failed, err: %+v", err)
+	}
+	defer func() {
+		if resp == nil {
+			return
+		}
+		if err := resp.Body.Close(); err != nil {
+			slog.Warn("close response body failed", "err", err.Error())
+		}
+	}()
+
+	// gateway timeouts appear intermittently; retry to see whether they reproduce within the window
+	for i := 1; i <= 5; i++ {
+		// 500: a release may be in progress
+		// 429: occasional rate limiting under high concurrency
+		// 504: cause unknown, retry for now
+		if !HasElem(resp.StatusCode, []int{http.StatusInternalServerError, http.StatusTooManyRequests,
+			http.StatusGatewayTimeout}) {
+			break
+		}
+
+		wait := i*i*1000 + rand.Intn(1000)
+		time.Sleep(time.Duration(wait) * time.Millisecond)
+		slog.Warn(fmt.Sprintf("client.Do result with %s, wait %d milliSeconds and retry, url: %s", resp.Status, wait,
+			req.URL.String()))
+		resp, err = c.client.Do(req)
+		if err != nil {
+			slog.Error(fmt.Sprintf("an error occur while invoking client.Do, url: %s", req.URL.String()), err)
+			return nil, fmt.Errorf("do http request failed, err: %+v", err)
+		}
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		bodyBytes, dumpErr := httputil.DumpResponse(resp, true)
+		if dumpErr != nil {
+			slog.Error("dump response failed", dumpErr)
+			return nil, fmt.Errorf("http status code: %d, dump response failed: %s", resp.StatusCode, dumpErr.Error())
+		}
+		return nil, fmt.Errorf("http response: %s, status code: %d", string(bodyBytes), resp.StatusCode)
+	}
+
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		err = fmt.Errorf("read resp.body error:%s", err.Error())
+		slog.Error("msg", err)
+		return nil, err
+	}
+	result := &APIServerResponse{}
+	if strings.Contains(url, "priv_manager") {
+		temp := &APIServerResponseCompatible{}
+		err = json.Unmarshal(b, temp)
+		if err != nil {
+			slog.Error(fmt.Sprintf("unmarshall %s to %+v get an error", string(b), *result), err)
+			return nil, fmt.Errorf("json unmarshal failed, err: %+v", err)
+		}
+		result = &APIServerResponse{temp.Code, temp.Message, temp.Data}
+	} else {
+		err = json.Unmarshal(b, result)
+		if err != nil {
+			slog.Error(fmt.Sprintf("unmarshall %s to %+v get an error", string(b), *result), err)
+			return nil, fmt.Errorf("json unmarshal failed, err: %+v", err)
+		}
+	}
+
+	// check response and data is nil
+	if result.Code != statusSuccess {
+		slog.Warn(fmt.Sprintf("result.Code is %d not equal to %d,message:%s,data:%s,param:%+v", result.Code, statusSuccess,
+			result.Message, string(result.Data), params))
+		if len(result.Data) != 0 {
+			return nil, fmt.Errorf("[%v - %v - %s]", result.Code, result.Message, string(result.Data))
+		}
+		return nil, fmt.Errorf("%v - %v", result.Code, result.Message)
+	}
+	return result, nil
+}
+
+// Do sends a request to the apiserver with default headers.
+func (c *Client) Do(method, url string, params interface{}) (*APIServerResponse, error) {
+	return c.DoNew(method, url, params, map[string]string{})
+}
diff --git a/dbm-services/mysql/db-priv/util/db.go b/dbm-services/mysql/db-priv/util/db.go
new file mode 100644
index 0000000000..83431b0ed9
--- /dev/null
+++ b/dbm-services/mysql/db-priv/util/db.go
@@ -0,0 +1,41 @@
+package util
+
+import (
+	"fmt"
+
+	"github.com/jmoiron/sqlx"
+	"golang.org/x/exp/slog"
+)
+
+// ConnectionParam holds mysql connection settings.
+type ConnectionParam struct {
+	Addr     string `json:"addr"`
+	Dbname   string `json:"dbname"`
+	Username string `json:"username"`
+	Password string `json:"password"`
+}
+
+// ConnectDB opens a sqlx connection using the stored parameters.
+func (c *ConnectionParam) ConnectDB() (*sqlx.DB, error) {
+	return ConnectSqlx(c.Username, c.Password, c.Addr, c.Dbname)
+}
+
+// ConnectSqlx opens and pings a mysql connection via sqlx.
+func ConnectSqlx(user, password, address, dbName string) (*sqlx.DB, error) {
+
+	config := fmt.Sprintf("%s:%s@tcp(%s)/%s?multiStatements=true&timeout=10s",
+		user,
+		password,
+		address,
+		dbName)
+	db, err := sqlx.Connect("mysql", config)
+	if err != nil {
+		slog.Error(fmt.Sprintf("Database connection failed. user: %s, address: %v", user, address), err)
+		return nil, err
+	}
+	if err := db.Ping(); err != nil {
+		slog.Error("Database ping failed.", err)
+		return nil, err
+	}
+	return db, nil
+}
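+
+// Usage sketch (illustrative; the address and credentials are hypothetical):
+//
+//	param := ConnectionParam{
+//		Addr:     "127.0.0.1:3306",
+//		Dbname:   "information_schema",
+//		Username: "monitor",
+//		Password: "monitor-password",
+//	}
+//	db, err := param.ConnectDB()
+//	if err != nil {
+//		return err
+//	}
+//	defer db.Close()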
diff --git a/dbm-services/mysql/db-priv/util/rsa.go b/dbm-services/mysql/db-priv/util/rsa.go
new file mode 100644
index 0000000000..1d855347c0
--- /dev/null
+++ b/dbm-services/mysql/db-priv/util/rsa.go
@@ -0,0 +1,213 @@
+package util
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/asn1"
+	"encoding/base64"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+)
+
+const (
+	// RSA_ALGORITHM_SIGN is the hash used when signing and verifying
+	RSA_ALGORITHM_SIGN = crypto.SHA256
+)
+
+// XRsa wraps an RSA key pair
+type XRsa struct {
+	publicKey  *rsa.PublicKey
+	privateKey *rsa.PrivateKey
+}
+
+// CreateKeyFile creates the public/private key files if they do not exist yet
+func CreateKeyFile() error {
+	var fpPub, fpPriv *os.File
+	var err error
+
+	defer func() {
+		fpPub.Close()
+		fpPriv.Close()
+	}()
+
+	_, errPub := os.Stat("./pubkey.pem")
+	_, errPriv := os.Stat("./privkey.pem")
+	if os.IsNotExist(errPub) || os.IsNotExist(errPriv) {
+		fpPub, err = os.Create("./pubkey.pem")
+		if err != nil {
+			return fmt.Errorf("create public key file failed, err: %s", err)
+		}
+		fpPriv, err = os.Create("./privkey.pem")
+		if err != nil {
+			return fmt.Errorf("create private key file failed, err: %s", err)
+		}
+		err = CreateKeys(fpPub, fpPriv, 1024)
+		if err != nil {
+			return fmt.Errorf("generate key files failed, err: %s", err)
+		}
+	} else if errPub != nil {
+		return fmt.Errorf("stat pubkey.pem failed: %s", errPub.Error())
+	} else if errPriv != nil {
+		return fmt.Errorf("stat privkey.pem failed: %s", errPriv.Error())
+	}
+	return nil
+}
+
+// CreateKeys generates an RSA key pair and writes both keys out in PEM format
+func CreateKeys(publicKeyWriter, privateKeyWriter io.Writer, keyLength int) error {
+	// encode and write the private key
+	privateKey, err := rsa.GenerateKey(rand.Reader, keyLength)
+	if err != nil {
+		return err
+	}
+	derStream := MarshalPKCS8PrivateKey(privateKey)
+	block := &pem.Block{
+		Type:  "PRIVATE KEY",
+		Bytes: derStream,
+	}
+	err = pem.Encode(privateKeyWriter, block)
+	if err != nil {
+		return err
+	}
+	// encode and write the public key
+	publicKey := &privateKey.PublicKey
+	derPkix, err := x509.MarshalPKIXPublicKey(publicKey)
+	if err != nil {
+		return err
+	}
+	block = &pem.Block{
+		Type:  "PUBLIC KEY",
+		Bytes: derPkix,
+	}
+	err = pem.Encode(publicKeyWriter, block)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// NewXRsa builds an XRsa from PEM encoded keys; either key may be nil
+func NewXRsa(publicKey []byte, privateKey []byte) (*XRsa, error) {
+	xrsa := XRsa{}
+	if publicKey != nil {
+		block, _ := pem.Decode(publicKey)
+		if block == nil {
+			return nil, errors.New("public key error")
+		}
+		pubInterface, err := x509.ParsePKIXPublicKey(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		pub, ok := pubInterface.(*rsa.PublicKey)
+		if !ok {
+			return nil, errors.New("public key not supported")
+		}
+		xrsa.publicKey = pub
+	}
+	if privateKey != nil {
+		block, _ := pem.Decode(privateKey)
+		if block == nil {
+			return nil, errors.New("private key error!")
+		}
+		priv, err := x509.ParsePKCS8PrivateKey(block.Bytes)
+		if err != nil {
+			return nil, err
+		}
+		pri, ok := priv.(*rsa.PrivateKey)
+		if !ok {
+			return nil, errors.New("private key not supported")
+		}
+		xrsa.privateKey = pri
+	}
+	return &xrsa, nil
+}
+
+// PublicEncrypt encrypts data with the public key, splitting it into chunks that fit PKCS#1 v1.5 padding
+func (r *XRsa) PublicEncrypt(data string) (string, error) {
+	partLen := r.publicKey.N.BitLen()/8 - 11
+	chunks := split([]byte(data), partLen)
+	buffer := bytes.NewBufferString("")
+	for _, chunk := range chunks {
+		encrypted, err := rsa.EncryptPKCS1v15(rand.Reader, r.publicKey, chunk)
+		if err != nil {
+			return "", err
+		}
+		buffer.Write(encrypted)
+	}
+	return base64.StdEncoding.EncodeToString(buffer.Bytes()), nil
+}
+
+// PrivateDecrypt decrypts base64 encoded data with the private key
+func (r *XRsa) PrivateDecrypt(encrypted string) (string, error) {
+	// chunk size must come from the private key: reading r.publicKey.N when
+	// only the private key is set panics with a nil pointer dereference
+	partLen := r.privateKey.N.BitLen() / 8
+	raw, err := base64.StdEncoding.DecodeString(encrypted)
+	if err != nil {
+		return "", err
+	}
+	chunks := split(raw, partLen)
+	buffer := bytes.NewBufferString("")
+	for _, chunk := range chunks {
+		decrypted, err := rsa.DecryptPKCS1v15(rand.Reader, r.privateKey, chunk)
+		if err != nil {
+			return "", err
+		}
+		buffer.Write(decrypted)
+	}
+	return buffer.String(), nil
+}
+
+// Sign signs data with the private key using SHA256
+func (r *XRsa) Sign(data string) (string, error) {
+	h := RSA_ALGORITHM_SIGN.New()
+	h.Write([]byte(data))
+	hashed := h.Sum(nil)
+	sign, err := rsa.SignPKCS1v15(rand.Reader, r.privateKey, RSA_ALGORITHM_SIGN, hashed)
+	if err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(sign), nil
+}
+
+// Verify checks a base64 encoded signature against data using the public key
+func (r *XRsa) Verify(data string, sign string) error {
+	h := RSA_ALGORITHM_SIGN.New()
+	h.Write([]byte(data))
+	hashed := h.Sum(nil)
+	decodedSign, err := base64.StdEncoding.DecodeString(sign)
+	if err != nil {
+		return err
+	}
+	return rsa.VerifyPKCS1v15(r.publicKey, RSA_ALGORITHM_SIGN, hashed, decodedSign)
+}
+
+// MarshalPKCS8PrivateKey wraps a PKCS#1 private key in a minimal PKCS#8 envelope
+func MarshalPKCS8PrivateKey(key *rsa.PrivateKey) []byte {
+	info := struct {
+		Version             int
+		PrivateKeyAlgorithm []asn1.ObjectIdentifier
+		PrivateKey          []byte
+	}{}
+	info.Version = 0
+	info.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 1)
+	info.PrivateKeyAlgorithm[0] = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
+	info.PrivateKey = x509.MarshalPKCS1PrivateKey(key)
+	k, _ := asn1.Marshal(info)
+	return k
+}
+
+func split(buf []byte, lim int) [][]byte {
+	var chunk []byte
+	chunks := make([][]byte, 0, len(buf)/lim+1)
+	for len(buf) >= lim {
+		chunk, buf = buf[:lim], buf[lim:]
+		chunks = append(chunks, chunk)
+	}
+	if len(buf) > 0 {
+		chunks = append(chunks, buf)
+	}
+	return chunks
+}
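+
+// exampleRoundTrip is an illustrative sketch (not part of the original
+// change): generate a key pair in memory, then exercise encrypt/decrypt
+// and sign/verify against each other.
+func exampleRoundTrip() error {
+	var pub, priv bytes.Buffer
+	if err := CreateKeys(&pub, &priv, 2048); err != nil {
+		return err
+	}
+	x, err := NewXRsa(pub.Bytes(), priv.Bytes())
+	if err != nil {
+		return err
+	}
+	cipherText, err := x.PublicEncrypt("hello")
+	if err != nil {
+		return err
+	}
+	plainText, err := x.PrivateDecrypt(cipherText)
+	if err != nil {
+		return err
+	}
+	if plainText != "hello" {
+		return errors.New("round trip mismatch")
+	}
+	sig, err := x.Sign(plainText)
+	if err != nil {
+		return err
+	}
+	return x.Verify(plainText, sig)
+}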
diff --git a/dbm-services/mysql/db-priv/util/time.go b/dbm-services/mysql/db-priv/util/time.go
new file mode 100644
index 0000000000..eb9ac222cf
--- /dev/null
+++ b/dbm-services/mysql/db-priv/util/time.go
@@ -0,0 +1,96 @@
+package util
+
+import (
+	"database/sql/driver"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	"golang.org/x/exp/slog"
+)
+
+// TimeFormat is a time value stored as a "2006-01-02 15:04:05" formatted string
+type TimeFormat string
+
+// Value is called by gorm on save/update; this is where the stored string
+// is converted back into a time.Time.
+func (t TimeFormat) Value() (driver.Value, error) {
+	if t.IsNull() {
+		return nil, nil
+	}
+	localTimezone, err := time.LoadLocation("Local") // timezone configured on the server
+	if err != nil {
+		slog.Error("time.LoadLocation", err)
+		localTimezone, _ = time.LoadLocation("Asia/Shanghai") // on failure, fall back to Asia/Shanghai
+	}
+	ti, err := time.ParseInLocation("2006-01-02 15:04:05", string(t), localTimezone)
+	if err != nil {
+		slog.Error("TimeFormat Value", err)
+		return time.Now(), nil
+	}
+	return ti.In(localTimezone), nil
+}
+
+// Scan is called by gorm on find-style queries to convert the column value.
+// Note: when the value is NULL, Scan is not invoked; MarshalJSON must then
+// return []byte("\"\"") for the empty value, otherwise marshalling fails with
+// "json: error calling MarshalJSON for type model.TimeFormat: unexpected end of JSON input".
+func (t *TimeFormat) Scan(value interface{}) error {
+	localTimezone, err := time.LoadLocation("Local") // timezone configured on the server
+	if err != nil {
+		slog.Error("time.LoadLocation error", err)
+		localTimezone, _ = time.LoadLocation("Asia/Shanghai") // on failure, fall back to Asia/Shanghai
+	}
+	if value == nil {
+		*t = "\"2006-01-02 00:00:00\""
+		return nil
+	}
+	s, ok := value.(time.Time)
+	if !ok {
+		return errors.New("Invalid Scan Source")
+	}
+	*t = TimeFormat(s.In(localTimezone).Format("2006-01-02 15:04:05"))
+	return nil
+}
+
+// MarshalJSON is invoked when handler.go's SendResponse calls c.WriteHeaderAndJSON.
+func (t TimeFormat) MarshalJSON() ([]byte, error) {
+	if t == "" {
+		return []byte("\"\""), nil
+	}
+	return []byte(fmt.Sprintf("\"%s\"", string(t))), nil
+}
+
+// UnmarshalJSON parses a JSON string into a TimeFormat
+func (t *TimeFormat) UnmarshalJSON(data []byte) error {
+	var str string
+	err := json.Unmarshal(data, &str)
+	*t = TimeFormat(str)
+	return err
+}
+
+// IsNull reports whether the time string is empty
+func (t TimeFormat) IsNull() bool {
+	return len(t) == 0
+}
+
+// NowTimeFormat returns the current time as a TimeFormat
+func NowTimeFormat() TimeFormat {
+	return TimeFormat(time.Now().Format("2006-01-02 15:04:05"))
+}
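+
+// JSON behaviour sketch (the timestamps below are illustrative):
+//
+//	t := NowTimeFormat()       // e.g. TimeFormat("2023-05-29 15:14:30")
+//	b, _ := json.Marshal(t)    // `"2023-05-29 15:14:30"`
+//	var t2 TimeFormat
+//	_ = json.Unmarshal(b, &t2) // t2 == t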
diff --git a/dbm-services/mysql/db-priv/util/util.go b/dbm-services/mysql/db-priv/util/util.go
new file mode 100644
index 0000000000..80d62b1ad1
--- /dev/null
+++ b/dbm-services/mysql/db-priv/util/util.go
@@ -0,0 +1,2 @@
+// Package util TODO
+package util
diff --git a/dbm-services/mysql/db-remote-service/.gitignore b/dbm-services/mysql/db-remote-service/.gitignore
new file mode 100644
index 0000000000..d7141d0193
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/.gitignore
@@ -0,0 +1,26 @@
+!.gitkeep
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+# Test binary, built with `go test -c`
+*.test
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+# Dependency directories (remove the comment below to include it)
+vendor/
+# Go workspace file
+go.work
+configs/*
+log/
+build/
+conf/
+*exe
+*.log
+.idea/
+.DS_Store
+.vscode/
+db-remote-service
+tmysqlparse
\ No newline at end of file
diff --git a/dbm-services/mysql/db-remote-service/.golangci.yml b/dbm-services/mysql/db-remote-service/.golangci.yml
new file mode 100644
index 0000000000..74b121ed6f
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/.golangci.yml
@@ -0,0 +1,57 @@
+linters-settings:
+  lll:
+    line-length: 120
+  funlen:
+    lines: 80
+    statements: 80
+  gocritic:
+    enabled-checks:
+      - nestingReduce
+      - commentFormatting
+      
+run:
+  # default concurrency is a available CPU number
+  concurrency: 4
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  timeout: 2m
+  # exit code when at least one issue was found, default is 1
+  issues-exit-code: 1
+  # include test files or not, default is true
+  tests: false
+  # default is true. Enables skipping of directories:
+  #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
+  skip-dirs-use-default: true
+
+  skip-files:
+    - ".*/mock/.*.go"
+    - ".*testing.go"
+
+linters:
+  # enable-all: true
+  # disable-all: true
+  disable:
+    - errcheck
+  enable:
+    - nilerr
+    - nakedret
+    - lll
+    - gofmt
+    - gocritic
+    - gocyclo
+    - whitespace
+    - sqlclosecheck
+    - deadcode
+    - govet
+    - bodyclose
+    - staticcheck
+    # - errorlint
+    # - varcheck
+    # - typecheck
+    # - nestif
+    # - gofumpt
+    # - godox
+    # - wsl
+    # - funlen
+    # - golint
+    # - cyclop
+  fast: false
\ No newline at end of file
diff --git a/dbm-services/mysql/db-remote-service/Dockerfile b/dbm-services/mysql/db-remote-service/Dockerfile
new file mode 100644
index 0000000000..40587fdd41
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/Dockerfile
@@ -0,0 +1,6 @@
+FROM mirrors.tencent.com/sccmsp/tmysqlparse:3.0.7
+
+ADD build/db-remote-service /
+
+WORKDIR /
+ENTRYPOINT ["/db-remote-service"]
diff --git a/dbm-services/mysql/db-remote-service/LICENSE b/dbm-services/mysql/db-remote-service/LICENSE
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/mysql/db-remote-service/Makefile b/dbm-services/mysql/db-remote-service/Makefile
new file mode 100644
index 0000000000..3000281973
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/Makefile
@@ -0,0 +1,36 @@
+PROJ="db-remote-service"
+MODULE="dbm-services/mysql/db-remote-service"
+VERSION = $(error please set VERSION flag)
+PKG = ${PROJ}.tar.gz
+OUTPUT_DIR = build
+RELEASE_BUILD_FLAG = "-X ${MODULE}/cmd.version=${VERSION} -X ${MODULE}/cmd.buildStamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X ${MODULE}/cmd.gitHash=`git rev-parse HEAD` "
+DEV_BUILD_FLAG = "-X ${MODULE}/cmd.version="develop" -X ${MODULE}/cmd.buildStamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X ${MODULE}/cmd.gitHash="" "
+
+BK_NAMESPACE = blueking
+BK_DH_URL = mirrors.tencent.com/build
+
+.PHONY: release-bin
+release-bin:
+	@CGO_ENABLED=0 GOARCH=amd64 GOOS=linux go build -ldflags ${RELEASE_BUILD_FLAG} -o ${OUTPUT_DIR}/${PROJ}
+	@tar -C ${OUTPUT_DIR} -zcf ${OUTPUT_DIR}/${PKG} ${PROJ}
+
+.PHONY: dev-bin
+dev-bin:
+	@go build -ldflags ${DEV_BUILD_FLAG} -o ${OUTPUT_DIR}/${PROJ}
+	@tar -C ${OUTPUT_DIR} -zcf ${OUTPUT_DIR}/${PKG} ${PROJ}
+
+.PHONY: bk-image
+bk-image: release-bin
+	docker build --build-arg SRV_NAME=${PROJ} --rm -t ${BK_DH_URL}/${BK_NAMESPACE}/${PROJ}:latest .
+
+.PHONY: bk-publish
+bk-publish: bk-image
+	docker push ${BK_DH_URL}/${BK_NAMESPACE}/${PROJ}:latest
+
+.PHONY: clean
+clean:
+	@rm -rf ${OUTPUT_DIR}
+
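+# Usage sketch (the version number is illustrative); VERSION must be supplied
+# for the release targets:
+#   make release-bin VERSION=1.0.0
+#   make bk-image VERSION=1.0.0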
diff --git a/dbm-services/mysql/db-remote-service/all_sql_commands.txt b/dbm-services/mysql/db-remote-service/all_sql_commands.txt
new file mode 100644
index 0000000000..6ba9d554a0
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/all_sql_commands.txt
@@ -0,0 +1,150 @@
+"select"
+"create_table"
+"create_index"
+"alter_table"
+"update"
+"insert"
+"insert_select"
+"delete"
+"truncate"
+"drop_table"
+"drop_index"
+"show_databases"`
+"show_tables"
+"show_fields"
+"show_keys"
+"show_variables"
+"show_status"
+"show_engine_logs"
+"show_engine_status"
+"show_engine_mutex"
+"show_processlist"
+"show_master_stat"
+"show_slave_stat"
+"show_grants"
+"show_create"
+"show_charsets"
+"show_collations"
+"show_create_db"
+"show_table_status"
+"show_triggers"
+"load"
+"set_option"
+"lock_tables"
+"unlock_tables"
+"grant"
+"change_db"
+"create_db"
+"drop_db"
+"alter_db"
+"repair"
+"replace"
+"replace_select"
+"create_function"
+"drop_function"
+"revoke"
+"optimize"
+"check"
+"assign_to_keycache"
+"preload_keys"
+"flush"
+"kill"
+"analyze"
+"rollback"
+"rollback_to_savepoint"
+"commit"
+"savepoint"
+"release_savepoint"
+"slave_start"
+"slave_stop"
+"start_group_replication"
+"stop_group_replication"
+"begin"
+"change_master"
+"change_replication_filter"
+"rename_table"
+"reset"
+"purge"
+"purge_before"
+"show_binlogs"
+"show_open_tables"
+"ha_open"
+"ha_close"
+"ha_read"
+"show_slave_hosts"
+"delete_multi"
+"update_multi"
+"show_binlog_events"
+"do"
+"show_warns"
+"empty_query"
+"show_errors"
+"show_storage_engines"
+"show_privileges"
+"help"
+"create_user"
+"drop_user"
+"rename_user"
+"revoke_all"
+"checksum"
+"create_procedure"
+"create_spfunction"
+"call"
+"drop_procedure"
+"alter_procedure"
+"alter_function"
+"show_create_proc"
+"show_create_func"
+"show_status_proc"
+"show_status_func"
+"prepare"
+"execute"
+"deallocate_prepare"
+"create_view"
+"drop_view"
+"create_trigger"
+"drop_trigger"
+"xa_start"
+"xa_end"
+"xa_prepare"
+"xa_commit"
+"xa_rollback"
+"xa_recover"
+"show_proc_code"
+"show_func_code"
+"alter_tablespace"
+"install_plugin"
+"uninstall_plugin"
+"binlog_base64_event"
+"show_plugins"
+"create_server"
+"drop_server"
+"alter_server"
+"create_event"
+"alter_event"
+"drop_event"
+"show_create_event"
+"show_events"
+"show_create_trigger"
+"alter_db_upgrade"
+"show_profile"
+"show_profiles"
+"signal"
+"resignal"
+"show_relaylog_events"
+"get_diagnostics"
+"alter_user"
+"explain_other"
+"show_create_user"
+"shutdown"
+"alter_instance"
+"show_user_stats"
+"show_table_stats"
+"show_index_stats"
+"show_client_stats"
+"show_thread_stats"
+"lock_tables_for_backup"
+"lock_binlog_for_backup"
+"unlock_binlog"
+"create_compression_dictionary"
+"drop_compression_dictionary"
\ No newline at end of file
diff --git a/dbm-services/mysql/db-remote-service/cmd/init.go b/dbm-services/mysql/db-remote-service/cmd/init.go
new file mode 100644
index 0000000000..4d457ab093
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/cmd/init.go
@@ -0,0 +1,54 @@
+// Package cmd service cmd
+package cmd
+
+import "github.com/spf13/viper"
+
+func init() {
+	rootCmd.PersistentFlags().Int("concurrent", 500, "concurrent")
+	rootCmd.PersistentFlags().String("mysql_admin_password", "123", "mysql password")
+	rootCmd.PersistentFlags().String("mysql_admin_user", "root", "mysql user")
+
+	rootCmd.PersistentFlags().String("proxy_admin_password", "123", "proxy password")
+	rootCmd.PersistentFlags().String("proxy_admin_user", "root", "proxy user")
+
+	rootCmd.PersistentFlags().Int("port", 8888, "port")
+
+	rootCmd.PersistentFlags().Bool("log_json", true, "json format log")
+	rootCmd.PersistentFlags().Bool("log_console", true, "log to console stdout")
+	rootCmd.PersistentFlags().Bool("log_debug", true, "display debug log")
+	rootCmd.PersistentFlags().Bool("log_source", true, "display source log")
+
+	rootCmd.PersistentFlags().String("tmysqlparser_bin", "/tmysqlparse", "tmysqlparse path")
+	rootCmd.PersistentFlags().String("redis_cli_bin", "/redis-cli", "redis-cli path")
+
+	rootCmd.PersistentFlags().String("log_file_dir", "", "log to dir")
+
+	rootCmd.PersistentFlags().String("ca_file", "", "ca file")
+	rootCmd.PersistentFlags().String("cert_file", "", "cert file")
+	rootCmd.PersistentFlags().String("key_file", "", "key file")
+	rootCmd.PersistentFlags().Bool("tls", false, "use tls")
+
+	viper.SetEnvPrefix("DRS")
+	viper.AutomaticEnv()
+	_ = viper.BindEnv("mysql_admin_user", "MYSQL_ADMIN_USER")
+	_ = viper.BindEnv("mysql_admin_password", "MYSQL_ADMIN_PASSWORD")
+	_ = viper.BindEnv("proxy_admin_user", "PROXY_ADMIN_USER")
+	_ = viper.BindEnv("proxy_admin_password", "PROXY_ADMIN_PASSWORD")
+	_ = viper.BindEnv("concurrent", "CONCURRENT")
+	_ = viper.BindEnv("port", "PORT")
+	_ = viper.BindEnv("tmysqlparser_bin", "TMYSQLPARSER_BIN")
+	_ = viper.BindEnv("redis_cli_bin", "REDIS_CLI_BIN")
+
+	_ = viper.BindEnv("log_json", "LOG_JSON")         // bool
+	_ = viper.BindEnv("log_console", "LOG_CONSOLE")   // bool
+	_ = viper.BindEnv("log_source", "LOG_SOURCE")     // bool
+	_ = viper.BindEnv("log_file_dir", "LOG_FILE_DIR") // string
+	_ = viper.BindEnv("log_debug", "LOG_DEBUG")
+
+	_ = viper.BindEnv("ca_file", "CA_FILE")
+	_ = viper.BindEnv("cert_file", "CERT_FILE")
+	_ = viper.BindEnv("key_file", "KEY_FILE")
+	_ = viper.BindEnv("tls", "TLS")
+
+	_ = viper.BindPFlags(rootCmd.PersistentFlags())
+}
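+
+// Startup sketch (the values are illustrative): because of the explicit
+// viper.BindEnv calls above, plain environment variables override the flag
+// defaults, e.g.
+//
+//	PORT=9999 MYSQL_ADMIN_USER=admin MYSQL_ADMIN_PASSWORD=secret ./db-remote-service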
diff --git a/dbm-services/mysql/db-remote-service/cmd/root.go b/dbm-services/mysql/db-remote-service/cmd/root.go
new file mode 100644
index 0000000000..fd6095b680
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/cmd/root.go
@@ -0,0 +1,138 @@
+package cmd
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"dbm-services/mysql/db-remote-service/pkg/config"
+	"dbm-services/mysql/db-remote-service/pkg/service"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"path"
+	"path/filepath"
+
+	"github.com/gin-gonic/gin"
+	"github.com/spf13/cobra"
+	"golang.org/x/exp/slog"
+	"gopkg.in/natefinch/lumberjack.v2"
+)
+
+// rootCmd represents the base command when called without any subcommands
+var rootCmd = &cobra.Command{
+	Use:   "start",
+	Short: "start db remote service",
+	Long:  `start db remote service`,
+	Run: func(cmd *cobra.Command, args []string) {
+		config.InitConfig()
+		initLogger()
+
+		slog.Debug("run", slog.Any("runtime config", config.RuntimeConfig))
+		slog.Debug("run", slog.Any("log config", config.LogConfig))
+
+		r := gin.Default()
+		service.RegisterRouter(r)
+
+		if config.RuntimeConfig.TLS {
+			slog.Info("run in tls mode")
+			s := &http.Server{
+				Addr:    fmt.Sprintf(":%d", config.RuntimeConfig.Port),
+				Handler: r,
+				TLSConfig: &tls.Config{
+					ClientCAs: func() *x509.CertPool {
+						pool := x509.NewCertPool()
+						ca, err := os.ReadFile(config.RuntimeConfig.CAFile)
+						if err != nil {
+							slog.Error("read cer file", err)
+							panic(err)
+						}
+						pool.AppendCertsFromPEM(ca)
+						return pool
+					}(),
+					ClientAuth: tls.RequireAnyClientCert,
+				},
+			}
+			if err := s.ListenAndServeTLS(config.RuntimeConfig.CertFile, config.RuntimeConfig.KeyFile); err != nil {
+				slog.Error("run service", err)
+				os.Exit(1)
+			}
+		} else {
+			slog.Info("run in http mode")
+			if err := r.Run(fmt.Sprintf(":%d", config.RuntimeConfig.Port)); err != nil {
+				slog.Error("run service", err)
+				os.Exit(1)
+			}
+		}
+	},
+}
+
+// Execute adds all child commands to the root command and sets flags appropriately.
+// This is called by main.main(). It only needs to happen once to the rootCmd.
+func Execute() {
+	err := rootCmd.Execute()
+	if err != nil {
+		slog.Error("execute cobra cmd", err)
+		os.Exit(1)
+	}
+}
+
+func initLogger() {
+	executable, _ := os.Executable()
+
+	var ioWriters []io.Writer
+
+	if config.LogConfig.Console {
+		ioWriters = append(ioWriters, os.Stdout)
+	}
+
+	if config.LogConfig.LogFileDir != "" {
+		if !path.IsAbs(config.LogConfig.LogFileDir) {
+			config.LogConfig.LogFileDir = filepath.Join(filepath.Dir(executable), config.LogConfig.LogFileDir)
+		}
+
+		err := os.MkdirAll(config.LogConfig.LogFileDir, 0755)
+		if err != nil {
+			panic(err)
+		}
+
+		logFile := path.Join(config.LogConfig.LogFileDir, fmt.Sprintf("%s.log", filepath.Base(executable)))
+		_, err = os.Stat(logFile)
+		if err != nil {
+			if os.IsNotExist(err) {
+				_, err := os.Create(logFile)
+				if err != nil {
+					panic(err)
+				}
+			} else {
+				panic(err)
+			}
+		}
+
+		ioWriters = append(
+			ioWriters, &lumberjack.Logger{
+				Filename:   logFile,
+				MaxAge:     5,
+				MaxBackups: 5,
+			},
+		)
+	}
+
+	handleOpt := slog.HandlerOptions{
+		AddSource: config.LogConfig.Source,
+	}
+
+	if config.LogConfig.Debug {
+		handleOpt.Level = slog.LevelDebug
+	} else {
+		handleOpt.Level = slog.LevelInfo
+	}
+
+	var logger *slog.Logger
+	if config.LogConfig.Json {
+		logger = slog.New(handleOpt.NewJSONHandler(io.MultiWriter(ioWriters...)))
+	} else {
+		logger = slog.New(handleOpt.NewTextHandler(io.MultiWriter(ioWriters...)))
+	}
+	slog.SetDefault(logger)
+}
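+
+// Client sketch (the file paths are illustrative): with tls=true the server
+// demands a client certificate (RequireAnyClientCert), so a caller would
+// look like
+//
+//	curl --cacert server-ca.pem --cert client.pem --key client-key.pem \
+//	    https://127.0.0.1:8888/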
diff --git a/dbm-services/mysql/db-remote-service/cmd/version.go b/dbm-services/mysql/db-remote-service/cmd/version.go
new file mode 100644
index 0000000000..a48f577194
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/cmd/version.go
@@ -0,0 +1,42 @@
+package cmd
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// versionCmd represents the version command
+var versionCmd = &cobra.Command{
+	Use:   "version",
+	Short: "A brief description of your command",
+	Long: `A longer description that spans multiple lines and likely contains examples
+and usage of using your command. For example:
+
+Cobra is a CLI library for Go that empowers applications.
+This application is a tool to generate the needed files
+to quickly create a Cobra application.`,
+	Run: func(cmd *cobra.Command, args []string) {
+		printVersion()
+	},
+}
+var version = ""
+var buildStamp = ""
+var gitHash = ""
+
+func init() {
+	rootCmd.AddCommand(versionCmd)
+}
+func printVersion() {
+	fmt.Printf("Version: %s, GitHash: %s, BuildAt: %s\n", version, gitHash, buildStamp)
+}
diff --git a/dbm-services/mysql/db-remote-service/go.mod b/dbm-services/mysql/db-remote-service/go.mod
new file mode 100644
index 0000000000..70fe4814b2
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/go.mod
@@ -0,0 +1,59 @@
+module dbm-services/mysql/db-remote-service
+
+go 1.19
+
+require (
+	github.com/gin-gonic/gin v1.9.0
+	github.com/go-redis/redis/v8 v8.11.5
+	github.com/go-sql-driver/mysql v1.7.1
+	github.com/google/uuid v1.3.0
+	github.com/jmoiron/sqlx v1.3.5
+	github.com/pkg/errors v0.9.1
+	github.com/spf13/cobra v1.7.0
+	github.com/spf13/viper v1.15.0
+	golang.org/x/exp v0.0.0-20230418202329-0354be287a23
+	gopkg.in/natefinch/lumberjack.v2 v2.2.1
+)
+
+require (
+	github.com/bytedance/sonic v1.8.8 // indirect
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
+	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/gin-contrib/sse v0.1.0 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/go-playground/validator/v10 v10.12.0 // indirect
+	github.com/goccy/go-json v0.10.2 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
+	github.com/leodido/go-urn v1.2.3 // indirect
+	github.com/lib/pq v1.10.0 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/mattn/go-isatty v0.0.18 // indirect
+	github.com/mattn/go-sqlite3 v1.14.16 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.7 // indirect
+	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	github.com/spf13/afero v1.9.5 // indirect
+	github.com/spf13/cast v1.5.0 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/subosito/gotenv v1.4.2 // indirect
+	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
+	github.com/ugorji/go/codec v1.2.11 // indirect
+	golang.org/x/arch v0.3.0 // indirect
+	golang.org/x/crypto v0.8.0 // indirect
+	golang.org/x/net v0.9.0 // indirect
+	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/dbm-services/mysql/db-remote-service/go.sum b/dbm-services/mysql/db-remote-service/go.sum
new file mode 100644
index 0000000000..f92d956332
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/go.sum
@@ -0,0 +1,575 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
+github.com/bytedance/sonic v1.8.8 h1:Kj4AYbZSeENfyXicsYppYKO0K2YWab+i2UTSY7Ukz9Q=
+github.com/bytedance/sonic v1.8.8/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8=
+github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI=
+github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA=
+github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
+github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
+github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
+github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
+github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA=
+github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E=
+github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
+github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
+github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
+github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us=
+github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
+github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
+github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
+github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
+github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
+github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
+github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
+golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/exp v0.0.0-20230418202329-0354be287a23 h1:4NKENAGIctmZYLK9W+X1kDK8ObBFqOSCJM6WE7CvkJY=
+golang.org/x/exp v0.0.0-20230418202329-0354be287a23/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/dbm-services/mysql/db-remote-service/main.go b/dbm-services/mysql/db-remote-service/main.go
new file mode 100644
index 0000000000..e1a04645b8
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/main.go
@@ -0,0 +1,9 @@
+package main
+
+import (
+	"dbm-services/mysql/db-remote-service/cmd"
+)
+
+func main() {
+	cmd.Execute()
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/config/config.go b/dbm-services/mysql/db-remote-service/pkg/config/config.go
new file mode 100644
index 0000000000..11322eb83f
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/config/config.go
@@ -0,0 +1,67 @@
+// Package config service configuration
+package config
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/spf13/viper"
+)
+
+// RuntimeConfig runtime configuration
+var RuntimeConfig *runtimeConfig
+
+// LogConfig log configuration
+var LogConfig *logConfig
+
+type runtimeConfig struct {
+	Concurrent         int
+	MySQLAdminUser     string
+	MySQLAdminPassword string
+	ProxyAdminUser     string
+	ProxyAdminPassword string
+	Port               int
+	ParserBin          string
+	CAFile             string
+	CertFile           string
+	KeyFile            string
+	TLS                bool
+}
+
+type logConfig struct {
+	Console    bool   `yaml:"console"`
+	LogFileDir string `yaml:"log_file_dir"`
+	Debug      bool   `yaml:"debug"`
+	Source     bool   `yaml:"source"`
+	Json       bool   `yaml:"json"`
+}
+
+// InitConfig initializes the runtime and log configuration from viper
+func InitConfig() {
+	RuntimeConfig = &runtimeConfig{
+		Concurrent:         viper.GetInt("concurrent"),
+		MySQLAdminUser:     viper.GetString("mysql_admin_user"),
+		MySQLAdminPassword: viper.GetString("mysql_admin_password"),
+		ProxyAdminUser:     viper.GetString("proxy_admin_user"),
+		ProxyAdminPassword: viper.GetString("proxy_admin_password"),
+		Port:               viper.GetInt("port"),
+		ParserBin:          viper.GetString("tmysqlparser_bin"),
+		TLS:                viper.GetBool("tls"),
+		CAFile:             viper.GetString("ca_file"),
+		CertFile:           viper.GetString("cert_file"),
+		KeyFile:            viper.GetString("key_file"),
+	}
+
+	if !filepath.IsAbs(RuntimeConfig.ParserBin) {
+		// resolve a relative parser path against the directory of the running executable
+		executable, _ := os.Executable()
+		RuntimeConfig.ParserBin = filepath.Join(filepath.Dir(executable), RuntimeConfig.ParserBin)
+	}
+
+	LogConfig = &logConfig{
+		Console:    viper.GetBool("log_console"),
+		LogFileDir: viper.GetString("log_file_dir"),
+		Debug:      viper.GetBool("log_debug"),
+		Source:     viper.GetBool("log_source"),
+		Json:       viper.GetBool("log_json"),
+	}
+}
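For orientation, a minimal sketch of how this configuration is consumed; the viper keys mirror those read by InitConfig, while the values and the main wrapper are illustrative assumptions, not part of this patch:

```go
package main

import (
	"fmt"

	"dbm-services/mysql/db-remote-service/pkg/config"

	"github.com/spf13/viper"
)

func main() {
	// hypothetical values; the real service binds these from flags, env or a config file
	viper.Set("concurrent", 10)
	viper.Set("port", 8888)
	viper.Set("tmysqlparser_bin", "tmysqlparse") // relative paths resolve next to the executable
	viper.Set("log_console", true)

	config.InitConfig()
	fmt.Println(config.RuntimeConfig.Port, config.RuntimeConfig.ParserBin)
}
```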
diff --git a/dbm-services/mysql/db-remote-service/pkg/mysql_rpc/embed.go b/dbm-services/mysql/db-remote-service/pkg/mysql_rpc/embed.go
new file mode 100644
index 0000000000..2e8c6d16e0
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/mysql_rpc/embed.go
@@ -0,0 +1,78 @@
+package mysql_rpc
+
+import (
+	"context"
+	"dbm-services/mysql/db-remote-service/pkg/config"
+	"dbm-services/mysql/db-remote-service/pkg/parser"
+	"fmt"
+	"strings"
+	"time"
+
+	_ "github.com/go-sql-driver/mysql" // mysql
+	"github.com/jmoiron/sqlx"
+	"golang.org/x/exp/slices"
+	"golang.org/x/exp/slog"
+)
+
+// MySQLRPCEmbed mysql implementation
+type MySQLRPCEmbed struct {
+}
+
+// MakeConnection establishes a mysql connection
+func (c *MySQLRPCEmbed) MakeConnection(address string, user string, password string, timeout int) (*sqlx.DB, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(timeout))
+	defer cancel()
+
+	db, err := sqlx.ConnectContext(
+		ctx,
+		"mysql",
+		fmt.Sprintf(`%s:%s@tcp(%s)/`, user, password, address),
+	)
+
+	if err != nil {
+		slog.Error("connect to mysql", err, slog.String("address", address))
+		return nil, err
+	}
+
+	return db, nil
+}
+
+// ParseCommand parses a mysql command
+func (c *MySQLRPCEmbed) ParseCommand(command string) (*parser.ParseQueryBase, error) {
+	/*
+		tmysqlparser does not get along well with the central control node, so we no
+		longer parse with tmysqlparser and instead fall back to the less precise
+		approach of classifying by the first word of the sql
+	*/
+	firstWord := strings.Split(command, " ")[0]
+	slog.Info("parse command",
+		slog.String("command", command),
+		slog.String("first command word", firstWord))
+
+	return &parser.ParseQueryBase{
+		QueryId:   0,
+		Command:   firstWord,
+		ErrorCode: 0,
+		ErrorMsg:  "",
+	}, nil
+}
+
+// IsQueryCommand reports whether the parsed mysql command is a query
+func (c *MySQLRPCEmbed) IsQueryCommand(pc *parser.ParseQueryBase) bool {
+	return slices.Index(genericDoQueryCommand, strings.ToLower(pc.Command)) >= 0
+}
+
+// IsExecuteCommand reports whether the parsed mysql command is an execute statement
+func (c *MySQLRPCEmbed) IsExecuteCommand(pc *parser.ParseQueryBase) bool {
+	return !c.IsQueryCommand(pc)
+	// return slices.Index(doExecuteParseCommands, pc.Command) >= 0
+}
+
+// User returns the mysql admin user
+func (c *MySQLRPCEmbed) User() string {
+	return config.RuntimeConfig.MySQLAdminUser
+}
+
+// Password returns the mysql admin password
+func (c *MySQLRPCEmbed) Password() string {
+	return config.RuntimeConfig.MySQLAdminPassword
+}
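A quick sketch of the resulting classification behaviour (illustrative only; genericDoQueryCommand is populated in init.go below):

```go
package main

import (
	"fmt"

	"dbm-services/mysql/db-remote-service/pkg/mysql_rpc"
)

func main() {
	rpc := &mysql_rpc.MySQLRPCEmbed{}
	for _, sql := range []string{"select 1", "SHOW DATABASES", "insert into t values (1)"} {
		pc, _ := rpc.ParseCommand(sql) // never fails in this implementation
		fmt.Printf("%-30s query=%v\n", sql, rpc.IsQueryCommand(pc))
	}
}
```

Since classification only looks at the first word, a statement like `/* hint */ select 1` would be misclassified; that is the imprecision the comment above accepts.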
diff --git a/dbm-services/mysql/db-remote-service/pkg/mysql_rpc/init.go b/dbm-services/mysql/db-remote-service/pkg/mysql_rpc/init.go
new file mode 100644
index 0000000000..4b68ed76fb
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/mysql_rpc/init.go
@@ -0,0 +1,105 @@
+package mysql_rpc
+
+var genericDoQueryCommand = []string{
+	"use",
+	"explain",
+	"select",
+	"show",
+}
+
+var doQueryParseCommands = []string{
+	"change_db",
+	"explain_other",
+	"select",
+	"show_binlog_events",
+	"show_binlogs",
+	"show_charsets",
+	"show_client_stats",
+	"show_collations",
+	"show_create",
+	"show_create_db",
+	"show_create_event",
+	"show_create_func",
+	"show_create_proc",
+	"show_create_trigger",
+	"show_create_user",
+	"show_databases",
+	"show_engine_logs",
+	"show_engine_mutex",
+	"show_engine_status",
+	"show_errors",
+	"show_events",
+	"show_fields",
+	"show_func_code",
+	"show_grants",
+	"show_index_stats",
+	"show_keys",
+	"show_master_stat",
+	"show_open_tables",
+	"show_plugins",
+	"show_privileges",
+	"show_proc_code",
+	"show_processlist",
+	"show_profile",
+	"show_profiles",
+	"show_relaylog_events",
+	"show_slave_hosts",
+	"show_slave_stat",
+	"show_status",
+	"show_status_func",
+	"show_status_proc",
+	"show_storage_engines",
+	"show_table_stats",
+	"show_table_status",
+	"show_tables",
+	"show_thread_stats",
+	"show_triggers",
+	"show_user_stats",
+	"show_variables",
+	"show_warns",
+}
+
+var doExecuteParseCommands = []string{
+	"alter_table",
+	"alter_user",
+	"change_master",
+	"change_replication_filter",
+	"create_db",
+	"create_event",
+	"create_function",
+	"create_procedure",
+	"create_table",
+	"create_trigger",
+	"create_user",
+	"create_view",
+	"delete",
+	"delete_multi",
+	"drop_compression_dictionary",
+	"drop_db",
+	"drop_event",
+	"drop_function",
+	"drop_index",
+	"drop_procedure",
+	"drop_server",
+	"drop_table",
+	"drop_trigger",
+	"drop_user",
+	"drop_view",
+	"flush",
+	"grant",
+	"insert",
+	"kill",
+	"rename_table",
+	"rename_user",
+	"replace",
+	"reset",
+	"revoke",
+	"revoke_all",
+	"set_option",
+	"slave_start",
+	"slave_stop",
+	"truncate",
+	"update",
+	"update_multi",
+	"flush",
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/mysql_rpc/mysql_rpc.go b/dbm-services/mysql/db-remote-service/pkg/mysql_rpc/mysql_rpc.go
new file mode 100644
index 0000000000..42337c6330
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/mysql_rpc/mysql_rpc.go
@@ -0,0 +1,2 @@
+// Package mysql_rpc mysql rpc
+package mysql_rpc
diff --git a/dbm-services/mysql/db-remote-service/pkg/parser/parser.go b/dbm-services/mysql/db-remote-service/pkg/parser/parser.go
new file mode 100644
index 0000000000..99b6201c8f
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/parser/parser.go
@@ -0,0 +1,80 @@
+// Package parser sql parsing
+package parser
+
+import (
+	"bytes"
+	"dbm-services/mysql/db-remote-service/pkg/config"
+	"encoding/json"
+	"os"
+	"os/exec"
+	"path"
+
+	"github.com/google/uuid"
+	"github.com/pkg/errors"
+)
+
+// ParseQueryBase query result base field
+type ParseQueryBase struct {
+	QueryId   int    `json:"query_id"`
+	Command   string `json:"command"`
+	ErrorCode int    `json:"error_code"`
+	ErrorMsg  string `json:"error_msg"`
+}
+
+// ParseResult parse result
+type ParseResult struct {
+	Result          []*ParseQueryBase `json:"result"`
+	MinMySQLVersion int               `json:"min_mysql_version"`
+	MaxMySQLVersion int               `json:"max_my_sql_version"`
+}
+
+// Parse parser impl
+func Parse(payLoad string) (*ParseResult, error) {
+	tempDir, err := os.MkdirTemp("", uuid.New().String())
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		_ = os.RemoveAll(tempDir)
+	}()
+
+	inputFile, err := os.CreateTemp(tempDir, "tmp_input_")
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		_ = inputFile.Close()
+	}()
+
+	_, err = inputFile.WriteString(payLoad)
+	if err != nil {
+		return nil, err
+	}
+
+	var stdout, stderr bytes.Buffer
+	cmd := exec.Command(
+		config.RuntimeConfig.ParserBin,
+		"--sql-file", inputFile.Name(),
+		"--output-path", tempDir,
+	)
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+
+	err = cmd.Run()
+	if err != nil {
+		return nil, errors.Wrap(err, stderr.String())
+	}
+
+	output, err := os.ReadFile(path.Join(tempDir, "tmysqlparse_out.json"))
+	if err != nil {
+		return nil, err
+	}
+
+	var res ParseResult
+	err = json.Unmarshal(output, &res)
+	if err != nil {
+		return nil, err
+	}
+
+	return &res, nil
+}
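A hedged usage sketch: it assumes tmysqlparser_bin points at a working tmysqlparse binary that writes tmysqlparse_out.json into the output path, which is exactly what Parse expects; the path below is hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"dbm-services/mysql/db-remote-service/pkg/config"
	"dbm-services/mysql/db-remote-service/pkg/parser"

	"github.com/spf13/viper"
)

func main() {
	viper.Set("tmysqlparser_bin", "/usr/local/bin/tmysqlparse") // hypothetical location
	config.InitConfig()

	res, err := parser.Parse("alter table t add column c int;\nselect * from t;")
	if err != nil {
		log.Fatal(err)
	}
	for _, q := range res.Result {
		fmt.Println(q.QueryId, q.Command, q.ErrorCode, q.ErrorMsg)
	}
}
```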
diff --git a/dbm-services/mysql/db-remote-service/pkg/proxy_rpc/proxy_rpc.go b/dbm-services/mysql/db-remote-service/pkg/proxy_rpc/proxy_rpc.go
new file mode 100644
index 0000000000..69f31da0e0
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/proxy_rpc/proxy_rpc.go
@@ -0,0 +1,96 @@
+// Package proxy_rpc proxy rpc implementation
+package proxy_rpc
+
+import (
+	"context"
+	"dbm-services/mysql/db-remote-service/pkg/config"
+	"dbm-services/mysql/db-remote-service/pkg/parser"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/go-sql-driver/mysql"
+	_ "github.com/go-sql-driver/mysql" // mysql
+	"github.com/jmoiron/sqlx"
+	"golang.org/x/exp/slog"
+)
+
+var proxyQueryParseCommands = []string{
+	"select",
+}
+
+var proxyExecuteParseCommands = []string{
+	"refresh_users",
+}
+
+// ProxyRPCEmbed proxy implementation
+type ProxyRPCEmbed struct {
+}
+
+// ParseCommand parses a proxy command
+func (c *ProxyRPCEmbed) ParseCommand(command string) (*parser.ParseQueryBase, error) {
+	return &parser.ParseQueryBase{
+		QueryId:   0,
+		Command:   command,
+		ErrorCode: 0,
+		ErrorMsg:  "",
+	}, nil
+}
+
+// MakeConnection establishes a proxy admin connection
+func (c *ProxyRPCEmbed) MakeConnection(address string, user string, password string, timeout int) (*sqlx.DB, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(timeout))
+	defer cancel()
+
+	db, err := sqlx.ConnectContext(
+		ctx,
+		"mysql",
+		fmt.Sprintf(`%s:%s@tcp(%s)/`, user, password, address),
+	)
+
+	if err != nil {
+		if merr, ok := err.(*mysql.MySQLError); ok {
+			// mysql error 1105 from the proxy admin interface is expected during
+			// connect and is tolerated; any other mysql error is a real failure
+			if merr.Number != 1105 {
+				slog.Error("connect to proxy", err, slog.String("address", address))
+				return nil, merr
+			}
+		} else {
+			slog.Error("connect to proxy", err, slog.String("address", address))
+			return nil, err
+		}
+	}
+
+	return db, nil
+}
+
+// IsQueryCommand reports whether the proxy command is a query
+func (c *ProxyRPCEmbed) IsQueryCommand(pc *parser.ParseQueryBase) bool {
+	for _, ele := range proxyQueryParseCommands {
+		if strings.HasPrefix(strings.ToLower(pc.Command), ele) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// IsExecuteCommand reports whether the proxy command is an execute command
+func (c *ProxyRPCEmbed) IsExecuteCommand(pc *parser.ParseQueryBase) bool {
+	for _, ele := range proxyExecuteParseCommands {
+		if strings.HasPrefix(strings.ToLower(pc.Command), ele) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// User returns the proxy admin user
+func (c *ProxyRPCEmbed) User() string {
+	return config.RuntimeConfig.ProxyAdminUser
+}
+
+// Password returns the proxy admin password
+func (c *ProxyRPCEmbed) Password() string {
+	return config.RuntimeConfig.ProxyAdminPassword
+}
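Illustrative behaviour of the prefix matching above (a sketch, not part of the patch; the proxy admin statements shown are hypothetical examples):

```go
package main

import (
	"fmt"

	"dbm-services/mysql/db-remote-service/pkg/proxy_rpc"
)

func main() {
	rpc := &proxy_rpc.ProxyRPCEmbed{}
	for _, cmd := range []string{"SELECT * FROM backends", "REFRESH_USERS('user@%','+')"} {
		pc, _ := rpc.ParseCommand(cmd)
		fmt.Printf("%-30s query=%v execute=%v\n", cmd, rpc.IsQueryCommand(pc), rpc.IsExecuteCommand(pc))
	}
}
```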
diff --git a/dbm-services/mysql/db-remote-service/pkg/redis_rpc/client.go b/dbm-services/mysql/db-remote-service/pkg/redis_rpc/client.go
new file mode 100644
index 0000000000..76b1a5455f
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/redis_rpc/client.go
@@ -0,0 +1,137 @@
+package redis_rpc
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/go-redis/redis/v8"
+	"golang.org/x/exp/slog"
+)
+
+// RedisClient redis connection info
+type RedisClient struct {
+	Addr           string        `json:"addr"`
+	Password       string        `json:"password"`
+	DB             int           `json:"db"`
+	MaxRetryTime   int           `json:"maxRetryTimes"`
+	InstanceClient *redis.Client `json:"-"`
+}
+
+// NewRedisClientWithTimeout creates a redis client with a configurable timeout
+func NewRedisClientWithTimeout(addr, passwd string, db int, timeout time.Duration) (
+	conn *RedisClient, err error) {
+	conn = &RedisClient{
+		Addr:         addr,
+		Password:     passwd,
+		DB:           db,
+		MaxRetryTime: int(timeout.Seconds()),
+	}
+	err = conn.newConn()
+	if err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (db *RedisClient) newConn() (err error) {
+	// when the client reconnects after a failed command, make sure the selected database is still correct
+	var redisConnHook = func(ctx context.Context, cn *redis.Conn) error {
+		pipe01 := cn.Pipeline()
+		_, err := pipe01.Select(context.TODO(), db.DB).Result()
+		if err != nil {
+			err = fmt.Errorf("newConnct pipeline change db fail,err:%v", err)
+			return err
+		}
+		_, err = pipe01.Exec(context.TODO())
+		if err != nil {
+			err = fmt.Errorf("newConnct pipeline.exec db fail,err:%v", err)
+			slog.Error(err.Error())
+			return err
+		}
+		return nil
+	}
+	redisOpt := &redis.Options{
+		Addr:            db.Addr,
+		DB:              db.DB,
+		DialTimeout:     1 * time.Minute,
+		ReadTimeout:     1 * time.Minute,
+		MaxConnAge:      24 * time.Hour,
+		MaxRetries:      db.MaxRetryTime, // automatic retry count on failure
+		MinRetryBackoff: 1 * time.Second, // retry backoff
+		MaxRetryBackoff: 1 * time.Second,
+		PoolSize:        10,
+		OnConnect:       redisConnHook,
+	}
+	// clusterOpt is prepared for cluster mode but is not yet used to create a client
+	clusterOpt := &redis.ClusterOptions{
+		Addrs:           []string{db.Addr},
+		DialTimeout:     1 * time.Minute,
+		ReadTimeout:     1 * time.Minute,
+		MaxConnAge:      24 * time.Hour,
+		MaxRetries:      db.MaxRetryTime, // automatic retry count on failure
+		MinRetryBackoff: 1 * time.Second, // retry backoff
+		MaxRetryBackoff: 1 * time.Second,
+		PoolSize:        10,
+		OnConnect:       redisConnHook,
+	}
+	if db.Password != "" {
+		redisOpt.Password = db.Password
+		clusterOpt.Password = db.Password
+	}
+	db.InstanceClient = redis.NewClient(redisOpt)
+	_, err = db.InstanceClient.Ping(context.TODO()).Result()
+	if err != nil {
+		errStr := fmt.Sprintf("redis new conn fail,err:%v,addr:%s", err, db.Addr)
+		slog.Error(errStr)
+		return fmt.Errorf("redis new conn fail,err:%v addr:%s", err, db.Addr)
+	}
+	return
+}
+
+// DoCommand runs a command, switching to the requested db first
+func (db *RedisClient) DoCommand(cmdArgv []string, dbnum int) (interface{}, error) {
+	err := db.SelectDB(dbnum)
+	if err != nil {
+		return nil, err
+	}
+	var ret interface{}
+	dstCmds := []interface{}{}
+	for _, cmd01 := range cmdArgv {
+		dstCmds = append(dstCmds, cmd01)
+	}
+	ret, err = db.InstanceClient.Do(context.TODO(), dstCmds...).Result()
+	if err != nil && err != redis.Nil {
+		slog.Error("Redis  DoCommand fail,err:%v,command:%+v,addr:%s", err, cmdArgv, db.Addr)
+		return nil, err
+	} else if err != nil && err == redis.Nil {
+		return "", nil
+	}
+	return ret, nil
+}
+
+// SelectDB switches the connection to the given db
+func (db *RedisClient) SelectDB(dbNum int) (err error) {
+	if db.DB == dbNum {
+		return nil
+	}
+	pipe01 := db.InstanceClient.Pipeline()
+	_, err = pipe01.Select(context.TODO(), dbNum).Result()
+	if err != nil && err != redis.Nil {
+		err = fmt.Errorf("redis:%s selectdb fail,err:%v", db.Addr, err)
+		slog.Error(err.Error())
+		return
+	}
+	_, err = pipe01.Exec(context.TODO())
+	if err != nil && err != redis.Nil {
+		err = fmt.Errorf("redis:%s selectdb fail,err:%v", db.Addr, err)
+		slog.Error(err.Error())
+		return
+	}
+	db.DB = dbNum
+	return nil
+}
+
+// Close closes the underlying redis connection
+func (db *RedisClient) Close() {
+	db.InstanceClient.Close()
+}
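A usage sketch for the client; the address, password and key are hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"dbm-services/mysql/db-remote-service/pkg/redis_rpc"
)

func main() {
	cli, err := redis_rpc.NewRedisClientWithTimeout("127.0.0.1:6379", "", 0, 2*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// DoCommand switches to the requested db before executing
	ret, err := cli.DoCommand([]string{"get", "somekey"}, 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ret)
}
```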
diff --git a/dbm-services/mysql/db-remote-service/pkg/redis_rpc/common.go b/dbm-services/mysql/db-remote-service/pkg/redis_rpc/common.go
new file mode 100644
index 0000000000..03dcd79b3f
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/redis_rpc/common.go
@@ -0,0 +1,467 @@
+package redis_rpc
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"os/exec"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/gin-gonic/gin"
+	"github.com/go-redis/redis/v8"
+	"github.com/pkg/errors"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+// MAX maximum allowed value size for pre-checked read commands (1 MiB)
+const MAX = 1 * 1024 * 1024
+
+var ctx = context.Background()
+
+// RedisQueryParams redis request parameters
+type RedisQueryParams struct {
+	Addresses []string `json:"addresses"`
+	DbNum     int      `json:"db_num"`
+	Password  string   `json:"password"`
+	Command   string   `json:"command"`
+}
+
+// StringWithoutPasswd formats the params without exposing the password
+func (param *RedisQueryParams) StringWithoutPasswd() string {
+	return fmt.Sprintf("{addresses:%+v,db_num:%d,command:%s,password:xxxx}", param.Addresses, param.DbNum, param.Command)
+}
+
+// CmdResult result of a command against a single address
+type CmdResult struct {
+	Address string      `json:"address"`
+	Result  interface{} `json:"result"`
+}
+
+// RedisQueryResp response payload for a redis query
+type RedisQueryResp struct {
+	Code     int         `json:"code"`
+	Data     []CmdResult `json:"data"`
+	ErrorMsg string      `json:"error_msg"`
+}
+
+// SendResponse writes the query result as a json response
+func SendResponse(c *gin.Context, code int, errMsg string, data []CmdResult) {
+	c.JSON(http.StatusOK, RedisQueryResp{
+		Code:     code,
+		ErrorMsg: errMsg,
+		Data:     data,
+	})
+}
+
+// FormatName returns the normalized command string
+func FormatName(msg string) (string, error) {
+	stringMsg := strings.TrimSpace(msg)
+	if len(stringMsg) == 0 {
+		return "", fmt.Errorf("bad input: msg length too short")
+	}
+	stringMsg = strings.ReplaceAll(stringMsg, `"`, ``)
+	stringMsg = strings.ReplaceAll(stringMsg, `''`, ``)
+
+	// collapse runs of whitespace into single spaces
+	pattern := regexp.MustCompile(`\s+`)
+	stringMsg = pattern.ReplaceAllString(stringMsg, " ")
+
+	return stringMsg, nil
+}
+
+// DoRedisCmd executes a redis command by shelling out to redis-cli
+func DoRedisCmd(address, redisPass, cmdline, dbNum string, raw bool) (string, error) {
+	redisHost, redisPort, err := net.SplitHostPort(address)
+	if err != nil {
+		return "", errors.Wrap(err, "bad address")
+	}
+	var cmdPath string
+	redisBin := viper.GetString("redis_cli_bin")
+	if cmdPath, err = exec.LookPath(redisBin); err != nil {
+		return "", errors.New("no redis-cli found")
+	}
+	var argv []string
+
+	argv = append(argv, []string{"--no-auth-warning", "-h", redisHost, "-p", redisPort}...)
+	argv = append(argv, "-n", dbNum)
+	if redisPass != "" {
+		argv = append(argv, "-a", redisPass)
+	}
+	if raw {
+		argv = append(argv, "--raw")
+	} else {
+		argv = append(argv, "--no-raw")
+	}
+	cmd := exec.Command(cmdPath, argv...)
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		return "", errors.Wrap(err, "StdinPipe")
+	}
+
+	go func() {
+		defer stdin.Close()
+		io.WriteString(stdin, cmdline)
+	}()
+
+	out, err := cmd.CombinedOutput()
+	if err != nil {
+		return "", errors.Wrap(err, "CombinedOutput")
+	}
+
+	// treat these well-known error strings in the output as failures
+	for _, e := range redisError {
+		if strings.Contains(string(out), e) {
+			return "", errors.New(string(out))
+		}
+	}
+
+	return string(out), nil
+}
+
+// DoRedisCmdNew executes a redis command with the native go client
+func DoRedisCmdNew(address, redisPass, cmd string, dbNum int) (ret string, err error) {
+	var strBuilder strings.Builder
+	cli, err := NewRedisClientWithTimeout(address, redisPass, dbNum, time.Second*2)
+	if err != nil {
+		return
+	}
+	defer cli.Close()
+	cmdlist := strings.Fields(cmd)
+	cmdRet, err := cli.DoCommand(cmdlist, dbNum)
+	if err != nil {
+		return
+	}
+	switch v := cmdRet.(type) {
+	case int64:
+		strBuilder.WriteString(strconv.FormatInt(v, 10))
+	case string:
+		strBuilder.WriteString(v)
+	case []string:
+		for _, item01 := range v {
+			strBuilder.WriteString(item01 + "\n")
+		}
+	case []interface{}:
+		for _, item01 := range v {
+			strBuilder.WriteString(fmt.Sprintf("%v", item01) + "\n")
+		}
+	default:
+		slog.Info(fmt.Sprintf("ExecuteCmd unknown result type,cmds:'%s',retType:%T,addr:%s", cmd, v, address))
+		byte01, _ := json.Marshal(cmdRet)
+		strBuilder.WriteString(string(byte01))
+	}
+	ret = strBuilder.String()
+	return
+}
+
+// TcpClient01 sends a single command to a tcp port and reads the reply,
+// in the style of netcat
+func TcpClient01(addr01, cmd string) (ret string, err error) {
+	client, err := net.Dial("tcp", addr01)
+	if err != nil {
+		err = fmt.Errorf("net.Dial fail,err:%v", err)
+		return "", err
+	}
+	defer client.Close()
+	_, err = client.Write([]byte(cmd))
+	if err != nil {
+		err = fmt.Errorf("tcp client.Write fail,err:%v,addr:%s,command:%s", err, addr01, cmd)
+		return "", err
+	}
+	buf := make([]byte, 1024)
+	for {
+		readCnt, err := client.Read(buf)
+		if err != nil {
+			if err == io.EOF {
+				return ret, nil
+			}
+			err = fmt.Errorf("tcp client.read fail,err:%v,addr:%s,command:%s", err, addr01, cmd)
+			return "", err
+		}
+		ret = ret + string(buf[:readCnt])
+	}
+}
+
+// GetValueSize analyzes cmdLine and returns its value size.
+// Return convention: -1 for any error and for non-read commands; 0 for unrestricted read commands; the value size for restricted read commands
+func GetValueSize(address, pass string, cmdLine string) (int, bool, error) {
+	rdb := redis.NewClient(&redis.Options{
+		Addr:     address,
+		Password: pass,
+		DB:       0, // use default DB
+	})
+
+	// close the client when we're done to avoid leaking connections
+	defer rdb.Close()
+
+	// make sure we are actually connected to redis
+	pong, err := rdb.Ping(ctx).Result()
+	if err != nil {
+		return -1, false, fmt.Errorf("connect redis failed: %s", err.Error())
+	}
+	if pong != "PONG" {
+		return -1, false, fmt.Errorf("connect redis failed")
+	}
+	fmt.Printf("connect redis %s succeed\n", address)
+
+	return getValue(rdb, cmdLine)
+}
+
+func getValue(rdb *redis.Client, cmdLine string) (int, bool, error) {
+	stringMsg := strings.TrimSpace(cmdLine)
+	inputs := strings.Fields(stringMsg)
+	if len(inputs) == 0 {
+		return -1, false, fmt.Errorf("empty command")
+	}
+	// take the command name first; the remaining arguments are handled per command
+	command := inputs[0]
+	commandLc := treatString(strings.ToLower(command))
+	meta, ok := RedisCommandTable[commandLc]
+	if !ok {
+		return -1, false, fmt.Errorf("unknown redis command: %s", commandLc)
+	}
+	keys, err := meta.GetKeys(cmdLine)
+	if err != nil {
+		return -1, false, err
+	}
+	if len(keys) == 0 {
+		return 0, true, nil
+	}
+	key := keys[0]
+
+	if commandLc == "get" {
+		// redis_parse.go annotates the minimum argument count for each command
+		n, err := rdb.StrLen(ctx, key).Result()
+		if err != nil {
+			return -1, true, fmt.Errorf("check your redis command, strlen cmd error: %s", err)
+		}
+		return int(n), true, checkLength(int(n))
+	} else if commandLc == "mget" { // 有多个key
+		if len(inputs) < 2 {
+			return -1, true, fmt.Errorf("incomplete argvs")
+		}
+		totalLen := 0
+		for i := 1; i < len(inputs); i++ { // 检索所有的 key 的长度,加起来再比较
+			len, err := rdb.StrLen(ctx, inputs[i]).Result()
+			if err != nil {
+				return -1, true, fmt.Errorf("check your redis command, strlen cmd error: " + err.Error())
+			}
+			totalLen += int(len)
+		}
+		return totalLen, true, checkLength(totalLen)
+	} else if commandLc == "hgetall" || commandLc == "hkeys" || commandLc == "hvals" {
+		return PrecheckHashKeysCmd(rdb, commandLc, inputs)
+	} else if commandLc == "lrange" {
+		return PrecheckListKeysCmd(rdb, commandLc, inputs)
+	} else if commandLc == "smembers" || commandLc == "srandmember" {
+		return PrecheckSetKeysCmd(rdb, commandLc, inputs)
+	} else if commandLc == "zrangebyscore" || commandLc == "zrevrangebyscore" || commandLc == "zrangebylex" ||
+		commandLc == "zrevrangebylex" {
+		return PrecheckZsetKeysCmd(rdb, commandLc, inputs)
+	} else if commandLc == "scan" || commandLc == "hscan" || commandLc == "sscan" || commandLc == "zscan" {
+		return PrecheckScanCmd(rdb, commandLc, inputs)
+	}
+	// unrestricted read command
+	return 0, false, nil
+}
+
+// treatString strips wrapping single and double quotes
+func treatString(str string) string {
+	str = strings.Trim(str, `"`)
+	str = strings.Trim(str, `'`)
+	return str
+}
+
+// treatLRangeArgv LRANGE offsets may be negative; normalize them to simplify the length calculation
+func treatLRangeArgv(start int, end int, listLen int) (int, int) {
+	if start < 0 {
+		if start*(-1) > listLen { // treat it as starting from the head
+			start = 0
+		} else {
+			start = listLen - start*(-1) // a negative offset counts from the tail; convert it to a positive head-based offset
+		}
+	}
+	if end < 0 {
+		if end*(-1) > listLen { // lrange would return an empty list; set end to -1 to simplify the calculation
+			end = -1
+		} else {
+			end = listLen - end*(-1)
+		}
+	}
+	if end >= listLen { // treat it as reading through to the tail
+		end = listLen - 1
+	}
+	return start, end
+}
+
+// treatSRandMemberCount the count argument also has several cases: a negative count may exceed the set size (members can repeat), a positive count is capped at the set size
+func treatSRandMemberCount(count int, setLen int) int {
+	if count < 0 {
+		return count * (-1)
+	}
+	if count > setLen {
+		return setLen
+	}
+	return count
+}
+
+func checkLength(n int) error {
+	if n > MAX {
+		return fmt.Errorf("length of value more than %d bytes", MAX)
+	}
+	return nil
+}
+
+// PrecheckHashKeysCmd prechecks commands on hash keys
+func PrecheckHashKeysCmd(rdb *redis.Client, commandLc string, inputs []string) (int, bool, error) {
+	if len(inputs) < 2 {
+		return -1, false, fmt.Errorf("incomplete argvs")
+	}
+	key := inputs[1]
+	n, err := rdb.HLen(ctx, key).Result()
+	if err != nil {
+		return -1, false, fmt.Errorf("check your redis command, hlen cmd error: %s", err)
+	}
+	return int(n), false, checkLength(int(n))
+}
+
+// PrecheckListKeysCmd prechecks commands on list keys
+func PrecheckListKeysCmd(rdb *redis.Client, commandLc string, inputs []string) (int, bool, error) {
+	if len(inputs) < 4 {
+		return -1, false, fmt.Errorf("incomplete argvs")
+	}
+	key := inputs[1]
+	start, err := strconv.Atoi(inputs[2])
+	if err != nil {
+		return -1, false, err
+	}
+	end, err := strconv.Atoi(inputs[3])
+	if err != nil {
+		return -1, false, err
+	}
+	listLen, err := rdb.LLen(ctx, key).Result()
+	if err != nil {
+		return -1, false, fmt.Errorf("check your redis command, llen cmd error: " + err.Error())
+	}
+	start, end = treatLRangeArgv(start, end, int(listLen))
+	len := end - start + 1
+	if len < 0 {
+		return -1, false, nil
+	}
+	return int(len), false, checkLength(int(len))
+}
+
+// PrecheckSetKeysCmd prechecks commands on set keys
+func PrecheckSetKeysCmd(rdb *redis.Client, commandLc string, inputs []string) (int, bool, error) {
+	if len(inputs) < 2 {
+		return -1, false, fmt.Errorf("incomplete argvs")
+	}
+	if commandLc == "smembers" {
+		key := inputs[1]
+		setLen, err := rdb.SCard(ctx, key).Result()
+		if err != nil {
+			return -1, false, fmt.Errorf("check your redis command, scard cmd error: %s", err)
+		}
+		return int(setLen), false, checkLength(int(setLen))
+	} else if commandLc == "srandmember" {
+		if len(inputs) == 2 { // returns a single random member
+			return 1, false, nil
+		} else if len(inputs) == 3 { // count argument present
+			count, err := strconv.Atoi(inputs[2])
+			if err != nil {
+				return -1, false, fmt.Errorf("check your count argv: %s", err)
+			}
+			key := inputs[1]
+			setLen, err := rdb.SCard(ctx, key).Result()
+			if err != nil {
+				return -1, false, fmt.Errorf("check your redis command, scard cmd error: %s", err)
+			}
+			count = treatSRandMemberCount(count, int(setLen))
+			return count, false, checkLength(count)
+		}
+	}
+	return 0, false, nil
+}
+
+// PrecheckZsetKeysCmd prechecks zset commands
+func PrecheckZsetKeysCmd(rdb *redis.Client, commandLc string, inputs []string) (int, bool, error) {
+	// ZRANGEBYSCORE key min max [WITHSCORES] [LIMIT offset count]
+	// ZRANGEBYLEX key min max [LIMIT offset count]
+	haveLimit := false
+	var count int
+
+	for i, argv := range inputs { // scan for the LIMIT keyword
+		argvLow := strings.ToLower(argv)
+		if argvLow != "limit" {
+			continue
+		}
+		if len(inputs) < i+3 { // [LIMIT offset count]
+			return -1, false, fmt.Errorf("incomplete argvs")
+		}
+		cnt, err := strconv.Atoi(inputs[i+2])
+		if err != nil {
+			return -1, false, fmt.Errorf("check your count argv: %s", err)
+		}
+		haveLimit = true
+		count = cnt
+		break
+	}
+
+	if haveLimit {
+		return count, false, checkLength(count)
+	}
+
+	// no [LIMIT offset count] argument was given
+	if len(inputs) < 4 {
+		return -1, false, fmt.Errorf("incomplete argvs")
+	}
+	key := inputs[1]
+	min := inputs[2]
+	max := inputs[3]
+	if commandLc == "zrevrangebyscore" || commandLc == "zrevrangebylex" {
+		min, max = max, min
+	}
+	if commandLc == "zrangebyscore" || commandLc == "zrevrangebyscore" {
+		n, err := rdb.ZCount(ctx, key, min, max).Result()
+		if err != nil {
+			return -1, false, fmt.Errorf("check your redis command, zcount cmd error: %s", err)
+		}
+		return int(n), false, checkLength(int(n))
+	}
+	n, err := rdb.ZLexCount(ctx, key, min, max).Result()
+	if err != nil {
+		return -1, false, fmt.Errorf("check your redis command, zlexcount cmd error: %s", err)
+	}
+	return int(n), false, checkLength(int(n))
+}
+
+// PrecheckScanCmd prechecks scan commands
+func PrecheckScanCmd(rdb *redis.Client, commandLc string, inputs []string) (int, bool, error) {
+	haveCount := false
+	var count int
+	for i, argv := range inputs {
+		argvLow := strings.ToLower(argv)
+		if argvLow == "count" {
+			if len(inputs) < i+2 { // [COUNT count]
+				return -1, false, fmt.Errorf("incomplete argvs")
+			}
+			cnt, err := strconv.Atoi(inputs[i+1])
+			if err != nil {
+				return -1, false, fmt.Errorf("check your count argv: %s", err)
+			}
+			haveCount = true
+			count = cnt
+		}
+	}
+	if haveCount {
+		return count, false, checkLength(count)
+	}
+	return -1, false, fmt.Errorf("scan commands must carry an explicit COUNT argument")
+}
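To make the LRANGE offset normalization above concrete, a small test-style sketch (it assumes placement in the same redis_rpc package, since treatLRangeArgv is unexported; the expected values follow LRANGE semantics for a 10-element list):

```go
package redis_rpc

import "testing"

func TestTreatLRangeArgv(t *testing.T) {
	cases := []struct{ start, end, listLen, wantStart, wantEnd int }{
		{0, -1, 10, 0, 9},   // -1 means "through to the tail"
		{-3, -1, 10, 7, 9},  // negative offsets count from the tail
		{-100, 5, 10, 0, 5}, // an out-of-range negative start clamps to the head
		{0, 100, 10, 0, 9},  // an end past the tail clamps to the last index
	}
	for _, c := range cases {
		gotStart, gotEnd := treatLRangeArgv(c.start, c.end, c.listLen)
		if gotStart != c.wantStart || gotEnd != c.wantEnd {
			t.Errorf("treatLRangeArgv(%d,%d,%d) = (%d,%d), want (%d,%d)",
				c.start, c.end, c.listLen, gotStart, gotEnd, c.wantStart, c.wantEnd)
		}
	}
}
```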
diff --git a/dbm-services/mysql/db-remote-service/pkg/redis_rpc/init.go b/dbm-services/mysql/db-remote-service/pkg/redis_rpc/init.go
new file mode 100644
index 0000000000..6402f8a9e0
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/redis_rpc/init.go
@@ -0,0 +1,532 @@
+package redis_rpc
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+)
+
+const (
+	adminFlag    = "admin"
+	writeFlag    = "write"
+	readOnlyFlag = "read-only"
+)
+
+var once sync.Once
+
+// RedisCommandTable redis command table
+var RedisCommandTable map[string]*RedisCmdMeta
+
+// RedisCmdMeta attributes of a redis command entry
+type RedisCmdMeta struct {
+	Name     string `json:"name"`
+	Arity    int    `json:"arity"`  // number of arguments; -N means >= N
+	Sflags   string `json:"sflags"` // admin/write/read-only
+	FirstKey int    `json:"firstKey"`
+	LastKey  int    `json:"lastKey"`
+	KeyStep  int    `json:"keyStep"`
+}
+
+// GetKeys extracts the key arguments from a command line
+func (m *RedisCmdMeta) GetKeys(srcCmd string) (keys []string, err error) {
+	if strings.Contains(m.Sflags, adminFlag) {
+		// admin commands carry no keys
+		return
+	}
+	if m.FirstKey == 0 {
+		return
+	}
+	cmdArgs := strings.Fields(srcCmd)
+	absArity := m.Arity
+	if m.Arity < 0 {
+		absArity = -m.Arity
+	}
+	if len(cmdArgs) < absArity {
+		err = fmt.Errorf("'%s' cmdArgs len:%d not enough, need at least %d", cmdArgs[0], len(cmdArgs), absArity)
+		return
+	}
+	last := m.LastKey
+	if last < 0 {
+		last = last + len(cmdArgs)
+	}
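+	// e.g. "mset k1 v1 k2 v2" with FirstKey=1, LastKey=-1, KeyStep=2 yields keys {k1, k2}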
+	for i := m.FirstKey; i <= last; i = i + m.KeyStep {
+		keys = append(keys, cmdArgs[i])
+	}
+	return
+}
+
+var twemproxyCommand = []string{
+	// TODO no restriction for now; decide later whether to limit the allowed commands
+	"get nosqlproxy servers",
+}
+
+var redisError = []string{
+	"Could not connect to Redis at",
+	"NOAUTH Authentication required",
+	"AUTH failed: ERR invalid password",
+	"ERR wrong number of arguments",
+}
+
+func init() {
+	once.Do(func() {
+		RedisCommandTable = make(map[string]*RedisCmdMeta)
+		RedisCommandTable["module"] = &RedisCmdMeta{Name: "module", Arity: -2, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["get"] = &RedisCmdMeta{Name: "get", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["getex"] = &RedisCmdMeta{Name: "getex", Arity: -2, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["getdel"] = &RedisCmdMeta{Name: "getdel", Arity: 2, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["set"] = &RedisCmdMeta{Name: "set", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["setnx"] = &RedisCmdMeta{Name: "setnx", Arity: 3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["setex"] = &RedisCmdMeta{Name: "setex", Arity: 4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["psetex"] = &RedisCmdMeta{Name: "psetex", Arity: 4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["append"] = &RedisCmdMeta{Name: "append", Arity: 3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["strlen"] = &RedisCmdMeta{Name: "strlen", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["del"] = &RedisCmdMeta{Name: "del", Arity: -2, Sflags: writeFlag, FirstKey: 1, LastKey: -1,
+			KeyStep: 1}
+		RedisCommandTable["unlink"] = &RedisCmdMeta{Name: "unlink", Arity: -2, Sflags: writeFlag, FirstKey: 1, LastKey: -1,
+			KeyStep: 1}
+		RedisCommandTable["exists"] = &RedisCmdMeta{Name: "exists", Arity: -2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: -1,
+			KeyStep: 1}
+		RedisCommandTable["setbit"] = &RedisCmdMeta{Name: "setbit", Arity: 4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["getbit"] = &RedisCmdMeta{Name: "getbit", Arity: 3, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["bitfield"] = &RedisCmdMeta{Name: "bitfield", Arity: -2, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["bitfield_ro"] = &RedisCmdMeta{Name: "bitfield_ro", Arity: -2, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["setrange"] = &RedisCmdMeta{Name: "setrange", Arity: 4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["getrange"] = &RedisCmdMeta{Name: "getrange", Arity: 4, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["substr"] = &RedisCmdMeta{Name: "substr", Arity: 4, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["incr"] = &RedisCmdMeta{Name: "incr", Arity: 2, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["decr"] = &RedisCmdMeta{Name: "decr", Arity: 2, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["mget"] = &RedisCmdMeta{Name: "mget", Arity: -2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: -1,
+			KeyStep: 1}
+		RedisCommandTable["rpush"] = &RedisCmdMeta{Name: "rpush", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["lpush"] = &RedisCmdMeta{Name: "lpush", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["rpushx"] = &RedisCmdMeta{Name: "rpushx", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["lpushx"] = &RedisCmdMeta{Name: "lpushx", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["linsert"] = &RedisCmdMeta{Name: "linsert", Arity: 5, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["rpop"] = &RedisCmdMeta{Name: "rpop", Arity: -2, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["lpop"] = &RedisCmdMeta{Name: "lpop", Arity: -2, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["brpop"] = &RedisCmdMeta{Name: "brpop", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: -2,
+			KeyStep: 1}
+		RedisCommandTable["brpoplpush"] = &RedisCmdMeta{Name: "brpoplpush", Arity: 4, Sflags: writeFlag, FirstKey: 1,
+			LastKey: 2, KeyStep: 1}
+		RedisCommandTable["blmove"] = &RedisCmdMeta{Name: "blmove", Arity: 6, Sflags: writeFlag, FirstKey: 1, LastKey: 2,
+			KeyStep: 1}
+		RedisCommandTable["blpop"] = &RedisCmdMeta{Name: "blpop", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: -2,
+			KeyStep: 1}
+		RedisCommandTable["llen"] = &RedisCmdMeta{Name: "llen", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["lindex"] = &RedisCmdMeta{Name: "lindex", Arity: 3, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["lset"] = &RedisCmdMeta{Name: "lset", Arity: 4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["lrange"] = &RedisCmdMeta{Name: "lrange", Arity: 4, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["ltrim"] = &RedisCmdMeta{Name: "ltrim", Arity: 4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["lpos"] = &RedisCmdMeta{Name: "lpos", Arity: -3, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["lrem"] = &RedisCmdMeta{Name: "lrem", Arity: 4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["rpoplpush"] = &RedisCmdMeta{Name: "rpoplpush", Arity: 3, Sflags: writeFlag, FirstKey: 1,
+			LastKey: 2, KeyStep: 1}
+		RedisCommandTable["lmove"] = &RedisCmdMeta{Name: "lmove", Arity: 5, Sflags: writeFlag, FirstKey: 1, LastKey: 2,
+			KeyStep: 1}
+		RedisCommandTable["sadd"] = &RedisCmdMeta{Name: "sadd", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["srem"] = &RedisCmdMeta{Name: "srem", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["smove"] = &RedisCmdMeta{Name: "smove", Arity: 4, Sflags: writeFlag, FirstKey: 1, LastKey: 2,
+			KeyStep: 1}
+		RedisCommandTable["sismember"] = &RedisCmdMeta{Name: "sismember", Arity: 3, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["smismember"] = &RedisCmdMeta{Name: "smismember", Arity: -3, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["scard"] = &RedisCmdMeta{Name: "scard", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["spop"] = &RedisCmdMeta{Name: "spop", Arity: -2, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["srandmember"] = &RedisCmdMeta{Name: "srandmember", Arity: -2, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["sinter"] = &RedisCmdMeta{Name: "sinter", Arity: -2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: -1,
+			KeyStep: 1}
+		RedisCommandTable["sinterstore"] = &RedisCmdMeta{Name: "sinterstore", Arity: -3, Sflags: writeFlag, FirstKey: 1,
+			LastKey: -1, KeyStep: 1}
+		RedisCommandTable["sunion"] = &RedisCmdMeta{Name: "sunion", Arity: -2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: -1,
+			KeyStep: 1}
+		RedisCommandTable["sunionstore"] = &RedisCmdMeta{Name: "sunionstore", Arity: -3, Sflags: writeFlag, FirstKey: 1,
+			LastKey: -1, KeyStep: 1}
+		RedisCommandTable["sdiff"] = &RedisCmdMeta{Name: "sdiff", Arity: -2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: -1,
+			KeyStep: 1}
+		RedisCommandTable["sdiffstore"] = &RedisCmdMeta{Name: "sdiffstore", Arity: -3, Sflags: writeFlag, FirstKey: 1,
+			LastKey: -1, KeyStep: 1}
+		RedisCommandTable["smembers"] = &RedisCmdMeta{Name: "smembers", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["sscan"] = &RedisCmdMeta{Name: "sscan", Arity: -3, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["zadd"] = &RedisCmdMeta{Name: "zadd", Arity: -4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["zincrby"] = &RedisCmdMeta{Name: "zincrby", Arity: 4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["zrem"] = &RedisCmdMeta{Name: "zrem", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["zremrangebyscore"] = &RedisCmdMeta{Name: "zremrangebyscore", Arity: 4, Sflags: writeFlag,
+			FirstKey: 1, LastKey: 1, KeyStep: 1}
+		RedisCommandTable["zremrangebyrank"] = &RedisCmdMeta{Name: "zremrangebyrank", Arity: 4, Sflags: writeFlag,
+			FirstKey: 1, LastKey: 1, KeyStep: 1}
+		RedisCommandTable["zremrangebylex"] = &RedisCmdMeta{Name: "zremrangebylex", Arity: 4, Sflags: writeFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["zunionstore"] = &RedisCmdMeta{Name: "zunionstore", Arity: -4, Sflags: writeFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["zinterstore"] = &RedisCmdMeta{Name: "zinterstore", Arity: -4, Sflags: writeFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["zdiffstore"] = &RedisCmdMeta{Name: "zdiffstore", Arity: -4, Sflags: writeFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["zunion"] = &RedisCmdMeta{Name: "zunion", Arity: -3, Sflags: readOnlyFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["zinter"] = &RedisCmdMeta{Name: "zinter", Arity: -3, Sflags: readOnlyFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["zdiff"] = &RedisCmdMeta{Name: "zdiff", Arity: -3, Sflags: readOnlyFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["zrange"] = &RedisCmdMeta{Name: "zrange", Arity: -4, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["zrangestore"] = &RedisCmdMeta{Name: "zrangestore", Arity: -5, Sflags: writeFlag, FirstKey: 1,
+			LastKey: 2, KeyStep: 1}
+		RedisCommandTable["zrangebyscore"] = &RedisCmdMeta{Name: "zrangebyscore", Arity: -4, Sflags: readOnlyFlag,
+			FirstKey: 1, LastKey: 1, KeyStep: 1}
+		RedisCommandTable["zrevrangebyscore"] = &RedisCmdMeta{Name: "zrevrangebyscore", Arity: -4, Sflags: readOnlyFlag,
+			FirstKey: 1, LastKey: 1, KeyStep: 1}
+		RedisCommandTable["zrangebylex"] = &RedisCmdMeta{Name: "zrangebylex", Arity: -4, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["zrevrangebylex"] = &RedisCmdMeta{Name: "zrevrangebylex", Arity: -4, Sflags: readOnlyFlag,
+			FirstKey: 1, LastKey: 1, KeyStep: 1}
+		RedisCommandTable["zcount"] = &RedisCmdMeta{Name: "zcount", Arity: 4, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["zlexcount"] = &RedisCmdMeta{Name: "zlexcount", Arity: 4, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["zrevrange"] = &RedisCmdMeta{Name: "zrevrange", Arity: -4, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["zcard"] = &RedisCmdMeta{Name: "zcard", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["zscore"] = &RedisCmdMeta{Name: "zscore", Arity: 3, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["zmscore"] = &RedisCmdMeta{Name: "zmscore", Arity: -3, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["zrank"] = &RedisCmdMeta{Name: "zrank", Arity: 3, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["zrevrank"] = &RedisCmdMeta{Name: "zrevrank", Arity: 3, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["zscan"] = &RedisCmdMeta{Name: "zscan", Arity: -3, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["zpopmin"] = &RedisCmdMeta{Name: "zpopmin", Arity: -2, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["zpopmax"] = &RedisCmdMeta{Name: "zpopmax", Arity: -2, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["bzpopmin"] = &RedisCmdMeta{Name: "bzpopmin", Arity: -3, Sflags: writeFlag, FirstKey: 1,
+			LastKey: -2, KeyStep: 1}
+		RedisCommandTable["bzpopmax"] = &RedisCmdMeta{Name: "bzpopmax", Arity: -3, Sflags: writeFlag, FirstKey: 1,
+			LastKey: -2, KeyStep: 1}
+		RedisCommandTable["zrandmember"] = &RedisCmdMeta{Name: "zrandmember", Arity: -2, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["hset"] = &RedisCmdMeta{Name: "hset", Arity: -4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["hsetnx"] = &RedisCmdMeta{Name: "hsetnx", Arity: 4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["hget"] = &RedisCmdMeta{Name: "hget", Arity: 3, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["hmset"] = &RedisCmdMeta{Name: "hmset", Arity: -4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["hmget"] = &RedisCmdMeta{Name: "hmget", Arity: -3, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["hincrby"] = &RedisCmdMeta{Name: "hincrby", Arity: 4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["hincrbyfloat"] = &RedisCmdMeta{Name: "hincrbyfloat", Arity: 4, Sflags: writeFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["hdel"] = &RedisCmdMeta{Name: "hdel", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["hlen"] = &RedisCmdMeta{Name: "hlen", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["hstrlen"] = &RedisCmdMeta{Name: "hstrlen", Arity: 3, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["hkeys"] = &RedisCmdMeta{Name: "hkeys", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["hvals"] = &RedisCmdMeta{Name: "hvals", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["hgetall"] = &RedisCmdMeta{Name: "hgetall", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["hexists"] = &RedisCmdMeta{Name: "hexists", Arity: 3, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["hrandfield"] = &RedisCmdMeta{Name: "hrandfield", Arity: -2, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["hscan"] = &RedisCmdMeta{Name: "hscan", Arity: -3, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["incrby"] = &RedisCmdMeta{Name: "incrby", Arity: 3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["decrby"] = &RedisCmdMeta{Name: "decrby", Arity: 3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["incrbyfloat"] = &RedisCmdMeta{Name: "incrbyfloat", Arity: 3, Sflags: writeFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["getset"] = &RedisCmdMeta{Name: "getset", Arity: 3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["mset"] = &RedisCmdMeta{Name: "mset", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: -1,
+			KeyStep: 2}
+		RedisCommandTable["msetnx"] = &RedisCmdMeta{Name: "msetnx", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: -1,
+			KeyStep: 2}
+		RedisCommandTable["randomkey"] = &RedisCmdMeta{Name: "randomkey", Arity: 1, Sflags: readOnlyFlag, FirstKey: 0,
+			LastKey: 0, KeyStep: 0}
+		RedisCommandTable["select"] = &RedisCmdMeta{Name: "select", Arity: 2, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["swapdb"] = &RedisCmdMeta{Name: "swapdb", Arity: 3, Sflags: writeFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["move"] = &RedisCmdMeta{Name: "move", Arity: 3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["copy"] = &RedisCmdMeta{Name: "copy", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: 2,
+			KeyStep: 1}
+		RedisCommandTable["rename"] = &RedisCmdMeta{Name: "rename", Arity: 3, Sflags: writeFlag, FirstKey: 1, LastKey: 2,
+			KeyStep: 1}
+		RedisCommandTable["renamenx"] = &RedisCmdMeta{Name: "renamenx", Arity: 3, Sflags: writeFlag, FirstKey: 1, LastKey: 2,
+			KeyStep: 1}
+		RedisCommandTable["expire"] = &RedisCmdMeta{Name: "expire", Arity: 3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["expireat"] = &RedisCmdMeta{Name: "expireat", Arity: 3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["pexpire"] = &RedisCmdMeta{Name: "pexpire", Arity: 3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["pexpireat"] = &RedisCmdMeta{Name: "pexpireat", Arity: 3, Sflags: writeFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["keys"] = &RedisCmdMeta{Name: "keys", Arity: 2, Sflags: readOnlyFlag + "|" + adminFlag, FirstKey: 0,
+			LastKey: 0, KeyStep: 0}
+		RedisCommandTable["scan"] = &RedisCmdMeta{Name: "scan", Arity: -2, Sflags: readOnlyFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["dbsize"] = &RedisCmdMeta{Name: "dbsize", Arity: 1, Sflags: readOnlyFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["auth"] = &RedisCmdMeta{Name: "auth", Arity: -2, Sflags: readOnlyFlag + "|" + adminFlag,
+			FirstKey: 0, LastKey: 0, KeyStep: 0}
+		RedisCommandTable["ping"] = &RedisCmdMeta{Name: "ping", Arity: -1, Sflags: readOnlyFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["echo"] = &RedisCmdMeta{Name: "echo", Arity: 2, Sflags: readOnlyFlag + "|" + adminFlag, FirstKey: 0,
+			LastKey: 0, KeyStep: 0}
+		RedisCommandTable["save"] = &RedisCmdMeta{Name: "save", Arity: 1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["bgsave"] = &RedisCmdMeta{Name: "bgsave", Arity: -1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["bgrewriteaof"] = &RedisCmdMeta{Name: "bgrewriteaof", Arity: 1, Sflags: adminFlag, FirstKey: 0,
+			LastKey: 0, KeyStep: 0}
+		RedisCommandTable["shutdown"] = &RedisCmdMeta{Name: "shutdown", Arity: -1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["lastsave"] = &RedisCmdMeta{Name: "lastsave", Arity: 1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["type"] = &RedisCmdMeta{Name: "type", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["multi"] = &RedisCmdMeta{Name: "multi", Arity: 1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["exec"] = &RedisCmdMeta{Name: "exec", Arity: 1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["discard"] = &RedisCmdMeta{Name: "discard", Arity: 1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["sync"] = &RedisCmdMeta{Name: "sync", Arity: 1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["psync"] = &RedisCmdMeta{Name: "psync", Arity: -3, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["replconf"] = &RedisCmdMeta{Name: "replconf", Arity: -1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["flushdb"] = &RedisCmdMeta{Name: "flushdb", Arity: -1, Sflags: writeFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["cleandb"] = &RedisCmdMeta{Name: "cleandb", Arity: -1, Sflags: writeFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["flushall"] = &RedisCmdMeta{Name: "flushall", Arity: -1, Sflags: writeFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["cleanall"] = &RedisCmdMeta{Name: "cleanall", Arity: -1, Sflags: writeFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["sort"] = &RedisCmdMeta{Name: "sort", Arity: -2, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["info"] = &RedisCmdMeta{Name: "info", Arity: -1, Sflags: readOnlyFlag + "|" + adminFlag,
+			FirstKey: 0, LastKey: 0, KeyStep: 0}
+		RedisCommandTable["monitor"] = &RedisCmdMeta{Name: "monitor", Arity: 1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["ttl"] = &RedisCmdMeta{Name: "ttl", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["touch"] = &RedisCmdMeta{Name: "touch", Arity: -2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: -1,
+			KeyStep: 1}
+		RedisCommandTable["pttl"] = &RedisCmdMeta{Name: "pttl", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["expiretime"] = &RedisCmdMeta{Name: "expiretime", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["pexpiretime"] = &RedisCmdMeta{Name: "pexpiretime", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["persist"] = &RedisCmdMeta{Name: "persist", Arity: 2, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["slaveof"] = &RedisCmdMeta{Name: "slaveof", Arity: 3, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["replicaof"] = &RedisCmdMeta{Name: "replicaof", Arity: 3, Sflags: adminFlag, FirstKey: 0,
+			LastKey: 0, KeyStep: 0}
+		RedisCommandTable["role"] = &RedisCmdMeta{Name: "role", Arity: 1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["debug"] = &RedisCmdMeta{Name: "debug", Arity: -2, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["config"] = &RedisCmdMeta{Name: "config", Arity: -2, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["confxx"] = &RedisCmdMeta{Name: "confxx", Arity: -2, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["subscribe"] = &RedisCmdMeta{Name: "subscribe", Arity: -2, Sflags: adminFlag, FirstKey: 0,
+			LastKey: 0, KeyStep: 0}
+		RedisCommandTable["unsubscribe"] = &RedisCmdMeta{Name: "unsubscribe", Arity: -1, Sflags: adminFlag, FirstKey: 0,
+			LastKey: 0, KeyStep: 0}
+		RedisCommandTable["psubscribe"] = &RedisCmdMeta{Name: "psubscribe", Arity: -2, Sflags: adminFlag, FirstKey: 0,
+			LastKey: 0, KeyStep: 0}
+		RedisCommandTable["punsubscribe"] = &RedisCmdMeta{Name: "punsubscribe", Arity: -1, Sflags: adminFlag, FirstKey: 0,
+			LastKey: 0, KeyStep: 0}
+		RedisCommandTable["publish"] = &RedisCmdMeta{Name: "publish", Arity: 3, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["pubsub"] = &RedisCmdMeta{Name: "pubsub", Arity: -2, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["watch"] = &RedisCmdMeta{Name: "watch", Arity: -2, Sflags: adminFlag, FirstKey: 1, LastKey: -1,
+			KeyStep: 1}
+		RedisCommandTable["unwatch"] = &RedisCmdMeta{Name: "unwatch", Arity: 1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["cluster"] = &RedisCmdMeta{Name: "cluster", Arity: -2, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["restore"] = &RedisCmdMeta{Name: "restore", Arity: -4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["restore-asking"] = &RedisCmdMeta{Name: "restore-asking", Arity: -4, Sflags: writeFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["migrate"] = &RedisCmdMeta{Name: "migrate", Arity: -6, Sflags: writeFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["asking"] = &RedisCmdMeta{Name: "asking", Arity: 1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["readonly"] = &RedisCmdMeta{Name: "readonly", Arity: 1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["readwrite"] = &RedisCmdMeta{Name: "readwrite", Arity: 1, Sflags: adminFlag, FirstKey: 0,
+			LastKey: 0, KeyStep: 0}
+		RedisCommandTable["dump"] = &RedisCmdMeta{Name: "dump", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["object"] = &RedisCmdMeta{Name: "object", Arity: -2, Sflags: readOnlyFlag, FirstKey: 2, LastKey: 2,
+			KeyStep: 1}
+		RedisCommandTable["memory"] = &RedisCmdMeta{Name: "memory", Arity: -2, Sflags: readOnlyFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["client"] = &RedisCmdMeta{Name: "client", Arity: -2, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["hello"] = &RedisCmdMeta{Name: "hello", Arity: -1, Sflags: readOnlyFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["eval"] = &RedisCmdMeta{Name: "eval", Arity: -3, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["eval_ro"] = &RedisCmdMeta{Name: "eval_ro", Arity: -3, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["evalsha"] = &RedisCmdMeta{Name: "evalsha", Arity: -3, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["evalsha_ro"] = &RedisCmdMeta{Name: "evalsha_ro", Arity: -3, Sflags: adminFlag, FirstKey: 0,
+			LastKey: 0, KeyStep: 0}
+		RedisCommandTable["slowlog"] = &RedisCmdMeta{Name: "slowlog", Arity: -2, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["script"] = &RedisCmdMeta{Name: "script", Arity: -2, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["time"] = &RedisCmdMeta{Name: "time", Arity: 1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["bitop"] = &RedisCmdMeta{Name: "bitop", Arity: -4, Sflags: writeFlag, FirstKey: 2, LastKey: -1,
+			KeyStep: 1}
+		RedisCommandTable["bitcount"] = &RedisCmdMeta{Name: "bitcount", Arity: -2, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["bitpos"] = &RedisCmdMeta{Name: "bitpos", Arity: -3, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["wait"] = &RedisCmdMeta{Name: "wait", Arity: 3, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["command"] = &RedisCmdMeta{Name: "command", Arity: -1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["geoadd"] = &RedisCmdMeta{Name: "geoadd", Arity: -5, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["georadius"] = &RedisCmdMeta{Name: "georadius", Arity: -6, Sflags: writeFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["georadius_ro"] = &RedisCmdMeta{Name: "georadius_ro", Arity: -6, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["georadiusbymember"] = &RedisCmdMeta{Name: "georadiusbymember", Arity: -5, Sflags: writeFlag,
+			FirstKey: 1, LastKey: 1, KeyStep: 1}
+		RedisCommandTable["georadiusbymember_ro"] = &RedisCmdMeta{Name: "georadiusbymember_ro", Arity: -5,
+			Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1, KeyStep: 1}
+		RedisCommandTable["geohash"] = &RedisCmdMeta{Name: "geohash", Arity: -2, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["geopos"] = &RedisCmdMeta{Name: "geopos", Arity: -2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["geodist"] = &RedisCmdMeta{Name: "geodist", Arity: -4, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["geosearch"] = &RedisCmdMeta{Name: "geosearch", Arity: -7, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["geosearchstore"] = &RedisCmdMeta{Name: "geosearchstore", Arity: -8, Sflags: writeFlag, FirstKey: 1,
+			LastKey: 2, KeyStep: 1}
+		RedisCommandTable["pfselftest"] = &RedisCmdMeta{Name: "pfselftest", Arity: 1, Sflags: adminFlag, FirstKey: 0,
+			LastKey: 0, KeyStep: 0}
+		RedisCommandTable["pfadd"] = &RedisCmdMeta{Name: "pfadd", Arity: -2, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["pfcount"] = &RedisCmdMeta{Name: "pfcount", Arity: -2, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: -1, KeyStep: 1}
+		RedisCommandTable["pfmerge"] = &RedisCmdMeta{Name: "pfmerge", Arity: -2, Sflags: writeFlag, FirstKey: 1, LastKey: -1,
+			KeyStep: 1}
+		RedisCommandTable["pfdebug"] = &RedisCmdMeta{Name: "pfdebug", Arity: -3, Sflags: adminFlag, FirstKey: 2, LastKey: 2,
+			KeyStep: 1}
+		RedisCommandTable["xadd"] = &RedisCmdMeta{Name: "xadd", Arity: -5, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["xrange"] = &RedisCmdMeta{Name: "xrange", Arity: -4, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["xrevrange"] = &RedisCmdMeta{Name: "xrevrange", Arity: -4, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["xlen"] = &RedisCmdMeta{Name: "xlen", Arity: 2, Sflags: readOnlyFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["xread"] = &RedisCmdMeta{Name: "xread", Arity: -4, Sflags: readOnlyFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["xreadgroup"] = &RedisCmdMeta{Name: "xreadgroup", Arity: -7, Sflags: writeFlag, FirstKey: 0,
+			LastKey: 0, KeyStep: 0}
+		RedisCommandTable["xgroup"] = &RedisCmdMeta{Name: "xgroup", Arity: -2, Sflags: writeFlag, FirstKey: 2, LastKey: 2,
+			KeyStep: 1}
+		RedisCommandTable["xsetid"] = &RedisCmdMeta{Name: "xsetid", Arity: 3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["xack"] = &RedisCmdMeta{Name: "xack", Arity: -4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["xpending"] = &RedisCmdMeta{Name: "xpending", Arity: -3, Sflags: readOnlyFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["xclaim"] = &RedisCmdMeta{Name: "xclaim", Arity: -6, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["xautoclaim"] = &RedisCmdMeta{Name: "xautoclaim", Arity: -6, Sflags: writeFlag, FirstKey: 1,
+			LastKey: 1, KeyStep: 1}
+		RedisCommandTable["xinfo"] = &RedisCmdMeta{Name: "xinfo", Arity: -2, Sflags: readOnlyFlag, FirstKey: 2, LastKey: 2,
+			KeyStep: 1}
+		RedisCommandTable["xdel"] = &RedisCmdMeta{Name: "xdel", Arity: -3, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["xtrim"] = &RedisCmdMeta{Name: "xtrim", Arity: -4, Sflags: writeFlag, FirstKey: 1, LastKey: 1,
+			KeyStep: 1}
+		RedisCommandTable["latency"] = &RedisCmdMeta{Name: "latency", Arity: -2, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["lolwut"] = &RedisCmdMeta{Name: "lolwut", Arity: -1, Sflags: readOnlyFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["acl"] = &RedisCmdMeta{Name: "acl", Arity: -2, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["stralgo"] = &RedisCmdMeta{Name: "stralgo", Arity: -2, Sflags: readOnlyFlag, FirstKey: 0,
+			LastKey: 0, KeyStep: 0}
+		RedisCommandTable["reset"] = &RedisCmdMeta{Name: "reset", Arity: 1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+		RedisCommandTable["failover"] = &RedisCmdMeta{Name: "failover", Arity: -1, Sflags: adminFlag, FirstKey: 0, LastKey: 0,
+			KeyStep: 0}
+	})
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/redis_rpc/redis_rpc.go b/dbm-services/mysql/db-remote-service/pkg/redis_rpc/redis_rpc.go
new file mode 100644
index 0000000000..76c7b40b7e
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/redis_rpc/redis_rpc.go
@@ -0,0 +1,123 @@
+// Package redis_rpc redis remote command execution
+package redis_rpc
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/gin-gonic/gin"
+	"golang.org/x/exp/slog"
+)
+
+// RedisRPCEmbed the redis rpc implementation
+type RedisRPCEmbed struct {
+}
+
+// NewRedisRPCEmbed creates a RedisRPCEmbed
+func NewRedisRPCEmbed() *RedisRPCEmbed {
+	return &RedisRPCEmbed{}
+}
+
+// IsQueryCommand reports whether the command is a read-only query that may run
+func (r *RedisRPCEmbed) IsQueryCommand(cmdArgs []string) bool {
+	if len(cmdArgs) == 0 {
+		return false
+	}
+	cmd := strings.ToLower(cmdArgs[0])
+	if len(cmdArgs) >= 2 {
+		// compare against the lower-cased command so that e.g. "CLUSTER NODES" is recognized too
+		subCmd := strings.ToLower(cmdArgs[1])
+		if cmd == "cluster" && (subCmd == "nodes" || subCmd == "info" || subCmd == "slots") {
+			return true
+		}
+		if (cmd == "confxx" || cmd == "config") && subCmd == "get" {
+			return true
+		}
+	}
+	if _, ok := RedisCommandTable[cmd]; !ok {
+		return false
+	}
+	return strings.Contains(RedisCommandTable[cmd].Sflags, readOnlyFlag)
+}
+
+// IsExecuteCommand write commands are never allowed
+func (r *RedisRPCEmbed) IsExecuteCommand() bool {
+	return false
+}
+
+// DoCommand executes a redis command
+func (r *RedisRPCEmbed) DoCommand(c *gin.Context) {
+	// parse the request parameters
+	var param RedisQueryParams
+	err := c.BindJSON(&param)
+	if err != nil {
+		slog.Error("RedisRPCEmbed bind json", err)
+		SendResponse(c, 1, err.Error(), nil)
+		return
+	}
+	slog.Info("RedisRPCEmbed request data", slog.String("param", param.StringWithoutPasswd()))
+
+	// format and validate the command
+	formatCmd, err := FormatName(param.Command)
+	if err != nil {
+		slog.Error("RedisRPCEmbed format name", err, slog.String("command", param.Command))
+		SendResponse(c, 1, err.Error(), nil)
+		return
+	}
+	cmdArgs := strings.Fields(formatCmd)
+	if !r.IsQueryCommand(cmdArgs) {
+		slog.Error("RedisRPCEmbed is query command, not support", slog.String("command", formatCmd))
+		SendResponse(c, 1, fmt.Sprintf("non-support redis command:'%s'", formatCmd), nil)
+		return
+	}
+
+	genErrInfo := func(isString bool, valueSize, maxLen int) string {
+		name := "Member Count"
+		if isString {
+			name = "Value Size"
+		}
+		return fmt.Sprintf("ERR: 该查询返回的%s为%d,超过了阈值%d。\n", name, valueSize, maxLen)
+	}
+
+	// run the command against every address
+	var respData []CmdResult
+	var maxLen int
+	password := param.Password
+	for _, address := range param.Addresses {
+		valueSize, isString, err := GetValueSize(address, password, formatCmd)
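+		// string values are limited to 1 MiB, multi-element results to 1000 members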
+		if isString {
+			maxLen = 1 * 1024 * 1024
+		} else {
+			maxLen = 1000
+		}
+		if err != nil {
+			slog.Error("RedisRPCEmbed get value size", err, slog.String("command", formatCmd))
+			SendResponse(c, 1, err.Error(), nil)
+			return
+		} else if valueSize > maxLen {
+			slog.Error("RedisRPCEmbed get value size",
+				genErrInfo(isString, valueSize, maxLen),
+				slog.String("command", formatCmd))
+			SendResponse(c, 1, genErrInfo(isString, valueSize, maxLen), nil)
+			return
+		}
+
+		// ret, err := DoRedisCmd(address, password, formatCmd, strconv.Itoa(param.DbNum), true)
+		ret, err := DoRedisCmdNew(address, password, formatCmd, param.DbNum)
+		if err != nil {
+			slog.Error("RedisRPCEmbed execute command", err,
+				slog.String("address", address),
+				slog.String("command", formatCmd),
+				slog.Int("dbNum", param.DbNum))
+			SendResponse(c, 1, err.Error(), nil)
+			return
+		}
+		respData = append(respData, CmdResult{Address: address, Result: ret})
+	}
+	SendResponse(c, 0, "", respData)
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/redis_rpc/twemproxy_rpc.go b/dbm-services/mysql/db-remote-service/pkg/redis_rpc/twemproxy_rpc.go
new file mode 100644
index 0000000000..bb6704f213
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/redis_rpc/twemproxy_rpc.go
@@ -0,0 +1,67 @@
+package redis_rpc
+
+import (
+	"fmt"
+
+	"github.com/gin-gonic/gin"
+	"golang.org/x/exp/slog"
+)
+
+// TwemproxyRPCEmbed the twemproxy admin command implementation
+type TwemproxyRPCEmbed struct {
+}
+
+// NewTwemproxyRPCEmbed creates a TwemproxyRPCEmbed
+func NewTwemproxyRPCEmbed() *TwemproxyRPCEmbed {
+	return &TwemproxyRPCEmbed{}
+}
+
+// IsProxyQueryCommand proxy admin commands go over the raw tcp (nc) protocol; no restriction for now, everything may run
+func (r *TwemproxyRPCEmbed) IsProxyQueryCommand(cmd string) bool {
+	return true
+}
+
+// DoCommand executes a twemproxy admin command
+func (r *TwemproxyRPCEmbed) DoCommand(c *gin.Context) {
+	// parse the request parameters
+	var param RedisQueryParams
+	err := c.BindJSON(&param)
+	if err != nil {
+		slog.Error("TwemproxyRPCEmbed bind json", err)
+		SendResponse(c, 1, err.Error(), nil)
+		return
+	}
+
+	slog.Info("TwemproxyRPCEmbed request data", slog.String("param", param.StringWithoutPasswd()))
+
+	// format and validate the command
+	formatCmd, err := FormatName(param.Command)
+	if err != nil {
+		slog.Error("TwemproxyRPCEmbed format name", err, slog.String("command", param.Command))
+		SendResponse(c, 1, err.Error(), nil)
+		return
+	}
+	if !r.IsProxyQueryCommand(formatCmd) {
+		slog.Error("TwemproxyRPCEmbed isProxyQueryCommand, not support", slog.String("cmdName", formatCmd))
+		SendResponse(c, 1, fmt.Sprintf("non-support twemproxy admin command:'%s'", formatCmd), nil)
+		return
+	}
+
+	// run the command against every address
+	var respData []CmdResult
+	for _, address := range param.Addresses {
+		ret, err := TcpClient01(address, formatCmd)
+		if err != nil {
+			slog.Error("TwemproxyRPCEmbed execute command", err,
+				slog.String("address", address),
+				slog.String("command", formatCmd))
+			SendResponse(c, 1, err.Error(), nil)
+			return
+		}
+
+		respData = append(respData, CmdResult{Address: address, Result: ret})
+	}
+	SendResponse(c, 0, "", respData)
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/rpc_core/execute_cmd.go b/dbm-services/mysql/db-remote-service/pkg/rpc_core/execute_cmd.go
new file mode 100644
index 0000000000..bbfb676055
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/rpc_core/execute_cmd.go
@@ -0,0 +1,60 @@
+package rpc_core
+
+import (
+	"context"
+
+	"github.com/jmoiron/sqlx"
+)
+
+// executeCmd runs a non-query statement and returns the number of rows affected
+func executeCmd(conn *sqlx.Conn, cmd string, ctx context.Context) (int64, error) {
+
+	result, err := conn.ExecContext(ctx, cmd)
+	if err != nil {
+		return 0, err
+	}
+
+	return result.RowsAffected()
+}
+
+// queryCmd runs a query statement and returns the rows as a slice of column->value maps
+func queryCmd(conn *sqlx.Conn, cmd string, ctx context.Context) (tableDataType, error) {
+
+	rows, err := conn.QueryxContext(ctx, cmd)
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		_ = rows.Close()
+	}()
+
+	tableData := make(tableDataType, 0)
+
+	for rows.Next() {
+		data := make(map[string]interface{})
+		err := rows.MapScan(data)
+		if err != nil {
+			return nil, err
+		}
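+		// the driver returns most column values as []byte; convert them to string for a readable JSON response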
+		for k, v := range data {
+			if value, ok := v.([]byte); ok {
+				data[k] = string(value)
+			}
+		}
+		tableData = append(tableData, data)
+	}
+
+	if err = rows.Err(); err != nil {
+		return nil, err
+	}
+
+	return tableData, nil
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/rpc_core/execute_cmds_on_addr.go b/dbm-services/mysql/db-remote-service/pkg/rpc_core/execute_cmds_on_addr.go
new file mode 100644
index 0000000000..6129aa11a4
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/rpc_core/execute_cmds_on_addr.go
@@ -0,0 +1,107 @@
+package rpc_core
+
+import (
+	"context"
+	"time"
+
+	"github.com/pkg/errors"
+	"golang.org/x/exp/slog"
+)
+
+func (c *RPCWrapper) executeOneAddr(address string) (res []cmdResult, err error) {
+	db, err := c.MakeConnection(address, c.user, c.password, c.connectTimeout)
+
+	if err != nil {
+		slog.Error("make connection", err)
+		return nil, err
+	}
+
+	defer func() {
+		_ = db.Close()
+	}()
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(c.queryTimeout))
+	defer cancel()
+
+	conn, err := db.Connx(ctx)
+	if err != nil {
+		slog.Error("get conn from db", err)
+		return nil, err
+	}
+
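+	// run the commands in order; on error stop, unless force is set, in which case record the error and continue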
+	for idx, command := range c.commands {
+		pc, err := c.ParseCommand(command)
+		if err != nil {
+			slog.Error("parse command", err)
+			return nil, err
+		}
+
+		if c.IsQueryCommand(pc) {
+			tableData, err := queryCmd(conn, command, ctx)
+			if err != nil {
+				slog.Error(
+					"query command", err,
+					slog.String("address", address), slog.String("command", command),
+				)
+				res = append(
+					res, cmdResult{
+						Cmd:          command,
+						RowsAffected: 0,
+						TableData:    nil,
+						ErrorMsg:     err.Error(),
+					},
+				)
+				if !c.force {
+					return res, err
+				}
+				continue
+			}
+			res = append(
+				res, cmdResult{
+					Cmd:          command,
+					TableData:    tableData,
+					RowsAffected: 0,
+					ErrorMsg:     "",
+				},
+			)
+		} else if c.IsExecuteCommand(pc) {
+			rowsAffected, err := executeCmd(conn, command, ctx)
+			if err != nil {
+				slog.Error(
+					"execute command", err,
+					slog.String("address", address), slog.String("command", command),
+				)
+				res = append(
+					res, cmdResult{
+						Cmd:          command,
+						TableData:    nil,
+						RowsAffected: 0,
+						ErrorMsg:     err.Error(),
+					},
+				)
+				if !c.force {
+					return res, err
+				}
+				continue
+			}
+			res = append(
+				res, cmdResult{
+					Cmd:          command,
+					TableData:    nil,
+					RowsAffected: rowsAffected,
+					ErrorMsg:     "",
+				},
+			)
+		} else {
+			err = errors.Errorf("commands[%d]: %s not support", idx, command)
+			slog.Error("dispatch command", err)
+			res = append(
+				res, cmdResult{Cmd: command, TableData: nil, RowsAffected: 0, ErrorMsg: err.Error()},
+			)
+			if !c.force {
+				return res, err
+			}
+		}
+	}
+	return
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/rpc_core/init.go b/dbm-services/mysql/db-remote-service/pkg/rpc_core/init.go
new file mode 100644
index 0000000000..d14a12f9f5
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/rpc_core/init.go
@@ -0,0 +1,16 @@
+package rpc_core
+
+type tableDataType []map[string]interface{}
+
+type cmdResult struct {
+	Cmd          string        `json:"cmd"`
+	TableData    tableDataType `json:"table_data"`
+	RowsAffected int64         `json:"rows_affected"`
+	ErrorMsg     string        `json:"error_msg"`
+}
+
+type oneAddressResult struct {
+	Address    string      `json:"address"`
+	CmdResults []cmdResult `json:"cmd_results"`
+	ErrorMsg   string      `json:"error_msg"`
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/rpc_core/interface.go b/dbm-services/mysql/db-remote-service/pkg/rpc_core/interface.go
new file mode 100644
index 0000000000..daf1eaaa5c
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/rpc_core/interface.go
@@ -0,0 +1,22 @@
+package rpc_core
+
+import (
+	"dbm-services/mysql/db-remote-service/pkg/parser"
+
+	"github.com/jmoiron/sqlx"
+)
+
+// RPCEmbedInterface the interface a database-specific rpc implementation must satisfy
+type RPCEmbedInterface interface {
+	MakeConnection(
+		address string,
+		user string,
+		password string,
+		timeout int,
+	) (*sqlx.DB, error)
+	ParseCommand(command string) (*parser.ParseQueryBase, error)
+	IsQueryCommand(*parser.ParseQueryBase) bool
+	IsExecuteCommand(*parser.ParseQueryBase) bool
+	User() string
+	Password() string
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/rpc_core/rpc_core.go b/dbm-services/mysql/db-remote-service/pkg/rpc_core/rpc_core.go
new file mode 100644
index 0000000000..1e5669c967
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/rpc_core/rpc_core.go
@@ -0,0 +1,2 @@
+// Package rpc_core the core rpc implementation
+package rpc_core
diff --git a/dbm-services/mysql/db-remote-service/pkg/rpc_core/rpc_wrapper.go b/dbm-services/mysql/db-remote-service/pkg/rpc_core/rpc_wrapper.go
new file mode 100644
index 0000000000..d077b63107
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/rpc_core/rpc_wrapper.go
@@ -0,0 +1,36 @@
+package rpc_core
+
+// RPCWrapper bundles an embedded rpc implementation with its target addresses, commands and timeouts
+type RPCWrapper struct {
+	addresses      []string
+	commands       []string
+	user           string
+	password       string
+	connectTimeout int
+	queryTimeout   int
+	force          bool
+	RPCEmbedInterface
+}
+
+// NewRPCWrapper creates an RPCWrapper
+func NewRPCWrapper(
+	addresses []string,
+	commands []string,
+	user string,
+	password string,
+	connectTimeout int,
+	queryTimeout int,
+	force bool,
+	em RPCEmbedInterface,
+) *RPCWrapper {
+	return &RPCWrapper{
+		addresses:         addresses,
+		commands:          commands,
+		user:              user,
+		password:          password,
+		connectTimeout:    connectTimeout,
+		queryTimeout:      queryTimeout,
+		force:             force,
+		RPCEmbedInterface: em,
+	}
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/rpc_core/run.go b/dbm-services/mysql/db-remote-service/pkg/rpc_core/run.go
new file mode 100644
index 0000000000..e4b2793205
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/rpc_core/run.go
@@ -0,0 +1,46 @@
+package rpc_core
+
+import (
+	"dbm-services/mysql/db-remote-service/pkg/config"
+	"sync"
+
+	"golang.org/x/exp/slog"
+)
+
+// Run executes all commands on every address concurrently and collects the results
+func (c *RPCWrapper) Run() (res []oneAddressResult) {
+	addrResChan := make(chan oneAddressResult)
+	tokenBulkChan := make(chan struct{}, config.RuntimeConfig.Concurrent)
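+	// tokenBulkChan works as a semaphore: a worker takes a token before connecting, capping concurrency at RuntimeConfig.Concurrent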
+	slog.Debug("init bulk chan", slog.Int("concurrent", config.RuntimeConfig.Concurrent))
+
+	go func() {
+		var wg sync.WaitGroup
+		wg.Add(len(c.addresses))
+
+		for _, address := range c.addresses {
+			tokenBulkChan <- struct{}{}
+			go func(address string) {
+				addrRes, err := c.executeOneAddr(address)
+				<-tokenBulkChan
+
+				var errMsg string
+				if err != nil {
+					errMsg = err.Error()
+				}
+				addrResChan <- oneAddressResult{
+					Address:    address,
+					CmdResults: addrRes,
+					ErrorMsg:   errMsg,
+				}
+				wg.Done()
+			}(address)
+		}
+		wg.Wait()
+		close(addrResChan)
+	}()
+
+	for addrRes := range addrResChan {
+		res = append(res, addrRes)
+	}
+	return
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/service/handler_parser.go b/dbm-services/mysql/db-remote-service/pkg/service/handler_parser.go
new file mode 100644
index 0000000000..9b9832f239
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/service/handler_parser.go
@@ -0,0 +1,46 @@
+package service
+
+import (
+	"dbm-services/mysql/db-remote-service/pkg/parser"
+	"net/http"
+
+	"github.com/gin-gonic/gin"
+)
+
+type parseRequest struct {
+	Statements string `form:"statements" json:"statements"`
+}
+
+// parseHandler the sql parse service
+func parseHandler(c *gin.Context) {
+	var req parseRequest
+	if err := c.ShouldBindJSON(&req); err != nil {
+		c.JSON(
+			http.StatusBadRequest, gin.H{
+				"code": 1,
+				"data": "",
+				"msg":  err.Error(),
+			},
+		)
+		return
+	}
+
+	resp, err := parser.Parse(req.Statements)
+	if err != nil {
+		c.JSON(
+			http.StatusInternalServerError, gin.H{
+				"code": 1,
+				"data": "",
+				"msg":  err.Error(),
+			},
+		)
+		return
+	}
+
+	c.JSON(
+		http.StatusOK, gin.H{
+			"code": 0,
+			"data": resp,
+			"msg":  "",
+		},
+	)
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/general_handler.go b/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/general_handler.go
new file mode 100644
index 0000000000..329bcd582e
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/general_handler.go
@@ -0,0 +1,71 @@
+package handler_rpc
+
+import (
+	"dbm-services/mysql/db-remote-service/pkg/rpc_core"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/gin-gonic/gin"
+	"golang.org/x/exp/slog"
+)
+
+func generalHandler(rpcEmbed rpc_core.RPCEmbedInterface) func(*gin.Context) {
+	return func(c *gin.Context) {
+		req := queryRequest{
+			ConnectTimeout: 2,
+			QueryTimeout:   30,
+			Force:          false,
+		}
+
+		if err := c.ShouldBindJSON(&req); err != nil {
+			c.JSON(
+				http.StatusBadRequest, gin.H{
+					"code": 1,
+					"data": "",
+					"msg":  err.Error(),
+				},
+			)
+			return
+		}
+		req.TrimSpace()
+
+		slog.Info(
+			"enter rpc handler",
+			slog.String("addresses", strings.Join(req.Addresses, ",")),
+			slog.String("cmds", strings.Join(req.Cmds, ",")),
+			slog.Bool("force", req.Force),
+			slog.Int("connect_timeout", req.ConnectTimeout),
+			slog.Int("query_timeout", req.QueryTimeout),
+		)
+		dupAddrs := findDuplicateAddresses(req.Addresses)
+		slog.Info("duplicate address", slog.String("addresses", strings.Join(dupAddrs, ",")))
+
+		if len(dupAddrs) > 0 {
+			c.JSON(
+				http.StatusBadRequest, gin.H{
+					"code": 1,
+					"data": "",
+					"msg":  fmt.Sprintf("duplicate addresses %s", dupAddrs),
+				},
+			)
+			return
+		}
+
+		rpcWrapper := rpc_core.NewRPCWrapper(
+			req.Addresses, req.Cmds,
+			rpcEmbed.User(), rpcEmbed.Password(),
+			req.ConnectTimeout, req.QueryTimeout, req.Force,
+			rpcEmbed,
+		)
+
+		resp := rpcWrapper.Run()
+
+		c.JSON(
+			http.StatusOK, gin.H{
+				"code": 0,
+				"data": resp,
+				"msg":  "",
+			},
+		)
+	}
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/handler_rpc.go b/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/handler_rpc.go
new file mode 100644
index 0000000000..23cd9d7e1f
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/handler_rpc.go
@@ -0,0 +1,2 @@
+// Package handler_rpc rpc request handlers
+package handler_rpc
diff --git a/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/init.go b/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/init.go
new file mode 100644
index 0000000000..483f8566e5
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/init.go
@@ -0,0 +1,18 @@
+package handler_rpc
+
+import "strings"
+
+type queryRequest struct {
+	Addresses      []string `form:"addresses" json:"addresses" binding:"required"`
+	Cmds           []string `form:"cmds" json:"cmds" binding:"required"`
+	Force          bool     `form:"force" json:"force"`
+	ConnectTimeout int      `form:"connect_timeout" json:"connect_timeout"`
+	QueryTimeout   int      `form:"query_timeout" json:"query_timeout"`
+}
+
+// TrimSpace deletes whitespace around each address
+func (r *queryRequest) TrimSpace() {
+	for idx, val := range r.Addresses {
+		r.Addresses[idx] = strings.TrimSpace(val)
+	}
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/mysql.go b/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/mysql.go
new file mode 100644
index 0000000000..9b6d04a8b5
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/mysql.go
@@ -0,0 +1,6 @@
+package handler_rpc
+
+import "dbm-services/mysql/db-remote-service/pkg/mysql_rpc"
+
+// MySQLRPCHandler handles mysql rpc requests
+var MySQLRPCHandler = generalHandler(&mysql_rpc.MySQLRPCEmbed{})
diff --git a/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/proxy.go b/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/proxy.go
new file mode 100644
index 0000000000..fc2cfceb1a
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/proxy.go
@@ -0,0 +1,8 @@
+package handler_rpc
+
+import (
+	"dbm-services/mysql/db-remote-service/pkg/proxy_rpc"
+)
+
+// ProxyRPCHandler handles proxy admin rpc requests
+var ProxyRPCHandler = generalHandler(&proxy_rpc.ProxyRPCEmbed{})
diff --git a/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/redis.go b/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/redis.go
new file mode 100644
index 0000000000..d5f153c46a
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/redis.go
@@ -0,0 +1,9 @@
+package handler_rpc
+
+import "dbm-services/mysql/db-remote-service/pkg/redis_rpc"
+
+// RedisRPCHandler handles redis rpc requests
+var RedisRPCHandler = redis_rpc.NewRedisRPCEmbed().DoCommand
+
+// TwemproxyRPCHandler handles twemproxy admin rpc requests
+var TwemproxyRPCHandler = redis_rpc.NewTwemproxyRPCEmbed().DoCommand
diff --git a/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/utils.go b/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/utils.go
new file mode 100644
index 0000000000..b9ebcff173
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/service/handler_rpc/utils.go
@@ -0,0 +1,21 @@
+package handler_rpc
+
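+// findDuplicateAddresses returns the addresses that occur more than once in the input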
+func findDuplicateAddresses(addresses []string) []string {
+	m := make(map[string]int)
+	for _, address := range addresses {
+		if _, exist := m[address]; !exist {
+			m[address] = 1
+			continue
+		}
+		m[address] += 1
+	}
+
+	var dup []string
+	for address, count := range m {
+		if count > 1 {
+			dup = append(dup, address)
+		}
+	}
+
+	return dup
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/service/router.go b/dbm-services/mysql/db-remote-service/pkg/service/router.go
new file mode 100644
index 0000000000..664031d94e
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/service/router.go
@@ -0,0 +1,26 @@
+package service
+
+import (
+	"dbm-services/mysql/db-remote-service/pkg/service/handler_rpc"
+
+	"github.com/gin-gonic/gin"
+)
+
+// RegisterRouter registers the service routes
+func RegisterRouter(engine *gin.Engine) {
+	mysqlGroup := engine.Group("/mysql")
+	mysqlGroup.POST("/rpc", handler_rpc.MySQLRPCHandler)
+	mysqlGroup.POST("/parser", parseHandler)
+
+	proxyGroup := engine.Group("/proxy-admin")
+	proxyGroup.POST("/rpc", handler_rpc.ProxyRPCHandler)
+
+	redisGroup := engine.Group("/redis")
+	redisGroup.POST("/rpc", handler_rpc.RedisRPCHandler)
+
+	twemproxyGroup := engine.Group("/twemproxy")
+	twemproxyGroup.POST("/rpc", handler_rpc.TwemproxyRPCHandler)
+
+	// predixyGroup := engine.Group("/predixy")
+	// predixyGroup.POST("/rpc", handler_rpc.RedisRPCHandler)
+}
diff --git a/dbm-services/mysql/db-remote-service/pkg/service/service.go b/dbm-services/mysql/db-remote-service/pkg/service/service.go
new file mode 100644
index 0000000000..7396d459be
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/pkg/service/service.go
@@ -0,0 +1,2 @@
+// Package service the http service layer
+package service
diff --git a/dbm-services/mysql/db-remote-service/readme.md b/dbm-services/mysql/db-remote-service/readme.md
new file mode 100644
index 0000000000..ca7b64c71a
--- /dev/null
+++ b/dbm-services/mysql/db-remote-service/readme.md
@@ -0,0 +1,220 @@
+## Dependencies
+* Depends heavily on the _tmysqlparse_ / _redis-cli_ binaries
+* A manual deployment cannot run without _tmysqlparse_ / _redis-cli_
+* _redis-cli_ must be newer than version 4.0
+* The _db-remote-service:test-v0.0.3_ image is recommended
+* When _tls_ mode is enabled, all _3_ files _CA/Cert/Key_ must be provided together
+
+## _server start env_
+```shell
+# the values below are the defaults
+
+export DRS_CONCURRENT=500 
+export DRS_MYSQL_ADMIN_PASSWORD="123" 
+export DRS_MYSQL_ADMIN_USER="root"
+export DRS_PROXY_ADMIN_PASSWORD="123"
+export DRS_PROXY_ADMIN_USER="root"
+export DRS_PORT=8888
+export DRS_LOG_JSON=true # log in json format
+export DRS_LOG_CONSOLE=true # also log to stdout
+export DRS_LOG_DEBUG=true # enable debug log level
+export DRS_LOG_SOURCE=true # record source file locations in logs
+export DRS_CA_FILE="" # CA file
+export DRS_CERT_FILE="" # Cert file
+export DRS_KEY_FILE="" # Key file
+export DRS_TLS=false 
+
+# do not use these in a container environment
+export DRS_TMYSQLPARSER_BIN="tmysqlparse"
+export DRS_LOG_FILE_DIR=/log/dir # when set, also log to files in this directory
+```
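+
+For example, a container deployment might look like this (a sketch only; the image tag comes from the recommendation above, the overrides are illustrative):
+
+```shell
+docker run -d \
+  -e DRS_PORT=8888 \
+  -e DRS_MYSQL_ADMIN_USER="root" \
+  -e DRS_MYSQL_ADMIN_PASSWORD="123" \
+  -p 8888:8888 \
+  db-remote-service:test-v0.0.3
+```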
+
+## _MySQL RPC_
+
+`POST /mysql/rpc`
+
+
+## _Request_
+```go
+type queryRequest struct {
+	Addresses      []string `form:"addresses" json:"addresses"`
+	Cmds           []string `form:"cmds" json:"cmds"`
+	Force          bool     `form:"force" json:"force"`
+	ConnectTimeout int      `form:"connect_timeout" json:"connect_timeout"`
+	QueryTimeout   int      `form:"query_timeout" json:"query_timeout"`
+}
+```
+
+|Parameter|Default|Required|
+| --- | --- | --- |
+| addresses | none | required |
+| cmds | none | required |
+| force | false | optional |
+| connect_timeout | 2 | optional |
+| query_timeout | 30 | optional |
+
+_Addresses_ is an array of strings such as _127.0.0.1:20000_
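+
+A full request body therefore looks like this (hypothetical addresses and statement):
+
+```json
+{
+  "addresses": ["127.0.0.1:20000", "127.0.0.1:20001"],
+  "cmds": ["select user, host from mysql.user limit 2"],
+  "force": false,
+  "connect_timeout": 2,
+  "query_timeout": 30
+}
+```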
+
+## _Response_
+```go
+type tableDataType []map[string]interface{}
+
+type cmdResult struct {
+	Cmd          string        `json:"cmd"`
+	TableData    tableDataType `json:"table_data"`
+	RowsAffected int64         `json:"rows_affected"`
+	ErrorMsg     string        `json:"error_msg"`
+}
+
+type oneAddressResult struct {
+	Address    string      `json:"address"`
+	CmdResults []cmdResult `json:"cmd_results"`
+	ErrorMsg   string      `json:"error_msg"`
+}
+
+type queryResponseData []oneAddressResult
+```
+
+The _json_ structure returned by the api is
+```json
+{
+  code: int,
+  data: queryResponseData,
+  msg: string
+}
+```
+
+### _tableDataType_
+The rows returned by executing _sql_
+
+The result of `SELECT user, host from mysql.user limit 2` looks like
+
+```json
+[
+  {"host": "localhost", "user": "root"},
+  {"host": "127.0.0.1", "user": "system"}
+]
+```
+
+### _cmdResult_
+_TableData_ and _ErrorMsg_ are mutually exclusive; they are never both meaningful
+
+Access works roughly like this
+```go
+var cr cmdResult
+if cr.ErrorMsg != "" {
+  // sql execute failed
+}
+```
+
+### _oneAddressResult_
+* When the _api_ parameter _force == true_, _ErrorMsg_ only contains address-level errors such as connection failures; _sql_ execution errors are not recorded here
+* When the _api_ parameter _force == false_, _ErrorMsg_ may also carry the message of the last failed _sql_; the last element of _CmdResults_ is that failing _sql_ (see the example below)
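+
+For instance, with _force == false_ a failing second statement might yield a result like this (all values illustrative):
+```json
+[
+  {
+    "address": "127.0.0.1:20000",
+    "cmd_results": [
+      {"cmd": "select 1", "table_data": [{"1": "1"}], "rows_affected": 0, "error_msg": ""},
+      {"cmd": "select * from db1.not_exists", "table_data": [], "rows_affected": 0, "error_msg": "Table 'db1.not_exists' doesn't exist"}
+    ],
+    "error_msg": "Table 'db1.not_exists' doesn't exist"
+  }
+]
+```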
+
+
+## Supported commands
+The full list of _sql commands_ is in _all_sql_commands.txt_
+
+```go
+	"change_db",
+	"explain_other",
+	"select",
+	"show_binlog_events",
+	"show_binlogs",
+	"show_charsets",
+	"show_client_stats",
+	"show_collations",
+	"show_create",
+	"show_create_db",
+	"show_create_event",
+	"show_create_func",
+	"show_create_proc",
+	"show_create_trigger",
+	"show_create_user",
+	"show_databases",
+	"show_engine_logs",
+	"show_engine_mutex",
+	"show_engine_status",
+	"show_errors",
+	"show_events",
+	"show_fields",
+	"show_func_code",
+	"show_grants",
+	"show_index_stats",
+	"show_keys",
+	"show_master_stat",
+	"show_open_tables",
+	"show_plugins",
+	"show_privileges",
+	"show_proc_code",
+	"show_processlist",
+	"show_profile",
+	"show_profiles",
+	"show_relaylog_events",
+	"show_slave_hosts",
+	"show_slave_stat",
+	"show_status",
+	"show_status_func",
+	"show_status_proc",
+	"show_storage_engines",
+	"show_table_stats",
+	"show_table_status",
+	"show_tables",
+	"show_thread_stats",
+	"show_triggers",
+	"show_user_stats",
+	"show_variables",
+	"show_warns",
+	"alter_user",
+	"change_master",
+	"change_replication_filter",
+	"create_db",
+	"create_event",
+	"create_function",
+	"create_procedure",
+	"create_table",
+	"create_trigger",
+	"create_user",
+	"create_view",
+	"delete",
+	"delete_multi",
+	"drop_compression_dictionary",
+	"drop_db",
+	"drop_event",
+	"drop_function",
+	"drop_index",
+	"drop_procedure",
+	"drop_server",
+	"drop_table",
+	"drop_trigger",
+	"drop_user",
+	"drop_view",
+	"flush",
+	"grant",
+	"insert",
+	"kill",
+	"rename_table",
+	"rename_user",
+	"replace",
+	"reset",
+	"revoke",
+	"revoke_all",
+	"set_option",
+	"slave_start",
+	"slave_stop",
+	"truncate",
+	"update",
+	"update_multi",
+```
+
+## _Proxy Admin RPC_
+
+`POST /proxy-admin/rpc`
+
+The _request_ and _response_ are the same as for _MySQL RPC_
+
+### Supported commands
+```go
+    "select"
+    "refresh_users"
+```
\ No newline at end of file
diff --git a/dbm-services/mysql/db-simulation/.gitignore b/dbm-services/mysql/db-simulation/.gitignore
new file mode 100644
index 0000000000..fa94ec81e7
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/.gitignore
@@ -0,0 +1,10 @@
+vendor/
+.vscode/
+conf/
+log/
+bksimulation
+tmysqlparse
+tmysqlparse*.*tar.gz
+restart.sh
+mysql*.*tar.gz
+*.env
\ No newline at end of file
diff --git a/dbm-services/mysql/db-simulation/.golangci.yml b/dbm-services/mysql/db-simulation/.golangci.yml
new file mode 100644
index 0000000000..74b121ed6f
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/.golangci.yml
@@ -0,0 +1,57 @@
+linters-settings:
+  lll:
+    line-length: 120  
+  funlen:
+    lines: 80
+    statements: 80
+  gocritic:
+    enabled-checks:
+      - nestingReduce
+      - commentFormatting
+      
+run:
+  # default concurrency is a available CPU number
+  concurrency: 4
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  timeout: 2m
+  # exit code when at least one issue was found, default is 1
+  issues-exit-code: 1
+  # include test files or not, default is true
+  tests: false
+  # default is true. Enables skipping of directories:
+  #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
+  skip-dirs-use-default: true
+
+  skip-files:
+    - ".*/mock/.*.go"
+    - ".*testing.go"
+
+linters:
+  # enable-all: true
+  # disable-all: true
+  disable:
+    - errcheck
+  enable:
+    - nilerr
+    - nakedret
+    - lll
+    - gofmt
+    - gocritic
+    - gocyclo
+    - whitespace
+    - sqlclosecheck
+    - deadcode
+    - govet
+    - bodyclose
+    - staticcheck
+    # - errorlint
+    # - varcheck
+    # - typecheck
+    # - nestif
+    # - gofumpt
+    # - godox
+    # - wsl
+    # - funlen
+    # - golint
+    # - cyclop
+  fast: false
\ No newline at end of file
diff --git a/dbm-services/mysql/db-simulation/Dockerfile b/dbm-services/mysql/db-simulation/Dockerfile
new file mode 100644
index 0000000000..8b73de7fea
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/Dockerfile
@@ -0,0 +1,11 @@
+FROM mirrors.tencent.com/build/blueking/tmysqlparse:3.0.8
+
+RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
+RUN echo "Asia/Shanghai" > /etc/timezone
+
+ADD bksimulation /
+#ADD tmysqlparse /
+ADD rule.yaml /
+ADD spider_rule.yaml /
+WORKDIR /
+ENTRYPOINT ["/bksimulation"]
\ No newline at end of file
diff --git a/dbm-services/mysql/db-simulation/Makefile b/dbm-services/mysql/db-simulation/Makefile
new file mode 100644
index 0000000000..164a3297d5
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/Makefile
@@ -0,0 +1,47 @@
+SHELL := /bin/bash
+BASEDIR = $(shell pwd)
+
+SRV_NAME = bksimulation
+COMMAND_NAME = bksimulation
+VER = v0.1.22
+CURRENT_VERSION = release-$(VER)
+TEST_VERSION = test-$(VER)
+NAMESPACE = build/blueking
+DH_URL = mirrors.tencent.com
+BUILD_FLAG = "-X main.version=${VER} -X main.buildstamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X main.githash=`git rev-parse HEAD` "
+all: build
+api:
+	go build -gcflags=-trimpath=${PWD} -asmflags=-trimpath=${PWD}  -ldflags ${BUILD_FLAG}    -o bksimulation  -v .
+
+build:clean
+	CGO_ENABLED=0 GOOS=${GOOS} GOARCH=amd64 go build -gcflags=-trimpath=${PWD} -asmflags=-trimpath=${PWD}  -ldflags ${BUILD_FLAG}  -o $(COMMAND_NAME) -v .
+
+publish:build
+	docker build  --build-arg SRV_NAME=$(COMMAND_NAME) --rm -t $(SRV_NAME):$(CURRENT_VERSION) .
+	docker tag $(SRV_NAME):$(CURRENT_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(CURRENT_VERSION)
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(CURRENT_VERSION)
+
+latest:build
+	docker build  --build-arg SRV_NAME=$(COMMAND_NAME) --rm -t $(SRV_NAME):latest .
+	docker tag $(SRV_NAME):latest $(DH_URL)/${NAMESPACE}/$(SRV_NAME):latest
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):latest
+
+test:build
+	docker build --build-arg SRV_NAME=$(COMMAND_NAME) --rm -t $(SRV_NAME):$(TEST_VERSION) .
+	docker tag $(SRV_NAME):$(TEST_VERSION) $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(TEST_VERSION)
+	docker push $(DH_URL)/${NAMESPACE}/$(SRV_NAME):$(TEST_VERSION)
+
+gotool:
+	@-gofmt -w  .
+	@-go tool vet . 2>&1 | grep -v vendor;true
+clean:
+	# find . -name "[._]*.s[a-w][a-z]" | xargs -i rm -f {}
+	rm -f bksimulation
+	rm -f tmysqlparse
+	rm -rf tmysqlparse-${TMYSQLPARSE_VER}*
+help:
+	@echo "make - compile go source"
+	@echo "make gotool - run gofmt & go too vet"
+	@echo "make clean - do some clean job"
+
+.PHONY: all api build publish latest test gotool clean help
diff --git a/dbm-services/mysql/db-simulation/all_sql_commands.txt b/dbm-services/mysql/db-simulation/all_sql_commands.txt
new file mode 100644
index 0000000000..6ba9d554a0
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/all_sql_commands.txt
@@ -0,0 +1,150 @@
+"select"
+"create_table"
+"create_index"
+"alter_table"
+"update"
+"insert"
+"insert_select"
+"delete"
+"truncate"
+"drop_table"
+"drop_index"
+"show_databases"`
+"show_tables"
+"show_fields"
+"show_keys"
+"show_variables"
+"show_status"
+"show_engine_logs"
+"show_engine_status"
+"show_engine_mutex"
+"show_processlist"
+"show_master_stat"
+"show_slave_stat"
+"show_grants"
+"show_create"
+"show_charsets"
+"show_collations"
+"show_create_db"
+"show_table_status"
+"show_triggers"
+"load"
+"set_option"
+"lock_tables"
+"unlock_tables"
+"grant"
+"change_db"
+"create_db"
+"drop_db"
+"alter_db"
+"repair"
+"replace"
+"replace_select"
+"create_function"
+"drop_function"
+"revoke"
+"optimize"
+"check"
+"assign_to_keycache"
+"preload_keys"
+"flush"
+"kill"
+"analyze"
+"rollback"
+"rollback_to_savepoint"
+"commit"
+"savepoint"
+"release_savepoint"
+"slave_start"
+"slave_stop"
+"start_group_replication"
+"stop_group_replication"
+"begin"
+"change_master"
+"change_replication_filter"
+"rename_table"
+"reset"
+"purge"
+"purge_before"
+"show_binlogs"
+"show_open_tables"
+"ha_open"
+"ha_close"
+"ha_read"
+"show_slave_hosts"
+"delete_multi"
+"update_multi"
+"show_binlog_events"
+"do"
+"show_warns"
+"empty_query"
+"show_errors"
+"show_storage_engines"
+"show_privileges"
+"help"
+"create_user"
+"drop_user"
+"rename_user"
+"revoke_all"
+"checksum"
+"create_procedure"
+"create_spfunction"
+"call"
+"drop_procedure"
+"alter_procedure"
+"alter_function"
+"show_create_proc"
+"show_create_func"
+"show_status_proc"
+"show_status_func"
+"prepare"
+"execute"
+"deallocate_prepare"
+"create_view"
+"drop_view"
+"create_trigger"
+"drop_trigger"
+"xa_start"
+"xa_end"
+"xa_prepare"
+"xa_commit"
+"xa_rollback"
+"xa_recover"
+"show_proc_code"
+"show_func_code"
+"alter_tablespace"
+"install_plugin"
+"uninstall_plugin"
+"binlog_base64_event"
+"show_plugins"
+"create_server"
+"drop_server"
+"alter_server"
+"create_event"
+"alter_event"
+"drop_event"
+"show_create_event"
+"show_events"
+"show_create_trigger"
+"alter_db_upgrade"
+"show_profile"
+"show_profiles"
+"signal"
+"resignal"
+"show_relaylog_events"
+"get_diagnostics"
+"alter_user"
+"explain_other"
+"show_create_user"
+"shutdown"
+"alter_instance"
+"show_user_stats"
+"show_table_stats"
+"show_index_stats"
+"show_client_stats"
+"show_thread_stats"
+"lock_tables_for_backup"
+"lock_binlog_for_backup"
+"unlock_binlog"
+"create_compression_dictionary"
+"drop_compression_dictionary"
\ No newline at end of file
diff --git a/dbm-services/mysql/db-simulation/app/app.go b/dbm-services/mysql/db-simulation/app/app.go
new file mode 100644
index 0000000000..2de94695f4
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/app.go
@@ -0,0 +1,14 @@
+// Package app holds app-level constants
+package app
+
+const (
+	// MySQL identifies the mysql component
+	MySQL = "mysql"
+	// Spider identifies the spider component
+	Spider = "spider"
+	// TdbCtl identifies the tdbctl (spider central control) component
+	TdbCtl = "tdbctl"
+
+	// TendbCluster identifies the tendbcluster cluster type
+	TendbCluster = "tendbcluster"
+)
diff --git a/dbm-services/mysql/db-simulation/app/config/config.go b/dbm-services/mysql/db-simulation/app/config/config.go
new file mode 100644
index 0000000000..11f3777ad9
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/config/config.go
@@ -0,0 +1,185 @@
+// Package config loads the app configuration from environment variables and an optional config file
+package config
+
+import (
+	util "dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"path"
+
+	"github.com/spf13/viper"
+)
+
+// GAppConfig is the global application configuration
+var GAppConfig = AppConfig{}
+
+// AppConfig is the application configuration
+type AppConfig struct {
+	ListenAddr     string       `yaml:"listenAddr"`
+	BkRepo         BkRepoConfig `yaml:"bkrepo"`
+	Bcs            BcsConfig    `yaml:"bcs"`
+	MirrorsAddress []ImgConfig  `yaml:"mirrorsAddress"`
+	Image          Images
+	DbConf         DbConfig `yaml:"dbconf"`
+	Debug          bool     `yaml:"debug"`
+	RulePath       string   `yaml:"rulePath"`
+	SpiderRulePath string   `yaml:"spiderRulePath"`
+}
+
+// BkRepoConfig is the bkrepo artifact repository config
+type BkRepoConfig struct {
+	Project      string `yaml:"project"`
+	PublicBucket string `yaml:"publicBucket"`
+	User         string `yaml:"user"`
+	Pwd          string `yaml:"pwd"`
+	EndPointUrl  string `yaml:"endpointUrl"`
+}
+
+// Images holds the container images used by the simulation
+type Images struct {
+	Tendb57Img string // image for version 5.7
+	Tendb56Img string // image for version 5.6
+	Tendb80Img string // image for version 8.0
+	TdbCtlImg  string // image for the matching tdbctl version
+	SpiderImg  string // spider image
+}
+
+// BcsConfig is the BCS (kubernetes) cluster config
+type BcsConfig struct {
+	EndpointUrl string `yaml:"endpointUrl"`
+	ClusterId   string `yaml:"clusterId"`
+	Token       string `yaml:"token"`
+	NameSpace   string `yaml:"namespace"`
+	Timeout     int    `yaml:"timeout"`
+}
+
+// DbConfig is the backend database config
+type DbConfig struct {
+	User string `yaml:"user"`
+	Pwd  string `yaml:"pwd"`
+	Name string `yaml:"name"`
+	Host string `yaml:"host"`
+	Port int    `yaml:"port"`
+}
+
+// ImgConfig maps a version to its container image
+type ImgConfig struct {
+	Version string `yaml:"version"`
+	Image   string `yaml:"image"`
+}
+
+func init() {
+	viper.AutomaticEnv()
+	// bkrepo
+	viper.BindEnv("bkrepo_project", "BKREPO_PROJECT")
+	viper.BindEnv("bkrepo_public_bucket", "BKREPO_PUBLIC_BUCKET")
+	viper.BindEnv("bkrepo_username", "BKREPO_USERNAME")
+	viper.BindEnv("bkrepo_password", "BKREPO_PASSWORD")
+	viper.BindEnv("bkrepo_endpoint_url", "BKREPO_ENDPOINT_URL")
+	// bcs
+	viper.BindEnv("bcs_base_url", "BCS_BASE_URL")
+	viper.BindEnv("bcs_token", "BCS_TOKEN")
+	viper.BindEnv("bcs_cluster_id", "BCS_CLUSTER_ID")
+	viper.BindEnv("bcs_namespace", "BCS_NAMESPACE")
+	// db conf
+	viper.BindEnv("db_user", "DB_USER")
+	viper.BindEnv("db_password", "DB_PASSWORD")
+	viper.BindEnv("db_host", "DB_HOST")
+	viper.BindEnv("db_port", "DB_PORT")
+	viper.BindEnv("dbsimulation_db", "DBSIMULATION_DB")
+	viper.BindEnv("debug", "DEBUG")
+	// img conf
+	viper.BindEnv("mirrors_addr", "MIRRORS_ADDR")
+	viper.BindEnv("mysql56", "MYSQL56")
+	viper.BindEnv("mysql57", "MYSQL57")
+	viper.BindEnv("mysql80", "MYSQL80")
+	viper.BindEnv("spider_img", "SPIDER_IMG")
+	viper.BindEnv("tdbctl_img", "TDBCTL_IMG")
+
+	GAppConfig.ListenAddr = "0.0.0.0:80"
+	if viper.GetString("LISTEN_ADDR") != "" {
+		GAppConfig.ListenAddr = viper.GetString("LISTEN_ADDR")
+	}
+	GAppConfig.Debug = viper.GetBool("DEBUG")
+	GAppConfig.BkRepo = BkRepoConfig{
+		PublicBucket: viper.GetString("BKREPO_PUBLIC_BUCKET"),
+		Project:      viper.GetString("BKREPO_PROJECT"),
+		User:         viper.GetString("BKREPO_USERNAME"),
+		Pwd:          viper.GetString("BKREPO_PASSWORD"),
+		EndPointUrl:  viper.GetString("BKREPO_ENDPOINT_URL"),
+	}
+	GAppConfig.Bcs = BcsConfig{
+		NameSpace:   viper.GetString("BCS_NAMESPACE"),
+		EndpointUrl: viper.GetString("BCS_BASE_URL"),
+		ClusterId:   viper.GetString("BCS_CLUSTER_ID"),
+		Token:       viper.GetString("BCS_TOKEN"),
+		Timeout:     10,
+	}
+	GAppConfig.DbConf = DbConfig{
+		User: viper.GetString("DB_USER"),
+		Pwd:  viper.GetString("DB_PASSWORD"),
+		Host: viper.GetString("DB_HOST"),
+		Port: viper.GetInt("DB_PORT"),
+		Name: viper.GetString("DBSIMULATION_DB"),
+	}
+	mirroraddr := viper.GetString("MIRRORS_ADDR")
+	if !util.IsEmpty(mirroraddr) {
+		mysql56 := viper.GetString("MYSQL56")
+		mysql57 := viper.GetString("MYSQL57")
+		mysql80 := viper.GetString("MYSQL80")
+		spiderImg := viper.GetString("SPIDER_IMG")
+		tdbctlImg := viper.GetString("TDBCTL_IMG")
+		if !util.IsEmpty(mysql56) {
+			GAppConfig.Image.Tendb56Img = path.Join(mirroraddr, mysql56)
+		}
+		if !util.IsEmpty(mysql57) {
+			GAppConfig.Image.Tendb57Img = path.Join(mirroraddr, mysql57)
+		}
+		if !util.IsEmpty(mysql80) {
+			GAppConfig.Image.Tendb80Img = path.Join(mirroraddr, mysql80)
+		}
+		if !util.IsEmpty(spiderImg) {
+			GAppConfig.Image.SpiderImg = path.Join(mirroraddr, spiderImg)
+		}
+		if !util.IsEmpty(tdbctlImg) {
+			GAppConfig.Image.TdbCtlImg = path.Join(mirroraddr, tdbctlImg)
+		}
+	}
+	if err := loadConfig(); err != nil {
+		logger.Error("load config file failed:%s", err.Error())
+	}
+	for _, v := range GAppConfig.MirrorsAddress {
+		switch v.Version {
+		case "5.6":
+			GAppConfig.Image.Tendb56Img = v.Image
+		case "5.7":
+			GAppConfig.Image.Tendb57Img = v.Image
+		case "8.0":
+			GAppConfig.Image.Tendb80Img = v.Image
+		case "spider":
+			GAppConfig.Image.SpiderImg = v.Image
+		case "tdbctl":
+			GAppConfig.Image.TdbCtlImg = v.Image
+		}
+	}
+	logger.Info("app config %v", GAppConfig)
+}
+
+// loadConfig loads the optional config file from $HOME/conf or ./conf
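+// A minimal conf/config.yaml might look like this (a sketch; the keys follow
+// the yaml tags on AppConfig and the values are illustrative):
+//
+//	listenAddr: 0.0.0.0:80
+//	debug: false
+//	mirrorsAddress:
+//	  - version: "5.7"
+//	    image: mirrors.example.com/mysql:5.7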
+func loadConfig() (err error) {
+	viper.SetConfigName("config")
+	viper.SetConfigType("yaml")
+	viper.AddConfigPath("$HOME/conf")
+	viper.AddConfigPath("./conf")
+	if err = viper.ReadInConfig(); err != nil {
+		if _, ok := err.(viper.ConfigFileNotFoundError); ok {
+			logger.Error("config file not found,maybe read by env")
+			return nil
+		}
+		return err
+	}
+	if err = viper.Unmarshal(&GAppConfig); err != nil {
+		return err
+	}
+	logger.Debug("load config is: %v", GAppConfig)
+	return
+}
diff --git a/dbm-services/mysql/db-simulation/app/service/kubernets.go b/dbm-services/mysql/db-simulation/app/service/kubernets.go
new file mode 100644
index 0000000000..806f720d73
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/service/kubernets.go
@@ -0,0 +1,377 @@
+package service
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	util "dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-simulation/app"
+	"dbm-services/mysql/db-simulation/app/config"
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/remotecommand"
+)
+
+// Kcs is the global kubernetes client set
+var Kcs KubeClientSets
+
+// DefaultUser is the default database user
+const DefaultUser = "root"
+
+// KubeClientSets wraps the kubernetes clientset and rest config
+type KubeClientSets struct {
+	Cli        *kubernetes.Clientset
+	RestConfig *rest.Config
+	Namespace  string // namespace
+}
+
+// MySQLPodBaseInfo carries the base info of a simulation mysql pod
+type MySQLPodBaseInfo struct {
+	PodName string
+	Lables  map[string]string
+	RootPwd string
+	Charset string
+}
+
+// DbPodSets groups a pod's k8s clients, base info, images and db connection
+type DbPodSets struct {
+	K8S         KubeClientSets
+	BaseInfo    *MySQLPodBaseInfo
+	DbWork      *util.DbWorker
+	DbImage     string
+	TdbCtlImage string
+	SpiderImage string
+}
+
+// ClusterPodSets is the pod set for a spider cluster simulation
+type ClusterPodSets struct {
+	DbPodSets
+}
+
+func init() {
+	logger.Info("start init bcs client ")
+	Kcs.RestConfig = &rest.Config{
+		Host:        config.GAppConfig.Bcs.EndpointUrl + "/clusters/" + config.GAppConfig.Bcs.ClusterId + "/",
+		BearerToken: config.GAppConfig.Bcs.Token,
+		ContentConfig: rest.ContentConfig{
+			ContentType:  "application/json",
+			GroupVersion: &v1.SchemeGroupVersion,
+		},
+		Timeout: 10 * time.Second,
+	}
+	clientSet, err := kubernetes.NewForConfig(Kcs.RestConfig)
+	if err != nil {
+		logger.Fatal("init kubernetes client failed %s", err.Error())
+		return
+	}
+	Kcs.Cli = clientSet
+	Kcs.Namespace = config.GAppConfig.Bcs.NameSpace
+}
+
+// NewDbPodSets creates a DbPodSets bound to the global client set
+func NewDbPodSets() *DbPodSets {
+	return &DbPodSets{
+		K8S: Kcs,
+	}
+}
+
+func (k *DbPodSets) getCreateClusterSqls() []string {
+	var ss []string
+	ss = append(ss, fmt.Sprintf(
+		"tdbctl create node wrapper 'SPIDER' options(user 'root', password '%s', host 'localhost', port 25000);",
+		k.BaseInfo.RootPwd))
+	ss = append(ss, fmt.Sprintf(
+		"tdbctl create node wrapper 'mysql' options(user 'root', password '%s', host 'localhost', port 20000);",
+		k.BaseInfo.RootPwd))
+	ss = append(ss, fmt.Sprintf(
+		"tdbctl create node wrapper 'TDBCTL' options(user 'root', password '%s', host 'localhost', port 26000);",
+		k.BaseInfo.RootPwd))
+	ss = append(ss, "tdbctl flush routing;")
+	return ss
+}
+
+// CreateClusterPod creates a pod with backend, spider and tdbctl containers and wires them into a cluster
+func (k *DbPodSets) CreateClusterPod() (err error) {
+	c := &v1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Pod",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      k.BaseInfo.PodName,
+			Namespace: k.K8S.Namespace,
+			Labels:    k.BaseInfo.Lables,
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name: "backend",
+					Env: []v1.EnvVar{{
+						Name:  "MYSQL_ROOT_PASSWORD",
+						Value: k.BaseInfo.RootPwd,
+					}},
+					ImagePullPolicy: v1.PullIfNotPresent,
+					Image:           k.DbImage,
+					Args: []string{"mysqld", "--defaults-file=/etc/my.cnf", "--port=20000", fmt.Sprintf("--character-set-server=%s",
+						k.BaseInfo.Charset),
+						"--user=mysql"},
+					ReadinessProbe: &v1.Probe{
+						Handler: v1.Handler{
+							Exec: &v1.ExecAction{
+								Command: []string{"/bin/bash", "-c", fmt.Sprintf("mysql -uroot -p%s -e 'select 1'", k.BaseInfo.RootPwd)},
+							},
+						},
+						InitialDelaySeconds: 2,
+						PeriodSeconds:       5,
+					},
+				}, {
+					Name: "spider",
+					Env: []v1.EnvVar{{
+						Name:  "MYSQL_ROOT_PASSWORD",
+						Value: k.BaseInfo.RootPwd,
+					}},
+					ImagePullPolicy: v1.PullIfNotPresent,
+					Image:           k.SpiderImage,
+					Args: []string{"mysqld", "--defaults-file=/etc/my.cnf", "--port=25000", fmt.Sprintf("--character-set-server=%s",
+						k.BaseInfo.Charset),
+						"--user=mysql"},
+					ReadinessProbe: &v1.Probe{
+						Handler: v1.Handler{
+							Exec: &v1.ExecAction{
+								Command: []string{"/bin/bash", "-c", fmt.Sprintf("mysql -uroot -p%s -e 'select 1'", k.BaseInfo.RootPwd)},
+							},
+						},
+						InitialDelaySeconds: 2,
+						PeriodSeconds:       5,
+					},
+				},
+				{
+					Name: "tdbctl",
+					Env: []v1.EnvVar{{
+						Name:  "MYSQL_ROOT_PASSWORD",
+						Value: k.BaseInfo.RootPwd,
+					}},
+					ImagePullPolicy: v1.PullIfNotPresent,
+					Image:           k.TdbCtlImage,
+					Args: []string{"mysqld", "--defaults-file=/etc/my.cnf", "--port=26000", "--tc-is-primary=1",
+						fmt.Sprintf("--character-set-server=%s",
+							k.BaseInfo.Charset),
+						"--user=mysql"},
+					ReadinessProbe: &v1.Probe{
+						Handler: v1.Handler{
+							Exec: &v1.ExecAction{
+								Command: []string{"/bin/bash", "-c", fmt.Sprintf("mysql -uroot -p%s -e 'select 1'", k.BaseInfo.RootPwd)},
+							},
+						},
+						InitialDelaySeconds: 2,
+						PeriodSeconds:       5,
+					},
+				},
+			},
+		},
+	}
+	if err := k.createpod(c, 26000); err != nil {
+		logger.Error("create spider cluster failed %s", err.Error())
+		return err
+	}
+	logger.Info("connect tdbctl success ~")
+	// create cluster relation
+	for _, ql := range k.getCreateClusterSqls() {
+		if _, err = k.DbWork.Db.Exec(ql); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// createpod creates the pod, waits for all containers to become ready, then connects to the given port
+func (k *DbPodSets) createpod(pod *v1.Pod, probePort int) (err error) {
+	if _, err = k.K8S.Cli.CoreV1().Pods(k.K8S.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
+		logger.Error("create pod failed %s", err.Error())
+		return err
+	}
+	var podIp string
+	// poll the pod status until every container is ready
+	if err := util.Retry(util.RetryConfig{Times: 120, DelayTime: 2 * time.Second}, func() error {
+		podI, err := k.K8S.Cli.CoreV1().Pods(k.K8S.Namespace).Get(context.TODO(), k.BaseInfo.PodName, metav1.GetOptions{})
+		if err != nil {
+			return err
+		}
+		for _, cStatus := range podI.Status.ContainerStatuses {
+			logger.Info("%s: %v", cStatus.Name, cStatus.Ready)
+			if !cStatus.Ready {
+				return fmt.Errorf("container %s is not ready", cStatus.Name)
+			}
+		}
+		podIp = podI.Status.PodIP
+		return nil
+	}); err != nil {
+		return err
+	}
+	fn := func() error {
+		k.DbWork, err = util.NewDbWorker(fmt.Sprintf("%s:%s@tcp(%s:%d)/?timeout=5s&multiStatements=true",
+			DefaultUser,
+			k.BaseInfo.RootPwd,
+			podIp, probePort))
+		if err != nil {
+			logger.Error("connect to pod %s failed %s", podIp, err.Error())
+			return errors.Wrap(err, "create pod success,connect to mysql pod failed")
+		}
+		return nil
+	}
+	return util.Retry(util.RetryConfig{Times: 60, DelayTime: 2 * time.Second}, fn)
+}
+
+// CreateMySQLPod creates a single-container mysql pod
+func (k *DbPodSets) CreateMySQLPod() (err error) {
+	c := &v1.Pod{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Pod",
+			APIVersion: "v1",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      k.BaseInfo.PodName,
+			Namespace: k.K8S.Namespace,
+			Labels:    k.BaseInfo.Lables,
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{{
+				Name: app.MySQL,
+				Env: []v1.EnvVar{{
+					Name:  "MYSQL_ROOT_PASSWORD",
+					Value: k.BaseInfo.RootPwd,
+				}},
+				Ports: []v1.ContainerPort{
+					{ContainerPort: 3306},
+				},
+				ImagePullPolicy: v1.PullIfNotPresent,
+				Image:           k.DbImage,
+				Args: []string{"mysqld", "--defaults-file=/etc/my.cnf", fmt.Sprintf("--character-set-server=%s",
+					k.BaseInfo.Charset),
+					"--user=mysql"},
+				ReadinessProbe: &v1.Probe{
+					Handler: v1.Handler{
+						Exec: &v1.ExecAction{
+							Command: []string{"/bin/bash", "-c", fmt.Sprintf("mysql -uroot -p%s -e 'select 1'", k.BaseInfo.RootPwd)},
+						},
+					},
+					InitialDelaySeconds: 2,
+					PeriodSeconds:       5,
+				},
+			}},
+		},
+	}
+
+	return k.createpod(c, 3306)
+}
+
+// DeletePod deletes the simulation pod
+func (k *DbPodSets) DeletePod() (err error) {
+	return k.K8S.Cli.CoreV1().Pods(k.K8S.Namespace).Delete(context.TODO(), k.BaseInfo.PodName, metav1.DeleteOptions{})
+}
+
+// GetLoadSchemaSQLCmd builds the shell command that downloads the schema file and loads it via mysql
+func (k *DbPodSets) GetLoadSchemaSQLCmd(bkpath, file string) (cmd string) {
+	cmd = fmt.Sprintf(
+		"curl -o %s %s && mysql --defaults-file=/etc/my.cnf -uroot -p%s --default-character-set=%s -vvv < %s",
+		file, getdownloadUrl(bkpath, file), k.BaseInfo.RootPwd, k.BaseInfo.Charset, file)
+	return
+}
+
+// GetLoadSQLCmd builds one download command followed by one mysql load command per target db
+func (k *DbPodSets) GetLoadSQLCmd(bkpath, file string, dbs []string) (cmd []string) {
+	cmd = append(cmd, fmt.Sprintf("curl -o %s %s", file, getdownloadUrl(bkpath, file)))
+	for _, db := range dbs {
+		cmd = append(cmd, fmt.Sprintf("mysql --defaults-file=/etc/my.cnf -uroot -p%s --default-character-set=%s -vvv %s < %s",
+			k.BaseInfo.RootPwd, k.BaseInfo.Charset, db, file))
+	}
+	return cmd
+}
+
+func getdownloadUrl(bkpath, file string) string {
+	endpoint := config.GAppConfig.BkRepo.EndPointUrl
+	project := config.GAppConfig.BkRepo.Project
+	publicbucket := config.GAppConfig.BkRepo.PublicBucket
+	u, err := url.Parse(endpoint)
+	if err != nil {
+		return ""
+	}
+	r, err := url.Parse(path.Join("/generic", project, publicbucket, bkpath, file))
+	if err != nil {
+		logger.Error(err.Error())
+		return ""
+	}
+	ll := u.ResolveReference(r).String()
+	logger.Info("dbeug url is %s", ll)
+	return ll
+}
+
+// ExecuteInPod executes a shell command in the given container of the pod and streams the output
+func (k *DbPodSets) ExecuteInPod(cmd, container string, extMap map[string]string) (stdout, stderr bytes.Buffer,
+	err error) {
+	xlogger := logger.New(os.Stdout, true, logger.InfoLevel, extMap)
+	logger.Info("start exec...")
+	req := k.K8S.Cli.CoreV1().RESTClient().Post().Resource("pods").Name(k.BaseInfo.PodName).Namespace(k.K8S.Namespace).
+		SubResource("exec").
+		Param("container", container)
+	logger.Info(cmd)
+	req.VersionedParams(
+		&v1.PodExecOptions{
+			Command: []string{"/bin/bash", "-c", cmd},
+			Stdin:   false,
+			Stdout:  true,
+			Stderr:  true,
+		},
+		scheme.ParameterCodec,
+	)
+	reader, writer := io.Pipe()
+	exec, err := remotecommand.NewSPDYExecutor(k.K8S.RestConfig, "POST", req.URL())
+	if err != nil {
+		logger.Error("remotecommand.NewSPDYExecutor", err.Error())
+		return bytes.Buffer{}, bytes.Buffer{}, err
+	}
+	go func() {
+		buf := []byte{}
+		sc := bufio.NewScanner(reader)
+		sc.Buffer(buf, 2048*1024)
+		lineNumber := 1
+		for sc.Scan() {
+			xlogger.Info(sc.Text())
+			lineNumber++
+		}
+		if err := sc.Err(); err != nil {
+			logger.Error("something bad happened in the line %v: %v", lineNumber, err)
+			return
+		}
+	}()
+	err = exec.Stream(remotecommand.StreamOptions{
+		Stdin:  nil,
+		Stdout: writer,
+		Stderr: &stderr,
+		Tty:    false,
+	})
+	if err != nil {
+		logger.Error("exec.Stream failed %s:\n stdout:%s\n stderr: %s", err.Error(), strings.TrimSpace(stdout.String()),
+			strings.TrimSpace(stderr.String()))
+		return
+	}
+	logger.Info("exec successfuly...")
+	logger.Info("info stdout:%s\nstderr:%s ", strings.TrimSpace(stdout.String()),
+		strings.TrimSpace(stderr.String()))
+	return stdout, stderr, err
+}
diff --git a/dbm-services/mysql/db-simulation/app/service/kubernets_test.go b/dbm-services/mysql/db-simulation/app/service/kubernets_test.go
new file mode 100644
index 0000000000..62e755d8e6
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/service/kubernets_test.go
@@ -0,0 +1,24 @@
+package service_test
+
+import (
+	"dbm-services/mysql/db-simulation/app/config"
+	"dbm-services/mysql/db-simulation/app/service"
+	"testing"
+)
+
+func TestCreateClusterPod(t *testing.T) {
+	ps := service.NewDbPodSets()
+	ps.BaseInfo = &service.MySQLPodBaseInfo{
+		PodName: "test1",
+		RootPwd: "",
+		Charset: "utf8",
+	}
+	ps.DbImage = config.GAppConfig.Image.Tendb57Img
+	ps.TdbCtlImage = config.GAppConfig.Image.TdbCtlImg
+	ps.SpiderImage = config.GAppConfig.Image.SpiderImg
+	if err := ps.CreateClusterPod(); err != nil {
+		t.Fatal(err)
+	}
+	t.Log("ending..")
+}
diff --git a/dbm-services/mysql/db-simulation/app/service/service.go b/dbm-services/mysql/db-simulation/app/service/service.go
new file mode 100644
index 0000000000..2d680ff1db
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/service/service.go
@@ -0,0 +1,2 @@
+// Package service implements the simulation service
+package service
diff --git a/dbm-services/mysql/db-simulation/app/service/simulation_task.go b/dbm-services/mysql/db-simulation/app/service/simulation_task.go
new file mode 100644
index 0000000000..e2300cb4e4
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/service/simulation_task.go
@@ -0,0 +1,317 @@
+package service
+
+import (
+	util "dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-simulation/app"
+	"dbm-services/mysql/db-simulation/app/config"
+	"dbm-services/mysql/db-simulation/model"
+	"fmt"
+	"os"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// DelPod controls whether the simulation pod is deleted after the task finishes
+var DelPod = true
+
+// BaseParam is the common simulation task parameter set
+type BaseParam struct {
+	Uid       string `json:"uid"`
+	NodeId    string `json:"node_id"`
+	RootId    string `json:"root_id"`
+	VersionId string `json:"version_id"`
+	// app
+	TaskId        string             `json:"task_id"  binding:"required"`
+	MySQLVersion  string             `json:"mysql_version"  binding:"required"`
+	MySQLCharSet  string             `json:"mysql_charset"  binding:"required"`
+	Path          string             `json:"path"  binding:"required"`
+	ExcuteObjects []ExcuteSQLFileObj `json:"execute_objects"  binding:"gt=0,dive,required"`
+	SchemaSQLFile string             `json:"schema_sql_file"  binding:"required"`
+}
+
+// SpiderSimulationExecParam is the spider simulation task parameter set
+type SpiderSimulationExecParam struct {
+	BaseParam
+	SpiderVersion string `json:"spider_version"`
+}
+
+// SimulationTask is a running simulation task
+type SimulationTask struct {
+	RequestId string
+	PodName   string
+	*BaseParam
+	*DbPodSets
+	TaskRuntimCtx
+}
+
+// GetSpiderImg returns the configured spider image
+func (in SpiderSimulationExecParam) GetSpiderImg() string {
+	return config.GAppConfig.Image.SpiderImg
+}
+
+// GetTdbctlImg returns the configured tdbctl image
+func (in SpiderSimulationExecParam) GetTdbctlImg() string {
+	return config.GAppConfig.Image.TdbCtlImg
+}
+
+// ExcuteSQLFileObj is the execution object for a single file;
+// one request may apply multiple files to different databases
+type ExcuteSQLFileObj struct {
+	SQLFile       string   `json:"sql_file"  binding:"required"` // name of the change file
+	IgnoreDbNames []string `json:"ignore_dbnames"`               // dbNames excluded from the change, wildcard match supported
+	DbNames       []string `json:"dbnames"  binding:"gt=0"`      // DBNames to change, wildcard match supported
+}
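+
+// An execute object in a request body might look like this (illustrative values):
+//
+//	{"sql_file": "change.sql", "dbnames": ["db%"], "ignore_dbnames": ["db_backup"]}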
+
+// parseDbParamRe converts the DbNames patterns into regular expressions
+func (e *ExcuteSQLFileObj) parseDbParamRe() (s []string) {
+	return changeToMatch(e.DbNames)
+}
+
+// parseIgnoreDbParamRe converts the IgnoreDbNames patterns into regular expressions
+func (e *ExcuteSQLFileObj) parseIgnoreDbParamRe() (s []string) {
+	return changeToMatch(e.IgnoreDbNames)
+}
+
+// changeToMatch converts the input wildcard patterns into anchored regular
+// expressions: "?" matches a single character and "%" matches any run
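+//
+//	e.g. changeToMatch([]string{"db%", "test?"}) -> []string{"^db.*$", "^test.$"}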
+func changeToMatch(input []string) []string {
+	var result []string
+	for _, str := range input {
+		str = strings.Replace(str, "?", ".", -1)
+		str = strings.Replace(str, "%", ".*", -1)
+		str = `^` + str + `$`
+		result = append(result, str)
+	}
+	return result
+}
+
+// GetImgFromMySQLVersion returns the container image matching the given MySQL version
+func GetImgFromMySQLVersion(version string) (img string, err error) {
+	switch {
+	case regexp.MustCompile("5.6").MatchString(version):
+		return config.GAppConfig.Image.Tendb56Img, nil
+	case regexp.MustCompile("5.7").MatchString(version):
+		return config.GAppConfig.Image.Tendb57Img, nil
+	case regexp.MustCompile("8.0").MatchString(version):
+		return config.GAppConfig.Image.Tendb80Img, nil
+	default:
+		return "", fmt.Errorf("no image matches version %s", version)
+	}
+}
+
+// TaskRuntimCtx is the runtime context of a task
+type TaskRuntimCtx struct {
+	dbsExcludeSysDb []string // all dbs with the system databases filtered out
+	version         string
+}
+
+// TaskChan is the queue of mysql simulation tasks
+var TaskChan chan SimulationTask
+
+// SpiderTaskChan is the queue of spider simulation tasks
+var SpiderTaskChan chan SimulationTask
+
+// ctrlChan limits how many simulation tasks run concurrently
+var ctrlChan chan struct{}
+
+func init() {
+	TaskChan = make(chan SimulationTask, 100)
+	SpiderTaskChan = make(chan SimulationTask, 100)
+	ctrlChan = make(chan struct{}, 30)
+}
+
+// init starts the dispatch loop that runs queued tasks
+func init() {
+	timer := time.NewTicker(60 * time.Second)
+	go func() {
+		for {
+			select {
+			case task := <-TaskChan:
+				go run(task, app.MySQL)
+			case task := <-SpiderTaskChan:
+				go run(task, app.TdbCtl)
+			case <-timer.C:
+				logger.Info("current run %d task", len(TaskChan))
+			}
+		}
+	}()
+}
+
+// run executes a single simulation task and records its result
+func run(task SimulationTask, tkType string) {
+	var err error
+	var so, se string
+	ctrlChan <- struct{}{}
+	defer func() {
+		<-ctrlChan
+		var status string
+		var errMsg string
+		status = model.Task_Success
+		if err != nil {
+			status = model.Task_Failed
+			errMsg = err.Error()
+		}
+		if err := model.CompleteTask(task.TaskId, status, se, so, errMsg); err != nil {
+			logger.Error("update task status faield %s", err.Error())
+			return
+		}
+	}()
+	// create Pod
+	model.UpdatePhase(task.TaskId, model.Phase_CreatePod)
+	defer func() {
+		if DelPod {
+			if err := task.DbPodSets.DeletePod(); err != nil {
+				logger.Warn("delete Pod failed %s", err.Error())
+			}
+			logger.Info("delete pod successfuly~")
+		}
+	}()
+	if err = createPod(task, tkType); err != nil {
+		logger.Error("create pod failed %s", err.Error())
+		return
+	}
+	so, se, err = task.SimulationRun(tkType)
+	if err != nil {
+		logger.Error("模拟执行失败%s", err.Error())
+		return
+	}
+}
+
+func createPod(task SimulationTask, tkType string) (err error) {
+	switch tkType {
+	case app.MySQL:
+		return task.CreateMySQLPod()
+	case app.TdbCtl:
+		return task.DbPodSets.CreateClusterPod()
+	}
+	return
+}
+
+func (t *SimulationTask) getDbsExcludeSysDb() (err error) {
+	alldbs, err := t.DbWork.ShowDatabases()
+	if err != nil {
+		logger.Error("获取实例db list失败:%s", err.Error())
+		return err
+	}
+	logger.Info("get all database is %v", alldbs)
+	if err = t.DbWork.Queryxs(&t.version, "select version();"); err != nil {
+		logger.Error("query version failed %s", err.Error())
+		return err
+	}
+	logger.Info("version is %s", t.version)
+	t.dbsExcludeSysDb = util.FilterOutStringSlice(alldbs, util.GetGcsSystemDatabasesIgnoreTest(t.version))
+	return nil
+}
+
+// SimulationRun loads the schema, then executes every SQL file in the pod and collects stdout/stderr
+func (t *SimulationTask) SimulationRun(containerName string) (sstdout, sstderr string, err error) {
+	logger.Info("will execute in %s", containerName)
+	doneChan := make(chan struct{})
+	go func() {
+		ticker := time.NewTicker(5 * time.Second)
+		for {
+			select {
+			case <-ticker.C:
+				model.UpdateHeartbeat(t.TaskId, sstderr, sstdout)
+			case <-doneChan:
+				logger.Info("simulation run done")
+				return
+			}
+		}
+	}()
+	// 关闭协程
+	defer func() { doneChan <- struct{}{} }()
+	xlogger := logger.New(os.Stdout, true, logger.InfoLevel, t.getExtmap())
+	// execute load schema
+	model.UpdatePhase(t.TaskId, model.Phase_LoadSchema)
+	stdout, stderr, err := t.DbPodSets.ExecuteInPod(t.GetLoadSchemaSQLCmd(t.Path, t.SchemaSQLFile),
+		containerName,
+		t.getExtmap())
+	sstdout += stdout.String() + "\n"
+	sstderr += stderr.String() + "\n"
+	if err != nil {
+		logger.Error("load database schema sql failed %s", err.Error())
+		return sstdout, sstderr, errors.Wrap(err, "[failed to import table schema]")
+	}
+	logger.Info(stdout.String(), stderr.String())
+	// load real databases
+	if err = t.getDbsExcludeSysDb(); err != nil {
+		logger.Error("getDbsExcludeSysDb faiked")
+		err = errors.Wrap(err, "[getDbsExcludeSysDb failed]")
+		return
+	}
+	model.UpdatePhase(t.TaskId, model.Phase_Running)
+	for _, e := range t.ExcuteObjects {
+		xlogger.Info("[start]-%s", e.SQLFile)
+		var realexcutedbs []string
+		intentionDbs, err := t.match(e.parseDbParamRe())
+		if err != nil {
+			return "", "", err
+		}
+		ignoreDbs, err := t.match(e.parseIgnoreDbParamRe())
+		if err != nil {
+			return "", "", err
+		}
+		realexcutedbs = util.FilterOutStringSlice(intentionDbs, ignoreDbs)
+		if len(realexcutedbs) <= 0 {
+			return "", "", fmt.Errorf("the changed db does not exist!!!")
+		}
+		for idx, cmd := range t.GetLoadSQLCmd(t.Path, e.SQLFile, realexcutedbs) {
+			sstdout += util.RemovePassword(cmd) + "\n"
+			stdout, stderr, err := t.DbPodSets.ExecuteInPod(cmd, containerName, t.getExtmap())
+			sstdout += stdout.String() + "\n"
+			sstderr += stderr.String() + "\n"
+			if err != nil {
+				if idx == 0 {
+					logger.Error("download file failed:%s", err.Error())
+					return sstdout, sstderr, fmt.Errorf("download file %s failed:%s", e.SQLFile, err.Error())
+				}
+				logger.Error("%s[%s]:ExecuteInPod failed %s", e.SQLFile, realexcutedbs[idx-1], err.Error())
+				return sstdout, sstderr, fmt.Errorf("exec %s in %s failed:%s", e.SQLFile, realexcutedbs[idx-1],
+					err.Error())
+			}
+			logger.Info("%s \n %s", stdout.String(), stderr.String())
+		}
+		xlogger.Info("[end]-%s", e.SQLFile)
+	}
+	return sstdout, sstderr, err
+}
+
+func (t *SimulationTask) match(regularDbNames []string) (matched []string, err error) {
+	for _, regexpStr := range regularDbNames {
+		re, err := regexp.Compile(regexpStr)
+		if err != nil {
+			logger.Error(" regexp.Compile(%s) failed:%s", regexpStr, err.Error())
+			return nil, err
+		}
+		for _, db := range t.dbsExcludeSysDb {
+			if re.MatchString(db) {
+				matched = append(matched, db)
+			}
+		}
+	}
+	return
+}
+
+func (t *SimulationTask) getExtmap() map[string]string {
+	return map[string]string{
+		"uid":        t.Uid,
+		"node_id":    t.NodeId,
+		"root_id":    t.RootId,
+		"version_id": t.VersionId,
+	}
+}
diff --git a/dbm-services/mysql/db-simulation/app/syntax/alter_table_rule.go b/dbm-services/mysql/db-simulation/app/syntax/alter_table_rule.go
new file mode 100644
index 0000000000..2c920a6895
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/syntax/alter_table_rule.go
@@ -0,0 +1,46 @@
+package syntax
+
+import util "dbm-services/common/go-pubpkg/cmutil"
+
+// Checker checks each alter command against the high-risk alter table rules
+func (c AlterTableResult) Checker(mysqlVersion string) (r *CheckerResult) {
+	r = &CheckerResult{}
+	for _, altercmd := range c.AlterCommands {
+		r.Parse(R.AlterTableRule.HighRiskType, altercmd.Type, "")
+		r.Parse(R.AlterTableRule.HighRiskPkAlterType, altercmd.GetPkAlterType(), "")
+		r.Parse(R.AlterTableRule.AlterUseAfter, altercmd.After, "")
+	}
+	r.Parse(R.AlterTableRule.AddColumnMixed, c.GetAllAlterType(), "")
+	return
+}
+
+// GetAllAlterType extracts the alter types from a statement.
+// For a statement like `alter table add a int(11),drop b,add d int(11);`
+// the alter types add, drop, add are pulled out and de-duplicated
+// to get all distinct alter types
+func (c AlterTableResult) GetAllAlterType() (alterTypes []string) {
+	for _, a := range c.AlterCommands {
+		if !util.StringsHas([]string{"algorithm", "lock"}, a.Type) {
+			alterTypes = append(alterTypes, a.Type)
+		}
+	}
+	return util.RemoveDuplicate(alterTypes)
+}
+
+// GetPkAlterType returns the alter type when the changed column is a primary key
+func (a AlterCommand) GetPkAlterType() string {
+	if a.ColDef.PrimaryKey {
+		return a.Type
+	}
+	return ""
+}
+
+// GetAlterAlgorithm returns the algorithm option of the alter command
+func (a AlterCommand) GetAlterAlgorithm() string {
+	return a.Algorithm
+}
diff --git a/dbm-services/mysql/db-simulation/app/syntax/create_db_rule.go b/dbm-services/mysql/db-simulation/app/syntax/create_db_rule.go
new file mode 100644
index 0000000000..c92138f426
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/syntax/create_db_rule.go
@@ -0,0 +1,18 @@
+package syntax
+
+// Checker checks the database name against the naming rules
+func (c CreateDBResult) Checker(mysqlVersion string) (r *CheckerResult) {
+	r = &CheckerResult{}
+	// check database naming conventions;
+	// R.CreateTableRule.NormalizedName names the key in the yaml rule file,
+	// which resolves the rule item compared against val
+	etypesli, charsli := NameCheck(c.DbName, mysqlVersion)
+	for i, etype := range etypesli {
+		r.Parse(R.CreateTableRule.NormalizedName, etype, charsli[i])
+	}
+	return
+}
+
+// SpiderChecker runs the same checks for spider
+func (c CreateDBResult) SpiderChecker(mysqlVersion string) (r *CheckerResult) {
+	return c.Checker(mysqlVersion)
+}
diff --git a/dbm-services/mysql/db-simulation/app/syntax/create_table_rule.go b/dbm-services/mysql/db-simulation/app/syntax/create_table_rule.go
new file mode 100644
index 0000000000..2c3c481236
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/syntax/create_table_rule.go
@@ -0,0 +1,227 @@
+package syntax
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-simulation/pkg/util"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+// SpiderChecker checks a spider create table statement
+func (c CreateTableResult) SpiderChecker(spiderVersion string) (r *CheckerResult) {
+	r = &CheckerResult{}
+	r.Parse(SR.SpiderCreateTableRule.CreateTbLike, !c.IsCreateTableLike, "")
+	r.Parse(SR.SpiderCreateTableRule.CreateWithSelect, !c.IsCreateTableSelect, "")
+	r.Parse(SR.SpiderCreateTableRule.ColChasetNotEqTbChaset, c.ColCharsetEqTbCharset(), "")
+	legal, msg := c.ValidateSpiderComment()
+	r.Parse(SR.SpiderCreateTableRule.IllegalComment, legal, msg)
+	// the comment is legal and non-empty
+	if legal {
+		shardKeyIsIndex := c.ShardKeyIsIndex()
+		r.Parse(SR.SpiderCreateTableRule.ShardKeyNotIndex, shardKeyIsIndex, "")
+		if !shardKeyIsIndex {
+			r.Parse(SR.SpiderCreateTableRule.ShardKeyNotPk, c.ShardKeyIsNotPrimaryKey(), "")
+		}
+	}
+	return
+}
+
+// Checker checks a create table statement against the general rules
+func (c CreateTableResult) Checker(mysqlVersion string) (r *CheckerResult) {
+	r = &CheckerResult{}
+	r.Parse(R.CreateTableRule.SuggestEngine, c.GetEngine(), "")
+	r.Parse(R.CreateTableRule.SuggestBlobColumCount, c.BlobColumCount(), "")
+	// check table naming conventions;
+	// R.CreateTableRule.NormalizedName names the key in the yaml rule file,
+	// which resolves the rule item compared against val
+	etypesli, charsli := NameCheck(c.TableName, mysqlVersion)
+	for i, etype := range etypesli {
+		r.Parse(R.CreateTableRule.NormalizedName, etype, charsli[i])
+	}
+	return
+}
+
+// BlobColumCount counts the blob columns of a create table statement,
+// used to check whether the blob/text column limit is exceeded
+func (c CreateTableResult) BlobColumCount() (blobColumCount int) {
+	for _, v := range c.CreateDefinitions.ColDefs {
+		if v.Type == "blob" {
+			blobColumCount++
+		}
+	}
+	logger.Info("blobColumCount:%d", blobColumCount)
+	return
+}
+
+// ShardKeyIsIndex reports whether the shard_key in the table comment is covered by an index
+func (c CreateTableResult) ShardKeyIsIndex() bool {
+	cmt := c.GetComment()
+	if cmutil.IsEmpty(cmt) {
+		return len(c.CreateDefinitions.KeyDefs) > 0
+	}
+	if !strings.Contains(cmt, "shard_key") {
+		return true
+	}
+	sk, err := util.ParseGetShardKeyForSpider(cmt)
+	if err != nil {
+		logger.Error("parse get shardkey %s", err.Error())
+		return false
+	}
+	for _, v := range c.CreateDefinitions.KeyDefs {
+		for _, k := range v.KeyParts {
+			if strings.Compare(k.ColName, sk) == 0 {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// ShardKeyIsNotPrimaryKey checks the shard_key against the primary key; it
+// returns true when there is no shard_key or the shard_key is a primary key column
+func (c CreateTableResult) ShardKeyIsNotPrimaryKey() bool {
+	cmt := c.GetComment()
+	logger.Info("will check %s ,ShardKeyIsNotPrimaryKey", cmt)
+	if cmutil.IsEmpty(cmt) {
+		return true
+	}
+	if !strings.Contains(cmt, "shard_key") {
+		return true
+	}
+	logger.Info("will check xaxsasxaxax ")
+	sk, err := util.ParseGetShardKeyForSpider(cmt)
+	if err != nil {
+		logger.Error("parse get shardkey %s", err.Error())
+		return false
+	}
+	for _, v := range c.CreateDefinitions.ColDefs {
+		if v.PrimaryKey {
+			logger.Info("get sk %s,pk:%s", sk, v.ColName)
+			if strings.Compare(sk, v.ColName) == 0 {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// GetValFromTbOptions returns the value of the given table option key
+func (c CreateTableResult) GetValFromTbOptions(key string) (val string) {
+	for _, tableOption := range c.TableOptions {
+		if tableOption.Key == key {
+			val = tableOption.Value.(string)
+		}
+	}
+	logger.Info("%s:%s", key, val)
+	return val
+}
+
+// GetEngine returns the table engine
+func (c CreateTableResult) GetEngine() (engine string) {
+	if v, ok := c.TableOptionMap["engine"]; ok {
+		return v.(string)
+	}
+	return ""
+}
+
+// GetComment returns the table comment
+func (c CreateTableResult) GetComment() (comment string) {
+	if v, ok := c.TableOptionMap["comment"]; ok {
+		return v.(string)
+	}
+	return ""
+}
+
+// GetTableCharset returns the table character_set
+func (c CreateTableResult) GetTableCharset() (charset string) {
+	if v, ok := c.TableOptionMap["character_set"]; ok {
+		return v.(string)
+	}
+	return ""
+}
+
+// ValidateSpiderComment validates the spider shard comment and returns whether it is legal plus a message
+func (c CreateTableResult) ValidateSpiderComment() (bool, string) {
+	comment := c.GetComment()
+	if cmutil.IsEmpty(comment) {
+		return true, ""
+	}
+	ret := util.ParseGetSpiderUserComment(comment)
+	switch ret {
+	case 0:
+		return true, "OK"
+	case 1:
+		return false, "SQL CREATE TABLE WITH ERROR TABLE COMMENT"
+	case 2:
+		return false, "UNSUPPORT CREATE TABLE WITH ERROR COMMENT"
+	}
+	return false, ""
+}
+
+// GetAllColCharsets returns the distinct character sets used by the columns
+func (c CreateTableResult) GetAllColCharsets() (charsets []string) {
+	for _, colDef := range c.CreateDefinitions.ColDefs {
+		if !cmutil.IsEmpty(colDef.CharacterSet) {
+			charsets = append(charsets, colDef.CharacterSet)
+		}
+	}
+	return cmutil.RemoveDuplicate(charsets)
+}
+
+// ColCharsetEqTbCharset reports whether every explicit column charset equals the table charset
+func (c CreateTableResult) ColCharsetEqTbCharset() bool {
+	colCharsets := c.GetAllColCharsets()
+	if len(colCharsets) == 0 {
+		return true
+	}
+	if len(colCharsets) > 1 {
+		return false
+	}
+	return strings.Compare(strings.ToUpper(colCharsets[0]), c.GetTableCharset()) == 0
+}
+
+// NameCheck checks a database/table name for reserved keywords, special
+// characters and a non-standard first character
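+//
+//	e.g. NameCheck("_tmp", "mysql5.7") reports a "first_char_exception";
+//	     NameCheck("SELECT", "mysql5.7") reports a "Keyword_exception"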
+func NameCheck(name string, mysqlVersion string) (etypesli, charsli []string) {
+	reservesmap := getKeywords(mysqlVersion)
+	etypesli = []string{}
+	charsli = []string{}
+	if _, ok := reservesmap[name]; ok {
+		etypesli = append(etypesli, "Keyword_exception")
+		charsli = append(charsli, fmt.Sprintf(". The name contains the MySQL keyword: %s. Please avoid using keywords!", name))
+	}
+	specialCharRe := regexp.MustCompile(`[¥$!@#%^&*()+={}\[\];:'"<>,.?/\\| ]`)
+	if specialCharRe.MatchString(name) {
+		chars := specialCharRe.FindAllString(name, -1)
+		etypesli = append(etypesli, "special_char")
+		charsli = append(charsli, fmt.Sprintf(". The name contains the special characters: %s. Please avoid special characters in database/table names!", chars))
+	}
+	if regexp.MustCompile(`^[_]`).MatchString(name) {
+		etypesli = append(etypesli, "first_char_exception")
+		charsli = append(charsli, ". The first character is non-standard; please start the name with a letter or digit")
+	}
+	return etypesli, charsli
+}
+
+func getKeywords(mysqlVersion string) (keywordsmap map[string]string) {
+	var keysli []string
+	switch mysqlVersion {
+	case "mysql5.6":
+		keysli = MySQL56_KEYWORD
+	case "mysql5.7":
+		keysli = MySQL57_KEYWORD
+	case "mysql8.0":
+		keysli = MySQL80_KEYWORD
+	default:
+		keysli = ALL_KEYWORD
+	}
+	keywordsmap = map[string]string{}
+	for _, key := range keysli {
+		keywordsmap[key] = ""
+	}
+	return keywordsmap
+}
diff --git a/dbm-services/mysql/db-simulation/app/syntax/definer_rule.go b/dbm-services/mysql/db-simulation/app/syntax/definer_rule.go
new file mode 100644
index 0000000000..b38a64faf1
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/syntax/definer_rule.go
@@ -0,0 +1,17 @@
+package syntax
+
+// DefinerBase is the base result for statements that may carry a definer
+type DefinerBase struct {
+	ParseBase
+	Definer UserHost `json:"definer,omitempty"`
+}
+
+// Checker flags statements that specify an explicit definer
+func (c DefinerBase) Checker(mysqlVersion string) (r *CheckerResult) {
+	r = &CheckerResult{}
+	emptydefiner := UserHost{}
+	if c.Definer != emptydefiner {
+		r.Parse(R.CreateTableRule.DefinerRule, c.Command, "")
+	}
+	return
+}
diff --git a/dbm-services/mysql/db-simulation/app/syntax/dml_rule.go b/dbm-services/mysql/db-simulation/app/syntax/dml_rule.go
new file mode 100644
index 0000000000..737264ec90
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/syntax/dml_rule.go
@@ -0,0 +1,15 @@
+package syntax
+
+// Checker checks that the delete statement has a where clause or a limit
+func (c DeleteResult) Checker(mysqlVersion string) (r *CheckerResult) {
+	r = &CheckerResult{}
+	r.Parse(R.DmlRule.DmlNotHasWhere, c.HasWhere || c.Limit > 0, "")
+	return
+}
+
+// Checker checks that the update statement has a where clause or a limit
+func (c UpdateResult) Checker(mysqlVersion string) (r *CheckerResult) {
+	r = &CheckerResult{}
+	r.Parse(R.DmlRule.DmlNotHasWhere, c.HasWhere || c.Limit > 0, "")
+	return
+}
diff --git a/dbm-services/mysql/db-simulation/app/syntax/mysql_keyword.go b/dbm-services/mysql/db-simulation/app/syntax/mysql_keyword.go
new file mode 100644
index 0000000000..17b48cc1c2
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/syntax/mysql_keyword.go
@@ -0,0 +1,435 @@
+package syntax
+
+// MySQL56_KEYWORD is the MySQL 5.6 keyword and reserved word list
+var MySQL56_KEYWORD = []string{"ACCESSIBLE", "ACTION", "ADD", "AFTER", "AGAINST", "AGGREGATE",
+	"ALGORITHM", "ALL", "ALTER", "ANALYSE", "ANALYZE", "AND", "ANY", "AS", "ASC", "ASCII",
+	"ASENSITIVE", "AT", "AUTHORS", "AUTOEXTEND_SIZE", "AUTO_INCREMENT", "AVG", "AVG_ROW_LENGTH",
+	"BACKUP", "BEFORE", "BEGIN", "BETWEEN", "BIGINT", "BINARY", "BINLOG", "BIT", "BLOB", "BLOCK",
+	"BOOL", "BOOLEAN", "BOTH", "BTREE", "BY", "BYTE", "CACHE", "CALL", "CASCADE", "CASCADED", "CASE",
+	"CATALOG_NAME", "CHAIN", "CHANGE", "CHANGED", "CHAR", "CHARACTER", "CHARSET", "CHECK", "CHECKSUM",
+	"CIPHER", "CLASS_ORIGIN", "CLIENT", "CLOSE", "COALESCE", "CODE", "COLLATE", "COLLATION", "COLUMN",
+	"COLUMNS", "COLUMN_FORMAT", "COLUMN_NAME", "COMMENT", "COMMIT", "COMMITTED", "COMPACT", "COMPLETION",
+	"COMPRESSED", "CONCURRENT", "CONDITION", "CONNECTION", "CONSISTENT", "CONSTRAINT", "CONSTRAINT_CATALOG",
+	"CONSTRAINT_NAME", "CONSTRAINT_SCHEMA", "CONTAINS", "CONTEXT", "CONTINUE", "CONTRIBUTORS", "CONVERT", "CPU",
+	"CREATE", "CROSS", "CUBE", "CURRENT", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "CURSOR",
+	"CURSOR_NAME", "DATA", "DATABASE", "DATABASES", "DATAFILE", "DATE", "DATETIME", "DAY", "DAY_HOUR", "DAY_MICROSECOND",
+	"DAY_MINUTE", "DAY_SECOND", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DEFAULT_AUTH", "DEFINER",
+	"DELAYED", "DELAY_KEY_WRITE", "DELETE", "DESC", "DESCRIBE", "DES_KEY_FILE", "DETERMINISTIC", "DIAGNOSTICS",
+	"DIRECTORY", "DISABLE", "DISCARD", "DISK", "DISTINCT", "DISTINCTROW", "DIV", "DO", "DOUBLE", "DROP", "DUAL",
+	"DUMPFILE", "DUPLICATE", "DYNAMIC", "EACH", "ELSE", "ELSEIF", "ENABLE", "ENCLOSED", "END", "ENDS", "ENGINE", "ENGINES",
+	"ENUM", "ERROR", "ERRORS", "ESCAPE", "ESCAPED", "EVENT", "EVENTS", "EVERY", "EXCHANGE", "EXECUTE", "EXISTS", "EXIT",
+	"EXPANSION", "EXPIRE", "EXPLAIN", "EXPORT", "EXTENDED", "EXTENT_SIZE", "FALSE", "FAST", "FAULTS", "FETCH", "FIELDS",
+	"FILE", "FIRST", "FIXED", "FLOAT", "FLOAT4", "FLOAT8",
+	"FLUSH", "FOR", "FORCE", "FOREIGN", "FORMAT", "FOUND", "FROM", "FULL", "FULLTEXT", "FUNCTION", "GENERAL", "GEOMETRY",
+	"GEOMETRYCOLLECTION",
+	"GET", "GET_FORMAT", "GLOBAL", "GRANT", "GRANTS", "GROUP", "HANDLER", "HASH", "HAVING", "HELP", "HIGH_PRIORITY",
+	"HOST", "HOSTS", "HOUR",
+	"HOUR_MICROSECOND", "HOUR_MINUTE", "HOUR_SECOND", "IDENTIFIED", "IF", "IGNORE", "IGNORE_SERVER_IDS", "IMPORT", "IN",
+	"INDEX", "INDEXES",
+	"INFILE", "INITIAL_SIZE", "INNER", "INOUT", "INSENSITIVE", "INSERT", "INSERT_METHOD", "INSTALL", "INT", "INT1",
+	"INT2", "INT3", "INT4",
+	"INT8", "INTEGER", "INTERVAL", "INTO", "INVOKER", "IO", "IO_AFTER_GTIDS", "IO_BEFORE_GTIDS", "IO_THREAD", "IPC",
+	"IS", "ISOLATION", "ISSUER",
+	"ITERATE", "JOIN", "KEY", "KEYS", "KEY_BLOCK_SIZE", "KILL", "LANGUAGE", "LAST", "LEADING", "LEAVE", "LEAVES", "LEFT",
+	"LESS", "LEVEL",
+	"LIKE", "LIMIT", "LINEAR", "LINES", "LINESTRING", "LIST", "LOAD", "LOCAL", "LOCALTIME", "LOCALTIMESTAMP", "LOCK",
+	"LOCKS", "LOGFILE", "LOGS",
+	"LONG", "LONGBLOB", "LONGTEXT", "LOOP", "LOW_PRIORITY", "MASTER", "MASTER_AUTO_POSITION", "MASTER_BIND",
+	"MASTER_CONNECT_RETRY", "MASTER_DELAY",
+	"MASTER_HEARTBEAT_PERIOD", "MASTER_HOST", "MASTER_LOG_FILE", "MASTER_LOG_POS", "MASTER_PASSWORD", "MASTER_PORT",
+	"MASTER_RETRY_COUNT", "MASTER_SERVER_ID",
+	"MASTER_SSL", "MASTER_SSL_CA", "MASTER_SSL_CAPATH", "MASTER_SSL_CERT", "MASTER_SSL_CIPHER", "MASTER_SSL_CRL",
+	"MASTER_SSL_CRLPATH", "MASTER_SSL_KEY",
+	"MASTER_SSL_VERIFY_SERVER_CERT", "MASTER_USER", "MATCH", "MAXVALUE", "MAX_CONNECTIONS_PER_HOUR",
+	"MAX_QUERIES_PER_HOUR", "MAX_ROWS", "MAX_SIZE", "MAX_UPDATES_PER_HOUR",
+	"MAX_USER_CONNECTIONS", "MEDIUM", "MEDIUMBLOB",
+	"MEDIUMINT", "MEDIUMTEXT", "MEMORY", "MERGE", "MESSAGE_TEXT", "MICROSECOND", "MIDDLEINT", "MIGRATE", "MINUTE",
+	"MINUTE_MICROSECOND", "MINUTE_SECOND", "MIN_ROWS",
+	"MOD", "MODE", "MODIFIES", "MODIFY", "MONTH", "MULTILINESTRING", "MULTIPOINT", "MULTIPOLYGON", "MUTEX",
+	"MYSQL_ERRNO", "NAME", "NAMES", "NATIONAL", "NATURAL", "NCHAR",
+	"NDB", "NDBCLUSTER", "NEW", "NEXT", "NO", "NODEGROUP", "NONE", "NOT", "NO_WAIT", "NO_WRITE_TO_BINLOG", "NULL",
+	"NUMBER", "NUMERIC", "NVARCHAR", "OFFSET", "OLD_PASSWORD",
+	"ON", "ONE", "ONE_SHOT", "ONLY", "OPEN", "OPTIMIZE", "OPTION", "OPTIONALLY", "OPTIONS", "OR", "ORDER", "OUT",
+	"OUTER", "OUTFILE", "OWNER", "PACK_KEYS", "PAGE", "PARSER",
+	"PARTIAL", "PARTITION", "PARTITIONING", "PARTITIONS", "PASSWORD", "PHASE", "PLUGIN", "PLUGINS", "PLUGIN_DIR",
+	"POINT", "POLYGON", "PORT", "PRECISION", "PREPARE",
+	"PRESERVE", "PREV", "PRIMARY", "PRIVILEGES", "PROCEDURE", "PROCESSLIST", "PROFILE", "PROFILES", "PROXY", "PURGE",
+	"QUARTER", "QUERY", "QUICK", "RANGE", "READ",
+	"READS", "READ_ONLY", "READ_WRITE", "REAL", "REBUILD", "RECOVER", "REDOFILE", "REDO_BUFFER_SIZE", "REDUNDANT",
+	"REFERENCES", "REGEXP", "RELAY", "RELAYLOG",
+	"RELAY_LOG_FILE", "RELAY_LOG_POS", "RELAY_THREAD", "RELEASE", "RELOAD", "REMOVE", "RENAME", "REORGANIZE", "REPAIR",
+	"REPEAT", "REPEATABLE", "REPLACE", "REPLICATION",
+	"REQUIRE", "RESET", "RESIGNAL", "RESTORE", "RESTRICT", "RESUME", "RETURN", "RETURNED_SQLSTATE", "RETURNS", "REVERSE",
+	"REVOKE", "RIGHT", "RLIKE", "ROLLBACK",
+	"ROLLUP", "ROUTINE", "ROW", "ROWS", "ROW_COUNT", "ROW_FORMAT", "RTREE", "SAVEPOINT", "SCHEDULE", "SCHEMA", "SCHEMAS",
+	"SCHEMA_NAME", "SECOND", "SECOND_MICROSECOND",
+	"SECURITY", "SELECT", "SENSITIVE", "SEPARATOR", "SERIAL", "SERIALIZABLE", "SERVER", "SESSION", "SET", "SHARE",
+	"SHOW", "SHUTDOWN", "SIGNAL", "SIGNED", "SIMPLE",
+	"SLAVE", "SLOW", "SMALLINT", "SNAPSHOT", "SOCKET", "SOME", "SONAME", "SOUNDS", "SOURCE", "SPATIAL", "SPECIFIC",
+	"SQL", "SQLEXCEPTION", "SQLSTATE", "SQLWARNING",
+	"SQL_AFTER_GTIDS", "SQL_AFTER_MTS_GAPS", "SQL_BEFORE_GTIDS", "SQL_BIG_RESULT", "SQL_BUFFER_RESULT", "SQL_CACHE",
+	"SQL_CALC_FOUND_ROWS", "SQL_NO_CACHE", "SQL_SMALL_RESULT",
+	"SQL_THREAD", "SQL_TSI_DAY", "SQL_TSI_HOUR", "SQL_TSI_MINUTE", "SQL_TSI_MONTH", "SQL_TSI_QUARTER", "SQL_TSI_SECOND",
+	"SQL_TSI_WEEK", "SQL_TSI_YEAR", "SSL", "START", "STARTING",
+	"STARTS", "STATS_AUTO_RECALC", "STATS_PERSISTENT", "STATS_SAMPLE_PAGES", "STATUS", "STOP", "STORAGE",
+	"STRAIGHT_JOIN", "STRING", "SUBCLASS_ORIGIN", "SUBJECT", "SUBPARTITION",
+	"SUBPARTITIONS", "SUPER", "SUSPEND", "SWAPS", "SWITCHES", "TABLE", "TABLES", "TABLESPACE", "TABLE_CHECKSUM",
+	"TABLE_NAME", "TEMPORARY", "TEMPTABLE", "TERMINATED",
+	"TEXT", "THAN", "THEN", "TIME", "TIMESTAMP", "TIMESTAMPADD", "TIMESTAMPDIFF", "TINYBLOB", "TINYINT", "TINYTEXT",
+	"TO", "TRAILING", "TRANSACTION", "TRIGGER", "TRIGGERS",
+	"TRUE", "TRUNCATE", "TYPE", "TYPES", "UNCOMMITTED", "UNDEFINED", "UNDO", "UNDOFILE", "UNDO_BUFFER_SIZE", "UNICODE",
+	"UNINSTALL", "UNION", "UNIQUE", "UNKNOWN", "UNLOCK",
+	"UNSIGNED", "UNTIL", "UPDATE", "UPGRADE", "USAGE", "USE", "USER", "USER_RESOURCES", "USE_FRM", "USING", "UTC_DATE",
+	"UTC_TIME", "UTC_TIMESTAMP", "VALUE", "VALUES",
+	"VARBINARY", "VARCHAR", "VARCHARACTER", "VARIABLES", "VARYING", "VIEW", "WAIT", "WARNINGS", "WEEK", "WEIGHT_STRING",
+	"WHEN", "WHERE", "WHILE", "WITH", "WORK", "WRAPPER",
+	"WRITE", "X509", "XA", "XML", "XOR", "YEAR", "YEAR_MONTH", "ZEROFILL"}
+
+// MySQL57_KEYWORD is the MySQL 5.7 keyword and reserved word list
+var MySQL57_KEYWORD = []string{"ACCESSIBLE", "ACCOUNT", "ACTION", "ADD", "AFTER", "AGAINST", "AGGREGATE", "ALGORITHM",
+	"ALL", "ALTER",
+	"ALWAYS", "ANALYSE", "ANALYZE", "AND", "ANY", "AS", "ASC", "ASCII", "ASENSITIVE", "AT", "AUTOEXTEND_SIZE",
+	"AUTO_INCREMENT",
+	"AVG", "AVG_ROW_LENGTH", "BACKUP", "BEFORE", "BEGIN", "BETWEEN", "BIGINT", "BINARY", "BINLOG", "BIT", "BLOB",
+	"BLOCK",
+	"BOOL", "BOOLEAN", "BOTH", "BTREE", "BY", "BYTE", "CACHE", "CALL", "CASCADE", "CASCADED", "CASE", "CATALOG_NAME",
+	"CHAIN",
+	"CHANGE", "CHANGED", "CHANNEL", "CHAR", "CHARACTER", "CHARSET", "CHECK", "CHECKSUM", "CIPHER", "CLASS_ORIGIN",
+	"CLIENT",
+	"CLOSE", "COALESCE", "CODE", "COLLATE", "COLLATION", "COLUMN", "COLUMNS", "COLUMN_FORMAT", "COLUMN_NAME", "COMMENT",
+	"COMMIT", "COMMITTED", "COMPACT", "COMPLETION", "COMPRESSED", "COMPRESSION", "CONCURRENT", "CONDITION", "CONNECTION",
+	"CONSISTENT", "CONSTRAINT", "CONSTRAINT_CATALOG", "CONSTRAINT_NAME", "CONSTRAINT_SCHEMA", "CONTAINS", "CONTEXT",
+	"CONTINUE",
+	"CONVERT", "CPU", "CREATE", "CROSS", "CUBE", "CURRENT", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP",
+	"CURRENT_USER",
+	"CURSOR", "CURSOR_NAME", "DATA", "DATABASE", "DATABASES", "DATAFILE", "DATE", "DATETIME", "DAY", "DAY_HOUR",
+	"DAY_MICROSECOND",
+	"DAY_MINUTE", "DAY_SECOND", "DEALLOCATE", "DEC", "DECIMAL", "DECLARE", "DEFAULT", "DEFAULT_AUTH", "DEFINER",
+	"DELAYED", "DELAY_KEY_WRITE",
+	"DELETE", "DESC", "DESCRIBE", "DES_KEY_FILE", "DETERMINISTIC", "DIAGNOSTICS", "DIRECTORY", "DISABLE", "DISCARD",
+	"DISK", "DISTINCT",
+	"DISTINCTROW", "DIV", "DO", "DOUBLE", "DROP", "DUAL", "DUMPFILE", "DUPLICATE", "DYNAMIC", "EACH", "ELSE", "ELSEIF",
+	"ENABLE", "ENCLOSED",
+	"ENCRYPTION", "END", "ENDS", "ENGINE", "ENGINES", "ENUM", "ERROR", "ERRORS", "ESCAPE", "ESCAPED", "EVENT", "EVENTS",
+	"EVERY", "EXCHANGE",
+	"EXECUTE", "EXISTS", "EXIT", "EXPANSION", "EXPIRE", "EXPLAIN", "EXPORT", "EXTENDED", "EXTENT_SIZE", "FALSE", "FAST",
+	"FAULTS", "FETCH",
+	"FIELDS", "FILE", "FILE_BLOCK_SIZE", "FILTER", "FIRST", "FIXED", "FLOAT", "FLOAT4", "FLOAT8", "FLUSH", "FOLLOWS",
+	"FOR", "FORCE", "FOREIGN",
+	"FORMAT", "FOUND", "FROM", "FULL", "FULLTEXT", "FUNCTION", "GENERAL", "GENERATED", "GEOMETRY", "GEOMETRYCOLLECTION",
+	"GET", "GET_FORMAT",
+	"GLOBAL", "GRANT", "GRANTS", "GROUP", "GROUP_REPLICATION", "HANDLER", "HASH", "HAVING", "HELP", "HIGH_PRIORITY",
+	"HOST", "HOSTS", "HOUR",
+	"HOUR_MICROSECOND", "HOUR_MINUTE", "HOUR_SECOND", "IDENTIFIED", "IF", "IGNORE", "IGNORE_SERVER_IDS", "IMPORT", "IN",
+	"INDEX", "INDEXES",
+	"INFILE", "INITIAL_SIZE", "INNER", "INOUT", "INSENSITIVE", "INSERT", "INSERT_METHOD", "INSTALL", "INSTANCE", "INT",
+	"INT1", "INT2", "INT3",
+	"INT4", "INT8", "INTEGER", "INTERVAL", "INTO", "INVOKER", "IO", "IO_AFTER_GTIDS", "IO_BEFORE_GTIDS", "IO_THREAD",
+	"IPC", "IS", "ISOLATION",
+	"ISSUER", "ITERATE", "JOIN", "JSON", "KEY", "KEYS", "KEY_BLOCK_SIZE", "KILL", "LANGUAGE", "LAST", "LEADING", "LEAVE",
+	"LEAVES", "LEFT", "LESS",
+	"LEVEL", "LIKE", "LIMIT", "LINEAR", "LINES", "LINESTRING", "LIST", "LOAD", "LOCAL", "LOCALTIME", "LOCALTIMESTAMP",
+	"LOCK", "LOCKS", "LOGFILE",
+	"LOGS", "LONG", "LONGBLOB", "LONGTEXT", "LOOP", "LOW_PRIORITY", "MASTER", "MASTER_AUTO_POSITION", "MASTER_BIND",
+	"MASTER_CONNECT_RETRY", "MASTER_DELAY",
+	"MASTER_HEARTBEAT_PERIOD", "MASTER_HOST", "MASTER_LOG_FILE", "MASTER_LOG_POS", "MASTER_PASSWORD", "MASTER_PORT",
+	"MASTER_RETRY_COUNT", "MASTER_SERVER_ID",
+	"MASTER_SSL", "MASTER_SSL_CA", "MASTER_SSL_CAPATH", "MASTER_SSL_CERT", "MASTER_SSL_CIPHER", "MASTER_SSL_CRL",
+	"MASTER_SSL_CRLPATH", "MASTER_SSL_KEY",
+	"MASTER_SSL_VERIFY_SERVER_CERT", "MASTER_TLS_VERSION", "MASTER_USER", "MATCH", "MAXVALUE",
+	"MAX_CONNECTIONS_PER_HOUR", "MAX_QUERIES_PER_HOUR", "MAX_ROWS",
+	"MAX_SIZE", "MAX_STATEMENT_TIME", "MAX_UPDATES_PER_HOUR", "MAX_USER_CONNECTIONS", "MEDIUM", "MEDIUMBLOB",
+	"MEDIUMINT", "MEDIUMTEXT", "MEMORY", "MERGE",
+	"MESSAGE_TEXT", "MICROSECOND", "MIDDLEINT", "MIGRATE", "MINUTE", "MINUTE_MICROSECOND", "MINUTE_SECOND", "MIN_ROWS",
+	"MOD", "MODE", "MODIFIES", "MODIFY",
+	"MONTH", "MULTILINESTRING", "MULTIPOINT", "MULTIPOLYGON", "MUTEX", "MYSQL_ERRNO", "NAME", "NAMES", "NATIONAL",
+	"NATURAL", "NCHAR", "NDB", "NDBCLUSTER",
+	"NEVER", "NEW", "NEXT", "NO", "NODEGROUP", "NONBLOCKING", "NONE", "NOT", "NO_WAIT", "NO_WRITE_TO_BINLOG", "NULL",
+	"NUMBER", "NUMERIC", "NVARCHAR", "OFFSET",
+	"OLD_PASSWORD", "ON", "ONE", "ONLY", "OPEN", "OPTIMIZE", "OPTIMIZER_COSTS", "OPTION", "OPTIONALLY", "OPTIONS", "OR",
+	"ORDER", "OUT", "OUTER", "OUTFILE",
+	"OWNER", "PACK_KEYS", "PAGE", "PARSER", "PARSE_GCOL_EXPR", "PARTIAL", "PARTITION", "PARTITIONING", "PARTITIONS",
+	"PASSWORD", "PHASE", "PLUGIN", "PLUGINS",
+	"PLUGIN_DIR", "POINT", "POLYGON", "PORT", "PRECEDES", "PRECISION", "PREPARE", "PRESERVE", "PREV", "PRIMARY",
+	"PRIVILEGES", "PROCEDURE", "PROCESSLIST",
+	"PROFILE", "PROFILES", "PROXY", "PURGE", "QUARTER", "QUERY", "QUICK", "RANGE", "READ", "READS", "READ_ONLY",
+	"READ_WRITE", "REAL", "REBUILD", "RECOVER",
+	"REDOFILE", "REDO_BUFFER_SIZE", "REDUNDANT", "REFERENCES", "REGEXP", "RELAY", "RELAYLOG", "RELAY_LOG_FILE",
+	"RELAY_LOG_POS", "RELAY_THREAD", "RELEASE",
+	"RELOAD", "REMOVE", "RENAME", "REORGANIZE", "REPAIR", "REPEAT", "REPEATABLE", "REPLACE", "REPLICATE_DO_DB",
+	"REPLICATE_DO_TABLE", "REPLICATE_IGNORE_DB",
+	"REPLICATE_IGNORE_TABLE", "REPLICATE_REWRITE_DB", "REPLICATE_WILD_DO_TABLE", "REPLICATE_WILD_IGNORE_TABLE",
+	"REPLICATION", "REQUIRE", "RESET", "RESIGNAL",
+	"RESTORE", "RESTRICT", "RESUME", "RETURN", "RETURNED_SQLSTATE", "RETURNS", "REVERSE", "REVOKE", "RIGHT", "RLIKE",
+	"ROLLBACK", "ROLLUP", "ROTATE", "ROUTINE",
+	"ROW", "ROWS", "ROW_COUNT", "ROW_FORMAT", "RTREE", "SAVEPOINT", "SCHEDULE", "SCHEMA", "SCHEMAS", "SCHEMA_NAME",
+	"SECOND", "SECOND_MICROSECOND", "SECURITY",
+	"SELECT", "SENSITIVE", "SEPARATOR", "SERIAL", "SERIALIZABLE", "SERVER", "SESSION", "SET", "SHARE", "SHOW",
+	"SHUTDOWN", "SIGNAL", "SIGNED", "SIMPLE", "SLAVE",
+	"SLOW", "SMALLINT", "SNAPSHOT", "SOCKET", "SOME", "SONAME", "SOUNDS", "SOURCE", "SPATIAL", "SPECIFIC", "SQL",
+	"SQLEXCEPTION", "SQLSTATE", "SQLWARNING",
+	"SQL_AFTER_GTIDS", "SQL_AFTER_MTS_GAPS", "SQL_BEFORE_GTIDS", "SQL_BIG_RESULT", "SQL_BUFFER_RESULT", "SQL_CACHE",
+	"SQL_CALC_FOUND_ROWS", "SQL_NO_CACHE",
+	"SQL_SMALL_RESULT", "SQL_THREAD", "SQL_TSI_DAY", "SQL_TSI_HOUR", "SQL_TSI_MINUTE", "SQL_TSI_MONTH",
+	"SQL_TSI_QUARTER", "SQL_TSI_SECOND", "SQL_TSI_WEEK",
+	"SQL_TSI_YEAR", "SSL", "STACKED", "START", "STARTING", "STARTS", "STATS_AUTO_RECALC", "STATS_PERSISTENT",
+	"STATS_SAMPLE_PAGES", "STATUS", "STOP", "STORAGE",
+	"STORED", "STRAIGHT_JOIN", "STRING", "SUBCLASS_ORIGIN", "SUBJECT", "SUBPARTITION", "SUBPARTITIONS", "SUPER",
+	"SUSPEND", "SWAPS", "SWITCHES", "TABLE", "TABLES",
+	"TABLESPACE", "TABLE_CHECKSUM", "TABLE_NAME", "TEMPORARY", "TEMPTABLE", "TERMINATED", "TEXT", "THAN", "THEN", "TIME",
+	"TIMESTAMP", "TIMESTAMPADD", "TIMESTAMPDIFF",
+	"TINYBLOB", "TINYINT", "TINYTEXT", "TO", "TRAILING", "TRANSACTION", "TRIGGER", "TRIGGERS", "TRUE", "TRUNCATE",
+	"TYPE", "TYPES", "UNCOMMITTED", "UNDEFINED", "UNDO",
+	"UNDOFILE", "UNDO_BUFFER_SIZE", "UNICODE", "UNINSTALL", "UNION", "UNIQUE", "UNKNOWN", "UNLOCK", "UNSIGNED", "UNTIL",
+	"UPDATE", "UPGRADE", "USAGE", "USE", "USER",
+	"USER_RESOURCES", "USE_FRM", "USING", "UTC_DATE", "UTC_TIME", "UTC_TIMESTAMP", "VALIDATION", "VALUE", "VALUES",
+	"VARBINARY", "VARCHAR", "VARCHARACTER", "VARIABLES",
+	"VARYING", "VIEW", "VIRTUAL", "WAIT", "WARNINGS", "WEEK", "WEIGHT_STRING", "WHEN", "WHERE", "WHILE", "WITH",
+	"WITHOUT", "WORK", "WRAPPER", "WRITE", "X509", "XA", "XID",
+	"XML", "XOR", "YEAR", "YEAR_MONTH", "ZEROFILL"}
+
+// MySQL80_KEYWORD is the keyword and reserved-word list for MySQL 8.0
+var MySQL80_KEYWORD = []string{"ACCESSIBLE", "ACCOUNT", "ACTION", "ACTIVE", "ADD", "ADMIN", "AFTER", "AGAINST",
+	"AGGREGATE", "ALGORITHM", "ALL",
+	"ALTER", "ALWAYS", "ANALYSE", "ANALYZE", "AND", "ANY", "ARRAY", "AS", "ASC", "ASCII", "ASENSITIVE", "AT",
+	"ATTRIBUTE", "AUTHENTICATION",
+	"AUTOEXTEND_SIZE", "AUTO_INCREMENT", "AVG", "AVG_ROW_LENGTH", "BACKUP", "BEFORE", "BEGIN", "BETWEEN", "BIGINT",
+	"BINARY", "BINLOG",
+	"BIT", "BLOB", "BLOCK", "BOOL", "BOOLEAN", "BOTH", "BTREE", "BUCKETS", "BULK", "BY", "BYTE", "CACHE", "CALL",
+	"CASCADE", "CASCADED",
+	"CASE", "CATALOG_NAME", "CHAIN", "CHALLENGE_RESPONSE", "CHANGE", "CHANGED", "CHANNEL", "CHAR", "CHARACTER",
+	"CHARSET", "CHECK", "CHECKSUM",
+	"CIPHER", "CLASS_ORIGIN", "CLIENT", "CLONE", "CLOSE", "COALESCE", "CODE", "COLLATE", "COLLATION", "COLUMN",
+	"COLUMNS", "COLUMN_FORMAT", "COLUMN_NAME",
+	"COMMENT", "COMMIT", "COMMITTED", "COMPACT", "COMPLETION", "COMPONENT", "COMPRESSED", "COMPRESSION", "CONCURRENT",
+	"CONDITION", "CONNECTION", "CONSISTENT",
+	"CONSTRAINT", "CONSTRAINT_CATALOG", "CONSTRAINT_NAME", "CONSTRAINT_SCHEMA", "CONTAINS", "CONTEXT", "CONTINUE",
+	"CONVERT", "CPU", "CREATE", "CROSS", "CUBE",
+	"CUME_DIST", "CURRENT", "CURRENT_DATE", "CURRENT_TIME", "CURRENT_TIMESTAMP", "CURRENT_USER", "CURSOR", "CURSOR_NAME",
+	"DATA", "DATABASE", "DATABASES",
+	"DATAFILE", "DATE", "DATETIME", "DAY", "DAY_HOUR", "DAY_MICROSECOND", "DAY_MINUTE", "DAY_SECOND", "DEALLOCATE",
+	"DEC", "DECIMAL", "DECLARE", "DEFAULT",
+	"DEFAULT_AUTH", "DEFINER", "DEFINITION", "DELAYED", "DELAY_KEY_WRITE", "DELETE", "DENSE_RANK", "DESC", "DESCRIBE",
+	"DESCRIPTION", "DES_KEY_FILE",
+	"DETERMINISTIC", "DIAGNOSTICS", "DIRECTORY", "DISABLE", "DISCARD", "DISK", "DISTINCT", "DISTINCTROW", "DIV", "DO",
+	"DOUBLE", "DROP", "DUAL", "DUMPFILE",
+	"DUPLICATE", "DYNAMIC", "EACH", "ELSE", "ELSEIF", "EMPTY", "ENABLE", "ENCLOSED", "ENCRYPTION", "END", "ENDS",
+	"ENFORCED", "ENGINE", "ENGINES",
+	"ENGINE_ATTRIBUTE", "ENUM", "ERROR", "ERRORS", "ESCAPE", "ESCAPED", "EVENT", "EVENTS", "EVERY", "EXCEPT", "EXCHANGE",
+	"EXCLUDE", "EXECUTE", "EXISTS",
+	"EXIT", "EXPANSION", "EXPIRE", "EXPLAIN", "EXPORT", "EXTENDED", "EXTENT_SIZE", "FACTOR", "FAILED_LOGIN_ATTEMPTS",
+	"FALSE", "FAST", "FAULTS", "FETCH",
+	"FIELDS", "FILE", "FILE_BLOCK_SIZE", "FILTER", "FINISH", "FIRST", "FIRST_VALUE", "FIXED", "FLOAT", "FLOAT4",
+	"FLOAT8", "FLUSH", "FOLLOWING", "FOLLOWS",
+	"FOR", "FORCE", "FOREIGN", "FORMAT", "FOUND", "FROM", "FULL", "FULLTEXT", "FUNCTION", "GENERAL", "GENERATE",
+	"GENERATED", "GEOMCOLLECTION", "GEOMETRY",
+	"GEOMETRYCOLLECTION", "GET", "GET_FORMAT", "GET_MASTER_PUBLIC_KEY", "GET_SOURCE_PUBLIC_KEY", "GLOBAL", "GRANT",
+	"GRANTS", "GROUP", "GROUPING", "GROUPS",
+	"GROUP_REPLICATION", "GTID_ONLY", "HANDLER", "HASH", "HAVING", "HELP", "HIGH_PRIORITY", "HISTOGRAM", "HISTORY",
+	"HOST", "HOSTS", "HOUR", "HOUR_MICROSECOND",
+	"HOUR_MINUTE", "HOUR_SECOND", "IDENTIFIED", "IF", "IGNORE", "IGNORE_SERVER_IDS", "IMPORT", "IN", "INACTIVE", "INDEX",
+	"INDEXES", "INFILE", "INITIAL",
+	"INITIAL_SIZE", "INITIATE", "INNER", "INOUT", "INSENSITIVE", "INSERT", "INSERT_METHOD", "INSTALL", "INSTANCE", "INT",
+	"INT1", "INT2", "INT3", "INT4", "INT8",
+	"INTEGER", "INTERSECT", "INTERVAL", "INTO", "INVISIBLE", "INVOKER", "IO", "IO_AFTER_GTIDS", "IO_BEFORE_GTIDS",
+	"IO_THREAD", "IPC", "IS", "ISOLATION", "ISSUER",
+	"ITERATE", "JOIN", "JSON", "JSON_TABLE", "JSON_VALUE", "KEY", "KEYRING", "KEYS", "KEY_BLOCK_SIZE", "KILL", "LAG",
+	"LANGUAGE", "LAST", "LAST_VALUE",
+	"LATERAL", "LEAD", "LEADING", "LEAVE", "LEAVES", "LEFT", "LESS", "LEVEL", "LIKE", "LIMIT", "LINEAR", "LINES",
+	"LINESTRING", "LIST", "LOAD", "LOCAL",
+	"LOCALTIME", "LOCALTIMESTAMP", "LOCK", "LOCKED", "LOCKS", "LOGFILE", "LOGS", "LONG", "LONGBLOB", "LONGTEXT", "LOOP",
+	"LOW_PRIORITY", "MASTER", "MASTER_AUTO_POSITION",
+	"MASTER_BIND", "MASTER_COMPRESSION_ALGORITHMS", "MASTER_CONNECT_RETRY", "MASTER_DELAY", "MASTER_HEARTBEAT_PERIOD",
+	"MASTER_HOST", "MASTER_LOG_FILE",
+	"MASTER_LOG_POS", "MASTER_PASSWORD", "MASTER_PORT", "MASTER_PUBLIC_KEY_PATH", "MASTER_RETRY_COUNT",
+	"MASTER_SERVER_ID", "MASTER_SSL", "MASTER_SSL_CA",
+	"MASTER_SSL_CAPATH", "MASTER_SSL_CERT", "MASTER_SSL_CIPHER", "MASTER_SSL_CRL", "MASTER_SSL_CRLPATH",
+	"MASTER_SSL_KEY", "MASTER_SSL_VERIFY_SERVER_CERT",
+	"MASTER_TLS_CIPHERSUITES", "MASTER_TLS_VERSION", "MASTER_USER", "MASTER_ZSTD_COMPRESSION_LEVEL", "MATCH", "MAXVALUE",
+	"MAX_CONNECTIONS_PER_HOUR",
+	"MAX_QUERIES_PER_HOUR", "MAX_ROWS", "MAX_SIZE", "MAX_UPDATES_PER_HOUR", "MAX_USER_CONNECTIONS", "MEDIUM",
+	"MEDIUMBLOB", "MEDIUMINT", "MEDIUMTEXT",
+	"MEMBER", "MEMORY", "MERGE", "MESSAGE_TEXT", "MICROSECOND", "MIDDLEINT", "MIGRATE", "MINUTE", "MINUTE_MICROSECOND",
+	"MINUTE_SECOND", "MIN_ROWS",
+	"MOD", "MODE", "MODIFIES", "MODIFY", "MONTH", "MULTILINESTRING", "MULTIPOINT", "MULTIPOLYGON", "MUTEX",
+	"MYSQL_ERRNO", "NAME", "NAMES", "NATIONAL",
+	"NATURAL", "NCHAR", "NDB", "NDBCLUSTER", "NESTED", "NETWORK_NAMESPACE", "NEVER", "NEW", "NEXT", "NO", "NODEGROUP",
+	"NONE", "NOT", "NOWAIT", "NO_WAIT",
+	"NO_WRITE_TO_BINLOG", "NTH_VALUE", "NTILE", "NULL", "NULLS", "NUMBER", "NUMERIC", "NVARCHAR", "OF", "OFF", "OFFSET",
+	"OJ", "OLD", "ON", "ONE", "ONLY",
+	"OPEN", "OPTIMIZE", "OPTIMIZER_COSTS", "OPTION", "OPTIONAL", "OPTIONALLY", "OPTIONS", "OR", "ORDER", "ORDINALITY",
+	"ORGANIZATION", "OTHERS", "OUT",
+	"OUTER", "OUTFILE", "OVER", "OWNER", "PACK_KEYS", "PAGE", "PARSER", "PARTIAL", "PARTITION", "PARTITIONING",
+	"PARTITIONS", "PASSWORD", "PASSWORD_LOCK_TIME",
+	"PATH", "PERCENT_RANK", "PERSIST", "PERSIST_ONLY", "PHASE", "PLUGIN", "PLUGINS", "PLUGIN_DIR", "POINT", "POLYGON",
+	"PORT", "PRECEDES", "PRECEDING", "PRECISION",
+	"PREPARE", "PRESERVE", "PREV", "PRIMARY", "PRIVILEGES", "PRIVILEGE_CHECKS_USER", "PROCEDURE", "PROCESS",
+	"PROCESSLIST", "PROFILE", "PROFILES", "PROXY",
+	"PURGE", "QUARTER", "QUERY", "QUICK", "RANDOM", "RANGE", "RANK", "READ", "READS", "READ_ONLY", "READ_WRITE", "REAL",
+	"REBUILD", "RECOVER", "RECURSIVE",
+	"REDOFILE", "REDO_BUFFER_SIZE", "REDUNDANT", "REFERENCE", "REFERENCES", "REGEXP", "REGISTRATION", "RELAY",
+	"RELAYLOG", "RELAY_LOG_FILE", "RELAY_LOG_POS",
+	"RELAY_THREAD", "RELEASE", "RELOAD", "REMOTE", "REMOVE", "RENAME", "REORGANIZE", "REPAIR", "REPEAT", "REPEATABLE",
+	"REPLACE", "REPLICA", "REPLICAS",
+	"REPLICATE_DO_DB", "REPLICATE_DO_TABLE", "REPLICATE_IGNORE_DB", "REPLICATE_IGNORE_TABLE", "REPLICATE_REWRITE_DB",
+	"REPLICATE_WILD_DO_TABLE", "REPLICATE_WILD_IGNORE_TABLE",
+	"REPLICATION", "REQUIRE", "REQUIRE_ROW_FORMAT", "RESET", "RESIGNAL", "RESOURCE", "RESPECT", "RESTART", "RESTORE",
+	"RESTRICT", "RESUME", "RETAIN", "RETURN",
+	"RETURNED_SQLSTATE", "RETURNING", "RETURNS", "REUSE", "REVERSE", "REVOKE", "RIGHT", "RLIKE", "ROLE", "ROLLBACK",
+	"ROLLUP", "ROTATE", "ROUTINE", "ROW", "ROWS",
+	"ROW_COUNT", "ROW_FORMAT", "ROW_NUMBER", "RTREE", "SAVEPOINT", "SCHEDULE", "SCHEMA", "SCHEMAS", "SCHEMA_NAME",
+	"SECOND", "SECONDARY", "SECONDARY_ENGINE",
+	"SECONDARY_ENGINE_ATTRIBUTE", "SECONDARY_LOAD", "SECONDARY_UNLOAD", "SECOND_MICROSECOND", "SECURITY", "SELECT",
+	"SENSITIVE", "SEPARATOR", "SERIAL", "SERIALIZABLE",
+	"SERVER", "SESSION", "SET", "SHARE", "SHOW", "SHUTDOWN", "SIGNAL", "SIGNED", "SIMPLE", "SKIP", "SLAVE", "SLOW",
+	"SMALLINT", "SNAPSHOT", "SOCKET", "SOME", "SONAME",
+	"SOUNDS", "SOURCE", "SOURCE_AUTO_POSITION", "SOURCE_BIND", "SOURCE_COMPRESSION_ALGORITHMS", "SOURCE_CONNECT_RETRY",
+	"SOURCE_DELAY", "SOURCE_HEARTBEAT_PERIOD",
+	"SOURCE_HOST", "SOURCE_LOG_FILE", "SOURCE_LOG_POS", "SOURCE_PASSWORD", "SOURCE_PORT", "SOURCE_PUBLIC_KEY_PATH",
+	"SOURCE_RETRY_COUNT", "SOURCE_SSL", "SOURCE_SSL_CA",
+	"SOURCE_SSL_CAPATH", "SOURCE_SSL_CERT", "SOURCE_SSL_CIPHER", "SOURCE_SSL_CRL", "SOURCE_SSL_CRLPATH",
+	"SOURCE_SSL_KEY", "SOURCE_SSL_VERIFY_SERVER_CERT",
+	"SOURCE_TLS_CIPHERSUITES", "SOURCE_TLS_VERSION", "SOURCE_USER", "SOURCE_ZSTD_COMPRESSION_LEVEL", "SPATIAL",
+	"SPECIFIC", "SQL", "SQLEXCEPTION", "SQLSTATE",
+	"SQLWARNING", "SQL_AFTER_GTIDS", "SQL_AFTER_MTS_GAPS", "SQL_BEFORE_GTIDS", "SQL_BIG_RESULT", "SQL_BUFFER_RESULT",
+	"SQL_CACHE", "SQL_CALC_FOUND_ROWS", "SQL_NO_CACHE",
+	"SQL_SMALL_RESULT", "SQL_THREAD", "SQL_TSI_DAY", "SQL_TSI_HOUR", "SQL_TSI_MINUTE", "SQL_TSI_MONTH",
+	"SQL_TSI_QUARTER", "SQL_TSI_SECOND", "SQL_TSI_WEEK", "SQL_TSI_YEAR",
+	"SRID", "SSL", "STACKED", "START", "STARTING", "STARTS", "STATS_AUTO_RECALC", "STATS_PERSISTENT",
+	"STATS_SAMPLE_PAGES", "STATUS", "STOP", "STORAGE", "STORED", "STRAIGHT_JOIN",
+	"STREAM", "STRING", "SUBCLASS_ORIGIN", "SUBJECT", "SUBPARTITION", "SUBPARTITIONS", "SUPER", "SUSPEND", "SWAPS",
+	"SWITCHES", "SYSTEM", "TABLE", "TABLES", "TABLESPACE",
+	"TABLE_CHECKSUM", "TABLE_NAME", "TEMPORARY", "TEMPTABLE", "TERMINATED", "TEXT", "THAN", "THEN", "THREAD_PRIORITY",
+	"TIES", "TIME", "TIMESTAMP", "TIMESTAMPADD",
+	"TIMESTAMPDIFF", "TINYBLOB", "TINYINT", "TINYTEXT", "TLS", "TO", "TRAILING", "TRANSACTION", "TRIGGER", "TRIGGERS",
+	"TRUE", "TRUNCATE", "TYPE", "TYPES", "UNBOUNDED",
+	"UNCOMMITTED", "UNDEFINED", "UNDO", "UNDOFILE", "UNDO_BUFFER_SIZE", "UNICODE", "UNINSTALL", "UNION", "UNIQUE",
+	"UNKNOWN", "UNLOCK", "UNREGISTER", "UNSIGNED", "UNTIL",
+	"UPDATE", "UPGRADE", "URL", "USAGE", "USE", "USER", "USER_RESOURCES", "USE_FRM", "USING", "UTC_DATE", "UTC_TIME",
+	"UTC_TIMESTAMP", "VALIDATION", "VALUE", "VALUES",
+	"VARBINARY", "VARCHAR", "VARCHARACTER", "VARIABLES", "VARYING", "VCPU", "VIEW", "VIRTUAL", "VISIBLE", "WAIT",
+	"WARNINGS", "WEEK", "WEIGHT_STRING", "WHEN", "WHERE",
+	"WHILE", "WINDOW", "WITH", "WITHOUT", "WORK", "WRAPPER", "WRITE", "X509", "XA", "XID", "XML", "XOR", "YEAR",
+	"YEAR_MONTH", "ZEROFILL", "ZONE"}
+
+// ALL_KEYWORD is the union of the keyword lists across the supported MySQL versions
+var ALL_KEYWORD = []string{"CONSTRAINT_NAME", "JSON_VALUE", "SOURCE_HOST", "CUBE", "COLLATE", "EXCEPT",
+	"MASTER_CONNECT_RETRY", "INITIAL", "REDO_BUFFER_SIZE",
+	"HOUR", "DEFINER", "DISABLE", "PERSIST_ONLY", "DESC", "OTHERS", "SKIP", "CHANGED", "KEYS", "HOUR_SECOND", "REDOFILE",
+	"DEC", "CHARSET", "ASCII",
+	"MASTER_PORT", "DEFINITION", "PLUGIN_DIR", "ON", "LINESTRING", "LONGTEXT", "PRESERVE", "MASTER_PUBLIC_KEY_PATH",
+	"UNSIGNED", "COMMITTED", "CHANNEL",
+	"AS", "CUME_DIST", "REUSE", "SWAPS", "SECONDARY_LOAD", "LIKE", "OPTIMIZE", "ENUM", "ELSE", "VISIBLE", "WINDOW",
+	"FOUND", "PARTIAL", "DESCRIPTION",
+	"READ_WRITE", "CPU", "MAX_USER_CONNECTIONS", "TINYBLOB", "FOLLOWS", "CURRENT_USER", "FLOAT8", "MASTER_LOG_POS",
+	"TIMESTAMP", "BETWEEN", "INSTALL",
+	"SOURCE_SSL_CERT", "SHUTDOWN", "EXPANSION", "FLOAT4", "RETURNED_SQLSTATE", "MASTER_TLS_VERSION", "REPLICATE_DO_DB",
+	"SOURCE_SSL_CAPATH", "STATS_SAMPLE_PAGES",
+	"JSON", "DELETE", "INITIAL_SIZE", "OPTIONALLY", "REQUIRE", "TRUNCATE", "SIMPLE", "OPTIMIZER_COSTS",
+	"REPLICATE_WILD_DO_TABLE", "FORCE", "FIRST_VALUE", "AND",
+	"BINARY", "TRUE", "OFF", "INFILE", "STREAM", "NDBCLUSTER", "GENERATED", "ESCAPE", "DATABASES", "VALUES", "WAIT",
+	"TINYTEXT", "FLUSH", "IDENTIFIED", "CURRENT",
+	"AGGREGATE", "FILE", "GRANT", "SOUNDS", "ALTER", "EXPLAIN", "FLOAT", "DUMPFILE", "DUAL", "USER_RESOURCES",
+	"MIN_ROWS", "GROUP", "DELAY_KEY_WRITE", "LEAVES",
+	"PARTITIONS", "MAX_UPDATES_PER_HOUR", "SCHEMAS", "BIT", "RETAIN", "ISOLATION", "SRID", "DUPLICATE", "AFTER",
+	"REPLICA", "CALL", "OJ", "TLS", "ADMIN", "TEMPORARY",
+	"DIV", "CONSTRAINT", "SQL_TSI_WEEK", "CROSS", "MASTER_SSL_CERT", "CHECKSUM", "PREPARE", "COMPRESSED", "SERIAL",
+	"LOCALTIME", "SECONDARY_ENGINE", "BOOL", "ROLLBACK",
+	"XOR", "LOCKS", "NO_WRITE_TO_BINLOG", "RELOAD", "SHOW", "PRECEDING", "REPLICATE_IGNORE_TABLE",
+	"SOURCE_AUTO_POSITION", "MAX_ROWS", "INDEX", "SLAVE", "CURRENT_DATE",
+	"PROXY", "RELAY", "TERMINATED", "DISTINCT", "X509", "ENGINE_ATTRIBUTE", "CURSOR_NAME", "RENAME", "SUBPARTITIONS",
+	"NO", "QUARTER", "REBUILD", "WRAPPER", "MASTER",
+	"SQL_BEFORE_GTIDS", "THAN", "OR", "MEMORY", "OFFSET", "UNDOFILE", "ORDINALITY", "VARYING", "ERROR", "IPC",
+	"SMALLINT", "SIGNED", "OUTER", "STATS_AUTO_RECALC", "PERCENT_RANK",
+	"DES_KEY_FILE", "INOUT", "NODEGROUP", "URL", "DISTINCTROW", "REPLICATION", "TIME", "MASTER_AUTO_POSITION",
+	"MINUTE_SECOND", "HAVING", "NCHAR", "STOP", "MASTER_SSL_CA",
+	"RETURN", "NATIONAL", "CURSOR", "IGNORE", "NEW", "STORAGE", "CHAIN", "REPLICATE_WILD_IGNORE_TABLE", "DAY_MINUTE",
+	"LIMIT", "PRIMARY", "COMPRESSION", "INACTIVE", "REPEAT",
+	"TEMPTABLE", "EXISTS", "ATTRIBUTE", "ROLE", "OPEN", "INDEXES", "TIMESTAMPDIFF", "REAL", "COLUMNS", "PURGE", "ROTATE",
+	"HIGH_PRIORITY", "MULTIPOINT", "VARCHAR", "ROWS",
+	"NESTED", "GROUP_REPLICATION", "PLUGIN", "DELAYED", "EVENT", "SCHEMA_NAME", "INVISIBLE", "AGAINST", "ANY",
+	"SOURCE_LOG_FILE", "EXTENT_SIZE", "LAST", "ZONE", "PARTITION",
+	"INTERVAL", "REPLICATE_DO_TABLE", "NATURAL", "MESSAGE_TEXT", "MEMBER", "BLOCK", "EXPORT", "ROLLUP", "START", "LEAVE",
+	"UNIQUE", "SOURCE_RETRY_COUNT", "NTILE", "MODIFIES",
+	"BEGIN", "BIGINT", "OWNER", "STRING", "PRECISION", "LAST_VALUE", "JOIN", "RANDOM", "HELP", "REPLACE", "SLOW",
+	"DEFAULT_AUTH", "SQL_BUFFER_RESULT", "TO", "SCHEDULE",
+	"MEDIUM", "LOW_PRIORITY", "FIRST", "SOURCE_COMPRESSION_ALGORITHMS", "USER", "REPLICAS", "OUT", "SQL_TSI_SECOND",
+	"AUTO_INCREMENT", "MAX_STATEMENT_TIME", "GEOMCOLLECTION",
+	"TABLE_NAME", "COMPACT", "CHALLENGE_RESPONSE", "MIDDLEINT", "TABLESPACE", "REPLICATE_REWRITE_DB", "INT1", "UNICODE",
+	"CLOSE", "EXTENDED", "DATETIME", "RESET", "MATCH",
+	"DECIMAL", "EACH", "FETCH", "LESS", "SOURCE_TLS_VERSION", "OPTION", "FILTER", "NUMBER", "MODE", "MASTER_SSL", "LOCK",
+	"COMPLETION", "FALSE", "REGISTRATION", "NAME",
+	"CONDITION", "STRAIGHT_JOIN", "LAG", "WEIGHT_STRING", "BTREE", "TABLE_CHECKSUM", "VALIDATION", "IO", "MONTH",
+	"CIPHER", "RETURNS", "SPECIFIC", "HANDLER", "ENFORCED",
+	"MASTER_HOST", "TIES", "UNKNOWN", "CONSISTENT", "DROP", "MOD", "PROFILES", "NOWAIT", "SOURCE_SSL_CRL",
+	"AUTOEXTEND_SIZE", "PASSWORD", "MASTER_SERVER_ID", "SERVER",
+	"TRIGGERS", "FROM", "COMPONENT", "MINUTE_MICROSECOND", "OF", "CLASS_ORIGIN", "NONBLOCKING", "CONCURRENT",
+	"UTC_TIMESTAMP", "NVARCHAR", "MASTER_SSL_CRLPATH",
+	"SQL_BIG_RESULT", "LOGS", "ALGORITHM", "DESCRIBE", "MEDIUMINT", "BOTH", "MASTER_BIND", "NTH_VALUE", "RANGE",
+	"REMOTE", "THEN", "QUERY", "UNION", "PORT", "FAST",
+	"CASCADE", "LOAD", "INT4", "FAULTS", "CASCADED", "PRECEDES", "BOOLEAN", "INVOKER", "ASENSITIVE", "SET",
+	"SOURCE_PUBLIC_KEY_PATH", "DENSE_RANK", "ROUTINE",
+	"REORGANIZE", "MAX_SIZE", "CLIENT", "DIAGNOSTICS", "ROW_COUNT", "RIGHT", "LOCKED", "GENERATE", "NULLS", "CONNECTION",
+	"SWITCHES", "PREV", "TABLE", "SOURCE_SSL_CRLPATH",
+	"SQL_TSI_HOUR", "CATALOG_NAME", "RETURNING", "NDB", "SOURCE_ZSTD_COMPRESSION_LEVEL", "MASTER_SSL_CIPHER",
+	"AVG_ROW_LENGTH", "SUPER", "DAY_SECOND", "PAGE",
+	"OUTFILE", "LOCAL", "HISTORY", "DEFAULT", "WITH", "KEYRING", "AUTHENTICATION", "EXCLUDE", "ISSUER", "TYPES",
+	"REGEXP", "FOR", "UPDATE", "SAVEPOINT", "CONVERT",
+	"BULK", "ENGINE", "ROW", "DAY_HOUR", "DAY_MICROSECOND", "OLD_PASSWORD", "SOURCE_PASSWORD", "ALL", "MASTER_PASSWORD",
+	"FULLTEXT", "RLIKE", "PASSWORD_LOCK_TIME",
+	"WHEN", "DEALLOCATE", "SOURCE", "LANGUAGE", "RELAYLOG", "EMPTY", "FULL", "NUMERIC", "GROUPS", "DETERMINISTIC",
+	"PROFILE", "UNDEFINED", "FOREIGN", "RESUME",
+	"MASTER_DELAY", "HASH", "SEPARATOR", "BY", "RESTRICT", "MEDIUMBLOB", "GET_MASTER_PUBLIC_KEY", "VALUE", "LIST",
+	"VARBINARY", "UTC_TIME", "PROCESSLIST",
+	"SQLWARNING", "EXCHANGE", "VIRTUAL", "UNDO", "READ_ONLY", "SOURCE_SSL_KEY", "CONTEXT", "YEAR", "GLOBAL", "ALWAYS",
+	"SIGNAL", "ACTION", "GET", "RESTORE",
+	"SQLSTATE", "AVG", "MEDIUMTEXT", "SOURCE_TLS_CIPHERSUITES", "MASTER_SSL_KEY", "CHANGE", "GET_FORMAT",
+	"MULTILINESTRING", "RELAY_LOG_POS", "UTC_DATE", "EVERY",
+	"SQL_TSI_MONTH", "XA", "INTERSECT", "MASTER_TLS_CIPHERSUITES", "RELAY_LOG_FILE", "IO_AFTER_GTIDS", "INTEGER", "EXIT",
+	"HOUR_MINUTE", "ACCOUNT",
+	"THREAD_PRIORITY", "FINISH", "GEOMETRY", "READS", "TINYINT", "TYPE", "BEFORE", "FOLLOWING", "SUBCLASS_ORIGIN",
+	"SHARE", "SCHEMA", "SESSION",
+	"ADD", "INSENSITIVE", "GEOMETRYCOLLECTION", "WITHOUT", "WEEK", "GTID_ONLY", "SUSPEND", "LEVEL", "PARTITIONING",
+	"SECOND", "SELECT", "INSERT_METHOD",
+	"SUBPARTITION", "INTO", "FUNCTION", "ENGINES", "REMOVE", "STATUS", "WORK", "REPLICATE_IGNORE_DB", "OPTIONS",
+	"LOCALTIMESTAMP", "REPAIR", "EVENTS",
+	"MYSQL_ERRNO", "SQL_TSI_MINUTE", "PROCESS", "NAMES", "RELAY_THREAD", "SNAPSHOT", "ENCLOSED", "TEXT", "UPGRADE",
+	"ASC", "EXECUTE", "RESTART", "COLUMN_NAME",
+	"ESCAPED", "HOUR_MICROSECOND", "MASTER_RETRY_COUNT", "COLUMN", "DATE", "DATA", "SQL_NO_CACHE", "STORED", "LATERAL",
+	"READ", "RESPECT",
+	"SOURCE_USER", "RECURSIVE", "END", "CONTINUE", "BLOB", "DOUBLE", "USE_FRM", "TIMESTAMPADD", "RECOVER", "ENDS",
+	"LONGBLOB", "INSERT", "USING",
+	"ROW_FORMAT", "DISK", "AUTHORS", "LINEAR", "MAXVALUE", "XML", "CHARACTER", "INNER", "OLD", "REFERENCES",
+	"SOURCE_SSL_CIPHER", "MAX_CONNECTIONS_PER_HOUR",
+	"SQL_SMALL_RESULT", "LEAD", "DATAFILE", "PATH", "SENSITIVE", "REDUNDANT", "POLYGON", "SQL_CACHE", "FACTOR",
+	"REFERENCE", "ANALYZE", "BACKUP", "SOURCE_HEARTBEAT_PERIOD",
+	"IS", "ENCRYPTION", "XID", "NO_WAIT", "UNREGISTER", "CODE", "SQL_CALC_FOUND_ROWS", "USE", "SQL_AFTER_GTIDS",
+	"MASTER_SSL_CAPATH", "BINLOG", "ORDER",
+	"ZEROFILL", "STARTING", "CURRENT_TIME", "INT8", "TABLES", "NETWORK_NAMESPACE", "IN", "SOME", "CHAR", "CASE", "LONG",
+	"AT", "REVERSE", "FILE_BLOCK_SIZE",
+	"FAILED_LOGIN_ATTEMPTS", "KILL", "TRANSACTION", "SQL", "SQL_THREAD", "PHASE", "WRITE",
+	"MASTER_COMPRESSION_ALGORITHMS", "JSON_TABLE", "TRIGGER", "STACKED",
+	"CONTAINS", "LINES", "SSL", "WARNINGS", "LOGFILE", "SECOND_MICROSECOND", "GET_SOURCE_PUBLIC_KEY",
+	"MAX_QUERIES_PER_HOUR", "UNCOMMITTED", "PRIVILEGE_CHECKS_USER",
+	"MASTER_SSL_CRL", "ARRAY", "ENABLE", "CLONE", "NONE", "DATABASE", "EXPIRE", "WHERE", "PRIVILEGES", "ONE", "CACHE",
+	"STATS_PERSISTENT", "UNINSTALL", "MODIFY",
+	"COLUMN_FORMAT", "COMMENT", "RELEASE", "IF", "UNTIL", "VIEW", "PERSIST", "SPATIAL", "KEY_BLOCK_SIZE", "QUICK", "NOT",
+	"GRANTS", "IMPORT", "MICROSECOND", "RESIGNAL",
+	"INSTANCE", "ROW_NUMBER", "SOURCE_CONNECT_RETRY", "CURRENT_TIMESTAMP", "ERRORS", "USAGE", "PLUGINS",
+	"CONSTRAINT_CATALOG", "SQL_AFTER_MTS_GAPS", "LEADING",
+	"ACCESSIBLE", "SECONDARY_UNLOAD", "LEFT", "SOURCE_LOG_POS", "UNBOUNDED", "MULTIPOLYGON", "MASTER_LOG_FILE",
+	"GENERAL", "ITERATE", "IGNORE_SERVER_IDS",
+	"INT3", "POINT", "HOST", "TRAILING", "DIRECTORY", "NEVER", "SQL_TSI_DAY", "OPTIONAL", "KEY", "REPEATABLE", "NEXT",
+	"SQLEXCEPTION", "BUCKETS", "RESOURCE",
+	"YEAR_MONTH", "SECURITY", "SQL_TSI_YEAR", "MASTER_ZSTD_COMPRESSION_LEVEL", "VARCHARACTER",
+	"SECONDARY_ENGINE_ATTRIBUTE", "SOURCE_SSL_CA", "CONSTRAINT_SCHEMA",
+	"REQUIRE_ROW_FORMAT", "COMMIT", "CHECK", "VCPU", "UNDO_BUFFER_SIZE", "COLLATION", "COALESCE", "DISCARD", "MERGE",
+	"MASTER_HEARTBEAT_PERIOD", "PARSER", "RANK",
+	"MINUTE", "MASTER_SSL_VERIFY_SERVER_CERT", "MUTEX", "UNLOCK", "FIXED", "DAY", "SOCKET", "SONAME", "RTREE", "DYNAMIC",
+	"SQL_TSI_QUARTER", "IO_BEFORE_GTIDS",
+	"SOURCE_BIND", "INT", "PARSE_GCOL_EXPR", "HISTOGRAM", "CONTRIBUTORS", "SOURCE_PORT", "OVER",
+	"SOURCE_SSL_VERIFY_SERVER_CERT", "DO", "ELSEIF", "SERIALIZABLE",
+	"WHILE", "ONE_SHOT", "PACK_KEYS", "ACTIVE", "INITIATE", "SYSTEM", "STARTS", "FORMAT", "SECONDARY", "CREATE", "HOSTS",
+	"SOURCE_DELAY", "NULL", "VARIABLES",
+	"IO_THREAD", "MASTER_USER", "REVOKE", "DECLARE", "GROUPING", "LOOP", "SUBJECT", "BYTE", "PROCEDURE", "MIGRATE",
+	"ORGANIZATION", "SOURCE_SSL", "FIELDS", "ONLY", "INT2", "ANALYSE"}
diff --git a/dbm-services/mysql/db-simulation/app/syntax/rule.go b/dbm-services/mysql/db-simulation/app/syntax/rule.go
new file mode 100644
index 0000000000..3fb987a1ec
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/syntax/rule.go
@@ -0,0 +1,230 @@
+package syntax
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-simulation/app/config"
+	"dbm-services/mysql/db-simulation/model"
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+
+	"github.com/antonmedv/expr"
+	"github.com/antonmedv/expr/vm"
+	"gopkg.in/yaml.v2"
+	"gorm.io/gorm"
+)
+
+// R is the global MySQL rule set, loaded when the package initializes
+var R *Rules
+
+// Checker is implemented by parse results that can check themselves against the rules for a given MySQL version
+type Checker interface {
+	Checker(mysqlVersion string) *CheckerResult
+}
+
+// CheckerResult collects the ban-level and risk-level warnings raised by the checks
+type CheckerResult struct {
+	BanWarns  []string
+	RiskWarns []string
+}
+
+// IsPass reports whether the check raised no ban and no risk warnings
+func (c CheckerResult) IsPass() bool {
+	return len(c.BanWarns) == 0 && len(c.RiskWarns) == 0
+}
+
+// Parse runs the rule against val and records the warning, banned or merely risky, together with the offending text
+func (c *CheckerResult) Parse(rule *RuleItem, val interface{}, s string) {
+	matched, err := rule.CheckItem(val)
+	if matched {
+		if rule.Ban {
+			c.BanWarns = append(c.BanWarns, fmt.Sprintf("%s\n%s", err.Error(), s))
+		} else {
+			c.RiskWarns = append(c.RiskWarns, fmt.Sprintf("%s\n%s", err.Error(), s))
+		}
+	}
+}
+
+const (
+	// DEFAUTL_RULE_FILE is the bundled default MySQL rule file
+	DEFAUTL_RULE_FILE = "rule.yaml"
+	// DEFAUTL_SPIDER_RULE_FILE is the bundled default rule file for spider
+	DEFAUTL_SPIDER_RULE_FILE = "spider_rule.yaml"
+)
+
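+// Rules are loaded from the YAML file (the configured path if it exists,
+// otherwise the bundled default), then overridden per item by rows found in
+// the database, and finally each expression is compiled.
+//
+// An illustrative rule.yaml entry (the concrete values here are assumptions,
+// not the shipped defaults; field names follow RuleItem below):
+//
+//	CommandRule:
+//	  BanCommandRule:
+//	    expr: ' Val in Item '
+//	    item: ["drop_db", "truncate"]
+//	    desc: "banned command"
+//	    ban: true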
+func init() {
+	R = &Rules{}
+	var fileContent []byte
+	var err error
+	if cmutil.FileExists(config.GAppConfig.RulePath) {
+		fileContent, err = os.ReadFile(config.GAppConfig.RulePath)
+	} else {
+		fileContent, err = os.ReadFile(DEFAUTL_RULE_FILE)
+	}
+	if err != nil {
+		logger.Error("failed to read the rule file:%s", err.Error())
+		panic(err)
+	}
+	if err := yaml.Unmarshal(fileContent, R); err != nil {
+		panic(err)
+	}
+	if err = traverseLoadRule(*R); err != nil {
+		logger.Error("load rule from database failed %s", err.Error())
+	}
+	var initCompiles = []*RuleItem{}
+	initCompiles = append(initCompiles, traverseRule(R.CommandRule)...)
+	initCompiles = append(initCompiles, traverseRule(R.CreateTableRule)...)
+	initCompiles = append(initCompiles, traverseRule(R.AlterTableRule)...)
+	initCompiles = append(initCompiles, traverseRule(R.DmlRule)...)
+	for _, c := range initCompiles {
+		if err := c.Compile(); err != nil {
+			panic(err)
+		}
+	}
+}
+
+// RuleItem is a single configurable rule: an expression evaluated against the parsed value and the configured item
+type RuleItem struct {
+	Expr string      `yaml:"expr"`
+	Desc string      `yaml:"desc"`
+	Item interface{} `yaml:"item"`
+	// Ban marks the rule as a banned (blocking) behavior rather than a risk warning
+	Ban         bool `yaml:"ban"`
+	Val         interface{}
+	ruleProgram *vm.Program
+}
+
+// Rules is the MySQL rule set grouped by statement family
+type Rules struct {
+	CommandRule     CommandRule     `yaml:"CommandRule"`
+	CreateTableRule CreateTableRule `yaml:"CreateTableRule"`
+	AlterTableRule  AlterTableRule  `yaml:"AlterTableRule"`
+	DmlRule         DmlRule         `yaml:"DmlRule"`
+}
+
+// CommandRule groups the command-level rules: high-risk and banned commands
+type CommandRule struct {
+	HighRiskCommandRule *RuleItem `yaml:"HighRiskCommandRule"`
+	BanCommandRule      *RuleItem `yaml:"BanCommandRule"`
+}
+
+// CreateTableRule groups the CREATE TABLE rules
+type CreateTableRule struct {
+	SuggestBlobColumCount *RuleItem `yaml:"SuggestBlobColumCount"`
+	SuggestEngine         *RuleItem `yaml:"SuggestEngine"`
+	NeedPrimaryKey        *RuleItem `yaml:"NeedPrimaryKey"`
+	DefinerRule           *RuleItem `yaml:"DefinerRule"`
+	NormalizedName        *RuleItem `yaml:"NormalizedName"`
+}
+
+// AlterTableRule groups the ALTER TABLE rules
+type AlterTableRule struct {
+	HighRiskType        *RuleItem `yaml:"HighRiskType"`
+	HighRiskPkAlterType *RuleItem `yaml:"HighRiskPkAlterType"`
+	AlterUseAfter       *RuleItem `yaml:"AlterUseAfter"`
+	AddColumnMixed      *RuleItem `yaml:"AddColumnMixed"`
+}
+
+// DmlRule groups the DML rules
+type DmlRule struct {
+	DmlNotHasWhere *RuleItem `yaml:"DmlNotHasWhere"`
+}
+
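+// traverseLoadRule overrides each rule in the given rule-set struct with the
+// matching row from the database, if one exists.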
+func traverseLoadRule(ruleSet interface{}) error {
+	tv := reflect.TypeOf(ruleSet)
+	v := reflect.ValueOf(ruleSet)
+	var groupname, rulename string
+	for i := 0; i < tv.NumField(); i++ {
+		groupname = tv.Field(i).Name
+		if v.Field(i).Type().Kind() == reflect.Struct {
+			structField := v.Field(i).Type()
+			for j := 0; j < structField.NumField(); j++ {
+				rulename = structField.Field(j).Name
+				drule, err := model.GetRuleByName(groupname, rulename)
+				if err != nil {
+					if err == gorm.ErrRecordNotFound {
+						logger.Warn("not found group:%s,rule:%s rules in databases", groupname, rulename)
+						continue
+					}
+					logger.Error("from db get  group:%s,rule:%s failed: %s", groupname, rulename, err.Error())
+					return err
+				}
+				rule, err := parseRule(drule)
+				if err != nil {
+					logger.Error("parse rule failed %s", err.Error())
+					return err
+				}
+				logger.Info("%v", &rule)
+				v.Field(i).Field(j).Elem().Set(reflect.ValueOf(rule))
+			}
+		}
+	}
+	logger.Info("load AlterTableRule  %v", R.CommandRule.BanCommandRule.Item)
+	return nil
+}
+
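+// parseRule converts a database rule row into a RuleItem; WarnLevel 1 marks
+// the rule as banned rather than merely risky.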
+func parseRule(drule model.TbSyntaxRule) (rule RuleItem, err error) {
+	iv, err := model.GetItemVal(drule)
+	if err != nil {
+		return RuleItem{}, err
+	}
+	rule = RuleItem{
+		Desc: drule.Desc,
+		Ban:  drule.WarnLevel == 1,
+		Expr: drule.Expr,
+		Item: iv,
+	}
+	return
+}
+
+// traverseRule walks a rule-group struct and collects its *RuleItem fields
+func traverseRule(v interface{}) (rules []*RuleItem) {
+	value := reflect.ValueOf(v) // v is a rule-group struct such as CommandRule
+	for num := 0; num < value.NumField(); num++ {
+		rule := value.Field(num).Interface().(*RuleItem)
+		rules = append(rules, rule)
+	}
+	return rules
+}
+
+// Env is the expression environment: Val is the parsed value, Item the configured rule item
+type Env struct {
+	Val  interface{}
+	Item interface{}
+}
+
+// Compile compiles the rule expression into a program that must evaluate to a bool
+func (i *RuleItem) Compile() (err error) {
+	p, err := expr.Compile(i.Expr, expr.Env(Env{}), expr.AsBool())
+	if err != nil {
+		log.Printf("expr.Compile error %s\n", err.Error())
+		return err
+	}
+	i.ruleProgram = p
+	return
+}
+
+// CheckItem evaluates the compiled rule against val
+//
+//	@receiver i
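+//
+//	For example (illustrative), a rule whose Expr is "Val in Item" with
+//	Item = []string{"drop_table"} matches CheckItem("drop_table"): it
+//	returns matched=true plus an error carrying Desc and the value.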
+func (i *RuleItem) CheckItem(val interface{}) (matched bool, err error) {
+	// i.ruleProgram is the compiled rule; it compares Item with Val:
+	// Item is the configured value from rule.yaml,
+	// Val is the value extracted from the tmysqlparse JSON output.
+	p, err := expr.Run(i.ruleProgram, Env{
+		Item: i.Item,
+		Val:  val,
+	})
+	if err != nil {
+		return false, err
+	}
+	if v, ok := p.(bool); ok {
+		matched = v
+	}
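+	// no match: callers only read err when matched; on a match the returned
+	// error carries the rule description and the offending value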
+	if !matched {
+		return false, fmt.Errorf("")
+	}
+	return matched, fmt.Errorf("%s:%v", i.Desc, val)
+}
diff --git a/dbm-services/mysql/db-simulation/app/syntax/rule_test.go b/dbm-services/mysql/db-simulation/app/syntax/rule_test.go
new file mode 100644
index 0000000000..d13030b5c8
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/syntax/rule_test.go
@@ -0,0 +1,28 @@
+package syntax_test
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/antonmedv/expr"
+)
+
+func TestRule(t *testing.T) {
+	t.Log("start testing...")
+	type CCC struct {
+		Val  interface{}
+		Item interface{}
+	}
+	e := CCC{
+		Item: true,
+	}
+	pgm, err := expr.Compile(" Item ", expr.Env(CCC{}), expr.AsBool())
+	if err != nil {
+		t.Fatal(err)
+	}
+	output, err := expr.Run(pgm, e)
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log(output)
+}
diff --git a/dbm-services/mysql/db-simulation/app/syntax/spider_rule.go b/dbm-services/mysql/db-simulation/app/syntax/spider_rule.go
new file mode 100644
index 0000000000..38c0fa54d1
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/syntax/spider_rule.go
@@ -0,0 +1,65 @@
+package syntax
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-simulation/app/config"
+	"os"
+
+	"gopkg.in/yaml.v2"
+)
+
+// SR is the global spider rule set, loaded when the package initializes
+var SR *SpiderRules
+
+// SpiderChecker is implemented by parse results that can check themselves against the spider rules
+type SpiderChecker interface {
+	SpiderChecker(mysqlVersion string) *CheckerResult
+}
+
+func init() {
+	SR = &SpiderRules{}
+	var fileContent []byte
+	var err error
+	if cmutil.FileExists(config.GAppConfig.SpiderRulePath) {
+		fileContent, err = os.ReadFile(config.GAppConfig.SpiderRulePath)
+	} else {
+		fileContent, err = os.ReadFile(DEFAUTL_SPIDER_RULE_FILE)
+	}
+	if err != nil {
+		logger.Error("read rule config file failed %s", err.Error())
+		panic(err)
+	}
+	if err = yaml.Unmarshal(fileContent, SR); err != nil {
+		logger.Error("yaml Unmarshal failed %s", err.Error())
+		panic(err)
+	}
+	if err = traverseLoadRule(*SR); err != nil {
+		logger.Error("load rule from database failed %s", err.Error())
+	}
+	var initCompiles = []*RuleItem{}
+	initCompiles = append(initCompiles, traverseRule(SR.CommandRule)...)
+	initCompiles = append(initCompiles, traverseRule(SR.SpiderCreateTableRule)...)
+	for _, c := range initCompiles {
+		if err = c.Compile(); err != nil {
+			panic(err)
+		}
+	}
+}
+
+// SpiderRules is the spider rule set grouped by statement family
+type SpiderRules struct {
+	CommandRule           CommandRule           `yaml:"CommandRule"`
+	SpiderCreateTableRule SpiderCreateTableRule `yaml:"SpiderCreateTableRule"`
+}
+
+// SpiderCreateTableRule groups the spider CREATE TABLE rules
+type SpiderCreateTableRule struct {
+	ColChasetNotEqTbChaset *RuleItem `yaml:"ColChasetNotEqTbChaset"`
+	CreateWithSelect       *RuleItem `yaml:"CreateWithSelect"`
+	CreateTbLike           *RuleItem `yaml:"CreateTbLike"`
+	ShardKeyNotPk          *RuleItem `yaml:"ShardKeyNotPk"`
+	ShardKeyNotIndex       *RuleItem `yaml:"ShardKeyNotIndex"`
+	IllegalComment         *RuleItem `yaml:"IllegalComment"`
+}
diff --git a/dbm-services/mysql/db-simulation/app/syntax/syntax.go b/dbm-services/mysql/db-simulation/app/syntax/syntax.go
new file mode 100644
index 0000000000..3e250639f5
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/syntax/syntax.go
@@ -0,0 +1,490 @@
+// Package syntax implements SQL syntax and rule checking on top of tmysqlparse
+package syntax
+
+import (
+	"bufio"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-simulation/app"
+	"dbm-services/mysql/db-simulation/app/config"
+	"dbm-services/mysql/db-simulation/pkg/bkrepo"
+	"encoding/json"
+	"fmt"
+	"io"
+	"math/rand"
+	"net/http"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"runtime/debug"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+// CheckSyntax runs a syntax check and returns the per-file results
+type CheckSyntax interface {
+	Do() (result map[string]*CheckInfo, err error)
+}
+
+type inputFileName = string
+type outputFileName = string
+
+// TmysqlParseSQL checks SQL statements passed in directly
+type TmysqlParseSQL struct {
+	TmysqlParse
+	Sqls []string `json:"sqls"` // SQL statements to check
+}
+
+// TmysqlParseFile checks SQL files downloaded from the artifact repository (bkrepo)
+type TmysqlParseFile struct {
+	TmysqlParse
+	Param CheckSqlFileParam
+}
+
+// CheckSqlFileParam is the parameter block for checking SQL files
+type CheckSqlFileParam struct {
+	BkRepoBasePath string   `json:"bkrepo_base_path"` // relative path in the artifact repository (bkrepo)
+	FileNames      []string `json:"file_names"`       // SQL file names
+	MysqlVersion   string   `json:"mysql_version"`    // MySQL version
+}
+
+// TmysqlParse wraps one tmysqlparse run: binary path, work dirs, per-file results and the bkrepo client
+type TmysqlParse struct {
+	TmysqlParseBinPath string
+	BaseWorkdir        string
+	result             map[string]*CheckInfo
+	runtimeCtx
+	bkRepoClient *bkrepo.BkRepoClient
+}
+
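+// runtimeCtx holds per-run state: the temporary work dir and the mapping from
+// each input SQL file name to its tmysqlparse output (JSON) file name.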
+type runtimeCtx struct {
+	tmpWorkdir string
+	fileMap    map[inputFileName]outputFileName
+}
+
+// CheckInfo is the per-file check result: syntax failures plus risk and ban warnings
+type CheckInfo struct {
+	SyntaxFailInfos []FailedInfo `json:"syntax_fails"`
+	RiskWarnings    []RiskInfo   `json:"highrisk_warnings"`
+	BanWarnings     []RiskInfo   `json:"bancommand_warnings"`
+}
+
+// FailedInfo describes one statement that failed to parse
+type FailedInfo struct {
+	Line      int64  `json:"line"`
+	Sqltext   string `json:"sqltext"`
+	ErrorCode int64  `json:"error_code"`
+	ErrorMsg  string `json:"error_msg"`
+}
+
+// RiskInfo describes one statement that matched a risk or ban rule
+type RiskInfo struct {
+	Line        int64  `json:"line"`
+	CommandType string `json:"command_type"`
+	Sqltext     string `json:"sqltext"`
+	WarnInfo    string `json:"warn_info"`
+}
+
+var lock sync.Mutex
+
+// DoSQL checks SQL files that are already present in BaseWorkdir, skipping the download step
+func (tf *TmysqlParseFile) DoSQL(dbtype string) (result map[string]*CheckInfo, err error) {
+	tf.fileMap = make(map[inputFileName]outputFileName)
+	tf.result = make(map[string]*CheckInfo)
+	tf.tmpWorkdir = tf.BaseWorkdir
+	if err = tf.Execute(); err != nil {
+		logger.Error("failed to execute tmysqlparse: %s", err.Error())
+		return nil, err
+	}
+	logger.Info("err is %v", err)
+	// 对tmysqlparse的处理结果进行分析,为json文件,后面用到了rule
+	mysqlVersion := tf.Param.MysqlVersion
+	if err = tf.AnalyzeParseResult(mysqlVersion, dbtype); err != nil {
+		logger.Error("failed to analyze the parsing result:%s", err.Error())
+		return tf.result, err
+	}
+
+	return tf.result, nil
+}
+
+// Do runs the syntax check for SQL files: download from bkrepo, run tmysqlparse, then analyze the results
+//
+//	@receiver tf
+//	@return result
+//	@return err
+func (tf *TmysqlParseFile) Do(dbtype string) (result map[string]*CheckInfo, err error) {
+	logger.Info("doing....")
+	if err = tf.Init(); err != nil {
+		logger.Error("Do init failed %s", err.Error())
+		return nil, err
+	}
+	// remove the temp dir on exit; failures are only logged, never returned
+	defer tf.delTempDir()
+
+	if err = tf.Downloadfile(); err != nil {
+		logger.Error("failed to download sql file from the product library %s", err.Error())
+		return nil, err
+	}
+
+	if err = tf.Execute(); err != nil {
+		logger.Error("failed to execute tmysqlparse: %s", err.Error())
+		return nil, err
+	}
+	logger.Info("err is %v", err)
+	// 对tmysqlparse的处理结果进行分析,为json文件,后面用到了rule
+	mysqlVersion := tf.Param.MysqlVersion
+	if err = tf.AnalyzeParseResult(mysqlVersion, dbtype); err != nil {
+		logger.Error("failed to analyze the parsing result:%s", err.Error())
+		return tf.result, err
+	}
+
+	return tf.result, nil
+}
+
+// Init creates the temporary work dir and the bkrepo client
+func (t *TmysqlParse) Init() (err error) {
+	tmpDir := fmt.Sprintf("tmysqlparse_%s_%s", time.Now().Format("20060102150405"), strconv.Itoa(rand.Intn(10000)))
+	t.tmpWorkdir = path.Join(t.BaseWorkdir, tmpDir)
+	if err = os.MkdirAll(t.tmpWorkdir, os.ModePerm); err != nil {
+		logger.Error("mkdir %s failed, err:%+v", t.tmpWorkdir, err)
+		return fmt.Errorf("failed to initialize tmysqlparse temporary directory(%s).detail:%s", t.tmpWorkdir, err.Error())
+	}
+	t.bkRepoClient = &bkrepo.BkRepoClient{
+		Client: &http.Client{
+			Transport: &http.Transport{},
+		},
+		BkRepoProject:   config.GAppConfig.BkRepo.Project,
+		BkRepoPubBucket: config.GAppConfig.BkRepo.PublicBucket,
+		BkRepoUser:      config.GAppConfig.BkRepo.User,
+		BkRepoPwd:       config.GAppConfig.BkRepo.Pwd,
+		BkRepoEndpoint:  config.GAppConfig.BkRepo.EndPointUrl,
+	}
+	t.fileMap = make(map[inputFileName]outputFileName)
+	t.result = make(map[string]*CheckInfo)
+	return nil
+}
+
+func (t *TmysqlParse) delTempDir() {
+	if err := os.RemoveAll(t.tmpWorkdir); err != nil {
+		logger.Warn("remove tempDir:" + t.tmpWorkdir + ".error info:" + err.Error())
+	}
+}
+
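+// getCommand builds the tmysqlparse command line for one SQL file.
+// For example (illustrative paths), getCommand("0001.sql") with the output
+// file registered as "0001.sql.json" yields:
+//
+//	<bin> --sql-file=<tmpWorkdir>/0001.sql --output-path=<tmpWorkdir>/0001.sql.json \
+//	  --print-query-mode=2 --output-format='JSON_LINE_PER_OBJECT'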
+func (t *TmysqlParse) getCommand(filename string) (cmd string) {
+	var in, out string
+	in = path.Join(t.tmpWorkdir, filename)
+	if outputFileName, ok := t.fileMap[filename]; ok {
+		out = path.Join(t.tmpWorkdir, outputFileName)
+	}
+	bin := t.TmysqlParseBinPath
+	return fmt.Sprintf(`%s --sql-file=%s --output-path=%s --print-query-mode=2 --output-format='JSON_LINE_PER_OBJECT'`,
+		bin, in, out)
+}
+
+// Downloadfile fetches the SQL files from bkrepo into the temp work dir
+func (tf *TmysqlParseFile) Downloadfile() (err error) {
+	for _, fileName := range tf.Param.FileNames {
+		err = tf.bkRepoClient.Download(tf.Param.BkRepoBasePath, fileName, tf.tmpWorkdir)
+		if err != nil {
+			logger.Error("download %s from bkrepo failed :%s", fileName, err.Error())
+			return err
+		}
+	}
+	return
+}
+
+// Execute runs tmysqlparse over each SQL file, at most 10 in parallel
+//
+//	@receiver tf
+//	@return err
+func (tf *TmysqlParseFile) Execute() (err error) {
+	var wg sync.WaitGroup
+	var mu sync.Mutex
+	var errs []string
+	c := make(chan struct{}, 10)
+	// register the output file names up front so the map is not mutated while goroutines read it
+	for _, fileName := range tf.Param.FileNames {
+		tf.fileMap[fileName] = fileName + ".json"
+	}
+	for _, fileName := range tf.Param.FileNames {
+		wg.Add(1)
+		c <- struct{}{}
+		// pass fileName as an argument: the loop variable is reused across iterations
+		go func(fileName string) {
+			defer wg.Done()
+			command := exec.Command("/bin/bash", "-c", tf.getCommand(fileName))
+			logger.Info("command is %s", command)
+			output, cmdErr := command.CombinedOutput()
+			if cmdErr != nil {
+				mu.Lock()
+				errs = append(errs, fmt.Sprintf("tmysqlparse run failed, error: %s, output: %s", cmdErr.Error(), string(output)))
+				mu.Unlock()
+			}
+			<-c
+		}(fileName)
+	}
+	// wait outside the loop so the files are actually parsed concurrently
+	wg.Wait()
+	if len(errs) > 0 {
+		return fmt.Errorf("errors: %s", strings.Join(errs, "\n"))
+	}
+	return nil
+}
+
+func (tf *TmysqlParse) getAbsoutputfilePath(inputFileName string) string {
+	fileAbPath, _ := filepath.Abs(path.Join(tf.tmpWorkdir, tf.fileMap[inputFileName]))
+	return fileAbPath
+}
+
+// AnalyzeParseResult analyzes every tmysqlparse output file and fills the per-file check results
+func (t *TmysqlParse) AnalyzeParseResult(mysqlVersion string, dbtype string) (err error) {
+	wg := &sync.WaitGroup{}
+	var errs []string
+	c := make(chan struct{}, 10)
+	// 开启多个线程,同时对多个sql文件进行分析
+	for inputFileName := range t.fileMap {
+		wg.Add(1)
+		c <- struct{}{}
+		go func(fileName string) {
+			err = t.AnalyzeOne(fileName, mysqlVersion, dbtype)
+			if err != nil {
+				errs = append(errs, err.Error())
+			}
+			<-c
+			wg.Done()
+		}(inputFileName)
+	}
+	wg.Wait()
+	if len(errs) > 0 {
+		return fmt.Errorf("errors: %s", strings.Join(errs, "\n"))
+	}
+	return err
+}
+
+// ParseResult records a ban or risk warning when the statement's command matches the rule
+func (c *CheckInfo) ParseResult(rule *RuleItem, res ParseLineQueryBase) {
+	matched, err := rule.CheckItem(res.Command)
+	if matched {
+		if rule.Ban {
+			c.BanWarnings = append(c.BanWarnings, RiskInfo{
+				Line:        int64(res.QueryId),
+				Sqltext:     res.QueryString,
+				CommandType: res.Command,
+				WarnInfo:    err.Error(),
+			})
+		} else {
+			c.RiskWarnings = append(c.RiskWarnings, RiskInfo{
+				Line:        int64(res.QueryId),
+				Sqltext:     res.QueryString,
+				CommandType: res.Command,
+				WarnInfo:    err.Error(),
+			})
+		}
+	}
+}
+
+// AnalyzeOne reads one tmysqlparse JSON-lines output file and applies the rules line by line
+func (tf *TmysqlParse) AnalyzeOne(inputfileName string, mysqlVersion string, dbtype string) (err error) {
+	var idx int
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("panic error:%v,stack:%s", r, string(debug.Stack()))
+			logger.Error("Recovered. Error: %v", r)
+			err = fmt.Errorf("line:%d,err:%v", idx, r)
+		}
+	}()
+	lock.Lock()
+	// defer the unlock so the early returns below cannot leave the lock held
+	defer lock.Unlock()
+	tf.result[inputfileName] = &CheckInfo{}
+	f, err := os.Open(tf.getAbsoutputfilePath(inputfileName))
+	if err != nil {
+		logger.Error("open file failed %s", err.Error())
+		return
+	}
+	defer f.Close()
+	reader := bufio.NewReader(f)
+	var syntaxFailInfos []FailedInfo
+	var buf []byte
+	for {
+		idx++
+		line, isPrefix, readErr := reader.ReadLine()
+		if readErr != nil {
+			if readErr == io.EOF {
+				break
+			}
+			logger.Error("read line error %s", readErr.Error())
+			return readErr
+		}
+		buf = append(buf, line...)
+		if isPrefix {
+			continue
+		}
+		// a complete line has been assembled; reset the buffer
+		bs := buf
+		buf = []byte{}
+
+		var res ParseLineQueryBase
+		logger.Debug("buf: %s", string(bs))
+		if len(bs) == 0 {
+			logger.Info("blank line skip")
+			continue
+		}
+		if err = json.Unmarshal(bs, &res); err != nil {
+			logger.Error("json unmasrshal line:%s failed %s", string(bs), err.Error())
+			return
+		}
+		// a non-zero error code means tmysqlparse hit a syntax error
+		if res.ErrorCode != 0 {
+			syntaxFailInfos = append(syntaxFailInfos, FailedInfo{
+				Line:      int64(res.QueryId),
+				Sqltext:   res.QueryString,
+				ErrorCode: int64(res.ErrorCode),
+				ErrorMsg:  res.ErrorMsg,
+			})
+			continue
+		}
+		switch dbtype {
+		case app.MySQL:
+			// syntax is valid, but the statement may still be high risk or banned:
+			// check it against the MySQL command rules
+			tf.result[inputfileName].ParseResult(R.CommandRule.HighRiskCommandRule, res)
+			tf.result[inputfileName].ParseResult(R.CommandRule.BanCommandRule, res)
+			tf.result[inputfileName].runcheck(res, bs, mysqlVersion)
+		case app.Spider:
+			// the same command checks against the spider rule set
+			tf.result[inputfileName].ParseResult(SR.CommandRule.HighRiskCommandRule, res)
+			tf.result[inputfileName].ParseResult(SR.CommandRule.BanCommandRule, res)
+			tf.result[inputfileName].runSpidercheck(res, bs, mysqlVersion)
+		}
+	}
+	tf.result[inputfileName].SyntaxFailInfos = syntaxFailInfos
+	return nil
+}
+
+func (ch *CheckInfo) runSpidercheck(res ParseLineQueryBase, bs []byte, mysqlVersion string) (err error) {
+	var c SpiderChecker
+	// dispatch to the command-specific result struct for the remaining rules
+	switch res.Command {
+	case "create_table":
+		var o CreateTableResult
+		if err = json.Unmarshal(bs, &o); err != nil {
+			logger.Error("json unmasrshal line failed %s", err.Error())
+			return
+		}
+		o.TableOptionMap = ConverTableOptionToMap(o.TableOptions)
+		c = o
+	case "create_db":
+		var o CreateDBResult
+		if err = json.Unmarshal(bs, &o); err != nil {
+			logger.Error("json unmasrshal line failed %s", err.Error())
+			return
+		}
+		c = o
+	}
+	if c == nil {
+		return
+	}
+	// each result struct binds its own SpiderChecker implementation
+	result := c.SpiderChecker(mysqlVersion)
+	if result.IsPass() {
+		return
+	}
+	if len(result.BanWarns) > 0 {
+		ch.BanWarnings = append(ch.BanWarnings, RiskInfo{
+			Line:        int64(res.QueryId),
+			Sqltext:     res.QueryString,
+			CommandType: res.Command,
+			WarnInfo:    prettyErrorsOutput(result.BanWarns),
+		})
+	}
+	if len(result.RiskWarns) > 0 {
+		ch.RiskWarnings = append(ch.RiskWarnings, RiskInfo{
+			Line:        int64(res.QueryId),
+			Sqltext:     res.QueryString,
+			CommandType: res.Command,
+			WarnInfo:    prettyErrorsOutput(result.RiskWarns),
+		})
+	}
+	return err
+}
+
+func (ch *CheckInfo) runcheck(res ParseLineQueryBase, bs []byte, mysqlVersion string) (err error) {
+	var c Checker
+	// dispatch to the command-specific result struct for the remaining rules
+	switch res.Command {
+	case "create_table":
+		var o CreateTableResult
+		if err = json.Unmarshal(bs, &o); err != nil {
+			logger.Error("json unmasrshal line failed %s", err.Error())
+			return
+		}
+		c = o
+	case "alter_table":
+		var o AlterTableResult
+		if err = json.Unmarshal(bs, &o); err != nil {
+			logger.Error("json unmasrshal line failed %s", err.Error())
+			return
+		}
+		c = o
+	case "delete":
+		var o DeleteResult
+		if err = json.Unmarshal(bs, &o); err != nil {
+			logger.Error("json unmasrshal line failed %s", err.Error())
+			return
+		}
+		c = o
+	case "update":
+		var o UpdateResult
+		if err = json.Unmarshal(bs, &o); err != nil {
+			logger.Error("json unmasrshal line failed %s", err.Error())
+			return
+		}
+		c = o
+	case "create_function", "create_trigger", "create_event", "create_procedure", "create_view":
+		var o DefinerBase
+		if err = json.Unmarshal(bs, &o); err != nil {
+			logger.Error("json unmasrshal line failed %s", err.Error())
+			return
+		}
+		logger.Info("detail %v", o.Definer)
+		c = o
+	case "create_db":
+		var o CreateDBResult
+		if err = json.Unmarshal(bs, &o); err != nil {
+			logger.Error("json unmasrshal line failed %s", err.Error())
+			return
+		}
+		c = o
+	}
+
+	if c == nil {
+		return
+	}
+	// each result struct binds its own Checker implementation
+	result := c.Checker(mysqlVersion)
+	if result.IsPass() {
+		return
+	}
+	if len(result.BanWarns) > 0 {
+		ch.BanWarnings = append(ch.BanWarnings, RiskInfo{
+			Line:        int64(res.QueryId),
+			Sqltext:     res.QueryString,
+			CommandType: res.Command,
+			WarnInfo:    prettyErrorsOutput(result.BanWarns),
+		})
+	}
+	if len(result.RiskWarns) > 0 {
+		ch.RiskWarnings = append(ch.RiskWarnings, RiskInfo{
+			Line:        int64(res.QueryId),
+			Sqltext:     res.QueryString,
+			CommandType: res.Command,
+			WarnInfo:    prettyErrorsOutput(result.RiskWarns),
+		})
+	}
+	return err
+}
+
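+// prettyErrorsOutput numbers the warnings one per line, e.g. two inputs yield
+// "Error 1: ...\nError 2: ...\n"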
+func prettyErrorsOutput(warnInfos []string) (msg string) {
+	for idx, v := range warnInfos {
+		msg += fmt.Sprintf("Error %d: %s\n", idx+1, v)
+	}
+	return
+}
diff --git a/dbm-services/mysql/db-simulation/app/syntax/syntax_test.go b/dbm-services/mysql/db-simulation/app/syntax/syntax_test.go
new file mode 100644
index 0000000000..64d7a5fecc
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/syntax/syntax_test.go
@@ -0,0 +1,43 @@
+package syntax_test
+
+import (
+	"bufio"
+	"encoding/json"
+	"io"
+	"os"
+	"testing"
+)
+
+type ParseQueryBase struct {
+	QueryId         int    `json:"query_id"`
+	Command         string `json:"command"`
+	QueryString     string `json:"query_string,omitempty"`
+	ErrorCode       int    `json:"error_code,omitempty"`
+	ErrorMsg        string `json:"error_msg,omitempty"`
+	MinMySQLVersion int    `json:"min_mysql_version"`
+	MaxMySQLVersion int    `json:"max_my_sql_version"`
+}
+
+func Test_tmysqlparse(t *testing.T) {
+	t.Log("starting ...")
+	f, err := os.Open("/data/tmysqlparse_out.json")
+	if err != nil {
+		t.Logf("open file failed %s", err.Error())
+		return
+	}
+	defer f.Close()
+	reader := bufio.NewReader(f)
+	for {
+		line, _, err := reader.ReadLine()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			t.Fatal(err)
+		}
+		var res ParseQueryBase
+		if err = json.Unmarshal(line, &res); err != nil {
+			t.Fatal(err)
+		}
+		t.Log(res)
+	}
+	t.Log("ending ...")
+}
diff --git a/dbm-services/mysql/db-simulation/app/syntax/tmysqlpase.go b/dbm-services/mysql/db-simulation/app/syntax/tmysqlpase.go
new file mode 100644
index 0000000000..4cca444460
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/app/syntax/tmysqlpase.go
@@ -0,0 +1,227 @@
+package syntax
+
+import util "dbm-services/common/go-pubpkg/cmutil"
+
+// ColDef is a column definition parsed from a CREATE or ALTER statement
+type ColDef struct {
+	Type        string `json:"type"`
+	ColName     string `json:"col_name"`
+	DataType    string `json:"data_type"`
+	FieldLength int    `json:"field_length"`
+	Nullable    bool   `json:"nullable"`
+	DefaultVal  struct {
+		Type  string `json:"type"`
+		Value string `json:"value"`
+	} `json:"default_val"`
+	AutoIncrement       bool        `json:"auto_increment"`
+	UniqueKey           bool        `json:"unique_key"`
+	PrimaryKey          bool        `json:"primary_key"`
+	Comment             string      `json:"comment"`
+	CharacterSet        string      `json:"character_set"`
+	Collate             string      `json:"collate"`
+	ReferenceDefinition interface{} `json:"reference_definition"`
+}
+
+// KeyDef is an index (key) definition parsed from a CREATE or ALTER statement
+type KeyDef struct {
+	Type     string `json:"type"`
+	KeyName  string `json:"key_name"`
+	KeyParts []struct {
+		ColName string `json:"col_name"`
+		KeyLen  int    `json:"key_len"`
+	} `json:"key_parts"`
+	KeyAlg              string      `json:"key_alg"`
+	UniqueKey           bool        `json:"unique_key"`
+	PrimaryKey          bool        `json:"primary_key"`
+	Comment             string      `json:"comment"`
+	ForeignKey          bool        `json:"foreign_key"`
+	ReferenceDefinition interface{} `json:"reference_definition"`
+}
+
+// TableOption is a single table option as a key/value pair
+type TableOption struct {
+	Key   string      `json:"key"`
+	Value interface{} `json:"value"`
+}
+
+// ConverTableOptionToMap flattens the table options into a key-to-value map, skipping empty keys.
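+// For example (illustrative):
+//
+//	ConverTableOptionToMap([]TableOption{{Key: "engine", Value: "InnoDB"}})
+//	// => map[string]interface{}{"engine": "InnoDB"}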
+func ConverTableOptionToMap(options []TableOption) map[string]interface{} {
+	r := make(map[string]interface{})
+	for _, v := range options {
+		if !util.IsEmpty(v.Key) {
+			r[v.Key] = v.Value
+		}
+	}
+	return r
+}
+
+// CreateTableResult is the tmysqlparse output for a CREATE TABLE statement
+type CreateTableResult struct {
+	QueryID             int    `json:"query_id"`
+	Command             string `json:"command"`
+	DbName              string `json:"db_name"`
+	TableName           string `json:"table_name"`
+	IsTemporary         bool   `json:"is_temporary"`
+	IfNotExists         bool   `json:"if_not_exists"`
+	IsCreateTableLike   bool   `json:"is_create_table_like"`
+	IsCreateTableSelect bool   `json:"is_create_table_select"`
+	CreateDefinitions   struct {
+		ColDefs []ColDef `json:"col_defs"`
+		KeyDefs []KeyDef `json:"key_defs"`
+	} `json:"create_definitions"`
+	TableOptions     []TableOption          `json:"table_options,omitempty"`
+	TableOptionMap   map[string]interface{} `json:"-"`
+	PartitionOptions interface{}            `json:"partition_options"`
+}
+
+// CreateDBResult is the tmysqlparse output for a CREATE DATABASE statement
+type CreateDBResult struct {
+	QueryID      int    `json:"query_id"`
+	Command      string `json:"command"`
+	DbName       string `json:"db_name"`
+	CharacterSet string `json:"character_set"`
+	Collate      string `json:"collate"`
+}
+
+// AlterTableResult is the tmysqlparse output for an ALTER TABLE statement
+type AlterTableResult struct {
+	QueryID          int            `json:"query_id"`
+	Command          string         `json:"command"`
+	DbName           string         `json:"db_name"`
+	TableName        string         `json:"table_name"`
+	AlterCommands    []AlterCommand `json:"alter_commands"`
+	PartitionOptions interface{}    `json:"partition_options"`
+}
+
+// AlterCommand is one sub-command of an ALTER TABLE statement
+type AlterCommand struct {
+	Type         string        `json:"type"`
+	ColDef       ColDef        `json:"col_def,omitempty"`
+	After        string        `json:"after,omitempty"`
+	KeyDef       KeyDef        `json:"key_def,omitempty"`
+	ColName      string        `json:"col_name,omitempty"`
+	KeyName      string        `json:"key_name,omitempty"`
+	DropPrimary  bool          `json:"drop_primary,omitempty"`
+	DropForeign  bool          `json:"drop_foreign,omitempty"`
+	DbName       string        `json:"db_name,omitempty"`
+	TableName    string        `json:"table_name,omitempty"`
+	OldKeyName   string        `json:"old_key_name,omitempty"`
+	NewKeyName   string        `json:"new_key_name,omitempty"`
+	TableOptions []TableOption `json:"table_options,omitempty"`
+	Algorithm    string        `json:"algorithm,omitempty"`
+	Lock         string        `json:"lock,omitempty"`
+}
+
+// ChangeDbResult is the tmysqlparse output for a USE (change database) statement
+type ChangeDbResult struct {
+	QueryID int    `json:"query_id"`
+	Command string `json:"command"`
+	DbName  string `json:"db_name"`
+}
+
+// ErrorResult carries the error for a statement tmysqlparse could not handle
+type ErrorResult struct {
+	QueryID   int    `json:"query_id"`
+	Command   string `json:"command"`
+	ErrorCode int    `json:"error_code,omitempty"`
+	ErrorMsg  string `json:"error_msg,omitempty"`
+}
+
+// ParseBase is the common header shared by all tmysqlparse result objects
+type ParseBase struct {
+	QueryId     int    `json:"query_id"`
+	Command     string `json:"command"`
+	QueryString string `json:"query_string,omitempty"`
+}
+
+// ParseLineQueryBase is the per-line result header, including error and version-compatibility fields
+type ParseLineQueryBase struct {
+	QueryId         int    `json:"query_id"`
+	Command         string `json:"command"`
+	QueryString     string `json:"query_string,omitempty"`
+	ErrorCode       int    `json:"error_code,omitempty"`
+	ErrorMsg        string `json:"error_msg,omitempty"`
+	MinMySQLVersion int    `json:"min_mysql_version"`
+	MaxMySQLVersion int    `json:"max_my_sql_version"`
+}
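+
+// A sample tmysqlparse output line (illustrative values):
+//
+//	{"query_id":1,"command":"create_table","query_string":"create table t1 (id int)"}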
+
+// UserHost identifies a MySQL account as user@host
+type UserHost struct {
+	User string `json:"user"`
+	Host string `json:"host"`
+}
+
+// CreateView is the tmysqlparse output for a CREATE VIEW statement
+type CreateView struct {
+	ParseBase
+	DbName      string   `json:"db_name,omitempty"`
+	ViewName    string   `json:"view_name,omitempty"`
+	FieldNames  []string `json:"field_names,omitempty"`
+	Definer     UserHost `json:"definer,omitempty"`
+	Algorithm   string   `json:"algorithm,omitempty"`
+	SqlSecurity string   `json:"sql_security,omitempty"`
+	AsSelect    string   `json:"as_select,omitempty"`
+	CheckOption string   `json:"check_option,omitempty"`
+}
+
+// CreateProcedure is the tmysqlparse output for a CREATE PROCEDURE statement
+type CreateProcedure struct {
+	ParseBase
+	Definer     UserHost `json:"definer,omitempty"`
+	SpName      string   `json:"sp_name,omitempty"`
+	SqlSecurity string   `json:"sql_security,omitempty"`
+	DataAccess  string   `json:"data_access,omitempty"`
+}
+
+// CreateTrigger is the tmysqlparse output for a CREATE TRIGGER statement
+type CreateTrigger struct {
+	ParseBase
+	Definer      UserHost `json:"definer,omitempty"`
+	TriggerName  string   `json:"trigger_name,omitempty"`
+	TableName    string   `json:"table_name"`
+	TriggerEvent string   `json:"trigger_event"`
+}
+
+// CreateFunction is the tmysqlparse output for a CREATE FUNCTION statement
+type CreateFunction struct {
+	ParseBase
+	Definer UserHost `json:"definer,omitempty"`
+	// TODO
+}
+
+// CreateEvent is the tmysqlparse output for a CREATE EVENT statement
+type CreateEvent struct {
+	ParseBase
+	Definer UserHost `json:"definer,omitempty"`
+	// TODO
+}
+
+// CreateIndex is the tmysqlparse output for a CREATE INDEX statement
+type CreateIndex struct {
+	ParseBase
+	DbName    string   `json:"db_name,omitempty"`
+	TableName string   `json:"table_name"`
+	KeyDefs   []KeyDef `json:"key_defs"`
+	Algorithm string   `json:"algorithm,omitempty"`
+	Lock      string   `json:"lock,omitempty"`
+}
+
+// DeleteResult is the tmysqlparse output for a DELETE statement
+type DeleteResult struct {
+	ParseBase
+	DbName    string `json:"db_name,omitempty"`
+	TableName string `json:"table_name"`
+	HasWhere  bool   `json:"has_where"`
+	Limit     int    `json:"limit"`
+}
+
+// UpdateResult is the parse result of an UPDATE statement
+type UpdateResult struct {
+	ParseBase
+	DbName           string `json:"db_name,omitempty"`
+	TableName        string `json:"table_name"`
+	UpdateLockOption string `json:"update_lock_option"`
+	HasIgnore        bool   `json:"has_ignore"`
+	HasWhere         bool   `json:"has_where"`
+	Limit            int    `json:"limit"`
+}
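+
+// Illustrative sketch only (an assumption, not part of this patch): parse
+// results like the ones above are typically emitted one JSON object per
+// statement, so a consumer can decode the shared header first and then
+// re-decode the same bytes into the concrete type. The command names
+// ("change_db", "create_view") and the surrounding function are hypothetical;
+// assumes encoding/json and fmt are imported.
+//
+//	var base ParseLineQueryBase
+//	if err := json.Unmarshal(line, &base); err != nil {
+//		return err
+//	}
+//	if base.ErrorCode != 0 {
+//		return fmt.Errorf("query %d: %s", base.QueryId, base.ErrorMsg)
+//	}
+//	switch base.Command {
+//	case "change_db": // hypothetical command name
+//		var r ChangeDbResult
+//		if err := json.Unmarshal(line, &r); err != nil {
+//			return err
+//		}
+//	case "create_view": // hypothetical command name
+//		var r CreateView
+//		if err := json.Unmarshal(line, &r); err != nil {
+//			return err
+//		}
+//	}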
diff --git a/dbm-services/mysql/db-simulation/go.mod b/dbm-services/mysql/db-simulation/go.mod
new file mode 100644
index 0000000000..37b69bcac7
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/go.mod
@@ -0,0 +1,75 @@
+module dbm-services/mysql/db-simulation
+
+go 1.19
+
+require (
+	github.com/antonmedv/expr v1.12.5
+	github.com/gin-contrib/pprof v1.4.0
+	github.com/gin-contrib/requestid v0.0.6
+	github.com/gin-gonic/gin v1.9.0
+	github.com/pkg/errors v0.9.1
+	github.com/spf13/viper v1.15.0
+	gopkg.in/yaml.v2 v2.4.0
+	gorm.io/driver/mysql v1.5.0
+	gorm.io/gorm v1.25.0
+	k8s.io/api v0.22.5
+	k8s.io/apimachinery v0.22.5
+	k8s.io/client-go v0.22.5
+)
+
+require (
+	github.com/bytedance/sonic v1.8.8 // indirect
+	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/gin-contrib/sse v0.1.0 // indirect
+	github.com/go-logr/logr v1.2.2 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/go-playground/validator/v10 v10.12.0 // indirect
+	github.com/go-sql-driver/mysql v1.7.1 // indirect
+	github.com/goccy/go-json v0.10.2 // indirect
+	github.com/gogo/protobuf v1.3.2 // indirect
+	github.com/golang/protobuf v1.5.3 // indirect
+	github.com/google/go-cmp v0.5.9 // indirect
+	github.com/google/gofuzz v1.2.0 // indirect
+	github.com/google/uuid v1.3.0 // indirect
+	github.com/googleapis/gnostic v0.5.5 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/jinzhu/inflection v1.0.0 // indirect
+	github.com/jinzhu/now v1.1.5 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
+	github.com/leodido/go-urn v1.2.3 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/mattn/go-isatty v0.0.18 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/moby/spdystream v0.2.0 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.7 // indirect
+	github.com/spf13/afero v1.9.5 // indirect
+	github.com/spf13/cast v1.5.0 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/subosito/gotenv v1.4.2 // indirect
+	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
+	github.com/ugorji/go/codec v1.2.11 // indirect
+	golang.org/x/arch v0.3.0 // indirect
+	golang.org/x/crypto v0.8.0 // indirect
+	golang.org/x/net v0.9.0 // indirect
+	golang.org/x/oauth2 v0.7.0 // indirect
+	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/term v0.7.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
+	golang.org/x/time v0.3.0 // indirect
+	google.golang.org/appengine v1.6.7 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
+	gopkg.in/inf.v0 v0.9.1 // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+	k8s.io/klog/v2 v2.80.1 // indirect
+	k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
+	sigs.k8s.io/yaml v1.3.0 // indirect
+)
diff --git a/dbm-services/mysql/db-simulation/go.sum b/dbm-services/mysql/db-simulation/go.sum
new file mode 100644
index 0000000000..4ddbbdea91
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/go.sum
@@ -0,0 +1,716 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/antonmedv/expr v1.12.5 h1:Fq4okale9swwL3OeLLs9WD9H6GbgBLJyN/NUHRv+n0E=
+github.com/antonmedv/expr v1.12.5/go.mod h1:FPC8iWArxls7axbVLsW+kpg1mz29A1b2M6jt+hZfDkU=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
+github.com/bytedance/sonic v1.8.8 h1:Kj4AYbZSeENfyXicsYppYKO0K2YWab+i2UTSY7Ukz9Q=
+github.com/bytedance/sonic v1.8.8/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/gin-contrib/pprof v1.4.0 h1:XxiBSf5jWZ5i16lNOPbMTVdgHBdhfGRD5PZ1LWazzvg=
+github.com/gin-contrib/pprof v1.4.0/go.mod h1:RrehPJasUVBPK6yTUwOl8/NP6i0vbUgmxtis+Z5KE90=
+github.com/gin-contrib/requestid v0.0.6 h1:mGcxTnHQ45F6QU5HQRgQUDsAfHprD3P7g2uZ4cSZo9o=
+github.com/gin-contrib/requestid v0.0.6/go.mod h1:9i4vKATX/CdggbkY252dPVasgVucy/ggBeELXuQztm4=
+github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
+github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
+github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
+github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8=
+github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
+github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
+github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI=
+github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA=
+github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
+github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
+github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk=
+github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
+github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA=
+github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
+github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
+github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us=
+github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
+github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM=
+github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
+github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
+github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
+github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
+github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
+github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
+github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
+github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
+github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
+github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k=
+golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gorm.io/driver/mysql v1.5.0 h1:6hSAT5QcyIaty0jfnff0z0CLDjyRgZ8mlMHLqSt7uXM=
+gorm.io/driver/mysql v1.5.0/go.mod h1:FFla/fJuCvyTi7rJQd27qlNX2v3L6deTR1GgTjSOLPo=
+gorm.io/gorm v1.24.7-0.20230306060331-85eaf9eeda11/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
+gorm.io/gorm v1.25.0 h1:+KtYtb2roDz14EQe4bla8CbQlmb9dN3VejSai3lprfU=
+gorm.io/gorm v1.25.0/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.22.5 h1:xk7C+rMjF/EGELiD560jdmwzrB788mfcHiNbMQLIVI8=
+k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs=
+k8s.io/apimachinery v0.22.5 h1:cIPwldOYm1Slq9VLBRPtEYpyhjIm1C6aAMAoENuvN9s=
+k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U=
+k8s.io/client-go v0.22.5 h1:I8Zn/UqIdi2r02aZmhaJ1hqMxcpfJ3t5VqvHtctHYFo=
+k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
+k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
+k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/dbm-services/mysql/db-simulation/handler/handler.go b/dbm-services/mysql/db-simulation/handler/handler.go
new file mode 100644
index 0000000000..7fbe9c5510
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/handler/handler.go
@@ -0,0 +1,202 @@
+// Package handler implements the HTTP handlers of the db-simulation service
+package handler
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-simulation/app/config"
+	"dbm-services/mysql/db-simulation/app/service"
+	"dbm-services/mysql/db-simulation/model"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/gin-gonic/gin"
+)
+
+// Response is the unified HTTP response envelope
+type Response struct {
+	RequestId string      `json:"request_id"`
+	Code      int         `json:"code"`
+	Message   string      `json:"msg"`
+	Data      interface{} `json:"data"`
+}
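+
+// A marshaled Response looks like (illustrative values):
+//	{"request_id":"9f8e7d6c","code":0,"msg":"successfully","data":null}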
+
+// CreateClusterParam is the request body for creating a temporary spider pod cluster
+type CreateClusterParam struct {
+	Pwd     string `json:"pwd"`
+	PodName string `json:"podname"`
+}
+
+// CreateTmpSpiderPodCluster creates a temporary spider (TenDB Cluster) pod cluster for simulation
+func CreateTmpSpiderPodCluster(r *gin.Context) {
+	var param CreateClusterParam
+	if err := r.ShouldBindJSON(&param); err != nil {
+		logger.Error("ShouldBind failed %s", err)
+		SendResponse(r, err, "failed to deserialize parameters", "")
+		return
+	}
+	ps := service.NewDbPodSets()
+	ps.BaseInfo = &service.MySQLPodBaseInfo{
+		PodName: param.PodName,
+		RootPwd: param.Pwd,
+		Charset: "utf8mb4",
+	}
+	ps.DbImage = config.GAppConfig.Image.Tendb57Img
+	ps.TdbCtlImage = config.GAppConfig.Image.TdbCtlImg
+	ps.SpiderImage = config.GAppConfig.Image.SpiderImg
+	if err := ps.CreateClusterPod(); err != nil {
+		logger.Error(err.Error())
+		SendResponse(r, err, "failed to create cluster pod", "")
+		return
+	}
+	SendResponse(r, nil, "ok", "")
+}
+
+// SpiderClusterSimulation queues a SQL simulation task against a spider cluster
+func SpiderClusterSimulation(r *gin.Context) {
+	var param service.SpiderSimulationExecParam
+	requestId := r.GetString("request_id")
+	if err := r.ShouldBindJSON(&param); err != nil {
+		logger.Error("ShouldBind failed %s", err)
+		SendResponse(r, err, "failed to deserialize parameters", requestId)
+		return
+	}
+	img, err := service.GetImgFromMySQLVersion(param.MySQLVersion)
+	if err != nil {
+		logger.Error("GetImgFromMySQLVersion %s failed:%s", param.MySQLVersion, err.Error())
+		SendResponse(r, err, nil, requestId)
+		return
+	}
+
+	if err := model.CreateTask(param.TaskId, requestId); err != nil {
+		logger.Error("create task db record error %s", err.Error())
+		SendResponse(r, err, nil, requestId)
+		return
+	}
+	tsk := service.SimulationTask{
+		RequestId: requestId,
+		DbPodSets: service.NewDbPodSets(),
+		BaseParam: &param.BaseParam,
+	}
+	tsk.DbImage = img
+	tsk.SpiderImage = param.GetSpiderImg()
+	tsk.TdbCtlImage = param.GetTdbctlImg()
+	tsk.BaseInfo = &service.MySQLPodBaseInfo{
+		PodName: fmt.Sprintf("spider-%s-%s", strings.ToLower(param.MySQLVersion),
+			replaceUnderSource(param.TaskId)),
+		Lables: map[string]string{"task_id": replaceUnderSource(param.TaskId),
+			"request_id": requestId},
+		RootPwd: cmutil.RandStr(10),
+		Charset: param.MySQLCharSet,
+	}
+	service.SpiderTaskChan <- tsk
+	SendResponse(r, nil, "request successful", requestId)
+}
+
+// Dbsimulation queues a SQL simulation task against a standalone MySQL instance
+func Dbsimulation(r *gin.Context) {
+	var param service.BaseParam
+	requestId := r.GetString("request_id")
+	if err := r.ShouldBindJSON(&param); err != nil {
+		logger.Error("ShouldBind failed %s", err)
+		SendResponse(r, err, "failed to deserialize parameters", requestId)
+		return
+	}
+	if requestId == "" {
+		SendResponse(r, fmt.Errorf("create request id failed"), nil, requestId)
+		return
+	}
+	img, err := service.GetImgFromMySQLVersion(param.MySQLVersion)
+	if err != nil {
+		logger.Error("GetImgFromMySQLVersion %s failed:%s", param.MySQLVersion, err.Error())
+		SendResponse(r, err, nil, requestId)
+		return
+	}
+	if err := model.CreateTask(param.TaskId, requestId); err != nil {
+		logger.Error("create task db record error %s", err.Error())
+		SendResponse(r, err, nil, requestId)
+		return
+	}
+	tsk := service.SimulationTask{
+		RequestId: requestId,
+		DbPodSets: service.NewDbPodSets(),
+		BaseParam: &param,
+	}
+	tsk.DbImage = img
+	tsk.BaseInfo = &service.MySQLPodBaseInfo{
+		PodName: fmt.Sprintf("tendb-%s-%s", strings.ToLower(param.MySQLVersion),
+			replaceUnderSource(param.TaskId)),
+		Lables: map[string]string{"task_id": replaceUnderSource(param.TaskId),
+			"request_id": requestId},
+		RootPwd: cmutil.RandStr(10),
+		Charset: param.MySQLCharSet,
+	}
+	service.TaskChan <- tsk
+	SendResponse(r, nil, "request successful", requestId)
+}
+
+func replaceUnderSource(str string) string {
+	return strings.ReplaceAll(str, "_", "-")
+}
+
+// T carries the task_id used to look up a simulation task
+type T struct {
+	TaskId string `json:"task_id"`
+}
+
+// QueryTask returns the phase, status and output of a simulation task
+func QueryTask(c *gin.Context) {
+	var param T
+	if err := c.ShouldBindJSON(&param); err != nil {
+		logger.Error("ShouldBind failed %s", err)
+		SendResponse(c, err, "failed to deserialize parameters", "")
+		return
+	}
+	logger.Info("query task_id %s", param.TaskId)
+	var task model.TbSimulationTask
+	if err := model.DB.Where(&model.TbSimulationTask{TaskId: param.TaskId}).First(&task).Error; err != nil {
+		logger.Error("query task failed %s", err.Error())
+		SendResponse(c, err, "query task failed", "")
+		return
+	}
+	if task.Phase != model.Phase_Done {
+		c.JSON(http.StatusOK, Response{
+			Code:    2,
+			Message: fmt.Sprintf("task current phase is %s", task.Phase),
+			Data:    "",
+		})
+		return
+	}
+	switch task.Status {
+	case model.Task_Failed:
+		SendResponse(c, fmt.Errorf("%s", task.SysErrMsg), map[string]interface{}{"stdout": task.Stdout, "stderr": task.Stderr,
+			"errmsg": fmt.Sprintf("the task finished with abnormal status: %s", task.Status)}, "")
+
+	case model.Task_Success:
+		SendResponse(c, nil, map[string]interface{}{"stdout": task.Stdout, "stderr": task.Stderr}, "")
+
+	default:
+		SendResponse(c, fmt.Errorf("unknown task status %s", task.Status), map[string]interface{}{"stdout": task.Stdout,
+			"stderr": task.Stderr,
+			"errmsg": fmt.Sprintf("the task finished with abnormal status: %s", task.Status)}, "")
+	}
+}
+
+// SendResponse writes the unified response envelope; a non-nil err sets code 1
+func SendResponse(r *gin.Context, err error, data interface{}, requestid string) {
+	if err != nil {
+		r.JSON(http.StatusOK, Response{
+			Code:      1,
+			Message:   err.Error(),
+			Data:      data,
+			RequestId: requestid,
+		})
+		return
+	}
+	r.JSON(http.StatusOK, Response{
+		Code:      0,
+		Message:   "successfully",
+		Data:      data,
+		RequestId: requestid,
+	})
+}
diff --git a/dbm-services/mysql/db-simulation/handler/rule.go b/dbm-services/mysql/db-simulation/handler/rule.go
new file mode 100644
index 0000000000..965c114546
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/handler/rule.go
@@ -0,0 +1,45 @@
+package handler
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-simulation/model"
+
+	"github.com/gin-gonic/gin"
+)
+
+// OptRuleParam is the request body for enabling or disabling a rule
+type OptRuleParam struct {
+	RuleId int  `json:"rule_id" binding:"required"`
+	Status bool `json:"status" `
+	// GroupName string `json:"group_name"`
+	// RuleName  string `json:"rule_name"`
+}
+
+// ManageRule enables or disables a syntax rule by id
+func ManageRule(c *gin.Context) {
+	var param OptRuleParam
+	if err := c.ShouldBindJSON(&param); err != nil {
+		logger.Error("ShouldBind failed %s", err)
+		SendResponse(c, err, "failed to deserialize parameters", "")
+		return
+	}
+	result := model.DB.Model(&model.TbSyntaxRule{}).Where(&model.TbSyntaxRule{ID: param.RuleId}).
+		Update("status", param.Status)
+	if result.Error != nil {
+		logger.Error("update rule status failed %s, affected rows %d", result.Error.Error(), result.RowsAffected)
+		SendResponse(c, result.Error, result.Error, "")
+		return
+	}
+	SendResponse(c, nil, "ok", "")
+}
+
+// GetAllRule returns every syntax rule
+func GetAllRule(c *gin.Context) {
+	var rs []model.TbSyntaxRule
+	if err := model.DB.Find(&rs).Error; err != nil {
+		logger.Error("query rules failed %s", err.Error())
+		SendResponse(c, err, err.Error(), "")
+		return
+	}
+	SendResponse(c, nil, rs, "")
+}
diff --git a/dbm-services/mysql/db-simulation/handler/syntax_check.go b/dbm-services/mysql/db-simulation/handler/syntax_check.go
new file mode 100644
index 0000000000..0465c602ab
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/handler/syntax_check.go
@@ -0,0 +1,139 @@
+package handler
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-simulation/app"
+	"dbm-services/mysql/db-simulation/app/syntax"
+	"os"
+	"path"
+	"strings"
+
+	"github.com/gin-gonic/gin"
+	"github.com/spf13/viper"
+)
+
+var tmysqlParserBin string
+var workdir string
+
+func init() {
+	tmysqlParserBin = strings.TrimSpace(viper.GetString("tmysqlparser_bin"))
+	// The container image bundles tmysqlparse alongside the server binary,
+	// so this setting may be left empty when running that image.
+	if len(tmysqlParserBin) == 0 {
+		tmysqlParserBin = "/tmysqlparse"
+	}
+	workdir = strings.TrimSpace(viper.GetString("workdir"))
+	if workdir == "" {
+		workdir = "/tmp"
+	}
+}
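+
+// Both knobs above come from viper; an equivalent config fragment would look like
+// (values are illustrative, the fallbacks above apply when unset):
+//	tmysqlparser_bin: /tmysqlparse
+//	workdir: /tmp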
+
+// SyntaxHandler is a placeholder receiver for the syntax-check endpoints
+type SyntaxHandler struct{}
+
+// CheckSqlStringParam is the request body for checking inline SQL statements
+type CheckSqlStringParam struct {
+	ClusterType string   `json:"cluster_type" binding:"required"`
+	Sqls        []string `json:"sqls" binding:"gt=0,dive,required"` // SQL statements to check
+	Version     string   `json:"version"`                           // MySQL version
+}
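+
+// An illustrative request body for POST /syntax/check/sql (the cluster_type value
+// assumes app.MySQL is the string "mysql"):
+//	{"cluster_type":"mysql","sqls":["create table t1 (id int primary key)"],"version":"5.7"}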
+
+// SyntaxCheckSQL writes the submitted statements to a temporary file and checks them with tmysqlparse
+func SyntaxCheckSQL(r *gin.Context) {
+	requestId := r.GetString("request_id")
+	var param CheckSqlStringParam
+	// Decode the JSON request body into the struct
+	if err := r.ShouldBindJSON(¶m); err != nil {
+		logger.Error("ShouldBind failed %s", err)
+		SendResponse(r, err, nil, requestId)
+		return
+	}
+	sqlContext := strings.Join(param.Sqls, "\n")
+	fileName := "ce_" + cmutil.RandStr(10) + ".sql"
+	f := path.Join(workdir, fileName)
+	err := os.WriteFile(f, []byte(sqlContext), 0666)
+	if err != nil {
+		SendResponse(r, err, err.Error(), requestId)
+		return
+	}
+	check := &syntax.TmysqlParseFile{
+		TmysqlParse: syntax.TmysqlParse{
+			TmysqlParseBinPath: tmysqlParserBin,
+			BaseWorkdir:        workdir,
+		},
+		Param: syntax.CheckSqlFileParam{
+			BkRepoBasePath: "",
+			FileNames:      []string{fileName},
+			MysqlVersion:   param.Version,
+		},
+	}
+	var data map[string]*syntax.CheckInfo
+	logger.Info("cluster type :%s", param.ClusterType)
+	switch strings.ToLower(param.ClusterType) {
+	case app.Spider, app.TendbCluster:
+		data, err = check.DoSQL(app.Spider)
+	case app.MySQL:
+		data, err = check.DoSQL(app.MySQL)
+	default:
+		data, err = check.DoSQL(app.MySQL)
+	}
+
+	if err != nil {
+		SendResponse(r, err, data, requestId)
+		return
+	}
+	SendResponse(r, nil, data, requestId)
+}
+
+// CheckFileParam is the request body for checking SQL files stored in bkrepo
+type CheckFileParam struct {
+	ClusterType string   `json:"cluster_type"`
+	Path        string   `json:"path" binding:"required"`            // relative path of the SQL files in the BlueKing artifact repository
+	Files       []string `json:"files" binding:"gt=0,dive,required"` // SQL file names
+	Version     string   `json:"version"`                            // MySQL version
+}
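+
+// An illustrative request body for POST /syntax/check/file (path and file names are hypothetical):
+//	{"cluster_type":"mysql","path":"sqlfile/xxx","files":["0001_init.sql"],"version":"5.7"}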
+
+// SyntaxCheckFile runs a syntax check against SQL files previously uploaded to bkrepo
+func SyntaxCheckFile(r *gin.Context) {
+	requestId := r.GetString("request_id")
+	var param CheckFileParam
+	// Decode the JSON request body into the struct
+	if err := r.ShouldBindJSON(¶m); err != nil {
+		logger.Error("ShouldBind failed %s", err)
+		SendResponse(r, err, nil, requestId)
+		return
+	}
+	check := &syntax.TmysqlParseFile{
+		TmysqlParse: syntax.TmysqlParse{
+			TmysqlParseBinPath: tmysqlParserBin,
+			BaseWorkdir:        workdir,
+		},
+		Param: syntax.CheckSqlFileParam{
+			BkRepoBasePath: param.Path,
+			FileNames:      param.Files,
+			MysqlVersion:   param.Version,
+		},
+	}
+	var data map[string]*syntax.CheckInfo
+	var err error
+	logger.Info("cluster type :%s", param.ClusterType)
+	switch strings.ToLower(param.ClusterType) {
+	case app.Spider, app.TendbCluster:
+		data, err = check.Do(app.Spider)
+	case app.MySQL:
+		data, err = check.Do(app.MySQL)
+	default:
+		data, err = check.Do(app.MySQL)
+	}
+
+	if err != nil {
+		SendResponse(r, err, data, requestId)
+		return
+	}
+	SendResponse(r, nil, data, requestId)
+}
diff --git a/dbm-services/mysql/db-simulation/handler/updaterule.go b/dbm-services/mysql/db-simulation/handler/updaterule.go
new file mode 100644
index 0000000000..9fe70fa98c
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/handler/updaterule.go
@@ -0,0 +1,86 @@
+package handler
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-simulation/model"
+	"errors"
+	"fmt"
+
+	"github.com/gin-gonic/gin"
+)
+
+// UpdateRuleParam is the request body for updating a rule's item value
+type UpdateRuleParam struct {
+	ID   int         `json:"id" binding:"required"`
+	Item interface{} `json:"item" binding:"required"`
+}
+
+// UpdateRule checks that the submitted item matches the rule's declared item_type and then updates it
+func UpdateRule(r *gin.Context) {
+	logger.Info("UpdateRule...")
+	var param UpdateRuleParam
+	// Decode the JSON request body into the struct
+	if err := r.ShouldBindJSON(&param); err != nil {
+		logger.Error("ShouldBind failed %s", err)
+		SendResponse(r, err, nil, "")
+		return
+	}
+	var tsr model.TbSyntaxRule
+	if err := model.DB.Select("item_type").First(&tsr, param.ID).Error; err != nil {
+		logger.Error("query rule %d failed %s", param.ID, err.Error())
+		SendResponse(r, err, nil, "")
+		return
+	}
+
+	var err error
+	switch v := param.Item.(type) {
+	case float64:
+		// JSON numbers decode to float64; accept only integral values
+		if v == float64(int64(v)) {
+			if tsr.ItemType == "int" {
+				updateTable(param.ID, int(v))
+			} else {
+				errReturn(r, &tsr)
+				return
+			}
+		} else {
+			err = errors.New("item is not an integer")
+			logger.Error("type error: %s", err)
+			SendResponse(r, err, nil, "")
+			return
+		}
+	case bool:
+		if tsr.ItemType == "bool" {
+			updateTable(param.ID, fmt.Sprintf("%t", v))
+		} else {
+			errReturn(r, &tsr)
+			return
+		}
+	case string:
+		if tsr.ItemType == "string" {
+			updateTable(param.ID, fmt.Sprintf("%+q", v))
+		} else {
+			errReturn(r, &tsr)
+			return
+		}
+	case []interface{}:
+		if tsr.ItemType == "arry" {
+			updateTable(param.ID, fmt.Sprintf("%+q", v))
+		} else {
+			errReturn(r, &tsr)
+			return
+		}
+	default:
+		err = errors.New("illegal type")
+		logger.Error("%s", err)
+		SendResponse(r, err, nil, "")
+		return
+	}
+	SendResponse(r, nil, "succeeded", "")
+}
+
+func updateTable(id int, item interface{}) {
+	if err := model.DB.Model(&model.TbSyntaxRule{}).Where("id = ?", id).Update("item", item).Error; err != nil {
+		logger.Error("update rule item failed %s", err.Error())
+	}
+}
+
+func errReturn(r *gin.Context, tsr *model.TbSyntaxRule) {
+	err := fmt.Errorf("%s type required", tsr.ItemType)
+	logger.Error("Item type error: %s", err)
+	SendResponse(r, err, nil, "")
+}
diff --git a/dbm-services/mysql/db-simulation/main.go b/dbm-services/mysql/db-simulation/main.go
new file mode 100644
index 0000000000..cfa5f860b4
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/main.go
@@ -0,0 +1,53 @@
+package main
+
+import (
+	"bytes"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-simulation/app/config"
+	"dbm-services/mysql/db-simulation/model"
+	"dbm-services/mysql/db-simulation/router"
+	"io"
+	"net/http"
+	"os"
+
+	"github.com/gin-contrib/pprof"
+	"github.com/gin-contrib/requestid"
+	"github.com/gin-gonic/gin"
+)
+
+var buildstamp = ""
+var githash = ""
+var version = ""
+
+func main() {
+	// flush buffered log entries when the process exits
+	defer logger.Sync()
+	app := gin.New()
+	pprof.Register(app)
+	app.Use(requestid.New())
+	app.Use(apiLogger)
+	router.RegisterRouter(app)
+	app.POST("/app", func(ctx *gin.Context) {
+		ctx.SecureJSON(http.StatusOK, map[string]interface{}{"buildstamp": buildstamp, "githash": githash,
+			"version": version})
+	})
+	if err := app.Run(config.GAppConfig.ListenAddr); err != nil {
+		logger.Fatal("server stopped: %s", err.Error())
+	}
+}
+
+func init() {
+	logger.New(os.Stdout, true, logger.InfoLevel, map[string]string{})
+}
+
+// apiLogger tags each request with its request id and records POST request bodies
+func apiLogger(c *gin.Context) {
+	rid := requestid.Get(c)
+	c.Set("request_id", rid)
+	var buf bytes.Buffer
+	if c.Request.Method == http.MethodPost {
+		tee := io.TeeReader(c.Request.Body, &buf)
+		body, _ := io.ReadAll(tee)
+		c.Request.Body = io.NopCloser(&buf)
+		if err := model.CreateRequestRecord(rid, string(body)); err != nil {
+			logger.Error("record request body failed %s", err.Error())
+		}
+	}
+	c.Next()
+}
diff --git a/dbm-services/mysql/db-simulation/model/model.go b/dbm-services/mysql/db-simulation/model/model.go
new file mode 100644
index 0000000000..a8615b106a
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/model/model.go
@@ -0,0 +1,82 @@
+// Package model defines the gorm models and database bootstrap for db-simulation
+package model
+
+import (
+	"database/sql"
+	"dbm-services/mysql/db-simulation/app/config"
+	"fmt"
+	"log"
+	"os"
+	"time"
+
+	"gorm.io/driver/mysql"
+	"gorm.io/gorm"
+	"gorm.io/gorm/logger"
+)
+
+// DB is the shared gorm handle used by the model layer
+var DB *gorm.DB
+
+// SqlDB is the underlying database/sql handle behind DB
+var SqlDB *sql.DB
+
+func init() {
+	user := config.GAppConfig.DbConf.User
+	pwd := config.GAppConfig.DbConf.Pwd
+	addr := fmt.Sprintf("%s:%d", config.GAppConfig.DbConf.Host, config.GAppConfig.DbConf.Port)
+	db := config.GAppConfig.DbConf.Name
+	log.Printf("connect to %s", addr)
+	testConn := openDB(user, pwd, addr, "")
+	err := testConn.Exec(fmt.Sprintf("create database IF NOT EXISTS `%s`;", db)).Error
+	if err != nil {
+		log.Fatalf("init create db failed:%s", err.Error())
+	}
+	sqldb, err := testConn.DB()
+	if err != nil {
+		log.Fatalf("init create db failed:%s", err.Error())
+	}
+	sqldb.Close()
+	DB = openDB(user, pwd, addr, db)
+	Migration()
+}
+
+// Migration auto-migrates all model tables
+func Migration() {
+	if err := DB.AutoMigrate(&TbSimulationTask{}, &TbRequestRecord{}, &TbSyntaxRule{}); err != nil {
+		log.Fatalf("auto migrate failed:%s", err.Error())
+	}
+}
+
+func openDB(username, password, addr, name string) *gorm.DB {
+	newLogger := logger.New(
+		log.New(os.Stdout, "\r\n", log.LstdFlags), // io writer
+		logger.Config{
+			SlowThreshold: time.Second, // Slow SQL threshold
+			LogLevel:      logger.Info, // Log level
+			Colorful:      true,        // colorize log output
+		},
+	)
+	dsn := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8mb4&parseTime=%t&loc=%s",
+		username,
+		password,
+		addr,
+		name,
+		true,
+		"Local")
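+	// e.g. root:****@tcp(127.0.0.1:3306)/db_simulation?charset=utf8mb4&parseTime=true&loc=Local (illustrative values)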
+	var err error
+	// SqlDB is the package-level handle declared above
+	SqlDB, err = sql.Open("mysql", dsn)
+	if err != nil {
+		log.Fatalf("connect to mysql failed %s", err.Error())
+		return nil
+	}
+	db, err := gorm.Open(mysql.New(mysql.Config{
+		Conn: SqlDB,
+	}), &gorm.Config{
+		DisableForeignKeyConstraintWhenMigrating: true,
+		Logger:                                   newLogger,
+	})
+
+	if err != nil {
+		log.Fatalf("Database connection failed. Database name: %s, error: %v", name, err)
+	}
+	return db
+}
diff --git a/dbm-services/mysql/db-simulation/model/tb_request_record.go b/dbm-services/mysql/db-simulation/model/tb_request_record.go
new file mode 100644
index 0000000000..f623506643
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/model/tb_request_record.go
@@ -0,0 +1,33 @@
+package model
+
+import (
+	"strings"
+	"time"
+)
+
+// TbRequestRecord persists the raw body of every incoming request
+type TbRequestRecord struct {
+	ID          int       `gorm:"primaryKey;column:id;type:int(11);not null" json:"-"`
+	RequestID   string    `gorm:"unique;column:request_id;type:varchar(64);not null" json:"request_id"` // request_id
+	RequestBody string    `gorm:"column:request_body;type:json" json:"request_body"`
+	UpdateTime  time.Time `gorm:"column:update_time;type:timestamp;default:CURRENT_TIMESTAMP()" json:"update_time"` // last modified time
+	CreateTime  time.Time `gorm:"column:create_time;type:timestamp;default:CURRENT_TIMESTAMP()" json:"create_time"` // creation time
+}
+
+// GetTableName returns the table name
+func (obj *TbRequestRecord) GetTableName() string {
+	return "tb_request_record"
+}
+
+// CreateRequestRecord stores the request body, defaulting to an empty JSON object
+func CreateRequestRecord(requestid, body string) (err error) {
+	if strings.TrimSpace(body) == "" {
+		body = "{}"
+	}
+	return DB.Create(&TbRequestRecord{
+		RequestID:   requestid,
+		RequestBody: body,
+		UpdateTime:  time.Now(),
+		CreateTime:  time.Now(),
+	}).Error
+}
diff --git a/dbm-services/mysql/db-simulation/model/tb_simulation_task.go b/dbm-services/mysql/db-simulation/model/tb_simulation_task.go
new file mode 100644
index 0000000000..4095aebc55
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/model/tb_simulation_task.go
@@ -0,0 +1,109 @@
+package model
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"errors"
+	"fmt"
+	"time"
+
+	"gorm.io/gorm"
+)
+
+// TbSimulationTask records one simulation run; Stdout/Stderr map to MEDIUMTEXT columns
+type TbSimulationTask struct {
+	ID            int       `gorm:"primaryKey;column:id;type:int(11);not null" json:"-"`
+	TaskId        string    `gorm:"unique;column:task_id;type:varchar(256);not null" json:"task_id"`
+	RequestID     string    `gorm:"unique;column:request_id;type:varchar(64);not null" json:"request_id"`
+	Phase         string    `gorm:"column:phase;type:varchar(16);not null" json:"phase"`
+	Status        string    `gorm:"column:status;type:varchar(16);not null" json:"status"`
+	Stdout        string    `gorm:"column:stdout;type:mediumtext" json:"stdout"`
+	Stderr        string    `gorm:"column:stderr;type:mediumtext" json:"stderr"`
+	SysErrMsg     string    `gorm:"column:sys_err_msg;type:varchar(512);not null" json:"sys_err_msg"`
+	Extra         string    `gorm:"column:extra;type:varchar(512);not null" json:"extra"`
+	HeartbeatTime time.Time `gorm:"column:heartbeat_time;type:timestamp;default:CURRENT_TIMESTAMP()" json:"heartbeat_time"`
+	UpdateTime    time.Time `gorm:"column:update_time;type:timestamp;default:CURRENT_TIMESTAMP()" json:"update_time"`
+	CreateTime    time.Time `gorm:"column:create_time;type:timestamp;default:CURRENT_TIMESTAMP()" json:"create_time"`
+}
+
+// GetTableName get sql table name
+func (obj *TbSimulationTask) GetTableName() string {
+	return "tb_simulation_task"
+}
+
+const (
+	// Phase_Waitting task is queued, waiting to be scheduled
+	Phase_Waitting = "Waitting"
+	// Phase_CreatePod the simulation pod is being created
+	Phase_CreatePod = "PodCreating"
+	// Phase_LoadSchema the schema is being loaded into the pod
+	Phase_LoadSchema = "SchemaLoading"
+	// Phase_Running the simulation SQL is executing
+	Phase_Running = "Running"
+	// Phase_Done the task has finished
+	Phase_Done = "Done"
+)
+
+const (
+	// Task_Failed terminal status: the simulation failed
+	Task_Failed = "Failed"
+	// Task_Success terminal status: the simulation succeeded
+	Task_Success = "Success"
+)
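+
+// A task normally advances Waitting -> PodCreating -> SchemaLoading -> Running -> Done,
+// ending with Status Success or Failed; QueryTask in the handler package exposes phase
+// and status to callers.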
+
+// CompleteTask marks the task Done and records its final status and output
+func CompleteTask(task_id, status, stderr, stdout, syserrMsg string) (err error) {
+	return DB.Model(TbSimulationTask{}).Where("task_id = ?", task_id).Updates(
+		TbSimulationTask{
+			Phase:      Phase_Done,
+			Status:     status,
+			Stdout:     stdout,
+			Stderr:     stderr,
+			SysErrMsg:  syserrMsg,
+			UpdateTime: time.Now()}).Error
+}
+
+// UpdateHeartbeat refreshes the task's output and heartbeat timestamp
+func UpdateHeartbeat(taskid, stderr, stdout string) {
+	err := DB.Model(TbSimulationTask{}).Where("task_id = ?", taskid).Updates(
+		TbSimulationTask{
+			Stdout:        stdout,
+			Stderr:        stderr,
+			HeartbeatTime: time.Now(),
+		}).Error
+	if err != nil {
+		logger.Error("update heartbeat time failed %s", err.Error())
+	}
+}
+
+// UpdatePhase advances the task to the given phase
+func UpdatePhase(taskid, phase string) {
+	err := DB.Model(TbSimulationTask{}).Where("task_id = ?", taskid).Updates(
+		TbSimulationTask{
+			Phase:      phase,
+			UpdateTime: time.Now(),
+		}).Error
+	if err != nil {
+		logger.Error("update phase failed %s", err.Error())
+	}
+}
+
+// CreateTask inserts a new task record, refusing duplicate task ids
+func CreateTask(taskid, requestid string) (err error) {
+	var task TbSimulationTask
+	err = DB.Where(&TbSimulationTask{TaskId: taskid}).First(&task).Error
+	if err == nil {
+		return fmt.Errorf("task already exists:%s", taskid)
+	}
+	if !errors.Is(err, gorm.ErrRecordNotFound) {
+		logger.Error("query task %s failed %s", taskid, err.Error())
+		return err
+	}
+	return DB.Create(&TbSimulationTask{
+		TaskId:     taskid,
+		RequestID:  requestid,
+		Phase:      Phase_Waitting,
+		CreateTime: time.Now(),
+	}).Error
+}
diff --git a/dbm-services/mysql/db-simulation/model/tb_syntax_rule.go b/dbm-services/mysql/db-simulation/model/tb_syntax_rule.go
new file mode 100644
index 0000000000..e78f876e44
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/model/tb_syntax_rule.go
@@ -0,0 +1,240 @@
+package model
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+
+	"gorm.io/gorm/clause"
+)
+
+const (
+	// StringItem the rule item holds a string
+	StringItem = "string"
+	// ArryItem the rule item holds an array ("arry" is the value stored in the DB)
+	ArryItem = "arry"
+	// IntItem the rule item holds an integer
+	IntItem = "int"
+	// BoolItem the rule item holds a boolean
+	BoolItem = "bool"
+)
+
+// TbSyntaxRule is one configurable syntax-check rule
+type TbSyntaxRule struct {
+	ID        int             `gorm:"primaryKey;column:id;type:int(11);not null" json:"-"`
+	GroupName string          `gorm:"uniqueIndex:group;column:group_name;type:varchar(64);not null" json:"group_name"` // rule group name
+	RuleName  string          `gorm:"uniqueIndex:group;column:rule_name;type:varchar(64);not null" json:"rule_name"`   // sub-rule name; one group may contain multiple sub-rules
+	Item      json.RawMessage `gorm:"column:item;type:varchar(1024);not null" json:"item"`
+	ItemType  string          `gorm:"column:item_type;type:varchar(128);not null" json:"item_type"`
+	Expr      string          `gorm:"column:expr;type:varchar(128);not null" json:"expr"`            // rule expression
+	Desc      string          `gorm:"column:desc;type:varchar(512);not null" json:"desc"`            // message shown when the rule is hit
+	WarnLevel int16           `gorm:"column:warn_level;type:smallint(2);not null" json:"warn_level"` // 0: normal check item, 1: ban statements that hit the rule
+	Status    bool            `gorm:"column:status;type:tinyint(1);not null" json:"status"`          // 1: enabled, 0: disabled
+}
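+
+// Illustrative row, mirroring the HighRiskCommandRule seeded in InitRule below:
+// Expr "Val in Item" with Item ["drop_table", ...] flags any statement whose command
+// type appears in Item, and WarnLevel decides whether the hit only warns or blocks.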
+
+// GetTableName returns the table name
+func (obj *TbSyntaxRule) GetTableName() string {
+	return "tb_syntax_rules"
+}
+
+func init() {
+	if err := InitRule(); err != nil {
+		logger.Fatal("init syntax rule failed %s", err.Error())
+		return
+	}
+}
+
+// InitRule seeds the default rule set; rows that already exist are left untouched
+func InitRule() (err error) {
+	initRules := []TbSyntaxRule{}
+	initRules = append(initRules, TbSyntaxRule{
+		GroupName: "CommandRule",
+		RuleName:  "HighRiskCommandRule",
+		Expr:      "Val in Item",
+		ItemType:  ArryItem,
+		Item: []byte(
+			`["drop_table", "drop_index", "lock_tables", "drop_db", "analyze","rename_table", "drop_procedure", "drop_view", "drop_trigger","drop_function", "drop_server", "drop_event", "drop_compression_dictionary","optimize", "alter_tablespace"]`),
+		Desc:      "高危命令",
+		WarnLevel: 0,
+		Status:    true,
+	})
+	initRules = append(initRules, TbSyntaxRule{
+		GroupName: "CommandRule",
+		RuleName:  "BanCommandRule",
+		Expr:      "Val in Item",
+		ItemType:  ArryItem,
+		Item: []byte(
+			` ["truncate", "revoke", "kill", "reset", "drop_user", "grant","create_user", "revoke_all", "shutdown", "lock_tables_for_backup","reset", "purge", "lock_binlog_for_backup","lock_tables_for_backup","install_plugin", "uninstall_plugin","alter_user"]`),
+		Desc:      "高危变更类型",
+		WarnLevel: 1,
+		Status:    true,
+	})
+	initRules = append(initRules, TbSyntaxRule{
+		GroupName: "CreateTableRule",
+		RuleName:  "SuggestBlobColumCount",
+		Expr:      "Val >= Item ",
+		ItemType:  IntItem,
+		Item:      []byte(`10`),
+		Desc:      "建议单表Blob字段不要过多",
+		WarnLevel: 0,
+		Status:    true,
+	})
+	initRules = append(initRules, TbSyntaxRule{
+		GroupName: "CreateTableRule",
+		RuleName:  "SuggestEngine",
+		Expr:      "not (Val contains Item) and ( len(Val) != 0 )",
+		ItemType:  StringItem,
+		Item:      []byte(`"innodb"`),
+		Desc:      "建议使用Innodb表",
+		WarnLevel: 0,
+		Status:    true,
+	})
+	initRules = append(initRules, TbSyntaxRule{
+		GroupName: "CreateTableRule",
+		RuleName:  "NeedPrimaryKey",
+		Expr:      "Val == Item",
+		ItemType:  IntItem,
+		Item:      []byte(`1`),
+		Desc:      "建议包含主键",
+		WarnLevel: 0,
+		Status:    true,
+	})
+	initRules = append(initRules, TbSyntaxRule{
+		GroupName: "CreateTableRule",
+		RuleName:  "DefinerRule",
+		Expr:      "Val in Item ",
+		ItemType:  ArryItem,
+		Item:      []byte(`["create_function","create_trigger","create_event","create_procedure","create_view"]`),
+		Desc:      "不允许指定definer",
+		WarnLevel: 0,
+		Status:    true,
+	})
+
+	initRules = append(initRules, TbSyntaxRule{
+		GroupName: "CreateTableRule",
+		RuleName:  "NormalizedName",
+		Expr:      "Val in Item ",
+		ItemType:  ArryItem,
+		Item:      []byte(`["first_char_exception", "special_char", "Keyword_exception"]`),
+		Desc:      "规范化命名",
+		WarnLevel: 0,
+		Status:    true,
+	})
+
+	initRules = append(initRules, TbSyntaxRule{
+		GroupName: "AlterTableRule",
+		RuleName:  "HighRiskType",
+		Expr:      "Val in Item",
+		ItemType:  ArryItem,
+		Item:      []byte(`["drop_column"]`),
+		Desc:      "高危变更类型",
+		WarnLevel: 0,
+		Status:    true,
+	})
+	initRules = append(initRules, TbSyntaxRule{
+		GroupName: "AlterTableRule",
+		RuleName:  "HighRiskPkAlterType",
+		Expr:      "Val in Item",
+		ItemType:  ArryItem,
+		Item:      []byte(`["add_column", "add_key", "change_column"]`),
+		Desc:      "主键高危变更类型",
+		WarnLevel: 0,
+		Status:    true,
+	})
+	initRules = append(initRules, TbSyntaxRule{
+		GroupName: "AlterTableRule",
+		RuleName:  "AlterUseAfter",
+		Expr:      "Val != Item",
+		ItemType:  StringItem,
+		Item:      []byte(`""`),
+		Desc:      "变更表时使用了after",
+		WarnLevel: 0,
+		Status:    true,
+	})
+	initRules = append(initRules, TbSyntaxRule{
+		GroupName: "AlterTableRule",
+		RuleName:  "AddColumnMixed",
+		Expr:      "( Item in Val ) && ( len(Val) > 1 )",
+		ItemType:  StringItem,
+		Item:      []byte(`"add_column"`),
+		Desc:      "加字段和其它alter table 类型混用,可能导致非在线加字段",
+		WarnLevel: 0,
+		Status:    true,
+	})
+
+	initRules = append(initRules, TbSyntaxRule{
+		GroupName: "DmlRule",
+		RuleName:  "DmlNotHasWhere",
+		Expr:      " Val != Item ",
+		ItemType:  BoolItem,
+		Item:      []byte(`true`),
+		Desc:      "没有使用WHERE或者LIMIT,可能会导致全表数据更改",
+		WarnLevel: 0,
+		Status:    true,
+	})
+
+	for _, rule := range initRules {
+		if err := CreateRule(&rule); err != nil {
+			logger.Error("failed to initialize rule %s", err.Error())
+			return err
+		}
+	}
+	GetAllRule()
+	return
+}
+
+// CreateRule inserts a rule, doing nothing on conflict
+func CreateRule(m *TbSyntaxRule) (err error) {
+	return DB.Clauses(clause.OnConflict{
+		DoNothing: true,
+	}).Create(m).Error
+}
+
+// GetAllRule returns all syntax rules
+func GetAllRule() (rs []TbSyntaxRule, err error) {
+	err = DB.Find(&rs).Error
+	return
+}
+
+// GetRuleByName fetches a single rule by group and rule name
+func GetRuleByName(group, rulename string) (rs TbSyntaxRule, err error) {
+	err = DB.Where("group_name = ? and rule_name = ? ", group, rulename).First(&rs).Error
+	return
+}
+
+// GetItemVal decodes rule.Item according to rule.ItemType
+func GetItemVal(rule TbSyntaxRule) (val interface{}, err error) {
+	switch rule.ItemType {
+	case ArryItem:
+		var d []string
+		if err = json.Unmarshal(rule.Item, &d); err != nil {
+			logger.Error("unmarshal failed %s", err.Error())
+			return
+		}
+		val = d
+	case StringItem:
+		var d string
+		if err = json.Unmarshal(rule.Item, &d); err != nil {
+			logger.Error("unmarshal failed %s", err.Error())
+			return
+		}
+		val = d
+	case IntItem:
+		var d int
+		if err = json.Unmarshal(rule.Item, &d); err != nil {
+			logger.Error("unmarshal failed %s", err.Error())
+			return
+		}
+		val = d
+	case BoolItem:
+		var d bool
+		if err = json.Unmarshal(rule.Item, &d); err != nil {
+			logger.Error("unmarshal failed %s", err.Error())
+			return
+		}
+		val = d
+	default:
+		return nil, fmt.Errorf("unrecognizable type:%s", rule.ItemType)
+	}
+	return
+}
diff --git a/dbm-services/mysql/db-simulation/pkg/bkrepo/bkrepo.go b/dbm-services/mysql/db-simulation/pkg/bkrepo/bkrepo.go
new file mode 100644
index 0000000000..12158ed736
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/pkg/bkrepo/bkrepo.go
@@ -0,0 +1,173 @@
+// Package bkrepo implements a minimal client for the BlueKing artifact repository
+package bkrepo
+
+import (
+	util "dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+)
+
+/*
+	API: GET /generic/{project}/{repo}/{path}?download=true
+	API name: download
+	Purpose: download a generic artifact file.
+	The request body of this API is empty.
+*/
+
+// BkRepoClient holds the connection settings for one bkrepo endpoint
+type BkRepoClient struct {
+	Client          *http.Client
+	BkRepoProject   string
+	BkRepoPubBucket string
+	BkRepoEndpoint  string
+	BkRepoUser      string
+	BkRepoPwd       string
+}
+
+// BkRepoRespone is the common bkrepo API response envelope
+type BkRepoRespone struct {
+	Code    int             `json:"code"`
+	Message string          `json:"message"`
+	Data    json.RawMessage `json:"data"`
+	TraceId string          `json:"traceId"`
+}
+
+// getBaseUrl builds the generic-download base URL for the configured project and bucket
+func (b *BkRepoClient) getBaseUrl() string {
+	u, err := url.Parse(b.BkRepoEndpoint)
+	if err != nil {
+		log.Fatal(err)
+	}
+	r, err := url.Parse(path.Join(u.Path, "generic", b.BkRepoProject, b.BkRepoPubBucket))
+	if err != nil {
+		log.Fatal(err)
+	}
+	uri := u.ResolveReference(r).String()
+	logger.Info("uri:%s", uri)
+	return uri
+}
+
+// Download fetches a file from the artifact repository and verifies its size and md5
+// against the repository's node metadata
+func (b *BkRepoClient) Download(sqlpath, filename, downloaddir string) (err error) {
+	uri := b.getBaseUrl() + path.Join("/", sqlpath, filename) + "?download=true"
+	logger.Info("The download url is %s", uri)
+	req, err := http.NewRequest(http.MethodGet, uri, nil)
+	if err != nil {
+		return err
+	}
+	if strings.Contains(filename, "..") {
+		return fmt.Errorf("%s there is a risk of path crossing", filename)
+	}
+	fileAbPath, err := filepath.Abs(path.Join(downloaddir, filename))
+	if err != nil {
+		return err
+	}
+	f, err := os.Create(fileAbPath)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	req.SetBasicAuth(b.BkRepoUser, b.BkRepoPwd)
+	resp, err := b.Client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+	logger.Info("response code %d", resp.StatusCode)
+	if resp.StatusCode != http.StatusOK {
+		bs, err := io.ReadAll(resp.Body)
+		if err != nil {
+			return err
+		}
+		return fmt.Errorf("response code is %d, response body is: %s", resp.StatusCode, string(bs))
+	}
+	size, err := io.Copy(f, resp.Body)
+	if err != nil {
+		return err
+	}
+	logger.GetLogger().Info(fmt.Sprintf("Downloaded a file %s with size %d", filename, size))
+	fileNodeInfo, err := b.QueryFileNodeInfo(sqlpath, filename)
+	if err != nil {
+		return err
+	}
+	logger.Info("node detail %v", fileNodeInfo)
+	if size != int64(fileNodeInfo.Size) {
+		bs, _ := os.ReadFile(fileAbPath)
+		return fmt.Errorf("file %s size mismatch: local %d, bkrepo %d, body: %s",
+			filename, size, fileNodeInfo.Size, string(bs))
+	}
+
+	currentFileMd5, err := util.GetFileMd5(fileAbPath)
+	if err != nil {
+		return err
+	}
+	if currentFileMd5 != fileNodeInfo.Md5 {
+		return fmt.Errorf("file %s md5 mismatch: local %s, bkrepo %s",
+			filename, currentFileMd5, fileNodeInfo.Md5)
+	}
+	return nil
+}
+
+// FileNodeInfo describes a stored file node (digests, size, metadata)
+type FileNodeInfo struct {
+	Name     string            `json:"name"`
+	Sha256   string            `json:"sha256"`
+	Md5      string            `json:"md5"`
+	Size     int               `json:"size"`
+	Metadata map[string]string `json:"metadata"`
+}
+
+// QueryFileNodeInfo queries a file's node metadata (size, md5, etc.) from the repository
+func (b *BkRepoClient) QueryFileNodeInfo(filepath, filename string) (realData FileNodeInfo, err error) {
+	var baseResp BkRepoRespone
+	u, err := url.Parse(b.BkRepoEndpoint)
+	if err != nil {
+		return
+	}
+	r, err := url.Parse(path.Join("repository/api/node/detail/", b.BkRepoProject, b.BkRepoPubBucket, filepath, filename))
+	if err != nil {
+		logger.Error(err.Error())
+		return
+	}
+	uri := u.ResolveReference(r).String()
+	logger.Info("query node detail url %s", uri)
+	req, err := http.NewRequest(http.MethodGet, uri, nil)
+	if err != nil {
+		return FileNodeInfo{}, err
+	}
+	resp, err := b.Client.Do(req)
+	if err != nil {
+		return FileNodeInfo{}, err
+	}
+	defer resp.Body.Close()
+	if err = json.NewDecoder(resp.Body).Decode(&baseResp); err != nil {
+		return FileNodeInfo{}, err
+	}
+	if baseResp.Code != 0 {
+		return FileNodeInfo{}, fmt.Errorf("bkrepo return code: %d, message: %s", baseResp.Code, baseResp.Message)
+	}
+	if err = json.Unmarshal([]byte(baseResp.Data), &realData); err != nil {
+		return FileNodeInfo{}, err
+	}
+	return
+}
diff --git a/dbm-services/mysql/db-simulation/pkg/bkrepo/bkrepo_test.go b/dbm-services/mysql/db-simulation/pkg/bkrepo/bkrepo_test.go
new file mode 100644
index 0000000000..2ef75606a8
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/pkg/bkrepo/bkrepo_test.go
@@ -0,0 +1,45 @@
+package bkrepo_test
+
+import (
+	"dbm-services/mysql/db-simulation/pkg/bkrepo"
+	"net/http"
+	"testing"
+)
+
+func TestDownload(t *testing.T) {
+	t.Log("start ...")
+	b := &bkrepo.BkRepoClient{
+		Client: &http.Client{
+			Transport: &http.Transport{},
+		},
+		BkRepoProject:   "",
+		BkRepoPubBucket: "",
+		BkRepoUser:      "",
+		BkRepoPwd:       "",
+		BkRepoEndpoint:  "",
+	}
+	err := b.Download("/dbbackup/latest", "dbbackup_2.2.48.tar.gz", "/data/")
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log("ending ...")
+}
+
+func TestQueryMeta(t *testing.T) {
+	t.Log("start ...")
+	b := &bkrepo.BkRepoClient{
+		Client: &http.Client{
+			Transport: &http.Transport{},
+		},
+		BkRepoProject:   "",
+		BkRepoPubBucket: "",
+		BkRepoUser:      "",
+		BkRepoPwd:       "",
+		BkRepoEndpoint:  "",
+	}
+	d, err := b.QueryFileNodeInfo("/dbbackup/latest", "dbbackup_2.2.48.tar.gz")
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log("ending ...", d)
+}
diff --git a/dbm-services/mysql/db-simulation/pkg/util/spider.go b/dbm-services/mysql/db-simulation/pkg/util/spider.go
new file mode 100644
index 0000000000..748932a1b2
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/pkg/util/spider.go
@@ -0,0 +1,150 @@
+package util
+
+import (
+	util "dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"errors"
+	"strings"
+)
+
+// ParseGetShardKeyForSpider extracts the quoted shard_key value from a spider table comment
+func ParseGetShardKeyForSpider(tableComment string) (string, error) {
+	pos := strings.Index(tableComment, "shard_key")
+	if pos == -1 {
+		return "", errors.New("not found shard_key")
+	}
+	pos += len("shard_key")
+
+	// ignore the space
+	for pos < len(tableComment) && (tableComment[pos] == ' ' || tableComment[pos] == '\t') {
+		pos++
+	}
+
+	// find the beginning "
+	if pos < len(tableComment) && tableComment[pos] == '"' {
+		pos++
+	} else {
+		return "", errors.New("parse error")
+	}
+
+	// find the ending "
+	end := strings.Index(tableComment[pos:], "\"")
+	if end == -1 {
+		return "", errors.New("parse error")
+	}
+
+	end += pos
+
+	if end-pos <= 0 {
+		return "", errors.New("parse error")
+	}
+
+	return tableComment[pos:end], nil
+}
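+
+// For example, given the table comment `shard_key "uid"`, ParseGetShardKeyForSpider
+// returns ("uid", nil); a missing or unquoted value yields a parse error.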
+
+const (
+	// TCADMIN_PARSE_TABLE_COMMENT_OK comment parsed successfully
+	TCADMIN_PARSE_TABLE_COMMENT_OK = 0
+	// TCADMIN_PARSE_TABLE_COMMENT_ERROR comment is malformed
+	TCADMIN_PARSE_TABLE_COMMENT_ERROR = 1
+	// TCADMIN_PARSE_TABLE_COMMENT_UNSUPPORTED comment contains an unsupported keyword
+	TCADMIN_PARSE_TABLE_COMMENT_UNSUPPORTED = 2
+	// TRIM scanner state: skipping whitespace
+	TRIM = 0
+	// PARSE_KEY scanner state: reading a keyword
+	PARSE_KEY = 1
+	// PARSE_VALUE scanner state: reading a quoted value
+	PARSE_VALUE = 2
+	// PARSE_DONE scanner state: a key/value pair is complete
+	PARSE_DONE = 3
+)
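+
+// Worked example (illustrative): the comment `shard_key "uid",shard_count "4"` walks
+// TRIM -> PARSE_KEY -> TRIM -> PARSE_VALUE -> TRIM for each pair and returns
+// TCADMIN_PARSE_TABLE_COMMENT_OK.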
+
+// ParseGetSpiderUserComment validates the key/value pairs in a spider table comment,
+// returning one of the TCADMIN_PARSE_TABLE_COMMENT_* codes
+func ParseGetSpiderUserComment(tableComment string) (ret int) {
+	bs := []byte(tableComment)
+	keywordBuf := []byte{}
+	valueBuf := []byte{}
+	stage := 0
+	get_key := 0
+	get_value := 0
+	pos := 0
+	for {
+		switch stage {
+		case TRIM:
+			if pos >= len(bs)-1 {
+				goto parseAllDone
+			}
+			if bs[pos] == 0x20 || bs[pos] == 0x09 {
+				pos++
+				continue
+			}
+			if get_key != 0 && get_value != 0 {
+				stage = PARSE_DONE
+			} else if get_key != 0 {
+				stage = PARSE_VALUE
+			} else {
+				stage = PARSE_KEY
+			}
+		case PARSE_KEY:
+			for {
+				keywordBuf = append(keywordBuf, bs[pos])
+				pos++
+				if bs[pos] == 0x20 || pos >= len(bs)-1 {
+					break
+				}
+
+			}
+			kw := string(keywordBuf)
+			if !validateCommentKeyWord(kw) {
+				logger.Info(" illegal keyword:%s", kw)
+				return TCADMIN_PARSE_TABLE_COMMENT_UNSUPPORTED
+			}
+			get_key = 1
+			stage = TRIM
+			keywordBuf = []byte{}
+		case PARSE_VALUE:
+			if bs[pos] != 0x22 {
+				return TCADMIN_PARSE_TABLE_COMMENT_ERROR
+			}
+			pos++
+			for {
+				if pos >= len(bs)-1 || bs[pos] == 0x22 {
+					break
+				}
+				valueBuf = append(valueBuf, bs[pos])
+				pos++
+			}
+			pos++
+			getValue = true
+			stage = TRIM
+			valueBuf = []byte{}
+		case PARSE_DONE:
+			if pos >= len(bs)-1 {
+				return TCADMIN_PARSE_TABLE_COMMENT_OK
+			}
+			if bs[pos] == 0x2c {
+				stage = TRIM
+				getKey = false
+				getValue = false
+				pos++
+			} else {
+				return TCADMIN_PARSE_TABLE_COMMENT_ERROR
+			}
+		default:
+			continue
+		}
+
+	}
+parseAllDone:
+	return ret
+}
+
+// validateCommentKeyWord reports whether keyword is one of the supported spider comment keywords
+func validateCommentKeyWord(keyword string) bool {
+	return util.StringsHas([]string{"shard_count", "shard_func", "shard_type", "shard_key", "config_table"}, keyword)
+}
diff --git a/dbm-services/mysql/db-simulation/pkg/util/util.go b/dbm-services/mysql/db-simulation/pkg/util/util.go
new file mode 100644
index 0000000000..80d62b1ad1
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/pkg/util/util.go
@@ -0,0 +1,2 @@
+// Package util provides small helpers shared by the db-simulation service
+package util
diff --git a/dbm-services/mysql/db-simulation/router/router.go b/dbm-services/mysql/db-simulation/router/router.go
new file mode 100644
index 0000000000..9a26a355d9
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/router/router.go
@@ -0,0 +1,42 @@
+// Package router registers the db-simulation HTTP routes
+package router
+
+import (
+	"net/http"
+
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-simulation/app/service"
+	"dbm-services/mysql/db-simulation/handler"
+
+	"github.com/gin-gonic/gin"
+)
+
+// RegisterRouter registers all HTTP routes on the given engine
+func RegisterRouter(engine *gin.Engine) {
+	engine.POST("/app/debug", TurnOnDebug)
+	// mysql
+	g := engine.Group("/mysql")
+	g.POST("/simulation", handler.Dbsimulation)
+	g.POST("/task", handler.QueryTask)
+	// syntax
+	s := engine.Group("/syntax")
+	s.POST("/check/file", handler.SyntaxCheckFile)
+	s.POST("/check/sql", handler.SyntaxCheckSQL)
+	// rule
+	r := engine.Group("/rule")
+	r.POST("/manage", handler.ManageRule)
+	r.GET("/getall", handler.GetAllRule)
+	r.POST("/update", handler.UpdateRule)
+	// spider
+	sp := engine.Group("/spider")
+	sp.POST("/simulation", handler.SpiderClusterSimulation)
+	sp.POST("/create", handler.CreateTmpSpiderPodCluster)
+}
+
+// TurnOnDebug toggles whether simulation pods are deleted after a run finishes
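+//
+// Usage sketch (assumption): POST /app/debug flips service.DelPod and returns
+// the new value, e.g. {"delpod": false}.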
+func TurnOnDebug(r *gin.Context) {
+	logger.Info("current delpod: %v", service.DelPod)
+	service.DelPod = !service.DelPod
+	r.JSON(http.StatusOK, map[string]interface{}{
+		"delpod": service.DelPod,
+	})
+}
diff --git a/dbm-services/mysql/db-simulation/rule.yaml b/dbm-services/mysql/db-simulation/rule.yaml
new file mode 100644
index 0000000000..3e16a2f6ee
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/rule.yaml
@@ -0,0 +1,59 @@
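+# Each rule is an expression evaluated over Val (the attribute extracted from
+# the SQL under check) and Item (the configured threshold or list); ban: true
+# marks a blocking rule and desc is the message reported back.
+# (Explanatory comment; field semantics inferred from the rules below.)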
+CommandRule:
+  HighRiskCommandRule:
+    expr: ' Val in Item '
+    desc: '高危命令'
+    item: ["drop_table", "drop_index", "lock_tables", "analyze", "rename_table", "drop_procedure", "drop_view", "drop_trigger", "drop_function", "drop_server", "drop_event", "drop_compression_dictionary", "optimize", "alter_tablespace"]
+
+  BanCommandRule:
+    expr: ' Val in Item '
+    desc: '禁用命令'
+    ban: true
+    item: ["truncate", "revoke", "kill", "reset", "drop_db", "drop_user", "grant", "create_user", "revoke_all", "shutdown", "lock_tables_for_backup", "purge", "lock_binlog_for_backup", "install_plugin", "uninstall_plugin", "alter_user"]
+
+
+CreateTableRule:
+  SuggestBlobColumCount:
+    expr: ' Val >= Item '
+    item: 10
+    desc: "建议单表Blob字段不要过多"
+  SuggestEngine:
+    expr: ' not (Val contains Item) and ( len(Val) != 0 ) '
+    item: 'innodb'
+    desc: "建议使用Innodb表"
+  NeedPrimaryKey:
+    expr: ' Val == Item '
+    item: 1
+    desc: "建议包含主键"
+  DefinerRule:
+    expr: ' Val in Item '
+    desc: '不允许指定definer'
+    ban: true
+    item: ["create_function", "create_trigger", "create_event", "create_procedure", "create_view"]
+  NormalizedName:
+    expr: ' Val in Item '
+    desc: '规范化命名'
+    item: ["first_char_exception", "special_char", "Keyword_exception"]
+
+AlterTableRule:
+  HighRiskType:
+    expr: ' Val in Item '
+    item: ["drop_column", "drop_key","change_column","rename_table", "rename_key"]
+    desc: "高危变更类型"
+  HighRiskPkAlterType:
+    expr: ' Val in Item '
+    item: ["add_column", "add_key", "change_column"]
+    desc: "高危主键变更类型"
+  AlterUseAfter:
+    expr: ' Val != Item '
+    item: ""
+    desc: "变更表时使用了after"
+  AddColumnMixed:
+    expr: ' ( Item in Val ) && ( len(Val) > 1 ) '
+    item: 'add_column'
+    desc: "加字段和其它alter table 类型混用,可能导致非在线加字段"
+
+DmlRule:
+  DmlNotHasWhere:
+    expr: ' Val != Item '
+    item: true
+    desc: "没有使用WHERE或者LIMIT,可能会导致全表数据更改"
diff --git a/dbm-services/mysql/db-simulation/spider_rule.yaml b/dbm-services/mysql/db-simulation/spider_rule.yaml
new file mode 100644
index 0000000000..641e7edbfd
--- /dev/null
+++ b/dbm-services/mysql/db-simulation/spider_rule.yaml
@@ -0,0 +1,43 @@
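+# Rules used by the spider (TenDB Cluster) simulation; same rule schema as rule.yaml.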
+CommandRule:
+  HighRiskCommandRule:
+    expr: ' Val in Item '
+    desc: '高危命令'
+    item: ["drop_table", "drop_index", "lock_tables", "analyze", "rename_table", "drop_procedure", "drop_view", "drop_trigger", "drop_function", "drop_server", "drop_event", "drop_compression_dictionary", "optimize", "alter_tablespace"]
+
+  BanCommandRule:
+    expr: ' Val in Item '
+    desc: '禁用命令'
+    ban: true
+    item: ["truncate", "revoke", "kill", "reset", "drop_db", "drop_user", "grant", "create_user", "revoke_all", "shutdown", "lock_tables_for_backup", "purge", "lock_binlog_for_backup", "install_plugin", "uninstall_plugin", "alter_user"]
+
+SpiderCreateTableRule:
+  ColChasetNotEqTbChaset:
+    expr: " Val != Item "
+    ban: true
+    item: true
+    desc: "create table 语句中列字符集定义与表字符集不一致"
+  CreateWithSelect:
+    expr: " Val != Item "
+    ban: true
+    item: true
+    desc: "UNSUPPORT SQL CREATE TABLE WITH SELECT"
+  CreateTbLike:
+    expr: " Val != Item "
+    ban: true
+    item: true
+    desc: "UNSUPPORT SQL CREATE TABLE LIKE"
+  ShardKeyNotPk:
+    expr: " Val != Item "
+    ban: true
+    item: true
+    desc: "CREATE TABLE shard_key 非主键(表存在主键)"
+  ShardKeyNotIndex:
+    expr: " Val != Item "
+    item: true
+    ban: true
+    desc: "CREATE TABLE shard_key 非索引键"
+  IllegalComment:
+    expr: " Val != Item "
+    item: true
+    ban: true
+    desc: "非法的CREATE TABLE的COMMENT"
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/.ci/codecc.yml b/dbm-services/mysql/db-tools/dbactuator/.ci/codecc.yml
new file mode 100644
index 0000000000..9be59c2114
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/.ci/codecc.yml
@@ -0,0 +1,29 @@
+version: v2.0
+resources:
+  repositories:
+    - repository: ci_templates/public/codecc
+      name: codecc
+on:
+  mr:
+    target-branches:  [ "*" ]
+stages:
+  - name: "代码检查"
+    check-out:
+      gates:
+        - template: commonGate.yml@codecc
+      timeout-hours: 10
+    jobs:
+      codecc:
+        name: "CodeCC代码检查"
+        runs-on:
+          pool-name: docker  # options: docker-on-devcloud, docker, local, agentless
+          container:
+            image: mirrors.tencent.com/ci/tlinux3_ci:2.0.0
+        steps:
+          - checkout: self
+          - uses: CodeccCheckAtomDebug@4.*
+            name: 腾讯代码分析
+            with:
+                beAutoLang: true # auto-detect the project language
+                checkerSetType: "openScan" # rule-set type: normal = self-configured, openScan = configured per open-source governance requirements
+                toolScanType: "2" # scan mode: quick full scan [1] | full scan [0] | diff scan [6] | MR/PR scan [2]; default is 1
diff --git a/dbm-services/mysql/db-tools/dbactuator/.ci/open_source_check.yml b/dbm-services/mysql/db-tools/dbactuator/.ci/open_source_check.yml
new file mode 100644
index 0000000000..f421f315f3
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/.ci/open_source_check.yml
@@ -0,0 +1,84 @@
+version: "v2.0"
+name: "开源检查"
+label: []
+variables: {}
+stages:
+- name: "开源检查"
+  label:
+  - "Build"
+  jobs:
+    job_AfK:
+      name: "构建环境-LINUX"
+      runs-on:
+        pool-name: "docker"
+        container:
+          image: "mirrors.tencent.com/ci/tlinux3_ci:2.3.0"
+        needs: {}
+      steps:
+      - checkout: self
+      - name: "敏感信息检查-部门RTX"
+        uses: "SensitiveRtxChecker@3.*"
+      - name: "腾讯代码分析(官方-代码分析工作组)"
+        uses: "CodeccCheckAtomDebug@4.*"
+        with:
+          beAutoLang: true
+          languages:
+          - "GOLANG"
+          checkerSetType: "communityOpenScan"
+          tools:
+          - "WOODPECKER_COMMITSCAN"
+          - "SCC"
+          - "PECKER_SECURITY"
+          - "SENSITIVE"
+          - "DUPC"
+          - "IP_CHECK"
+          - "WOODPECKER_SENSITIVE"
+          - "HORUSPY"
+          - "XCHECK"
+          - "CCN"
+          asyncTask: false
+          asyncTaskId: ""
+          scriptType: "SHELL"
+          script: |-
+            # Coverity/Klocwork compile your code via a build script to trace deep defects
+            # write a build script (build.sh) using your build tool, e.g. maven/cmake
+            # and make sure build.sh can compile the code
+            # cd path/to/build.sh
+            # sh build.sh
+          languageRuleSetMap: {}
+          checkerSetEnvType: "prod"
+          multiPipelineMark: ""
+          rtxReceiverType: "1"
+          botWebhookUrl: ""
+          botRemindRange: "2"
+          botRemindSeverity: "7"
+          botRemaindTools: []
+          emailReceiverType: "1"
+          emailCCReceiverList: []
+          instantReportStatus: "2"
+          reportDate: []
+          reportTime: ""
+          reportTools: []
+          toolScanType: "1"
+          diffBranch: ""
+          byFile: false
+          mrCommentEnable: true
+          prohibitIgnore: false
+          newDefectJudgeFromDate: ""
+          transferAuthorList: []
+          path: []
+          customPath: []
+          scanTestSource: false
+          openScanPrj: false
+          openScanFilterEnable: false
+          issueSystem: "TAPD"
+          issueSubSystem: ""
+          issueResolvers: []
+          issueReceivers: []
+          issueFindByVersion: ""
+          maxIssue: 1000
+          issueAutoCommit: false
+  check-out:
+    gates:
+      - template: open_source_gate.yml
+    timeout-hours: 10
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/.ci/templates/open_source_gate.yml b/dbm-services/mysql/db-tools/dbactuator/.ci/templates/open_source_gate.yml
new file mode 100644
index 0000000000..34ff9b0cb8
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/.ci/templates/open_source_gate.yml
@@ -0,0 +1,26 @@
+parameters:
+- name: receivers
+  type: array
+  default: [ "${{ ci.actor }}" ]
+ 
+gates:
+- name: open-source-gate
+  rule:
+    - "CodeccCheckAtomDebug.all_risk <= 0"
+    - "CodeccCheckAtomDebug.high_med_new_issue <= 0"
+    - "CodeccCheckAtomDebug.ccn_new_max_value <= 40"
+    - "CodeccCheckAtomDebug.sensitive_defect <= 0"
+    - "CodeccCheckAtomDebug.dupc_average <= 15"
+    - "CodeccCheckAtomDebug.ccn_average <= 3"
+    - "CodeccCheckAtomDebug.ccn_new_defect <= 0"
+    - "CodeccCheckAtomDebug.ccn_funcmax <= 20"
+    - "CodeccCheckAtomDebug.woodpecker_all_defect <= 0"
+    - "CodeccCheckAtomDebug.horuspy_all_defect <= 0"
+    - "CodeccCheckAtomDebug.go_serious_defect <= 0"
+    - "CodeccCheckAtomDebug.go_all_defect <= 100"
+  notify-on-fail:
+  - type: wework-message
+    receivers: ${{ parameters.receivers }}
+  continue-on-fail:
+    gatekeepers:
+    - "${{ ci.actor }}"
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/.gitignore b/dbm-services/mysql/db-tools/dbactuator/.gitignore
new file mode 100644
index 0000000000..1a34a6ea74
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/.gitignore
@@ -0,0 +1,27 @@
+!.gitkeep
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+# Test binary, built with `go test -c`
+*.test
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+# Dependency directories (remove the comment below to include it)
+vendor/
+# Go workspace file
+go.work
+configs/*
+log/
+build/
+conf/
+*exe
+*.log
+.idea/
+.DS_Store
+sync_test.sh
+.vscode/
+scripts/upload_media.sh
+scripts/upload.sh
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/.golangci.yml b/dbm-services/mysql/db-tools/dbactuator/.golangci.yml
new file mode 100644
index 0000000000..74b121ed6f
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/.golangci.yml
@@ -0,0 +1,57 @@
+linters-settings:
+  lll:
+    line-length: 120
+  funlen:
+    lines: 80
+    statements: 80
+  gocritic:
+    enabled-checks:
+      - nestingReduce
+      - commentFormatting
+      
+run:
+  # default concurrency is the number of available CPUs
+  concurrency: 4
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  timeout: 2m
+  # exit code when at least one issue was found, default is 1
+  issues-exit-code: 1
+  # include test files or not, default is true
+  tests: false
+  # default is true. Enables skipping of directories:
+  #   vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
+  skip-dirs-use-default: true
+
+  skip-files:
+    - ".*/mock/.*.go"
+    - ".*testing.go"
+
+linters:
+  # enable-all: true
+  # disable-all: true
+  disable:
+    - errcheck
+  enable:
+    - nilerr
+    - nakedret
+    - lll
+    - gofmt
+    - gocritic
+    - gocyclo
+    - whitespace
+    - sqlclosecheck
+    - deadcode
+    - govet
+    - bodyclose
+    - staticcheck
+    # - errorlint
+    # - varcheck
+    # - typecheck
+    # - nestif
+    # - gofumpt
+    # - godox
+    # - wsl
+    # - funlen
+    # - golint
+    # - cyclop
+  fast: false
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/LICENSE b/dbm-services/mysql/db-tools/dbactuator/LICENSE
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/mysql/db-tools/dbactuator/Makefile b/dbm-services/mysql/db-tools/dbactuator/Makefile
new file mode 100644
index 0000000000..dcca994de4
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/Makefile
@@ -0,0 +1,35 @@
+SHELL := /bin/bash
+BASE_DIR = $(shell pwd)
+VERSION = 0.0.1
+APPNAME = dbactuator
+GOOS ?= linux
+BUILD_FLAG = " -X main.version=${VERSION} -X main.buildstamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X main.githash=`git rev-parse HEAD` "
+BUILD_EXTERNAL_FLAG = " -X main.external=ON -X main.version=${VERSION} -X main.buildstamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X main.githash=`git rev-parse HEAD` "
+BUILD_MINI_FLAG = " -s -w -X main.version=${VERSION} -X main.buildstamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X main.githash=`git rev-parse HEAD` "
+
+.PHONY: all build external mini clean rotatebinlog gotool help
+
+build:
+	cd ${BASE_DIR}/cmd && CGO_ENABLED=0 GOOS=${GOOS} GOARCH=amd64 go build  -gcflags=-trimpath=${PWD} -asmflags=-trimpath=${PWD}   -ldflags ${BUILD_FLAG}  -o $(BASE_DIR)/build/$(APPNAME) -v .
+
+external :
+	cd ${BASE_DIR}/cmd && CGO_ENABLED=0 GOOS=${GOOS} GOARCH=amd64 go build  -gcflags=-trimpath=${PWD} -asmflags=-trimpath=${PWD}   -ldflags ${BUILD_EXTERNAL_FLAG}  -o $(BASE_DIR)/build/$(APPNAME) -v .
+
+mini:
+	cd ${BASE_DIR}/cmd && CGO_ENABLED=0 GOOS=${GOOS} GOARCH=amd64 go build  -gcflags=-trimpath=${PWD} -asmflags=-trimpath=${PWD}   -ldflags ${BUILD_MINI_FLAG}  -o $(BASE_DIR)/build/$(APPNAME) -v .
+	# requires upx to be installed
+	upx $(BASE_DIR)/build/$(APPNAME)
+	
+clean:
+	cd ${BASE_DIR}/build && rm -rf ${APPNAME}
+
+rotatebinlog:
+	cd ${BASE_DIR}/cmd/rotatebinlog && CGO_ENABLED=0 GOOS=${GOOS} GOARCH=amd64 go build  -ldflags ${BUILD_FLAG}  -o $(BASE_DIR)/build/rotatebinlog -v .
+
+gotool:
+	@-gofmt -w .
+
+help:
+	@echo "make - compile go source"
+	@echo "make gotool - run gofmt"
+	@echo "make clean - do some clean job"
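+	@echo "make external - compile with the external build flag"
+	@echo "make mini - compile a upx-compressed binary"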
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/README.md b/dbm-services/mysql/db-tools/dbactuator/README.md
new file mode 100644
index 0000000000..3509b1e9e5
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/README.md
@@ -0,0 +1,170 @@
+# git.tencent.com/dbs/bk-dbactuator
+
+## Development conventions
+
+> Go version >= **1.9**  
+
+> Code formatting & import conventions: configure **goimports** `vscode: "go.formatTool": "goimports"`  
+
+> Use **golangci-lint** for code style checks   
+
+> Configure automated comments with **gonote**     
+  ```
+    // vscode configuration
+      "filewatcher.commands": [
+        {
+            "match": ".*\\.go$",
+            "isAsync": false,
+            "cmd": "cd '${fileDirname}' && gonote -w '${file}' > /tmp/gonote.log 2>&1",
+            "event": "onFileChange"
+        }
+    ]
+  ```
+  > Other conventions follow the general **Go development conventions**  
+
+
+
+
+## dbactuator 
+
+A collection of database operation commands: atomic tasks such as deploying MySQL, Proxy, monitoring and backup, and applying MySQL/Proxy changes. An upper-layer pipeline orchestrates these commands into scenario-specific jobs.
+```
+Db Operation Command Line Interface
+Version: 0.0.1 
+Githash: 212617a717c3a3a968eb0c7d3a2c4ea2bc21abc2
+Buildstamp:2022-05-27_06:42:56AM
+
+Usage:
+  dbactuator [flags]
+  dbactuator [command]
+
+Available Commands:
+  completion  Generate the autocompletion script for the specified shell
+  help        Help about any command
+  mysql       MySQL Operation Command Line Interface
+  proxy       MySQL Proxy Operation Command Line Interface
+  sysinit     Exec sysinit_mysql.sh,Init mysql default os user,password
+
+Flags:
+  -h, --help             help for dbactuator
+  -n, --node_id string   节点id
+  -p, --payload string   command payload 
+  -r, --rollback         回滚任务
+  -x, --show-payload     show payload for man
+  -u, --uid string       单据id
+
+Use "dbactuator [command] --help" for more information about a command.
+```
+
+## Documentation
+
+### Developing a subcommand
+
+#### Adding a payload description and example (swagger)
+##### **Viewing the annotations**  
+```
+./dbactuator mysql find-local-backup --helper
+```
+
+##### **Adding annotations to --helper**  
+Add annotations on the subcommand definition, for example:
+```
+// FindLocalBackupCommand godoc
+//
+// @Summary      find local backups
+// @Description  find local backups
+// @Tags         mysql
+// @Accept       json
+// @Param        body body      mysql.FindLocalBackupParam  true  "short description"
+// @Success      200  {object}  mysql.FindLocalBackupResp
+// @Router       /mysql/find-local-backup [post]
+func FindLocalBackupCommand() *cobra.Command {
+...
+```
+
+- For `@Param`, replace `mysql.FindLocalBackupParam` with your subcommand's parameter struct; the struct must be referencable from the current package.
+ swagger parses parameters via `@param`, so avoid conflicts with other function comments, otherwise the doc build may fail; see the output example below
+- `@Router` has the format `/cmd/subcmd [post]`; keep the trailing `[post]`
+- If the command produces no output, drop the `@Success` line; see the output example below
+
+**Field comment examples for a param struct:**
+```
+// Enum description for a field. The comment may sit on the line above the field or trail it
+Field1 int `json:"field1" enums:"0,1,2"` // enum type
+Field2 string `json:"field2" validate:"required" example:"test"`  // required, with example content
+Field3 int `json:"field3" validate:"gte=0,lte=999" default:"2"` // min/max and default values
+```
+
+##### **Adding an example**  
+Add an `Example() interface{}` method on the component struct, for example:
+```
+func (f *FindLocalBackupComp) Example() interface{} {
+	comp := FindLocalBackupComp{
+		Params: FindLocalBackupParam{
+			BackupDirs:  []string{"/data/dbbak", "/data1/dbbak"},
+			TgtInstance: &common.InstanceExample,
+			FileServer:  false,
+		},
+	}
+	return comp
+}
+```
+Fill in the example fields you need; they must serialize to JSON.
+
+Then fill in the `Example` field in the subcommand definition, for example:
+```
+cmd := &cobra.Command{
+		Use:   "find-local-backup",
+		Example: fmt.Sprintf(`dbactuator mysql find-local-backup %s %s`,
+			subcmd.CmdBaseExampleStr, common.ToPrettyJson(act.Service.Example())),
+		...
+	}
+```
+
+If you also need an output example, write an `ExampleOutput()` following `mysql restore-dr`.
+
+##### **Generating the docs**
+First install the `swag` command from https://github.com/swaggo/swag (v1.8.12 recommended; older versions may not work with go1.19).
+```
+# first let swag generate docs/swagger.json from the annotations
+# check whether the doc build succeeds
+./build_doc.sh
+
+# then compile and embed it into the binary
+make
+```
+Or run both steps at once with `./build.sh`
+
+To avoid merge conflicts, docs/swagger.json and docs/swagger.yaml are currently ignored via .gitignore
+
+### Reading payload fields from environment variables
+Credentials or passwords that should not appear in the payload can be supplied via environment variables.
+Set the `env` tag on the struct field to the environment variable name; other attributes such as validate required still apply (see the struct sketch below).
+
+Note that a nested struct currently must not be a nil pointer; initialize it to an empty value before it can be parsed into.
+
+示例:
+```
+export GENERAL_ACCOUNT_admin_user=ADMIN
+export GENERAL_ACCOUNT_admin_pwd=xxx
+
+or IBS_INFO_key=xxxxx ./dbactuator download ibs-recover ...
+```
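+
+A minimal struct sketch (illustrative; the type, field and tag names here are assumptions, not taken from the real components):
+```go
+type IBSInfo struct {
+	// read from the IBS_INFO_key environment variable when absent from the payload
+	Key string `json:"key" env:"IBS_INFO_key" validate:"required"`
+}
+```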
+
+
+#### Formatting
+- All code must be formatted with `gofmt`. (if you use a different IDE, adjust its settings accordingly)
+
+#### Import conventions
+- Use `goimports` to format imports automatically; import conventions follow the `goimports` rules.
+
+#### Package naming
+- Keep the `package` name identical to its directory name.
+- Package names should be lowercase single words; do not use underscores or mixed case, and use nested directories to express hierarchy.
+- Avoid meaningless package names such as `util`, `common`, `misc`, `global`. A package name should be clear and increasingly focused, following the single-responsibility principle, rather than a `common`-style dumping ground that keeps swelling, complicates dependencies, and hurts readability, reuse and refactoring. Note that a path like `xx/util/encryption` is acceptable.
+
+#### File naming
+- File names should be lowercase, with words separated by underscores.
+
+#### Variable naming
+- Variable names must use camel case; the first letter is upper- or lower-case depending on the intended visibility.
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/build.sh b/dbm-services/mysql/db-tools/dbactuator/build.sh
new file mode 100755
index 0000000000..499fb68a77
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/build.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+workDir=$(pwd)
+
+# generate the swagger docs, then build the binary
+cd "$workDir"
+chmod +x *.sh
+./build_doc.sh
+make
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/build_doc.sh b/dbm-services/mysql/db-tools/dbactuator/build_doc.sh
new file mode 100755
index 0000000000..78bea6292e
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/build_doc.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+# https://github.com/swaggo/swag
+# --parseDependency to avoid: ParseComment ... cannot find type definition: json.RawMessage
+swag init -g cmd/cmd.go -o docs/ --ot json,yaml --parseDependency
+if [ $? -gt 0 ]; then
+  echo "generate swagger api docs failed"
+  exit 1
+fi
+tree docs/
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/cmd/cmd.go b/dbm-services/mysql/db-tools/dbactuator/cmd/cmd.go
new file mode 100644
index 0000000000..285f7a7f44
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/cmd/cmd.go
@@ -0,0 +1,204 @@
+// Package main is the dbactuator entry point
+/*
+ * @Description: dbactuator entry; implements the collection of database-side operation commands, e.g. installing MySQL
+ * @Usage: dbactuator --help
+ */
+package main
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd/crontabcmd"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd/sysinitcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/templates"
+	"fmt"
+	"os"
+	"runtime/debug"
+	"strings"
+	"time"
+
+	"github.com/spf13/cobra"
+)
+
+const (
+	// CMD is the actuator command name
+	CMD = "dbactuator"
+)
+
+var (
+	buildstamp, githash, version, external string
+)
+
+// @title           dbactuator API
+// @version         0.0.1
+// @description     This is a dbactuator command collection.
+// @termsOfService  http://swagger.io/terms/
+// @Schemes        http
+// @contact.name   API Support
+// @contact.url    http://www.swagger.io/support
+// @contact.email  support@swagger.io
+
+// @license.name  Apache 2.0
+// @license.url   http://www.apache.org/licenses/LICENSE-2.0.html
+
+// @host            ./dbactuator
+// @BasePath  /
+
+// main godoc
+func main() {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Println(err)
+			logger.Error("panic: %v;%s", err, string(debug.Stack()))
+			os.Exit(1)
+		}
+	}()
+	if err := NewDbActuatorCommand().Execute(); err != nil {
+		fmt.Fprint(os.Stderr, err.Error())
+		logger.Error("NewDbActuatorCommand run failed:%s", err.Error())
+		os.Exit(1)
+	}
+}
+
+// NewDbActuatorCommand 新建命令
+func NewDbActuatorCommand() *cobra.Command {
+	cmds := &cobra.Command{
+		Use: CMD,
+		Short: fmt.Sprintf(
+			`Db Operation Command Line Interface
+Version: %s 
+Githash: %s
+External: %s 
+Buildstamp:%s`, version, githash, strings.ToUpper(external), buildstamp,
+		),
+		Args: cobra.OnlyValidArgs,
+		PersistentPreRun: func(cmd *cobra.Command, args []string) {
+			if !cmd.IsAvailableCommand() {
+				runHelp(cmd, args)
+				return
+			}
+			subcmd.SetLogger(cmd, subcmd.GBaseOptions)
+			if subcmd.PrintSubCommandHelper(cmd, subcmd.GBaseOptions) {
+				runHelp(cmd, args)
+			}
+			// periodically print a heartbeat to stdout
+			startHeartbeat(10 * time.Second)
+		},
+		Run:        runHelp,
+		SuggestFor: []string{CMD},
+	}
+	groups := templates.CommandGroups{
+		{
+			Message: "mysql operation sets",
+			Commands: []*cobra.Command{
+				mysqlcmd.NewMysqlCommand(),
+			},
+		},
+		{
+			Message: "sysinit operation sets",
+			Commands: []*cobra.Command{
+				sysinitcmd.NewSysInitCommand(),
+			},
+		},
+		{
+			Message: "crontab operation sets",
+			Commands: []*cobra.Command{
+				crontabcmd.ClearCrontabCommand(),
+			},
+		},
+		{
+			Message: "mysql-proxy sets",
+			Commands: []*cobra.Command{
+				proxycmd.NewMysqlProxyCommand(),
+			},
+		},
+		{
+			Message: "common operation sets",
+			Commands: []*cobra.Command{
+				commoncmd.NewCommonCommand(),
+			},
+		},
+		{
+			Message: "download operation sets",
+			Commands: []*cobra.Command{
+				commoncmd.NewDownloadCommand(),
+			},
+		},
+		{
+			Message: "spider operation sets",
+			Commands: []*cobra.Command{
+				spidercmd.NewSpiderCommand(),
+			},
+		},
+		{
+			Message: "spiderctl operation sets",
+			Commands: []*cobra.Command{
+				spiderctlcmd.NewSpiderCtlCommand(),
+			},
+		},
+	}
+	groups.Add(cmds)
+	// flags can be "persistent", meaning a flag is available to the command it is assigned to and to every command below it; global flags are assigned as persistent flags on the root
+	// every subcommand therefore carries these flags by default
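+	// e.g. dbactuator mysql deploy -u <uid> -N <node_id> -p <base64-payload>  (illustrative invocation)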
+	cmds.PersistentFlags().StringVarP(
+		&subcmd.GBaseOptions.Payload, "payload", "p", subcmd.GBaseOptions.Payload,
+		"command payload ",
+	)
+	cmds.PersistentFlags().StringVarP(
+		&subcmd.GBaseOptions.PayloadFormat, "payload-format", "m",
+		subcmd.GBaseOptions.PayloadFormat, "command payload format, default base64, value_allowed: base64|raw",
+	)
+	cmds.PersistentFlags().StringVarP(&subcmd.GBaseOptions.Uid, "uid", "U", subcmd.GBaseOptions.Uid, "bill id")
+	cmds.PersistentFlags().StringVarP(&subcmd.GBaseOptions.RootId, "root_id", "R", subcmd.GBaseOptions.RootId,
+		"process id")
+	cmds.PersistentFlags().StringVarP(&subcmd.GBaseOptions.NodeId, "node_id", "N", subcmd.GBaseOptions.NodeId, "node id")
+	cmds.PersistentFlags().StringVarP(
+		&subcmd.GBaseOptions.VersionId, "version_id", "V", subcmd.GBaseOptions.VersionId,
+		"run version id",
+	)
+	cmds.PersistentFlags().BoolVarP(
+		&subcmd.GBaseOptions.RollBack,
+		"rollback",
+		"r",
+		subcmd.GBaseOptions.RollBack,
+		"rollback task",
+	)
+	cmds.PersistentFlags().BoolVarP(
+		&subcmd.GBaseOptions.Helper,
+		"helper",
+		"E",
+		subcmd.GBaseOptions.Helper,
+		"payload parameter description",
+	)
+	subcmd.GBaseOptions.External = external
+	// @todo add --daemon mode to serve http to call subcmd/components
+	return cmds
+}
+
+func runHelp(cmd *cobra.Command, args []string) {
+	cmd.Help()
+	os.Exit(1)
+}
+
+// startHeartbeat periodically prints a heartbeat line to stdout
+func startHeartbeat(period time.Duration) {
+	go func() {
+		ticker := time.NewTicker(period)
+		defer ticker.Stop()
+		for t := range ticker.C {
+			fmt.Fprintf(os.Stdout, "[%s]heartbeating ...\n", t.Local().Format(cst.TIMELAYOUT))
+		}
+	}()
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/docs/.gitkeep b/dbm-services/mysql/db-tools/dbactuator/docs/.gitkeep
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/mysql/db-tools/dbactuator/docs/dbactuator.md b/dbm-services/mysql/db-tools/dbactuator/docs/dbactuator.md
new file mode 100644
index 0000000000..3fa75ae070
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/docs/dbactuator.md
@@ -0,0 +1,30 @@
+# dbactuator 
+
+A collection of database operation commands: atomic tasks such as deploying MySQL, Proxy, monitoring and backup, and applying MySQL/Proxy changes. An upper-layer pipeline orchestrates these commands into scenario-specific jobs.
+```
+Db Operation Command Line Interface
+Version: 0.0.1 
+Githash: 212617a717c3a3a968eb0c7d3a2c4ea2bc21abc2
+Buildstamp:2022-05-27_06:42:56AM
+
+Usage:
+  dbactuator [flags]
+  dbactuator [command]
+
+Available Commands:
+  completion  Generate the autocompletion script for the specified shell
+  help        Help about any command
+  mysql       MySQL Operation Command Line Interface
+  proxy       MySQL Proxy Operation Command Line Interface
+  sysinit     Exec sysinit_mysql.sh,Init mysql default os user,password
+
+Flags:
+  -h, --help             help for dbactuator
+  -n, --node_id string   节点id
+  -p, --payload string   command payload 
+  -r, --rollback         回滚任务
+  -x, --show-payload     show payload for man
+  -u, --uid string       单据id
+
+Use "dbactuator [command] --help" for more information about a command.
+```
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/docs/docs.go b/dbm-services/mysql/db-tools/dbactuator/docs/docs.go
new file mode 100644
index 0000000000..67384e3939
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/docs/docs.go
@@ -0,0 +1,2 @@
+// Package docs holds the generated swagger documents embedded into the binary
+package docs
diff --git a/dbm-services/mysql/db-tools/dbactuator/docs/embed_docs.go b/dbm-services/mysql/db-tools/dbactuator/docs/embed_docs.go
new file mode 100644
index 0000000000..f7c1683e24
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/docs/embed_docs.go
@@ -0,0 +1,8 @@
+package docs
+
+import "embed"
+
+// SwaggerDocs embeds the generated swagger.json used to render payload descriptions
+//
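+// Usage sketch (assumption): raw, _ := SwaggerDocs.ReadFile("swagger.json")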
+//go:embed swagger.json
+var SwaggerDocs embed.FS
diff --git a/dbm-services/mysql/db-tools/dbactuator/docs/swagger.json b/dbm-services/mysql/db-tools/dbactuator/docs/swagger.json
new file mode 100644
index 0000000000..dc9d750987
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/docs/swagger.json
@@ -0,0 +1,3170 @@
+{
+    "schemes": [
+        "http"
+    ],
+    "swagger": "2.0",
+    "info": {
+        "description": "This is a dbactuator command collection.",
+        "title": "dbactuator API",
+        "termsOfService": "http://swagger.io/terms/",
+        "contact": {
+            "name": "API Support",
+            "url": "http://www.swagger.io/support",
+            "email": "support@swagger.io"
+        },
+        "license": {
+            "name": "Apache 2.0",
+            "url": "http://www.apache.org/licenses/LICENSE-2.0.html"
+        },
+        "version": "0.0.1"
+    },
+    "host": "./dbactuator",
+    "basePath": "/",
+    "paths": {
+        "/common/file-server": {
+            "post": {
+                "description": "通过 http 暴露指定目录可用于下载,可用于在重建备库时,从其它机器下载备份\n在 OS 不允许 ssh 登录(scp/sftp)时,可以临时启动该服务来获取备份文件",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "common"
+                ],
+                "summary": "简单文件服务",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_fileserver.FileServerComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/common/rm-file": {
+            "post": {
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "common"
+                ],
+                "summary": "限速删除大文件",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/internal_subcmd_commoncmd.RMLargeFileParam"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/download/http": {
+            "post": {
+                "description": "支持限速、basicAuth 认证. 一般配合 common fileserver 使用\n# server1\n./dbactuator common file-server \\\n--payload-format raw \\\n--payload '{\"extend\":{\"bind_address\":\":8082\",\"mount_path\":\"/data/dbbak\",\"user\":\"xiaog\",\"password\":\"xxxx\",\"proc_maxidle_duration\":\"60s\"}}'\n\n# server2\ncurl -u 'xiaog:xxxx' 'http://server1:8082/datadbbak8082/dbactuator' -o dbactuator.bin --limit-rate 10k",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "download"
+                ],
+                "summary": "http下载文件",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.DFHttpParam"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/download/ibs-query": {
+            "post": {
+                "description": "filename 会进行模糊匹配,返回 task_id 用于下载",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "download"
+                ],
+                "summary": "从 ieg 备份系统查询文件",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryComp"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryResult"
+                        }
+                    }
+                }
+            }
+        },
+        "/download/ibs-recover": {
+            "post": {
+                "description": "提供 task_id,从 ieg 备份系统下载文件\ntask_files_wild: 模糊搜索文件并下载, task_files: 精确文件查询并下载\ntask_files_wild, task_files 二选一\n启用 skip_local_exists=true 时,如果目标目录已存在要下载的文件,会自动跳过",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "download"
+                ],
+                "summary": "从 ieg 备份系统下载文件",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSRecoverComp"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSRecoverTask"
+                        }
+                    }
+                }
+            }
+        },
+        "/download/scp": {
+            "post": {
+                "description": "支持限速",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "download"
+                ],
+                "summary": "scp下载文件",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.DFScpParam"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/change-master": {
+            "post": {
+                "description": "执行 change master to",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "建立主从关系",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BuildMSRelationComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/clean-mysql": {
+            "post": {
+                "description": "清空本地实例,保留系统库",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "清空实例,高危",
+                "parameters": [
+                    {
+                        "description": "description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.CleanMysqlComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/deploy": {
+            "post": {
+                "description": "部署 mysql 实例说明",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "部署 mysql 实例",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/deploy-dbbackup": {
+            "post": {
+                "description": "部署GO版本备份程序",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "部署备份程序",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallNewDbBackupComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/find-local-backup": {
+            "post": {
+                "description": "查找本地备份",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "查找本地备份",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.FindLocalBackupParam"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.FindLocalBackupResp"
+                        }
+                    }
+                }
+            }
+        },
+        "/mysql/flashback-binlog": {
+            "post": {
+                "description": "通过 `mysqlbinlog --flashback xxx | mysql` 导入 binlog",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "导入 binlog",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_rollback.FlashbackComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/grant-repl": {
+            "post": {
+                "description": "在目标机器新建 repl 账号",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "建立复制账号",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_grant.GrantReplComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/init-cluster-routing": {
+            "post": {
+                "description": "初始化tendb cluster 集群的路由关系说明",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "spiderctl"
+                ],
+                "summary": "初始化tendb cluster 集群的路由关系",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.InitClusterRoutingComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/install-checksum": {
+            "post": {
+                "description": "安装mysql校验",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "安装mysql校验",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLChecksumComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/install-dbatoolkit": {
+            "post": {
+                "description": "部署 /home/mysql/dba_toolkit,覆盖",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "部署DBA工具箱",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallDBAToolkitComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/mycnf-change": {
+            "post": {
+                "description": "修改mysql配置",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "修改mysql配置",
+                "parameters": [
+                    {
+                        "description": "description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfChangeComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/mycnf-clone": {
+            "post": {
+                "description": "用于 slave 重建或迁移,保持新实例与 my.cnf 实例关键参数相同的场景\n默认 clone 参数:",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "从源实例克隆 my.cnf 部分参数到目标实例",
+                "parameters": [
+                    {
+                        "description": "description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfCloneComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/parse-binlog-time": {
+            "post": {
+                "description": "获取 binlog FileDescriptionFormat 和 RotateEvent 事件",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "获取 binlog 的开始和结束时间",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BinlogTimeComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/pt-table-checksum": {
+            "post": {
+                "description": "数据校验",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "数据校验",
+                "parameters": [
+                    {
+                        "description": "description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.PtTableChecksumComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/recover-binlog": {
+            "post": {
+                "description": "通过 `mysqlbinlog xxx | mysql` 导入 binlog",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "导入 binlog",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RecoverBinlogComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/restore-dr": {
+            "post": {
+                "description": "物理备份、逻辑备份恢复",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "备份恢复",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RestoreDRComp"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_util_mysqlutil.ChangeMaster"
+                        }
+                    }
+                }
+            }
+        },
+        "/mysql/semantic-check": {
+            "post": {
+                "description": "运行语义检查",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "运行语义检查",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SemanticCheckComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/mysql/semantic-dumpschema": {
+            "post": {
+                "description": "运行语义检查",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "mysql"
+                ],
+                "summary": "运行语义检查",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SemanticDumpSchemaComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/spdierctl/deploy": {
+            "post": {
+                "description": "部署 spider ctl 实例说明",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "spiderctl"
+                ],
+                "summary": "部署 spider ctl 实例",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        },
+        "/spider/deploy": {
+            "post": {
+                "description": "部署 spider 实例说明",
+                "consumes": [
+                    "application/json"
+                ],
+                "tags": [
+                    "spider"
+                ],
+                "summary": "部署 spider 实例",
+                "parameters": [
+                    {
+                        "description": "short description",
+                        "name": "body",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLComp"
+                        }
+                    }
+                ],
+                "responses": {}
+            }
+        }
+    },
+    "definitions": {
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam": {
+            "type": "object",
+            "properties": {
+                "runtime_account": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.RuntimeAccountParam"
+                },
+                "runtime_extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.RuntimeExtend"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components.RuntimeAccountParam": {
+            "type": "object",
+            "properties": {
+                "admin_pwd": {
+                    "description": "mysql admin 密码,环境变量 GENERAL_ACCOUNT_admin_pwd",
+                    "type": "string"
+                },
+                "admin_user": {
+                    "description": "mysql admin 账户,环境变量 GENERAL_ACCOUNT_admin_user",
+                    "type": "string"
+                },
+                "backup_pwd": {
+                    "description": "dbbackup pwd",
+                    "type": "string"
+                },
+                "backup_user": {
+                    "description": "dbbackup user",
+                    "type": "string"
+                },
+                "monitor_access_all_pwd": {
+                    "description": "mysql monitor@% 密码",
+                    "type": "string"
+                },
+                "monitor_access_all_user": {
+                    "description": "mysql monitor@%",
+                    "type": "string"
+                },
+                "monitor_pwd": {
+                    "description": "mysql monitor 密码,环境变量 GENERAL_ACCOUNT_monitor_pwd",
+                    "type": "string"
+                },
+                "monitor_user": {
+                    "description": "mysql monitor 账户,环境变量 GENERAL_ACCOUNT_monitor_user",
+                    "type": "string"
+                },
+                "proxy_admin_pwd": {
+                    "description": "proxy admin pwd",
+                    "type": "string"
+                },
+                "proxy_admin_user": {
+                    "description": "proxy admin user",
+                    "type": "string"
+                },
+                "repl_pwd": {
+                    "description": "repl pwd, 环境变量 GENERAL_ACCOUNT_repl_pwd",
+                    "type": "string"
+                },
+                "repl_user": {
+                    "description": "repl user, 环境变量 GENERAL_ACCOUNT_repl_user",
+                    "type": "string"
+                },
+                "tdbctl_pwd": {
+                    "type": "string"
+                },
+                "tdbctl_user": {
+                    "type": "string"
+                },
+                "yw_pwd": {
+                    "description": "yw pwd",
+                    "type": "string"
+                },
+                "yw_user": {
+                    "description": "yw user",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components.RuntimeExtend": {
+            "type": "object",
+            "properties": {
+                "mysql_sys_users": {
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.DFHttpParam": {
+            "type": "object",
+            "required": [
+                "file_list",
+                "path_tgt",
+                "server"
+            ],
+            "properties": {
+                "auth_pass": {
+                    "description": "http url basic auth pass",
+                    "type": "string"
+                },
+                "auth_user": {
+                    "description": "http url basic auth user",
+                    "type": "string"
+                },
+                "bk_biz_id": {
+                    "type": "integer"
+                },
+                "bwlimit_mb": {
+                    "description": "单文件下载限速,单位 MB/s",
+                    "type": "integer"
+                },
+                "curl_options": {
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "curl_path": {
+                    "description": "curl 命令路径,默认留空. 目前只用于测试 url",
+                    "type": "string"
+                },
+                "file_list": {
+                    "description": "下载哪些文件",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "max_concurrency": {
+                    "description": "并发下载数",
+                    "type": "integer"
+                },
+                "path_tgt": {
+                    "description": "文件存放到本机哪个目录",
+                    "type": "string"
+                },
+                "server": {
+                    "description": "下载 url",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.DFScpParam": {
+            "type": "object",
+            "required": [
+                "file_src",
+                "file_tgt"
+            ],
+            "properties": {
+                "bk_biz_id": {
+                    "type": "integer"
+                },
+                "bwlimit_mb": {
+                    "description": "单文件下载限速,单位 MB/s",
+                    "type": "integer"
+                },
+                "file_src": {
+                    "description": "下载源",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.FileSrc"
+                        }
+                    ]
+                },
+                "file_tgt": {
+                    "description": "下载目标",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.FileTgt"
+                        }
+                    ]
+                },
+                "max_concurrency": {
+                    "description": "并发下载数",
+                    "type": "integer"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.FileSrc": {
+            "type": "object",
+            "required": [
+                "file_list",
+                "path",
+                "ssh_host",
+                "ssh_port",
+                "ssh_user"
+            ],
+            "properties": {
+                "file_list": {
+                    "description": "源文件名列表,相对上面的 path",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "match": {
+                    "type": "string"
+                },
+                "path": {
+                    "description": "源文件所在目录",
+                    "type": "string"
+                },
+                "ssh_host": {
+                    "type": "string"
+                },
+                "ssh_pass": {
+                    "type": "string"
+                },
+                "ssh_port": {
+                    "type": "string"
+                },
+                "ssh_user": {
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.FileTgt": {
+            "type": "object",
+            "required": [
+                "path"
+            ],
+            "properties": {
+                "path": {
+                    "description": "文件下载目标目录",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSBaseInfo": {
+            "type": "object",
+            "required": [
+                "key",
+                "sys_id",
+                "url"
+            ],
+            "properties": {
+                "key": {
+                    "description": "16位字串,由备份系统分配,可从环境变量获取 IBS_INFO__key",
+                    "type": "string"
+                },
+                "sys_id": {
+                    "description": "application标识,亦即哪个系统需要访问本接口,可从环境变量获取 IBS_INFO_sys_id",
+                    "type": "string"
+                },
+                "ticket": {
+                    "description": "OA验证的ticket,一个长串,通常附加在访问内网应用的URL上,主要用来验证用户身份,可以留空",
+                    "type": "string"
+                },
+                "url": {
+                    "description": "ieg 备份系统 api url 地址,会在后面拼接 /query /recover 后缀进行请求\n可从环境变量获取 IBS_INFO_url",
+                    "type": "string",
+                    "example": "http://127.0.0.1/backupApi"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryForRecover": {
+            "type": "object",
+            "properties": {
+                "begin_date": {
+                    "description": "查询文件起始时间,备份系统以 file_last_mtime 为条件",
+                    "type": "string"
+                },
+                "end_date": {
+                    "description": "哪一天提交,结束时间,与begin_date形成一个时间范围,建议begin_date与end_date形成的时间范围不要超过3天",
+                    "type": "string"
+                },
+                "source_ip": {
+                    "description": "来源IP,即提交备份任务的机器IP",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryParam": {
+            "type": "object",
+            "required": [
+                "begin_date",
+                "end_date",
+                "filename",
+                "ibs_info",
+                "source_ip"
+            ],
+            "properties": {
+                "begin_date": {
+                    "description": "哪一天提交,起始时间",
+                    "type": "string"
+                },
+                "end_date": {
+                    "description": "哪一天提交,结束时间,与begin_date形成一个时间范围,建议begin_date与end_date形成的时间范围不要超过3天",
+                    "type": "string"
+                },
+                "filename": {
+                    "description": "文件名",
+                    "type": "string"
+                },
+                "ibs_info": {
+                    "description": "ieg backup system url and auth params",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSBaseInfo"
+                        }
+                    ]
+                },
+                "source_ip": {
+                    "description": "来源IP,即提交备份任务的机器IP",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryResult": {
+            "type": "object",
+            "properties": {
+                "bkstif": {
+                    "description": "备份状态信息, 'done, success', 'Fail: bad md5' 等",
+                    "type": "string"
+                },
+                "createTime": {
+                    "description": "非备份系统字段,全备(截取文件名中的字段),binlog 打开文件读取",
+                    "type": "string"
+                },
+                "expire_time": {
+                    "type": "string"
+                },
+                "expired": {
+                    "type": "string"
+                },
+                "file_last_mtime": {
+                    "description": "文件最后修改时间",
+                    "type": "string"
+                },
+                "file_name": {
+                    "type": "string"
+                },
+                "file_tag": {
+                    "type": "string"
+                },
+                "md5": {
+                    "type": "string"
+                },
+                "path": {
+                    "description": "非备份系统字段",
+                    "type": "string"
+                },
+                "size": {
+                    "description": "文件大小",
+                    "type": "string"
+                },
+                "source_ip": {
+                    "description": "上报该备份任务的IP",
+                    "type": "string"
+                },
+                "source_port": {
+                    "description": "非备份系统字段",
+                    "type": "string"
+                },
+                "status": {
+                    "description": "文件状态",
+                    "type": "string"
+                },
+                "task_id": {
+                    "description": "任务ID,用于下载",
+                    "type": "string"
+                },
+                "uptime": {
+                    "description": "备份任务上报时间",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSRecoverComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSRecoverParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSRecoverParam": {
+            "type": "object",
+            "required": [
+                "dest_ip",
+                "diretory",
+                "ibs_info",
+                "login_user"
+            ],
+            "properties": {
+                "dest_ip": {
+                    "description": "目标IP,文件恢复到哪一台机器上的",
+                    "type": "string",
+                    "example": "127.0.0.1"
+                },
+                "diretory": {
+                    "description": "diretory 是备份系统参数错误拼写",
+                    "type": "string",
+                    "example": "/data/dbbak"
+                },
+                "ibs_info": {
+                    "description": "ieg backup system url and auth params",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSBaseInfo"
+                        }
+                    ]
+                },
+                "ibs_query": {
+                    "description": "根据文件名下载,或者判断是否跳过下载时,需要提供 ibs_query 参数用于查询",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryForRecover"
+                        }
+                    ]
+                },
+                "login_passwd": {
+                    "description": "登录 dest_ip 的用户名的密码, ieg 传统scp 方式下载才需要。如果是 cos 下载则不需要",
+                    "type": "string"
+                },
+                "login_user": {
+                    "description": "登录 dest_ip 的用户名,下载后的文件属组是该用户",
+                    "type": "string"
+                },
+                "reason": {
+                    "description": "恢复原因(备注用途)",
+                    "type": "string"
+                },
+                "skip_local_exists": {
+                    "description": "如果本地目标目录已经存在对应文件,是否保留(即跳过下载). 默认 false",
+                    "type": "boolean",
+                    "example": false
+                },
+                "task_files": {
+                    "description": "如果是精确文件名下载,用 task_files。提供需要下载的文件列表,提供 task_id 或者完整的 file_name 即可\n如果顺便提供了 size 信息则不用请求备份系统获取大小 来决定文件是否需要重新下载",
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.TaskFilesExact"
+                    }
+                },
+                "task_files_wild": {
+                    "description": "如果是模糊匹配搜索并下载,用 task_files_wild",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.TaskFilesWild"
+                        }
+                    ]
+                },
+                "taskid_list": {
+                    "description": "taskid 列表,,逗号分隔。会根据 task_files 里的信息,追加到这里。这里一般不传值,在 task_files 里提供 task_id 或者 file_name",
+                    "type": "string",
+                    "example": "10000,100001"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSRecoverTask": {
+            "type": "object",
+            "properties": {
+                "file_last_mtime": {
+                    "description": "文件最后修改时间",
+                    "type": "string"
+                },
+                "file_name": {
+                    "type": "string"
+                },
+                "file_tag": {
+                    "type": "string"
+                },
+                "md5": {
+                    "type": "string"
+                },
+                "size": {
+                    "description": "文件大小",
+                    "type": "string"
+                },
+                "source_ip": {
+                    "description": "上报该备份任务的IP",
+                    "type": "string"
+                },
+                "status": {
+                    "description": "文件状态",
+                    "type": "string"
+                },
+                "task_id": {
+                    "description": "任务ID,用于下载",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.TaskFilesExact": {
+            "type": "object",
+            "properties": {
+                "file_name": {
+                    "type": "string"
+                },
+                "md5": {
+                    "type": "string"
+                },
+                "size": {
+                    "description": "文件大小",
+                    "type": "string"
+                },
+                "task_id": {
+                    "description": "任务ID,用于下载",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.TaskFilesWild": {
+            "type": "object",
+            "properties": {
+                "file_tag": {
+                    "type": "string"
+                },
+                "name_regex": {
+                    "description": "在搜索的结果里面,应用该正则进行过滤",
+                    "type": "string"
+                },
+                "name_search": {
+                    "description": "搜索的模糊条件,不用带 *",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_fileserver.FileServer": {
+            "type": "object",
+            "required": [
+                "auth_user",
+                "bind_address",
+                "mount_path"
+            ],
+            "properties": {
+                "acls": {
+                    "description": "访问来源限制,从前往后匹配。格式 `[\"allow 127.0.0.1/32\", \"deny all\"]`",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    },
+                    "example": [
+                        "allow all"
+                    ]
+                },
+                "auth_pass": {
+                    "description": "http basic auth pass,为空时会随机生成密码",
+                    "type": "string"
+                },
+                "auth_user": {
+                    "description": "http basic auth user",
+                    "type": "string"
+                },
+                "bind_address": {
+                    "description": "http file-server 监听地址. 不提供端口,会在 12000-19999 之间随机选择一个端口,不提供 ip 时默认 localhost",
+                    "type": "string"
+                },
+                "enable_tls": {
+                    "description": "暂不支持",
+                    "type": "boolean"
+                },
+                "max_connections": {
+                    "description": "限制最大连接数,超过需要等待. 为 0 时表示不限制",
+                    "type": "integer"
+                },
+                "mount_path": {
+                    "description": "将本地哪个目录通过 http 分享",
+                    "type": "string"
+                },
+                "path_prefix": {
+                    "description": "path_prefix 用在生成 url 时的路径前缀. 可留空",
+                    "type": "string"
+                },
+                "print_download": {
+                    "description": "输出 download http 的信息,方便使用",
+                    "type": "boolean"
+                },
+                "proc_maxidle_duration": {
+                    "description": "超过最大空闲时间,自动退出. 示例 3600s, 60m, 1h",
+                    "type": "string",
+                    "example": "1h"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_fileserver.FileServerComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_fileserver.FileServer"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BackupOptions": {
+            "type": "object",
+            "required": [
+                "BackupType",
+                "CrontabTime",
+                "Master"
+            ],
+            "properties": {
+                "BackupType": {
+                    "type": "string"
+                },
+                "CrontabTime": {
+                    "type": "string"
+                },
+                "Logical": {
+                    "type": "object",
+                    "properties": {
+                        "ExcludeDatabases": {
+                            "description": "\"mysql,test,db_infobase,information_schema,performance_schema,sys\"",
+                            "type": "string"
+                        },
+                        "ExcludeTables": {
+                            "type": "string"
+                        }
+                    }
+                },
+                "Master": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.LogicBackupDataOption"
+                },
+                "Slave": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.LogicBackupDataOption"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BinlogTimeComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BinlogTimeParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BinlogTimeParam": {
+            "type": "object",
+            "required": [
+                "binlog_dir",
+                "binlog_files"
+            ],
+            "properties": {
+                "binlog_dir": {
+                    "type": "string"
+                },
+                "binlog_files": {
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "format": {
+                    "type": "string",
+                    "enum": [
+                        "",
+                        "json",
+                        "dump"
+                    ]
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BuildMSRelationComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BuildMSRelationParam"
+                },
+                "general": {
+                    "description": "本地使用 ADMIN, change master 使用 repl",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                        }
+                    ]
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BuildMSRelationParam": {
+            "type": "object",
+            "required": [
+                "bin_file",
+                "bin_position",
+                "host",
+                "master_host",
+                "master_port",
+                "port"
+            ],
+            "properties": {
+                "bin_file": {
+                    "description": "binlog 文件名称",
+                    "type": "string"
+                },
+                "bin_position": {
+                    "description": "binlog 位点信息",
+                    "type": "integer",
+                    "minimum": 0
+                },
+                "force": {
+                    "description": "如果当前实例存在主从关系是否直接reset slave后,强制change master",
+                    "type": "boolean",
+                    "example": false
+                },
+                "host": {
+                    "description": "具体操作内容需要操作的参数",
+                    "type": "string"
+                },
+                "is_gtid": {
+                    "description": "是否启动GID方式进行建立主从",
+                    "type": "boolean"
+                },
+                "master_host": {
+                    "description": "change master to 主库ip",
+                    "type": "string"
+                },
+                "master_port": {
+                    "description": "change master to 主库端口",
+                    "type": "integer",
+                    "maximum": 65535,
+                    "minimum": 3306
+                },
+                "max_tolerate_delay": {
+                    "description": "最大容忍延迟, 当 主从延迟 小于 该值, 认为建立主从关系成功. 不传或者为 0 时,表示不检查",
+                    "type": "integer"
+                },
+                "not_start_io_thread": {
+                    "description": "不启动 io_thread。默认false 表示启动 io_thread",
+                    "type": "boolean",
+                    "example": false
+                },
+                "not_start_sql_thread": {
+                    "description": "不启动 sql_thread。默认false 表示启动 sql_thread",
+                    "type": "boolean",
+                    "example": false
+                },
+                "port": {
+                    "description": "当前实例的端口",
+                    "type": "integer",
+                    "maximum": 65535,
+                    "minimum": 3306
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.CleanMysqlComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.CleanMysqlParam"
+                },
+                "general": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.CleanMysqlParam": {
+            "type": "object",
+            "required": [
+                "tgt_instance"
+            ],
+            "properties": {
+                "check_interval_sec": {
+                    "type": "integer"
+                },
+                "drop_database": {
+                    "description": "是否执行 drop database,这里是确认行为. 如果 false 则只把 drop 命令打印到输出",
+                    "type": "boolean"
+                },
+                "force": {
+                    "description": "当实例不空闲时是否强制清空",
+                    "type": "boolean"
+                },
+                "reset_slave": {
+                    "description": "是否执行 reset slave all",
+                    "type": "boolean"
+                },
+                "restart": {
+                    "description": "drop_database 之后是否重启实例",
+                    "type": "boolean"
+                },
+                "stop_slave": {
+                    "description": "是否执行 stop slave",
+                    "type": "boolean"
+                },
+                "tgt_instance": {
+                    "description": "清空目标实例",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.Instance"
+                        }
+                    ]
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.ConfItemOp": {
+            "type": "object",
+            "required": [
+                "op_type"
+            ],
+            "properties": {
+                "conf_value": {
+                    "description": "ConfName  string `json:\"conf_name\" validate:\"required\"`",
+                    "type": "string"
+                },
+                "need_restart": {
+                    "type": "boolean"
+                },
+                "op_type": {
+                    "description": "配置项修改动作,允许值 `upsert`,`remove`",
+                    "type": "string",
+                    "enum": [
+                        "upsert",
+                        "remove"
+                    ]
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.DBHAAccount": {
+            "type": "object",
+            "required": [
+                "pwd",
+                "user"
+            ],
+            "properties": {
+                "access_hosts": {
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "pwd": {
+                    "type": "string"
+                },
+                "user": {
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.DumpSchemaParam": {
+            "type": "object",
+            "required": [
+                "charset",
+                "host",
+                "port"
+            ],
+            "properties": {
+                "backup_dir": {
+                    "type": "string"
+                },
+                "backup_file_name": {
+                    "type": "string"
+                },
+                "bk_cloud_id": {
+                    "description": "所在的云区域",
+                    "type": "integer"
+                },
+                "charset": {
+                    "description": "字符集参数",
+                    "type": "string"
+                },
+                "db_cloud_token": {
+                    "description": "云区域token",
+                    "type": "string"
+                },
+                "fileserver": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.FileServer"
+                },
+                "host": {
+                    "description": "当前实例的主机地址",
+                    "type": "string"
+                },
+                "port": {
+                    "description": "当前实例的端口",
+                    "type": "integer",
+                    "minimum": 3306
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.ExcuteSQLFileObj": {
+            "type": "object",
+            "properties": {
+                "dbnames": {
+                    "description": "需要变更的DBNames,支持模糊匹配",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "ignore_dbnames": {
+                    "description": "忽略的,需要排除变更的dbName,支持模糊匹配",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "sql_file": {
+                    "description": "变更文件名称",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.FileServer": {
+            "type": "object",
+            "properties": {
+                "bucket": {
+                    "description": "目标bucket",
+                    "type": "string"
+                },
+                "password": {
+                    "description": "制品库 password",
+                    "type": "string"
+                },
+                "project": {
+                    "description": "制品库 project",
+                    "type": "string"
+                },
+                "upload_path": {
+                    "description": "上传路径",
+                    "type": "string"
+                },
+                "url": {
+                    "description": "制品库地址",
+                    "type": "string"
+                },
+                "username": {
+                    "description": "制品库 username",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.FindLocalBackupParam": {
+            "type": "object",
+            "required": [
+                "backup_dirs",
+                "tgt_instance"
+            ],
+            "properties": {
+                "backup_dirs": {
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "cluster_id": {
+                    "description": "指定查询哪个 cluster_id 的备份,如果不指定可能查询到其它非法的备份",
+                    "type": "integer"
+                },
+                "file_server": {
+                    "type": "boolean"
+                },
+                "tgt_instance": {
+                    "description": "查找哪个实例的备份",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.Instance"
+                        }
+                    ]
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.FindLocalBackupResp": {
+            "type": "object",
+            "properties": {
+                "backups": {
+                    "description": "backups key 是 .info 文件",
+                    "type": "object",
+                    "additionalProperties": {
+                        "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.LocalBackupObj"
+                    }
+                },
+                "latest": {
+                    "description": "记录上面 backups 最近的一次备份",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallDBAToolkitComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallDBAToolkitParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallDBAToolkitParam": {
+            "type": "object",
+            "required": [
+                "pkg",
+                "pkg_md5"
+            ],
+            "properties": {
+                "exec_user": {
+                    "description": "发起执行actor的用户,仅用于审计",
+                    "type": "string"
+                },
+                "pkg": {
+                    "description": "安装包名",
+                    "type": "string"
+                },
+                "pkg_md5": {
+                    "description": "安装包MD5",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLChecksumComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLChecksumParam"
+                },
+                "general": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLChecksumParam": {
+            "type": "object",
+            "required": [
+                "pkg",
+                "pkg_md5"
+            ],
+            "properties": {
+                "api_url": {
+                    "type": "string"
+                },
+                "exec_user": {
+                    "type": "string"
+                },
+                "instances_info": {
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstanceInfo"
+                    }
+                },
+                "pkg": {
+                    "description": "安装包名",
+                    "type": "string"
+                },
+                "pkg_md5": {
+                    "description": "安装包MD5",
+                    "type": "string"
+                },
+                "schedule": {
+                    "type": "string"
+                },
+                "system_dbs": {
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLParams"
+                },
+                "general": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                },
+                "timeZone": {
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLParams": {
+            "type": "object",
+            "required": [
+                "charset",
+                "host",
+                "mycnf_configs",
+                "mysql_version",
+                "pkg",
+                "pkg_md5",
+                "ports"
+            ],
+            "properties": {
+                "allowDiskFileSystemTypes": {
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "charset": {
+                    "description": "字符集参数",
+                    "type": "string"
+                },
+                "dbha_account": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.DBHAAccount"
+                },
+                "host": {
+                    "type": "string"
+                },
+                "inst_mem": {
+                    "description": "安装实例的内存大小,可以不指定,会自动计算",
+                    "type": "integer"
+                },
+                "mycnf_configs": {
+                    "description": "map[port]my.cnf",
+                    "type": "array",
+                    "items": {
+                        "type": "integer"
+                    }
+                },
+                "mysql_version": {
+                    "description": "MySQLVerion 只需5.6 5.7 这样的大版本号",
+                    "type": "string"
+                },
+                "pkg": {
+                    "description": "安装包名",
+                    "type": "string"
+                },
+                "pkg_md5": {
+                    "description": "安装包MD5",
+                    "type": "string"
+                },
+                "ports": {
+                    "description": "Ports",
+                    "type": "array",
+                    "items": {
+                        "type": "integer"
+                    }
+                },
+                "spider_auto_incr_mode_map": {
+                    "type": "array",
+                    "items": {
+                        "type": "integer"
+                    }
+                },
+                "super_account": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SuperAccount"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallNewDbBackupComp": {
+            "type": "object",
+            "properties": {
+                "generalParam": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                },
+                "params": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallNewDbBackupParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallNewDbBackupParam": {
+            "type": "object",
+            "required": [
+                "bk_biz_id",
+                "configs",
+                "host",
+                "options",
+                "pkg",
+                "pkg_md5",
+                "ports",
+                "role"
+            ],
+            "properties": {
+                "bk_biz_id": {
+                    "description": "bkbizid",
+                    "type": "string"
+                },
+                "bk_cloud_id": {
+                    "description": "bk_cloud_id",
+                    "type": "string"
+                },
+                "cluster_address": {
+                    "description": "cluster addresss",
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "string"
+                    }
+                },
+                "cluster_id": {
+                    "description": "cluster id",
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "integer"
+                    }
+                },
+                "configs": {
+                    "description": "模板配置",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.Cnf"
+                        }
+                    ]
+                },
+                "exec_user": {
+                    "description": "执行Job的用户",
+                    "type": "string"
+                },
+                "host": {
+                    "description": "当前实例的主机地址",
+                    "type": "string"
+                },
+                "options": {
+                    "description": "选项参数配置",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BackupOptions"
+                        }
+                    ]
+                },
+                "pkg": {
+                    "description": "安装包名",
+                    "type": "string"
+                },
+                "pkg_md5": {
+                    "description": "安装包MD5",
+                    "type": "string"
+                },
+                "ports": {
+                    "description": "被监控机器的上所有需要监控的端口",
+                    "type": "array",
+                    "items": {
+                        "type": "integer"
+                    }
+                },
+                "role": {
+                    "description": "当前主机安装的mysqld的角色",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstanceInfo": {
+            "type": "object",
+            "properties": {
+                "bk_biz_id": {
+                    "type": "integer"
+                },
+                "bk_instance_id": {
+                    "description": "0 被视为空, 不序列化",
+                    "type": "integer"
+                },
+                "cluster_id": {
+                    "type": "integer"
+                },
+                "immute_domain": {
+                    "type": "string"
+                },
+                "ip": {
+                    "type": "string"
+                },
+                "port": {
+                    "type": "integer"
+                },
+                "role": {
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.LocalBackupObj": {
+            "type": "object",
+            "properties": {
+                "backup_dir": {
+                    "description": "备份所在目录",
+                    "type": "string"
+                },
+                "backup_id": {
+                    "type": "string"
+                },
+                "backup_time": {
+                    "description": "备份时间,目前是备份开始时间",
+                    "type": "string"
+                },
+                "backup_type": {
+                    "type": "string"
+                },
+                "bill_id": {
+                    "type": "string"
+                },
+                "bk_biz_id": {
+                    "type": "string"
+                },
+                "cluster_id": {
+                    "type": "integer"
+                },
+                "data_schema_grant": {
+                    "type": "string"
+                },
+                "db_role": {
+                    "type": "string"
+                },
+                "file_list": {
+                    "description": "InfoFile   common.InfoFileDetail `json:\"info_file\"`\n备份文件列表",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "index_file": {
+                    "type": "string"
+                },
+                "inst_host": {
+                    "description": "备份所属 host",
+                    "type": "string"
+                },
+                "inst_port": {
+                    "description": "备份所属 port",
+                    "type": "integer"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfChangeComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfChangeParam"
+                },
+                "general": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfChangeParam": {
+            "type": "object",
+            "required": [
+                "items",
+                "persistent",
+                "restart",
+                "tgt_instance"
+            ],
+            "properties": {
+                "items": {
+                    "type": "object",
+                    "additionalProperties": {
+                        "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.ConfItemOp"
+                    }
+                },
+                "persistent": {
+                    "description": "是否持久化到 my.cnf 文件,-1: 不持久化,1: 持久化, 2: 仅持久化但不修改运行时",
+                    "type": "integer",
+                    "enum": [
+                        -1,
+                        1,
+                        2
+                    ]
+                },
+                "restart": {
+                    "description": "指定是否 允许重启, -1:不重启, 1: 重启, 2:根据 items need_restart 自动判断是否重启",
+                    "type": "integer",
+                    "enum": [
+                        -1,
+                        1,
+                        2
+                    ]
+                },
+                "tgt_instance": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfCloneComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfCloneParam"
+                },
+                "general": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfCloneParam": {
+            "type": "object",
+            "required": [
+                "persistent",
+                "restart",
+                "src_instance",
+                "tgt_instance"
+            ],
+            "properties": {
+                "items": {
+                    "description": "需要克隆哪些变量, 考虑到不同版本参数不一样,这里不要求指定一定存在; 只修改 mysqld 区。即失败忽略\n有些参数是 readonly 的,只会保存到 my.cnf 中,如果与运行值不一样需要用户重启\n默认值见 MycnfCloneItemsDefault",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "persistent": {
+                    "description": "是否持久化到 my.cnf 文件,0: 不持久化,1: 持久化, 2: 仅持久化但不修改运行时",
+                    "type": "integer",
+                    "enum": [
+                        0,
+                        1,
+                        2
+                    ],
+                    "example": 1
+                },
+                "restart": {
+                    "description": "指定是否 允许重启, 0:不重启, 1: 重启, 2:根据 items need_restart 自动判断是否重启",
+                    "type": "integer",
+                    "enum": [
+                        0,
+                        1,
+                        2
+                    ],
+                    "example": 2
+                },
+                "src_instance": {
+                    "description": "参数克隆,获取源实例,可以提供 repl 账号权限",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject"
+                        }
+                    ]
+                },
+                "tgt_instance": {
+                    "description": "应用到本地目标实例,需要有 ADMIN 权限",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject"
+                        }
+                    ]
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.PtTableChecksumComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.PtTableChecksumParam"
+                },
+                "general": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.PtTableChecksumParam": {
+            "type": "object",
+            "properties": {
+                "bk_biz_id": {
+                    "description": "业务 id",
+                    "type": "integer"
+                },
+                "cluster_id": {
+                    "description": "集群 id",
+                    "type": "integer"
+                },
+                "db_patterns": {
+                    "description": "库表过滤选项",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "ignore_dbs": {
+                    "description": "库表过滤选项",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "ignore_tables": {
+                    "description": "库表过滤选项",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "immute_domain": {
+                    "description": "集群域名",
+                    "type": "string"
+                },
+                "inner_role": {
+                    "description": "执行校验的 db inner role, 应该是[master, repeater]",
+                    "type": "string"
+                },
+                "master_access_slave_password": {
+                    "description": "从 db 访问 slave 的密码",
+                    "type": "string"
+                },
+                "master_access_slave_user": {
+                    "description": "从 db 访问 slave 的用户名",
+                    "type": "string"
+                },
+                "master_ip": {
+                    "description": "执行校验的 db ip",
+                    "type": "string"
+                },
+                "master_port": {
+                    "description": "执行校验的 db port",
+                    "type": "integer"
+                },
+                "replicate_table": {
+                    "description": "结果表, 带库前缀",
+                    "type": "string"
+                },
+                "runtime_hour": {
+                    "description": "校验运行时长",
+                    "type": "integer"
+                },
+                "slaves": {
+                    "description": "slave 列表",
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SlaveInfo"
+                    }
+                },
+                "system_dbs": {
+                    "description": "系统表",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "table_patterns": {
+                    "description": "库表过滤选项",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SemanticCheckComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SenmanticCheckParam"
+                },
+                "general": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SemanticDumpSchemaComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.DumpSchemaParam"
+                },
+                "general": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SenmanticCheckParam": {
+            "type": "object",
+            "required": [
+                "host",
+                "port",
+                "remote_host",
+                "remote_port",
+                "schemafile"
+            ],
+            "properties": {
+                "execute_objects": {
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.ExcuteSQLFileObj"
+                    }
+                },
+                "host": {
+                    "description": "语义检查实例的主机地址",
+                    "type": "string"
+                },
+                "port": {
+                    "description": "语义检查实例的端口",
+                    "type": "integer",
+                    "minimum": 3306
+                },
+                "remote_host": {
+                    "description": "用于获取目标实例的字符集,默认存储引擎",
+                    "type": "string"
+                },
+                "remote_port": {
+                    "description": "获取表结构的源实例Port",
+                    "type": "integer",
+                    "minimum": 3306
+                },
+                "schemafile": {
+                    "description": "表结构文件",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SlaveInfo": {
+            "type": "object",
+            "properties": {
+                "id": {
+                    "description": "slave id",
+                    "type": "integer"
+                },
+                "ip": {
+                    "description": "slave ip",
+                    "type": "string"
+                },
+                "port": {
+                    "description": "slave port",
+                    "type": "integer"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SuperAccount": {
+            "type": "object",
+            "required": [
+                "pwd",
+                "user"
+            ],
+            "properties": {
+                "access_hosts": {
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "pwd": {
+                    "type": "string"
+                },
+                "user": {
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.Cnf": {
+            "type": "object",
+            "required": [
+                "BackupClient",
+                "LogicalBackup",
+                "Public"
+            ],
+            "properties": {
+                "BackupClient": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfBackupClient"
+                },
+                "LogicalBackup": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfLogicalBackup"
+                },
+                "PhysicalBackup": {
+                    "description": "LogicalLoad          CnfLogicalLoad          `json:\"LogicalLoad\" ini:\"LogicalLoad\"`",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfPhysicalBackup"
+                        }
+                    ]
+                },
+                "Public": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfShared"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfBackupClient": {
+            "type": "object",
+            "properties": {
+                "doChecksum": {
+                    "type": "string"
+                },
+                "fileTag": {
+                    "type": "string"
+                },
+                "remoteFileSystem": {
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfLogicalBackup": {
+            "type": "object",
+            "properties": {
+                "chunkFilesize": {
+                    "description": "ChunkFilesize Split tables into chunks of this output file size. This value is in MB",
+                    "type": "integer"
+                },
+                "defaultsFile": {
+                    "type": "string"
+                },
+                "disableCompress": {
+                    "type": "boolean"
+                },
+                "extraOpt": {
+                    "description": "ExtraOpt other mydumper options string to be appended",
+                    "type": "string"
+                },
+                "flushRetryCount": {
+                    "type": "integer"
+                },
+                "regex": {
+                    "type": "string"
+                },
+                "threads": {
+                    "type": "integer"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfPhysicalBackup": {
+            "type": "object",
+            "required": [
+                "defaultsFile"
+            ],
+            "properties": {
+                "defaultsFile": {
+                    "type": "string"
+                },
+                "extraOpt": {
+                    "description": "ExtraOpt other xtrabackup options string to be appended",
+                    "type": "string"
+                },
+                "splitSpeed": {
+                    "description": "SplitSpeed tar split limit in MB/s, default 300",
+                    "type": "integer"
+                },
+                "threads": {
+                    "description": "Threads –parallel to copy files",
+                    "type": "integer"
+                },
+                "throttle": {
+                    "description": "Throttle limits the number of chunks copied per second. The chunk size is 10 MB, 0 means no limit",
+                    "type": "integer"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfShared": {
+            "type": "object",
+            "required": [
+                "backupDir",
+                "backupTimeOut",
+                "tarSizeThreshold"
+            ],
+            "properties": {
+                "backupDir": {
+                    "type": "string"
+                },
+                "backupId": {
+                    "type": "string"
+                },
+                "backupTimeOut": {
+                    "type": "string"
+                },
+                "backupType": {
+                    "type": "string"
+                },
+                "billId": {
+                    "type": "string"
+                },
+                "bkBizId": {
+                    "type": "string"
+                },
+                "bkCloudId": {
+                    "type": "string"
+                },
+                "clusterAddress": {
+                    "type": "string"
+                },
+                "clusterId": {
+                    "type": "string"
+                },
+                "dataSchemaGrant": {
+                    "type": "string"
+                },
+                "iolimitMBPerSec": {
+                    "description": "IOLimitMBPerSec tar or split default io limit, mb/s. 0 means no limit",
+                    "type": "integer"
+                },
+                "mysqlCharset": {
+                    "type": "string"
+                },
+                "mysqlHost": {
+                    "type": "string"
+                },
+                "mysqlPasswd": {
+                    "type": "string"
+                },
+                "mysqlPort": {
+                    "type": "string"
+                },
+                "mysqlRole": {
+                    "type": "string"
+                },
+                "mysqlUser": {
+                    "type": "string"
+                },
+                "oldFileLeftDay": {
+                    "type": "string"
+                },
+                "resultReportPath": {
+                    "type": "string"
+                },
+                "statusReportPath": {
+                    "type": "string"
+                },
+                "tarSizeThreshold": {
+                    "description": "TarSizeThreshold tar file will be split to this package size. MB",
+                    "type": "integer",
+                    "minimum": 128
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.LogicBackupDataOption": {
+            "type": "object",
+            "properties": {
+                "DataSchemaGrant": {
+                    "description": "\"grant,schema,data\"",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_grant.GrantReplComp": {
+            "type": "object",
+            "properties": {
+                "db": {
+                    "description": "本地db链接",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.DbWorker"
+                        }
+                    ]
+                },
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_grant.GrantReplParam"
+                },
+                "general": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_grant.GrantReplParam": {
+            "type": "object",
+            "properties": {
+                "host": {
+                    "description": "当前实例的主机地址",
+                    "type": "string"
+                },
+                "port": {
+                    "description": "当前实例的端口",
+                    "type": "integer"
+                },
+                "repl_hosts": {
+                    "description": "slave host",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.MySQLBinlogUtil": {
+            "type": "object",
+            "properties": {
+                "databases": {
+                    "description": "row event 解析指定 databases",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "databases_ignore": {
+                    "description": "row event 解析指定 忽略 databases",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "filter_statement_match_error": {
+                    "description": "匹配字符串成功,则解析 binlog 报错",
+                    "type": "string"
+                },
+                "filter_statement_match_ignore": {
+                    "description": "匹配字符串成功,则忽略语句,加入注释中",
+                    "type": "string"
+                },
+                "filter_statement_match_ignore_force": {
+                    "description": "匹配字符串成功,强制忽略语句,加入注释中。当与 filter_statement_match_error 都匹配时,ignore_force会优先生效\n默认 infodba_schema",
+                    "type": "string"
+                },
+                "flashback": {
+                    "description": "是否启用 flashback",
+                    "type": "boolean"
+                },
+                "idempotent_mode": {
+                    "description": "是否开启幂等模式, mysql --slave-exec-mode=idempotent or mysqlbinlog --idempotent",
+                    "type": "boolean"
+                },
+                "mysql_client_opt": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.MySQLClientOpt"
+                },
+                "not_write_binlog": {
+                    "description": "导入时是否记录 binlog, mysql sql_log_bin=0 or mysqlbinlog --disable-log-bin. true表示不写",
+                    "type": "boolean"
+                },
+                "query_event_handler": {
+                    "description": "query event 默认处理策略。keep:保留解析出的query event 语句, ignore:注释(丢弃)该 query event, error:认为是不接受的语句,报错\n默认 keep",
+                    "type": "string",
+                    "enum": [
+                        "keep",
+                        "ignore",
+                        "safe",
+                        "error"
+                    ]
+                },
+                "rewrite_db": {
+                    "description": "--rewrite_db=\"db1-\u003exx_db1,db2-\u003exx_db2\"",
+                    "type": "string"
+                },
+                "start_pos": {
+                    "description": "--start-position",
+                    "type": "integer"
+                },
+                "start_time": {
+                    "description": "--start-datetime",
+                    "type": "string"
+                },
+                "stop_pos": {
+                    "description": "--stop-position",
+                    "type": "integer"
+                },
+                "stop_time": {
+                    "description": "--stop-datetime",
+                    "type": "string"
+                },
+                "tables": {
+                    "description": "row event 解析指定 tables",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "tables_ignore": {
+                    "description": "row event 解析指定 忽略 tables",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.MySQLClientOpt": {
+            "type": "object",
+            "properties": {
+                "binary_mode": {
+                    "description": "是否启用 --binary-mode",
+                    "type": "boolean"
+                },
+                "max_allowed_packet": {
+                    "type": "integer"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RecoverBinlog": {
+            "type": "object",
+            "required": [
+                "binlog_dir",
+                "binlog_files",
+                "recover_opt",
+                "tgt_instance",
+                "work_dir"
+            ],
+            "properties": {
+                "binlog_dir": {
+                    "description": "恢复时 binlog 存放目录,一般是下载目录",
+                    "type": "string",
+                    "example": "/data/dbbak/123456/binlog"
+                },
+                "binlog_files": {
+                    "description": "binlog列表",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "binlog_start_file": {
+                    "description": "指定要开始应用的第 1 个 binlog。如果指定,一般要设置 start_pos,如果不指定则使用 start_time",
+                    "type": "string"
+                },
+                "parse_concurrency": {
+                    "description": "解析的并发度,默认 1",
+                    "type": "integer"
+                },
+                "parse_only": {
+                    "description": "仅解析 binlog,不做导入",
+                    "type": "boolean"
+                },
+                "quick_mode": {
+                    "description": "如果启用 quick_mode,解析 binlog 时根据 filter databases 等选项过滤 row event,对 query event 会全部保留 。需要 mysqlbinlog 工具支持 --tables 选项,可以指定参数的 tools\n当 quick_mode=false 时,recover_opt 里的 databases 等选项无效,会应用全部 binlog",
+                    "type": "boolean"
+                },
+                "recover_opt": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.MySQLBinlogUtil"
+                },
+                "source_binlog_format": {
+                    "type": "string",
+                    "enum": [
+                        "",
+                        "ROW",
+                        "STATEMENT",
+                        "MIXED"
+                    ]
+                },
+                "tgt_instance": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject"
+                },
+                "tools": {
+                    "description": "外部指定工具路径",
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "string"
+                    }
+                },
+                "work_dir": {
+                    "description": "binlog 解析所在目录,存放运行日志",
+                    "type": "string",
+                    "example": "/data/dbbak/"
+                },
+                "work_id": {
+                    "type": "string",
+                    "example": "123456"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RecoverBinlogComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RecoverBinlog"
+                },
+                "general": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RestoreDRComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "description": "恢复参数,会复制给具体的 Restore 实现. 见 ChooseType 方法",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RestoreParam"
+                        }
+                    ]
+                },
+                "general": {
+                    "description": "通用参数",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                        }
+                    ]
+                },
+                "resume": {
+                    "description": "是否是中断后继续执行",
+                    "type": "boolean"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RestoreOpt": {
+            "type": "object",
+            "properties": {
+                "databases": {
+                    "description": "恢复哪些 db,当前只对 逻辑恢复有效",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "ignore_databases": {
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "ignore_tables": {
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "recover_binlog": {
+                    "description": "在指定时间点回档场景才需要,是否恢复 binlog。在 doSlave 场景,是不需要 recover_binlog。这个选项是控制下一步恢复binlog的行为\n当 recover_binlog 时,要确保实例的所有库表结构都恢复。在逻辑回档场景,只回档部分库表数据时,依然要恢复所有表结构",
+                    "type": "boolean"
+                },
+                "recover_privs": {
+                    "type": "boolean"
+                },
+                "source_binlog_format": {
+                    "description": "在库表级定点回档时有用,如果是 statement/mixed 格式,导入数据时需要全部导入;\n如果是 row,可只导入指定库表数据, 在 recover-binlog 时可指定 quick_mode=true 也恢复指定库表 binlog",
+                    "type": "string",
+                    "enum": [
+                        "",
+                        "ROW",
+                        "STATEMENT",
+                        "MIXED"
+                    ]
+                },
+                "tables": {
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RestoreParam": {
+            "type": "object",
+            "required": [
+                "backup_dir",
+                "backup_files",
+                "work_dir"
+            ],
+            "properties": {
+                "backup_dir": {
+                    "description": "备份文件所在本地目录,理论上doDr不会对该目录写入,而是写入 targetDir",
+                    "type": "string",
+                    "example": "/data/dbbak"
+                },
+                "backup_files": {
+                    "description": "备份文件名列表,key 是 info|full|priv|index, value 是是相对于 backup_dir 的文件名列表",
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "array",
+                        "items": {
+                            "type": "string"
+                        }
+                    }
+                },
+                "change_master": {
+                    "description": "恢复完成后是否执行 change master,会 change master 到 src_instance",
+                    "type": "boolean"
+                },
+                "restore_opts": {
+                    "description": "恢复选项,比如恢复库表、是否导入binlog等。目前只对逻辑恢复有效",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RestoreOpt"
+                        }
+                    ]
+                },
+                "src_instance": {
+                    "description": "备份实例的 ip port,用于生产 change master 语句",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.Instance"
+                        }
+                    ]
+                },
+                "tgt_instance": {
+                    "description": "恢复本地的目标实例",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject"
+                        }
+                    ]
+                },
+                "tools": {
+                    "description": "恢复用到的客户端工具,不提供时会有默认值",
+                    "allOf": [
+                        {
+                            "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_tools.ToolSet"
+                        }
+                    ]
+                },
+                "work_dir": {
+                    "description": "备份恢复目录,工作目录",
+                    "type": "string",
+                    "example": "/data1/dbbak"
+                },
+                "work_id": {
+                    "description": "work_id 标识本次恢复,若为0则为当前时间戳",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_rollback.Flashback": {
+            "type": "object",
+            "required": [
+                "recover_opt",
+                "target_time",
+                "tgt_instance",
+                "work_dir"
+            ],
+            "properties": {
+                "binlog_dir": {
+                    "description": "当 binlog_dir 不为空,表示 binlog 已下载;当为空时,目前只从本地软连接",
+                    "type": "string"
+                },
+                "binlog_files": {
+                    "description": "binlog列表,如果不提供,则自动从本地查找符合时间范围的 binlog",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "parse_concurrency": {
+                    "description": "解析binlog并发度",
+                    "type": "integer"
+                },
+                "recover_opt": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_rollback.RecoverOpt"
+                },
+                "stop_time": {
+                    "type": "string"
+                },
+                "target_time": {
+                    "description": "闪回的目标时间点,对应 recover-binlog 的 start_time, 精确到秒。目标实例的时区",
+                    "type": "string"
+                },
+                "tgt_instance": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject"
+                },
+                "tools": {
+                    "description": "外部指定工具路径",
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "string"
+                    }
+                },
+                "work_dir": {
+                    "description": "binlog 解析所在目录,存放运行日志",
+                    "type": "string"
+                },
+                "work_id": {
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_rollback.FlashbackComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_rollback.Flashback"
+                },
+                "general": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_rollback.RecoverOpt": {
+            "type": "object",
+            "properties": {
+                "databases": {
+                    "description": "row event 解析指定 databases",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "databases_ignore": {
+                    "description": "row event 解析指定 忽略 databases",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "filter_rows": {
+                    "description": "暂不支持行级闪回",
+                    "type": "string"
+                },
+                "tables": {
+                    "description": "row event 解析指定 tables",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                },
+                "tables_ignore": {
+                    "description": "row event 解析指定 忽略 tables",
+                    "type": "array",
+                    "items": {
+                        "type": "string"
+                    }
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.InitClusterRoutingComp": {
+            "type": "object",
+            "properties": {
+                "extend": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.InitClusterRoutingParam"
+                },
+                "general": {
+                    "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.InitClusterRoutingParam": {
+            "type": "object",
+            "required": [
+                "ctl_instances",
+                "host",
+                "mysql_instances",
+                "port",
+                "spider_instances"
+            ],
+            "properties": {
+                "ctl_instances": {
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.Instance"
+                    }
+                },
+                "host": {
+                    "type": "string"
+                },
+                "mysql_instances": {
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.Instance"
+                    }
+                },
+                "port": {
+                    "type": "integer",
+                    "minimum": 3306
+                },
+                "spider_instances": {
+                    "type": "array",
+                    "items": {
+                        "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.Instance"
+                    }
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.Instance": {
+            "type": "object",
+            "properties": {
+                "host": {
+                    "type": "string"
+                },
+                "port": {
+                    "type": "integer"
+                },
+                "shard_id": {
+                    "type": "integer"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_native.DbWorker": {
+            "type": "object",
+            "properties": {
+                "db": {
+                    "$ref": "#/definitions/sql.DB"
+                },
+                "dsn": {
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject": {
+            "type": "object",
+            "properties": {
+                "charset": {
+                    "description": "连接字符集",
+                    "type": "string"
+                },
+                "host": {
+                    "description": "当前实例的主机地址",
+                    "type": "string"
+                },
+                "options": {
+                    "description": "其它选项",
+                    "type": "string"
+                },
+                "port": {
+                    "description": "当前实例的端口",
+                    "type": "integer"
+                },
+                "pwd": {
+                    "description": "连接当前实例的User Pwd",
+                    "type": "string"
+                },
+                "socket": {
+                    "description": "连接socket",
+                    "type": "string"
+                },
+                "user": {
+                    "description": "连接当前实例的User",
+                    "type": "string"
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_native.Instance": {
+            "type": "object",
+            "properties": {
+                "host": {
+                    "description": "当前实例的主机地址",
+                    "type": "string",
+                    "example": "127.0.0.1"
+                },
+                "port": {
+                    "description": "当前实例的端口",
+                    "type": "integer",
+                    "example": 33060
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_tools.ToolSet": {
+            "type": "object",
+            "properties": {
+                "tools": {
+                    "description": "外部指定工具路径",
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "string"
+                    }
+                }
+            }
+        },
+        "dbm-services_mysql_db-tools_dbactuator_pkg_util_mysqlutil.ChangeMaster": {
+            "type": "object",
+            "required": [
+                "master_host",
+                "master_password",
+                "master_port",
+                "master_user"
+            ],
+            "properties": {
+                "change_sql": {
+                    "type": "string"
+                },
+                "channel": {
+                    "type": "string"
+                },
+                "executed_gtid_set": {
+                    "type": "string"
+                },
+                "force": {
+                    "description": "如果当前实例存在主从关系是否直接reset slave后,强制change master",
+                    "type": "boolean"
+                },
+                "is_gtid": {
+                    "description": "是否启动GID方式进行建立主从",
+                    "type": "boolean"
+                },
+                "master_auto_position": {
+                    "type": "integer"
+                },
+                "master_host": {
+                    "description": "主库ip",
+                    "type": "string"
+                },
+                "master_log_file": {
+                    "description": "binlog 文件名称",
+                    "type": "string"
+                },
+                "master_log_pos": {
+                    "description": "binlog 位点信息",
+                    "type": "integer"
+                },
+                "master_password": {
+                    "type": "string"
+                },
+                "master_port": {
+                    "description": "主库端口",
+                    "type": "integer",
+                    "minimum": 3306
+                },
+                "master_user": {
+                    "type": "string"
+                },
+                "max_tolerate_delay": {
+                    "description": "最大容忍延迟,即主从延迟小于该值,认为建立主从关系成功",
+                    "type": "integer"
+                }
+            }
+        },
+        "internal_subcmd_commoncmd.RMLargeFileParam": {
+            "type": "object",
+            "required": [
+                "bw_limit_mb",
+                "filename"
+            ],
+            "properties": {
+                "bw_limit_mb": {
+                    "description": "删除速度,MB/s,默认 30",
+                    "type": "integer",
+                    "default": 30,
+                    "maximum": 1000,
+                    "minimum": 1
+                },
+                "filename": {
+                    "type": "string"
+                }
+            }
+        },
+        "sql.DB": {
+            "type": "object"
+        }
+    }
+}
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/docs/swagger.yaml b/dbm-services/mysql/db-tools/dbactuator/docs/swagger.yaml
new file mode 100644
index 0000000000..e6b8ef78c9
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/docs/swagger.yaml
@@ -0,0 +1,2223 @@
+basePath: /
+definitions:
+  dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam:
+    properties:
+      runtime_account:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.RuntimeAccountParam'
+      runtime_extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.RuntimeExtend'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components.RuntimeAccountParam:
+    properties:
+      admin_pwd:
+        description: mysql admin password, env var GENERAL_ACCOUNT_admin_pwd
+        type: string
+      admin_user:
+        description: mysql admin account, env var GENERAL_ACCOUNT_admin_user
+        type: string
+      backup_pwd:
+        description: dbbackup pwd
+        type: string
+      backup_user:
+        description: dbbackup user
+        type: string
+      monitor_access_all_pwd:
+        description: mysql monitor@% password
+        type: string
+      monitor_access_all_user:
+        description: mysql monitor@%
+        type: string
+      monitor_pwd:
+        description: mysql monitor password, env var GENERAL_ACCOUNT_monitor_pwd
+        type: string
+      monitor_user:
+        description: mysql monitor account, env var GENERAL_ACCOUNT_monitor_user
+        type: string
+      proxy_admin_pwd:
+        description: proxy admin pwd
+        type: string
+      proxy_admin_user:
+        description: proxy admin user
+        type: string
+      repl_pwd:
+        description: repl pwd, env var GENERAL_ACCOUNT_repl_pwd
+        type: string
+      repl_user:
+        description: repl user, env var GENERAL_ACCOUNT_repl_user
+        type: string
+      tdbctl_pwd:
+        type: string
+      tdbctl_user:
+        type: string
+      yw_pwd:
+        description: yw pwd
+        type: string
+      yw_user:
+        description: yw user
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components.RuntimeExtend:
+    properties:
+      mysql_sys_users:
+        items:
+          type: string
+        type: array
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.DFHttpParam:
+    properties:
+      auth_pass:
+        description: http url basic auth pass
+        type: string
+      auth_user:
+        description: http url basic auth user
+        type: string
+      bk_biz_id:
+        type: integer
+      bwlimit_mb:
+        description: per-file download rate limit, in MB/s
+        type: integer
+      curl_options:
+        items:
+          type: string
+        type: array
+      curl_path:
+        description: path to the curl command, empty by default. Currently only used to test the url
+        type: string
+      file_list:
+        description: which files to download
+        items:
+          type: string
+        type: array
+      max_concurrency:
+        description: number of concurrent downloads
+        type: integer
+      path_tgt:
+        description: local directory to store the files in
+        type: string
+      server:
+        description: download url
+        type: string
+    required:
+    - file_list
+    - path_tgt
+    - server
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.DFScpParam:
+    properties:
+      bk_biz_id:
+        type: integer
+      bwlimit_mb:
+        description: per-file download rate limit, in MB/s
+        type: integer
+      file_src:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.FileSrc'
+        description: download source
+      file_tgt:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.FileTgt'
+        description: download target
+      max_concurrency:
+        description: number of concurrent downloads
+        type: integer
+    required:
+    - file_src
+    - file_tgt
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.FileSrc:
+    properties:
+      file_list:
+        description: list of source file names, relative to the path above
+        items:
+          type: string
+        type: array
+      match:
+        type: string
+      path:
+        description: directory containing the source files
+        type: string
+      ssh_host:
+        type: string
+      ssh_pass:
+        type: string
+      ssh_port:
+        type: string
+      ssh_user:
+        type: string
+    required:
+    - file_list
+    - path
+    - ssh_host
+    - ssh_port
+    - ssh_user
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.FileTgt:
+    properties:
+      path:
+        description: target directory for the downloaded files
+        type: string
+    required:
+    - path
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSBaseInfo:
+    properties:
+      key:
+        description: 16-character string assigned by the backup system; can be read from env var IBS_INFO__key
+        type: string
+      sys_id:
+        description: application identifier, i.e. which system accesses this API; can be read from env var IBS_INFO_sys_id
+        type: string
+      ticket:
+        description: OA authentication ticket, a long string usually appended to URLs of intranet applications to verify user identity; may be left empty
+        type: string
+      url:
+        description: |-
+          ieg backup system api url; /query or /recover suffixes are appended for requests
+          can be read from env var IBS_INFO_url
+        example: http://127.0.0.1/backupApi
+        type: string
+    required:
+    - key
+    - sys_id
+    - url
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryForRecover:
+    properties:
+      begin_date:
+        description: start time of the file query; the backup system filters on file_last_mtime
+        type: string
+      end_date:
+        description: submission end date; forms a time range with begin_date. It is recommended that the range between begin_date and end_date not exceed 3 days
+        type: string
+      source_ip:
+        description: source IP, i.e. the IP of the machine that submitted the backup task
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryParam:
+    properties:
+      begin_date:
+        description: submission start date
+        type: string
+      end_date:
+        description: submission end date; forms a time range with begin_date. It is recommended that the range between begin_date and end_date not exceed 3 days
+        type: string
+      filename:
+        description: file name
+        type: string
+      ibs_info:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSBaseInfo'
+        description: ieg backup system url and auth params
+      source_ip:
+        description: source IP, i.e. the IP of the machine that submitted the backup task
+        type: string
+    required:
+    - begin_date
+    - end_date
+    - filename
+    - ibs_info
+    - source_ip
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryResult:
+    properties:
+      bkstif:
+        description: 'backup status info, e.g. ''done, success'', ''Fail: bad md5'''
+        type: string
+      createTime:
+        description: not a backup-system field; for full backups it is cut from the file name, for binlog it is read by opening the file
+        type: string
+      expire_time:
+        type: string
+      expired:
+        type: string
+      file_last_mtime:
+        description: file last-modified time
+        type: string
+      file_name:
+        type: string
+      file_tag:
+        type: string
+      md5:
+        type: string
+      path:
+        description: not a backup-system field
+        type: string
+      size:
+        description: file size
+        type: string
+      source_ip:
+        description: IP that reported this backup task
+        type: string
+      source_port:
+        description: not a backup-system field
+        type: string
+      status:
+        description: file status
+        type: string
+      task_id:
+        description: task ID, used for download
+        type: string
+      uptime:
+        description: time the backup task was reported
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSRecoverComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSRecoverParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSRecoverParam:
+    properties:
+      dest_ip:
+        description: destination IP, i.e. the machine the files are restored to
+        example: 127.0.0.1
+        type: string
+      diretory:
+        description: diretory is the backup system's misspelled parameter name, kept as-is
+        example: /data/dbbak
+        type: string
+      ibs_info:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSBaseInfo'
+        description: ieg backup system url and auth params
+      ibs_query:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryForRecover'
+        description: when downloading by file name, or checking whether a download can be skipped, ibs_query must be provided for querying
+      login_passwd:
+        description: password of the login user on dest_ip; only needed for the legacy ieg scp download, not for cos downloads
+        type: string
+      login_user:
+        description: login user on dest_ip; downloaded files are owned by this user
+        type: string
+      reason:
+        description: reason for the restore (used as a remark)
+        type: string
+      skip_local_exists:
+        description: if the corresponding file already exists in the local target directory, whether to keep it (i.e. skip the download). Defaults to false
+        example: false
+        type: boolean
+      task_files:
+        description: |-
+          for exact file-name downloads use task_files. Provide the list of files to download; task_id or the full file_name is enough
+          if size is also provided, there is no need to query the backup system for the size when deciding whether a file must be re-downloaded
+        items:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.TaskFilesExact'
+        type: array
+      task_files_wild:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.TaskFilesWild'
+        description: for fuzzy search-and-download, use task_files_wild
+      taskid_list:
+        description: list of task ids, comma separated. Entries are appended here from
+          the info in task_files. Usually left empty; provide task_id or file_name in
+          task_files instead
+        example: 10000,100001
+        type: string
+    required:
+    - dest_ip
+    - diretory
+    - ibs_info
+    - login_user
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSRecoverTask:
+    properties:
+      file_last_mtime:
+        description: file last-modified time
+        type: string
+      file_name:
+        type: string
+      file_tag:
+        type: string
+      md5:
+        type: string
+      size:
+        description: file size
+        type: string
+      source_ip:
+        description: IP that reported this backup task
+        type: string
+      status:
+        description: file status
+        type: string
+      task_id:
+        description: task ID, used for download
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.TaskFilesExact:
+    properties:
+      file_name:
+        type: string
+      md5:
+        type: string
+      size:
+        description: file size
+        type: string
+      task_id:
+        description: task ID, used for download
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.TaskFilesWild:
+    properties:
+      file_tag:
+        type: string
+      name_regex:
+        description: regex applied to filter the search results
+        type: string
+      name_search:
+        description: fuzzy search pattern, no * needed
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_fileserver.FileServer:
+    properties:
+      acls:
+        description: access-source restrictions, matched front to back. Format `["allow 127.0.0.1/32", "deny all"]`
+        example:
+        - allow all
+        items:
+          type: string
+        type: array
+      auth_pass:
+        description: http basic auth pass; a random password is generated when empty
+        type: string
+      auth_user:
+        description: http basic auth user
+        type: string
+      bind_address:
+        description: http file-server listen address. If no port is given, a random port
+          between 12000-19999 is chosen; if no ip is given, localhost is the default
+        type: string
+      enable_tls:
+        description: not yet supported
+        type: boolean
+      max_connections:
+        description: maximum number of connections; excess connections wait. 0 means unlimited
+        type: integer
+      mount_path:
+        description: local directory to share over http
+        type: string
+      path_prefix:
+        description: path_prefix is the path prefix used when generating urls. May be left empty
+        type: string
+      print_download:
+        description: print the download http info, for convenience
+        type: boolean
+      proc_maxidle_duration:
+        description: exit automatically after exceeding the maximum idle time. Examples 3600s, 60m, 1h
+        example: 1h
+        type: string
+    required:
+    - auth_user
+    - bind_address
+    - mount_path
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_fileserver.FileServerComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_fileserver.FileServer'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BackupOptions:
+    properties:
+      BackupType:
+        type: string
+      CrontabTime:
+        type: string
+      Logical:
+        properties:
+          ExcludeDatabases:
+            description: '"mysql,test,db_infobase,information_schema,performance_schema,sys"'
+            type: string
+          ExcludeTables:
+            type: string
+        type: object
+      Master:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.LogicBackupDataOption'
+      Slave:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.LogicBackupDataOption'
+    required:
+    - BackupType
+    - CrontabTime
+    - Master
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BinlogTimeComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BinlogTimeParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BinlogTimeParam:
+    properties:
+      binlog_dir:
+        type: string
+      binlog_files:
+        items:
+          type: string
+        type: array
+      format:
+        enum:
+        - ""
+        - json
+        - dump
+        type: string
+    required:
+    - binlog_dir
+    - binlog_files
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BuildMSRelationComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BuildMSRelationParam'
+      general:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+        description: ADMIN is used locally, repl is used for change master
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BuildMSRelationParam:
+    properties:
+      bin_file:
+        description: binlog file name
+        type: string
+      bin_position:
+        description: binlog position
+        minimum: 0
+        type: integer
+      force:
+        description: if the current instance already has a replication relationship, whether to run reset slave and then force the change master
+        example: false
+        type: boolean
+      host:
+        description: parameter required by the specific operation
+        type: string
+      is_gtid:
+        description: whether to establish replication using GTID mode
+        type: boolean
+      master_host:
+        description: change master to master ip
+        type: string
+      master_port:
+        description: change master to master port
+        maximum: 65535
+        minimum: 3306
+        type: integer
+      max_tolerate_delay:
+        description: maximum tolerated delay; replication setup is considered successful when the lag is below this value. Omitted or 0 means no check
+        type: integer
+      not_start_io_thread:
+        description: do not start io_thread. Defaults to false, i.e. io_thread is started
+        example: false
+        type: boolean
+      not_start_sql_thread:
+        description: do not start sql_thread. Defaults to false, i.e. sql_thread is started
+        example: false
+        type: boolean
+      port:
+        description: port of the current instance
+        maximum: 65535
+        minimum: 3306
+        type: integer
+    required:
+    - bin_file
+    - bin_position
+    - host
+    - master_host
+    - master_port
+    - port
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.CleanMysqlComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.CleanMysqlParam'
+      general:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.CleanMysqlParam:
+    properties:
+      check_interval_sec:
+        type: integer
+      drop_database:
+        description: whether to actually drop databases; this is the confirmation. If false, the drop commands are only printed to the output
+        type: boolean
+      force:
+        description: whether to force the cleanup when the instance is not idle
+        type: boolean
+      reset_slave:
+        description: whether to run reset slave all
+        type: boolean
+      restart:
+        description: whether to restart the instance after drop_database
+        type: boolean
+      stop_slave:
+        description: whether to run stop slave
+        type: boolean
+      tgt_instance:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.Instance'
+        description: target instance to clean
+    required:
+    - tgt_instance
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.ConfItemOp:
+    properties:
+      conf_value:
+        description: ConfName  string `json:"conf_name" validate:"required"`
+        type: string
+      need_restart:
+        type: boolean
+      op_type:
+        description: config item modification action; allowed values `upsert`, `remove`
+        enum:
+        - upsert
+        - remove
+        type: string
+    required:
+    - op_type
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.DBHAAccount:
+    properties:
+      access_hosts:
+        items:
+          type: string
+        type: array
+      pwd:
+        type: string
+      user:
+        type: string
+    required:
+    - pwd
+    - user
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.DumpSchemaParam:
+    properties:
+      backup_dir:
+        type: string
+      backup_file_name:
+        type: string
+      bk_cloud_id:
+        description: cloud region the instance belongs to
+        type: integer
+      charset:
+        description: character set parameter
+        type: string
+      db_cloud_token:
+        description: cloud region token
+        type: string
+      fileserver:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.FileServer'
+      host:
+        description: host address of the current instance
+        type: string
+      port:
+        description: port of the current instance
+        minimum: 3306
+        type: integer
+    required:
+    - charset
+    - host
+    - port
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.ExcuteSQLFileObj:
+    properties:
+      dbnames:
+        description: DB names to change, wildcard matching supported
+        items:
+          type: string
+        type: array
+      ignore_dbnames:
+        description: db names to ignore and exclude from the change, wildcard matching supported
+        items:
+          type: string
+        type: array
+      sql_file:
+        description: name of the change file
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.FileServer:
+    properties:
+      bucket:
+        description: target bucket
+        type: string
+      password:
+        description: artifact repository password
+        type: string
+      project:
+        description: artifact repository project
+        type: string
+      upload_path:
+        description: upload path
+        type: string
+      url:
+        description: artifact repository address
+        type: string
+      username:
+        description: artifact repository username
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.FindLocalBackupParam:
+    properties:
+      backup_dirs:
+        items:
+          type: string
+        type: array
+      cluster_id:
+        description: cluster_id whose backups to query; if not specified, other invalid backups may be matched
+        type: integer
+      file_server:
+        type: boolean
+      tgt_instance:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.Instance'
+        description: instance whose backups to look for
+    required:
+    - backup_dirs
+    - tgt_instance
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.FindLocalBackupResp:
+    properties:
+      backups:
+        additionalProperties:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.LocalBackupObj'
+        description: keys of backups are the .info files
+        type: object
+      latest:
+        description: the most recent backup among backups above
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallDBAToolkitComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallDBAToolkitParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallDBAToolkitParam:
+    properties:
+      exec_user:
+        description: user who initiated the actor execution, used only for auditing
+        type: string
+      pkg:
+        description: package name
+        type: string
+      pkg_md5:
+        description: package MD5
+        type: string
+    required:
+    - pkg
+    - pkg_md5
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLChecksumComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLChecksumParam'
+      general:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLChecksumParam:
+    properties:
+      api_url:
+        type: string
+      exec_user:
+        type: string
+      instances_info:
+        items:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstanceInfo'
+        type: array
+      pkg:
+        description: package name
+        type: string
+      pkg_md5:
+        description: package MD5
+        type: string
+      schedule:
+        type: string
+      system_dbs:
+        items:
+          type: string
+        type: array
+    required:
+    - pkg
+    - pkg_md5
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLParams'
+      general:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+      timeZone:
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLParams:
+    properties:
+      allowDiskFileSystemTypes:
+        items:
+          type: string
+        type: array
+      charset:
+        description: character set parameter
+        type: string
+      dbha_account:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.DBHAAccount'
+      host:
+        type: string
+      inst_mem:
+        description: memory size of the instance to install; optional, computed automatically if omitted
+        type: integer
+      mycnf_configs:
+        description: map[port]my.cnf
+        items:
+          type: integer
+        type: array
+      mysql_version:
+        description: MySQLVersion only needs the major version number, e.g. 5.6 or 5.7
+        type: string
+      pkg:
+        description: installation package name
+        type: string
+      pkg_md5:
+        description: installation package MD5
+        type: string
+      ports:
+        description: Ports
+        items:
+          type: integer
+        type: array
+      spider_auto_incr_mode_map:
+        items:
+          type: integer
+        type: array
+      super_account:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SuperAccount'
+    required:
+    - charset
+    - host
+    - mycnf_configs
+    - mysql_version
+    - pkg
+    - pkg_md5
+    - ports
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallNewDbBackupComp:
+    properties:
+      generalParam:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+      params:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallNewDbBackupParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallNewDbBackupParam:
+    properties:
+      bk_biz_id:
+        description: bkbizid
+        type: string
+      bk_cloud_id:
+        description: bk_cloud_id
+        type: string
+      cluster_address:
+        additionalProperties:
+          type: string
+        description: cluster address
+        type: object
+      cluster_id:
+        additionalProperties:
+          type: integer
+        description: cluster id
+        type: object
+      configs:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.Cnf'
+        description: template configuration
+      exec_user:
+        description: user executing the job
+        type: string
+      host:
+        description: host address of the current instance
+        type: string
+      options:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BackupOptions'
+        description: option parameter configuration
+      pkg:
+        description: installation package name
+        type: string
+      pkg_md5:
+        description: installation package MD5
+        type: string
+      ports:
+        description: all ports on the monitored machine that need monitoring
+        items:
+          type: integer
+        type: array
+      role:
+        description: role of the mysqld installed on the current host
+        type: string
+    required:
+    - bk_biz_id
+    - configs
+    - host
+    - options
+    - pkg
+    - pkg_md5
+    - ports
+    - role
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstanceInfo:
+    properties:
+      bk_biz_id:
+        type: integer
+      bk_instance_id:
+        description: 0 is treated as empty and is not serialized
+        type: integer
+      cluster_id:
+        type: integer
+      immute_domain:
+        type: string
+      ip:
+        type: string
+      port:
+        type: integer
+      role:
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.LocalBackupObj:
+    properties:
+      backup_dir:
+        description: directory where the backup resides
+        type: string
+      backup_id:
+        type: string
+      backup_time:
+        description: backup time; currently the backup start time
+        type: string
+      backup_type:
+        type: string
+      bill_id:
+        type: string
+      bk_biz_id:
+        type: string
+      cluster_id:
+        type: integer
+      data_schema_grant:
+        type: string
+      db_role:
+        type: string
+      file_list:
+        description: |-
+          InfoFile   common.InfoFileDetail `json:"info_file"`
+          list of backup files
+        items:
+          type: string
+        type: array
+      index_file:
+        type: string
+      inst_host:
+        description: host the backup belongs to
+        type: string
+      inst_port:
+        description: port the backup belongs to
+        type: integer
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfChangeComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfChangeParam'
+      general:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfChangeParam:
+    properties:
+      items:
+        additionalProperties:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.ConfItemOp'
+        type: object
+      persistent:
+        description: 'whether to persist to the my.cnf file; -1: do not persist, 1: persist, 2: only persist without changing the runtime value'
+        enum:
+        - -1
+        - 1
+        - 2
+        type: integer
+      restart:
+        description: 'whether restart is allowed; -1: no restart, 1: restart, 2: decide automatically from the need_restart of each item'
+        enum:
+        - -1
+        - 1
+        - 2
+        type: integer
+      tgt_instance:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject'
+    required:
+    - items
+    - persistent
+    - restart
+    - tgt_instance
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfCloneComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfCloneParam'
+      general:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfCloneParam:
+    properties:
+      items:
+        description: |-
+          which variables to clone; since parameters differ across versions, the variables listed here are not required to exist. Only the mysqld section is modified, so failures are ignored
+          some parameters are readonly and are only saved to my.cnf; if they differ from the running value the user needs to restart
+          see MycnfCloneItemsDefault for the default values
+        items:
+          type: string
+        type: array
+      persistent:
+        description: 'whether to persist to the my.cnf file; 0: do not persist, 1: persist, 2: only persist without changing the runtime value'
+        enum:
+        - 0
+        - 1
+        - 2
+        example: 1
+        type: integer
+      restart:
+        description: 'whether restart is allowed; 0: no restart, 1: restart, 2: decide automatically from the need_restart of each item'
+        enum:
+        - 0
+        - 1
+        - 2
+        example: 2
+        type: integer
+      src_instance:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject'
+        description: source instance for the parameter clone; repl account privileges are sufficient
+      tgt_instance:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject'
+        description: local target instance to apply to; ADMIN privileges required
+    required:
+    - persistent
+    - restart
+    - src_instance
+    - tgt_instance
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.PtTableChecksumComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.PtTableChecksumParam'
+      general:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.PtTableChecksumParam:
+    properties:
+      bk_biz_id:
+        description: business id
+        type: integer
+      cluster_id:
+        description: cluster id
+        type: integer
+      db_patterns:
+        description: database/table filter option
+        items:
+          type: string
+        type: array
+      ignore_dbs:
+        description: database/table filter option
+        items:
+          type: string
+        type: array
+      ignore_tables:
+        description: database/table filter option
+        items:
+          type: string
+        type: array
+      immute_domain:
+        description: cluster domain name
+        type: string
+      inner_role:
+        description: inner role of the db running the checksum, should be one of [master, repeater]
+        type: string
+      master_access_slave_password:
+        description: password for accessing the slave from the db
+        type: string
+      master_access_slave_user:
+        description: user for accessing the slave from the db
+        type: string
+      master_ip:
+        description: ip of the db running the checksum
+        type: string
+      master_port:
+        description: port of the db running the checksum
+        type: integer
+      replicate_table:
+        description: result table, with database prefix
+        type: string
+      runtime_hour:
+        description: checksum run duration, in hours
+        type: integer
+      slaves:
+        description: list of slaves
+        items:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SlaveInfo'
+        type: array
+      system_dbs:
+        description: system tables
+        items:
+          type: string
+        type: array
+      table_patterns:
+        description: database/table filter option
+        items:
+          type: string
+        type: array
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SemanticCheckComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SenmanticCheckParam'
+      general:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SemanticDumpSchemaComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.DumpSchemaParam'
+      general:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SenmanticCheckParam:
+    properties:
+      execute_objects:
+        items:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.ExcuteSQLFileObj'
+        type: array
+      host:
+        description: host address of the semantic-check instance
+        type: string
+      port:
+        description: port of the semantic-check instance
+        minimum: 3306
+        type: integer
+      remote_host:
+        description: used to get the target instance's character set and default storage engine
+        type: string
+      remote_port:
+        description: port of the source instance to fetch the schema from
+        minimum: 3306
+        type: integer
+      schemafile:
+        description: schema file
+        type: string
+    required:
+    - host
+    - port
+    - remote_host
+    - remote_port
+    - schemafile
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SlaveInfo:
+    properties:
+      id:
+        description: slave id
+        type: integer
+      ip:
+        description: slave ip
+        type: string
+      port:
+        description: slave port
+        type: integer
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SuperAccount:
+    properties:
+      access_hosts:
+        items:
+          type: string
+        type: array
+      pwd:
+        type: string
+      user:
+        type: string
+    required:
+    - pwd
+    - user
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.Cnf:
+    properties:
+      BackupClient:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfBackupClient'
+      LogicalBackup:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfLogicalBackup'
+      PhysicalBackup:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfPhysicalBackup'
+        description: LogicalLoad          CnfLogicalLoad          `json:"LogicalLoad"
+          ini:"LogicalLoad"`
+      Public:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfShared'
+    required:
+    - BackupClient
+    - LogicalBackup
+    - Public
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfBackupClient:
+    properties:
+      doChecksum:
+        type: string
+      fileTag:
+        type: string
+      remoteFileSystem:
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfLogicalBackup:
+    properties:
+      chunkFilesize:
+        description: ChunkFilesize Split tables into chunks of this output file size.
+          This value is in MB
+        type: integer
+      defaultsFile:
+        type: string
+      disableCompress:
+        type: boolean
+      extraOpt:
+        description: ExtraOpt other mydumper options string to be appended
+        type: string
+      flushRetryCount:
+        type: integer
+      regex:
+        type: string
+      threads:
+        type: integer
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfPhysicalBackup:
+    properties:
+      defaultsFile:
+        type: string
+      extraOpt:
+        description: ExtraOpt other xtrabackup options string to be appended
+        type: string
+      splitSpeed:
+        description: SplitSpeed tar split limit in MB/s, default 300
+        type: integer
+      threads:
+        description: Threads --parallel to copy files
+        type: integer
+      throttle:
+        description: Throttle limits the number of chunks copied per second. The chunk
+          size is 10 MB, 0 means no limit
+        type: integer
+    required:
+    - defaultsFile
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.CnfShared:
+    properties:
+      backupDir:
+        type: string
+      backupId:
+        type: string
+      backupTimeOut:
+        type: string
+      backupType:
+        type: string
+      billId:
+        type: string
+      bkBizId:
+        type: string
+      bkCloudId:
+        type: string
+      clusterAddress:
+        type: string
+      clusterId:
+        type: string
+      dataSchemaGrant:
+        type: string
+      iolimitMBPerSec:
+        description: IOLimitMBPerSec tar or split default io limit, mb/s. 0 means
+          no limit
+        type: integer
+      mysqlCharset:
+        type: string
+      mysqlHost:
+        type: string
+      mysqlPasswd:
+        type: string
+      mysqlPort:
+        type: string
+      mysqlRole:
+        type: string
+      mysqlUser:
+        type: string
+      oldFileLeftDay:
+        type: string
+      resultReportPath:
+        type: string
+      statusReportPath:
+        type: string
+      tarSizeThreshold:
+        description: TarSizeThreshold tar file will be split to this package size.
+          MB
+        minimum: 128
+        type: integer
+    required:
+    - backupDir
+    - backupTimeOut
+    - tarSizeThreshold
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_dbbackup.LogicBackupDataOption:
+    properties:
+      DataSchemaGrant:
+        description: '"grant,schema,data"'
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_grant.GrantReplComp:
+    properties:
+      db:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.DbWorker'
+        description: local db connection
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_grant.GrantReplParam'
+      general:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_grant.GrantReplParam:
+    properties:
+      host:
+        description: host address of the current instance
+        type: string
+      port:
+        description: port of the current instance
+        type: integer
+      repl_hosts:
+        description: slave host
+        items:
+          type: string
+        type: array
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.MySQLBinlogUtil:
+    properties:
+      databases:
+        description: databases to include when parsing row events
+        items:
+          type: string
+        type: array
+      databases_ignore:
+        description: databases to ignore when parsing row events
+        items:
+          type: string
+        type: array
+      filter_statement_match_error:
+        description: if the string matches, parsing the binlog raises an error
+        type: string
+      filter_statement_match_ignore:
+        description: if the string matches, the statement is ignored and kept as a comment
+        type: string
+      filter_statement_match_ignore_force:
+        description: |-
+          if the string matches, the statement is force-ignored and kept as a comment; when both this and filter_statement_match_error match, ignore_force takes precedence
+          default infodba_schema
+        type: string
+      flashback:
+        description: whether to enable flashback
+        type: boolean
+      idempotent_mode:
+        description: whether to enable idempotent mode, mysql --slave-exec-mode=idempotent or mysqlbinlog --idempotent
+        type: boolean
+      mysql_client_opt:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.MySQLClientOpt'
+      not_write_binlog:
+        description: whether to record binlog during import, mysql sql_log_bin=0 or mysqlbinlog --disable-log-bin; true means do not write
+        type: boolean
+      query_event_handler:
+        description: |-
+          default handling policy for query events. keep: keep the parsed query event statement; ignore: comment out (discard) the query event; error: treat it as an unacceptable statement and raise an error
+          default keep
+        enum:
+        - keep
+        - ignore
+        - safe
+        - error
+        type: string
+      rewrite_db:
+        description: --rewrite_db="db1->xx_db1,db2->xx_db2"
+        type: string
+      start_pos:
+        description: --start-position
+        type: integer
+      start_time:
+        description: --start-datetime
+        type: string
+      stop_pos:
+        description: --stop-position
+        type: integer
+      stop_time:
+        description: --stop-datetime
+        type: string
+      tables:
+        description: tables to include when parsing row events
+        items:
+          type: string
+        type: array
+      tables_ignore:
+        description: tables to ignore when parsing row events
+        items:
+          type: string
+        type: array
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.MySQLClientOpt:
+    properties:
+      binary_mode:
+        description: whether to enable --binary-mode
+        type: boolean
+      max_allowed_packet:
+        type: integer
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RecoverBinlog:
+    properties:
+      binlog_dir:
+        description: directory holding the binlogs for recovery, usually the download directory
+        example: /data/dbbak/123456/binlog
+        type: string
+      binlog_files:
+        description: list of binlogs
+        items:
+          type: string
+        type: array
+      binlog_start_file:
+        description: the first binlog to start applying. If given, start_pos should usually be set too; otherwise start_time is used
+        type: string
+      parse_concurrency:
+        description: parsing concurrency, default 1
+        type: integer
+      parse_only:
+        description: only parse the binlog, do not import
+        type: boolean
+      quick_mode:
+        description: |-
+          with quick_mode enabled, row events are filtered by options such as filter databases while parsing the binlog, and all query events are kept. Requires a mysqlbinlog that supports the --tables option; the tools to use can be specified
+          with quick_mode=false, options such as databases in recover_opt have no effect and all binlogs are applied
+        type: boolean
+      recover_opt:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.MySQLBinlogUtil'
+      source_binlog_format:
+        enum:
+        - ""
+        - ROW
+        - STATEMENT
+        - MIXED
+        type: string
+      tgt_instance:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject'
+      tools:
+        additionalProperties:
+          type: string
+        description: externally specified tool paths
+        type: object
+      work_dir:
+        description: directory where binlog parsing runs; stores run logs
+        example: /data/dbbak/
+        type: string
+      work_id:
+        example: "123456"
+        type: string
+    required:
+    - binlog_dir
+    - binlog_files
+    - recover_opt
+    - tgt_instance
+    - work_dir
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RecoverBinlogComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RecoverBinlog'
+      general:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RestoreDRComp:
+    properties:
+      extend:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RestoreParam'
+        description: restore parameters, copied to the concrete Restore implementation; see the ChooseType method
+      general:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+        description: general parameters
+      resume:
+        description: whether this run resumes after an interruption
+        type: boolean
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RestoreOpt:
+    properties:
+      databases:
+        description: which dbs to restore; currently only effective for logical restore
+        items:
+          type: string
+        type: array
+      ignore_databases:
+        items:
+          type: string
+        type: array
+      ignore_tables:
+        items:
+          type: string
+        type: array
+      recover_binlog:
+        description: |-
+          only needed for point-in-time restore scenarios: whether to recover binlog. The doSlave scenario does not need recover_binlog; this option controls the behavior of the next binlog-recovery step
+          when recover_binlog is set, make sure all database and table schemas of the instance are restored; in the logical restore scenario, even when only part of the table data is restored, all table schemas must still be restored
+        type: boolean
+      recover_privs:
+        type: boolean
+      source_binlog_format:
+        description: |-
+          useful for database/table-level point-in-time restore: with statement/mixed format all data must be imported;
+          with row format only the specified databases/tables need to be imported, and quick_mode=true can be passed to recover-binlog to likewise restore only the specified databases/tables from binlog
+        enum:
+        - ""
+        - ROW
+        - STATEMENT
+        - MIXED
+        type: string
+      tables:
+        items:
+          type: string
+        type: array
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RestoreParam:
+    properties:
+      backup_dir:
+        description: local directory holding the backup files; in principle doDr does not write to this directory but to targetDir
+        example: /data/dbbak
+        type: string
+      backup_files:
+        additionalProperties:
+          items:
+            type: string
+          type: array
+        description: list of backup file names; key is info|full|priv|index, value is a list of file names relative to backup_dir
+        type: object
+      change_master:
+        description: whether to run change master after the restore completes; will change master to src_instance
+        type: boolean
+      restore_opts:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RestoreOpt'
+        description: restore options, e.g. which databases/tables to restore and whether to import binlog; currently only effective for logical restore
+      src_instance:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.Instance'
+        description: ip and port of the backup instance, used to generate the change master statement
+      tgt_instance:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject'
+        description: local target instance to restore into
+      tools:
+        allOf:
+        - $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_tools.ToolSet'
+        description: client tools used for the restore; defaults are used if not provided
+      work_dir:
+        description: backup restore directory; the working directory
+        example: /data1/dbbak
+        type: string
+      work_id:
+        description: work_id identifies this restore; if 0, the current timestamp is used
+        type: string
+    required:
+    - backup_dir
+    - backup_files
+    - work_dir
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_rollback.Flashback:
+    properties:
+      binlog_dir:
+        description: when binlog_dir is non-empty, the binlogs are already downloaded; when empty, binlogs are currently only symlinked from local
+        type: string
+      binlog_files:
+        description: list of binlogs; if not given, binlogs matching the time range are looked up locally
+        items:
+          type: string
+        type: array
+      parse_concurrency:
+        description: binlog parsing concurrency
+        type: integer
+      recover_opt:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_rollback.RecoverOpt'
+      stop_time:
+        type: string
+      target_time:
+        description: target time point of the flashback, corresponding to recover-binlog start_time, precise to the second, in the target instance's time zone
+        type: string
+      tgt_instance:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject'
+      tools:
+        additionalProperties:
+          type: string
+        description: externally specified tool paths
+        type: object
+      work_dir:
+        description: directory where binlog parsing runs; stores run logs
+        type: string
+      work_id:
+        type: string
+    required:
+    - recover_opt
+    - target_time
+    - tgt_instance
+    - work_dir
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_rollback.FlashbackComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_rollback.Flashback'
+      general:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_rollback.RecoverOpt:
+    properties:
+      databases:
+        description: databases to include when parsing row events
+        items:
+          type: string
+        type: array
+      databases_ignore:
+        description: databases to ignore when parsing row events
+        items:
+          type: string
+        type: array
+      filter_rows:
+        description: row-level flashback is not supported yet
+        type: string
+      tables:
+        description: tables to include when parsing row events
+        items:
+          type: string
+        type: array
+      tables_ignore:
+        description: tables to ignore when parsing row events
+        items:
+          type: string
+        type: array
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.InitClusterRoutingComp:
+    properties:
+      extend:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.InitClusterRoutingParam'
+      general:
+        $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam'
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.InitClusterRoutingParam:
+    properties:
+      ctl_instances:
+        items:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.Instance'
+        type: array
+      host:
+        type: string
+      mysql_instances:
+        items:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.Instance'
+        type: array
+      port:
+        minimum: 3306
+        type: integer
+      spider_instances:
+        items:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.Instance'
+        type: array
+    required:
+    - ctl_instances
+    - host
+    - mysql_instances
+    - port
+    - spider_instances
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.Instance:
+    properties:
+      host:
+        type: string
+      port:
+        type: integer
+      shard_id:
+        type: integer
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_native.DbWorker:
+    properties:
+      db:
+        $ref: '#/definitions/sql.DB'
+      dsn:
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject:
+    properties:
+      charset:
+        description: connection character set
+        type: string
+      host:
+        description: host address of the current instance
+        type: string
+      options:
+        description: other options
+        type: string
+      port:
+        description: port of the current instance
+        type: integer
+      pwd:
+        description: password of the user connecting to the current instance
+        type: string
+      socket:
+        description: connection socket
+        type: string
+      user:
+        description: user connecting to the current instance
+        type: string
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_native.Instance:
+    properties:
+      host:
+        description: host address of the current instance
+        example: 127.0.0.1
+        type: string
+      port:
+        description: port of the current instance
+        example: 33060
+        type: integer
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_tools.ToolSet:
+    properties:
+      tools:
+        additionalProperties:
+          type: string
+        description: externally specified tool paths
+        type: object
+    type: object
+  dbm-services_mysql_db-tools_dbactuator_pkg_util_mysqlutil.ChangeMaster:
+    properties:
+      change_sql:
+        type: string
+      channel:
+        type: string
+      executed_gtid_set:
+        type: string
+      force:
+        description: if the current instance already has a replication relationship, whether to reset slave first and force change master
+        type: boolean
+      is_gtid:
+        description: whether to set up replication using GTID
+        type: boolean
+      master_auto_position:
+        type: integer
+      master_host:
+        description: master ip
+        type: string
+      master_log_file:
+        description: binlog file name
+        type: string
+      master_log_pos:
+        description: binlog position
+        type: integer
+      master_password:
+        type: string
+      master_port:
+        description: master port
+        minimum: 3306
+        type: integer
+      master_user:
+        type: string
+      max_tolerate_delay:
+        description: maximum tolerated delay; replication setup is considered successful when the replication delay stays below this value
+        type: integer
+    required:
+    - master_host
+    - master_password
+    - master_port
+    - master_user
+    type: object
+  internal_subcmd_commoncmd.RMLargeFileParam:
+    properties:
+      bw_limit_mb:
+        default: 30
+        description: deletion speed, MB/s, default 30
+        maximum: 1000
+        minimum: 1
+        type: integer
+      filename:
+        type: string
+    required:
+    - bw_limit_mb
+    - filename
+    type: object
+  sql.DB:
+    type: object
+host: ./dbactuator
+info:
+  contact:
+    email: support@swagger.io
+    name: API Support
+    url: http://www.swagger.io/support
+  description: This is a dbactuator command collection.
+  license:
+    name: Apache 2.0
+    url: http://www.apache.org/licenses/LICENSE-2.0.html
+  termsOfService: http://swagger.io/terms/
+  title: dbactuator API
+  version: 0.0.1
+paths:
+  /common/file-server:
+    post:
+      consumes:
+      - application/json
+      description: |-
+        exposes a given directory over http for download; can be used to fetch backups from another machine when rebuilding a standby
+        when the OS does not allow ssh logins (scp/sftp), this service can be started temporarily to fetch backup files
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_fileserver.FileServerComp'
+      responses: {}
+      summary: simple file server
+      tags:
+      - common
+  /common/rm-file:
+    post:
+      consumes:
+      - application/json
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/internal_subcmd_commoncmd.RMLargeFileParam'
+      responses: {}
+      summary: rate-limited deletion of a large file
+      tags:
+      - common
+  /download/http:
+    post:
+      consumes:
+      - application/json
+      description: |-
+        supports rate limiting and basicAuth authentication. Usually used together with common file-server
+        # server1
+        ./dbactuator common file-server \
+        --payload-format raw \
+        --payload '{"extend":{"bind_address":":8082","mount_path":"/data/dbbak","user":"xiaog","password":"xxxx","proc_maxidle_duration":"60s"}}'
+
+        # server2
+        curl -u 'xiaog:xxxx' 'http://server1:8082/datadbbak8082/dbactuator' -o dbactuator.bin --limit-rate 10k
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.DFHttpParam'
+      responses: {}
+      summary: download a file over http
+      tags:
+      - download
+  /download/ibs-query:
+    post:
+      consumes:
+      - application/json
+      description: filename is fuzzy-matched; returns a task_id used for the download
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryComp'
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSQueryResult'
+      summary: query files from the ieg backup system
+      tags:
+      - download
+  /download/ibs-recover:
+    post:
+      consumes:
+      - application/json
+      description: |-
+        given a task_id, download files from the ieg backup system
+        task_files_wild: fuzzy-search files and download; task_files: exact file query and download
+        task_files_wild and task_files are mutually exclusive, choose one
+        with skip_local_exists=true enabled, files that already exist in the target directory are skipped automatically
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSRecoverComp'
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.IBSRecoverTask'
+      summary: download files from the ieg backup system
+      tags:
+      - download
+  /download/scp:
+    post:
+      consumes:
+      - application/json
+      description: supports rate limiting
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_backup_download.DFScpParam'
+      responses: {}
+      summary: download a file over scp
+      tags:
+      - download
+  /mysql/change-master:
+    post:
+      consumes:
+      - application/json
+      description: executes change master to
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BuildMSRelationComp'
+      responses: {}
+      summary: set up a master/slave relationship
+      tags:
+      - mysql
+  /mysql/clean-mysql:
+    post:
+      consumes:
+      - application/json
+      description: wipes the local instance, keeping system databases
+      parameters:
+      - description: description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.CleanMysqlComp'
+      produces:
+      - application/json
+      responses: {}
+      summary: wipe an instance (high risk)
+      tags:
+      - mysql
+  /mysql/deploy:
+    post:
+      consumes:
+      - application/json
+      description: notes on deploying a mysql instance
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLComp'
+      responses: {}
+      summary: deploy a mysql instance
+      tags:
+      - mysql
+  /mysql/deploy-dbbackup:
+    post:
+      consumes:
+      - application/json
+      description: deploys the Go version of the backup program
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallNewDbBackupComp'
+      responses: {}
+      summary: deploy the backup program
+      tags:
+      - mysql
+  /mysql/find-local-backup:
+    post:
+      consumes:
+      - application/json
+      description: finds local backups
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.FindLocalBackupParam'
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.FindLocalBackupResp'
+      summary: find local backups
+      tags:
+      - mysql
+  /mysql/flashback-binlog:
+    post:
+      consumes:
+      - application/json
+      description: imports binlog via `mysqlbinlog --flashback xxx | mysql`
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_rollback.FlashbackComp'
+      responses: {}
+      summary: import binlog (flashback)
+      tags:
+      - mysql
+  /mysql/grant-repl:
+    post:
+      consumes:
+      - application/json
+      description: creates a repl account on the target machine
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_grant.GrantReplComp'
+      responses: {}
+      summary: create a replication account
+      tags:
+      - mysql
+  /mysql/init-cluster-routing:
+    post:
+      consumes:
+      - application/json
+      description: notes on initializing the routing relations of a tendb cluster
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.InitClusterRoutingComp'
+      responses: {}
+      summary: initialize tendb cluster routing relations
+      tags:
+      - spiderctl
+  /mysql/install-checksum:
+    post:
+      consumes:
+      - application/json
+      description: installs mysql checksum
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLChecksumComp'
+      responses: {}
+      summary: install mysql checksum
+      tags:
+      - mysql
+  /mysql/install-dbatoolkit:
+    post:
+      consumes:
+      - application/json
+      description: deploys /home/mysql/dba_toolkit, overwriting any existing copy
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallDBAToolkitComp'
+      responses: {}
+      summary: deploy the DBA toolkit
+      tags:
+      - mysql
+  /mysql/mycnf-change:
+    post:
+      consumes:
+      - application/json
+      description: modifies mysql configuration
+      parameters:
+      - description: description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfChangeComp'
+      produces:
+      - application/json
+      responses: {}
+      summary: modify mysql configuration
+      tags:
+      - mysql
+  /mysql/mycnf-clone:
+    post:
+      consumes:
+      - application/json
+      description: |-
+        for slave rebuild or migration, keeping the new instance's key my.cnf parameters identical to the source instance
+        default clone parameters:
+      parameters:
+      - description: description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfCloneComp'
+      produces:
+      - application/json
+      responses: {}
+      summary: clone selected my.cnf parameters from a source instance to the target instance
+      tags:
+      - mysql
+  /mysql/parse-binlog-time:
+    post:
+      consumes:
+      - application/json
+      description: reads the binlog FileDescriptionFormat and RotateEvent events
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.BinlogTimeComp'
+      responses: {}
+      summary: get the start and end time of a binlog
+      tags:
+      - mysql
+  /mysql/pt-table-checksum:
+    post:
+      consumes:
+      - application/json
+      description: data consistency check
+      parameters:
+      - description: description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.PtTableChecksumComp'
+      responses: {}
+      summary: data consistency check
+      tags:
+      - mysql
+  /mysql/recover-binlog:
+    post:
+      consumes:
+      - application/json
+      description: imports binlog via `mysqlbinlog xxx | mysql`
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RecoverBinlogComp'
+      responses: {}
+      summary: import binlog
+      tags:
+      - mysql
+  /mysql/restore-dr:
+    post:
+      consumes:
+      - application/json
+      description: restores from a physical or logical backup
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.RestoreDRComp'
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_util_mysqlutil.ChangeMaster'
+      summary: backup restore
+      tags:
+      - mysql
+  /mysql/semantic-check:
+    post:
+      consumes:
+      - application/json
+      description: runs the semantic check
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SemanticCheckComp'
+      produces:
+      - application/json
+      responses: {}
+      summary: run semantic check
+      tags:
+      - mysql
+  /mysql/semantic-dumpschema:
+    post:
+      consumes:
+      - application/json
+      description: dumps the schema for the semantic check
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.SemanticDumpSchemaComp'
+      produces:
+      - application/json
+      responses: {}
+      summary: dump schema for semantic check
+      tags:
+      - mysql
+  /spiderctl/deploy:
+    post:
+      consumes:
+      - application/json
+      description: notes on deploying a spider ctl instance
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLComp'
+      responses: {}
+      summary: deploy a spider ctl instance
+      tags:
+      - spiderctl
+  /spider/deploy:
+    post:
+      consumes:
+      - application/json
+      description: notes on deploying a spider instance
+      parameters:
+      - description: short description
+        in: body
+        name: body
+        required: true
+        schema:
+          $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLComp'
+      responses: {}
+      summary: deploy a spider instance
+      tags:
+      - spider
+schemes:
+- http
+swagger: "2.0"
diff --git a/dbm-services/mysql/db-tools/dbactuator/example/import-sqlfile.example.md b/dbm-services/mysql/db-tools/dbactuator/example/import-sqlfile.example.md
new file mode 100644
index 0000000000..d01757e7d4
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/example/import-sqlfile.example.md
@@ -0,0 +1,43 @@
+# dbactuator mysql import-sqlfile
+
+## Execute SQL import
+
+
+### Original payload
+```
+{
+    "general": {
+       "runtime_account":{
+        "admin_user":"xx",
+        "admin_pwd": "xx",
+        "monitor_user":"xx",
+        "monitor_pwd":"xx",
+        "monitor_access_all_user":"xx",
+        "monitor_access_all_pwd":"xx",
+        "repl_user":"xx",
+        "repl_pwd":"xx",
+        "backup_user":"xx",
+        "backup_pwd":"xx",
+        "yw_user":"xx",
+        "yw_pwd": "xx"
+        "proxy_admin_user": "xx",
+        "proxy_admin_pwd": "xx"
+     }
+    },
+    "extend": {
+        "host": "127.0.0.1",
+        "port": 3306,
+        "force": false,
+        "execute_objects": [
+            {
+                "sql_file": "/data/install/test.sql",
+				"ignore_dbnames": [],
+                "dbnames": [
+                    "test"
+                ]
+            }
+        ]
+    }
+}
+
+```
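+
+### Invocation (illustrative)
+
+A minimal sketch, assuming the payload above is saved as payload.json. The long-flag style mirrors the `common file-server` example in the swagger docs; treat the exact flags as assumptions rather than the documented interface:
+```
+# hypothetical invocation: feed the payload shown above to the subcommand
+./dbactuator mysql import-sqlfile \
+    --payload-format raw \
+    --payload "$(cat payload.json)"
+```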
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/example/mysql-backup-database-table.example.md b/dbm-services/mysql/db-tools/dbactuator/example/mysql-backup-database-table.example.md
new file mode 100644
index 0000000000..c587059d00
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/example/mysql-backup-database-table.example.md
@@ -0,0 +1,16 @@
+# dbactuator mysql backup-database-table
+
+## Back up databases and tables
+
+
+### Original payload
+```
+{
+    "extend": {
+        "host": "127.0.0.1",
+        "port": 3306,
+        "regex": "^(?!db1)db.*\.t)"
+    }
+}
+
+```
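+
+### Invocation (illustrative)
+
+A minimal sketch, assuming the payload above is saved as payload.json (the flag style mirrors the `common file-server` example and is an assumption):
+```
+# hypothetical invocation: the regex appears to match db*.t* names while excluding database db1
+./dbactuator mysql backup-database-table \
+    --payload-format raw \
+    --payload "$(cat payload.json)"
+```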
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/example/mysql-backup-download.example.md b/dbm-services/mysql/db-tools/dbactuator/example/mysql-backup-download.example.md
new file mode 100644
index 0000000000..3fbe5cfcbf
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/example/mysql-backup-download.example.md
@@ -0,0 +1,41 @@
+
+
+### Original payload
+
+#### scp
+
+- Start (invocation sketch below)
+```
+{
+    "extend": {
+        "bk_biz_id": "0",
+        "backup_type": "GZTAB",
+        "download_type": "scp",
+        "download_options": "",
+        "file_date": "latest",
+        "file_src": {
+            "ssh_host": "",
+            "ssh_port": "",
+            "ssh_user": "",
+            "ssh_pass": "",
+            "path": "/data/dbbak",
+            "match": "",
+            "file_list": []
+        },
+        "file_tgt": {
+            "path": "/data/dbbak"
+        },
+        "resume": true,
+        "check_disksize": true
+    }
+}
+```
+
+- Pause
+- Rollback
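+
+An illustrative run of the Start payload above, assuming it is saved as payload.json and that the subcommand name follows this file's name; both the subcommand and the flag style are assumptions:
+```
+# hypothetical invocation: resume=true is meant to let an interrupted download continue
+./dbactuator mysql backup-download \
+    --payload-format raw \
+    --payload "$(cat payload.json)"
+```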
+
+#### gse
+
+#### ieg_backup_center
+
+#### wget
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/example/mysql-backup-importfull.example.md b/dbm-services/mysql/db-tools/dbactuator/example/mysql-backup-importfull.example.md
new file mode 100644
index 0000000000..77bb706fd7
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/example/mysql-backup-importfull.example.md
@@ -0,0 +1,30 @@
+
+### payload
+
+- Start (invocation sketch below)
+```
+{
+    "extend": {
+        "bk_biz_id": "0",
+        "backup_type": "GZTAB",
+        "file_from": {
+            "path_full": "/data/dbbak/recover",
+            "path_incr": "/data/dbbak/recover/binlog",
+            "path_privileges": "/data/dbbak/recover"
+        },
+        "recover_full": true,
+        "recover_incr": false,
+        "recover_privileges": true,
+        "concurrency": 10,
+        "partial_recover": {
+            "databases": "",
+            "tables": "",
+        },
+        "check_myisam": false,
+        "enable_imdepotent": false
+    }
+}
+```
+
+- Pause
+- Rollback
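+
+An illustrative run of the Start payload above, assuming it is saved as payload.json and that the subcommand name follows this file's name; both are assumptions:
+```
+# hypothetical invocation: recover_full/recover_privileges select which stages run
+./dbactuator mysql backup-importfull \
+    --payload-format raw \
+    --payload "$(cat payload.json)"
+```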
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/example/mysql-change-master.example.md b/dbm-services/mysql/db-tools/dbactuator/example/mysql-change-master.example.md
new file mode 100644
index 0000000000..456778a450
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/example/mysql-change-master.example.md
@@ -0,0 +1,40 @@
+# dbactuator mysql change-master 
+Set up a master/slave relationship:
+`dbactuator mysql change-master -u xx -n xx -p  `   
+Prerequisites
+- Two sets of mysql instances already installed
+
+### Original payload
+```
+{
+ "general":{
+       "runtime_account":{
+        "admin_user":"xx",
+        "admin_pwd": "xx",
+        "monitor_user":"xx",
+        "monitor_pwd":"xx",
+        "monitor_access_all_user":"xx",
+        "monitor_access_all_pwd":"xx",
+        "repl_user":"xx",
+        "repl_pwd":"xx",
+        "backup_user":"xx",
+        "backup_pwd":"xx",
+        "yw_user":"xx",
+        "yw_pwd": "xx"
+        "proxy_admin_user": "xx",
+        "proxy_admin_pwd": "xx"
+     }
+    },
+    "extend":{
+		"host": "127.0.0.1",
+		"port": 20000,
+		"master_host": "127.0.0.2",
+		"master_port": 20000,
+		"is_gtid": false,
+		"bin_file": "binlog20000.000003",
+		"bin_position": 2362,
+		"max_tolerate_delay": 10,
+		"force": false
+    }
+}
+```
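+
+An illustrative payload-based run, assuming the payload above is saved as payload.json. The `-u/-n/-p` short form is shown at the top of this page; the long-flag form below mirrors the `common file-server` example and is an assumption:
+```
+# hypothetical invocation: bin_file/bin_position point at the master's binlog coordinates
+./dbactuator mysql change-master \
+    --payload-format raw \
+    --payload "$(cat payload.json)"
+```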
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/example/mysql-deploy-dbbackup.example.md b/dbm-services/mysql/db-tools/dbactuator/example/mysql-deploy-dbbackup.example.md
new file mode 100644
index 0000000000..9b7187b3c6
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/example/mysql-deploy-dbbackup.example.md
@@ -0,0 +1,98 @@
+# dbactuator mysql deploy-dbbackup 
+
+### Original payload
+```
+{
+    "general":{
+       "runtime_account":{
+        "admin_user":"xx",
+        "admin_pwd": "xx",
+        "monitor_user":"xx",
+        "monitor_pwd":"xx",
+        "monitor_access_all_user":"xx",
+        "monitor_access_all_pwd":"xx",
+        "repl_user":"xx",
+        "repl_pwd":"xx",
+        "backup_user":"xx",
+        "backup_pwd":"xx",
+        "yw_user":"xx",
+        "yw_pwd": "xx"
+        "proxy_admin_user": "xx",
+        "proxy_admin_pwd": "xx"
+     }
+    },
+"extend":{
+    "pkg": "backup_1.0.15.tar.gz",
+    "pkg_md5": "20bc5a0172c72991d499bdb079fea445",
+    "app": "test_app",
+    "role": "MASTER",
+    "exec_user": "",
+    "configs": [
+        {
+            "bk_biz_id": "0",
+            "level_name": "plat",
+            "level_value": "0",
+            "conf_file_info": {
+                "namespace": "tendbha",
+                "conf_type": "MysqlBackup",
+                "conf_file": "dbbackup.conf",
+                "conf_type_lc": "",
+                "conf_file_lc": "",
+                "namespace_info": "",
+                "description": "",
+                "updated_by": "",
+                "created_at": "",
+                "updated_at": ""
+            },
+            "content": {
+                "BackTimeOut": "09:00:00",
+                "BackType": "GZTAB",
+                "BackupDir": "/data/dbbak",
+                "CrontabTime": "3 5 * * *",
+                "DataOrGrant": "ALL",
+                "FlushRetryCount": "3",
+                "FlushWaitTimeout": "30",
+                "LargetableSize": "10G",
+                "MysqlBinPath": "/usr/local/mysql/bin",
+                "MysqlCharset": "utf8mb4",
+                "MysqlHost": "DEFAULT",
+                "MysqlIgnoreDbList": "performance_schema information_schema mysql test infodba_schema",
+                "MysqlPass": "DEFAULT",
+                "MysqlPort": "3306",
+                "MysqlRole": "MASTER",
+                "MysqlUser": "DEFAULT",
+                "OldFileLeftDay": "2",
+                "ProductName": "DEFAULT",
+                "SplitCount": "10"
+            }
+        },
+        {
+            "bk_biz_id": "0",
+            "level_name": "plat",
+            "level_value": "0",
+            "conf_file_info": {
+                "namespace": "tendbha",
+                "conf_type": "MysqlBackup",
+                "conf_file": "local_backup_config_not_upload",
+                "conf_type_lc": "",
+                "conf_file_lc": "",
+                "namespace_info": "",
+                "description": "",
+                "updated_by": "",
+                "created_at": "",
+                "updated_at": ""
+            },
+            "content": {
+                "MaxConcurrency": "1",
+                "MaxResourceUsePercent": "90",
+                "SlowQueryWhiteUsers": "repl,system user,event_scheduler,dnf_oss",
+                "TarAndSplitSpeedLimit": "200"
+            }
+        }
+    ],
+    "host": "127.0.0.1",
+    "ports": [
+        20000
+    ]
+    }
+}
+```
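+
+### Invocation (illustrative)
+
+A minimal sketch, assuming the payload above is saved as payload.json; the flag style mirrors the `common file-server` example in the swagger docs and is an assumption:
+```
+# hypothetical invocation: configs above render dbbackup.conf for every port in ports
+./dbactuator mysql deploy-dbbackup \
+    --payload-format raw \
+    --payload "$(cat payload.json)"
+```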
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/example/mysql-deploy-monitor.example.md b/dbm-services/mysql/db-tools/dbactuator/example/mysql-deploy-monitor.example.md
new file mode 100644
index 0000000000..2280276a16
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/example/mysql-deploy-monitor.example.md
@@ -0,0 +1,389 @@
+# dbactuator mysql deploy-monitor
+
+### Original payload
+```
+{
+    "general":{
+       "runtime_account":{
+        "admin_user":"xx",
+        "admin_pwd": "xx",
+        "monitor_user":"xx",
+        "monitor_pwd":"xx",
+        "monitor_access_all_user":"xx",
+        "monitor_access_all_pwd":"xx",
+        "repl_user":"xx",
+        "repl_pwd":"xx",
+        "backup_user":"xx",
+        "backup_pwd":"xx",
+        "yw_user":"xx",
+        "yw_pwd": "xx"
+        "proxy_admin_user": "xx",
+        "proxy_admin_pwd": "xx"
+     }
+    },
+    "extend":{
+    "pkg": "monitor_9.3.23.tar.gz",
+    "pkg_md5": "d9ef73e30ca3e3407c08f2f4c2af3794",
+    "app": "test_app",
+    "dbas": "xxx",
+    "exec_user": "xx",
+    "configs": [
+        {
+            "bk_biz_id": "0",
+            "level_name": "plat",
+            "level_value": "0",
+            "conf_file_info": {
+                "namespace": "tendbha",
+                "conf_type": "MysqlMasterMonitor",
+                "conf_file": "db_monitor",
+                "conf_type_lc": "",
+                "conf_file_lc": "",
+                "namespace_info": "",
+                "description": "",
+                "updated_by": "",
+                "created_at": "",
+                "updated_at": ""
+            },
+            "content": {
+                "conn_log": "{\"check\": \"YES\",\"expire_days\": \"1\",\"max_size\": \"2G\"}",
+                "data_consistency": "{\"begin_time\": \"0205 0210\",\"check_data\": \"YES\",\"check_dataresult\": \"YES\",\"chunk_size\": \"10M\",\"chunk_time\": \"0.9\",\"do_database\": \"\",\"end_time\": \"0500\",\"ignore_databases\": \"mysql,test,information_schema,infodba_schema,sys,dbFFOTmpStat,performance_schema\",\"ignore_tables\": \"\",\"report_time\": \"0900 0905\",\"working_days\": \"1,2,3,4,5\"}",
+                "db_character": "{\"check\": \"YES\"}",
+                "db_hang": "{\"mail_valve\": \"20\",\"mc_valve\": \"200\"}",
+                "db_variables": "{\"check\": \"NO\",\"value\": \"\"}",
+                "dblogin_check": "{\"check_connlog_table\": \"YES\",\"check_init_connect\": \"YES\"}",
+                "dbmon_Heartbeat": "{\"check\": \"YES\",\"common_id\": \"000400000135\",\"slave_id\": \"000400000131\"}",
+                "dead_lock": "{\"check\": \"YES\",\"ignore\": \"\",\"valve\": \"5\"}",
+                "definer": "{\"check\": \"YES\"}",
+                "disk_io": "{\"avg_cpu_iowait_id\": \"13216\",\"avg_cpu_nice_id\": \"13219\",\"avg_cpu_system_id\": \"13217\",\"avg_cpu_used_id\": \"13215\",\"avg_cpu_user_id\": \"13218\",\"avgqu_sz_tnm_id\": \"1272\",\"avgqu_sz_tnm_id_1\": \"1893\",\"await_tnm_id\": \"1271\",\"await_tnm_id_1\": \"1892\",\"rs_tnm_id\": \"1273\",\"rs_tnm_id_1\": \"1894\",\"util_tnm_id\": \"1270\",\"util_tnm_id_1\": \"1891\",\"ws_tnm_id\": \"1274\",\"ws_tnm_id_1\": \"1895\"}",
+                "disk_os_space": "{\"check\": \"YES\",\"dba_used_percent\": \"90%\",\"percent_value_pre\": \"87\",\"percent_valve\": \"91\"}",
+                "free_space": "{\"percent_valve\": \"90\",\"size_valve\": \"5120\",\"tnm_id_per\": \"1018\",\"tnm_id_size\": \"1016\",\"tnm_mysql_data_size\": \"15030\"}",
+                "heavy_load_check": "{\"bak_time\": \"04 07\",\"io_valve\": \"90\",\"slow_valve\": \"100\"}",
+                "ignore_err": "{\"check\": \"YES\",\"ignore_err\": \"\"}",
+                "modify_data": "{\"check\": \"YES\",\"value\": \"60\"}",
+                "myisam_check": "{\"check\": \"YES\"}",
+                "mysql_memory": "{\"check_memory\": \"YES\",\"mysql32_warn\": \"2800000\",\"mysql64_warn\": \"97\",\"system_lowmem\": \"100000\",\"tnm_id\": \"1373\",\"tnm_rsz_id\": \"15787\"}",
+                "slave_status": "{\"check\": \"NO\",\"exec_id\": \"1459\",\"exec_valve\": \"1048576\",\"read_id\": \"1458\",\"read_valve\": \"262144\",\"time_id\": \"1460\"}",
+                "unnormal_sql_check": "{\"accounts\": \"event_scheduler\",\"check\": \"YES\",\"timelimit\": \"18000\"}",
+                "warn_level": {
+                    "DB_DEADLOCK_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                    "DB_SQL_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"2\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "DB_limit_connect": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                    "DB_memory_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                    "FREE_SPACE_warn_2": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"2\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "FREE_SPACE_warn_3": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "InnoDB_engines_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "MODIFY_DATA_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"5\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "Master_show_Connect": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                    "Slave_Connect_Master": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                    "Slave_Error_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "Slave_IO_Running_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"sh /home/mysql/monitor/callup_script/skip_binlog_lose.sh\",\"level\": \"0\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                    "Slave_SQL_Running_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"sh /home/mysql/monitor/callup_script/skip_slave_error.sh\",\"level\": \"0\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                    "Socket_File": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "Unnormal_Sql_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"2\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "conn_log_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"1\",\"unimportance_time\": \"0 0\"}",
+                    "conn_log_connect_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"2\",\"repeat\": \"1\",\"unimportance_time\": \"0 0\"}",
+                    "conn_log_switch_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"1\",\"unimportance_time\": \"0 0\"}",
+                    "conn_log_table_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"2\",\"repeat\": \"5\",\"unimportance_time\": \"0 0\"}",
+                    "data_consistency_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "db_agent_check_0": "{\"Triggering_warning\": \"3\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "db_agent_check_1": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "db_agent_check_5": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"5\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "db_character": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "db_con_timeout_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                    "db_connect_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "db_variables": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"2\",\"repeat\": \"10\",\"unimportance_time\": \"0 0\"}",
+                    "dblogin_connlog_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"sh /home/mysql/monitor/callup_script/create_connlog_table.sh\",\"level\": \"1\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                    "dblogin_initconnect_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                    "disk_os_space": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"1\",\"unimportance_time\": \"0 0\"}",
+                    "disk_os_space_1": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                    "disk_os_space_pre": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                    "err_log_exit_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "flush_table_with_read_lock": "{\"Triggering_warning\": \"0\",\"callup_script\": \"perl /home/mysql/monitor/callup_script/kill_FlushTableLocked_process.pl\",\"level\": \"0\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "free_space_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"sh /home/mysql/monitor/callup_script/create_free_space.sh\",\"level\": \"1\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "free_space_compare": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"2\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "frist_running_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"5\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "innodb_status_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "last_xml_timeout": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"5\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "lock_monitor_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "master_show_status": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "monitor_center_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "monitor_pause_info": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "my_cnf_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "myisam_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "mysql_errlog_content_l0": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "mysql_errlog_content_l3": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "routine_definer_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"6\",\"unimportance_time\": \"0 0\"}",
+                    "routine_definer_check_user": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"6\",\"unimportance_time\": \"0 0\"}",
+                    "send_mail_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"5\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "send_xml_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "send_xml_info": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"5\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "show_slave_status_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "slave_slow_info": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"2\",\"repeat\": \"288\",\"unimportance_time\": \"0 0\"}",
+                    "slowquery_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "slowquery_file_info": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                    "sql_monitor_check_log": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"5\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "status_config_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                    "tokudb_space": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"3\",\"unimportance_time\": \"0 0\"}"
+                },
+                "warn_swith": "{\"valve\": \"OPEN\"}",
+            }
+        },
+        {
+            "bk_biz_id": "0",
+            "level_name": "plat",
+            "level_value": "0",
+            "conf_file_info": {
+                "namespace": "tendbha",
+                "conf_type": "MysqlMasterMonitor",
+                "conf_file": "global_status",
+                "conf_type_lc": "",
+                "conf_file_lc": "",
+                "namespace_info": "",
+                "description": "",
+                "updated_by": "",
+                "created_at": "",
+                "updated_at": ""
+            },
+            "content": {
+                "Aborted_clients": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Aborted_connects": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Binlog_cache_disk_use": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Binlog_cache_use": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Bytes_received": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1014\",\"valve\": \"100000000\"}",
+                "Bytes_sent": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1015\",\"valve\": \"100000000\"}",
+                "CacheHit1": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1027\",\"valve\": \"100000000\"}",
+                "CacheHit2": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1028\",\"valve\": \"100000000\"}",
+                "Com_admin_commands": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_alter_db": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_alter_table": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_analyze": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_backup_table": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_begin": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_change_db": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_change_master": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_check": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_checksum": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_commit": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_create_db": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_create_function": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_create_index": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_create_table": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_create_user": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_dealloc_sql": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_delete": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1009\",\"valve\": \"100000000\"}",
+                "Com_delete_multi": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_do": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_drop_db": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_drop_function": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_drop_index": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_drop_table": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_drop_user": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_execute_sql": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_flush": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_grant": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_ha_close": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_ha_open": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_ha_read": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_help": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_insert": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1017\",\"valve\": \"100000000\"}",
+                "Com_insert_select": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_kill": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_load": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_load_master_data": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_load_master_table": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_lock_tables": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_optimize": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_preload_keys": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_prepare_sql": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_purge": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_purge_before_date": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_rename_table": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_repair": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_replace": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1031\",\"valve\": \"100000000\"}",
+                "Com_replace_select": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_reset": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_restore_table": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_revoke": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_revoke_all": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_rollback": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_savepoint": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_select": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1010\",\"valve\": \"100000000\"}",
+                "Com_set_option": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_binlog_events": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_binlogs": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_charsets": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_collations": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_column_types": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_create_db": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_create_table": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_databases": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_errors": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_fields": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_grants": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_innodb_status": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_keys": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_logs": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_master_status": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_ndb_status": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_new_master": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_open_tables": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_privileges": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_processlist": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_slave_hosts": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_slave_status": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_status": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_storage_engines": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_tables": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_triggers": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_variables": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_show_warnings": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_slave_start": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_slave_stop": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_stmt_close": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_stmt_execute": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_stmt_fetch": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_stmt_prepare": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_stmt_reset": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_stmt_send_long_data": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_truncate": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_unlock_tables": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_update": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1011\",\"valve\": \"100000000\"}",
+                "Com_update_multi": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_xa_commit": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_xa_end": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_xa_prepare": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_xa_recover": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_xa_rollback": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Com_xa_start": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Compression": "{\"is_additive\": \"1\",\"is_ignore\": \"0\",\"is_number\": \"0\",\"valve\": \"100000000\"}",
+                "Connections": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Created_tmp_disk_tables": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1020\",\"valve\": \"100000000\"}",
+                "Created_tmp_files": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1021\",\"valve\": \"100000000\"}",
+                "Created_tmp_tables": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1022\",\"valve\": \"100000000\"}",
+                "Delayed_errors": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Delayed_insert_threads": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Delayed_writes": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Flush_commands": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_commit": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_delete": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_discover": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_prepare": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_read_first": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_read_key": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_read_next": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_read_prev": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_read_rnd": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_read_rnd_next": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_rollback": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_savepoint": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_savepoint_rollback": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_update": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Handler_write": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Innodb_buffer_pool_pages_data": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1058\",\"valve\": \"100000000\"}",
+                "Innodb_buffer_pool_pages_dirty": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1059\",\"valve\": \"100000000\"}",
+                "Innodb_buffer_pool_pages_flushed": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1060\",\"valve\": \"100000000\"}",
+                "Innodb_buffer_pool_pages_free": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1061\",\"valve\": \"100000000\"}",
+                "Innodb_buffer_pool_pages_latched": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1062\",\"valve\": \"100000000\"}",
+                "Innodb_buffer_pool_pages_misc": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1063\",\"valve\": \"100000000\"}",
+                "Innodb_buffer_pool_pages_total": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1064\",\"valve\": \"100000000\"}",
+                "Innodb_buffer_pool_read_ahead_rnd": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1065\",\"valve\": \"100000000\"}",
+                "Innodb_buffer_pool_read_ahead_seq": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1066\",\"valve\": \"100000000\"}",
+                "Innodb_buffer_pool_read_requests": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1067\",\"valve\": \"100000000\"}",
+                "Innodb_buffer_pool_reads": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1068\",\"valve\": \"100000000\"}",
+                "Innodb_buffer_pool_wait_free": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1069\",\"valve\": \"100000000\"}",
+                "Innodb_buffer_pool_write_requests": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1070\",\"valve\": \"100000000\"}",
+                "Innodb_data_fsyncs": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1071\",\"valve\": \"100000000\"}",
+                "Innodb_data_pending_fsyncs": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1072\",\"valve\": \"100000000\"}",
+                "Innodb_data_pending_reads": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1073\",\"valve\": \"100000000\"}",
+                "Innodb_data_pending_writes": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1074\",\"valve\": \"100000000\"}",
+                "Innodb_data_read": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1075\",\"valve\": \"100000000\"}",
+                "Innodb_data_reads": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1076\",\"valve\": \"100000000\"}",
+                "Innodb_data_writes": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1077\",\"valve\": \"100000000\"}",
+                "Innodb_data_written": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1078\",\"valve\": \"100000000\"}",
+                "Innodb_dblwr_pages_written": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1079\",\"valve\": \"100000000\"}",
+                "Innodb_dblwr_writes": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1080\",\"valve\": \"100000000\"}",
+                "Innodb_log_waits": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1081\",\"valve\": \"100000000\"}",
+                "Innodb_log_write_requests": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1082\",\"valve\": \"100000000\"}",
+                "Innodb_log_writes": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1083\",\"valve\": \"100000000\"}",
+                "Innodb_os_log_fsyncs": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1084\",\"valve\": \"100000000\"}",
+                "Innodb_os_log_pending_fsyncs": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1085\",\"valve\": \"100000000\"}",
+                "Innodb_os_log_pending_writes": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1086\",\"valve\": \"100000000\"}",
+                "Innodb_os_log_written": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1087\",\"valve\": \"100000000\"}",
+                "Innodb_page_size": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1088\",\"valve\": \"100000000\"}",
+                "Innodb_pages_created": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1089\",\"valve\": \"100000000\"}",
+                "Innodb_pages_read": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1090\",\"valve\": \"100000000\"}",
+                "Innodb_pages_written": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1091\",\"valve\": \"100000000\"}",
+                "Innodb_row_lock_current_waits": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1092\",\"valve\": \"100000000\"}",
+                "Innodb_row_lock_time": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1093\",\"valve\": \"100000000\"}",
+                "Innodb_row_lock_time_avg": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1094\",\"valve\": \"100000000\"}",
+                "Innodb_row_lock_time_max": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1095\",\"valve\": \"100000000\"}",
+                "Innodb_row_lock_waits": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1096\",\"valve\": \"100000000\"}",
+                "Innodb_rows_deleted": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1097\",\"valve\": \"100000000\"}",
+                "Innodb_rows_inserted": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1098\",\"valve\": \"100000000\"}",
+                "Innodb_rows_read": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1099\",\"valve\": \"100000000\"}",
+                "Innodb_rows_updated": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1100\",\"valve\": \"100000000\"}",
+                "Key_blocks_not_flushed": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Key_blocks_unused": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Key_blocks_used": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Key_read_requests": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Key_reads": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Key_write_requests": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Key_writes": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Last_query_cost": "{\"is_additive\": \"1\",\"is_ignore\": \"0\",\"is_number\": \"0\",\"valve\": \"100000000\"}",
+                "Max_used_connections": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Not_flushed_delayed_rows": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Open_files": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Open_streams": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Open_tables": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Opened_tables": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Prepared_stmt_count": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Qcache_free_blocks": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Qcache_free_memory": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Qcache_hits": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Qcache_inserts": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Qcache_lowmem_prunes": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Qcache_not_cached": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Qcache_queries_in_cache": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Qcache_total_blocks": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Questions": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1007\",\"valve\": \"100000000\"}",
+                "Rpl_status": "{\"is_additive\": \"1\",\"is_ignore\": \"0\",\"is_number\": \"0\",\"valve\": \"100000000\"}",
+                "Select_full_join": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1023\",\"valve\": \"100000000\"}",
+                "Select_full_range_join": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1024\",\"valve\": \"100000000\"}",
+                "Select_range": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Select_range_check": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Select_scan": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1025\",\"valve\": \"100000000\"}",
+                "Slave_open_temp_tables": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Slave_retried_transactions": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Slave_running": "{\"is_additive\": \"1\",\"is_ignore\": \"0\",\"is_number\": \"0\",\"valve\": \"100000000\"}",
+                "Slow_launch_threads": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Slow_queries": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1008\",\"valve\": \"100000000\"}",
+                "Sort_merge_passes": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Sort_range": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Sort_rows": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Sort_scan": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Table_locks_immediate": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Table_locks_waited": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Tc_log_max_pages_used": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Tc_log_page_size": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Tc_log_page_waits": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Threads_cached": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Threads_connected": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1012\",\"valve\": \"100000000\"}",
+                "Threads_created": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Threads_running": "{\"is_additive\": \"0\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1013\",\"valve\": \"100000000\"}",
+                "Uptime": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "Uptime_since_flush_status": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"valve\": \"100000000\"}",
+                "free_space": "{\"compare\": \"lt\",\"is_additive\": \"1\",\"is_ignore\": \"0\",\"is_number\": \"1\",\"valve\": \"-50\",\"warn\": \"NO\",\"warn_level\": \"2\"}",
+                "os_waits": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1026\",\"valve\": \"100000000\"}",
+                "spin_rounds": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1030\",\"valve\": \"100000000\"}",
+                "spin_waits": "{\"is_additive\": \"1\",\"is_ignore\": \"1\",\"is_number\": \"1\",\"tnm_id\": \"1029\",\"valve\": \"100000000\"}"
+            }
+        }
+    ],
+    "role": "master",
+    "host": "127.0.0.1",
+    "ports": [
+        20000
+    ]
+    }
+}
+```
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/example/mysql-deploy.example.md b/dbm-services/mysql/db-tools/dbactuator/example/mysql-deploy.example.md
new file mode 100644
index 0000000000..314b74609f
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/example/mysql-deploy.example.md
@@ -0,0 +1,125 @@
+
+# dbactuator mysql deploy
+Install MySQL.
+`dbactuator mysql deploy -u xx -n xx -p `
+Prerequisites
+- Run `dbactuator sys init` first
+- Download mysql-5.7.20-linux-x86_64-tmysql-3.3-gcs.tar.gz to /data/install (a payload-encoding sketch follows this list)
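+
+A minimal Go sketch (not dbactuator source) that checks the payload below is valid JSON and prints its base64 form; it assumes the payload is saved as a hypothetical `payload.json` and that the `-p` flag accepts a base64-encoded payload — verify with `dbactuator mysql deploy --help`:
+
+```go
+// encode_payload.go — hypothetical helper, not part of dbactuator.
+package main
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"log"
+	"os"
+)
+
+func main() {
+	// read the raw payload shown below from a local file
+	raw, err := os.ReadFile("payload.json")
+	if err != nil {
+		log.Fatal(err)
+	}
+	// fail early if the payload is malformed JSON
+	var payload map[string]interface{}
+	if err := json.Unmarshal(raw, &payload); err != nil {
+		log.Fatalf("payload is not valid JSON: %v", err)
+	}
+	// print the base64 form typically passed on the command line
+	fmt.Println(base64.StdEncoding.EncodeToString(raw))
+}
+```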
+
+
+
+### Raw payload
+```
+{
+    "general": {
+       "runtime_account":{
+        "admin_user":"xx",
+        "admin_pwd": "xx",
+        "monitor_user":"xx",
+        "monitor_pwd":"xx",
+        "monitor_access_all_user":"xx",
+        "monitor_access_all_pwd":"xx",
+        "repl_user":"xx",
+        "repl_pwd":"xx",
+        "backup_user":"xx",
+        "backup_pwd":"xx",
+        "yw_user":"xx",
+        "yw_pwd": "xx"
+        "proxy_admin_user": "xx",
+        "proxy_admin_pwd": "xx"
+     }
+    },
+    "extend": {
+        "host": "127.0.0.1",
+        "pkg": "mysql-5.7.20-linux-x86_64-tmysql-3.3-gcs.tar.gz",
+        "pkg_md5": "ea15cee9abf6f11859b2144352f684c9",
+        "mysql_version": "5.7.20",
+        "charset": "utf8mb4",
+        "start_port": 20000,
+        "inst_num": 1,
+        "mycnf_configs": {
+            "client": {
+                "default-character-set": "{{mysqld.character_set_server}}",
+                "port": "{{mysqld.port}}",
+                "socket": "{{mysqld.datadir}}/mysql.sock"
+            },
+            "mysql": {
+                "default-character-set": "{{mysqld.character_set_server}}",
+                "no-auto-rehash": "1",
+                "port": "{{mysqld.port}}",
+                "socket": "{{mysqld.datadir}}/mysql.sock"
+            },
+            "mysqld": {
+                "bind-address": "{{mysqld.bind-address}}",
+                "binlog_format": "ROW",
+                "character_set_server": "{{mysqld.character_set_server}}",
+                "datadir": "{{mysqld.datadir}}/data",
+                "default-storage-engine": "InnoDB",
+                "default_time_zone": "+08:00",
+                "expire_logs_days": "60",
+                "init_connect": "insert into test.conn_log values(connection_id(),now(),user(),current_user(),null);",
+                "innodb_buffer_pool_instances": "4",
+                "innodb_buffer_pool_size": "{{mysqld.innodb_buffer_pool_size}}",
+                "innodb_data_file_path": "ibdata1:1G:autoextend",
+                "innodb_data_home_dir": "{{mysqld.datadir}}/innodb/data",
+                "innodb_file_format": "Barracuda",
+                "innodb_file_per_table": "1",
+                "innodb_flush_log_at_trx_commit": "0",
+                "innodb_io_capacity": "1000",
+                "innodb_lock_wait_timeout": "50",
+                "innodb_log_buffer_size": "32M",
+                "innodb_log_file_size": "256M",
+                "innodb_log_files_in_group": "4",
+                "innodb_log_group_home_dir": "{{mysqld.datadir}}/innodb/log",
+                "innodb_read_io_threads": "8",
+                "innodb_thread_concurrency": "16",
+                "innodb_write_io_threads": "8",
+                "interactive_timeout": "86400",
+                "log_bin": "{{mysqld.logdir}}/binlog/binlog{{port}}.bin",
+                "log_bin_compress": "OFF",
+                "log_bin_trust_function_creators": "1",
+                "log_slave_updates": "1",
+                "log_warnings": "0",
+                "long_query_time": "1",
+                "lower_case_table_names": "0",
+                "max_allowed_packet": "128m",
+                "max_binlog_cache_size": "128M",
+                "max_binlog_size": "256M",
+                "max_connect_errors": "99999999",
+                "max_connections": "6000",
+                "performance_schema": "OFF",
+                "port": "{{mysqld.port}}",
+                "query_cache_size": "0",
+                "query_cache_type": "0",
+                "query_response_time_stats": "ON",
+                "relay-log": "{{mysqld.datadir}}/relay-log/relay-log.bin",
+                "relay_log_recovery": "1",
+                "relay_log_uncompress": "OFF",
+                "server_id": "{{mysqld.server_id}}",
+                "show_compatibility_56": "ON",
+                "skip-name-resolve": "1",
+                "slave_compressed_protocol": "1",
+                "slave_exec_mode": "STRICT",
+                "slave_parallel_type": "DATABASE",
+                "slave_parallel_workers": "4",
+                "slow_query_log": "1",
+                "slow_query_log_file": "{{mysqld.logdir}}/slow-query.log",
+                "socket": "{{mysqld.datadir}}/mysql.sock",
+                "sort_buffer_size": "2M",
+                "sql_mode": "''",
+                "stored_program_cache": "1024",
+                "sync_binlog": "0",
+                "table_open_cache": "5120",
+                "thread_cache_size": "8",
+                "tmpdir": "{{mysqld.datadir}}/tmp",
+                "transaction_isolation": "REPEATABLE-READ",
+                "wait_timeout": "86400"
+            },
+            "mysqldump": {
+                "max_allowed_packet": "1G",
+                "quick": "1"
+            }
+        }
+    }
+}
+```
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/example/mysql-semantic-check.example.md b/dbm-services/mysql/db-tools/dbactuator/example/mysql-semantic-check.example.md
new file mode 100644
index 0000000000..398b0c8f2f
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/example/mysql-semantic-check.example.md
@@ -0,0 +1,45 @@
+## Run the semantic check
+> dbactuator mysql semantic-check
+
+### payload 
+```
+{
+ "general":{
+       "runtime_account":{
+        "admin_user":"xx",
+        "admin_pwd": "xx",
+        "monitor_user":"xx",
+        "monitor_pwd":"xx",
+        "monitor_access_all_user":"xx",
+        "monitor_access_all_pwd":"xx",
+        "repl_user":"xx",
+        "repl_pwd":"xx",
+        "backup_user":"xx",
+        "backup_pwd":"xx",
+        "yw_user":"xx",
+        "yw_pwd": "xx"
+        "proxy_admin_user": "xx",
+        "proxy_admin_pwd": "xx"
+     }
+    },
+    "extend":{
+        "host":"127.0.0.1",
+	    "port":3306,
+		"schemafile": "/data/install/schema.sql",
+		"remote_host": "127.0.0.1",
+		"remote_port": 20000,
+		"execute_objects": [
+            {
+                "sql_file": "/data/install/test.sql",
+				"ignore_dbnames": [],
+                "dbnames": [
+                    "test"
+                ]
+            }
+        ]
+  }
+}
+``` 
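+
+For reference, a small Go sketch of the shape of each `execute_objects` entry, with field names inferred from the payload above (a hypothetical helper, not dbactuator source):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// ExecuteObject mirrors one entry of "execute_objects" in the payload above.
+type ExecuteObject struct {
+	SQLFile       string   `json:"sql_file"`
+	IgnoreDBNames []string `json:"ignore_dbnames"`
+	DBNames       []string `json:"dbnames"`
+}
+
+func main() {
+	data := `[{"sql_file": "/data/install/test.sql", "ignore_dbnames": [], "dbnames": ["test"]}]`
+	var objs []ExecuteObject
+	if err := json.Unmarshal([]byte(data), &objs); err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", objs) // [{SQLFile:/data/install/test.sql IgnoreDBNames:[] DBNames:[test]}]
+}
+```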
+
+## Clean up the semantic-check instance
+> dbactuator mysql semantic-check --clean 
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/example/mysql-uninstall.example.md b/dbm-services/mysql/db-tools/dbactuator/example/mysql-uninstall.example.md
new file mode 100644
index 0000000000..120bf12f15
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/example/mysql-uninstall.example.md
@@ -0,0 +1,30 @@
+# dbactuator  mysql uninstall
+
+### Raw payload
+```
+{
+ "general":{
+       "runtime_account":{
+        "admin_user":"xx",
+        "admin_pwd": "xx",
+        "monitor_user":"xx",
+        "monitor_pwd":"xx",
+        "monitor_access_all_user":"xx",
+        "monitor_access_all_pwd":"xx",
+        "repl_user":"xx",
+        "repl_pwd":"xx",
+        "backup_user":"xx",
+        "backup_pwd":"xx",
+        "yw_user":"xx",
+        "yw_pwd": "xx"
+        "proxy_admin_user": "xx",
+        "proxy_admin_pwd": "xx"
+        }
+    },
+    "extend":{
+        "host":"127.0.0.1",
+	    "uninstall_option": 2,
+	    "ports":[20000]
+  }
+}
+```
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/example/proxy-deploy-monitor.example.md b/dbm-services/mysql/db-tools/dbactuator/example/proxy-deploy-monitor.example.md
new file mode 100644
index 0000000000..b1f2e0f01e
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/example/proxy-deploy-monitor.example.md
@@ -0,0 +1,125 @@
+# dbactuator proxy deploy-monitor
+Install the proxy monitoring tools.
+`dbactuator proxy deploy-monitor -u xx -n xx -p `
+Prerequisites
+- Download proxy_monitor_20210917.tar.gz to /data/install (an md5-verification sketch follows this list)
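+
+A standalone Go sketch (not dbactuator source) for verifying the downloaded package against the payload's `pkg_md5` before deploying; the path and expected hash are taken from the example below:
+
+```go
+// md5check.go — hypothetical helper for pre-flight package verification.
+package main
+
+import (
+	"crypto/md5"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"log"
+	"os"
+)
+
+func main() {
+	const want = "b315d3f16179d050c7dda66210f7b4e2" // pkg_md5 from the payload below
+	f, err := os.Open("/data/install/proxy_monitor_20210917.tar.gz")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer f.Close()
+
+	// stream the file through md5 and compare with the expected digest
+	h := md5.New()
+	if _, err := io.Copy(h, f); err != nil {
+		log.Fatal(err)
+	}
+	if got := hex.EncodeToString(h.Sum(nil)); got != want {
+		log.Fatalf("md5 mismatch: got %s, want %s", got, want)
+	}
+	fmt.Println("package md5 verified")
+}
+```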
+
+### Raw payload
+```
+{
+    "general":{
+       "runtime_account":{
+        "admin_user":"xx",
+        "admin_pwd": "xx",
+        "monitor_user":"xx",
+        "monitor_pwd":"xx",
+        "monitor_access_all_user":"xx",
+        "monitor_access_all_pwd":"xx",
+        "repl_user":"xx",
+        "repl_pwd":"xx",
+        "backup_user":"xx",
+        "backup_pwd":"xx",
+        "yw_user":"xx",
+        "yw_pwd": "xx"
+        "proxy_admin_user": "xx",
+        "proxy_admin_pwd": "xx"
+     }
+    },
+    "extend":{
+    "pkg": "proxy_monitor_20210917.tar.gz",
+    "pkg_md5": "b315d3f16179d050c7dda66210f7b4e2",
+    "app": "test_app",
+    "dbas": "xx",
+    "exec_user": "xx",
+    "configs": [
+        {
+            "bk_biz_id": "0",
+            "level_name": "plat",
+            "level_value": "0",
+            "conf_file_info": {
+                "namespace": "tendbha",
+                "conf_type": "MysqlProxyMonitor",
+                "conf_file": "proxy_monitor",
+                "conf_type_lc": "",
+                "conf_file_lc": "初始化用户",
+                "namespace_info": "",
+                "description": "我是描述",
+                "updated_by": "",
+                "created_at": "",
+                "updated_at": ""
+            },
+            "content": {
+                "backends_check": "{\"check\": \"YES\"}",
+                "conn_log": "{\"check\": \"YES\",\"expire_days\": \"1\",\"max_rows\": \"20000\",\"max_size\": \"2G\"}",
+                "disk_os_space": "{\"check\": \"YES\",\"dba_used_percent\": \"90%\",\"percent_value_pre\": \"91\",\"percent_valve\": \"94\"}",
+                "proxy_log": "{\"check\": \"YES\"}",
+                "proxy_progress": "{\"check\": \"YES\",\"restart\": \"YES\"}",
+                "proxy_state": "{\"check\": \"YES\"}",
+                "warn_level.conn_log_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"2\",\"repeat\": \"1\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.conn_log_switch_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"1\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.db_agent_check_0": "{\"Triggering_warning\": \"3\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.db_agent_check_1": "{\"Triggering_warning\": \"3\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.db_agent_check_5": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"5\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.disk_os_space": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"6\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.disk_os_space_1": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"6\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.disk_os_space_pre": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"6\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.monitor_center_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.proxy_backends_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.proxy_connection": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.proxy_log_content_error": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.proxy_log_content_warn": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.proxy_log_exit_check": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"2\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.proxy_progress_0": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.proxy_progress_3": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"3\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.proxy_state": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"0\",\"repeat\": \"0\",\"unimportance_time\": \"0 0\"}",
+                "warn_level.tokudb_space": "{\"Triggering_warning\": \"0\",\"callup_script\": \"null\",\"level\": \"1\",\"repeat\": \"3\",\"unimportance_time\": \"0 0\"}"
+            }
+        },
+        {
+            "bk_biz_id": "0",
+            "level_name": "plat",
+            "level_value": "0",
+            "conf_file_info": {
+                "namespace": "tendbha",
+                "conf_type": "MysqlProxyMonitor",
+                "conf_file": "warn_receiver",
+                "conf_type_lc": "",
+                "conf_file_lc": "",
+                "namespace_info": "",
+                "description": "",
+                "updated_by": "",
+                "created_at": "",
+                "updated_at": ""
+            },
+            "content": {
+                "warn_receiver": "{\"app\":\"{{}}\", \"db_cat\":\"{{}}\", \"duty_person\":\"{{}}\", \"mail_to\":\"{{}}\", \"domainurl\":\"http://127.0.0.1/\", \"sms_to\":\"\"}"
+            }
+        },
+        {
+            "bk_biz_id": "0",
+            "level_name": "plat",
+            "level_value": "0",
+            "conf_file_info": {
+                "namespace": "tendbha",
+                "conf_type": "MysqlProxyMonitor",
+                "conf_file": "xml_server",
+                "conf_type_lc": "",
+                "conf_file_lc": "",
+                "namespace_info": "",
+                "description": "",
+                "updated_by": "",
+                "created_at": "",
+                "updated_at": ""
+            },
+            "content": {
+                "xml_server": "{\"ips\":\"\"}"
+            }
+        }
+    ],
+    "host": "127.0.0.1",
+    "ports": [
+        10000
+    ]
+    }
+}
+```
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/example/proxy-deploy.example.md b/dbm-services/mysql/db-tools/dbactuator/example/proxy-deploy.example.md
new file mode 100644
index 0000000000..c86dafa361
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/example/proxy-deploy.example.md
@@ -0,0 +1,40 @@
+# dbactuator proxy deploy
+Install mysql-proxy:
+`dbactuator proxy deploy -u xx -n xx -p`
+
+Prerequisites:
+- run `dbactuator sys init` first
+- download mysql-proxy-0.82.9.tar.gz to /data/install
+
+### Raw payload
+```
+{
+    "general":{
+        "runtime_account":{
+        "admin_user":"xx",
+        "admin_pwd": "xx",
+        "monitor_user":"xx",
+        "monitor_pwd":"xx",
+        "monitor_access_all_user":"xx",
+        "monitor_access_all_pwd":"xx",
+        "repl_user":"xx",
+        "repl_pwd":"xx",
+        "backup_user":"xx",
+        "backup_pwd":"xx",
+        "yw_user":"xx",
+        "yw_pwd": "xx"
+        }
+    },
+    "extend":{
+    "host":"127.0.0.1",
+    "pkg": "mysql-proxy-0.82.9.tar.gz",
+    "pkg_md5": "7e42a8c69c2d296d379252cdca280afc",
+"start_port": 10000,
+    "inst_num": 1,
+    "proxy_configs": {
+        "mysql-proxy": {
+            "ignore-user": "MONITOR,proxy","conn_log": "true","daemon": "true","keepalive": "true","query_response_time_stats": "true","event-threads": "7","log-level": "warning","plugins": "admin, proxy","proxy-address": "127.0.0.1:3306"
+        }
+    }
+  }
+}
+```
\ No newline at end of file
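+
+### Invocation sketch
+A minimal sketch, assuming `-p`/`--payload` accepts the base64-encoded JSON above (the file-server example passes raw JSON with `--payload-format raw`, so base64 appears to be the default) and that `-u`/`-n` carry the task uid and node id:
+```bash
+# save the raw payload above as proxy-deploy.json, then:
+payload=$(base64 -w0 proxy-deploy.json)
+./dbactuator proxy deploy -u xx -n xx -p "$payload"
+```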
diff --git a/dbm-services/mysql/db-tools/dbactuator/go.mod b/dbm-services/mysql/db-tools/dbactuator/go.mod
new file mode 100644
index 0000000000..6dec465d9e
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/go.mod
@@ -0,0 +1,58 @@
+module dbm-services/mysql/db-tools/dbactuator
+
+go 1.19
+
+require (
+	github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2
+	github.com/dustin/go-humanize v1.0.1
+	github.com/go-sql-driver/mysql v1.7.1
+	github.com/jmoiron/sqlx v1.3.5
+	github.com/mitchellh/go-ps v1.0.0
+	github.com/pkg/errors v0.9.1
+	github.com/shirou/gopsutil/v3 v3.23.2
+	github.com/spf13/cobra v1.7.0
+	gopkg.in/ini.v1 v1.67.0
+)
+
+require (
+	github.com/fatih/color v1.13.0 // indirect
+	github.com/go-ole/go-ole v1.2.6 // indirect
+	github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/jtolds/gls v4.20.0+incompatible // indirect
+	github.com/kr/fs v0.1.0 // indirect
+	github.com/kr/pretty v0.3.1 // indirect
+	github.com/lib/pq v1.10.0 // indirect
+	github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de // indirect
+	github.com/mattn/go-colorable v0.1.12 // indirect
+	github.com/mattn/go-isatty v0.0.18 // indirect
+	github.com/mattn/go-sqlite3 v1.14.16 // indirect
+	github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
+	github.com/smartystreets/assertions v1.2.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/tklauser/go-sysconf v0.3.11 // indirect
+	github.com/tklauser/numcpus v0.6.0 // indirect
+	github.com/yusufpapurcu/wmi v1.2.2 // indirect
+	golang.org/x/sys v0.8.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+)
+
+require (
+	github.com/MakeNowJust/heredoc v1.0.0
+	github.com/caarlos0/env/v6 v6.10.1
+	github.com/dlclark/regexp2 v1.8.1
+	github.com/ghodss/yaml v1.0.0
+	github.com/gofrs/flock v0.8.1
+	github.com/golang-jwt/jwt/v4 v4.4.2
+	github.com/hashicorp/go-version v1.6.0
+	github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f // indirect
+	github.com/jinzhu/copier v0.3.5
+	github.com/mitchellh/mapstructure v1.5.0
+	github.com/panjf2000/ants/v2 v2.7.2
+	github.com/pkg/sftp v1.13.5
+	github.com/smartystreets/goconvey v1.7.2
+	github.com/spf13/cast v1.5.0
+	golang.org/x/crypto v0.8.0
+	golang.org/x/net v0.9.0
+	gopkg.in/yaml.v2 v2.4.0
+)
diff --git a/dbm-services/mysql/db-tools/dbactuator/go.sum b/dbm-services/mysql/db-tools/dbactuator/go.sum
new file mode 100644
index 0000000000..37021fa587
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/go.sum
@@ -0,0 +1,160 @@
+github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
+github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
+github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2 h1:ZBbLwSJqkHBuFDA6DUhhse0IGJ7T5bemHyNILUjvOq4=
+github.com/TylerBrock/colorjson v0.0.0-20200706003622-8a50f05110d2/go.mod h1:VSw57q4QFiWDbRnjdX8Cb3Ow0SFncRw+bA/ofY6Q83w=
+github.com/caarlos0/env/v6 v6.10.1 h1:t1mPSxNpei6M5yAeu1qtRdPAK29Nbcf/n3G7x+b3/II=
+github.com/caarlos0/env/v6 v6.10.1/go.mod h1:hvp/ryKXKipEkcuYjs9mI4bBCg+UI0Yhgm5Zu0ddvwc=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dlclark/regexp2 v1.8.1 h1:6Lcdwya6GjPUNsBct8Lg/yRPwMhABj269AAzdGSiR+0=
+github.com/dlclark/regexp2 v1.8.1/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
+github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
+github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
+github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
+github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f h1:7LYC+Yfkj3CTRcShK0KOL/w6iTiKyqqBA9a41Wnggw8=
+github.com/hokaccha/go-prettyjson v0.0.0-20211117102719-0474bc63780f/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
+github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
+github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
+github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E=
+github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de h1:V53FWzU6KAZVi1tPp5UIsMoUWJ2/PNwYIDXnu7QuBCE=
+github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE=
+github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
+github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
+github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
+github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc=
+github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/panjf2000/ants/v2 v2.7.2 h1:2NUt9BaZFO5kQzrieOmK/wdb/tQ/K+QHaxN8sOgD63U=
+github.com/panjf2000/ants/v2 v2.7.2/go.mod h1:KIBmYG9QQX5U2qzFP/yQJaq/nSb6rahS9iEHkrCMgM8=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go=
+github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig=
+github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/shirou/gopsutil/v3 v3.23.2 h1:PAWSuiAszn7IhPMBtXsbSCafej7PqUOvY6YywlQUExU=
+github.com/shirou/gopsutil/v3 v3.23.2/go.mod h1:gv0aQw33GLo3pG8SiWKiQrbDzbRY1K80RyZJ7V4Th1M=
+github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
+github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo=
+github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg=
+github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
+github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
+github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
+github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I=
+github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
+github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
+github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
+github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
+github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
+github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
+golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
+golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
+gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/cmd.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/cmd.go
new file mode 100644
index 0000000000..c455d13140
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/cmd.go
@@ -0,0 +1,50 @@
+package commoncmd
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/templates"
+
+	"github.com/spf13/cobra"
+)
+
+// NewCommonCommand groups the common operation subcommands
+// @todo download could be split out as a standalone subcommand in the future
+func NewCommonCommand() *cobra.Command {
+	cmds := &cobra.Command{
+		Use:   "common [common operation]",
+		Short: "Common components Operation Command Line Interface",
+		RunE:  subcmd.ValidateSubCommand(),
+	}
+	groups := templates.CommandGroups{
+		{
+			Message: "common operation sets",
+			Commands: []*cobra.Command{
+				CommandFileServer(),
+				RMLargeFileCommand(),
+			},
+		},
+	}
+	groups.Add(cmds)
+	return cmds
+}
+
+// NewDownloadCommand groups the download subcommands
+func NewDownloadCommand() *cobra.Command {
+	cmds := &cobra.Command{
+		Use:   "download [download operation]",
+		Short: "download components Operation Command Line Interface",
+	}
+	groups := templates.CommandGroups{
+		{
+			Message: "download operation sets",
+			Commands: []*cobra.Command{
+				CommandDownloadScp(),
+				CommandDownloadHttp(),
+				CommandIBSRecover(),
+				CommandIBSQuery(),
+			},
+		},
+	}
+	groups.Add(cmds)
+	return cmds
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/commoncmd.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/commoncmd.go
new file mode 100644
index 0000000000..b9608079a6
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/commoncmd.go
@@ -0,0 +1,2 @@
+// Package commoncmd implements common operation subcommands (file server, downloads, large-file removal)
+package commoncmd
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_http.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_http.go
new file mode 100644
index 0000000000..440b8b676b
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_http.go
@@ -0,0 +1,102 @@
+package commoncmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// DownloadHttpAct is the http download action
+type DownloadHttpAct struct {
+	*subcmd.BaseOptions
+	Payload backup_download.DFHttpComp
+}
+
+// CommandDownloadHttp godoc
+//
+// @Summary      download a file over http
+// @Description  supports rate limiting and basicAuth; usually paired with common file-server
+// @Description # server1
+// @Description ./dbactuator common file-server \
+// @Description --payload-format raw \
+// @Description --payload '{"extend":{"bind_address":":8082","mount_path":"/data/dbbak","user":"xiaog","password":"xxxx","proc_maxidle_duration":"60s"}}'
+// @Description
+// @Description # server2
+// @Description curl -u 'xiaog:xxxx' 'http://server1:8082/datadbbak8082/dbactuator' -o dbactuator.bin --limit-rate 10k
+// @Tags         download
+// @Accept       json
+// @Param        body body      backup_download.DFHttpParam  true  "short description"
+// @Router       /download/http [post]
+func CommandDownloadHttp() *cobra.Command {
+	act := DownloadHttpAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "http",
+		Short: "http下载文件",
+		Example: fmt.Sprintf(
+			`dbactuator download http %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init validates base options and deserializes the payload params
+func (d *DownloadHttpAct) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil { // @todo should validate at the very beginning
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	return
+}
+
+// Validate is a no-op; option checks happen in Init
+func (d *DownloadHttpAct) Validate() error {
+	return nil
+}
+
+// Run executes the http download steps
+func (d *DownloadHttpAct) Run() error {
+	steps := subcmd.Steps{
+		{
+			FunName: "测试目标连接性",
+			Func:    d.Payload.Init,
+		},
+		{
+			FunName: "下载预检查",
+			Func:    d.Payload.PreCheck,
+		},
+		{
+			FunName: "开始下载",
+			Func:    d.Payload.Start,
+		},
+		{
+			FunName: "等待下载完成",
+			Func:    d.Payload.WaitDone,
+		},
+		{
+			FunName: "完成校验",
+			Func:    d.Payload.PostCheck,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("download files successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_ibs_query.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_ibs_query.go
new file mode 100644
index 0000000000..a59098dad0
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_ibs_query.go
@@ -0,0 +1,88 @@
+package commoncmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// IBSQueryAct is the ieg backup system query action
+type IBSQueryAct struct {
+	*subcmd.BaseOptions
+	Payload backup_download.IBSQueryComp
+}
+
+// CommandIBSQuery godoc
+//
+// @Summary      query files from the ieg backup system
+// @Description  filename is fuzzy matched; the returned task_id is used for download
+// @Tags         download
+// @Accept       json
+// @Param        body body      backup_download.IBSQueryComp  true  "short description"
+// @Success      200  {object}  backup_download.IBSQueryResult
+// @Router       /download/ibs-query [post]
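+//
+// The task_id values in the query result are the input that
+// `dbactuator download ibs-recover` expects.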
+func CommandIBSQuery() *cobra.Command {
+	act := IBSQueryAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "ibs-query",
+		Short: "从 ieg 备份系统查询",
+		Example: fmt.Sprintf(
+			`dbactuator download ibs-query %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init validates base options and deserializes the payload params
+func (d *IBSQueryAct) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil {
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	return
+}
+
+// Validate is a no-op; option checks happen in Init
+func (d *IBSQueryAct) Validate() error {
+	return nil
+}
+
+// Run executes the ibs query steps
+func (d *IBSQueryAct) Run() error {
+	steps := subcmd.Steps{
+		{
+			FunName: "初始化",
+			Func:    d.Payload.Init,
+		},
+		{
+			FunName: "查询预检查",
+			Func:    d.Payload.PreCheck,
+		},
+		{
+			FunName: "查询备份文件",
+			Func:    d.Payload.Start,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("query files from ieg backup system successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_ibs_recover.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_ibs_recover.go
new file mode 100644
index 0000000000..172406706b
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_ibs_recover.go
@@ -0,0 +1,103 @@
+package commoncmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// IBSRecoverAct is the ieg backup system download action
+type IBSRecoverAct struct {
+	*subcmd.BaseOptions
+	Payload backup_download.IBSRecoverComp
+}
+
+// CommandIBSRecover godoc
+//
+// @Summary      download files from the ieg backup system
+// @Description  given a task_id list, download files from the ieg backup system
+// @Description task_files_wild: fuzzy-search files and download; task_files: exact-match files and download
+// @Description exactly one of task_files_wild / task_files should be provided
+// @Description with skip_local_exists=true, files already present in the target directory are skipped
+// @Tags         download
+// @Accept       json
+// @Param        body body      backup_download.IBSRecoverComp  true  "short description"
+// @Success      200  {object}  backup_download.IBSRecoverTask
+// @Router       /download/ibs-recover [post]
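+//
+// Request sketch (shapes are an assumption; set exactly one of the two keys):
+//
+//	{"extend": {"task_files_wild": {...}}}  or  {"extend": {"task_files": [...]}}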
+func CommandIBSRecover() *cobra.Command {
+	act := IBSRecoverAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "ibs-recover",
+		Short: "从 ieg 备份系统下载文件",
+		Example: fmt.Sprintf(
+			`dbactuator download ibs-recover %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init validates base options and deserializes the payload params
+func (d *IBSRecoverAct) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil {
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	return
+}
+
+// Validate is a no-op; option checks happen in Init
+func (d *IBSRecoverAct) Validate() error {
+	return nil
+}
+
+// Run executes the ibs recover steps
+func (d *IBSRecoverAct) Run() error {
+	steps := subcmd.Steps{
+		{
+			FunName: "初始化",
+			Func:    d.Payload.Init,
+		},
+		{
+			FunName: "下载预检查",
+			Func:    d.Payload.PreCheck,
+		},
+		{
+			FunName: "开始下载",
+			Func:    d.Payload.Start,
+		},
+		{
+			FunName: "等待下载完成",
+			Func:    d.Payload.WaitDone,
+		},
+		{
+			FunName: "后置检查",
+			Func:    d.Payload.PostCheck,
+		},
+		{
+			FunName: "输出下载结果",
+			Func:    d.Payload.OutputCtx,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("download files from ieg backup system successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_scp.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_scp.go
new file mode 100644
index 0000000000..2e9c4d9378
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/download_scp.go
@@ -0,0 +1,94 @@
+package commoncmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// DownloadScpAct is the scp download action
+type DownloadScpAct struct {
+	*subcmd.BaseOptions
+	Payload backup_download.DFScpComp
+}
+
+// CommandDownloadScp godoc
+//
+// @Summary      download a file over scp
+// @Description  supports rate limiting
+// @Tags         download
+// @Accept       json
+// @Param        body body      backup_download.DFScpParam  true  "short description"
+// @Router       /download/scp [post]
+func CommandDownloadScp() *cobra.Command {
+	act := DownloadScpAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "scp",
+		Short: "scp下载文件",
+		Example: fmt.Sprintf(
+			`dbactuator download scp %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init validates base options and deserializes the payload params
+func (d *DownloadScpAct) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil { // @todo should validate at the very beginning
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	return
+}
+
+// Validate is a no-op; option checks happen in Init
+func (d *DownloadScpAct) Validate() error {
+	return nil
+}
+
+// Run executes the scp download steps
+func (d *DownloadScpAct) Run() error {
+	steps := subcmd.Steps{
+		{
+			FunName: "测试目标连接性",
+			Func:    d.Payload.Init,
+		},
+		{
+			FunName: "下载预检查",
+			Func:    d.Payload.PreCheck,
+		},
+		{
+			FunName: "开始下载",
+			Func:    d.Payload.Start,
+		},
+		{
+			FunName: "等待下载完成",
+			Func:    d.Payload.WaitDone,
+		},
+		{
+			FunName: "完成校验",
+			Func:    d.Payload.PostCheck,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("download files successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/fileserver.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/fileserver.go
new file mode 100644
index 0000000000..d7696be6f6
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/fileserver.go
@@ -0,0 +1,86 @@
+package commoncmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/fileserver"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// FileServerAct is the file-server action
+type FileServerAct struct {
+	*subcmd.BaseOptions
+	Payload fileserver.FileServerComp
+}
+
+// CommandFileServer godoc
+//
+// @Summary      simple file server
+// @Description  exposes a directory over http for download, e.g. to fetch backups from another host when rebuilding a slave
+// @Description when ssh logins (scp/sftp) are not allowed on the OS, this service can be started temporarily to serve backup files
+// @Tags         common
+// @Accept       json
+// @Param        body body      fileserver.FileServerComp  true  "short description"
+// @Router       /common/file-server [post]
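+//
+// Pairs with `dbactuator download http` (see download_http.go), which pulls
+// files from this server with basicAuth and rate limiting.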
+func CommandFileServer() *cobra.Command {
+	act := FileServerAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "file-server",
+		Short: "启动文件服务",
+		Example: fmt.Sprintf(
+			`dbactuator file-server %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init validates base options and deserializes the payload params
+func (d *FileServerAct) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil { // @todo should validate at the very beginning
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	// d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Run executes the file-server steps
+func (d *FileServerAct) Run() error {
+	steps := subcmd.Steps{
+		{
+			FunName: "初始化参数",
+			Func:    d.Payload.Params.New,
+		},
+		{
+			FunName: "启动fileserver",
+			Func:    d.Payload.Params.Start,
+		},
+		{
+			FunName: "等待结束",
+			Func:    d.Payload.Params.WaitDone,
+		},
+		{
+			FunName: "是否打印download 信息",
+			Func:    d.Payload.Params.OutputCtx,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	// logger.Info("fileserver start successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/rm_large_file.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/rm_large_file.go
new file mode 100644
index 0000000000..adc25e5be2
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/commoncmd/rm_large_file.go
@@ -0,0 +1,121 @@
+package commoncmd
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+)
+
+// RMLargeFileCmd is the rate-limited large-file removal command
+type RMLargeFileCmd struct {
+	*subcmd.BaseOptions
+	Payload RMLargeFileParam
+}
+
+// RMLargeFileParam params for rate-limited removal of a large file
+type RMLargeFileParam struct {
+	Filename string `json:"filename" validate:"required"`
+	// deletion rate limit in MB/s, defaults to 30
+	BWLimitMB int `json:"bw_limit_mb" validate:"required,gte=1,lte=1000" default:"30"`
+}
+
+// Example returns an example payload
+func (p RMLargeFileParam) Example() interface{} {
+	comp := RMLargeFileParam{
+		Filename:  "xxx",
+		BWLimitMB: 30,
+	}
+	return comp
+}
+
+// PreCheck verifies the target is an existing regular file and applies the
+// default rate limit; the receiver is a pointer so the default persists
+func (p *RMLargeFileParam) PreCheck() error {
+	if !cmutil.FileExists(p.Filename) {
+		return errors.Errorf("file not exists %s", p.Filename)
+	} else if cmutil.IsDirectory(p.Filename) {
+		return errors.Errorf("path is directory %s", p.Filename)
+	}
+	if p.BWLimitMB == 0 {
+		p.BWLimitMB = 30
+	}
+	// writable?
+	return nil
+}
+
+// Start truncates the file with rate limiting and removes it
+func (p *RMLargeFileParam) Start() error {
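+	// cmutil.TruncateFile is assumed to shrink the file in chunks at roughly
+	// BWLimitMB MB/s before unlinking it, so removing a huge file does not
+	// cause a single large IO burst.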
+	if err := cmutil.TruncateFile(p.Filename, p.BWLimitMB); err != nil {
+		logger.Error(errors.WithStack(err).Error())
+		return err
+	}
+	return nil
+}
+
+// RMLargeFileCommand godoc
+//
+// @Summary      remove a large file with rate limiting
+// @Tags         common
+// @Accept       json
+// @Param        body body      RMLargeFileParam  true  "short description"
+// @Router       /common/rm-file [post]
+func RMLargeFileCommand() *cobra.Command {
+	act := RMLargeFileCmd{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "rm-file",
+		Short: "限速删除大文件",
+		Example: fmt.Sprintf(
+			`dbactuator common rm-file %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init validates base options and deserializes the payload
+func (d *RMLargeFileCmd) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil { // @todo should validate at the very beginning
+		return err
+	}
+	if err = d.DeserializeSimple(&d.Payload); err != nil {
+		logger.Error("DeserializeSimple err %s", err.Error())
+		return err
+	}
+	return
+}
+
+// Validate is a no-op; option checks happen in Init
+func (d *RMLargeFileCmd) Validate() error {
+	return nil
+}
+
+// Run executes the rm-file steps
+func (d *RMLargeFileCmd) Run() error {
+	steps := subcmd.Steps{
+		{
+			FunName: "预检查",
+			Func:    d.Payload.PreCheck,
+		},
+		{
+			FunName: "删除",
+			Func:    d.Payload.Start,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("rm file %s successfully", d.Payload.Filename)
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/crontabcmd/clear_crontab.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/crontabcmd/clear_crontab.go
new file mode 100644
index 0000000000..62f8749b1b
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/crontabcmd/clear_crontab.go
@@ -0,0 +1,68 @@
+package crontabcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/crontab"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// ClearCrontabAct clears scheduled crontab tasks
+type ClearCrontabAct struct {
+	*subcmd.BaseOptions
+	Service crontab.ClearCrontabParam
+}
+
+// ClearCrontabCommand defines the clear-crontab subcommand
+func ClearCrontabCommand() *cobra.Command {
+	act := ClearCrontabAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "clear-crontab",
+		Short:   "清理crontab",
+		Example: fmt.Sprintf(`dbactuator clear-crontab %s`, subcmd.CmdBaseExampleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init deserializes and validates the payload
+func (s *ClearCrontabAct) Init() (err error) {
+	if err = s.DeserializeAndValidate(&s.Service); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	return
+}
+
+// Run executes the crontab cleanup steps
+func (s *ClearCrontabAct) Run() (err error) {
+	steps := []subcmd.StepFunc{
+		{
+			FunName: "清理机器的crontab",
+			Func:    s.Service.CleanCrontab,
+		},
+		{
+			FunName: "清理机器周边目录",
+			Func:    s.Service.CleanDBToolsFolder,
+		},
+	}
+	logger.Info("start clean crontab ...")
+	for idx, f := range steps {
+		if err = f.Func(); err != nil {
+			logger.Error("step <%d>, run [%s] occur %v", idx, f.FunName, err)
+			return err
+		}
+		logger.Info("step <%d>, run [%s] successfully", idx, f.FunName)
+	}
+	logger.Info("clean crontab successfully")
+	return
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/crontabcmd/crontabcmd.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/crontabcmd/crontabcmd.go
new file mode 100644
index 0000000000..cce8a4a6ca
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/crontabcmd/crontabcmd.go
@@ -0,0 +1,2 @@
+// Package crontabcmd implements crontab cleanup subcommands
+package crontabcmd
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/backup_database_table.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/backup_database_table.go
new file mode 100644
index 0000000000..12cefe65dd
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/backup_database_table.go
@@ -0,0 +1,85 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// BackupDatabaseTable command name constant
+const BackupDatabaseTable = "backup-database-table"
+
+// BackupDatabaseTableAct is the database/table backup action
+type BackupDatabaseTableAct struct {
+	*subcmd.BaseOptions
+	Payload mysql.BackupDatabaseTableComp
+}
+
+// NewBackupDatabaseTableCommand defines the backup-database-table subcommand
+func NewBackupDatabaseTableCommand() *cobra.Command {
+	act := BackupDatabaseTableAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     BackupDatabaseTable,
+		Short:   "备库库表",
+		Example: fmt.Sprintf(`dbactuator mysql %s %s`, BackupDatabaseTable, subcmd.CmdBaseExampleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate validates base options
+func (c *BackupDatabaseTableAct) Validate() (err error) {
+	return c.BaseOptions.Validate()
+}
+
+// Init deserializes and validates the payload
+func (c *BackupDatabaseTableAct) Init() (err error) {
+	if err = c.DeserializeAndValidate(&c.Payload); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	return
+}
+
+// Run executes the backup steps
+func (c *BackupDatabaseTableAct) Run() (err error) {
+	// wrapped in a closure so the final value of err is logged, not the nil
+	// it holds when the defer statement is evaluated
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+	// subcmd.Steps run in order; if a step fails, the remaining steps are skipped
+	steps := subcmd.Steps{
+		{
+			FunName: "Precheck",
+			Func:    c.Payload.Precheck,
+		},
+		{
+			FunName: "CreateBackupConfigFile",
+			Func:    c.Payload.CreateBackupConfigFile,
+		},
+		{
+			FunName: "DoBackup",
+			Func:    c.Payload.DoBackup,
+		},
+		{
+			FunName: "RemoveBackupConfigFile",
+			Func:    c.Payload.RemoveBackupConfigFile,
+		},
+		{
+			FunName: "OutputBackupInfo",
+			Func:    c.Payload.OutputBackupInfo,
+		},
+	}
+	if err = steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("备份成功")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/backup_truncate_database.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/backup_truncate_database.go
new file mode 100644
index 0000000000..b14b00973d
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/backup_truncate_database.go
@@ -0,0 +1,97 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// BackupTruncateDatabaseAct backs up databases before a truncate operation
+type BackupTruncateDatabaseAct struct {
+	*subcmd.BaseOptions
+	Service mysql.BackupTruncateDatabaseComp
+}
+
+const (
+	// BackupTruncateDatabase command name constant
+	BackupTruncateDatabase = "backup-truncate-database"
+)
+
+// NewBackupTruncateDatabaseCommand defines the subcommand
+func NewBackupTruncateDatabaseCommand() *cobra.Command {
+	act := BackupTruncateDatabaseAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+
+	cmd := &cobra.Command{
+		Use:     BackupTruncateDatabase,
+		Short:   "备份清档库",
+		Example: fmt.Sprintf(`dbactuator mysql %s %s`, BackupTruncateDatabase, subcmd.CmdBaseExampleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate validates base options
+func (c *BackupTruncateDatabaseAct) Validate() (err error) {
+	return c.BaseOptions.Validate()
+}
+
+// Init deserializes the payload params and injects general runtime params
+func (c *BackupTruncateDatabaseAct) Init() (err error) {
+	if err = c.Deserialize(&c.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	c.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Run executes the backup-truncate steps
+func (c *BackupTruncateDatabaseAct) Run() (err error) {
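+	// An assumption from the step names below: the schema of the target
+	// databases is dumped, database names are rewritten in the dump file,
+	// any leftover copy of the new databases is dropped, and the rewritten
+	// schema is imported under the new names.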
+	steps := subcmd.Steps{
+		{
+			FunName: "Precheck",
+			Func:    c.Service.Precheck,
+		},
+		{
+			FunName: "Init",
+			Func: func() error {
+				return c.Service.Init(c.Uid)
+			},
+		},
+		{
+			FunName: "ReadBackupConf",
+			Func:    c.Service.ReadBackupConf,
+		},
+		{
+			FunName: "DumpSchema",
+			Func:    c.Service.DumpSchema,
+		},
+		{
+			FunName: "ModifyFile",
+			Func:    c.Service.ModifyFile,
+		},
+		{
+			FunName: "CleanNewDB",
+			Func:    c.Service.CleanNewDB,
+		},
+		{
+			FunName: "ImportSchema",
+			Func:    c.Service.ImportSchema,
+		},
+	}
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("备份成功")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/build_master_slave_relation.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/build_master_slave_relation.go
new file mode 100644
index 0000000000..3c593d24f9
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/build_master_slave_relation.go
@@ -0,0 +1,98 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// BuildMsRelationAct is the change-master action
+type BuildMsRelationAct struct {
+	*subcmd.BaseOptions
+	Payload mysql.BuildMSRelationComp
+}
+
+// NewBuildMsRelatioCommand godoc
+//
+// @Summary      set up a master-slave relation
+// @Description  runs change master to
+// @Tags         mysql
+// @Accept       json
+// @Param        body body      mysql.BuildMSRelationComp  true  "short description"
+// @Router       /mysql/change-master [post]
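+//
+// A sketch of the statement this ultimately issues (coordinates come from
+// mysql.BuildMSRelationComp; the exact option list is an assumption):
+//
+//	CHANGE MASTER TO MASTER_HOST='1.1.1.1', MASTER_PORT=3306,
+//	    MASTER_USER='repl', MASTER_PASSWORD='xx',
+//	    MASTER_LOG_FILE='binlog.000001', MASTER_LOG_POS=4;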
+func NewBuildMsRelatioCommand() *cobra.Command {
+	act := BuildMsRelationAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "change-master",
+		Short: "建立主从关系",
+		Example: fmt.Sprintf(
+			`dbactuator mysql change-master %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	// logger.Info("%s[%s]", cmd.Short, cmd.Use)
+	return cmd
+}
+
+// Init deserializes the payload params and injects general runtime params
+func (b *BuildMsRelationAct) Init() (err error) {
+	if err = b.Deserialize(&b.Payload.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	b.Payload.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Validate validates base options
+func (b *BuildMsRelationAct) Validate() (err error) {
+	return b.BaseOptions.Validate()
+}
+
+// Run executes the change-master steps
+func (b *BuildMsRelationAct) Run() (err error) {
+	// closure so the final err is logged rather than its value at defer time
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+	steps := subcmd.Steps{
+		{
+			FunName: "初始化本地db连接",
+			Func:    b.Payload.Init,
+		},
+		{
+			FunName: "当时实例检查",
+			Func:    b.Payload.CheckCurrentSlaveStatus,
+		},
+		{
+			FunName: "主从版本检查",
+			Func:    b.Payload.CheckMSVersion,
+		},
+		{
+			FunName: "主从字符集检查",
+			Func:    b.Payload.CheckCharSet,
+		},
+		{
+			FunName: "建立主从关系",
+			Func:    b.Payload.BuildMSRelation,
+		},
+		{
+			FunName: "检查是否关系建立正常",
+			Func:    b.Payload.CheckBuildOk,
+		},
+	}
+	if err = steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("build master slave realtion successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/clean_mysql.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/clean_mysql.go
new file mode 100644
index 0000000000..2d65720878
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/clean_mysql.go
@@ -0,0 +1,91 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// CleanMysqlAct is the clean-mysql action
+type CleanMysqlAct struct {
+	*subcmd.BaseOptions
+	Payload mysql.CleanMysqlComp
+}
+
+// CleanMysqlCommand godoc
+//
+// @Summary      wipe a mysql instance (high risk)
+// @Description  wipes the local instance, keeping system databases
+// @Tags         mysql
+// @Accept       json
+// @Produce      json
+// @Param        body body      mysql.CleanMysqlComp  true  "description"
+// @Router       /mysql/clean-mysql [post]
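+//
+// Which databases count as system databases is decided inside
+// mysql.CleanMysqlComp; the usual set (an assumption) is mysql,
+// information_schema, performance_schema and sys.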
+func CleanMysqlCommand() *cobra.Command {
+	act := CleanMysqlAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "clean-mysql",
+		Short: "清空实例",
+		Example: fmt.Sprintf(
+			`dbactuator mysql clean-mysql %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init validates base options, deserializes the payload params and injects
+// general runtime params
+func (d *CleanMysqlAct) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil { // @todo should validate at the very beginning
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	logger.Warn("params %+v", d.Payload.Params)
+	d.Payload.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Validate is a no-op; option checks happen in Init
+func (d *CleanMysqlAct) Validate() error {
+	return nil
+}
+
+// Run executes the clean-mysql steps
+func (d *CleanMysqlAct) Run() (err error) {
+	// closure so the final err is logged rather than its value at defer time
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+	steps := subcmd.Steps{
+		{
+			FunName: "初始化",
+			Func:    d.Payload.Init,
+		},
+		{
+			FunName: "预检查",
+			Func:    d.Payload.PreCheck,
+		},
+		{
+			FunName: "清空实例",
+			Func:    d.Payload.Start,
+		},
+	}
+
+	if err = steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("clean mysql instance successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/clear_instance_config.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/clear_instance_config.go
new file mode 100644
index 0000000000..18ae385c10
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/clear_instance_config.go
@@ -0,0 +1,78 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// Instance-level config cleanup for mysql:
+// 1: remove the instance config of the routine backup program
+// 2: remove the instance config of the routine checksum program
+// 3: remove the instance config of rotate_binlog
+
+// ClearInstanceConfig command name constant
+const ClearInstanceConfig = "clear-inst-config"
+
+// ClearInstanceConfigAct is the clear-instance-config action
+type ClearInstanceConfigAct struct {
+	*subcmd.BaseOptions
+	Service mysql.ClearInstanceConfigComp
+}
+
+// ClearInstanceConfigCommand defines the clear-inst-config subcommand
+// @return *cobra.Command
+func ClearInstanceConfigCommand() *cobra.Command {
+	act := ClearInstanceConfigAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   ClearInstanceConfig,
+		Short: "清理实例级别的周边配置",
+		Example: fmt.Sprintf(
+			`dbactuator mysql %s %s %s`,
+			ClearInstanceConfig, subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init deserializes the payload params and injects general runtime params
+func (g *ClearInstanceConfigAct) Init() (err error) {
+	if err = g.Deserialize(&g.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	g.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Run is the execution entry for clearing instance configs
+func (g *ClearInstanceConfigAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "清理目标实例初始化",
+			Func:    g.Service.Init,
+		},
+		{
+			FunName: "清理目标实例的周边配置",
+			Func:    g.Service.DoClear,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("clear instance config successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/clone_client_grant.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/clone_client_grant.go
new file mode 100644
index 0000000000..70a443671e
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/clone_client_grant.go
@@ -0,0 +1,79 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// CloneClineGrantAct is the clone-client-grant action
+type CloneClineGrantAct struct {
+	*subcmd.BaseOptions
+	Service grant.CloneClentGRantComp
+}
+
+// CloneClientGrantCommand  subcommand
+//
+//	@return *cobra.Command
+func CloneClientGrantCommand() *cobra.Command {
+	act := CloneClineGrantAct{
+		BaseOptions: subcmd.GBaseOptions,
+		Service: grant.CloneClentGRantComp{
+			Params: &grant.CloneClentGRantParam{},
+		},
+	}
+	cmd := &cobra.Command{
+		Use:     "clone-client-grant",
+		Short:   "克隆客户端权限",
+		Example: fmt.Sprintf(`dbactuator mysql clone-client-grant %s`, subcmd.CmdBaseExampleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init deserializes the payload params and injects general runtime params
+func (g *CloneClineGrantAct) Init() (err error) {
+	if err = g.Deserialize(&g.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	g.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Run executes the clone-client-grant steps
+func (g *CloneClineGrantAct) Run() (err error) {
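+	// "client privileges" are the grants whose host part matches the old
+	// client IP; they are re-created for the new client IP and the stale
+	// entries dropped (an assumption based on the step names below).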
+	steps := subcmd.Steps{
+		{
+			FunName: "初始化本地db连接",
+			Func:    g.Service.Init,
+		},
+		{
+			FunName: "清理目标client残留权限",
+			Func:    g.Service.ClearTargetClientPriv,
+		},
+		{
+			FunName: "克隆client权限",
+			Func:    g.Service.CloneTargetClientPriv,
+		},
+		{
+			FunName: "回收旧client权限",
+			Func:    g.Service.DropOriginClientPriv,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("clone client grant successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/cmd.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/cmd.go
new file mode 100644
index 0000000000..aa23557672
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/cmd.go
@@ -0,0 +1,67 @@
+package mysqlcmd
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/templates"
+
+	"github.com/spf13/cobra"
+)
+
+// NewMysqlCommand groups the mysql subcommands
+func NewMysqlCommand() *cobra.Command {
+	cmds := &cobra.Command{
+		Use:   "mysql [mysql operation]",
+		Short: "MySQL Operation Command Line Interface",
+		RunE:  subcmd.ValidateSubCommand(),
+	}
+	groups := templates.CommandGroups{
+		{
+			Message: "mysql operation sets",
+			Commands: []*cobra.Command{
+				NewDeployMySQLInstanceCommand(),
+				NewStartMysqlCommand(),
+				NewUnInstallMysqlCommand(),
+				NewGrantReplCommand(),
+				NewExecSQLFileCommand(),
+				CloneClientGrantCommand(),
+				NewBackupTruncateDatabaseCommand(),
+				NewBackupDatabaseTableCommand(),
+				MycnfChangeCommand(),
+				FindLocalBackupCommand(),
+				MycnfCloneCommand(),
+				NewCutOverToSlaveCommnad(),
+				CleanMysqlCommand(),
+				PtTableSyncCommand(),
+				ParseBinlogTimeCommand(),
+				FlashbackBinlogCommand(),
+				NewPtTableChecksumCommand(),
+				NewInstallMySQLChecksumCommand(),
+				NewInstallNewDbBackupCommand(),
+				NewFullBackupCommand(),
+				NewInstallRotateBinlogCommand(),
+				NewInstallDBAToolkitCommand(),
+				NewDeployMySQLCrondCommand(),
+				ClearInstanceConfigCommand(),
+				NewInstallMySQLMonitorCommand(),
+				NewExecPartitionSQLCommand(),
+			},
+		},
+		{
+			Message: "mysql semantic check operation sets",
+			Commands: []*cobra.Command{
+				NewSenmanticCheckCommand(),
+				NewSenmanticDumpSchemaCommand(),
+			},
+		},
+		{
+			Message: "mysql slave operation  sets",
+			Commands: []*cobra.Command{
+				NewBuildMsRelatioCommand(),
+				RestoreDRCommand(),
+				RecoverBinlogCommand(),
+			},
+		},
+	}
+	groups.Add(cmds)
+	return cmds
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/deploy_mysql_crond.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/deploy_mysql_crond.go
new file mode 100644
index 0000000000..c93e02a442
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/deploy_mysql_crond.go
@@ -0,0 +1,95 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// DeployMysqlCrondAct deploys mysql-crond.
+type DeployMysqlCrondAct struct {
+	*subcmd.BaseOptions
+	Service mysql.DeployMySQLCrondComp
+}
+
+// DeployMySQLCrond is the subcommand name.
+const DeployMySQLCrond = "deploy-mysql-crond"
+
+// NewDeployMySQLCrondCommand builds the deploy-mysql-crond subcommand.
+func NewDeployMySQLCrondCommand() *cobra.Command {
+	act := DeployMysqlCrondAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   DeployMySQLCrond,
+		Short: "部署 mysql-crond",
+		Example: fmt.Sprintf(
+			`dbactuator mysql %s %s %s`,
+			DeployMySQLCrond,
+			subcmd.CmdBaseExampleStr,
+			subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate checks the base options.
+func (c *DeployMysqlCrondAct) Validate() (err error) {
+	return c.BaseOptions.Validate()
+}
+
+// Init deserializes the payload and binds the general runtime parameters.
+func (c *DeployMysqlCrondAct) Init() (err error) {
+	if err = c.Deserialize(&c.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	c.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	logger.Info("extend params: %s", c.Service.Params)
+	return nil
+}
+
+// Run executes the deploy steps.
+func (c *DeployMysqlCrondAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "init",
+			Func:    c.Service.Init,
+		},
+		{
+			FunName: "precheck",
+			Func:    c.Service.Precheck,
+		},
+		{
+			FunName: "deploy binary",
+			Func:    c.Service.DeployBinary,
+		},
+		{
+			FunName: "generate runtime config",
+			Func:    c.Service.GeneralRuntimeConfig,
+		},
+		{
+			FunName: "generate empty jobs config",
+			Func:    c.Service.TouchJobsConfig,
+		},
+		{
+			FunName: "start process",
+			Func:    c.Service.Start,
+		},
+	}
+	if err := steps.Run(); err != nil {
+		logger.Error("部署 mysql-crond 失败: %s", err.Error())
+		return err
+	}
+	logger.Info("部署 mysql-crond 完成")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/find_local_backup.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/find_local_backup.go
new file mode 100644
index 0000000000..f22886ad91
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/find_local_backup.go
@@ -0,0 +1,91 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// FindLocalBackupAct finds local backups.
+type FindLocalBackupAct struct {
+	*subcmd.BaseOptions
+	Payload mysql.FindLocalBackupComp
+}
+
+// FindLocalBackupCommand godoc
+//
+// @Summary      find local backups
+// @Description  find local backups
+// @Tags         mysql
+// @Accept       json
+// @Param        body body      mysql.FindLocalBackupParam  true  "short description"
+// @Success      200  {object}  mysql.FindLocalBackupResp
+// @Router       /mysql/find-local-backup [post]
+func FindLocalBackupCommand() *cobra.Command {
+	act := FindLocalBackupAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "find-local-backup",
+		Short: "查找本地备份",
+		Example: fmt.Sprintf(
+			`dbactuator mysql find-local-backup %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init validates base options and deserializes the payload.
+func (d *FindLocalBackupAct) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil { // @todo validation should happen up front
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	return
+}
+
+// Validate is a no-op; validation happens in Init.
+func (d *FindLocalBackupAct) Validate() error {
+	return nil
+}
+
+// Run executes the find-local-backup steps.
+func (d *FindLocalBackupAct) Run() (err error) {
+	// Wrap in a closure so the final err is logged; passing err directly
+	// would evaluate it (still nil) when the defer statement runs.
+	defer func() { util.LoggerErrorStack(logger.Error, err) }()
+	steps := subcmd.Steps{
+		{
+			FunName: "init component",
+			Func:    d.Payload.Params.Init,
+		},
+		{
+			FunName: "precheck",
+			Func:    d.Payload.Params.PreCheck,
+		},
+		{
+			FunName: "search local backups",
+			Func:    d.Payload.Params.Start,
+		},
+		{
+			FunName: "output backup info",
+			Func:    d.Payload.Params.OutputCtx,
+		},
+	}
+	if err = steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("find local backups done")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/flashback.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/flashback.go
new file mode 100644
index 0000000000..e2ce25aaec
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/flashback.go
@@ -0,0 +1,89 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	_ "dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil" // mysqlutil TODO
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// FlashbackBinlogAct rolls data back by replaying binlog in flashback mode.
+type FlashbackBinlogAct struct {
+	*subcmd.BaseOptions
+	Payload rollback.FlashbackComp
+}
+
+// FlashbackBinlogCommand godoc
+//
+// @Summary  flashback binlog
+// @Description  import binlog via `mysqlbinlog --flashback xxx | mysql`
+// @Tags         mysql
+// @Accept       json
+// @Param        body body      rollback.FlashbackComp  true  "short description"
+// @Router       /mysql/flashback-binlog [post]
+func FlashbackBinlogCommand() *cobra.Command {
+	act := FlashbackBinlogAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "flashback-binlog",
+		Short: "导入binlog",
+		Example: fmt.Sprintf(
+			"dbactuator mysql flashback-binlog %s %s",
+			subcmd.CmdBaseExampleStr,
+			subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
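+
+// The flashback boils down to a pipeline of roughly this shape (a sketch
+// based on the description above, not the exact component code):
+//
+//	mysqlbinlog --flashback <binlog files> | mysql -h<host> -P<port>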
+
+// Init validates base options and deserializes the payload.
+func (d *FlashbackBinlogAct) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil {
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("Deserialize err %s", err.Error())
+		return err
+	}
+	d.Payload.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Validate is a no-op; validation happens in Init.
+func (d *FlashbackBinlogAct) Validate() error {
+	return nil
+}
+
+// Run executes the flashback steps.
+func (d *FlashbackBinlogAct) Run() (err error) {
+	defer func() { util.LoggerErrorStack(logger.Error, err) }() // closure so the final err is logged
+	steps := subcmd.Steps{
+		{
+			FunName: "init",
+			Func:    d.Payload.Params.Init,
+		},
+		{
+			FunName: "precheck",
+			Func:    d.Payload.Params.PreCheck,
+		},
+		{
+			FunName: "start flashback binlog",
+			Func:    d.Payload.Params.Start,
+		},
+	}
+	if err = steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("import binlog successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/full_backup.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/full_backup.go
new file mode 100644
index 0000000000..6f22f420b2
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/full_backup.go
@@ -0,0 +1,92 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// FullBackup is the subcommand name.
+const FullBackup = "full-backup"
+
+// FullBackupAct is the full-backup command payload.
+type FullBackupAct struct {
+	*subcmd.BaseOptions
+	Service mysql.FullBackupComp
+}
+
+// NewFullBackupCommand builds the full-backup subcommand.
+func NewFullBackupCommand() *cobra.Command {
+	act := FullBackupAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+
+	cmd := &cobra.Command{
+		Use:   FullBackup,
+		Short: "全库备份",
+		Example: fmt.Sprintf(
+			`dbactuator mysql full-backup %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate checks the base options.
+func (c *FullBackupAct) Validate() (err error) {
+	return c.BaseOptions.Validate()
+}
+
+// Init deserializes the payload and binds the general runtime parameters.
+func (c *FullBackupAct) Init() (err error) {
+	if err = c.Deserialize(&c.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	c.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	logger.Info("extend params: %s", c.Service.Params)
+	return nil
+}
+
+// Run executes the full-backup steps.
+func (c *FullBackupAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "init",
+			Func: func() error {
+				// Init needs the job uid from the base options.
+				return c.Service.Init(c.Uid)
+			},
+		},
+		{
+			FunName: "precheck",
+			Func:    c.Service.Precheck,
+		},
+		{
+			FunName: "generate config file",
+			Func:    c.Service.GenerateConfigFile,
+		},
+		{
+			FunName: "run backup",
+			Func:    c.Service.DoBackup,
+		},
+		{
+			FunName: "output report",
+			Func:    c.Service.OutputBackupInfo,
+		},
+	}
+
+	if err = steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("备份成功")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/grant_repl.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/grant_repl.go
new file mode 100644
index 0000000000..38cb73bdb3
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/grant_repl.go
@@ -0,0 +1,91 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// GrantReplAct creates the repl account.
+type GrantReplAct struct {
+	*subcmd.BaseOptions
+	Payload grant.GrantReplComp
+}
+
+// NewGrantReplCommand godoc
+//
+// @Summary      create replication account
+// @Description  create the repl account on the target instance
+// @Tags         mysql
+// @Accept       json
+// @Param        body body      grant.GrantReplComp  true  "short description"
+// @Router       /mysql/grant-repl [post]
+func NewGrantReplCommand() *cobra.Command {
+	act := GrantReplAct{
+		BaseOptions: subcmd.GBaseOptions,
+		Payload: grant.GrantReplComp{
+			Params: &grant.GrantReplParam{},
+		},
+	}
+	cmd := &cobra.Command{
+		Use:   "grant-repl",
+		Short: "新增repl账户",
+		Example: fmt.Sprintf(
+			`dbactuator mysql grant-repl %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init deserializes the payload and binds the general runtime parameters.
+func (g *GrantReplAct) Init() (err error) {
+	if err = g.Deserialize(&g.Payload.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	g.Payload.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Run executes the grant-repl steps.
+func (g *GrantReplAct) Run() (err error) {
+	defer func() { util.LoggerErrorStack(logger.Error, err) }() // closure so the final err is logged
+	steps := subcmd.Steps{
+		{
+			FunName: "init local db connection",
+			Func:    g.Payload.Init,
+		},
+		{
+			FunName: "create repl account",
+			Func:    g.Payload.GrantRepl,
+		},
+		{
+			FunName: "fetch replication binlog position",
+			Func: func() error {
+				position, err := g.Payload.GetBinPosition()
+				if err != nil {
+					return err
+				}
+				g.OutputCtx(position)
+				return nil
+			},
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("grant repl successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/import_partitionsql.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/import_partitionsql.go
new file mode 100644
index 0000000000..7fac8a4d2b
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/import_partitionsql.go
@@ -0,0 +1,80 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// ExecPartitionSQLAct executes partition SQL.
+type ExecPartitionSQLAct struct {
+	*subcmd.BaseOptions
+	Payload mysql.ExcutePartitionSQLComp
+}
+
+const (
+	// ImportPartitionSQL is the subcommand name.
+	ImportPartitionSQL = "import-partitionsql"
+)
+
+// NewExecPartitionSQLCommand builds the import-partitionsql subcommand.
+func NewExecPartitionSQLCommand() *cobra.Command {
+	act := ExecPartitionSQLAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   ImportPartitionSQL,
+		Short: "分区",
+		Example: fmt.Sprintf(
+			`dbactuator mysql deploy-monitor  %s %s`,
+			subcmd.CmdBaseExampleStr,
+			subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate checks the base options.
+func (d *ExecPartitionSQLAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init deserializes the payload and binds the general runtime parameters.
+func (d *ExecPartitionSQLAct) Init() (err error) {
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	d.Payload.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Run executes the partition SQL steps.
+func (d *ExecPartitionSQLAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "init",
+			Func:    d.Payload.Init,
+		},
+		{
+			FunName: "execute partition SQL",
+			Func:    d.Payload.Excute,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("import partition sql successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/import_sqlfile.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/import_sqlfile.go
new file mode 100644
index 0000000000..c163756444
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/import_sqlfile.go
@@ -0,0 +1,90 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// ExecSQLFileAct imports SQL files.
+type ExecSQLFileAct struct {
+	*subcmd.BaseOptions
+	Payload mysql.ExcuteSQLFileComp
+}
+
+const (
+	// ImportSQLFile is the subcommand name.
+	ImportSQLFile = "import-sqlfile"
+)
+
+// NewExecSQLFileCommand builds the import-sqlfile subcommand.
+func NewExecSQLFileCommand() *cobra.Command {
+	act := ExecSQLFileAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   ImportSQLFile,
+		Short: "SQL导入",
+		Example: fmt.Sprintf(
+			`dbactuator mysql %s %s %s`,
+			ImportSQLFile,
+			subcmd.CmdBaseExampleStr,
+			subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate checks the base options.
+func (d *ExecSQLFileAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init deserializes the payload and binds the general runtime parameters.
+func (d *ExecSQLFileAct) Init() (err error) {
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	d.Payload.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Run executes the SQL import steps.
+func (d *ExecSQLFileAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "Init",
+			Func:    d.Payload.Init,
+		}, {
+			FunName: "执行前预处理",
+			Func: func() error {
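+				// Spider clusters first need DDL execution enabled through the
+				// central control node before the SQL files can be imported.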
+				if d.Payload.Params.IsSpider {
+					return d.Payload.OpenDdlExecuteByCtl()
+				}
+				logger.Info("无需预处理,跳过")
+				return nil
+			},
+		},
+		{
+			FunName: "执行导入SQL文件",
+			Func:    d.Payload.Excute,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("import sqlfile successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_checksum.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_checksum.go
new file mode 100644
index 0000000000..2fa28c02f6
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_checksum.go
@@ -0,0 +1,101 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// InstallMySQLChecksumAct installs the data checksum tool.
+type InstallMySQLChecksumAct struct {
+	*subcmd.BaseOptions
+	Service mysql.InstallMySQLChecksumComp
+}
+
+// InstallMySQLChecksum is the subcommand name.
+const InstallMySQLChecksum = "install-checksum"
+
+// NewInstallMySQLChecksumCommand godoc
+//
+// @Summary     install mysql checksum
+// @Description  install mysql checksum
+// @Tags         mysql
+// @Accept       json
+// @Param        body body      mysql.InstallMySQLChecksumComp  true  "short description"
+// @Router       /mysql/install-checksum [post]
+func NewInstallMySQLChecksumCommand() *cobra.Command {
+	act := InstallMySQLChecksumAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   InstallMySQLChecksum,
+		Short: "安装mysql校验",
+		Example: fmt.Sprintf(
+			`dbactuator mysql %s %s %s`,
+			InstallMySQLChecksum,
+			subcmd.CmdBaseExampleStr,
+			subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate checks the base options.
+func (c *InstallMySQLChecksumAct) Validate() (err error) {
+	return c.BaseOptions.Validate()
+}
+
+// Init deserializes the payload and binds the general runtime parameters.
+func (c *InstallMySQLChecksumAct) Init() (err error) {
+	if err = c.Deserialize(&c.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	c.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	logger.Info("extend params: %s", c.Service.Params)
+	return nil
+}
+
+// Run executes the install steps.
+func (c *InstallMySQLChecksumAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "init",
+			Func:    c.Service.Init,
+		},
+		{
+			FunName: "precheck",
+			Func:    c.Service.Precheck,
+		},
+		{
+			FunName: "deploy binary",
+			Func:    c.Service.DeployBinary,
+		},
+		{
+			FunName: "generate binary config",
+			Func:    c.Service.GenerateBinaryConfig,
+		},
+		// {
+		//	FunName: "generate wrapper file",
+		//	Func:    c.Service.BuildWrapper,
+		// },
+		{
+			FunName: "register crond job",
+			Func:    c.Service.AddToCrond,
+		},
+	}
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("部署mysql校验完成")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_dba_toolkit.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_dba_toolkit.go
new file mode 100644
index 0000000000..145d0b424c
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_dba_toolkit.go
@@ -0,0 +1,91 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// InstallDBAToolkitAct installs the DBA toolkit.
+type InstallDBAToolkitAct struct {
+	*subcmd.BaseOptions
+	Service mysql.InstallDBAToolkitComp
+}
+
+// CommandInstallDBAToolkit is the subcommand name.
+const CommandInstallDBAToolkit = "install-dbatoolkit"
+
+// NewInstallDBAToolkitCommand godoc
+//
+// @Summary      install the DBA toolkit
+// @Description  deploy /home/mysql/dba_toolkit, overwriting any existing copy
+// @Tags         mysql
+// @Accept       json
+// @Param        body body      mysql.InstallDBAToolkitComp  true  "short description"
+// @Router       /mysql/install-dbatoolkit [post]
+func NewInstallDBAToolkitCommand() *cobra.Command {
+	act := InstallDBAToolkitAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   CommandInstallDBAToolkit,
+		Short: "部署 rotate_binlog",
+		Example: fmt.Sprintf(
+			`dbactuator mysql %s %s %s`, CommandInstallDBAToolkit,
+			subcmd.CmdBaseExampleStr,
+			subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init deserializes the payload.
+func (d *InstallDBAToolkitAct) Init() (err error) {
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// Run executes the install steps.
+func (d *InstallDBAToolkitAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "init",
+			Func:    d.Service.Init,
+		},
+		{
+			FunName: "precheck",
+			Func:    d.Service.PreCheck,
+		},
+		{
+			FunName: "deploy binary",
+			Func:    d.Service.DeployBinary,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("install dba-toolkit successfully~")
+	return nil
+}
+
+// Rollback is currently a no-op.
+func (d *InstallDBAToolkitAct) Rollback() (err error) {
+	return
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_monitor.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_monitor.go
new file mode 100644
index 0000000000..6dc83c9e7c
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_monitor.go
@@ -0,0 +1,94 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// InstallMonitorAct installs mysql-monitor.
+type InstallMonitorAct struct {
+	*subcmd.BaseOptions
+	Service mysql.InstallMySQLMonitorComp
+}
+
+// InstallMySQLMonitor is the subcommand name.
+const InstallMySQLMonitor = "install-monitor"
+
+// NewInstallMySQLMonitorCommand builds the install-monitor subcommand.
+func NewInstallMySQLMonitorCommand() *cobra.Command {
+	act := InstallMonitorAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   InstallMySQLMonitor,
+		Short: "安装mysql监控",
+		Example: fmt.Sprintf(
+			`dbactuator mysql %s %s %s`,
+			InstallMySQLMonitor,
+			subcmd.CmdBaseExampleStr,
+			subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate checks the base options.
+func (c *InstallMonitorAct) Validate() (err error) {
+	return c.BaseOptions.Validate()
+}
+
+// Init deserializes the payload and binds the general runtime parameters.
+func (c *InstallMonitorAct) Init() (err error) {
+	if err = c.Deserialize(&c.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	c.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	logger.Info("extend params: %s", c.Service.Params)
+	return nil
+}
+
+// Run executes the install steps.
+func (c *InstallMonitorAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "init",
+			Func:    c.Service.Init,
+		},
+		{
+			FunName: "precheck",
+			Func:    c.Service.Precheck,
+		},
+		{
+			FunName: "deploy binary",
+			Func:    c.Service.DeployBinary,
+		},
+		{
+			FunName: "generate binary config",
+			Func:    c.Service.GenerateBinaryConfig,
+		},
+		{
+			FunName: "generate monitor items config",
+			Func:    c.Service.GenerateItemsConfig,
+		},
+		{
+			FunName: "register crond job",
+			Func:    c.Service.AddToCrond,
+		},
+	}
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("部署mysql监控完成")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_mysql.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_mysql.go
new file mode 100644
index 0000000000..5eee9e8063
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_mysql.go
@@ -0,0 +1,138 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// DeployMySQLAct deploys a mysql instance.
+type DeployMySQLAct struct {
+	*subcmd.BaseOptions
+	Service mysql.InstallMySQLComp
+}
+
+// NewDeployMySQLInstanceCommand godoc
+//
+// @Summary      deploy a mysql instance
+// @Description  deploy a mysql instance
+// @Tags         mysql
+// @Accept       json
+// @Param        body body      mysql.InstallMySQLComp  true  "short description"
+// @Router       /mysql/deploy [post]
+func NewDeployMySQLInstanceCommand() *cobra.Command {
+	act := DeployMySQLAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "deploy",
+		Short: "部署MySQL实例",
+		Example: fmt.Sprintf(
+			`dbactuator mysql deploy %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
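+			// With --rollback, undo a previously failed deploy instead of installing.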
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init deserializes the payload, binds runtime parameters and fills defaults.
+func (d *DeployMySQLAct) Init() (err error) {
+	logger.Info("DeployMySQLAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return d.Service.InitDefaultParam()
+}
+
+// Rollback undoes a failed deploy using the rollback context printed by Run.
+func (d *DeployMySQLAct) Rollback() (err error) {
+	var r rollback.RollBackObjects
+	if err = d.DeserializeAndValidate(&r); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	err = r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run executes the deploy steps.
+func (d *DeployMySQLAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "precheck",
+			Func:    d.Service.PreCheck,
+		},
+		{
+			FunName: "render my.cnf",
+			Func:    d.Service.GenerateMycnf,
+		},
+		{
+			FunName: "init mysqld directories",
+			Func:    d.Service.InitInstanceDirs,
+		},
+		{
+			FunName: "download and decompress the package",
+			Func:    d.Service.DecompressMysqlPkg,
+		},
+		{
+			FunName: "init mysqld system tables",
+			Func:    d.Service.Install,
+		},
+		{
+			FunName: "start mysqld",
+			Func:    d.Service.Startup,
+		},
+		{
+			FunName: "init default privileges and schemas",
+			Func:    d.Service.InitDefaultPrivAndSchema,
+		},
+		{
+			FunName: "generate exporter config",
+			Func:    d.Service.CreateExporterCnf,
+		},
+		{
+			FunName: "输出系统的时区设置",
+			Func: func() error {
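+				// Report the instance's time_zone back to the caller as JSON.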
+				d.OutputCtx(fmt.Sprintf("{\"time_zone\": \"%s\"}", d.Service.TimeZone))
+				return nil
+			},
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		// On failure, print the rollback context so the caller can invoke --rollback.
+		rollbackCtxb, rerr := json.Marshal(d.Service.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		} else {
+			fmt.Printf("%s\n", string(rollbackCtxb))
+		}
+		return err
+	}
+
+	logger.Info("install_mysql successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_dbbackup.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_dbbackup.go
new file mode 100644
index 0000000000..2d7915be97
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_dbbackup.go
@@ -0,0 +1,112 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// InstallNewDbBackupAct installs the backup program.
+type InstallNewDbBackupAct struct {
+	*subcmd.BaseOptions
+	Service mysql.InstallNewDbBackupComp
+}
+
+// NewInstallNewDbBackupCommand godoc
+//
+// @Summary      install the backup program
+// @Description  deploy the Go version of the backup program
+// @Tags         mysql
+// @Accept       json
+// @Param        body body      mysql.InstallNewDbBackupComp  true  "short description"
+// @Router       /mysql/deploy-dbbackup [post]
+func NewInstallNewDbBackupCommand() *cobra.Command {
+	act := InstallNewDbBackupAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "deploy-dbbackup",
+		Short: "部署GO版本备份程序",
+		Example: fmt.Sprintf(
+			`dbactuator mysql deploy-dbbackup %s %s`, subcmd.CmdBaseExampleStr,
+			subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init deserializes the payload and binds the general runtime parameters.
+func (d *InstallNewDbBackupAct) Init() (err error) {
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return nil
+}
+
+// Run executes the install steps.
+func (d *InstallNewDbBackupAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "init",
+			Func:    d.Service.Init,
+		},
+		{
+			FunName: "init template render data",
+			Func:    d.Service.InitRenderData,
+		},
+		{
+			FunName: "init backup data directory",
+			Func:    d.Service.InitBackupDir,
+		},
+		{
+			FunName: "back up the existing backup program",
+			Func:    d.Service.BackupBackupIfExist,
+		},
+		{
+			FunName: "init backup program user privileges",
+			Func:    d.Service.InitBackupUserPriv,
+		},
+		{
+			FunName: "decompress the backup program package",
+			Func:    d.Service.DecompressPkg,
+		},
+		{
+			FunName: "generate config",
+			Func:    d.Service.GenerateDbbackupConfig,
+		},
+		{
+			FunName: "chown the install path",
+			Func:    d.Service.ChownGroup,
+		},
+		{
+			FunName: "add system crontab",
+			Func:    d.Service.AddCrontab,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("install new dbbackup successfully~")
+	return nil
+}
+
+// Rollback is currently a no-op.
+func (d *InstallNewDbBackupAct) Rollback() (err error) {
+	return
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_rotatebinlog.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_rotatebinlog.go
new file mode 100644
index 0000000000..ee30c0ed4c
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/install_new_rotatebinlog.go
@@ -0,0 +1,93 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// InstallRotateBinlogAct installs rotate_binlog.
+type InstallRotateBinlogAct struct {
+	*subcmd.BaseOptions
+	Service mysql.InstallRotateBinlogComp
+}
+
+// CommandDeployRotatebinlog is the subcommand name.
+const CommandDeployRotatebinlog = "deploy-rotatebinlog"
+
+// NewInstallRotateBinlogCommand builds the deploy-rotatebinlog subcommand.
+func NewInstallRotateBinlogCommand() *cobra.Command {
+	act := InstallRotateBinlogAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   CommandDeployRotatebinlog,
+		Short: "部署 rotate_binlog",
+		Example: fmt.Sprintf(
+			`dbactuator mysql %s %s %s`, CommandDeployRotatebinlog,
+			subcmd.CmdBaseExampleStr,
+			subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init deserializes the payload and binds the general runtime parameters.
+func (d *InstallRotateBinlogAct) Init() (err error) {
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return nil
+}
+
+// Run executes the install steps.
+func (d *InstallRotateBinlogAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "init",
+			Func:    d.Service.Init,
+		},
+		{
+			FunName: "precheck",
+			Func:    d.Service.PreCheck,
+		},
+		{
+			FunName: "deploy binary",
+			Func:    d.Service.DeployBinary,
+		},
+		{
+			FunName: "render config.yaml",
+			Func:    d.Service.GenerateBinaryConfig,
+		},
+		{
+			FunName: "add system crontab",
+			Func:    d.Service.InstallCrontab,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("install new rotate_binlog successfully~")
+	return nil
+}
+
+// Rollback is currently a no-op.
+func (d *InstallRotateBinlogAct) Rollback() (err error) {
+	return
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mycnf_change.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mycnf_change.go
new file mode 100644
index 0000000000..6d19ac6c45
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mycnf_change.go
@@ -0,0 +1,91 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// MycnfChangeAct changes my.cnf settings.
+type MycnfChangeAct struct {
+	*subcmd.BaseOptions
+	Payload mysql.MycnfChangeComp
+}
+
+// MycnfChangeCommand godoc
+//
+// @Summary      change mysql config
+// @Description  change mysql config
+// @Tags         mysql
+// @Accept       json
+// @Produce      json
+// @Param        body body      mysql.MycnfChangeComp  true  "description"
+// @Router       /mysql/mycnf-change [post]
+func MycnfChangeCommand() *cobra.Command {
+	act := MycnfChangeAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "mycnf-change",
+		Short: "修改mysql配置",
+		Example: fmt.Sprintf(
+			`dbactuator mysql mycnf-change %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init validates base options and deserializes the payload.
+func (d *MycnfChangeAct) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil { // @todo validation should happen up front
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	logger.Warn("params %+v", d.Payload.Params)
+	d.Payload.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Validate is a no-op; validation happens in Init.
+func (d *MycnfChangeAct) Validate() error {
+	return nil
+}
+
+// Run executes the mycnf-change steps.
+func (d *MycnfChangeAct) Run() (err error) {
+	defer func() { util.LoggerErrorStack(logger.Error, err) }() // closure so the final err is logged
+	steps := subcmd.Steps{
+		{
+			FunName: "load config file",
+			Func:    d.Payload.Params.Init,
+		},
+		{
+			FunName: "precheck",
+			Func:    d.Payload.Params.PreCheck,
+		},
+		{
+			FunName: "apply config changes",
+			Func:    d.Payload.Params.Start,
+		},
+	}
+
+	if err = steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("change my.cnf successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mycnf_clone.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mycnf_clone.go
new file mode 100644
index 0000000000..c9266847f2
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mycnf_clone.go
@@ -0,0 +1,91 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// MycnfCloneAct clones my.cnf settings between instances.
+type MycnfCloneAct struct {
+	*subcmd.BaseOptions
+	Payload mysql.MycnfCloneComp
+}
+
+// MycnfCloneCommand godoc
+//
+// @Summary      clone part of my.cnf from a source instance to a target instance
+// @Description  used for slave rebuild or migration, keeping key my.cnf parameters of the new instance in sync with the source
+// @Description  parameters cloned by default:
+// @Tags         mysql
+// @Accept       json
+// @Produce      json
+// @Param        body body      mysql.MycnfCloneComp  true  "description"
+// @Router       /mysql/mycnf-clone [post]
+func MycnfCloneCommand() *cobra.Command {
+	act := MycnfCloneAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "mycnf-clone",
+		Short: "克隆mysql配置",
+		Example: fmt.Sprintf(
+			`dbactuator mysql mycnf-clone %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init validates base options and deserializes the payload.
+func (d *MycnfCloneAct) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil { // @todo validation should happen up front
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	logger.Warn("params %+v", d.Payload.Params)
+	d.Payload.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Validate is a no-op; validation happens in Init.
+func (d *MycnfCloneAct) Validate() error {
+	return nil
+}
+
+// Run executes the mycnf-clone steps.
+func (d *MycnfCloneAct) Run() (err error) {
+	defer func() { util.LoggerErrorStack(logger.Error, err) }() // closure so the final err is logged
+	steps := subcmd.Steps{
+		{
+			FunName: "load config file",
+			Func:    d.Payload.Params.Init,
+		},
+		{
+			FunName: "precheck",
+			Func:    d.Payload.Params.PreCheck,
+		},
+		{
+			FunName: "apply config changes",
+			Func:    d.Payload.Params.Start,
+		},
+	}
+
+	if err = steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("clone my.cnf successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go
new file mode 100644
index 0000000000..fbc1bb8119
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go
@@ -0,0 +1,2 @@
+// Package mysqlcmd implements the mysql subcommands of dbactuator.
+package mysqlcmd
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/parse_binlog_time.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/parse_binlog_time.go
new file mode 100644
index 0000000000..235170af7a
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/parse_binlog_time.go
@@ -0,0 +1,84 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	_ "dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil" // mysqlutil TODO
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// ParseBinlogTimeAct parses the start and stop time of a binlog file.
+type ParseBinlogTimeAct struct {
+	*subcmd.BaseOptions
+	Payload mysql.BinlogTimeComp
+}
+
+// ParseBinlogTimeCommand godoc
+//
+// @Summary  get the start and stop time of a binlog
+// @Description  read the binlog FileDescriptionFormat and RotateEvent events
+// @Tags         mysql
+// @Accept       json
+// @Param        body body      mysql.BinlogTimeComp  true  "short description"
+// @Router       /mysql/parse-binlog-time [post]
+func ParseBinlogTimeCommand() *cobra.Command {
+	act := ParseBinlogTimeAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "parse-binlog-time",
+		Short: "获取 binlog 起止时间",
+		Example: fmt.Sprintf(
+			"dbactuator mysql parse-binlog-time %s %s",
+			subcmd.CmdBaseExampleStr,
+			subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init validates base options and deserializes the payload.
+func (d *ParseBinlogTimeAct) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil {
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("Deserialize err %s", err.Error())
+		return err
+	}
+	return
+}
+
+// Validate is a no-op; validation happens in Init.
+func (d *ParseBinlogTimeAct) Validate() error {
+	return nil
+}
+
+// Run executes the parse steps.
+func (d *ParseBinlogTimeAct) Run() (err error) {
+	defer func() { util.LoggerErrorStack(logger.Error, err) }() // closure so the final err is logged
+	steps := subcmd.Steps{
+		{
+			FunName: "init",
+			Func:    d.Payload.Init,
+		},
+		{
+			FunName: "start parsing",
+			Func:    d.Payload.Start,
+		},
+	}
+	if err = steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("get binlog start and stop datetime successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/pt_table_checksum.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/pt_table_checksum.go
new file mode 100644
index 0000000000..64db376ed4
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/pt_table_checksum.go
@@ -0,0 +1,96 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// PtTableChecksumAct runs pt-table-checksum.
+type PtTableChecksumAct struct {
+	*subcmd.BaseOptions
+	Service mysql.PtTableChecksumComp
+}
+
+const (
+	// PtTableChecksum is the subcommand name.
+	PtTableChecksum = "pt-table-checksum"
+)
+
+// NewPtTableChecksumCommand godoc
+//
+// @Summary data checksum
+// @Description data checksum
+// @Tags mysql
+// @Accept json
+// @Param body body mysql.PtTableChecksumComp true "description"
+// @Router /mysql/pt-table-checksum [post]
+func NewPtTableChecksumCommand() *cobra.Command {
+	act := PtTableChecksumAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+
+	cmd := &cobra.Command{
+		Use:   PtTableChecksum,
+		Short: "数据校验",
+		Example: fmt.Sprintf(
+			`dbactuator mysql pt-table-checksum %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate checks the base options.
+func (c *PtTableChecksumAct) Validate() (err error) {
+	return c.BaseOptions.Validate()
+}
+
+// Init deserializes the payload and binds the general runtime parameters.
+func (c *PtTableChecksumAct) Init() (err error) {
+	if err = c.Deserialize(&c.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	c.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	logger.Info("extend params: %s", c.Service.Params)
+	return nil
+}
+
+// Run executes the checksum steps.
+func (c *PtTableChecksumAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "init",
+			Func: func() error {
+				// Init needs the job uid from the base options.
+				return c.Service.Init(c.Uid)
+			},
+		},
+		{
+			FunName: "precheck",
+			Func:    c.Service.Precheck,
+		},
+		{
+			FunName: "generate config file",
+			Func:    c.Service.GenerateConfigFile,
+		},
+		{
+			FunName: "run checksum",
+			Func:    c.Service.DoChecksum,
+		},
+	}
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("校验完成")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/pt_table_sync.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/pt_table_sync.go
new file mode 100644
index 0000000000..711e9e19a5
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/pt_table_sync.go
@@ -0,0 +1,82 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// PtTableSync is the subcommand name.
+const PtTableSync = "pt-table-sync"
+
+// PtTableSyncAct runs pt-table-sync to repair data.
+type PtTableSyncAct struct {
+	*subcmd.BaseOptions
+	Service mysql.PtTableSyncComp
+}
+
+// PtTableSyncCommand builds the pt-table-sync subcommand.
+func PtTableSyncCommand() *cobra.Command {
+	act := PtTableSyncAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   PtTableSync,
+		Short: "数据修复",
+		Example: fmt.Sprintf(
+			`dbactuator mysql %s %s %s`,
+			PtTableSync, subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate checks the base options.
+func (d *PtTableSyncAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init deserializes the payload and binds the general runtime parameters.
+func (d *PtTableSyncAct) Init() (err error) {
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Run executes the repair steps.
+func (d *PtTableSyncAct) Run() (err error) {
+	// subcmd.Steps run in order; once a step errors, the remaining steps are skipped.
+	// The deferred cleanups below run either way (LIFO: temp table first, then sync user).
+	defer d.Service.DropSyncUser()
+	defer d.Service.DropTempTable()
+	steps := subcmd.Steps{
+		{
+			FunName: "init",
+			Func:    d.Service.Init,
+		},
+		{
+			FunName: "precheck",
+			Func:    d.Service.Precheck,
+		},
+		{
+			FunName: "run pt-table-sync",
+			Func:    d.Service.ExecPtTableSync,
+		},
+	}
+	if err = steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("数据修复任务完成")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/recover_binlog.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/recover_binlog.go
new file mode 100644
index 0000000000..73ecb04183
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/recover_binlog.go
@@ -0,0 +1,98 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	_ "dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil" // mysqlutil TODO
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// RecoverBinlogAct imports binlog.
+type RecoverBinlogAct struct {
+	*subcmd.BaseOptions
+	Payload restore.RecoverBinlogComp
+}
+
+// RecoverBinlogCommand godoc
+//
+// @Summary  import binlog
+// @Description  import binlog via `mysqlbinlog xxx | mysql`
+// @Tags         mysql
+// @Accept       json
+// @Param        body body      restore.RecoverBinlogComp  true  "short description"
+// @Router       /mysql/recover-binlog [post]
+func RecoverBinlogCommand() *cobra.Command {
+	act := RecoverBinlogAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "recover-binlog",
+		Short: "导入binlog",
+		Example: fmt.Sprintf(
+			"dbactuator mysql recover-binlog %s %s",
+			subcmd.CmdBaseExampleStr,
+			subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
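+
+// Under the hood the import is essentially (per the description above):
+//
+//	mysqlbinlog <binlog files> | mysql -h<host> -P<port>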
+
+// Init validates base options and deserializes the payload.
+func (d *RecoverBinlogAct) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil {
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("Deserialize err %s", err.Error())
+		return err
+	}
+	d.Payload.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Validate is a no-op; validation happens in Init.
+func (d *RecoverBinlogAct) Validate() error {
+	return nil
+}
+
+// Run executes the recover-binlog steps.
+func (d *RecoverBinlogAct) Run() (err error) {
+	defer func() { util.LoggerErrorStack(logger.Error, err) }() // closure so the final err is logged
+	steps := subcmd.Steps{
+		{
+			FunName: "init",
+			Func:    d.Payload.Params.Init,
+		},
+		{
+			FunName: "precheck",
+			Func:    d.Payload.Params.PreCheck,
+		},
+		{
+			FunName: "recover binlog",
+			Func:    d.Payload.Params.Start,
+		},
+		{
+			FunName: "wait for binlog import",
+			Func:    d.Payload.Params.WaitDone,
+		},
+		{
+			FunName: "post-check after recovery",
+			Func:    d.Payload.Params.PostCheck,
+		},
+	}
+	if err = steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("import binlog successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/restore_dr.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/restore_dr.go
new file mode 100644
index 0000000000..d0a8db26ef
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/restore_dr.go
@@ -0,0 +1,123 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	_ "dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil" // mysqlutil TODO
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// RestoreDRAct restores a backup.
+type RestoreDRAct struct {
+	*subcmd.BaseOptions
+	Payload restore.RestoreDRComp
+}
+
+// RestoreDRCommand godoc
+//
+// @Summary  restore from backup
+// @Description  restore from a physical or logical backup
+// @Tags         mysql
+// @Accept       json
+// @Param        body body      restore.RestoreDRComp  true  "short description"
+// @Success      200  {object}  mysqlutil.ChangeMaster
+// @Router       /mysql/restore-dr [post]
+func RestoreDRCommand() *cobra.Command {
+	act := RestoreDRAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "restore-dr",
+		Short: "备份恢复",
+		Example: fmt.Sprintf(
+			"dbactuator mysql restore-dr %s %s\n"+
+				"\nOutput examples:\n%s",
+			subcmd.CmdBaseExampleStr,
+			subcmd.ToPrettyJson(act.Payload.Example()),
+			subcmd.ToPrettyJson(act.Payload.ExampleOutput()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
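+			// After a successful restore, chain straight into change-master (see Next).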
+			util.CheckErr(act.Next())
+		},
+	}
+	return cmd
+}
+
+// Init validates base options and deserializes the payload.
+func (d *RestoreDRAct) Init() (err error) {
+	if err = d.BaseOptions.Validate(); err != nil { // @todo validation should happen up front
+		return err
+	}
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("Deserialize err %s", err.Error())
+		return err
+	}
+	d.Payload.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Validate is a no-op; validation happens in Init.
+func (d *RestoreDRAct) Validate() error {
+	return nil
+}
+
+// Run executes the restore steps.
+func (d *RestoreDRAct) Run() (err error) {
+	defer func() { util.LoggerErrorStack(logger.Error, err) }() // closure so the final err is logged
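+	// Decide the restore flavor (physical vs logical backup) before building steps.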
+	if err = d.Payload.ChooseType(); err != nil {
+		// logger.Error("%+v", err)
+		return err
+	}
+	steps := subcmd.Steps{
+		{
+			FunName: "init environment",
+			Func:    d.Payload.Init,
+		},
+		{
+			FunName: "restore precheck",
+			Func:    d.Payload.PreCheck,
+		},
+		{
+			FunName: "restore",
+			Func:    d.Payload.Start,
+		},
+		{
+			FunName: "wait for restore to finish",
+			Func:    d.Payload.WaitDone,
+		},
+		{
+			FunName: "post-check",
+			Func:    d.Payload.PostCheck,
+		},
+		{
+			FunName: "output binlog position",
+			Func:    d.Payload.OutputCtx,
+		},
+	}
+	if err = steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("backup restore successfully")
+	return nil
+}
+
+// Next 运行下一个 component
+func (d *RestoreDRAct) Next() error {
+	logger.Info("run next: change-master")
+	if comp := d.Payload.BuildChangeMaster(); comp != nil {
+		act := BuildMsRelationAct{
+			BaseOptions: d.BaseOptions,
+			Payload:     *comp,
+		}
+		// comp.GeneralParam = subcmd.GeneralRuntimeParam
+		return act.Run()
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/semantic_check.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/semantic_check.go
new file mode 100644
index 0000000000..6864252d84
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/semantic_check.go
@@ -0,0 +1,102 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// SemanticCheckAct runs the semantic check.
+type SemanticCheckAct struct {
+	*subcmd.BaseOptions
+	Payload mysql.SemanticCheckComp
+	Clean   bool
+}
+
+// NewSemanticCheckCommand godoc
+//
+// @Summary      run semantic check
+// @Description  run semantic check
+// @Tags         mysql
+// @Accept       json
+// @Produce      json
+// @Param        body body      mysql.SemanticCheckComp  true  "short description"
+// @Router       /mysql/semantic-check [post]
+func NewSemanticCheckCommand() *cobra.Command {
+	act := SemanticCheckAct{
+		BaseOptions: subcmd.GBaseOptions,
+		Payload:     mysql.SemanticCheckComp{},
+	}
+	cmd := &cobra.Command{
+		Use:   "semantic-check",
+		Short: "运行语义检查",
+		Example: fmt.Sprintf(
+			`dbactuator mysql semantic-check %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Payload.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			if act.Clean {
+				util.CheckErr(act.Payload.Clean())
+				return
+			}
+			util.CheckErr(act.Run())
+		},
+	}
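+	// --clean tears down the semantic check instance instead of running the check.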
+	cmd.Flags().BoolVarP(&act.Clean, "clean", "c", act.Clean, "清理语义检查实例")
+	return cmd
+}
+
+// Validate checks the base options.
+func (d *SemanticCheckAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init deserializes the payload and binds the general runtime parameters.
+func (d *SemanticCheckAct) Init() (err error) {
+	if err = d.Deserialize(&d.Payload.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Payload.GeneralParam = subcmd.GeneralRuntimeParam
+	return nil
+}
+
+// Rollback is currently a no-op.
+func (d *SemanticCheckAct) Rollback() (err error) {
+	return
+}
+
+// Run executes the semantic check steps.
+func (d *SemanticCheckAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "precheck",
+			Func:    d.Payload.Precheck,
+		}, {
+			FunName: "init",
+			Func: func() error {
+				return d.Payload.Init(d.Uid)
+			},
+		},
+		{
+			FunName: "run semantic analysis",
+			Func:    d.Payload.Run,
+		},
+	}
+	if err := steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("运行语义检查成功")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/semantic_dump_schema.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/semantic_dump_schema.go
new file mode 100644
index 0000000000..12fec8eda6
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/semantic_dump_schema.go
@@ -0,0 +1,88 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// SemanticDumpSchemaAct dumps the table schema for semantic check.
+type SemanticDumpSchemaAct struct {
+	*subcmd.BaseOptions
+	Service mysql.SemanticDumpSchemaComp
+}
+
+// NewSemanticDumpSchemaCommand godoc
+//
+// @Summary      dump schema for semantic check
+// @Description  dump schema for semantic check
+// @Tags         mysql
+// @Accept       json
+// @Produce      json
+// @Param        body body      mysql.SemanticDumpSchemaComp  true  "short description"
+// @Router       /mysql/semantic-dumpschema [post]
+func NewSemanticDumpSchemaCommand() *cobra.Command {
+	act := SemanticDumpSchemaAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "semantic-dumpschema",
+		Short: "运行导出表结构",
+		Example: fmt.Sprintf(
+			`dbactuator mysql semantic-dumpschema %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Validate checks the base options.
+func (d *SemanticDumpSchemaAct) Validate() (err error) {
+	return d.BaseOptions.Validate()
+}
+
+// Init deserializes the payload and binds the general runtime parameters.
+func (d *SemanticDumpSchemaAct) Init() (err error) {
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return nil
+}
+
+// Run executes the dump-schema steps.
+func (d *SemanticDumpSchemaAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "precheck",
+			Func:    d.Service.Precheck,
+		}, {
+			FunName: "init",
+			Func:    d.Service.Init,
+		},
+		{
+			FunName: "dump table schema",
+			Func:    d.Service.DumpSchema,
+		},
+		{
+			FunName: "upload table schema",
+			Func:    d.Service.Upload,
+		},
+	}
+	if err := steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("导出表结构成功")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/start_mysql.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/start_mysql.go
new file mode 100644
index 0000000000..81caf04793
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/start_mysql.go
@@ -0,0 +1,52 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/computil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+
+	"github.com/spf13/cobra"
+)
+
+// StartMysqlAct TODO
+type StartMysqlAct struct {
+	*subcmd.BaseOptions
+	Payload computil.StartMySQLParam
+}
+
+// NewStartMysqlCommand TODO
+func NewStartMysqlCommand() *cobra.Command {
+	act := StartMysqlAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "start",
+		Short: "启动MySQL实例",
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
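+// For reference, an illustrative payload for `dbactuator mysql start`
+// (Host and Port exist on computil.StartMySQLParam, as used in Run below;
+// the exact json tags are assumed and should be verified against computil):
+//
+//	{"host": "127.0.0.1", "port": 20000}
+//
+// passed base64-encoded via -p, as in subcmd.CmdBaseExampleStr.
+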
+// Init TODO
+func (s *StartMysqlAct) Init() (err error) {
+	if err = s.DeserializeAndValidate(&s.Payload); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	return
+}
+
+// Run TODO
+func (s *StartMysqlAct) Run() (err error) {
+	defer util.LoggerErrorStack(logger.Error, err)
+	if _, err = s.Payload.StartMysqlInstance(); err != nil {
+		logger.Error("start %s:%d failed, err: %s", s.Payload.Host, s.Payload.Port, err.Error())
+		return err
+	}
+	return
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/switch_backend_to_slave.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/switch_backend_to_slave.go
new file mode 100644
index 0000000000..99b56f38e9
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/switch_backend_to_slave.go
@@ -0,0 +1,118 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/cutover"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// CutOverToSlaveAct TODO
+type CutOverToSlaveAct struct {
+	*subcmd.BaseOptions
+	Service cutover.CutOverToSlaveComp
+}
+
+// NewCutOverToSlaveCommnad 创建 set-backend-toward-slave 子命令
+func NewCutOverToSlaveCommnad() *cobra.Command {
+	act := CutOverToSlaveAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "set-backend-toward-slave",
+		Short: "切换Proxy后端指向Slave",
+		Example: fmt.Sprintf(
+			`dbactuator mysql set-backend-toward-slave %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init TODO
+func (d *CutOverToSlaveAct) Init() (err error) {
+	logger.Info("CutOverToSlaveAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Run TODO
+func (d *CutOverToSlaveAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "[未切换] Init",
+			Func:    d.Service.Init,
+		},
+		{
+			FunName: "[未切换] 预检查",
+			Func:    d.Service.PreCheck,
+		},
+		{
+			FunName: "[切换中] 切换",
+			Func: func() error {
+				position, err := d.Service.CutOver()
+				if err != nil {
+					return err
+				}
+				d.OutputCtx(position)
+				return nil
+			},
+		},
+		{
+			FunName: "[已切换成功] 断开同步",
+			Func:    d.Service.StopAndResetSlave,
+		},
+		{
+			FunName: "[已切换成功] 剩余操作",
+			Func: func() error {
+				if d.Service.Params.GrantRepl {
+					if err := d.Service.GrantRepl(); err != nil {
+						logger.Error("授权Repl账户失败: %s", err.Error())
+					}
+				}
+				if d.Service.Params.LockedSwitch {
+					switchUser := d.Service.Params.Cluster.MasterIns.SwitchTmpAccount.User
+					host := d.Service.Params.Host
+					if err := d.Service.Params.Cluster.MasterIns.DropSwitchUser(
+						fmt.Sprintf(
+							"%s@%s",
+							switchUser,
+							host,
+						),
+					); err != nil {
+						logger.Error("删除临时用户失败%s", err.Error())
+					}
+				}
+				return nil
+			},
+		},
+	}
+	if err := steps.Run(); err != nil {
+		logger.Error("run set-backend-toward-slave failed: %s", err.Error())
+		return err
+	}
+
+	logger.Info("set-backend-toward-slave successfully")
+	return nil
+}
+
+// Rollback TODO
+func (d *CutOverToSlaveAct) Rollback() (err error) {
+	return
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/uninstall_mysql.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/uninstall_mysql.go
new file mode 100644
index 0000000000..04298ee064
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/uninstall_mysql.go
@@ -0,0 +1,76 @@
+package mysqlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// UnInstallMysqlAct TODO
+type UnInstallMysqlAct struct {
+	*subcmd.BaseOptions
+	Service mysql.UnInstallMySQLComp
+}
+
+// NewUnInstallMysqlCommand TODO
+func NewUnInstallMysqlCommand() *cobra.Command {
+	act := UnInstallMysqlAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "uninstall",
+		Short:   "下架MySQL",
+		Example: fmt.Sprintf(`dbactuator mysql uninstall %s`, subcmd.CmdBaseExampleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init TODO
+func (d *UnInstallMysqlAct) Init() (err error) {
+	logger.Info("UnInstallMysqlAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return d.Service.Init()
+}
+
+// Run TODO
+func (d *UnInstallMysqlAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "预检查",
+			Func:    d.Service.PreCheck,
+		},
+		{
+			FunName: "停止数据库实例",
+			Func:    d.Service.ShutDownMySQLD,
+		},
+		{
+			FunName: "清理机器数据&日志目录",
+			Func:    d.Service.ClearMachine,
+		},
+		{
+			FunName: "清理可能残存的mysql相关进程",
+			Func:    d.Service.KillDirtyProcess,
+		},
+	}
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("uninstall mysql successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/clone_proxy_user.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/clone_proxy_user.go
new file mode 100644
index 0000000000..9abd026f3f
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/clone_proxy_user.go
@@ -0,0 +1,92 @@
+// Package proxycmd TODO
+/*
+ * @Description:  dbactuator proxy clone_proxy_user 入口函数
+
+ */
+package proxycmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// CloneProxyUserAct TODO
+// extend payload
+/*
+ {
+     "source_proxy_host": "1.1.1.1",
+     "source_proxy_port": 10000,
+     "target_proxy_host": "2.2.2.2",
+     "target_proxy_port": 10000
+ }
+*/
+type CloneProxyUserAct struct {
+	*subcmd.BaseOptions
+	Service mysql_proxy.CloneProxyUserComp
+}
+
+// NewCloneProxyUserCommand TODO
+func NewCloneProxyUserCommand() *cobra.Command {
+	act := CloneProxyUserAct{
+		BaseOptions: subcmd.GBaseOptions,
+		Service: mysql_proxy.CloneProxyUserComp{
+			Params: &mysql_proxy.CloneProxyUserParam{},
+		},
+	}
+	cmd := &cobra.Command{
+		Use:   "clone-proxy-user",
+		Short: "proxy clone user",
+		Example: fmt.Sprintf(
+			`dbactuator proxy clone-proxy-user %s %s `,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init TODO
+func (c *CloneProxyUserAct) Init() (err error) {
+	if err = c.Deserialize(&c.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	c.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Validate TODO
+func (c *CloneProxyUserAct) Validate() (err error) {
+	return c.BaseOptions.Validate()
+}
+
+// Run TODO
+func (c *CloneProxyUserAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "初始化",
+			Func:    c.Service.Init,
+		},
+		{
+			FunName: "Clone proxy user",
+			Func:    c.Service.CloneProxyUser,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("clone proxy user successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/cmd.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/cmd.go
new file mode 100644
index 0000000000..a2062d5872
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/cmd.go
@@ -0,0 +1,35 @@
+// Package proxycmd TODO
+/*
+ * @Description: proxy 相关操作的子命令集合
+ */
+package proxycmd
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/templates"
+
+	"github.com/spf13/cobra"
+)
+
+// NewMysqlProxyCommand TODO
+func NewMysqlProxyCommand() *cobra.Command {
+	cmds := &cobra.Command{
+		Use:   "proxy [proxy operation]",
+		Short: "MySQL Proxy Operation Command Line Interface",
+		RunE:  subcmd.ValidateSubCommand(),
+	}
+	groups := templates.CommandGroups{
+		{
+			Message: "mysql_proxy",
+			Commands: []*cobra.Command{
+				NewDeployMySQLProxyCommand(),
+				NewSetBackendsCommand(),
+				NewUnInstallProxyCommand(),
+				NewCloneProxyUserCommand(),
+				NewRestartProxyCommand(),
+			},
+		},
+	}
+	groups.Add(cmds)
+	return cmds
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/install_mysql_proxy.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/install_mysql_proxy.go
new file mode 100644
index 0000000000..85ee579aef
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/install_mysql_proxy.go
@@ -0,0 +1,100 @@
+package proxycmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// MySQLProxyAct TODO
+type MySQLProxyAct struct {
+	Options DeployMySQLProxyOptions
+	Service mysql_proxy.InstallMySQLProxyComp
+}
+
+// DeployMySQLProxyOptions TODO
+type DeployMySQLProxyOptions struct {
+	*subcmd.BaseOptions
+}
+
+// NewDeployMySQLProxyCommand TODO
+func NewDeployMySQLProxyCommand() *cobra.Command {
+	act := MySQLProxyAct{
+		Options: DeployMySQLProxyOptions{
+			BaseOptions: subcmd.GBaseOptions,
+		},
+		Service: mysql_proxy.InstallMySQLProxyComp{
+			Params: &mysql_proxy.InstallMySQLProxyParam{},
+		},
+	}
+	cmd := &cobra.Command{
+		Use:     "deploy",
+		Short:   "部署mysql-proxy实例",
+		Example: fmt.Sprintf("dbactuator proxy deploy %s", subcmd.CmdBaseExampleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validator())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
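+// An illustrative end-to-end invocation (the payload shape is defined by
+// mysql_proxy.InstallMySQLProxyParam and is not reproduced here):
+//
+//	dbactuator proxy deploy -u <uid> -n <node_id> -p <base64 json payload>
+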
+// Validator TODO
+func (d *MySQLProxyAct) Validator() error {
+	return d.Options.Validate()
+}
+
+// Init TODO
+func (d *MySQLProxyAct) Init() error {
+	if err := d.Options.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("MySQLProxyActivity Deserialize failed: %s", err.Error())
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return d.Service.Init()
+}
+
+// Run TODO
+func (d *MySQLProxyAct) Run() error {
+	steps := subcmd.Steps{
+		{
+			FunName: "环境预检查",
+			Func:    d.Service.PreCheck,
+		},
+		{
+			FunName: "初始化目录、文件",
+			Func:    d.Service.InitInstanceDirs,
+		},
+		{
+			FunName: "生成proxy.cnf配置",
+			Func:    d.Service.GenerateProxycnf,
+		},
+		{
+			FunName: "解压安装包",
+			Func:    d.Service.DecompressPkg,
+		},
+		{
+			FunName: "启动Proxy",
+			Func:    d.Service.Start,
+		},
+		{
+			FunName: "初始化默认账户",
+			Func:    d.Service.InitProxyAdminAccount,
+		},
+		{
+			FunName: "生成exporter配置文件",
+			Func:    d.Service.CreateExporterCnf,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("install mysql-proxy successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/restart_mysql_proxy.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/restart_mysql_proxy.go
new file mode 100644
index 0000000000..290d4804b4
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/restart_mysql_proxy.go
@@ -0,0 +1,70 @@
+package proxycmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// RestartProxyAct 重启 proxy 实例的 act 参数
+type RestartProxyAct struct {
+	*subcmd.BaseOptions
+	Service mysql_proxy.RestartMySQLProxyComp
+}
+
+// NewRestartProxyCommand TODO
+func NewRestartProxyCommand() *cobra.Command {
+	act := RestartProxyAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "restart",
+		Short: "重启Proxy",
+		Example: fmt.Sprintf(
+			`dbactuator proxy restart %s %s`, subcmd.CmdBaseExampleStr,
+			subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init 反序列化 payload 并初始化通用参数
+func (d *RestartProxyAct) Init() (err error) {
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Run 执行重启 proxy 的步骤
+func (d *RestartProxyAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "预检查",
+			Func:    d.Service.PreCheck,
+		},
+		{
+			FunName: "重启proxy",
+			Func:    d.Service.RestartProxy,
+		},
+	}
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("restart proxy successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/set_backend.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/set_backend.go
new file mode 100644
index 0000000000..4f08a02efb
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/set_backend.go
@@ -0,0 +1,90 @@
+// Package proxycmd TODO
+/*
+ * @Description:  dbactuator proxy set-backend 入口函数
+ */
+package proxycmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// SetBackendAct TODO
+// extend payload
+/*
+{
+    "host": "127.0.0.1",
+    "port": 10000,
+    "backend_host": "127.0.0.1",
+    "backend_port": 20000
+}
+*/
+type SetBackendAct struct {
+	*subcmd.BaseOptions
+	Service mysql_proxy.ProxySetBackendCom
+}
+
+// NewSetBackendsCommand TODO
+func NewSetBackendsCommand() *cobra.Command {
+	act := SetBackendAct{
+		BaseOptions: subcmd.GBaseOptions,
+		Service: mysql_proxy.ProxySetBackendCom{
+			Params: mysql_proxy.ProxySetBackendParam{},
+		},
+	}
+	cmd := &cobra.Command{
+		Use:   "set-backend",
+		Short: "proxy set backends",
+		Example: fmt.Sprintf(
+			`dbactuator proxy set-backend %s %s `,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init TODO
+func (c *SetBackendAct) Init() (err error) {
+	if err = c.Deserialize(&c.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	c.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Validate TODO
+func (c *SetBackendAct) Validate() (err error) {
+	return c.BaseOptions.Validate()
+}
+
+// Run TODO
+func (c *SetBackendAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "初始化",
+			Func:    c.Service.Init,
+		},
+		{
+			FunName: "Set backends",
+			Func:    c.Service.SetBackend,
+		},
+	}
+
+	if err := steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("set proxy backends successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/uninstall_mysql_proxy.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/uninstall_mysql_proxy.go
new file mode 100644
index 0000000000..5daed102ac
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/proxycmd/uninstall_mysql_proxy.go
@@ -0,0 +1,67 @@
+package proxycmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// UnInstallProxyAct TODO
+type UnInstallProxyAct struct {
+	*subcmd.BaseOptions
+	Service mysql_proxy.UnInstallMySQLProxyComp
+}
+
+// NewUnInstallProxyCommand TODO
+func NewUnInstallProxyCommand() *cobra.Command {
+	act := UnInstallProxyAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "uninstall",
+		Short:   "下架Proxy",
+		Example: fmt.Sprintf(`dbactuator proxy uninstall %s`, subcmd.CmdBaseExampleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init TODO
+func (d *UnInstallProxyAct) Init() (err error) {
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return d.Service.Init()
+}
+
+// Run TODO
+func (d *UnInstallProxyAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "预检查",
+			Func:    d.Service.PreCheck,
+		},
+		{
+			FunName: "下架proxy",
+			Func:    d.Service.UnInstallProxy,
+		},
+	}
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("uninstall proxy successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/cmd.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/cmd.go
new file mode 100644
index 0000000000..f9cd749066
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/cmd.go
@@ -0,0 +1,33 @@
+// Package spidercmd tendbcluster 命令
+/*
+ * @Description: spider 相关操作的子命令集合
+ */
+package spidercmd
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/templates"
+
+	"github.com/spf13/cobra"
+)
+
+// NewSpiderCommand tendbcluster 命令
+func NewSpiderCommand() *cobra.Command {
+	cmds := &cobra.Command{
+		Use:   "spider [spider operation]",
+		Short: "Spider Operation Command Line Interface",
+		RunE:  subcmd.ValidateSubCommand(),
+	}
+	groups := templates.CommandGroups{
+		{
+			Message: "spider operation sets",
+			Commands: []*cobra.Command{
+				NewDeploySpiderCommand(),
+				NewUnInstallSpiderCommand(),
+				NewRestartSpiderCommand(),
+			},
+		},
+	}
+	groups.Add(cmds)
+	return cmds
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/install_spider.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/install_spider.go
new file mode 100644
index 0000000000..b0f446eb2c
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/install_spider.go
@@ -0,0 +1,131 @@
+package spidercmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// DeploySpiderAct 部署集群
+type DeploySpiderAct struct {
+	*subcmd.BaseOptions
+	BaseService mysql.InstallMySQLComp
+}
+
+// NewDeploySpiderCommand godoc
+//
+// @Summary      部署 spider 实例
+// @Description  部署 spider 实例说明
+// @Tags         spider
+// @Accept       json
+// @Param        body body      mysql.InstallMySQLComp  true  "short description"
+// @Router       /spider/deploy [post]
+func NewDeploySpiderCommand() *cobra.Command {
+	act := DeploySpiderAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "deploy",
+		Short: "部署Spider实例",
+		Example: fmt.Sprintf(
+			`dbactuator spider deploy %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.BaseService.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init 初始化
+func (d *DeploySpiderAct) Init() (err error) {
+	logger.Info("DeploySpiderAct Init")
+	if err = d.Deserialize(&d.BaseService.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.BaseService.GeneralParam = subcmd.GeneralRuntimeParam
+	return d.BaseService.InitDefaultParam()
+}
+
+// Rollback 回滚
+//
+//	@receiver d
+//	@return err
+func (d *DeploySpiderAct) Rollback() (err error) {
+	var r rollback.RollBackObjects
+	if err = d.DeserializeAndValidate(&r); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	err = r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run 执行
+func (d *DeploySpiderAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "预检查",
+			Func:    d.BaseService.PreCheck,
+		},
+		{
+			FunName: "渲染my.cnf配置",
+			Func:    d.BaseService.GenerateMycnf,
+		},
+		{
+			FunName: "初始化mysqld相关目录",
+			Func:    d.BaseService.InitInstanceDirs,
+		},
+		{
+			FunName: "下载并且解压安装包",
+			Func:    d.BaseService.DecompressMysqlPkg,
+		},
+		{
+			FunName: "初始化mysqld系统库表",
+			Func:    d.BaseService.Install,
+		},
+		{
+			FunName: "启动mysqld",
+			Func:    d.BaseService.Startup,
+		},
+		{
+			FunName: "执行初始化系统基础权限、库表SQL",
+			Func:    d.BaseService.InitDefaultPrivAndSchema,
+		},
+		// {
+		// 	FunName: "生成exporter配置文件",
+		// 	Func:    d.BaseService.CreateExporterCnf,
+		// },
+
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.BaseService.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		} else {
+			fmt.Printf("%s\n", string(rollbackCtxb))
+		}
+		return err
+	}
+
+	logger.Info("install_spider successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/restart_spider.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/restart_spider.go
new file mode 100644
index 0000000000..17addf4f55
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/restart_spider.go
@@ -0,0 +1,73 @@
+package spidercmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/spider"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// RestartSpiderAct TODO
+type RestartSpiderAct struct {
+	*subcmd.BaseOptions
+	Service spider.RestartSpiderComp
+}
+
+// NewRestartSpiderCommand 重启 spider 子命令
+func NewRestartSpiderCommand() *cobra.Command {
+	act := RestartSpiderAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "restart-spider",
+		Short: "重启spider",
+		Example: fmt.Sprintf(
+			`dbactuator spider restart-spider %s %s `,
+			subcmd.CmdBaseExampleStr,
+			subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			// Validate是BaseOptions绑定的方法
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init TODO
+func (d *RestartSpiderAct) Init() (err error) {
+	if err := d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Run TODO
+func (d *RestartSpiderAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "初始化",
+			Func:    d.Service.Init,
+		},
+		{
+			FunName: "预检查",
+			Func:    d.Service.PreCheck,
+		},
+		{
+			FunName: "重启spider",
+			Func:    d.Service.RestartSpider,
+		},
+	}
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("restart spider successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/uninstall_spider.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/uninstall_spider.go
new file mode 100644
index 0000000000..7f3f5a7784
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spidercmd/uninstall_spider.go
@@ -0,0 +1,76 @@
+package spidercmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// UnInstallSpiderAct TODO
+type UnInstallSpiderAct struct {
+	*subcmd.BaseOptions
+	Service mysql.UnInstallMySQLComp
+}
+
+// NewUnInstallSpiderCommand TODO
+func NewUnInstallSpiderCommand() *cobra.Command {
+	act := UnInstallSpiderAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "uninstall",
+		Short:   "下架Spider",
+		Example: fmt.Sprintf(`dbactuator spider uninstall %s`, subcmd.CmdBaseExampleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init TODO
+func (d *UnInstallSpiderAct) Init() (err error) {
+	logger.Info("UnInstallSpiderAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return d.Service.Init()
+}
+
+// Run TODO
+func (d *UnInstallSpiderAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "预检查",
+			Func:    d.Service.PreCheck,
+		},
+		{
+			FunName: "停止Spider实例",
+			Func:    d.Service.ShutDownMySQLD,
+		},
+		{
+			FunName: "清理机器数据&日志目录",
+			Func:    d.Service.ClearMachine,
+		},
+		{
+			FunName: "清理可能残存的spider相关进程",
+			Func:    d.Service.KillDirtyProcess,
+		},
+	}
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("uninstall spider successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/add_spider_slave_relationship.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/add_spider_slave_relationship.go
new file mode 100644
index 0000000000..fb34452c07
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/add_spider_slave_relationship.go
@@ -0,0 +1,75 @@
+package spiderctlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// AddSlaveClusterRoutingAct 添加 spider slave 集群时,添加相关路由信息
+type AddSlaveClusterRoutingAct struct {
+	*subcmd.BaseOptions
+	Service spiderctl.AddSlaveClusterRoutingComp
+}
+
+// AddSlaveClusterRoutingCommand TODO
+func AddSlaveClusterRoutingCommand() *cobra.Command {
+	act := AddSlaveClusterRoutingAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "add-slave-cluster-routing",
+		Short: "添加spider-slave集群的相关路由信息",
+		Example: fmt.Sprintf(`dbactuator spiderctl add-slave-cluster-routing %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example())),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init 初始化
+func (d *AddSlaveClusterRoutingAct) Init() (err error) {
+	logger.Info("AddSlaveClusterRoutingAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Run 执行
+func (d *AddSlaveClusterRoutingAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "初始化",
+			Func:    d.Service.Init,
+		},
+		{
+			FunName: "执行前检验",
+			Func:    d.Service.PerCheck,
+		},
+
+		{
+			FunName: "添加slave集群路由信息",
+			Func:    d.Service.AddSlaveRouting,
+		},
+	}
+
+	if err = steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("add slave cluster routing relationship successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/add_temporary_spider.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/add_temporary_spider.go
new file mode 100644
index 0000000000..235769f146
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/add_temporary_spider.go
@@ -0,0 +1,73 @@
+// Package spiderctlcmd TODO
+package spiderctlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// AddTmpSpiderAct 添加临时 spider 节点的 act 参数
+// 包含基础 act 参数与后续操作所需的 comp 参数
+type AddTmpSpiderAct struct {
+	*subcmd.BaseOptions
+	Service spiderctl.AddTmpSpiderComp
+}
+
+// NewAddTmpSpiderCommand TODO
+func NewAddTmpSpiderCommand() *cobra.Command {
+	act := AddTmpSpiderAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "add-tmp-spider",
+		Short: "添加临时spider节点",
+		Example: fmt.Sprintf(`dbactuator spiderctl add-tmp-spider %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+
+		},
+	}
+	return cmd
+}
+
+// Init TODO
+func (d *AddTmpSpiderAct) Init() (err error) {
+	logger.Info("AddTmpSpiderAct Init")
+	// 反序列化
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	// 初始化变量
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return nil
+}
+
+// Run TODO
+func (d *AddTmpSpiderAct) Run() (err error) {
+	// 按顺序组装执行步骤
+	steps := subcmd.Steps{
+		{
+			FunName: "初始化",
+			Func:    d.Service.Init,
+		},
+		{
+			FunName: "中控节点配置新增spider节点信息",
+			Func:    d.Service.AddTmpSpider,
+		},
+	}
+	if err = steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("add temporary spider node successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/cmd.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/cmd.go
new file mode 100644
index 0000000000..bc6448eab0
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/cmd.go
@@ -0,0 +1,35 @@
+// Package spiderctlcmd 中控节点
+/*
+ * @Description: spiderctl (中控节点)相关操作的子命令集合
+ */
+package spiderctlcmd
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/templates"
+
+	"github.com/spf13/cobra"
+)
+
+// NewSpiderCtlCommand 中控节点
+func NewSpiderCtlCommand() *cobra.Command {
+	cmds := &cobra.Command{
+		Use:   "spiderctl [spider-ctl operation]",
+		Short: "Spiderctl Operation Command Line Interface",
+		RunE:  subcmd.ValidateSubCommand(),
+	}
+	groups := templates.CommandGroups{
+		{
+			Message: "spiderctl operation sets",
+			Commands: []*cobra.Command{
+				NewDeploySpiderCtlCommand(),
+				NewInitClusterRoutingCommand(),
+				NewAddTmpSpiderCommand(),
+				AddSlaveClusterRoutingCommand(),
+				NewUnInstallSpiderCtlCommand(),
+			},
+		},
+	}
+	groups.Add(cmds)
+	return cmds
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/init_cluster_routing_relationship.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/init_cluster_routing_relationship.go
new file mode 100644
index 0000000000..1c8f235f6b
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/init_cluster_routing_relationship.go
@@ -0,0 +1,77 @@
+package spiderctlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// InitClusterRoutingAct 初始化 tendb cluster 集群的路由关系
+type InitClusterRoutingAct struct {
+	*subcmd.BaseOptions
+	Service spiderctl.InitClusterRoutingComp
+}
+
+// NewInitClusterRoutingCommand godoc
+//
+// @Summary      初始化tendb cluster 集群的路由关系
+// @Description  初始化tendb cluster 集群的路由关系说明
+// @Tags         spiderctl
+// @Accept       json
+// @Param        body body      spiderctl.InitClusterRoutingComp  true  "short description"
+// @Router /spiderctl/init-cluster-routing [post]
+func NewInitClusterRoutingCommand() *cobra.Command {
+	act := InitClusterRoutingAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "init-cluster-routing",
+		Short: "初始化tendb cluster集群节点关系",
+		Example: fmt.Sprintf(
+			`dbactuator spiderctl init-cluster-routing %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init 初始化
+func (d *InitClusterRoutingAct) Init() (err error) {
+	logger.Info("InitClusterRoutingAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return
+}
+
+// Run 执行
+func (d *InitClusterRoutingAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "初始化",
+			Func:    d.Service.Init,
+		},
+		{
+			FunName: "配置mysql.servers表",
+			Func:    d.Service.InitMySQLServers,
+		},
+	}
+
+	if err = steps.Run(); err != nil {
+		return err
+	}
+
+	logger.Info("init tendb cluster routing relationship successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/install_spider_ctl.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/install_spider_ctl.go
new file mode 100644
index 0000000000..9e3369e163
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/install_spider_ctl.go
@@ -0,0 +1,135 @@
+package spiderctlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"encoding/json"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// DeployCtlSpiderAct 部署 spider ctl 实例
+type DeployCtlSpiderAct struct {
+	*subcmd.BaseOptions
+	BaseService mysql.InstallMySQLComp
+}
+
+// NewDeploySpiderCtlCommand godoc
+//
+// @Summary      部署 spider ctl 实例
+// @Description  部署 spider ctl 实例说明
+// @Tags         spiderctl
+// @Accept       json
+// @Param        body body      mysql.InstallMySQLComp  true  "short description"
+// @Router       /spiderctl/deploy [post]
+func NewDeploySpiderCtlCommand() *cobra.Command {
+	act := DeployCtlSpiderAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:   "deploy",
+		Short: "部署Spider-ctl实例",
+		Example: fmt.Sprintf(
+			`dbactuator spiderctl deploy %s %s`,
+			subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.BaseService.Example()),
+		),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				util.CheckErr(act.Rollback())
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init 初始化
+func (d *DeployCtlSpiderAct) Init() (err error) {
+	logger.Info("DeploySpiderAct Init")
+	if err = d.Deserialize(&d.BaseService.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.BaseService.GeneralParam = subcmd.GeneralRuntimeParam
+
+	return d.BaseService.InitDefaultParam()
+}
+
+// Rollback 回滚
+//
+//	@receiver d
+//	@return err
+func (d *DeployCtlSpiderAct) Rollback() (err error) {
+	var r rollback.RollBackObjects
+	if err = d.DeserializeAndValidate(&r); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	err = r.RollBack()
+	if err != nil {
+		logger.Error("roll back failed %s", err.Error())
+	}
+	return
+}
+
+// Run 执行
+func (d *DeployCtlSpiderAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "预检查",
+			Func:    d.BaseService.PreCheck,
+		},
+		{
+			FunName: "渲染my.cnf配置",
+			Func:    d.BaseService.GenerateMycnf,
+		},
+		{
+			FunName: "初始化mysqld相关目录",
+			Func:    d.BaseService.InitInstanceDirs,
+		},
+		{
+			FunName: "下载并且解压安装包",
+			Func:    d.BaseService.DecompressTdbctlPkg,
+		},
+		{
+			FunName: "初始化mysqld系统库表",
+			Func:    d.BaseService.Install,
+		},
+		{
+			FunName: "启动tdbctl",
+			Func:    d.BaseService.TdbctlStartup,
+		},
+		{
+			FunName: "执行初始化系统基础权限、库表SQL",
+			Func:    d.BaseService.InitDefaultPrivAndSchema,
+		},
+		{
+			FunName: "安装半同步复制插件",
+			Func:    d.BaseService.InstallRplSemiSyncPlugin,
+		},
+		// {
+		// 	FunName: "生成exporter配置文件",
+		// 	Func:    d.BaseService.CreateExporterCnf,
+		// },
+	}
+
+	if err := steps.Run(); err != nil {
+		rollbackCtxb, rerr := json.Marshal(d.BaseService.RollBackContext)
+		if rerr != nil {
+			logger.Error("json Marshal %s", rerr.Error())
+			fmt.Printf("Can't RollBack\n")
+		} else {
+			fmt.Printf("%s\n", string(rollbackCtxb))
+		}
+		return err
+	}
+
+	logger.Info("install spider-ctl successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/uninstall_spider_ctl.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/uninstall_spider_ctl.go
new file mode 100644
index 0000000000..3dc3f6138a
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/spiderctlcmd/uninstall_spider_ctl.go
@@ -0,0 +1,76 @@
+package spiderctlcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// UnInstallSpiderCtlAct TODO
+type UnInstallSpiderCtlAct struct {
+	*subcmd.BaseOptions
+	Service mysql.UnInstallMySQLComp
+}
+
+// NewUnInstallSpiderCtlCommand TODO
+func NewUnInstallSpiderCtlCommand() *cobra.Command {
+	act := UnInstallSpiderCtlAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "uninstall",
+		Short:   "下架Spider-ctl",
+		Example: fmt.Sprintf(`dbactuator spiderctl uninstall %s`, subcmd.CmdBaseExampleStr),
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			if act.RollBack {
+				return
+			}
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init TODO
+func (d *UnInstallSpiderCtlAct) Init() (err error) {
+	logger.Info("UnInstallSpiderCtlAct Init")
+	if err = d.Deserialize(&d.Service.Params); err != nil {
+		logger.Error("DeserializeAndValidate failed, %v", err)
+		return err
+	}
+	d.Service.GeneralParam = subcmd.GeneralRuntimeParam
+	return d.Service.Init()
+}
+
+// Run TODO
+func (d *UnInstallSpiderCtlAct) Run() (err error) {
+	steps := subcmd.Steps{
+		{
+			FunName: "预检查",
+			Func:    d.Service.PreCheck,
+		},
+		{
+			FunName: "停止Spider-ctl实例",
+			Func:    d.Service.ShutDownMySQLD,
+		},
+		{
+			FunName: "清理机器数据&日志目录",
+			Func:    d.Service.ClearMachine,
+		},
+		{
+			FunName: "清理可能残存的spider相关进程",
+			Func:    d.Service.KillDirtyProcess,
+		},
+	}
+	if err := steps.Run(); err != nil {
+		return err
+	}
+	logger.Info("uninstall spider-ctl successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/subcmd.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/subcmd.go
new file mode 100644
index 0000000000..3526768d20
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/subcmd.go
@@ -0,0 +1,341 @@
+// Package subcmd TODO
+package subcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/common/go-pubpkg/validate"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/templates"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/caarlos0/env/v6"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+)
+
+const (
+	// CmdBaseExampleStr 子命令通用的命令行参数示例
+	CmdBaseExampleStr = "-u {uid} -n {node_id} -p {base64}"
+	// PayloadFormatRaw 表示 payload 为未经 base64 编码的原始 json
+	PayloadFormatRaw = "raw"
+)
+
+// GBaseOptions 全局基础命令行参数
+var GBaseOptions *BaseOptions
+
+// GeneralRuntimeParam 运行时通用参数(由 payload 的 general 部分解析得到)
+var GeneralRuntimeParam *components.GeneralParam
+
+func init() {
+	GBaseOptions = &BaseOptions{}
+	GeneralRuntimeParam = &components.GeneralParam{}
+}
+
+// BaseOptions 子命令的基础命令行参数
+/*
+	Payload 是 json 字符串经 base64 编码之后的字符串
+*/
+type BaseOptions struct {
+	Uid           string
+	RootId        string
+	NodeId        string
+	VersionId     string
+	Payload       string
+	PayloadFormat string
+	RollBack      bool
+	Helper        bool
+	// 是否为外部版本
+	// on ON
+	External string
+}
+
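+// A minimal sketch of how a caller typically builds the -p payload consumed
+// here (the JSON body is illustrative; real fields depend on the target
+// component):
+//
+//	body := `{"general":{},"extend":{"host":"127.0.0.1","port":3306}}`
+//	p := base64.StdEncoding.EncodeToString([]byte(body))
+//	// dbactuator mysql start -u <uid> -n <node_id> -p $p
+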
+// IsExternal 是否编译成外部版本
+func (b *BaseOptions) IsExternal() bool {
+	return strings.ToUpper(b.External) == "ON"
+}
+
+const (
+	// StepStateDefault 初始状态
+	StepStateDefault = "default"
+	// StepStateRunning 运行中
+	StepStateRunning = "running"
+	// StepStateSucc 成功
+	StepStateSucc = "success"
+	// StepStateSkip 用户主动跳过该 step
+	StepStateSkip = "skipped"
+	// StepStateStop 用户主动暂停,特殊形式的 failed
+	StepStateStop = "stopped"
+	// StepStateFail 失败
+	StepStateFail = "failed"
+)
+
+// StepFunc 描述一个可执行步骤
+type StepFunc struct {
+	FunName      string
+	Func         func() error
+	State        string
+	FuncRetry    func() error
+	FuncRollback func() error
+	FuncStop     func() error
+	Retries      int
+}
+
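+// Note: Steps.Run below only uses FunName and Func; State, FuncRetry,
+// FuncRollback, FuncStop and Retries are carried on the struct but are not
+// yet exercised by Run.
+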
+// Steps 顺序执行的步骤集合
+type Steps []StepFunc
+
+// Run 依次执行各步骤,任一步骤失败即中止并返回错误
+func (s Steps) Run() (err error) {
+	for idx, step := range s {
+		logMessage := fmt.Sprintf("step <%d>, ready to run [%s]", idx, step.FunName)
+		logger.Info(logMessage)
+		if err = step.Func(); err != nil {
+			logger.Error("step<%d>: %s 失败, 错误: %s", idx, step.FunName, err)
+			// @todo 顺便输出接下来还有哪些 step 未运行
+			return err
+		}
+		logger.Info("step <%d>, run [%s] successfully", idx, step.FunName)
+	}
+	return nil
+}
+
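+// A minimal usage sketch (component and method names are illustrative, not
+// from this package):
+//
+//	steps := subcmd.Steps{
+//		{FunName: "precheck", Func: comp.PreCheck},
+//		{FunName: "install", Func: comp.Install},
+//	}
+//	if err := steps.Run(); err != nil {
+//		return err
+//	}
+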
+// DeserializeAndValidate 反序列化 payload,并校验参数
+/*
+	参数校验基于 golang validator v10
+*/
+func (b *BaseOptions) DeserializeAndValidate(s interface{}) (err error) {
+	var bp []byte
+	if b.PayloadFormat == PayloadFormatRaw {
+		bp = []byte(b.Payload)
+	} else {
+		logger.Info("DeserializeAndValidate payload body: %s", b.Payload)
+		bp, err = base64.StdEncoding.DecodeString(b.Payload)
+		if err != nil {
+			return err
+		}
+	}
+
+	// 如果 s 里面的 sub struct 是 pointer,要初始化后再传进来才能解析到环境变量
+	if err := env.Parse(s); err != nil {
+		logger.Warn("env parse error, ignore environment variables for payload:%s", err.Error())
+		// env: expected a pointer to a Struct
+	}
+	defer logger.Info("payload parsed: %+v", s)
+	if err = json.Unmarshal(bp, s); err != nil {
+		logger.Error("json.Unmarshal failed, payload: %+v, err: %v", s, err)
+		return
+	}
+	if err = validate.GoValidateStruct(s, false, true); err != nil {
+		logger.Error("validate struct failed, payload: %+v, err: %v", s, err)
+		return
+	}
+	return nil
+}
+
+// Deserialize 反序列化 payload,并校验参数
+/*
+  payload 结构:
+  {
+    "general":{} // 运行时通用参数
+    "extend":{}  // 实际参数
+  }
+	参数校验基于 golang validator v10
+*/
+func (b *BaseOptions) Deserialize(s interface{}) (err error) {
+	var bp []byte
+	if b.PayloadFormat == PayloadFormatRaw {
+		bp = []byte(b.Payload)
+	} else {
+		logger.Info("Deserialize payload body: %s", b.Payload)
+		bp, err = base64.StdEncoding.DecodeString(b.Payload)
+		if err != nil {
+			return err
+		}
+	}
+	if err := env.Parse(s); err != nil {
+		logger.Warn("env parse error, ignore environment variables for payload:%s", err.Error())
+	}
+	logger.Info("params from env %+v", s)
+	g := components.RuntimeAccountParam{}
+	if err := env.Parse(&g); err != nil {
+		logger.Warn("env parse error, ignore environment variables for payload:%s", err.Error())
+	}
+	// logger.Info("Account from env: %+v", g)
+	bip := components.BaseInputParam{
+		ExtendParam:  s,
+		GeneralParam: &components.GeneralParam{RuntimeAccountParam: g},
+	}
+	defer logger.Info("payload parsed: %+v", bip)
+	if err = json.Unmarshal(bp, &bip); err != nil {
+		logger.Error("json.Unmarshal failed, payload: %+v, err: %v", s, err)
+		err = errors.WithMessage(err, "参数解析错误")
+		return
+	}
+	// logger.Info("params after unmarshal %+v", bip)
+	if err = validate.GoValidateStruct(bip, false, true); err != nil {
+		logger.Error("validate struct failed, payload: %+v, err: %v", bip, err)
+		err = errors.WithMessage(err, "参数输入错误")
+		return
+	}
+	GeneralRuntimeParam = bip.GeneralParam
+	return nil
+}
+
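+// For reference, the decoded payload accepted by Deserialize has this shape
+// (keys under "extend" depend on the target component; values illustrative):
+//
+//	{
+//	    "general": { ...运行时账户等通用参数... },
+//	    "extend":  { "host": "127.0.0.1", "port": 3306 }
+//	}
+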
+// DeserializeSimple 简单 payload 不需要 {"extend":{body}},直接传入 body
+func (b *BaseOptions) DeserializeSimple(s interface{}) (err error) {
+	var body []byte
+	if b.PayloadFormat == PayloadFormatRaw {
+		body = []byte(b.Payload)
+	} else {
+		logger.Info("DeserializeSimple payload body: %s", b.Payload)
+		body, err = base64.StdEncoding.DecodeString(b.Payload)
+		if err != nil {
+			return err
+		}
+	}
+
+	// 如果 s 里面的 sub struct 是 pointer,要初始化后再传进来才能解析到环境变量
+	if err := env.Parse(s); err != nil {
+		logger.Warn("env parse error, ignore environment variables for payload:%s", err.Error())
+	}
+
+	defer logger.Info("payload parsed: %+v", s)
+	if err = json.Unmarshal(body, &s); err != nil {
+		logger.Error("json.Unmarshal failed, payload: %+v, err: %v", s, err)
+		err = errors.WithMessage(err, "参数解析错误")
+		return
+	}
+	if err = validate.GoValidateStruct(s, false, true); err != nil {
+		logger.Error("validate struct failed, payload: %+v, err: %v", s, err)
+		err = errors.WithMessage(err, "参数输入错误")
+		return
+	}
+	return nil
+}
+
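+// Unlike Deserialize, the decoded body here is the component params directly,
+// without the {"general":...,"extend":...} wrapper, e.g. (illustrative):
+//
+//	{"host": "127.0.0.1", "port": 3306}
+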
+// Validate 校验基础参数是否完整
+func (b BaseOptions) Validate() (err error) {
+	if len(b.Payload) == 0 {
+		return fmt.Errorf("payload is required")
+	}
+	// logger.Info("Validate payload body: %s", b.Payload)
+
+	return nil
+}
+
+// OutputCtx 将 ctx 内容输出到标准输出
+func (b BaseOptions) OutputCtx(ctx string) {
+	fmt.Printf("%s", ctx)
+}
+
+// SetLogger will mkdir logs/
+func SetLogger(cmd *cobra.Command, opt *BaseOptions) {
+	var file *os.File
+	var err error
+	var format = true
+
+	executable, _ := os.Executable()
+	// executeName := filepath.Base(executable)
+	executeDir := filepath.Dir(executable)
+	if err = os.Chdir(executeDir); err != nil {
+		os.Stderr.WriteString(err.Error())
+		os.Exit(1)
+	}
+
+	mode := os.Getenv("MODE")
+	lgn := ""
+	if cmd != nil && cmd.Parent() != nil {
+		lgn = fmt.Sprintf("%s-%s", cmd.Parent().Name(), cmd.Name())
+	}
+	switch mode {
+	case "dev":
+		file = os.Stdout
+		format = false
+	default:
+		logFileDir := filepath.Join(executeDir, "logs")
+		_ = os.MkdirAll(logFileDir, 0755)
+		fileName := filepath.Join(logFileDir, fmt.Sprintf("actuator_%s_%s_%s.log", opt.Uid, lgn, opt.NodeId))
+		file, err = os.OpenFile(fileName, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
+		if err != nil {
+			os.Stderr.WriteString(err.Error())
+			os.Exit(1)
+		}
+	}
+
+	extMap := map[string]string{
+		"uid":        opt.Uid,
+		"node_id":    opt.NodeId,
+		"root_id":    opt.RootId,
+		"version_id": opt.VersionId,
+	}
+	l := logger.New(file, format, logger.InfoLevel, extMap)
+	logger.ResetDefault(l)
+	defer logger.Sync()
+}
+
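+// The two logging modes handled above, for quick reference:
+//
+//	MODE=dev dbactuator mysql start ...  # logs go to stdout
+//	dbactuator mysql start ...           # logs appended under logs/actuator_<uid>_<cmd>_<node_id>.log
+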
+// ValidateSubCommand 校验子命令是否存在
+func ValidateSubCommand() func(cmd *cobra.Command, args []string) error {
+	return func(cmd *cobra.Command, args []string) error {
+		if len(args) <= 0 {
+			return fmt.Errorf(
+				"you must specify the type of operation. %s",
+				SuggestAPIResources(cmd.Parent().Name()),
+			)
+		}
+		curName := args[0]
+		var subCommands []string
+		for _, c := range cmd.Commands() {
+			subCommands = append(subCommands, c.Name())
+		}
+		if len(subCommands) <= 0 {
+			return nil
+		}
+		if !util.StringsHas(subCommands, curName) {
+			return fmt.Errorf("unknown subcommand %s", curName)
+		}
+		return nil
+	}
+}
+
+// PrintSubCommandHelper 返回是否成功打印 helper
+// 如果打印,同时运行下 runHelp
+func PrintSubCommandHelper(cmd *cobra.Command, opt *BaseOptions) bool {
+	if opt.Helper {
+		if cmd.Parent().Name() == "dbactuator" {
+			fmt.Println("--helper needs a sub-command to show payload parameters")
+			os.Exit(1)
+		}
+		if cmd.Name() != "" {
+			subcmdPath := fmt.Sprintf("%s %s", cmd.Parent().Name(), cmd.Name())
+			if err := GetPathDefinitionHelper(subcmdPath); err != nil {
+				fmt.Println(err)
+				os.Exit(1)
+			} else {
+				return true
+			}
+		} else {
+			fmt.Println("--helper needs a sub-command")
+		}
+	}
+	return false
+}
+
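+// Typical flow, assuming the root command binds a --helper flag to
+// BaseOptions.Helper:
+//
+//	dbactuator mysql mycnf-change --helper   # prints the payload schema from swagger.json
+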
+// SuggestAPIResources returns a suggestion to use the "api-resources" command
+// to retrieve a supported list of resources
+func SuggestAPIResources(parent string) string {
+	return templates.LongDesc(
+		fmt.Sprintf(
+			"Use \"%s {Operation Type}\" for a complete list of supported resources.",
+			parent,
+		),
+	)
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/subcmd_helper.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/subcmd_helper.go
new file mode 100644
index 0000000000..ad63f99765
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/subcmd_helper.go
@@ -0,0 +1,338 @@
+package subcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/docs"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"encoding/json"
+	"fmt"
+	"log"
+	"strings"
+)
+
+const (
+	// DTString TODO
+	DTString = "string"
+	// DTInteger TODO
+	DTInteger = "integer"
+	// DTNumber TODO
+	DTNumber = "number"
+	// DTObject TODO
+	DTObject = "object"
+	// DTArray TODO
+	DTArray = "array"
+	// DTArrayObject TODO
+	DTArrayObject = "array object"
+	// DTBOOLEAN TODO
+	DTBOOLEAN = "boolean"
+	// DTUndefined TODO
+	DTUndefined = "undefined ref"
+	// RefMaxDepth TODO
+	RefMaxDepth = 9
+)
+
+const (
+	// DefinitionPrefix TODO
+	DefinitionPrefix = "#/definitions/"
+	// RefKey TODO
+	RefKey = "$ref"
+	// IndentStep TODO
+	IndentStep = "    "
+	// DefinitionKey TODO
+	DefinitionKey = "post"
+)
+
+// PostPath TODO
+type PostPath map[string]*Path // "post": {}
+// Path TODO
+type Path struct {
+	Description string           `json:"description"`
+	Summary     string           `json:"summary"`
+	Parameters  []Param          `json:"parameters"` // parameters[0].schema.$ref
+	Responses   map[string]Param `json:"responses"`
+}
+
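+// These types mirror only the subset of swagger.json consumed here, e.g.
+// (abridged, illustrative):
+//
+//	"paths": {
+//	    "/mysql/mycnf-change": {
+//	        "post": {"summary": "...", "parameters": [{"schema": {"$ref": "#/definitions/..."}}]}
+//	    }
+//	}
+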
+// PrintDescription TODO
+func (p *Path) PrintDescription() {
+	fmt.Printf("# Summary: %s\n", p.Summary)
+	if p.Description != "" {
+		fmt.Printf("# Description: %s\n", p.Description)
+	}
+}
+
+// Param TODO
+type Param struct {
+	Schema      RefMap `json:"schema"` // {"$ref":""}
+	Name        string `json:"name"`
+	Description string `json:"description"`
+}
+
+// RefMap TODO
+type RefMap map[string]string // "$ref":"#/definitions/xx"
+
+// RefMapObj TODO
+type RefMapObj struct {
+	Ref string `json:"$ref"`
+}
+
+// Parameter TODO
+type Parameter struct {
+	Type string `json:"type"`
+	// Properties   components.BaseInputParam `json:"properties"`
+	GeneralParam components.GeneralParam `json:"generalParam"` // generalParam.$ref
+	Params       Definition              `json:"params"`       // params.$ref
+}
+
+// Definition TODO
+type Definition struct {
+	Type        string               `json:"type"`
+	Required    []string             `json:"required"`
+	Properties  map[string]*Property `json:"properties"`
+	description string
+	depth       int // 禁止无限套娃
+	name        string
+	expanded    bool
+}
+
+// PrintProperties TODO
+func (d *Definition) PrintProperties(indent string, header string) {
+	if indent == "" {
+		fmt.Printf("%s: %s\n", header, d.description)
+	}
+	indent = IndentStep + indent
+	for _, prop := range d.Properties {
+		prop.Print(indent)
+	}
+}
+
+// NestedRef TODO
+type NestedRef struct {
+	Type string `json:"type"`
+	RefMapObj
+	Items *NestedRef `json:"items"`
+}
+
+// Property TODO
+type Property struct {
+	Type                 string        `json:"type"`
+	Description          string        `json:"description"`
+	Example              interface{}   `json:"example"`
+	Default              interface{}   `json:"default"`
+	Enum                 []interface{} `json:"enum"`
+	AdditionalProperties *NestedRef    `json:"additionalProperties"` // additionalProperties.$ref
+	Ref                  string        `json:"$ref"`                 // $ref, RefKey
+	Items                *NestedRef    `json:"items"`                // array: items.$ref
+
+	additionalProperties map[string]*Definition
+	ref                  *Definition
+	required             bool
+	name                 string
+	depth                int // 禁止无限套娃
+}
+
+func wrapperBoolean(flag bool) string {
+	if flag {
+		return " Required,"
+	} else {
+		return " " // Optional
+	}
+}
+
+func wrapperType(t string) string {
+	if t == DTObject {
+		return "dict"
+	} else if t == DTNumber {
+		return "float"
+	}
+	return t
+}
+
+func wrapperEnum(v []interface{}) string {
+	var enumStr = ""
+	if len(v) > 0 {
+		enumStr = fmt.Sprintf(` Enum oneof%v,`, v)
+	}
+	return enumStr
+}
+
+// Print TODO
+func (p *Property) Print(indent string) {
+	leftMaxPad := "20"
+	left := fmt.Sprintf("%s%s:", indent, p.name)
+
+	leftWithPad := fmt.Sprintf("%-"+leftMaxPad+"s", left)
+	ss := fmt.Sprintf(
+		"%s\t%s,%s%s %s",
+		leftWithPad, p.Type, wrapperBoolean(p.required), wrapperEnum(p.Enum), p.Description,
+	)
+	if p.Example != nil {
+		ss += fmt.Sprintf(". 例: %v", p.Example)
+	}
+	if p.Default != nil {
+		ss += fmt.Sprintf(", 默认值: %v", p.Default)
+	}
+	if p.ref != nil {
+		fmt.Println(ss)
+		p.ref.PrintProperties(indent, p.ref.description)
+	} else {
+		fmt.Println(ss)
+	}
+}
+
+// Definitions TODO
+type Definitions map[string]*Definition
+
+// JsonSpec TODO
+type JsonSpec struct {
+	Paths       map[string]PostPath `json:"paths"`
+	Definitions Definitions         `json:"definitions"`
+}
+
+// GetOneDefinition returns the definition with the given name, or nil if it is unknown
+func (ds *Definitions) GetOneDefinition(name string) *Definition {
+	name = strings.TrimPrefix(name, DefinitionPrefix)
+	if obj, ok := (*ds)[name]; ok {
+		return obj
+	}
+	// undefined definition name
+	return nil
+}
+
+// expandProperties expands every $ref definition in place
+func (ds *Definitions) expandProperties() {
+	for defName, d := range *ds {
+		d.name = defName
+		if !d.expanded { // expansion mutates the same shared reference, so never expand twice
+			d.ExpandProperties(ds)
+		}
+	}
+}
+
+// ExpandProperties expands a definition's properties,
+// since a property may reference other definitions
+func (d *Definition) ExpandProperties(defs *Definitions) {
+	d.expanded = true
+	if d.Type != DTObject {
+		logger.Info("helper definition is not an object %v", d)
+		return
+	}
+	for pname, prop := range d.Properties {
+		prop.depth = d.depth
+		prop.name = pname
+		if util.StringsHas(d.Required, pname) {
+			prop.required = true
+		}
+
+		refObjName := prop.getRef()
+		if refObjName != "" {
+			prop.ref = defs.GetOneDefinition(refObjName)
+			if prop.ref == nil {
+				prop.Type = DTUndefined // unknown definition, clear it
+				prop.Ref = ""
+				continue
+			}
+			prop.ref.depth = prop.depth + 1
+			d.depth = prop.ref.depth
+			if d.depth > RefMaxDepth {
+				fmt.Printf(
+					"ref max depth exceed, definition name:%v, depth:%d, depth def:%v\n",
+					d.name, d.depth, prop.ref,
+				)
+				continue
+			}
+			prop.ref.ExpandProperties(defs) // recurse
+			prop.ref.description = prop.Description
+			if prop.Type == "" {
+				prop.Type = DTObject
+			}
+		}
+	}
+}
+
+// getRef reports whether this property nests a reference to another definition.
+// If so, the ref name is returned and the nested type is appended to Type
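+// e.g. an array property whose items hold {"$ref":"#/definitions/Foo"} gets
+// " object" appended to its Type and "#/definitions/Foo" returned as the ref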
+func (p *Property) getRef() string {
+	if p.Ref != "" {
+		p.Type += " " + DTObject
+		return p.Ref
+	} else if p.AdditionalProperties != nil {
+		p.Type += ":map[string]" + " " + p.AdditionalProperties.Type // DTString
+		return p.getItemsNestedRef(p.AdditionalProperties)
+	} else if p.Items != nil {
+		p.Type += " " + p.Items.Type
+		return p.getItemsNestedRef(p.Items)
+	}
+	return ""
+}
+
+func (p *Property) getItemsNestedRef(subRef *NestedRef) string {
+	if ref := subRef.RefMapObj.Ref; ref != "" {
+		p.Ref = ref
+		p.Type += " " + DTObject // DTArrayObject
+		return ref
+	} else if subRef.Items != nil {
+		if ref = subRef.Items.RefMapObj.Ref; ref != "" {
+			p.Ref = ref
+			p.Type += " " + DTObject // DTArrayObject
+			return ref
+		}
+		p.Type += " " + subRef.Items.Type
+	}
+	return ""
+}
+
+// GetPathDefinitionHelper takes a subcommand string and prints its parameter description, e.g.
+// mysql mycnf-change
+// is looked up as the swagger path /mysql/mycnf-change
+func GetPathDefinitionHelper(subcmd string) error {
+	defer func() {
+		if r := recover(); r != nil {
+			// logger.Error("get helper failed %s: %s", subcmd, r, string(debug.Stack()))
+		}
+	}()
+	if strings.Contains(subcmd, " ") {
+		tmp := strings.Split(strings.TrimSpace(subcmd), " ")
+		subcmd = "/" + strings.Join(tmp, "/")
+	}
+	f := docs.SwaggerDocs
+	doc := "swagger.json"
+	b, err := f.ReadFile(doc)
+	if err != nil {
+		return err
+	}
+	jsonSpec := JsonSpec{}
+	if err := json.Unmarshal(b, &jsonSpec); err != nil {
+		fmt.Println(err)
+		log.Fatalln("failed to parse docs/swagger.json")
+	}
+	if pathObj, ok := jsonSpec.Paths[subcmd]; !ok {
+		return fmt.Errorf("parameter definition not found for %s", subcmd)
+	} else if params, ok := pathObj[DefinitionKey]; !ok {
+		return fmt.Errorf("post parameter definition not found for %s", subcmd)
+	} else if len(params.Parameters) == 0 {
+		return fmt.Errorf("parameter list is empty for %s", subcmd)
+	}
+	// jsonSpec.Definitions.ExpandProperties()
+	pathDefinition := jsonSpec.Paths[subcmd][DefinitionKey]
+	pathDefinition.PrintDescription()
+	// parameters
+	reqSchema := pathDefinition.Parameters[0].Schema
+	schemaName := strings.TrimPrefix(reqSchema[RefKey], DefinitionPrefix)
+	thisDef := jsonSpec.Definitions[schemaName]
+	thisDef.ExpandProperties(&jsonSpec.Definitions)
+	thisDef.PrintProperties("", "\n# Param")
+
+	// responses
+	for code, resp := range pathDefinition.Responses {
+		respSchema := resp.Schema
+		schemaName = strings.TrimPrefix(respSchema[RefKey], DefinitionPrefix)
+		thisDef = jsonSpec.Definitions[schemaName]
+		thisDef.ExpandProperties(&jsonSpec.Definitions) // if the param object already contains the resp object, this may expand it twice; not handled for now
+		thisDef.PrintProperties("", "\n# Response "+code)
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/subcmd_util.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/subcmd_util.go
new file mode 100644
index 0000000000..9cca425cd2
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/subcmd_util.go
@@ -0,0 +1,69 @@
+package subcmd
+
+import (
+	"bytes"
+	"crypto/aes"
+	"crypto/cipher"
+	"encoding/base64"
+	"encoding/json"
+)
+
+// PKCS5Padding pads data to a multiple of blockSize (PKCS#5/PKCS#7 style)
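+// e.g. with blockSize=16 and len(ciphertext)=13, three 0x03 bytes are appended;
+// input already at a block boundary gains a full extra block of 0x10 bytes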
+func PKCS5Padding(ciphertext []byte, blockSize int) []byte {
+	padding := blockSize - len(ciphertext)%blockSize
+	padtext := bytes.Repeat([]byte{byte(padding)}, padding)
+	return append(ciphertext, padtext...)
+}
+
+// PKCS5UnPadding strips the PKCS#5 padding added by PKCS5Padding
+func PKCS5UnPadding(origData []byte) []byte {
+	length := len(origData)
+	unpadding := int(origData[length-1])
+	if unpadding > length {
+		// malformed padding (e.g. decrypted with the wrong key): return unchanged rather than panic
+		return origData
+	}
+	return origData[:(length - unpadding)]
+}
+
+// AesEncrypt encrypts origData with AES-CBC;
+//
+//	the key must be 16, 24 or 32 bytes long
+func AesEncrypt(origData, key []byte) ([]byte, error) {
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+
+	blockSize := block.BlockSize()
+	origData = PKCS5Padding(origData, blockSize)
+	blockMode := cipher.NewCBCEncrypter(block, key[:blockSize])
+	crypted := make([]byte, len(origData))
+	blockMode.CryptBlocks(crypted, origData)
+	return crypted, nil
+}
+
+// AesDecrypt decrypts a base64-encoded AES-CBC ciphertext
+func AesDecrypt(crypted string, key []byte) ([]byte, error) {
+	block, err := aes.NewCipher(key)
+	if err != nil {
+		return nil, err
+	}
+	data, err := base64.StdEncoding.DecodeString(crypted)
+	if err != nil {
+		return nil, err
+	}
+	blockSize := block.BlockSize()
+	blockMode := cipher.NewCBCDecrypter(block, key[:blockSize])
+	origData := make([]byte, len(data))
+	blockMode.CryptBlocks(origData, data)
+	origData = PKCS5UnPadding(origData)
+	return origData, nil
+}
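+
+// Usage sketch (a 16-byte key is assumed; AesEncrypt returns raw bytes, so they are
+// base64-encoded before being passed to AesDecrypt):
+//
+//	key := []byte("0123456789abcdef")
+//	ct, _ := AesEncrypt([]byte("secret"), key)
+//	pt, _ := AesDecrypt(base64.StdEncoding.EncodeToString(ct), key)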
+
+// ToPrettyJson renders v as indented JSON, for printing examples
+func ToPrettyJson(v interface{}) string {
+	if data, err := json.MarshalIndent(v, "", "    "); err == nil {
+		// ss := "\n# use --helper to show explanations. example for payload:\n --payload-format raw --payload '%s'"
+		return string(data)
+	}
+	return "no valid example found"
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinit.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinit.go
new file mode 100644
index 0000000000..36da300e85
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinit.go
@@ -0,0 +1,79 @@
+package sysinitcmd
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/internal/subcmd"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/sysinit"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// SysInitAct wraps the base options and the sysinit service
+type SysInitAct struct {
+	*subcmd.BaseOptions
+	Service sysinit.SysInitParam
+}
+
+// NewSysInitCommand creates the sysinit subcommand
+func NewSysInitCommand() *cobra.Command {
+	act := SysInitAct{
+		BaseOptions: subcmd.GBaseOptions,
+	}
+	cmd := &cobra.Command{
+		Use:     "sysinit",
+		Short:   "Exec sysinit_mysql.sh to init the mysql default os user and password",
+		Example: `dbactuator sysinit -p eyJ1c2VyIjoiIiwicHdkIjoiIn0=`,
+		Run: func(cmd *cobra.Command, args []string) {
+			util.CheckErr(act.Validate())
+			util.CheckErr(act.Init())
+			util.CheckErr(act.Run())
+		},
+	}
+	return cmd
+}
+
+// Init deserializes and validates the payload
+func (s *SysInitAct) Init() (err error) {
+	if err = s.DeserializeAndValidate(&s.Service); err != nil {
+		logger.Error("DeserializeAndValidate err %s", err.Error())
+		return err
+	}
+	return
+}
+
+// Run executes the sysinit steps in order
+func (s *SysInitAct) Run() (err error) {
+	steps := []subcmd.StepFunc{
+		{
+			FunName: "run the sysinit script",
+			Func:    s.Service.SysInitMachine,
+		},
+	}
+	if s.IsExternal() {
+		steps = append(
+			steps, subcmd.StepFunc{
+				FunName: "install Perl and related dependencies",
+				Func:    s.Service.InitExternal,
+			},
+		)
+	}
+	steps = append(
+		steps, subcmd.StepFunc{
+			FunName: fmt.Sprintf("reset the OS password for %s", s.Service.OsMysqlUser),
+			Func:    s.Service.SetOsPassWordForMysql,
+		},
+	)
+
+	logger.Info("start sysinit ...")
+	for idx, f := range steps {
+		if err = f.Func(); err != nil {
+			logger.Error("step <%d>, run [%s] occur %v", idx, f.FunName, err)
+			return err
+		}
+		logger.Info("step <%d>, run [%s] successfully", idx, f.FunName)
+	}
+	logger.Info("sysinit successfully")
+	return
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinitcmd.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinitcmd.go
new file mode 100644
index 0000000000..19eb9248ea
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/sysinitcmd/sysinitcmd.go
@@ -0,0 +1,2 @@
+// Package sysinitcmd TODO
+package sysinitcmd
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backup.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backup.go
new file mode 100644
index 0000000000..9366278415
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backup.go
@@ -0,0 +1,23 @@
+package backup_download
+
+// DownloadFile the lifecycle interface of a file-download component
+type DownloadFile interface {
+	Init() error
+	PreCheck() error
+	Start() error
+	Pause() error
+	Stop() error
+	Resume() error
+	Rollback() error
+	GetStatus() error
+	GetAction() error
+}
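+
+// DFScpComp (scp_comp.go) implements the full DownloadFile interface; DFHttpComp
+// (http_comp.go) implements only the subset it needs (Init/PreCheck/Start/WaitDone/PostCheck).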
+
+// DFBase common download params
+type DFBase struct {
+	BKBizID int `json:"bk_biz_id"`
+	// per-file download rate limit, in MB/s
+	BWLimitMB int64 `json:"bwlimit_mb"`
+	// number of concurrent downloads
+	Concurrency int `json:"max_concurrency"` // TODO: max concurrency of simultaneous downloads
+}
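+
+// A DFBase fragment of a payload looks like, for example:
+//	{"bk_biz_id": 1, "bwlimit_mb": 30, "max_concurrency": 1}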
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backup_download.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backup_download.go
new file mode 100644
index 0000000000..db87845d71
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backup_download.go
@@ -0,0 +1,2 @@
+// Package backup_download TODO
+package backup_download
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backupsys_query_comp.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backupsys_query_comp.go
new file mode 100644
index 0000000000..232459aeeb
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backupsys_query_comp.go
@@ -0,0 +1,67 @@
+package backup_download
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/httpclient"
+)
+
+// IBSQueryComp queries files in the ieg backup system
+type IBSQueryComp struct {
+	Params IBSQueryParam `json:"extend"`
+}
+
+// IBSQueryParam params for the ibs-query download subcommand
+type IBSQueryParam struct {
+	IBSQueryReq
+	// ieg backup system url and auth params
+	IBSInfo IBSBaseInfo `json:"ibs_info" validate:"required"`
+	client  *httpclient.HttpClient
+}
+
+// Example TODO
+func (c *IBSQueryComp) Example() interface{} {
+	ibsReq := IBSQueryParam{
+		IBSInfo: IBSBaseInfo{
+			SysID:  "bkdbm",
+			Key:    "fzLosxxxxxxxxxxxx",
+			Ticket: "",
+			Url:    "http://{{BACKUP_SERVER}}",
+		},
+		IBSQueryReq: IBSQueryReq{
+			SourceIp:  "1.1.1.1",
+			BeginDate: "2022-10-30 00:00:01",
+			EndDate:   "2022-10-31 00:00:01",
+			FileName:  "filename",
+		},
+	}
+	return &IBSQueryComp{
+		Params: ibsReq,
+	}
+}
+
+// Init builds the backup-system http client
+func (c *IBSQueryComp) Init() error {
+	c.Params.client = &httpclient.HttpClient{
+		Client: httpclient.New(),
+		Url:    c.Params.IBSInfo.Url,
+	}
+	return nil
+}
+
+// PreCheck TODO
+func (c *IBSQueryComp) PreCheck() error {
+	return nil
+}
+
+// Start runs the backup file search
+func (c *IBSQueryComp) Start() error {
+	return c.Params.searchFiles()
+}
+
+func (r *IBSQueryParam) searchFiles() error {
+	if resp, err := r.BsQuery(r.IBSQueryReq); err != nil {
+		return err
+	} else {
+		return components.PrintOutputCtx(resp.Detail)
+	}
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backupsys_recover_comp.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backupsys_recover_comp.go
new file mode 100644
index 0000000000..c79a761c51
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/backupsys_recover_comp.go
@@ -0,0 +1,288 @@
+package backup_download
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/httpclient"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"os"
+	"os/user"
+	"path/filepath"
+	"runtime/debug"
+	"strconv"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// IBSRecoverComp TODO
+type IBSRecoverComp struct {
+	Params IBSRecoverParam `json:"extend"`
+}
+
+// TaskFilesWild describes a fuzzy search for files to download
+type TaskFilesWild struct {
+	FileTag string `json:"file_tag"`
+	// fuzzy search keyword, no * needed
+	NameSearch string `json:"name_search"`
+	// regex applied to filter the search results
+	NameRegex string `json:"name_regex"`
+}
+
+// TaskFilesExact describes one exact file to download
+type TaskFilesExact struct {
+	// task id, used for downloading
+	TaskId   string `json:"task_id,omitempty"`
+	FileName string `json:"file_name,omitempty"`
+	// file size
+	Size string `json:"size,omitempty"`
+	Md5  string `json:"md5,omitempty"`
+}
+
+// IBSRecoverParam download ibs-recover
+type IBSRecoverParam struct {
+	IBSRecoverReq
+	// ieg backup system url and auth params
+	IBSInfo IBSBaseInfo `json:"ibs_info" validate:"required"`
+	// For exact-name downloads use task_files: list the files to download, giving task_id or the full file_name.
+	// If size is provided as well, the backup system need not be asked for the size when deciding whether a file must be re-downloaded
+	TaskFiles []TaskFilesExact `json:"task_files"`
+	// For fuzzy search-and-download use task_files_wild
+	TaskFilesWild *TaskFilesWild `json:"task_files_wild"`
+
+	// Whether to keep files that already exist in the local target directory (i.e. skip the download). Defaults to false
+	SkipLocalExists bool `json:"skip_local_exists" example:"false"`
+	// When downloading by file name, or deciding whether to skip a download, ibs_query params are needed for the lookup
+	IBSQuery IBSQueryForRecover `json:"ibs_query"`
+
+	taskIdSlice []string
+
+	client             *httpclient.HttpClient
+	maxQueryRetryTimes int
+	maxFailCheckNum    int
+}
+
+// IBSRecoverTask is mostly identical to IBSQueryResult.
+// When task_id is "", the task_id is looked up by file_name.
+// The other fields are used to verify the download afterwards; they are usually meaningful
+// when task_id is given (coming from a previous backup query).
+// When both task_id and file_name are set, existing files in the target directory are checked first to decide whether to skip the download.
+// With only task_id, no skip check is possible and that task_id is downloaded from the backup system directly.
+// With only file_name, the backup system is searched first for task_id and file size; if the size matches, the download is skipped
+type IBSRecoverTask struct {
+	// task id, used for downloading
+	TaskId   string `json:"task_id,omitempty"`
+	FileName string `json:"file_name,omitempty"`
+	// file size
+	Size string `json:"size,omitempty"`
+	Md5  string `json:"md5,omitempty"`
+	// file status
+	Status string `json:"status,omitempty"`
+	// last modification time of the file
+	FileLastMtime string `json:"file_last_mtime,omitempty"`
+	// IP that reported this backup task
+	SourceIp string `json:"source_ip,omitempty"`
+	FileTag  string `json:"file_tag,omitempty"`
+}
+
+// IBSQueryForRecover mirrors IBSQueryReq with the required tags removed
+type IBSQueryForRecover struct {
+	// source IP, i.e. the machine that submitted the backup task
+	SourceIp string `json:"source_ip"`
+	// query start time; the backup system filters on file_last_mtime
+	BeginDate string `json:"begin_date"`
+	// submission end time; together with begin_date this forms a range, which preferably should not exceed 3 days
+	EndDate string `json:"end_date"`
+}
+
+// Example TODO
+func (c *IBSRecoverComp) Example() interface{} {
+	ibsReq := IBSRecoverParam{
+		maxQueryRetryTimes: 100,
+		maxFailCheckNum:    6,
+		IBSInfo: IBSBaseInfo{
+			SysID:  "bkdbm",
+			Key:    "fzLosxxxxxxxxxxxx",
+			Ticket: "",
+			Url:    "http://{{BACKUP_SERVER}}",
+		},
+		IBSRecoverReq: IBSRecoverReq{
+			TaskidList:  "",
+			DestIp:      "1.1.1.1",
+			Directory:   "/data/dbbak",
+			LoginUser:   "mysql",
+			LoginPasswd: "xxx",
+			Reason:      "example recover",
+		},
+		TaskFiles: []TaskFilesExact{
+			{FileName: "xxx", TaskId: "111", Size: "1023"},
+			{FileName: "yyy"},
+			{TaskId: "222"},
+		},
+		TaskFilesWild: &TaskFilesWild{
+			FileTag:    INCREMENT_BACKUP,
+			NameSearch: "20000",
+			NameRegex:  "^.+20000\\.\\d+(\\..*)*$",
+		},
+		SkipLocalExists: true,
+		IBSQuery: IBSQueryForRecover{
+			SourceIp:  "1.1.1.1",
+			BeginDate: "2022-10-30 01:01:01",
+			EndDate:   "2022-10-31",
+		},
+	}
+	return IBSRecoverComp{
+		Params: ibsReq,
+	}
+}
+
+// skipFileExists decides whether downloading this file can be skipped
+func (c *IBSRecoverComp) skipFileExists(f string, sizeExpect string) (bool, error) {
+	if !c.Params.SkipLocalExists {
+		return false, nil
+	}
+	if sizeExpect == "" {
+		return false, nil // without a known file_size we cannot verify completeness even if the file exists, so do not skip
+	}
+	fileSize := cmutil.GetFileSize(f)
+	if fileSize == -1 {
+		logger.Info("local file not exists: %s", f)
+		return false, nil // file not exists
+	} else if fileSize < 0 {
+		return false, errors.Errorf("cannot get file_size %s", f)
+	}
+	// the file already exists; verify its size
+	if strconv.FormatInt(fileSize, 10) != sizeExpect {
+		logger.Warn("file %s exists but size %d not match %s. remove it", f, fileSize, sizeExpect)
+		if err := os.Remove(f); err != nil {
+			return false, err
+		}
+		// the file may still be downloading: after a successful delete, sleeping here gives the previous download task a chance to fail, so this new round takes over the download
+		time.Sleep(1 * time.Second)
+		return false, nil
+	} else {
+		// the local file already exists with a matching size; do not download again
+		logger.Info("file already exists and size match: %s %s. skip it", f, sizeExpect)
+		return true, nil
+	}
+}
+
+// Init sets retry limits, builds the backup-system http client, and prepares the target directory
+func (c *IBSRecoverComp) Init() error {
+	c.Params.maxQueryRetryTimes = 100
+	c.Params.maxFailCheckNum = 6
+	c.Params.client = &httpclient.HttpClient{
+		Client: httpclient.New(),
+		Url:    c.Params.IBSInfo.Url,
+	}
+	// dest_ip must be one of this machine's IPs; validate when the local IP list is available
+	if localIPAddrs, err := osutil.GetLocalIPAddrs(); err == nil {
+		if c.Params.DestIp == "127.0.0.1" || !util.StringsHas(localIPAddrs, c.Params.DestIp) {
+			return errors.Errorf("dest_ip %s should be local", c.Params.DestIp)
+		}
+	}
+	// create dest directory
+	p := c.Params.IBSRecoverReq
+	_, err := user.Lookup(p.LoginUser)
+	if err != nil {
+		return errors.Wrap(err, p.LoginUser)
+	}
+	if !cmutil.FileExists(p.Directory) {
+		if err := os.MkdirAll(p.Directory, 0755); err != nil {
+			return errors.Wrap(err, p.Directory)
+		}
+	}
+	if errStr, err := osutil.ExecShellCommand(
+		false,
+		fmt.Sprintf("chown %s %s", p.LoginUser, p.Directory),
+	); err != nil {
+		logger.Error(errStr)
+		return err
+	}
+	return nil
+}
+
+// PreCheck validates the task file lists and which files already exist locally
+func (c *IBSRecoverComp) PreCheck() error {
+	// check that the target directory exists
+	// check that the user exists
+	// check which files already exist in the target directory
+	if c.Params.TaskFiles != nil && c.Params.TaskFilesWild != nil {
+		return errors.New("only one of task_files and task_files_wild may be given")
+	}
+	if c.Params.TaskFilesWild != nil {
+		if c.Params.TaskFilesWild.NameSearch == "" {
+			return errors.New("task_files_wild.name_search is required")
+		}
+		if err := c.skipFilesAndInitWild(); err != nil {
+			return err
+		}
+	}
+	if c.Params.TaskFiles != nil {
+		if err := c.skipFilesAndInit(); err != nil {
+			return err
+		}
+	}
+	// TODO: check that disk space is sufficient
+	return nil
+}
+
+// Start requests the backup system to download the files
+func (c *IBSRecoverComp) Start() error {
+	return c.Params.downloadFiles()
+}
+
+// WaitDone TODO
+func (c *IBSRecoverComp) WaitDone() error {
+	return nil
+}
+
+// PostCheck verifies the size of each downloaded file
+func (c *IBSRecoverComp) PostCheck() error {
+	// check file sizes
+	var errList []error
+	for _, task := range c.Params.TaskFiles {
+		f := filepath.Join(c.Params.Directory, task.FileName)
+		if fileSize := cmutil.GetFileSize(f); fileSize == -1 {
+			errList = append(errList, errors.Errorf("file %s not exists", f))
+		} else if fileSize < 0 {
+			errList = append(errList, errors.Errorf("file %s size get failed", f))
+		} else if fileSize > 0 {
+			if strconv.FormatInt(fileSize, 10) != task.Size {
+				errList = append(errList, errors.Errorf("file size not match %s for %s", task.Size, f))
+			}
+		}
+	}
+	if len(errList) > 0 {
+		logger.Error("IBSRecoverComp.PostCheck %v", errList)
+		return util.SliceErrorsToError(errList)
+	}
+	return nil
+}
+
+// OutputCtx prints the final task file list
+func (c *IBSRecoverComp) OutputCtx() error {
+	return components.PrintOutputCtx(c.Params.TaskFiles)
+}
+
+func (r *IBSRecoverParam) downloadFiles() error {
+	defer func() {
+		if rec := recover(); rec != nil {
+			logger.Error("downloadFiles inner panic,error:%v,stack:%s", rec, string(debug.Stack()))
+			return
+		}
+	}()
+	// parallel reads are scheduled by the backup system, so request all downloads at once;
+	// requesting the backup system per task_id would need client-side concurrency control
+	if err := r.RecoverAndWaitDone(r.IBSRecoverReq); err != nil {
+		return err
+	}
+	// make sure the downloaded files end up owned by the mysql user
+	cmd := fmt.Sprintf("chown -R %s %s", r.IBSRecoverReq.LoginUser, r.IBSRecoverReq.Directory)
+	output, err := osutil.ExecShellCommand(false, cmd)
+	if err != nil {
+		logger.Warn("cmd:%s failed, output:%s, err:%s", cmd, output, err.Error())
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/cos.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/cos.go
new file mode 100644
index 0000000000..3c52241046
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/cos.go
@@ -0,0 +1 @@
+package backup_download
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/gse.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/gse.go
new file mode 100644
index 0000000000..3c52241046
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/gse.go
@@ -0,0 +1 @@
+package backup_download
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/http_client.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/http_client.go
new file mode 100644
index 0000000000..29f4093c0a
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/http_client.go
@@ -0,0 +1,233 @@
+package backup_download
+
+import (
+	"bytes"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/auth"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"math/rand"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+const (
+	// apiserver response code
+	statusSuccess int = 0
+
+	// job executor user
+	// jobExecuterUser = "pub"
+	jobExecuterUser = "scr-system"
+)
+
+// APIServerResponse TODO
+type APIServerResponse struct {
+	Code    int             `json:"code"`
+	Message string          `json:"message"`
+	Data    json.RawMessage `json:"data"`
+}
+
+// Client TODO
+type Client struct {
+	apiservers []string
+
+	// JWT token
+	token     string
+	secretId  string
+	secretKey string
+
+	// client for apiservers
+	client *http.Client
+}
+
+// NewClientByHosts creates a Client from a list of apiserver hosts
+func NewClientByHosts(hosts []string) (*Client, error) {
+	cli := &Client{}
+	cli.apiservers = append(cli.apiservers, hosts...)
+
+	cli.client = &http.Client{
+		Transport: &http.Transport{},
+	}
+
+	return cli, nil
+}
+
+// New creates a Client from a comma-separated apiserver list
+func New(apiServers string) (*Client, error) {
+	cli := &Client{}
+	if apiServers == "" {
+		return nil, fmt.Errorf("apiservers is null")
+	}
+	for _, host := range strings.Split(apiServers, ",") {
+		cli.apiservers = append(cli.apiservers, host)
+	}
+
+	tr := &http.Transport{}
+
+	cli.client = &http.Client{
+		Transport: tr,
+	}
+
+	return cli, nil
+}
+
+// DoNew sends an http request.
+// others: other parameters that may be used
+//
+//	other->{"user"}  : for gateway
+//
+// It retries automatically when the response error contains a known rate-limit marker
+func (c *Client) DoNew(method, url string, params interface{}, headers map[string]string) (*APIServerResponse, error) {
+	var response *APIServerResponse
+	var err error
+	for retryIdx := 0; retryIdx < 5; retryIdx++ {
+		response, err = c.doNewInner(method, url, params, headers)
+		if err == nil {
+			break
+		}
+		if strings.Contains(err.Error(), "cse.flowcontrol.Consumer.qps.limit") {
+			logger.Warn("DoNew failed, retryIdx:%d, err:%s", retryIdx, err.Error())
+			wait := retryIdx*retryIdx*1000 + rand.Intn(1000)
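+			// quadratic backoff with jitter: ~0-1s, 1-2s, 4-5s, 9-10s, 16-17s between attempts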
+			time.Sleep(time.Duration(wait) * time.Millisecond)
+			continue
+		}
+		break
+	}
+	return response, err
+}
+
+func (c *Client) doNewInner(method, url string, params interface{}, headers map[string]string) (
+	*APIServerResponse,
+	error,
+) {
+	host, err := c.nextTarget()
+	if err != nil {
+		logger.Error("nextTarget get an error:%s", err)
+		return nil, fmt.Errorf("get target host failed, err: %+v", err)
+	}
+	body, err := json.Marshal(params)
+	if err != nil {
+		logger.Error("marshal %+v get an error:%s", params, err.Error())
+		return nil, fmt.Errorf("json marshal param failed, err: %+v", err)
+	}
+	req, err := http.NewRequest(method, host+url, bytes.NewReader(body))
+	if err != nil {
+		logger.Error("create a new request(%s,%s,%+v) get an error:%s", method, host+url, params, err.Error())
+		return nil, fmt.Errorf("new request failed, err: %+v", err)
+	}
+
+	// set auth...
+	c.setHeader(req, headers)
+
+	resp, err := c.client.Do(req)
+	if err != nil {
+		logger.Error("invoking http request failed, url: %s, error:%s", req.URL.String(), err.Error())
+		return nil, fmt.Errorf("do http request failed, err: %+v", err)
+	}
+	defer resp.Body.Close()
+
+	// occasional gateway timeouts have been seen; retry to check whether they persist within the window
+	for i := 1; i <= 5; i++ {
+		// 500: a release may be in progress
+		// 429: occasional throttling under high concurrency
+		// 504: cause unknown, retry for now
+		if !cmutil.HasElem(
+			resp.StatusCode, []int{
+				http.StatusInternalServerError, http.StatusTooManyRequests,
+				http.StatusGatewayTimeout,
+			},
+		) {
+			break
+		}
+
+		wait := i*i*1000 + rand.Intn(1000)
+		time.Sleep(time.Duration(wait) * time.Millisecond)
+		logger.Warn(
+			"client.Do result with %s, wait %d milliSeconds and retry, url: %s",
+			resp.Status,
+			wait,
+			req.URL.String(),
+		)
+		resp, err = c.client.Do(req)
+		if err != nil {
+			logger.Error("an error occur while invoking client.Do, url: %s, error:%s", req.URL.String(), err.Error())
+			return nil, fmt.Errorf("do http request failed, err: %+v", err)
+		}
+	}
+
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		err = fmt.Errorf("read resp.body error:%s", err.Error())
+		logger.Error(err.Error())
+		return nil, err
+	}
+	result := &APIServerResponse{}
+	err = json.Unmarshal(b, result)
+	if err != nil {
+		logger.Error("unmarshal %s to %+v get an error:%s", string(b), *result, err.Error())
+		return nil, fmt.Errorf("json unmarshal failed, err: %+v", err)
+	}
+
+	// check response and data is nil
+	if result.Code != statusSuccess {
+		logger.Warn(
+			"result.Code is %d not equal to %d,message:%s,data:%s,param:%+v", result.Code, statusSuccess,
+			result.Message, string(result.Data), params,
+		)
+		if len(result.Data) != 0 {
+			return nil, fmt.Errorf("[%v - %v - %s]", result.Code, result.Message, string(result.Data))
+		}
+		return nil, fmt.Errorf("%v - %v", result.Code, result.Message)
+	}
+	return result, nil
+}
+
+// Do TODO
+func (c *Client) Do(method, url string, params interface{}) (*APIServerResponse, error) {
+	return c.DoNew(method, url, params, map[string]string{})
+}
+
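+// nextTarget picks a random starting apiserver and walks the list until one passes
+// util.HostCheck, returning an error after a full unsuccessful cycle.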
+func (c *Client) nextTarget() (string, error) {
+	rand.Seed(time.Now().UnixNano())
+	startPos := rand.Intn(len(c.apiservers))
+	pos := startPos
+	for {
+		gotHost := c.apiservers[pos]
+		u, err := url.Parse(gotHost)
+		if err != nil {
+			if pos = (pos + 1) % len(c.apiservers); pos == startPos {
+				return "", fmt.Errorf("all hosts are down, uptime tests are failing")
+			}
+			continue
+		}
+		if util.HostCheck(u.Host) {
+			return gotHost, nil
+		}
+		logger.Error("host %s is down", gotHost)
+		if pos = (pos + 1) % len(c.apiservers); pos == startPos {
+			return "", fmt.Errorf("all hosts are down, uptime tests are failing")
+		}
+	}
+}
+
+func (c *Client) setHeader(req *http.Request, others map[string]string) {
+	user := jobExecuterUser
+	if _, ok := others["user"]; ok {
+		user = strings.TrimSpace(others["user"])
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	req.Header.Set("user", user)
+	// Set JWT token
+	if token, err := auth.Sign(user, c.secretId, c.secretKey); err == nil {
+		req.Header.Set("Authorization", "Bearer "+token)
+	}
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/http_comp.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/http_comp.go
new file mode 100644
index 0000000000..2346aa429e
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/http_comp.go
@@ -0,0 +1,177 @@
+package backup_download
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/httpclient"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// DFHttpComp runs on the target machine and downloads files over http
+type DFHttpComp struct {
+	Params   DFHttpParam `json:"extend"`
+	progress *progress
+}
+
+// DFHttpParam params for http downloads
+type DFHttpParam struct {
+	DFBase
+	HttpGet
+}
+
+// HttpGet params for downloading from an http file server
+type HttpGet struct {
+	// download url
+	Server string `json:"server" validate:"required,url"`
+	// which files to download
+	FileList []string `json:"file_list" validate:"required"`
+	// local directory to store the files in
+	PathTgt string `json:"path_tgt" validate:"required"`
+	// http url basic auth user
+	AuthUser string `json:"auth_user"`
+	// http url basic auth pass
+	AuthPass string `json:"auth_pass"`
+	// path to the curl binary, empty by default; currently only used to test the url
+	CurlPath    string   `json:"curl_path"`
+	CurlOptions []string `json:"curl_options"`
+
+	curlCmd string
+}
+
+// Example TODO
+func (d *DFHttpComp) Example() interface{} {
+	comp := DFHttpComp{
+		Params: DFHttpParam{
+			DFBase: DFBase{
+				BWLimitMB:   30,
+				Concurrency: 1,
+			},
+			HttpGet: HttpGet{
+				Server:   "http://server1:8082/datadbbak8082/",
+				PathTgt:  "/data/dbbak",
+				FileList: []string{"xx.info", "xx"},
+				AuthUser: "xx",
+				AuthPass: "yy",
+			},
+		},
+	}
+	return comp
+}
+
+// Init fills curl defaults (binary path, rate limit, silent mode, basic auth) and assembles the curl command
+func (d *DFHttpComp) Init() error {
+	if d.Params.CurlPath == "" {
+		d.Params.CurlPath = "curl"
+	}
+	if d.Params.BWLimitMB == 0 {
+		d.Params.BWLimitMB = 20
+	}
+	if !util.StringsHas(d.Params.CurlOptions, "--limit-rate") {
+		// d.Params.CurlOptions = append(d.Params.CurlOptions, fmt.Sprintf(" --limit-rate %dm", d.Params.BWLimitMB))
+		d.Params.CurlOptions = append(
+			d.Params.CurlOptions,
+			"--limit-rate", fmt.Sprintf("%dm", d.Params.BWLimitMB),
+		)
+	}
+	if !util.StringsHas(d.Params.CurlOptions, "-s") {
+		d.Params.CurlOptions = append(d.Params.CurlOptions, "-s")
+	}
+	// -XGET
+	if d.Params.AuthUser != "" {
+		d.Params.CurlOptions = append(
+			d.Params.CurlOptions,
+			fmt.Sprintf(`-u "%s:%s"`, d.Params.AuthUser, d.Params.AuthPass),
+		)
+		/*
+			authPassBase64 := base64.StdEncoding.EncodeToString([]byte(d.Params.AuthPass))
+			d.Params.CurlOptions = append(d.Params.CurlOptions,
+				"-H", fmt.Sprintf(`"Authorization: Basic %s"`, authPassBase64))
+		*/
+	}
+	d.Params.curlCmd = fmt.Sprintf("%s %s", d.Params.CurlPath, strings.Join(d.Params.CurlOptions, " "))
+	return nil
+}
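+
+// With the defaults above, curlCmd comes out roughly as:
+//	curl --limit-rate 20m -s -u "user:pass"
+// PreCheck then runs it against the server url to fetch the file listing.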
+
+// PreCheck requests the server url to verify it returns a file listing
+func (d *DFHttpComp) PreCheck() error {
+	testCurl := fmt.Sprintf("%s '%s'", d.Params.curlCmd, d.Params.Server)
+	logger.Info("test command: %s", testCurl)
+
+	if out, err := osutil.ExecShellCommand(false, testCurl); err != nil {
+		return err
+	} else {
+		if !strings.Contains(out, "\n") {
+			return fmt.Errorf("no file list returned")
+		}
+	}
+	return nil
+}
+
+// PostCheck TODO
+func (d *DFHttpComp) PostCheck() error {
+	return nil
+}
+
+// Start TODO
+func (d *DFHttpComp) Start() error {
+	if d.progress == nil {
+		d.progress = &progress{
+			Success: []string{},
+			Failed:  []string{},
+			Todo:    []string{},
+			Doing:   []string{},
+		}
+	}
+
+	fileList := d.Params.FileList
+	p := d.Params
+	for _, f := range fileList {
+		if cmutil.HasElem(f, d.progress.Success) {
+			continue
+		}
+		/*
+			shellDownload := fmt.Sprintf("%s '%s%s' -o '%s/%s'",
+				p.curlCmd, p.Server, f, p.PathTgt, f)
+			logger.Info("download command: %s", shellDownload)
+			out, err := osutil.ExecShellCommand(false, shellDownload)
+			// a shell-concatenated curl command is injectable, e.g. bash -c "curl --limit-rate 20m -s -u \"xx:yy\" http://server1:8082/datadbbak8082/ls  -o /data1/dbbak/ls ;cd .. ; ls"
+		*/
+
+		err := httpclient.Download(p.Server, p.PathTgt, f, p.AuthUser, p.AuthPass, p.BWLimitMB)
+		if err != nil {
+			logger.Error("download %s got error %s", f, err.Error())
+			d.progress.Failed = append(d.progress.Failed, f)
+			return err
+		}
+		/*
+			else if strings.TrimSpace(out) != "" {
+				d.progress.Failed = append(d.progress.Failed, f)
+				return fmt.Errorf("download %s expect stdout is empty, got %s", f, out)
+			}
+		*/
+		d.progress.Success = append(d.progress.Success, f)
+	}
+	return nil
+}
+
+// WaitDone TODO
+func (d *DFHttpComp) WaitDone() error {
+	totalList := d.Params.FileList
+	for {
+		if len(d.progress.Success)+len(d.progress.Failed) < len(totalList) && len(totalList) > 0 {
+			time.Sleep(5 * time.Second)
+		} else {
+			break
+		}
+	}
+	logger.Info("files download %+v", d.progress)
+	if len(d.progress.Failed) > 0 {
+		return fmt.Errorf("files download failed %d", len(d.progress.Failed))
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/ibs_recover_wild_comp.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/ibs_recover_wild_comp.go
new file mode 100644
index 0000000000..b17423bce4
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/ibs_recover_wild_comp.go
@@ -0,0 +1,130 @@
+package backup_download
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/jinzhu/copier"
+	"github.com/pkg/errors"
+)
+
+func (c *IBSRecoverComp) skipFilesAndInit() error {
+	taskFilesExist := []string{}
+	for i, tf := range c.Params.TaskFiles {
+		if tf.TaskId != "" {
+			if tf.FileName != "" && tf.Size != "" {
+				f := filepath.Join(c.Params.Directory, tf.FileName)
+				if skip, err := c.skipFileExists(f, tf.Size); err != nil {
+					return err
+				} else if skip {
+					taskFilesExist = append(taskFilesExist, tf.FileName)
+					continue
+				}
+			}
+			c.Params.taskIdSlice = append(c.Params.taskIdSlice, tf.TaskId)
+		} else if tf.FileName != "" {
+			// With only a file_name, the backup system must be queried.
+			// With both file_name and size, the backup system is queried only when the local size differs from the given size.
+			// Whether to skip is only known after the query, hence skipFileExists is called twice here
+			f := filepath.Join(c.Params.Directory, tf.FileName)
+			if skip, err := c.skipFileExists(f, tf.Size); err != nil {
+				return err
+			} else if skip {
+				taskFilesExist = append(taskFilesExist, tf.FileName)
+				continue
+			} // the file is absent locally, or existed with a mismatched size and was removed; now resolve task_id by file name
+
+			// query task_id with file_name
+			ibsQuery := &IBSQueryParam{
+				IBSInfo: c.Params.IBSInfo,
+				client:  c.Params.client,
+			}
+			copier.Copy(&ibsQuery.IBSQueryReq, &c.Params.IBSQuery)
+			ibsQuery.IBSQueryReq.FileName = tf.FileName
+			logger.Info("request IBS to get task_id,file size, params: %+v", ibsQuery)
+			queryResp, err := ibsQuery.BsQuery(ibsQuery.IBSQueryReq)
+			if err != nil {
+				return err
+			} else if queryResp.Num != 1 {
+				return errors.Errorf(
+					"expect 1 task_id but got %d %+v. request params:%+v",
+					queryResp.Num, queryResp, ibsQuery.IBSQueryReq,
+				)
+			}
+			task := queryResp.Detail[0]
+			if task.FileName != tf.FileName {
+				return errors.Errorf("file_name %s is not the same as remote %s", tf.FileName, task.FileName)
+			} else if skip, err := c.skipFileExists(f, task.Size); err != nil {
+				return err
+			} else if skip {
+				taskFilesExist = append(taskFilesExist, tf.FileName)
+				copier.Copy(&tf, &task)
+				c.Params.TaskFiles[i] = tf // also update the download object; PostCheck needs it (size)
+				continue
+			}
+			copier.Copy(&tf, &task)
+			c.Params.TaskFiles[i] = tf
+			if task.Status != BACKUP_TASK_SUCC {
+				// TODO: ideally an option to continue downloading the other files when some are abnormal
+				return errors.Errorf("file abnormal status=%s %s", task.Status, task.FileName)
+			}
+			c.Params.taskIdSlice = append(c.Params.taskIdSlice, task.TaskId)
+		} else {
+			return errors.New("task_id and file_name cannot be empty both")
+		}
+	}
+	logger.Info("files already exists and skip download: %+v", taskFilesExist)
+	if c.Params.IBSRecoverReq.TaskidList != "" {
+		taskIdList := util.SplitAnyRuneTrim(c.Params.IBSRecoverReq.TaskidList, ",")
+		c.Params.taskIdSlice = append(c.Params.taskIdSlice, taskIdList...)
+		c.Params.taskIdSlice = util.UniqueStrings(c.Params.taskIdSlice)
+	}
+	c.Params.IBSRecoverReq.TaskidList = strings.Join(c.Params.taskIdSlice, ",")
+	logger.Info("params for recover: %+v", c.Params)
+	return nil
+}
+
+// skipFilesAndInitWild initializes the files found via task_files_wild into task_files.
+// The caller should then run skipFilesAndInit to decide which files need downloading
+func (c *IBSRecoverComp) skipFilesAndInitWild() error {
+	wild := c.Params.TaskFilesWild
+	ibsQuery := &IBSQueryParam{
+		IBSInfo: c.Params.IBSInfo,
+		client:  c.Params.client,
+	}
+	copier.Copy(&ibsQuery.IBSQueryReq, &c.Params.IBSQuery)
+	ibsQuery.IBSQueryReq.FileName = wild.NameSearch
+	logger.Info("request IBS to get task_id,file size, params: %+v", ibsQuery)
+	queryResp, err := ibsQuery.BsQuery(ibsQuery.IBSQueryReq)
+	if err != nil {
+		return err
+	}
+	/*
+		dbport := 3306
+		// binlogXX.xxx  binlog.xxx: binlog file names omit the port when port=3306
+		regBinlog := regexp.MustCompile(fmt.Sprintf(`^.+%d\.\d+(\..*)*$`, dbport))
+		// (app)_(host)_(ip)_(port)_(date)_(time).XXX (app)_(host)_(ip)_(port)_(date)_(time)_xtra.XXX: full backup file names always carry the port
+		regFullFile := regexp.MustCompile(fmt.Sprintf(`.+_.+_.+_%d_\d+_\d+.+`, dbport))
+		regFullNonData := regexp.MustCompile(`.*(\.priv|\.info)`)
+	*/
+	reg := regexp.MustCompile(wild.NameRegex)
+	for _, task := range queryResp.Detail {
+		// apply the optional name_regex and file_tag filters; files passing both are queued
+		if wild.NameRegex != "" && !reg.MatchString(task.FileName) {
+			logger.Info("file %s not match file_name_regex", task.FileName)
+			continue
+		}
+		if wild.FileTag != "" && wild.FileTag != task.FileTag {
+			logger.Info("file %s %s not match file_tag %s", task.FileName, task.FileTag, wild.FileTag)
+			continue
+		}
+		taskFile := TaskFilesExact{}
+		copier.Copy(&taskFile, &task)
+		c.Params.TaskFiles = append(c.Params.TaskFiles, taskFile)
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/ieg_backupsys.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/ieg_backupsys.go
new file mode 100644
index 0000000000..744c235e5e
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/ieg_backupsys.go
@@ -0,0 +1,330 @@
+package backup_download
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/common/go-pubpkg/validate"
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// IBS = IegBackupSystem
+
+const (
+	// BACKUP_TASK_SUCC backup task status code for success
+	BACKUP_TASK_SUCC string = "4"
+	// FILE_EXPIRED the file has expired
+	FILE_EXPIRED string = "1"
+	// FILE_NOT_EXPIRED the file has not expired
+	FILE_NOT_EXPIRED string = "0"
+	// MYSQL_FULL_BACKUP file tag for mysql full backups
+	MYSQL_FULL_BACKUP string = "MYSQL_FULL_BACKUP"
+	// INCREMENT_BACKUP file tag for incremental backups
+	INCREMENT_BACKUP string = "INCREMENT_BACKUP"
+	// MYSQL_PRIV_FILE file tag for mysql privilege files
+	MYSQL_PRIV_FILE string = "MYSQL_PRIV_FILE"
+	// MYSQL_INFO_FILE file tag for mysql info files
+	MYSQL_INFO_FILE string = "MYSQL_INFO_FILE"
+)
+
+// IBSParams the request-parameter envelope of the backup system api
+type IBSParams struct {
+	Params struct {
+		Version     string `json:"version"`
+		RequestInfo struct {
+			BaseInfo   *IBSBaseInfo `json:"base_info"`
+			DetailInfo interface{}  `json:"detail_info"`
+		} `json:"request_info"`
+	} `json:"params"`
+}
+
+// NewIBSParams builds a request envelope carrying the given base info
+func NewIBSParams(info *IBSBaseInfo) *IBSParams {
+	params := &IBSParams{}
+	params.Params.Version = "1.0"
+	params.Params.RequestInfo.BaseInfo = info
+	return params
+}
+
+// IBSBaseInfo godoc
+type IBSBaseInfo struct {
+	// ieg backup system api url; the /query and /recover suffixes are appended to it for requests.
+	// can be taken from the env var IBS_INFO_url
+	Url string `json:"url" validate:"required" example:"http://127.0.0.1/backupApi" env:"IBS_INFO_url" envDefault:"http://127.0.0.1/backupApi"`
+	// application identifier, i.e. which system is calling this api; can be taken from the env var IBS_INFO_sys_id
+	SysID string `json:"sys_id" validate:"required" env:"IBS_INFO_sys_id"`
+	// 16-char string issued by the backup system; can be taken from the env var IBS_INFO_key
+	Key string `json:"key" validate:"required" env:"IBS_INFO_key,unset"`
+	// OA authentication ticket, a long string usually appended to intranet application urls to verify user identity; may be left empty
+	Ticket string `json:"ticket"`
+}
+
+// IBSQueryResult one record in a backup-file query result
+type IBSQueryResult struct {
+	// task id, used for downloading
+	TaskId string `json:"task_id"`
+	// when the backup task was reported
+	Uptime string `json:"uptime"`
+	// last modification time of the file
+	FileLastMtime string `json:"file_last_mtime"`
+	// IP that reported this backup task
+	SourceIp   string `json:"source_ip"`
+	CreateTime string `json:"createTime"`  // not a backup-system field; parsed from the file name for full backups, read from the file for binlogs
+	SourcePort string `json:"source_port"` // not a backup-system field
+	Path       string `json:"path"`        // not a backup-system field
+	Md5        string `json:"md5"`
+
+	FileName string `json:"file_name"`
+	// file size
+	Size    string `json:"size"`
+	FileTag string `json:"file_tag"`
+	// file status
+	Status string `json:"status"`
+	// backup status message, e.g. 'done, success', 'Fail: bad md5'
+	Bkstif     string `json:"bkstif"`
+	ExpireTime string `json:"expire_time"`
+	Expired    string `json:"expired"`
+}
+
+// BackupFileIsOk reports whether the backup task succeeded and the file has not expired
+func (c IBSQueryResult) BackupFileIsOk() bool {
+	return c.Status == BACKUP_TASK_SUCC && c.Expired == FILE_NOT_EXPIRED
+}
+
+// IBSQueryReq request params for querying backup files
+type IBSQueryReq struct {
+	// source IP, i.e. the machine that submitted the backup task
+	SourceIp string `json:"source_ip" validate:"required"`
+	// submission start time
+	BeginDate string `json:"begin_date" validate:"required"`
+	// submission end time; together with begin_date this forms a range, which preferably should not exceed 3 days
+	EndDate string `json:"end_date" validate:"required"`
+	// file name
+	FileName string `json:"filename" validate:"required"`
+}
+
+// IBSQueryResp response of a backup-file query
+type IBSQueryResp struct {
+	Code   string           `json:"code"`
+	Msg    string           `json:"msg"`
+	Detail []IBSQueryResult `json:"detail"`
+	Num    int              `json:"num"`
+}
+
+// IBSQueryRespAbnormal complements IBSQueryResp for the backup system api:
+// with results detail is a struct, with no results detail=""
+type IBSQueryRespAbnormal struct {
+	Code string `json:"code"`
+	Msg  string `json:"msg"`
+
+	Detail string `json:"detail"`
+}
+
+// IBSRecoverReq params for requesting file downloads
+type IBSRecoverReq struct {
+	// comma-separated taskid list; entries derived from task_files are appended here.
+	// usually left empty, with task_id or file_name given in task_files instead
+	TaskidList string `json:"taskid_list,omitempty" example:"10000,100001"`
+	// target IP, i.e. the machine the files are restored onto
+	DestIp string `json:"dest_ip" validate:"required,ip" example:"1.1.1.1"`
+	// user for logging into dest_ip; downloaded files are owned by this user
+	LoginUser string `json:"login_user" validate:"required"`
+	// password of login_user on dest_ip; only needed for the legacy ieg scp download, not for cos downloads
+	LoginPasswd string `json:"login_passwd,omitempty"`
+	Directory   string `json:"diretory" validate:"required" example:"/data/dbbak"` // "diretory" is the backup system's own misspelled parameter name
+	// reason for the recover (for the record)
+	Reason string `json:"reason"`
+}
+
+// IBSRecoverResp TODO
+type IBSRecoverResp struct {
+	Code      string `json:"code"`
+	Msg       string `json:"msg"`
+	RecoverId string `json:"recoverid"`
+}
+
+// IBSRecoverQueryReq params for querying the status of a recover (download) task
+type IBSRecoverQueryReq struct {
+	RecoverId string `json:"recoverid"`
+	DestIp    string `json:"dest_ip"`
+}
+
+// IBSRecoverQueryResp response of a recover-task status query
+type IBSRecoverQueryResp struct {
+	Code string `json:"code"`
+	Msg  string `json:"msg"`
+
+	Todo    int `json:"todo"`
+	Doing   int `json:"doing"`
+	Success int `json:"success"`
+	Fail    int `json:"fail"`
+	Detail  []struct {
+		TaskId   string `json:"task_id"`
+		FileName string `json:"filename"`
+		Status   string `json:"status"`
+		DestPath string `json:"dest_path"`
+		StatusEn string `json:"status_en"`
+	} `json:"detail"`
+}
+
+// BsQuery searches for files in the backup system
+func (r *IBSQueryParam) BsQuery(param IBSQueryReq) (*IBSQueryResp, error) {
+	if err := validate.GoValidateStruct(param, false, false); err != nil {
+		return nil, err
+	}
+	url := fmt.Sprintf("%s%s", r.client.Url, "/query")
+	params := NewIBSParams(&r.IBSInfo)
+	params.Params.RequestInfo.DetailInfo = param
+	logger.Info("request BsQuery %+v", r.IBSQueryReq)
+	result, err := r.client.PostJson(url, params, nil)
+	if err != nil {
+		return nil, errors.WithMessage(err, "query backup system")
+	}
+	resp := IBSQueryResp{}
+	if err := json.Unmarshal(result, &resp); err != nil {
+		respAbn := IBSQueryRespAbnormal{}
+		if err := json.Unmarshal(result, &respAbn); err != nil {
+			return nil, err
+		} else {
+			resp.Code = respAbn.Code
+			resp.Msg = respAbn.Msg
+		}
+	}
+	if resp.Code != "0" {
+		return nil, errors.WithMessage(errors.New(resp.Msg), "backup system query returned code != 0")
+	}
+	if resp.Detail != nil {
+		resp.Num = len(resp.Detail)
+	}
+	logger.Info("response BsQuery %+v", resp)
+	return &resp, nil
+}
+
+// ErrorMessage joins the per-file failure details; empty when nothing failed
+func (r IBSRecoverQueryResp) ErrorMessage() string {
+	if r.Fail == 0 {
+		return ""
+	}
+	var messages []string
+	for _, item := range r.Detail {
+		messages = append(messages, fmt.Sprintf("file:%s, error:%s", item.FileName, item.StatusEn))
+	}
+	return strings.Join(messages, "\n")
+}
+
+// NewRecoverTask submits an asynchronous download for the backup task_ids and returns the recover id
+func (r *IBSRecoverParam) NewRecoverTask(param IBSRecoverReq) (string, error) {
+	url := fmt.Sprintf("%s%s", r.client.Url, "/recover")
+	params := NewIBSParams(&r.IBSInfo)
+	params.Params.RequestInfo.DetailInfo = param
+	logger.Info("request NewRecoverTask %+v", r.IBSRecoverReq)
+	var recoverId string = "0"
+	result, err := r.client.PostJson(url, params, nil)
+	if err != nil {
+		return recoverId, errors.WithMessage(err, "request backup system download")
+	}
+	resp := IBSRecoverResp{}
+	if err := json.Unmarshal(result, &resp); err != nil {
+		return recoverId, err
+	}
+	logger.Info("response NewRecoverTask %+v", resp)
+	if resp.Code != "0" {
+		return recoverId, errors.New(resp.Msg)
+	}
+	recoverId = resp.RecoverId
+	return recoverId, nil
+}
+
+// GetRecoverTaskStatus queries the status of a download (recover) task
+func (r *IBSRecoverParam) GetRecoverTaskStatus(param IBSRecoverQueryReq) (*IBSRecoverQueryResp, error) {
+	url := fmt.Sprintf("%s%s", r.client.Url, "/get_recover_result")
+	params := NewIBSParams(&r.IBSInfo)
+	params.Params.RequestInfo.DetailInfo = param
+	result, err := r.client.PostJson(url, params, nil)
+	if err != nil {
+		return nil, err
+	}
+	resp := IBSRecoverQueryResp{}
+	if err := json.Unmarshal(result, &resp); err != nil {
+		return nil, err
+	}
+	if resp.Code != "0" {
+		return nil, errors.New(resp.Msg)
+	}
+	return &resp, nil
+}
+
+// RecoverAndWaitDone requests an asynchronous download of the backup task_ids and polls until it finishes
+func (r *IBSRecoverParam) RecoverAndWaitDone(param IBSRecoverReq) error {
+	total := len(r.taskIdSlice)
+	if total == 0 {
+		return nil
+	}
+	// request the download, asynchronously
+	recoverId, err := r.NewRecoverTask(param)
+	if err != nil {
+		return err
+	}
+	logger.Info("recoverId:%s", recoverId)
+
+	queryParam := IBSRecoverQueryReq{
+		RecoverId: recoverId,
+		DestIp:    param.DestIp,
+	}
+	var times = r.maxQueryRetryTimes
+	var failTimes = r.maxFailCheckNum
+	process := []int{30, 60, 120, 180, 300}
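+	// progress is logged at stretching intervals (30s..300s); lastIndex cycles back to 0 afterwards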
+	lastFeedBackTime := time.Now()
+	var lastIndex = 0
+	indexLen := len(process)
+	// poll the download result in a loop
+	for {
+		res, err := r.GetRecoverTaskStatus(queryParam)
+		if err != nil {
+			if times > 0 {
+				times--
+				time.Sleep(10 * time.Second)
+				continue
+			}
+			logger.Error(
+				"GetRecoverTaskStatus fail (recoverId:%s,retryTimes:%d,error:%v,param:%v)",
+				recoverId, times, err, queryParam,
+			)
+			return err
+		}
+
+		if res.Success == total {
+			logger.Info("[%d/%d]success,recoverId:%s", res.Success, total, recoverId)
+			break
+		}
+		if res.Fail > 0 {
+			if failTimes > 0 {
+				failTimes--
+				time.Sleep(60 * time.Second)
+				continue
+			}
+			err := errors.Errorf(
+				"[%d/%d]failed pull %d files, recoverId:%s, detail:%s",
+				res.Success, total, res.Fail, recoverId, res.ErrorMessage(),
+			)
+			logger.Error(err.Error())
+			return err
+		}
+
+		if int(time.Since(lastFeedBackTime).Seconds()) > process[lastIndex] {
+			logger.Info(
+				"[%d/%d]todo:%d,doing:%d,success:%d,fail:%d,recoverId:%s",
+				res.Success, total, res.Todo, res.Doing, res.Success, res.Fail, recoverId,
+			)
+			lastIndex++
+			lastFeedBackTime = time.Now()
+			if lastIndex > indexLen-1 {
+				lastIndex = 0
+			}
+		}
+		time.Sleep(10 * time.Second)
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/scp_comp.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/scp_comp.go
new file mode 100644
index 0000000000..c0187f4126
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download/scp_comp.go
@@ -0,0 +1,193 @@
+package backup_download
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/sftp"
+	"fmt"
+	"log"
+	"time"
+)
+
+// DFScpComp runs on the target machine and downloads files over sftp
+type DFScpComp struct {
+	Params DFScpParam `json:"extend"`
+
+	scpConfig sftp.Config
+	scpClient sftp.Client
+	progress  *progress
+}
+
+type progress struct {
+	Success []string
+	Failed  []string
+	Todo    []string
+	Doing   []string
+}
+
+// DFScpParam params for scp/sftp downloads
+type DFScpParam struct {
+	DFBase
+	// download source
+	FileSrc FileSrc `json:"file_src" validate:"required"`
+	// download target
+	FileTgt FileTgt `json:"file_tgt" validate:"required"`
+}
+
+// Example TODO
+func (d *DFScpComp) Example() interface{} {
+	comp := DFScpComp{
+		Params: DFScpParam{
+			DFBase: DFBase{
+				BWLimitMB:   30,
+				Concurrency: 1,
+			},
+			FileSrc: FileSrc{
+				Path:     "/data/dbbak",
+				FileList: []string{"xx.info", "xx"},
+				SSHConfig: SSHConfig{
+					SshHost: "source_host",
+					SshPort: "22",
+					SshUser: "mysql",
+					SshPass: "xx",
+				},
+			},
+			FileTgt: FileTgt{
+				Path: "/data/dbbak",
+			},
+		},
+	}
+	return comp
+}
+
+// Init validates the sftp connection and applies the rate-limit default
+func (d *DFScpComp) Init() error {
+	src := d.Params.FileSrc.SSHConfig
+	scpConfig := sftp.Config{
+		Username: src.SshUser,
+		Password: src.SshPass,
+		Server:   fmt.Sprintf("%s:%s", src.SshHost, src.SshPort),
+		Timeout:  time.Second * 10,
+	}
+	if scpClient, err := sftp.New(scpConfig); err != nil {
+		return err
+	} else {
+		scpClient.Close()
+		// d.sshClient = sshClient
+	}
+	d.scpConfig = scpConfig
+
+	if d.Params.BWLimitMB == 0 {
+		d.Params.BWLimitMB = 20 // 20 MB/s by default
+	}
+	return nil
+}
+
+// PreCheck TODO
+func (d *DFScpComp) PreCheck() error {
+	// TODO: create the local target directory
+	return nil
+}
+
+// PostCheck TODO
+func (d *DFScpComp) PostCheck() error {
+	// TODO: verify file count, md5 and file continuity
+	return nil
+}
+
+// Start TODO
+func (d *DFScpComp) Start() error {
+	if d.progress == nil {
+		d.progress = &progress{
+			Success: []string{},
+			Failed:  []string{},
+			Todo:    []string{},
+			Doing:   []string{},
+		}
+	}
+
+	fileList := d.Params.FileSrc.FileList
+	p := d.Params
+	for _, f := range fileList {
+		if cmutil.HasElem(f, d.progress.Success) {
+			continue
+		}
+		err := sftp.Download(d.scpConfig, p.FileSrc.Path, p.FileTgt.Path, f, p.BWLimitMB) // TODO: download timeout 2h
+		if err != nil {
+			log.Println(err)
+			d.progress.Failed = append(d.progress.Failed, f)
+			return err
+		}
+		d.progress.Success = append(d.progress.Success, f)
+	}
+
+	return nil
+}
+
+// Pause TODO
+func (d *DFScpComp) Pause() error {
+	return nil
+}
+
+// Stop TODO
+func (d *DFScpComp) Stop() error {
+	return nil
+}
+
+// Resume TODO
+func (d *DFScpComp) Resume() error {
+	return d.Start()
+}
+
+// Rollback TODO
+func (d *DFScpComp) Rollback() error {
+	return nil
+}
+
+// GetStatus TODO
+func (d *DFScpComp) GetStatus() error {
+	return nil
+}
+
+// WaitDone TODO
+func (d *DFScpComp) WaitDone() error {
+	totalList := d.Params.FileSrc.FileList
+	for {
+		if len(d.progress.Success)+len(d.progress.Failed) < len(totalList) && len(totalList) > 0 {
+			time.Sleep(5 * time.Second)
+		} else {
+			break
+		}
+	}
+	logger.Info("files download %+v", d.progress)
+
+	if len(d.progress.Failed) > 0 {
+		return fmt.Errorf("files download failed %d", len(d.progress.Failed))
+	}
+	return nil
+}
+
+// SSHConfig ssh connection info
+type SSHConfig struct {
+	SshHost string `json:"ssh_host" validate:"required"`
+	SshPort string `json:"ssh_port" validate:"required"`
+	SshUser string `json:"ssh_user" validate:"required"`
+	SshPass string `json:"ssh_pass"`
+}
+
+// FileSrc download source
+type FileSrc struct {
+	// scp source machine address
+	SSHConfig
+	// directory the source files are in
+	Path  string `json:"path" validate:"required"`
+	Match string `json:"match"`
+	// source file names, relative to path above
+	FileList []string `json:"file_list" validate:"required"`
+}
+
+// FileTgt download target
+type FileTgt struct {
+	// target directory for the downloaded files
+	Path string `json:"path" validate:"required"`
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/base.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/base.go
new file mode 100644
index 0000000000..8bae6b4dcf
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/base.go
@@ -0,0 +1,39 @@
+package components
+
+// BaseInputParam TODO
+type BaseInputParam struct {
+	GeneralParam *GeneralParam `json:"general"`
+	ExtendParam  interface{}   `json:"extend"`
+}
+
+// GeneralParam TODO
+type GeneralParam struct {
+	RuntimeAccountParam RuntimeAccountParam `json:"runtime_account"`
+	RuntimeExtend       RuntimeExtend       `json:"runtime_extend"`
+}
+
+// RuntimeExtend TODO
+type RuntimeExtend struct {
+	MySQLSysUsers []string `json:"mysql_sys_users"`
+}
+
+// RuntimeAccountParam TODO
+type RuntimeAccountParam struct {
+	MySQLAccountParam
+	ProxyAccountParam
+	TdbctlAccoutParam
+}
+
+// GetAccountRepl returns the repl account from GeneralParam, tolerating a nil param
+func GetAccountRepl(g *GeneralParam) MySQLReplAccount {
+	// the address of a struct field can never be nil, so g itself is the only nil check needed
+	if g == nil {
+		return MySQLReplAccount{}
+	}
+	return g.RuntimeAccountParam.MySQLAccountParam.MySQLReplAccount
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/components.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/components.go
new file mode 100644
index 0000000000..7fb03b9cf0
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/components.go
@@ -0,0 +1,2 @@
+// Package components TODO
+package components
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/computil/computil.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/computil/computil.go
new file mode 100644
index 0000000000..527a1153d2
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/computil/computil.go
@@ -0,0 +1,2 @@
+// Package computil TODO
+package computil
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/computil/mysql_operate.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/computil/mysql_operate.go
new file mode 100644
index 0000000000..58733cb157
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/computil/mysql_operate.go
@@ -0,0 +1,324 @@
+package computil
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// StartMySQLParam instance startup parameters
+type StartMySQLParam struct {
+	MediaDir        string // MySQL install directory, e.g. /usr/local/mysql/bin
+	MyCnfName       string // required; the startup config file
+	MySQLUser       string // used to check that mysqld started successfully
+	MySQLPwd        string // used to check that mysqld started successfully
+	Socket          string // when set, the socket is preferred for the mysql connectivity probe
+	Host            string // when socket is not set, probe via ip and port
+	Port            int    // when socket is not set, probe via ip and port
+	SkipSlaveFlag   bool   // --skip-slave-start
+	SkipGrantTables bool   // --skip-grant-tables
+}
+
+// RestartMysqlInstanceNormal restarts a mysql instance using the default install paths
+func RestartMysqlInstanceNormal(inst native.InsObject) error {
+	mycnf := util.GetMyCnfFileName(inst.Port)
+	startParam := StartMySQLParam{
+		Host:      inst.Host,
+		Port:      inst.Port,
+		Socket:    inst.Socket,
+		MySQLUser: inst.User,
+		MySQLPwd:  inst.Pwd,
+
+		MyCnfName: mycnf,
+		MediaDir:  cst.MysqldInstallPath,
+	}
+	if _, err := startParam.RestartMysqlInstance(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// IsInstanceRunning reports whether the instance passes the process and connectivity check
+func IsInstanceRunning(inst native.InsObject) bool {
+	mycnf := util.GetMyCnfFileName(inst.Port)
+	startParam := StartMySQLParam{
+		Host:      inst.Host,
+		Port:      inst.Port,
+		Socket:    inst.Socket,
+		MySQLUser: inst.User,
+		MySQLPwd:  inst.Pwd,
+
+		MyCnfName: mycnf,
+		MediaDir:  cst.MysqldInstallPath,
+	}
+	return startParam.CheckMysqlProcess() == nil
+}
+
+// RestartMysqlInstance shuts the instance down over its socket and starts it again
+func (p *StartMySQLParam) RestartMysqlInstance() (pid int, err error) {
+	if err := ShutdownMySQLBySocket2(p.MySQLUser, p.MySQLPwd, p.Socket); err != nil {
+		return 0, err
+	}
+	return p.StartMysqlInstance()
+}
+
+// StartMysqlInstance starts mysqld.
+// If MySQLUser is empty, connectivity is not checked
+func (p *StartMySQLParam) StartMysqlInstance() (pid int, err error) {
+	var (
+		mediaDir  = p.MediaDir
+		numaStr   = osutil.GetNumaStr()
+		myCnfName = p.MyCnfName
+		startCmd  = fmt.Sprintf(
+			`ulimit -n 204800; 
+		cd %s && %s ./bin/mysqld_safe --defaults-file=%s --user=mysql `, mediaDir, numaStr, myCnfName,
+		)
+	)
+	if p.SkipSlaveFlag {
+		startCmd += "--skip-slave-start "
+	}
+	if p.SkipGrantTables {
+		startCmd += " --skip-grant-tables "
+	}
+	startCmd += " &"
+	logger.Info(fmt.Sprintf("execute mysqld_safe: [%s]", startCmd))
+	pid, err = osutil.RunInBG(false, startCmd)
+	if err != nil {
+		return
+	}
+	return pid, util.Retry(
+		util.RetryConfig{
+			Times:     40,
+			DelayTime: 5 * time.Second,
+		}, func() error { return p.CheckMysqlProcess() },
+	)
+}
+
+// CheckMysqlProcess verifies mysqld_safe is in the process list and, if MySQLUser is set, that mysql accepts connections
+func (p *StartMySQLParam) CheckMysqlProcess() (err error) {
+	if p.MyCnfName == "" {
+		p.MyCnfName = util.GetMyCnfFileName(p.Port)
+	}
+	if p.MediaDir == "" {
+		p.MediaDir = cst.MysqldInstallPath
+	}
+	checkMysqldCmd := fmt.Sprintf("ps -efwww | grep %s|grep 'mysqld_safe '| grep -v grep", p.MyCnfName)
+	out, err := osutil.ExecShellCommand(false, checkMysqldCmd)
+	if err != nil {
+		// a shell error probably does not deserve a retry, but keep retrying for now
+		errStr := fmt.Sprintf("exec shell error %s", checkMysqldCmd)
+		err = errors.WithMessage(err, errStr)
+		logger.Error(err.Error())
+		return
+	}
+	regStr := fmt.Sprintf("mysqld_safe\\s+--defaults-file=%s", p.MyCnfName)
+	if !regexp.MustCompile(regStr).MatchString(out) {
+		logger.Info("regStr[%s] not match result[%s] ", regStr, out)
+		return fmt.Errorf("ps grep 不到相关进程,可能没有完全启动请稍等")
+	}
+
+	if p.MySQLUser == "" {
+		// skip the connectivity check
+		return nil
+	}
+	addr := fmt.Sprintf("%s:%d", p.Host, p.Port)
+	dsn := native.DsnByTcp(addr, p.MySQLUser, p.MySQLPwd)
+	if p.Socket != "" {
+		dsn = native.DsnBySocket(p.Socket, p.MySQLUser, p.MySQLPwd)
+	}
+	// no error means the connection succeeded
+	if _, err = native.NewDbWorker(dsn); err != nil {
+		return
+	}
+	logger.Info("connect %s successfully", addr)
+	return err
+}
+
+// ShutdownMySQLParam parameters for shutting down a mysqld instance
+type ShutdownMySQLParam struct {
+	MySQLUser string
+	MySQLPwd  string
+	Socket    string
+}
+
+// ShutdownMySQLBySocket shuts MySQL down through its socket, which is the most reliable way.
+//  1. A shutdown timeout may still need to be handled.
+//  2. Consider driving mysqladmin via expect to avoid exposing the password.
+func (param ShutdownMySQLParam) ShutdownMySQLBySocket() (err error) {
+	shellCMD := fmt.Sprintf("mysqladmin -u%s -p%s -S %s shutdown", param.MySQLUser, param.MySQLPwd, param.Socket)
+	output, err := osutil.ExecShellCommand(false, shellCMD)
+	if err != nil {
+		if !strings.Contains(err.Error(), "Can't connect to local MySQL server") {
+			logger.Info("shutdown mysql error %s,output:%s. cmd:%s", err.Error(), output, shellCMD)
+			return err
+		} else {
+			logger.Warn("mysqld %s is not running: %s", param.Socket, err.Error())
+		}
+	}
+	return JudgeMysqldShutDown(param.Socket)
+}
+
+// ShutdownMySQLBySocket2 shuts MySQL down through its socket, which is the most reliable way.
+//  1. A shutdown timeout may still need to be handled.
+//  2. Consider driving mysqladmin via expect to avoid exposing the password.
+func ShutdownMySQLBySocket2(user, password, socket string) (err error) {
+	param := &ShutdownMySQLParam{MySQLUser: user, MySQLPwd: password, Socket: socket}
+	return param.ShutdownMySQLBySocket()
+}
+
+// ForceShutDownMySQL force-stops mysqld: it tries mysqladmin shutdown first,
+// then falls back to kill -15 on the matching processes
+func (param ShutdownMySQLParam) ForceShutDownMySQL() (err error) {
+	shellCMD := fmt.Sprintf("mysqladmin -u%s -p%s -S%s shutdown", param.MySQLUser, param.MySQLPwd, param.Socket)
+	output, err := osutil.ExecShellCommand(false, shellCMD)
+	if err != nil {
+		logger.Warn("使用mysqladmin shutdown 失败:%s output:%s", err.Error(), string(output))
+		// 如果用 shutdown 执行失败
+		// 尝试用 kill -2 去停止mysql
+		if err = KillMySQLD(fmt.Sprintf("socket=%s", param.Socket)); err != nil {
+			return err
+		}
+	}
+	return JudgeMysqldShutDown(param.Socket)
+}
+
+// JudgeMysqldShutDown returns nil once ps no longer finds a mysqld matching prefix
+func JudgeMysqldShutDown(prefix string) (err error) {
+	logger.Info("start checking mysqld process .... grep prefix is %s", prefix)
+	// 120 second timeout
+	ot := time.NewTimer(120 * time.Second)
+	defer ot.Stop()
+	tk := time.NewTicker(2 * time.Second)
+	for {
+		select {
+		case <-ot.C:
+			return errors.New("停止MySQL超时")
+		case <-tk.C:
+			// cannot grep for bare "mysqld": it would also match e.g. "mysqldata"
+			shellCMD := fmt.Sprintf("ps -efwww | grep %s|grep -E 'mysqld |mysqld_safe'| grep -v grep|wc -l", prefix)
+			out, err := osutil.ExecShellCommand(false, shellCMD)
+			if err != nil {
+				logger.Info("execute %s get an error:%s", shellCMD, err.Error())
+				return err
+			}
+			logger.Info("shell output information is %s", out)
+			if strings.TrimSpace(out) == "0" {
+				logger.Info("mysql has been exited,success~ ,process count is %s", out)
+				return nil
+			}
+			logger.Warn("mysqld 进程还在,等待进程关闭...")
+		}
+	}
+}
+
+// KillReMindMySQLClient kill -9 leftover command-line mysql client connections.
+// regexpStr is the pattern used to grep for the client pids.
+func KillReMindMySQLClient(regexpStr string) error {
+	var err error
+	logger.Info("start kill -9 mysql")
+	if strings.TrimSpace(regexpStr) == "" {
+		return errors.New("grep 参数为空,不允许!!!")
+	}
+	killCommand := fmt.Sprintf(
+		"ps -efwww|grep ' %s '|egrep -v mysqld|grep mysql|egrep -v grep |awk '{print $2}'|xargs  kill -9", regexpStr,
+	)
+	logger.Info("kill command is %s", killCommand)
+	_, err = osutil.ExecShellCommand(false, killCommand)
+	if err != nil {
+		logger.Error("execute %s got an error:%s", killCommand, err.Error())
+	}
+	killSocketCommand := fmt.Sprintf(
+		"ps -efwww|grep '%s'|egrep -v 'mysqld |mysqld_safe'|grep mysql|grep mysql.sock|egrep -v grep |awk '{print $2}'|xargs  kill -9",
+		regexpStr,
+	)
+	logger.Info("kill command is %s", killSocketCommand)
+	_, err = osutil.ExecShellCommand(false, killSocketCommand)
+	if err != nil {
+		logger.Error("execute %s got an error:%s", killSocketCommand, err.Error())
+	}
+	return err
+}
+
+// KillMySQLD sends kill -15 to mysqld processes.
+// regexpStr is the pattern used to grep for the process ids.
+func KillMySQLD(regexpStr string) error {
+	logger.Info("start kill -15 mysqld")
+	if strings.TrimSpace(regexpStr) == "" {
+		return errors.New("grep 参数为空,不允许!!!")
+	}
+	shellCMD := fmt.Sprintf("ps -efwww|grep %s|egrep -v grep |wc -l", regexpStr)
+	out, err := osutil.ExecShellCommand(false, shellCMD)
+	if err != nil {
+		logger.Error("execute %s get an error:%s", shellCMD, err.Error())
+		return err
+	}
+	// this should not be treated as an error
+	if strings.TrimSpace(out) == "0" {
+		logger.Info("no matching process found, mysqld can be considered shut down")
+		return nil
+	}
+	logger.Info("will kill this %s", out)
+	killCmd := fmt.Sprintf("ps -efwww|grep %s|egrep -v grep |awk '{print $2}'|xargs  kill -15", regexpStr)
+	logger.Info(" kill command is %s", killCmd)
+	kOutput, err := osutil.ExecShellCommand(false, killCmd)
+	if err != nil {
+		logger.Error("execute %s get an error:%s,output:%s", killCmd, err.Error(), string(kOutput))
+		return err
+	}
+	return nil
+}
+
+// GetMysqlSystemDatabases returns the MySQL system database list by version:
+// < 5.0: "mysql"
+// < 5.5: "information_schema", "mysql"
+// < 5.7: "information_schema", "mysql", "performance_schema"
+// > 5.7: "information_schema", "mysql", "performance_schema", "sys"
+func GetMysqlSystemDatabases(version string) []string {
+	DBs := []string{"information_schema", "mysql", "performance_schema"}
+
+	if mysqlutil.MySQLVersionParse(version) > mysqlutil.MySQLVersionParse("5.7.0") {
+		DBs = append(DBs, "sys")
+	} else if mysqlutil.MySQLVersionParse(version) < mysqlutil.MySQLVersionParse("5.0.0") {
+		DBs = []string{"mysql"}
+	} else if mysqlutil.MySQLVersionParse(version) < mysqlutil.MySQLVersionParse("5.5.0") {
+		DBs = []string{"information_schema", "mysql"}
+	}
+	return DBs
+}
+
+// GetGcsSystemDatabases returns the MySQL system databases plus the GCS monitoring/management databases:
+// < 5.0: "mysql", native.INFODBA_SCHEMA, "test"
+// < 5.5: "information_schema", "mysql", native.INFODBA_SCHEMA, "test"
+// < 5.7: "information_schema", "mysql", "performance_schema", native.INFODBA_SCHEMA, "test"
+// > 5.7: "information_schema", "mysql", "performance_schema", "sys", native.INFODBA_SCHEMA, "test"
+func GetGcsSystemDatabases(version string) []string {
+	DBs := GetMysqlSystemDatabases(version)
+	DBs = append(DBs, native.INFODBA_SCHEMA)
+	DBs = append(DBs, native.TEST_DB)
+	return DBs
+}
+
+// GetGcsSystemDatabasesIgnoreTest same as GetGcsSystemDatabases but without the "test" database
+func GetGcsSystemDatabasesIgnoreTest(version string) []string {
+	DBs := GetMysqlSystemDatabases(version)
+	DBs = append(DBs, native.INFODBA_SCHEMA)
+	return DBs
+}
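For orientation, a minimal usage sketch of the start/stop primitives above; the install dir, config path, socket and credentials below are illustrative placeholders, not values shipped with dbactuator:

```go
package main

import (
	"log"

	"dbm-services/mysql/db-tools/dbactuator/pkg/components/computil"
)

func main() {
	start := computil.StartMySQLParam{
		MediaDir:  "/usr/local/mysql", // assumed install dir
		MyCnfName: "/etc/my.cnf.3306", // assumed config path
		MySQLUser: "admin",            // leave empty to skip the connect check
		MySQLPwd:  "xxx",
		Socket:    "/data/mysql/3306/mysql.sock",
		Port:      3306,
	}
	pid, err := start.StartMysqlInstance() // mysqld_safe + ps/connect checks
	if err != nil {
		log.Fatalf("start failed: %v", err)
	}
	log.Printf("mysqld_safe running, pid=%d", pid)

	// graceful shutdown through the socket; ForceShutDownMySQL would
	// additionally fall back to kill -15
	stop := computil.ShutdownMySQLParam{MySQLUser: "admin", MySQLPwd: "xxx", Socket: start.Socket}
	if err := stop.ShutdownMySQLBySocket(); err != nil {
		log.Fatalf("shutdown failed: %v", err)
	}
}
```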
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/computil/mysql_operate_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/computil/mysql_operate_test.go
new file mode 100644
index 0000000000..f61b07c39f
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/computil/mysql_operate_test.go
@@ -0,0 +1,42 @@
+package computil_test
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/computil"
+	"testing"
+)
+
+func TestShutDownMySQLByNormal(t *testing.T) {
+	p := computil.ShutdownMySQLParam{
+		Socket:    "/data/mysql/3306/mysql.sock",
+		MySQLUser: "make",
+		MySQLPwd:  "make",
+	}
+	err := p.ShutdownMySQLBySocket()
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log("shutdown succcess")
+}
+
+func TestForceShutDownMySQL(t *testing.T) {
+	p := computil.ShutdownMySQLParam{
+		Socket:    "/data/mysql/3306/mysql.sock",
+		MySQLUser: "make",
+		MySQLPwd:  "xxx",
+	}
+	err := p.ForceShutDownMySQL()
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log("shutdown succcess")
+}
+
+func TestKillReMindMySQLClient(t *testing.T) {
+	t.Log("start testing  TestKillReMindMySQLClient")
+	err := computil.KillReMindMySQLClient("3306")
+	if err != nil {
+		t.Fatalf("kill mysql client failed %s", err.Error())
+		return
+	}
+	t.Log("ending...")
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/crontab/clear_crontab.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/crontab/clear_crontab.go
new file mode 100644
index 0000000000..bde8d5258c
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/crontab/clear_crontab.go
@@ -0,0 +1,58 @@
+package crontab
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	ma "dbm-services/mysql/db-tools/mysql-crond/api"
+	"os"
+)
+
+// ClearCrontabParam in practice more fields may be involved
+type ClearCrontabParam struct {
+}
+
+/*
+	Runs the system init script (the former sysinit.sh):
+	creates the mysql account, and so on
+*/
+
+// CleanCrontab comments out the crontab entries on the machine and shuts down mysql-crond
+func (u *ClearCrontabParam) CleanCrontab() (err error) {
+	logger.Info("start cleaning crontab entries on this machine")
+	if err = osutil.CleanLocalCrontab(); err != nil {
+		return err
+	}
+
+	manager := ma.NewManager("http://127.0.0.1:9999")
+	if err = manager.Quit(); err != nil {
+		logger.Error("shutdown mysql-crond failed: %s", err.Error())
+	} else {
+		logger.Info("shutdown mysql-crond success")
+	}
+
+	return nil
+}
+
+// CleanDBToolsFolder cleans up leftover mysql tooling directories, including:
+// the checksum dir
+// the dbbackup dir
+// the rotate_binlog dir
+// the mysql_crond dir
+// the dbatools dir
+func (u *ClearCrontabParam) CleanDBToolsFolder() (err error) {
+
+	logger.Info("start removing the companion tool directories")
+	os.RemoveAll(cst.ChecksumInstallPath)
+	os.RemoveAll(cst.DbbackupGoInstallPath)
+	os.RemoveAll(cst.DBAToolkitPath)
+	os.RemoveAll(cst.MySQLCrondInstallPath)
+	os.RemoveAll(cst.RotateBinlogInstallPath)
+	os.RemoveAll(cst.MySQLMonitorInstallPath)
+	os.RemoveAll(cst.DBAReportBase)
+	return nil
+
+}
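A sketch of how the two cleanup steps above compose when decommissioning a machine, assuming the crontab package import path used by this patch:

```go
package main

import (
	"log"

	"dbm-services/mysql/db-tools/dbactuator/pkg/components/crontab"
)

func main() {
	p := crontab.ClearCrontabParam{}
	// comment out crontab entries and stop mysql-crond first,
	// then remove the tool directories they would otherwise re-populate
	if err := p.CleanCrontab(); err != nil {
		log.Fatalf("clean crontab failed: %v", err)
	}
	if err := p.CleanDBToolsFolder(); err != nil {
		log.Fatalf("clean dbtools dirs failed: %v", err)
	}
}
```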
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/crontab/crontab.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/crontab/crontab.go
new file mode 100644
index 0000000000..0061fab3ab
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/crontab/crontab.go
@@ -0,0 +1,2 @@
+// Package crontab TODO
+package crontab
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/db_base_account.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/db_base_account.go
new file mode 100644
index 0000000000..94d9b133e2
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/db_base_account.go
@@ -0,0 +1,247 @@
+package components
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"fmt"
+	"strings"
+)
+
+// MySQLAccountParam aggregates all built-in MySQL accounts
+type MySQLAccountParam struct {
+	MySQLAdminAccount
+	MySQLMonitorAccount
+	MySQLMonitorAccessAllAccount
+	MySQLReplAccount
+	MySQLDbBackupAccount
+	MySQLYwAccount
+}
+
+// ProxyAccountParam the proxy admin account
+type ProxyAccountParam struct {
+	ProxyAdminUser string `json:"proxy_admin_user,omitempty"` // proxy admin user
+	ProxyAdminPwd  string `json:"proxy_admin_pwd,omitempty"`  // proxy admin pwd
+}
+
+// TdbctlAccoutParam the tdbctl account
+type TdbctlAccoutParam struct {
+	TdbctlUser string `json:"tdbctl_user,omitempty"`
+	TdbctlPwd  string `json:"tdbctl_pwd,omitempty"`
+}
+
+var allPriv = []string{"ALL PRIVILEGES"}
+var ywUserPriv = []string{"SELECT", "CREATE", "RELOAD", "PROCESS", "SHOW DATABASES", "REPLICATION CLIENT"}
+var backupUserPriv = []string{"SELECT", "RELOAD", "PROCESS", "SHOW DATABASES", "REPLICATION CLIENT", "SHOW VIEW",
+	"TRIGGER", "EVENT", "SUPER"}
+var backupUserPriv80 = []string{"SELECT", "RELOAD", "PROCESS", "SHOW DATABASES", "REPLICATION CLIENT", "SHOW VIEW",
+	"TRIGGER", "EVENT", "SUPER", "BACKUP_ADMIN"} // SUPER is deprecated
+var replUserPriv = []string{"REPLICATION SLAVE", "REPLICATION CLIENT"}
+var monitorUserPriv = []string{"SELECT", "RELOAD", "PROCESS", "SHOW DATABASES", "SUPER", "REPLICATION CLIENT",
+	"SHOW VIEW", "EVENT", "TRIGGER", "CREATE TABLESPACE"}
+var monitorAccessallPriv = []string{"SELECT", "INSERT", "DELETE"}
+
+// MySQLAccountPrivs the privileges of a built-in account
+type MySQLAccountPrivs struct {
+	User       string
+	PassWd     string
+	AuthString string // a pre-hashed password may also be granted directly
+	WithGrant  bool
+	PrivParis  []PrivPari
+	// system accounts default to object *.*
+	AccessObjects []string // %, localhost
+}
+
+// PrivPari one grant object and its privilege list
+type PrivPari struct {
+	Object string   // the object (db.table) the grant applies to
+	Privs  []string // SELECT, RELOAD, PROCESS, SHOW DATABASES, SUPER, REPLICATION CLIENT, SHOW VIEW, EVENT, TRIGGER
+}
+}
+
+// useAuthString use the already-hashed password:
+// the plaintext password is empty but AuthString is not
+func (p *MySQLAccountPrivs) useAuthString() bool {
+	return util.StrIsEmpty(p.PassWd) && !util.StrIsEmpty(p.AuthString)
+}
+
+// GenerateInitSql renders the CREATE USER / GRANT statements,
+// staying compatible with spider grants
+func (p *MySQLAccountPrivs) GenerateInitSql(version string) (initPrivSqls []string) {
+	var needCreate bool = mysqlutil.MySQLVersionParse(version) >= mysqlutil.MySQLVersionParse("8.0") &&
+		!strings.Contains(version, "tspider")
+	withGrant := ""
+	encr := "BY"
+	pwd := p.PassWd
+	if p.useAuthString() {
+		encr = "AS"
+		pwd = p.AuthString
+	}
+	if p.WithGrant {
+		withGrant = "WITH GRANT OPTION"
+	}
+	for _, accHost := range p.AccessObjects {
+		for _, pp := range p.PrivParis {
+			if needCreate {
+				initPrivSqls = append(initPrivSqls, fmt.Sprintf(
+					"CREATE USER IF NOT EXISTS  %s@'%s'  IDENTIFIED WITH mysql_native_password  %s '%s'", p.User, accHost, encr, pwd))
+				initPrivSqls = append(initPrivSqls, fmt.Sprintf("GRANT %s ON %s TO %s@'%s' %s;", strings.Join(pp.Privs, ","),
+					pp.Object, p.User, accHost, withGrant))
+			} else {
+				initPrivSqls = append(initPrivSqls, fmt.Sprintf("GRANT %s ON %s TO %s@'%s'  IDENTIFIED %s '%s' %s;",
+					strings.Join(pp.Privs, ","), pp.Object, p.User, accHost, encr, pwd, withGrant))
+			}
+		}
+	}
+	return
+}
+
+// MySQLAdminAccount the MySQL admin account
+type MySQLAdminAccount struct {
+	// mysql admin user, env GENERAL_ACCOUNT_admin_user
+	AdminUser string `json:"admin_user,omitempty" env:"GENERAL_ACCOUNT_admin_user"`
+	// mysql admin password, env GENERAL_ACCOUNT_admin_pwd
+	AdminPwd string `json:"admin_pwd,omitempty" env:"GENERAL_ACCOUNT_admin_pwd,unset"`
+}
+
+// GetAccountPrivs admin privileges: ALL PRIVILEGES on *.* WITH GRANT OPTION
+func (m MySQLAdminAccount) GetAccountPrivs(localIp string) MySQLAccountPrivs {
+	return MySQLAccountPrivs{
+		User:   m.AdminUser,
+		PassWd: m.AdminPwd,
+		PrivParis: []PrivPari{
+			{
+				Object: "*.*",
+				Privs:  allPriv,
+			},
+		},
+		WithGrant:     true,
+		AccessObjects: []string{"localhost", localIp},
+	}
+}
+
+// MySQLMonitorAccount the monitor account:
+// GRANT SELECT, RELOAD, PROCESS, SHOW DATABASES, SUPER, REPLICATION CLIENT, SHOW VIEW, EVENT, TRIGGER, CREATE TABLESPACE ON *.* TO '%s'@'%s' IDENTIFIED BY '%s'
+type MySQLMonitorAccount struct {
+	// mysql monitor user, env GENERAL_ACCOUNT_monitor_user
+	MonitorUser string `json:"monitor_user,omitempty" env:"GENERAL_ACCOUNT_monitor_user"`
+	// mysql monitor password, env GENERAL_ACCOUNT_monitor_pwd
+	MonitorPwd string `json:"monitor_pwd,omitempty" env:"GENERAL_ACCOUNT_monitor_pwd,unset"`
+}
+
+// GetAccountPrivs monitor privileges on *.* plus full access to the infodba schema
+func (m MySQLMonitorAccount) GetAccountPrivs(grantHosts ...string) MySQLAccountPrivs {
+	p := MySQLAccountPrivs{
+		User:   m.MonitorUser,
+		PassWd: m.MonitorPwd,
+		PrivParis: []PrivPari{
+			{
+				Object: "*.*",
+				Privs:  monitorUserPriv,
+			},
+			{
+				Object: fmt.Sprintf("%s.*", native.INFODBA_SCHEMA),
+				Privs:  allPriv,
+			},
+		},
+		WithGrant:     false,
+		AccessObjects: []string{"localhost"},
+	}
+	p.AccessObjects = append(p.AccessObjects, grantHosts...)
+	return p
+}
+
+// MySQLMonitorAccessAllAccount the monitor@% account
+type MySQLMonitorAccessAllAccount struct {
+	MonitorAccessAllUser string `json:"monitor_access_all_user,omitempty"` // mysql monitor@%
+	MonitorAccessAllPwd  string `json:"monitor_access_all_pwd,omitempty"`  // mysql monitor@% password
+}
+
+// GetAccountPrivs monitor@% privileges on the infodba schema
+func (m MySQLMonitorAccessAllAccount) GetAccountPrivs(grantHosts ...string) MySQLAccountPrivs {
+	p := MySQLAccountPrivs{
+		User:   m.MonitorAccessAllUser,
+		PassWd: m.MonitorAccessAllPwd,
+		PrivParis: []PrivPari{
+			{
+				Object: fmt.Sprintf("%s.*", native.INFODBA_SCHEMA),
+				Privs:  monitorAccessallPriv,
+			},
+		},
+		AccessObjects: []string{"%"},
+	}
+	p.AccessObjects = append(p.AccessObjects, grantHosts...)
+	return p
+}
+
+// MySQLReplAccount the replication account:
+// "GRANT REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO %s@%s IDENTIFIED BY '%s';"
+type MySQLReplAccount struct {
+	// repl user, env GENERAL_ACCOUNT_repl_user
+	ReplUser string `json:"repl_user,omitempty" env:"GENERAL_ACCOUNT_repl_user"`
+	// repl password, env GENERAL_ACCOUNT_repl_pwd
+	ReplPwd string `json:"repl_pwd,omitempty" env:"GENERAL_ACCOUNT_repl_pwd,unset"`
+}
+
+// GetAccountPrivs replication privileges on *.*
+func (m MySQLReplAccount) GetAccountPrivs(grantHosts ...string) MySQLAccountPrivs {
+	return MySQLAccountPrivs{
+		User:   m.ReplUser,
+		PassWd: m.ReplPwd,
+		PrivParis: []PrivPari{
+			{
+				Object: "*.*",
+				Privs:  replUserPriv,
+			},
+		},
+		WithGrant:     false,
+		AccessObjects: grantHosts,
+	}
+}
+
+// MySQLDbBackupAccount the dbbackup account
+type MySQLDbBackupAccount struct {
+	DbBackupUser string `json:"backup_user,omitempty"` // dbbackup user
+	DbBackupPwd  string `json:"backup_pwd,omitempty"`  // dbbackup pwd
+}
+
+// GetAccountPrivs backup privileges; 8.0 additionally needs BACKUP_ADMIN
+func (m MySQLDbBackupAccount) GetAccountPrivs(is80 bool, grantHosts ...string) MySQLAccountPrivs {
+	privPairs := []PrivPari{
+		{Object: "*.*", Privs: backupUserPriv},
+	}
+	if is80 {
+		privPairs = []PrivPari{
+			{Object: "*.*", Privs: backupUserPriv80},
+		}
+	}
+	return MySQLAccountPrivs{
+		User:          m.DbBackupUser,
+		PassWd:        m.DbBackupPwd,
+		PrivParis:     privPairs,
+		WithGrant:     false,
+		AccessObjects: grantHosts,
+	}
+}
+
+// MySQLYwAccount the yw (ops) account:
+// SELECT, CREATE, RELOAD, PROCESS, SHOW DATABASES, REPLICATION CLIENT on localhost
+type MySQLYwAccount struct {
+	YwUser string `json:"yw_user,omitempty" env:"GENERAL_ACCOUNT_yw_user"`     // yw user
+	YwPwd  string `json:"yw_pwd,omitempty" env:"GENERAL_ACCOUNT_yw_pwd,unset"` // yw password
+}
+
+// GetAccountPrivs yw privileges on *.* for localhost
+func (m MySQLYwAccount) GetAccountPrivs() MySQLAccountPrivs {
+	return MySQLAccountPrivs{
+		User:   m.YwUser,
+		PassWd: m.YwPwd,
+		PrivParis: []PrivPari{
+			{
+				Object: "*.*",
+				Privs:  ywUserPriv,
+			},
+		},
+		WithGrant:     false,
+		AccessObjects: []string{"localhost"},
+	}
+}
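A sketch of how an account definition turns into SQL; the user and password below are placeholders:

```go
package main

import (
	"fmt"

	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
)

func main() {
	monitor := components.MySQLMonitorAccount{
		MonitorUser: "monitor", // placeholder
		MonitorPwd:  "xxx",     // placeholder
	}
	// grant on localhost plus one extra host; for a 5.7-style version
	// GenerateInitSql emits GRANT ... IDENTIFIED BY, while for 8.0 it emits
	// CREATE USER IF NOT EXISTS followed by a plain GRANT
	privs := monitor.GetAccountPrivs("127.0.0.1")
	for _, sql := range privs.GenerateInitSql("5.7.20") {
		fmt.Println(sql)
	}
}
```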
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/dbconfig/dbconfig.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/dbconfig/dbconfig.go
new file mode 100644
index 0000000000..6a418f6e16
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/dbconfig/dbconfig.go
@@ -0,0 +1,2 @@
+// Package dbconfig TODO
+package dbconfig
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/dbconfig/query_change.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/dbconfig/query_change.go
new file mode 100644
index 0000000000..041f57e44e
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/dbconfig/query_change.go
@@ -0,0 +1 @@
+package dbconfig
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/fileserver/README.md b/dbm-services/mysql/db-tools/dbactuator/pkg/components/fileserver/README.md
new file mode 100644
index 0000000000..e453dd114a
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/fileserver/README.md
@@ -0,0 +1,9 @@
+### Component: simple file server
+
+Exposes a given directory over HTTP for download. Useful when rebuilding a standby database, to fetch a backup from another machine; when the OS forbids ssh logins (scp/sftp), this service can be started temporarily to retrieve backup files.
+
+Features:
+1. basic auth with a randomly generated password
+2. source-IP access restriction, with the ability to add allowed IPs dynamically
+3. a cap on concurrent connections; excess requests wait
+4. automatic exit after a maximum idle time
\ No newline at end of file
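A client-side sketch of fetching one file through the service; the address, prefix, file name and credentials are hypothetical (the real values are printed when the server starts):

```go
package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	// hypothetical address/prefix/file; basic auth is mandatory
	req, err := http.NewRequest(http.MethodGet,
		"http://127.0.0.1:18081/databbak18081/backup.tar.gz", nil)
	if err != nil {
		log.Fatal(err)
	}
	req.SetBasicAuth("test_bk_biz_id", "random-pass")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		log.Fatalf("unexpected status: %s", resp.Status)
	}
	f, err := os.Create("backup.tar.gz")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := io.Copy(f, resp.Body); err != nil {
		log.Fatal(err)
	}
}
```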
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/fileserver/acl.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/fileserver/acl.go
new file mode 100644
index 0000000000..d440508b57
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/fileserver/acl.go
@@ -0,0 +1,67 @@
+package fileserver
+
+import (
+	"fmt"
+	"net"
+	"strings"
+)
+
+// ACL a single access-control rule
+type ACL struct {
+	Action string `json:"action"`
+	Who    string `json:"who"`
+	Rule   string `json:"rule"`
+}
+
+// checkACL evaluates acls against the remote address; the first matching rule wins
+// TODO both 1.1.1.1 and 1.1.1.1/32 should be accepted as valid
+func checkACL(acls []string, remoteAddr net.Addr, clientAddr string) error {
+	if len(acls) == 0 {
+		return nil
+	}
+	var remoteIP net.IP
+	if clientAddr == "" {
+		clientAddr = remoteAddr.String()
+	}
+	host, _, err := net.SplitHostPort(clientAddr)
+	if err != nil {
+		return fmt.Errorf("BUG: invalid remote address %q", clientAddr)
+	}
+	remoteIP = net.ParseIP(host)
+	if remoteIP == nil {
+		return fmt.Errorf("BUG: invalid remote host %s", host)
+	}
+	for _, acl := range acls {
+		// TODO(performance): move ACL parsing to config-time to make ACL checks
+		// less expensive
+		i := strings.Index(acl, " ")
+		if i < 0 {
+			return fmt.Errorf("invalid acl: %q (no space found)", acl)
+		}
+		action, who := acl[:i], acl[i+len(" "):]
+		if action != "allow" && action != "deny" {
+			return fmt.Errorf("invalid acl: %q (syntax: allow|deny <ip/mask>|all)", acl)
+		}
+		if who == "all" {
+			// The all keyword matches any remote IP address
+		} else {
+			_, ipnet, err := net.ParseCIDR(who)
+			if err != nil {
+				return fmt.Errorf("invalid acl: %q (syntax: allow|deny <ip/mask>|all)", acl)
+			}
+			if !ipnet.Contains(remoteIP) {
+				// Skip this rule, the remote IP does not match
+				continue
+			}
+		}
+		switch action {
+		case "allow":
+			return nil
+		case "deny":
+			return fmt.Errorf("access denied (acl %q)", acl)
+		default:
+			return fmt.Errorf("invalid acl: %q (syntax: allow|deny <ip/mask>|all)", acl)
+		}
+	}
+	return nil
+}
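A package-local sketch (an acl_test.go, say) of the evaluation order: rules are scanned top to bottom, the first match wins, and FileServer.New always appends a trailing "deny all":

```go
package fileserver

import "testing"

func TestCheckACLOrder(t *testing.T) {
	acls := []string{"allow 127.0.0.1/32", "deny all"}
	// the client address carries a port, just as r.RemoteAddr does
	if err := checkACL(acls, nil, "127.0.0.1:34567"); err != nil {
		t.Errorf("loopback should be allowed: %v", err)
	}
	if err := checkACL(acls, nil, "10.0.0.5:34567"); err == nil {
		t.Error("10.0.0.5 should be rejected by 'deny all'")
	}
}
```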
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/fileserver/fileserver.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/fileserver/fileserver.go
new file mode 100644
index 0000000000..2da53d1bbf
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/fileserver/fileserver.go
@@ -0,0 +1,346 @@
+// Package fileserver TODO
+package fileserver
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"math/rand"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/backup_download"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+
+	"github.com/pkg/errors"
+	"golang.org/x/net/netutil"
+)
+
+// FileServerComp dbactuator component wrapper around FileServer
+type FileServerComp struct {
+	Params FileServer `json:"extend"`
+}
+
+// FileServer a simple, temporary http file server
+type FileServer struct {
+	// address the http file-server listens on. Without a port, a random one between 12000 and 19999 is chosen; without an ip, localhost is used
+	BindAddress string `json:"bind_address" validate:"required"`
+	// the local directory to share over http
+	MountPath string `json:"mount_path" validate:"required"`
+	// path_prefix is the path prefix used when generating urls. May be left empty
+	PathPrefix string `json:"path_prefix"`
+	// http basic auth user
+	AuthUser string `json:"auth_user" validate:"required"`
+	// http basic auth password; a random one is generated when empty
+	AuthPass string `json:"auth_pass"`
+	// source access restrictions, matched first to last. Format `["allow 1.1.1.1/32", "deny all"]`
+	ACLs []string `json:"acls" example:"allow all"`
+	// not supported yet
+	EnableTls bool `json:"enable_tls"`
+	// print the download http info for convenience
+	PrintDownload bool `json:"print_download"`
+
+	bindHost       string
+	bindPort       string
+	procName       string
+	procStartTime  time.Time
+	lastActiveTime time.Time
+
+	// maximum number of connections; excess requests wait. 0 means unlimited
+	MaxConnections int `json:"max_connections"`
+	// exit automatically after this maximum idle time. Examples: 3600s, 60m, 1h
+	ProcMaxIdleDuration string `json:"proc_maxidle_duration" example:"1h"`
+
+	procMaxIdleDuration time.Duration
+	server              *http.Server
+	cw                  *ConnectionWatcher
+}
+
+// Example sample parameters
+func (s *FileServerComp) Example() interface{} {
+	comp := FileServerComp{
+		Params: FileServer{
+			BindAddress:         "1.1.1.1:18081",
+			MountPath:           "/data/dbbak",
+			PathPrefix:          "",
+			AuthUser:            "test_bk_biz_id",
+			AuthPass:            "",
+			ACLs:                []string{"allow 127.0.0.1/32", "deny all"},
+			MaxConnections:      10,
+			ProcMaxIdleDuration: "1h",
+		},
+	}
+	return comp
+}
+
+// New fills in defaults and validates the file-server configuration
+func (s *FileServer) New() error {
+	var err error
+	if s.BindAddress, err = s.getBindAddress(); err != nil {
+		return err
+	}
+	if err = s.Validate(); err != nil {
+		return err
+	}
+	if s.AuthUser == "" {
+		return fmt.Errorf("no access user provided")
+	}
+	if s.AuthPass == "" {
+		s.AuthPass = cmutil.RandomString(12)
+	}
+	if s.MaxConnections == 0 {
+		s.MaxConnections = 9999
+	}
+	if s.ProcMaxIdleDuration == "" {
+		s.procMaxIdleDuration = 3600 * time.Second
+	} else {
+		s.procMaxIdleDuration, err = time.ParseDuration(s.ProcMaxIdleDuration)
+		if err != nil {
+			return errors.Wrap(err, s.ProcMaxIdleDuration)
+		}
+	}
+	if s.PathPrefix == "" {
+		s.PathPrefix = fmt.Sprintf("/%s/", s.procName)
+	}
+	if len(s.ACLs) == 0 {
+		s.ACLs = []string{fmt.Sprintf("allow %s/32", s.bindHost)}
+	}
+	// always "deny all"
+	s.ACLs = append(s.ACLs, "deny all")
+	// logger.Info("FileServer %+v", s)
+	// print dbactuator params format
+	fmt.Println(s)
+	return nil
+}
+
+// String used for printing
+func (s *FileServer) String() string {
+	str, _ := json.Marshal(s)
+	return string(str)
+}
+
+func (s *FileServer) getBindAddress() (string, error) {
+	var host, port string
+	var err error
+	if s.BindAddress == "" {
+		host = hostDefault
+		port = getRandPort()
+	} else {
+		if host, port, err = net.SplitHostPort(s.BindAddress); err != nil {
+			if strings.Contains(err.Error(), "missing port") {
+				host = s.BindAddress
+				port = getRandPort()
+			} else {
+				return "", err
+			}
+		} else {
+			if host == "" {
+				host = hostDefault
+			}
+			if port == "" {
+				port = getRandPort()
+			}
+		}
+	}
+	s.bindHost = host
+	s.bindPort = port
+	s.BindAddress = fmt.Sprintf("%s:%s", host, port)
+	return s.BindAddress, nil
+}
+
+// Validate checks the mount path and derives the internal process name
+func (s *FileServer) Validate() error {
+	if s.MountPath == "" || s.MountPath == "/" || !strings.HasPrefix(s.MountPath, "/data") {
+		return fmt.Errorf("path should start with /data")
+	}
+	// @todo should check mount_path exists or not
+
+	pathID := util.RegexReplaceSubString(s.MountPath, `%|/| `, "")
+	if pathID == "" {
+		return fmt.Errorf("invalid path %s", s.MountPath)
+	}
+	s.procName = fmt.Sprintf("%s%s", pathID, s.bindPort)
+	return nil
+}
+
+func (s *FileServer) handleFileServer(prefix string, handler http.Handler) http.HandlerFunc {
+	// realHandler := http.StripPrefix(prefix, handler)
+	// h := http.StripPrefix(prefix, handler)
+
+	return func(w http.ResponseWriter, req *http.Request) {
+		s.lastActiveTime = time.Now()
+		handler.ServeHTTP(w, req)
+	}
+}
+
+// Start begins serving in a background goroutine
+func (s *FileServer) Start() error {
+	if err := s.Validate(); err != nil {
+		log.Fatalln(err)
+	}
+
+	handler := http.StripPrefix(s.PathPrefix, http.FileServer(http.Dir(s.MountPath)))
+	hFunc := aclHandler(s.ACLs, s.handleBasicAuth(s.handleFileServer(s.PathPrefix, handler)))
+	http.HandleFunc(s.PathPrefix, hFunc)
+
+	s.cw = &ConnectionWatcher{}
+	server := &http.Server{
+		Addr:      s.BindAddress,
+		Handler:   nil,
+		ConnState: s.cw.OnStateChange,
+	}
+	s.server = server
+
+	// http.Handle(s.Prefix, http.StripPrefix(s.Prefix, http.FileServer(http.Dir(s.Path))))
+	s.procStartTime = time.Now()
+	s.lastActiveTime = time.Now()
+	li, err := net.Listen("tcp", s.BindAddress)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	li = netutil.LimitListener(li, s.MaxConnections) // cap concurrent connections
+
+	go func() {
+		if err := server.Serve(li); err != nil {
+			log.Fatalln(err)
+		}
+	}()
+	// s.WaitDone()
+	return nil
+}
+
+// WaitDone blocks until the server has been idle longer than the max idle duration
+func (s *FileServer) WaitDone() error {
+	for {
+		time.Sleep(5 * time.Second)
+		idleDura := time.Since(s.lastActiveTime)
+		if s.cw.Count() > 0 {
+			logger.Info("server connections %d", s.cw.Count())
+			s.lastActiveTime = time.Now()
+		} else if idleDura > s.procMaxIdleDuration && s.cw.Count() == 0 && s.procMaxIdleDuration > 0 {
+			logger.Info("server idle %s exceed max_idle_duration %s", idleDura, s.ProcMaxIdleDuration)
+			s.server.Close()
+			break
+		} else {
+			logger.Debug("server idle %v", idleDura)
+		}
+	}
+	return nil
+}
+
+// OutputCtx prints ready-to-use download parameters when PrintDownload is set
+func (s *FileServer) OutputCtx() error {
+	if !s.PrintDownload {
+		return nil
+	}
+	httpGet := backup_download.DFHttpComp{
+		Params: backup_download.DFHttpParam{
+			DFBase: backup_download.DFBase{
+				BWLimitMB:   50,
+				Concurrency: 1,
+			},
+			HttpGet: backup_download.HttpGet{
+				Server:   fmt.Sprintf("http://%s%s", s.BindAddress, s.PathPrefix),
+				PathTgt:  "/data/dbbak",
+				FileList: []string{"xx", "yy"},
+				AuthUser: s.AuthUser,
+				AuthPass: s.AuthPass,
+			},
+		},
+	}
+	components.PrintOutputCtx(components.ToPrettyJson(httpGet))
+	return nil
+}
+
+func (s *FileServer) handleBasicAuth(next http.HandlerFunc) http.HandlerFunc {
+	return http.HandlerFunc(
+		func(w http.ResponseWriter, r *http.Request) {
+			if r.Method != http.MethodGet {
+				w.WriteHeader(http.StatusMethodNotAllowed)
+				return
+			}
+			// basicAuthPrefix := "Basic "
+			// auth := r.Header.Get("Authorization")
+			w.Header().Set("Content-Type", r.Header.Get("Content-Type"))
+			u, p, ok := r.BasicAuth()
+			if ok {
+				if u == s.AuthUser && p == s.AuthPass {
+					logger.Info("requested %s", r.URL)
+					// w.WriteHeader(200)
+					s.lastActiveTime = time.Now()
+					if next != nil {
+						next.ServeHTTP(w, r)
+					}
+					return
+				}
+			}
+			w.Header().Set("WWW-Authenticate", `Basic realm="restricted", charset="UTF-8"`)
+			// w.WriteHeader(http.StatusUnauthorized)
+			http.Error(w, "Unauthorized BA", http.StatusUnauthorized)
+		},
+	)
+}
+
+func aclHandler(acls []string, next http.HandlerFunc) http.HandlerFunc {
+	return http.HandlerFunc(
+		func(w http.ResponseWriter, r *http.Request) {
+			if err := checkACL(acls, nil, r.RemoteAddr); err != nil {
+				http.Error(w, "Unauthorized IP", http.StatusUnauthorized)
+				return
+			}
+			if next != nil {
+				next.ServeHTTP(w, r)
+			}
+			return
+		},
+	)
+}
+
+func (s *FileServer) addAcl(acl string) {
+	// is acl valid?
+	s.ACLs = append([]string{acl}, s.ACLs...)
+}
+
+var portRange []int = []int{12000, 19999}
+var hostDefault = "localhost"
+
+func getRandPort() string {
+	diff := portRange[1] - portRange[0]
+	port := rand.Intn(diff) + portRange[0]
+	return strconv.Itoa(port)
+}
+
+// ConnectionWatcher TODO
+type ConnectionWatcher struct {
+	n int64
+}
+
+// OnStateChange records open connections in response to connection
+// state changes. Set net/http Server.ConnState to this method
+// as value.
+func (cw *ConnectionWatcher) OnStateChange(conn net.Conn, state http.ConnState) {
+	switch state {
+	case http.StateNew:
+		cw.Add(1)
+	case http.StateHijacked, http.StateClosed:
+		cw.Add(-1)
+	}
+}
+
+// Count returns the number of connections at the time
+// of the call.
+func (cw *ConnectionWatcher) Count() int {
+	return int(atomic.LoadInt64(&cw.n))
+}
+
+// Add adds c to the number of active connections.
+func (cw *ConnectionWatcher) Add(c int64) {
+	atomic.AddInt64(&cw.n, c)
+}
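How the pieces above are intended to fit together, as a sketch; the mount path must live under /data, and the credentials are placeholders:

```go
package main

import (
	"log"

	"dbm-services/mysql/db-tools/dbactuator/pkg/components/fileserver"
)

func main() {
	s := fileserver.FileServer{
		BindAddress:   "127.0.0.1:18081",
		MountPath:     "/data/dbbak",
		AuthUser:      "test_bk_biz_id",
		PrintDownload: true,
		// AuthPass empty: New() generates a random password
	}
	if err := s.New(); err != nil {
		log.Fatalf("init failed: %v", err)
	}
	if err := s.Start(); err != nil { // serves from a goroutine
		log.Fatalf("start failed: %v", err)
	}
	_ = s.OutputCtx() // emit ready-made download parameters
	_ = s.WaitDone()  // block until the idle timeout closes the server
}
```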
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/medium.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/medium.go
new file mode 100644
index 0000000000..1332d19483
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/medium.go
@@ -0,0 +1,56 @@
+package components
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+	"path"
+	"path/filepath"
+	"regexp"
+	"strings"
+)
+
+// Medium generic handling of an installation package (medium)
+type Medium struct {
+	Pkg    string `json:"pkg" validate:"required"`          // package file name
+	PkgMd5 string `json:"pkg_md5"  validate:"required,md5"` // package MD5
+}
+}
+
+// Check verifies that the package exists and that its md5 matches
+func (m *Medium) Check() (err error) {
+	var fileMd5 string
+	// make sure the package exists
+	pkgAbPath := m.GetAbsolutePath()
+	if !cmutil.FileExists(pkgAbPath) {
+		return fmt.Errorf("%s does not exist", pkgAbPath)
+	}
+	if fileMd5, err = util.GetFileMd5(pkgAbPath); err != nil {
+		return fmt.Errorf("failed to get md5 of [%s], %v", m.Pkg, err.Error())
+	}
+	// verify the md5
+	if fileMd5 != m.PkgMd5 {
+		return fmt.Errorf("package md5 mismatch: file md5 [%s] does not match expected md5 [%s]", fileMd5, m.PkgMd5)
+	}
+	return
+}
+
+// GetAbsolutePath returns the absolute path where the medium is stored
+func (m *Medium) GetAbsolutePath() string {
+	return path.Join(cst.BK_PKG_INSTALL_PATH, m.Pkg)
+}
+
+// GePkgBaseName parses a package name such as mysql-5.7.20-linux-x86_64-tmysql-3.1.5-gcs.tar.gz
+// into mysql-5.7.20-linux-x86_64-tmysql-3.1.5-gcs,
+// used when creating symlinks
+func (m *Medium) GePkgBaseName() string {
+	pkgFullName := filepath.Base(m.GetAbsolutePath())
+	return regexp.MustCompile(`(\.tar\.gz|\.tgz)$`).ReplaceAllString(pkgFullName, "")
+}
+
+// GetPkgTypeName derives the component type from the package file name,
+// e.g. mysql-5.7.20-linux-x86_64-tmysql-3.1.5-gcs.tar.gz resolves to mysql
+// and mariadb-10.3.7-linux-x86_64-tspider-3.7.8-gcs.tar.gz resolves to mariadb
+func (m *Medium) GetPkgTypeName() string {
+	return strings.Split(m.Pkg, "-")[0]
+}
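A short sketch of the medium helpers above; the md5 below is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
)

func main() {
	m := components.Medium{
		Pkg:    "mysql-5.7.20-linux-x86_64-tmysql-3.1.5-gcs.tar.gz",
		PkgMd5: "0123456789abcdef0123456789abcdef", // placeholder checksum
	}
	if err := m.Check(); err != nil { // existence + md5
		log.Fatal(err)
	}
	fmt.Println(m.GePkgBaseName())  // mysql-5.7.20-linux-x86_64-tmysql-3.1.5-gcs
	fmt.Println(m.GetPkgTypeName()) // mysql
}
```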
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/backup_database_table.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/backup_database_table.go
new file mode 100644
index 0000000000..8db10304c4
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/backup_database_table.go
@@ -0,0 +1,352 @@
+package mysql
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/tools"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"encoding/json"
+	"fmt"
+	"math/rand"
+	"os"
+	"path"
+	"strings"
+	"time"
+
+	"gopkg.in/ini.v1"
+)
+
+// BackupDatabaseTableComp struct
+type BackupDatabaseTableComp struct {
+	Params           *BackupDatabaseTableParam `json:"extend"`
+	BackupRunTimeCtx `json:"-"`
+	tools            *tools.ToolSet
+}
+
+// BackupDatabaseTableParam struct
+type BackupDatabaseTableParam struct {
+	Host   string `json:"host" validate:"required,ip"`
+	Port   int    `json:"port" validate:"required,lt=65536,gte=3306"`
+	Regex  string `json:"regex"`
+	BillId string `json:"bill_id"`
+}
+
+// BackupRunTimeCtx runtime context
+type BackupRunTimeCtx struct {
+	BackupDir        string
+	StatusReportPath string
+	ResultReportPath string
+	ConfPath         string
+	RandNum          string
+}
+
+// Report struct
+type Report struct {
+	ReportResult []ReportResult `json:"report_result"`
+	ReportStatus ReportStatus   `json:"report_status"`
+}
+
+// BackupGoConfig backup config file sections
+type BackupGoConfig struct {
+	SectionStringPublic       string
+	BackupParamPublic         *dbbackup.CnfShared
+	SectionStringBackupSystem string
+	BackupParamBackupSystem   *dbbackup.CnfBackupClient
+	SectionStringLogical      string
+	BackupParamLogical        *dbbackup.CnfLogicalBackup
+}
+
+// ReportStatus a backup status report entry
+type ReportStatus struct {
+	BackupId   string `json:"backup_id"`
+	BillId     string `json:"bill_id"`
+	Status     string `json:"status"`
+	ReportTime string `json:"report_time"`
+}
+
+// ReportResult a single backup result entry
+type ReportResult struct {
+	BackupId             string `json:"backup_id"`
+	BkBizId              string `json:"bk_biz_id"`
+	BillId               string `json:"bill_id"`
+	BkCloudId            string `json:"bk_cloud_id"`
+	TimeZone             string `json:"time_zone"`
+	ClusterAddress       string `json:"cluster_address"`
+	MysqlHost            string `json:"mysql_host"`
+	MysqlPort            int    `json:"mysql_port"`
+	MasterHost           string `json:"master_host"`
+	MasterPort           int    `json:"master_port"`
+	FileName             string `json:"file_name"`
+	BackupBeginTime      string `json:"backup_begin_time"`
+	BackupEndTime        string `json:"backup_end_time"`
+	DataSchemaGrant      string `json:"data_schema_grant"`
+	BackupType           string `json:"backup_type"`
+	ConsistentBackupTime string `json:"consistent_backup_time"`
+	MysqlRole            string `json:"mysql_role"`
+	FileSize             int64  `json:"file_size"`
+	FileType             string `json:"file_type"`
+	TaskId               string `json:"task_id"`
+}
+
+// Precheck checks that the backup tool exists
+func (c *BackupDatabaseTableComp) Precheck() (err error) {
+	c.tools, err = tools.NewToolSetWithPick(tools.ToolDbbackupGo)
+	if err != nil {
+		logger.Error("init toolset failed: %s", err.Error())
+		return err
+	}
+	c.BackupDir = cst.DbbackupGoInstallPath
+	return nil
+}
+
+// CreateBackupConfigFile generates the one-off backup config file
+func (c *BackupDatabaseTableComp) CreateBackupConfigFile() error {
+	ts := time.Now().UnixNano()
+	rand.Seed(ts)
+	r := rand.Intn(100)
+	c.RandNum = fmt.Sprintf("%d%d", ts, r)
+
+	// e.g. port 3306 resolves to dbbackup.3306.ini
+	dailyBackupConfPath := path.Join(c.BackupDir, fmt.Sprintf("dbbackup.%d.ini", c.Params.Port))
+
+	dailyConf, err := c.ReadDailyBackupConfigFile(dailyBackupConfPath)
+	if err != nil {
+		logger.Error("读取备份配置文件%s失败:%s", dailyBackupConfPath, err.Error())
+		return err
+	}
+	err = c.ModifyNewBackupConfigFile(dailyConf)
+	if err != nil {
+		logger.Error("生成库表备份配置文件%s失败:%s", c.ConfPath, err.Error())
+		return err
+	}
+	err = c.WriteToConfigFile(dailyConf)
+	if err != nil {
+		logger.Error("生成库表备份配置文件%s失败:%s", c.ConfPath, err.Error())
+		return err
+	}
+	return nil
+}
+
+// WriteToConfigFile writes the config out as an ini file
+func (c *BackupDatabaseTableComp) WriteToConfigFile(config *BackupGoConfig) error {
+	c.ConfPath = path.Join(c.BackupDir, fmt.Sprintf("dbbackup.%d.%s.ini", c.Params.Port, c.RandNum))
+	file := ini.Empty()
+	mysqlSection, err := file.NewSection(config.SectionStringPublic)
+	if err != nil {
+		logger.Error("new section failed:%s", err.Error())
+		return err
+	}
+	err = mysqlSection.ReflectFrom(config.BackupParamPublic)
+	if err != nil {
+		logger.Error("public section ReflectFrom failed:%s", err.Error())
+		return err
+	}
+
+	mysqlSection, err = file.NewSection(config.SectionStringBackupSystem)
+	if err != nil {
+		logger.Error("new section failed:%s", err.Error())
+		return err
+	}
+	err = mysqlSection.ReflectFrom(config.BackupParamBackupSystem)
+	if err != nil {
+		logger.Error("backup system section ReflectFrom failed:%s", err.Error())
+		return err
+	}
+
+	mysqlSection, err = file.NewSection(config.SectionStringLogical)
+	if err != nil {
+		logger.Error("new section failed:%s", err.Error())
+		return err
+	}
+	err = mysqlSection.ReflectFrom(config.BackupParamLogical)
+	if err != nil {
+		logger.Error("logical section ReflectFrom failed:%s", err.Error())
+		return err
+	}
+
+	err = file.SaveTo(c.ConfPath)
+
+	if err != nil {
+		logger.Error("config file save failed:%s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// RemoveBackupConfigFile removes the temporary config file
+func (c *BackupDatabaseTableComp) RemoveBackupConfigFile() error {
+	_, err := os.Stat(c.ConfPath)
+	if err != nil {
+		logger.Error("stat file %s failed:%s", c.ConfPath, err.Error())
+		return err
+	}
+	err = os.RemoveAll(c.ConfPath)
+	if err != nil {
+		logger.Error("remove file %s failed:%s", c.ConfPath, err.Error())
+		return err
+	}
+	return nil
+}
+
+// ReadDailyBackupConfigFile reads the daily backup config file
+func (c *BackupDatabaseTableComp) ReadDailyBackupConfigFile(vPath string) (*BackupGoConfig, error) {
+	cnf, err := ReadBackupConfigFile(vPath)
+	if err != nil {
+		return cnf, err
+	}
+
+	c.ResultReportPath = path.Join(
+		cnf.BackupParamPublic.ResultReportPath, fmt.Sprintf(
+			"dbareport_result_%d.log",
+			c.Params.Port,
+		),
+	)
+	c.StatusReportPath = path.Join(
+		cnf.BackupParamPublic.StatusReportPath, fmt.Sprintf(
+			"dbareport_status_%d.log",
+			c.Params.Port,
+		),
+	)
+
+	return cnf, err
+}
+
+// ReadBackupConfigFile reads a backup config file into its sections
+func ReadBackupConfigFile(path string) (*BackupGoConfig, error) {
+	publicConf := new(dbbackup.CnfShared)
+	logicalConf := new(dbbackup.CnfLogicalBackup)
+	backupSystemConf := new(dbbackup.CnfBackupClient)
+
+	file, err := ini.Load(path)
+	if err != nil {
+		return nil, err
+	}
+	// config.SectionStrings()[0]:DEFAULT
+	err = file.Section(file.SectionStrings()[1]).StrictMapTo(publicConf)
+	if err != nil {
+		return nil, err
+	}
+	err = file.Section(file.SectionStrings()[2]).StrictMapTo(backupSystemConf)
+	if err != nil {
+		return nil, err
+	}
+	err = file.Section(file.SectionStrings()[3]).StrictMapTo(logicalConf)
+	if err != nil {
+		return nil, err
+	}
+
+	return &BackupGoConfig{
+		file.SectionStrings()[1], publicConf,
+		file.SectionStrings()[2], backupSystemConf,
+		file.SectionStrings()[3], logicalConf,
+	}, nil
+}
+
+// ModifyNewBackupConfigFile adjusts the temporary config for a logical db/table backup
+func (c *BackupDatabaseTableComp) ModifyNewBackupConfigFile(config *BackupGoConfig) error {
+	config.BackupParamPublic.BackupType = "Logical"
+	config.BackupParamPublic.DataSchemaGrant = "data,schema"
+	config.BackupParamPublic.BackupTimeOut = ""
+	config.BackupParamPublic.BillId = c.Params.BillId
+	config.BackupParamLogical.Regex = c.Params.Regex
+
+	timeStr := time.Now().Format("20060102_150405")
+	backupTo := path.Join(
+		config.BackupParamPublic.BackupDir, fmt.Sprintf(
+			"%s_%s_%s", "backupDatabaseTable",
+			timeStr, c.RandNum,
+		),
+	)
+
+	cmd := fmt.Sprintf("mkdir -p %s", backupTo)
+	if _, err := osutil.ExecShellCommand(false, cmd); err != nil {
+		logger.Error("创建备份目录%s 失败:%s", cmd, err.Error())
+		return err
+	}
+
+	config.BackupParamPublic.BackupDir = backupTo
+	return nil
+}
+
+// DoBackup runs the backup
+func (c *BackupDatabaseTableComp) DoBackup() error {
+	cmd := fmt.Sprintf(
+		"%s --configpath=%s --dumpbackup",
+		c.tools.MustGet(tools.ToolDbbackupGo),
+		c.ConfPath,
+	)
+	_, err := osutil.ExecShellCommand(false, cmd)
+	if err != nil {
+		logger.Error("execute %s failed: %s", cmd, err.Error())
+		return err
+	}
+	return nil
+}
+
+// OutputBackupInfo prints the backup report
+func (c *BackupDatabaseTableComp) OutputBackupInfo() error {
+	res, err := GenerateReport(c.Params.BillId, c.StatusReportPath, c.ResultReportPath)
+	if err != nil {
+		logger.Error("generate report failed: %s", err.Error())
+		return err
+	}
+
+	err = components.PrintOutputCtx(res)
+	if err != nil {
+		logger.Error("print backup report info failed: %s.", err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// GenerateReport builds the report from the status and result logs
+func GenerateReport(billId string, statusLogFile string, resultLogFile string) (*Report, error) {
+	var res Report
+	var status []ReportStatus
+	var result []ReportResult
+
+	cmd := fmt.Sprintf(`grep '"bill_id":"%s"' %s`, billId, statusLogFile)
+	out, err := osutil.ExecShellCommand(false, cmd)
+	if err != nil {
+		logger.Error("execute %s failed: %s.", cmd, err.Error())
+		return nil, err
+	}
+
+	out = strings.ReplaceAll(out, "\n", ",")
+	out = strings.Trim(out, ",")
+	out = fmt.Sprintf("%s%s%s", "[", out, "]")
+	err = json.Unmarshal([]byte(out), &status)
+	if err != nil {
+		logger.Error("get backup report status unmarshal failed: %s.", err.Error())
+		return nil, err
+	}
+
+	last := len(status) - 1
+	res.ReportStatus = status[last]
+	if res.ReportStatus.Status != "Success" {
+		err := fmt.Errorf("report status is not Success: %s", res.ReportStatus)
+		logger.Error(err.Error())
+		return nil, err
+	}
+
+	cmd = fmt.Sprintf(`grep '"backup_id":"%s"' %s`, res.ReportStatus.BackupId, resultLogFile)
+
+	out, err = osutil.ExecShellCommand(false, cmd)
+	if err != nil {
+		logger.Error("execute %s failed: %s.", cmd, err.Error())
+		return nil, err
+	}
+	out = strings.ReplaceAll(out, "\n", ",")
+	out = strings.Trim(out, ",")
+	out = fmt.Sprintf("%s%s%s", "[", out, "]")
+	err = json.Unmarshal([]byte(out), &result)
+	if err != nil {
+		logger.Error("get backup report result unmarshal failed: %s.", err.Error())
+		return nil, err
+	}
+	res.ReportResult = result
+	return &res, nil
+}
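The component is driven as a fixed sequence of steps; a sketch of the intended order (host, regex and bill id are placeholders):

```go
package main

import (
	"log"

	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
)

func main() {
	c := mysql.BackupDatabaseTableComp{
		Params: &mysql.BackupDatabaseTableParam{
			Host:   "127.0.0.1",
			Port:   3306,
			Regex:  `^db1\.tb.*$`, // placeholder table filter
			BillId: "100001",      // placeholder
		},
	}
	for _, step := range []func() error{
		c.Precheck,               // locate the dbbackup tool
		c.CreateBackupConfigFile, // derive a one-off ini from the daily config
		c.DoBackup,               // run dbbackup --dumpbackup
		c.OutputBackupInfo,       // parse status/result logs into a report
		c.RemoveBackupConfigFile, // drop the temporary ini
	} {
		if err := step(); err != nil {
			log.Fatal(err)
		}
	}
}
```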
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/backup_truncate_database.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/backup_truncate_database.go
new file mode 100644
index 0000000000..1de90e33af
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/backup_truncate_database.go
@@ -0,0 +1,293 @@
+package mysql
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"os/exec"
+	"path"
+)
+
+// BackupTruncateDatabaseComp dumps the schema of each old database and replays it into the new one
+type BackupTruncateDatabaseComp struct {
+	GeneralParam *components.GeneralParam
+	Params       BackupTruncateDatabaseParam
+	BackupTruncateDatabaseCtx
+}
+
+// BackupTruncateDatabaseParam parameters
+type BackupTruncateDatabaseParam struct {
+	Host          string         `json:"host" validate:"required,ip"`
+	Port          int            `json:"port" validate:"required,lt=65536,gte=3306"`
+	DatabaseInfos []DatabaseInfo `json:"database_infos"`
+}
+
+// DatabaseInfo an old/new database name pair
+type DatabaseInfo struct {
+	Old string `json:"old"`
+	New string `json:"new"`
+}
+
+// DatabaseInfoCtx a database pair plus the dump file produced for it
+type DatabaseInfoCtx struct {
+	DatabaseInfo
+	SqlFile string `json:"sql_file"`
+}
+
+// BackupTruncateDatabaseCtx runtime context
+type BackupTruncateDatabaseCtx struct {
+	dbConn           *native.DbWorker
+	charset          string
+	dumpCmd          string
+	socket           string
+	databaseInfosCtx []DatabaseInfoCtx
+	uid              string
+	backupDir        string
+}
+
+// Init connects to the instance with the admin account and records the uid
+func (c *BackupTruncateDatabaseComp) Init(uid string) (err error) {
+	c.dbConn, err = native.InsObject{
+		Host: c.Params.Host,
+		Port: c.Params.Port,
+		User: c.GeneralParam.RuntimeAccountParam.AdminUser,
+		Pwd:  c.GeneralParam.RuntimeAccountParam.AdminPwd,
+	}.Conn()
+	if err != nil {
+		logger.Error("Connect %d failed:%s", c.Params.Port, err.Error())
+		return err
+	}
+
+	if c.socket, err = c.dbConn.ShowSocket(); err != nil {
+		logger.Error("获取socket value 失败:%s", err.Error())
+		return err
+	}
+
+	c.uid = uid
+	return nil
+}
+
+// Precheck checks that mysqldump exists
+func (c *BackupTruncateDatabaseComp) Precheck() error {
+	c.dumpCmd = path.Join(cst.MysqldInstallPath, "bin", "mysqldump")
+	if !osutil.FileExist(c.dumpCmd) {
+		return fmt.Errorf("dumpCmd %s does not exist", c.dumpCmd)
+	}
+	return nil
+}
+
+// ReadBackupConf reads charset and backup dir from the daily backup config
+func (c *BackupTruncateDatabaseComp) ReadBackupConf() error {
+	dailyBackupConfPath := path.Join(
+		cst.DbbackupGoInstallPath,
+		fmt.Sprintf("dbbackup.%d.ini", c.Params.Port),
+	)
+	cnf, err := ReadBackupConfigFile(dailyBackupConfPath)
+	if err != nil {
+		logger.Error("读取备份配置文件%s失败:%s", dailyBackupConfPath, err.Error())
+		return err
+	}
+
+	c.charset = cnf.BackupParamPublic.MysqlCharset
+	c.backupDir = cnf.BackupParamPublic.BackupDir
+	return nil
+}
+
+// DumpSchema dumps the schema of each old database
+func (c *BackupTruncateDatabaseComp) DumpSchema() error {
+	for _, dbInfo := range c.Params.DatabaseInfos {
+		oldDb := dbInfo.Old
+		backupFileName := fmt.Sprintf(
+			`truncate_dump_%s_%d_%s_%s.sql`,
+			c.Params.Host,
+			c.Params.Port,
+			oldDb,
+			c.uid,
+		)
+
+		var dumper mysqlutil.Dumper = &mysqlutil.MySQLDumperTogether{
+			MySQLDumper: mysqlutil.MySQLDumper{
+				DumpDir:      c.backupDir,
+				Ip:           c.Params.Host,
+				Port:         c.Params.Port,
+				DbBackupUser: c.GeneralParam.RuntimeAccountParam.AdminUser,
+				DbBackupPwd:  c.GeneralParam.RuntimeAccountParam.AdminPwd,
+				DbNames:      []string{oldDb},
+				DumpCmdFile:  c.dumpCmd,
+				Charset:      c.charset,
+				MySQLDumpOption: mysqlutil.MySQLDumpOption{
+					NoData:       true,
+					NoCreateDb:   true,
+					NoCreateTb:   false,
+					DumpTrigger:  true,
+					DumpRoutine:  true,
+					DumpEvent:    true,
+					NeedUseDb:    false,
+					AddDropTable: false,
+				},
+			},
+			OutputfileName: backupFileName,
+		}
+		if err := dumper.Dump(); err != nil {
+			logger.Error("dump failed: ", err.Error())
+			return err
+		}
+
+		c.databaseInfosCtx = append(
+			c.databaseInfosCtx, DatabaseInfoCtx{
+				DatabaseInfo: dbInfo,
+				SqlFile:      backupFileName,
+			},
+		)
+	}
+
+	return nil
+}
+
+// ModifyFile rewrites CREATE TABLE into CREATE TABLE IF NOT EXISTS in the dump files
+func (c *BackupTruncateDatabaseComp) ModifyFile() error {
+	for _, dbInfoCtx := range c.databaseInfosCtx {
+		sqlFilePath := path.Join(c.backupDir, dbInfoCtx.SqlFile)
+		cmd := exec.Command("sed", "-i", "-e", `s/CREATE TABLE/CREATE TABLE IF NOT EXISTS/g`, sqlFilePath)
+		r, err := cmd.CombinedOutput()
+		if err != nil {
+			logger.Error("replace %s failed: %s(%s)", sqlFilePath, string(r), err.Error())
+		}
+	}
+	return nil
+}
+
+// CleanNewDB drops views, triggers, events and routines left in the target database
+func (c *BackupTruncateDatabaseComp) CleanNewDB() error {
+	for _, dbInfoCtx := range c.databaseInfosCtx {
+		rows, err := c.dbConn.Query(
+			fmt.Sprintf(
+				"select table_name from information_schema.views where table_schema='%s'",
+				dbInfoCtx.New,
+			),
+		)
+		if err != nil && !c.dbConn.IsNotRowFound(err) {
+			logger.Error(err.Error())
+			return err
+		}
+		for _, row := range rows {
+			viewName, ok := row["table_name"]
+			if !ok {
+				err = fmt.Errorf("转换 %s 失败", row["table_name"])
+				logger.Error(err.Error())
+				return err
+			}
+			_, err = c.dbConn.Exec(fmt.Sprintf("drop view if exists `%s`.`%s`", dbInfoCtx.New, viewName))
+			if err != nil {
+				logger.Error(err.Error())
+				return err
+			}
+		}
+
+		rows, err = c.dbConn.Query(
+			fmt.Sprintf(
+				"select trigger_name from information_schema.triggers where trigger_schema='%s'",
+				dbInfoCtx.New,
+			),
+		)
+		if err != nil && !c.dbConn.IsNotRowFound(err) {
+			logger.Error(err.Error())
+			return err
+		}
+		for _, row := range rows {
+			triggerName, ok := row["trigger_name"]
+			if !ok {
+				err = fmt.Errorf("转换 %s 失败", row["trigger_name"])
+				logger.Error(err.Error())
+				return err
+			}
+			_, err = c.dbConn.Exec(fmt.Sprintf("drop trigger if exists `%s`.`%s`", dbInfoCtx.New, triggerName))
+			if err != nil {
+				logger.Error(err.Error())
+				return err
+			}
+		}
+
+		rows, err = c.dbConn.Query(
+			fmt.Sprintf(
+				"select event_name from information_schema.events where event_schema='%s'",
+				dbInfoCtx.New,
+			),
+		)
+		if err != nil && !c.dbConn.IsNotRowFound(err) {
+			logger.Error(err.Error())
+			return err
+		}
+		for _, row := range rows {
+			eventName, ok := row["event_name"]
+			if !ok {
+				err = fmt.Errorf("转换 %s 失败", row["event_name"])
+				logger.Error(err.Error())
+				return err
+			}
+			_, err = c.dbConn.Exec(fmt.Sprintf("drop event if exists `%s`.`%s`", dbInfoCtx.New, eventName))
+			if err != nil {
+				logger.Error(err.Error())
+				return err
+			}
+		}
+
+		rows, err = c.dbConn.Query(
+			fmt.Sprintf(
+				"select routine_name, routine_type from information_schema.routines where ROUTINE_SCHEMA='%s'",
+				dbInfoCtx.New,
+			),
+		)
+		if err != nil && !c.dbConn.IsNotRowFound(err) {
+			logger.Error(err.Error())
+			return err
+		}
+		for _, row := range rows {
+			routineName, ok := row["routine_name"]
+			if !ok {
+				err = fmt.Errorf("转换 %s 失败", row["routine_name"])
+				logger.Error(err.Error())
+				return err
+			}
+			routineType, ok := row["routine_type"]
+			if !ok {
+				err = fmt.Errorf("转换 %s 失败", row["routine_type"])
+				logger.Error(err.Error())
+				return err
+			}
+			_, err = c.dbConn.Exec(fmt.Sprintf("drop %s if exists `%s`.`%s`", routineType, dbInfoCtx.New, routineName))
+			if err != nil {
+				logger.Error(err.Error())
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// ImportSchema replays each dump file into its new database
+func (c *BackupTruncateDatabaseComp) ImportSchema() error {
+	for _, dbInfoCtx := range c.databaseInfosCtx {
+		err := mysqlutil.ExecuteSqlAtLocal{
+			IsForce:          false,
+			Charset:          c.charset,
+			NeedShowWarnings: false,
+			Host:             c.Params.Host,
+			Port:             c.Params.Port,
+			Socket:           c.socket,
+			User:             c.GeneralParam.RuntimeAccountParam.AdminUser,
+			Password:         c.GeneralParam.RuntimeAccountParam.AdminPwd,
+			WorkDir:          c.backupDir,
+		}.ExcuteSqlByMySQLClient(dbInfoCtx.SqlFile, []string{dbInfoCtx.New})
+		if err != nil {
+			logger.Error("导入 %s 到 %s 失败", dbInfoCtx.SqlFile, dbInfoCtx.New)
+			return err
+		}
+	}
+	return nil
+}
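The same step-sequence pattern applies here; a sketch of the order the methods are meant to run in (GeneralParam and Params are left to the caller, and the uid is a placeholder):

```go
package main

import (
	"log"

	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
)

func main() {
	var c mysql.BackupTruncateDatabaseComp // GeneralParam and Params filled by the caller
	for _, step := range []func() error{
		func() error { return c.Init("20230529-0001") }, // uid tags the dump files
		c.Precheck,       // mysqldump must exist
		c.ReadBackupConf, // reuse charset/backup dir from the daily config
		c.DumpSchema,     // schema-only dump of each old database
		c.ModifyFile,     // CREATE TABLE -> CREATE TABLE IF NOT EXISTS
		c.CleanNewDB,     // drop views/triggers/events/routines in the target
		c.ImportSchema,   // replay each dump into its new database
	} {
		if err := step(); err != nil {
			log.Fatal(err)
		}
	}
}
```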
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/change_master.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/change_master.go
new file mode 100644
index 0000000000..f9ac3ccf5f
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/change_master.go
@@ -0,0 +1,284 @@
+/*
+ * @Description:
+ 	Establishes the master/slave relationship. It does not perform the steps
+	that normally precede it (such as data synchronization), and it verifies
+	that the replication relationship is healthy.
+
+	Prechecks:
+	1 master: port connectivity
+	2 master: repl account privileges
+	3 this instance: whether a replication relationship already exists
+		3.1 a relationship exists but is broken: reset slave, then continue
+		3.2 a relationship exists and is ok: fail immediately
+		3.3 with the force parameter: reset slave, then continue
+	4 this instance: port connectivity
+*/
+
+package mysql
+
+import (
+	"fmt"
+	"time"
+
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+)
+
+// BuildMSRelationComp the change-master component
+type BuildMSRelationComp struct {
+	// local operations use the ADMIN account; change master uses repl
+	GeneralParam *components.GeneralParam `json:"general"`
+	Params       *BuildMSRelationParam    `json:"extend"`
+	db           *native.DbWorker         // connection to the local db
+	mdb          *native.DbWorker         // connection to the master db using the repl account
+	checkVars    []string                 // variables to compare before starting replication
+}
+
+// BuildMSRelationParam parameters needed to run change master
+type BuildMSRelationParam struct {
+	Host        string `json:"host"  validate:"required,ip"`                        // host of this instance
+	Port        int    `json:"port"  validate:"required,lte=65535,gte=3306"`        // port of this instance
+	MasterHost  string `json:"master_host"  validate:"required,ip" `                // change master to: master ip
+	MasterPort  int    `json:"master_port"  validate:"required,lte=65535,gte=3306"` // change master to: master port
+	IsGtid      bool   `json:"is_gtid"`                                             // whether to establish replication GTID-based
+	BinFile     string `json:"bin_file" validate:"required"`                        // binlog file name
+	BinPosition int64  `json:"bin_position" validate:"required,gte=0"`              // binlog position
+	// maximum tolerable delay: replication counts as established once the slave lag is below this. 0 or unset skips the check
+	MaxTolerateDelay int `json:"max_tolerate_delay"`
+	// if this instance already has a replication relationship, reset slave and force the change master
+	Force bool `json:"force" example:"false"`
+	// do not start io_thread. The default false starts io_thread
+	NotStartIOThread bool `json:"not_start_io_thread" example:"false"`
+	// do not start sql_thread. The default false starts sql_thread
+	NotStartSQLThread bool `json:"not_start_sql_thread" example:"false"`
+}
+
+// Example sample parameters
+func (b *BuildMSRelationComp) Example() interface{} {
+	comp := BuildMSRelationComp{
+		GeneralParam: &components.GeneralParam{
+			RuntimeAccountParam: components.RuntimeAccountParam{
+				MySQLAccountParam: common.MySQLAdminReplExample,
+			},
+		},
+		Params: &BuildMSRelationParam{
+			Host:              "1.1.1.1",
+			Port:              3306,
+			MasterHost:        "1.1.1.2",
+			MasterPort:        3306,
+			BinFile:           "binlog20000.001234",
+			BinPosition:       4,
+			NotStartIOThread:  false,
+			NotStartSQLThread: false,
+			Force:             false,
+		},
+	}
+	return comp
+}
+
+// Init connects to the local instance with the admin account and to the master with the repl account
+func (b *BuildMSRelationComp) Init() (err error) {
+	b.db, err = native.InsObject{
+		Host: b.Params.Host,
+		Port: b.Params.Port,
+		User: b.GeneralParam.RuntimeAccountParam.AdminUser,
+		Pwd:  b.GeneralParam.RuntimeAccountParam.AdminPwd,
+	}.Conn()
+	if err != nil {
+		logger.Error("connect %s:%d failed,err:%s", b.Params.Host, b.Params.Port, err.Error())
+		return err
+	}
+	b.mdb, err = native.InsObject{
+		Host: b.Params.MasterHost,
+		Port: b.Params.MasterPort,
+		User: b.GeneralParam.RuntimeAccountParam.ReplUser,
+		Pwd:  b.GeneralParam.RuntimeAccountParam.ReplPwd,
+	}.Conn()
+	if err != nil {
+		logger.Error("connect master %s:%d failed,err:%s", b.Params.MasterHost, b.Params.MasterPort, err.Error())
+		return err
+	}
+	b.checkVars = []string{
+		"character_set_system", "character_set_server",
+		"collation_server", "character_set_client",
+	}
+	return nil
+}
+
+// CheckMSVersion compares the master and slave versions
+//
+//	@receiver b
+//	@return err
+func (b *BuildMSRelationComp) CheckMSVersion() (err error) {
+	slaveVersion, err := b.db.SelectVersion()
+	if err != nil {
+		return fmt.Errorf("get slave version failed:%w", err)
+	}
+	masterVersion, err := b.mdb.SelectVersion()
+	if err != nil {
+		return fmt.Errorf("get master version failed:%w", err)
+	}
+	return mysqlutil.VersionCompare(masterVersion, slaveVersion)
+}
+
+// CheckCharSet verifies that the master and slave character sets match
+//
+//	@receiver b
+//	@return err
+func (b *BuildMSRelationComp) CheckCharSet() (err error) {
+	return b.db.MySQLVarsCompare(b.mdb, b.checkVars)
+}
+
+// CheckCurrentSlaveStatus checks whether the instance already has a replication
+// relationship; without force=true this aborts
+//
+//	@receiver b
+//	@return err
+func (b *BuildMSRelationComp) CheckCurrentSlaveStatus() (err error) {
+	// check whether replication is already configured on this instance
+	slaveStatus, err := b.db.ShowSlaveStatus()
+	if err != nil {
+		logger.Error("%s show slave status failed: %s", util.AtWhere(), err.Error())
+		return err
+	}
+	var emptySlaveStatus native.ShowSlaveStatusResp
+	// no replication configured on this instance
+	if slaveStatus == emptySlaveStatus {
+		return nil
+	}
+	// without the force flag, any existing relationship is an error
+	if !b.Params.Force {
+		return fmt.Errorf("the instance already has a replication relationship")
+	}
+	logger.Info("show slave status Info is %v", slaveStatus)
+	// with force=true, run stop slave && reset slave directly
+	// Stop Slave
+	if err = b.stopSlave(); err != nil {
+		logger.Error("Force Change Master,Stop Slave Failed %s", err.Error())
+		return
+	}
+	// Reset Slave All
+	if err = b.resetSlaveAll(); err != nil {
+		logger.Error("Force Change Master,Reset Slave All Failed %s", err.Error())
+		return
+	}
+	return
+}
+
+/**
+ * @description: run change master to establish replication
+ * @return {*}
+ */
+func (b *BuildMSRelationComp) BuildMSRelation() (err error) {
+	logger.Info("begin change Master to %s:%d", b.Params.MasterHost, b.Params.Port)
+	changeMasterSql := b.getChangeMasterSql()
+	logger.Debug("change master sql: %s", changeMasterSql)
+	if _, err = b.db.Exec(changeMasterSql); err != nil {
+		logger.Error("change master to %s:%d failed,err:%s", b.Params.MasterHost, b.Params.MasterPort, err.Error())
+		return err
+	}
+	if err = b.startSlaveThread(!b.Params.NotStartIOThread, !b.Params.NotStartSQLThread); err != nil {
+		logger.Error("start slave failed:%s", err.Error())
+		return err
+	}
+	return
+}
+
+/**
+ * @description: poll show slave status repeatedly; establishing replication
+ * can be slow due to network and other factors
+ * @return {*}
+ */
+func (b *BuildMSRelationComp) CheckBuildOk() (err error) {
+	return util.Retry(
+		util.RetryConfig{Times: 60, DelayTime: 2 * time.Second},
+		func() error { return b.checkSlaveStatus() },
+	)
+}
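+
+// The retry above polls show slave status up to 60 times, 2 seconds apart,
+// i.e. a budget of roughly two minutes (assuming util.RetryConfig retries
+// Times attempts with DelayTime between them). The underlying pattern is
+// simply the following stand-alone sketch, not part of this component:
+//
+//	func poll(attempts int, delay time.Duration, probe func() error) (err error) {
+//		for i := 0; i < attempts; i++ {
+//			if err = probe(); err == nil {
+//				return nil
+//			}
+//			time.Sleep(delay)
+//		}
+//		return err
+//	}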
+
+/**
+ * @description: use show slave status to verify replication is healthy
+ * @return {*}
+ */
+func (b *BuildMSRelationComp) checkSlaveStatus() (err error) {
+	ss, err := b.db.ShowSlaveStatus()
+	if err != nil {
+		logger.Error("%s exec show slave status failed:%s", util.AtWhere(), err.Error())
+		return err
+	}
+	if !ss.ReplSyncIsOk() {
+		return fmt.Errorf("IOThread:%s,SQLThread:%s", ss.SlaveIORunning, ss.SlaveSQLRunning)
+	}
+	if b.Params.MaxTolerateDelay > 0 && int(ss.SecondsBehindMaster.Int64) > b.Params.MaxTolerateDelay {
+		return fmt.Errorf(
+			"replication threads are running, but slave delay %d exceeds the limit %d",
+			ss.SecondsBehindMaster.Int64,
+			b.Params.MaxTolerateDelay,
+		)
+	}
+	return nil
+}
+
+/**
+ * @description: run stop slave
+ * @return {*}
+ */
+func (b *BuildMSRelationComp) stopSlave() (err error) {
+	_, err = b.db.Exec("stop slave;")
+	return
+}
+
+/**
+ * @description: run start slave
+ * @return {*}
+ */
+func (b *BuildMSRelationComp) startSlave() (err error) {
+	_, err = b.db.Exec("start slave;")
+	return
+}
+
+func (b *BuildMSRelationComp) startSlaveThread(ioThread, sqlThread bool) (err error) {
+	if ioThread && sqlThread {
+		_, err = b.db.Exec("start slave;")
+	} else if ioThread {
+		_, err = b.db.Exec("start slave io_thread;")
+	} else if sqlThread {
+		_, err = b.db.Exec("start slave sql_thread;")
+	}
+	return
+}
+
+/**
+ * @description: run reset slave all
+ * @return {*}
+ */
+func (b *BuildMSRelationComp) resetSlaveAll() (err error) {
+	_, err = b.db.Exec("reset slave all;")
+	return
+}
+
+/**
+ * @description: assemble the change master SQL
+ * @return {*}
+ */
+func (b *BuildMSRelationComp) getChangeMasterSql() (changeMastersql string) {
+	replUser := b.GeneralParam.RuntimeAccountParam.ReplUser
+	replPwd := b.GeneralParam.RuntimeAccountParam.ReplPwd
+	changeMastersql = fmt.Sprintf(
+		`CHANGE MASTER TO MASTER_HOST='%s', 
+	MASTER_USER ='%s', 
+	MASTER_PASSWORD='%s',
+	MASTER_PORT=%d,MASTER_LOG_FILE='%s',
+	MASTER_LOG_POS=%d`,
+		b.Params.MasterHost, replUser, replPwd, b.Params.MasterPort, b.Params.BinFile, b.Params.BinPosition,
+	)
+
+	if b.Params.IsGtid {
+		// in GTID mode, build the replication command with MASTER_AUTO_POSITION
+		changeMastersql = fmt.Sprintf(
+			`CHANGE MASTER TO MASTER_HOST='%s', 
+		MASTER_USER ='%s', MASTER_PASSWORD='%s',MASTER_PORT=%d, MASTER_AUTO_POSITION = 1;`,
+			b.Params.MasterHost, replUser, replPwd, b.Params.MasterPort,
+		)
+	}
+	return
+}
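+
+// For reference, with the Example() values above, the position-based branch
+// renders roughly the following SQL (illustrative, whitespace trimmed):
+//
+//	CHANGE MASTER TO MASTER_HOST='1.1.1.2', MASTER_USER='repl',
+//	MASTER_PASSWORD='xxx', MASTER_PORT=3306,
+//	MASTER_LOG_FILE='binlog20000.001234', MASTER_LOG_POS=4
+//
+// The GTID branch swaps the file/position pair for MASTER_AUTO_POSITION = 1.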
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/check_instance_idle.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/check_instance_idle.go
new file mode 100644
index 0000000000..b0843023da
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/check_instance_idle.go
@@ -0,0 +1 @@
+package mysql
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/clean_mysql.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/clean_mysql.go
new file mode 100644
index 0000000000..303dcdaeff
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/clean_mysql.go
@@ -0,0 +1,152 @@
+package mysql
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/computil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"fmt"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// CleanMysqlComp converts BaseInputParam into the component parameters
+type CleanMysqlComp struct {
+	GeneralParam *components.GeneralParam `json:"general"`
+	Params       CleanMysqlParam          `json:"extend"`
+}
+
+// Example sample parameters
+func (c *CleanMysqlComp) Example() interface{} {
+	comp := CleanMysqlComp{
+		Params: CleanMysqlParam{
+			StopSlave:   true,
+			ResetSlave:  true,
+			Force:       false,
+			TgtInstance: &common.InstanceExample,
+		},
+		GeneralParam: &components.GeneralParam{
+			RuntimeAccountParam: components.RuntimeAccountParam{
+				MySQLAccountParam: common.AccountAdminExample,
+			},
+		},
+	}
+	return comp
+}
+
+// CleanMysqlParam drops every database on the target instance,
+// keeping system schemas such as mysql, infodba_schema and sys
+type CleanMysqlParam struct {
+	// whether to run stop slave
+	StopSlave bool `json:"stop_slave"`
+	// whether to run reset slave all
+	ResetSlave bool `json:"reset_slave"`
+	// whether to restart the instance after drop_database
+	Restart bool `json:"restart"`
+	// whether to force the cleanup when the instance is not idle
+	Force bool `json:"force"`
+	// whether to actually run drop database; this is the confirmation switch.
+	// When false, the drop statements are only printed to the output
+	DropDatabase     bool `json:"drop_database"`
+	CheckIntervalSec int  `json:"check_interval_sec"`
+	// target instance to clean
+	TgtInstance *native.Instance `json:"tgt_instance" validate:"required"`
+
+	checkDuration time.Duration
+
+	myCnf    *util.CnfFile
+	dbworker *native.DbWorker
+	instObj  *native.InsObject
+	// account  *components.RuntimeAccountParam
+}
+
+// Init loads my.cnf, connects to the target instance through its socket and applies defaults
+func (c *CleanMysqlComp) Init() error {
+	f := util.GetMyCnfFileName(c.Params.TgtInstance.Port)
+	c.Params.myCnf = &util.CnfFile{FileName: f}
+	if err := c.Params.myCnf.Load(); err != nil {
+		return err
+	}
+	dbSocket, err := c.Params.myCnf.GetMySQLSocket()
+	if err != nil {
+		return err
+	}
+	c.Params.instObj = &native.InsObject{
+		Host:   c.Params.TgtInstance.Host,
+		Port:   c.Params.TgtInstance.Port,
+		User:   c.GeneralParam.RuntimeAccountParam.AdminUser,
+		Pwd:    c.GeneralParam.RuntimeAccountParam.AdminPwd,
+		Socket: dbSocket,
+	}
+	dbw, err := c.Params.instObj.ConnBySocket()
+	if err != nil {
+		return err
+	}
+	c.Params.dbworker = dbw
+	if c.Params.CheckIntervalSec == 0 {
+		c.Params.CheckIntervalSec = 31
+	}
+	c.Params.checkDuration = time.Duration(c.Params.CheckIntervalSec) * time.Second
+	return nil
+}
+
+// PreCheck verifies the target instance is idle (no non-system connections);
+// with force=true a busy instance only logs a warning
+func (c *CleanMysqlComp) PreCheck() error {
+	if err := c.Params.instObj.CheckInstanceConnIdle(c.GeneralParam.RuntimeExtend.MySQLSysUsers,
+		c.Params.checkDuration); err != nil {
+		logger.Warn("clean_mysql precheck error %w", err)
+		if c.Params.Force {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// Start optionally stops and resets replication, then drops every non-system database.
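+// When drop_database is false this is a dry run: each statement is printed
+// instead of executed, e.g. (illustrative output for a hypothetical db "test"):
+//
+//	DROP DATABASE `test`; -- not run because drop_database=false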
+func (c *CleanMysqlComp) Start() error {
+	if c.Params.StopSlave {
+		if err := c.Params.dbworker.StopSlave(); err != nil {
+			return errors.WithMessage(err, "stop slave")
+		}
+	}
+	if c.Params.ResetSlave {
+		if err := c.Params.dbworker.ResetSlave(); err != nil {
+			return errors.WithMessage(err, "reset slave")
+		}
+	}
+
+	// list of databases planned for deletion
+	inStr, _ := mysqlutil.UnsafeBuilderStringIn(native.DBSys, "'")
+	dbsSql := fmt.Sprintf("select SCHEMA_NAME from information_schema.SCHEMATA where SCHEMA_NAME not in (%s)", inStr)
+
+	if databases, err := c.Params.dbworker.Query(dbsSql); err != nil {
+		if c.Params.dbworker.IsNotRowFound(err) {
+			return nil
+		} else {
+			return err
+		}
+	} else {
+		for _, dbName := range databases {
+			dropSQL := fmt.Sprintf("DROP DATABASE `%s`;", dbName["SCHEMA_NAME"])
+			logger.Warn("run sql %s", dropSQL)
+			if c.Params.DropDatabase {
+				if _, err := c.Params.dbworker.Exec(dropSQL); err != nil {
+					return errors.WithMessage(err, dropSQL)
+				}
+			} else {
+				fmt.Printf("%s -- not run because drop_database=false\n", dropSQL)
+			}
+		}
+		if c.Params.DropDatabase && c.Params.Restart {
+			if err := computil.RestartMysqlInstanceNormal(*c.Params.instObj); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/clear_instance_config.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/clear_instance_config.go
new file mode 100644
index 0000000000..1a60684bd4
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/clear_instance_config.go
@@ -0,0 +1,238 @@
+package mysql
+
+import (
+	"bytes"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/tools"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// ClearInstanceConfigComp cleans up instance configuration
+type ClearInstanceConfigComp struct {
+	GeneralParam *components.GeneralParam  `json:"general"`
+	Params       *ClearInstanceConfigParam `json:"extend"`
+	tools        *tools.ToolSet
+}
+
+// ClearInstanceConfigParam defines the input parameters of this atomic job
+type ClearInstanceConfigParam struct {
+	ClearPorts  []int  `json:"clear_ports" validate:"required,gt=0,dive"`
+	MachineType string `json:"machine_type"`
+}
+
+// Example sample parameters
+func (c *ClearInstanceConfigComp) Example() interface{} {
+	comp := ClearInstanceConfigComp{
+		Params: &ClearInstanceConfigParam{
+			ClearPorts: []int{20000, 20001},
+		},
+	}
+	return comp
+}
+
+// Init initializes the toolset
+func (c *ClearInstanceConfigComp) Init() (err error) {
+	c.tools = tools.NewToolSetWithPickNoValidate(tools.ToolMysqlTableChecksum, tools.ToolMySQLMonitor)
+	return nil
+}
+
+// DoClear dispatches the cleanup by machine type
+func (c *ClearInstanceConfigComp) DoClear() (err error) {
+	if c.Params.MachineType == "backend" || c.Params.MachineType == "remote" {
+		return c.clearBackend()
+	} else if c.Params.MachineType == "proxy" {
+		return c.clearProxy()
+	} else if c.Params.MachineType == "single" {
+		return c.clearSingle()
+	} else {
+		err = errors.Errorf("unsupported machine type %s", c.Params.MachineType)
+		logger.Error(err.Error())
+		return err
+	}
+}
+
+func (c *ClearInstanceConfigComp) clearBackend() (err error) {
+	err = c.clearMySQLMonitor()
+	if err != nil {
+		logger.Error("clear backend monitor failed: %s", err.Error())
+		return err
+	}
+	logger.Info("clear backed monitor success")
+
+	err = c.clearChecksum()
+	if err != nil {
+		logger.Error("clear backend checksum failed: %s", err.Error())
+		return err
+	}
+	logger.Info("clear backend checksum success")
+
+	err = c.clearRotateBinlog()
+	if err != nil {
+		logger.Error("clear backend rotate binlog failed: %s", err.Error())
+		return err
+	}
+	logger.Info("clear backend rotate binlog success")
+
+	err = c.clearDbBackup()
+	if err != nil {
+		logger.Error("clear backend dbbackup failed: %s", err.Error())
+		return err
+	}
+	logger.Info("clear backend dbbackup success")
+
+	return nil
+}
+
+func (c *ClearInstanceConfigComp) clearSingle() (err error) {
+	err = c.clearMySQLMonitor()
+	if err != nil {
+		logger.Error("clear backend monitor failed: %s", err.Error())
+		return err
+	}
+	logger.Info("clear backed monitor success")
+
+	err = c.clearRotateBinlog()
+	if err != nil {
+		logger.Error("clear backend rotate binlog failed: %s", err.Error())
+		return err
+	}
+	logger.Info("clear backend rotate binlog success")
+
+	err = c.clearDbBackup()
+	if err != nil {
+		logger.Error("clear backend dbbackup failed: %s", err.Error())
+		return err
+	}
+	logger.Info("clear backend dbbackup success")
+
+	return nil
+}
+
+func (c *ClearInstanceConfigComp) clearProxy() (err error) {
+	err = c.clearMySQLMonitor()
+	if err != nil {
+		logger.Error("clear proxy monitor failed: %s", err.Error())
+		return err
+	}
+	logger.Info("clear proxy monitor success")
+
+	return nil
+}
+
+func (c *ClearInstanceConfigComp) clearDbBackup() (err error) {
+	// remove the backup config of each instance
+	installBackupPath := path.Join(cst.MYSQL_TOOL_INSTALL_PATH, cst.BackupDir)
+
+	for _, port := range c.Params.ClearPorts {
+		backupFile := path.Join(installBackupPath, cst.GetNewConfigByPort(port))
+		err := os.Remove(backupFile)
+		if os.IsNotExist(err) {
+			// the file is already gone, skip it
+			logger.Warn("file no longer exists, skipping %s", backupFile)
+			continue
+		}
+		if err != nil {
+			logger.Error("remove file [%s] failed: %s", backupFile, err.Error())
+			return err
+		}
+		logger.Info("backup config file removed [%s]", backupFile)
+	}
+	return nil
+}
+
+func (c *ClearInstanceConfigComp) clearChecksum() (err error) {
+	mysqlTableChecksum, err := c.tools.Get(tools.ToolMysqlTableChecksum)
+	if err != nil {
+		logger.Warn("get %s failed: %s", tools.ToolMysqlTableChecksum, err.Error())
+		return nil
+	}
+
+	for _, port := range c.Params.ClearPorts {
+		unInstallTableChecksum := exec.Command(
+			mysqlTableChecksum,
+			"clean",
+			"--config", path.Join(
+				cst.ChecksumInstallPath,
+				fmt.Sprintf("checksum_%d.yaml", port),
+			),
+		)
+		var stdout, stderr bytes.Buffer
+		unInstallTableChecksum.Stdout = &stdout
+		unInstallTableChecksum.Stderr = &stderr
+
+		err = unInstallTableChecksum.Run()
+		if err != nil {
+			logger.Error(
+				"run %s failed: %s, %s",
+				unInstallTableChecksum, err.Error(), stderr.String(),
+			)
+			return err
+		}
+		logger.Info("run %s success: %s", unInstallTableChecksum, stdout.String())
+	}
+	return nil
+}
+
+func (c *ClearInstanceConfigComp) clearRotateBinlog() (err error) {
+	// remove the rotate_binlog config of each instance
+	installPath := path.Join(cst.MYSQL_TOOL_INSTALL_PATH, "rotate_binlog")
+	binPath := path.Join(installPath, string(tools.ToolRotatebinlog))
+	configFile := path.Join(installPath, "config.yaml")
+
+	clearPortString := strings.Replace(strings.Trim(fmt.Sprint(c.Params.ClearPorts), "[]"), " ", ",", -1)
+	cmd := fmt.Sprintf(
+		`%s -c %s --removeConfig %s`, binPath, configFile, clearPortString,
+	)
+
+	_, err = osutil.ExecShellCommand(false, cmd)
+	if err != nil {
+		logger.Error("remove rotate binlog config failed: %s", err.Error())
+		return err
+	}
+	logger.Info("remove rotate binlog config success [%s]", clearPortString)
+	return nil
+}
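+
+// For ClearPorts = [20000, 20001] the assembled command looks roughly like
+// (illustrative; the binary name comes from tools.ToolRotatebinlog and the
+// base path from cst.MYSQL_TOOL_INSTALL_PATH):
+//
+//	<install_path>/rotate_binlog/rotatebinlog -c <install_path>/rotate_binlog/config.yaml --removeConfig 20000,20001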
+
+func (c *ClearInstanceConfigComp) clearMySQLMonitor() (err error) {
+	mysqlMonitor, err := c.tools.Get(tools.ToolMySQLMonitor)
+	if err != nil {
+		logger.Error("get %s failed: %s", tools.ToolMySQLMonitor, err.Error())
+		return err
+	}
+
+	for _, port := range c.Params.ClearPorts {
+		unInstallMySQLMonitorCmd := exec.Command(
+			mysqlMonitor,
+			"clean",
+			"--config", path.Join(
+				cst.MySQLMonitorInstallPath,
+				fmt.Sprintf("monitor-config_%d.yaml", port),
+			),
+		)
+		var stdout, stderr bytes.Buffer
+		unInstallMySQLMonitorCmd.Stdout = &stdout
+		unInstallMySQLMonitorCmd.Stderr = &stderr
+
+		err = unInstallMySQLMonitorCmd.Run()
+		if err != nil {
+			logger.Error(
+				"run %s failed: %s, %s",
+				unInstallMySQLMonitorCmd, err.Error(), stderr.String(),
+			)
+			return err
+		}
+		logger.Info("run %s success: %s", unInstallMySQLMonitorCmd, stdout.String())
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common/common.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common/common.go
new file mode 100644
index 0000000000..21d0c2a16a
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common/common.go
@@ -0,0 +1,2 @@
+// Package common TODO
+package common
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common/helper_example.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common/helper_example.go
new file mode 100644
index 0000000000..f25e72782b
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common/helper_example.go
@@ -0,0 +1,51 @@
+package common
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+)
+
+// InstanceExample sample target instance
+var InstanceExample = native.Instance{
+	Host: "2.2.2.2",
+	Port: 3306,
+}
+
+// InstanceObjExample sample instance object with socket and account
+var InstanceObjExample = native.InsObject{
+	Host:   "1.1.1.1",
+	Port:   3306,
+	Socket: "/data1/mysqldata/3306/mysql.sock",
+	User:   "test",
+	Pwd:    "test",
+}
+
+// AccountRepl sample replication account
+var AccountRepl = components.MySQLReplAccount{ReplUser: "repl", ReplPwd: "xxx"}
+
+// AccountAdmin sample admin account
+var AccountAdmin = components.MySQLAdminAccount{AdminUser: "ADMIN", AdminPwd: "xxx"}
+
+// AccountReplExample sample repl account param
+var AccountReplExample = components.MySQLAccountParam{
+	MySQLReplAccount: AccountRepl,
+}
+
+// AccountAdminExample sample admin account param
+var AccountAdminExample = components.MySQLAccountParam{
+	MySQLAdminAccount: AccountAdmin,
+}
+
+// MySQLAdminReplExample sample admin plus repl account param
+var MySQLAdminReplExample = components.MySQLAccountParam{
+	MySQLAdminAccount: AccountAdmin,
+	MySQLReplAccount:  AccountRepl,
+}
+
+// AccountMonitorExample sample monitor account param
+var AccountMonitorExample = components.MySQLAccountParam{
+	MySQLMonitorAccount: components.MySQLMonitorAccount{
+		MonitorUser: "monitor",
+		MonitorPwd:  "monitor",
+	},
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common/types.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common/types.go
new file mode 100644
index 0000000000..df376c5ad8
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common/types.go
@@ -0,0 +1,34 @@
+package common
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+)
+
+// LooseBackupTypeList returns the list of accepted backup type spellings
+func LooseBackupTypeList() []string {
+	var ss []string
+	for _, t := range cst.LooseBackupTypes {
+		ss = append(ss, t...)
+	}
+	return ss
+}
+
+// LooseBackupTypeMap maps a non-canonical backup type to its canonical value,
+// case-insensitively
+func LooseBackupTypeMap(backupType string) string {
+	for t, v := range cst.LooseBackupTypes {
+		if util.StringsHasICase(v, backupType) {
+			return t
+		}
+	}
+	return backupType
+}
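+
+// Assuming cst.LooseBackupTypes maps canonical names to accepted spellings,
+// e.g. {"gztab": ["GZTAB", "gztab"]}, LooseBackupTypeMap("GZTAB") returns
+// "gztab", and an unrecognized input is returned unchanged.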
+
+// MapNameVarToConf godoc
+func MapNameVarToConf(varName string) string {
+	if val, ok := util.MycnfItemsMap[varName]; ok {
+		return val
+	}
+	return varName
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/cutover/base.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/cutover/base.go
new file mode 100644
index 0000000000..5df1821126
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/cutover/base.go
@@ -0,0 +1,517 @@
+package cutover
+
+import (
+	"database/sql"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"encoding/json"
+	"fmt"
+	"strings"
+	"time"
+)
+
+const (
+	// SwitchRelayCheckMaxSec max seconds for the relay log check during a switch
+	SwitchRelayCheckMaxSec = 10
+	// AllowedChecksumMaxOffset max number of checksum records allowed to differ
+	AllowedChecksumMaxOffset = 2
+	// AllowedTimestampMaxOffset max allowed timestamp offset, in seconds
+	AllowedTimestampMaxOffset = 5
+	// AllowedSlaveDelayMax max allowed slave delay, in seconds
+	AllowedSlaveDelayMax = 10
+	// AllowedTimeDelayMax max allowed time delay, in seconds
+	AllowedTimeDelayMax = 3600
+	// ExecSlowKbytes max allowed unexecuted binlog backlog
+	ExecSlowKbytes = 0
+)
+
+// Ins an instance together with its local db connection
+type Ins struct {
+	native.Instance
+	dbConn *native.DbWorker `json:"-"`
+}
+
+// Proxies a proxy instance together with its admin connection
+type Proxies struct {
+	native.Instance
+	proxyConn *native.ProxyAdminDbWork
+}
+
+// SwitchTmpAccount temporary super account for remote operations during the
+// switch; only connections from the slave host are allowed
+type SwitchTmpAccount struct {
+	User string `json:"user"`
+	Pwd  string `json:"pwd"`
+}
+
+// MasterInfo master instance and its connections
+type MasterInfo struct {
+	native.Instance
+	SwitchTmpAccount `json:"switch_account"`
+	dbConn           *native.DbWorker `json:"-"`
+	// read-only connection
+	readOnlyConn *native.DbWorker `json:"-"`
+}
+
+// AltSlaveInfo the candidate slave to switch to
+// Alt = Alternative: candidate
+type AltSlaveInfo struct {
+	native.Instance
+	dbConn *native.DbWorker `json:"-"`
+	Slave  *NewSlaveInfo    `json:"slave"`
+}
+
+// NewSlaveInfo the new slave attached to the candidate slave in a paired switch
+type NewSlaveInfo struct {
+	native.Instance
+	dbConn           *native.DbWorker `json:"-"`
+	SwitchTmpAccount `json:"switch_account"`
+}
+
+// InitConn initializes the db connection
+func (s *NewSlaveInfo) InitConn() (err error) {
+	s.dbConn, err = native.InsObject{
+		Host: s.Host,
+		Port: s.Port,
+		User: s.User,
+		Pwd:  s.Pwd,
+	}.Conn()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// MySQLClusterDetail basic cluster topology
+type MySQLClusterDetail struct {
+	// Proxy Instances
+	ProxyInstances []Proxies `json:"proxy_instances"  validate:"required,gt=0,dive"`
+	// master instance
+	MasterIns MasterInfo `json:"master_instance"  validate:"required"`
+	// candidate slave instance to switch to
+	AltSlaveIns AltSlaveInfo `json:"alt_slave_instance"  validate:"required"`
+	// all other slave instances besides the candidate
+	SlaveInstances []native.Instance `json:"slave_instance"`
+}
+
+// InitProxyConn initializes the proxy admin connections
+func (m *MySQLClusterDetail) InitProxyConn(user, pwd string) (err error) {
+	for i := 0; i < len(m.ProxyInstances); i++ {
+		p := m.ProxyInstances[i]
+		m.ProxyInstances[i].proxyConn, err = native.InsObject{
+			Host: p.Host,
+			Port: p.Port,
+			User: user,
+			Pwd:  pwd,
+		}.ConnProxyAdmin()
+		if err != nil {
+			logger.Error("connect proxy %s admin port %d failed: %s", p.Host, p.Port, err.Error())
+			return err
+		}
+	}
+	return err
+}
+
+// InitMasterConn initializes the master connection
+func (m *MySQLClusterDetail) InitMasterConn(user, pwd string) (err error) {
+	m.MasterIns.dbConn, err = native.InsObject{
+		Host: m.MasterIns.Host,
+		Port: m.MasterIns.Port,
+		User: user,
+		Pwd:  pwd,
+	}.Conn()
+	return err
+}
+
+// InitMasterOnlyReadConn initializes the read-only master connection
+func (m *MySQLClusterDetail) InitMasterOnlyReadConn(user, pwd string) (err error) {
+	m.MasterIns.readOnlyConn, err = native.InsObject{
+		Host: m.MasterIns.Host,
+		Port: m.MasterIns.Port,
+		User: user,
+		Pwd:  pwd,
+	}.Conn()
+	return err
+}
+
+// InitAltSlaveConn initializes the candidate slave connection
+func (m *MySQLClusterDetail) InitAltSlaveConn(user, pwd string) (err error) {
+	m.AltSlaveIns.dbConn, err = native.InsObject{
+		Host: m.AltSlaveIns.Host,
+		Port: m.AltSlaveIns.Port,
+		User: user,
+		Pwd:  pwd,
+	}.Conn()
+	return err
+}
+
+// InitAltSlaveSlaveConn initializes the connection to the candidate slave's own slave
+func (m *MySQLClusterDetail) InitAltSlaveSlaveConn(user, pwd string) (err error) {
+	m.AltSlaveIns.Slave.dbConn, err = native.InsObject{
+		Host: m.AltSlaveIns.Slave.Host,
+		Port: m.AltSlaveIns.Slave.Port,
+		User: user,
+		Pwd:  pwd,
+	}.Conn()
+	return err
+}
+
+// CheckBackends verifies every proxy points at the given backend
+func (m *MySQLClusterDetail) CheckBackends(host string, port int) (err error) {
+	for _, p := range m.ProxyInstances {
+		if err = p.proxyConn.CheckBackend(host, port); err != nil {
+			return err
+		}
+	}
+	return
+}
+
+// CheckAltSlaveMasterAddr checks that the candidate slave actually
+// replicates from the current master
+func (m *MySQLClusterDetail) CheckAltSlaveMasterAddr() (err error) {
+	ss, err := m.AltSlaveIns.dbConn.ShowSlaveStatus()
+	if err != nil {
+		return err
+	}
+	if strings.Compare(fmt.Sprintf("%s:%d", ss.MasterHost, ss.MasterPort), m.MasterIns.Addr()) != 0 {
+		msg := fmt.Sprintf(
+			"主从复制关系不正确,%s repl from %s,而不是%s",
+			m.AltSlaveIns.Addr(),
+			ss.MasterHost,
+			m.MasterIns.Addr(),
+		)
+		logger.Error(msg)
+		return fmt.Errorf("%s", msg)
+	}
+	return err
+}
+
+// UpdateProxiesBackend points every proxy backend at the given address
+func (m *MySQLClusterDetail) UpdateProxiesBackend(host string, port int) (err error) {
+	for _, p := range m.ProxyInstances {
+		if err = p.proxyConn.RefreshBackends(host, port); err != nil {
+			logger.Error("refreshBackend failed %s", err.Error())
+			return err
+		}
+		if err = p.proxyConn.CheckBackend(host, port); err != nil {
+			return err
+		}
+	}
+	return
+}
+
+// SetProxiesDefaultBackend resets every proxy backend to the
+// placeholder 1.1.1.1:3306
+func (m *MySQLClusterDetail) SetProxiesDefaultBackend() (err error) {
+	// proxy switch 1.1.1.1:3306
+	logger.Info("proxy backend switch to 1.1.1.1:3306")
+	err = util.Retry(
+		util.RetryConfig{Times: 5, DelayTime: 3 * time.Second},
+		func() error { return m.UpdateProxiesBackend("1.1.1.1", 3306) },
+	)
+	if err != nil {
+		logger.Error(
+			"update proxies[%#v] backend to %s get an error:%s",
+			m.ProxyInstances, "1.1.1.1:3306", err.Error(),
+		)
+		return err
+	}
+	return
+}
+
+// LockTablesPreCheck pre-checks before trying to lock tables
+func (c *MasterInfo) LockTablesPreCheck(backupUser string) (err error) {
+	// try to kill backup processlist entries first: a long-running backup
+	// session would block lock tables
+	if err = c.KillBackupUserProcesslist(backupUser); err != nil {
+		return err
+	}
+	// check for long-running connections
+	if err = c.FindLongQuery(); err != nil {
+		return err
+	}
+	return
+}
+
+// FlushTablesWithReadLock runs FLUSH TABLES WITH READ LOCK
+func (c *MasterInfo) FlushTablesWithReadLock() (err error) {
+	if _, err := c.dbConn.Exec("set lock_wait_timeout = 10;"); err != nil {
+		return err
+	}
+	err = util.Retry(
+		util.RetryConfig{Times: 10, DelayTime: 200 * time.Millisecond}, func() error {
+			_, err = c.dbConn.Exec("FLUSH TABLES;")
+			return err
+		},
+	)
+	if err != nil {
+		logger.Error("重试3次,每次间隔5秒,依然失败:%s", err.Error())
+		return err
+	}
+	if _, err := c.dbConn.Exec("FLUSH TABLES WITH READ LOCK;"); err != nil {
+		return err
+	}
+	return
+}
+
+// UnlockTables runs UNLOCK TABLES
+func (c *MasterInfo) UnlockTables() (err error) {
+	if _, err := c.dbConn.Exec("UNLOCK TABLES"); err != nil {
+		logger.Error("unlock table failed:%s", err.Error())
+		return err
+	}
+	return
+}
+
+// DropSwitchUser drops the temporary switch account
+func (c *MasterInfo) DropSwitchUser(userHost string) (err error) {
+	if _, err := c.dbConn.Exec(fmt.Sprintf("drop user %s;", userHost)); err != nil {
+		logger.Error("drop %s failed:%s", userHost, err.Error())
+		return err
+	}
+	return
+}
+
+// KillBackupUserProcesslist kills the backup user's processlist entries
+func (c *MasterInfo) KillBackupUserProcesslist(backupUser string) (err error) {
+	processLists, err := c.dbConn.SelectProcesslist([]string{backupUser})
+	if err != nil {
+		return err
+	}
+	if len(processLists) <= 0 {
+		logger.Info("没有发现关于备份用户[%s]相关的processlist~", backupUser)
+		return nil
+	}
+	var killSQLs []string
+	for _, processlist := range processLists {
+		killSQLs = append(killSQLs, fmt.Sprintf("Kill %d;", processlist.ID))
+	}
+	logger.Info("will kill processlist %v", killSQLs)
+	_, err = c.dbConn.ExecMore(killSQLs)
+	return
+}
+
+// FindLongQuery errors if long-running queries exist in the processlist
+func (c *MasterInfo) FindLongQuery() (err error) {
+	activeProcessLists, err := c.dbConn.SelectLongRunningProcesslist(10)
+	if err != nil {
+		return err
+	}
+	if len(activeProcessLists) <= 0 {
+		return nil
+	}
+	errMsg := []string{"active processlist exist:"}
+	for _, p := range activeProcessLists {
+		errMsg = append(
+			errMsg, fmt.Sprintf(
+				"[user:%s,time:%s,host:%s,db:%s,info:%s]",
+				p.User, p.Time, p.Host, realVal(p.DB), realVal(p.Info),
+			),
+		)
+	}
+	return fmt.Errorf("%s", strings.Join(errMsg, "\n"))
+}
+
+// realVal unwraps a sql.NullString, returning "" when invalid
+func realVal(v sql.NullString) string {
+	if !v.Valid {
+		return ""
+	}
+	return v.String
+}
+
+// GetChangeMasterSQL builds the change master SQL: after proxy traffic is cut
+// off, read the position on the candidate slave for the old master to change to
+// func (s *AltSlaveInfo) GetChangeMasterSQL(repluser, replpwd string) (changeSQL string, err error) {
+// 	pos, err := s.dbConn.ShowMasterStatus()
+// 	if err != nil {
+// 		logger.Error("执行show master status 失败!%s", err.Error())
+// 		return "", err
+// 	}
+// 	logger.Info("current pos is binlog_file:%s,binlog_pos:%d", pos.File, pos.Position)
+// 	changeSQL = fmt.Sprintf(`CHANGE MASTER TO
+// 						MASTER_HOST='%s',
+// 						MASTER_USER='%s',
+// 						MASTER_PASSWORD='%s',
+// 						MASTER_PORT=%d,
+// 						MASTER_LOG_FILE='%s',
+// 						MASTER_LOG_POS=%d;`, s.Host, repluser, replpwd, s.Port, pos.File, pos.Position)
+// 	return
+// }
+
+// RecordBinPos records the binlog position at switch time
+func (s *AltSlaveInfo) RecordBinPos() (binPosJsonStr string, err error) {
+	pos, err := s.dbConn.ShowMasterStatus()
+	if err != nil {
+		return "", err
+	}
+	logger.Info("show master status on %s, detail: File:%s,Pos:%d", s.Addr(), pos.File, pos.Position)
+	b, err := json.Marshal(pos)
+	if err != nil {
+		return "", err
+	}
+	changeSQL := fmt.Sprintf(
+		`CHANGE MASTER TO
+						MASTER_HOST='%s',
+						MASTER_USER='%s',
+						MASTER_PASSWORD='%s',
+						MASTER_PORT=%d,
+						MASTER_LOG_FILE='%s',
+						MASTER_LOG_POS=%d;`, s.Host, "{user}", "{pwd}", s.Port, pos.File, pos.Position,
+	)
+	logger.Info("change master sql: %s", changeSQL)
+	return string(b), nil
+}
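+
+// The returned string is the JSON-marshalled show master status result; the
+// exact field names depend on native's struct tags, but it looks roughly like
+// (illustrative):
+//
+//	{"File":"binlog20000.001234","Position":4}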
+
+// MSVersionCheck compares the master and slave versions
+func (m *MySQLClusterDetail) MSVersionCheck() (err error) {
+	masterVer, err := m.MasterIns.readOnlyConn.SelectVersion()
+	if err != nil {
+		return err
+	}
+	slaveVer, err := m.AltSlaveIns.dbConn.SelectVersion()
+	if err != nil {
+		return err
+	}
+	return mysqlutil.VersionCompare(masterVer, slaveVer)
+}
+
+// MSVarsCheck compares the master and slave variables
+func (m *MySQLClusterDetail) MSVarsCheck(checkVars []string) (err error) {
+	return m.AltSlaveIns.dbConn.MySQLVarsCompare(m.MasterIns.readOnlyConn, checkVars)
+}
+
+// MSCheck replication checks performed before the switch
+type MSCheck struct {
+	SlavedbConn          *native.DbWorker
+	NeedCheckSumRd       bool // checksum records must exist
+	AllowDiffCount       int  // max number of checksum records allowed to differ
+	AllowDelaySec        int  // max allowed replication delay, in seconds
+	AllowDelayBinlogByte int  // max allowed binlog backlog
+}
+
+// NewMsCheck builds an MSCheck with the default thresholds
+func NewMsCheck(dbConn *native.DbWorker) *MSCheck {
+	return &MSCheck{
+		SlavedbConn:          dbConn,
+		NeedCheckSumRd:       true,
+		AllowDiffCount:       AllowedChecksumMaxOffset,
+		AllowDelaySec:        AllowedSlaveDelayMax,
+		AllowDelayBinlogByte: ExecSlowKbytes,
+	}
+}
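+
+// Typical use on the candidate slave's connection (sketch; slaveConn is a
+// hypothetical *native.DbWorker):
+//
+//	if err := NewMsCheck(slaveConn).Check(); err != nil {
+//		// abort the switch
+//	}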
+
+// Check validates the binlog backlog, the checksum table and the slave delay
+func (s *MSCheck) Check() (err error) {
+	// check the replication delay binlog size
+	total, err := s.SlavedbConn.TotalDelayBinlogSize()
+	if err != nil {
+		logger.Error("get total delay binlog size failed %s", err.Error())
+		return err
+	}
+	if total > s.AllowDelayBinlogByte {
+		return fmt.Errorf("the total delay binlog size %d 超过了最大允许值 %d", total, s.AllowDelayBinlogByte)
+	}
+
+	// skip the checksum check when the slave is an empty instance
+	if s.SlavedbConn.IsEmptyInstance() {
+		logger.Info("replication is healthy and the slave is an empty instance; skipping the checksum check")
+		return nil
+	}
+	var cnt int
+	c := fmt.Sprintf(
+		"select count(distinct db, tbl) as cnt from %s.checksum where ts > date_sub(now(), interval 14 day)",
+		native.INFODBA_SCHEMA,
+	)
+	if err = s.SlavedbConn.Queryxs(&cnt, c); err != nil {
+		logger.Error("查询最近14天checkTable总数失败%s", err.Error())
+		return err
+	}
+
+	if !s.NeedCheckSumRd {
+		logger.Info("不需要检查校验记录. 获取到的CheckSum Record 总数为%d", cnt)
+	}
+
+	// if no checksum records are found, return an error
+	if cnt == 0 && s.NeedCheckSumRd {
+		logger.Warn("no checksum records found in the last 14 days")
+		return fmt.Errorf("master-slave checksum records are empty")
+	}
+
+	c = fmt.Sprintf(
+		`select count(distinct db, tbl,chunk) as cnt from %s.checksum
+			where (this_crc <> master_crc or this_cnt <> master_cnt)
+		  	and ts > date_sub(now(), interval 14 day);`, native.INFODBA_SCHEMA,
+	)
+	if err = s.SlavedbConn.Queryxs(&cnt, c); err != nil {
+		logger.Error("查询数据校验差异表失败: %s", err.Error())
+		return err
+	}
+	c = fmt.Sprintf(
+		`select check_result as slave_delay from %s.master_slave_check 
+			WHERE check_item='slave_delay_sec';`, native.INFODBA_SCHEMA,
+	)
+	if cnt > s.AllowDiffCount {
+		return fmt.Errorf("checksum 不同值的 chunk 个数是 %d, 超过了上限 %d", cnt, s.AllowDiffCount)
+	}
+	var delaysec int
+	if err = s.SlavedbConn.Queryxs(&delaysec, c); err != nil {
+		logger.Error("查询slave delay sec: %s", err.Error())
+		return err
+	}
+	if delaysec > s.AllowDelaySec {
+		return fmt.Errorf("slave 延迟时间 %d, 超过了上限 %d", delaysec, s.AllowDelaySec)
+	}
+	return nil
+}
+
+// CheckCheckSum checks the checksum table and replication status,
+// only on the candidate slave
+func (s AltSlaveInfo) CheckCheckSum() (err error) {
+	return NewMsCheck(s.dbConn).Check()
+}
+
+// CheckCheckSum checks the checksum table on the new slave
+func (s NewSlaveInfo) CheckCheckSum() (err error) {
+	return NewMsCheck(s.dbConn).Check()
+}
+
+// CompareMSBinPos compares the master and slave binlog positions
+func CompareMSBinPos(master MasterInfo, slave AltSlaveInfo) (err error) {
+	masterStatus, err := master.readOnlyConn.ShowMasterStatus()
+	if err != nil {
+		logger.Error("show master status on %s failed:%s", master.Addr(), err.Error())
+		return err
+	}
+
+	slaveStatus, err := slave.dbConn.ShowSlaveStatus()
+	if err != nil {
+		logger.Error("show slave status on %s failed:%s", slave.Addr(), err.Error())
+		return err
+	}
+	// compare which of the master's binlog files the slave has replayed up to
+	msg := fmt.Sprintf(
+		"Master Current BinlogFile:%s Slave SQL Thread Exec BinlogFile:%s",
+		masterStatus.File, slaveStatus.RelayMasterLogFile,
+	)
+	logger.Info(msg)
+	if strings.Compare(masterStatus.File, slaveStatus.RelayMasterLogFile) != 0 {
+		return fmt.Errorf("主从同步可能有差异," + msg)
+	}
+	// compare the master's current position with the position the slave has executed
+	msg = fmt.Sprintf(
+		"Master Current Pos:%d Slave SQL Thread Exec Pos:%d",
+		masterStatus.Position, slaveStatus.ExecMasterLogPos,
+	)
+	logger.Info(msg)
+	if masterStatus.Position != slaveStatus.ExecMasterLogPos {
+		return fmt.Errorf("主从执行的位点信息有差异%s", msg)
+	}
+	return err
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/cutover/cutover.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/cutover/cutover.go
new file mode 100644
index 0000000000..7e6e240290
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/cutover/cutover.go
@@ -0,0 +1,350 @@
+// Package cutover master switchover/failover,
+// executed on the slave node's host
+package cutover
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+	"time"
+)
+
+// CutOverParam input parameters for the switch
+type CutOverParam struct {
+	Host    string              `json:"host"  validate:"required,ip"`
+	Cluster *MySQLClusterDetail `json:"cluster"`
+	IsSafe  bool                `json:"is_safe"`
+	// whether the master is already dead
+	IsDeadMaster bool `json:"is_dead_master"`
+	// after the switch, whether to grant replication accounts on the old
+	// master and the other slaves
+	GrantRepl bool `json:"grant_repl"`
+	// whether to switch with tables locked
+	LockedSwitch bool `json:"locked_switch"`
+}
+
+// CutOverToSlaveComp switchover component
+type CutOverToSlaveComp struct {
+	GeneralParam *components.GeneralParam `json:"general"`
+	Params       *CutOverParam            `json:"extend"`
+	runCtx       `json:"-"`
+}
+type runCtx struct {
+	checkVars      []string
+	proxyAdminUser string
+	proxyAdminPwd  string
+	adminUser      string
+	adminPwd       string
+	replUser       string
+	replPwd        string
+	backupUser     string
+	cluster        *MySQLClusterDetail
+	// whether this is a paired switch (the candidate slave brings its own new slave)
+	isCutOverPair bool
+}
+
+// Init initializes connections and runtime accounts
+func (m *CutOverToSlaveComp) Init() (err error) {
+	m.cluster = m.Params.Cluster
+	m.proxyAdminUser = m.GeneralParam.RuntimeAccountParam.ProxyAdminUser
+	m.proxyAdminPwd = m.GeneralParam.RuntimeAccountParam.ProxyAdminPwd
+	m.adminUser = m.GeneralParam.RuntimeAccountParam.AdminUser
+	m.adminPwd = m.GeneralParam.RuntimeAccountParam.AdminPwd
+	m.replUser = m.GeneralParam.RuntimeAccountParam.ReplUser
+	m.replPwd = m.GeneralParam.RuntimeAccountParam.ReplPwd
+	m.backupUser = m.GeneralParam.RuntimeAccountParam.DbBackupUser
+	m.checkVars = []string{"character_set_server", "lower_case_table_names"}
+
+	if err = m.cluster.InitProxyConn(m.proxyAdminUser, m.proxyAdminPwd); err != nil {
+		logger.Error("connect alt proxies failed,err:%s ", err.Error())
+		return err
+	}
+
+	if err = m.cluster.InitAltSlaveConn(m.adminUser, m.adminPwd); err != nil {
+		logger.Error("connect alt slave  %s failed,err:%s ", m.cluster.AltSlaveIns.Addr(), err.Error())
+		return err
+	}
+	// for a locked switch, connect to the old master through the temporary
+	// switch account, which must have the privilege to lock tables
+	if m.Params.LockedSwitch {
+		switchUser := m.cluster.MasterIns.SwitchTmpAccount.User
+		switchPwd := m.cluster.MasterIns.SwitchTmpAccount.Pwd
+		if err = m.cluster.InitMasterConn(switchUser, switchPwd); err != nil {
+			logger.Error(
+				"connect %s from %s use %s account failed %s",
+				m.cluster.MasterIns.Addr(), m.Params.Host, switchUser, err.Error(),
+			)
+			)
+			return err
+		}
+	}
+	// for a paired migration, initialize the new slave's connection
+	if m.cluster.AltSlaveIns.Slave != nil {
+		if err = m.cluster.AltSlaveIns.Slave.InitConn(); err != nil {
+			logger.Error(
+				"connect %s from %s use %s account failed %s",
+				m.cluster.AltSlaveIns.Slave.Addr(), m.Params.Host, m.cluster.AltSlaveIns.Slave.User, err.Error(),
+			)
+			return err
+		}
+		m.isCutOverPair = true
+	}
+	return err
+}
+
+// Example sample parameters
+func (m *CutOverToSlaveComp) Example() interface{} {
+	comp := CutOverToSlaveComp{
+		Params: &CutOverParam{
+			Host: "1.1.1.2",
+			Cluster: &MySQLClusterDetail{
+				ProxyInstances: []Proxies{
+					{
+						Instance: native.Instance{
+							Host: "1.1.0.1",
+							Port: 10000,
+						},
+					},
+					{
+						Instance: native.Instance{
+							Host: "1.1.0.2",
+							Port: 10000,
+						},
+					},
+				},
+				MasterIns: MasterInfo{
+					Instance: native.Instance{
+						Host: "1.1.1.1",
+						Port: 3306,
+					},
+					SwitchTmpAccount: SwitchTmpAccount{
+						User: "",
+						Pwd:  "",
+					},
+				},
+				AltSlaveIns: AltSlaveInfo{
+					Instance: native.Instance{
+						Host: "1.1.1.2",
+						Port: 3306,
+					},
+					Slave: &NewSlaveInfo{
+						Instance: native.Instance{
+							Host: "1.1.1.5",
+							Port: 3306,
+						},
+						SwitchTmpAccount: SwitchTmpAccount{
+							User: "",
+							Pwd:  "",
+						},
+					},
+				},
+				SlaveInstances: []native.Instance{
+					{
+						Host: "1.1.1.3",
+						Port: 3306,
+					},
+					{
+						Host: "1.1.1.4",
+						Port: 3306,
+					},
+				},
+			},
+			IsSafe:       true,
+			IsDeadMaster: false,
+			LockedSwitch: true,
+		},
+	}
+	return comp
+}
+
+// PreCheck pre-switch validation
+func (m *CutOverToSlaveComp) PreCheck() (err error) {
+	// the following checks are mandatory
+	// verify the proxy backends point at the current master
+	if err := m.cluster.CheckBackends(m.cluster.MasterIns.Host, m.cluster.MasterIns.Port); err != nil {
+		return err
+	}
+	// verify the candidate slave replicates from cluster.MasterIns
+	if err := m.cluster.CheckAltSlaveMasterAddr(); err != nil {
+		return err
+	}
+
+	// in safe mode, validate the checksum table and business connections
+	if m.Params.IsSafe {
+		if err = m.cluster.AltSlaveIns.CheckCheckSum(); err != nil {
+			return err
+		}
+		prcsls, err := m.cluster.AltSlaveIns.dbConn.ShowApplicationProcesslist(
+			m.GeneralParam.RuntimeExtend.MySQLSysUsers)
+		if err != nil {
+			logger.Error("show processlist failed %s", err.Error())
+			return err
+		}
+		if len(prcsls) > 0 {
+			return fmt.Errorf("there is a connection for non system users %v", prcsls)
+		}
+		if m.isCutOverPair {
+			if err = m.cluster.AltSlaveIns.Slave.CheckCheckSum(); err != nil {
+				return err
+			}
+		}
+	}
+
+	// if the old master is already dead, skip the checks below
+	// (a dead master implies the candidate slave's replication is broken anyway)
+	if m.Params.IsDeadMaster {
+		// a dead master rules out a locked switch
+		m.Params.LockedSwitch = false
+		return nil
+	}
+
+	if err = m.cluster.AltSlaveIns.dbConn.CheckSlaveReplStatus(); err != nil {
+		logger.Error("检查主从同步状态出错: %s", err.Error())
+		return err
+	}
+
+	if m.isCutOverPair {
+		if err = m.cluster.AltSlaveIns.Slave.dbConn.CheckSlaveReplStatus(); err != nil {
+			return err
+		}
+	}
+	// initialize the read-only connection: fetch the master's variables from
+	// the slave side via the repl account
+	if err = m.cluster.InitMasterOnlyReadConn(m.replUser, m.replPwd); err != nil {
+		logger.Error("connect master %s failed, err:%s", m.cluster.MasterIns.Addr(), err.Error())
+		return err
+	}
+	// compare master and slave variables
+	if err = m.cluster.MSVarsCheck(m.checkVars); err != nil {
+		return err
+	}
+	// compare master and slave versions
+	if err = m.cluster.MSVersionCheck(); err != nil {
+		return err
+	}
+	// for a locked switch, run the lock tables pre-check
+	if m.Params.LockedSwitch {
+		return m.cluster.MasterIns.LockTablesPreCheck(m.backupUser)
+	}
+	return err
+}
+
+// CutOver performs the switch
+func (m *CutOverToSlaveComp) CutOver() (binPos string, err error) {
+	defer func() {
+		if m.Params.LockedSwitch {
+			m.cluster.MasterIns.UnlockTables()
+		}
+		if err != nil {
+			e := m.cluster.UpdateProxiesBackend(m.cluster.MasterIns.Host, m.cluster.MasterIns.Port)
+			if e != nil {
+				logger.Warn("rollback proxy backends failed %s", e.Error())
+				return
+			}
+			logger.Info("rollback proxy backend to %s successfully", m.cluster.MasterIns.Addr())
+		}
+	}()
+	// proxy switch 1.1.1.1:3306
+	logger.Info("proxy backend switch to 1.1.1.1:3306")
+	if err = m.cluster.SetProxiesDefaultBackend(); err != nil {
+		logger.Error(
+			"update proxies[%#v] backend to %s failed:%s",
+			m.cluster.ProxyInstances,
+			"1.1.1.1:3306",
+			err.Error(),
+		)
+		return "{}", err
+	}
+	// try to lock tables on the old master
+	if m.Params.LockedSwitch {
+		if err = m.cluster.MasterIns.FlushTablesWithReadLock(); err != nil {
+			logger.Error("locked %s tables failed:%s", m.cluster.MasterIns.Addr(), err.Error())
+			return "", err
+		}
+	}
+
+	if !m.Params.IsDeadMaster {
+		if err = m.cluster.AltSlaveIns.dbConn.CheckSlaveReplStatus(); err != nil {
+			logger.Error("再次检查下主从状态 %s", err.Error())
+			return "", err
+		}
+		fn := func() error {
+			return CompareMSBinPos(m.cluster.MasterIns, m.cluster.AltSlaveIns)
+		}
+		err = util.Retry(util.RetryConfig{Times: 10, DelayTime: 100 * time.Millisecond}, fn)
+		if err != nil {
+			logger.Error("主从binlog位点有差异 %s", err.Error())
+			return "", err
+		}
+	}
+
+	// record cutover bin pos
+	if binPos, err = m.cluster.AltSlaveIns.RecordBinPos(); err != nil {
+		logger.Error("获取切换时候的位点信息失败: %s", err.Error())
+		return
+	}
+	// switch the proxy backends to the candidate slave
+	logger.Info("proxy backend switch to %s", m.cluster.AltSlaveIns.Addr())
+	err = util.Retry(
+		util.RetryConfig{Times: 20, DelayTime: 500 * time.Millisecond},
+		func() error {
+			return m.cluster.UpdateProxiesBackend(m.cluster.AltSlaveIns.Host, m.cluster.AltSlaveIns.Port)
+		},
+	)
+	if err != nil {
+		logger.Error(
+			"update proxies[%#v] backend to %s get an error:%s",
+			m.cluster.ProxyInstances, m.cluster.AltSlaveIns.Addr(), err.Error(),
+		)
+		return "{}", err
+	}
+	return binPos, err
+}
+
+// StopAndResetSlave runs stop slave && reset slave on the candidate slave
+// after a successful switch
+func (m *CutOverToSlaveComp) StopAndResetSlave() (err error) {
+	// stop slave
+	if err = m.cluster.AltSlaveIns.dbConn.StopSlave(); err != nil {
+		logger.Error("stop slave failed %s", err.Error())
+		return err
+	}
+	// reset slave
+	if err = m.cluster.AltSlaveIns.dbConn.ResetSlave(); err != nil {
+		logger.Error("reset slave failed %s", err.Error())
+		return err
+	}
+	return
+}
+
+// GrantRepl grants replication accounts on the other instances after the
+// switch, so they can later change master to the new master
+func (m *CutOverToSlaveComp) GrantRepl() (err error) {
+	var hosts []string
+	if !m.Params.IsDeadMaster {
+		hosts = []string{m.cluster.MasterIns.Host}
+	}
+	for _, ins := range m.cluster.SlaveInstances {
+		hosts = append(hosts, ins.Host)
+	}
+	for _, host := range hosts {
+		g := grant.GrantReplComp{
+			GeneralParam: m.GeneralParam,
+			Params: &grant.GrantReplParam{
+				Host:      m.cluster.AltSlaveIns.Host,
+				Port:      m.cluster.AltSlaveIns.Port,
+				ReplHosts: []string{host},
+			},
+		}
+		if err = g.Init(); err != nil {
+			logger.Error("%s:grant repl,init db conn failed:%s", host, err.Error())
+			return
+		}
+		defer g.Db.Db.Close()
+		if err = g.GrantRepl(); err != nil {
+			logger.Error("%s:grant repl failed:%s", host, err.Error())
+			return err
+		}
+	}
+	return err
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/backup_index.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/backup_index.go
new file mode 100644
index 0000000000..7c836bdd49
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/backup_index.go
@@ -0,0 +1,203 @@
+package dbbackup
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"sort"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// BackupIndexFile godoc
+type BackupIndexFile struct {
+	BackupType    string `json:"backup_type"`
+	StorageEngine string `json:"storage_engine"`
+	MysqlVersion  string `json:"mysql_version"`
+
+	BackupCharset string `json:"backup_charset"`
+	BkBizId       string `json:"bk_biz_id"`
+	// unique uuid
+	BackupId        string `json:"backup_id"`
+	BillId          string `json:"bill_id"`
+	ClusterId       int    `json:"cluster_id"`
+	BackupHost      string `json:"backup_host"`
+	BackupPort      int    `json:"backup_port"`
+	MysqlRole       string `json:"mysql_role"`
+	DataSchemaGrant string `json:"data_schema_grant"`
+	// consistent backup time point; may be empty for physical backups
+	ConsistentBackupTime string `json:"consistent_backup_time"`
+	BackupBeginTime      string `json:"backup_begin_time"`
+	BackupEndTime        string `json:"backup_end_time"`
+	TotalFilesize        uint64 `json:"total_filesize"`
+
+	FileList   []IndexFileItem  `json:"file_list"`
+	BinlogInfo BinlogStatusInfo `json:"binlog_info"`
+
+	indexFilePath string
+	// backupFiles {data: {file1: obj, file2: obj}, priv: {}}
+	backupFiles map[string][]IndexFileItem
+	// directory name of the unpacked backup, relative
+	backupBasename string
+	// root directory holding the backup files, e.g. /data/dbbak
+	backupDir  string
+	targetDir  string
+	splitParts []string
+	tarParts   []string
+}
+
+// IndexFileItem godoc
+type IndexFileItem struct {
+	BackupFileName string `json:"backup_file_name"`
+	BackupFileSize int64  `json:"backup_file_size"`
+	TarFileName    string `json:"tar_file_name"`
+	// TarFileSize    int64  `json:"tar_file_size"`
+	DBTable  string `json:"db_table"`
+	FileType string `json:"file_type" enums:"schema,data,metadata,priv"`
+}
+
+// BinlogStatusInfo master status and slave status
+type BinlogStatusInfo struct {
+	ShowMasterStatus *StatusInfo `json:"show_master_status"`
+	ShowSlaveStatus  *StatusInfo `json:"show_slave_status"`
+}
+
+// StatusInfo detailed binlog information
+type StatusInfo struct {
+	BinlogFile string `json:"binlog_file"`
+	BinlogPos  string `json:"binlog_pos"`
+	Gtid       string `json:"gtid"`
+	MasterHost string `json:"master_host"`
+	MasterPort int    `json:"master_port"`
+}
+
+// String renders the status for logging
+func (s *BinlogStatusInfo) String() string {
+	return fmt.Sprintf("BinlogStatusInfo{MasterStatus:%+v, SlaveStatus:%+v}", s.ShowMasterStatus, s.ShowSlaveStatus)
+}
+
+// ParseBackupIndexFile reads the index file at indexFilePath (fileDir/fileName)
+func ParseBackupIndexFile(indexFilePath string, indexObj *BackupIndexFile) error {
+	fileDir, fileName := filepath.Split(indexFilePath)
+	bodyBytes, err := os.ReadFile(indexFilePath)
+	if err != nil {
+		return err
+	}
+	if err := json.Unmarshal(bodyBytes, indexObj); err != nil {
+		logger.Error("fail to read index file to struct: %s", fileName)
+		// return err
+	}
+
+	indexObj.indexFilePath = indexFilePath
+	indexObj.backupBasename = strings.TrimSuffix(fileName, ".index")
+	indexObj.backupDir = fileDir
+	// indexObj.targetDir = filepath.Join(fileDir, indexObj.backupBasename)
+
+	indexObj.backupFiles = make(map[string][]IndexFileItem)
+	for _, fileItem := range indexObj.FileList {
+		indexObj.backupFiles[fileItem.FileType] = append(indexObj.backupFiles[fileItem.FileType], fileItem)
+	}
+	logger.Info("backupBasename=%s, backupType=%s, charset=%s",
+		indexObj.backupBasename, indexObj.BackupType, indexObj.BackupCharset)
+	return indexObj.ValidateFiles()
+}
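+
+// A minimal, hand-written sketch of an .index payload this parser accepts;
+// the values are illustrative, not from a real backup, and the field names
+// follow the json tags on BackupIndexFile and IndexFileItem above:
+//
+//	{
+//	  "backup_type": "physical",
+//	  "backup_host": "1.1.1.1",
+//	  "backup_port": 3306,
+//	  "file_list": [
+//	    {"backup_file_name": "data.0", "backup_file_size": 1024,
+//	     "tar_file_name": "backup.0.tar", "file_type": "data"}
+//	  ]
+//	}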
+
+// GetTarFileList returns the tar file names recorded in the index;
+// fileType="" returns all of them
+func (f *BackupIndexFile) GetTarFileList(fileType string) []string {
+	fileNamelist := []string{}
+	if fileType == "" {
+		for _, fileItem := range f.FileList {
+			fileNamelist = append(fileNamelist, fileItem.TarFileName)
+		}
+		return util.UniqueStrings(fileNamelist)
+	}
+	fileList := f.backupFiles[fileType]
+	for _, item := range fileList {
+		fileNamelist = append(fileNamelist, item.TarFileName)
+	}
+	return util.UniqueStrings(fileNamelist)
+}
+
+// ValidateFiles checks that the files are consecutive, exist on disk and have valid sizes
+// splitParts example:  [a.part_1, a.part_2]
+// tarParts example:  [a.0.tar  a.1.tar]
+func (f *BackupIndexFile) ValidateFiles() error {
+	var errFiles []string
+	reSplitPart := regexp.MustCompile(ReSplitPart)
+	reTarPart := regexp.MustCompile(ReTarPart) // a single tar file also lands here
+	// allFileList := f.GetTarFileList("")
+	tarPartsWithoutSuffix := []string{} // remove .tar suffix from tar to get no. sequence
+	for _, tarFile := range f.FileList {
+		if fSize := cmutil.GetFileSize(filepath.Join(f.backupDir, tarFile.TarFileName)); fSize < 0 {
+			errFiles = append(errFiles, tarFile.TarFileName)
+			continue
+		} // else if fSize != tarFile.TarFileSize {}
+		if reSplitPart.MatchString(tarFile.TarFileName) {
+			f.splitParts = append(f.splitParts, tarFile.TarFileName)
+		} else if reTarPart.MatchString(tarFile.TarFileName) {
+			tarPartsWithoutSuffix = append(tarPartsWithoutSuffix, strings.TrimSuffix(tarFile.TarFileName, ".tar"))
+			f.tarParts = append(f.tarParts, tarFile.TarFileName)
+		}
+	}
+	if len(errFiles) != 0 {
+		return errors.Errorf("files not found in %s: %v", f.backupDir, errFiles)
+	}
+	sort.Strings(f.splitParts)
+	sort.Strings(f.tarParts)
+
+	if len(f.splitParts) >= 2 { // check that the parts are consecutive
+		fileSeqList := util.GetSuffixWithLenAndSep(f.splitParts, "_", 0)
+		if err := util.IsConsecutiveStrings(fileSeqList, true); err != nil {
+			return err
+		}
+	}
+	if len(tarPartsWithoutSuffix) >= 2 {
+		fileSeqList := util.GetSuffixWithLenAndSep(tarPartsWithoutSuffix, "_", 0)
+		if err := util.IsConsecutiveStrings(fileSeqList, true); err != nil {
+			return err
+		}
+	}
+	return nil
+}
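+
+// Example: splitParts [a.part_1, a.part_2, a.part_4] would fail the
+// consecutive check because part_3 is missing, while [a.part_1, a.part_2]
+// passes (assuming util.IsConsecutiveStrings rejects gaps in the numeric
+// suffixes extracted by util.GetSuffixWithLenAndSep).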
+
+// UntarFiles merge and untar
+// set targetDir
+func (f *BackupIndexFile) UntarFiles(untarDir string) error {
+	if untarDir == "" {
+		return errors.Errorf("untar target dir should not be emtpy")
+	}
+	f.targetDir = filepath.Join(untarDir, f.backupBasename)
+	if cmutil.FileExists(f.targetDir) {
+		return errors.Errorf("target untar path already exists %s", f.targetDir)
+	}
+	// physical backup: merge the split parts and untar
+	if len(f.splitParts) > 0 {
+		// TODO consider rate limiting with pv
+		cmd := fmt.Sprintf(`cd %s && cat %s | tar -xf - -C %s/`, f.backupDir, strings.Join(f.splitParts, " "), untarDir)
+		if _, err := osutil.ExecShellCommand(false, cmd); err != nil {
+			return errors.Wrap(err, cmd)
+		}
+	}
+	if len(f.tarParts) > 0 {
+		for _, p := range f.tarParts {
+			cmd := fmt.Sprintf(`cd %s && tar -xf %s -C %s/`, f.backupDir, p, untarDir)
+			if _, err := osutil.ExecShellCommand(false, cmd); err != nil {
+				return errors.Wrap(err, cmd)
+			}
+		}
+	}
+
+	if !cmutil.FileExists(f.targetDir) {
+		return errors.Errorf("targetDir %s is not ready", f.targetDir)
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/backup_info.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/backup_info.go
new file mode 100644
index 0000000000..0f9630d6d8
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/backup_info.go
@@ -0,0 +1,253 @@
+package dbbackup
+
+import (
+	"bufio"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// BackupFile a backup file and its metadata
+type BackupFile struct {
+	Filename string    `json:"filename"`
+	FileInfo *FileInfo `json:"file_info"`
+}
+
+// FileInfo file metadata recorded in the backup info file
+type FileInfo struct {
+	Md5           string `json:"md5"`
+	Size          string `json:"size"`
+	CreateTime    string `json:"createTime"`
+	FileLastMtime string `json:"file_last_mtime"`
+	SourceIp      string `json:"source_ip"`
+	SourcePort    string `json:"source_port"`
+}
+
+// InfoFileDetail save info in the MYSQL_FULL_BACKUP .info file
+type InfoFileDetail struct {
+	App        string            `json:"app"`
+	Charset    string            `json:"charset"`
+	DbList     []string          `json:"dbList"`
+	Cmd        string            `json:"cmd"`
+	BackupType string            `json:"backupType"`
+	BackupRole string            `json:"backupRole"`
+	FileInfo   map[string]string `json:"fileInfo"` // {"somefile.tar":"md5_value", "file": "md5"}
+	FullyStamp string            `json:"fullyStamp"`
+	FullName   string            `json:"fullName"`
+	BackupHost string
+	BackupPort int
+	StartTime  string
+
+	flagTar        bool
+	backupBasename string
+
+	infoFilePath string // InfoFileDetail full path filename
+	fileList     []BackupFile
+	// backupFiles, full: [], info:[], priv:[]
+	backupFiles map[string][]string
+
+	backupDir string // directory containing the backup
+	targetDir string
+}
+
+// ParseBackupInfoFile reads the .info file
+// infoFilePath must be a full path
+func ParseBackupInfoFile(infoFilePath string, infoObj *InfoFileDetail) error {
+	fileDir, fileName := filepath.Split(infoFilePath)
+	f, err := os.Open(infoFilePath)
+	if err != nil {
+		return errors.Wrap(err, infoFilePath) // os.IsNotExist(err) || os.IsPermission(err)
+	}
+	defer f.Close()
+
+	scanner := bufio.NewScanner(f)
+	reg := regexp.MustCompile(`^(\w+)\s*=\s*(.*)$`)
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+		array := reg.FindStringSubmatch(line)
+		if len(array) != 3 {
+			continue
+		}
+		k, v := array[1], array[2]
+
+		switch k {
+		case "APP":
+			infoObj.App = v
+		case "CHARSET":
+			infoObj.Charset = v
+		case "DBLIST":
+			dblist := util.SplitAnyRune(v, " ")
+			dblist = util.RemoveEmpty(dblist)
+			infoObj.DbList = dblist
+		case "CMD":
+			infoObj.Cmd = v
+		case "BACKTYPE":
+			infoObj.BackupType = common.LooseBackupTypeMap(v)
+			if util.StringsHasICase(common.LooseBackupTypeList(), v) {
+				infoObj.flagTar = true
+			}
+		case "BACKROLE":
+			infoObj.BackupRole = v
+		case "FILE_INFO":
+			res := make([]map[string]string, 0)
+			if err := json.Unmarshal([]byte(v), &res); err != nil {
+				return fmt.Errorf("unmarshal file info failed, data:%s, err:%s", v, err.Error())
+			}
+			fileMap := make(map[string]string)
+			for _, row := range res {
+				for k, v := range row {
+					fileMap[k] = v
+				}
+			}
+			infoObj.FileInfo = fileMap
+		case "FULLY_STAMP":
+			infoObj.FullyStamp = v
+		case "FULLY_NAME":
+			infoObj.FullName = v
+		default:
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return fmt.Errorf("scan file %s failed, err:%s", infoFilePath, err.Error())
+	}
+	if err := infoObj.parseBackupInstance(); err != nil {
+		return err
+	}
+	infoObj.infoFilePath = infoFilePath
+	infoObj.backupBasename = strings.TrimSuffix(fileName, ".info")
+	infoObj.backupDir = fileDir
+	// infoObj.targetDir = filepath.Join(fileDir, infoObj.backupBasename)
+	if err := infoObj.getFullFileListFromInfo(false); err != nil {
+		return err
+	}
+	return infoObj.ValidateFiles()
+}
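+
+// A minimal, hand-written sketch of a .info file this parser accepts (values
+// are illustrative; keys match the switch cases above and lines must fit the
+// `KEY = value` pattern):
+//
+//	APP = testapp
+//	CHARSET = utf8
+//	DBLIST = db1 db2
+//	BACKTYPE = GZTAB
+//	BACKROLE = slave
+//	CMD = ... --gztab=/data1/dbbak/DBHA_host-1_127.0.0.1_20000_20220831_200425
+//	FILE_INFO = [{"backup.part.0":"md5value0"},{"backup.part.1":"md5value1"}]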
+
+// parseBackupInstance extracts backupHost, backupPort and startTime from cmd
+func (i *InfoFileDetail) parseBackupInstance() error {
+	var reg *regexp.Regexp
+	if i.BackupType == cst.TypeGZTAB {
+		// --gztab=/data1/dbbak/DBHA_host-1_127.0.0.1_20000_20220831_200425
+		reg = regexp.MustCompile(`gztab=.*_(\d+\.\d+\.\d+\.\d+)_(\d+)_(\d+_\d+).*`)
+	} else if i.BackupType == cst.TypeXTRA {
+		// --target-dir=/data1/dbbak/DBHA_host-1_127.0.0.1_20000_20220907_040332_xtra
+		reg = regexp.MustCompile(`target-dir=.*_(\d+\.\d+\.\d+\.\d+)_(\d+)_(\d+_\d+).*`)
+	} else {
+		return fmt.Errorf("uknown backup type %s", i.BackupType)
+	}
+	m := reg.FindStringSubmatch(i.Cmd)
+	if len(m) != 4 {
+		return fmt.Errorf("failed to get host:port from %s", i.Cmd)
+	}
+	i.BackupHost = m[1]
+	i.BackupPort, _ = strconv.Atoi(m[2])
+	timeLayout := `20060102_150405`
+	timeLayoutNew := `2006-01-02 15:04:05`
+	if t, e := time.Parse(timeLayout, m[3]); e != nil {
+		return fmt.Errorf("backup start_time parse failed %s", m[3])
+	} else {
+		i.StartTime = t.Format(timeLayoutNew)
+	}
+	return nil
+}
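+
+// Worked example for the regex above: with a gztab Cmd containing
+// "--gztab=/data1/dbbak/DBHA_host-1_127.0.0.1_20000_20220831_200425",
+// the submatches are m[1]="127.0.0.1" (BackupHost), m[2]="20000" (BackupPort),
+// and m[3]="20220831_200425", which layout "20060102_150405" parses and
+// reformats into StartTime "2022-08-31 20:04:25".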
+
+// ValidateFiles godoc
+// If a .info file is given, it is taken as the source of truth.
+// After it runs, fullFileList holds the sorted full-backup file names and
+// backupFiles[MYSQL_FULL_BACKUP] holds the info for all of the files.
+// For full backups it also validates that the file sequence is consecutive.
+func (i *InfoFileDetail) ValidateFiles() error {
+	var errFiles []string
+
+	// backupFiles[MYSQL_FULL_BACKUP] may be passed in as a parameter or read from the .info file
+	fullFileList := i.backupFiles[MYSQL_FULL_BACKUP]
+	if len(fullFileList) == 0 {
+		return fmt.Errorf("expect more than one full file but got %v", fullFileList)
+	} else if len(fullFileList) >= 2 { // check that the files are consecutive
+		fileSeqList := util.GetSuffixWithLenAndSep(fullFileList, ".", 0)
+		if err := util.IsConsecutiveStrings(fileSeqList, true); err != nil {
+			return err
+		}
+	}
+
+	// check that each file exists
+	// only a simple size check: the md5 should already be verified right after download, so it is not re-checked here
+	for _, f := range fullFileList {
+		if cmutil.GetFileSize(filepath.Join(i.backupDir, f)) < 0 {
+			errFiles = append(errFiles, f)
+		}
+	}
+	if len(errFiles) > 0 {
+		return errors.Errorf("error files: %v", errFiles)
+	}
+	return nil
+}
+
+// getFullFileListFromInfo collects the full-backup file names from the .info content
+// it relies on the parsed infoFile
+func (i *InfoFileDetail) getFullFileListFromInfo(checkMD5 bool) error {
+
+	var fullFiles []BackupFile // TODO: verify md5 here rather than passing it out; remove later
+	var fullFileNames []string
+	for fname, fmd5 := range i.FileInfo {
+		f := BackupFile{Filename: fname, FileInfo: &FileInfo{Md5: fmd5}}
+		fullFiles = append(fullFiles, f)
+		fullFileNames = append(fullFileNames, fname)
+	}
+	if len(fullFiles) == 0 {
+		return errors.New("full files not found")
+	}
+	i.backupFiles[MYSQL_FULL_BACKUP] = fullFileNames
+	// sort.Strings(fullFileList)
+	return nil
+}
+
+// UntarFiles merges and untars the backup files, and sets targetDir.
+// untarDir is the directory to extract into; e.g. with untarDir = /data1/dbbak,
+// the result after extraction is targetDir = /data1/dbbak/xxx_xxx_xxx/
+func (i *InfoFileDetail) UntarFiles(untarDir string) error {
+	// TODO: check available disk space
+
+	// cat aa.0 aa.1 | tar -xf - -C workdir/
+	var cmd string
+	if untarDir == "" {
+		return errors.Errorf("untar target dir should not be emtpy")
+	}
+	i.targetDir = filepath.Join(untarDir, i.backupBasename)
+
+	if cmutil.FileExists(i.targetDir) {
+		return errors.Errorf("target untar path already exists %s", i.targetDir)
+	}
+	fullFileList := i.backupFiles[MYSQL_FULL_BACKUP]
+
+	if len(fullFileList) >= 2 {
+		cmd = fmt.Sprintf(
+			`cd %s && cat %s | tar -xf - -C %s/`,
+			i.backupDir, strings.Join(fullFileList, " "), untarDir,
+		)
+	} else {
+		cmd = fmt.Sprintf(
+			`cd %s && tar -xf %s -C %s/`,
+			i.backupDir, strings.Join(fullFileList, " "), untarDir,
+		)
+	}
+	if _, err := osutil.ExecShellCommand(false, cmd); err != nil {
+		return errors.Wrap(err, cmd)
+	}
+	if !cmutil.FileExists(i.targetDir) {
+		return errors.Errorf("targetDir %s is not ready", i.targetDir)
+	}
+	return nil
+}
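+
+// For illustration, with backupDir=/data1/dbbak, untarDir=/data1/dbbak and a
+// split backup [base.tar.0 base.tar.1] (names invented), the command is:
+//
+//	cd /data1/dbbak && cat base.tar.0 base.tar.1 | tar -xf - -C /data1/dbbak/
+//
+// while a single-file backup [base.tar] produces:
+//
+//	cd /data1/dbbak && tar -xf base.tar -C /data1/dbbak/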
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/cst.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/cst.go
new file mode 100644
index 0000000000..b1f6f233fd
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/cst.go
@@ -0,0 +1,32 @@
+package dbbackup
+
+const (
+	// ReSplitPart physical.part_0
+	ReSplitPart = `(.+)(\.part_\d+)`
+	// ReTarPart xxxx_logical_0.tar physical.tar
+	// ReTarPart = `(.+)_(\d+)\.tar`
+	ReTarPart = `(.+)\.tar$`
+)
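+
+// Illustrative matches (file names invented): ReSplitPart on
+// "physical.part_0" captures "physical" and ".part_0"; ReTarPart on
+// "xxxx_logical_0.tar" captures "xxxx_logical_0".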
+
+const (
+	// MYSQL_FULL_BACKUP key for full backup files
+	MYSQL_FULL_BACKUP string = "full"
+	// INCREMENT_BACKUP key for incremental backup files
+	INCREMENT_BACKUP string = "incr"
+	// MYSQL_PRIV_FILE key for the privilege file
+	MYSQL_PRIV_FILE string = "priv"
+	// MYSQL_INFO_FILE key for the .info file
+	MYSQL_INFO_FILE string = "info"
+	// BACKUP_INDEX_FILE key for the .index file
+	BACKUP_INDEX_FILE string = "index"
+)
+
+const (
+	// DBRoleMaster master instance
+	DBRoleMaster = "Master"
+	// DBRoleSlave slave instance
+	DBRoleSlave = "Slave"
+	// DBRoleRelay relay node
+	DBRoleRelay = "Relay"
+
+)
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/dbbackup.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/dbbackup.go
new file mode 100644
index 0000000000..37374d270f
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/dbbackup.go
@@ -0,0 +1,2 @@
+// Package dbbackup TODO
+package dbbackup
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/types.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/types.go
new file mode 100644
index 0000000000..9ad2ec7ea0
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup/types.go
@@ -0,0 +1,104 @@
+package dbbackup
+
+// LogicBackupDataOption what a logical backup should dump
+type LogicBackupDataOption struct {
+	// "grant,schema,data"
+	DataSchemaGrant string `json:"DataSchemaGrant"`
+}
+
+// Cnf the config for dumping a backup
+type Cnf struct {
+	Public        CnfShared        `json:"Public" ini:"Public" validate:"required"`
+	BackupClient  CnfBackupClient  `json:"BackupClient" ini:"BackupClient" validate:"required"`
+	LogicalBackup CnfLogicalBackup `json:"LogicalBackup" ini:"LogicalBackup" validate:"required"`
+	// LogicalLoad          CnfLogicalLoad          `json:"LogicalLoad" ini:"LogicalLoad"`
+	PhysicalBackup CnfPhysicalBackup `json:"PhysicalBackup" ini:"PhysicalBackup"`
+}
+
+// CnfShared the [Public] section shared by all backup types
+type CnfShared struct {
+	BkBizId         string `ini:"BkBizId"`
+	BkCloudId       string `ini:"BkCloudId"`
+	BillId          string `ini:"BillId"`
+	BackupId        string `ini:"BackupId"`
+	ClusterAddress  string `ini:"ClusterAddress"`
+	ClusterId       string `ini:"ClusterId"`
+	MysqlHost       string `ini:"MysqlHost"`
+	MysqlPort       string `ini:"MysqlPort"`
+	MysqlUser       string `ini:"MysqlUser"`
+	MysqlPasswd     string `ini:"MysqlPasswd"`
+	DataSchemaGrant string `ini:"DataSchemaGrant"`
+	BackupDir       string `ini:"BackupDir" validate:"required"`
+	MysqlRole       string `ini:"MysqlRole"`
+	MysqlCharset    string `ini:"MysqlCharset"`
+	BackupTimeOut   string `ini:"BackupTimeout" validate:"required,time"`
+	BackupType      string `ini:"BackupType"`
+	OldFileLeftDay  string `ini:"OldFileLeftDay"`
+	// TarSizeThreshold tar files are split into packages of this size, in MB
+	TarSizeThreshold uint64 `ini:"TarSizeThreshold" validate:"required,gte=128"`
+	// IOLimitMBPerSec tar or split default io limit, mb/s. 0 means no limit
+	IOLimitMBPerSec  int    `ini:"IOLimitMBPerSec"`
+	ResultReportPath string `ini:"ResultReportPath"`
+	StatusReportPath string `ini:"StatusReportPath"`
+}
+
+// CnfBackupClient the [BackupClient] section for the backup upload client
+type CnfBackupClient struct {
+	FileTag          string `ini:"FileTag"`
+	RemoteFileSystem string `ini:"RemoteFileSystem"`
+	DoChecksum       string `ini:"DoChecksum"`
+}
+
+// CnfLogicalBackup the config of logical backup
+type CnfLogicalBackup struct {
+	// ChunkFilesize Split tables into chunks of this output file size. This value is in MB
+	ChunkFilesize   uint64 `ini:"ChunkFilesize"`
+	Regex           string `ini:"Regex"`
+	Threads         int    `ini:"Threads"`
+	DisableCompress bool   `ini:"DisableCompress"`
+	FlushRetryCount int    `ini:"FlushRetryCount"`
+	DefaultsFile    string `ini:"DefaultsFile"`
+	// ExtraOpt other mydumper options string to be appended
+	ExtraOpt string `ini:"ExtraOpt"`
+}
+
+// CnfPhysicalBackup the config of physical backup
+type CnfPhysicalBackup struct {
+	// Threads the --parallel option used to copy files
+	Threads int `ini:"Threads"`
+	// SplitSpeed tar split limit in MB/s, default 300
+	SplitSpeed int64 `ini:"SplitSpeed"`
+	// Throttle limits the number of chunks copied per second. The chunk size is 10 MB, 0 means no limit
+	Throttle     int    `ini:"Throttle"`
+	DefaultsFile string `ini:"DefaultsFile" validate:"required,file"`
+	// ExtraOpt other xtrabackup options string to be appended
+	ExtraOpt string `ini:"ExtraOpt"`
+}
+
+// CnfLogicalLoad the config of logical loading
+type CnfLogicalLoad struct {
+	MysqlHost     string `ini:"MysqlHost"`
+	MysqlPort     string `ini:"MysqlPort"`
+	MysqlUser     string `ini:"MysqlUser"`
+	MysqlPasswd   string `ini:"MysqlPasswd"`
+	MysqlCharset  string `ini:"MysqlCharset"`
+	MysqlLoadDir  string `ini:"MysqlLoadDir"`
+	Threads       int    `ini:"Threads"`
+	Regex         string `ini:"Regex"`
+	EnableBinlog  bool   `ini:"EnableBinlog"`
+	IndexFilePath string `ini:"IndexFilePath" validate:"required"`
+	// ExtraOpt other myloader options string to be appended
+	ExtraOpt string `json:"ExtraOpt"`
+}
+
+// CnfPhysicalLoad the config of physical loading
+type CnfPhysicalLoad struct {
+	MysqlLoadDir string `ini:"MysqlLoadDir" validate:"required"`
+	Threads      int    `ini:"Threads"`
+	// CopyBack use copy-back or move-back
+	CopyBack      bool   `ini:"CopyBack"`
+	IndexFilePath string `ini:"IndexFilePath" validate:"required,file"`
+	DefaultsFile  string `ini:"DefaultsFile" validate:"required"`
+	// ExtraOpt other xtrabackup recover options string to be appended
+	ExtraOpt string `json:"ExtraOpt"`
+}
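+
+// A minimal sketch of the ini file these structs map to, with invented values
+// (section and key names follow the ini tags above):
+//
+//	[Public]
+//	MysqlHost = 127.0.0.1
+//	MysqlPort = 20000
+//	BackupDir = /data/dbbak
+//	BackupTimeout = 09:00:00
+//	BackupType = logical
+//	TarSizeThreshold = 8192
+//
+//	[BackupClient]
+//	FileTag = MYSQL_FULL_BACKUP
+//
+//	[LogicalBackup]
+//	ChunkFilesize = 2048
+//	Threads = 4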
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/deploy_mysql_crond.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/deploy_mysql_crond.go
new file mode 100644
index 0000000000..1245e51668
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/deploy_mysql_crond.go
@@ -0,0 +1,348 @@
+package mysql
+
+import (
+	"bytes"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/tools"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"os/exec"
+	"path"
+	"text/template"
+	"time"
+
+	"github.com/pkg/errors"
+	"gopkg.in/yaml.v2"
+)
+
+// DeployMySQLCrondComp mysql-crond deployment component
+type DeployMySQLCrondComp struct {
+	GeneralParam *components.GeneralParam `json:"general_param"`
+	Params       *DeployMySQLCrondParam   `json:"params"`
+	tools        *tools.ToolSet
+}
+
+// DeployMySQLCrondParam deployment parameters
+type DeployMySQLCrondParam struct {
+	components.Medium
+	Ip        string `json:"ip"`
+	BkCloudId int    `json:"bk_cloud_id"`
+	// EventName        string `json:"event_name"`
+	EventDataId    int    `json:"event_data_id"`
+	EventDataToken string `json:"event_data_token"`
+	// MetricsName      string `json:"metrics_name"`
+	MetricsDataId    int    `json:"metrics_data_id"`
+	MetricsDataToken string `json:"metrics_data_token"`
+	BeatPath         string `json:"beat_path"`
+	AgentAddress     string `json:"agent_address"`
+	BkBizId          int    `json:"bk_biz_id"`
+}
+
+// Init initializes the tool binary location
+func (c *DeployMySQLCrondComp) Init() (err error) {
+	c.tools = tools.NewToolSetWithPickNoValidate(tools.ToolMySQLCrond)
+	return nil
+}
+
+// Precheck verifies the package checksum
+func (c *DeployMySQLCrondComp) Precheck() (err error) {
+	if err = c.Params.Check(); err != nil {
+		logger.Error("check mysql-crond pkg failed: %s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// DeployBinary unpacks and installs the binary
+func (c *DeployMySQLCrondComp) DeployBinary() (err error) {
+	err = os.MkdirAll(cst.MySQLCrondInstallPath, 0755)
+	if err != nil {
+		logger.Error("mkdir %s failed: %s", cst.MySQLCrondInstallPath, err.Error())
+		return err
+	}
+
+	decompressCmd := fmt.Sprintf(
+		`tar zxf %s -C %s`,
+		c.Params.Medium.GetAbsolutePath(), cst.MySQLCrondInstallPath,
+	)
+	_, err = osutil.ExecShellCommand(false, decompressCmd)
+	if err != nil {
+		logger.Error("decompress mysql-crond pkg failed: %s", err.Error())
+		return err
+	}
+
+	chownCmd := fmt.Sprintf(`chown -R mysql %s`, cst.MySQLCrondInstallPath)
+	_, err = osutil.ExecShellCommand(false, chownCmd)
+	if err != nil {
+		logger.Error("chown %s to mysql failed: %s", cst.MySQLCrondInstallPath, err.Error())
+		return err
+	}
+	return nil
+}
+
+// GeneralRuntimeConfig generates the runtime config
+func (c *DeployMySQLCrondComp) GeneralRuntimeConfig() (err error) {
+	t, err := template.ParseFiles(path.Join(cst.MySQLCrondInstallPath, "mysql-crond.conf.go.tpl"))
+	if err != nil {
+		logger.Error("read mysql-crond runtime config template failed: %s", err.Error())
+		return err
+	}
+
+	f, err := os.OpenFile(
+		path.Join(cst.MySQLCrondInstallPath, "runtime.yaml"),
+		os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
+		0644,
+	)
+	if err != nil {
+		logger.Error("create mysql-crond runtime.yaml failed: %s", err.Error())
+		return err
+	}
+
+	err = t.Execute(
+		f,
+		struct {
+			IP        string
+			BkCloudId int
+			// EventName        string
+			EventDataId    int
+			EventDataToken string
+			// MetricsName      string
+			MetricsDataId    int
+			MetricsDataToken string
+			LogPath          string
+			PidPath          string
+			InstallPath      string
+			BeatPath         string
+			AgentAddress     string
+		}{
+			IP:        c.Params.Ip,
+			BkCloudId: c.Params.BkCloudId,
+			// EventName:        c.Params.EventName,
+			EventDataId:    c.Params.EventDataId,
+			EventDataToken: c.Params.EventDataToken,
+			// MetricsName:      c.Params.MetricsName,
+			MetricsDataId:    c.Params.MetricsDataId,
+			MetricsDataToken: c.Params.MetricsDataToken,
+			LogPath:          path.Join(cst.MySQLCrondInstallPath, "logs"),
+			PidPath:          cst.MySQLCrondInstallPath,
+			InstallPath:      cst.MySQLCrondInstallPath,
+			BeatPath:         c.Params.BeatPath,
+			AgentAddress:     c.Params.AgentAddress,
+		},
+	)
+	if err != nil {
+		logger.Error("execute template for mysql-crond failed: %s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// TouchJobsConfig creates an empty jobs config if none exists
+func (c *DeployMySQLCrondComp) TouchJobsConfig() (err error) {
+	jobsConfigFilePath := path.Join(cst.MySQLCrondInstallPath, "jobs-config.yaml")
+	if _, err = os.Stat(jobsConfigFilePath); errors.Is(err, os.ErrNotExist) {
+		jc := struct {
+			Jobs    []int `yaml:"jobs"` // not really int, but it does not matter: the list is empty, just a placeholder
+			BkBizId int   `yaml:"bk_biz_id"`
+		}{
+			Jobs:    nil,
+			BkBizId: c.Params.BkBizId,
+		}
+		content, err := yaml.Marshal(jc)
+		if err != nil {
+			logger.Error("marshal init jobs config file failed: %s", err.Error())
+			return err
+		}
+
+		f, err := os.OpenFile(
+			jobsConfigFilePath,
+			os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
+			0644,
+		)
+		if err != nil {
+			logger.Error("create jobs config failed: %s", err.Error())
+			return err
+		}
+
+		_, err = f.Write(content)
+		if err != nil {
+			logger.Error("write jobs config failed: %s", err.Error())
+			return err
+		}
+	}
+	return nil
+}
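+
+// With gopkg.in/yaml.v2, the file written above serializes a nil slice as an
+// empty list, so the initial jobs-config.yaml comes out as (bk_biz_id value
+// depends on the params):
+//
+//	jobs: []
+//	bk_biz_id: 100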
+
+// Start starts the mysql-crond process
+func (c *DeployMySQLCrondComp) Start() (err error) {
+	chownCmd := fmt.Sprintf(`chown -R mysql %s`, cst.MySQLCrondInstallPath)
+	_, err = osutil.ExecShellCommand(false, chownCmd)
+	if err != nil {
+		logger.Error("chown %s to mysql failed: %s", cst.MySQLCrondInstallPath, err.Error())
+		return err
+	}
+
+	/*
+		start mysql-crond in the foreground first,
+		to verify it can start normally and to capture errors easily
+	*/
+	errChan := make(chan error)
+	go func() {
+		cmd := exec.Command(
+			"su", "-", "mysql", "-c",
+			fmt.Sprintf(
+				`%s -c %s`,
+				path.Join(cst.MySQLCrondInstallPath, "mysql-crond"),
+				path.Join(cst.MySQLCrondInstallPath, "runtime.yaml"),
+			),
+		)
+		var stderr bytes.Buffer
+		cmd.Stderr = &stderr
+		err := cmd.Run()
+		if err != nil {
+			errChan <- errors.Wrap(err, stderr.String())
+		}
+		errChan <- nil
+	}()
+
+	started := false
+LabelSelectLoop:
+	for i := 1; i <= 10; i++ {
+		select {
+		case err := <-errChan:
+			if err != nil {
+				logger.Error("start mysql-crond failed: %s", err.Error())
+				return err
+			}
+		case <-time.After(1 * time.Second):
+			logger.Info("try to connect mysql-crond %d times", i)
+			_, err := http.Get("http://127.0.0.1:9999/entries")
+			if err != nil {
+				logger.Info("try to connect mysql-crond %d times failed: %s", i, err.Error())
+				break
+			}
+			started = true
+			logger.Info("try to connect mysql-crond %d times success", i)
+			break LabelSelectLoop
+		}
+	}
+
+	if !started {
+		err := errors.Errorf("start mysql-crond failed: try to connect too many times")
+		logger.Error(err.Error())
+		return err
+	}
+
+	// shut down the foreground mysql-crond
+	resp, err := http.Get("http://127.0.0.1:9999/quit")
+	if err != nil {
+		logger.Error("call quit failed: %s", err.Error())
+		return err
+	}
+	defer func() {
+		_ = resp.Body.Close()
+	}()
+
+	if resp.StatusCode != 200 {
+		err := errors.Errorf("quit api returned unexpected status: %s", resp.Status)
+		logger.Error(err.Error())
+		return err
+	}
+
+	// quit takes about 10s to finish, so wait a bit longer
+	time.Sleep(15 * time.Second)
+
+	// confirm the listen port is closed
+	logger.Info("check mysql-crond bind port")
+	closed := false
+	for i := 1; i <= 5; i++ {
+		logger.Info("check mysql-crond port %d times", i)
+		conn, err := net.DialTimeout("tcp", "127.0.0.1:9999", 1*time.Second)
+		if err != nil {
+			logger.Info("port closed")
+			closed = true
+			break
+		}
+		_ = conn.Close() // dial succeeded, so the port is still open; close and retry
+		logger.Info("port opened, try later")
+		time.Sleep(2 * time.Second)
+	}
+
+	if !closed {
+		err := errors.Errorf("mysql-crond quit failed, confirm port close too many times")
+		logger.Error(err.Error())
+		return err
+	}
+
+	// now start mysql-crond in the background for real
+	cmd := exec.Command(
+		"su", []string{
+			"-", "mysql", "-c", // mysql 写死
+			fmt.Sprintf(
+				`%s -c %s`,
+				path.Join(cst.MySQLCrondInstallPath, "start.sh"),
+				path.Join(cst.MySQLCrondInstallPath, "runtime.yaml"),
+			),
+		}...,
+	)
+	err = cmd.Run()
+	if err != nil {
+		logger.Error("start mysql-crond failed: %s", err.Error())
+		return err
+	}
+
+	// check connectivity again
+	started = false
+	for i := 1; i <= 10; i++ {
+		logger.Info("try to connect mysql-crond %d times", i)
+		_, err := http.Get("http://127.0.0.1:9999/entries")
+		if err != nil {
+			logger.Info("try to connect mysql-crond %d times failed: %s", i, err.Error())
+			time.Sleep(2 * time.Second)
+			continue
+		}
+		started = true
+		logger.Info("try to connect mysql-crond %d times success", i)
+		break
+	}
+
+	if !started {
+		err := errors.Errorf("start mysql-crond failed: try to connect too many times")
+		logger.Error(err.Error())
+		return err
+	}
+
+	logger.Info("mysql-crond started")
+
+	return nil
+}
+
+// Example returns an example payload
+func (c *DeployMySQLCrondComp) Example() interface{} {
+	return DeployMySQLCrondComp{
+		GeneralParam: &components.GeneralParam{
+			RuntimeAccountParam: components.RuntimeAccountParam{
+				MySQLAccountParam: common.AccountMonitorExample,
+			},
+		},
+		Params: &DeployMySQLCrondParam{
+			Medium: components.Medium{
+				Pkg:    "mysql-crond.tar.gz",
+				PkgMd5: "12345",
+			},
+			Ip:        "127.0.0.1",
+			BkCloudId: 0,
+			// EventName:        "mysql_crond_event",
+			EventDataId:    123,
+			EventDataToken: "abc",
+			// MetricsName:      "mysql_crond_beat",
+			MetricsDataId:    456,
+			MetricsDataToken: "xyz",
+		},
+	}
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/drop_large_table.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/drop_large_table.go
new file mode 100644
index 0000000000..239e9b0d81
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/drop_large_table.go
@@ -0,0 +1,69 @@
+package mysql
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"fmt"
+)
+
+// DropLargeTableComp component for dropping large tables safely
+type DropLargeTableComp struct {
+	GeneralParam *components.GeneralParam `json:"general"`
+	Params       DropTableParam           `json:"extend"`
+}
+
+// DropTableParam godoc
+// 1. show create table
+// 2. rename table, create table
+// 3. create hard links to the data files
+// 4. drop table
+// 5. remove the hard links and the original files with rate limiting
+type DropTableParam struct {
+	Database   string   `json:"database" validate:"required"`
+	Tables     []string `json:"tables" validate:"required"`
+	LargeTable bool     `json:"large_table"`
+	// deletion speed limit, MB/s
+	BWLimitMB int `json:"bwlimit_mb"`
+	// files larger than this many MB count as large and are removed with rate-limited truncation
+	LargeTableSizeMB int `json:"large_table_size_mb"`
+	// whether to keep the table schema, equivalent to truncate table
+	KeepSchema bool `json:"keep_schema"`
+
+	// "table1"
+	fileList map[string][]*linkFiles
+}
+
+type linkFiles struct {
+	srcFile  string
+	destFile string
+}
+
+// select @@datadir
+// select SPACE,NAME,FILE_SIZE from INNODB_SYS_TABLESPACES where NAME like 'query_analyzer/%'
+// query_analyzer/query_history#P#p202206 .ibd
+//
+
+func (d *DropTableParam) dropInnodbTable() error {
+	// innodb_file_per_table
+
+	for _, fileList := range d.fileList {
+		for _, file := range fileList {
+			file.destFile = fmt.Sprintf("%s.__drop__", file.srcFile)
+			// osutil.MakeHardLink(file.srcFile, file.destFile)
+			// osutil.TruncateFile(file.srcFile, d.BWLimit)
+		}
+	}
+
+	return nil
+}
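+
+// A minimal self-contained sketch of the hard-link-then-shrink idea outlined
+// above, using only the standard library ("slowRemove", the step size and the
+// 1s pacing are assumptions for illustration, not this package's API; assumes
+// bwLimitMB > 0 and needs "os" and "time" imports):
+//
+//	func slowRemove(src string, bwLimitMB int) error {
+//		link := src + ".__drop__"
+//		if err := os.Link(src, link); err != nil { // hard link keeps the data alive
+//			return err
+//		}
+//		// DROP TABLE only unlinks src; shrink the link step by step so the
+//		// filesystem frees space at roughly bwLimitMB MB/s instead of all at once.
+//		st, err := os.Stat(link)
+//		if err != nil {
+//			return err
+//		}
+//		step := int64(bwLimitMB) << 20
+//		for size := st.Size() - step; ; size -= step {
+//			if size < 0 {
+//				size = 0
+//			}
+//			if err := os.Truncate(link, size); err != nil {
+//				return err
+//			}
+//			if size == 0 {
+//				break
+//			}
+//			time.Sleep(time.Second)
+//		}
+//		return os.Remove(link)
+//	}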
+
+func dropTokudbTable() error {
+	return nil
+}
+
+func dropMyisamTable() error {
+	return nil
+}
+
+func dropRocksdbTable() error {
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/excute_sql_file.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/excute_sql_file.go
new file mode 100644
index 0000000000..38d7aa92f2
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/excute_sql_file.go
@@ -0,0 +1,283 @@
+// Package mysql TODO
+//
+//	ignore_dbnames: db names to skip during the change, supports pattern matching [db1,db2,db3%]
+//	dbnames: db names the change applies to
+package mysql
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/computil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"path"
+	"regexp"
+	"strings"
+)
+
+// ExcuteSQLFileComp component that executes SQL files
+type ExcuteSQLFileComp struct {
+	GeneralParam            *components.GeneralParam `json:"general"`
+	Params                  *ExcuteSQLFileParam      `json:"extend"`
+	ExcuteSQLFileRunTimeCtx `json:"-"`
+}
+
+// ExcuteSQLFileParam parameters
+type ExcuteSQLFileParam struct {
+	Host          string             `json:"host"  validate:"required,ip"`             // host of the current instance
+	Ports         []int              `json:"ports"`                                    // all ports on the machine that need the change
+	CharSet       string             `json:"charset" validate:"required,checkCharset"` // character set
+	FilePath      string             `json:"file_path"`                                // file path
+	ExcuteObjects []ExcuteSQLFileObj `json:"execute_objects"`
+	Force         bool               `json:"force"`     // force execution: keep going after an error
+	IsSpider      bool               `json:"is_spider"` // whether this is a spider cluster
+}
+
+// ExcuteSQLFileObj execution object for a single file;
+// one run can apply multiple files to different databases
+type ExcuteSQLFileObj struct {
+	SQLFile       string   `json:"sql_file"`       // name of the change file
+	IgnoreDbNames []string `json:"ignore_dbnames"` // db names to exclude from the change, supports wildcards
+	DbNames       []string `json:"dbnames"`        // db names the change applies to, supports wildcards
+}
+
+// ExcuteSQLFileRunTimeCtx runtime context
+type ExcuteSQLFileRunTimeCtx struct {
+	ports                []int
+	dbConns              map[Port]*native.DbWorker
+	vermap               map[Port]string // data version of each instance
+	charsetmap           map[Port]string // character set of each instance
+	socketmap            map[Port]string // socket value of each instance
+	taskdir              string
+	RegularIgnoreDbNames []string
+	RegularDbNames       []string
+}
+
+// Example returns an example payload
+func (e *ExcuteSQLFileComp) Example() interface{} {
+	return ExcuteSQLFileComp{
+		GeneralParam: &components.GeneralParam{},
+		Params: &ExcuteSQLFileParam{
+			Host:     "127.0.0.1",
+			Ports:    []int{3306, 3307},
+			CharSet:  "utf8",
+			FilePath: "/data/workspace",
+			ExcuteObjects: []ExcuteSQLFileObj{
+				{
+					SQLFile:       "111.sql",
+					IgnoreDbNames: []string{"a%"},
+					DbNames:       []string{"db1", "db2"},
+				},
+			},
+			Force:    false,
+			IsSpider: false,
+		},
+	}
+}
+
+// Init connects every port and records version, charset and socket
+func (e *ExcuteSQLFileComp) Init() (err error) {
+	e.ports = make([]int, len(e.Params.Ports))
+	e.dbConns = make(map[int]*native.DbWorker)
+	e.vermap = make(map[int]string)
+	e.socketmap = make(map[int]string)
+	e.charsetmap = make(map[int]string)
+
+	copy(e.ports, e.Params.Ports)
+	for _, port := range e.ports {
+		var ver, charset, socket string
+		dbConn, err := native.InsObject{
+			Host: e.Params.Host,
+			Port: port,
+			User: e.GeneralParam.RuntimeAccountParam.AdminUser,
+			Pwd:  e.GeneralParam.RuntimeAccountParam.AdminPwd,
+		}.Conn()
+		if err != nil {
+			logger.Error("Connect %d failed:%s", port, err.Error())
+			return err
+		}
+		if ver, err = dbConn.SelectVersion(); err != nil {
+			logger.Error("获取实例版本失败:%s", err.Error())
+			return err
+		}
+
+		charset = e.Params.CharSet
+		if e.Params.CharSet == "default" {
+			if charset, err = dbConn.ShowServerCharset(); err != nil {
+				logger.Error("获取实例的字符集失败:%s", err.Error())
+				return err
+			}
+		}
+		if socket, err = dbConn.ShowSocket(); err != nil {
+			logger.Error("获取socket value 失败:%s", err.Error())
+			return err
+		}
+		if !cmutil.FileExists(socket) {
+			socket = ""
+		}
+
+		e.dbConns[port] = dbConn
+		e.vermap[port] = ver
+		e.socketmap[port] = socket
+		e.charsetmap[port] = charset
+		e.taskdir = strings.TrimSpace(e.Params.FilePath)
+		if e.taskdir == "" {
+			e.taskdir = cst.BK_PKG_INSTALL_PATH
+		}
+	}
+	return nil
+}
+
+// MvFile2TaskDir moves the files delivered by gse into taskdir
+//
+//	@receiver e
+//	@receiver err
+// func (e *ExcuteSQLFileComp) MvFile2TaskDir(taskdir string) (err error) {
+// 	e.taskdir = path.Join(cst.BK_PKG_INSTALL_PATH, taskdir)
+// 	if err = os.MkdirAll(e.taskdir, os.ModePerm); err != nil {
+// 		logger.Error("init task dir %s failed: %s", e.taskdir, err.Error())
+// 		return
+// 	}
+// 	for _, o := range e.Params.ExcuteObjects {
+// 		if err = os.Rename(path.Join(cst.BK_PKG_INSTALL_PATH, o.SQLFile), path.Join(e.taskdir, o.SQLFile)); err != nil {
+// 			logger.Error("move SQL file %s to %s failed: %s", o.SQLFile, e.taskdir, err.Error())
+// 			return
+// 		}
+// 	}
+// 	return err
+// }
+
+// Excute runs the SQL files on every port
+func (e *ExcuteSQLFileComp) Excute() (err error) {
+	for _, port := range e.ports {
+		if err = e.excuteOne(port); err != nil {
+			logger.Error("execute at %d failed: %s", port, err.Error())
+			return err
+		}
+	}
+	return nil
+}
+
+// OpenDdlExecuteByCtl prepends SET ddl_execute_by_ctl=1 to each SQL file
+// consider keeping the source file before running sed
+// this method is only used for spider cluster changes
+func (e *ExcuteSQLFileComp) OpenDdlExecuteByCtl() (err error) {
+	for _, f := range e.Params.ExcuteObjects {
+		stdout, err := osutil.StandardShellCommand(
+			false,
+			fmt.Sprintf(`sed -i '1 i\/*!50600 SET ddl_execute_by_ctl=1*/;' %s`, path.Join(e.taskdir, f.SQLFile)),
+		)
+		if err != nil {
+			logger.Error("sed insert ddl_execute_by_ctl failed %s,stdout:%s", err.Error(), stdout)
+			return err
+		}
+		logger.Info("sed at %s,stdout:%s", f.SQLFile, stdout)
+	}
+	return
+}
+
+// excuteOne imports the SQL files on one port
+//
+//	@receiver e
+//	@return err
+func (e *ExcuteSQLFileComp) excuteOne(port int) (err error) {
+	alldbs, err := e.dbConns[port].ShowDatabases()
+	if err != nil {
+		logger.Error("获取实例db list失败:%s", err.Error())
+		return err
+	}
+	dbsExcluesysdbs := util.FilterOutStringSlice(alldbs, computil.GetGcsSystemDatabasesIgnoreTest(e.vermap[port]))
+	for _, f := range e.Params.ExcuteObjects {
+		var realexcutedbs []string
+		// resolve target dbs: the params are wildcards, so expand them to full names
+		intentionDbs, err := e.match(dbsExcluesysdbs, f.parseDbParamRe())
+		if err != nil {
+			return err
+		}
+		// resolve the dbs to ignore
+		ignoreDbs, err := e.match(dbsExcluesysdbs, f.parseIgnoreDbParamRe())
+		if err != nil {
+			return err
+		}
+		// compute the dbs to actually run against
+		realexcutedbs = util.FilterOutStringSlice(intentionDbs, ignoreDbs)
+		if len(realexcutedbs) <= 0 {
+			return fmt.Errorf("no db matched")
+		}
+		logger.Info("will real excute on %v", realexcutedbs)
+		err = mysqlutil.ExecuteSqlAtLocal{
+			IsForce:          e.Params.Force,
+			Charset:          e.charsetmap[port],
+			NeedShowWarnings: false,
+			Host:             e.Params.Host,
+			Port:             port,
+			Socket:           e.socketmap[port],
+			WorkDir:          e.taskdir,
+			User:             e.GeneralParam.RuntimeAccountParam.AdminUser,
+			Password:         e.GeneralParam.RuntimeAccountParam.AdminPwd,
+		}.ExcuteSqlByMySQLClient(f.SQLFile, realexcutedbs)
+		if err != nil {
+			logger.Error("执行%s文件失败", f.SQLFile)
+			return err
+		}
+	}
+	return err
+}
+
+// match matches the db name patterns against the actual dbs returned by show databases
+//
+//	@receiver e
+//	@receiver regularDbNames
+//	@return matched
+func (e *ExcuteSQLFileComp) match(dbsExculeSysdb, regularDbNames []string) (matched []string, err error) {
+	for _, regexpStr := range regularDbNames {
+		re, err := regexp.Compile(regexpStr)
+		if err != nil {
+			logger.Error(" regexp.Compile(%s) failed:%s", regexpStr, err.Error())
+			return nil, err
+		}
+		for _, db := range dbsExculeSysdb {
+			if re.MatchString(db) {
+				matched = append(matched, db)
+			}
+		}
+	}
+	return
+}
+
+// parseDbParamRe converts the DbNames parameter into regular expressions
+//
+//	@receiver e
+func (e *ExcuteSQLFileObj) parseDbParamRe() (s []string) {
+	return changeToMatch(e.DbNames)
+}
+
+// parseIgnoreDbParamRe converts the IgnoreDbNames parameter into regular expressions
+//
+//	@receiver e
+//	@return []string
+func (e *ExcuteSQLFileObj) parseIgnoreDbParamRe() (s []string) {
+	return changeToMatch(e.IgnoreDbNames)
+}
+
+// changeToMatch converts the input patterns into regular-expression form
+//
+//	@receiver input
+//	@return []string
+func changeToMatch(input []string) []string {
+	var result []string
+	for _, str := range input {
+		str = strings.Replace(str, "?", ".", -1)
+		str = strings.Replace(str, "%", ".*", -1)
+		str = `^` + str + `$`
+		result = append(result, str)
+	}
+	return result
+}
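+
+// Illustrative conversions: changeToMatch([]string{"db1", "a%", "log_?"})
+// returns []string{"^db1$", "^a.*$", "^log_.$"}.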
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/execute_partition_sql.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/execute_partition_sql.go
new file mode 100644
index 0000000000..705c1096a2
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/execute_partition_sql.go
@@ -0,0 +1,363 @@
+package mysql
+
+import (
+	"database/sql"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	ma "dbm-services/mysql/db-tools/mysql-crond/api"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+// ExcutePartitionSQLComp component that executes partition SQL
+type ExcutePartitionSQLComp struct {
+	GeneralParam                 *components.GeneralParam `json:"general"`
+	Params                       *ExcutePartitionSQLParam `json:"extend"`
+	ExcutePartitionSQLRunTimeCtx `json:"-"`
+}
+
+// ExcutePartitionSQLParam parameters
+type ExcutePartitionSQLParam struct {
+	BkBizId      int    `json:"bk_biz_id"`
+	ClusterId    int    `json:"cluster_id"`
+	ImmuteDomain string `json:"immute_domain"`
+	MasterIp     string `json:"master_ip"  validate:"required,ip"` // host of the master instance
+	MasterPort   int    `json:"master_port"`                       // port of the master instance
+	ShardName    string `json:"shard_name"`
+	Ticket       string `json:"ticket"`
+	FilePath     string `json:"file_path"`
+	Force        bool   `json:"force"`
+}
+
+// ExcutePartitionSQLObj one partition config to execute
+type ExcutePartitionSQLObj struct {
+	ConfigID      int                    `json:"config_id"`
+	Dblike        string                 `json:"dblike"`
+	Tblike        string                 `json:"tblike"`
+	InitPartition []InitPartitionContent `json:"init_partition"`
+	AddPartition  []string               `json:"add_partition"`
+	DropPartition []string               `json:"drop_partition"`
+}
+
+// InitPartitionContent an init-partition statement plus the disk space it needs
+type InitPartitionContent struct {
+	NeedSize int64  `json:"need_size"`
+	Sql      string `json:"sql"`
+}
+
+// ExcutePartitionSQLRunTimeCtx runtime context
+type ExcutePartitionSQLRunTimeCtx struct {
+	port                 int
+	dbConns              *native.DbWorker
+	ver                  string // version of the current instance
+	charset              string // character set of the current instance
+	socket               string // socket value of the current instance
+	RegularIgnoreDbNames []string
+	RegularDbNames       []string
+	WorkDir              string
+}
+
+// Example returns an example payload
+func (e *ExcutePartitionSQLComp) Example() interface{} {
+	comp := ExcutePartitionSQLComp{
+		Params: &ExcutePartitionSQLParam{
+			BkBizId:      0,
+			ClusterId:    0,
+			ImmuteDomain: "xxx.xxx.xxx",
+			MasterIp:     "1.1.1.1",
+			MasterPort:   0,
+			ShardName:    "xxx",
+			Ticket:       "https://www.xxx.com",
+			FilePath:     "/xxx/xxx/xxx.txt",
+			Force:        false,
+		},
+	}
+	return comp
+}
+
+// Init connects the master instance and prepares the work dir
+func (e *ExcutePartitionSQLComp) Init() (err error) {
+	e.port = e.Params.MasterPort
+	var ver, charset, socket string
+	dbConn, err := native.InsObject{
+		Host: e.Params.MasterIp,
+		Port: e.port,
+		User: e.GeneralParam.RuntimeAccountParam.AdminUser,
+		Pwd:  e.GeneralParam.RuntimeAccountParam.AdminPwd,
+	}.Conn()
+	if err != nil {
+		logger.Error("Connect %d failed:%s", e.port, err.Error())
+		return err
+	}
+	if ver, err = dbConn.SelectVersion(); err != nil {
+		logger.Error("获取实例版本失败:%s", err.Error())
+		return err
+	}
+
+	if socket, err = dbConn.ShowSocket(); err != nil {
+		logger.Error("获取socket value 失败:%s", err.Error())
+		return err
+	}
+	if !cmutil.FileExists(socket) {
+		socket = ""
+	}
+	e.dbConns = dbConn
+	e.ver = ver
+	e.socket = socket
+	e.charset = charset
+	e.WorkDir = fmt.Sprintf("%s/%s", cst.BK_PKG_INSTALL_PATH, "partition")
+	_ = os.MkdirAll(e.WorkDir, 0755)
+	return nil
+}
+
+// Excute runs the partition SQL
+func (e *ExcutePartitionSQLComp) Excute() (err error) {
+	// process one execute_object at a time
+	executeObjects, err := e.getPartitionInfo(e.Params.FilePath)
+
+	if err != nil {
+		logger.Error(err.Error())
+		return err
+	}
+	ip := e.Params.MasterIp
+	port := e.Params.MasterPort
+	user := e.GeneralParam.RuntimeAccountParam.AdminUser
+	pwd := e.GeneralParam.RuntimeAccountParam.AdminPwd
+	param := ""
+	if strings.Contains(e.Params.ShardName, "TDBCTL") {
+		param = "&tc_admin=0"
+	}
+
+	dbw, err := InitDB(ip, port, user, pwd, param)
+	if err != nil {
+		return err
+	}
+	defer dbw.Close()
+	c := make(chan struct{}, 4)
+	wg := &sync.WaitGroup{}
+	errs := []string{}
+	lock := &sync.Mutex{}
+	for _, eb := range executeObjects {
+		c <- struct{}{}
+		wg.Add(1)
+		// execute each ExcutePartitionSQLObj concurrently
+		go func(eb ExcutePartitionSQLObj) {
+			defer wg.Done()
+			// each partition config is one unit; create an error file per ConfigID
+			errfile := fmt.Sprintf("partition_%d_%s.err", eb.ConfigID, time.Now().Format("20060102150405"))
+			errsall := []string{}
+
+			// run init, add and drop in order
+			// within each task, individual SQL statements run concurrently
+			if len(eb.InitPartition) > 0 {
+				logger.Info(fmt.Sprintf("初始化分区,config_id=%d\n", eb.ConfigID))
+				// 初始化分区使用pt工具,因此通过命令行的形式进行执行
+				err = e.excuteInitSql(eb.InitPartition, errfile, 10)
+				if err != nil {
+					lock.Lock()
+					errsall = append(errsall, err.Error())
+					lock.Unlock()
+				} else {
+					logger.Info("初始化分区成功!")
+				}
+			}
+			if len(eb.AddPartition) > 0 {
+				logger.Info(fmt.Sprintf("添加分区,config_id=%d\n", eb.ConfigID))
+				err := e.excuteOne(dbw, eb.AddPartition, errfile, 20)
+				if err != nil {
+					lock.Lock()
+					errsall = append(errsall, err.Error())
+					lock.Unlock()
+				} else {
+					logger.Info("添加分区成功!")
+				}
+			}
+			if len(eb.DropPartition) > 0 {
+				logger.Info(fmt.Sprintf("删除分区,config_id=%d\n", eb.ConfigID))
+				err := e.excuteOne(dbw, eb.DropPartition, errfile, 20)
+				if err != nil {
+					lock.Lock()
+					errsall = append(errsall, err.Error())
+					lock.Unlock()
+				} else {
+					logger.Info("删除分区成功!")
+				}
+			}
+			if len(errsall) > 0 {
+				body := struct {
+					Name      string
+					Content   string
+					Dimension map[string]interface{}
+				}{}
+				body.Name = "partition"
+				body.Content = fmt.Sprintf("partition task failed. ticket: %s", e.Params.Ticket)
+				body.Dimension = make(map[string]interface{})
+				body.Dimension["config_id"] = eb.ConfigID
+				body.Dimension["dblike"] = eb.Dblike
+				body.Dimension["tblike"] = eb.Tblike
+				body.Dimension["ticket"] = e.Params.Ticket
+				body.Dimension["immute_domain"] = e.Params.ImmuteDomain
+				body.Dimension["shard_name"] = e.Params.ShardName
+				manager := ma.NewManager("http://127.0.0.1:9999")
+				sendErr := manager.SendEvent(body.Name, body.Content, body.Dimension)
+				errs = append(errs, strings.Join(errsall, ";\n"))
+				if sendErr != nil {
+					logger.Error(fmt.Sprintf("上报失败:%s\n", sendErr.Error()))
+				}
+			}
+			<-c
+		}(eb)
+	}
+	wg.Wait()
+	if len(errs) > 0 {
+		return errors.New(strings.Join(errs, ";\n"))
+	}
+	return nil
+}
+
+// excuteOne executes one batch of partition SQL statements
+func (e *ExcutePartitionSQLComp) excuteOne(
+	dbw *sql.DB, partitionSQLSet []string, errfile string,
+	connum int,
+) (err error) {
+	wg := sync.WaitGroup{}
+	var errs []string
+	lock := &sync.Mutex{}
+	lockappend := &sync.Mutex{}
+	// init partition can run with lower concurrency;
+	// add and drop partition are relatively cheap, so concurrency can be higher
+	c := make(chan struct{}, connum)
+	for _, partitionSQL := range partitionSQLSet {
+		c <- struct{}{}
+		wg.Add(1)
+		// partitionSQL = e.replace(partitionSQL)
+		go func(partitionSQL string) {
+			defer wg.Done()
+			err := mysqlutil.ExecuteSqlAtLocal{
+				WorkDir:          e.WorkDir,
+				IsForce:          e.Params.Force,
+				Charset:          "utf8",
+				NeedShowWarnings: false,
+				Host:             e.Params.MasterIp,
+				Port:             e.Params.MasterPort,
+				Socket:           e.socket,
+				User:             e.GeneralParam.RuntimeAccountParam.AdminUser,
+				Password:         e.GeneralParam.RuntimeAccountParam.AdminPwd,
+				ErrFile:          errfile,
+			}.ExcutePartitionByMySQLClient(dbw, partitionSQL, lock)
+			if err != nil {
+				lockappend.Lock()
+				errs = append(errs, fmt.Sprintf("%s执行失败,报错:%s", partitionSQL, err.Error()))
+				lockappend.Unlock()
+			}
+			<-c
+		}(partitionSQL)
+	}
+	wg.Wait()
+	if len(errs) > 0 {
+		return fmt.Errorf("%s", strings.Join(errs, "\n"))
+	}
+	return nil
+}
+
+// InitDB opens a raw database/sql connection
+func InitDB(host string, port int, user string, pwd string, param string) (dbw *sql.DB, err error) {
+	tcpdsn := fmt.Sprintf("%s:%d", host, port)
+	dsn := fmt.Sprintf(
+		"%s:%s@tcp(%s)/?charset=utf8&parseTime=True&loc=Local&timeout=30s&readTimeout=30s&lock_wait_timeout=5%s", user,
+		pwd,
+		tcpdsn, param,
+	)
+	SqlDB, err := sql.Open("mysql", dsn)
+	if err != nil {
+		logger.Error("connect to mysql failed %s", err.Error())
+		return nil, err
+	}
+	return SqlDB, nil
+}
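+
+// For illustration, InitDB("127.0.0.1", 26000, "admin", "xxx", "&tc_admin=0")
+// (credentials invented) builds the DSN:
+//
+//	admin:xxx@tcp(127.0.0.1:26000)/?charset=utf8&parseTime=True&loc=Local&timeout=30s&readTimeout=30s&lock_wait_timeout=5&tc_admin=0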
+
+func (e *ExcutePartitionSQLComp) excuteInitSql(
+	partitionSQLSets []InitPartitionContent, errfile string,
+	connum int,
+) (err error) {
+	// before running init partition, verify there is enough free disk space for it
+	errs := []string{}
+	for _, partitionSQL := range partitionSQLSets {
+		flag, err := e.precheck(partitionSQL.NeedSize)
+		command := fmt.Sprintf("%s/%s %s", cst.DBAToolkitPath, "percona-toolkit-3.5.0", partitionSQL.Sql)
+		if err != nil {
+			return err
+		}
+		if flag {
+			err := mysqlutil.ExecuteSqlAtLocal{
+				WorkDir:          e.WorkDir,
+				IsForce:          e.Params.Force,
+				Charset:          "utf8",
+				NeedShowWarnings: false,
+				Host:             e.Params.MasterIp,
+				Port:             e.Params.MasterPort,
+				Socket:           e.socket,
+				User:             e.GeneralParam.RuntimeAccountParam.AdminUser,
+				Password:         e.GeneralParam.RuntimeAccountParam.AdminPwd,
+				ErrFile:          errfile,
+			}.ExcuteInitPartition(command)
+			if err != nil {
+				errs = append(errs, fmt.Sprintf("%s执行失败,报错:%s", command, err.Error()))
+			}
+		}
+	}
+	if len(errs) > 0 {
+		return fmt.Errorf("%s", strings.Join(errs, "\n"))
+	}
+	return nil
+}
+
+func (e *ExcutePartitionSQLComp) precheck(needSize int64) (flag bool, err error) {
+	// (used disk space + 3*table size) / total capacity < 90%
+	// (free disk space + NeedSize) / total capacity > 10%
+	datadir, err := e.dbConns.GetSingleGlobalVar("datadir")
+	if err != nil {
+		return false, err
+	}
+
+	diskInfo, err := osutil.GetLinuxDirDiskInfo(datadir)
+	if err != nil {
+		return false, err
+	}
+	size, err := strconv.ParseInt(diskInfo.Blocks_1K, 10, 64)
+	if err != nil {
+		return false, err
+	}
+	rate := (float64(diskInfo.Available) + float64(needSize/1024)) / float64(size)
+	return rate > 0.1, nil
+}
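+
+// Worked example of the check above, with invented numbers: on a volume of
+// size = 1073741824 KB (1 TiB) with Available = 209715200 KB (200 GiB) and
+// needSize = 10737418240 bytes (10 GiB, i.e. 10485760 KB), rate =
+// (209715200 + 10485760) / 1073741824 ≈ 0.205 > 0.1, so the init may run.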
+
+func (e *ExcutePartitionSQLComp) getPartitionInfo(filePath string) (epsos []ExcutePartitionSQLObj, err error) {
+	f, err := os.ReadFile(filePath)
+	if err != nil {
+		return nil, errors.New(fmt.Sprintf("读取文件失败!--->%s", err.Error()))
+	}
+	err = json.Unmarshal(f, &epsos)
+	if err != nil {
+		return nil, errors.New(fmt.Sprintf("反序列化失败!--->%s", err.Error()))
+	}
+	return epsos, nil
+}
+
+// replace escapes backticks so the SQL can be run from the command line
+func (e *ExcutePartitionSQLComp) replace(partitionSQL string) string {
+	return strings.Replace(partitionSQL, "`", "\\`", -1)
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/find_backup_local.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/find_backup_local.go
new file mode 100644
index 0000000000..081391d5e0
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/find_backup_local.go
@@ -0,0 +1,248 @@
+package mysql
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// FindLocalBackupComp component with a resp payload in its output
+type FindLocalBackupComp struct {
+	Params FindLocalBackupParam `json:"extend"`
+}
+
+// FindLocalBackupParam parameters
+type FindLocalBackupParam struct {
+	BackupDirs []string `json:"backup_dirs" validate:"required"`
+	// which instance's backups to look for
+	TgtInstance *native.Instance `json:"tgt_instance" validate:"required"`
+	// only query backups for this cluster_id; without it, unrelated backups may be picked up
+	ClusterId  int  `json:"cluster_id"`
+	FileServer bool `json:"file_server"`
+
+	resp FindLocalBackupResp
+}
+
+// LocalBackupObj one local backup found on disk
+type LocalBackupObj struct {
+	BKBizID string `json:"bk_biz_id"`
+	// host the backup belongs to
+	InstHost string `json:"inst_host"`
+	// port the backup belongs to
+	InstPort int `json:"inst_port"`
+
+	DBRole          string `json:"db_role"`
+	DataSchemaGrant string `json:"data_schema_grant"`
+	BackupId        string `json:"backup_id"`
+	BillId          string `json:"bill_id"`
+	ClusterId       int    `json:"cluster_id"`
+
+	// backup time; currently the backup start time
+	BackupTime string `json:"backup_time"`
+	// InfoFile   common.InfoFileDetail `json:"info_file"`
+	// list of backup files
+	FileList []string `json:"file_list"`
+	// directory containing the backup
+	BackupDir  string `json:"backup_dir"`
+	BackupType string `json:"backup_type"`
+	IndexFile  string `json:"index_file"`
+}
+
+// FindLocalBackupResp response listing the backups found
+type FindLocalBackupResp struct {
+	// backups keyed by the .info file
+	Backups map[string]*LocalBackupObj `json:"backups"` // info_file: detail
+	// Latest records the most recent backup among the backups above
+	Latest string `json:"latest"`
+}
+
+// Example returns an example payload
+func (f *FindLocalBackupComp) Example() interface{} {
+	comp := FindLocalBackupComp{
+		Params: FindLocalBackupParam{
+			BackupDirs:  []string{"/data/dbbak", "/data1/dbbak"},
+			TgtInstance: &common.InstanceExample,
+			FileServer:  false,
+		},
+	}
+	return comp
+}
+
+// Init fills in the local IP when the target host is empty
+func (f *FindLocalBackupParam) Init() error {
+	var err error
+	if f.TgtInstance.Host == "" {
+		f.TgtInstance.Host, err = osutil.GetLocalIP()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// PreCheck validates the target instance
+func (f *FindLocalBackupParam) PreCheck() error {
+	if f.TgtInstance.Port == 0 {
+		return errors.New("target instance port is needed")
+	}
+	return nil
+}
+
+// StartOld finds backups via the legacy .info files
+func (f *FindLocalBackupParam) StartOld() error {
+	backups := make(map[string]*LocalBackupObj)
+	infoLatest := ""
+	for _, dir := range f.BackupDirs {
+		if !cmutil.IsDirectory(dir) {
+			continue
+		}
+		script := fmt.Sprintf("ls %s/*_%s_%d_*.info", dir, f.TgtInstance.Host, f.TgtInstance.Port)
+		logger.Info("find cmd: %s", script)
+		out, err := osutil.ExecShellCommand(false, script)
+		if err != nil {
+			logger.Warn("find error %w", err)
+			if strings.Contains(out, "No such file or directory") { // 如果是 No such file or directory, 则忽略
+				continue
+			}
+			return err
+		}
+		logger.Info("find cmd output: %s", out)
+		infoList := util.SplitAnyRune(strings.TrimSpace(out), " \n")
+		for _, info := range infoList {
+			file := dbbackup.InfoFileDetail{}
+			if err = dbbackup.ParseBackupInfoFile(info, &file); err != nil {
+				logger.Warn("file %s parse error: %s", info, err.Error())
+				// return err
+			} else {
+				if info > infoLatest {
+					infoLatest = info
+				}
+				fileList := []string{}
+				for f := range file.FileInfo {
+					fileList = append(fileList, f)
+				}
+				localBackup := &LocalBackupObj{
+					BackupDir:  dir,
+					FileList:   fileList,
+					BackupType: file.BackupType,
+					// InfoFile:   file,
+					BKBizID:    file.App,
+					BackupTime: file.StartTime,
+					InstHost:   file.BackupHost,
+					InstPort:   file.BackupPort,
+				}
+				backups[info] = localBackup
+			}
+		}
+	}
+	resp := FindLocalBackupResp{
+		Backups: backups,
+		Latest:  infoLatest,
+	}
+	f.resp = resp
+	// fmt.Println(resp)
+	return nil
+}
+
+// Start finds backups via .index files
+func (f *FindLocalBackupParam) Start() error {
+	backups := make(map[string]*LocalBackupObj)
+	indexLatest := dbbackup.BackupIndexFile{}
+	for _, dir := range f.BackupDirs {
+		if !cmutil.IsDirectory(dir) {
+			continue
+		}
+		script := fmt.Sprintf("ls %s/*_%s_%d_*.index", dir, f.TgtInstance.Host, f.TgtInstance.Port)
+		logger.Info("find cmd: %s", script)
+		out, err := osutil.ExecShellCommand(false, script)
+		if err != nil {
+			logger.Warn("find error %w", err)
+			if strings.Contains(out, "No such file or directory") { // 如果是 No such file or directory, 则忽略
+				continue
+			}
+			return err
+		}
+		logger.Info("find cmd output: %s", out)
+		indexList := util.SplitAnyRune(strings.TrimSpace(out), " \n")
+		for _, info := range indexList {
+			file := dbbackup.BackupIndexFile{}
+			/*
+				contentBytes, err := os.ReadFile(info)
+				if err != nil {
+					return err
+				}
+				if err := json.Unmarshal(contentBytes, &file); err != nil {
+					logger.Error("fail to read index file to struct: %s", info)
+					continue
+					// return err
+				}
+			*/
+			if err := dbbackup.ParseBackupIndexFile(info, &file); err != nil {
+				logger.Warn("file %s parse error: %s", info, err.Error())
+				continue
+			}
+
+			if f.ClusterId != 0 && f.ClusterId != file.ClusterId {
+				logger.Warn("backup index %s does not belong to cluster_id=%s", info, f.ClusterId)
+				continue
+			}
+			if file.ConsistentBackupTime > indexLatest.ConsistentBackupTime {
+				indexLatest = file
+			}
+			fileList := file.GetTarFileList("")
+			localBackup := &LocalBackupObj{
+				BackupDir:       dir,
+				FileList:        fileList,
+				BackupType:      file.BackupType,
+				BKBizID:         file.BkBizId,
+				ClusterId:       file.ClusterId,
+				BackupTime:      file.ConsistentBackupTime,
+				InstHost:        file.BackupHost,
+				InstPort:        file.BackupPort,
+				DBRole:          file.MysqlRole,
+				BackupId:        file.BackupId,
+				BillId:          file.BillId,
+				DataSchemaGrant: file.DataSchemaGrant,
+				IndexFile:       info,
+			}
+			if file.BackupId == "" {
+				logger.Warn("backup_id should not be empty: %+v", localBackup)
+			}
+			backups[file.BackupId] = localBackup
+		}
+	}
+	resp := FindLocalBackupResp{
+		Backups: backups,
+		Latest:  indexLatest.BackupId,
+	}
+	f.resp = resp
+	// fmt.Println(resp)
+	return nil
+}
+
+// PostCheck optionally starts the file server
+func (f *FindLocalBackupParam) PostCheck() error {
+	if f.FileServer {
+		logger.Info("start file httpserver")
+	}
+	return nil
+}
+
+// OutputCtx prints the response
+func (f *FindLocalBackupParam) OutputCtx() error {
+	ss, err := components.WrapperOutput(f.resp)
+	if err != nil {
+		return err
+	}
+	fmt.Println(ss)
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/full_backup.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/full_backup.go
new file mode 100644
index 0000000000..417f14501c
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/full_backup.go
@@ -0,0 +1,208 @@
+package mysql
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/tools"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"path"
+
+	"gopkg.in/ini.v1"
+)
+
+// FullBackupComp basic structure
+type FullBackupComp struct {
+	GeneralParam *components.GeneralParam
+	Params       *FullBackupParam
+	tools        *tools.ToolSet
+	FullBackupCtx
+}
+
+// FullBackupParam godoc
+type FullBackupParam struct {
+	Host string `json:"host"`
+	Port int    `json:"port"`
+	// Charset    string `json:"charset"`
+	BackupType string `json:"backup_type"`
+	FileTag    string `json:"file_tag"`
+	BillId     string `json:"bill_id"`
+}
+
+// FullBackupCtx runtime context
+type FullBackupCtx struct {
+	backupGoDailyConfig *BackupGoConfig
+	uid                 string
+	cfgFilePath         string
+	ReportStatusLog     string
+	ReportResultLog     string
+}
+
+// Precheck pre-checks connectivity and the toolset
+func (c *FullBackupComp) Precheck() (err error) {
+	_, err = native.InsObject{
+		Host: c.Params.Host,
+		Port: c.Params.Port,
+		User: c.GeneralParam.RuntimeAccountParam.DbBackupUser,
+		Pwd:  c.GeneralParam.RuntimeAccountParam.DbBackupPwd,
+	}.Conn()
+	if err != nil {
+		logger.Error("connect %s:%d failed:%s", c.Params.Host, c.Params.Port, err.Error())
+		return err
+	}
+
+	c.tools, err = tools.NewToolSetWithPick(tools.ToolDbbackupGo)
+	if err != nil {
+		logger.Error("init toolset failed: %s", err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// Init initializes the backup config and report paths
+func (c *FullBackupComp) Init(uid string) (err error) {
+	c.uid = uid
+
+	configFile := path.Join(cst.DbbackupGoInstallPath, cst.DbbackupConfigFilename(c.Params.Port))
+	if !osutil.FileExist(configFile) {
+		err = fmt.Errorf("backup config file %s not found", configFile)
+		logger.Error(err.Error())
+		return err
+	}
+
+	cfg, err := ReadBackupConfigFile(configFile)
+	if err != nil {
+		logger.Error("read %s failed: %s", configFile, err.Error())
+		return err
+	}
+
+	c.backupGoDailyConfig = cfg
+
+	c.ReportResultLog = path.Join(
+		c.backupGoDailyConfig.BackupParamPublic.ResultReportPath,
+		fmt.Sprintf("dbareport_result_%d.log", c.Params.Port),
+	)
+	c.ReportStatusLog = path.Join(
+		c.backupGoDailyConfig.BackupParamPublic.StatusReportPath,
+		fmt.Sprintf("dbareport_status_%d.log", c.Params.Port),
+	)
+	return nil
+}
+
+// GenerateConfigFile generates the backup config file
+func (c *FullBackupComp) GenerateConfigFile() (err error) {
+	c.backupGoDailyConfig.BackupParamPublic.BackupType = c.Params.BackupType
+	c.backupGoDailyConfig.BackupParamPublic.DataSchemaGrant = "ALL"
+	c.backupGoDailyConfig.BackupParamPublic.BillId = c.Params.BillId
+	c.backupGoDailyConfig.BackupParamBackupSystem.FileTag = c.Params.FileTag
+	c.backupGoDailyConfig.BackupParamPublic.BackupTimeOut = ""
+
+	// if strings.ToLower(c.Params.Charset) != "default" {
+	//	c.backupGoDailyConfig.BackupParamPublic.MysqlCharset = strings.ToLower(c.Params.Charset)
+	// }
+	// c.backupGoDailyConfig.BackupParamPublic.BackupTimeOut TODO: how to handle this
+
+	f := ini.Empty()
+	section, err := f.NewSection(c.backupGoDailyConfig.SectionStringPublic)
+	if err != nil {
+		logger.Error("new public section failed: %s", err.Error())
+		return err
+	}
+	err = section.ReflectFrom(c.backupGoDailyConfig.BackupParamPublic)
+	if err != nil {
+		logger.Error("public section reflect failed: %s", err.Error())
+		return err
+	}
+
+	section, err = f.NewSection(c.backupGoDailyConfig.SectionStringBackupSystem)
+	if err != nil {
+		logger.Error("new backup system section failed: %s", err.Error())
+		return err
+	}
+	err = section.ReflectFrom(c.backupGoDailyConfig.BackupParamBackupSystem)
+	if err != nil {
+		logger.Error("backup system section reflect failed: %s", err.Error())
+		return err
+	}
+
+	section, err = f.NewSection(c.backupGoDailyConfig.SectionStringLogical)
+	if err != nil {
+		logger.Error("new logical section failed: %s", err.Error())
+		return err
+	}
+	err = section.ReflectFrom(c.backupGoDailyConfig.BackupParamLogical)
+	if err != nil {
+		logger.Error("logical section reflect failed: %s", err.Error())
+		return err
+	}
+
+	c.cfgFilePath = path.Join("/tmp", fmt.Sprintf("dbbackup.%d.%s.ini", c.Params.Port, c.uid))
+
+	err = f.SaveTo(c.cfgFilePath)
+	if err != nil {
+		logger.Error("save %s failed: %s", c.cfgFilePath, err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// DoBackup runs the backup command
+func (c *FullBackupComp) DoBackup() (err error) {
+	// defer func() {
+	//	_ = os.Remove(c.cfgFilePath)
+	// }()
+
+	cmd := fmt.Sprintf(
+		"%s --configpath=%s --dumpbackup",
+		c.tools.MustGet(tools.ToolDbbackupGo),
+		c.cfgFilePath,
+	)
+	_, err = osutil.ExecShellCommand(false, cmd)
+	if err != nil {
+		logger.Error("execute %s failed: %s", cmd, err.Error())
+		return err
+	}
+	return nil
+}
+
+// OutputBackupInfo outputs the backup report
+func (c *FullBackupComp) OutputBackupInfo() error {
+	res, err := GenerateReport(c.Params.BillId, c.ReportStatusLog, c.ReportResultLog)
+	if err != nil {
+		logger.Error("generate report failed: %s", err.Error())
+		return err
+	}
+
+	err = components.PrintOutputCtx(res)
+	if err != nil {
+		logger.Error("print backup report info failed: %s.", err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// Example returns an example payload
+func (c *FullBackupComp) Example() interface{} {
+	comp := FullBackupComp{
+		GeneralParam: &components.GeneralParam{
+			RuntimeAccountParam: components.RuntimeAccountParam{
+				MySQLAccountParam: common.AccountMonitorExample,
+			},
+		},
+		Params: &FullBackupParam{
+			Host: "127.0.0.1",
+			Port: 20000,
+			// Charset:    "default",
+			BackupType: "LOGICAL",
+			FileTag:    "MYSQL_FULL_BACKUP",
+			BillId:     "123456",
+		},
+	}
+	return comp
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/clone.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/clone.go
new file mode 100644
index 0000000000..eb2d4fa3f6
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/clone.go
@@ -0,0 +1 @@
+package grant
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/clone_client_grant.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/clone_client_grant.go
new file mode 100644
index 0000000000..86ba02ac09
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/clone_client_grant.go
@@ -0,0 +1,183 @@
+// Package grant TODO
+package grant
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"fmt"
+	"strings"
+)
+
+// CloneClentGRantComp clones client privileges between hosts
+type CloneClentGRantComp struct {
+	GeneralParam  *components.GeneralParam
+	Params        *CloneClentGRantParam
+	db            *native.DbWorker // local db connection
+	masterVersion string           // database version of the master
+}
+
+// CloneClentGRantParam parameters for cloning client privileges on mysql
+type CloneClentGRantParam struct {
+	// host of the current instance
+	Host string `json:"host" validate:"required,ip"`
+	// port of the current instance
+	Port int `json:"port" validate:"required,lt=65536,gte=3306"`
+	// client ip whose privileges serve as the template
+	TemplateClientHost string `json:"template_client_host" validate:"required,ip"`
+	// target client ip
+	TargetClientHost string `json:"target_client_host" validate:"required,ip"`
+	// whether to drop the old client account
+	IsDrop bool `json:"is_drop"`
+	// only read when is_drop is true; pass 1.1.1.1 by default; the client host whose privileges should be dropped
+	OriginClientHost string `json:"origin_client_host" validate:"required,ip"`
+}
+
+// Init connects the instance and reads its version
+func (g *CloneClentGRantComp) Init() (err error) {
+	dbwork, err := native.InsObject{
+		Host: g.Params.Host,
+		Port: g.Params.Port,
+		User: g.GeneralParam.RuntimeAccountParam.AdminUser,
+		Pwd:  g.GeneralParam.RuntimeAccountParam.AdminPwd,
+	}.Conn()
+	if err != nil {
+		logger.Error("connect %s:%d failed,err:%s", g.Params.Host, g.Params.Port, err.Error())
+		return
+	}
+
+	g.db = dbwork
+	if g.masterVersion, err = g.db.SelectVersion(); err != nil {
+		logger.Error("select version error:%s", err)
+		return
+	}
+	logger.Info("Version is %s", g.masterVersion)
+	return
+}
+
+// ReadTemplateClientPriv returns the grant statements of every account on the template client host
+func (g *CloneClentGRantComp) ReadTemplateClientPriv() (grantSql []string, err error) {
+	var rows []string
+
+	selectSql := fmt.Sprintf(
+		"select concat( user,'@',host) as userhost from mysql.user where host = '%s'",
+		g.Params.TemplateClientHost,
+	)
+	created := false
+
+	if mysqlutil.MySQLVersionParse(g.masterVersion) > 5007000 {
+		created = true
+	}
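+	// Note (assumption): MySQLVersionParse appears to encode a version as
+	// major*1000000+minor*1000+patch, so the 5007000 threshold corresponds to
+	// MySQL 5.7.0 and the flag marks servers new enough to show per-user
+	// create statements separately.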
+
+	// first look up the user@host pairs on the instance whose host is TemplateClientHost
+	err = g.db.Queryx(&rows, selectSql)
+	if err != nil {
+		logger.Error("select error:%s", err)
+		return
+	}
+
+	// read the grants of the template accounts
+	for _, row := range rows {
+		var grants []string
+		if grants, err = g.db.ShowPrivForUser(created, row); err != nil {
+			logger.Error("show priv sql for user error :%s", err)
+			return
+		}
+		grantSql = append(grantSql, grants...)
+	}
+	return
+}
+
+// SpliceDropUserSQL builds the drop user statements for every account on the given client host
+func (g *CloneClentGRantComp) SpliceDropUserSQL(clientHost string) (dropUserSqls []string, err error) {
+	var rows []string
+
+	selectSql := fmt.Sprintf("select concat( user,'@',host) as userhost from mysql.user where host = '%s'", clientHost)
+
+	// first look up the user@host pairs on the instance whose host is clientHost
+	err = g.db.Queryx(&rows, selectSql)
+	if err != nil {
+		logger.Error("select error:%s", err)
+		return
+	}
+
+	// build the drop user statements
+	for _, row := range rows {
+		sql := fmt.Sprintf("drop user %s", row)
+		dropUserSqls = append(dropUserSqls, sql)
+	}
+	return
+}
+
+// CloneTargetClientPriv reads the template client privileges and replays them for the target client host
+func (g *CloneClentGRantComp) CloneTargetClientPriv() (err error) {
+	var targetGrantSqls []string
+	var templateGrantSqls []string
+	if templateGrantSqls, err = g.ReadTemplateClientPriv(); err != nil {
+		return
+	}
+
+	if len(templateGrantSqls) == 0 {
+		logger.Info("related priv is empty.")
+		return nil
+	}
+
+	for _, grant := range templateGrantSqls {
+		tmpGrant := strings.ReplaceAll(grant, g.Params.TemplateClientHost, g.Params.TargetClientHost)
+		targetGrantSqls = append(targetGrantSqls, tmpGrant)
+	}
+
+	if _, err = g.db.ExecMore(targetGrantSqls); err != nil {
+		logger.Error("Clone permission failed: %s", err)
+		return
+	}
+
+	return
+}
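+
+// Illustrative only: with the substitution above, a template grant such as
+//	GRANT SELECT ON `db`.* TO 'app'@'<template_client_host>'
+// is replayed on the target as
+//	GRANT SELECT ON `db`.* TO 'app'@'<target_client_host>'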
+
+// DropOriginClientPriv drops the accounts and privileges of the old client host
+func (g *CloneClentGRantComp) DropOriginClientPriv() (err error) {
+	var dropUserSqls []string
+	if !g.Params.IsDrop {
+		// the caller did not request dropping users, so exit early
+		logger.Info("IsDrop is %v, skip", g.Params.IsDrop)
+		return nil
+	}
+
+	if dropUserSqls, err = g.SpliceDropUserSQL(g.Params.OriginClientHost); err != nil {
+		return
+	}
+
+	if len(dropUserSqls) == 0 {
+		logger.Info("drop priv is empty.")
+		return nil
+	}
+
+	if _, err = g.db.ExecMore(dropUserSqls); err != nil {
+		logger.Error("drop user failed: %s", err)
+		return
+	}
+	return
+}
+
+// ClearTargetClientPriv cleans up accounts and privileges left over on the target client host
+func (g *CloneClentGRantComp) ClearTargetClientPriv() (err error) {
+	var dropUserSqls []string
+	if dropUserSqls, err = g.SpliceDropUserSQL(g.Params.TargetClientHost); err != nil {
+		return
+	}
+
+	if len(dropUserSqls) == 0 {
+		logger.Info("clear priv is empty.")
+		return nil
+	}
+
+	if _, err = g.db.ExecMore(dropUserSqls); err != nil {
+		logger.Error("clear user failed: %s", err)
+		return
+	}
+	return
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/clone_instance_priv.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/clone_instance_priv.go
new file mode 100644
index 0000000000..2e2c95201b
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/clone_instance_priv.go
@@ -0,0 +1,36 @@
+package grant
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+)
+
+// CloneInsPrivComp clones privileges from a source instance to the current instance
+type CloneInsPrivComp struct {
+	GeneralParam *components.GeneralParam `json:"general"`
+	Params       *CloneInsPrivParam       `json:"extend"`
+}
+
+// CloneInsPrivParam parameters for cloning instance privileges
+type CloneInsPrivParam struct {
+	// host of the current instance
+	Host string `json:"host"  validate:"required,ip"`
+	// port of the current instance
+	Port int `json:"port"  validate:"required,lt=65536,gte=3306"`
+	// source instance to clone privileges from
+	SourceIns RemoteIns `json:"source_instance"`
+}
+
+// RemoteIns a remote instance plus a temporary super account for the current host
+type RemoteIns struct {
+	native.Instance
+	// temporary super account usable from the current host
+	User string `json:"user"`
+	Pwd  string `json:"pwd"`
+}
+
+// CloneInsPrivRCtx runtime context
+type CloneInsPrivRCtx struct {
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/repl.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/repl.go
new file mode 100644
index 0000000000..ca881d2c50
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/grant/repl.go
@@ -0,0 +1,117 @@
+package grant
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"encoding/json"
+	"fmt"
+)
+
+// GrantReplComp grants replication accounts on a master instance
+type GrantReplComp struct {
+	GeneralParam  *components.GeneralParam `json:"general"`
+	Params        *GrantReplParam          `json:"extend"`
+	Db            *native.DbWorker         // local db connection
+	masterVersion string                   // version of the master instance
+}
+
+// GrantReplParam parameters for adding repl accounts on the master
+type GrantReplParam struct {
+	Host      string   `json:"host"`       // host of the current instance
+	Port      int      `json:"port"`       // port of the current instance
+	ReplHosts []string `json:"repl_hosts"` // slave hosts
+}
+
+// Example returns a sample GrantReplComp
+func (g *GrantReplComp) Example() interface{} {
+	comp := GrantReplComp{
+		Params: &GrantReplParam{
+			Host:      "1.1.1.1",
+			Port:      3306,
+			ReplHosts: []string{"2.2.2.2", "3.3.3.3"},
+		},
+		GeneralParam: &components.GeneralParam{
+			RuntimeAccountParam: components.RuntimeAccountParam{
+				MySQLAccountParam: common.MySQLAdminReplExample,
+			},
+		},
+	}
+	return comp
+}
+
+// Init connects to the instance with the admin account and records its version
+func (g *GrantReplComp) Init() (err error) {
+	dbwork, err := native.InsObject{
+		Host: g.Params.Host,
+		Port: g.Params.Port,
+		User: g.GeneralParam.RuntimeAccountParam.AdminUser,
+		Pwd:  g.GeneralParam.RuntimeAccountParam.AdminPwd,
+	}.Conn()
+	if err != nil {
+		logger.Error("connect %s:%d failed,err:%s", g.Params.Host, g.Params.Port, err.Error())
+		return
+	}
+	g.Db = dbwork
+	ver, err := g.Db.SelectVersion()
+	if err != nil {
+		return
+	}
+	g.masterVersion = ver
+	logger.Info("Version is %s", g.masterVersion)
+	return
+}
+
+// GrantRepl creates the repl account for every repl host and grants replication privileges
+func (g *GrantReplComp) GrantRepl() (err error) {
+	replUser := g.GeneralParam.RuntimeAccountParam.ReplUser
+	replPwd := g.GeneralParam.RuntimeAccountParam.ReplPwd
+	var execSQLs []string
+	for _, replHost := range g.Params.ReplHosts {
+		execSQLs = append(
+			execSQLs,
+			fmt.Sprintf(
+				"CREATE USER /*!50706 IF NOT EXISTS */ `%s`@`%s` IDENTIFIED BY '%s';",
+				replUser, replHost, replPwd,
+			),
+		)
+		execSQLs = append(
+			execSQLs,
+			fmt.Sprintf("GRANT REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO `%s`@`%s`;", replUser, replHost),
+		)
+	}
+	if _, err := g.Db.ExecMore(execSQLs); err != nil {
+		logger.Error("create repl user failed:[%s]", err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// GetBinPosition returns the master status (binlog file and position) as a JSON string
+func (g *GrantReplComp) GetBinPosition() (binPosition string, err error) {
+	resp, err := g.Db.ShowMasterStatus()
+	if err != nil {
+		return "", err
+	}
+	b, err := json.Marshal(resp)
+	if err != nil {
+		return "", err
+	}
+	return string(b), nil
+}
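+
+// Illustrative only (the exact shape depends on native.ShowMasterStatus): the
+// returned JSON typically carries the binlog file and position, e.g. something
+// like {"File":"binlog20000.000001","Position":120,...}.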
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_checksum.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_checksum.go
new file mode 100644
index 0000000000..0a762caa55
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_checksum.go
@@ -0,0 +1,253 @@
+package mysql
+
+import (
+	"bytes"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/tools"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+	"time"
+
+	"gopkg.in/yaml.v2"
+)
+
+// InstallMySQLChecksumComp component for installing mysql-table-checksum
+type InstallMySQLChecksumComp struct {
+	GeneralParam *components.GeneralParam   `json:"general"`
+	Params       *InstallMySQLChecksumParam `json:"extend"`
+	tools        *tools.ToolSet
+}
+
+// InstanceInfo describes a mysql instance
+type InstanceInfo struct {
+	BkBizId      int    `json:"bk_biz_id"`
+	Ip           string `json:"ip"`
+	Port         int    `json:"port"`
+	Role         string `json:"role"`
+	ClusterId    int    `json:"cluster_id"`
+	ImmuteDomain string `json:"immute_domain"`
+	BkInstanceId int64  `json:"bk_instance_id,omitempty"` // 0 is treated as empty and skipped when serializing
+}
+
+// InstallMySQLChecksumParam input parameters
+type InstallMySQLChecksumParam struct {
+	components.Medium
+	SystemDbs     []string       `json:"system_dbs"`
+	InstancesInfo []InstanceInfo `json:"instances_info"`
+	ExecUser      string         `json:"exec_user"`
+	Schedule      string         `json:"schedule"`
+	ApiUrl        string         `json:"api_url"`
+}
+
+// Init initializes the tool set
+func (c *InstallMySQLChecksumComp) Init() (err error) {
+	c.tools = tools.NewToolSetWithPickNoValidate(tools.ToolMysqlTableChecksum, tools.ToolPtTableChecksum)
+	return nil
+}
+
+// Precheck validates the checksum package
+func (c *InstallMySQLChecksumComp) Precheck() (err error) {
+	if err = c.Params.Medium.Check(); err != nil {
+		logger.Error("check checksum pkg failed: %s", err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// DeployBinary deploys mysql-table-checksum and pt-table-checksum
+func (c *InstallMySQLChecksumComp) DeployBinary() (err error) {
+	err = os.MkdirAll(cst.ChecksumInstallPath, 0755)
+	if err != nil {
+		logger.Error("mkdir %s failed: %s", cst.ChecksumInstallPath, err.Error())
+		return err
+	}
+
+	decompressCmd := fmt.Sprintf(
+		`tar zxf %s -C %s`,
+		c.Params.Medium.GetAbsolutePath(), cst.ChecksumInstallPath,
+	)
+	_, err = osutil.ExecShellCommand(false, decompressCmd)
+	if err != nil {
+		logger.Error("decompress checksum pkg failed: %s", err.Error())
+		return err
+	}
+
+	chownCmd := fmt.Sprintf(`chown -R mysql %s`, cst.ChecksumInstallPath)
+	_, err = osutil.ExecShellCommand(false, chownCmd)
+	if err != nil {
+		logger.Error("chown %s to mysql failed: %s", cst.ChecksumInstallPath, err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// GenerateBinaryConfig renders one mysql-table-checksum config file per instance
+func (c *InstallMySQLChecksumComp) GenerateBinaryConfig() (err error) {
+	var configFs []*os.File
+	defer func() {
+		for _, f := range configFs {
+			_ = f.Close()
+		}
+	}()
+
+	logDir := path.Join(cst.ChecksumInstallPath, "logs")
+	for _, instance := range c.Params.InstancesInfo {
+		cfg := ChecksumConfig{
+			BkBizId: instance.BkBizId,
+			Cluster: _cluster{
+				Id:           instance.ClusterId,
+				ImmuteDomain: instance.ImmuteDomain,
+			},
+			Ip:         instance.Ip,
+			Port:       instance.Port,
+			User:       c.GeneralParam.RuntimeAccountParam.MonitorUser,
+			Password:   c.GeneralParam.RuntimeAccountParam.MonitorPwd,
+			InnerRole:  instance.Role,
+			ReportPath: path.Join(cst.DBAReportBase, "checksum"),
+			Filter: _ptFilters{
+				IgnoreDatabases: c.Params.SystemDbs,
+			},
+			PtChecksum: _ptChecksum{
+				Path:      c.tools.MustGet(tools.ToolPtTableChecksum),
+				Replicate: fmt.Sprintf("%s.checksum", native.INFODBA_SCHEMA),
+				Switches:  []string{},
+				Args: []map[string]interface{}{
+					{
+						"name":  "run-time",
+						"value": time.Hour * 2,
+					},
+				},
+			},
+			Log: &_logConfig{
+				Console:    false,
+				LogFileDir: &logDir,
+				Debug:      false,
+				Source:     true,
+				Json:       true,
+			},
+			Schedule: c.Params.Schedule,
+			ApiUrl:   c.Params.ApiUrl, // "http://127.0.0.1:9999",
+		}
+
+		yamlData, err := yaml.Marshal(&cfg)
+		if err != nil {
+			logger.Error("generate yaml config for %d failed: %s", instance.Port, err.Error())
+			return err
+		}
+
+		cfgPath := path.Join(cst.ChecksumInstallPath, fmt.Sprintf("checksum_%d.yaml", instance.Port))
+		f, err := os.OpenFile(
+			cfgPath,
+			os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
+			0644,
+		)
+		if err != nil {
+			logger.Error("create config file for %d failed: %s", instance.Port, err.Error())
+			return err
+		}
+		configFs = append(configFs, f)
+
+		_, err = f.Write(yamlData)
+		if err != nil {
+			logger.Error("write config file for %d failed: %s", instance.Port, err.Error())
+			return err
+		}
+
+		_, err = osutil.ExecShellCommand(false, fmt.Sprintf(`chown mysql %s`, cfgPath))
+		if err != nil {
+			logger.Error("chown %s failed: %s", cfgPath, err.Error())
+			return err
+		}
+	}
+	return nil
+}
+
+// AddToCrond registers each instance's checksum config with the scheduler via "reschedule"
+func (c *InstallMySQLChecksumComp) AddToCrond() (err error) {
+	mysqlTableChecksum, err := c.tools.Get(tools.ToolMysqlTableChecksum)
+	if err != nil {
+		logger.Error("get %s failed: %s", tools.ToolMysqlTableChecksum, err.Error())
+		return err
+	}
+
+	for _, ins := range c.Params.InstancesInfo {
+		command := exec.Command(
+			mysqlTableChecksum,
+			"reschedule",
+			"--staff", c.Params.ExecUser,
+			"--config",
+			path.Join(
+				cst.ChecksumInstallPath,
+				fmt.Sprintf("checksum_%d.yaml", ins.Port),
+			),
+		)
+		var stdout, stderr bytes.Buffer
+		command.Stdout = &stdout
+		command.Stderr = &stderr
+
+		err := command.Run()
+		if err != nil {
+			logger.Error("run %s failed: %s, %s", command, err.Error(), stderr.String())
+			return err
+		}
+		logger.Info("run %s success: %s", command, stdout.String())
+	}
+	return nil
+}
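+
+// Illustrative only: for port 20000 the command spawned above is equivalent to
+//	mysql-table-checksum reschedule --staff <exec_user> --config <install_path>/checksum_20000.yaml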
+
+// Example returns a sample InstallMySQLChecksumComp
+func (c *InstallMySQLChecksumComp) Example() interface{} {
+	return InstallMySQLChecksumComp{
+		GeneralParam: &components.GeneralParam{
+			RuntimeAccountParam: components.RuntimeAccountParam{
+				MySQLAccountParam: common.AccountMonitorExample,
+			},
+		},
+		Params: &InstallMySQLChecksumParam{
+			Medium: components.Medium{
+				Pkg:    "mysql-table-checksum.tar.gz",
+				PkgMd5: "12345",
+			},
+			SystemDbs: native.DBSys,
+			InstancesInfo: []InstanceInfo{
+				{
+					BkBizId:      1,
+					Ip:           "127.0.0.1",
+					Port:         20000,
+					Role:         "master",
+					ClusterId:    12,
+					ImmuteDomain: "aaa.bbb.com",
+				},
+				{
+					BkBizId:      1,
+					Ip:           "127.0.0.1",
+					Port:         20001,
+					Role:         "master",
+					ClusterId:    12,
+					ImmuteDomain: "aaa.bbb.com",
+				},
+			},
+			ExecUser: "rtx",
+			Schedule: "@every 5m",
+			ApiUrl:   "http://x.x.x.x:yyyy",
+		},
+	}
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_dba_toolkit.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_dba_toolkit.go
new file mode 100644
index 0000000000..d99991d0a9
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_dba_toolkit.go
@@ -0,0 +1,70 @@
+package mysql
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+)
+
+// InstallDBAToolkitComp component for installing the DBA toolkit
+type InstallDBAToolkitComp struct {
+	Params InstallDBAToolkitParam `json:"extend"`
+}
+
+// InstallDBAToolkitParam input parameters
+type InstallDBAToolkitParam struct {
+	components.Medium
+	// user who initiated the actor; used for auditing only
+	ExecUser string `json:"exec_user"`
+}
+
+// Init initialization (currently a no-op)
+func (c *InstallDBAToolkitComp) Init() (err error) {
+	return nil
+}
+
+// PreCheck validates the toolkit package
+func (c *InstallDBAToolkitComp) PreCheck() (err error) {
+	if err = c.Params.Medium.Check(); err != nil {
+		logger.Error("check dbatoolkit pkg failed: %s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// DeployBinary deploys the DBA toolkit
+func (c *InstallDBAToolkitComp) DeployBinary() (err error) {
+	decompressCmd := fmt.Sprintf(
+		`tar zxf %s -C %s`,
+		c.Params.Medium.GetAbsolutePath(), cst.MYSQL_TOOL_INSTALL_PATH,
+	)
+	_, err = osutil.ExecShellCommand(false, decompressCmd)
+	if err != nil {
+		logger.Error("decompress dbatoolkit pkg failed: %s", err.Error())
+		return err
+	}
+
+	chownCmd := fmt.Sprintf(`chown -R mysql %s`, cst.DBAToolkitPath)
+	_, err = osutil.ExecShellCommand(false, chownCmd)
+	if err != nil {
+		logger.Error("chown %s to mysql failed: %s", cst.DBAToolkitPath, err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// Example returns a sample InstallDBAToolkitComp
+func (c *InstallDBAToolkitComp) Example() interface{} {
+	return InstallDBAToolkitComp{
+		Params: InstallDBAToolkitParam{
+			Medium: components.Medium{
+				Pkg:    "dba-toolkit.tar.gz",
+				PkgMd5: "12345",
+			},
+			ExecUser: "sys",
+		},
+	}
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_monitor.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_monitor.go
new file mode 100644
index 0000000000..2002446fa7
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_monitor.go
@@ -0,0 +1,365 @@
+package mysql
+
+import (
+	"bytes"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/tools"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+	"time"
+
+	"github.com/pkg/errors"
+	"gopkg.in/yaml.v2"
+)
+
+// InstallMySQLMonitorComp component for installing mysql-monitor
+type InstallMySQLMonitorComp struct {
+	GeneralParam *components.GeneralParam  `json:"general"`
+	Params       *InstallMySQLMonitorParam `json:"extend"`
+	tools        *tools.ToolSet
+}
+
+// InstallMySQLMonitorParam input parameters
+type InstallMySQLMonitorParam struct {
+	components.Medium
+	SystemDbs     []string       `json:"system_dbs"`
+	ExecUser      string         `json:"exec_user"`
+	ApiUrl        string         `json:"api_url"`
+	InstancesInfo []InstanceInfo `json:"instances_info"`
+	MachineType   string         `json:"machine_type"`
+	BkCloudId     int            `json:"bk_cloud_id"`
+	ItemsConfig   map[string]struct {
+		Enable      *bool    `json:"enable" yaml:"enable"`
+		Schedule    *string  `json:"schedule" yaml:"schedule"`
+		MachineType string   `json:"machine_type" yaml:"machine_type"`
+		Role        []string `json:"role" yaml:"role"`
+	} `json:"items_config"`
+}
+
+type monitorItem struct {
+	Name        string   `json:"name" yaml:"name"`
+	Enable      *bool    `json:"enable" yaml:"enable"`
+	Schedule    *string  `json:"schedule" yaml:"schedule"`
+	MachineType string   `json:"machine_type" yaml:"machine_type"`
+	Role        []string `json:"role" yaml:"role"`
+}
+
+type connectAuth struct {
+	User     string `yaml:"user" validate:"required"`
+	Password string `yaml:"password" validate:"required"`
+}
+
+type authCollect struct {
+	Mysql      *connectAuth `yaml:"mysql"`
+	Proxy      *connectAuth `yaml:"proxy"`
+	ProxyAdmin *connectAuth `yaml:"proxy_admin"`
+}
+
+type monitorConfig struct {
+	BkBizId         int           `yaml:"bk_biz_id"`
+	Ip              string        `yaml:"ip" validate:"required,ipv4"`
+	Port            int           `yaml:"port" validate:"required,gt=1024,lte=65535"`
+	BkInstanceId    int64         `yaml:"bk_instance_id" validate:"required,gt=0"`
+	ImmuteDomain    string        `yaml:"immute_domain"`
+	MachineType     string        `yaml:"machine_type"`
+	Role            *string       `yaml:"role"`
+	BkCloudID       *int          `yaml:"bk_cloud_id" validate:"required,gte=0"`
+	Log             *_logConfig   `yaml:"log"`
+	ItemsConfigFile string        `yaml:"items_config_file" validate:"required"`
+	ApiUrl          string        `yaml:"api_url" validate:"required"`
+	Auth            authCollect   `yaml:"auth"`
+	DBASysDbs       []string      `yaml:"dba_sys_dbs" validate:"required"`
+	InteractTimeout time.Duration `yaml:"interact_timeout" validate:"required"`
+	DefaultSchedule string        `yaml:"default_schedule" validate:"required"`
+}
+
+// Init initializes the tool set
+func (c *InstallMySQLMonitorComp) Init() (err error) {
+	c.tools = tools.NewToolSetWithPickNoValidate(tools.ToolMySQLMonitor)
+	return nil
+}
+
+// Precheck validates the monitor package
+func (c *InstallMySQLMonitorComp) Precheck() (err error) {
+	if err = c.Params.Medium.Check(); err != nil {
+		logger.Error("check monitor pkg failed: %s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// DeployBinary deploys the monitor binary
+func (c *InstallMySQLMonitorComp) DeployBinary() (err error) {
+	err = os.MkdirAll(cst.MySQLMonitorInstallPath, 0755)
+	if err != nil {
+		logger.Error("mkdir %s failed: %s", cst.MySQLMonitorInstallPath, err.Error())
+		return err
+	}
+
+	decompressCmd := fmt.Sprintf(
+		`tar zxf %s -C %s`,
+		c.Params.Medium.GetAbsolutePath(), cst.MySQLMonitorInstallPath,
+	)
+	_, err = osutil.ExecShellCommand(false, decompressCmd)
+	if err != nil {
+		logger.Error("decompress monitor pkg failed: %s", err.Error())
+		return err
+	}
+
+	chownCmd := fmt.Sprintf(`chown -R mysql %s`, cst.MySQLMonitorInstallPath)
+	_, err = osutil.ExecShellCommand(false, chownCmd)
+	if err != nil {
+		logger.Error("chown %s to mysql failed: %s", cst.MySQLMonitorInstallPath, err.Error())
+		return err
+	}
+	return nil
+}
+
+// GenerateBinaryConfig renders one runtime config file per instance
+func (c *InstallMySQLMonitorComp) GenerateBinaryConfig() (err error) {
+	var configFs []*os.File
+	defer func() {
+		for _, f := range configFs {
+			_ = f.Close()
+		}
+	}()
+
+	logDir := path.Join(cst.MySQLMonitorInstallPath, "logs")
+	for _, instance := range c.Params.InstancesInfo {
+		if instance.BkInstanceId <= 0 {
+			err := errors.Errorf(
+				"%s:%d invalid bk_instance_id: %d",
+				instance.Ip,
+				instance.Port,
+				instance.BkInstanceId,
+			)
+			return err
+		}
+
+		cfg := monitorConfig{
+			Ip:           instance.Ip,
+			Port:         instance.Port,
+			BkInstanceId: instance.BkInstanceId,
+			ImmuteDomain: instance.ImmuteDomain,
+			Role:         &instance.Role,
+			BkBizId:      instance.BkBizId,
+			BkCloudID:    &c.Params.BkCloudId,
+			MachineType:  c.Params.MachineType,
+			Log: &_logConfig{
+				Console:    false,
+				LogFileDir: &logDir,
+				Debug:      false,
+				Source:     true,
+				Json:       true,
+			},
+			ItemsConfigFile: path.Join(
+				cst.MySQLMonitorInstallPath,
+				fmt.Sprintf("items-config_%d.yaml", instance.Port),
+			),
+			ApiUrl:          c.Params.ApiUrl,
+			DBASysDbs:       c.Params.SystemDbs,
+			InteractTimeout: 5 * time.Second,
+			DefaultSchedule: "@every 1m",
+		}
+
+		switch c.Params.MachineType {
+		case "backend", "single", "remote":
+			cfg.Auth = authCollect{
+				Mysql: &connectAuth{
+					User:     c.GeneralParam.RuntimeAccountParam.MonitorUser,
+					Password: c.GeneralParam.RuntimeAccountParam.MonitorPwd,
+				},
+			}
+		case "proxy":
+			cfg.Auth = authCollect{
+				ProxyAdmin: &connectAuth{
+					User:     c.GeneralParam.RuntimeAccountParam.ProxyAdminUser,
+					Password: c.GeneralParam.RuntimeAccountParam.ProxyAdminPwd,
+				},
+				Proxy: &connectAuth{
+					User:     c.GeneralParam.RuntimeAccountParam.MonitorAccessAllUser,
+					Password: c.GeneralParam.RuntimeAccountParam.MonitorAccessAllPwd,
+				},
+			}
+		default:
+			err := errors.Errorf("not support machine type: %s", c.Params.MachineType)
+			logger.Error(err.Error())
+			return err
+		}
+
+		yamlData, err := yaml.Marshal(&cfg)
+		if err != nil {
+			logger.Error("marshal monitor config for %d failed: %s", instance.Port, err.Error())
+			return err
+		}
+
+		f, err := os.OpenFile(
+			path.Join(cst.MySQLMonitorInstallPath, fmt.Sprintf("monitor-config_%d.yaml", instance.Port)),
+			os.O_CREATE|os.O_TRUNC|os.O_WRONLY,
+			0644,
+		)
+		if err != nil {
+			logger.Error("create config file for %d failed: %s", instance.Port, err.Error())
+			return err
+		}
+		configFs = append(configFs, f)
+
+		_, err = f.Write(yamlData)
+		if err != nil {
+			logger.Error("write config file for %d failed: %s", instance.Port, err.Error())
+			return err
+		}
+
+		_, err = osutil.ExecShellCommand(
+			false,
+			fmt.Sprintf(
+				`chown mysql %s`,
+				path.Join(
+					cst.MySQLMonitorInstallPath, fmt.Sprintf("monitor-config_%d.yaml", instance.Port),
+				),
+			),
+		)
+		if err != nil {
+			logger.Error("chown config file for %d failed: %s", instance.Port, err.Error())
+			return err
+		}
+	}
+	return err
+}
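+
+// Illustrative sketch only: given the yaml tags on monitorConfig above, the
+// rendered monitor-config_20000.yaml would look roughly like
+//
+//	bk_biz_id: 1
+//	ip: 127.0.0.1
+//	port: 20000
+//	bk_instance_id: 100
+//	machine_type: backend
+//	items_config_file: <install_path>/items-config_20000.yaml
+//	api_url: http://x.x.x.x:yyyy
+//	default_schedule: '@every 1m'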
+
+// GenerateItemsConfig writes the monitor items config file for each instance
+func (c *InstallMySQLMonitorComp) GenerateItemsConfig() (err error) {
+	var monitorItems []monitorItem
+	for k, v := range c.Params.ItemsConfig {
+		monitorItems = append(
+			monitorItems, monitorItem{
+				Name:        k,
+				Enable:      v.Enable,
+				Schedule:    v.Schedule,
+				MachineType: v.MachineType,
+				Role:        v.Role,
+			},
+		)
+	}
+
+	content, err := yaml.Marshal(monitorItems)
+	if err != nil {
+		logger.Error("marshal items config failed: %s", err.Error())
+		return err
+	}
+
+	for _, instance := range c.Params.InstancesInfo {
+		f, err := os.OpenFile(
+			path.Join(
+				cst.MySQLMonitorInstallPath,
+				fmt.Sprintf(`items-config_%d.yaml`, instance.Port),
+			),
+			os.O_CREATE|os.O_TRUNC|os.O_RDWR,
+			0755,
+		)
+		if err != nil {
+			logger.Error("create items-config file failed: %s", err.Error())
+			return err
+		}
+
+		_, err = f.Write(append(content, []byte("\n")...))
+		if err != nil {
+			logger.Error("write items-config file failed: %s", err.Error())
+			_ = f.Close()
+			return err
+		}
+		_ = f.Close()
+	}
+	return nil
+}
+
+// AddToCrond registers each monitor config with mysql-crond via "reschedule"
+func (c *InstallMySQLMonitorComp) AddToCrond() (err error) {
+	mysqlMonitor, err := c.tools.Get(tools.ToolMySQLMonitor)
+	if err != nil {
+		logger.Error("get %s failed: %s", tools.ToolMySQLMonitor, err.Error())
+		return err
+	}
+
+	for _, ins := range c.Params.InstancesInfo {
+		command := exec.Command(
+			mysqlMonitor,
+			"reschedule",
+			"--staff", c.Params.ExecUser,
+			"--config", path.Join(
+				cst.MySQLMonitorInstallPath,
+				fmt.Sprintf("monitor-config_%d.yaml", ins.Port),
+			),
+		)
+		var stdout, stderr bytes.Buffer
+		command.Stdout = &stdout
+		command.Stderr = &stderr
+
+		err := command.Run()
+		if err != nil {
+			logger.Error("run %s failed: %s, %s", command, err.Error(), stderr.String())
+			return err
+		}
+		logger.Info("run %s success: %s", command, stdout.String())
+	}
+	return nil
+}
+
+// Example returns a sample InstallMySQLMonitorComp
+func (c *InstallMySQLMonitorComp) Example() interface{} {
+	return InstallMySQLMonitorComp{
+		GeneralParam: &components.GeneralParam{
+			RuntimeAccountParam: components.RuntimeAccountParam{
+				MySQLAccountParam: common.AccountMonitorExample,
+			},
+		},
+		Params: &InstallMySQLMonitorParam{
+			Medium: components.Medium{
+				Pkg:    "mysql-monitor.tar.gz",
+				PkgMd5: "12345",
+			},
+			SystemDbs: native.DBSys,
+			ExecUser:  "whoru",
+			ApiUrl:    "http://x.x.x.x:yyyy",
+			InstancesInfo: []InstanceInfo{
+				{
+					BkBizId:      1,
+					Ip:           "127.0.0.1",
+					Port:         20000,
+					Role:         "master",
+					ClusterId:    12,
+					ImmuteDomain: "aaa.bbb.com",
+				},
+				{
+					BkBizId:      1,
+					Ip:           "127.0.0.1",
+					Port:         20001,
+					Role:         "master",
+					ClusterId:    12,
+					ImmuteDomain: "aaa.bbb.com",
+				},
+			},
+			MachineType: "backend",
+			BkCloudId:   0,
+		},
+	}
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_mysql.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_mysql.go
new file mode 100644
index 0000000000..e94575809e
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_mysql.go
@@ -0,0 +1,991 @@
+package mysql
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/computil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/rollback"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path"
+	"regexp"
+	"strconv"
+	"strings"
+	"text/template"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// InstallMySQLComp installs one or more mysqld instances on a host
+type InstallMySQLComp struct {
+	GeneralParam       *components.GeneralParam `json:"general"`
+	Params             *InstallMySQLParams      `json:"extend"`
+	installMySQLConfig `json:"-"`
+	RollBackContext    rollback.RollBackObjects `json:"-"`
+	TimeZone           string
+}
+
+// InstallMySQLParams input parameters
+type InstallMySQLParams struct {
+	components.Medium
+	// map[port]my.cnf
+	MyCnfConfigs json.RawMessage `json:"mycnf_configs"  validate:"required" `
+	// MysqlVersion only needs the major version, e.g. 5.6 or 5.7
+	MysqlVersion string `json:"mysql_version"  validate:"required"`
+	// character set
+	CharSet string `json:"charset" validate:"required,checkCharset"`
+	// Ports
+	Ports []int `json:"ports" validate:"required,gt=0,dive"`
+	// memory per instance in MB; optional, computed automatically when 0
+	InstMem                  uint64          `json:"inst_mem"`
+	Host                     string          `json:"host" validate:"required,ip" `
+	SuperAccount             SuperAccount    `json:"super_account"`
+	DBHAAccount              DBHAAccount     `json:"dbha_account"`
+	SpiderAutoIncrModeMap    json.RawMessage `json:"spider_auto_incr_mode_map"`
+	AllowDiskFileSystemTypes []string
+}
+
+// SuperAccount a super account to create during installation
+type SuperAccount struct {
+	User        string   `json:"user" validate:"required"`
+	Pwd         string   `json:"pwd"  validate:"required"`
+	AccessHosts []string `json:"access_hosts"`
+}
+
+// DBHAAccount the DBHA access account to create during installation
+type DBHAAccount struct {
+	User        string   `json:"user" validate:"required"`
+	Pwd         string   `json:"pwd"  validate:"required"`
+	AccessHosts []string `json:"access_hosts"`
+}
+
+// InitDirs directories that must be created for an instance
+type InitDirs = []string
+
+// Port a mysql instance port
+type Port = int
+type socket = string
+
+// SpiderAutoIncrModeValue spider auto increment mode value
+type SpiderAutoIncrModeValue int
+
+type installMySQLConfig struct {
+	InstallDir              string
+	MysqlInstallDir         string
+	TdbctlInstallDir        string
+	DataRootPath            string
+	LogRootPath             string
+	DataBaseDir             string // eg: /data1/mysqldata/
+	LogBaseDir              string // eg: /data/mysqllog/
+	DefaultMysqlDataDirName string
+	DefaultMysqlLogDirName  string
+	MyCnfTpls               map[Port]*util.CnfFile // parsed my.cnf templates deserialized from MyCnfConfigs
+	InsPorts                []Port
+	RenderConfigs           map[Port]RenderConfigs
+	InsInitDirs             map[Port]InitDirs
+	InsSockets              map[Port]socket
+	SpiderAutoIncrModeMap   map[Port]SpiderAutoIncrModeValue
+	Checkfunc               []func() error
+}
+
+// RenderConfigs per-port values rendered into the my.cnf template
+type RenderConfigs struct {
+	Mysqld Mysqld
+}
+
+// Mysqld mysqld section values used to render my.cnf
+type Mysqld struct {
+	Port                         string                  `json:"port"`
+	Datadir                      string                  `json:"datadir"`
+	Logdir                       string                  `json:"logdir"`
+	CharacterSetServer           string                  `json:"character_set_server"`
+	CollationServer              string                  `json:"collation_server"`
+	BindAddress                  string                  `json:"bind-address"`
+	ServerId                     uint64                  `json:"server_id"`
+	InnodbBufferPoolSize         string                  `json:"innodb_buffer_pool_size"`
+	SpiderAutoIncrementModeValue SpiderAutoIncrModeValue `json:"spider_auto_increment_mode_value"`
+}
+
+// Example returns a sample InstallMySQLComp
+func (i *InstallMySQLComp) Example() interface{} {
+	comp := InstallMySQLComp{
+		Params: &InstallMySQLParams{
+			Medium: components.Medium{
+				Pkg:    "mysql-5.6.24-linux-x86_64-tmysql-2.2.3-gcs.tar.gz",
+				PkgMd5: "a2dba04a7d96928473ab8ac5132edee1",
+			},
+			MysqlVersion: "",
+			CharSet:      "utf8",
+			Ports:        []int{20000, 20001},
+			InstMem:      0,
+			MyCnfConfigs: []byte(`{
+							"20000":{
+								"client":{"port": "{{mysqld.port}}" },
+								"mysql":{"socket": "{{mysqld.datadir}}/mysql.sock" },
+								"mysqld":{"binlog_format": "ROW","innodb_io_capacity": "1000","innodb_read_io_threads": "8"}},
+							"20001":{
+								"client":{"port": "{{mysqld.port}}"},
+								"mysql":{"socket": "{{mysqld.datadir}}/mysql.sock"},
+								"mysqld":{"binlog_format": "ROW","innodb_io_capacity": "2000","innodb_read_io_threads": "10"}}}`),
+			SuperAccount: SuperAccount{
+				User:        "user",
+				Pwd:         "xxx",
+				AccessHosts: []string{"ip1", "ip2"},
+			},
+			DBHAAccount: DBHAAccount{
+				User:        "user",
+				Pwd:         "xxx",
+				AccessHosts: []string{"ip1", "ip2"},
+			},
+		},
+	}
+	return comp
+}
+
+// InitDefaultParam fills in defaults: install dirs, instance memory, data/log mount points, and parses the my.cnf templates
+func (i *InstallMySQLComp) InitDefaultParam() (err error) {
+	var mountpoint string
+	i.InstallDir = cst.UsrLocal
+	i.MysqlInstallDir = cst.MysqldInstallPath
+	i.TdbctlInstallDir = cst.TdbctlInstallPath
+	i.DataRootPath = cst.DefaultMysqlDataRootPath
+	i.LogRootPath = cst.DefaultMysqlLogRootPath
+	i.DefaultMysqlDataDirName = cst.DefaultMysqlDataBasePath
+	i.DefaultMysqlLogDirName = cst.DefaultMysqlLogBasePath
+	i.Params.AllowDiskFileSystemTypes = []string{"ext4", "xfs"}
+	// ports to install
+	i.InsPorts = i.Params.Ports
+	i.MyCnfTpls = make(map[int]*util.CnfFile)
+	// read system memory and compute the per-instance memory size
+	if err := i.initMySQLInstanceMem(); err != nil {
+		return err
+	}
+	// prefer the /data1 mount for the data directory
+	mountpoint, err = osutil.FindFirstMountPoint(cst.DefaultMysqlDataRootPath, cst.AlterNativeMysqlDataRootPath)
+	if err != nil {
+		logger.Error("not found mount point /data1")
+		return err
+	}
+	i.DataRootPath = mountpoint
+	i.DataBaseDir = path.Join(mountpoint, cst.DefaultMysqlDataBasePath)
+	// prefer the /data mount for the log directory
+	mountpoint, err = osutil.FindFirstMountPoint(cst.DefaultMysqlLogRootPath, cst.AlterNativeMysqlLogRootPath)
+	if err != nil {
+		logger.Error("not found mount point /data")
+		return err
+	}
+	i.LogRootPath = mountpoint
+	i.LogBaseDir = path.Join(mountpoint, cst.DefaultMysqlLogBasePath)
+
+	// deserialize the my.cnf configs
+	var mycnfs map[Port]json.RawMessage
+	if err = json.Unmarshal([]byte(i.Params.MyCnfConfigs), &mycnfs); err != nil {
+		logger.Error("unmarshal my.cnf configs failed: %s", err.Error())
+		return err
+	}
+
+	for _, port := range i.InsPorts {
+		var cnfraw json.RawMessage
+		var ok bool
+		if cnfraw, ok = mycnfs[port]; !ok {
+			return fmt.Errorf("no my.cnf config for port %d in params", port)
+		}
+		var mycnf mysqlutil.MycnfObject
+		if err = json.Unmarshal(cnfraw, &mycnf); err != nil {
+			logger.Error("unmarshal my.cnf config for %d failed: %s", port, err.Error())
+			return err
+		}
+		cnftpl, err := util.NewMyCnfObject(mycnf, "tpl")
+		if err != nil {
+			logger.Error("init my.cnf ini template failed: %s", err.Error())
+			return err
+		}
+		i.MyCnfTpls[port] = cnftpl
+	}
+
+	// render SpiderAutoIncrModeMap when it is provided
+	if i.Params.SpiderAutoIncrModeMap != nil {
+		i.SpiderAutoIncrModeMap = make(map[int]SpiderAutoIncrModeValue)
+		if err = json.Unmarshal([]byte(i.Params.SpiderAutoIncrModeMap), &i.SpiderAutoIncrModeMap); err != nil {
+			logger.Error("unmarshal spider_auto_incr_mode_map failed: %s", err.Error())
+			return err
+		}
+	}
+
+	// compute the per-instance values substituted into my.cnf
+	if err := i.initInsReplaceMyConfigs(); err != nil {
+		return err
+	}
+	i.Checkfunc = append(i.Checkfunc, i.CheckTimeZoneSetting)
+	i.Checkfunc = append(i.Checkfunc, i.precheckMysqlDir)
+	i.Checkfunc = append(i.Checkfunc, i.precheckMysqlProcess)
+	i.Checkfunc = append(i.Checkfunc, i.precheckMysqlPackageBitOS)
+	i.Checkfunc = append(i.Checkfunc, i.Params.Medium.Check)
+	i.Checkfunc = append(i.Checkfunc, i.precheckFilesystemType)
+	return nil
+}
+
+// PreCheck runs all registered precheck functions
+func (i *InstallMySQLComp) PreCheck() error {
+	for _, f := range i.Checkfunc {
+		if err := f(); err != nil {
+			logger.Error("check failed %s", err.Error())
+			return err
+		}
+	}
+	return nil
+}
+
+// precheckMysqlDir checks whether mysql data or log directories already exist
+// under the root paths, e.g.:
+//
+//	/data1/mysqldata/{port}
+//	/data/mysqldata/{port}
+//	/data1/mysqllog/{port}
+//	/data/mysqllog/{port}
+func (i *InstallMySQLComp) precheckMysqlDir() error {
+	for _, port := range i.InsPorts {
+		for _, rootDir := range []string{cst.DefaultMysqlLogRootPath, cst.DefaultMysqlDataRootPath} {
+			d := path.Join(rootDir, i.DefaultMysqlDataDirName, strconv.Itoa(port))
+			if osutil.FileExist(d) {
+				return fmt.Errorf("%s already exists", d)
+			}
+			l := path.Join(rootDir, i.DefaultMysqlLogDirName, strconv.Itoa(port))
+			if osutil.FileExist(l) {
+				return fmt.Errorf("%s already exists", l)
+			}
+		}
+	}
+	return nil
+}
+
+func (i *InstallMySQLComp) precheckFilesystemType() (err error) {
+	mountInfo := osutil.GetMountPathInfo()
+	for _, key := range util.UniqueStrings([]string{i.DataRootPath, i.LogRootPath}) {
+		if v, exist := mountInfo[key]; exist {
+			logger.Info("%s : %s", key, v.FileSystemType)
+			if !util.StringsHas(i.Params.AllowDiskFileSystemTypes, v.FileSystemType) {
+				return fmt.Errorf("filesystem of %s is %s, which is not allowed", key, v.FileSystemType)
+			}
+		} else {
+			return fmt.Errorf("filesystem type of %s not found", key)
+		}
+	}
+	return nil
+}
+
+func (i *InstallMySQLComp) precheckMysqlProcess() (err error) {
+	var output string
+	var mysqldNum int
+
+	// skip the check while deploying the tdbctl component, whose deployment
+	// scenario conflicts with it
+	if strings.Contains(i.Params.Pkg, "tdbctl") {
+		logger.Warn("deploying tdbctl component, skip the mysqld process check")
+		return nil
+	}
+
+	if output, err = osutil.ExecShellCommand(false, "ps -efwww|grep -w mysqld|grep -v grep|wc -l"); err != nil {
+		return errors.Wrap(err, "exec ps -efwww|grep -w mysqld|grep -v grep|wc -l failed")
+	}
+	if mysqldNum, err = strconv.Atoi(osutil.CleanExecShellOutput(output)); err != nil {
+		logger.Error("strconv.Atoi %s failed:%s", output, err.Error())
+		return err
+	}
+	if mysqldNum > 0 {
+		return errors.Errorf("have %d mysqld process running", mysqldNum)
+	}
+	return nil
+}
+
+func (i *InstallMySQLComp) precheckMysqlPackageBitOS() error {
+	var mysqlBits = cst.Bit64
+	if strings.Contains(i.Params.MysqlVersion, cst.Bit32) {
+		mysqlBits = cst.Bit32
+	}
+	if strings.Compare(mysqlBits, strconv.Itoa(cst.OSBits)) != 0 {
+		return fmt.Errorf("mysql package bits do not match the OS; the OS is %d-bit", cst.OSBits)
+	}
+	return nil
+}
+
+// initMySQLInstanceMem computes the per-instance memory size;
+// GetInstMemByIP returns the memory in MB
+func (i *InstallMySQLComp) initMySQLInstanceMem() (err error) {
+	var instMem uint64
+	if i.Params.InstMem > 0 {
+		return nil
+	}
+	if instMem, err = mysqlutil.GetInstMemByIP(uint64(len(i.InsPorts))); err != nil {
+		logger.Error("get instance memory failed, err: %s", err.Error())
+		return fmt.Errorf("get instance memory failed, err: %w", err)
+	}
+	i.Params.InstMem = instMem
+	return nil
+}
+
+// initInsReplaceMyConfigs initializes the per-instance values that are
+// substituted into the final my.cnf, e.g.:
+//
+//	mysqldata
+//		- socket                    socket=/data1/mysqldata/20000/mysql.sock
+//		- datadir                   datadir=/data1/mysqldata/20000/data
+//		- tmpdir                    tmpdir=/data1/mysqldata/20000/tmp
+//		- innodb_data_home_dir      innodb_data_home_dir=/data1/mysqldata/20000/innodb/data
+//		- innodb_log_group_home_dir innodb_log_group_home_dir=/data1/mysqldata/20000/innodb/log
+//	mysqllog
+//		- log_bin                   log_bin=/data/mysqllog/20000/binlog/binlog20000.bin
+//		- slow_query_log_file       slow_query_log_file=/data/mysqllog/20000/slow-query.log
+//		- relay-log                 relay-log=/data1/mysqldata/relay-log/relay-log.bin
+func (i *InstallMySQLComp) initInsReplaceMyConfigs() error {
+	i.RenderConfigs = make(map[int]RenderConfigs)
+	i.InsInitDirs = make(map[int]InitDirs)
+	i.InsSockets = make(map[int]string)
+	for _, port := range i.InsPorts {
+		insBaseDataDir := path.Join(i.DataBaseDir, strconv.Itoa(port))
+		insBaseLogDir := path.Join(i.LogBaseDir, strconv.Itoa(port))
+		serverId, err := mysqlutil.GenMysqlServerId(i.Params.Host, port)
+		if err != nil {
+			logger.Error("%s:%d generation serverId Failed %s", i.Params.Host, port, err.Error())
+			return err
+		}
+		i.RenderConfigs[port] = RenderConfigs{Mysqld{
+			Datadir:                      insBaseDataDir,
+			Logdir:                       insBaseLogDir,
+			ServerId:                     serverId,
+			Port:                         strconv.Itoa(port),
+			CharacterSetServer:           i.Params.CharSet,
+			InnodbBufferPoolSize:         fmt.Sprintf("%dM", i.Params.InstMem),
+			BindAddress:                  i.Params.Host,
+			SpiderAutoIncrementModeValue: i.SpiderAutoIncrModeMap[port],
+		}}
+
+		i.InsInitDirs[port] = append(i.InsInitDirs[port], []string{insBaseDataDir, insBaseLogDir}...)
+	}
+	return nil
+}
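+
+// Illustrative only: for host 1.1.1.1, port 20000 and the default mounts, the
+// values rendered above come out roughly as Datadir=/data1/mysqldata/20000,
+// Logdir=/data/mysqllog/20000, Port="20000" and
+// InnodbBufferPoolSize="<inst_mem>M".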
+
+// getInitDirFromCnf collects, from the rendered my.cnf files, the directories
+// that must be created before initialization
+func (i *InstallMySQLComp) getInitDirFromCnf() (err error) {
+	// config items whose values are directories (or sockets) to initialize
+	initDirTpls := map[string]string{
+		"datadir":                   "",
+		"innodb_log_group_home_dir": "",
+		"innodb_data_home_dir":      "",
+		"log_bin":                   "",
+		"relay-log":                 "",
+		"tmpdir":                    "",
+		"socket":                    "",
+	}
+	for _, port := range i.InsPorts {
+		cnf, err := util.LoadMyCnfForFile(util.GetMyCnfFileName(port))
+		if err != nil {
+			return err
+		}
+		if err := cnf.GetInitDirItemTpl(initDirTpls); err != nil {
+			return err
+		}
+		for key, dir := range initDirTpls {
+			switch strings.ReplaceAll(key, "-", "_") {
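+			// normalize dashes so keys like "relay-log" hit the "relay_log" case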
+			case "log_bin", "relay_log":
+				i.InsInitDirs[port] = append(i.InsInitDirs[port], path.Dir(dir))
+			case "socket":
+				i.InsSockets[port] = dir
+			default:
+				i.InsInitDirs[port] = append(i.InsInitDirs[port], dir)
+			}
+		}
+	}
+	return err
+}
+
+// GenerateMycnf renders each instance's my.cnf from its template
+func (i *InstallMySQLComp) GenerateMycnf() (err error) {
+	// temp file that holds each port's my.cnf template in turn
+	var tmplFileName = "/tmp/my.cnf.tpl"
+
+	// substitute data dir, log dir etc. to produce the final config files
+	for _, port := range i.InsPorts {
+		i.MyCnfTpls[port].FileName = tmplFileName
+		if err = i.MyCnfTpls[port].SafeSaveFile(false); err != nil {
+			logger.Error("save template file failed: %s", err.Error())
+			return err
+		}
+		// retry, in case the file reads back empty because it was written too recently
+		if err = util.Retry(util.RetryConfig{Times: 3, DelayTime: 100 * time.Millisecond}, func() error {
+			return util.FileIsEmpty(tmplFileName)
+		}); err != nil {
+			return err
+		}
+		tmpl, err := template.ParseFiles(tmplFileName)
+		if err != nil {
+			return errors.WithMessage(err, "template ParseFiles failed")
+		}
+		cnf := util.GetMyCnfFileName(port)
+		f, err := os.Create(cnf)
+		if err != nil {
+			return err
+		}
+		if err := tmpl.Execute(f, i.RenderConfigs[port]); err != nil {
+			_ = f.Close()
+			return err
+		}
+		_ = f.Close()
+		if _, err = osutil.ExecShellCommand(false, fmt.Sprintf("chown -R mysql %s", cnf)); err != nil {
+			logger.Error("chown -R mysql %s %s", cnf, err.Error())
+			return err
+		}
+	}
+	return nil
+}
+
+// InitInstanceDirs creates the instance data/log directories and fixes their
+// ownership
+func (i *InstallMySQLComp) InitInstanceDirs() (err error) {
+	if err = i.getInitDirFromCnf(); err != nil {
+		return err
+	}
+	for _, port := range i.InsPorts {
+		for _, dir := range i.InsInitDirs[port] {
+			if util.StrIsEmpty(dir) {
+				continue
+			}
+			cmd := fmt.Sprintf("mkdir -p %s && chown -R mysql:mysql %s", dir, dir)
+			if _, err := osutil.ExecShellCommand(false, cmd); err != nil {
+				logger.Error("init instance dir %s failed: %s", dir, err.Error())
+				return err
+			}
+			// mkdir succeeded; register the dir so rollback can delete it
+			i.RollBackContext.AddDelFile(dir)
+		}
+	}
+	for _, dir := range []string{i.DataBaseDir, i.LogBaseDir} {
+		if _, err := osutil.ExecShellCommand(false, fmt.Sprintf("chown -R mysql %s", dir)); err != nil {
+			logger.Error("chown %s failed: %s", dir, err.Error())
+			return err
+		}
+	}
+	return nil
+}
+
+// DecompressMysqlPkg validates and decompresses the mysql package
+func (i *InstallMySQLComp) DecompressMysqlPkg() (err error) {
+	if err = os.Chdir(i.InstallDir); err != nil {
+		return fmt.Errorf("cd to dir %s failed, err:%w", i.InstallDir, err)
+	}
+	// remove /usr/local/mysql if it already exists
+	if cmutil.FileExists(i.MysqlInstallDir) {
+		if _, err = osutil.ExecShellCommand(false, "rm -r "+i.MysqlInstallDir); err != nil {
+			logger.Error("rm -r %s error: %w", i.MysqlInstallDir, err)
+			return err
+		}
+	}
+	pkgAbPath := i.Params.Medium.GetAbsolutePath()
+	if output, err := osutil.ExecShellCommand(false, fmt.Sprintf("tar zxf %s", pkgAbPath)); err != nil {
+		logger.Error("tar zxf %s error:%s,%s", pkgAbPath, output, err.Error())
+		return err
+	}
+	mysqlBinaryFile := i.Params.Medium.GePkgBaseName()
+	extraCmd := fmt.Sprintf("ln -sf %s %s && chown -R mysql mysql*", mysqlBinaryFile, i.MysqlInstallDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	logger.Info("mysql binary directory: %s", mysqlBinaryFile)
+	if _, err := os.Stat(i.MysqlInstallDir); err != nil {
+		logger.Error("%s check failed, %v", i.MysqlInstallDir, err)
+		return err
+	}
+	logger.Info("decompress mysql pkg successfully")
+	return nil
+}
+
+// Install runs mysqld init for each port to create the built-in system schemas
+func (i *InstallMySQLComp) Install() (err error) {
+	logger.Info("start installing mysql instances: %v", i.InsPorts)
+	var isSudo = mysqlutil.IsSudo()
+	for _, port := range i.InsPorts {
+		var initialMysql string
+		var output string
+		myCnf := util.GetMyCnfFileName(port)
+		initialLogFile := fmt.Sprintf("/tmp/install_mysql_%d.log", port)
+
+		// init command for versions below mysql 5.7.18 or for spider
+		initialMysql = fmt.Sprintf(
+			"su - mysql -c \"cd /usr/local/mysql && ./scripts/mysql_install_db --defaults-file=%s --user=mysql --force &>%s\"",
+			myCnf, initialLogFile)
+
+		// versions mysql 5.7.18 and above
+		if mysqlutil.MySQLVersionParse(i.Params.MysqlVersion) >= mysqlutil.MySQLVersionParse("5.7.18") &&
+			i.Params.Medium.GetPkgTypeName() == "mysql" {
+			initialMysql = fmt.Sprintf(
+				"su - mysql -c \"cd /usr/local/mysql && ./bin/mysqld --defaults-file=%s --initialize-insecure --user=mysql &>%s\"",
+				myCnf, initialLogFile)
+		}
+		// tdbctl-specific init command
+		if strings.Contains(i.Params.Pkg, "tdbctl") {
+			initialMysql = fmt.Sprintf(
+				"su - mysql -c \"cd %s && ./bin/mysqld --defaults-file=%s --initialize-insecure --user=mysql &>%s\"",
+				i.TdbctlInstallDir, myCnf, initialLogFile)
+		}
+
+		if output, err = osutil.ExecShellCommand(isSudo, initialMysql); err != nil {
+			logger.Error("%s execute failed, %s", initialMysql, output)
+			// when the init log file exists, dump it so the failure is visible
+			if osutil.FileExist(initialLogFile) {
+				ldat, e := os.ReadFile(initialLogFile)
+				if e != nil {
+					logger.Warn("read mysqld init log failed %s", e.Error())
+				} else {
+					logger.Error("mysqld init failure log: %s", string(ldat))
+				}
+			}
+			}
+			return err
+		}
+		time.Sleep(5 * time.Second)
+	}
+	logger.Info("Init all mysqld successfully")
+	return nil
+}
+
+// Startup starts every mysqld instance and retries connecting to confirm each
+// one is up
+func (i *InstallMySQLComp) Startup() (err error) {
+	if err = osutil.ClearTcpRecycle(); err != nil {
+		err = fmt.Errorf("clear tcp recycle failed, err: %w", err)
+		logger.Warn("startup, %s", err.Error())
+	}
+	for _, port := range i.InsPorts {
+		logger.Info("will start %d", port)
+		s := computil.StartMySQLParam{
+			MediaDir:      i.MysqlInstallDir,
+			MyCnfName:     util.GetMyCnfFileName(port),
+			MySQLUser:     "root",
+			MySQLPwd:      "",
+			Socket:        i.InsSockets[port],
+			SkipSlaveFlag: false,
+		}
+		pid, err := s.StartMysqlInstance()
+		if err != nil {
+			logger.Error("start %d failed, err: %s", port, err.Error())
+			return err
+		}
+		i.RollBackContext.AddKillProcess(pid)
+	}
+	return i.linkTmpSocket()
+}
+
+// linkTmpSocket symlinks the instance socket to /tmp/mysql.sock when exactly
+// one instance is installed
+func (i *InstallMySQLComp) linkTmpSocket() (err error) {
+	if len(i.InsPorts) == 1 {
+		socket := i.InsSockets[i.InsPorts[0]]
+		if strings.TrimSpace(socket) == "" {
+			return nil
+		}
+		return osutil.CreateSoftLink(socket, "/tmp/mysql.sock")
+	}
+	return nil
+}
+
+// generateDefaultMysqlAccount builds the SQL that creates the default mysql
+// accounts; realVersion is the actual mysql version
+func (i *InstallMySQLComp) generateDefaultMysqlAccount(realVersion string) (initAccountsql []string) {
+
+	initAccountsql = append(i.GetSuperUserAccount(realVersion), i.GetDBHAAccount(realVersion)...)
+
+	runp := i.GeneralParam.RuntimeAccountParam
+	privParis := []components.MySQLAccountPrivs{}
+	privParis = append(privParis, runp.MySQLAdminAccount.GetAccountPrivs(i.Params.Host))
+	privParis = append(privParis, runp.MySQLMonitorAccessAllAccount.GetAccountPrivs())
+	privParis = append(privParis, runp.MySQLMonitorAccount.GetAccountPrivs(i.Params.Host))
+	privParis = append(privParis, runp.MySQLYwAccount.GetAccountPrivs())
+	for _, v := range privParis {
+		initAccountsql = append(initAccountsql, v.GenerateInitSql(realVersion)...)
+	}
+	if mysqlutil.MySQLVersionParse(realVersion) >= mysqlutil.MySQLVersionParse("5.7.18") {
+		s :=
+			`INSERT INTO mysql.db(Host,Db,User,Select_priv,Insert_priv,Update_priv,Delete_priv,Create_priv,Drop_priv,
+                     Grant_priv,References_priv,Index_priv,Alter_priv,Create_tmp_table_priv,Lock_tables_priv,
+                     Create_view_priv,Show_view_priv,Create_routine_priv,Alter_routine_priv,Execute_priv,
+                     Event_priv,Trigger_priv)
+VALUES ('%','test','','Y','Y','Y','Y','Y','Y','N','Y','Y','Y','Y','Y','Y','Y','Y','N','N','Y','Y');`
+		initAccountsql = append(initAccountsql, s)
+	} else if mysqlutil.MySQLVersionParse(i.Params.MysqlVersion) <= mysqlutil.MySQLVersionParse("5.6") {
+		s := `alter table mysql.general_log change thread_id thread_id bigint(21) unsigned NOT NULL;`
+		initAccountsql = append(initAccountsql, s)
+	}
+	initAccountsql = append(initAccountsql, "delete from mysql.user where user='root' or user='';")
+	initAccountsql = append(initAccountsql, "update mysql.db set Insert_priv = 'Y' where db = 'test';")
+	initAccountsql = append(initAccountsql, "flush privileges;")
+	return
+}
+
+// GetSuperUserAccount builds the create/grant SQL for the super account
+func (i *InstallMySQLComp) GetSuperUserAccount(realVersion string) (initAccountsql []string) {
+	for _, host := range i.Params.SuperAccount.AccessHosts {
+		if mysqlutil.MySQLVersionParse(realVersion) >= mysqlutil.MySQLVersionParse("5.7.18") {
+			initAccountsql = append(initAccountsql,
+				fmt.Sprintf("CREATE USER '%s'@'%s' IDENTIFIED WITH mysql_native_password BY '%s' ;",
+					i.Params.SuperAccount.User, host, i.Params.SuperAccount.Pwd))
+			initAccountsql = append(initAccountsql, fmt.Sprintf("GRANT ALL PRIVILEGES ON *.* TO '%s'@'%s' WITH GRANT OPTION ; ",
+				i.Params.SuperAccount.User, host))
+		} else {
+			initAccountsql = append(initAccountsql,
+				fmt.Sprintf("GRANT ALL PRIVILEGES ON *.* TO '%s'@'%s' IDENTIFIED BY '%s' WITH GRANT OPTION ;",
+					i.Params.SuperAccount.User, host, i.Params.SuperAccount.Pwd))
+		}
+	}
+	return
+}
+
+// GetDBHAAccount builds the create/grant SQL for the DBHA-GM access account
+func (i *InstallMySQLComp) GetDBHAAccount(realVersion string) (initAccountsql []string) {
+	for _, host := range i.Params.DBHAAccount.AccessHosts {
+		if mysqlutil.MySQLVersionParse(realVersion) >= mysqlutil.MySQLVersionParse("5.7.18") {
+			initAccountsql = append(initAccountsql,
+				fmt.Sprintf("CREATE USER '%s'@'%s' IDENTIFIED WITH mysql_native_password BY '%s' ;",
+					i.Params.DBHAAccount.User, host, i.Params.DBHAAccount.Pwd))
+			initAccountsql = append(initAccountsql, fmt.Sprintf(
+				"GRANT RELOAD, PROCESS, SHOW DATABASES, SUPER, REPLICATION CLIENT, SHOW VIEW ON *.* TO '%s'@'%s' WITH GRANT OPTION ; ",
+				i.Params.DBHAAccount.User, host))
+		} else {
+			initAccountsql = append(initAccountsql,
+				fmt.Sprintf(
+					"GRANT RELOAD, PROCESS, SHOW DATABASES, SUPER, REPLICATION CLIENT, SHOW VIEW ON *.* TO '%s'@'%s' IDENTIFIED BY '%s' WITH GRANT OPTION ;",
+					i.Params.DBHAAccount.User, host, i.Params.DBHAAccount.Pwd))
+		}
+	}
+	return
+}
+
+// InitDefaultPrivAndSchema executes the default system schema SQL and the
+// default account SQL on every instance
+func (i *InstallMySQLComp) InitDefaultPrivAndSchema() (err error) {
+	var bsql []byte
+	var initSQLs []string
+	if bsql, err = staticembed.DefaultSysSchemaSQL.ReadFile(staticembed.DefaultSysSchemaSQLFileName); err != nil {
+		logger.Error("read embedded file %s failed", staticembed.DefaultSysSchemaSQLFileName)
+		return
+	}
+	for _, value := range strings.SplitAfterN(string(bsql), ";", -1) {
+		// SplitAfterN keeps the ";" separator and yields a trailing empty
+		// element; the blank filter below discards it together with any
+		// whitespace-only fragments
+		if !regexp.MustCompile(`^\s*$`).MatchString(value) {
+			initSQLs = append(initSQLs, value)
+		}
+	}
+
+	if len(initSQLs) == 0 {
+		return fmt.Errorf("init SQL is empty: %v", initSQLs)
+	}
+	for _, port := range i.InsPorts {
+		var dbWork *native.DbWorker
+		if dbWork, err = native.NewDbWorker(native.DsnBySocket(i.InsSockets[port], "root", "")); err != nil {
+			logger.Error("connect by %d failed, err: %s", port, err.Error())
+			return
+		}
+		// initialize the system schemas
+		if _, err := dbWork.ExecMore(initSQLs); err != nil {
+			logger.Error("execute init schema sql failed %v", err)
+			return err
+		}
+		version, err := dbWork.SelectVersion()
+		if err != nil {
+			logger.Error("get %d mysql version failed  %v", port, err)
+			return err
+		}
+
+		// initialize the default accounts
+		var initAccountSqls []string
+		if strings.Contains(version, "tspider") {
+			// temporarily done via a shell command instead of executing the SQL file
+			if err := i.create_spider_table(i.InsSockets[port]); err != nil {
+				return err
+			}
+			initAccountSqls = i.generateDefaultSpiderAccount(version)
+		} else {
+			initAccountSqls = i.generateDefaultMysqlAccount(version)
+		}
+		// reset master after initialization so the binlog starts fresh and does not disturb replication
+		initAccountSqls = append(initAccountSqls, "reset master;")
+
+		if _, err := dbWork.ExecMore(initAccountSqls); err != nil {
+			logger.Error("execute init account sql failed %v", err)
+			return err
+		}
+	}
+	logger.Info("init default priv and schema successfully")
+	return nil
+}
+
+// CheckTimeZoneSetting validates, before installing, that the mysqld time zone matches the operating system; installation is refused on mismatch
+func (i *InstallMySQLComp) CheckTimeZoneSetting() (err error) {
+	timeZoneKeyName := "default_time_zone"
+	execCmd := "date +%:z"
+	output, err := osutil.ExecShellCommand(false, execCmd)
+	if err != nil {
+		logger.Error("exec get date script failed %s", err.Error())
+		return err
+	}
+	i.TimeZone = osutil.CleanExecShellOutput(output)
+	for _, port := range i.InsPorts {
+		instanceTimeZone, err := i.MyCnfTpls[port].GetMysqldKeyVaule(timeZoneKeyName)
+		if err != nil {
+			logger.Error("exec get instance config [%d] default_time_zone failed %s", port, err.Error())
+			return err
+		}
+		// when default_time_zone is not set, mysql defaults to SYSTEM; warn but allow the install
+		if instanceTimeZone == "" {
+			// when the underscore lookup finds nothing, retry with dashes
+			instanceTimeZone, err = i.MyCnfTpls[port].GetMysqldKeyVaule(strings.ReplaceAll(timeZoneKeyName, "_", "-"))
+			if err != nil {
+				logger.Error("exec get instance config [%d] default_time_zone failed %s", port, err.Error())
+				return err
+			}
+			if instanceTimeZone == "" {
+				logger.Warn("[%d] default_time_zone cannot find a value, it is recommended to set a specific value", port)
+				continue
+			}
+		}
+		// bail out if the OS and instance time zones differ and the instance value is not SYSTEM
+		if i.TimeZone != instanceTimeZone && instanceTimeZone != "SYSTEM" {
+			return fmt.Errorf(
+				"time zone of mysqld[%d] is inconsistent with the operating system setting, please check", port)
+		}
+	}
+	return nil
+}
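+// Illustrative example (not part of this change): on a host in UTC+8, `date +%:z`
+// prints "+08:00", so the check above passes only when default_time_zone is
+// "+08:00", "SYSTEM", or left unset.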
+
+// CreateExporterCnf generates an exporter config file for every deployed mysql port
+func (i *InstallMySQLComp) CreateExporterCnf() (err error) {
+	for _, port := range i.InsPorts {
+		exporterConfName := fmt.Sprintf("/etc/exporter_%d.cnf", port)
+		if err = util.CreateExporterConf(
+			exporterConfName,
+			i.Params.Host,
+			strconv.Itoa(port),
+			i.GeneralParam.RuntimeAccountParam.MonitorUser,
+			i.GeneralParam.RuntimeAccountParam.MonitorPwd,
+		); err != nil {
+			logger.Error("create exporter conf err : %s", err.Error())
+			return err
+		}
+		if _, err = osutil.ExecShellCommand(false, fmt.Sprintf("chown -R mysql %s", exporterConfName)); err != nil {
+			logger.Error("chown -R mysql %s %s", exporterConfName, err.Error())
+			return err
+		}
+	}
+	return nil
+
+}
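+// Illustrative example: for port 20000 the loop above writes /etc/exporter_20000.cnf
+// with the monitor account credentials and chowns it to the mysql user.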
+
+// InstallRplSemiSyncPlugin installs the semi-sync replication plugins (currently only spider ctl instances need this)
+func (i *InstallMySQLComp) InstallRplSemiSyncPlugin() (err error) {
+	var execSQLs []string
+	execSQLs = append(execSQLs, "INSTALL PLUGIN rpl_semi_sync_master SONAME 'semisync_master.so';")
+	execSQLs = append(execSQLs, "INSTALL PLUGIN rpl_semi_sync_slave SONAME 'semisync_slave.so';")
+	logger.Info("installing rpl_semi_sync plugin...")
+
+	for _, port := range i.InsPorts {
+		// connect to the local instance
+		dbConn, err := native.InsObject{
+			Host: i.Params.Host,
+			Port: port,
+			User: i.GeneralParam.RuntimeAccountParam.AdminUser,
+			Pwd:  i.GeneralParam.RuntimeAccountParam.AdminPwd,
+		}.Conn()
+		if err != nil {
+			logger.Error("Connect %d failed:%s", port, err.Error())
+			return err
+		}
+		if _, err := dbConn.ExecMore(execSQLs); err != nil {
+			logger.Error("isntall plugin failed:[%s]", err.Error())
+			return err
+		}
+	}
+	return nil
+}
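+// To verify the plugins afterwards one can run (standard MySQL query, not part of this change):
+//   SELECT PLUGIN_NAME, PLUGIN_STATUS FROM information_schema.PLUGINS
+//   WHERE PLUGIN_NAME LIKE 'rpl_semi_sync%';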
+
+// DecompressTdbctlPkg for the mysql-tdbctl scenario: decompress the package into a fresh directory used as the tdbctl runtime dir
+func (i *InstallMySQLComp) DecompressTdbctlPkg() (err error) {
+	if err = os.Chdir(i.InstallDir); err != nil {
+		return fmt.Errorf("cd to dir %s failed, err:%w", i.InstallDir, err)
+	}
+	// remove /usr/local/tdbctl if it already exists
+	if cmutil.FileExists(i.TdbctlInstallDir) {
+		if _, err = osutil.ExecShellCommand(false, "rm -r "+i.TdbctlInstallDir); err != nil {
+			logger.Error("rm -r %s error: %v", i.TdbctlInstallDir, err)
+			return err
+		}
+	}
+
+	tdbctlBinaryFile := i.Params.Medium.GePkgBaseName()
+
+	// remove the unpacked tdbctl directory if it already exists
+	if cmutil.FileExists(tdbctlBinaryFile) {
+		if _, err = osutil.ExecShellCommand(false, "rm -r "+tdbctlBinaryFile); err != nil {
+			logger.Error("rm -r %s error: %v", tdbctlBinaryFile, err)
+			return err
+		}
+	}
+
+	pkgAbPath := i.Params.Medium.GetAbsolutePath()
+	if output, err := osutil.ExecShellCommand(
+		false,
+		fmt.Sprintf("mkdir %s && tar zxf %s -C %s --strip-components 1 ", tdbctlBinaryFile, pkgAbPath,
+			tdbctlBinaryFile)); err != nil {
+		logger.Error("tar zxf %s error:%s,%s", pkgAbPath, output, err.Error())
+		return err
+	}
+
+	extraCmd := fmt.Sprintf("ln -sf %s %s && chown -R mysql mysql*", tdbctlBinaryFile, i.TdbctlInstallDir)
+	if _, err = osutil.ExecShellCommand(false, extraCmd); err != nil {
+		logger.Error("%s execute failed, %v", extraCmd, err)
+		return err
+	}
+	logger.Info("mysql binary directory: %s", tdbctlBinaryFile)
+	if _, err := os.Stat(i.TdbctlInstallDir); err != nil {
+		logger.Error("%s check failed, %v", i.TdbctlInstallDir, err)
+		return err
+	}
+	logger.Info("decompress mysql pkg successfully")
+	return nil
+}
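+// Resulting layout (illustrative): the tarball is unpacked into a directory named after
+// the package base name, and TdbctlInstallDir (/usr/local/tdbctl per the comment above)
+// is a symlink pointing at it, both owned by the mysql user.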
+
+// TdbctlStartup starts the mysql-tdbctl instances, retrying the connection to verify startup succeeded
+func (i *InstallMySQLComp) TdbctlStartup() (err error) {
+	if err = osutil.ClearTcpRecycle(); err != nil {
+		err = fmt.Errorf("clear tcp recycle failed, err: %w", err)
+		logger.Warn("startup, %s", err.Error())
+	}
+	for _, port := range i.InsPorts {
+		logger.Info("will start %d", port)
+		s := computil.StartMySQLParam{
+			MediaDir:      i.TdbctlInstallDir,
+			MyCnfName:     util.GetMyCnfFileName(port),
+			MySQLUser:     "root",
+			MySQLPwd:      "",
+			Socket:        i.InsSockets[port],
+			SkipSlaveFlag: false,
+		}
+		pid, err := s.StartMysqlInstance()
+		if err != nil {
+			logger.Error("start %d faild err: %s", port, err.Error())
+			return err
+		}
+		i.RollBackContext.AddKillProcess(pid)
+	}
+	return nil
+}
+
+// generateDefaultSpiderAccount generates the spider-specific default mysql account init SQL.
+// realVersion is the actual mysql version string.
+func (i *InstallMySQLComp) generateDefaultSpiderAccount(realVersion string) (initAccountsql []string) {
+	initAccountsql = i.getSuperUserAccountForSpider()
+	runp := i.GeneralParam.RuntimeAccountParam
+	privParis := []components.MySQLAccountPrivs{}
+	privParis = append(privParis, runp.MySQLAdminAccount.GetAccountPrivs(i.Params.Host))
+	privParis = append(privParis, runp.MySQLMonitorAccessAllAccount.GetAccountPrivs())
+	privParis = append(privParis, runp.MySQLMonitorAccount.GetAccountPrivs(i.Params.Host))
+	privParis = append(privParis, runp.MySQLYwAccount.GetAccountPrivs())
+	for _, v := range privParis {
+		initAccountsql = append(initAccountsql, v.GenerateInitSql(realVersion)...)
+	}
+	if mysqlutil.MySQLVersionParse(realVersion) <= mysqlutil.MySQLVersionParse("5.6") {
+		s := `alter table mysql.general_log change thread_id thread_id bigint(21) unsigned NOT NULL;`
+		initAccountsql = append(initAccountsql, s)
+	}
+	// the SOURCE command does not work here for unknown reasons; run the SQL file via shell instead
+	// initAccountsql = append(initAccountsql, fmt.Sprintf("source %s/scripts/install_spider.sql;", i.MysqlInstallDir))
+	initAccountsql = append(initAccountsql, "delete from mysql.user where user='root' or user='';")
+	initAccountsql = append(initAccountsql, "update mysql.db set Insert_priv = 'Y' where db = 'test';")
+	initAccountsql = append(initAccountsql, "flush privileges;")
+	return
+}
+
+// getSuperUserAccountForSpider creates the account whitelist for the DRS and DBHA services to access spider
+func (i *InstallMySQLComp) getSuperUserAccountForSpider() (initAccountsql []string) {
+	for _, host := range i.Params.SuperAccount.AccessHosts {
+		initAccountsql = append(initAccountsql,
+			fmt.Sprintf("GRANT ALL PRIVILEGES ON *.* TO '%s'@'%s' IDENTIFIED BY '%s' WITH GRANT OPTION ;",
+				i.Params.SuperAccount.User, host, i.Params.SuperAccount.Pwd))
+	}
+	for _, host := range i.Params.DBHAAccount.AccessHosts {
+		initAccountsql = append(initAccountsql,
+			fmt.Sprintf(
+				"GRANT RELOAD, PROCESS, SHOW DATABASES, SUPER, REPLICATION CLIENT, SHOW VIEW ON *.* TO '%s'@'%s' IDENTIFIED BY '%s' WITH GRANT OPTION ;",
+				i.Params.DBHAAccount.User, host, i.Params.DBHAAccount.Pwd))
+	}
+	return
+}
+
+func (i *InstallMySQLComp) create_spider_table(socket string) (err error) {
+	return mysqlutil.ExecuteSqlAtLocal{
+		User:     "root",
+		Password: "",
+		Socket:   socket,
+		Charset:  i.Params.CharSet,
+	}.ExcuteSqlByMySQLClientOne(path.Join(i.MysqlInstallDir, "scripts/install_spider.sql"), "")
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_new_dbbackup.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_new_dbbackup.go
new file mode 100644
index 0000000000..ff5f74529b
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_new_dbbackup.go
@@ -0,0 +1,372 @@
+package mysql
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+	"text/template"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/cast"
+	"gopkg.in/ini.v1"
+)
+
+// InstallNewDbBackupComp installs the dbbackup-go tool
+type InstallNewDbBackupComp struct {
+	GeneralParam *components.GeneralParam
+	Params       *InstallNewDbBackupParam
+	runtimeContext
+}
+
+// InstallNewDbBackupParam installation parameters
+type InstallNewDbBackupParam struct {
+	components.Medium
+	Configs        dbbackup.Cnf    `json:"configs" validate:"required"`         // template config
+	Options        BackupOptions   `json:"options" validate:"required"`         // option parameters
+	Host           string          `json:"host"  validate:"required,ip"`        // host address of the current instance
+	Ports          []int           `json:"ports" validate:"required,gt=0,dive"` // all ports on this machine that need the backup tool
+	Role           string          `json:"role" validate:"required"`            // role of the mysqld installed on this host
+	BkBizId        string          `json:"bk_biz_id" validate:"required"`       // bk business id
+	BkCloudId      string          `json:"bk_cloud_id"`                         // bk cloud area id
+	ClusterAddress map[Port]string `json:"cluster_address"`                     // cluster address
+	ClusterId      map[Port]int    `json:"cluster_id"`                          // cluster id
+	ExecUser       string          `json:"exec_user"`                           // user executing the job
+}
+
+type runtimeContext struct {
+	installPath string                    // dbbackup install path
+	dbConn      map[Port]*native.DbWorker // db connection pool
+	versionMap  map[Port]string           // db instance versions on this machine
+	renderCnf   map[Port]dbbackup.Cnf
+	ignoredbs   []string
+	ignoretbls  []string
+}
+
+// BackupOptions backup options
+type BackupOptions struct {
+	BackupType  string `json:"BackupType" validate:"required"`
+	CrontabTime string `json:"CrontabTime" validate:"required,crontabexpr"`
+	IgnoreObjs  struct {
+		// "mysql,test,db_infobase,information_schema,performance_schema,sys"
+		IgnoreDatabases string `json:"ExcludeDatabases"`
+		IgnoreTables    string `json:"ExcludeTables"`
+	} `json:"Logical"`
+	Master dbbackup.LogicBackupDataOption `json:"Master" validate:"required"`
+	Slave  dbbackup.LogicBackupDataOption `json:"Slave"`
+}
+
+// Example sample parameters
+func (i *InstallNewDbBackupComp) Example() interface{} {
+	comp := InstallNewDbBackupComp{
+		Params: &InstallNewDbBackupParam{
+			Medium: components.Medium{
+				Pkg:    "dbbackup-go.tar.gz",
+				PkgMd5: "90e5be347c606218b055a61f990ecdf4",
+			},
+			Host:  "127.0.0.1",
+			Ports: []int{20000, 20001},
+			Options: BackupOptions{
+				CrontabTime: "09:00:00",
+				BackupType:  "logical",
+				Master:      dbbackup.LogicBackupDataOption{DataSchemaGrant: "grant"},
+				Slave:       dbbackup.LogicBackupDataOption{DataSchemaGrant: "grant"},
+			},
+			Configs:        dbbackup.Cnf{},
+			Role:           "slave",
+			ClusterAddress: map[Port]string{20000: "testdb1.xx.a1.db", 20001: "testdb2.xx.a1.db"},
+			ClusterId:      map[Port]int{20000: 111, 20001: 112},
+		},
+	}
+	return comp
+}
+
+// Init sets paths and connects to every instance port
+func (i *InstallNewDbBackupComp) Init() (err error) {
+	i.initBackupOptions()
+	i.installPath = path.Join(cst.MYSQL_TOOL_INSTALL_PATH, cst.BackupDir)
+	i.dbConn = make(map[int]*native.DbWorker)
+	i.versionMap = make(map[int]string)
+	i.renderCnf = make(map[int]dbbackup.Cnf)
+	for _, port := range i.Params.Ports {
+		dbwork, err := native.InsObject{
+			Host: i.Params.Host,
+			Port: port,
+			User: i.GeneralParam.RuntimeAccountParam.AdminUser,
+			Pwd:  i.GeneralParam.RuntimeAccountParam.AdminPwd,
+		}.Conn()
+		if err != nil {
+			return fmt.Errorf("init db conn %d failed err:%w", port, err)
+		}
+		i.dbConn[port] = dbwork
+		version, err := dbwork.SelectVersion()
+		if err != nil {
+			return err
+		}
+		i.versionMap[port] = version
+	}
+
+	logger.Info("config %v", i.Params.Configs)
+	return nil
+}
+
+func (i *InstallNewDbBackupComp) initBackupOptions() {
+	logger.Info("options %v", i.Params.Options)
+	var ignoretbls, ignoredbs []string
+	ignoredbs = strings.Split(i.Params.Options.IgnoreObjs.IgnoreDatabases, ",")
+	ignoredbs = append(ignoredbs, native.DBSys...)
+	ignoretbls = strings.Split(i.Params.Options.IgnoreObjs.IgnoreTables, ",")
+
+	i.ignoredbs = util.UniqueStrings(util.RemoveEmpty(ignoredbs))
+	i.ignoretbls = util.UniqueStrings(util.RemoveEmpty(ignoretbls))
+	if len(i.ignoretbls) <= 0 {
+		i.ignoretbls = []string{"*"}
+	}
+	logger.Info("ignore dbs %v", i.ignoredbs)
+	logger.Info("ignore ignoretbls %v", i.ignoretbls)
+}
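+// Illustrative example: with ExcludeDatabases "db1,db1," the ignore-db list becomes
+// ["db1"] plus the system dbs from native.DBSys after RemoveEmpty/UniqueStrings, and
+// an empty ExcludeTables collapses to ["*"].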
+
+func (i *InstallNewDbBackupComp) getInsDomainAddr(port int) string {
+	if i.Params.ClusterAddress == nil {
+		return ""
+	}
+	if len(i.Params.ClusterAddress) == 0 {
+		return ""
+	}
+	if v, ok := i.Params.ClusterAddress[port]; ok {
+		return v
+	}
+	return ""
+}
+func (i *InstallNewDbBackupComp) getInsClusterId(port int) int {
+	if i.Params.ClusterId == nil {
+		return 0
+	}
+	if len(i.Params.ClusterId) == 0 {
+		return 0
+	}
+	if v, ok := i.Params.ClusterId[port]; ok {
+		return v
+	}
+	return 0
+}
+
+// InitRenderData initializes the config variables to render
+func (i *InstallNewDbBackupComp) InitRenderData() (err error) {
+	bkuser := i.GeneralParam.RuntimeAccountParam.DbBackupUser
+	bkpwd := i.GeneralParam.RuntimeAccountParam.DbBackupPwd
+	regexfunc, err := db_table_filter.NewDbTableFilter([]string{"*"}, []string{"*"}, i.ignoredbs, i.ignoretbls)
+	if err != nil {
+		return err
+	}
+	regexStr := regexfunc.TableFilterRegex()
+	logger.Info("regexStr %v", regexStr)
+	// choose backup options according to role
+	var dsg string
+	switch strings.ToUpper(i.Params.Role) {
+	case cst.BackupRoleMaster, cst.BackupRoleRepeater:
+		dsg = i.Params.Options.Master.DataSchemaGrant
+	case cst.BackupRoleSlave:
+		dsg = i.Params.Options.Slave.DataSchemaGrant
+	case cst.BackupRoleOrphan:
+		// orphan uses the tendbsingle Master.DataSchemaGrant
+		dsg = i.Params.Options.Master.DataSchemaGrant
+	case cst.BackupRoleSpiderMaster, cst.BackupRoleSpiderSlave:
+		// spider backs up schema,grant only on spider_master and tdbctl_master
+		dsg = "schema,grant"
+	default:
+		return fmt.Errorf("未知的备份角色%s", i.Params.Role)
+	}
+	for _, port := range i.Params.Ports {
+		i.renderCnf[port] = dbbackup.Cnf{
+			Public: dbbackup.CnfShared{
+				MysqlHost:       i.Params.Host,
+				MysqlPort:       strconv.Itoa(port),
+				MysqlUser:       bkuser,
+				MysqlPasswd:     bkpwd,
+				MysqlRole:       strings.ToLower(i.Params.Role),
+				BkBizId:         i.Params.BkBizId,
+				ClusterAddress:  i.getInsDomainAddr(port),
+				ClusterId:       cast.ToString(i.getInsClusterId(port)),
+				DataSchemaGrant: dsg,
+			},
+			BackupClient: dbbackup.CnfBackupClient{},
+			LogicalBackup: dbbackup.CnfLogicalBackup{
+				Regex: regexStr,
+			},
+		}
+	}
+	return nil
+}
+
+// InitBackupDir creates the backup directory if it does not exist
+func (i *InstallNewDbBackupComp) InitBackupDir() (err error) {
+	backupdir := i.Params.Configs.Public.BackupDir
+	if _, err := os.Stat(backupdir); os.IsNotExist(err) {
+		logger.Warn("backup dir %s is not exist. will make it", backupdir)
+		cmd := fmt.Sprintf("mkdir -p %s", backupdir)
+		output, err := osutil.ExecShellCommand(false, cmd)
+		if err != nil {
+			return fmt.Errorf("execute [%s] get an error:%s,%w", cmd, output, err)
+		}
+	}
+	cmd := fmt.Sprintf("chown -R mysql %s", backupdir)
+	output, err := osutil.ExecShellCommand(false, cmd)
+	if err != nil {
+		return fmt.Errorf("execute [%s] get an error:%s,%w", cmd, output, err)
+	}
+	return
+}
+
+// DecompressPkg decompresses the dbbackup package and chowns it to mysql
+func (i *InstallNewDbBackupComp) DecompressPkg() (err error) {
+	if err = i.Params.Medium.Check(); err != nil {
+		return err
+	}
+	cmd := fmt.Sprintf(
+		"tar zxf %s -C %s &&  chown -R mysql %s", i.Params.Medium.GetAbsolutePath(),
+		path.Dir(i.installPath), i.installPath,
+	)
+	output, err := osutil.ExecShellCommand(false, cmd)
+	if err != nil {
+		err = fmt.Errorf("execute %s error:%w,%s", cmd, err, output)
+		return err
+	}
+	return nil
+}
+
+// InitBackupUserPriv initializes the backup account privileges on every port
+func (i *InstallNewDbBackupComp) InitBackupUserPriv() (err error) {
+	for _, port := range i.Params.Ports {
+		ver := i.versionMap[port]
+		isMysql80 := mysqlutil.MySQLVersionParse(ver) >= mysqlutil.MySQLVersionParse("8.0") &&
+			!strings.Contains(ver, "tspider")
+		privs := i.GeneralParam.RuntimeAccountParam.MySQLDbBackupAccount.GetAccountPrivs(isMysql80, i.Params.Host)
+		sqls := privs.GenerateInitSql(ver)
+		dc, ok := i.dbConn[port]
+		if !ok {
+			return fmt.Errorf("from dbConns 获取%d连接失败", port)
+		}
+		if _, err = dc.ExecMore(sqls); err != nil {
+			logger.Error("初始化备份账户失败%s", err.Error())
+			return
+		}
+	}
+	return
+}
+
+// GenerateDbbackupConfig renders a dbbackup config file for every port
+func (i *InstallNewDbBackupComp) GenerateDbbackupConfig() (err error) {
+	// render the template config file first
+	tmplf := path.Join(i.installPath, fmt.Sprintf("%s.tpl", cst.BackupFile))
+	if err := i.saveTplConfigfile(tmplf); err != nil {
+		return err
+	}
+	tmpl, err := template.ParseFiles(tmplf)
+	if err != nil {
+		return errors.WithMessage(err, "template ParseFiles failed")
+	}
+	for _, port := range i.Params.Ports {
+		cnff := path.Join(i.installPath, cst.GetNewConfigByPort(port))
+		f, err := os.Create(cnff)
+		if err != nil {
+			return errors.WithMessage(err, fmt.Sprintf("create %s failed", cnff))
+		}
+		defer f.Close()
+		if data, ok := i.renderCnf[port]; ok {
+			if err = tmpl.Execute(f, data); err != nil {
+				return errors.WithMessage(err, "渲染%d的备份配置文件失败")
+			}
+		} else {
+			return fmt.Errorf("not found %d render data", port)
+		}
+	}
+	return nil
+}
+
+// ChownGroup changes the ownership of the install directory and marks the scripts executable
+func (i *InstallNewDbBackupComp) ChownGroup() (err error) {
+	cmd := fmt.Sprintf(
+		" chown -R mysql %s ; chmod +x %s/*.sh ; chmod +x %s/dbbackup",
+		path.Dir(i.installPath), i.installPath, i.installPath,
+	)
+	output, err := osutil.ExecShellCommand(false, cmd)
+	if err != nil {
+		err = fmt.Errorf("execute %s error:%w,%s", cmd, err, output)
+		return err
+	}
+	return nil
+}
+
+func (i *InstallNewDbBackupComp) saveTplConfigfile(tmpl string) (err error) {
+	cfg := ini.Empty()
+	if err = cfg.ReflectFrom(&i.Params.Configs); err != nil {
+		return errors.WithMessage(err, "reflect params into ini failed")
+	}
+	if err := cfg.SaveTo(tmpl); err != nil {
+		return errors.WithMessage(err, "save template config failed")
+	}
+	fileinfo, err := os.Stat(tmpl)
+	if err != nil {
+		return errors.WithMessage(err, fmt.Sprintf("os stat file %s failed", tmpl))
+	}
+	if fileinfo.Size() <= 0 {
+		return fmt.Errorf("rendered template config is empty")
+	}
+	return
+}
+
+// AddCrontab registers the dbbackup entry in crontab
+func (i *InstallNewDbBackupComp) AddCrontab() (err error) {
+	var newCrontab []string
+	err = osutil.RemoveSystemCrontab("dbbackup")
+	if err != nil {
+		return fmt.Errorf("删除原备份crontab任务失败(\"dbbackup\") get an error:%w", err)
+	}
+	entryshell := path.Join(i.installPath, "dbbackup_main.sh")
+	logfile := path.Join(i.installPath, "dbbackup.log")
+	newCrontab = append(
+		newCrontab,
+		fmt.Sprintf(
+			"#dbbackup/dbbackup_main.sh: backup database every day, distribute at %s by %s",
+			time.Now().Format(cst.TIMELAYOUT), i.Params.ExecUser,
+		),
+	)
+	newCrontab = append(
+		newCrontab,
+		fmt.Sprintf(
+			"%s %s 1>>%s 2>&1\n",
+			i.Params.Options.CrontabTime, entryshell, logfile,
+		),
+	)
+	crontabStr := strings.Join(newCrontab, "\n")
+	return osutil.AddCrontab(crontabStr)
+}
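+// The two entries written above look like (illustrative values; <installPath> comes
+// from cst.MYSQL_TOOL_INSTALL_PATH):
+//   #dbbackup/dbbackup_main.sh: backup database every day, distribute at <time> by <exec_user>
+//   <CrontabTime> <installPath>/dbbackup_main.sh 1>><installPath>/dbbackup.log 2>&1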
+
+// BackupBackupIfExist if a dbbackup install already exists, back it up first, then remove it
+func (i *InstallNewDbBackupComp) BackupBackupIfExist() (err error) {
+	bakInstallPath := i.installPath + "-backup"
+	if _, err := os.Stat(i.installPath); !os.IsNotExist(err) {
+		cmd := fmt.Sprintf("rm -rf %s; mv %s %s", bakInstallPath, i.installPath, bakInstallPath)
+		output, err := osutil.ExecShellCommand(false, cmd)
+		if err != nil {
+			err = fmt.Errorf("execute %s get an error:%s,%w", cmd, output, err)
+			return err
+		}
+	}
+	return
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_rotatebinlog.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_rotatebinlog.go
new file mode 100644
index 0000000000..93fcc8aa74
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/install_rotatebinlog.go
@@ -0,0 +1,218 @@
+package mysql
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/tools"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup"
+	"dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+
+	"github.com/ghodss/yaml"
+	"github.com/mitchellh/mapstructure"
+	"github.com/pkg/errors"
+	"github.com/spf13/cast"
+)
+
+// InstallRotateBinlogComp basic structure
+type InstallRotateBinlogComp struct {
+	GeneralParam *components.GeneralParam `json:"general"`
+	Params       InstallRotateBinlogParam `json:"extend"`
+	configFile   string
+	binPath      string
+	installPath  string
+}
+
+// InstallRotateBinlogParam input parameters
+type InstallRotateBinlogParam struct {
+	components.Medium
+	Configs rotate.Config `json:"configs" validate:"required"` // template config
+	// all instances on this machine; user and password are taken from the general param
+	Instances []*rotate.ServerObj `json:"instances"`
+	// user initiating the actor, used for auditing only
+	ExecUser string `json:"exec_user"`
+}
+
+// Init fills in the accounts and test-connects each instance
+func (c *InstallRotateBinlogComp) Init() (err error) {
+	c.Params.Configs.Servers = c.Params.Instances
+	for _, s := range c.Params.Configs.Servers {
+		s.Username = c.GeneralParam.RuntimeAccountParam.MonitorUser
+		s.Password = c.GeneralParam.RuntimeAccountParam.MonitorPwd
+		var instObj = native.InsObject{
+			Host: s.Host, Port: s.Port, User: s.Username, Pwd: s.Password, Socket: s.Socket,
+		}
+		if dbw, err := instObj.Conn(); err != nil {
+			logger.Error("install mysql-rotatebinlog test connect failed: %s. instance:%+v", err.Error(), *s)
+			// return err
+		} else {
+			dbw.Stop()
+		}
+	}
+	c.installPath = filepath.Join(cst.MYSQL_TOOL_INSTALL_PATH, "mysql-rotatebinlog")
+	c.binPath = filepath.Join(c.installPath, string(tools.ToolRotatebinlog))
+	return nil
+}
+
+// PreCheck verifies the mysql-rotatebinlog package
+func (c *InstallRotateBinlogComp) PreCheck() (err error) {
+	if err = c.Params.Medium.Check(); err != nil {
+		logger.Error("check mysql-rotatebinlog pkg failed: %s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// DeployBinary deploys the mysql-rotatebinlog binary
+func (c *InstallRotateBinlogComp) DeployBinary() (err error) {
+	err = os.MkdirAll(filepath.Join(c.installPath, "logs"), 0755)
+	if err != nil {
+		logger.Error("mkdir %s failed: %s", c.installPath, err.Error())
+		return err
+	}
+
+	decompressCmd := fmt.Sprintf(
+		`tar zxf %s -C %s`,
+		c.Params.Medium.GetAbsolutePath(), cst.MYSQL_TOOL_INSTALL_PATH,
+	)
+	_, err = osutil.ExecShellCommand(false, decompressCmd)
+	if err != nil {
+		logger.Error("decompress rotatebinlog pkg failed: %s", err.Error())
+		return err
+	}
+
+	chownCmd := fmt.Sprintf(`chown -R mysql %s && chmod +x %s`, c.installPath, c.binPath)
+	_, err = osutil.ExecShellCommand(false, chownCmd)
+	if err != nil {
+		logger.Error("chown %s to mysql failed: %s", c.installPath, err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// GenerateBinaryConfig generates the mysql-rotatebinlog config file
+func (c *InstallRotateBinlogComp) GenerateBinaryConfig() (err error) {
+	for k, val := range c.Params.Configs.BackupClient {
+		if k == "ibs" {
+			ibsClient := backup.IBSBackupClient{}
+			if reflect.TypeOf(val).Kind() == reflect.Map {
+				// backup_client.ibs comes back as a json map, e.g. {"enable": true,"ibs_mode": "hdfs","with_md5": true,"file_tag": "INCREMENT_BACKUP","tool_path": "backup_client"}
+				if err := mapstructure.Decode(val, &ibsClient); err != nil {
+					return errors.Wrapf(err, "fail to decode backup_client.ibs value:%v", val)
+				} else {
+					c.Params.Configs.BackupClient[k] = ibsClient
+				}
+			} else {
+				// backup_client.ibs comes back as a string, e.g. {\"enable\": true,\"ibs_mode\": \"hdfs\",\"with_md5\": true,\"file_tag\": \"INCREMENT_BACKUP\",\"tool_path\": \"backup_client\"}
+				if err := json.Unmarshal([]byte(cast.ToString(val)), &ibsClient); err != nil {
+					return errors.Wrapf(err, "fail to parse backup_client.ibs value:%v", val)
+				} else {
+					c.Params.Configs.BackupClient[k] = ibsClient
+				}
+			}
+		} else {
+			mapObj := make(map[string]interface{})
+			if reflect.TypeOf(val).Kind() == reflect.Map {
+				mapObj = val.(map[string]interface{})
+			} else if err := json.Unmarshal([]byte(cast.ToString(val)), &mapObj); err != nil {
+				return errors.Wrapf(err, "fail to parse backup_client value:%v", val)
+			}
+			c.Params.Configs.BackupClient[k] = mapObj
+		}
+	}
+	yamlData, err := yaml.Marshal(c.Params.Configs) // use json tag
+	if err != nil {
+		return err
+	}
+	c.configFile = filepath.Join(c.installPath, "config.yaml")
+	if err := os.WriteFile(c.configFile, yamlData, 0644); err != nil {
+		return err
+	}
+	return nil
+}
+
+// InstallCrontab registers mysql-rotatebinlog with crond
+func (c *InstallRotateBinlogComp) InstallCrontab() (err error) {
+	err = osutil.RemoveSystemCrontab("mysql-rotatebinlog")
+	if err != nil {
+		logger.Error("remove old mysql-rotatebinlog crontab failed: %s", err.Error())
+		return err
+	}
+	registerCmd := fmt.Sprintf("%s -c %s --addSchedule", c.binPath, c.configFile)
+	str, err := osutil.ExecShellCommand(false, registerCmd)
+	if err != nil {
+		logger.Error(
+			"failed to register mysql-rotatebinlog to crond: %s(%s)", str, err.Error(),
+		)
+	}
+	return err
+}
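+// The registration command assembled above expands to (illustrative placeholders):
+//   <binPath> -c <installPath>/config.yaml --addSchedule
+// with binPath and configFile set in Init and GenerateBinaryConfig respectively.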
+
+// Example sample parameters
+func (c *InstallRotateBinlogComp) Example() interface{} {
+	ibsExample := `{
+  "enable": true,
+  "ibs_mode": "hdfs",
+  "with_md5": true,
+  "file_tag": "INCREMENT_BACKUP",
+  "tool_path": "backup_client"
+}`
+	return InstallRotateBinlogComp{
+		GeneralParam: &components.GeneralParam{
+			RuntimeAccountParam: components.RuntimeAccountParam{
+				MySQLAccountParam: common.AccountMonitorExample,
+			},
+		},
+		Params: InstallRotateBinlogParam{
+			Medium: components.Medium{
+				Pkg:    "mysql-rotatebinlog.tar.gz",
+				PkgMd5: "12345",
+			},
+			Configs: rotate.Config{
+				Public: rotate.PublicCfg{
+					KeepPolicy:         "most",
+					MaxBinlogTotalSize: "200g",
+					MaxDiskUsedPct:     80,
+					MaxKeepDuration:    "61d",
+					PurgeInterval:      "4h",
+					RotateInterval:     "10m",
+				},
+				Crond: rotate.ScheduleCfg{
+					Schedule: "*/10 * * * *",
+					ApiUrl:   "http://127.0.0.1:9999",
+					ItemName: "mysql-rotatebinlog",
+				},
+				Servers: nil,
+				Report: rotate.ReportCfg{
+					Enable:     true,
+					Filepath:   path.Join(cst.DBAReportBase, "mysql/binlog"),
+					LogMaxsize: 5, LogMaxbackups: 10, LogMaxage: 30,
+				},
+				Encrypt: rotate.EncryptCfg{Enable: false},
+				BackupClient: map[string]interface{}{
+					"ibs": json.RawMessage([]byte(ibsExample)),
+				},
+			},
+			Instances: []*rotate.ServerObj{
+				{
+					Host: "1.1.1.1", Port: 3306,
+					Tags: rotate.InstanceMeta{
+						BkBizId: 100, ClusterId: 10, ClusterDomain: "a.b.c", DBRole: "master",
+					},
+				},
+			},
+			ExecUser: "sys",
+		},
+	}
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/mycnf_change.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/mycnf_change.go
new file mode 100644
index 0000000000..a9badd1694
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/mycnf_change.go
@@ -0,0 +1,228 @@
+package mysql
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/computil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+const (
+	// OPTypeUpsert adds or updates a config item
+	OPTypeUpsert = "upsert"
+	// OPTypeRemove removes a config item
+	OPTypeRemove = "remove"
+)
+
+// MycnfChangeComp turns BaseInputParam into the Comp parameters
+type MycnfChangeComp struct {
+	GeneralParam *components.GeneralParam `json:"general"`
+	Params       MycnfChangeParam         `json:"extend"`
+}
+
+// Example sample parameters
+func (c *MycnfChangeComp) Example() interface{} {
+	comp := MycnfChangeComp{
+		Params: MycnfChangeParam{
+			Items: map[string]*ConfItemOp{
+				"mysqld.binlog_format": {
+					ConfValue:   "ROW",
+					OPType:      "upsert",
+					NeedRestart: false,
+				},
+				"mysqld.innodb_buffer_pool_size": {
+					ConfValue:   "4096M",
+					OPType:      "upsert",
+					NeedRestart: true,
+				},
+			},
+			Persistent:  2,
+			Restart:     2,
+			TgtInstance: common.InstanceObjExample,
+		},
+		GeneralParam: &components.GeneralParam{
+			RuntimeAccountParam: components.RuntimeAccountParam{
+				MySQLAccountParam: common.AccountAdminExample,
+			},
+		},
+	}
+	return comp
+}
+
+// Shortcut: only the mysqld section is supported for now.
+// Terminology:
+//   runtime: the current running value, applied via set global
+//   file: the persisted value in my.cnf
+
+// MycnfChangeParam modifies my.cnf items.
+// Note: op_type=remove always persists the removal to the file directly, regardless of persistent.
+// Removing an item means reverting to the server built-in default (not a default defined in the
+// config center plat or elsewhere); since that value is unknown, the runtime value is left untouched.
+type MycnfChangeParam struct {
+	Items map[string]*ConfItemOp `json:"items" validate:"required"`
+	// whether to persist to my.cnf: -1: no, 1: yes, 2: persist only without touching runtime
+	Persistent int `json:"persistent" validate:"required" enums:"-1,1,2"`
+	// whether restart is allowed: -1: no, 1: yes, 2: decide automatically from the items' need_restart
+	Restart     int              `json:"restart" validate:"required" enums:"-1,1,2"`
+	TgtInstance native.InsObject `json:"tgt_instance" validate:"required"`
+
+	// derived automatically: whether a restart is needed
+	needRestart bool
+	myCnf       *util.CnfFile
+	dbworker    *native.DbWorker
+}
+
+// ConfItemOp one config item operation
+type ConfItemOp struct {
+	// ConfName  string `json:"conf_name" validate:"required"`
+	ConfValue string `json:"conf_value"`
+	// operation on the config item, allowed values `upsert`, `remove`
+	OPType      string `json:"op_type" form:"op_type" validate:"required,enums" enums:"upsert,remove"`
+	NeedRestart bool   `json:"need_restart,omitempty"`
+
+	confValueOld string
+}
+
+// Init loads my.cnf and resolves the socket
+func (c *MycnfChangeParam) Init() (err error) {
+	f := util.GetMyCnfFileName(c.TgtInstance.Port)
+	c.myCnf = &util.CnfFile{FileName: f}
+	if err = c.myCnf.Load(); err != nil {
+		return err
+	}
+	if c.TgtInstance.Socket == "" {
+		if c.TgtInstance.Socket, err = c.myCnf.GetMySQLSocket(); err != nil {
+			return err
+		}
+	}
+	/*
+		if c.Persistent == 2 && c.Restart >= 1 {
+			return errors.New("only persistent to file should not work with restart")
+		}
+	*/
+	return nil
+}
+
+// PreCheck pre-validates the change
+// and initializes needRestart
+func (c *MycnfChangeParam) PreCheck() error {
+	var errList []error
+	var err error
+	// persistent == 2 means runtime is untouched, so connectivity is not checked. Changes may only
+	// touch items that already exist in my.cnf: while the instance is down a new item cannot be
+	// validated, so it may not be added, but remove is allowed
+	if c.Persistent == 2 {
+		for k, v := range c.Items {
+			sk := util.GetSectionFromKey(k, true)
+			if v.OPType != OPTypeRemove {
+				if v.confValueOld, err = c.myCnf.GetMySQLCnfByKey(sk.Section, sk.Key); err != nil {
+					errList = append(errList, err)
+				} else {
+					logger.Warn("change [%s]%s new: %s. old: %s", sk.Section, sk.Key, v.ConfValue, v.confValueOld)
+				}
+			}
+			// file-only changes still decide whether a restart is needed; if the process is not running, do not start it
+			if v.NeedRestart && computil.IsInstanceRunning(c.TgtInstance) {
+				c.needRestart = true
+			}
+		}
+	} else if c.Persistent <= 1 {
+		// check connectivity
+		if dbw, err := c.TgtInstance.Conn(); err != nil {
+			return err
+		} else {
+			c.dbworker = dbw
+		}
+		for k, v := range c.Items {
+			if v.OPType == OPTypeRemove { // do not check whether the key exists
+				if v.NeedRestart {
+					c.needRestart = true
+				}
+				continue
+			}
+			sk := util.GetSectionFromKey(k, true)
+			if sk.Section != util.MysqldSec {
+				continue
+			}
+			v.confValueOld, err = c.dbworker.GetSingleGlobalVar(sk.Key) // valRuntime
+			logger.Warn("change cnf: [%s]%s new: %s. old: %s", sk.Section, sk.Key, v.ConfValue, v.confValueOld)
+			if err != nil {
+				errList = append(errList, err)
+			} else if v.ConfValue != v.confValueOld {
+				if v.NeedRestart {
+					c.needRestart = true
+				}
+			} else {
+				// running value equals the new value, no restart needed; applying it again is harmless
+			}
+		}
+	} else {
+		return errors.Errorf("unknown persistent %d", c.Persistent)
+	}
+	if len(errList) > 0 {
+		return util.SliceErrorsToError(errList)
+	}
+	return nil
+}
+
+// Start applies the config changes
+func (c *MycnfChangeParam) Start() error {
+	for k, v := range c.Items {
+		sk := util.GetSectionFromKey(k, true)
+		if v.OPType == OPTypeUpsert {
+			if sk.Section == util.MysqldSec && c.Persistent <= 1 { // only mysqld items need set global
+				setVar := fmt.Sprintf("set global %s = %s", sk.Key, v.ConfValue)
+				if _, err := c.dbworker.Exec(setVar); err != nil {
+					// Error 1238: Variable 'lower_case_table_names' is a read only variable
+					if !strings.Contains(err.Error(), "Error 1238:") {
+						return err
+					} else {
+						logger.Warn("needRestart %s", err.Error())
+						c.Items[k].NeedRestart = true
+						c.needRestart = true
+					}
+				}
+			}
+			if c.Persistent >= 1 {
+				c.myCnf.ReplaceValue(sk.Section, common.MapNameVarToConf(sk.Key), false, v.ConfValue)
+			}
+		} else if v.OPType == OPTypeRemove {
+			// remove straight from the file, then decide whether to restart.
+			// The item name is not validated for legality or existence, since removing an illegal
+			// name is itself a valid use case. Removal only touches the file, never runtime; the final
+			// effect depends on the item's own server default, and whether that default takes effect
+			// depends on need_restart. Here need_restart is not the item's restart attribute but an
+			// explicit instruction to restart. For example, slave_exec_mode has need_restart=0, yet
+			// removing it from my.cnf does not revert the runtime value immediately (the mysqld
+			// built-in default is unknown; even the plat config-center value is just a human-defined default)
+			c.myCnf.ReplaceValue(sk.Section, common.MapNameVarToConf(sk.Key), false, v.ConfValue)
+		} else {
+			return errors.Errorf("unknown op_type %s", v.OPType)
+		}
+	}
+	logger.Info("change_cnf param: %+v", c)
+	if c.Persistent >= 1 {
+		if err := c.myCnf.SafeSaveFile(false); err != nil {
+			return err
+		}
+	}
+	if c.Restart == 1 {
+		if err := computil.RestartMysqlInstanceNormal(c.TgtInstance); err != nil {
+			return err
+		}
+	} else if c.needRestart {
+		if c.Restart >= 1 {
+			if err := computil.RestartMysqlInstanceNormal(c.TgtInstance); err != nil {
+				return err
+			}
+		} else if c.Persistent < 1 { // 需要重启,但是却不允许重启
+			logger.Error("need to restart mysqld but not run restart")
+			// return errors.New("need to restart mysqld but not run restart")
+		} else {
+			logger.Warn("need to restart mysqld but not run restart")
+		}
+	}
+	return nil
+}
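+// Illustrative walk-through: for the Example item "mysqld.binlog_format" = "ROW" with
+// persistent=1, Start issues `set global binlog_format = ROW` and then rewrites the
+// binlog_format line in my.cnf before SafeSaveFile persists it.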
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/mycnf_clone.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/mycnf_clone.go
new file mode 100644
index 0000000000..f1738c8017
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/mycnf_clone.go
@@ -0,0 +1,136 @@
+package mysql
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+
+	"github.com/pkg/errors"
+)
+
+// MycnfCloneComp clones my.cnf items from a source instance to the local target
+type MycnfCloneComp struct {
+	GeneralParam *components.GeneralParam `json:"general"`
+	Params       MycnfCloneParam          `json:"extend"`
+}
+
+// Example sample parameters
+func (c *MycnfCloneComp) Example() interface{} {
+	comp := MycnfCloneComp{
+		Params: MycnfCloneParam{
+			SrcInstance: common.InstanceObjExample,
+			Persistent:  1,
+			Restart:     2,
+			TgtInstance: common.InstanceObjExample,
+			// Items:       []string{"time_zone", "binlog_format", "character_set_server"},
+		},
+		GeneralParam: &components.GeneralParam{
+			RuntimeAccountParam: components.RuntimeAccountParam{
+				MySQLAccountParam: common.MySQLAdminReplExample,
+			},
+		},
+	}
+	return comp
+}
+
+// MycnfCloneItemsDefault default list of variables to clone
+var MycnfCloneItemsDefault = []string{
+	"max_allowed_packet",
+	"time_zone",
+	"binlog_format",
+	"lower_case_table_names",
+	"character_set_server",
+	"collation_server",
+	"max_binlog_size",
+	"log_bin_compress",
+	"net_buffer_length",
+	"interactive_timeout",
+	"wait_timeout",
+	"relay_log_uncompress",
+	"slave_parallel_workers",
+}
+
+// MycnfCloneParam godoc
+// mycnf-clone: persistent=1 with restart=2 is recommended, i.e. persist to file and restart only as needed
+type MycnfCloneParam struct {
+	// source instance to clone from; a repl account is sufficient
+	SrcInstance native.InsObject `json:"src_instance" validate:"required"`
+	// local target instance to apply to; requires ADMIN privileges
+	TgtInstance native.InsObject `json:"tgt_instance" validate:"required"`
+
+	// whether to persist to my.cnf: 0: no, 1: yes, 2: persist only without touching runtime
+	Persistent int `json:"persistent" validate:"required" enums:"0,1,2" example:"1"`
+	// whether restart is allowed: 0: no, 1: yes, 2: decide automatically from the items' need_restart
+	Restart int `json:"restart" validate:"required" enums:"0,1,2" example:"2"`
+	// which variables to clone; versions differ, so names need not exist on the source (only the
+	// mysqld section is modified, and lookup failures are ignored).
+	// Some variables are readonly and are only written to my.cnf; if they differ from the running
+	// value the user must restart. Defaults: see MycnfCloneItemsDefault
+	Items []string `json:"items"`
+
+	mycnfChange *MycnfChangeParam
+}
+
+// Init reads the item values from the source instance and builds the change set
+func (c *MycnfCloneParam) Init() error {
+	// When cloning user-specified items, a name missing on the source is an error.
+	// With the built-in default list (shared across versions), missing names are ignored.
+	ignoreUnknownVars := false
+	if len(c.Items) == 0 {
+		c.Items = MycnfCloneItemsDefault
+		ignoreUnknownVars = true
+	}
+	c.mycnfChange = &MycnfChangeParam{
+		Restart:     c.Restart,
+		Persistent:  c.Persistent,
+		Items:       map[string]*ConfItemOp{},
+		TgtInstance: c.TgtInstance,
+	}
+	srcDB, err := c.SrcInstance.Conn()
+	if err != nil {
+		return errors.WithMessage(err, "连接源实例失败")
+	}
+	for _, varName := range c.Items {
+		if val, err := srcDB.GetSingleGlobalVar(varName); err != nil {
+			// todo
+			errStr := fmt.Sprintf("get variable %s failed: %s", varName, err.Error())
+			logger.Error(errStr)
+			if !ignoreUnknownVars {
+				return errors.New(errStr)
+			}
+			// continue
+		} else {
+			varFullName := util.MysqldSec + "." + varName
+			c.mycnfChange.Items[varFullName] = &ConfItemOp{
+				ConfValue:   val,
+				OPType:      OPTypeUpsert,
+				NeedRestart: false,
+			}
+		}
+	}
+
+	if err := c.mycnfChange.Init(); err != nil {
+		return err
+	}
+	return nil
+}
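+// Illustrative example: cloning binlog_format=ROW from the source yields the change item
+// "mysqld.binlog_format" -> {ConfValue: "ROW", OPType: "upsert"}, the same key format
+// MycnfChangeParam.Items expects.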
+
+// PreCheck delegates to the underlying mycnf-change pre-check
+func (c *MycnfCloneParam) PreCheck() error {
+	if err := c.mycnfChange.PreCheck(); err != nil {
+		return err
+	}
+	logger.Info(" MycnfCloneParam.PreCheck %v", c)
+	return nil
+}
+
+// Start applies the cloned items to the target instance
+func (c *MycnfCloneParam) Start() error {
+	if err := c.mycnfChange.Start(); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/mycnf_diff.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/mycnf_diff.go
new file mode 100644
index 0000000000..b0843023da
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/mycnf_diff.go
@@ -0,0 +1 @@
+package mysql
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/parse_binlog_time.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/parse_binlog_time.go
new file mode 100644
index 0000000000..981f3915d9
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/parse_binlog_time.go
@@ -0,0 +1,61 @@
+package mysql
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	binlogParser "dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/binlog-parser"
+	"encoding/json"
+	"fmt"
+	"path/filepath"
+)
+
+// BinlogTimeComp parses the start/stop times of binlog files
+type BinlogTimeComp struct {
+	Params BinlogTimeParam `json:"extend"`
+}
+
+// Example sample parameters
+func (t *BinlogTimeComp) Example() interface{} {
+	return &BinlogTimeComp{
+		Params: BinlogTimeParam{
+			BinlogDir:   "/data/dbbak",
+			BinlogFiles: []string{"binlog20000.00001", "binlog20000.00002"},
+			Format:      "json",
+		},
+	}
+}
+
+// BinlogTimeParam input parameters
+type BinlogTimeParam struct {
+	BinlogDir   string   `json:"binlog_dir" validate:"required"`
+	BinlogFiles []string `json:"binlog_files" validate:"required"`
+	Format      string   `json:"format" enums:",json,dump"`
+	parser      *binlogParser.BinlogParse
+}
+
+// Init constructs the binlog parser
+func (t *BinlogTimeComp) Init() error {
+	bp, err := binlogParser.NewBinlogParse("mysql", 0)
+	if err != nil {
+		return err
+	}
+	t.Params.parser = bp
+	return nil
+}
+
+// Start prints the parsed time of each binlog file, one line per file
+func (t *BinlogTimeComp) Start() error {
+	for _, f := range t.Params.BinlogFiles {
+		filename := filepath.Join(t.Params.BinlogDir, f)
+		if err := cmutil.FileExistsErr(filename); err != nil {
+			fmt.Printf("%s: %v\n", filename, err)
+			continue
+		}
+		if events, err := t.Params.parser.GetTime(filename, true, true); err != nil {
+			fmt.Printf("%s: %v\n", filename, err)
+		} else {
+			b, _ := json.Marshal(events)
+			fmt.Printf("%s: %s\n", filename, b)
+		}
+	}
+	return nil
+}
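+// Illustrative output (the JSON shape comes from parser.GetTime): one line per file, e.g.
+//   /data/dbbak/binlog20000.00001: [ ...start/stop time events as JSON... ]
+// missing files print the stat error instead and are skipped.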
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/pt_table_checksum.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/pt_table_checksum.go
new file mode 100644
index 0000000000..d2b25d52ff
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/pt_table_checksum.go
@@ -0,0 +1,393 @@
+package mysql
+
+import (
+	"bytes"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/tools"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter"
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+	"strings"
+
+	_ "github.com/go-sql-driver/mysql" // mysql 驱动
+	"github.com/jmoiron/sqlx"
+	"gopkg.in/yaml.v2"
+)
+
+// PtTableChecksumComp basic structure for data verification
+type PtTableChecksumComp struct {
+	GeneralParam *components.GeneralParam `json:"general"`
+	Params       *PtTableChecksumParam    `json:"extend"`
+	PtTableChecksumCtx
+	tools *tools.ToolSet
+}
+
+// PtTableChecksumParam godoc
+type PtTableChecksumParam struct {
+	BkBizId                   int         `json:"bk_biz_id"`                    // business id
+	ClusterId                 int         `json:"cluster_id"`                   // cluster id
+	ImmuteDomain              string      `json:"immute_domain"`                // cluster domain
+	MasterIp                  string      `json:"master_ip"`                    // ip of the db running the checksum
+	MasterPort                int         `json:"master_port"`                  // port of the db running the checksum
+	InnerRole                 string      `json:"inner_role"`                   // inner role of the db, one of [master, repeater]
+	MasterAccessSlaveUser     string      `json:"master_access_slave_user"`     // user for the master to access slaves
+	MasterAccessSlavePassword string      `json:"master_access_slave_password"` // password for the master to access slaves
+	DbPatterns                []string    `json:"db_patterns"`                  // db filter patterns
+	IgnoreDbs                 []string    `json:"ignore_dbs"`                   // dbs to ignore
+	TablePatterns             []string    `json:"table_patterns"`               // table filter patterns
+	IgnoreTables              []string    `json:"ignore_tables"`                // tables to ignore
+	RuntimeHour               int         `json:"runtime_hour"`                 // checksum run time in hours
+	ReplicateTable            string      `json:"replicate_table"`              // result table, schema-qualified
+	Slaves                    []SlaveInfo `json:"slaves"`                       // slave list
+	SystemDbs                 []string    `json:"system_dbs"`                   // system dbs
+}
+
+// SlaveInfo describes a slave
+type SlaveInfo struct {
+	Id   int    `json:"id"`   // slave id
+	Ip   string `json:"ip"`   // slave ip
+	Port int    `json:"port"` // slave port
+}
+
+// PtTableChecksumCtx context
+type PtTableChecksumCtx struct {
+	uid     string
+	cfgFile string
+	dbh     *sqlx.DB
+}
+
+// Precheck pre-checks that
+// master and slaves are connectable and
+// the required tool files exist
+func (c *PtTableChecksumComp) Precheck() (err error) {
+	_, err = native.InsObject{
+		Host: c.Params.MasterIp,
+		Port: c.Params.MasterPort,
+		User: c.GeneralParam.RuntimeAccountParam.MonitorUser,
+		Pwd:  c.GeneralParam.RuntimeAccountParam.MonitorPwd,
+	}.Conn()
+	if err != nil {
+		logger.Error("connect %s:%d failed:%s", c.Params.MasterIp, c.Params.MasterPort, err.Error())
+		return err
+	}
+
+	for _, slave := range c.Params.Slaves {
+		_, err = native.InsObject{
+			Host: slave.Ip,
+			Port: slave.Port,
+			User: c.Params.MasterAccessSlaveUser,
+			Pwd:  c.Params.MasterAccessSlavePassword,
+		}.Conn()
+		if err != nil {
+			logger.Error("connect slave %s:%d failed:%s", slave.Ip, slave.Port, err.Error())
+			return err
+		}
+	}
+
+	c.tools, err = tools.NewToolSetWithPick(tools.ToolMysqlTableChecksum, tools.ToolPtTableChecksum)
+	if err != nil {
+		logger.Error("init toolset failed: %s", err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// Init stores the uid and connects to the master
+func (c *PtTableChecksumComp) Init(uid string) (err error) {
+	c.uid = uid
+
+	dsn := fmt.Sprintf(
+		`%s:%s@tcp(%s:%d)/test`,
+		c.GeneralParam.RuntimeAccountParam.MonitorUser,
+		c.GeneralParam.RuntimeAccountParam.MonitorPwd,
+		c.Params.MasterIp,
+		c.Params.MasterPort,
+	)
+	c.dbh, err = sqlx.Connect("mysql", dsn)
+	if err != nil {
+		logger.Error("connect %s failed: %s", dsn, err.Error())
+		return err
+	}
+
+	return nil
+}
+
+type _cluster struct {
+	Id           int    `yaml:"id"`
+	ImmuteDomain string `yaml:"immute_domain"`
+}
+
+type _slave struct {
+	User     string `yaml:"user"`
+	Password string `yaml:"password"`
+	Ip       string `yaml:"ip"`
+	Port     int    `yaml:"port"`
+}
+
+type _ptFilters struct {
+	Databases            []string `yaml:"databases"`
+	Tables               []string `yaml:"tables"`
+	IgnoreDatabases      []string `yaml:"ignore_databases"`
+	IgnoreTables         []string `yaml:"ignore_tables"`
+	DatabasesRegex       string   `yaml:"databases_regex"`
+	TablesRegex          string   `yaml:"tables_regex"`
+	IgnoreDatabasesRegex string   `yaml:"ignore_databases_regex"`
+	IgnoreTablesRegex    string   `yaml:"ignore_tables_regex"`
+}
+
+type _ptChecksum struct {
+	Path      string                   `yaml:"path"`
+	Replicate string                   `yaml:"replicate"`
+	Switches  []string                 `yaml:"switches"`
+	Args      []map[string]interface{} `yaml:"args"`
+}
+
+type _logConfig struct {
+	Console    bool    `yaml:"console"`
+	LogFileDir *string `yaml:"log_file_dir"`
+	Debug      bool    `yaml:"debug"`
+	Source     bool    `yaml:"source"`
+	Json       bool    `yaml:"json"`
+}
+
+// ChecksumConfig mysql-table-checksum config
+type ChecksumConfig struct {
+	BkBizId    int         `yaml:"bk_biz_id"`
+	Cluster    _cluster    `yaml:"cluster"`
+	Ip         string      `yaml:"ip"`
+	Port       int         `yaml:"port"`
+	User       string      `yaml:"user"`
+	Password   string      `yaml:"password"`
+	InnerRole  string      `yaml:"inner_role"`
+	ReportPath string      `yaml:"report_path"`
+	Slaves     []_slave    `yaml:"slaves"`
+	Filter     _ptFilters  `yaml:"filter"`
+	PtChecksum _ptChecksum `yaml:"pt_checksum"`
+	Log        *_logConfig `yaml:"log"`
+	Schedule   string      `yaml:"schedule"`
+	ApiUrl     string      `yaml:"api_url"`
+}
+
+// GenerateConfigFile generates the mysql-table-checksum config file
+func (c *PtTableChecksumComp) GenerateConfigFile() (err error) {
+	logDir := path.Join(cst.ChecksumInstallPath, "logs")
+
+	cfg := ChecksumConfig{
+		BkBizId: c.Params.BkBizId,
+		Cluster: _cluster{
+			Id:           c.Params.ClusterId,
+			ImmuteDomain: c.Params.ImmuteDomain,
+		},
+		Ip:         c.Params.MasterIp,
+		Port:       c.Params.MasterPort,
+		User:       c.GeneralParam.RuntimeAccountParam.MonitorUser,
+		Password:   c.GeneralParam.RuntimeAccountParam.MonitorPwd,
+		InnerRole:  c.Params.InnerRole,
+		ReportPath: "", // 单据不需要上报, 这里可以留空
+		Slaves:     []_slave{},
+		PtChecksum: _ptChecksum{
+			Replicate: c.Params.ReplicateTable,
+			Switches:  []string{},
+			Args: []map[string]interface{}{
+				{
+					"name":  "run-time",
+					"value": c.Params.RuntimeHour,
+				},
+			},
+		},
+		Log: &_logConfig{
+			Console:    false,
+			LogFileDir: &logDir,
+			Debug:      false,
+			Source:     true,
+			Json:       false,
+		},
+		Schedule: "",
+		ApiUrl:   "http://127.0.0.1:9999",
+	}
+
+	ptTableChecksumPath, err := c.tools.Get(tools.ToolPtTableChecksum)
+	if err != nil {
+		logger.Error("get %s failed: %s", tools.ToolPtTableChecksum, err)
+		return err
+	}
+	cfg.PtChecksum.Path = ptTableChecksumPath
+
+	filters, err := c.transformFilter()
+	if err != nil {
+		return err
+	}
+	cfg.Filter = *filters
+
+	for _, slave := range c.Params.Slaves {
+		cfg.Slaves = append(
+			cfg.Slaves, _slave{
+				User:     c.Params.MasterAccessSlaveUser,
+				Password: c.Params.MasterAccessSlavePassword,
+				Ip:       slave.Ip,
+				Port:     slave.Port,
+			},
+		)
+	}
+
+	yamlData, err := yaml.Marshal(&cfg)
+	if err != nil {
+		logger.Error("generate yaml config failed: %s", err.Error())
+		return err
+	}
+
+	c.cfgFile = path.Join("/tmp", fmt.Sprintf("checksum_%s.yaml", c.uid))
+	err = os.WriteFile(c.cfgFile, yamlData, 0644)
+	if err != nil {
+		logger.Error("write config failed: %s", err.Error())
+		return err
+	}
+
+	logger.Info("mysql-table-checksum config: %s", cfg)
+	return nil
+}
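+// The rendered file lands at /tmp/checksum_<uid>.yaml (cfgFile above) and is fed to
+// mysql-table-checksum as --config in DoChecksum below.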
+
+// DoChecksum runs the verification
+func (c *PtTableChecksumComp) DoChecksum() (err error) {
+	mysqlTableChecksumPath, err := c.tools.Get(tools.ToolMysqlTableChecksum)
+	if err != nil {
+		logger.Error("get %s failed: %s", tools.ToolMysqlTableChecksum, err.Error())
+		return err
+	}
+	command := exec.Command(
+		mysqlTableChecksumPath, []string{
+			"demand",
+			"--config", c.cfgFile,
+			"--uuid", c.uid,
+		}...,
+	)
+	logger.Info("command: %s", command)
+	var stdout, stderr bytes.Buffer
+	command.Stdout = &stdout
+	command.Stderr = &stderr
+
+	err = command.Run()
+	if err != nil {
+		logger.Error("execute check command failed: %s, %s", err.Error(), stderr.String())
+		return err
+	}
+
+	fmt.Println(components.WrapperOutputString(strings.TrimSpace(stdout.String())))
+	return nil
+}
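+// The command launched above is equivalent to (illustrative):
+//   mysql-table-checksum demand --config /tmp/checksum_<uid>.yaml --uuid <uid>
+// its stdout is wrapped via components.WrapperOutputString and echoed for the caller.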
+
+// Example sample parameters
+func (c *PtTableChecksumComp) Example() interface{} {
+	comp := PtTableChecksumComp{
+		GeneralParam: &components.GeneralParam{
+			RuntimeAccountParam: components.RuntimeAccountParam{
+				MySQLAccountParam: common.AccountMonitorExample,
+			},
+		},
+		Params: &PtTableChecksumParam{
+			BkBizId:                   1,
+			ClusterId:                 1,
+			ImmuteDomain:              "example.db.com",
+			MasterIp:                  "127.0.0.1",
+			MasterPort:                20000,
+			InnerRole:                 "master",
+			MasterAccessSlaveUser:     "dummyuser",
+			MasterAccessSlavePassword: "dummypassword",
+			DbPatterns:                []string{"db1%", "db2%"},
+			IgnoreDbs:                 []string{"db11", "db22"},
+			TablePatterns:             []string{},
+			IgnoreTables:              []string{"table_user_%", "table_mail_%"},
+			RuntimeHour:               2,
+			ReplicateTable:            fmt.Sprintf("%s.checksum", native.INFODBA_SCHEMA),
+			Slaves: []SlaveInfo{
+				{
+					Id:   11,
+					Ip:   "127.0.0.2",
+					Port: 20000,
+				},
+				{
+					Id:   12,
+					Ip:   "127.0.0.3",
+					Port: 20000,
+				},
+			},
+			SystemDbs: []string{native.INFODBA_SCHEMA, native.TEST_DB, "mysql"},
+		},
+		PtTableChecksumCtx: PtTableChecksumCtx{},
+	}
+	return comp
+}
+
+func (c *PtTableChecksumComp) transformFilter() (*_ptFilters, error) {
+	// validation is completed inside NewDbTableFilter
+	filter, err := db_table_filter.NewDbTableFilter(
+		c.Params.DbPatterns,
+		c.Params.TablePatterns,
+		c.Params.IgnoreDbs,
+		c.Params.IgnoreTables,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	logger.Info("filter: %v", filter)
+	var res _ptFilters
+	err = c.transformInclude(&res, filter)
+	if err != nil {
+		return nil, err
+	}
+
+	err = c.transformExclude(&res, filter)
+	if err != nil {
+		return nil, err
+	}
+
+	logger.Info("system dbs: %s", c.Params.SystemDbs)
+	res.IgnoreDatabases = append(res.IgnoreDatabases, c.Params.SystemDbs...)
+	logger.Info("transformed filters: %s", res)
+	return &res, nil
+}
+
+func (c *PtTableChecksumComp) transformInclude(ptFilters *_ptFilters, filter *db_table_filter.DbTableFilter) error {
+	if db_table_filter.HasGlobPattern(c.Params.DbPatterns) {
+		ptFilters.DatabasesRegex = db_table_filter.ReplaceGlob(c.Params.DbPatterns[0])
+	} else {
+		ptFilters.Databases = c.Params.DbPatterns
+	}
+
+	if db_table_filter.HasGlobPattern(c.Params.TablePatterns) {
+		ptFilters.TablesRegex = db_table_filter.ReplaceGlob(c.Params.TablePatterns[0])
+	} else {
+		ptFilters.Tables = c.Params.TablePatterns
+	}
+	return nil
+}
+
+func (c *PtTableChecksumComp) transformExclude(
+	ptFilters *_ptFilters,
+	filter *db_table_filter.DbTableFilter,
+) (err error) {
+	if db_table_filter.HasGlobPattern(c.Params.IgnoreTables) && c.Params.IgnoreTables[0] == "*" {
+		if db_table_filter.HasGlobPattern(c.Params.IgnoreDbs) {
+			ptFilters.IgnoreDatabasesRegex = db_table_filter.ReplaceGlob(c.Params.IgnoreDbs[0])
+		} else {
+			ptFilters.IgnoreDatabases = c.Params.IgnoreDbs
+		}
+		return nil
+	}
+
+	ptFilters.IgnoreTables, err = filter.GetExcludeTables(
+		c.Params.MasterIp,
+		c.Params.MasterPort,
+		c.GeneralParam.RuntimeAccountParam.MonitorUser,
+		c.GeneralParam.RuntimeAccountParam.MonitorPwd,
+	)
+
+	return err
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/pt_table_sync.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/pt_table_sync.go
new file mode 100644
index 0000000000..7480b3fbeb
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/pt_table_sync.go
@@ -0,0 +1,420 @@
+// Repair master/slave data drift with the pt-table-sync tool.
+// A series of pre-checks runs before the repair to keep the data repair as safe as possible.
+// The atomic task must support two triggers: manually initiated verification repairs and routine
+// verification repairs; their handling differs slightly, so they are treated separately where needed.
+
+package mysql
+
+import (
+	"crypto/rand"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/tools"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"math/big"
+	"strings"
+)
+
+const checkSumDB = native.INFODBA_SCHEMA
+const slaveBehindMasterLimit = 1800 // the slave must not lag the master by more than half an hour
+const chunkSize = "10000"           // chunk size per pt-table-sync repair, also used during verification
+// Charset binary is used uniformly for now
+const Charset = "binary"
+// SyncExitStatus2 exit status 2 means real data inconsistencies were found; such a run counts as normal
+const SyncExitStatus2 = "exit status 2"
+
+// PtTableSyncComp repairs master/slave data drift via pt-table-sync
+type PtTableSyncComp struct {
+	GeneralParam *components.GeneralParam `json:"general"`
+	Params       *PtTableSyncParam        `json:"extend"`
+	PtTableSyncCtx
+	tools *tools.ToolSet
+}
+
+// PtTableSyncParam input parameters.
+// 2023/02/03 added start_time, end_time and is_routine_trigger for repairs initiated by the routine check
+type PtTableSyncParam struct {
+	Host                string `json:"host" validate:"required,ip"`
+	Port                int    `json:"port" validate:"required,lt=65536,gte=3306"`
+	MasterHost          string `json:"master_host" validate:"required,ip"`
+	MasterPort          int    `json:"master_port" validate:"required,lt=65536,gte=3306"`
+	IsSyncNonInnodbTbls bool   `json:"is_sync_non_innodb"`
+	SyncUser            string `json:"sync_user" validate:"required"`
+	SyncPass            string `json:"sync_pass" validate:"required"`
+	CheckSumTable       string `json:"check_sum_table" validate:"required"`
+	StartTime           string `json:"start_time"`
+	EndTime             string `json:"end_time"`
+	IsRoutineTrigger    bool   `json:"is_routine_trigger"`
+	// Synctables        []string `json:"sync_tables"`
+	// SyncDbs           []string `json:"SyncDbs"`
+}
+
+// PtTableSyncCtx context needed while the task runs
+type PtTableSyncCtx struct {
+	tableSyncMap          []TableSyncInfo
+	dbConn                *native.DbWorker
+	masterDbConn          *native.DbWorker
+	tempCheckSumTableName string
+}
+
+// TableSyncInfo describes a table pending repair
+type TableSyncInfo struct {
+	DbName    string `db:"DbName"`
+	TableName string `db:"TableName"`
+}
+
+// TableInfo table metadata (schema, name, engine)
+type TableInfo struct {
+	DbName    string `db:"TABLE_SCHEMA"`
+	TableName string `db:"TABLE_NAME"`
+	Engine    string `db:"ENGINE"`
+}
+
+// Example sample parameters
+func (c *PtTableSyncComp) Example() interface{} {
+	comp := PtTableSyncComp{
+		Params: &PtTableSyncParam{
+			Host:                "1.1.1.1",
+			Port:                10000,
+			MasterHost:          "1.1.1.2",
+			MasterPort:          10000,
+			IsSyncNonInnodbTbls: false,
+			CheckSumTable:       "checksum",
+			SyncUser:            "xxx",
+			SyncPass:            "xxx",
+		},
+	}
+	return comp
+}
+
+// Init initializes the act: connects to the local (slave) instance and the master
+func (c *PtTableSyncComp) Init() (err error) {
+
+	// connect to the local instance (actually the slave)
+	c.dbConn, err = native.InsObject{
+		Host: c.Params.Host,
+		Port: c.Params.Port,
+		User: c.GeneralParam.RuntimeAccountParam.AdminUser,
+		Pwd:  c.GeneralParam.RuntimeAccountParam.AdminPwd,
+	}.Conn()
+	if err != nil {
+		logger.Error("Connect %d failed:%s", c.Params.Port, err.Error())
+		return err
+	}
+	// Connect remotely to the given master instance with the temporary account
+	c.masterDbConn, err = native.InsObject{
+		Host: c.Params.MasterHost,
+		Port: c.Params.MasterPort,
+		User: c.Params.SyncUser,
+		Pwd:  c.Params.SyncPass,
+	}.Conn()
+	if err != nil {
+		logger.Error("Connect %d failed:%s", c.Params.Port, err.Error())
+		return err
+	}
+
+	// Fetch the tables whose checksum results are inconsistent
+	err = c.getTableSyncMap()
+	if err != nil {
+		return err
+	}
+	// Build the temporary checksum table name
+	randomNum, _ := rand.Int(rand.Reader, big.NewInt(100000))
+	c.PtTableSyncCtx.tempCheckSumTableName = fmt.Sprintf("checksum_%s_%d", c.Params.SyncUser, randomNum)
+
+	return nil
+}
+
+// Precheck runs the pre-checks of the job:
+// whether the checksum table that records the inconsistencies still exists;
+// whether this node still replicates from the given master and whether replication is healthy
+func (c *PtTableSyncComp) Precheck() (err error) {
+
+	// Check whether the given checksum table exists
+	if !c.isExistCheckSumTable() {
+		return fmt.Errorf("the checksum table [%s.%s] may not exist", checkSumDB, c.Params.CheckSumTable)
+	}
+
+	slaveStatus, err := c.dbConn.ShowSlaveStatus()
+	if err != nil {
+		return err
+	}
+	// Check whether replication is healthy
+	if !slaveStatus.ReplSyncIsOk() {
+		errMsg := fmt.Sprintf(
+			"IOThread:%s,SQLThread:%s",
+			slaveStatus.SlaveIORunning, slaveStatus.SlaveSQLRunning,
+		)
+		return fmt.Errorf("%s", errMsg)
+	}
+	// Check whether this node's current master is the master passed in
+	if slaveStatus.MasterHost != c.Params.MasterHost || slaveStatus.MasterPort != c.Params.MasterPort {
+		errMsg := fmt.Sprintf(
+			"The current node replicates from [%s:%d], not the expected master [%s:%d]",
+			slaveStatus.MasterHost, slaveStatus.MasterPort, c.Params.MasterHost, c.Params.MasterPort,
+		)
+		return fmt.Errorf("%s", errMsg)
+	}
+	// If the node lags behind the master by more than 1800s, exit early; the job can be retried once replication catches up
+	if slaveStatus.SecondsBehindMaster.Int64 >= slaveBehindMasterLimit {
+		errMsg := fmt.Sprintf(
+			"The slave node [%s:%d] lags behind the master [%s:%d] by more than 1800s, exiting for now",
+			c.Params.Host, c.Params.Port, c.Params.MasterHost, c.Params.MasterPort,
+		)
+		return fmt.Errorf("%s", errMsg)
+	}
+	// Check whether the final list of tables to repair is empty; once ignore-table parameters are introduced, it may legitimately end up empty
+	if len(c.tableSyncMap) == 0 {
+		return fmt.Errorf("the list of tables to repair is empty")
+	}
+	// Load the pt-table-sync tool path
+	c.tools, err = tools.NewToolSetWithPick(tools.ToolPtTableSync)
+	if err != nil {
+		logger.Error("init toolset failed: %s", err.Error())
+		return err
+	}
+
+	return nil
+}
+
+// ExecPtTableSync runs the pt-table-sync tool to repair the data
+// Repairs are dispatched table by table; each repaired table is logged once it finishes
+// If repairing one table fails, the db-act process does not abort: it logs the failure and moves on to the next table
+// Before repairing a table, check whether it qualifies; if not, skip it
+func (c *PtTableSyncComp) ExecPtTableSync() (err error) {
+	// number of tables whose repair failed
+	var errTableCount int = 0
+	// number of tables whose repair was skipped
+	var skipTableCount int = 0
+
+	// name of the checksum table used as the repair source
+	var getChecksumName string
+
+	// Get the tool path
+	PtTableSyncPath, err := c.tools.Get(tools.ToolPtTableSync)
+	if err != nil {
+		logger.Error("get %s failed: %s", tools.ToolPtTableSync, err.Error())
+		return err
+	}
+
+	for _, syncTable := range c.tableSyncMap {
+		// First check on the master whether the table qualifies for repair
+		if !c.checkTable(syncTable.DbName, syncTable.TableName) {
+			skipTableCount++
+			logger.Warn(
+				fmt.Sprintf(
+					"The table [%s.%s] does not conform to the behavior of this data repair, skip sync",
+					syncTable.DbName, syncTable.TableName,
+				),
+			)
+			continue
+		}
+
+		if c.Params.IsRoutineTrigger {
+			// repair triggered by a routine check: use the temporary table as the repair source
+			getChecksumName = c.PtTableSyncCtx.tempCheckSumTableName
+			if !c.CopyTableCheckSumReport(syncTable.DbName, syncTable.TableName, getChecksumName) {
+				return fmt.Errorf("copy table data error")
+			}
+
+		} else {
+			// otherwise use the checksum table passed in
+			getChecksumName = c.Params.CheckSumTable
+		}
+
+		// Build the pt-table-sync command line
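+		// Rendered with illustrative values (hosts and credentials made up), it looks like:
+		//   pt-table-sync --execute --replicate=infodba_schema.checksum --sync-to-master --no-buffer-to-client \
+		//     --no-check-child-tables --chunk-size=10000 --databases=db1 --tables=tb1 --charset=binary \
+		//     h=1.1.1.1,P=10000,u=sync_user,p=xxx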
+		syncCmd := fmt.Sprintf(
+			"%s --execute --replicate=%s.%s --sync-to-master --no-buffer-to-client --no-check-child-tables "+
+				"--chunk-size=%s --databases=%s --tables=%s --charset=%s h=%s,P=%d,u=%s,p=%s",
+			PtTableSyncPath, checkSumDB, getChecksumName, chunkSize, syncTable.DbName,
+			syncTable.TableName, Charset, c.Params.Host, c.Params.Port, c.Params.SyncUser, c.Params.SyncPass,
+		)
+
+		// logger.Info("executing %s", syncCmd)
+		output, err := osutil.ExecShellCommand(false, syncCmd)
+
+		if err != nil && !strings.Contains(err.Error(), SyncExitStatus2) {
+			// if repairing this table fails, do not abort; log the error and move on to the next table
+			logger.Error("exec cmd get an error:%s,%s", output, err.Error())
+			errTableCount++
+			continue
+		}
+		logger.Info("syncing-table [%s.%s] has been executed successfully", syncTable.DbName, syncTable.TableName)
+	}
+	logger.Info(
+		"Number of successful fixes: %d ,Number of failed fixes: %d, Number of skip fixes: %d",
+		len(c.tableSyncMap)-errTableCount-skipTableCount, errTableCount, skipTableCount,
+	)
+
+	// If some tables failed to be repaired, return failure
+	if errTableCount != 0 {
+		return fmt.Errorf("failed to repair %d tables", errTableCount)
+	}
+	return nil
+}
+
+// getTableSyncMap queries the local instance for tables whose checksum results are inconsistent
+func (c *PtTableSyncComp) getTableSyncMap() (err error) {
+	var checkSQL string
+	if c.Params.IsRoutineTrigger {
+		// repair triggered by a routine check
+		checkSQL = fmt.Sprintf(
+			`select db as DbName ,tbl as TableName from %s.%s where (this_crc <> master_crc or this_cnt <> master_cnt) 
+			 and (ts between '%s' and '%s')  group by db, tbl`,
+			checkSumDB, c.Params.CheckSumTable, c.Params.StartTime, c.Params.EndTime,
+		)
+	} else {
+		// repair triggered by a regular (manual) check
+		checkSQL = fmt.Sprintf(
+			"select db as DbName ,tbl as TableName from %s.%s where this_crc <> master_crc or this_cnt <> master_cnt group by db, tbl",
+			checkSumDB, c.Params.CheckSumTable,
+		)
+	}
+
+	// An empty result is not treated as an error here
+	err = c.dbConn.Queryx(&c.tableSyncMap, checkSQL)
+	if err != nil && !c.dbConn.IsNotRowFound(err) {
+		logger.Error(err.Error())
+		return err
+	}
+
+	return nil
+
+}
+
+// isExistCheckSumTable checks whether the checksum table exists on the local instance
+func (c *PtTableSyncComp) isExistCheckSumTable() bool {
+	checkSumSql := fmt.Sprintf(
+		"select 1 from information_schema.tables where TABLE_SCHEMA = '%s' and TABLE_NAME = '%s' ;",
+		checkSumDB, c.Params.CheckSumTable,
+	)
+	_, err := c.dbConn.Query(checkSumSql)
+	if err != nil {
+		logger.Error(err.Error())
+		return false
+	}
+	return true
+}
+
+// checkTable verifies on the master that the table qualifies for repair: it must exist on the master and use a transactional engine
+func (c *PtTableSyncComp) checkTable(dbName string, tableName string) bool {
+
+	var tableInfo []TableInfo
+
+	checkSumSql := fmt.Sprintf(
+		"select TABLE_SCHEMA, TABLE_NAME, ENGINE from information_schema.tables where TABLE_SCHEMA = '%s' and TABLE_NAME = '%s' ;",
+		dbName, tableName,
+	)
+	err := c.masterDbConn.Queryx(&tableInfo, checkSumSql)
+	if err != nil {
+		logger.Error(err.Error())
+		return false
+	}
+	if len(tableInfo) == 0 {
+		logger.Error(fmt.Sprintf("The table [%s.%s] does not exist on the master", dbName, tableName))
+		return false
+	}
+	// Check for non-transactional engines; currently only InnoDB and TokuDB are transactional
+	if !c.Params.IsSyncNonInnodbTbls && !(tableInfo[0].Engine == "InnoDB" || tableInfo[0].Engine == "TokuDB") {
+		logger.Error(fmt.Sprintf("The table [%s.%s] is not a transaction engine table", dbName, tableName))
+		return false
+	}
+	return true
+}
+
+// DropSyncUser drops the temporary repair account on both the master and the slave after the repair
+func (c *PtTableSyncComp) DropSyncUser() (err error) {
+
+	logger.Info("droping sync user ....")
+	userHost := fmt.Sprintf("%s@%s", c.Params.SyncUser, c.Params.Host)
+
+	// drop on the master
+	if _, err := c.masterDbConn.Exec(fmt.Sprintf("drop user %s;", userHost)); err != nil {
+		logger.Error(
+			"drop %s failed:%s in the instance [%s:%d]", userHost, err.Error(), c.Params.MasterHost,
+			c.Params.MasterPort,
+		)
+		return err
+	}
+	// drop on the local node
+	if _, err := c.dbConn.Exec(fmt.Sprintf("drop user %s;", userHost)); err != nil {
+		logger.Error("drop %s failed:%s in the instance [%s:%d]", userHost, err.Error(), c.Params.Host, c.Params.Port)
+		return err
+	}
+	logger.Info("drop-user has been executed successfully")
+	return nil
+}
+
+// CopyTableCheckSumReport copies the inconsistent checksum rows of the table to be repaired into a temporary table
+// This targets repairs triggered by routine inspection checks
+// Rationale: the instance's checksum report table is historical and holds many old records, which would slow down pt-table-sync
+func (c *PtTableSyncComp) CopyTableCheckSumReport(DBName string, tableName string, tempCheckSumTableName string) bool {
+	// SQL statements that copy the data
+	var copySQLs []string
+
+	// Validate the required parameters
+	if !(c.Params.IsRoutineTrigger && len(c.Params.StartTime) != 0 && len(c.Params.EndTime) != 0) {
+		logger.Error(
+			"invalid parameters: when is_routine_trigger is true, start_time and end_time must not be empty",
+		)
+		return false
+	}
+
+	// Create the temporary checksum table on the master
+	if _, err := c.masterDbConn.Exec(
+		fmt.Sprintf(
+			"create table if not exists %s.%s like %s.%s ;",
+			checkSumDB, tempCheckSumTableName, checkSumDB, c.Params.CheckSumTable,
+		),
+	); err != nil {
+		logger.Error("create table %s failed:[%s]", tempCheckSumTableName, err.Error())
+		return false
+	}
+
+	// Copy the inconsistent rows into the temporary table. binlog_format is set to STATEMENT so the
+	// INSERT ... SELECT re-executes on the slave against the slave's own checksum data, keeping the
+	// temporary table consistent on each instance.
+	copySQLs = append(copySQLs, "set binlog_format = 'Statement' ;")
+	copySQLs = append(copySQLs, fmt.Sprintf("truncate table %s.%s ;", checkSumDB, tempCheckSumTableName))
+	copySQLs = append(
+		copySQLs,
+		fmt.Sprintf(
+			"insert into %s.%s select * from %s.%s  where db = '%s' and tbl = '%s' and ts between '%s' and '%s' ",
+			checkSumDB,
+			tempCheckSumTableName,
+			checkSumDB,
+			c.Params.CheckSumTable,
+			DBName,
+			tableName,
+			c.Params.StartTime,
+			c.Params.EndTime,
+		),
+	)
+	copySQLs = append(copySQLs, "set binlog_format = 'ROW' ;")
+
+	if _, err := c.masterDbConn.ExecMore(copySQLs); err != nil {
+		logger.Error("create table %s failed:[%s]", tempCheckSumTableName, err.Error())
+		return false
+	}
+
+	return true
+}
+
+// DropTempTable drops the temporary checksum table
+func (c *PtTableSyncComp) DropTempTable() (err error) {
+
+	if len(c.PtTableSyncCtx.tempCheckSumTableName) == 0 {
+		// skip when no temporary table name was generated
+		logger.Info("temp-table is null, skip")
+		return nil
+	}
+
+	logger.Info(fmt.Sprintf("droping Temp table :%s ....", c.PtTableSyncCtx.tempCheckSumTableName))
+	if _, err := c.masterDbConn.Exec(
+		fmt.Sprintf(
+			"drop table if exists %s.%s;",
+			checkSumDB,
+			c.PtTableSyncCtx.tempCheckSumTableName,
+		),
+	); err != nil {
+		logger.Error("drop temp table failed")
+		return err
+	}
+	logger.Info("drop-temp-table has been executed successfully")
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/README.md b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/README.md
new file mode 100644
index 0000000000..40ccb8c015
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/README.md
@@ -0,0 +1,21 @@
+## Supported restore types
+- gztab
+- xtrabackup
+- logical (dbloader)
+- physical (dbloader)
+
+## Development notes
+To add a new restore type, implement the following methods of the `Restore` interface:
+```
+type Restore interface {
+	Init() error
+	PreCheck() error
+	Start() error
+	WaitDone() error
+	PostCheck() error
+	ReturnChangeMaster() (*mysqlutil.ChangeMaster, error)
+}
+```
+For example, mload_restore, xload_restore and dbloader_restore are all implementations of this interface. `RestoreDRComp` wraps the interface to expose the restore command; its `ChooseType` method decides which Restore implementation to use.
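+
+A minimal sketch of what a new restore type looks like (`FooRestore` and the method bodies are illustrative placeholders, not an existing implementation):
+```
+type FooRestore struct{ /* params */ }
+
+func (f *FooRestore) Init() error      { return nil } // open connections, parse meta files
+func (f *FooRestore) PreCheck() error  { return nil } // tools present, target instance reachable
+func (f *FooRestore) Start() error     { return nil } // extract and load the backup
+func (f *FooRestore) WaitDone() error  { return nil }
+func (f *FooRestore) PostCheck() error { return nil }
+func (f *FooRestore) ReturnChangeMaster() (*mysqlutil.ChangeMaster, error) { return nil, nil }
+```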
+
+dbloader is further split into logical / physical; the actual restore work is done by `dbbackup-go/dbbackup`
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/backup.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/backup.go
new file mode 100644
index 0000000000..ce808d0754
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/backup.go
@@ -0,0 +1,106 @@
+package restore
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup"
+	"fmt"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// BackupInfo backup file info
+type BackupInfo struct {
+	WorkDir string `json:"work_dir" validate:"required" example:"/data1/dbbak"` // restore working directory
+	// local directory holding the backup files; in theory doDr never writes into it, only into targetDir
+	BackupDir string `json:"backup_dir"  validate:"required" example:"/data/dbbak"`
+	// backup file name lists; the key is info|full|priv|index, the value is a list of file names relative to backup_dir
+	BackupFiles map[string][]string `json:"backup_files" validate:"required"`
+
+	backupType string
+	backupHost string
+	backupPort int
+	// backupBaseName is the target directory name of the extracted backup, without path or suffix
+	backupBaseName string
+
+	infoFilePath string // InfoFileDetail full path filename
+	infoObj      *dbbackup.InfoFileDetail
+
+	// fullFileList []string
+
+	indexFilePath string
+	indexObj      *dbbackup.BackupIndexFile
+}
+
+func (b *BackupInfo) initWorkDirs() error {
+	/*
+		backup_file_list       a, b, c
+		backup_dir /data/dbbak/
+		backup_untar_dir ${task_dir}/base_name/
+
+		work_base_dir /data1/dbbak
+		task_dir   ${work_base_dir}/doDr_10941094/20000/
+	*/
+	if !cmutil.IsDirectory(b.WorkDir) {
+		return errors.Errorf("error work_dir %s", b.WorkDir)
+	}
+	return nil
+}
+
+// CheckIntegrity verifies the backup files; not implemented yet
+func (b *BackupInfo) CheckIntegrity() error {
+	return nil
+}
+
+// GetBackupMetaFile godoc
+// fetches the .info / .index file name and parses its content
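+// An illustrative BackupFiles payload (file names made up):
+//   {"index": ["xxx_20000_20230101.index"], "info": ["xxx_20000_20230101.info"]}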
+func (b *BackupInfo) GetBackupMetaFile(fileType string) error {
+	fileList, ok := b.BackupFiles[fileType]
+	if !ok {
+		return errors.Errorf("backup_files has no file_type: %s", fileType)
+	}
+	if len(fileList) != 1 {
+		return fmt.Errorf("expect one meta file but got %v", fileList)
+	}
+	metaFilename := strings.TrimSpace(fileList[0])
+	metaFilePath := filepath.Join(b.BackupDir, metaFilename)
+	if err := cmutil.FileExistsErr(metaFilePath); err != nil {
+		return err
+	}
+	if strings.HasSuffix(metaFilename, ".index") || fileType == dbbackup.BACKUP_INDEX_FILE {
+		b.indexFilePath = metaFilePath
+		b.backupBaseName = strings.TrimSuffix(metaFilename, ".index")
+		var indexObj = &dbbackup.BackupIndexFile{}
+		if err := dbbackup.ParseBackupIndexFile(b.indexFilePath, indexObj); err != nil {
+			return err
+		} else {
+			b.indexObj = indexObj
+			b.backupType = b.indexObj.BackupType
+			b.backupHost = b.indexObj.BackupHost
+			b.backupPort = b.indexObj.BackupPort
+		}
+	} else if strings.HasSuffix(metaFilename, ".info") || fileType == dbbackup.MYSQL_INFO_FILE {
+		b.infoFilePath = metaFilePath
+		b.backupBaseName = strings.TrimSuffix(metaFilename, ".info")
+		var infoObj = &dbbackup.InfoFileDetail{}
+		if err := dbbackup.ParseBackupInfoFile(b.infoFilePath, infoObj); err != nil {
+			return err
+		} else {
+			b.infoObj = infoObj
+			b.backupType = b.infoObj.BackupType
+			b.backupHost = b.infoObj.BackupHost
+			b.backupPort = b.infoObj.BackupPort
+		}
+	} else {
+		return errors.Errorf("unknown meta file_type: %s", fileType)
+	}
+	logger.Info("backupType=%s, backupHost=%s, backupPort=%d", b.backupType, b.backupHost, b.backupPort)
+	return nil
+}
+
+func newTimestampString() string {
+	return time.Now().Format("20060102150405")
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/common.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/common.go
new file mode 100644
index 0000000000..5713b678b2
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/common.go
@@ -0,0 +1,33 @@
+package restore
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+)
+
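+// checkExistRunningDbLoad checks whether admin sessions are still running against any database in dblist,
+// returning the distinct list of such databases; the bool is false only when the processlist query fails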
+func checkExistRunningDbLoad(db *native.DbWorker, checkProcess bool, dblist []string) (bool, []string, error) {
+	// check running processes
+	if !checkProcess || len(dblist) == 0 {
+		return true, nil, nil
+	}
+
+	processList, err := db.SelectProcesslist([]string{native.DBUserAdmin})
+	if err != nil {
+		return false, nil, err
+	}
+	if len(processList) == 0 {
+		return true, nil, nil
+	}
+
+	var runningDbs []string
+	for _, process := range processList {
+		if !process.DB.Valid {
+			continue
+		}
+		if util.StringsHas(dblist, process.DB.String) {
+			runningDbs = append(runningDbs, process.DB.String)
+		}
+	}
+	runningDbs = util.UniqueStrings(runningDbs)
+	return true, runningDbs, nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/dbloader.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/dbloader.go
new file mode 100644
index 0000000000..c0330794ba
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/dbloader.go
@@ -0,0 +1,9 @@
+// Package dbloader wraps dbbackup-go loadbackup for logical and physical restores
+package dbloader
+
+// DBBackupLoader dbbackup -loadbackup
+type DBBackupLoader interface {
+	CreateConfigFile() error
+	PreLoad() error
+	Load() error
+}
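+
+// Implementations: LogicalLoader and PhysicalLoader. The typical call sequence (see DBLoader.Start in the
+// restore package) is PreLoad first, then Load, which creates the config file before invoking dbbackup.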
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/dbloader_util.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/dbloader_util.go
new file mode 100644
index 0000000000..878c6e39f6
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/dbloader_util.go
@@ -0,0 +1,54 @@
+package dbloader
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+)
+
+// LoaderUtil holds the common parameters of a dbbackup-go loadbackup run
+type LoaderUtil struct {
+	Client string `json:"client" validate:"required"`
+	// target local instance to restore into
+	TgtInstance native.InsObject `json:"tgt_instance"`
+	IndexObj    *dbbackup.BackupIndexFile
+
+	// do not write binlog (-without-binlog: set sql_log_bin=0)
+	WithOutBinlog bool   `json:"withoutBinlog"`
+	IndexFilePath string `json:"index_file_path" validate:"required"`
+	LoaderDir     string `json:"loader_dir"`
+	TaskDir       string `json:"taskDir"`
+
+	// filters passed in by the caller, excluding the system databases
+	Databases        []string `json:"databases"`
+	Tables           []string `json:"tables"`
+	ExcludeDatabases []string `json:"exclude_databases"`
+	ExcludeTables    []string `json:"exclude_tables"`
+
+	// internal state
+	cfgFilePath string
+	doDr        bool
+}
+
+/*
+func (l *LoaderUtil) String() string {
+	return fmt.Sprintf("LoaderUtil{Client:%s, TgtInstance:%v, IndexObj:%+v, IndexFilePath:%s, LoaderDir:%s, TaskDir:%s}",
+		l.Client, l.TgtInstance, l.IndexObj, l.IndexFilePath, l.LoaderDir, l.TaskDir)
+}
+*/
+
+// LoaderOpt defines the restore filter options
+type LoaderOpt struct {
+	// which databases to restore; currently only effective for logical restore
+	Databases       []string `json:"databases"`
+	Tables          []string `json:"tables"`
+	IgnoreDatabases []string `json:"ignore_databases"`
+	IgnoreTables    []string `json:"ignore_tables"`
+
+	RecoverPrivs bool `json:"recover_privs"`
+	// Only needed for point-in-time restore: whether binlog will be recovered afterwards (doSlave does not
+	// recover binlog). This option controls the behavior of the following recover-binlog step.
+	// When recover_binlog is set, all table schemas of the instance must be restored: even a logical restore
+	// of only part of the data must still restore every table schema.
+	WillRecoverBinlog bool `json:"recover_binlog"`
+	// Useful for table-level point-in-time restore: with statement/mixed binlog format, all data must be imported;
+	// with row format, only the specified tables need importing, and recover-binlog may set quick_mode=true to
+	// replay only those tables' binlog as well
+	SourceBinlogFormat string `json:"source_binlog_format" enums:",ROW,STATEMENT,MIXED"`
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/logical_loader.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/logical_loader.go
new file mode 100644
index 0000000000..06261b7a45
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/logical_loader.go
@@ -0,0 +1,148 @@
+package dbloader
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter"
+	"fmt"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/cast"
+	"gopkg.in/ini.v1"
+)
+
+// LogicalLoader loads a logical (myloader-style) backup
+type LogicalLoader struct {
+	*LoaderUtil
+	MyloaderOpt   *LoaderOpt
+	myloaderRegex string
+}
+
+// CreateConfigFile writes the LogicalLoad config file; buildFilter must have run before this
+func (l *LogicalLoader) CreateConfigFile() error {
+	p := l.LoaderUtil
+	if l.myloaderRegex == "" {
+		return errors.New("myloader config need filter regex")
+	}
+	loaderConfig := dbbackup.CnfLogicalLoad{
+		MysqlHost:     p.TgtInstance.Host,
+		MysqlPort:     cast.ToString(p.TgtInstance.Port),
+		MysqlUser:     p.TgtInstance.User,
+		MysqlPasswd:   p.TgtInstance.Pwd,
+		MysqlCharset:  l.IndexObj.BackupCharset,
+		MysqlLoadDir:  p.LoaderDir,
+		IndexFilePath: p.IndexFilePath,
+		Threads:       4,
+		EnableBinlog:  true,
+		Regex:         l.myloaderRegex,
+	}
+	if loaderConfig.MysqlCharset == "" {
+		loaderConfig.MysqlCharset = "binary"
+	}
+	logger.Info("dbloader config file, %+v", loaderConfig)
+
+	f := ini.Empty()
+	section, err := f.NewSection("LogicalLoad")
+	if err != nil {
+		return err
+	}
+	if err = section.ReflectFrom(&loaderConfig); err != nil {
+		return err
+	}
+	cfgFilePath := filepath.Join(p.TaskDir, fmt.Sprintf("dbloader_%d.cfg", p.TgtInstance.Port))
+	if err = f.SaveTo(cfgFilePath); err != nil {
+		return errors.Wrap(err, "create config")
+	}
+	p.cfgFilePath = cfgFilePath
+	logger.Info("tmp dbloader config file %s", p.cfgFilePath)
+	return nil
+}
+
+// PreLoad runs before the backup is extracted:
+// build the filter and check that the target instance is reachable
+func (l *LogicalLoader) PreLoad() error {
+	if err := l.buildFilter(); err != nil {
+		return err
+	}
+
+	dbWorker, err := l.TgtInstance.Conn()
+	if err != nil {
+		return errors.Wrap(err, "目标实例连接失败")
+	}
+	defer dbWorker.Stop()
+	return nil
+}
+
+// Load restores the data:
+// 1. create config 2. loadbackup
+func (l *LogicalLoader) Load() error {
+	if err := l.CreateConfigFile(); err != nil {
+		return err
+	}
+
+	if err := l.loadBackup(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (l *LogicalLoader) loadBackup() error {
+	cmd := fmt.Sprintf(`cd %s;%s -configpath %s -loadbackup |grep -v WARNING`, l.TaskDir, l.Client, l.cfgFilePath)
+	logger.Info("dbLoader cmd: %s", cmd)
+	stdStr, err := cmutil.ExecShellCommand(false, cmd)
+	if err != nil {
+		return errors.Wrap(err, stdStr)
+	}
+	return nil
+}
+
+// buildFilter builds the myloader filter regex into myloaderRegex; only logical backups have filter options
+func (l *LogicalLoader) buildFilter() error {
+	o := l.MyloaderOpt
+	if o != nil {
+		if len(o.Databases) == 0 {
+			l.Databases = []string{"*"}
+		}
+		if len(o.Tables) == 0 {
+			l.Tables = []string{"*"}
+		}
+		if len(o.Databases)+len(o.Tables)+len(o.IgnoreDatabases)+len(o.IgnoreTables) == 0 {
+			// import all schema and data together; recover-binlog quick_mode must be false
+			logger.Info("no filter: import schema and data together, recover-binlog need quick_mode=false")
+			l.doDr = true
+		}
+		if o.WillRecoverBinlog && o.SourceBinlogFormat != "ROW" {
+			// filter databases/tables specified (or the filter is ineffective): all schema and data must be
+			// imported, and the full binlog replayed afterwards, i.e. quick_mode=false
+			logger.Info("binlog_format!=row: import schema and data together, recover-binlog need quick_mode=false")
+			l.doDr = true
+		} else {
+			// either binlog will not be recovered afterwards,
+			// or it will be and the source binlog format is row, so only the needed tables have to be imported
+			l.Databases = o.Databases
+			l.Tables = o.Tables
+			l.ExcludeDatabases = o.IgnoreDatabases
+			l.ExcludeTables = o.IgnoreTables
+		}
+	} else {
+		l.doDr = true
+	}
+	if l.doDr {
+		l.Databases = []string{"*"}
+		l.Tables = []string{"*"}
+	}
+	// build regex
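+	// The pattern is produced by db_table_filter; illustratively, Databases=["db1"] with Tables=["*"]
+	// yields a regex matching every table in db1 (the exact format is up to MyloaderRegex).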
+	if filter, err := db_table_filter.NewDbTableFilter(
+		l.Databases,
+		l.Tables,
+		l.ExcludeDatabases,
+		l.ExcludeTables,
+	); err != nil {
+		return err
+	} else {
+		l.myloaderRegex = filter.MyloaderRegex(l.doDr)
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/physical_loader.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/physical_loader.go
new file mode 100644
index 0000000000..870509537a
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/physical_loader.go
@@ -0,0 +1,99 @@
+package dbloader
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+	"gopkg.in/ini.v1"
+)
+
+// PhysicalLoader loads a physical (xtrabackup) backup
+type PhysicalLoader struct {
+	*LoaderUtil
+	*Xtrabackup
+}
+
+// CreateConfigFile writes the PhysicalLoad config file
+func (l *PhysicalLoader) CreateConfigFile() error {
+	logger.Info("create loader config file")
+	p := l.LoaderUtil
+
+	// get my.cnf and socket
+	cnfFileName := util.GetMyCnfFileName(p.TgtInstance.Port)
+	cnfFile := &util.CnfFile{FileName: cnfFileName}
+	if err := cnfFile.Load(); err != nil {
+		logger.Info("get my.conf failed %v", cnfFileName)
+		return errors.WithStack(err)
+	}
+	l.myCnf = cnfFile
+	if p.TgtInstance.Socket == "" {
+		p.TgtInstance.Socket = l.Xtrabackup.getSocketName() // x.myCnf.GetMySQLSocket()
+		l.Xtrabackup.TgtInstance.Socket = l.Xtrabackup.getSocketName()
+	}
+	// create loader config file
+	loaderConfig := dbbackup.CnfPhysicalLoad{
+		DefaultsFile:  cnfFileName, // l.Xtrabackup.myCnf.FileName
+		MysqlLoadDir:  p.LoaderDir,
+		IndexFilePath: p.IndexFilePath,
+		CopyBack:      false,
+		Threads:       4,
+	}
+	logger.Info("dbloader config file, %+v", loaderConfig)
+
+	f := ini.Empty()
+	section, err := f.NewSection("PhysicalLoad")
+	if err != nil {
+		return err
+	}
+	if err = section.ReflectFrom(&loaderConfig); err != nil {
+		return err
+	}
+	cfgFilePath := filepath.Join(p.TaskDir, fmt.Sprintf("dbloader_%d.cfg", p.TgtInstance.Port))
+	if err = f.SaveTo(cfgFilePath); err != nil {
+		return errors.Wrap(err, "create config")
+	}
+	p.cfgFilePath = cfgFilePath
+	logger.Info("tmp dbloader config file %s", p.cfgFilePath)
+	return nil
+}
+
+// PreLoad has nothing to do for a physical restore
+func (l *PhysicalLoader) PreLoad() error {
+	return nil
+}
+
+// Load restores the data:
+// 1. create config
+// 2. stop mysqld / clean old dirs
+// 3. loadbackup
+// 4. fix privs / start mysqld
+func (l *PhysicalLoader) Load() error {
+	if err := l.CreateConfigFile(); err != nil {
+		return err
+	}
+	if err := l.Xtrabackup.PreRun(); err != nil {
+		return err
+	}
+	if err := l.loadBackup(); err != nil {
+		return err
+	}
+	if err := l.Xtrabackup.PostRun(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (l *PhysicalLoader) loadBackup() error {
+	cmd := fmt.Sprintf(`cd %s;%s -configpath %s -loadbackup |grep -v WARNING`, l.TaskDir, l.Client, l.cfgFilePath)
+	logger.Info("dbLoader cmd: %s", cmd)
+	stdStr, err := osutil.ExecShellCommand(false, cmd)
+	if err != nil {
+		return errors.Wrap(err, stdStr)
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/xtrabackup.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/xtrabackup.go
new file mode 100644
index 0000000000..c8549b7c23
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/xtrabackup.go
@@ -0,0 +1,188 @@
+package dbloader
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/computil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+// Xtrabackup drives the physical restore steps around dbbackup loadbackup
+type Xtrabackup struct {
+	TgtInstance   native.InsObject `json:"tgt_instance"`
+	SrcBackupHost string           `json:"src_backup_host" validate:"required"`
+	QpressTool    string           `json:"qpress_tool" validate:"required,file"`
+
+	LoaderDir string // directory of the extracted backup, ${taskDir}/backupBaseName/
+	// initialized lazily in PostRun
+	dbWorker *native.DbWorker // TgtInstance
+	// initialized in PreRun
+	myCnf *util.CnfFile
+}
+
+// PreRun prepares the physical restore; every step below must be retryable
+func (x *Xtrabackup) PreRun() error {
+	logger.Info("run xtrabackup preRun")
+
+	// stop the local mysqld
+	inst := x.TgtInstance
+
+	logger.Info("stop local mysqld")
+	if err := computil.ShutdownMySQLBySocket2(inst.User, inst.Pwd, inst.Socket); err != nil {
+		logger.Error("shutdown mysqld failed %s", inst.Socket)
+		return err
+	}
+
+	logger.Info("decompress xtrabackup meta files")
+	if err := x.DecompressMetaFile(); err != nil {
+		return err
+	}
+
+	logger.Info("clean local mysqld data dirs")
+	// clean the local data directories
+	if err := x.cleanXtraEnv(); err != nil {
+		return err
+	}
+
+	logger.Info("replace local mysqld my.cnf variables")
+	// adjust the my.cnf file
+	if err := x.doReplaceCnf(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// PostRun starts mysqld and repairs privileges after the data files have been restored
+func (x *Xtrabackup) PostRun() (err error) {
+	logger.Info("change datadir owner user and group")
+	// fix the data directory owner
+	if err = x.changeDirOwner(); err != nil {
+		return err
+	}
+
+	logger.Info("start local mysqld with skip-grant-tables")
+	// start mysql to repair privileges
+	startParam := computil.StartMySQLParam{
+		MediaDir:        cst.MysqldInstallPath,
+		MyCnfName:       x.myCnf.FileName,
+		MySQLUser:       x.TgtInstance.User, // use ADMIN
+		MySQLPwd:        x.TgtInstance.Pwd,
+		Socket:          x.TgtInstance.Socket,
+		SkipGrantTables: true, // start with skip-grant-tables to repair ADMIN
+	}
+	if _, err = startParam.StartMysqlInstance(); err != nil {
+		return errors.WithMessage(err, "start mysqld after xtrabackup")
+	}
+	if x.dbWorker, err = x.TgtInstance.Conn(); err != nil {
+		return err
+	}
+
+	logger.Info("repair ADMIN user host and password")
+	// After a physical restore the ADMIN password equals the one on the backup source instance (cluster?);
+	// repair the ADMIN user to the local credentials
+	if err := x.RepairUserAdmin(native.DBUserAdmin, x.TgtInstance.Pwd); err != nil {
+		return err
+	}
+
+	logger.Info("repair other user privileges")
+	// repair privileges
+	if err := x.RepairPrivileges(); err != nil {
+		return err
+	}
+	x.dbWorker.Stop()
+
+	logger.Info("restart local mysqld")
+	// restart mysql (without skip-grant-tables)
+	startParam.SkipGrantTables = false
+	if _, err := startParam.RestartMysqlInstance(); err != nil {
+		return err
+	}
+	// reconnect
+	if x.dbWorker, err = x.TgtInstance.Conn(); err != nil {
+		return err
+	} else {
+		defer x.dbWorker.Stop()
+	}
+
+	logger.Info("repair myisam tables")
+	// repair MyISAM tables
+	if err := x.RepairAndTruncateMyIsamTables(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (x *Xtrabackup) cleanXtraEnv() error {
+	dirs := []string{
+		"datadir",
+		"innodb_log_group_home_dir",
+		"innodb_data_home_dir",
+		"relay-log",
+		"log_bin",
+		"tmpdir",
+	}
+	return x.CleanEnv(dirs)
+}
+
+// doReplaceCnf godoc
+// TODO consider using the mycnf-change module for this
+func (x *Xtrabackup) doReplaceCnf() error {
+	items := []string{
+		"innodb_data_file_path",
+		"innodb_log_files_in_group",
+		"innodb_log_file_size",
+		"tokudb_cache_size",
+	}
+	return x.ReplaceMycnf(items)
+}
+
+func (x *Xtrabackup) importData() error {
+	return nil
+}
+
+func (x *Xtrabackup) changeDirOwner() error {
+	dirs := []string{
+		"datadir",
+		"innodb_log_group_home_dir",
+		"innodb_data_home_dir",
+		"relay_log",
+		"tmpdir",
+		"log_bin",
+		"slow_query_log_file",
+	}
+	return x.ChangeDirOwner(dirs)
+}
+
+// DecompressMetaFile decompress .pq file and output same file name without suffix
+// ex: /home/mysql/dbbackup/xtrabackup/qpress -do xtrabackup_info.qp > xtrabackup_info
+func (x *Xtrabackup) DecompressMetaFile() error {
+	files := []string{
+		"xtrabackup_timestamp_info",
+		"backup-my.cnf",
+		"xtrabackup_binlog_info",
+		"xtrabackup_info",
+		"xtrabackup_slave_info",
+		"xtrabackup_galera_info",
+	}
+
+	for _, file := range files {
+		compressedFile := filepath.Join(x.LoaderDir, file+".qp")
+		if _, err := os.Stat(compressedFile); os.IsNotExist(err) {
+			continue
+		}
+		script := fmt.Sprintf(`cd %s && %s -do %s.qp > %s`, x.LoaderDir, x.QpressTool, file, file)
+		stdErr, err := cmutil.ExecShellCommand(false, script)
+		if err != nil {
+			return errors.Wrapf(err, "decompress file %s failed, error:%s, stderr:%s",
+				compressedFile, err.Error(), stdErr)
+		}
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/xtrabackup_repaire.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/xtrabackup_repaire.go
new file mode 100644
index 0000000000..2bf541fcdc
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader/xtrabackup_repaire.go
@@ -0,0 +1,289 @@
+package dbloader
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"path"
+	"path/filepath"
+	"regexp"
+	"runtime/debug"
+	"strings"
+	"sync"
+)
+
+// RepairUserAdmin resets the ADMIN user's hosts and password after a physical restore
+func (x *Xtrabackup) RepairUserAdmin(user, password string) error {
+	/*
+		sql := fmt.Sprintf(
+			"UPDATE `mysql`.`user` SET `authentication_string`=password('%s') WHERE `user`='%s'",
+			password, user,
+		)
+		sql = fmt.Sprintf("ALTER USER %s@'localhost' IDENTIFIED WITH mysql_native_password BY '%s'", user, password)
+	*/
+	localHosts := []string{"localhost", "127.0.0.1", x.SrcBackupHost} // 这些是合法的 admin host
+	localHostsUnsafe := mysqlutil.UnsafeIn(localHosts, "'")           // 不在这些列表里的 admin host 将会被 DELETE
+	adminHostsSQL := fmt.Sprintf("SELECT `host` FROM `mysql`.`user` where `user`='%s'", user)
+	var adminUserHosts []string
+	if adminHosts, err := x.dbWorker.QueryOneColumn("host", adminHostsSQL); err != nil {
+		return err
+	} else {
+		for _, h := range adminHosts {
+			if cmutil.StringsHas(localHosts, h) {
+				adminUserHosts = append(adminUserHosts, h)
+			}
+		}
+	}
+
+	sqlList := []string{"FLUSH PRIVILEGES;"}
+	sqlList = append(sqlList, fmt.Sprintf("DELETE FROM `mysql`.`user` WHERE `user`='%s' AND `host` NOT IN (%s);",
+		user, localHostsUnsafe))
+	for _, adminHost := range adminUserHosts {
+		sqlList = append(sqlList, fmt.Sprintf("ALTER USER %s@'%s' IDENTIFIED WITH mysql_native_password BY '%s';",
+			user, adminHost, password))
+		if adminHost == x.SrcBackupHost {
+			sqlList = append(sqlList, fmt.Sprintf("UPDATE `mysql`.`user` SET `host`='%s' WHERE `user`='%s' and `host`='%s';",
+				x.TgtInstance.Host, user, x.SrcBackupHost))
+		}
+	}
+	sqlList = append(sqlList, "FLUSH PRIVILEGES;")
+
+	logger.Info("RepairUserAdmin: %v", mysqlutil.ClearIdentifyByInSQLs(sqlList))
+	if _, err := x.dbWorker.ExecMore(sqlList); err != nil {
+		return err
+	}
+	return nil
+	// ALTER USER ADMIN@'localhost' IDENTIFIED BY 'auth_string';
+	// SET PASSWORD FOR 'ADMIN'@'localhost' = 'auth_string';
+	// ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY 'XXX';
+	// flush privileges;
+}
+
+// RepairAndTruncateMyIsamTables repairs MyISAM tables concurrently; tables in the test/infodba schemas are truncated instead
+func (x *Xtrabackup) RepairAndTruncateMyIsamTables() error {
+	systemDbs := util.StringsRemove(native.DBSys, native.TEST_DB)
+	sql := fmt.Sprintf(
+		`SELECT table_schema, table_name FROM information_schema.tables `+
+			`WHERE table_schema not in (%s) AND engine = 'MyISAM'`,
+		mysqlutil.UnsafeIn(systemDbs, "'"),
+	)
+
+	rows, err := x.dbWorker.Db.Query(sql)
+	if err != nil {
+		return fmt.Errorf("query myisam tables error,detail:%w,sql:%s", err, sql)
+	}
+	defer rows.Close()
+
+	wg := sync.WaitGroup{}
+	errorChan := make(chan error, 1)
+	finishChan := make(chan bool, 1)
+	for rows.Next() {
+		var db string
+		var table string
+		if err := rows.Scan(&db, &table); err != nil {
+			return err
+		}
+		wg.Add(1)
+		go func(worker *native.DbWorker, db, table string) {
+			defer wg.Done()
+			defer func() {
+				if r := recover(); r != nil {
+					logger.Info("panic goroutine inner error!%v;%s", r, string(debug.Stack()))
+					errorChan <- fmt.Errorf("panic goroutine inner error!%v", r)
+					return
+				}
+			}()
+
+			sql := ""
+			if db == native.TEST_DB || db == native.INFODBA_SCHEMA {
+				sql = fmt.Sprintf("truncate table %s.%s", db, table)
+			} else {
+				sql = fmt.Sprintf("repair table %s.%s", db, table)
+			}
+			_, err := worker.Exec(sql)
+			if err != nil {
+				errorChan <- fmt.Errorf("repair myisam table error,sql:%s,error:%w", sql, err)
+				return
+			}
+			return
+		}(x.dbWorker, db, table)
+	}
+	go func() {
+		wg.Wait()
+		close(finishChan)
+	}()
+
+	select {
+	case <-finishChan:
+	case err := <-errorChan:
+		return err
+	}
+	return nil
+}
+
+// RepairPrivileges rewrites mysql.user/db/tables_priv host entries from the backup source host to the target host
+func (x *Xtrabackup) RepairPrivileges() error {
+	tgtHost := x.TgtInstance.Host
+	myUsers := []string{"ADMIN", "sync", "repl"}
+
+	srcHostUnsafe := mysqlutil.UnsafeEqual(x.SrcBackupHost, "'")
+	tgtHostUnsafe := mysqlutil.UnsafeEqual(tgtHost, "'")
+	myUsersUnsafe := mysqlutil.UnsafeIn(myUsers, "'")
+
+	var batchSQLs []string
+	// delete src host's ADMIN/sync user
+	sql1 := fmt.Sprintf(
+		"DELETE FROM mysql.user WHERE `user` IN (%s) AND `host` = %s;",
+		myUsersUnsafe, srcHostUnsafe,
+	)
+	batchSQLs = append(batchSQLs, sql1)
+
+	// update src host to new, but not ADMIN/sync/repl
+	sql2s := []string{
+		fmt.Sprintf(
+			"UPDATE mysql.user SET `host`=%s WHERE `host`=%s AND User not in (%s);",
+			tgtHostUnsafe, srcHostUnsafe, myUsersUnsafe,
+		),
+		fmt.Sprintf(
+			"UPDATE mysql.db SET `host`=%s WHERE `host`=%s AND User not in (%s);",
+			tgtHostUnsafe, srcHostUnsafe, myUsersUnsafe,
+		),
+		fmt.Sprintf(
+			"UPDATE mysql.tables_priv SET `host`=%s WHERE `host`=%s AND User not in (%s);",
+			tgtHostUnsafe, srcHostUnsafe, myUsersUnsafe,
+		),
+	}
+	batchSQLs = append(batchSQLs, sql2s...)
+
+	// delete src host users, but not localhost
+	sql3 := fmt.Sprintf(
+		"DELETE FROM mysql.user WHERE `host` IN(%s);", srcHostUnsafe,
+	)
+	batchSQLs = append(batchSQLs, sql3)
+
+	// flush
+	sql4 := fmt.Sprintf("flush privileges;")
+	batchSQLs = append(batchSQLs, sql4)
+	logger.Info("RepairPrivileges: %+v", batchSQLs)
+	if _, err := x.dbWorker.ExecMore(batchSQLs); err != nil {
+		return err
+	}
+	return nil
+}
+
+// CleanEnv cleans the local data directories for a physical restore
+func (x *Xtrabackup) CleanEnv(dirs []string) error {
+	// mysqld should already be stopped and the port closed
+	if osutil.IsPortUp(x.TgtInstance.Host, x.TgtInstance.Port) {
+		return fmt.Errorf("port %d is still opened", x.TgtInstance.Port)
+	}
+
+	var dirArray []string
+	for _, v := range dirs {
+		if strings.TrimSpace(x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, v, "")) == "" {
+			logger.Warn(fmt.Sprintf("my.cnf %s is Emtpty!!", v))
+			continue
+		}
+		switch v {
+		case "relay-log", "relay_log":
+			val, err := x.myCnf.GetRelayLogDir()
+			if err != nil {
+				return err
+			}
+			reg := regexp.MustCompile(cst.RelayLogFileMatch)
+			if result := reg.FindStringSubmatch(val); len(result) == 2 {
+				relaylogdir := result[1]
+				dirArray = append(dirArray, "rm -rf "+relaylogdir+"/*")
+			}
+		case "log_bin", "log-bin":
+			val, err := x.myCnf.GetMySQLLogDir()
+			if err != nil {
+				return err
+			}
+			reg := regexp.MustCompile(cst.BinLogFileMatch)
+			if result := reg.FindStringSubmatch(val); len(result) == 2 {
+				binlogdir := result[1]
+				// TODO every rm -rf here should be guarded against accidental deletions such as rm -rf /
+				dirArray = append(dirArray, "rm -rf "+binlogdir+"/*")
+			}
+		case "slow_query_log_file", "slow-query-log-file":
+			if val := x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, "slow_query_log_file", ""); val != "" {
+				dirArray = append(dirArray, "rm -f "+val)
+			}
+		default:
+			val := x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, v, "")
+			if strings.TrimSpace(val) != "" && strings.TrimSpace(val) != "/" {
+				dirArray = append(dirArray, "rm -rf "+val+"/*")
+			}
+		}
+	}
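+	// scripts is a newline-separated list of shell commands; illustratively (paths made up):
+	//   rm -rf /data1/mysqldata/20000/data/*
+	//   rm -rf /data1/mysqldata/20000/binlog/*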
+	scripts := strings.Join(dirArray, "\n")
+	logger.Info("CleanEnv: %s", scripts)
+	// run with mysql os user
+	if _, err := osutil.ExecShellCommand(false, scripts); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ReplaceMycnf godoc
+// For a physical restore, the new instance must keep innodb_data_file_path and similar settings identical to the source instance (excluding server_id, server_uuid)
+func (x *Xtrabackup) ReplaceMycnf(items []string) error {
+	backupMyCnfPath := x.getBackupCnfName()
+	backupMyCnf, err := util.LoadMyCnfForFile(backupMyCnfPath)
+	if err != nil {
+		return err
+	}
+	bakCnfMap := backupMyCnf.SaveMySQLConfig2Object()
+	var itemsExclude = []string{"server_id", "server_uuid"}
+	itemMap := map[string]string{}
+	for _, key := range items {
+		if util.StringsHas(itemsExclude, key) {
+			continue
+		}
+		itemMap[key] = bakCnfMap.Section[util.MysqldSec].KvMap[key]
+		// sed 's///g' f > /tmp/f && cat /tmp/f > f
+	}
+	if len(itemMap) > 0 {
+		logger.Info("ReplaceMycnf new: %v", itemMap)
+		if err = x.myCnf.ReplaceValuesToFile(itemMap); err != nil {
+			// x.myCnf.Load() // reload it?
+			return err
+		}
+	}
+	return nil
+}
+
+// ChangeDirOwner fixes the owner of the data directories; requires root
+func (x *Xtrabackup) ChangeDirOwner(dirs []string) error {
+	var commands []string
+	for _, v := range dirs {
+		// skip directories that are not configured in my.cnf
+		if p := x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, v, ""); p != "" {
+			if filepath.IsAbs(p) {
+				commands = append(commands, fmt.Sprintf("chown -R mysql %s", path.Dir(p)))
+			}
+			// @todo relative paths: ignore them or raise an error?
+		}
+	}
+	script := strings.Join(commands, "\n")
+	logger.Info("ChangeDirOwner: %s", script)
+	if _, err := osutil.ExecShellCommand(false, script); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (x *Xtrabackup) getBackupCnfName() string {
+	return fmt.Sprintf("%s/%s", x.LoaderDir, "backup-my.cnf")
+}
+
+func (x *Xtrabackup) getSocketName() string {
+	sock := x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, "socket", "/tmp/mysql.sock")
+	return sock
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader_restore.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader_restore.go
new file mode 100644
index 0000000000..182fc8eb99
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader_restore.go
@@ -0,0 +1,224 @@
+package restore
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/common/go-pubpkg/validate"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/dbloader"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/tools"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+
+	"github.com/jinzhu/copier"
+	"github.com/pkg/errors"
+	"github.com/spf13/cast"
+)
+
+// DBLoader restores a backup with dbbackup-go loadbackup
+type DBLoader struct {
+	*RestoreParam
+
+	taskDir      string // derived from BackupInfo.WorkDir: ${work_dir}/doDr_${id}/${port}/
+	targetDir    string // directory of the extracted backup, ${taskDir}/backupBaseName/
+	dbLoaderUtil *dbloader.LoaderUtil
+	// dbLoader is interface
+	dbLoader dbloader.DBBackupLoader
+	// myCnf for physical backup
+	myCnf *util.CnfFile
+}
+
+// Init load index file
+func (m *DBLoader) Init() error {
+	var err error
+	/*
+		if err = m.BackupInfo.GetBackupMetaFile(dbbackup.BACKUP_INDEX_FILE); err != nil {
+			return err
+		}
+	*/
+	cnfFileName := util.GetMyCnfFileName(m.TgtInstance.Port)
+	cnfFile := &util.CnfFile{FileName: cnfFileName}
+	if err = cnfFile.Load(); err != nil {
+		logger.Info("get my.conf failed %v", cnfFileName)
+		return errors.WithStack(err)
+	} else {
+		m.myCnf = cnfFile
+		m.TgtInstance.Socket, err = m.myCnf.GetMySQLSocket()
+		if err != nil {
+			logger.Warn("fail to get mysqld socket: %s", cnfFileName)
+		}
+	}
+	if err = m.BackupInfo.indexObj.ValidateFiles(); err != nil {
+		return err
+	}
+	if err = m.initDirs(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// PreCheck merges the required tools and validates the backup position info
+func (m *DBLoader) PreCheck() error {
+	toolset, err := tools.NewToolSetWithPick(tools.ToolDbbackupGo, tools.ToolQPress)
+	if err != nil {
+		return err
+	}
+	if err := m.Tools.Merge(toolset); err != nil {
+		return err
+	}
+	// validateBackupInfo before run import
+	if _, err := m.getChangeMasterPos(m.SrcInstance); err != nil {
+		return err
+	}
+	// tool executable permissions
+	// local instance connectivity
+	return nil
+}
+
+// chooseDBBackupLoader picks the dbbackup-go loader: logical or physical
+func (m *DBLoader) chooseDBBackupLoader() error {
+	dbloaderPath := m.Tools.MustGet(tools.ToolDbbackupGo)
+	m.dbLoaderUtil = &dbloader.LoaderUtil{
+		Client:        dbloaderPath,
+		TgtInstance:   m.TgtInstance,
+		IndexFilePath: m.BackupInfo.indexFilePath,
+		IndexObj:      m.BackupInfo.indexObj,
+		LoaderDir:     m.targetDir,
+		TaskDir:       m.taskDir,
+		WithOutBinlog: true,
+	}
+	// logger.Warn("validate dbLoaderUtil: %+v", m.dbLoaderUtil)
+	if err := validate.GoValidateStruct(m.dbLoaderUtil, false, false); err != nil {
+		return err
+	}
+
+	if m.backupType == cst.BackupTypeLogical {
+		myloaderOpt := &dbloader.LoaderOpt{}
+		copier.Copy(myloaderOpt, m.RestoreOpt)
+		logger.Warn("myloaderOpt copied: %+v. src:%+v", myloaderOpt, m.RestoreOpt)
+		m.dbLoader = &dbloader.LogicalLoader{
+			LoaderUtil:  m.dbLoaderUtil,
+			MyloaderOpt: myloaderOpt,
+		}
+	} else if m.backupType == cst.BackupTypePhysical {
+		m.dbLoader = &dbloader.PhysicalLoader{
+			LoaderUtil: m.dbLoaderUtil,
+			Xtrabackup: &dbloader.Xtrabackup{
+				TgtInstance:   m.dbLoaderUtil.TgtInstance,
+				SrcBackupHost: m.dbLoaderUtil.IndexObj.BackupHost,
+				QpressTool:    m.Tools.MustGet(tools.ToolQPress),
+				LoaderDir:     m.targetDir,
+			},
+		}
+	} else {
+		return errors.Errorf("unknown backupType: %s", m.backupType)
+	}
+	logger.Info("recover backup_type=%s", m.backupType)
+	return nil
+}
+
+// Start extracts the backup and runs the chosen loader
+func (m *DBLoader) Start() error {
+	if err := m.chooseDBBackupLoader(); err != nil {
+		return err
+	}
+	if err := m.dbLoader.PreLoad(); err != nil {
+		return err
+	}
+
+	logger.Info("dbloader params %+v", m)
+	if m.taskDir == "" {
+		return errors.Errorf("dbloader taskDir error")
+	}
+	if err := m.BackupInfo.indexObj.UntarFiles(m.taskDir); err != nil {
+		return err
+	}
+
+	if err := m.dbLoader.Load(); err != nil {
+		return errors.Wrap(err, "dbloaderData failed")
+	}
+	return nil
+}
+
+// WaitDone is a no-op for the dbloader restore
+func (m *DBLoader) WaitDone() error {
+	return nil
+}
+
+// PostCheck is a no-op for the dbloader restore
+func (m *DBLoader) PostCheck() error {
+	return nil
+}
+
+// ReturnChangeMaster returns the change-master position parsed from the backup metadata
+func (m *DBLoader) ReturnChangeMaster() (*mysqlutil.ChangeMaster, error) {
+	return m.getChangeMasterPos(m.SrcInstance)
+}
+
+func (m *DBLoader) initDirs() error {
+	if m.BackupInfo.WorkDir == "" {
+		return errors.Errorf("work_dir %s should not be empty", m.WorkDir)
+	}
+	if m.WorkID == "" {
+		m.WorkID = newTimestampString()
+	}
+	m.taskDir = fmt.Sprintf("%s/doDr_%s/%d", m.WorkDir, m.WorkID, m.TgtInstance.Port)
+	if err := osutil.CheckAndMkdir("", m.taskDir); err != nil {
+		return err
+	}
+	if m.BackupInfo.backupBaseName == "" {
+		return errors.Errorf("backup file baseName [%s] error", m.BackupInfo.backupBaseName)
+	}
+	m.targetDir = fmt.Sprintf("%s/%s", m.taskDir, m.backupBaseName)
+	return nil
+}
+
+// getChangeMasterPos godoc
+// srcMaster -> srcSlave
+//
+//	|-> tgtMaster -> tgtSlave
+//
+// masterInst is the instance to change master to
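+// The returned info corresponds to a statement like (illustrative values):
+//   CHANGE MASTER TO MASTER_HOST='1.1.1.1', MASTER_PORT=3306,
+//     MASTER_LOG_FILE='binlog.000001', MASTER_LOG_POS=4;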
+func (m *DBLoader) getChangeMasterPos(masterInst native.Instance) (*mysqlutil.ChangeMaster, error) {
+	logger.Info("metadata: %+v", m.indexObj.BinlogInfo)
+	masterInfo := m.indexObj.BinlogInfo.ShowMasterStatus
+	slaveInfo := m.indexObj.BinlogInfo.ShowSlaveStatus
+	if masterInfo == nil || masterInfo.BinlogFile == "" {
+		return nil, errors.New("no master info found in metadata")
+	}
+	if masterInst.Host == "" || masterInst.Port == 0 { // 说明不关注备份位点信息
+		return nil, nil
+	}
+	// If the backup source instance is exactly the instance to change master to, use the MasterStatus info directly
+	if masterInfo.MasterHost == masterInst.Host && masterInfo.MasterPort == masterInst.Port {
+		// if m.BackupInfo.backupHost == masterInst.Host && m.BackupInfo.backupPort == masterInst.Port {
+		cm := &mysqlutil.ChangeMaster{
+			MasterLogFile:   masterInfo.BinlogFile,
+			MasterLogPos:    cast.ToInt64(masterInfo.BinlogPos),
+			ExecutedGtidSet: masterInfo.Gtid,
+
+			MasterHost: masterInst.Host,
+			MasterPort: masterInst.Port,
+		}
+		return cm, nil
+	} else if slaveInfo == nil || slaveInfo.BinlogFile == "" {
+		// the backup was taken on the master; after a switchover/migration this backup position is invalid
+		return nil, errors.New("this backup is illegal because I cannot find the binlog pos for current master")
+	}
+	// the backup was taken on a slave; change master to its master
+	if slaveInfo.MasterHost != "" && slaveInfo.MasterHost != masterInst.Host {
+		logger.Warn(
+			"metadata show slave host=%s:%d != change to master host=%s:%d",
+			slaveInfo.MasterHost, slaveInfo.MasterPort, masterInst.Host, masterInst.Port)
+	}
+	cm := &mysqlutil.ChangeMaster{
+		MasterLogFile:   slaveInfo.BinlogFile,
+		MasterLogPos:    cast.ToInt64(slaveInfo.BinlogPos),
+		ExecutedGtidSet: slaveInfo.Gtid,
+		MasterHost:      masterInst.Host,
+		MasterPort:      masterInst.Port,
+	}
+	return cm, nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/mload_restore.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/mload_restore.go
new file mode 100644
index 0000000000..c20791957e
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/mload_restore.go
@@ -0,0 +1,191 @@
+package restore
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/common/go-pubpkg/validate"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/tools"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// MLoad restores a backup with the MLoad (MLOAD.pl) tool
+type MLoad struct {
+	*RestoreParam
+
+	taskDir   string // derived from BackupInfo.WorkDir: ${work_dir}/doDr_${id}/${port}/
+	targetDir string // directory of the extracted backup, ${taskDir}/backupBaseName/
+	mloadUtil MLoadParam
+	dbWorker  *native.DbWorker // TgtInstance
+}
+
+// Init validates the backup files, prepares the directories and connects to the target instance
+func (m *MLoad) Init() error {
+	var err error
+	if err = m.BackupInfo.infoObj.ValidateFiles(); err != nil {
+		return err
+	}
+	if err = m.initDirs(); err != nil {
+		return err
+	}
+	// logger.Info("tgtInstance: %+v", m.TgtInstance)
+	m.dbWorker, err = m.TgtInstance.Conn()
+	if err != nil {
+		return errors.Wrap(err, "目标实例连接失败")
+	}
+	return nil
+}
+
+// PreCheck merges the required tools
+func (m *MLoad) PreCheck() error {
+	toolset, err := tools.NewToolSetWithPick(tools.ToolMload)
+	if err != nil {
+		return err
+	}
+	if err := m.Tools.Merge(toolset); err != nil {
+		return err
+	}
+	// tool executable permissions
+	// local instance connectivity
+	return nil
+}
+
+// Start builds the MLoad parameters, extracts the backup and runs the import
+func (m *MLoad) Start() error {
+	mloadPath := m.Tools.MustGet(tools.ToolMload)
+	m.mloadUtil = MLoadParam{
+		Client:            mloadPath,
+		Host:              m.TgtInstance.Host,
+		Port:              m.TgtInstance.Port,
+		User:              m.TgtInstance.User,
+		Password:          m.TgtInstance.Pwd,
+		Charset:           m.BackupInfo.infoObj.Charset,
+		PathList:          []string{m.targetDir},
+		TaskDir:           m.taskDir,
+		db:                m.dbWorker,
+		checkMLoadProcess: true,
+		WithOutBinlog:     true,
+	}
+	if m.RestoreOpt != nil {
+		o := m.RestoreOpt
+		if !m.RestoreOpt.WillRecoverBinlog {
+			// schema/data 一起导入
+			m.mloadUtil.flagApartSchemaData = false
+		} else {
+			if len(o.Databases)+len(o.Tables)+len(o.IgnoreDatabases)+len(o.IgnoreTables) == 0 {
+				// import all schema and data together; recover-binlog quick_mode must be false
+				logger.Info("no filter: import schema and data together, recover-binlog need quick_mode=false")
+				m.mloadUtil.flagApartSchemaData = false
+			} else if m.RestoreOpt.SourceBinlogFormat != "ROW" {
+				logger.Info("binlog_format!=row: import schema and data together, recover-binlog need quick_mode=false")
+				// filter databases/tables specified (or the filter is ineffective): all schema and data must be imported, and the full binlog replayed afterwards, i.e. quick_mode=false
+				m.mloadUtil.flagApartSchemaData = false
+			} else { // a filter exists and binlog_format=row
+				logger.Info("import full-schema and specific-data apart, recover-binlog filter depends on its quick_mode")
+				m.mloadUtil.Databases = o.Databases
+				m.mloadUtil.Tables = o.Tables
+				m.mloadUtil.IgnoreDatabases = o.IgnoreDatabases
+				m.mloadUtil.IgnoreTables = o.IgnoreTables
+				// Option 1: import the full schema but only the specified data; option 2: import schema and data
+				// partially together, which raises the error rate of the later recover-binlog
+				// recover-binlog uses quick_mode to choose between replaying the full binlog and only the
+				// specified tables' binlog (event_query_handler=error)
+				m.mloadUtil.flagApartSchemaData = true
+			}
+		}
+	}
+
+	if err := validate.GoValidateStruct(m.mloadUtil, false, false); err != nil {
+		return err
+	}
+	logger.Info("mload params %+v", m)
+	if err := m.BackupInfo.infoObj.UntarFiles(m.taskDir); err != nil {
+		return err
+	}
+	// An empty instance may still have infodba_schema in the backup; ignore it during import
+	if cmutil.FileExists(filepath.Join(m.targetDir, native.INFODBA_SCHEMA)) {
+		m.mloadUtil.IgnoreDatabases = append(m.mloadUtil.IgnoreDatabases, native.INFODBA_SCHEMA)
+	}
+	if err := m.mloadUtil.Run(); err != nil {
+		return errors.Wrap(err, "mloadData failed")
+	}
+	return nil
+}
+
+// WaitDone is a no-op for MLoad
+func (m *MLoad) WaitDone() error {
+	return nil
+}
+
+// PostCheck is a no-op for MLoad
+func (m *MLoad) PostCheck() error {
+	return nil
+}
+
+// ReturnChangeMaster parses the change-master position from the backup dump
+func (m *MLoad) ReturnChangeMaster() (*mysqlutil.ChangeMaster, error) {
+	return m.getChangeMasterPos(m.SrcInstance)
+}
+
+func (m *MLoad) initDirs() error {
+	if m.BackupInfo.WorkDir == "" {
+		return errors.Errorf("work_dir %s should not be empty", m.WorkDir)
+	}
+	if m.WorkID == "" {
+		m.WorkID = newTimestampString()
+	}
+	m.taskDir = fmt.Sprintf("%s/doDr_%s/%d", m.WorkDir, m.WorkID, m.TgtInstance.Port)
+	if err := osutil.CheckAndMkdir("", m.taskDir); err != nil {
+		return err
+	}
+	if m.backupBaseName == "" {
+		return errors.Errorf("backup file baseName [%s] error", m.backupBaseName)
+	}
+	m.targetDir = fmt.Sprintf("%s/%s", m.taskDir, m.backupBaseName)
+	return nil
+}
+
+// getChangeMasterPos godoc
+// srcMaster -> srcSlave
+//
+//	|-> tgtMaster -> tgtSlave
+//
+// tgtMaster always points at the srcMaster host,port
+func (m *MLoad) getChangeMasterPos(masterInst native.Instance) (*mysqlutil.ChangeMaster, error) {
+	// -- CHANGE MASTER TO
+	// -- CHANGE SLAVE TO
+	backupPosFile := "DUMP.BEGIN.sql.gz"
+	cmd := fmt.Sprintf("zcat %s/%s |grep 'CHANGE '", m.targetDir, backupPosFile)
+	out, err := osutil.ExecShellCommand(false, cmd)
+	if err != nil {
+		return nil, err
+	}
+	changeSqls := util.SplitAnyRune(out, "\n")
+	// len(changeSqls) == 2 means the backup was taken on a slave (backupRole would be DBRoleSlave)
+	var changeSql string
+	// a backup taken on a slave contains both CHANGE MASTER and CHANGE SLAVE
+	reChangeMaster := regexp.MustCompile(`(?i:CHANGE MASTER TO)`)
+	// reChangeSlave := regexp.MustCompile(`(?i:CHANGE SLAVE TO)`)   // a backup taken on the master only has CHANGE SLAVE
+	for _, sql := range changeSqls {
+		if reChangeMaster.MatchString(sql) {
+			sql = strings.ReplaceAll(sql, "--", "")
+			changeSql = sql
+			break
+		}
+	}
+	cm := &mysqlutil.ChangeMaster{ChangeSQL: changeSql}
+	if err := cm.ParseChangeSQL(); err != nil {
+		return nil, errors.Wrap(err, changeSql)
+	}
+	cm.MasterHost = masterInst.Host
+	cm.MasterPort = masterInst.Port
+	return cm, nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/mload_util.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/mload_util.go
new file mode 100644
index 0000000000..c884bfb8c4
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/mload_util.go
@@ -0,0 +1,286 @@
+package restore
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"os"
+	"os/exec"
+	"runtime"
+	"runtime/debug"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+const (
+	// GrepError greps fatal/error lines from the MLoad log
+	GrepError = `grep -iP '\[FATAL\]|\[ERROR\]| ERROR | FATAL |^ERROR |^FATAL '  `
+)
+
+// MLoadParam defines the parameters of an MLOAD.pl import
+type MLoadParam struct {
+	Client   string   `json:"client" validate:"required"`
+	Host     string   `json:"host"`
+	Port     int      `json:"port"`
+	User     string   `json:"user"`
+	Password string   `json:"password"`
+	Charset  string   `json:"charset" validate:"required"`
+	PathList []string `json:"pathList"` // list of absolute paths
+	// do not write binlog (-without-binlog: set sql_log_bin=0)
+	WithOutBinlog   bool     `json:"withoutBinlog"`
+	NoData          bool     `json:"noData"`
+	NoCreateTable   bool     `json:"noCreateTable"`
+	Databases       []string `json:"databases"`
+	Tables          []string `json:"tables"`
+	IgnoreDatabases []string `json:"ignoreDatabases"`
+	IgnoreTables    []string `json:"ignoreTables"`
+
+	TaskDir string `json:"taskDir"`
+	// import the full schema first; only effective when schema and data may be imported separately
+	// (filter options present, binlog will be recovered, and binlog_format=row)
+	flagApartSchemaData bool
+	// marks whether the schema has been imported
+	flagSchemaImported int
+	// marks whether the data has been imported
+	flagDataImported int
+
+	db *native.DbWorker // local db connection
+	// the concatenated --databases ... --tables ... option substring
+	filterOpts string
+
+	// internal check state
+	checkMLoadProcess bool
+	mloadScript       string
+}
+
+// MLoadCheck verifies the MLoad tool
+func (m *MLoadParam) MLoadCheck() error {
+	// the MLoad tool must exist
+	if err := cmutil.FileExistsErr(m.Client); err != nil {
+		return err
+	}
+	// the MLoad tool must be executable
+	return nil
+}
+
+func (m *MLoadParam) initFilterOpt() error {
+	// MLOAD.pl filter rules: when --databases is given, everything is relative to those databases,
+	// i.e. --tables and --ignore-tables filter inside them;
+	// when --ignore-databases is given, no other filter option may be present
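+	// Illustratively, Databases=["db1"] with Tables=["tb1","tb2"] ends up as:
+	//   --databases=db1 --tables=tb1,tb2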
+	if len(m.IgnoreDatabases) > 0 && (len(m.Tables) > 0 || len(m.IgnoreTables) > 0) {
+		return errors.New("MLOAD.pl --ignore-databases should has no other filter options")
+	}
+	if len(m.Databases) != 1 && len(m.Tables) > 0 {
+		return errors.New("MLOAD.pl --tables should has only one database using --databases")
+	}
+	if len(m.Databases) != 1 && len(m.IgnoreTables) > 0 {
+		return errors.New("MLOAD.pl --ignore-tables should has only one database using --databases")
+	}
+	if len(m.Databases) > 0 && len(m.IgnoreDatabases) > 0 {
+		return errors.New("MLOAD.pl --databases and --ignore-databases cannot work together")
+	}
+	if len(m.Tables) > 0 && len(m.IgnoreTables) > 0 {
+		return errors.New("MLOAD.pl --tables and --ignore-tables cannot work together")
+	}
+
+	if len(m.Databases) > 0 {
+		m.filterOpts += fmt.Sprintf(" --databases=%s", strings.Join(m.Databases, ","))
+	}
+	if len(m.Tables) > 0 {
+		m.filterOpts += fmt.Sprintf(" --tables=%s", strings.Join(m.Tables, ","))
+	}
+	if len(m.IgnoreTables) > 0 {
+		m.filterOpts += fmt.Sprintf(" --ignore-tables=%s", strings.Join(m.IgnoreTables, ","))
+	}
+	if len(m.IgnoreDatabases) > 0 {
+		logger.Info("mload ignore-databases=%v", m.IgnoreDatabases)
+		m.filterOpts += fmt.Sprintf(
+			" --ignore_not_exist_dbs --ignore-databases=%s",
+			strings.Join(m.IgnoreDatabases, ","),
+		)
+	}
+	return nil
+}
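+
+// Worked example (values assumed for illustration): Databases=["db1"],
+// Tables=["tb1","tb2"] yields filterOpts " --databases=db1 --tables=tb1,tb2";
+// IgnoreDatabases=["db2","db3"] alone yields
+// " --ignore_not_exist_dbs --ignore-databases=db2,db3".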
+
+// Run validates the client, builds the filter options, then imports the data
+func (m *MLoadParam) Run() error {
+	if err := m.MLoadCheck(); err != nil {
+		return err
+	}
+	if err := m.initFilterOpt(); err != nil {
+		return err
+	}
+	return m.MLoadData()
+}
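+
+// A minimal usage sketch. All values below (paths, credentials, charset) are
+// illustrative assumptions, not defaults of this package:
+//
+//	m := &MLoadParam{
+//		Client:   "/home/mysql/dbbackup/MLOAD.pl", // assumed install path
+//		Host:     "127.0.0.1",
+//		Port:     20000,
+//		User:     "test",
+//		Password: "xxx",
+//		Charset:  "utf8",
+//		PathList: []string{"/data/dbbak/backup_dir"},
+//		TaskDir:  "/data/dbbak/task_1",
+//	}
+//	if err := m.Run(); err != nil {
+//		// inspect MLoad_backup_*.log under TaskDir
+//	}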
+
+// MLoadData runs MLOAD.pl for every path in PathList and watches each import until completion
+func (m *MLoadParam) MLoadData() error {
+	defer func() {
+		if r := recover(); r != nil {
+			logger.Error("MLoadData panic, err:%+v, stack:%s", r, string(debug.Stack()))
+		}
+	}()
+	logger.Info("MLoadParam: %+v", m)
+	for i, filePath := range m.PathList {
+		var logFile = fmt.Sprintf("%s/MLoad_backup_%d_%d.log", m.TaskDir, i, m.Port)
+		// MLOAD appends to an existing log; remove the old one so earlier errors are not counted against this run
+		if err := os.RemoveAll(logFile); err != nil {
+			logger.Warn("remove %s failed, err:%v", logFile, err)
+		}
+		}
+		m.mloadScript = fmt.Sprintf(
+			"/usr/bin/perl %s -u%s -p%s -h %s -P %d --charset=%s -L %s --conc %d  --path=%s",
+			m.Client, m.User, m.Password, m.Host, m.Port, m.Charset, logFile, runtime.NumCPU(), filePath,
+		)
+		if m.WithOutBinlog {
+			m.mloadScript += ` --without-binlog `
+		}
+		if m.NoData {
+			m.mloadScript += ` --no-data`
+		}
+		if m.NoCreateTable {
+			m.mloadScript += ` --no-create-table`
+		}
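+		// Two-phase import state machine (flag values: 0 = not started,
+		// 1 = the current recursive call is performing this phase, 2 = phase done):
+		// phase 1 imports the full schema without filterOpts; phase 2 re-enters
+		// MLoadData to import only the filtered data.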
+		// decide whether to split the import into schema first, then data.
+		// the execution order of this block matters
+		if m.filterOpts != "" && m.flagApartSchemaData {
+			// by default the full schema is imported, while data is limited to the wanted databases/tables
+			if m.flagSchemaImported == 0 {
+				logger.Warn("flagApartSchemaData=true, import the full schema first")
+				// import the full schema, without filterOpts
+				m.NoData = true
+				m.NoCreateTable = false
+				m.flagSchemaImported = 1              // guards against infinite recursion
+				if err := m.MLoadData(); err != nil { // the recursive call performs the actual schema import
+					return errors.Wrap(err, "import full schema")
+				}
+				m.flagSchemaImported = 2 // mark the schema as imported
+			}
+			if m.flagSchemaImported == 2 && m.flagDataImported == 0 {
+				// then import the wanted data
+				logger.Warn("flagSchemaImported=2, import data for the wanted databases/tables")
+				m.NoData = false
+				m.NoCreateTable = true
+				m.flagDataImported = 1
+				if err := m.MLoadData(); err != nil {
+					return errors.Wrap(err, "import data")
+				}
+				m.flagDataImported = 2
+			}
+			if m.flagDataImported == 1 {
+				m.mloadScript += " " + m.filterOpts
+			} else if m.flagSchemaImported == 2 && m.flagDataImported == 2 {
+				return nil
+			}
+		}
+
+		logger.Info("MLoad script:%s", mysqlutil.RemovePassword(m.mloadScript))
+		cmd := &osutil.FileOutputCmd{
+			Cmd: exec.Cmd{
+				Path: "/bin/bash",
+				Args: []string{"/bin/bash", "-c", m.mloadScript},
+			},
+			StdOutFile: logFile,
+			StdErrFile: logFile,
+		}
+		if err := cmd.Start(); err != nil {
+			return errors.WithStack(err)
+		}
+		done := make(chan error, 1)
+		go func(cmd *osutil.FileOutputCmd) {
+			done <- cmd.Wait()
+		}(cmd)
+
+		interval := 10 * time.Second // check the mload process every 10s
+		ticker := time.NewTicker(interval)
+		defer ticker.Stop()
+
+		// poll the import: report running imports periodically until the process exits
+		var counter int
+		for {
+			stop := false
+			select {
+			case err := <-done:
+				if err != nil {
+					if _, _, err2 := m.checkMLoadComplete(logFile); err2 != nil {
+						return errors.Wrap(err, err2.Error())
+					}
+					return fmt.Errorf("MLoad导入失败, 命令:%s, 错误:%w", mysqlutil.RemovePassword(m.mloadScript), err)
+				}
+				stop = true
+			case <-ticker.C:
+				_, runningDBS, err := checkExistRunningDbLoad(m.db, m.checkMLoadProcess, m.Databases)
+				if err != nil {
+					logger.Warn("checkExistRunningMLoad failed, err:%w", err)
+				}
+				counter++
+				if counter%12 == 0 {
+					logger.Info("checkExistRunningMLoad runningDBS: %+v", runningDBS)
+				}
+			}
+			if stop {
+				break
+			}
+		}
+
+		// check the result
+		isSuccess, errorNum, err := m.checkMLoadComplete(logFile)
+		if err != nil {
+			return err
+		}
+		if !isSuccess {
+			return fmt.Errorf("MLoad import failed, error count: %d, see error log file: %s", errorNum, logFile)
+		}
+	}
+	return nil
+}
+
+func (m *MLoadParam) checkMLoadComplete(errFile string) (bool, int, error) {
+	script := GrepError + errFile + ` |wc -l`
+	out, err := exec.Command("/bin/bash", "-c", script).CombinedOutput()
+	if err != nil {
+		return false, 0, errors.Wrap(err, script)
+	}
+	outStr := strings.TrimSpace(string(out))
+	errorNum, err := strconv.Atoi(outStr)
+	if err != nil {
+		return false, 0, errors.Wrapf(err, "failed to convert the output of %s. output: %s", script, outStr)
+	}
+	if errorNum > 0 {
+		// try to extract the real error from the mload error log
+		scriptErr := GrepError + errFile + ` |tail -1 |awk -F ' ' '{print $NF}'`
+		if out, err := exec.Command("/bin/bash", "-c", scriptErr).CombinedOutput(); err == nil {
+			loadErrFile := strings.TrimSpace(string(out))
+			if out, err := exec.Command("/bin/bash", "-c", "grep -i error "+loadErrFile+" | head -2").
+				CombinedOutput(); err == nil {
+				errInfo := strings.TrimSpace(string(out))
+				if errInfo != "" {
+					return false, errorNum, errors.Errorf("error num: %d, error info: %s", errorNum, errInfo)
+				}
+			} else {
+				return false, errorNum, errors.Errorf("error num: %d, error file: %s", errorNum, loadErrFile)
+			}
+		}
+		return false, errorNum, errors.Errorf("found %d errors. cmd: %s", errorNum, script)
+	}
+
+	var isComplete = 0
+	script = `grep '\[COMPLETE\]' ` + errFile + ` |wc -l`
+	out, err = exec.Command("/bin/bash", "-c", script).CombinedOutput()
+	if err != nil {
+		return false, 0, errors.Wrap(err, script)
+	}
+	outStr = strings.TrimSpace(string(out))
+	isComplete, err = strconv.Atoi(outStr)
+	if err != nil {
+		return false, 0, errors.Wrapf(err, "failed to convert the output of %s. output: %s", script, outStr)
+	}
+	if isComplete < 1 {
+		// err is nil here, so wrapping it would return nil; build a fresh error
+		return false, 0, errors.Errorf("COMPLETE not found. output: %s", outStr)
+	}
+
+	return true, 0, nil
+}
"/data/dbbak/20000/binlog", + BinlogFiles: []string{"binlog20000.00001", "binlog20000.00002"}, + WorkDir: "/data/dbbak/", + ParseOnly: false, + ToolSet: *tools.NewToolSetWithPickNoValidate(tools.ToolMysqlbinlog), + }, + GeneralParam: &components.GeneralParam{ + RuntimeAccountParam: components.RuntimeAccountParam{ + MySQLAccountParam: common.AccountAdminExample, + }, + }, + } +} + +// RecoverBinlog TODO +type RecoverBinlog struct { + TgtInstance native.InsObject `json:"tgt_instance" validate:"required"` + RecoverOpt *MySQLBinlogUtil `json:"recover_opt" validate:"required"` + // 恢复时 binlog 存放目录,一般是下载目录 + BinlogDir string `json:"binlog_dir" validate:"required" example:"/data/dbbak/123456/binlog"` + // binlog列表 + BinlogFiles []string `json:"binlog_files" validate:"required"` + // binlog 解析所在目录,存放运行日志 + WorkDir string `json:"work_dir" validate:"required" example:"/data/dbbak/"` + WorkID string `json:"work_id" example:"123456"` + // 仅解析 binlog,不做导入 + ParseOnly bool `json:"parse_only"` + // 解析的并发度,默认 1 + ParseConcurrency int `json:"parse_concurrency"` + // 指定要开始应用的第 1 个 binlog。如果指定,一般要设置 start_pos,如果不指定则使用 start_time + BinlogStartFile string `json:"binlog_start_file"` + + // 如果启用 quick_mode,解析 binlog 时根据 filter databases 等选项过滤 row event,对 query event 会全部保留 。需要 mysqlbinlog 工具支持 --tables 选项,可以指定参数的 tools + // 当 quick_mode=false 时,recover_opt 里的 databases 等选项无效,会应用全部 binlog + QuickMode bool `json:"quick_mode"` + SourceBinlogFormat string `json:"source_binlog_format" enums:",ROW,STATEMENT,MIXED"` + + // 恢复用到的客户端工具,不提供时会有默认值 + tools.ToolSet + + // /WorkDir/WorkID/ + taskDir string + dbWorker *native.DbWorker // TgtInstance + binlogCli string + mysqlCli string + filterOpts string + importScript string + parseScript string + binlogParsedDir string + logDir string + tools tools.ToolSet +} + +const ( + dirBinlogParsed = "binlog_parsed" + importScript = "import_binlog.sh" + parseScript = "parse_binlog.sh" +) + +// MySQLClientOpt TODO +type MySQLClientOpt struct { + MaxAllowedPacket int `json:"max_allowed_packet"` + // 是否启用 --binary-mode + BinaryMode bool `json:"binary_mode"` +} + +// MySQLBinlogUtil TODO +type MySQLBinlogUtil struct { + // --start-datetime + StartTime string `json:"start_time"` + // --stop-datetime + StopTime string `json:"stop_time"` + // --start-position + StartPos uint `json:"start_pos,omitempty"` + // --stop-position + StopPos uint `json:"stop_pos,omitempty"` + // 是否开启幂等模式, mysql --slave-exec-mode=idempotent or mysqlbinlog --idempotent + IdempotentMode bool `json:"idempotent_mode"` + // 导入时是否记录 binlog, mysql sql_log_bin=0 or mysqlbinlog --disable-log-bin. 
true表示不写 + NotWriteBinlog bool `json:"not_write_binlog"` + + // row event 解析指定 databases + Databases []string `json:"databases,omitempty"` + // row event 解析指定 tables + Tables []string `json:"tables,omitempty"` + // row event 解析指定 忽略 databases + DatabasesIgnore []string `json:"databases_ignore,omitempty"` + // row event 解析指定 忽略 tables + TablesIgnore []string `json:"tables_ignore,omitempty"` + + // query event 默认处理策略。keep:保留解析出的query event 语句, ignore:注释(丢弃)该 query event, error:认为是不接受的语句,报错 + // 默认 keep + QueryEventHandler string `json:"query_event_handler" enums:"keep,ignore,safe,error"` + // 匹配字符串成功,强制忽略语句,加入注释中。当与 filter_statement_match_error 都匹配时,ignore_force会优先生效 + // 默认 infodba_schema + FilterStatementMatchIgnoreForce string `json:"filter_statement_match_ignore_force"` + // 匹配字符串成功,则解析 binlog 报错 + FilterStatementMatchError string `json:"filter_statement_match_error"` + // 匹配字符串成功,则忽略语句,加入注释中 + FilterStatementMatchIgnore string `json:"filter_statement_match_ignore"` + + // --rewrite_db="db1->xx_db1,db2->xx_db2" + RewriteDB string `json:"rewrite_db"` + + MySQLClientOpt *MySQLClientOpt `json:"mysql_client_opt"` + // 是否启用 flashback + Flashback bool `json:"flashback,omitempty"` + + // mysqlbinlog options string + options string +} + +func (r *RecoverBinlog) parse(f string) error { + parsedName := fmt.Sprintf(`%s/%s.sql`, dirBinlogParsed, f) + cmd := fmt.Sprintf("cd %s && %s %s/%s >%s", r.taskDir, r.binlogCli, r.BinlogDir, f, parsedName) + logger.Info("run: %s", cmd) + if outStr, err := osutil.ExecShellCommand(false, cmd); err != nil { + return errors.Wrapf(err, "fail to parse %s: %s, cmd: %s", f, outStr, cmd) + } + return nil +} + +// ParseBinlogFiles TODO +func (r *RecoverBinlog) ParseBinlogFiles() error { + logger.Info("start to parse binlog files with concurrency %d", r.ParseConcurrency) + defer ants.Release() + var errs []error + var wg = &sync.WaitGroup{} + pp, _ := ants.NewPoolWithFunc( + r.ParseConcurrency, func(i interface{}) { + f := i.(string) + if err := r.parse(f); err != nil { + errs = append(errs, err) + return + } + }, + ) + defer pp.Release() + + for _, f := range r.BinlogFiles { + if len(errs) > 0 { + break + } + if f != "" { + wg.Add(1) + pp.Invoke(f) + wg.Done() + } + } + wg.Wait() + if len(errs) > 0 { + return util.SliceErrorsToError(errs) + } + return nil +} + +// buildScript 创建 parse_binlog.sh, import_binlog.sh 脚本,需要调用执行 +func (r *RecoverBinlog) buildScript() error { + // 创建解析 binlog 的脚本,只是为了查看或者后面手动跑 + // 因为要并行解析,所以真正跑的是 ParseBinlogFiles + parseCmds := []string{fmt.Sprintf("cd %s", r.taskDir)} + for _, f := range r.BinlogFiles { + if f == "" { + continue + } + parsedName := fmt.Sprintf(`%s/%s.sql`, dirBinlogParsed, f) + cmd := fmt.Sprintf("%s %s/%s >%s 2>logs/parse_%s.err", r.binlogCli, r.BinlogDir, f, parsedName, f) + parseCmds = append(parseCmds, cmd) + } + r.parseScript = fmt.Sprintf(filepath.Join(r.taskDir, parseScript)) + fh, err := os.OpenFile(r.parseScript, os.O_CREATE|os.O_WRONLY, 0755) + if err != nil { + return err + } + defer fh.Close() + _, err = io.WriteString(fh, strings.Join(parseCmds, "\n")) + if err != nil { + return errors.Wrap(err, "write parse script") + } + + // 创建导入 binlog 的脚本 + importBinlogTmpl := ` +#!/bin/bash +dbhost={{.dbHost}} +dbport={{.dbPort}} +dbuser={{.dbUser}} +dbpass={{.dbPass}} +mysql_cmd={{.mysqlCmd}} +retcode=0 + +if [ "$dbpass" = "" ];then + echo 'please set password' + exit 1 +fi +mysql_opt="-u$dbuser -p$dbpass -h$dbhost -P$dbport {{.mysqlOpt}} -A " +sqlFiles="{{.sqlFiles}}" +for f in $sqlFiles +do + 
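+
+// The concurrency pattern above, reduced to its core (a sketch; assumes
+// github.com/panjf2000/ants/v2). Done() runs inside the worker so Wait()
+// blocks until every Invoke'd task has finished:
+//
+//	var wg sync.WaitGroup
+//	pool, _ := ants.NewPoolWithFunc(4, func(i interface{}) {
+//		defer wg.Done()
+//		process(i.(string))
+//	})
+//	defer pool.Release()
+//	for _, f := range files {
+//		wg.Add(1)
+//		_ = pool.Invoke(f)
+//	}
+//	wg.Wait()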
+// buildScript creates parse_binlog.sh and import_binlog.sh; callers run them explicitly
+func (r *RecoverBinlog) buildScript() error {
+	// the parse script exists only for inspection or manual reruns;
+	// actual parsing runs in parallel via ParseBinlogFiles
+	parseCmds := []string{fmt.Sprintf("cd %s", r.taskDir)}
+	for _, f := range r.BinlogFiles {
+		if f == "" {
+			continue
+		}
+		parsedName := fmt.Sprintf(`%s/%s.sql`, dirBinlogParsed, f)
+		cmd := fmt.Sprintf("%s %s/%s >%s 2>logs/parse_%s.err", r.binlogCli, r.BinlogDir, f, parsedName, f)
+		parseCmds = append(parseCmds, cmd)
+	}
+	r.parseScript = filepath.Join(r.taskDir, parseScript)
+	fh, err := os.OpenFile(r.parseScript, os.O_CREATE|os.O_WRONLY, 0755)
+	if err != nil {
+		return err
+	}
+	defer fh.Close()
+	_, err = io.WriteString(fh, strings.Join(parseCmds, "\n"))
+	if err != nil {
+		return errors.Wrap(err, "write parse script")
+	}
+
+	// create the binlog import script
+	importBinlogTmpl := `
+#!/bin/bash
+dbhost={{.dbHost}}
+dbport={{.dbPort}}
+dbuser={{.dbUser}}
+dbpass={{.dbPass}}
+mysql_cmd={{.mysqlCmd}}
+retcode=0
+
+if [ "$dbpass" = "" ];then
+  echo 'please set password'
+  exit 1
+fi
+mysql_opt="-u$dbuser -p$dbpass -h$dbhost -P$dbport {{.mysqlOpt}} -A "
+sqlFiles="{{.sqlFiles}}"
+for f in $sqlFiles
+do
+  filename={{.dirBinlogParsed}}/${f}.sql
+  echo "importing $filename"
+  $mysql_cmd $mysql_opt < $filename >>logs/import_binlog.log 2>>logs/import_binlog.err
+  if [ $? -gt 0 ];then
+    retcode=1
+    break
+  fi
+done
+exit $retcode
+`
+	r.importScript = filepath.Join(r.taskDir, importScript)
+	fi, err := os.OpenFile(r.importScript, os.O_CREATE|os.O_WRONLY, 0755)
+	if err != nil {
+		return err
+	}
+	defer fi.Close()
+	if r.RecoverOpt.Flashback {
+		sort.Sort(sort.Reverse(sort.StringSlice(r.BinlogFiles))) // descending order
+	}
+	if tpl, err := template.New("").Parse(importBinlogTmpl); err != nil {
+		return errors.Wrap(err, "parse import script template")
+	} else {
+		Vars := map[string]interface{}{
+			"dbHost":          r.TgtInstance.Host,
+			"dbPort":          r.TgtInstance.Port,
+			"dbUser":          r.TgtInstance.User,
+			"dbPass":          r.TgtInstance.Pwd,
+			"mysqlOpt":        "--max-allowed-packet=1073741824 --binary-mode",
+			"mysqlCmd":        r.ToolSet.MustGet(tools.ToolMysqlclient),
+			"dirBinlogParsed": dirBinlogParsed,
+			"sqlFiles":        strings.Join(r.BinlogFiles, " "),
+		}
+		if err := tpl.Execute(fi, Vars); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Init initializes tools and directories, and validates quick_mode
+func (r *RecoverBinlog) Init() error {
+	var err error
+	// initialize tool paths; check the paths and execute permissions
+	toolset, err := tools.NewToolSetWithPick(tools.ToolMysqlbinlog, tools.ToolMysqlclient)
+	if err != nil {
+		return err
+	}
+	if err = r.ToolSet.Merge(toolset); err != nil {
+		return err
+	}
+
+	// quick_mode is only allowed when binlog_format=row
+	if r.SourceBinlogFormat != "ROW" && r.QuickMode {
+		r.QuickMode = false
+		logger.Warn("quick_mode set to false because source_binlog_format != ROW")
+	}
+	// quick_mode=true requires mysqlbinlog to support --databases, --tables etc.
+	if r.QuickMode && !r.RecoverOpt.Flashback {
+		mysqlbinlogCli := r.ToolSet.MustGet(tools.ToolMysqlbinlog)
+		checkMysqlbinlog := fmt.Sprintf(`%s --help |grep "\-\-tables="`, mysqlbinlogCli)
+		if _, err := osutil.ExecShellCommand(false, checkMysqlbinlog); err != nil {
+			r.QuickMode = false
+			logger.Warn("%s has no --tables option, set recover_binlog quick_mode=false", mysqlbinlogCli)
+		}
+	}
+	if r.RecoverOpt.Flashback && !r.QuickMode {
+		return errors.New("--flashback need quick_mode=true")
+	}
+
+	if err = r.initDirs(); err != nil {
+		return err
+	}
+	if r.ParseConcurrency == 0 {
+		r.ParseConcurrency = 1
+	}
+	// check connectivity to the target instance
+	if r.RecoverOpt.Flashback || !r.ParseOnly {
+		r.dbWorker, err = r.TgtInstance.Conn()
+		if err != nil {
+			return errors.Wrap(err, "failed to connect to target instance")
+		}
+		if ret, err := r.TgtInstance.MySQLClientExec(r.ToolSet.MustGet(tools.ToolMysqlclient), "select 1"); err != nil {
+			return err
+		} else if strings.Contains(ret, "ERROR ") {
+			logger.Error("MySQLClientExec failed: %s %v", ret, err)
+		}
+	}
+	if r.RecoverOpt.Flashback && !r.ParseOnly {
+		return errors.New("flashback=true must have parse_only=true")
+	}
+	return nil
+}
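+
+// Lifecycle sketch (illustrative values): parse first, then run the generated
+// import script, mirroring how the flashback component drives RecoverBinlog:
+//
+//	r := &RecoverBinlog{
+//		TgtInstance: tgt, // a native.InsObject
+//		RecoverOpt:  &MySQLBinlogUtil{StartTime: "2022-11-05 00:00:01", IdempotentMode: true},
+//		BinlogDir:   "/data/dbbak/20000/binlog",
+//		BinlogFiles: []string{"binlog20000.00001"},
+//		WorkDir:     "/data/dbbak/",
+//		ParseOnly:   true,
+//	}
+//	for _, step := range []func() error{r.Init, r.PreCheck, r.Start, r.Import} {
+//		if err := step(); err != nil {
+//			break // handle the error
+//		}
+//	}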
+func (r *RecoverBinlog) buildMysqlOptions() error {
+	b := r.RecoverOpt
+	mysqlOpt := r.RecoverOpt.MySQLClientOpt
+
+	// init mysql client options
+	var initCommands []string
+	if b.NotWriteBinlog {
+		initCommands = append(initCommands, "set session sql_log_bin=0")
+	}
+	if len(initCommands) > 0 {
+		r.TgtInstance.Options += fmt.Sprintf(" --init-command='%s'", strings.Join(initCommands, ";"))
+	}
+	if mysqlOpt.BinaryMode {
+		r.TgtInstance.Options += " --binary-mode"
+	}
+	if mysqlOpt.MaxAllowedPacket > 0 {
+		r.TgtInstance.Options += fmt.Sprintf(" --max-allowed-packet=%d", mysqlOpt.MaxAllowedPacket)
+	}
+	r.mysqlCli = r.TgtInstance.MySQLClientCmd(r.ToolSet.MustGet(tools.ToolMysqlclient))
+	return nil
+}
+
+func (r *RecoverBinlog) buildBinlogOptions() error {
+	b := r.RecoverOpt
+	if b.StartPos == 0 && b.StartTime == "" {
+		return errors.Errorf("start_time and start_pos cannot both be empty")
+	}
+	if b.StartPos > 0 {
+		if r.BinlogStartFile == "" {
+			return errors.Errorf("start_pos requires binlog_start_file")
+		} else {
+			b.options += fmt.Sprintf(" --start-position=%d", b.StartPos)
+			// files before binlog_start_file also have to be removed from BinlogFiles
+		}
+	}
+	if b.StartTime != "" {
+		b.options += fmt.Sprintf(" --start-datetime='%s'", b.StartTime)
+	}
+	if b.StopTime != "" {
+		b.options += fmt.Sprintf(" --stop-datetime='%s'", b.StopTime)
+	}
+	b.options += " --base64-output=auto"
+	// strictly, binlog filters may only be enabled when the source instance is known to run
+	// row full mode; otherwise everything must be applied. the --databases etc. options only
+	// affect row events; with query-event-handler=keep parsing will not fail on query events.
+	// logical-import table filter rules differ from mysqlbinlog_rollback's; not handled here @todo
+	// quick_mode also cannot be enabled when mysqlbinlog has no --tables option
+	if r.QuickMode {
+		if err := r.buildFilterOpts(); err != nil {
+			return err
+		}
+	}
+	if b.IdempotentMode {
+		b.options += " --idempotent"
+	} else if r.QuickMode {
+		logger.Warn("idempotent=false and quick_mode=true may lead binlog-recover to fail")
+	}
+	if b.NotWriteBinlog {
+		b.options += " --disable-log-bin"
+	}
+
+	r.binlogCli += fmt.Sprintf("%s %s", r.ToolSet.MustGet(tools.ToolMysqlbinlog), r.RecoverOpt.options)
+	return nil
+}
+
+func (r *RecoverBinlog) buildFilterOpts() error {
+	b := r.RecoverOpt
+	r.filterOpts = ""
+	if b.Flashback {
+		r.filterOpts += " --flashback"
+	}
+	if len(b.Databases) > 0 {
+		r.filterOpts += fmt.Sprintf(" --databases='%s'", strings.Join(b.Databases, ","))
+	}
+	if len(b.Tables) > 0 {
+		r.filterOpts += fmt.Sprintf(" --tables='%s'", strings.Join(b.Tables, ","))
+	}
+	if len(b.DatabasesIgnore) > 0 {
+		r.filterOpts += fmt.Sprintf(" --databases-ignore='%s'", strings.Join(b.DatabasesIgnore, ","))
+	}
+	if len(b.TablesIgnore) > 0 {
+		r.filterOpts += fmt.Sprintf(" --tables-ignore='%s'", strings.Join(b.TablesIgnore, ","))
+	}
+	if r.filterOpts == "" {
+		logger.Warn("quick_mode=true should work together with a binlog-filter data import")
+	}
+	if r.filterOpts == "" && !b.IdempotentMode {
+		return errors.Errorf("no binlog-filter requires idempotent_mode=true")
+	}
+	// query events are applied in full and cannot be partially filtered; this requires the whole
+	// schema to be imported beforehand, or the import fails. error mode makes parsing fail instead
+	if b.QueryEventHandler == "" {
+		b.QueryEventHandler = "keep"
+	}
+	r.filterOpts += fmt.Sprintf(" --query-event-handler=%s", b.QueryEventHandler)
+	// forward parsing: do not set --filter-statement-match-error
+	r.filterOpts += fmt.Sprintf(" --filter-statement-match-ignore-force='%s'", native.INFODBA_SCHEMA)
+	b.options += " " + r.filterOpts
+	return nil
+}
+
+func (r *RecoverBinlog) initDirs() error {
+	if r.WorkID == "" {
+		r.WorkID = newTimestampString()
+	}
+	r.taskDir = fmt.Sprintf("%s/recover_binlog_%s/%d", r.WorkDir, r.WorkID, r.TgtInstance.Port)
+	if err := osutil.CheckAndMkdir("", r.taskDir); err != nil {
+		return err
+	}
+	r.binlogParsedDir = fmt.Sprintf("%s/%s", r.taskDir, dirBinlogParsed)
+	if err := osutil.CheckAndMkdir("", r.binlogParsedDir); err != nil {
+		return err
+	}
+	r.logDir = fmt.Sprintf("%s/%s", r.taskDir, "logs")
+	if err := osutil.CheckAndMkdir("", r.logDir); err != nil {
+		return err
+	}
+	return nil
+}
+
+// PreCheck builds the mysql/mysqlbinlog options and checks the binlog files
+func (r *RecoverBinlog) PreCheck() error {
+	var err error
+	if err := r.buildMysqlOptions(); err != nil {
+		return err
+	}
+	// init mysqlbinlog options
+	if err := r.buildBinlogOptions(); err != nil {
+		return err
+	}
+	// check all binlog files exist
+	var binlogFilesErrs []error
+	for _, f := range r.BinlogFiles {
+		filename := filepath.Join(r.BinlogDir, f)
+		if err := cmutil.FileExistsErr(filename); err != nil {
+			binlogFilesErrs = append(binlogFilesErrs, err)
+		}
+	}
+	if len(r.BinlogFiles) == 0 {
+		return errors.New("no binlog files given")
+	} else if len(binlogFilesErrs) > 0 {
+		return util.SliceErrorsToError(binlogFilesErrs)
+	}
+
+	// check the binlog files are consecutive
+	sort.Strings(r.BinlogFiles)
+	fileSeqList := util.GetSuffixWithLenAndSep(r.BinlogFiles, ".", 0)
+	if err = util.IsConsecutiveStrings(fileSeqList, true); err != nil {
+		return err
+	}
+
+	// check the first binlog exists
+	if r.BinlogStartFile != "" {
+		if !util.StringsHas(r.BinlogFiles, r.BinlogStartFile) {
+			return errors.Errorf("first binlog %s not found", r.BinlogStartFile)
+		}
+		// when start_datetime is empty, start_file and start_pos select the starting binlog pos
+		if r.RecoverOpt.StartTime == "" {
+			for i, f := range r.BinlogFiles {
+				if f != r.BinlogStartFile {
+					logger.Info("remove binlog file %s from list", f)
+					r.BinlogFiles[i] = "" // drop files before the first binlog
+				} else {
+					break
+				}
+			}
+		}
+	}
+
+	if err := r.checkTimeRange(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// FilterBinlogFiles trims files outside the time range off the binlog list and returns the total file size
+func (r *RecoverBinlog) FilterBinlogFiles() (int64, error) {
+	bp, _ := binlogParser.NewBinlogParse("", 0) // use defaults
+	var binlogFiles = []string{""}              // the first element is reserved
+	var totalSize int64 = 0
+	var firstBinlogSize int64 = 0
+	logger.Info("BinlogFiles before filter: %v", r.BinlogFiles)
+	if r.RecoverOpt.StartTime != "" && r.BinlogStartFile == "" {
+		binlogFiles[0] = ""
+		for _, f := range r.BinlogFiles {
+			fileName := filepath.Join(r.BinlogDir, f)
+			events, err := bp.GetTime(fileName, true, true)
+			if err != nil {
+				return 0, err
+			}
+			startTime := events[0].EventTime
+			stopTime := events[1].EventTime
+			fileSize := cmutil.GetFileSize(fileName)
+			if startTime > r.RecoverOpt.StartTime {
+				binlogFiles = append(binlogFiles, f)
+				totalSize += fileSize
+				if r.RecoverOpt.StopTime != "" && stopTime > r.RecoverOpt.StopTime {
+					break
+				}
+			} else {
+				binlogFiles[0] = f
+				firstBinlogSize = fileSize
+			}
+		}
+	}
+	if binlogFiles[0] == "" {
+		return 0, errors.New("no first binlog found that satisfies the condition")
+	}
+	totalSize += firstBinlogSize
+	r.BinlogFiles = binlogFiles
+	logger.Info("BinlogFiles after filter: %v", r.BinlogFiles)
+	return totalSize, nil
+}
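+
+// Worked example for FilterBinlogFiles above (times assumed): with files
+// f1[T0..T1], f2[T1..T2], f3[T2..T3] and start_time strictly inside f2's span,
+// f2 is kept as the head element (the last file starting before start_time),
+// f3 is appended, and f1 is dropped; the returned size covers f2+f3.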
+func (r *RecoverBinlog) checkTimeRange() error {
+	startTime := r.RecoverOpt.StartTime
+	stopTime := r.RecoverOpt.StopTime
+	if startTime != "" && stopTime != "" && startTime >= stopTime {
+		return errors.Errorf("binlog start_time [%s] should be less than stop_time [%s]", startTime, stopTime)
+	}
+	bp, _ := binlogParser.NewBinlogParse("", 0) // use defaults
+	if startTime != "" {
+		events, err := bp.GetTime(filepath.Join(r.BinlogDir, r.BinlogFiles[0]), true, false)
+		if err != nil {
+			return err
+		}
+		evStartTime := events[0].EventTime
+		if evStartTime > startTime {
+			return errors.Errorf(
+				"the first binlog %s start-datetime [%s] is greater than start_time [%s]",
+				r.BinlogFiles[0], evStartTime, startTime,
+			)
+		} else {
+			logger.Info(
+				"the first binlog %s start-datetime [%s] is lte start time [%s]",
+				r.BinlogFiles[0], evStartTime, startTime,
+			)
+		}
+	}
+
+	// the last binlog must end after the target time
+	if stopTime != "" {
+		lastBinlog := util.LastElement(r.BinlogFiles)
+		events, err := bp.GetTime(filepath.Join(r.BinlogDir, lastBinlog), false, true)
+		if err != nil {
+			return err
+		}
+		evStopTime := events[0].EventTime
+		if evStopTime < stopTime {
+			return errors.Errorf(
+				"the last binlog %s stop-datetime [%s] is less than target_time [%s]",
+				lastBinlog, evStopTime, stopTime,
+			)
+		} else {
+			logger.Info(
+				"the last binlog %s stop-datetime [%s] gte target_time [%s]",
+				lastBinlog, evStopTime, stopTime,
+			)
+		}
+	}
+	return nil
+}
+
+// Start always parses the binlog; it also imports them unless parse_only is set
+func (r *RecoverBinlog) Start() error {
+	binlogFiles := strings.Join(r.BinlogFiles, " ")
+	if r.ParseOnly {
+		if err := r.buildScript(); err != nil {
+			return err
+		}
+		return r.ParseBinlogFiles()
+	} else if !r.RecoverOpt.Flashback {
+		if r.RecoverOpt.IdempotentMode {
+			// must run in the main function; the defer restores the original value
+			newValue := "IDEMPOTENT"
+			originValue, err := r.dbWorker.SetSingleGlobalVarAndReturnOrigin("slave_exec_mode", newValue)
+			if err != nil {
+				return err
+			}
+			if originValue != newValue {
+				defer func() {
+					if err = r.dbWorker.SetSingleGlobalVar("slave_exec_mode", originValue); err != nil {
+						logger.Error("fail to set back slave_exec_mode=%s", originValue)
+					}
+				}()
+			}
+		}
+
+		// beware of the command line length here
+		outFile := filepath.Join(r.taskDir, fmt.Sprintf("import_binlog_%s.log", r.WorkID))
+		errFile := filepath.Join(r.taskDir, fmt.Sprintf("import_binlog_%s.err", r.WorkID))
+		cmd := fmt.Sprintf(
+			`cd %s; %s %s | %s >%s 2>%s`,
+			r.BinlogDir, r.binlogCli, binlogFiles, r.mysqlCli, outFile, errFile,
+		)
+		logger.Info(mysqlutil.ClearSensitiveInformation(mysqlutil.RemovePassword(cmd)))
+		stdoutStr, err := osutil.ExecShellCommand(false, cmd)
+		if err != nil {
+			if strings.TrimSpace(stdoutStr) == "" {
+				if errContent, err := osutil.ExecShellCommand(
+					false,
+					fmt.Sprintf("head -2 %s", errFile),
+				); err == nil {
+					if strings.TrimSpace(errContent) != "" {
+						logger.Error(errContent)
+					}
+				}
+			} else {
+				return errors.Errorf("empty stderr: %s", errFile)
+			}
+			return err
+		}
+	} else {
+		return errors.New("flashback=true must have parse_only=true")
+	}
+	return nil
+}
+
+// Import runs import_binlog.sh
+func (r *RecoverBinlog) Import() error {
+	if r.RecoverOpt.IdempotentMode {
+		// must run in the main function; the defer restores the original value
+		newValue := "IDEMPOTENT"
+		originValue, err := r.dbWorker.SetSingleGlobalVarAndReturnOrigin("slave_exec_mode", newValue)
+		if err != nil {
+			return err
+		}
+		if originValue != newValue {
+			defer func() {
+				if err = r.dbWorker.SetSingleGlobalVar("slave_exec_mode", originValue); err != nil {
+					logger.Error("fail to set back slave_exec_mode=%s", originValue)
+				}
+			}()
+		}
+	}
+	script := fmt.Sprintf(`cd %s && %s > import.log 2>import.err`, r.taskDir, r.importScript)
+	logger.Info("run script: %s", script)
+	_, err := osutil.ExecShellCommand(false, script)
+	if err != nil {
+		return errors.Wrap(err, "run import_binlog.sh")
+	}
+	return nil
+}
+
+// WaitDone TODO: use lsof to see which binlog mysqlbinlog currently holds open, to report progress
+func (r *RecoverBinlog) WaitDone() error {
+	return nil
+}
+
+// PostCheck TODO: verify the time in infodba_schema.master_slave_check is within 65s of target_time
+func (r *RecoverBinlog) PostCheck() error {
+	return nil
+}
+
+// GetDBWorker returns the db connection
+func (r *RecoverBinlog) GetDBWorker() *native.DbWorker {
+	return r.dbWorker
+}
+
+// GetTaskDir returns the task directory
+func (r *RecoverBinlog) GetTaskDir() string {
+	return r.taskDir
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/restore.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/restore.go
new file mode 100644
index 0000000000..301d7475c6
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/restore.go
@@ -0,0 +1,236 @@
+// Package restore defines the backup-restore components
+package restore
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/dbbackup"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/tools"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// Restore is the interface every backup-restore implementation satisfies
+type Restore interface {
+	Init() error
+	PreCheck() error
+	Start() error
+	WaitDone() error
+	PostCheck() error
+	ReturnChangeMaster() (*mysqlutil.ChangeMaster, error)
+}
+
+// RestoreParam holds restore parameters shared by all restore types
+type RestoreParam struct {
+	BackupInfo
+	// client tools used for recovery; defaults are provided when not set
+	Tools tools.ToolSet `json:"tools"`
+	// the local target instance to restore into
+	TgtInstance native.InsObject `json:"tgt_instance"`
+	// ip:port of the backup source instance, used to generate the change master statement
+	SrcInstance native.Instance `json:"src_instance"`
+	// whether to run change master to src_instance after the restore completes
+	ChangeMaster bool `json:"change_master"`
+	// work_id identifies this restore; defaults to the current timestamp when empty
+	WorkID string `json:"work_id"`
+	// restore options such as databases/tables and whether binlog will be imported; only effective for logical restore
+	RestoreOpt *RestoreOpt `json:"restore_opts"`
+}
+
+// RestoreOpt restore filter options
+type RestoreOpt struct {
+	// databases to restore; currently only effective for logical restore
+	Databases       []string `json:"databases"`
+	Tables          []string `json:"tables"`
+	IgnoreDatabases []string `json:"ignore_databases"`
+	IgnoreTables    []string `json:"ignore_tables"`
+
+	RecoverPrivs bool `json:"recover_privs"`
+	// only needed for point-in-time restore: whether binlog will be recovered afterwards. doSlave
+	// scenarios do not need recover_binlog. this option controls the following binlog-recover step.
+	// when recovering binlog, the instance's complete schema must be restored: a logical restore
+	// that brings back only part of the data still needs the full table structure
+	WillRecoverBinlog bool `json:"recover_binlog"`
+	// useful for table-level point-in-time restore: with statement/mixed format all data must be
+	// imported; with row, only the chosen databases/tables need importing, and recover-binlog may
+	// set quick_mode=true to also restore binlog for the chosen tables only
+	SourceBinlogFormat string `json:"source_binlog_format" enums:",ROW,STATEMENT,MIXED"`
+}
+
+// FilterOpt filter options
+type FilterOpt struct {
+	Databases       []string `json:"databases"`        // unused
+	Tables          []string `json:"tables"`           // unused
+	IgnoreDatabases []string `json:"ignore_databases"` // adds the built-in mysql / infodba_schema databases
+	IgnoreTables    []string `json:"ignore_tables"`    // unused
+}
+
+// RestoreDRComp wraps the Restore interface
+type RestoreDRComp struct {
+	GeneralParam *components.GeneralParam `json:"general"` // common parameters
+	// restore parameters, copied into the concrete Restore implementation; see ChooseType
+	Params       RestoreParam            `json:"extend"`
+	restore      Restore                 // the interface
+	changeMaster *mysqlutil.ChangeMaster // change master info collected after the restore
+	// whether this run resumes an interrupted one
+	Resume bool `json:"resume"`
+}
+
+// Init delegates to the chosen Restore implementation
+func (r *RestoreDRComp) Init() error {
+	return r.restore.Init()
+}
+
+// PreCheck validates the parameters and delegates
+func (r *RestoreDRComp) PreCheck() error {
+	if r.Resume && r.Params.WorkID == "" {
+		return errors.New("resume mode requires the work_id param")
+	}
+	if r.Params.ChangeMaster && components.GetAccountRepl(r.GeneralParam).ReplUser == "" {
+		return errors.New("enable change_master should have repl_user given")
+	}
+	if r.Params.RestoreOpt != nil && r.Params.BackupInfo.backupType == cst.TypeXTRA {
+		logger.Warn("xtrabackup recover does not support databases/table filter, recover all")
+	}
+	return r.restore.PreCheck()
+}
+
+// Start delegates to the chosen Restore implementation
+func (r *RestoreDRComp) Start() error {
+	return r.restore.Start()
+}
+
+// WaitDone delegates to the chosen Restore implementation
+func (r *RestoreDRComp) WaitDone() error {
+	return r.restore.WaitDone()
+}
+
+// PostCheck delegates to the chosen Restore implementation
+func (r *RestoreDRComp) PostCheck() error {
+	return r.restore.PostCheck()
+}
+
+// OutputCtx prints the change master info as output context
+func (r *RestoreDRComp) OutputCtx() error {
+	m := r.Params
+	logger.Warn("restore-dr comp params: %+v", m)
+	if !(m.backupHost == m.SrcInstance.Host && m.backupPort == m.SrcInstance.Port) {
+		logger.Warn(
+			"backup instance[%s:%d] is not src_instance[%s:%d]",
+			m.backupHost, m.backupPort, m.SrcInstance.Host, m.SrcInstance.Port,
+		)
+	}
+	if cm, err := r.restore.ReturnChangeMaster(); err != nil {
+		return err
+	} else {
+		logger.Warn("ReturnChangeMaster %+v. sql: %s", cm, cm.GetSQL())
+		r.changeMaster = cm
+		return components.PrintOutputCtx(cm)
+	}
+}
+
+// BuildChangeMaster generates the change master component for the next act
+func (r *RestoreDRComp) BuildChangeMaster() *mysql.BuildMSRelationComp {
+	cm := r.changeMaster
+	if !r.Params.ChangeMaster {
+		return nil
+	} else if r.Params.ChangeMaster {
+		cm.MasterUser = r.GeneralParam.RuntimeAccountParam.ReplUser
+		cm.MasterPassword = r.GeneralParam.RuntimeAccountParam.ReplPwd
+	}
+	comp := mysql.BuildMSRelationComp{
+		GeneralParam: r.GeneralParam,
+		Params: &mysql.BuildMSRelationParam{
+			Host:        r.Params.TgtInstance.Host,
+			Port:        r.Params.TgtInstance.Port,
+			MasterHost:  cm.MasterHost,
+			MasterPort:  cm.MasterPort,
+			BinFile:     cm.MasterLogFile,
+			BinPosition: cm.MasterLogPos,
+		},
+	}
+	comp.GeneralParam.RuntimeAccountParam.AdminUser = r.Params.TgtInstance.User
+	comp.GeneralParam.RuntimeAccountParam.AdminPwd = r.Params.TgtInstance.Pwd
+	return &comp
+}
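+
+// Orchestration sketch (hypothetical caller code): ChooseType below must run
+// before Init so that backup_type selects the Restore implementation:
+//
+//	comp := RestoreDRComp{ /* GeneralParam and Params decoded from JSON */ }
+//	steps := []func() error{comp.ChooseType, comp.Init, comp.PreCheck,
+//		comp.Start, comp.WaitDone, comp.PostCheck, comp.OutputCtx}
+//	for _, s := range steps {
+//		if err := s(); err != nil {
+//			return // abort on the first failed step
+//		}
+//	}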
+// ChooseType chooses the restore type by backup_type
+// must run before Init
+func (r *RestoreDRComp) ChooseType() error {
+	b := &r.Params.BackupInfo
+	if err := b.GetBackupMetaFile(dbbackup.BACKUP_INDEX_FILE); err != nil {
+		logger.Warn("get index file failed: %s, try to get info file", err.Error())
+		if err := b.GetBackupMetaFile(dbbackup.MYSQL_INFO_FILE); err != nil {
+			return err
+		}
+	}
+	b.backupType = strings.ToLower(b.backupType) // compare in lower case
+	if b.backupType == cst.TypeXTRA {
+		xload := XLoad{
+			RestoreParam: &r.Params,
+		}
+		r.restore = &xload
+	} else if b.backupType == cst.TypeGZTAB {
+		mload := MLoad{
+			RestoreParam: &r.Params,
+		}
+		r.restore = &mload
+	} else if b.backupType == cst.BackupTypeLogical || b.backupType == cst.BackupTypePhysical {
+		dbloader := DBLoader{
+			RestoreParam: &r.Params,
+		}
+		r.restore = &dbloader
+	} else {
+		return errors.Errorf("unknown backup_type [%s]", b.backupType)
+	}
+	logger.Info("choose recover type [%s], indexObj.BackupType=%s", b.backupType, b.indexObj.BackupType)
+	return nil
+}
+
+// Example returns an example RestoreDRComp
+func (r *RestoreDRComp) Example() interface{} {
+	comp := RestoreDRComp{
+		Params: RestoreParam{
+			BackupInfo: BackupInfo{
+				WorkDir:   "/data1/dbbak/",
+				BackupDir: "/data/dbbak/",
+				BackupFiles: map[string][]string{
+					"info": {"DBHA_VM-71-150-centos_x_20000_20220831_200425.info"},
+				},
+			},
+			Tools:       *tools.NewToolSetWithPickNoValidate(tools.ToolMload, tools.ToolXLoad, tools.ToolMysqlclient),
+			TgtInstance: common.InstanceObjExample,
+			SrcInstance: common.InstanceExample,
+			WorkID:      "",
+			RestoreOpt: &RestoreOpt{
+				Databases: []string{"db1"},
+				Tables:    []string{"tb1"},
+			},
+		},
+		GeneralParam: &components.GeneralParam{
+			RuntimeAccountParam: components.RuntimeAccountParam{
+				MySQLAccountParam: common.AccountReplExample,
+			},
+		},
+	}
+	return comp
+}
+
+// ExampleOutput returns an example change master output
+func (r *RestoreDRComp) ExampleOutput() interface{} {
+	return &mysqlutil.ChangeMaster{
+		MasterHost:     "1.1.1.1",
+		MasterPort:     3306,
+		MasterUser:     "xx",
+		MasterPassword: "yy",
+		MasterLogFile:  "binlog.000001",
+		MasterLogPos:   4,
+		ChangeSQL:      "change master to xxxx",
+	}
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/xload_repaire_util.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/xload_repaire_util.go
new file mode 100644
index 0000000000..dbc8c2bfa6
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/xload_repaire_util.go
@@ -0,0 +1,267 @@
+package restore
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"path"
+	"path/filepath"
+	"regexp"
+	"runtime/debug"
+	"strings"
+	"sync"
+)
+
+// RepairUserAdminByLocal resets the ADMIN user's password on the local instance
+func (x *XLoad) RepairUserAdminByLocal(user, password string) error {
+	sql := fmt.Sprintf(
+		"UPDATE `mysql`.`user` SET `authentication_string`=password('%s') WHERE `user`='%s'",
+		password, user,
+	)
+	logger.Info("RepairUserAdminByLocal: %s", sql)
+	if _, err := x.dbWorker.Exec(sql); err != nil {
+		return err
+	}
+
+	// alternatives:
+	// ALTER USER ADMIN@'localhost' IDENTIFIED BY 'auth_string';
+	// SET PASSWORD FOR 'ADMIN'@'localhost' = 'auth_string';
+	// ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY 'XXX';
+	if _, err := x.dbWorker.Exec("FLUSH PRIVILEGES"); err != nil {
+		return err
+	}
+	return nil
+}
+
+// RepairAndTruncateMyIsamTables repairs MyISAM tables, truncating those in test/infodba_schema
+func (x *XLoad) RepairAndTruncateMyIsamTables() error {
+	systemDbs := util.StringsRemove(native.DBSys, native.TEST_DB)
+	sql := fmt.Sprintf(
+		`SELECT table_schema, table_name FROM information_schema.tables `+
+			`WHERE table_schema not in (%s) AND engine = 'MyISAM'`,
+		mysqlutil.UnsafeIn(systemDbs, "'"),
+	)
+
+	rows, err := x.dbWorker.Db.Query(sql)
+	if err != nil {
+		return fmt.Errorf("query myisam tables error,detail:%w,sql:%s", err, sql)
+	}
+	defer rows.Close()
+
+	wg := sync.WaitGroup{}
+	errorChan := make(chan error, 1)
+	finishChan := make(chan bool, 1)
+	for rows.Next() {
+		var db string
+		var table string
+		if err := rows.Scan(&db, &table); err != nil {
+			return err
+		}
+		wg.Add(1)
+		go func(worker *native.DbWorker, db, table string) {
+			defer wg.Done()
+			defer func() {
+				if r := recover(); r != nil {
+					logger.Info("panic goroutine inner error!%v;%s", r, string(debug.Stack()))
+					errorChan <- fmt.Errorf("panic goroutine inner error!%v", r)
+					return
+				}
+			}()
+
+			sql := ""
+			if db == native.TEST_DB || db == native.INFODBA_SCHEMA {
+				sql = fmt.Sprintf("truncate table %s.%s", db, table)
+			} else {
+				sql = fmt.Sprintf("repair table %s.%s", db, table)
+			}
+			_, err := worker.Exec(sql)
+			if err != nil {
+				errorChan <- fmt.Errorf("repair myisam table error,sql:%s,error:%w", sql, err)
+				return
+			}
+		}(x.dbWorker, db, table)
+	}
+	go func() {
+		wg.Wait()
+		close(finishChan)
+	}()
+
+	select {
+	case <-finishChan:
+	case err := <-errorChan:
+		return err
+	}
+	return nil
+}
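+
+// For illustration, the per-table statements issued above look like
+// (db/table names assumed):
+//
+//	truncate table test.t1;  -- tables in test / infodba_schema
+//	repair table db1.tb1;    -- any other MyISAM table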
+
+// RepairPrivileges rewrites the grant tables so privileges from the backup host apply to the target host
+func (x *XLoad) RepairPrivileges() error {
+	srcHost := x.BackupInfo.infoObj.BackupHost
+	tgtHost := x.TgtInstance.Host
+	localHost := []string{"localhost", "127.0.0.1"}
+	myUsers := []string{"ADMIN", "sync", "repl"}
+
+	srcHostUnsafe := mysqlutil.UnsafeEqual(srcHost, "'")
+	tgtHostUnsafe := mysqlutil.UnsafeEqual(tgtHost, "'")
+	localHostUnsafe := mysqlutil.UnsafeIn(localHost, "'")
+	myUsersUnsafe := mysqlutil.UnsafeIn(myUsers, "'")
+
+	var batchSQLs []string
+	// delete src host's ADMIN/sync user, but not localhost
+	sql1 := fmt.Sprintf(
+		"DELETE FROM mysql.user WHERE `user` IN (%s) AND `host` = %s AND `host` NOT IN (%s);",
+		myUsersUnsafe, srcHostUnsafe, localHostUnsafe,
+	)
+	batchSQLs = append(batchSQLs, sql1)
+
+	// update src host to new, but not ADMIN/sync/repl
+	sql2s := []string{
+		fmt.Sprintf(
+			"UPDATE mysql.user SET `host`=%s WHERE `host`=%s AND User not in (%s);",
+			tgtHostUnsafe, srcHostUnsafe, myUsersUnsafe,
+		),
+		fmt.Sprintf(
+			"UPDATE mysql.db SET `host`=%s WHERE `host`=%s AND User not in (%s);",
+			tgtHostUnsafe, srcHostUnsafe, myUsersUnsafe,
+		),
+		fmt.Sprintf(
+			"UPDATE mysql.tables_priv SET `host`=%s WHERE `host`=%s AND User not in (%s);",
+			tgtHostUnsafe, srcHostUnsafe, myUsersUnsafe,
+		),
+	}
+	batchSQLs = append(batchSQLs, sql2s...)
+
+	// delete src host users, but not localhost
+	sql3 := fmt.Sprintf(
+		"DELETE FROM mysql.user WHERE `host` IN(%s) AND `host` NOT IN (%s);",
+		srcHostUnsafe, localHostUnsafe,
+	)
+	batchSQLs = append(batchSQLs, sql3)
+
+	// flush
+	sql4 := "flush privileges;"
+	batchSQLs = append(batchSQLs, sql4)
+	logger.Info("RepairPrivileges: %+v", batchSQLs)
+	if _, err := x.dbWorker.ExecMore(batchSQLs); err != nil {
+		return err
+	}
+	return nil
+}
+
+// CleanEnv cleans the local data directories for a physical restore
+func (x *XLoad) CleanEnv(dirs []string) error {
+	// the mysqld process should be down and the port closed by now
+	if osutil.IsPortUp(x.TgtInstance.Host, x.TgtInstance.Port) {
+		return fmt.Errorf("port %d is still opened", x.TgtInstance.Port)
+	}
+
+	var dirArray []string
+	for _, v := range dirs {
+		if strings.TrimSpace(x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, v, "")) == "" {
+			logger.Warn(fmt.Sprintf("my.cnf %s is empty!!", v))
+			continue
+		}
+		switch v {
+		case "relay-log", "relay_log":
+			val, err := x.myCnf.GetRelayLogDir()
+			if err != nil {
+				return err
+			}
+			reg := regexp.MustCompile(cst.RelayLogFileMatch)
+			if result := reg.FindStringSubmatch(val); len(result) == 2 {
+				relaylogdir := result[1]
+				dirArray = append(dirArray, "rm -rf "+relaylogdir+"/*")
+			}
+		case "log_bin", "log-bin":
+			val, err := x.myCnf.GetMySQLLogDir()
+			if err != nil {
+				return err
+			}
+			reg := regexp.MustCompile(cst.BinLogFileMatch)
+			if result := reg.FindStringSubmatch(val); len(result) == 2 {
+				binlogdir := result[1]
+				// TODO every rm -rf here should guard against accidents like rm -rf / binlog.xxx
+				dirArray = append(dirArray, "rm -rf "+binlogdir+"/*")
+			}
+		case "slow_query_log_file", "slow-query-log-file":
+			if val := x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, "slow_query_log_file", ""); val != "" {
+				dirArray = append(dirArray, "rm -f "+val)
+			}
+		default:
+			val := x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, v, "")
+			if strings.TrimSpace(val) != "" && strings.TrimSpace(val) != "/" {
+				dirArray = append(dirArray, "rm -rf "+val+"/*")
+			}
+		}
+	}
+	scripts := strings.Join(dirArray, "\n")
+	logger.Info("CleanEnv: %s", scripts)
+	// run with the mysql os user
+	if _, err := osutil.ExecShellCommand(false, scripts); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ReplaceMycnf godoc
+// the new instance's innodb_data_file_path etc. must match the source instance
+// (excluding server_id, server_uuid)
+func (x *XLoad) ReplaceMycnf(items []string) error {
+	backupMyCnfPath := x.getBackupCnfName()
+	backupMyCnf, err := util.LoadMyCnfForFile(backupMyCnfPath)
+	if err != nil {
+		return err
+	}
+	bakCnfMap := backupMyCnf.SaveMySQLConfig2Object()
+	var itemsExclude = []string{"server_id", "server_uuid"}
+	itemMap := map[string]string{}
+	for _, key := range items {
+		if util.StringsHas(itemsExclude, key) {
+			continue
+		}
+		itemMap[key] = bakCnfMap.Section[util.MysqldSec].KvMap[key]
+	}
+	if len(itemMap) > 0 {
+		logger.Info("ReplaceMycnf new: %v", itemMap)
+		if err = x.myCnf.ReplaceValuesToFile(itemMap); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ChangeDirOwner fixes directory ownership; needs root
+func (x *XLoad) ChangeDirOwner(dirs []string) error {
+	var commands []string
+	for _, v := range dirs {
+		// skip directories that are not configured in my.cnf
+		if p := x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, v, ""); p != "" {
+			if filepath.IsAbs(p) {
+				commands = append(commands, fmt.Sprintf("chown -R mysql %s", path.Dir(p)))
+			}
+			// @todo relative paths: ignore or fail?
+		}
+	}
+	script := strings.Join(commands, "\n")
+	logger.Info("ChangeDirOwner: %s", script)
+	if _, err := osutil.ExecShellCommand(false, script); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (x *XLoad) getBackupCnfName() string {
+	return fmt.Sprintf("%s/%s", x.targetDir, "backup-my.cnf")
+}
+
+func (x *XLoad) getSocketName() string {
+	sock := x.myCnf.GetMyCnfByKeyWithDefault(util.MysqldSec, "socket", "/tmp/mysql.sock")
+	return sock
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/xload_restore.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/xload_restore.go
new file mode 100644
index 0000000000..1ed02c9ad6
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore/xload_restore.go
@@ -0,0 +1,332 @@
+package restore
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/computil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/tools"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"regexp"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// XLoad restores a physical (xtrabackup) backup
+type XLoad struct {
+	*RestoreParam
+
+	taskDir   string           // depends on BackupInfo.WorkDir: ${work_dir}/doDr_${id}/${port}/
+	targetDir string           // where the backup is untarred: ${taskDir}/backupBaseName/
+	dbWorker  *native.DbWorker // TgtInstance
+	myCnf     *util.CnfFile
+}
+
+func (x *XLoad) initDirs() error {
+	if x.BackupInfo.WorkDir == "" {
+		return errors.Errorf("work_dir %s should not be empty", x.WorkDir)
+	}
+	if x.WorkID == "" {
+		x.WorkID = newTimestampString()
+	}
+	x.taskDir = fmt.Sprintf("%s/doDr_%s/%d", x.WorkDir, x.WorkID, x.TgtInstance.Port)
+	if err := osutil.CheckAndMkdir("", x.taskDir); err != nil {
+		return err
+	}
+	if x.backupBaseName == "" {
+		return errors.Errorf("backup file baseName [%s] error", x.backupBaseName)
+	}
+	x.targetDir = fmt.Sprintf("%s/%s", x.taskDir, x.backupBaseName)
+	return nil
+}
+
+// Init loads my.cnf, validates the backup files and connects the target instance
+func (x *XLoad) Init() error {
+	cnfFileName := util.GetMyCnfFileName(x.TgtInstance.Port)
+	cnfFile := &util.CnfFile{FileName: cnfFileName}
+	var err error
+	if err = cnfFile.Load(); err != nil {
+		logger.Info("get my.cnf failed %v", cnfFileName)
+		return errors.WithStack(err)
+	} else {
+		x.myCnf = cnfFile
+	}
+	if err = x.BackupInfo.infoObj.ValidateFiles(); err != nil {
+		return err
+	}
+	if err = x.initDirs(); err != nil {
+		return err
+	}
+	x.dbWorker, err = x.TgtInstance.Conn()
+	if err != nil {
+		return errors.Wrap(err, "failed to connect to target instance")
+	}
+	logger.Info("XLoad params: %+v", x)
+	return nil
+}
+
+// PreCheck picks the xload and qpress tools
+func (x *XLoad) PreCheck() error {
+	toolset, err := tools.NewToolSetWithPick(tools.ToolXLoad, tools.ToolQPress)
+	if err != nil {
+		return err
+	}
+	if err := x.Tools.Merge(toolset); err != nil {
+		return err
+	}
+	// TODO check the tools are executable
+	// TODO physical restore requires the target to be an idle instance
+	return nil
+}
+
+// Start untars the backup and runs the xload import
+func (x *XLoad) Start() error {
+	if err := x.BackupInfo.infoObj.UntarFiles(x.taskDir); err != nil {
+		return err
+	}
+	if err := x.DoXLoad(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// WaitDone TODO
+func (x *XLoad) WaitDone() error {
+	return nil
+}
+
+// PostCheck verifies ADMIN can log in: a physical restore may have changed the password
+func (x *XLoad) PostCheck() error {
+	_, err := x.TgtInstance.Conn()
+	if err != nil {
+		return errors.Wrap(err, "failed to connect to target instance")
+	}
+	return nil
+}
+
+// ReturnChangeMaster returns the change master position derived from the backup
+func (x *XLoad) ReturnChangeMaster() (*mysqlutil.ChangeMaster, error) {
+	return x.getChangeMasterPos(x.SrcInstance)
+}
+
+// getChangeMasterPos godoc
+// an xtrabackup taken on a master only has xtrabackup_binlog_info
+// an xtrabackup taken on a slave has xtrabackup_binlog_info and xtrabackup_slave_info
+func (x *XLoad) getChangeMasterPos(masterInst native.Instance) (*mysqlutil.ChangeMaster, error) {
+	XtraSlaveInfoFile := filepath.Join(x.targetDir, "xtrabackup_slave_info") // position on the backup's own master
+	XtraBinlogInfo := filepath.Join(x.targetDir, "xtrabackup_binlog_info")   // position of the backed-up instance itself, master or slave
+
+	binlogInfo, err := osutil.ReadFileString(XtraBinlogInfo)
+	if err != nil {
+		return nil, err
+	}
+	// todo Repeater?
+	if x.infoObj.BackupRole == cst.BackupRoleMaster ||
+		(x.infoObj.BackupRole == cst.BackupRoleSlave &&
+			x.infoObj.BackupHost == masterInst.Host && x.infoObj.BackupPort == masterInst.Port) {
+		if cm, err := mysqlutil.ParseXtraBinlogInfo(binlogInfo); err != nil {
+			return nil, err
+		} else {
+			cm.MasterHost = x.infoObj.BackupHost
+			cm.MasterPort = x.infoObj.BackupPort
+			return cm, nil
+		}
+	} else if x.infoObj.BackupRole == cst.BackupRoleSlave {
+		if slaveInfo, err := osutil.ReadFileString(XtraSlaveInfoFile); err != nil {
+			return nil, err
+		} else {
+			cm := &mysqlutil.ChangeMaster{ChangeSQL: slaveInfo}
+			if err := cm.ParseChangeSQL(); err != nil {
+				return nil, errors.Wrap(err, slaveInfo)
+			}
+			return cm, nil
+		}
+	} else {
+		return nil, errors.Errorf("unknown backup_role %s", x.infoObj.BackupRole)
+	}
+}
+
+// DoXLoad runs the physical restore; every step below must be retryable
+func (x *XLoad) DoXLoad() error {
+	// shut down the local mysql
+	inst := x.TgtInstance
+	if err := computil.ShutdownMySQLBySocket2(inst.User, inst.Pwd, inst.Socket); err != nil {
+		logger.Error("shutdown mysqld failed %s", inst.Socket)
+		return err
+	}
+
+	// clean local directories
+	if err := x.cleanXtraEnv(); err != nil {
+		return err
+	}
+
+	// adjust my.cnf
+	if err := x.doReplaceCnf(); err != nil {
+		return err
+	}
+
+	// restore the physical full backup
+	if err := x.importData(); err != nil {
+		return err
+	}
+
+	// fix directory ownership
+	if err := x.changeDirOwner(); err != nil {
+		return err
+	}
+
+	// start mysql to repair privileges
+	startParam := computil.StartMySQLParam{
+		MediaDir:        cst.MysqldInstallPath,
+		MyCnfName:       x.myCnf.FileName,
+		MySQLUser:       inst.User, // use ADMIN
+		MySQLPwd:        inst.Pwd,
+		Socket:          inst.Socket,
+		SkipGrantTables: true, // start with skip-grant-tables to repair ADMIN
+	}
+	if _, err := startParam.StartMysqlInstance(); err != nil {
+		return err
+	}
+
+	// after a physical restore the ADMIN password matches the backup instance (cluster?); reset it
+	if err := x.RepairUserAdminByLocal(native.DBUserAdmin, inst.Pwd); err != nil {
+		return err
+	}
+
+	// repair privileges
+	if err := x.RepairPrivileges(); err != nil {
+		return err
+	}
+
+	// restart mysql (without skip-grant-tables)
+	startParam.SkipGrantTables = false
+	if _, err := startParam.RestartMysqlInstance(); err != nil {
+		return err
+	}
+
+	// repair MyISAM tables
+	if err := x.RepairAndTruncateMyIsamTables(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (x *XLoad) cleanXtraEnv() error {
+	dirs := []string{
+		"datadir",
+		"innodb_log_group_home_dir",
+		"innodb_data_home_dir",
+		"relay-log",
+		"log_bin",
+		"tmpdir",
+	}
+	return x.CleanEnv(dirs)
+}
+
+// doReplaceCnf godoc
+// todo consider using the mycnf-change module for this
+func (x *XLoad) doReplaceCnf() error {
+	items := []string{
+		"innodb_data_file_path",
+		"innodb_log_files_in_group",
+		"innodb_log_file_size",
+		"tokudb_cache_size",
+	}
+	return x.ReplaceMycnf(items)
+}
+
+func (x *XLoad) importData() error {
+	reg := regexp.MustCompile(`^\s*(.*)/mysqldata/.*$`)
+	datadir, err := x.myCnf.GetMySQLDataDir()
+	if err != nil {
+		return err
+	}
+	array := reg.FindStringSubmatch(datadir)
+	if len(array) != 2 {
+		return fmt.Errorf(
+			"get mysqldata dir error,len not 2,is %d,info:(%s)",
+			len(array), strings.Join(array, ";"),
+		)
+	}
+	mysqlDataDir := array[1]
+
+	xloadPath, err := x.Tools.Get(tools.ToolXLoad)
+	if err != nil {
+		return err
+	}
+	param := XLoadParam{
+		Host:         x.TgtInstance.Host,
+		MysqlDataDir: mysqlDataDir,
+		MyCnfFile:    util.GetMyCnfFileName(x.TgtInstance.Port),
+		FilePath:     x.targetDir,
+		TaskDir:      x.taskDir,
+		Client:       xloadPath,
+	}
+	if err := XLoadData(param); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (x *XLoad) changeDirOwner() error {
+	dirs := []string{
+		"datadir",
+		"innodb_log_group_home_dir",
+		"innodb_data_home_dir",
+		"relay_log",
+		"tmpdir",
+		"log_bin",
+		"slow_query_log_file",
+	}
+	return x.ChangeDirOwner(dirs)
+}
+
+// DecompressMetaFile decompresses .qp files and writes the same file name without the suffix
+// ex: /home/mysql/dbbackup/xtrabackup/qpress -do xtrabackup_info.qp > xtrabackup_info
+func (x *XLoad) DecompressMetaFile(dir string) error {
+	client, err := x.Tools.Get(tools.ToolQPress)
+	if err != nil {
+		return err
+	}
+	files := []string{
+		"xtrabackup_timestamp_info",
+		"backup-my.cnf",
+		"xtrabackup_binlog_info",
+		"xtrabackup_info",
+		"xtrabackup_slave_info",
+		"xtrabackup_galera_info",
+	}
+
+	for _, file := range files {
+		compressedFile := fmt.Sprintf("%s.qp", file)
+		if _, err := os.Stat(compressedFile); os.IsNotExist(err) {
+			continue
+		}
+		errFile := fmt.Sprintf("%s.err", compressedFile)
+		script := fmt.Sprintf(`%s -do %s/%s`, client, dir, compressedFile)
+		cmd := osutil.FileOutputCmd{
+			Cmd: exec.Cmd{
+				Path: "/bin/bash",
+				Args: []string{"/bin/bash", "-c", script},
+			},
+			StdOutFile: path.Join(dir, file),
+			StdErrFile: path.Join(dir, errFile),
+		}
+		if err := cmd.Run(); err != nil {
+			return errors.Wrapf(err, "decompress file %s failed, plz check log:%s", compressedFile, errFile)
+		}
+	}
+	return nil
+}
+ "os" + "os/exec" + "strconv" + "strings" + "time" + + "github.com/pkg/errors" +) + +// XLoadParam TODO +type XLoadParam struct { + Client string `json:"client"` + TaskDir string `json:"taskDir"` // taskDir 已经包含了 work_id + FilePath string `json:"filePath"` + MyCnfFile string `json:"myCnfFile"` + MysqlDataDir string `json:"mysqlDataDir"` + + Host string `json:"host"` +} + +// XLoadCheckParam TODO +type XLoadCheckParam struct { + Client string `json:"client"` +} + +// XLoadData TODO +func XLoadData(m XLoadParam) error { + logger.Info("XLoadData param: %+v", m) + xloadLogFile := fmt.Sprintf("%s/xload.log", m.TaskDir) + xloadLogoutFile := fmt.Sprintf("%s/xload.out", m.TaskDir) + + // generate log4perl.conf + log4perlConf := fmt.Sprintf("%s/log4perl.conf", m.TaskDir) + logConfContent := `log4perl.rootLogger=INFO, LOGFILE +log4perl.appender.LOGFILE = Log::Log4perl::Appender::File +log4perl.appender.LOGFILE.filename = ` + xloadLogFile + logConfContent = logConfContent + ` +log4perl.appender.LOGFILE.mode = append +log4perl.appender.LOGFILE.layout = PatternLayout +log4perl.appender.LOGFILE.layout.ConversionPattern = %d %-5p %c %F:%L - %m%n` + + if err := ioutil.WriteFile(log4perlConf, []byte(logConfContent), os.ModePerm); err != nil { + return fmt.Errorf("write %s failed, err:%w", log4perlConf, err) + } + + // do xload + backupPath := m.FilePath // fmt.Sprintf("%s/%s", m.TaskDir, m.FilePath) + script := fmt.Sprintf( + `/usr/bin/perl %s --backup-path=%s --defaults-file=%s --log-config-file=%s `, + m.Client, backupPath, m.MyCnfFile, log4perlConf, + ) + logger.Info("XLoadData: %s", script) + cmd := &osutil.FileOutputCmd{ + Cmd: exec.Cmd{ + Path: "/bin/bash", + Args: []string{"/bin/bash", "-c", script}, + }, + StdOutFile: xloadLogoutFile, + StdErrFile: xloadLogoutFile, + } + if err := cmd.Start(); err != nil { + return err + } + + done := make(chan error, 1) + + go func(cmd *osutil.FileOutputCmd) { + done <- cmd.Wait() + }(cmd) + + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + + // 检查任务进程 + // 检查进程是否还在运行 +For: + for { + select { + case err := <-done: + if err != nil { + return errors.Wrapf(err, "xload导入数据失败, script:%s", script) + } + break For + case <-ticker.C: + _, _, _ = m.checkExistRunningXLoad() + } + } + + // 检查结果 + isSuccess, errorNum, err := m.checkXLoadComplete(xloadLogFile) + if err != nil { + return err + } + if !isSuccess { + return errors.Errorf("xload导入失败,error数量:%d, 错误详情:%s", errorNum, xloadLogFile) + } + + // chown mysqldata + command := fmt.Sprintf("cd %s;chown -R mysql mysqldata;", m.MysqlDataDir) + _, err = exec.Command("/bin/bash", "-c", command).CombinedOutput() + if err != nil { + return errors.Wrap(err, script) + } + + return nil +} + +// checkExistRunningXLoad 检查进程 +func (m *XLoadParam) checkExistRunningXLoad() (bool, []string, error) { + return true, nil, nil +} + +func (m *XLoadParam) checkXLoadComplete(errFile string) (bool, int, error) { + script := GrepError + errFile + ` |wc -l` + out, err := exec.Command("/bin/bash", "-c", script).CombinedOutput() + if err != nil { + return false, 0, errors.Wrap(err, script) + } + outStr := strings.TrimSpace(string(out)) + errorNum, err := strconv.Atoi(outStr) + if err != nil { + return false, 0, errors.Wrapf(err, "命令: %s 的结果转换失败.结果:%s", script, outStr) + } + if errorNum > 0 { + return false, errorNum, errors.Errorf("存在 %s 处错误. 
命令:%s", outStr, script) + } + + script = `grep 'task is COMPLETE' ` + errFile + ` |wc -l` + out, err = exec.Command("/bin/bash", "-c", script).CombinedOutput() + if err != nil { + return false, 0, errors.Wrap(err, script) + } + outStr = strings.TrimSpace(string(out)) + isComplete, err := strconv.Atoi(outStr) + if err != nil { + return false, 0, errors.Wrapf(err, "命令: %s 的结果转换失败.结果:%s", script, outStr) + } + if isComplete < 1 { + return false, 0, errors.New("COMPLETE not found。返回:" + outStr) + } + + return true, 0, nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback.go new file mode 100644 index 0000000000..e197807409 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback.go @@ -0,0 +1,162 @@ +package rollback + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/common" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore" + "dbm-services/mysql/db-tools/dbactuator/pkg/tools" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "path/filepath" + + "github.com/pkg/errors" +) + +// FlashbackComp TODO +type FlashbackComp struct { + GeneralParam *components.GeneralParam `json:"general"` + Params Flashback `json:"extend"` +} + +// Example TODO +func (c *FlashbackComp) Example() interface{} { + return FlashbackComp{ + Params: Flashback{ + TargetTime: "2022-11-11 00:00:01", + StopTime: "", + FlashbackBinlog: FlashbackBinlog{ + TgtInstance: common.InstanceObjExample, + WorkDir: "/data/dbbak", + BinlogDir: "", + ParseConcurrency: 2, + RecoverOpt: &RecoverOpt{ + Databases: []string{"db1", "db2"}, + Tables: []string{"tb1", "tb2"}, + }, + ToolSet: *tools.NewToolSetWithPickNoValidate(tools.ToolMysqlbinlogRollback), + }, + }, + GeneralParam: &components.GeneralParam{ + RuntimeAccountParam: components.RuntimeAccountParam{ + MySQLAccountParam: common.AccountAdminExample, + }, + }, + } +} + +// Init TODO +func (f *Flashback) Init() error { + toolset, err := tools.NewToolSetWithPick(tools.ToolMysqlbinlogRollback, tools.ToolMysqlclient) + if err != nil { + return err + } + if err = f.ToolSet.Merge(toolset); err != nil { + return err + } + // recover_binlog 用的是 mysqlbinlog, flashback用的是 mysqlbinlog_rollback + f.ToolSet.Set(tools.ToolMysqlbinlog, f.ToolSet.MustGet(tools.ToolMysqlbinlogRollback)) + f.recover = restore.RecoverBinlog{ + TgtInstance: f.TgtInstance, + WorkDir: f.WorkDir, + WorkID: f.WorkID, + ToolSet: f.ToolSet, + QuickMode: true, + SourceBinlogFormat: "ROW", // 这里只代表 flashback 要求 ROW 模式,源实例 binlog_format 在 PreCheck 里会判断 + ParseOnly: true, + ParseConcurrency: f.ParseConcurrency, + RecoverOpt: &restore.MySQLBinlogUtil{ + MySQLClientOpt: &restore.MySQLClientOpt{ + BinaryMode: true, + MaxAllowedPacket: 1073741824, + }, + QueryEventHandler: "error", + Flashback: true, // --flashback 模式 + NotWriteBinlog: false, + IdempotentMode: true, + StartTime: f.TargetTime, + Databases: f.RecoverOpt.Databases, + Tables: f.RecoverOpt.Tables, + DatabasesIgnore: f.RecoverOpt.DatabasesIgnore, + TablesIgnore: f.RecoverOpt.TablesIgnore, + }, + } + // 拼接 recover-binlog 参数 + if err := f.recover.Init(); err != nil { + return err + } + f.dbWorker = f.recover.GetDBWorker() + // 检查起止时间 + dbNowTime, err := f.dbWorker.SelectNow() + if err != nil { + return err + } + if f.StopTime == "" { + f.StopTime = dbNowTime + } 
+
+// downloadBinlogFiles soft-links the binlog files into the download dir
+func (f *Flashback) downloadBinlogFiles() error {
+	f.binlogSaveDir = filepath.Join(f.recover.GetTaskDir(), "binlog")
+	if err := osutil.CheckAndMkdir("", f.binlogSaveDir); err != nil {
+		return err
+	}
+	for _, fn := range f.recover.BinlogFiles {
+		srcFile := filepath.Join(f.recover.BinlogDir, fn)
+		dstFile := filepath.Join(f.binlogSaveDir, fn)
+		if err := osutil.MakeSoftLink(srcFile, dstFile, true); err != nil {
+			return errors.Wrap(err, dstFile)
+		}
+	}
+	// the subsequent binlog-recover step reads from the download directory
+	f.recover.BinlogDir = f.binlogSaveDir
+	return nil
+}
+
+// PreCheck validates the version, the instance role and the binlog format.
+// Only binlogs already on the local host are considered; nothing is downloaded from remote
+func (f *Flashback) PreCheck() error {
+	var err error
+	if err = f.checkVersionAndVars(); err != nil {
+		return err
+	}
+	if err = f.checkDBRole(); err != nil {
+		return err
+	}
+	if err = f.checkDBTableInUse(); err != nil {
+		return err
+	}
+	if f.BinlogDir == "" { // no binlog dir given: look in the instance's local binlog dir and soft-link the files
+		if totalSize, err := f.getBinlogFiles(""); err != nil {
+			return err
+		} else {
+			// tentatively reserve 2x the total binlog size
+			diskSizeNeedMB := (totalSize / 1024 / 1024) * 2
+			logger.Info("parse binlog need disk size %d MB", diskSizeNeedMB)
+		}
+		if err = f.downloadBinlogFiles(); err != nil {
+			return err
+		}
+	}
+
+	if err = f.recover.PreCheck(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Start parses the binlogs and imports the resulting flashback statements
+func (f *Flashback) Start() error {
+	if err := f.recover.Start(); err != nil {
+		return err
+	}
+	if err := f.recover.Import(); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_check.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_check.go
new file mode 100644
index 0000000000..e96b9cab3b
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_check.go
@@ -0,0 +1,223 @@
+package rollback
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/restore"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/tools"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"fmt"
+	"io/ioutil"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-version"
+	"github.com/pkg/errors"
+)
+
+// Flashback database/table flashback.
+// Binlogs are not yet downloaded from the backup system
+type Flashback struct {
+	FlashbackBinlog
+	// flashback target time, i.e. recover-binlog's start_time, second precision, in the target instance's timezone
+	TargetTime string `json:"target_time" validate:"required"`
+	StopTime   string `json:"stop_time"`
+	dbWorker   *native.DbWorker
+	recover    restore.RecoverBinlog
+}
+
+// FlashbackBinlog binlog-related flashback parameters
+type FlashbackBinlog struct {
+	TgtInstance native.InsObject `json:"tgt_instance" validate:"required"`
+	RecoverOpt  *RecoverOpt      `json:"recover_opt" validate:"required"`
+	// when binlog_dir is non-empty the binlogs are already downloaded; when empty they are soft-linked from the local binlog dir
+	BinlogDir string `json:"binlog_dir"`
+	// binlog file list; when omitted, local binlogs matching the time window are discovered automatically
+	BinlogFiles []string `json:"binlog_files"`
+	// working dir for binlog parsing, also holds the run logs
+	WorkDir string `json:"work_dir" validate:"required"`
+	WorkID  string `json:"work_id"`
+	// binlog parse concurrency
+	ParseConcurrency int `json:"parse_concurrency"`
+	// client tools used for recovery; defaults apply when not provided
+	tools.ToolSet
+
+	// directory the binlogs are downloaded to
binlogSaveDir string +} + +// RecoverOpt TODO +type RecoverOpt struct { + // row event 解析指定 databases + Databases []string `json:"databases,omitempty"` + // row event 解析指定 tables + Tables []string `json:"tables,omitempty"` + // row event 解析指定 忽略 databases + DatabasesIgnore []string `json:"databases_ignore,omitempty"` + // row event 解析指定 忽略 tables + TablesIgnore []string `json:"tables_ignore,omitempty"` + // 暂不支持行级闪回 + FilterRows string `json:"filter_rows"` +} + +// getBinlogFiles 从本地实例查找并过滤 binlog +// fromDir 指定从哪个目录找 binlog +func (f *Flashback) getBinlogFiles(fromDir string) (int64, error) { + if fromDir != "" { + f.recover.BinlogDir = fromDir + } + if len(f.BinlogFiles) != 0 { + f.recover.BinlogFiles = f.BinlogFiles + } else { + if binlogDir, binlogFiles, err := f.getBinlogFilesLocal(); err != nil { + return 0, err + } else { + f.recover.BinlogDir = binlogDir // 实例真实 binlog dir + f.recover.BinlogFiles = binlogFiles + } + } + // flush logs 滚动 binlog + if _, err := f.dbWorker.ExecWithTimeout(5*time.Second, "FLUSH LOGS"); err != nil { + return 0, err + } + return f.recover.FilterBinlogFiles() +} + +// getBinlogFilesLocal 返回 binlog_dirs 下所有的 binlog 文件名 +// 在 WillRecoverBinlog.FilterBinlogFiles 里会掐头去尾 +func (f *Flashback) getBinlogFilesLocal() (string, []string, error) { + // 临时关闭 binlog 删除 + binlogDir, namePrefix, err := f.dbWorker.GetBinlogDir(f.TgtInstance.Port) + if err != nil { + return "", nil, err + } else { + logger.Info("binlogDir=%s namePrefix=%s", binlogDir, namePrefix) + } + files, err := ioutil.ReadDir(binlogDir) // 已经按文件名排序 + if err != nil { + return "", nil, errors.Wrap(err, "read binlog dir") + } + + var binlogFiles []string + reFilename := regexp.MustCompile(cst.ReBinlogFilename) + for _, fi := range files { + if !reFilename.MatchString(fi.Name()) { + if !strings.HasSuffix(fi.Name(), ".index") { + logger.Warn("illegal binlog file name %s", fi.Name()) + } + continue + } else { + binlogFiles = append(binlogFiles, fi.Name()) + } + } + return binlogDir, binlogFiles, nil +} + +func (f *Flashback) checkVersionAndVars() error { + // binlog_format + rowReg := regexp.MustCompile(`(?i)row`) + fullReg := regexp.MustCompile(`(?i)full`) + if val, err := f.dbWorker.GetSingleGlobalVar("binlog_format"); err != nil { + return err + } else if rowReg.MatchString(val) == false { + return errors.Errorf("binlog_format=%s should be ROW", val) + } + // binlog_row_image + flashbackAtLeastVer, _ := version.NewVersion("5.5.24") + flashbackVer80, _ := version.NewVersion("8.0.0") + fullrowAtLeastVer, _ := version.NewVersion("5.6.24") // 该版本之后才有 binlog_row_image + if val, err := f.dbWorker.SelectVersion(); err != nil { + return err + } else { + curInstVersion, err := version.NewVersion(val) + if err != nil { + return errors.Wrapf(err, "invalid version %s", val) + } + if curInstVersion.GreaterThanOrEqual(flashbackVer80) { // 8.0以上要用自己的 mysqlbinlog 版本 + f.ToolSet.Set(tools.ToolMysqlbinlog, f.ToolSet.MustGet(tools.ToolMysqlbinlogRollback80)) + f.recover.ToolSet.Set(tools.ToolMysqlbinlog, f.ToolSet.MustGet(tools.ToolMysqlbinlogRollback80)) + } + if curInstVersion.LessThan(flashbackAtLeastVer) || curInstVersion.GreaterThan(flashbackVer80) { + return errors.Errorf("mysql version %s does not support flashback", curInstVersion) + } else if curInstVersion.GreaterThan(fullrowAtLeastVer) { + if val, err := f.dbWorker.GetSingleGlobalVar("binlog_row_image"); err != nil { + return err + } else if fullReg.MatchString(val) == false { + return errors.Errorf("binlog_row_image=%s should be FULL", val) + } + } + } + return 
nil +} + +func (f *Flashback) checkDBTableInUse() error { + // 检查库是否存在 + var dbTables []native.TableSchema + if len(f.RecoverOpt.Databases) > 0 && len(f.RecoverOpt.Tables) == 0 { + for _, db := range f.RecoverOpt.Databases { + if dbs, err := f.dbWorker.SelectDatabases(db); err != nil { + return err + } else if len(dbs) == 0 { + return errors.Errorf("no databases found for %s", db) + } + } + } else if len(f.RecoverOpt.Databases) > 0 && len(f.RecoverOpt.Tables) > 0 { + if dbTablesMap, err := f.dbWorker.SelectTables(f.RecoverOpt.Databases, f.RecoverOpt.Tables); err != nil { + return err + } else if len(dbTablesMap) == 0 { + return errors.Errorf("no tables found for %v . %v", f.RecoverOpt.Databases, f.RecoverOpt.Tables) + } else { + for _, dbtb := range dbTablesMap { + dbTables = append(dbTables, dbtb) + } + } + } + // 检查表是否在使用 + var errList []error + if openTables, err := f.dbWorker.ShowOpenTables(6 * time.Second); err != nil { + return err + } else { + openTablesList := []string{} + for _, dbt := range openTables { + openTablesList = append(openTablesList, fmt.Sprintf("%s.%s", dbt.Database, dbt.Table)) + } + logger.Info("tables opened %v", openTablesList) + logger.Info("tables to flashback %+v", dbTables) + for _, dbt := range dbTables { + if util.StringsHas(openTablesList, dbt.DBTableStr) { + errList = append(errList, errors.Errorf("table is opened %s", dbt.DBTableStr)) + } + } + if len(errList) > 0 { + return util.SliceErrorsToError(errList) + } + } + return nil +} + +func (f *Flashback) checkTableColumnExists() error { + return nil +} + +func (f *Flashback) checkInstanceSkipped() error { + return nil +} + +func (f *Flashback) checkDBRole() error { + // 从备份/监控配置里面获取 db_role + // 从 show slave status 里面判断角色 + if slaveStatus, err := f.dbWorker.ShowSlaveStatus(); err != nil { + return err + } else { + if slaveStatus.MasterHost != "" { + return errors.New("target_instance should not be a slave") + } + } + return nil +} + +func (f *Flashback) checkDiskSpace() error { + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_download.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_download.go new file mode 100644 index 0000000000..7fdf071070 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_download.go @@ -0,0 +1 @@ +package rollback diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_import.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_import.go new file mode 100644 index 0000000000..441b051ad0 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_import.go @@ -0,0 +1,6 @@ +package rollback + +// ImportBinlog TODO +func (f *Flashback) ImportBinlog() error { + return f.recover.Import() +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_rows.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_rows.go new file mode 100644 index 0000000000..7fdf071070 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/flashback_rows.go @@ -0,0 +1 @@ +package rollback diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/rollback.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/rollback.go new file mode 100644 index 0000000000..2b8c64f923 --- /dev/null +++ 
b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/rollback/rollback.go @@ -0,0 +1,2 @@ +// Package rollback TODO +package rollback diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/semantic_check_run.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/semantic_check_run.go new file mode 100644 index 0000000000..fe7e54ba58 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/semantic_check_run.go @@ -0,0 +1,453 @@ +package mysql + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/computil" + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "fmt" + "os" + "os/exec" + "path" + "regexp" + "sync" + + ants "github.com/panjf2000/ants/v2" +) + +/* + Prepare: + 1、下发备份表结构命名到目标实例,完成备份表结构 + 2、将备份的表结构文件&待语义检查的SQL文件下发到语义检查的机器上 + 3、选择一个于目标实例版本一致的语义检查的实例,锁定 + ------------------------------------------------------------- + Doing: + 4、将处理后的表结构导入到临时实例上 + 5、导入待检查的SQL文件 + 6、分析语义检查的结果 + ------------------------------------------------------------- + Ending: + 7、Clean 语义检查临时实例,并释放 +*/ + +// SemanticCheckComp TODO +type SemanticCheckComp struct { + GeneralParam *components.GeneralParam `json:"general"` + Params SenmanticCheckParam `json:"extend"` + SenmanticCheckRunTimeCtx `json:"-"` +} + +// SenmanticCheckParam TODO +type SenmanticCheckParam struct { + Host string `json:"host" validate:"required,ip"` // 语义检查实例的主机地址 + Port int `json:"port" validate:"required,lt=65536,gte=3306"` // 语义检查实例的端口 + SchemaFile string `json:"schemafile" validate:"required"` // 表结构文件 + ExcuteObjects []ExcuteSQLFileObj `json:"execute_objects"` + // 用于获取目标实例的字符集,默认存储引擎 + RemoteHost string `json:"remote_host" validate:"required,ip"` // 获取表结构的源实例IP + RemotePort int `json:"remote_port" validate:"required,lt=65536,gte=3306"` // 获取表结构的源实例Port +} + +// SenmanticCheckRunTimeCtx TODO +type SenmanticCheckRunTimeCtx struct { + dbConn *native.DbWorker + adminUser string + adminPwd string + socket string + version string + remoteDefaultEngineIsTokudb bool + remoteVersion string + remoteCharset string + afterdealSchemafile string // schema sqlfile 处理之后的文件 + taskdir string + schemafilename string +} + +// Precheck 前置检查 +// +// @receiver c +// @return err +func (c *SemanticCheckComp) Precheck() (err error) { + if !osutil.FileExist(c.Params.SchemaFile) { + return fmt.Errorf("%s文件不存在", c.Params.SchemaFile) + } + for _, o := range c.Params.ExcuteObjects { + if !osutil.FileExist(path.Join(cst.BK_PKG_INSTALL_PATH, o.SQLFile)) { + return fmt.Errorf("%s文件不存在", o.SQLFile) + } + } + return nil +} + +// Example TODO +func (c *SemanticCheckComp) Example() interface{} { + comp := SemanticCheckComp{ + Params: SenmanticCheckParam{ + Host: "1.1.1.1", + Port: 3306, + SchemaFile: "db_schema.sql", + ExcuteObjects: []ExcuteSQLFileObj{ + { + SQLFile: "test1.sql", + IgnoreDbNames: []string{"db9"}, + DbNames: []string{"db_100*", "db8"}, + }, + { + SQLFile: "test2.sql", + IgnoreDbNames: []string{"db90"}, + DbNames: []string{"db_200*", "db7"}, + }, + }, + RemoteHost: "2.2.2.2", + RemotePort: 3306, + }, + } + return comp +} + +// Init TODO +// +// @receiver c +// @receiver uid +// @return err +func (c *SemanticCheckComp) Init(uid string) (err error) { + c.taskdir 
= path.Join(cst.BK_PKG_INSTALL_PATH, fmt.Sprintf("semantic_check_%s", uid)) + if err = os.MkdirAll(c.taskdir, os.ModePerm); err != nil { + logger.Error("初始化任务目录失败%s:%s", c.taskdir, err.Error()) + return + } + + c.schemafilename = path.Base(c.Params.SchemaFile) + // 将表结构文件移动到target dir + if err = os.Rename(c.Params.SchemaFile, path.Join(c.taskdir, c.schemafilename)); err != nil { + logger.Error("将表结构文件移动到%s 错误:%s", c.taskdir, err.Error()) + return + } + if err = c.initLocalRuntimeCtxParam(); err != nil { + return + } + return c.initRemoteRuntimeCtxParam() +} + +// initRemoteRuntimeCtxParam TODO +// +// initRuntimeCtxParam 初始化运行时参数 +// @receiver c +// @return err +func (c *SemanticCheckComp) initRemoteRuntimeCtxParam() (err error) { + remotedbConn, err := native.InsObject{ + Host: c.Params.RemoteHost, + Port: c.Params.RemotePort, + User: c.GeneralParam.RuntimeAccountParam.MonitorAccessAllUser, + Pwd: c.GeneralParam.RuntimeAccountParam.MonitorAccessAllPwd, + }.Conn() + if err != nil { + logger.Error("Connect %s:%d failed:%s", c.Params.RemoteHost, c.Params.Port, err.Error()) + return err + } + defer func() { + if remotedbConn.Db != nil { + remotedbConn.Db.Close() + } + }() + if c.remoteCharset, err = remotedbConn.ShowServerCharset(); err != nil { + logger.Error("获取源实例的字符集失败:%s", err.Error()) + return err + } + if c.remoteVersion, err = remotedbConn.SelectVersion(); err != nil { + logger.Error("获取源实例的Version:%s", err.Error()) + return err + } + if c.remoteDefaultEngineIsTokudb, err = remotedbConn.IsSupportTokudb(); err != nil { + logger.Error("判断源实例是否支持:%s", err.Error()) + return err + } + return err +} + +func (c *SemanticCheckComp) initLocalRuntimeCtxParam() (err error) { + c.adminUser = c.GeneralParam.RuntimeAccountParam.AdminUser + c.adminPwd = c.GeneralParam.RuntimeAccountParam.AdminPwd + c.dbConn, err = native.InsObject{ + Host: c.Params.Host, + Port: c.Params.Port, + User: c.adminUser, + Pwd: c.adminPwd, + }.Conn() + if err != nil { + logger.Error("Connect %d failed:%s", c.Params.Port, err.Error()) + return err + } + c.socket, err = c.dbConn.ShowSocket() + if err != nil { + logger.Warn("获取本实例socket val 失败") + } + c.version, err = c.dbConn.SelectVersion() + if err != nil { + logger.Error("获取本实例Version失败:%s", err.Error()) + return err + } + return nil +} + +// dealWithSchemaFile 导入前处理导入文件 +// +// @receiver c +// @return err +func (c *SemanticCheckComp) dealWithSchemaFile() (err error) { + script := fmt.Sprintf("cat %s ", path.Join(c.taskdir, c.schemafilename)) + if c.remoteDefaultEngineIsTokudb { + script += ` | sed -e 's/ROW_FORMAT=TOKUDB_ZLIB/ROW_FORMAT=default/'` + } + // 将没有包含"CREATE TABLE IF NOT EXISTS"的行做替换(直接替换会导致替换结果出现两次IF NOT EXISTS) + script += ` | sed '/CREATE TABLE IF NOT EXISTS /! 
s/^CREATE TABLE /CREATE TABLE IF NOT EXISTS /'` + engine := "INNODB" + if c.remoteDefaultEngineIsTokudb { + engine = "MYISAM" + } + switch { + case regexp.MustCompile(`tspider-3`).MatchString(c.remoteVersion): + script += ` | sed -e 's/99106 ROW_FORMAT=GCS_DYNAMIC/99104 ROW_FORMAT=DYNAMIC/i'` + script += ` | sed -e 's/99104 COMPRESSED/99999 COMPRESSED/i'` + script += ` | sed -e 's/ROW_FORMAT=FIXED//i'` + script += fmt.Sprintf(" | sed -e 's/ENGINE=SPIDER /ENGINE=%s ROW_FORMAT=DYNAMIC /i'", engine) + script += " | sed '/^ PARTITION `pt/d' " + script += fmt.Sprintf(" | sed 's/ENGINE = SPIDER,$/ENGINE = %s) ;/g'", engine) + script += ` | sed 's/MOD [0-9]*)$/MOD 1)/g'` + case regexp.MustCompile(`spider`).MatchString(c.remoteVersion): + script += ` | sed -e 's/99106 ROW_FORMAT=GCS_DYNAMIC/99104 ROW_FORMAT=DYNAMIC/i'` + script += ` | sed -e 's/99104 COMPRESSED/99999 COMPRESSED/i'` + script += ` | sed -e 's/ROW_FORMAT=FIXED//i'` + script += fmt.Sprintf(" | sed -e 's/ENGINE=SPIDER /ENGINE=%s ROW_FORMAT=DYNAMIC /i'", engine) + script += ` | sed '/^ PARTITION pt/d'` + script += "| sed '/^ PARTITION `pt/d'" + script += fmt.Sprintf(" | sed 's/ENGINE = SPIDER,$/ENGINE = %s) \\\\*\\\\/;/g'", engine) + script += `| sed 's/%[0-9]*)$/\%1)/g'` + default: + script += " | sed -e 's/99106 ROW_FORMAT=GCS_DYNAMIC/99104 ROW_FORMAT=DYNAMIC/i'" + script += " | sed -e 's/99104 COMPRESSED/99999 COMPRESSED/i'" + script += " | sed -e 's/ROW_FORMAT=FIXED//i'" + script += " | sed -e 's/ENGINE=SPIDER DEFAULT CHARSET=/ENGINE=INNODB ROW_FORMAT=DYNAMIC DEFAULT CHARSET=/i'" + } + logger.Info("导入前预处理命令:%s", script) + stdOutFileName := path.Join(c.taskdir, c.schemafilename+".new") + stdOutFile, err := os.OpenFile(stdOutFileName, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) + if err != nil { + return fmt.Errorf("open file %s failed, err:%w", stdOutFileName, err) + } + defer func() { + if err := stdOutFile.Close(); err != nil { + logger.Warn("close file %s failed, err:%s", stdOutFileName, err.Error()) + } + }() + + stdErrFileName := path.Join(c.taskdir, c.schemafilename+".new.err") + stdErrFile, err := os.OpenFile(stdErrFileName, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm) + if err != nil { + return fmt.Errorf("open %s failed, err:%+v", stdErrFileName, err) + } + defer func() { + if err := stdErrFile.Close(); err != nil { + logger.Warn("close file %s failed, err:%s", stdErrFileName, err.Error()) + } + }() + logger.Info( + "预处理导出的表结构[doing]: 源文件(%s) ==> 预处理后文件(%s), 错误输出(%s)", + c.Params.SchemaFile, + stdOutFileName, + stdErrFileName, + ) + cmd := exec.Command("/bin/bash", "-c", script) + cmd.Stdout = stdOutFile + cmd.Stderr = stdErrFile + if err := cmd.Run(); err != nil { + logger.Error("运行预处理失败%s", err.Error()) + return err + } + logger.Info( + "预处理导出的表结构[doing]: 源文件(%s) ==> 预处理后文件(%s), 错误输出(%s)", + c.Params.SchemaFile, + stdOutFileName, + stdErrFileName, + ) + c.afterdealSchemafile = stdOutFileName + return nil +} + +// LoadSchema 导入远程表结构 +// +// @receiver c +// @return err +func (c *SemanticCheckComp) LoadSchema() (err error) { + if err = c.dealWithSchemaFile(); err != nil { + logger.Error("预处理导入文件失败:%s", err.Error()) + return err + } + return mysqlutil.ExecuteSqlAtLocal{ + Host: c.Params.Host, + Port: c.Params.Port, + Charset: c.remoteCharset, + Socket: c.socket, + User: c.adminUser, + Password: c.adminPwd, + }.ExcuteSqlByMySQLClient(c.afterdealSchemafile, []string{native.TEST_DB}) +} + +// Run 运行语义检查 +// +// @receiver c +// @return err +func (c *SemanticCheckComp) Run() (err error) { + if err = c.LoadSchema(); err != 
nil {
+		logger.Error("failed to import the schema from %s:%d", c.Params.RemoteHost, c.Params.RemotePort)
+		return err
+	}
+	e := ExcuteSQLFileComp{
+		GeneralParam: c.GeneralParam,
+		Params: &ExcuteSQLFileParam{
+			Host:          c.Params.Host,
+			Ports:         []int{c.Params.Port},
+			CharSet:       c.remoteCharset,
+			ExcuteObjects: c.Params.ExcuteObjects,
+			Force:         false,
+		},
+		ExcuteSQLFileRunTimeCtx: ExcuteSQLFileRunTimeCtx{},
+	}
+	if err = e.Init(); err != nil {
+		return err
+	}
+	if err = e.Excute(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Clean cleans up and restarts the semantic-check instance
+//
+//	@receiver c
+//	@return err
+func (c *SemanticCheckComp) Clean() (err error) {
+	logger.Info("start cleaning up the semantic check instance...")
+	if err = c.initLocalRuntimeCtxParam(); err != nil {
+		return err
+	}
+	if err = c.cleandata(); err != nil {
+		logger.Warn("failed to clean up the semantic check instance: %s", err.Error())
+		return
+	}
+	if err = c.restart(); err != nil {
+		logger.Error("failed to restart the semantic check instance: %s", err.Error())
+		return
+	}
+	logger.Info("semantic check instance cleaned up successfully")
+	return
+}
+
+// restart shuts down and restarts the semantic-check instance
+//
+//	@receiver c
+//	@return err
+func (c *SemanticCheckComp) restart() (err error) {
+	err = computil.ShutdownMySQLParam{
+		MySQLUser: c.adminUser,
+		MySQLPwd:  c.adminPwd,
+		Socket:    c.socket,
+	}.ShutdownMySQLBySocket()
+	if err != nil {
+		logger.Error("failed to shut down the instance: %s", err.Error())
+		return err
+	}
+	p := &computil.StartMySQLParam{
+		MyCnfName: util.GetMyCnfFileName(c.Params.Port),
+		MySQLUser: c.adminUser,
+		MySQLPwd:  c.adminPwd,
+		Socket:    c.socket,
+	}
+	_, err = p.StartMysqlInstance()
+	if err != nil {
+		logger.Error("failed to start the instance: %s", err.Error())
+		return err
+	}
+	return
+}
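
cleandata below fans its DROP statements out through an ants goroutine pool and reports failures through an error channel. The same pattern in isolation might look like the sketch below (illustrative names, not code from this patch); the channel is sized to the task count so a reporting worker can never block:

// dropConcurrently is a sketch of the pool-plus-error-channel pattern:
// every worker can report without blocking, and the first error wins.
func dropConcurrently(conn *native.DbWorker, dbs []string) error {
	pool, _ := ants.NewPool(100)
	defer pool.Release()
	var wg sync.WaitGroup
	errChan := make(chan error, len(dbs)) // one slot per task: sends never block
	for _, db := range dbs {
		db := db
		wg.Add(1)
		_ = pool.Submit(func() {
			defer wg.Done()
			if _, err := conn.Exec(fmt.Sprintf("drop database `%s`;", db)); err != nil {
				errChan <- fmt.Errorf("drop database %s: %w", db, err)
			}
		})
	}
	wg.Wait()
	close(errChan)
	return <-errChan // nil when the channel is empty and closed
}
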
logger.Error("drop db failed: %s", err.Error()) + return err + default: + } + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/semantic_dump_schema.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/semantic_dump_schema.go new file mode 100644 index 0000000000..d60dad7ab5 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/semantic_dump_schema.go @@ -0,0 +1,221 @@ +package mysql + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/computil" + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/bkrepo" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "encoding/json" + "fmt" + "net/url" + "path" + "reflect" + "regexp" +) + +// SemanticDumpSchemaComp TODO +type SemanticDumpSchemaComp struct { + GeneralParam *components.GeneralParam `json:"general"` + Params DumpSchemaParam `json:"extend"` + DumpSchemaRunTimeCtx `json:"-"` +} + +// DumpSchemaParam TODO +type DumpSchemaParam struct { + Host string `json:"host" validate:"required,ip"` // 当前实例的主机地址 + Port int `json:"port" validate:"required,lt=65536,gte=3306"` // 当前实例的端口 + CharSet string `json:"charset" validate:"required,checkCharset"` // 字符集参数 + BackupFileName string `json:"backup_file_name"` + BackupDir string `json:"backup_dir"` + BkCloudId int `json:"bk_cloud_id"` // 所在的云区域 + DBCloudToken string `json:"db_cloud_token"` // 云区域token + FileServer FileServer `json:"fileserver"` +} + +// FileServer TODO +type FileServer struct { + URL string `json:"url"` // 制品库地址 + Bucket string `json:"bucket"` // 目标bucket + Password string `json:"password"` // 制品库 password + Username string `json:"username"` // 制品库 username + Project string `json:"project"` // 制品库 project + UploadPath string `json:"upload_path"` // 上传路径 +} + +// DumpSchemaRunTimeCtx TODO +type DumpSchemaRunTimeCtx struct { + dbs []string // 需要备份的表结构的数据库名称集合 + charset string // 当前实例的字符集 + dumpCmd string +} + +// Example godoc +func (c *SemanticDumpSchemaComp) Example() interface{} { + comp := SemanticDumpSchemaComp{ + Params: DumpSchemaParam{ + Host: "1.1.1.1", + Port: 3306, + CharSet: "default", + BackupFileName: "xx_schema.sql", + BackupDir: "/data/path1/path2", + }, + } + return comp +} + +// Init init +// +// @receiver c +// @return err +func (c *SemanticDumpSchemaComp) Init() (err error) { + conn, err := native.InsObject{ + Host: c.Params.Host, + Port: c.Params.Port, + User: c.GeneralParam.RuntimeAccountParam.AdminUser, + Pwd: c.GeneralParam.RuntimeAccountParam.AdminPwd, + }.Conn() + if err != nil { + logger.Error("Connect %d failed:%s", c.Params.Port, err.Error()) + return err + } + alldbs, err := conn.ShowDatabases() + if err != nil { + logger.Error("show all databases failed:%s", err.Error()) + return err + } + + version, err := conn.SelectVersion() + if err != nil { + logger.Error("获取version failed %s", err.Error()) + return err + } + finaldbs := []string{} + reg := regexp.MustCompile(`^bak_cbs`) + for _, db := range util.FilterOutStringSlice(alldbs, computil.GetGcsSystemDatabasesIgnoreTest(version)) { + if reg.MatchString(db) { + continue + } + finaldbs = append(finaldbs, db) + } + if len(finaldbs) == 0 { + return fmt.Errorf("变更实例排除系统库后,再也没有可以变更的库") + } + c.dbs = 
+
+// Init connects the instance, decides which databases to dump and resolves the effective charset
+//
+//	@receiver c
+//	@return err
+func (c *SemanticDumpSchemaComp) Init() (err error) {
+	conn, err := native.InsObject{
+		Host: c.Params.Host,
+		Port: c.Params.Port,
+		User: c.GeneralParam.RuntimeAccountParam.AdminUser,
+		Pwd:  c.GeneralParam.RuntimeAccountParam.AdminPwd,
+	}.Conn()
+	if err != nil {
+		logger.Error("Connect %d failed:%s", c.Params.Port, err.Error())
+		return err
+	}
+	alldbs, err := conn.ShowDatabases()
+	if err != nil {
+		logger.Error("show all databases failed:%s", err.Error())
+		return err
+	}
+
+	version, err := conn.SelectVersion()
+	if err != nil {
+		logger.Error("get version failed %s", err.Error())
+		return err
+	}
+	finaldbs := []string{}
+	reg := regexp.MustCompile(`^bak_cbs`)
+	for _, db := range util.FilterOutStringSlice(alldbs, computil.GetGcsSystemDatabasesIgnoreTest(version)) {
+		if reg.MatchString(db) {
+			continue
+		}
+		finaldbs = append(finaldbs, db)
+	}
+	if len(finaldbs) == 0 {
+		return fmt.Errorf("no database is left to dump after excluding the system databases")
+	}
+	c.dbs = finaldbs
+	c.charset = c.Params.CharSet
+	if c.Params.CharSet == "default" {
+		if c.charset, err = conn.ShowServerCharset(); err != nil {
+			logger.Error("failed to get the instance charset: %s", err.Error())
+			return err
+		}
+	}
+	return err
+}
+
+// Precheck verifies that the dump command and the backup directory exist
+//
+//	@receiver c
+//	@return err
+func (c *SemanticDumpSchemaComp) Precheck() (err error) {
+	// should the directory be created if it does not exist?
+	// does the free space need checking?
+	c.dumpCmd = path.Join(cst.MysqldInstallPath, "bin", "mysqldump")
+	if !osutil.FileExist(c.dumpCmd) {
+		return fmt.Errorf("dump command %s does not exist", c.dumpCmd)
+	}
+	if !osutil.FileExist(c.Params.BackupDir) {
+		return fmt.Errorf("backup dir %s does not exist", c.Params.BackupDir)
+	}
+	return
+}
+
+// DumpSchema runs the schema dump
+//
+//	@receiver c
+//	@return err
+func (c *SemanticDumpSchemaComp) DumpSchema() (err error) {
+	var dumper mysqlutil.Dumper
+	dumper = &mysqlutil.MySQLDumperTogether{
+		MySQLDumper: mysqlutil.MySQLDumper{
+			DumpDir:      c.Params.BackupDir,
+			Ip:           c.Params.Host,
+			Port:         c.Params.Port,
+			DbBackupUser: c.GeneralParam.RuntimeAccountParam.AdminUser,
+			DbBackupPwd:  c.GeneralParam.RuntimeAccountParam.AdminPwd,
+			DbNames:      c.dbs,
+			DumpCmdFile:  c.dumpCmd,
+			Charset:      c.charset,
+			MySQLDumpOption: mysqlutil.MySQLDumpOption{
+				NoData:       true,
+				AddDropTable: true,
+				NeedUseDb:    true,
+				DumpRoutine:  true,
+				DumpTrigger:  false,
+			},
+		},
+		OutputfileName: c.Params.BackupFileName,
+	}
+	if err := dumper.Dump(); err != nil {
+		logger.Error("dump failed: %s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// Upload pushes the schema file to the artifact repository when fileserver params are given
+func (c *SemanticDumpSchemaComp) Upload() (err error) {
+	if reflect.DeepEqual(c.Params.FileServer, FileServer{}) {
+		logger.Info("no fileserver params, skipping the upload")
+		return nil
+	}
+	schemafile := path.Join(c.Params.BackupDir, c.Params.BackupFileName)
+	r := path.Join("generic", c.Params.FileServer.Project, c.Params.FileServer.Bucket, c.Params.FileServer.UploadPath)
+	uploadUrl, err := url.JoinPath(c.Params.FileServer.URL, r, "/")
+	if err != nil {
+		logger.Error("call url joinPath failed %s ", err.Error())
+		return err
+	}
+	if c.Params.BkCloudId == 0 {
+		uploadUrl, err = url.JoinPath(
+			c.Params.FileServer.URL, path.Join(
+				"/generic", c.Params.FileServer.Project,
+				c.Params.FileServer.Bucket, c.Params.FileServer.UploadPath, c.Params.BackupFileName,
+			),
+		)
+		if err != nil {
+			logger.Error("call url joinPath failed %s ", err.Error())
+			return err
+		}
+	}
+	logger.Info("bk_cloud_id:%d,upload url:%s", c.Params.BkCloudId, uploadUrl)
+	resp, err := bkrepo.UploadFile(
+		schemafile, uploadUrl, c.Params.FileServer.Username, c.Params.FileServer.Password,
+		c.Params.BkCloudId, c.Params.DBCloudToken,
+	)
+	if err != nil {
+		logger.Error("upload sqlfile error %s", err.Error())
+		return err
+	}
+	if resp.Code != 0 {
+		err = fmt.Errorf(
+			"upload response code is %d, response msg: %s, traceId: %s",
+			resp.Code,
+			resp.Message,
+			resp.RequestId,
+		)
+		logger.Error(err.Error())
+		return err
+	}
+	logger.Info("Resp: code:%d,msg:%s,traceid:%s", resp.Code, resp.Message, resp.RequestId)
+	var uploadRespdata bkrepo.UploadRespData
+	if err := json.Unmarshal(resp.Data, &uploadRespdata); err != nil {
+		logger.Error("unmarshal upload response data failed %s", err.Error())
+		return err
+	}
+	logger.Info("%v", uploadRespdata)
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/uninstall_mysql.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/uninstall_mysql.go
new file mode 100644
index 0000000000..599b8b951c
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/uninstall_mysql.go
@@ -0,0 
+1,299 @@ +// 下架MySQL实例 +// 因为下架需要操作数据目录和日志目录 +// 这个两个参数是从my.cnf里面读取的 +// ** 一定要存在my.cnf ** 否则无法下架,如果my.cnf 丢失可以伪造一个my.cnf + +package mysql + +import ( + "dbm-services/common/go-pubpkg/cmutil" + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/computil" + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "dbm-services/mysql/db-tools/dbactuator/pkg/tools" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "fmt" + "path" + "strconv" + "strings" + "time" + + "github.com/mitchellh/go-ps" + "github.com/shirou/gopsutil/v3/process" +) + +// UnInstallMySQLComp 卸载mysql +type UnInstallMySQLComp struct { + GeneralParam *components.GeneralParam + Params *UnInstallMySQLParam + runTimeCtx + tools *tools.ToolSet +} + +// UnInstallMySQLParam 参数 +type UnInstallMySQLParam struct { + Host string `json:"host" validate:"required,ip" ` + Force bool `json:"force"` // 是否强制下架mysqld + Ports []int `json:"ports" validate:"required,gt=0,dive"` // 被监控机器的上所有需要监控的端口 + +} + +// 运行是需要的必须参数,可以提前计算 +type runTimeCtx struct { + adminUser string + adminPwd string + insMyObj map[Port]*MyCnfObj +} + +// MyCnfObj 配置 +type MyCnfObj struct { + MyCnfPath string + Datadir string + LogDir string + Socket string + IsShutdown bool // 标记卸载的实例是否已经是关闭/不能访问的状态 +} + +// Init 初始化 UnInstallMySQLRunTimeContext +// check my.cnf 配置,并加载配置 +func (u *UnInstallMySQLComp) Init() (err error) { + u.insMyObj = make(map[int]*MyCnfObj) + for _, port := range u.Params.Ports { + var datadir, logdir, socket string + myfile := util.GetMyCnfFileName(port) + if !cmutil.FileExists(myfile) { + return fmt.Errorf("%s不存在", myfile) + } + f, err := util.LoadMyCnfForFile(myfile) + if err != nil { + logger.Error("加载%s配置失败%s", myfile, err) + return err + } + if datadir, err = f.GetMySQLDataDir(); err != nil { + return err + } + if logdir, err = f.GetMySQLLogDir(); err != nil { + return err + } + if socket, err = f.GetMySQLSocket(); err != nil { + return err + } + u.insMyObj[port] = &MyCnfObj{ + MyCnfPath: myfile, + Datadir: datadir, + LogDir: logdir, + Socket: socket, + IsShutdown: false, // 初始化给个默认值,后续判断实例是否正常才变更 + } + } + u.runTimeCtx.adminPwd = u.GeneralParam.RuntimeAccountParam.AdminPwd + u.runTimeCtx.adminUser = u.GeneralParam.RuntimeAccountParam.AdminUser + u.tools = tools.NewToolSetWithPickNoValidate(tools.ToolMysqlTableChecksum) + return nil +} + +// PreCheck 非强制下架时候需要做一些安全卸载检查 +// +// @receiver u +// @return err +func (u *UnInstallMySQLComp) PreCheck() (err error) { + for _, port := range u.Params.Ports { + inst := native.InsObject{ + User: u.adminUser, + Pwd: u.adminPwd, + Socket: u.runTimeCtx.insMyObj[port].Socket, + } + if _, err := inst.ConnBySocket(); err != nil { + logger.Warn("try connent this mysql instance [%p] failed:%s", port, err.Error()) + u.insMyObj[port].IsShutdown = true + } + if !u.Params.Force && !u.insMyObj[port].IsShutdown { + // 非强制下架,且实例正常的情况下,需要判断实例是否有业务连接, + // todo 这里重新去创建连接,如果检测实例状态和连接业务访问之间出现实例异常,则会触发bug,后续考虑怎么优化这点 + if err := inst.CheckInstanceConnIdle(u.GeneralParam.RuntimeExtend.MySQLSysUsers, time.Second*1); err != nil { + logger.Warn("try connent this mysql instance [%p] failed:%s", port, err.Error()) + u.insMyObj[port].IsShutdown = true + } + } + continue + } + return nil +} + +// ShutDownMySQLD 关闭mysqld +func (u *UnInstallMySQLComp) ShutDownMySQLD() (err error) { + for _, port := range u.Params.Ports { + if 
u.Params.Force || u.insMyObj[port].IsShutdown { + // 这里如果传入强制卸载,或者之前判断实例已经异常,则走强制关闭逻辑,否则走正常卸载过程 + err = computil.ShutdownMySQLParam{ + MySQLUser: u.adminUser, + MySQLPwd: u.adminPwd, + Socket: u.insMyObj[port].Socket, + }.ForceShutDownMySQL() + if err != nil { + logger.Error("shutdown mysql instance %p failed:%s", port, err.Error()) + return err + } + } else { + // 走正常mysql关闭命令流程 + err = computil.ShutdownMySQLParam{ + MySQLUser: u.adminUser, + MySQLPwd: u.adminPwd, + Socket: u.insMyObj[port].Socket, + }.ShutdownMySQLBySocket() + if err != nil { + logger.Error("shutdown mysql instance %p failed:%s", port, err.Error()) + return err + } + } + } + return err +} + +// ClearMachine 清理目录 +// +// @receiver u +// @return err +// todo 删除备份程序配置, 删除数据校验程序的 +func (u *UnInstallMySQLComp) ClearMachine() (err error) { + for _, port := range u.Params.Ports { + var ( + dataLog = path.Join( + cst.DefaultMysqlLogRootPath, + cst.DefaultMysqlLogBasePath, + strconv.Itoa(port), + ) // "/data/mysqllog/{port}" + data1Log = path.Join( + cst.AlterNativeMysqlLogRootPath, + cst.DefaultMysqlLogBasePath, + strconv.Itoa(port), + ) // "/data1/mysqllog/{port}" + dataPath = path.Join( + cst.AlterNativeMysqlDataRootPath, + cst.DefaultMysqlDataBasePath, + strconv.Itoa(port), + ) // "/data/mysqldata/{port}" + data1Path = path.Join( + cst.DefaultMysqlDataRootPath, + cst.DefaultMysqlDataBasePath, + strconv.Itoa(port), + ) // "/data1/mysqldata/{port}" + data1Bak = path.Join( + cst.DefaultMysqlDataRootPath, + cst.DefaultBackupBasePath, + ) // "/data1/dbbak/" + dataBak = path.Join( + cst.AlterNativeMysqlDataRootPath, + cst.DefaultBackupBasePath, + ) // "/data/dbbak/" + suffix = fmt.Sprintf("_bak_%s", time.Now().Format(cst.TIMELAYOUTSEQ)) + dataLogBak = path.Join( + cst.DefaultMysqlLogRootPath, + fmt.Sprintf("%s_%d%s", cst.DefaultMysqlLogBasePath, port, suffix), + ) // "/data/mysqllog_{port}_bak__xxxx" + data1LogBak = path.Join( + cst.AlterNativeMysqlLogRootPath, + fmt.Sprintf("%s_%d%s", cst.DefaultMysqlLogBasePath, port, suffix), + ) // "/data/mysqllog_{port}_bak__xxxx" + + ) + + if cmutil.FileExists(dataLog) { + cmd := fmt.Sprintf("mv %s %s;", dataLog, dataLogBak) + logger.Info("backup command [%s]", cmd) + output, err := osutil.ExecShellCommand(false, cmd) + if err != nil { + err = fmt.Errorf("execute [%s] get an error:%w,output:%s", cmd, err, output) + return err + } + } + if cmutil.FileExists(data1Log) { + cmd := fmt.Sprintf("mv %s %s;", data1Log, data1LogBak) + logger.Info("backup command [%s]", cmd) + output, err := osutil.ExecShellCommand(false, cmd) + if err != nil { + err = fmt.Errorf("execute [%s] get an error:%w,output:%s", cmd, err, output) + return err + } + } + if cmutil.FileExists(dataPath) { + var shellCMD string + if !cmutil.FileExists(dataBak) { + shellCMD += fmt.Sprintf("mkdir %s;", dataBak) + } + shellCMD += fmt.Sprintf( + "mv %s %s_%d%s;", + dataPath, + path.Join(dataBak, cst.DefaultMysqlDataBasePath), + port, + suffix, + ) + logger.Info("backup command [%s]", shellCMD) + output, err := osutil.ExecShellCommand(false, shellCMD) + if err != nil { + err = fmt.Errorf("execute [%s] get an error:%w,output:%s", shellCMD, err, output) + return err + } + } + if cmutil.FileExists(data1Path) { + var shellCMD string + if !cmutil.FileExists(data1Bak) { + shellCMD += fmt.Sprintf("mkdir %s;", data1Bak) + } + shellCMD += fmt.Sprintf( + "mv %s %s_%d%s;", + data1Path, + path.Join(data1Bak, cst.DefaultMysqlDataBasePath), + port, + suffix, + ) + logger.Info("backup command [%s]", shellCMD) + output, err := 
osutil.ExecShellCommand(false, shellCMD)
+			if err != nil {
+				err = fmt.Errorf("execute [%s] get an error:%w,output:%s", shellCMD, err, output)
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// KillDirtyProcess kills leftover mysql-related processes on the host that uninstall checks for, e.g. hung mysql client processes
+//
+//	@receiver u
+//	@return err
+func (u *UnInstallMySQLComp) KillDirtyProcess() (err error) {
+	dirtyProcessNames := []string{
+		"mysql",
+	}
+	processes, err := ps.Processes()
+	if err != nil {
+		return fmt.Errorf("list processes failed, err:%s", err.Error())
+	}
+	msgs := make([]string, 0)
+	for _, proc := range processes {
+		processName := proc.Executable()
+		if !cmutil.HasElem(processName, dirtyProcessNames) {
+			continue
+		}
+
+		p, err := process.NewProcess(int32(proc.Pid()))
+		if err != nil {
+			msgs = append(msgs, fmt.Sprintf("process:%s, err:%s", processName, err.Error()))
+			continue
+		}
+		if err := p.Terminate(); err != nil {
+			msg := fmt.Sprintf("terminate process %s failed, err:%s", processName, err.Error())
+			msgs = append(msgs, msg)
+			continue
+		}
+		logger.Info("success terminate dirty process %s", processName)
+	}
+	if len(msgs) != 0 {
+		return fmt.Errorf("failed kill %d processes, they are: %s", len(msgs), strings.Join(msgs, "\n"))
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/clone_proxy_user.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/clone_proxy_user.go
new file mode 100644
index 0000000000..c96d327d9d
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/clone_proxy_user.go
@@ -0,0 +1,79 @@
+// Package mysql_proxy TODO
+/*
+ * @Description: clone the proxy user whitelist
+ */
+package mysql_proxy
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+)
+
+// CloneProxyUserComp clones the proxy user whitelist from a source proxy to a target proxy
+type CloneProxyUserComp struct {
+	GeneralParam         *components.GeneralParam
+	Params               *CloneProxyUserParam
+	SoueceProxyAdminConn *native.ProxyAdminDbWork
+	TargetProxyAdminConn *native.ProxyAdminDbWork
+}
+
+// CloneProxyUserParam payload parameters
+type CloneProxyUserParam struct {
+	SourceProxyHost string `json:"source_proxy_host" validate:"required,ip"`
+	SourceProxyPort int    `json:"source_proxy_port" validate:"required,gte=3306"`
+	TargetProxyHost string `json:"target_proxy_host" validate:"required,ip"`
+	TargetProxyPort int    `json:"target_proxy_port" validate:"required,gte=3306"`
+}
+
+// Init connects the admin ports of the source and target proxies
+func (p *CloneProxyUserComp) Init() (err error) {
+	p.SoueceProxyAdminConn, err = native.InsObject{
+		Host: p.Params.SourceProxyHost,
+		Port: p.Params.SourceProxyPort,
+		User: p.GeneralParam.RuntimeAccountParam.ProxyAdminUser,
+		Pwd:  p.GeneralParam.RuntimeAccountParam.ProxyAdminPwd,
+	}.ConnProxyAdmin()
+	if err != nil {
+		logger.Error("connect source proxy admin port (%d) failed, %s", p.Params.SourceProxyPort, err.Error())
+		return err
+	}
+	p.TargetProxyAdminConn, err = native.InsObject{
+		Host: p.Params.TargetProxyHost,
+		Port: p.Params.TargetProxyPort,
+		User: p.GeneralParam.RuntimeAccountParam.ProxyAdminUser,
+		Pwd:  p.GeneralParam.RuntimeAccountParam.ProxyAdminPwd,
+	}.ConnProxyAdmin()
+	if err != nil {
+		logger.Error("connect target proxy admin port (%d) failed, %s", p.Params.TargetProxyPort, err.Error())
+		return err
+	}
+	return
+}
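
As with the other components in this patch, the subcommand layer (not shown here) drives this one. A hypothetical driver, assuming the payload has already been unmarshalled into the component, would be:

// runCloneProxyUser is a sketch, not part of the patch: Init opens both
// admin connections, then CloneProxyUser copies the whitelist across.
func runCloneProxyUser(comp *mysql_proxy.CloneProxyUserComp) error {
	if err := comp.Init(); err != nil {
		return err
	}
	return comp.CloneProxyUser()
}
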
+
+// CloneProxyUser copies the user whitelist from the source proxy to the target proxy
+func (p *CloneProxyUserComp) CloneProxyUser() (err error) {
+	err = p.SoueceProxyAdminConn.CloneProxyUser(p.TargetProxyAdminConn)
+	if err != nil {
+		logger.Error(
+			"clone proxy users to instance(%s#%d) failed,%s", p.Params.TargetProxyHost, p.Params.TargetProxyPort,
+			err.Error(),
+		)
+		return err
+	}
+	return
+}
+
+// Example returns a sample payload for this component
+func (p *CloneProxyUserComp) Example() interface{} {
+	comp := CloneProxyUserComp{
+		Params: &CloneProxyUserParam{
+			SourceProxyHost: "1.1.1.1",
+			SourceProxyPort: 10000,
+			TargetProxyHost: "2.2.2.2",
+			TargetProxyPort: 10000,
+		},
+	}
+	return comp
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/install_mysql_proxy.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/install_mysql_proxy.go
new file mode 100644
index 0000000000..79603071b2
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/install_mysql_proxy.go
@@ -0,0 +1,408 @@
+// Package mysql_proxy TODO
+/*
+ * @Description: install MySQL Proxy
+ */
+package mysql_proxy
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path"
+	"strconv"
+	"strings"
+)
+
+// InstallMySQLProxyComp installs mysql-proxy instances
+type InstallMySQLProxyComp struct {
+	GeneralParam            *components.GeneralParam
+	Params                  *InstallMySQLProxyParam
+	InstallMySQLProxyConfig // runtime-initialized config
+}
+
+// InstallMySQLProxyParam payload parameters
+type InstallMySQLProxyParam struct {
+	components.Medium
+	ProxyConfigs json.RawMessage `json:"proxy_configs"`
+	Host         string          `json:"host" validate:"required,ip"`
+	Ports        []int           `json:"ports" validate:"required,gt=0,dive"`
+}
+
+// InitDirs alias for the directories created during init
+type InitDirs = []string
+
+// InitFiles alias for the files created during init
+type InitFiles = []string
+
+// Port alias
+type Port = int
+
+// InstallMySQLProxyConfig runtime configuration computed during Init
+type InstallMySQLProxyConfig struct {
+	UserLocal         string
+	Version           string
+	ProxyInstallDir   string
+	ProxyDataDir      string
+	ProxyBaseDir      string
+	ProxyAdminPortInc int
+	ProxyAdminUser    string
+	ProxyAdminPwd     string
+	InsPorts          []Port
+	InsReplaceConfigs map[Port]proxyutil.ReplaceProxyConfigs
+	InsInitDirs       map[Port]InitDirs  // directories to create during init
+	InsInitFile       map[Port]InitFiles // files to create during init
+	Checkfunc         []func() error
+}
+
+// Init computes the proxy mount point and the per-port replacement configs
+/**
+ * @description: compute the proxy mount point, collect the config items to replace, initialize ports, directories, etc.
+ * @return {*}
+ */
+func (i *InstallMySQLProxyComp) Init() (err error) {
+	var mountpoint string
+	i.UserLocal = cst.UsrLocal
+	i.ProxyInstallDir = cst.ProxyInstallPath
+	i.ProxyDataDir = cst.DefaultProxyDataRootPath
+	i.ProxyAdminPortInc = cst.ProxyAdminPortInc
+	i.ProxyAdminUser = i.GeneralParam.RuntimeAccountParam.ProxyAdminUser
+	i.ProxyAdminPwd = i.GeneralParam.RuntimeAccountParam.ProxyAdminPwd
+	// prefer placing the data directory under /data1
+	if mountpoint, err = osutil.FindFirstMountPointProxy(
+		cst.DefaultProxyDataRootPath,
+		cst.AlterNativeProxyDataRootPath,
+	); err != nil {
+		logger.Error("not found mount point /data1")
+		return err
+	}
+	i.ProxyDataDir = mountpoint
+	i.ProxyBaseDir = path.Join(mountpoint, cst.DefaultProxyLogBasePath)
+	// determine the ports to install
+	if len(i.Params.Ports) == 0 {
+		return fmt.Errorf("param Ports[len:%d] looks invalid", len(i.Params.Ports))
+	}
+	i.InsPorts = i.Params.Ports
+	if err = i.calculateRepalceConfigs(); err != nil {
+		logger.Error("failed to compute the replacement configs: %s", err.Error())
+		return err
+	}
+	return
+}
+
+// 
PreCheck TODO +/** + * @description: 预检查: + * - 检查是否存在安装proxy的路径 + * - 检查是否存在proxy processs + * - 检查安装包是否存在,如果存在检查md5是否正确 + * - + * @return {*} + */ +func (i *InstallMySQLProxyComp) PreCheck() (err error) { + // 判断 /usr/local/mysql-proxy 目录是否已经存在,如果存在则删除掉 + // if _, err := os.Stat(i.ProxyInstallDir); !os.IsNotExist(err) { + if cmutil.FileExists(i.ProxyInstallDir) { + if _, err = osutil.ExecShellCommand(false, "rm -r "+i.ProxyInstallDir); err != nil { + logger.Error("rm -r %s error:%s", i.ProxyInstallDir, err.Error()) + return err + } + } + // 校验介质 + if err = i.Params.Medium.Check(); err != nil { + return err + } + + if err := i.checkRunningProcess(); err != nil { + logger.Error("checkRunningProcess %s", err.Error()) + return err + } + if err := i.checkDirs(); err != nil { + logger.Error("checkDirs %s", err.Error()) + return err + } + return nil +} + +// checkRunningProcess TODO +/** + * @description: 检查是否mysql-proxy 进程存在 + * @return {*} + */ +func (i *InstallMySQLProxyComp) checkRunningProcess() (err error) { + // 正在运行的 proxy 机器是不能安装 proxy 的吧。 + proxyNumStr, err := osutil.ExecShellCommand(false, "ps -efwww|grep -w mysql-proxy|grep -v grep|wc -l") + if err != nil { + return fmt.Errorf("error occurs while getting number of mysql-proxy.[%w]", err) + } + proxyNum, err := strconv.Atoi(strings.Replace(proxyNumStr, "\n", "", -1)) + if err != nil { + logger.Error("= strconv.Atoi %s failed,err:%s", proxyNumStr, err.Error()) + } + if proxyNum > 0 { + return fmt.Errorf("already have %d running proxy process ", proxyNum) + } + return +} + +// checkDirs TODO +/** + * @description: 检查相关proxy目录是否已经存在 + * @return {*} + */ +func (i *InstallMySQLProxyComp) checkDirs() error { + for _, port := range i.InsPorts { + for _, dir := range i.InsInitDirs[port] { + if cmutil.FileExists(dir) { + return fmt.Errorf("%s already exist", dir) + } + } + } + return nil +} + +// getAdminPort TODO +/** + * @description: 获取proxy admin port + * @receiver {int} port + * @return {*} + */ +func getAdminPort(port int) int { + return port + cst.ProxyAdminPortInc +} + +// GenerateProxycnf TODO +/** + * @description: 生成proxy.cnf + * @return {*} + */ +func (i *InstallMySQLProxyComp) GenerateProxycnf() (err error) { + // 1. 
根据参数反序列化配置 + var tmplConfigs proxyutil.ProxyCnfObject + var tmplFileName = "proxy.cnf.tpl" + var nf *util.CnfFile + logger.Info("proxy Configs: %s", i.Params.ProxyConfigs) + if err = json.Unmarshal([]byte(i.Params.ProxyConfigs), &tmplConfigs); err != nil { + logger.Error("反序列化配置失败:%s", err.Error()) + return err + } + if nf, err = tmplConfigs.NewProxyCnfObject(tmplFileName); err != nil { + logger.Error("渲染模版配置文件失败:%s", err.Error()) + return err + } + for _, port := range i.InsPorts { + nf.FileName = util.GetProxyCnfName(port) + logger.Info("will replace config: %v", i.InsReplaceConfigs[port]) + if err = proxyutil.ReplaceProxyConfigsObjects(nf, i.InsReplaceConfigs[port]); err != nil { + logger.Error("替换参数失败%s", err.Error()) + return err + } + if err = nf.SafeSaveFile(true); err != nil { + logger.Error("保存配置文件失败", err.Error()) + return err + } + if _, err = osutil.ExecShellCommand( + false, fmt.Sprintf( + "chown -R mysql %s && chmod 0660 %s", nf.FileName, + nf.FileName, + ), + ); err != nil { + logger.Error("chown -R mysql %s %s", nf.FileName, err.Error()) + return err + } + } + return err +} + +// calculateRepalceConfigs 计算每个实例proxy.cnf.{port} 需要替换的配置项 同时将需要的初始化的目录计算出来并赋值 +// +// @receiver i +// @return err +func (i *InstallMySQLProxyComp) calculateRepalceConfigs() (err error) { + i.InsReplaceConfigs = make(map[Port]proxyutil.ReplaceProxyConfigs) + i.InsInitDirs = make(map[Port]InitDirs) + i.InsInitFile = make(map[int]InitFiles) + for _, port := range i.InsPorts { + proxyLogPath := path.Join(i.ProxyBaseDir, strconv.Itoa(port), "log") + adminUserfile := fmt.Sprintf("%s.%d", cst.DefaultProxyUserCnfName, port) + i.InsInitDirs[port] = append(i.InsInitDirs[port], proxyLogPath) + i.InsInitFile[port] = append(i.InsInitFile[port], adminUserfile) + i.InsReplaceConfigs[port] = proxyutil.ReplaceProxyConfigs{ + BaseDir: i.ProxyInstallDir, + LogFile: path.Join(proxyLogPath, "mysql-proxy.log"), + AdminUsersFile: adminUserfile, + AdminAddress: fmt.Sprintf("%s:%d", i.Params.Host, port+i.ProxyAdminPortInc), + AdminUserName: i.ProxyAdminUser, + AdminPassWord: i.ProxyAdminPwd, + AdminLuaScript: cst.DefaultAdminScripyLua, + ProxyAddress: fmt.Sprintf("%s:%d", i.Params.Host, port), + ProxyBackendAddress: cst.DefaultBackend, + } + } + return +} + +// InitInstanceDirs 创建实例相关的数据,日志目录以及修改权限 +// +// @receiver i +// @return err +func (i *InstallMySQLProxyComp) InitInstanceDirs() (err error) { + for _, port := range i.InsPorts { + for _, dir := range i.InsInitDirs[port] { + if _, err := osutil.ExecShellCommand( + false, + fmt.Sprintf("mkdir -p %s && chown -R mysql %s", dir, dir), + ); err != nil { + logger.Error("初始化实例目录%s 失败:%s", dir, err.Error()) + return err + } + } + for _, file := range i.InsInitFile[port] { + if _, err := osutil.ExecShellCommand( + false, fmt.Sprintf( + "touch %s && chown -R mysql %s && chmod 0660 %s ", file, + file, file, + ), + ); err != nil { + logger.Error("初始化文件%s 失败:%s", file, err.Error()) + return err + } + } + } + // 给根目录加权限 + if _, err := osutil.ExecShellCommand(false, fmt.Sprintf("chown -R mysql %s", i.ProxyBaseDir)); err != nil { + logger.Error("初始化实例目录%s 失败:%s", i.ProxyBaseDir, err.Error()) + return err + } + return nil +} + +// DecompressPkg TODO +/** + * @description: 解压安装包 + * @return {*} + */ +func (i *InstallMySQLProxyComp) DecompressPkg() (err error) { + if err = os.Chdir(i.UserLocal); err != nil { + return fmt.Errorf("cd to dir %s failed, err:%w", i.UserLocal, err) + } + pkgAbPath := i.Params.Medium.GetAbsolutePath() + if output, err := osutil.ExecShellCommand( + false, 
fmt.Sprintf( + "tar zxf %s -C %s ", pkgAbPath, + i.UserLocal, + ), + ); err != nil { + logger.Error("tar zxf %s error:%s,%s", pkgAbPath, output, err.Error()) + return err + } + proxyRealDirName := i.Params.Medium.GePkgBaseName() + extraCmd := fmt.Sprintf( + "ln -s %s mysql-proxy && chown -R mysql mysql-proxy && chown -R mysql %s", proxyRealDirName, + proxyRealDirName, + ) + if output, err := osutil.ExecShellCommand(false, extraCmd); err != nil { + err := fmt.Errorf("execute shell[%s] get an error:%w and output:%s", extraCmd, err, output) + return err + } + logger.Info("untar %s successfully", i.Params.Pkg) + return nil +} + +// Start TODO +/** + * @description: 启动proxy + * @return {*} + */ +func (i *InstallMySQLProxyComp) Start() error { + for _, port := range i.InsPorts { + p := proxyutil.StartProxyParam{ + InstallPath: i.ProxyInstallDir, + ProxyCnf: util.GetProxyCnfName(port), + Host: i.Params.Host, + Port: getAdminPort(port), // Is Admin Port + ProxyUser: i.ProxyAdminUser, + ProxyPwd: i.ProxyAdminPwd, + } + if err := p.Start(); err != nil { + logger.Error("start proxy(%d) failed,err:%s", port, err.Error()) + return err + } + logger.Info("start proxy(%d) successfully", port) + } + return nil +} + +// InitProxyAdminAccount TODO +/** + * @description: 初始化默认账户: add monitor@% user + * @return {*} + */ +func (i *InstallMySQLProxyComp) InitProxyAdminAccount() (err error) { + for _, port := range i.InsPorts { + // Test Conn ... + pc, err := native.NewDbWorkerNoPing( + fmt.Sprintf("%s:%d", i.Params.Host, getAdminPort(port)), i.ProxyAdminUser, + i.ProxyAdminPwd, + ) + if err != nil { + logger.Error("connect %d failed", port) + return err + } + defer pc.Stop() + _, err = pc.Exec(fmt.Sprintf("refresh_users('%s','+')", cst.ProxyUserMonitorAccessAll)) + if err != nil { + logger.Error("add ProxyAdminAccout failed %s", err.Error()) + return err + } + } + return +} + +// CreateExporterCnf 根据mysql-proxy部署端口生成对应的exporter配置文件 +func (i *InstallMySQLProxyComp) CreateExporterCnf() (err error) { + for _, port := range i.InsPorts { + exporterConfName := fmt.Sprintf("/etc/exporter_%d.cnf", port) + exporterContext := fmt.Sprintf( + "%s:%d,,,%s:%d,%s,%s", + i.Params.Host, + port, + i.Params.Host, + getAdminPort(port), + i.ProxyAdminUser, + i.ProxyAdminPwd, + ) + + f, err := os.OpenFile( + exporterConfName, + os.O_CREATE|os.O_TRUNC|os.O_WRONLY, + 0644, + ) + if err != nil { + logger.Error("create config file [%s] failed: %s", exporterConfName, err.Error()) + return err + } + defer f.Close() + + _, err = f.Write([]byte(exporterContext)) + if err != nil { + logger.Error("write config file [%s] failed: %s", exporterConfName, err.Error()) + return err + } + + if _, err = osutil.ExecShellCommand(false, fmt.Sprintf("chown -R mysql %s", exporterConfName)); err != nil { + logger.Error("chown -R mysql %s %s", exporterConfName, err.Error()) + return err + } + } + return nil + +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/restart_mysql_proxy.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/restart_mysql_proxy.go new file mode 100644 index 0000000000..ec1bcde9bf --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/restart_mysql_proxy.go @@ -0,0 +1,94 @@ +package mysql_proxy + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + 
"dbm-services/mysql/db-tools/dbactuator/pkg/util" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil" +) + +// RestartMySQLProxyComp TODO +type RestartMySQLProxyComp struct { + GeneralParam *components.GeneralParam + Params *RestartMySQLProxyParam +} + +// RestartMySQLProxyParam TODO +type RestartMySQLProxyParam struct { + Host string `json:"host" validate:"required,ip"` + Port int `json:"port" validate:"required,gte=3306"` // 被重启的proxy端口 +} + +// Example TODO +func (u *RestartMySQLProxyComp) Example() interface{} { + comp := RestartMySQLProxyComp{ + Params: &RestartMySQLProxyParam{ + Host: "1.1.1.1", + Port: 10000, + }, + } + return comp +} + +// PreCheck 提前检查下proxy client 等情况, 这里不检查proxy是否连接 +// +// @receiver u +// @return err +func (u *RestartMySQLProxyComp) PreCheck() (err error) { + + db := native.InsObject{ + Host: u.Params.Host, + User: u.GeneralParam.RuntimeAccountParam.ProxyAdminUser, + Pwd: u.GeneralParam.RuntimeAccountParam.ProxyAdminPwd, + Port: u.Params.Port, + } + _, err = db.ConnProxyAdmin() + if err != nil { + logger.Error("连接%d的Admin Port 失败%s", u.Params.Port, err.Error()) + return err + } + // inuse, err := db.CheckProxyInUse() + // if err != nil { + // logger.Error("检查Proxy可用性检查失败") + // return err + // } + // if inuse { + // return fmt.Errorf("检测到%d存在可用连接", u.Params.Port) + // } + + return +} + +// RestartProxy TODO +// UnInstallProxy 停止Proxy 然后在备份目录 +// +// @receiver u +// @return err +func (u *RestartMySQLProxyComp) RestartProxy() (err error) { + + // 先正常关闭proxy进程 + if err = proxyutil.KillDownProxy(u.Params.Port); err != nil { + logger.Error("停止%d进程失败:%s", u.Params.Port, err.Error()) + return err + } + logger.Info("关闭 proxy(%d) 成功", u.Params.Port) + + // 然后启动proxy进程 + p := proxyutil.StartProxyParam{ + InstallPath: cst.ProxyInstallPath, + ProxyCnf: util.GetProxyCnfName(u.Params.Port), + Host: u.Params.Host, + Port: getAdminPort(u.Params.Port), // Is Admin Port + ProxyUser: u.GeneralParam.RuntimeAccountParam.ProxyAdminUser, + ProxyPwd: u.GeneralParam.RuntimeAccountParam.ProxyAdminPwd, + } + if err := p.Start(); err != nil { + logger.Error("启动 proxy(%d) 失败,err:%s", u.Params.Port, err.Error()) + return err + } + logger.Info("启动 proxy(%d) 成功", u.Params.Port) + + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/set_backend.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/set_backend.go new file mode 100644 index 0000000000..aa3de8e3e8 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/set_backend.go @@ -0,0 +1,79 @@ +// Package mysql_proxy TODO +/* + * @Description: 设置proxy后端,建立proxyh和master的关系 + */ +package mysql_proxy + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "fmt" + "strings" +) + +// ProxySetBackendCom TODO +type ProxySetBackendCom struct { + GeneralParam *components.GeneralParam + Params ProxySetBackendParam + proxyAdminConn *native.ProxyAdminDbWork `json:"-"` +} + +// ProxySetBackendParam TODO +type ProxySetBackendParam struct { + Host string `json:"host" validate:"required,ip" ` // 当前实例的主机地址 + Port int `json:"port" validate:"required,gt=3306"` // 当前实例的端口 + BackendHost string `json:"backend_host" validate:"required,ip"` + BackendPort int `json:"backend_port" validate:"required,gt=3306"` +} + +// Example TODO +func (p *ProxySetBackendCom) Example() interface{} { + comp := 
ProxySetBackendCom{
+		Params: ProxySetBackendParam{
+			Host:        "1.1.1.1",
+			Port:        10000,
+			BackendHost: "1.2.1.1",
+			BackendPort: 20000,
+		},
+	}
+	return comp
+}
+
+// Init TODO
+/**
+ * @description: establish the proxy admin conn, and check that backends is 1.1.1.1:3306 (a newly provisioned proxy must have backends 1.1.1.1:3306)
+ * @return {*}
+ */
+func (p *ProxySetBackendCom) Init() (err error) {
+	p.proxyAdminConn, err = native.InsObject{
+		Host: p.Params.Host,
+		Port: p.Params.Port,
+		User: p.GeneralParam.RuntimeAccountParam.ProxyAdminUser,
+		Pwd:  p.GeneralParam.RuntimeAccountParam.ProxyAdminPwd,
+	}.ConnProxyAdmin()
+	if err != nil {
+		logger.Error("connect proxy admin port(ori:%d) failed,%s", p.Params.Port, err.Error())
+		return err
+	}
+	backend, err := p.proxyAdminConn.SelectBackend()
+	if err != nil {
+		logger.Error("get backends failed %s", err.Error())
+		return err
+	}
+	// then check whether the backend still points to "1.1.1.1:3306"
+	if strings.TrimSpace(backend.Address) != cst.DefaultBackend {
+		return fmt.Errorf("current backend is not the default placeholder, got %s", backend.Address)
+	}
+	return
+}
+
+// SetBackend TODO
+/**
+ * @description: refresh backends
+ * @return {*}
+ */
+func (p *ProxySetBackendCom) SetBackend() (err error) {
+	return p.proxyAdminConn.RefreshBackends(p.Params.BackendHost, p.Params.BackendPort)
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/uninstall_mysql_proxy.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/uninstall_mysql_proxy.go
new file mode 100644
index 0000000000..b5419d0ec4
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql_proxy/uninstall_mysql_proxy.go
@@ -0,0 +1,129 @@
+package mysql_proxy
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil"
+	"fmt"
+	"path"
+	"strconv"
+	"strings"
+)
+
+// UnInstallMySQLProxyComp TODO
+type UnInstallMySQLProxyComp struct {
+	GeneralParam *components.GeneralParam
+	Params       *UnInstallMySQLProxyParam
+	runTimeCtx
+}
+
+// UnInstallMySQLProxyParam TODO
+type UnInstallMySQLProxyParam struct {
+	Host  string `json:"host" validate:"required,ip"`
+	Force bool   `json:"force"`                               // whether to force the uninstall
+	Ports []int  `json:"ports" validate:"required,gt=0,dive"` // all proxy ports on this machine that are to be uninstalled
+}
+
+// required runtime parameters; they can be computed ahead of execution
+type runTimeCtx struct {
+	proxyAdminUser string
+	proxyAdminPwd  string
+	proxyInsLogDir map[Port]string // log dir of each proxy instance
+}
+
+// Init initializes the component's runtime parameters
+//
+// @receiver u
+// @return err
+func (u *UnInstallMySQLProxyComp) Init() (err error) {
+	u.runTimeCtx.proxyAdminUser = u.GeneralParam.RuntimeAccountParam.ProxyAdminUser
+	u.runTimeCtx.proxyAdminPwd = u.GeneralParam.RuntimeAccountParam.ProxyAdminPwd
+	u.runTimeCtx.proxyInsLogDir = make(map[int]string)
+	for _, port := range u.Params.Ports {
+		proxyCnf := util.GetProxyCnfName(port)
+		if !cmutil.FileExists(proxyCnf) {
+			return fmt.Errorf("%s不存在", proxyCnf)
+		}
+		f, err := util.LoadMyCnfForFile(proxyCnf)
+		if err != nil {
+			logger.Error("加载%s配置失败%s", proxyCnf, err)
+			return err
+		}
+		// example logfile value: /data/mysql-proxy/10000/log/mysql-proxy.log
+		var logfile string
+		if logfile, err = f.GetProxyLogFilePath(); err != nil {
+			return err
+		}
+		sl := strings.Split(logfile, strconv.Itoa(port))
+		if len(sl) < 2 {
+			return fmt.Errorf("proxy logfile 可能格式不对:%s", logfile)
+		}
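+		// illustration (using the example logfile value above): splitting
+		// "/data/mysql-proxy/10000/log/mysql-proxy.log" on "10000" leaves
+		// sl[0] = "/data/mysql-proxy/", so the path.Join below rebuilds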
+ // /data/mysql-proxy/10000 + u.runTimeCtx.proxyInsLogDir[port] = path.Join(sl[0], strconv.Itoa(port)) + } + return +} + +// PreCheck 如果非强制下架。提前检查下proxy client 等情况 +// +// @receiver u +// @return err +func (u *UnInstallMySQLProxyComp) PreCheck() (err error) { + for _, port := range u.Params.Ports { + if !u.Params.Force { + db, err := native.InsObject{ + Host: u.Params.Host, + User: u.runTimeCtx.proxyAdminUser, + Pwd: u.proxyAdminPwd, + Port: port, + }.ConnProxyAdmin() + if err != nil { + logger.Error("连接%d的Admin Port 失败%s", port, err.Error()) + return err + } + inuse, err := db.CheckProxyInUse() + if err != nil { + logger.Error("检查Proxy可用性检查失败") + return err + } + if inuse { + return fmt.Errorf("检测到%d存在可用连接", port) + } + } + continue + } + return err +} + +// CleanCrontab 先注释掉Crontab 相关的计划,包括告警,以免下架告警 +// @receiver u +// @return err +// func (u *UnInstallMySQLProxyComp) CleanCrontab() (err error) { +// logger.Info("开始清理机器上的crontab") +// if err = osutil.CleanLocalCrontab(); err != nil { +// return err +// } +// return +// } + +// UnInstallProxy 停止Proxy 然后在备份目录 +// +// @receiver u +// @return err +func (u *UnInstallMySQLProxyComp) UnInstallProxy() (err error) { + for _, port := range u.Params.Ports { + if err = proxyutil.KillDownProxy(port); err != nil { + logger.Error("停止%d进程失败:%s", port, err.Error()) + return err + } + if err = osutil.SafeRmDir(u.runTimeCtx.proxyInsLogDir[port]); err != nil { + logger.Error("删除log dir失败:%s", u.runTimeCtx.proxyInsLogDir[port]) + return err + } + } + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/output.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/output.go new file mode 100644 index 0000000000..ff46190324 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/output.go @@ -0,0 +1,39 @@ +package components + +import ( + "encoding/json" + "fmt" +) + +// WrapperOutputString TODO +func WrapperOutputString(output string) string { + return fmt.Sprintf(`%s`, output) +} + +// WrapperOutput TODO +func WrapperOutput(v interface{}) (string, error) { + if b, e := json.Marshal(v); e != nil { + return "", e + } else { + return fmt.Sprintf(`%s`, string(b)), nil + } +} + +// PrintOutputCtx TODO +func PrintOutputCtx(v interface{}) error { + if ss, err := WrapperOutput(v); err != nil { + return err + } else { + fmt.Println(ss) + } + return nil +} + +// ToPrettyJson TODO +func ToPrettyJson(v interface{}) string { + if data, err := json.MarshalIndent(v, "", " "); err == nil { + // ss := "\n# use --helper to show explanations. 
example for payload:\n --payload-format raw --payload '%s'" + return string(data) + } + return "未找到合法的 example " +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/spider/restart_spider.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/spider/restart_spider.go new file mode 100644 index 0000000000..6bee005548 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/spider/restart_spider.go @@ -0,0 +1,78 @@ +// Package spider TODO +package spider + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/computil" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "fmt" +) + +// RestartSpiderComp TODO +type RestartSpiderComp struct { + GeneralParam *components.GeneralParam `json:"general"` + Params *RestartSpiderParam `json:"extend"` +} + +// RestartSpiderParam TODO +type RestartSpiderParam struct { + Host string `json:"host" validate:"required,ip"` + Port int `json:"port" validate:"required,gte=3306"` + myCnf *util.CnfFile + instObj *native.InsObject +} + +// Example TODO +func (d *RestartSpiderComp) Example() interface{} { + comp := RestartSpiderComp{ + Params: &RestartSpiderParam{ + Host: "1.1.1.1", + Port: 0, + }, + } + return comp +} + +// Init TODO +func (u *RestartSpiderComp) Init() (err error) { + f := util.GetMyCnfFileName(u.Params.Port) + u.Params.myCnf = &util.CnfFile{ + FileName: f, + } + if err := u.Params.myCnf.Load(); err != nil { + return err + } + dbSocket, err := u.Params.myCnf.GetMySQLSocket() + if err != nil { + return err + } + u.Params.instObj = &native.InsObject{ + Host: u.Params.Host, + Port: u.Params.Port, + User: u.GeneralParam.RuntimeAccountParam.AdminUser, + Pwd: u.GeneralParam.RuntimeAccountParam.AdminPwd, + Socket: dbSocket, + } + return +} + +// PreCheck TODO +func (u *RestartSpiderComp) PreCheck() (err error) { + _, err = u.Params.instObj.ConnSpiderAdmin() + if err != nil { + logger.Error(fmt.Sprintf("连接Admin失败。%s", err.Error())) + return err + } + return +} + +// RestartSpider TODO +func (u *RestartSpiderComp) RestartSpider() (err error) { + err = computil.RestartMysqlInstanceNormal(*u.Params.instObj) + if err != nil { + return err + } + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/add_slave_cluster_relationship.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/add_slave_cluster_relationship.go new file mode 100644 index 0000000000..4a8cdff07d --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/add_slave_cluster_relationship.go @@ -0,0 +1,200 @@ +package spiderctl + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "dbm-services/mysql/db-tools/dbactuator/pkg/tools" + "fmt" +) + +// AddSlaveClusterRoutingComp TODO +type AddSlaveClusterRoutingComp struct { + GeneralParam *components.GeneralParam `json:"general"` + Params *AddSlaveClusterRoutingParam `json:"extend"` + AddCtx + tools *tools.ToolSet +} + +// AddSlaveClusterRoutingParam TODO +type AddSlaveClusterRoutingParam struct { + Host string `json:"host" validate:"required,ip"` + Port int `json:"port" validate:"required,lt=65536,gte=3306"` + SlaveInstances []Instance `json:"slave_instances" validate:"required"` + SpiderSlaveInstances []Instance `json:"spider_slave_instances" 
validate:"required"` +} + +// AddCtx 定义任务执行时需要的上下文 +type AddCtx struct { + dbConn *native.DbWorker +} + +// Example TODO +func (a *AddSlaveClusterRoutingComp) Example() interface{} { + comp := AddSlaveClusterRoutingComp{ + Params: &AddSlaveClusterRoutingParam{ + Host: "1.1.1.1", + Port: 26000, + SlaveInstances: []Instance{ + { + Host: "3.3.3.3", + Port: 20000, + ShardID: 0, + }, + { + Host: "3.3.3.3", + Port: 20001, + ShardID: 1, + }, + }, + SpiderSlaveInstances: []Instance{ + { + Host: "3.3.3.3", + Port: 25000, + }, + { + Host: "3.3.3.3", + Port: 25001, + }, + }, + }, + } + return comp +} + +// Init 定义act的初始化内容 +func (a *AddSlaveClusterRoutingComp) Init() (err error) { + + // 连接本地实例的db + a.dbConn, err = native.InsObject{ + Host: a.Params.Host, + Port: a.Params.Port, + User: a.GeneralParam.RuntimeAccountParam.AdminUser, + Pwd: a.GeneralParam.RuntimeAccountParam.AdminPwd, + }.Conn() + if err != nil { + logger.Error("Connect %d failed:%s", a.Params.Port, err.Error()) + return err + } + return nil +} + +// PerCheck 临时方法,目前部分命令需要设置tc_admin后,新连接才能执行 +func (a *AddSlaveClusterRoutingComp) PerCheck() (err error) { + + // 先把session级别tc_admin 设置为0,让show命令生效 + if _, err := a.dbConn.Exec("set tc_admin = 0"); err != nil { + logger.Error("set tc_admin failed:[%s]", err.Error()) + return err + } + + // 连接本地实例的db,查看是否从实例连接它,如果有,认为在主从关系里面它是主,如果没有,暂时不信任 + slaveHosts, err := a.dbConn.ShowSlaveHosts() + if err != nil { + logger.Error("exec show-slave-hosts failed:[%s]", err.Error()) + return err + } + if len(slaveHosts) == 0 { + return fmt.Errorf("This instance is not currently the master, exit") + } + + return nil +} + +// AddSlaveRouting 在中控集群添加 remote-slave的域名关系 +// with database 执行失败之后 tdbctl create node wrapper 无法幂等,目前需要手动清洗待spider +func (a *AddSlaveClusterRoutingComp) AddSlaveRouting() (err error) { + + tdbctlUser := a.GeneralParam.RuntimeAccountParam.TdbctlUser + tdbctlPwd := a.GeneralParam.RuntimeAccountParam.TdbctlPwd + + // 首先添加 remote-slave 的路由信息,每添加一个slave,先判断信息是否存在, 然后执行加入 + for _, inst := range a.Params.SlaveInstances { + var execSQLs []string + err, isInsert := a.CheckInst(inst.Host, inst.Port) + if err != nil { + // 判断出现异常 + return err + } + if !isInsert { + // 实例已存在,跳过,目前中控保证添加成功后 ,mysql.servers表会存在 + continue + } + // 组装添加remote-slave节点的路由SQL + // 显性标记实例为主节点 + execSQLs = append(execSQLs, "set tc_admin=1;") + execSQLs = append(execSQLs, "set global tc_is_primary=1;") + execSQLs = append( + execSQLs, + fmt.Sprintf( + "tdbctl create node wrapper 'mysql_slave' options(user '%s', password '%s', host '%s', port %d, number %d);", + tdbctlUser, tdbctlPwd, inst.Host, inst.Port, inst.ShardID), + ) + if _, err := a.dbConn.ExecMore(execSQLs); err != nil { + logger.Error("tdbctl create node failed:[%s]", err.Error()) + return err + } + + } + // 然后添加spider-slave节点,每添加一个spider slave,需要判断是否已添加过 + for _, inst := range a.Params.SpiderSlaveInstances { + + var execSQLs []string + + err, isInsert := a.CheckInst(inst.Host, inst.Port) + if err != nil { + // 判断出现异常 + return err + } + if !isInsert { + // 实例已存在,跳过 + continue + } + // 组装添加spider-slave节点的路由SQL + // 显性标记实例为主节点 + execSQLs = append(execSQLs, "set tc_admin=1;") + execSQLs = append(execSQLs, "set global tc_is_primary=1;") + execSQLs = append( + execSQLs, + fmt.Sprintf( + "tdbctl create node wrapper 'SPIDER_SLAVE' options(user '%s', password '%s', host '%s', port %d ) with database;", + tdbctlUser, tdbctlPwd, inst.Host, inst.Port), + ) + if _, err := a.dbConn.ExecMore(execSQLs); err != nil { + logger.Error("tdbctl create node failed:[%s]", err.Error()) + 
return err
+		}
+
+	}
+
+	// finally, flush the routing info of the whole cluster
+	if _, err := a.dbConn.Exec("tdbctl flush routing;"); err != nil {
+		logger.Error("tdbctl flush routing failed:[%s]", err.Error())
+		return err
+	}
+	return nil
+}
+
+// CheckInst checks whether the routing info of the given cluster instance
+// has already been written before adding it
+func (a *AddSlaveClusterRoutingComp) CheckInst(host string, port int) (err error, result bool) {
+	var cnt int
+	// first set session-level tc_admin to 0 so that show commands take effect
+	if _, err := a.dbConn.Exec("set tc_admin = 0"); err != nil {
+		logger.Error("set tc_admin failed:[%s]", err.Error())
+		return err, false
+	}
+
+	checkSQL := fmt.Sprintf("select count(0) from mysql.servers where Host = '%s' and Port = %d ", host, port)
+	if err := a.dbConn.Queryxs(&cnt, checkSQL); err != nil {
+		logger.Error("检查失败%s", err.Error())
+		return err, false
+	}
+	if cnt != 0 {
+		// a non-zero count means the instance has already been written to the routing table
+		logger.Warn("实例【%s:%d】已经在中控实例录入过,这次先跳过", host, port)
+		return nil, false
+
+	}
+	return nil, true
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/add_temporary_spider.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/add_temporary_spider.go
new file mode 100644
index 0000000000..e5ba17b381
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/add_temporary_spider.go
@@ -0,0 +1,65 @@
+// Package spiderctl TODO
+package spiderctl
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/components"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/native"
+	"fmt"
+)
+
+// AddTmpSpiderComp holds the general parameters plus the action-specific parameters
+type AddTmpSpiderComp struct {
+	GeneralParam *components.GeneralParam `json:"general"`
+	Params       *AddTmpSpiderParam       `json:"extend"`
+	InitCtx
+}
+
+// AddTmpSpiderParam parameters required at execution time
+type AddTmpSpiderParam struct {
+	Host            string     `json:"host" validate:"required,ip"`
+	Port            int        `json:"port" validate:"required,lt=65536,gte=3306"`
+	SpiderInstances []Instance `json:"spider_instance" validate:"required"`
+}
+
+// Example TODO
+func (a *AddTmpSpiderComp) Example() interface{} {
+	comp := AddTmpSpiderComp{}
+	return comp
+}
+
+// Init initializes the db connection, which is then used to add the new routing info
+func (a *AddTmpSpiderComp) Init() (err error) {
+	a.dbConn, err = native.InsObject{
+		Host: a.Params.Host,
+		Port: a.Params.Port,
+		User: a.GeneralParam.RuntimeAccountParam.AdminUser,
+		Pwd:  a.GeneralParam.RuntimeAccountParam.AdminPwd,
+	}.Conn()
+	if err != nil {
+		logger.Error("Connect %d failed:%s", a.Params.Port, err.Error())
+		return err
+	}
+	return nil
+}
+
+// AddTmpSpider TODO
+func (a *AddTmpSpiderComp) AddTmpSpider() (err error) {
+	var execSQLs []string
+	tdbctlUser := a.GeneralParam.RuntimeAccountParam.TdbctlUser
+	tdbctlPwd := a.GeneralParam.RuntimeAccountParam.TdbctlPwd
+	for _, inst := range a.Params.SpiderInstances {
+		execSQLs = append(execSQLs, fmt.Sprintf(
+			"tdbctl create node wrapper 'SPIDER' options(user '%s', password '%s', host '%s', port %d) with database;",
+			tdbctlUser, tdbctlPwd, inst.Host, inst.Port,
+		),
+		)
+	}
+	execSQLs = append(execSQLs, "tdbctl flush routing;")
+	_, err = a.dbConn.ExecMore(execSQLs)
+	if err != nil {
+		logger.Error("tdbctl create node failed:[%s]", err.Error())
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/init_cluster_routing_relationship.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/init_cluster_routing_relationship.go
new file mode 100644
index 0000000000..fa8424a643
--- /dev/null
+++ 
b/dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/init_cluster_routing_relationship.go @@ -0,0 +1,151 @@ +package spiderctl + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "fmt" +) + +// 处理tendb cluster 部署阶段时初始化 tendb、ctl、spider 节点之间的路由关系 + +// InitClusterRoutingComp TODO +type InitClusterRoutingComp struct { + GeneralParam *components.GeneralParam `json:"general"` + Params *InitClusterRoutingParam `json:"extend"` + InitCtx +} + +// InitClusterRoutingParam TODO +type InitClusterRoutingParam struct { + Host string `json:"host" validate:"required,ip"` + Port int `json:"port" validate:"required,lt=65536,gte=3306"` + MysqlInstances []Instance `json:"mysql_instances" validate:"required"` + SpiderInstances []Instance `json:"spider_instances" validate:"required"` + CltInstances []Instance `json:"ctl_instances" validate:"required"` +} + +// Instance TODO +type Instance struct { + Host string `json:"host"` + Port int `json:"port"` + ShardID int `json:"shard_id"` +} + +// InitCtx 定义任务执行时需要的上下文 +type InitCtx struct { + dbConn *native.DbWorker +} + +// Example TODO +func (i *InitClusterRoutingComp) Example() interface{} { + comp := InitClusterRoutingComp{ + Params: &InitClusterRoutingParam{ + Host: "1.1.1.1", + Port: 26000, + MysqlInstances: []Instance{ + { + Host: "2.2.2.2", + Port: 20000, + ShardID: 0, + }, + { + Host: "2.2.2.2", + Port: 20001, + ShardID: 1, + }, + }, + SpiderInstances: []Instance{ + { + Host: "3.3.3.3", + Port: 25000, + }, + { + Host: "3.3.3.3", + Port: 25001, + }, + }, + CltInstances: []Instance{ + { + Host: "1.1.1.1", + Port: 26000, + }, + { + Host: "1.1.1.1", + Port: 26001, + }, + }, + }, + } + return comp +} + +// Init 定义act的初始化内容 +func (i *InitClusterRoutingComp) Init() (err error) { + + // 连接本地实例的db + i.dbConn, err = native.InsObject{ + Host: i.Params.Host, + Port: i.Params.Port, + User: i.GeneralParam.RuntimeAccountParam.AdminUser, + Pwd: i.GeneralParam.RuntimeAccountParam.AdminPwd, + }.Conn() + if err != nil { + logger.Error("Connect %d failed:%s", i.Params.Port, err.Error()) + return err + } + return nil +} + +// InitMySQLServers 初始化mysql.servers表 +// todo 发现tdbctl组件执行create node 会让tc_is_primary跳转,看看是不是bug +func (i *InitClusterRoutingComp) InitMySQLServers() (err error) { + var execSQLs []string + tdbctlUser := i.GeneralParam.RuntimeAccountParam.TdbctlUser + tdbctlPwd := i.GeneralParam.RuntimeAccountParam.TdbctlPwd + + // 先清理mysql.servers 表,保证活动节点执行的幂等性。但这么粗暴地删除会不会有安全隐患? 
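+	// rough illustration of the statement list assembled below, using the
+	// Example() payload (one create-node statement per mysql/SPIDER/TDBCTL instance):
+	//   truncate table mysql.servers;
+	//   set global tc_admin=1;
+	//   set global tc_is_primary=1;
+	//   tdbctl create node wrapper 'mysql' options(user ..., host '2.2.2.2', port 20000, number 0 );
+	//   ...
+	//   tdbctl flush routing;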
+	execSQLs = append(execSQLs, "truncate table mysql.servers;")
+
+	// explicitly mark this instance as the primary node
+	execSQLs = append(execSQLs, "set global tc_admin=1;")
+
+	// assemble the create node statements
+	for _, inst := range i.Params.MysqlInstances {
+		execSQLs = append(execSQLs, "set global tc_is_primary=1;")
+		execSQLs = append(
+			execSQLs,
+			fmt.Sprintf(
+				"tdbctl create node wrapper 'mysql' options(user '%s', password '%s', host '%s', port %d, number %d );",
+				tdbctlUser, tdbctlPwd, inst.Host, inst.Port, inst.ShardID,
+			),
+		)
+
+	}
+	for _, inst := range i.Params.SpiderInstances {
+		execSQLs = append(execSQLs, "set global tc_is_primary=1;")
+		execSQLs = append(
+			execSQLs,
+			fmt.Sprintf(
+				"tdbctl create node wrapper 'SPIDER' options(user '%s', password '%s', host '%s', port %d);",
+				tdbctlUser, tdbctlPwd, inst.Host, inst.Port,
+			),
+		)
+	}
+	for _, inst := range i.Params.CltInstances {
+		execSQLs = append(execSQLs, "set global tc_is_primary=1;")
+		execSQLs = append(
+			execSQLs,
+			fmt.Sprintf(
+				"tdbctl create node wrapper 'TDBCTL' options(user '%s', password '%s', host '%s', port %d);",
+				tdbctlUser, tdbctlPwd, inst.Host, inst.Port,
+			),
+		)
+	}
+	execSQLs = append(execSQLs, "tdbctl flush routing;")
+	if _, err := i.dbConn.ExecMore(execSQLs); err != nil {
+		logger.Error("init cluster routing failed:[%s]", err.Error())
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/spiderctl.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/spiderctl.go
new file mode 100644
index 0000000000..0c3feb70b4
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/spiderctl/spiderctl.go
@@ -0,0 +1,2 @@
+// Package spiderctl TODO
+package spiderctl
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/sysinit/sysinit.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/sysinit/sysinit.go
new file mode 100644
index 0000000000..7e9825c871
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/sysinit/sysinit.go
@@ -0,0 +1,87 @@
+// Package sysinit TODO
+package sysinit
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil"
+	"fmt"
+	"io/ioutil"
+)
+
+// SysInitParam TODO
+type SysInitParam struct {
+	OsMysqlUser string `json:"user"`
+	OsMysqlPwd  string `json:"pwd"`
+}
+
+/*
+	run the system init script (the former sysinit.sh),
+	which creates the mysql OS account and so on
+*/
+
+// SysInitMachine TODO
+func (s *SysInitParam) SysInitMachine() error {
+	logger.Info("start exec sysinit ...")
+	return ExecSysInitScript()
+}
+
+// SetOsPassWordForMysql TODO
+func (s *SysInitParam) SetOsPassWordForMysql() error {
+	logger.Info("start set os pwd ...")
+	return osutil.SetOSUserPassword(s.OsMysqlUser, s.OsMysqlPwd)
+}
+
+// ExecSysInitScript TODO
+func ExecSysInitScript() (err error) {
+	data, err := staticembed.SysInitMySQLScript.ReadFile(staticembed.SysInitMySQLScriptFileName)
+	if err != nil {
+		logger.Error("read sysinit script failed %s", err.Error())
+		return err
+	}
+	tmpScriptName := "/tmp/sysinit.sh"
+	if err = ioutil.WriteFile(tmpScriptName, data, 0755); err != nil {
+		logger.Error("write tmp script failed %s", err.Error())
+		return err
+	}
+	command := fmt.Sprintf("/bin/bash -c \"%s\"", tmpScriptName)
+	_, err = osutil.StandardShellCommand(false, command)
+	if err != nil {
+		logger.Error("exec sysinit script failed %s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// InitExternal TODO
+func (s *SysInitParam) InitExternal() (err error) {
+	data, err := staticembed.ExternalScript.ReadFile(staticembed.ExternalScriptFileName)
+	if err != nil {
+		logger.Error("read external script failed %s", err.Error())
+		return err
+	}
+	tmpScriptName := "/tmp/yum_install_perl_dep.sh"
+	if err = ioutil.WriteFile(tmpScriptName, data, 0755); err != nil {
+		logger.Error("write tmp script failed %s", err.Error())
+		return err
+	}
+	command := fmt.Sprintf("/bin/bash -c \"%s\"", tmpScriptName)
+	_, err = osutil.StandardShellCommand(false, command)
+	if err != nil {
+		logger.Error("yum install perl dep failed %s", err.Error())
+		return err
+	}
+	return nil
+}
+
+// GetTimeZone outputs the machine's time zone setting after machine init
+func (s *SysInitParam) GetTimeZone() (timeZone string, err error) {
+	execCmd := "date +%:z"
+	output, err := osutil.StandardShellCommand(false, execCmd)
+	if err != nil {
+		logger.Error("exec get date script failed %s", err.Error())
+		return "", err
+	}
+	return osutil.CleanExecShellOutput(output), nil
+
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/core/codes/codes.go b/dbm-services/mysql/db-tools/dbactuator/pkg/core/codes/codes.go
new file mode 100644
index 0000000000..478a7aee7e
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/core/codes/codes.go
@@ -0,0 +1,62 @@
+// Package codes TODO
+package codes
+
+/*
+@description: the error codes and their corresponding error types
+@rules:
+1. initialization errors use 30000-39999
+2. operating system errors use 40000-49999
+3. MySQL/Redis/Mongo instance operation errors use 50000-59999
+*/
+
+const (
+	// Unauthorized TODO
+	Unauthorized = 10001
+	// UnmarshalFailed TODO
+	UnmarshalFailed = 10002
+	// NotExistMountPoint TODO
+	NotExistMountPoint = 20001
+	// NotExistUser TODO
+	NotExistUser = 20002
+	// PermissionDeny TODO
+	PermissionDeny = 20003
+
+	// RenderConfigFailed TODO
+	RenderConfigFailed = 30001
+	// InitParamFailed TODO
+	InitParamFailed = 30002
+	// InitMySQLDirFailed TODO
+	InitMySQLDirFailed = 30003
+
+	// InstallMySQLFailed TODO
+	InstallMySQLFailed = 40001
+	// ExecuteShellFailed TODO
+	ExecuteShellFailed = 40002
+	// DecompressPkgFailed TODO
+	DecompressPkgFailed = 40003
+	// StartMySQLFailed TODO
+	StartMySQLFailed = 40004
+	// NotAvailableMem TODO
+	NotAvailableMem = 40005
+
+	// ImportPrivAndSchemaFailed TODO
+	ImportPrivAndSchemaFailed = 50001
+)
+
+// ErrorCodes TODO
+var ErrorCodes = map[int]string{
+	Unauthorized:              "没有进行用户认证",
+	UnmarshalFailed:           "反序列化失败",
+	NotExistMountPoint:        "没有可用的挂载点",
+	NotExistUser:              "用户不存在",
+	PermissionDeny:            "权限不足",
+	RenderConfigFailed:        "初始化配置失败",
+	InitParamFailed:           "初始化参数失败",
+	InitMySQLDirFailed:        "初始化MySQL目录失败",
+	InstallMySQLFailed:        "安装实例失败",
+	ExecuteShellFailed:        "执行Shell脚本失败",
+	DecompressPkgFailed:       "解压文件失败",
+	StartMySQLFailed:          "启动MySQL失败",
+	NotAvailableMem:           "内存不可用",
+	ImportPrivAndSchemaFailed: "导入权限和库失败",
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/const.go b/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/const.go
new file mode 100644
index 0000000000..c6e7d32def
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/const.go
@@ -0,0 +1,31 @@
+package cst
+
+import "time"
+
+const (
+	// Environment TODO
+	Environment = "enviroment"
+	// Test TODO
+	Test = "test"
+)
+
+const (
+	// TIMELAYOUT TODO
+	TIMELAYOUT = "2006-01-02 15:04:05"
+	// TIMELAYOUTSEQ TODO
+	TIMELAYOUTSEQ = "2006-01-02_15:04:05"
+	// TimeLayoutDir TODO
+	TimeLayoutDir = "20060102150405"
+)
+
+const (
+	// BK_PKG_INSTALL_PATH default path that packages are pushed to
+	BK_PKG_INSTALL_PATH = "/data/install"
+	// MYSQL_TOOL_INSTALL_PATH default tool install path
+	MYSQL_TOOL_INSTALL_PATH = "/home/mysql"
+)
+
+// GetNowTimeLayoutStr 20060102150405
+func 
GetNowTimeLayoutStr() string { + return time.Now().Format(TimeLayoutDir) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/cst.go b/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/cst.go new file mode 100644 index 0000000000..1dab3e254a --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/cst.go @@ -0,0 +1,2 @@ +// Package cst 常量 +package cst diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/dbbackup.go b/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/dbbackup.go new file mode 100644 index 0000000000..f3e4327450 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/dbbackup.go @@ -0,0 +1,14 @@ +package cst + +import "fmt" + +// BackupFile TODO +const BackupFile = "dbbackup" + +// BackupDir TODO +const BackupDir = "dbbackup-go" + +// GetNewConfigByPort TODO +func GetNewConfigByPort(port int) string { + return fmt.Sprintf("%s.%d.ini", BackupFile, port) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/mysql.go b/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/mysql.go new file mode 100644 index 0000000000..c9826bfa87 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/mysql.go @@ -0,0 +1,112 @@ +package cst + +import "fmt" + +const ( + // UsrLocal 系统路径 + UsrLocal = "/usr/local" + // MysqldInstallPath mysql/spider 二进制路径 + MysqldInstallPath = "/usr/local/mysql" + // TdbctlInstallPath tdbctl 二进制路径 + TdbctlInstallPath = "/usr/local/tdbctl" + // DefaultMysqlLogRootPath 默认存放mysql日志的根路径 + DefaultMysqlLogRootPath = "/data" + // AlterNativeMysqlLogRootPath 备选路径 + AlterNativeMysqlLogRootPath = "/data1" + // DefaultMysqlLogBasePath mysql 日志路径 + DefaultMysqlLogBasePath = "mysqllog" + // DefaultMysqlDataRootPath 默认存放mysql数据的根路径 + DefaultMysqlDataRootPath = "/data1" + // AlterNativeMysqlDataRootPath 默认存放mysql数据的根路径 + AlterNativeMysqlDataRootPath = "/data" + // DefaultMysqlDataBasePath 默认数据路径 + DefaultMysqlDataBasePath = "mysqldata" + // DefaultBackupBasePath 备份路径 + DefaultBackupBasePath = "dbbak" + // DefaultMycnfRootPath 默认配置文件路径 + DefaultMycnfRootPath = "/etc" + // DefaultMyCnfName 默认配置文件 + DefaultMyCnfName = "/etc/my.cnf" + // DefaultSocketName 默认 sock 文件 + DefaultSocketName = "mysql.sock" + // DefaultMySQLPort 默认端口 + DefaultMySQLPort = 3306 + // RelayLogFileMatch relaylog 模式 + RelayLogFileMatch = `(.*)/relay-log.bin` + // BinLogFileMatch binlog 模式 + BinLogFileMatch = `(.*)/binlog\d*.bin` + // ReBinlogFilename binlog 文件名 + ReBinlogFilename = `binlog\d*\.\d+$` + // DatadirMatch 实例数据目录模式 + DatadirMatch = `(.*)/mysqldata/\d+$` + // MysqlOsUserName 系统帐号 + MysqlOsUserName = "mysql" + // MysqlOsUserGroup 系统组 + MysqlOsUserGroup = "mysql" + // MySQLClientPath mysqlclient 路径 + MySQLClientPath = "/usr/local/mysql/bin/mysql" + // ChecksumInstallPath check path + ChecksumInstallPath = "/home/mysql/checksum" + // DbbackupGoInstallPath install path + DbbackupGoInstallPath = "/home/mysql/dbbackup-go" + // DBAToolkitPath dba 工具集 + DBAToolkitPath = "/home/mysql/dba-toolkit" + // MySQLCrondInstallPath crond安装路径 + MySQLCrondInstallPath = "/home/mysql/mysql-crond" + // MySQLMonitorInstallPath 监控安装路径 + MySQLMonitorInstallPath = "/home/mysql/mysql-monitor" + // RotateBinlogInstallPath rotate binlog + RotateBinlogInstallPath = "/home/mysql/rotate_binlog" + // DBAReportBase 上报根目录 + DBAReportBase = "/home/mysql/dbareport" +) + +const ( + // MIR_MASTER meta inner role + MIR_MASTER = "master" + // MIR_SLAVE inner role slave + MIR_SLAVE = "slave" + // MIR_REPEATER inner role repeater + MIR_REPEATER = "repeater" 
+ // MIR_ORPHAN inner role orphan + MIR_ORPHAN = "orphan" // 单节点集群的实例角色 +) + +// backup .info 中的 BackupRole +const ( + BackupRoleMaster = "MASTER" + BackupRoleSlave = "SLAVE" + BackupRoleRepeater = "REPEATER" + // BackupRoleOrphan 单节点备份行为 + BackupRoleOrphan = "ORPHAN" + BackupRoleSpiderMaster = "spider_master" + BackupRoleSpiderSlave = "spider_slave" +) + +// 规范的 备份类型名 +const ( + TypeGZTAB = "gztab" + TypeXTRA = "xtra" +) + +// LooseBackupTypes 不规范的 备份类型名,不区分大小写 +// dbbackup.conf 中的 backup_type +var LooseBackupTypes = map[string][]string{ + TypeGZTAB: {"GZTAB"}, + TypeXTRA: {"XTRA", "xtrabackup"}, +} + +// DbbackupConfigFilename 日常备份配置文件没 +func DbbackupConfigFilename(port int) string { + return fmt.Sprintf("dbbackup.%d.ini", port) +} + +const ( + // BackupTypeLogical 备份类型 + BackupTypeLogical = "logical" // mydumper + // BackupTypePhysical TODO + BackupTypePhysical = "physical" +) + +// MySQLCrondPort crond 端口 +const MySQLCrondPort = 9999 diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/os.go b/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/os.go new file mode 100644 index 0000000000..9164ef73b8 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/os.go @@ -0,0 +1,8 @@ +package cst + +// bits +const ( + Bit64 = "64" + Bit32 = "32" + OSBits = 32 << uintptr(^uintptr(0)>>63) +) diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/proxy.go b/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/proxy.go new file mode 100644 index 0000000000..6af6770dcb --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/core/cst/proxy.go @@ -0,0 +1,18 @@ +package cst + +// proxy related +const ( + ProxyAdminPortInc = 1000 + // string array, split by comma + // 初始化mysql会增加这个账户 + ProxyUserMonitorAccessAll = "MONITOR@%" + // Proxy + ProxyInstallPath = "/usr/local/mysql-proxy" + DefaultProxyDataRootPath = "/data" + AlterNativeProxyDataRootPath = "/data1" + DefaultProxyCnfName = "/etc/proxy.cnf" + DefaultProxyUserCnfName = "/etc/proxy_user.cnf" + DefaultAdminScripyLua = "/usr/local/mysql-proxy/lib/mysql-proxy/lua/admin.lua" + DefaultBackend = "1.1.1.1:3306" + DefaultProxyLogBasePath = "mysql-proxy" +) diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.go b/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.go new file mode 100644 index 0000000000..22c7b3e586 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.go @@ -0,0 +1,11 @@ +package staticembed + +import "embed" + +// DefaultSysSchemaSQLFileName TODO +const DefaultSysSchemaSQLFileName = "default_sys_schema.sql" + +// DefaultSysSchemaSQL TODO +// +//go:embed default_sys_schema.sql +var DefaultSysSchemaSQL embed.FS diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.sql b/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.sql new file mode 100644 index 0000000000..6c672c1bb9 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/default_sys_schema.sql @@ -0,0 +1,63 @@ +CREATE DATABASE if not exists test; +CREATE DATABASE IF NOT EXISTS `infodba_schema` DEFAULT CHARACTER SET utf8; +create table IF NOT EXISTS infodba_schema.free_space(a int) engine = InnoDB; +CREATE TABLE if not exists infodba_schema.conn_log( + conn_id bigint default NULL, + conn_time datetime default NULL, + user_name varchar(128) default NULL, + cur_user_name varchar(128) default NULL, + ip varchar(15) default NULL, + key 
conn_time(conn_time) +) engine = InnoDB; +create table if not exists infodba_schema.`checksum`( + master_ip char(32) NOT NULL DEFAULT '0.0.0.0', + master_port int(11) NOT NULL DEFAULT '3306', + db char(64) NOT NULL, + tbl char(64) NOT NULL, + chunk int(11) NOT NULL, + chunk_time float DEFAULT NULL, + chunk_index varchar(200) DEFAULT NULL, + lower_boundary blob, + upper_boundary blob, + this_crc char(40) NOT NULL, + this_cnt int(11) NOT NULL, + master_crc char(40) DEFAULT NULL, + master_cnt int(11) DEFAULT NULL, + ts timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (`master_ip`,`master_port`,`db`,`tbl`,`chunk`), + KEY `ts_db_tbl` (`ts`,`db`,`tbl`) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8; +replace into infodba_schema.checksum +values('0.0.0.0','3306', 'test', 'test', 0, NULL, NULL, '1=1', '1=1', '0', 0, '0', 0, now()); +CREATE TABLE if not exists infodba_schema.spes_status( + ip varchar(15) default '', + spes_id smallint default 0, + report_day int default 0, + PRIMARY KEY ip_id_day (ip, spes_id, report_day) +) engine = InnoDB; +CREATE TABLE IF NOT EXISTS infodba_schema.master_slave_check ( + check_item VARCHAR(64) NOT NULL PRIMARY KEY comment 'check_item to check', + master VARCHAR(64) comment 'the check_item status on master', + slave VARCHAR(64) comment 'the check_item status on slave', + check_result VARCHAR(64) comment 'the different value of master and slave' +) ENGINE = InnoDB; +INSERT INTO infodba_schema.master_slave_check +values('slave_delay_sec', now(), now(), 0); +CREATE TABLE IF NOT EXISTS infodba_schema.check_heartbeat ( + uid INT NOT NULL PRIMARY KEY, + ck_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP on UPDATE CURRENT_TIMESTAMP +) ENGINE = InnoDB; +REPLACE INTO infodba_schema.check_heartbeat(uid) value(1); +CREATE TABLE IF NOT EXISTS infodba_schema.query_response_time( + time_min INT(11) NOT NULL DEFAULT '0', + time VARCHAR(14) NOT NULL DEFAULT '', + total VARCHAR(100) NOT NULL DEFAULT '', + update_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, + PRIMARY KEY (time_min, time) +) engine = InnoDB; +-- conn_log 所有用户可写. 注会导致所有用户可以看见 infodba_schema +insert into `mysql`.`db`(`Host`,`Db`,`User`,`Select_priv`,`Insert_priv`, `Update_priv`,`Delete_priv`,`Create_priv`,`Drop_priv`) + values('%','infodba_schema','','Y','Y', 'N','N','N','N'); + +flush privileges; +flush logs; \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/external.sh b/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/external.sh new file mode 100644 index 0000000000..5494fca3ae --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/external.sh @@ -0,0 +1,15 @@ +#/bin/bash + +FOUND=$(grep nofile /etc/security/limits.conf |grep -v "#") +if [ ! 
-z "$FOUND" ]; then + sed -i '/ nofile /s/^/#/' /etc/security/limits.conf +fi +PKGS=("perl" "perl-Digest-MD5" "perl-Test-Simple" "perl-DBI" "perl-DBD-MySQL" "perl-Data-Dumper" "perl-Encode" "perl-Time-HiRes" "perl-JSON") +for pkg in ${PKGS[@]} +do + if rpm -q ${pkg} &> /dev/null;then + echo "$pkg already install" + continue + fi + yum install -y ${pkg} +done diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/staticembed.go b/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/staticembed.go new file mode 100644 index 0000000000..9b14946fdb --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/staticembed.go @@ -0,0 +1,2 @@ +// Package staticembed TODO +package staticembed diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.go b/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.go new file mode 100644 index 0000000000..46824f64a2 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.go @@ -0,0 +1,19 @@ +package staticembed + +import "embed" + +// SysInitMySQLScriptFileName TODO +var SysInitMySQLScriptFileName = "sysinit_mysql.sh" + +// ExternalScriptFileName TODO +var ExternalScriptFileName = "external.sh" + +// SysInitMySQLScript TODO +// +//go:embed sysinit_mysql.sh +var SysInitMySQLScript embed.FS + +// ExternalScript TODO +// +//go:embed external.sh +var ExternalScript embed.FS diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.sh b/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.sh new file mode 100644 index 0000000000..2bf8e4b28b --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/core/staticembed/sysinit_mysql.sh @@ -0,0 +1,84 @@ +#!/bin/sh +# 新建mysql.mysql用户 +## +# mysql scripts +## +# depends: ~/abs/ssh.exp ~/abs/scp.exp +function _exit() { + rm $0 + exit +} +#chmod o+rx /usr/local/ieod-public/sysinfo -R +#chmod o+rx /usr/local/agenttools/agent +#chmod o+rx /usr/local/agenttools/agent/agentRep* +#handler nscd restart +#如果存在mysql用户组就groupadd mysql -g 202 +egrep "^mysql" /etc/group >& /dev/null +if [ $? -ne 0 ] +then +groupadd mysql -g 202 +fi +#考虑到可能上架已运行的机器,userdel有风险,不采用这种方法 +#如果存在mysql用户就删掉(因为有可能1)id不为30019,2)不存在home目录) +id mysql >& /dev/null +if [ $? -ne 0 ] +then + useradd -m -d /home/mysql -g 202 -G users -u 30019 mysql + chage -M 99999 mysql + if [ ! -d /home/mysql ]; + then + mkdir -p /home/mysql + fi + chmod 755 /home/mysql + usermod -d /home/mysql mysql +fi +#如果存在mysql用户,上面那一步会报错,也不会创建/home/mysql,所以判断下并创建/home/mysql +if [ ! -d /data ]; +then + mkdir -p /data1/data/ + ln -s /data1/data/ /data +fi +if [ ! 
-d /data1 ]; +then + mkdir -p /data/data1/ + ln -s /data/data1 /data1 +fi +mkdir -p /data1/dbha +chown -R mysql /data1/dbha +mkdir -p /data/dbha +chown -R mysql /data/dbha +#mkdir -p /home/mysql/install +#chown -R mysql /home/mysql +#chmod -R a+rwx /home/mysql/install +mkdir -p /data/install +chown -R mysql /home/mysql +chown -R mysql /data/install +chmod -R a+rwx /data/install +rm -rf /home/mysql/install +ln -s /data/install /home/mysql/install +chown -R mysql /home/mysql/install +password="$2" +#password=$(echo "$2" | /home/mysql/install/lib/tools/base64 -d) +echo "mysql:$password" | chpasswd +FOUND=$(grep 'ulimit -n 204800' /etc/profile) +if [ -z "$FOUND" ]; then + echo 'ulimit -n 204800' >> /etc/profile +fi +FOUND=$(grep 'export LC_ALL=en_US' /etc/profile) +if [ -z "$FOUND" ]; then + echo 'export LC_ALL=en_US' >> /etc/profile +fi +FOUND=$(grep 'export PATH=/usr/local/mysql/bin/:$PATH' /etc/profile) +if [ -z "$FOUND" ]; then + echo 'export PATH=/usr/local/mysql/bin/:$PATH' >> /etc/profile +fi +FOUND_umask=$(grep '^umask 022' /etc/profile) +if [ -z "$FOUND_umask" ]; then + echo 'umask 022' >> /etc/profile +fi +FOUND=$(grep 'fs.aio-max-nr' /etc/sysctl.conf) +if [ -z "$FOUND" ];then +echo "fs.aio-max-nr=1024000" >> /etc/sysctl.conf +/sbin/sysctl -p +fi +_exit diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/native/db.go b/dbm-services/mysql/db-tools/dbactuator/pkg/native/db.go new file mode 100644 index 0000000000..56420694e0 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/native/db.go @@ -0,0 +1,146 @@ +package native + +import ( + "dbm-services/common/go-pubpkg/cmutil" + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "fmt" + "time" + + _ "github.com/go-sql-driver/mysql" // mysql TODO +) + +// Instance TODO +type Instance struct { + Host string `json:"host" example:"1.1.1.1"` // 当前实例的主机地址 + Port int `json:"port" example:"33060"` // 当前实例的端口 +} + +// Addr Ins Addr +func (o Instance) Addr() string { + return fmt.Sprintf("%s:%d", o.Host, o.Port) +} + +// InsObject 操作对象参数 +// 可以用于操作 mysql or proxy 实例 +type InsObject struct { + Host string `json:"host"` // 当前实例的主机地址 + Port int `json:"port"` // 当前实例的端口 + User string `json:"user"` // 连接当前实例的User + Pwd string `json:"pwd"` // 连接当前实例的User Pwd + Socket string `json:"socket"` // 连接socket + Charset string `json:"charset"` // 连接字符集 + Options string `json:"options"` // 其它选项 + // 构造的 mysql client 访问命令 + mysqlCli string +} + +func (o InsObject) tcpdsn() string { + return fmt.Sprintf("%s:%d", o.Host, o.Port) +} + +func (o InsObject) proxyAdminTcpDsn() string { + return fmt.Sprintf("%s:%d", o.Host, GetProxyAdminPort(o.Port)) +} + +func (o InsObject) spiderAdminTcpDsn() string { + return fmt.Sprintf("%s:%d", o.Host, o.Port) +} + +// GetProxyAdminPort TODO +func GetProxyAdminPort(port int) int { + return port + cst.ProxyAdminPortInc +} + +// Conn Connect Tcp/Ip +func (o InsObject) Conn() (*DbWorker, error) { + if o.Socket != "" { + return NewDbWorker(DsnBySocket(o.Socket, o.User, o.Pwd)) + } else { + return NewDbWorker(DsnByTcp(o.tcpdsn(), o.User, o.Pwd)) + } +} + +// ConnBySocket Connect Tcp/Ip +func (o InsObject) ConnBySocket() (*DbWorker, error) { + return NewDbWorker(DsnBySocket(o.Socket, o.User, o.Pwd)) +} + +// DsnByTcp TODO +func DsnByTcp(host, user, password string) string { + return fmt.Sprintf("%s:%s@tcp(%s)/?timeout=5s&multiStatements=true", user, password, host) +} + +// DsnBySocket TODO +func DsnBySocket(socket, user, 
password string) string { + return fmt.Sprintf("%s:%s@unix(%s)/?timeout=5s&multiStatements=true", user, password, socket) +} + +// MySQLClientCmd TODO +func (o InsObject) MySQLClientCmd(mysqlClient string) string { + cmd := fmt.Sprintf(`%s --host=%s --port=%d --user=%s -p%s`, mysqlClient, o.Host, o.Port, o.User, o.Pwd) + if o.Socket != "" { + cmd += fmt.Sprintf(" --socket %s", o.Socket) + } + if o.Charset != "" { + cmd += fmt.Sprintf(" --default-character-set=%s", o.Charset) + } + if o.Options != "" { + cmd += " " + o.Options + } + o.mysqlCli = cmd + return cmd +} + +// MySQLClientExec 执行 mysql ... -e "sql" 命令 +func (o InsObject) MySQLClientExec(mysqlClient, sqlStr string) (string, error) { + if o.mysqlCli == "" { + o.mysqlCli = o.MySQLClientCmd(mysqlClient) + } + cmd := fmt.Sprintf(`%s -A -Nse "%s"`, o.mysqlCli, sqlStr) + return osutil.ExecShellCommand(false, cmd) +} + +// CheckInstanceConnIdle TODO +func (o InsObject) CheckInstanceConnIdle(sysUsers []string, sleepTime time.Duration) error { + db, err := o.ConnBySocket() + if err != nil { + logger.Error("Connect %d failed,Err:%s", o.Port, err.Error()) + return err + } + defer db.Db.Close() + + // 检查非系统用户的processlist + processLists, err := db.ShowApplicationProcesslist(sysUsers) + if err != nil { + return fmt.Errorf("获取%d processlist 失败,err:%w", o.Port, err) + } + if len(processLists) > 0 { + return fmt.Errorf("实例%d 残留processlist 连接:%v", o.Port, processLists) + } + + // 检查show open tables + openTables, err := db.ShowOpenTables(sleepTime) + if err != nil { + return err + } + if len(openTables) > 0 { + return fmt.Errorf("实例%d 存在 open tables:%v", o.Port, openTables) + } + return nil +} + +// IsEmptyDB 过滤出系统库后,判断是否存在业务db +// +// @receiver dblist +// @return bool +func IsEmptyDB(dblist []string) bool { + var whiteDBs = DBSys + for _, db := range dblist { + if !cmutil.HasElem(db, whiteDBs) { + return false + } + } + return true +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/native/db_benchmark_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/native/db_benchmark_test.go new file mode 100644 index 0000000000..98f74390b8 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/native/db_benchmark_test.go @@ -0,0 +1,18 @@ +package native_test + +import ( + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "testing" +) + +// BenchmarkDbConn TODO +func BenchmarkDbConn(b *testing.B) { + d, err := native.NewDbWorker(native.DsnBySocket("/data/mysql/3306/mysql.sock", "root", "")) + if err != nil { + b.Fatalf("connect failed %s", err.Error()) + return + } + for n := 0; n < b.N; n++ { + d.SelectLongRunningProcesslist(0) + } +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/native/db_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/native/db_test.go new file mode 100644 index 0000000000..9a771ce59e --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/native/db_test.go @@ -0,0 +1,217 @@ +package native_test + +import ( + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "testing" + "time" + + "github.com/jmoiron/sqlx" +) + +func TestConnByTcp(t *testing.T) { + t.Log("start conn ... ") +} + +func TestShowVersion(t *testing.T) { + t.Log("start conn ... 
") + d, err := native.NewDbWorker(native.DsnBySocket("/data/mysql/3306/mysql.sock", "root", "")) + if err != nil { + t.Fatalf("connect failed %s", err.Error()) + return + } + ver, err := d.SelectVersion() + if err != nil { + t.Fatalf("get version failed %s", err.Error()) + return + } + t.Logf("current version is %s", ver) +} + +func TestShowSlaveStatus(t *testing.T) { + t.Log("start conn ... ") + d, err := native.NewDbWorker(native.DsnBySocket("/data/mysql/3306/mysql.sock", "root", "")) + if err != nil { + t.Fatalf("connect failed %s", err.Error()) + return + } + s, err := d.ShowSlaveStatus() + if err != nil { + t.Fatalf("show slave status failed %s", err.Error()) + return + } + t.Logf("current version is %v", s) +} + +func TestShowMasterStatus(t *testing.T) { + t.Log("start conn ... ") + d, err := native.NewDbWorker(native.DsnBySocket("/data/mysql/3306/mysql.sock", "root", "")) + if err != nil { + t.Fatalf("connect failed %s", err.Error()) + return + } + s, err := d.ShowMasterStatus() + if err != nil { + t.Fatalf("show master status failed %s", err.Error()) + return + } + t.Logf("master status is %v", s) +} + +func TestShowApplicationProcesslist(t *testing.T) { + t.Log("start conn ... ") + d, err := native.NewDbWorker(native.DsnBySocket("/data/mysql/3306/mysql.sock", "root", "")) + if err != nil { + t.Fatalf("connect failed %s", err.Error()) + return + } + s, err := d.ShowApplicationProcesslist([]string{"root"}) + if err != nil { + t.Fatalf("show processlist failed %s", err.Error()) + return + } + t.Log("ending ...", s) +} + +func TestShowOpenTables(t *testing.T) { + t.Log("start conn ... ") + d, err := native.NewDbWorker(native.DsnBySocket("/data/mysql/3306/mysql.sock", "root", "")) + if err != nil { + t.Fatalf("connect failed %s", err.Error()) + return + } + s, err := d.ShowOpenTables(time.Second * 1) + if err != nil { + t.Fatalf("show open tables failed %s", err.Error()) + return + } + t.Log("ending ...", s) +} + +func TestShowDatabases(t *testing.T) { + t.Log("start conn ... ") + d, err := native.NewDbWorker(native.DsnBySocket("/data/mysql/3306/mysql.sock", "root", "")) + if err != nil { + t.Fatalf("connect failed %s", err.Error()) + return + } + s, err := d.ShowServerCharset() + if err != nil { + t.Fatalf("ShowServerCharset failed %s", err.Error()) + return + } + t.Log("ending ...", s) +} + +func TestShowTables(t *testing.T) { + t.Log("start conn ... ") + d, err := native.NewDbWorker(native.DsnBySocket("/data/mysql/3306/mysql.sock", "root", "")) + if err != nil { + t.Fatalf("connect failed %s", err.Error()) + return + } + s, err := d.ShowTables(native.TEST_DB) + if err != nil { + t.Fatalf("ShowServerCharset failed %s", err.Error()) + return + } + t.Log("ending ...", s) +} + +type TableElements struct { + DbNname string `db:"table_schema"` + TableName string `db:"table_name"` +} + +func TestQueryx(t *testing.T) { + t.Log("start conn ... ") + d, err := native.NewDbWorker(native.DsnBySocket("/data/mysql/3306/mysql.sock", "root", "")) + if err != nil { + t.Fatalf("connect failed %s", err.Error()) + return + } + var data []TableElements + sysdbs := []string{native.TEST_DB} + q, args, err := sqlx.In( + "select table_schema,table_name from information_schema.tables where engine <> 'innodb' and table_schema not in (?)", + sysdbs, + ) + if err != nil { + t.Fatalf(err.Error()) + return + } + sqlxdb := sqlx.NewDb(d.Db, "mysql") + query := sqlxdb.Rebind(q) + err = d.Queryx(&data, query, args...) 
+ if err != nil { + t.Fatalf("query table error %s", err.Error()) + return + } + var ts native.ShowTableStatusResp + if err = d.Queryx(ts, "show table status from ? like ?", "mysql", "user"); err != nil { + t.Fatalf("%s", err.Error()) + return + } + t.Log("show tables;", data) + return +} + +func TestGetSingleGlobalVar(t *testing.T) { + t.Log("start testing ...") + d, err := native.NewDbWorker(native.DsnBySocket("/data/mysql/3306/mysql.sock", "root", "")) + if err != nil { + t.Fatalf("connect failed %s", err.Error()) + return + } + val, err := d.GetSingleGlobalVar("max_binlog_size") + // d.Queryxs(&val, fmt.Sprintf("show global variables like '%s'", "max_binlog_size")) + if err != nil { + t.Fatalf(err.Error()) + } + t.Log(val) +} + +func TestShowAppProcessList(t *testing.T) { + t.Log("start..") + d, err := native.NewDbWorker(native.DsnBySocket("/data/mysql/3306/mysql.sock", "root", "")) + if err != nil { + t.Fatalf("connect failed %s", err.Error()) + return + } + pls, err := d.SelectProcesslist([]string{"make"}) + if err != nil { + t.Fatal(err) + } + t.Log(pls) + t.Log("ending...") +} + +func TestFindLongQuerySQL(t *testing.T) { + t.Log("start...") + d, err := native.NewDbWorker(native.DsnBySocket("/data/mysql/3306/mysql.sock", "root", "")) + if err != nil { + t.Fatalf("connect failed %s", err.Error()) + return + } + pls, err := d.SelectLongRunningProcesslist(0) + for _, pl := range pls { + // if !pl.DB.Valid { + // continue + // } + // t.Log(pl.DB.String) + t.Log(pl.Info.String) + t.Log(pl.State.String) + } + t.Log("ending...") +} + +func TestExec(t *testing.T) { + d, err := native.NewDbWorker(native.DsnBySocket("/data/mysql/3306/mysql.sock", "root", "")) + if err != nil { + t.Fatalf("connect failed %s", err.Error()) + return + } + _, err = d.Exec("drop database makee11") + if err != nil { + t.Fatalf(err.Error()) + } +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/native/dbworker.go b/dbm-services/mysql/db-tools/dbactuator/pkg/native/dbworker.go new file mode 100644 index 0000000000..97fcb0e078 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/native/dbworker.go @@ -0,0 +1,861 @@ +package native + +import ( + "context" + "database/sql" + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil" + "fmt" + "strconv" + "strings" + "time" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "github.com/spf13/cast" +) + +// DbWorker TODO +type DbWorker struct { + Dsn string + Db *sql.DB +} + +// NewDbWorker TODO +func NewDbWorker(dsn string) (*DbWorker, error) { + var err error + dbw := &DbWorker{ + Dsn: dsn, + } + dbw.Db, err = sql.Open("mysql", dbw.Dsn) + if err != nil { + return nil, err + } + // check connect with timeout + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + if err := dbw.Db.PingContext(ctx); err != nil { + return nil, fmt.Errorf("ping context failed, err:%w", err) + } + if err := dbw.Db.Ping(); err != nil { + return nil, err + } + return dbw, nil +} + +// NewDbWorkerNoPing mysql-proxy supports very few queries, which do not include PINGs +// In this case, we do not ping after a connection is built, +// and let the function caller to decide if the connection is healthy +func NewDbWorkerNoPing(host, user, password string) (*DbWorker, error) { + var err error + dbw := &DbWorker{ + Dsn: fmt.Sprintf("%s:%s@tcp(%s)/?timeout=5s&multiStatements=true", user, password, host), + } + dbw.Db, err = sql.Open("mysql", 
dbw.Dsn)
+	if err != nil {
+		return nil, err
+	}
+	return dbw, nil
+}
+
+// Stop close connection
+func (h *DbWorker) Stop() {
+	if h.Db != nil {
+		if err := h.Db.Close(); err != nil {
+			logger.Warn("close db handler failed, err:%s", err.Error())
+		}
+	}
+}
+
+// Exec executes an arbitrary sql statement and returns the affected rows
+func (h *DbWorker) Exec(query string, args ...interface{}) (int64, error) {
+	ret, err := h.Db.Exec(query, args...)
+	if err != nil {
+		return 0, err
+	}
+	return ret.RowsAffected()
+}
+
+// ExecWithTimeout executes an arbitrary sql statement and returns the affected rows,
+// aborting when the given timeout elapses
+func (h *DbWorker) ExecWithTimeout(dura time.Duration, query string, args ...interface{}) (int64, error) {
+	ctx := context.Background()
+	ctx, cancel := context.WithTimeout(ctx, dura)
+	defer cancel()
+	ret, err := h.Db.ExecContext(ctx, query, args...)
+	if err != nil {
+		return 0, err
+	}
+	return ret.RowsAffected()
+}
+
+// ExecMore executes a batch of sql statements
+func (h *DbWorker) ExecMore(sqls []string) (rowsAffectedCount int64, err error) {
+	var c int64
+	for _, args := range sqls {
+		ret, err := h.Db.Exec(args)
+		if err != nil {
+			return rowsAffectedCount, fmt.Errorf("exec %s failed,err:%w", args, err)
+		}
+		if c, err = ret.RowsAffected(); err != nil {
+			return rowsAffectedCount, fmt.Errorf("exec %s failed,err:%w", args, err)
+		}
+		rowsAffectedCount += c
+	}
+	return
+}
+
+// Queryx executes a query using sqlx
+func (h *DbWorker) Queryx(data interface{}, query string, args ...interface{}) error {
+	logger.Info("Queryx:%s, args:%v", query, args)
+	db := sqlx.NewDb(h.Db, "mysql")
+	udb := db.Unsafe()
+	if err := udb.Select(data, query, args...); err != nil {
+		return fmt.Errorf("sqlx select failed, err:%w", err)
+	}
+	return nil
+}
+
+// Queryxs executes a query using sqlx and scans a single row
+func (h *DbWorker) Queryxs(data interface{}, query string) error {
+	logger.Info("Queryxs:%s", query)
+	db := sqlx.NewDb(h.Db, "mysql")
+	udb := db.Unsafe()
+	if err := udb.Get(data, query); err != nil {
+		return err
+	}
+	return nil
+}
+
+// QueryOneColumn query one column rows to slice
+func (h *DbWorker) QueryOneColumn(columnName string, query string) ([]string, error) {
+	logger.Info("QueryOneColumn: %s, column:%s", query, columnName)
+	if ret, err := h.Query(query); err != nil {
+		return nil, err
+	} else {
+		colValues := []string{}
+		if len(ret) > 0 {
+			row0 := ret[0]
+			if _, ok := row0[columnName]; !ok {
+				return nil, errors.Errorf("column name %s not found", columnName)
+			}
+		}
+		for _, row := range ret {
+			colValues = append(colValues, cast.ToString(row[columnName]))
+		}
+		return colValues, nil
+	}
+}
+
+// Query converts the rows of a result set into a list of maps;
+// returns "not row found" when the result set is empty
+func (h *DbWorker) Query(query string) ([]map[string]interface{}, error) {
+	return h.QueryWithArgs(query)
+}
+
+// QueryWithArgs converts the rows of a result set into a list of maps;
+// returns "not row found" when the result set is empty
+func (h *DbWorker) QueryWithArgs(query string, args ...interface{}) ([]map[string]interface{}, error) {
+	logger.Info("Query: %s, params:%v", query, args)
+	var rows *sql.Rows
+	var err error
+	if len(args) <= 0 {
+		rows, err = h.Db.Query(query)
+	} else {
+		rows, err = h.Db.Query(query, args...)
+	}
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err := rows.Close(); err != nil {
+			logger.Warn("close row failed, err:%s", err.Error())
+		}
+	}()
+	// get all columns name
+	columns, err := rows.Columns()
+	if err != nil {
+		return nil, err
+	}
+	values := make([]sql.RawBytes, len(columns))
+	scanArgs := make([]interface{}, len(values))
+	for i := range values {
+		scanArgs[i] = &values[i]
+	}
+	result := make([]map[string]interface{}, 0)
+	for rows.Next() {
+		row := 
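+		// each column is scanned into sql.RawBytes through scanArgs (pointers
+		// into values), then copied into a map keyed by column name; SQL NULL
+		// is rendered as the literal string "NULL" below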
make(map[string]interface{}) + err = rows.Scan(scanArgs...) + if err != nil { + return nil, err + } + var value string + for i, col := range values { + if col == nil { + value = "NULL" + } else { + value = string(col) + } + row[columns[i]] = value + } + result = append(result, row) + } + if err = rows.Err(); err != nil { + return nil, err + } + if len(result) == 0 { + return nil, fmt.Errorf(NotRowFound) + } + return result, nil +} + +// QueryGlobalVariables TODO +func (h *DbWorker) QueryGlobalVariables() (map[string]string, error) { + result := make(map[string]string) + rows, err := h.Db.Query("SHOW GLOBAL VARIABLES") + if err != nil { + return result, err + } + defer rows.Close() + for rows.Next() { + var key, val string + err := rows.Scan(&key, &val) + if err != nil { + continue + } + result[key] = val + } + return result, nil +} + +// ExecuteAdminSql for example, flush binary logs; set global binlog_format=row; +func (h *DbWorker) ExecuteAdminSql(admSql string) error { + _, err := h.Db.Exec(admSql) + if err != nil { + return err + } + return nil +} + +// IsNotRowFound TODO +func (h *DbWorker) IsNotRowFound(err error) bool { + return err.Error() == NotRowFound +} + +// ShowSlaveStatus 返回结构化的查询show slave status +func (h *DbWorker) ShowSlaveStatus() (resp ShowSlaveStatusResp, err error) { + err = h.Queryxs(&resp, "show slave status;") + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return ShowSlaveStatusResp{}, nil + } + } + return +} + +// TotalDelayBinlogSize 获取Slave 延迟的总binlog size +func (h *DbWorker) TotalDelayBinlogSize() (total int, err error) { + maxbinlogsize_str, err := h.GetSingleGlobalVar("max_binlog_size") + if err != nil { + return -1, err + } + maxbinlogsize, err := strconv.Atoi(maxbinlogsize_str) + if err != nil { + return -1, err + } + var ss ShowSlaveStatusResp + err = h.Queryxs(&ss, "show slave status;") + if err != nil { + return + } + masterBinIdx, err := getIndexFromBinlogFile(ss.MasterLogFile) + if err != nil { + return -1, err + } + relayBinIdx, err := getIndexFromBinlogFile(ss.RelayMasterLogFile) + if err != nil { + return -1, err + } + return (masterBinIdx-relayBinIdx)*maxbinlogsize + (ss.ExecMasterLogPos - ss.ReadMasterLogPos), nil +} + +// getIndexFromBinlogFile TODO +// eg:fileName binlog20000.224712 +// output: 224712 +func getIndexFromBinlogFile(fileName string) (seq int, err error) { + ss := strings.Split(fileName, ".") + if len(ss) <= 0 { + return -1, fmt.Errorf("empty after split . 
%s", fileName) + } + return strconv.Atoi(ss[1]) +} + +// ShowMasterStatus 返回结构化的查询show slave status +func (h *DbWorker) ShowMasterStatus() (resp MasterStatusResp, err error) { + err = h.Queryxs(&resp, "show master status;") + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return MasterStatusResp{}, nil + } + } + return +} + +// ShowSlaveHosts 返回结构化的查询show slave hosts +func (h *DbWorker) ShowSlaveHosts() (resp []SlaveHostResp, err error) { + err = h.Queryx(&resp, "show slave hosts;") + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return []SlaveHostResp{}, nil + } + } + return +} + +// SelectVersion 查询version +func (h *DbWorker) SelectVersion() (version string, err error) { + err = h.Queryxs(&version, "select version() as version;") + return +} + +// SelectNow 获取实例的当前时间。不是获取机器的,因为可能存在时区不一样 +func (h *DbWorker) SelectNow() (nowTime string, err error) { + err = h.Queryxs(&nowTime, "select now() as not_time;") + return +} + +// GetBinlogDir 获取实例的 binlog_dir 和 binlog file prefix +// 从 mysqld 在线变量里获取失败,则从 my.cnf 里获取 +func (h *DbWorker) GetBinlogDir(port int) (string, string, error) { + cnfFile := util.CnfFile{} + if logBinBasename, err := h.GetSingleGlobalVar("log_bin_basename"); err == nil { + if logBinBasename != "" { + binlogDir, namePrefix, err2 := cnfFile.ParseLogBinBasename(logBinBasename) + if err2 == nil { + return binlogDir, namePrefix, nil + } + } + } else { + logger.Warn("failed to get global variables log_bin_basename: %s", err.Error()) + } + cnfFile.FileName = util.GetMyCnfFileName(port) + if err := cnfFile.Load(); err != nil { + return "", "", err + } else { + return cnfFile.GetBinLogDir() + } +} + +// ShowApplicationProcesslist 查询是否存在非系统用户的processlist +// 已经忽略了dbsysUsers +func (h *DbWorker) ShowApplicationProcesslist(sysUsers []string) (processLists []ShowProcesslistResp, err error) { + users := append(sysUsers, dbSysUsers...) + query, args, err := sqlx.In("select * from information_schema.processlist where User not in (?)", users) + if err != nil { + return nil, err + } + err = h.Queryx(&processLists, query, args...) + return processLists, err +} + +// SelectProcesslist TODO +func (h *DbWorker) SelectProcesslist(usersIn []string) (processList []SelectProcessListResp, err error) { + query, args, err := sqlx.In("select * from information_schema.processlist where User in (?);", usersIn) + if err != nil { + return nil, err + } + if err := h.Queryx(&processList, query, args...); err != nil { + return nil, err + } + return processList, nil +} + +// SelectLongRunningProcesslist 查询Time > ? And state != 'Sleep'的processLists +func (h *DbWorker) SelectLongRunningProcesslist(time int) ([]SelectProcessListResp, error) { + var userExcluded []string = []string{"'repl'", "'system user'", "'event_scheduler'"} + var processList []SelectProcessListResp + query, args, err := sqlx.In( + "select * from information_schema.processlist where Command <> 'Sleep' and Time > ? 
and User Not In (?)", + time, userExcluded, + ) + if err != nil { + return nil, err + } + if err := h.Queryx(&processList, query, args...); err != nil { + return nil, err + } + return processList, nil +} + +// ShowOpenTables TODO +/* +ShowOpenTables + show open tables; + +---------------+---------------------------------------+--------+-------------+ + | Database | Table | In_use | Name_locked | + +---------------+---------------------------------------+--------+-------------+ + | dbPrdsDevBak | tb_app_max_bak | 0 | 0 | +*/ +// 查询实例上被打开的表 +// 忽略系统库 +func (h *DbWorker) ShowOpenTables(sleepTime time.Duration) (openTables []ShowOpenTablesResp, err error) { + if _, err = h.ExecWithTimeout(time.Second*5, "flush tables;"); err != nil { + return nil, err + } + // wait a moment before check opened tables + time.Sleep(sleepTime) + db := sqlx.NewDb(h.Db, "mysql") + udb := db.Unsafe() + // show open tables WHERE `Database` not in + inStr, _ := mysqlutil.UnsafeBuilderStringIn(DBSys, "'") + sqlStr := fmt.Sprintf("show open tables where `Database` not in (%s)", inStr) + if err := udb.Select(&openTables, sqlStr); err != nil { + return nil, err + } + return openTables, nil +} + +// GetSqlxDb TODO +func (h *DbWorker) GetSqlxDb() *sqlx.DB { + return sqlx.NewDb(h.Db, "mysql") +} + +// ShowServerCharset 查询version +func (h *DbWorker) ShowServerCharset() (charset string, err error) { + return h.GetSingleGlobalVar("character_set_server") +} + +// ShowSocket 获取当前实例的socket Value +// +// @receiver h +// @return socket +// @return err +func (h *DbWorker) ShowSocket() (socket string, err error) { + return h.GetSingleGlobalVar("socket") +} + +// GetSingleGlobalVar TODO +func (h *DbWorker) GetSingleGlobalVar(varName string) (val string, err error) { + var item MySQLGlobalVariableItem + sqlstr := fmt.Sprintf("show global variables like '%s'", varName) + if err = h.Queryxs(&item, sqlstr); err != nil { + return "", err + } + return item.Value, nil +} + +// SetSingleGlobalVarAndReturnOrigin set global and return origin value +func (h *DbWorker) SetSingleGlobalVarAndReturnOrigin(varName, varValue string) (val string, err error) { + originValue, err := h.GetSingleGlobalVar(varName) + if err != nil { + return "", err + } + sqlstr := fmt.Sprintf("SET GLOBAL %s='%s'", varName, varValue) + if err = h.ExecuteAdminSql(sqlstr); err != nil { + return "", err + } + return originValue, nil +} + +// SetSingleGlobalVar set global +func (h *DbWorker) SetSingleGlobalVar(varName, varValue string) error { + sqlstr := fmt.Sprintf("SET GLOBAL %s='%s'", varName, varValue) + if err := h.ExecuteAdminSql(sqlstr); err != nil { + return err + } + return nil +} + +// ShowDatabases 执行show database 获取所有的dbName +// +// @receiver h +// @return databases +// @return err +func (h *DbWorker) ShowDatabases() (databases []string, err error) { + err = h.Queryx(&databases, "show databases") + return +} + +// SelectDatabases 查询 databases +func (h *DbWorker) SelectDatabases(dbNameLike string) (databases []string, err error) { + inStr, _ := mysqlutil.UnsafeBuilderStringIn(DBSys, "'") + dbsSql := fmt.Sprintf("select SCHEMA_NAME from information_schema.SCHEMATA where SCHEMA_NAME not in (%s) ", inStr) + if dbNameLike != "" { + dbsSql += fmt.Sprintf(" and SCHEMA_NAME like '%s'", dbNameLike) + } + if databases, err = h.QueryOneColumn("SCHEMA_NAME", dbsSql); err != nil { + if h.IsNotRowFound(err) { + return nil, nil + } else { + return nil, err + } + } + return databases, nil +} + +// TableColumnDef TODO +type TableColumnDef struct { + ColName string + ColPos string // 
int? + ColType string +} + +// TableSchema TODO +type TableSchema struct { + DBName string + TableName string + DBTableStr string // dbX.tableY + SchemaStr string + + PrimaryKey []string + UniqueKey []string + ColumnMap map[string]TableColumnDef +} + +// SelectTables 查询 tables +// db=[db1,db2] tb=[tbl1,tbl2,tbl3] +// 从上面松散的信息中,获取真实的db.table,可能最终得到 db1.tbl1 db2.tbl2 db2.tbl3 +func (h *DbWorker) SelectTables(dbNames, tbNames []string) (map[string]TableSchema, error) { + queryStr := + "SELECT TABLE_SCHEMA,TABLE_NAME,TABLE_TYPE,TABLE_ROWS FROM information_schema.TABLES WHERE TABLE_TYPE='BASE TABLE' " + if len(dbNames) > 0 { + if dbs, err := mysqlutil.UnsafeBuilderStringIn(dbNames, "'"); err != nil { + return nil, err + } else { + queryStr += fmt.Sprintf(" AND TABLE_SCHEMA IN (%s)", dbs) + } + } + if len(tbNames) > 0 { + if tbs, err := mysqlutil.UnsafeBuilderStringIn(tbNames, "'"); err != nil { + return nil, err + } else { + queryStr += fmt.Sprintf(" AND TABLE_NAME IN (%s)", tbs) + } + } + + result, err := h.Query(queryStr) + if err != nil { + return nil, err + } + tblSchemas := make(map[string]TableSchema) + + if len(result) > 0 { + for _, row := range result { + dbtbl := fmt.Sprintf(`%s.%s`, row["TABLE_SCHEMA"].(string), row["TABLE_NAME"].(string)) + tblSchemas[dbtbl] = TableSchema{ + DBName: row["TABLE_SCHEMA"].(string), + TableName: row["TABLE_NAME"].(string), + DBTableStr: dbtbl, + } + } + + return tblSchemas, nil + } + return nil, errors.New("table not found") +} + +// ShowTables 获取指定库名下的所有表名 +// +// @receiver h +// @receiver db +// @return tables +// @return err +func (h *DbWorker) ShowTables(db string) (tables []string, err error) { + results, err := h.Query(fmt.Sprintf("show full tables from `%s`;", db)) + if err != nil { + return + } + for _, row := range results { + tableType, ok := row[TableType].(string) + if !ok { + return nil, fmt.Errorf("隐式转换失败%s", row[TableType]) + } + // 仅仅获取表,忽略视图等 + if tableType != "BASE TABLE" { + continue + } + dbCol := TablesInPrefix + db + dbData, ok := row[dbCol].(string) + if !ok { + return nil, errors.New("GetTables:change result(dbData) to string fail") + } + tables = append(tables, dbData) + } + return tables, nil +} + +// ShowEngines 返回执行 show engines; +// +// @receiver h +// @return engines +// @return err +func (h *DbWorker) ShowEngines() (engines []ShowEnginesResp, err error) { + err = h.Queryx(&engines, "show engines") + return +} + +// IsSupportTokudb 判断实例是否开启了Tokudb引擎 +// +// @receiver h +// @return support +// @return err +func (h *DbWorker) IsSupportTokudb() (support bool, err error) { + engines, err := h.ShowEngines() + if err != nil { + return false, err + } + for _, engine := range engines { + if strings.EqualFold(engine.Engine, "TokuDB") && engine.Support != "NO" { + return true, nil + } + } + return false, nil +} + +// IsEmptyInstance 判断实例是否是空实例 ps:过滤系统后 +// +// @receiver h +// @return bool +func (h *DbWorker) IsEmptyInstance() bool { + dbs, err := h.ShowDatabases() + if err != nil { + return false + } + return IsEmptyDB(dbs) +} + +// GetUserHosts 获取MySQL 实例上的user,host +// +// @receiver h +// @return users +// @return err +func (h *DbWorker) GetUserHosts() (users []UserHosts, err error) { + err = h.Queryx(&users, "select user,host from mysql.user") + return +} + +// ShowPrivForUser 获取create user && grant user 语句 +// +// @receiver h +// @receiver created 5.7 以上的版本需要show create user +// @receiver userhost +// @return grants +// @return err +func (h *DbWorker) ShowPrivForUser(created bool, userhost string) (grants []string, err error) { + 
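// Illustrative usage sketch, not part of this patch: how the metadata helpers
// above compose. Assumes an already-connected *DbWorker; the "app%" name
// pattern is a hypothetical example.
func listBusinessObjects(dbw *DbWorker) error {
	// system schemas are already filtered out by SelectDatabases
	dbs, err := dbw.SelectDatabases("app%")
	if err != nil {
		return err
	}
	for _, db := range dbs {
		// base tables only; views are skipped by ShowTables
		tables, err := dbw.ShowTables(db)
		if err != nil {
			return err
		}
		logger.Info("db %s has %d base tables", db, len(tables))
	}
	return nil
}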
if created { + var createUserSQL string + if err = h.Queryxs(&createUserSQL, fmt.Sprintf("show create user %s", userhost)); err != nil { + return + } + grants = append(grants, createUserSQL) + } + var grantsSQL []string + if err = h.Queryx(&grantsSQL, fmt.Sprintf("show grants for %s", userhost)); err != nil { + return + } + grants = append(grants, grantsSQL...) + return +} + +// CheckSlaveReplStatus TODO +// 检查从库的同步状态是否Ok +func (h *DbWorker) CheckSlaveReplStatus() (err error) { + return util.Retry( + util.RetryConfig{Times: 10, DelayTime: 1}, + func() error { + ss, err := h.ShowSlaveStatus() + if err != nil { + return err + } + if !ss.ReplSyncIsOk() { + return fmt.Errorf("主从同步状态异常,IoThread:%s,SqlThread:%s", ss.SlaveIORunning, ss.SlaveSQLRunning) + } + if !ss.SecondsBehindMaster.Valid { + return fmt.Errorf("SecondsBehindMaster Val Is Null") + } + if ss.SecondsBehindMaster.Int64 > 10 { + return fmt.Errorf("SecondsBehindMaster Great Than 10 Sec") + } + return nil + }, + ) +} + +// MySQLVarsCompare MySQL 实例参数差异对比 +// referInsConn 参照实例的是连接 +// h 对比的实例 +func (h *DbWorker) MySQLVarsCompare(referInsConn *DbWorker, checkVars []string) (err error) { + referVars, err := referInsConn.QueryGlobalVariables() + if err != nil { + return err + } + compareVars, err := h.QueryGlobalVariables() + if err != nil { + return err + } + var errMsg []string + for _, varName := range checkVars { + referV, r_ok := referVars[varName] + compareV, c_ok := compareVars[varName] + // 如果存在某个key,在某个节点查询不就输出waning + if !(r_ok && c_ok) { + continue + } + if strings.Compare(referV, compareV) != 0 { + errMsg = append(errMsg, fmt.Sprintf("存在差异: 变量名:%s Master:%s,Slave:%s", varName, referV, compareV)) + } + } + if len(errMsg) > 0 { + return fmt.Errorf(strings.Join(errMsg, "\n")) + } + return +} + +// ResetSlave TODO +// reset slave +func (h *DbWorker) ResetSlave() (err error) { + _, err = h.Exec("reset slave /*!50516 all */;") + return +} + +// StopSlave TODO +// reset slave +func (h *DbWorker) StopSlave() (err error) { + _, err = h.ExecWithTimeout(time.Second*30, "stop slave;") + return +} + +// Grants TODO +type Grants struct { + User string + Host string + Privs []string +} + +// TableType TODO +const TableType = "Table_type" + +// TablesInPrefix TODO +const TablesInPrefix = "Tables_in_" + +// GetTableUniqueKeys 本地获取表的 unique key +func (h *DbWorker) GetTableUniqueKeys(dbtable string) (uniqKeys map[string][]string, err1 error) { + sqlKeys := fmt.Sprintf(`SHOW INDEX FROM %s WHERE Key_name='PRIMARY'`, dbtable) + result, err := h.Query(sqlKeys) + uniqKeys = make(map[string][]string) + if err != nil && !strings.Contains(err.Error(), "not row found") { + return nil, fmt.Errorf("GetTableUniqueKeysRemote fail,error:%s", err.Error()) + } else if len(result) > 0 { + var pKey []string + for _, row := range result { + pKey = append(pKey, row["Column_name"].(string)) + } + uniqKeys["PRIMARY"] = pKey + } else { // no primary key defined, check unique key + sqlKeys = fmt.Sprintf(`SHOW INDEX FROM %s WHERE Non_unique=0`, dbtable) + result, err := h.Query(sqlKeys) + if err != nil { + return nil, fmt.Errorf("GetTableUniqueKeysRemote fail,error:%s", err.Error()) + } else if len(result) == 0 { + return nil, fmt.Errorf(`No PRIMARY or UNIQUE key found on table %s `, dbtable) + } else { + for _, row := range result { + Key_name := row["Key_name"].(string) + if _, ok := uniqKeys[Key_name]; ok { + uniqKeys[Key_name] = append(uniqKeys[Key_name], row["Column_name"].(string)) + } else { + var uKey []string + uKey = append(uKey, row["Column_name"].(string)) + 
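// Illustrative usage sketch, not part of this patch: a switchover-style
// precheck built from the replication helpers above. The variable list is a
// hypothetical example; CheckSlaveReplStatus already retries internally.
func precheckBeforeSwitch(master, slave *DbWorker) error {
	// IO/SQL threads must both be running and lag must be under 10s
	if err := slave.CheckSlaveReplStatus(); err != nil {
		return err
	}
	// critical variables should agree between the reference (master) and the slave
	return slave.MySQLVarsCompare(master, []string{"binlog_format", "character_set_server"})
}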
uniqKeys[Key_name] = uKey + } + } + } + } + return uniqKeys, nil +} + +// GetTableUniqueKeyBest 从更多个 unique key 返回最优的唯一键 +func GetTableUniqueKeyBest(uniqKeys map[string][]string) []string { + if _, ok := uniqKeys["PRIMARY"]; ok { + return uniqKeys["PRIMARY"] + } else { + // 或者列最少的unique key // 获取 not null unique key + var uniqKeyBest []string + var colCnt int = 9999 + for _, v := range uniqKeys { + if len(v) <= colCnt { + colCnt = len(v) + uniqKeyBest = v + } + } + return uniqKeyBest + } +} + +// GetTableColumns get table column info +func GetTableColumns(dbworker *DbWorker, dbName, tblName string) (map[string]TableSchema, error) { + // tblSchemas = {"dbX.tableY": {"a": {"name":"a", "pos":"1", "type":"int"}}} + /* + queryStr := fmt.Sprintf("SELECT TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,ORDINAL_POSITION,DATA_TYPE,COLUMN_TYPE " + + "FROM information_schema.COLUMNS WHERE TABLE_SCHEMA =%s AND TABLE_NAME = %s" + + " ORDER BY TABLE_SCHEMA,TABLE_NAME,ORDINAL_POSITION asc", dbName, tblName) + */ + queryStr := fmt.Sprintf( + "SELECT TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,ORDINAL_POSITION,DATA_TYPE,COLUMN_TYPE " + + "FROM information_schema.COLUMNS WHERE TABLE_SCHEMA =? AND TABLE_NAME = ? " + + "ORDER BY TABLE_SCHEMA,TABLE_NAME,ORDINAL_POSITION asc", + ) + result, err := dbworker.QueryWithArgs(queryStr, dbName, tblName) + // todo 这里的err没有正确捕捉到,比如sql执行错误 + if err != nil { + return nil, err + } + + dbtbl := fmt.Sprintf(`%s.%s`, dbName, tblName) + tblSchemas := make(map[string]TableSchema) + if len(result) > 0 { + tblColumns := make(map[string]TableColumnDef) + for _, row := range result { + colDef := TableColumnDef{ + ColName: row["COLUMN_NAME"].(string), + ColPos: row["ORDINAL_POSITION"].(string), + ColType: row["DATA_TYPE"].(string), + } + tblColumns[row["COLUMN_NAME"].(string)] = colDef + } + tblSchemas[dbtbl] = TableSchema{ + ColumnMap: tblColumns, + } + return tblSchemas, nil + } + return nil, errors.New("table not found") +} + +// GetTableColumnList TODO +func GetTableColumnList(dbworker *DbWorker, dbName, tblName string) ([]string, error) { + // tblSchemas = {"dbX.tableY": {"a": {"name":"a", "pos":"1", "type":"int"}}} + /* + queryStr := fmt.Sprintf("SELECT TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,ORDINAL_POSITION,DATA_TYPE,COLUMN_TYPE " + + "FROM information_schema.COLUMNS WHERE TABLE_SCHEMA =%s AND TABLE_NAME = %s" + + " ORDER BY TABLE_SCHEMA,TABLE_NAME,ORDINAL_POSITION asc", dbName, tblName) + */ + queryStr := fmt.Sprintf( + "SELECT TABLE_SCHEMA,TABLE_NAME,COLUMN_NAME,ORDINAL_POSITION,DATA_TYPE,COLUMN_TYPE " + + "FROM information_schema.COLUMNS WHERE TABLE_SCHEMA =? AND TABLE_NAME = ? 
" + + "ORDER BY TABLE_SCHEMA,TABLE_NAME,ORDINAL_POSITION asc", + ) + result, err := dbworker.QueryWithArgs(queryStr, dbName, tblName) + // todo 这里的err没有正确捕捉到,比如sql执行错误 + if err != nil { + return nil, err + } + var columnList []string + if len(result) > 0 { + for _, row := range result { + columnList = append(columnList, row["COLUMN_NAME"].(string)) + } + return columnList, nil + } + return nil, errors.New("table not found") +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/native/proxy.go b/dbm-services/mysql/db-tools/dbactuator/pkg/native/proxy.go new file mode 100644 index 0000000000..d56fd62ade --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/native/proxy.go @@ -0,0 +1,123 @@ +package native + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "fmt" + "strings" +) + +const ( + // ProxyMinVersion TODO + ProxyMinVersion = "mysql-proxy 0.8.2.4" +) + +// ProxyAdminDbWork TODO +type ProxyAdminDbWork struct { + DbWorker +} + +// ConnProxyAdmin TODO +// Connect Proxy Admin Port By Tcp/Ip +func (o InsObject) ConnProxyAdmin() (*ProxyAdminDbWork, error) { + dbwork, err := NewDbWorkerNoPing(o.proxyAdminTcpDsn(), o.User, o.Pwd) + return &ProxyAdminDbWork{DbWorker: *dbwork}, err +} + +// SelectBackend 查询ProxyBackends +func (h *ProxyAdminDbWork) SelectBackend() (backends ProxyAdminBackend, err error) { + err = h.Queryxs(&backends, "SELECT * FROM backends;") + return +} + +// SelectProxyVersion 查询Proxy Version +// +// @receiver h +// @return backends +// @return err +func (h *ProxyAdminDbWork) SelectProxyVersion() (version string, err error) { + err = h.Queryxs(&version, "select version();") + return +} + +// AddUser TODO +// refresh_users('user@host','flag') +// Add Proxy Users +func (h *ProxyAdminDbWork) AddUser(userDsn string) (err error) { + _, err = h.Exec(fmt.Sprintf("refresh_users('%s','+')", userDsn)) + return +} + +// RefreshBackends TODO +// refresh_backends('host:port',flag) +// Add Proxy Users +func (h *ProxyAdminDbWork) RefreshBackends(host string, port int) (err error) { + refreshSQL := fmt.Sprintf("refresh_backends('%s:%d',1)", host, port) + logger.Info(refreshSQL) + _, err = h.Exec(refreshSQL) + return +} + +// CheckProxyInUse 检查Proxy backend 是否等于 1.1.1.1:3306 还有是否存在连接的Client +// +// @receiver h +// @return inuse +// @return err +func (h *ProxyAdminDbWork) CheckProxyInUse() (inuse bool, err error) { + backend, err := h.SelectBackend() + if err != nil { + return false, err + } + if strings.Compare(backend.Address, cst.DefaultBackend) != 0 && backend.ConnectedClients > 0 { + return true, nil + } + return false, nil +} + +// CloneProxyUser 定义proxy克隆白名单的功能 +func (h *ProxyAdminDbWork) CloneProxyUser(target_db *ProxyAdminDbWork) (err error) { + // refresh_users('a@b,c@d,e@f','+'); + users, err := h.GetAllProxyUsers() + if err != nil { + return err + } + + userStr := strings.Join(users, ",") + refreshSQL := fmt.Sprintf("refresh_users('%s','+');", userStr) + + _, err = target_db.Exec(refreshSQL) + if err != nil { + return err + } + return nil +} + +// GetAllProxyUsers 定义查询proxy 所有user 功能 +func (h *ProxyAdminDbWork) GetAllProxyUsers() (users []string, err error) { + var sql = "select * from users;" + + rows, err := h.Query(sql) + if err != nil { + return nil, err + } + for _, row := range rows { + users = append(users, row["user@ip"].(string)) + } + return +} + +// CheckBackend TODO +func (h *ProxyAdminDbWork) CheckBackend(host string, port int) (err error) { + c, err := h.SelectBackend() + if err != nil { + return 
err + } + if strings.Compare(c.Address, fmt.Sprintf("%s:%d", host, port)) != 0 { + return fmt.Errorf( + "change backend get an error, expected %s but is %s", + fmt.Sprintf("%s:%d", host, port), + c.Address, + ) + } + return err +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/native/proxy_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/native/proxy_test.go new file mode 100644 index 0000000000..ae579df3b5 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/native/proxy_test.go @@ -0,0 +1,57 @@ +package native_test + +import ( + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "testing" +) + +func TestConnProxyAdminPort(t *testing.T) { + t.Log("start conn proxy") + pc, err := native.NewDbWorkerNoPing("{IP}:11000", "proxy", "xx") + if err != nil { + t.Fatalf("conn proxy failed %s", err.Error()) + return + } + ver, err := pc.SelectVersion() + if err != nil { + t.Fatalf("get version failed %s", err.Error()) + return + } + t.Logf("current version is %s", ver) +} + +func TestConnProxyAdminAddUser(t *testing.T) { + t.Log("start conn proxy") + pc, err := native.NewDbWorkerNoPing("{IP}:11000", "proxy", "xx") + if err != nil { + t.Fatalf("conn proxy failed %s", err.Error()) + return + } + af, err := pc.Exec("refresh_users('user@%','+') ") + if err != nil { + t.Fatalf("refresh_users %s", err.Error()) + return + } + t.Logf("current refresh_users is %d", af) +} + +func TestGetProxyBackends(t *testing.T) { + t.Log("start conn proxy") + b := native.InsObject{ + Host: "", + Port: 10000, + User: "", + Pwd: "", + } + pc, err := b.ConnProxyAdmin() + if err != nil { + t.Fatalf("conn proxy failed %s", err.Error()) + return + } + backends, err := pc.SelectBackend() + if err != nil { + t.Fatalf("SelectBackends %s", err.Error()) + return + } + t.Logf("current SelectBackends is %v", backends) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/native/spider.go b/dbm-services/mysql/db-tools/dbactuator/pkg/native/spider.go new file mode 100644 index 0000000000..786e785fdf --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/native/spider.go @@ -0,0 +1,12 @@ +package native + +// SpiderAdminDbWork TODO +type SpiderAdminDbWork struct { + DbWorker +} + +// ConnSpiderAdmin TODO +func (o InsObject) ConnSpiderAdmin() (*SpiderAdminDbWork, error) { + dbwork, err := NewDbWorkerNoPing(o.spiderAdminTcpDsn(), o.User, o.Pwd) + return &SpiderAdminDbWork{DbWorker: *dbwork}, err +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/native/types.go b/dbm-services/mysql/db-tools/dbactuator/pkg/native/types.go new file mode 100644 index 0000000000..a7f0cb1dab --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/native/types.go @@ -0,0 +1,165 @@ +// Package native TODO +/* + * @Description: database operation sets + */ +package native + +import ( + "database/sql" + "strings" +) + +const ( + // SLAVE_IO_RUNING_OK TODO + SLAVE_IO_RUNING_OK = "YES" + // SLAVE_SQL_RUNING_OK TODO + SLAVE_SQL_RUNING_OK = "YES" +) + +// NotRowFound TODO +const NotRowFound = "not row found" +const ( + // INFODBA_SCHEMA TODO + INFODBA_SCHEMA = "infodba_schema" + // TEST_DB TODO + TEST_DB = "test" + // INFO_SCHEMA TODO + INFO_SCHEMA = "information_schema" + // PERF_SCHEMA TODO + PERF_SCHEMA = "performance_schema" +) + +var dbSysUsers = []string{"event_scheduler", "system user"} + +// DBSys TODO +var DBSys = []string{"mysql", "sys", INFO_SCHEMA, PERF_SCHEMA, INFODBA_SCHEMA, TEST_DB} + +// DBUserAdmin TODO +var DBUserAdmin = "ADMIN" + +// ShowTableStatusResp TODO +type ShowTableStatusResp struct { + 
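// Illustrative usage sketch, not part of this patch: the admin-port flow the
// proxy helpers above are designed for -- add an account, repoint the proxy
// at a new backend, then verify. Host, port and account values are
// hypothetical placeholders.
func switchProxyBackend(o InsObject) error {
	pc, err := o.ConnProxyAdmin()
	if err != nil {
		return err
	}
	defer pc.Stop()
	if err := pc.AddUser("app@1.1.1.1"); err != nil { // refresh_users('...','+')
		return err
	}
	if err := pc.RefreshBackends("2.2.2.2", 20000); err != nil {
		return err
	}
	return pc.CheckBackend("2.2.2.2", 20000) // backends should now match host:port
}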
Name string `db:"Name"` + Engine string `db:"Engine"` + Version int `db:"Version"` + RowFormat string `db:"Row_format"` + Rows int `db:"Rows"` + AvgRowLength int `db:"Avg_row_length"` + DataLength int `db:"Data_length"` + MaxDataLength int `db:"Max_data_length"` + IndexLength int `db:"Index_length"` + DataFree int `db:"Data_free"` + Collation string `db:"Collation"` + // Auto_increment + // more ... +} + +// ShowSlaveStatusResp TODO +type ShowSlaveStatusResp struct { + MasterHost string `json:"Master_Host" db:"Master_Host"` + MasterPort int `json:"Master_Port" db:"Master_Port"` + MasterUser string `json:"Master_User" db:"Master_User"` + MasterLogFile string `json:"Master_Log_File" db:"Master_Log_File"` + ReadMasterLogPos int `json:"Read_Master_Log_Pos" db:"Read_Master_Log_Pos"` + RelayMasterLogFile string `json:"Relay_Master_Log_File" db:"Relay_Master_Log_File"` + ExecMasterLogPos int `json:"Exec_Master_Log_Pos" db:"Exec_Master_Log_Pos"` + SlaveIORunning string `json:"Slave_IO_Running" db:"Slave_IO_Running"` + SlaveSQLRunning string `json:"Slave_SQL_Running" db:"Slave_SQL_Running"` + SecondsBehindMaster sql.NullInt64 `json:"Seconds_Behind_Master" db:"Seconds_Behind_Master"` +} + +// ReplSyncIsOk TODO +// 判断主从同步是否 Ok +func (s ShowSlaveStatusResp) ReplSyncIsOk() bool { + var empty ShowSlaveStatusResp + if s == empty { + return false + } + ioRunningIsOk := strings.EqualFold(strings.ToUpper(s.SlaveIORunning), strings.ToUpper(SLAVE_IO_RUNING_OK)) + sqlRunningIsOk := strings.EqualFold(strings.ToUpper(s.SlaveSQLRunning), strings.ToUpper(SLAVE_SQL_RUNING_OK)) + return ioRunningIsOk && sqlRunningIsOk +} + +// MasterStatusResp TODO +type MasterStatusResp struct { + File string `json:"bin_file" db:"File"` + Position int `json:"bin_position" db:"Position"` + BinlogDoDB string `json:"binlog_db_db" db:"Binlog_Do_DB"` + BinlogIgnoreDB string `json:"binlog_ignore_db" db:"Binlog_Ignore_DB"` + ExecutedGtidSet string `json:"executed_gtid_set" db:"Executed_Gtid_Set"` +} + +// SlaveHostResp TODO +type SlaveHostResp struct { + ServerID string `json:"server_id" db:"Server_id"` + Host string `json:"host" db:"Host"` + Port int `json:"port" db:"Port"` + MasterID string `json:"master_id" db:"Master_id"` + SlaveUUID string `json:"slave_uuid" db:"Slave_UUID"` +} + +// ShowProcesslistResp TODO +type ShowProcesslistResp struct { + ID uint64 `json:"id" db:"Id"` + User string `json:"user" db:"User"` + Host sql.NullString `json:"host" db:"Host"` + DB sql.NullString `json:"db" db:"db"` + // Command sql.NullString `json:"command" db:"Command"` + Time int `json:"time" db:"Time"` + State sql.NullString `json:"state" db:"State"` + Info sql.NullString `json:"info" db:"Info"` +} + +// SelectProcessListResp TODO +type SelectProcessListResp struct { + ID uint64 `json:"ID" db:"ID"` + User string `json:"USER" db:"USER"` + Host string `json:"HOST" db:"HOST"` + DB sql.NullString `json:"DB" db:"DB"` + Command string `json:"COMMAND" db:"COMMAND"` + Time string `json:"TIME" db:"TIME"` + State sql.NullString `json:"STATE" db:"STATE"` + Info sql.NullString `json:"INFO" db:"INFO"` + TimeMs int64 `json:"TIME_MS" db:"TIME_MS"` + RowsSent uint64 `json:"ROWS_SENT" db:"ROWS_SENT"` + RowsExamined uint64 `json:"ROWS_EXAMINED" db:"ROWS_EXAMINED"` + RowsRead uint64 `json:"ROWS_READ" db:"ROWS_READ"` + OSThreadID uint64 `json:"OS_THREAD_ID" db:"OS_THREAD_ID"` +} + +// ProxyAdminBackend TODO +// SELECT * FROM backends +type ProxyAdminBackend struct { + BackendNdx int `json:"backend_ndx"` + Address string `json:"address"` + State string `json:"state"` + 
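// Illustrative usage sketch, not part of this patch: how ShowSlaveStatusResp
// is consumed. A zero-value struct (instance is not a slave) makes
// ReplSyncIsOk return false; the 10s threshold here is a hypothetical choice.
func replicationHealthy(dbw *DbWorker) (bool, error) {
	ss, err := dbw.ShowSlaveStatus()
	if err != nil {
		return false, err
	}
	if !ss.ReplSyncIsOk() { // both IO and SQL threads must report "Yes"
		return false, nil
	}
	// Seconds_Behind_Master is NULL while the SQL thread is stopped,
	// hence the sql.NullInt64 wrapper
	return ss.SecondsBehindMaster.Valid && ss.SecondsBehindMaster.Int64 < 10, nil
}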
Type string `json:"type"` + Uuid string `json:"uuid"` + ConnectedClients int `json:"connected_clients"` +} + +// ShowOpenTablesResp TODO +type ShowOpenTablesResp struct { + Database string `db:"Database"` + Table string `db:"Table"` + In_use int `db:"In_use"` +} + +// MySQLGlobalVariableItem TODO +type MySQLGlobalVariableItem struct { + VariableName string `db:"Variable_name"` + Value string `db:"Value"` +} + +// ShowEnginesResp TODO +type ShowEnginesResp struct { + Engine string `db:"Engine"` + Support string `db:"Support"` + // ... complete +} + +// UserHosts TODO +type UserHosts struct { + User string `db:"user"` + Host string `db:"host"` +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/rollback/rollback.go b/dbm-services/mysql/db-tools/dbactuator/pkg/rollback/rollback.go new file mode 100644 index 0000000000..3c22d8511c --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/rollback/rollback.go @@ -0,0 +1,232 @@ +// Package rollback TODO +package rollback + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "fmt" + "os" + "path" +) + +const ( + // OP_DEL TODO + OP_DEL = "DEL" + // OP_MOVE TODO + OP_MOVE = "MOVE" +) + +// RollBackObjects TODO +type RollBackObjects struct { + RollBackProcessList []RollBackProcess `json:"rollback_processlist"` + RollBackFiles []RollBackFile `json:"rollback_files"` +} + +// 这些目录无论如何都不能直接删除 +// 我们原子任务主要操作相关目录 +var safeDirs = map[string]struct{}{"/": {}, "/etc": {}, "/usr": {}, "/usr/local": {}, "/data": {}, "/data1": {}} + +// RollBackFile 文件包括 常规文件 目录 软连接等 +// 回滚操作不记录删除文件的操作 +// 因为删除文件没有源文件无法恢复 +type RollBackFile struct { + // 文件必须是绝对路径 + FileName string `json:"file_name"` // DEL,MOVE 后的文件名称 + OriginFileName string `json:"origin_file_name"` // DEL,MOVE 前的文件名称 + OriginOpera string `json:"origin_opera"` // 原始操作 DEL:新增文件 MOVE:文件重命名 +} + +// RollBackProcess 暂定回滚由任务拉起的新进程 +// 已经kill进程,暂不恢复 +type RollBackProcess struct { + StartOsUser string `json:"start_os_user"` // os启动用户 + ProcessId int `json:"process_id"` +} + +// AddDelFile TODO +func (r *RollBackObjects) AddDelFile(fileName string) { + r.RollBackFiles = append( + r.RollBackFiles, RollBackFile{ + FileName: fileName, + OriginOpera: OP_DEL, + }, + ) +} + +// AddMoveFile TODO +func (r *RollBackObjects) AddMoveFile(originFileName, fileName string) { + r.RollBackFiles = append( + r.RollBackFiles, RollBackFile{ + FileName: fileName, + OriginFileName: originFileName, + OriginOpera: OP_MOVE, + }, + ) +} + +// AddKillProcess TODO +func (r *RollBackObjects) AddKillProcess(pid int) { + r.RollBackProcessList = append( + r.RollBackProcessList, RollBackProcess{ + ProcessId: pid, + }, + ) +} + +// RollBack TODO +func (r *RollBackObjects) RollBack() (err error) { + if err = r.RollBack_Processlists(); err != nil { + return err + } + if err = r.RollBack_Files(); err != nil { + return err + } + return nil +} + +// RollBack_Processlists TODO +func (r *RollBackObjects) RollBack_Processlists() (err error) { + if len(r.RollBackProcessList) <= 0 { + return nil + } + for _, rp := range r.RollBackProcessList { + if err = rp.Rollback(); err != nil { + return + } + } + return err +} + +// RollBack_Files TODO +func (r *RollBackObjects) RollBack_Files() (err error) { + if len(r.RollBackFiles) <= 0 { + return nil + } + for _, rfile := range r.RollBackFiles { + if err = rfile.RollBack(); err != nil { + return + } + } + return err +} + +// RollBack TODO +// os.Stat 和 os.Lstat 
两个函数用来获取文件类型,但是os.Stat具有穿透连接能力,如果你去获取一个软链的 FileInfo,他会返回软链到的文件的信息,你既然想知道他的具体类型,就要使用 os.Lstat +func (r *RollBackFile) RollBack() (err error) { + f, err := os.Lstat(r.FileName) + if err != nil { + // 如果是删除文件的话,文件不存在,那就忽略错误 + if os.IsNotExist(err) && r.OriginOpera == OP_DEL { + return nil + } + return err + } + + switch mode := f.Mode().Type(); { + case mode.IsDir(): + return r.rollbackDir() + case mode.IsRegular(): + return r.rollbackRegularFile() + case mode&os.ModeSymlink != 0: + return r.rollbackLink() + default: + logger.Error("Not Define mode.String(): %v\n", mode.String()) + } + return nil +} + +func (r *RollBackFile) rollbackRegularFile() (err error) { + switch r.OriginOpera { + case OP_DEL: + return SafeRm(r.FileName) + case OP_MOVE: + return SafeMove(r.FileName, r.OriginFileName) + } + return fmt.Errorf("no define Operate %s", r.OriginOpera) +} + +func (r *RollBackFile) rollbackDir() (err error) { + switch r.OriginOpera { + case OP_DEL: + return SafeRmDir(r.FileName) + case OP_MOVE: + return SafeMove(r.FileName, r.OriginFileName) + } + return fmt.Errorf("no define Operate %s", r.OriginOpera) +} + +func (r *RollBackFile) rollbackLink() (err error) { + switch r.OriginOpera { + case OP_DEL: + return SafeUnlink(r.FileName) + case OP_MOVE: + return SafeRelink(r.FileName, r.OriginFileName) + } + return fmt.Errorf("no define Operate %s", r.OriginOpera) +} + +// SafeMove TODO +func SafeMove(file, destfile string) (err error) { + _, err = osutil.ExecShellCommand(false, fmt.Sprintf("mv %s %s", file, destfile)) + return +} + +// SafeRelink TODO +func SafeRelink(linkfile, destfile string) (err error) { + _, err = osutil.ExecShellCommand(false, fmt.Sprintf(" unlink %s && ln -s %s %s", linkfile, destfile, linkfile)) + return +} + +// SafeUnlink TODO +func SafeUnlink(file string) (err error) { + if IsSafe(file) { + _, err = osutil.ExecShellCommand(false, fmt.Sprintf("unlink %s", file)) + return + } + return fmt.Errorf("%s 不允许删除", file) +} + +// SafeRm TODO +func SafeRm(file string) (err error) { + if IsSafe(file) { + _, err = osutil.ExecShellCommand(false, fmt.Sprintf("rm %s", file)) + return + } + return fmt.Errorf("%s不允许删除", file) +} + +// SafeRmDir TODO +func SafeRmDir(file string) (err error) { + if IsSafe(file) { + _, err = osutil.ExecShellCommand(false, fmt.Sprintf("rm -rf %s", file)) + return + } + return fmt.Errorf("%s 不允许删除", file) +} + +// IsSafe TODO +func IsSafe(file string) bool { + // 如果存在 file 是不能直接删除的目录 + if _, ok := safeDirs[file]; ok { + return !ok + } + // 如果存在 file 是不能直接删除的目录,判断下base dir + if _, ok := safeDirs[path.Base(file)]; ok { + return !ok + } + return !util.StrIsEmpty(file) +} + +// Rollback TODO +func (r *RollBackProcess) Rollback() (err error) { + if r.ProcessId <= 0 { + return nil + } + p, err := os.FindProcess(r.ProcessId) + if err != nil { + // 找不到这个进程,可能吗没有 不需要回滚 + return nil + } + return p.Kill() +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/rollback/rollback_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/rollback/rollback_test.go new file mode 100644 index 0000000000..232c2b2bd9 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/rollback/rollback_test.go @@ -0,0 +1,85 @@ +package rollback + +import ( + "testing" +) + +// 将删除/data1/11.txt +func TestRollBackFile(t *testing.T) { + t.Logf("start testing TestRollBackFile...") + rf := RollBackFile{ + FileName: "/data1/11.txt", + OriginOpera: OP_DEL, + } + if err := rf.RollBack(); err != nil { + t.Error("rollback", err) + } +} + +// 将会把/data1/1.txt mv /data1/2.txt +func TestMoveFile(t 
*testing.T) { + t.Logf("start testing TestRollBackFile...") + rf := RollBackFile{ + FileName: "/data1/1.txt", + OriginFileName: "/data1/2.txt", + OriginOpera: OP_MOVE, + } + if err := rf.RollBack(); err != nil { + t.Error("rollback", err) + } +} + +// 将会把/data1/d1 删除 +func TestDelDir(t *testing.T) { + t.Logf("start testing TestRollBackFile...") + rf := RollBackFile{ + FileName: "/data1/d1/", + OriginOpera: OP_DEL, + } + if err := rf.RollBack(); err != nil { + t.Errorf("rollback %s", err.Error()) + } +} + +// 将会把/data1/d1 删除 +func TestMoveDir(t *testing.T) { + t.Logf("start testing TestRollBackFile...") + rf := RollBackFile{ + FileName: "/data1/d1", + OriginFileName: "/data1/d", + OriginOpera: OP_MOVE, + } + if err := rf.RollBack(); err != nil { + t.Errorf("rollback %s", err.Error()) + } +} + +// 将会把/data1/f 软连接到 /data1/c 目录 +func TestRmLink(t *testing.T) { + t.Logf("start testing TestRollBackFile...") + rf := RollBackFile{ + FileName: "/data1/f", + OriginOpera: OP_DEL, + } + if err := rf.RollBack(); err != nil { + t.Errorf("rollback %s", err.Error()) + } +} + +// 将会把/data1/f 软连接到 /data1/c 目录 +func TestMoveLink(t *testing.T) { + t.Logf("start testing TestRollBackFile...") + rf := RollBackFile{ + FileName: "/data1/f", + OriginFileName: "/data1/c", + OriginOpera: OP_MOVE, + } + if err := rf.RollBack(); err != nil { + t.Errorf("rollback %s", err.Error()) + } +} + +func TestIsSafeDir(t *testing.T) { + t.Logf("start testing ...") + t.Log(IsSafe("/usr/local")) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/tools/impls.go b/dbm-services/mysql/db-tools/dbactuator/pkg/tools/impls.go new file mode 100644 index 0000000000..ea98e3634f --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/tools/impls.go @@ -0,0 +1,38 @@ +package tools + +import ( + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + + "github.com/pkg/errors" +) + +func (s *ToolSet) validate() error { + for k, v := range s.maps { + if _, ok := defaultPath[k]; !ok { + return errors.Errorf("tool %s is not regiestered", k) + } + if !osutil.FileExist(v) { + err := errors.Errorf("%s: %s not found", k, v) + return err + } + } + return nil +} + +// Get 获得工具路径 +func (s *ToolSet) Get(tool ExternalTool) (string, error) { + if p, ok := s.maps[tool]; ok { + return p, nil + } + err := errors.Errorf("%s not registered or picked", tool) + return "", err +} + +// MustGet 必须获得 +func (s *ToolSet) MustGet(tool ExternalTool) string { + if p, ok := s.maps[tool]; ok { + return p + } + err := errors.Errorf("%s not registered or picked", tool) + panic(err) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/tools/init.go b/dbm-services/mysql/db-tools/dbactuator/pkg/tools/init.go new file mode 100644 index 0000000000..5e55e697b6 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/tools/init.go @@ -0,0 +1,147 @@ +package tools + +import ( + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "fmt" + "path" + "path/filepath" +) + +// ExternalTool 外部工具类型 +type ExternalTool string + +const ( + // ToolMysqlbinlog mysqlbinlog + ToolMysqlbinlog ExternalTool = "mysqlbinlog" + // ToolMload mload + ToolMload ExternalTool = "mload" + // ToolMysqlclient mysql + ToolMysqlclient ExternalTool = "mysql" + // ToolXLoad xload + ToolXLoad ExternalTool = "xload" + // ToolQPress qpress + ToolQPress ExternalTool = "qpress" + // ToolPv TODO + ToolPv ExternalTool = "pv" + // ToolMysqlbinlogRollback mysqlbinlog_rollback + ToolMysqlbinlogRollback ExternalTool = "mysqlbinlog_rollback" + // ToolMysqlbinlogRollback80 mysqlbinlog_rollback80 + 
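// Illustrative usage sketch, not part of this patch: how an actuator
// component is expected to drive the rollback package tested above --
// register each side effect as it happens, then undo everything if a later
// step fails. Paths are hypothetical; assumes importing
// dbm-services/mysql/db-tools/dbactuator/pkg/rollback and
// dbm-services/common/go-pubpkg/logger.
func installWithRollback() (err error) {
	rb := &rollback.RollBackObjects{}
	defer func() {
		if err != nil { // roll back only on failure
			if rerr := rb.RollBack(); rerr != nil {
				logger.Error("rollback failed: %s", rerr.Error())
			}
		}
	}()
	// step 1: a freshly unpacked directory is undone by deleting it
	rb.AddDelFile("/usr/local/mysql-new")
	// step 2: a renamed config is undone by moving it back
	rb.AddMoveFile("/etc/my.cnf.28000", "/etc/my.cnf.28000.bak")
	// ... real installation steps would set err on failure ...
	return nil
}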
ToolMysqlbinlogRollback80 ExternalTool = "mysqlbinlog_rollback80" + // ToolMysqlTableChecksum mysql-table-checksum + ToolMysqlTableChecksum ExternalTool = "mysql-table-checksum" + // ToolPtTableChecksum pt-table-checksum + ToolPtTableChecksum ExternalTool = "pt-table-checksum" + // ToolPtTableSync pt-table-sync + ToolPtTableSync ExternalTool = "pt-table-sync" + // ToolDbbackupGo dbbackup + ToolDbbackupGo ExternalTool = "dbbackup" + // ToolRotatebinlog binlog 清理 + ToolRotatebinlog ExternalTool = "rotatebinlog" + // ToolMySQLCrond crond + ToolMySQLCrond ExternalTool = "mysql-crond" + // ToolMySQLMonitor mysql monitor + ToolMySQLMonitor ExternalTool = "mysql-monitor" +) + +// defaultPath defaults path +var defaultPath = map[ExternalTool]string{ + ToolMload: "/home/mysql/dbbackup/MLOAD/MLOAD.pl", + ToolXLoad: "/home/mysql/dbbackup/xtrabackup/xload.pl", + ToolQPress: "/home/mysql/dbbackup-go/bin/xtrabackup/qpress", + ToolPv: "/home/mysql/dbbackup-go/bin/pv", + ToolMysqlclient: "/usr/local/mysql/bin/mysql", + ToolMysqlbinlog: "/usr/local/mysql/bin/mysqlbinlog", + ToolMysqlbinlogRollback: filepath.Join(cst.DBAToolkitPath, string(ToolMysqlbinlogRollback)), + ToolMysqlbinlogRollback80: filepath.Join(cst.DBAToolkitPath, "mysqlbinlog_rollback_80"), + ToolMysqlTableChecksum: path.Join(cst.ChecksumInstallPath, string(ToolMysqlTableChecksum)), + ToolPtTableChecksum: path.Join(cst.ChecksumInstallPath, string(ToolPtTableChecksum)), + ToolPtTableSync: path.Join(cst.ChecksumInstallPath, string(ToolPtTableSync)), + ToolDbbackupGo: path.Join(cst.DbbackupGoInstallPath, string(ToolDbbackupGo)), + ToolMySQLCrond: path.Join(cst.MySQLCrondInstallPath, string(ToolMySQLCrond)), + ToolMySQLMonitor: path.Join(cst.MySQLMonitorInstallPath, string(ToolMySQLMonitor)), +} + +// ToolPath 基本结构 +type ToolPath struct { + Tools map[ExternalTool]string `json:"tools"` +} + +// ToolSet 外部工具 +type ToolSet struct { + // 外部指定工具路径 + Tools map[ExternalTool]string `json:"tools"` + maps map[ExternalTool]string +} + +// NewToolSetWithDefault 加载全部默认工具 +func NewToolSetWithDefault() (*ToolSet, error) { + res := &ToolSet{maps: defaultPath} + err := res.validate() + if err != nil { + return nil, err + } + return res, nil +} + +// NewToolsSetWithDefaultNoValidate 无验证 +func NewToolsSetWithDefaultNoValidate() *ToolSet { + return &ToolSet{maps: defaultPath} +} + +// NewToolSetWithPick 按需加载 +func NewToolSetWithPick(tools ...ExternalTool) (*ToolSet, error) { + maps := make(map[ExternalTool]string) + for _, tool := range tools { + if p, ok := defaultPath[tool]; ok { + maps[tool] = p + } else { + err := fmt.Errorf("%s not registered", tool) + return nil, err + } + } + res := &ToolSet{maps: maps} + if err := res.validate(); err != nil { + return nil, err + } + return res, nil +} + +// NewToolSetWithPickNoValidate 无验证 +func NewToolSetWithPickNoValidate(tools ...ExternalTool) *ToolSet { + maps := make(map[ExternalTool]string) + for _, tool := range tools { + if p, ok := defaultPath[tool]; ok { + maps[tool] = p + } else { + maps[tool] = "" + } + } + return &ToolSet{maps: maps} +} + +// Merge merge tools to left ToolSet +func (s *ToolSet) Merge(tools *ToolSet) error { + s.maps = s.Tools + if s.maps == nil { + s.maps = make(map[ExternalTool]string) + } + if err := s.validate(); err != nil { + return err + } + for toolName, toolPath := range tools.maps { + if _, ok := s.maps[toolName]; !ok { + s.maps[toolName] = toolPath + } + } + return nil +} + +// Set modify a tool path +// 没有校验 toolName 和 toolPath +func (s *ToolSet) Set(toolName ExternalTool, toolPath 
string) error { + if s.maps == nil { + s.maps = make(map[ExternalTool]string) + } + s.maps[toolName] = toolPath + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/tools/tools.go b/dbm-services/mysql/db-tools/dbactuator/pkg/tools/tools.go new file mode 100644 index 0000000000..a8cb267c2c --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/tools/tools.go @@ -0,0 +1,2 @@ +// Package tools 外部工具 +package tools diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/auth/auth.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/auth/auth.go new file mode 100644 index 0000000000..bde76f03e2 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/auth/auth.go @@ -0,0 +1,2 @@ +// Package auth 认证 +package auth diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/auth/jwt_token.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/auth/jwt_token.go new file mode 100644 index 0000000000..77d96c955b --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/auth/jwt_token.go @@ -0,0 +1,21 @@ +package auth + +import ( + "time" + + "github.com/golang-jwt/jwt/v4" + // "github.com/dgrijalva/jwt-go" +) + +// Sign 签名加密 +func Sign(username string, secretId, secretKey string) (tokenString string, err error) { + // The token content. + token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{ + "sub": secretId, + "user": username, + "iat": time.Now().Add(-1 * time.Minute).Unix(), + }) + // Sign the token with the specified secret. + tokenString, err = token.SignedString([]byte(secretKey)) + return +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/bkrepo/bkrepo.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/bkrepo/bkrepo.go new file mode 100644 index 0000000000..94b69e8827 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/bkrepo/bkrepo.go @@ -0,0 +1,296 @@ +// Package bkrepo TODO +package bkrepo + +import ( + "bytes" + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "io" + "log" + "mime/multipart" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strconv" + "strings" +) + +/* + API: GET /generic/{project}/{repo}/{path}?download=true + API 名称: download + 功能说明: + + 中文:下载通用制品文件 + English:download generic file + 请求体 此接口请求体为空 +*/ + +// BkRepoClient TODO +type BkRepoClient struct { + Client *http.Client + BkRepoProject string + BkRepoPubBucket string + BkRepoEndpoint string + BkRepoUser string + BkRepoPwd string +} + +// BkRepoRespone TODO +type BkRepoRespone struct { + Code int `json:"code"` + Message string `json:"message"` + Data json.RawMessage `json:"data"` + RequestId string `json:"request_id"` +} + +// getBaseUrl TODO +// +// @receiver b +func (b *BkRepoClient) getBaseUrl() string { + u, err := url.Parse(b.BkRepoEndpoint) + if err != nil { + log.Fatal(err) + } + u.Path = path.Join(u.Path, "generic", b.BkRepoProject, b.BkRepoPubBucket) + return u.String() +} + +// Download 从制品库下载文件 +// +// @receiver b +func (b *BkRepoClient) Download(sqlpath, filename, downloaddir string) (err error) { + uri := b.getBaseUrl() + path.Join("/", sqlpath, filename) + "?download=true" + logger.Info("The download uri %s", uri) + req, err := http.NewRequest(http.MethodGet, uri, nil) + if err != nil { + return err + } + if strings.Contains(filename, "..") { + return fmt.Errorf("%s 存在路径穿越风险", filename) + } + fileAbPath, err := filepath.Abs(path.Join(downloaddir, filename)) + if err != nil { + return err + } + f, err := os.Create(fileAbPath) + if 
err != nil { + return err + } + req.SetBasicAuth(b.BkRepoUser, b.BkRepoPwd) + resp, err := b.Client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + size, err := io.Copy(f, resp.Body) + if err != nil { + return err + } + logger.GetLogger().Info(fmt.Sprintf("Downloaded a file %s with size %d", filename, size)) + fileNodeInfo, err := b.QueryFileNodeInfo(sqlpath, filename) + if err != nil { + return err + } + + if size != int64(fileNodeInfo.Size) { + return fmt.Errorf("当前文件&源文件大小不一致,当前文件是:%d,制品库文件是:%d", size, fileNodeInfo.Size) + } + + currentFileMd5, err := util.GetFileMd5(fileAbPath) + if err != nil { + return err + } + if currentFileMd5 != fileNodeInfo.Md5 { + return fmt.Errorf("当前文件&源文件md5b不一致,当前文件是:%s,制品库文件是:%s", currentFileMd5, fileNodeInfo.Md5) + } + return nil +} + +// FileNodeInfo TODO +type FileNodeInfo struct { + Name string `json:"name"` + Sha256 string `json:"sha256"` + Md5 string `json:"md5"` + Size int `json:"size"` + Metadata map[string]string `json:"metadata"` +} + +// QueryFileNodeInfo TODO +// QueryMetaData 查询文件元数据信息 +// +// @receiver b +func (b *BkRepoClient) QueryFileNodeInfo(filepath, filename string) (realData FileNodeInfo, err error) { + var baseResp BkRepoRespone + uri := b.BkRepoEndpoint + path.Join( + "repository/api/node/detail/", b.BkRepoProject, b.BkRepoPubBucket, filepath, + filename, + ) + req, err := http.NewRequest(http.MethodGet, uri, nil) + if err != nil { + return FileNodeInfo{}, err + } + resp, err := b.Client.Do(req) + if err != nil { + return FileNodeInfo{}, err + } + defer resp.Body.Close() + if err = json.NewDecoder(resp.Body).Decode(&baseResp); err != nil { + return FileNodeInfo{}, err + } + if baseResp.Code != 0 { + return FileNodeInfo{}, fmt.Errorf("bkrepo Return Code: %d,Messgae:%s", baseResp.Code, baseResp.Message) + } + if err = json.Unmarshal([]byte(baseResp.Data), &realData); err != nil { + return FileNodeInfo{}, err + } + return +} + +// UploadRespData TODO +type UploadRespData struct { + Sha256 string `json:"sha256"` + Md5 string `json:"md5"` + Size int64 `json:"size"` + FullPath string `json:"fullPath"` + CreateBy string `json:"createBy"` + CreateDate string `json:"createdDate"` + LastModifiedBy string `json:"lastModifiedBy"` + LastModifiedDate string `json:"lastModifiedDate"` + Folder bool `json:"folder"` // 是否为文件夹 + Path string `json:"path"` + Name string `json:"name"` + ProjectId string `json:"projectId"` + RepoName string `json:"repoName"` +} + +// FileServerInfo 文件服务器 +type FileServerInfo struct { + URL string `json:"url"` // 制品库地址 + Bucket string `json:"bucket"` // 目标bucket + Password string `json:"password"` // 制品库 password + Username string `json:"username"` // 制品库 username + Project string `json:"project"` // 制品库 project +} + +func newfileUploadRequest(uri string, params map[string]string, paramName, path string) (*http.Request, error) { + file, err := os.Open(path) + if err != nil { + return nil, err + } + defer file.Close() + + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + part, err := writer.CreateFormFile(paramName, filepath.Base(path)) + if err != nil { + return nil, err + } + _, err = io.Copy(part, file) + if err != nil { + return nil, err + } + for key, val := range params { + _ = writer.WriteField(key, val) + } + err = writer.Close() + if err != nil { + return nil, err + } + + req, err := http.NewRequest(http.MethodPut, uri, body) + req.Header.Set("Content-Type", writer.FormDataContentType()) + return req, err +} + +// UploadDirectToBkRepo TODO +func UploadDirectToBkRepo(filepath 
string, targetURL string, username string, password string) (*BkRepoRespone, error) { + logger.Info("start upload files from %s to %s", filepath, targetURL) + bodyBuf := bytes.NewBufferString("") + bodyWriter := multipart.NewWriter(bodyBuf) + fh, err := os.Open(filepath) + if err != nil { + logger.Info("error opening file") + return nil, err + } + boundary := bodyWriter.Boundary() + closeBuf := bytes.NewBufferString("") + + requestReader := io.MultiReader(bodyBuf, fh, closeBuf) + fi, err := fh.Stat() + if err != nil { + fmt.Printf("Error Stating file: %s", filepath) + return nil, err + } + req, err := http.NewRequest("PUT", targetURL, requestReader) + if err != nil { + return nil, err + } + req.SetBasicAuth(username, password) + // Set headers for multipart, and Content Length + req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary) + // 文件是否可以被覆盖,默认false + req.Header.Set("X-BKREPO-OVERWRITE", "True") + // 文件默认保留半年 + req.Header.Set("X-BKREPO-EXPIRES", "183") + req.ContentLength = fi.Size() + int64(bodyBuf.Len()) + int64(closeBuf.Len()) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("返回码非200 %d", resp.StatusCode) + } + var baseResp BkRepoRespone + if err = json.NewDecoder(resp.Body).Decode(&baseResp); err != nil { + return nil, err + } + return &baseResp, err +} + +// UploadFile 上传文件到蓝盾制品库 +// filepath: 本地需要上传文件的路径 +// targetURL: 仓库文件完整路径 +func UploadFile( + filepath string, targetURL string, username string, password string, BkCloudId int, + DBCloudToken string, +) (*BkRepoRespone, error) { + logger.Info("start upload files from %s to %s", filepath, targetURL) + if BkCloudId == 0 { + return UploadDirectToBkRepo(filepath, targetURL, username, password) + } + req, err := newfileUploadRequest( + targetURL, map[string]string{ + "bk_cloud_id": strconv.Itoa(BkCloudId), + "db_cloud_token": DBCloudToken, + }, "file", filepath, + ) + if err != nil { + logger.Error("new request failed %s", err.Error()) + return nil, err + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + body := &bytes.Buffer{} + _, err = body.ReadFrom(resp.Body) + if err != nil { + logger.Error("read from body failed %s", err.Error()) + return nil, err + } + logger.Info("respone body:%s", body.String()) + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("返回码非200 %d,Message:%s", resp.StatusCode, body.String()) + } + var baseResp BkRepoRespone + if err = json.NewDecoder(body).Decode(&baseResp); err != nil { + return nil, err + } + return &baseResp, err +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/bkrepo/bkrepo_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/bkrepo/bkrepo_test.go new file mode 100644 index 0000000000..9896d5d026 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/bkrepo/bkrepo_test.go @@ -0,0 +1,20 @@ +package bkrepo_test + +import ( + "dbm-services/mysql/db-tools/dbactuator/pkg/util/bkrepo" + "net/url" + "path" + "testing" +) + +func TestUploadFile(t *testing.T) { + t.Log("start...") + r, err := url.Parse(path.Join("/generic", "/")) + t.Log(r.String()) + resp, err := bkrepo.UploadFile("/tmp/1.sql", "", "", "", 0, "") + if err != nil { + t.Log(err.Error()) + return + } + t.Log(resp) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/db_table_filter.go 
b/dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/db_table_filter.go new file mode 100644 index 0000000000..a8dbee7d61 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/db_table_filter.go @@ -0,0 +1,263 @@ +// Package db_table_filter 库表过滤 +package db_table_filter + +import ( + "fmt" + "strings" + + "github.com/dlclark/regexp2" + _ "github.com/go-sql-driver/mysql" // mysql 驱动 + "github.com/jmoiron/sqlx" +) + +// DbTableFilter 库表过滤 +type DbTableFilter struct { + IncludeDbPatterns []string + IncludeTablePatterns []string + ExcludeDbPatterns []string + ExcludeTablePatterns []string + dbFilterIncludeRegex string + dbFilterExcludeRegex string + tableFilterIncludeRegex string + tableFilterExcludeRegex string +} + +// NewDbTableFilter 构造函数 +func NewDbTableFilter( + includeDbPatterns []string, + includeTablePatterns []string, + excludeDbPatterns []string, + excludeTablePatterns []string, +) (*DbTableFilter, error) { + + tf := &DbTableFilter{ + IncludeDbPatterns: cleanIt(includeDbPatterns), + IncludeTablePatterns: cleanIt(includeTablePatterns), + ExcludeDbPatterns: cleanIt(excludeDbPatterns), + ExcludeTablePatterns: cleanIt(excludeTablePatterns), + dbFilterIncludeRegex: "", + dbFilterExcludeRegex: "", + tableFilterIncludeRegex: "", + tableFilterExcludeRegex: "", + } + + err := tf.validate() + if err != nil { + return nil, err + } + + tf.buildDbFilterRegex() + tf.buildTableFilterRegex() + + return tf, nil +} + +func (c *DbTableFilter) validate() error { + if len(c.IncludeDbPatterns) == 0 || len(c.IncludeTablePatterns) == 0 { + return fmt.Errorf("include patterns can't be empty") + } + if !((len(c.ExcludeDbPatterns) > 0 && len(c.ExcludeTablePatterns) > 0) || + (len(c.ExcludeDbPatterns) == 0 && len(c.ExcludeTablePatterns) == 0)) { + return fmt.Errorf("exclude patterns can't be partial empty") + } + + if err := globCheck(c.IncludeDbPatterns); err != nil { + return err + } + if err := globCheck(c.IncludeTablePatterns); err != nil { + return err + } + if err := globCheck(c.ExcludeDbPatterns); err != nil { + return err + } + if err := globCheck(c.ExcludeTablePatterns); err != nil { + return err + } + return nil +} + +func (c *DbTableFilter) buildDbFilterRegex() { + var includeParts []string + for _, db := range c.IncludeDbPatterns { + includeParts = append(includeParts, fmt.Sprintf(`%s$`, ReplaceGlob(db))) + } + + var excludeParts []string + for _, db := range c.ExcludeDbPatterns { + excludeParts = append(excludeParts, fmt.Sprintf(`%s$`, ReplaceGlob(db))) + } + c.dbFilterIncludeRegex = buildIncludeRegexp(includeParts) + c.dbFilterExcludeRegex = buildExcludeRegexp(excludeParts) +} + +func (c *DbTableFilter) buildTableFilterRegex() { + var includeParts []string + for _, db := range c.IncludeDbPatterns { + for _, table := range c.IncludeTablePatterns { + includeParts = append( + includeParts, + fmt.Sprintf(`%s\.%s$`, ReplaceGlob(db), ReplaceGlob(table)), + ) + } + } + + var excludeParts []string + for _, db := range c.ExcludeDbPatterns { + for _, table := range c.ExcludeTablePatterns { + excludeParts = append( + excludeParts, + fmt.Sprintf(`%s\.%s$`, ReplaceGlob(db), ReplaceGlob(table)), + ) + } + } + + c.tableFilterIncludeRegex = buildIncludeRegexp(includeParts) + c.tableFilterExcludeRegex = buildExcludeRegexp(excludeParts) +} + +// TableFilterRegex 返回表过滤正则 +func (c *DbTableFilter) TableFilterRegex() string { + return fmt.Sprintf(`^%s%s`, c.tableFilterIncludeRegex, c.tableFilterExcludeRegex) +} + +// DbFilterRegex 返回库过滤正则 +func (c *DbTableFilter) 
DbFilterRegex() string { + return fmt.Sprintf(`^%s%s`, c.dbFilterIncludeRegex, c.dbFilterExcludeRegex) +} + +// GetTables 过滤后的表 +func (c *DbTableFilter) GetTables(ip string, port int, user string, password string) ([]string, error) { + return c.getTablesByRegexp( + ip, + port, + user, + password, + c.TableFilterRegex(), + ) +} + +// GetDbs 过滤后的库 +func (c *DbTableFilter) GetDbs(ip string, port int, user string, password string) ([]string, error) { + return c.getDbsByRegexp( + ip, + port, + user, + password, + c.DbFilterRegex(), + ) +} + +// GetExcludeDbs 排除的库 +func (c *DbTableFilter) GetExcludeDbs(ip string, port int, user string, password string) ([]string, error) { + if c.dbFilterExcludeRegex == "" { + return []string{}, nil + } + return c.getDbsByRegexp( + ip, + port, + user, + password, + strings.Replace(c.dbFilterExcludeRegex, "!", "=", 1), // 替换掉第一个 ! , 这样就变成匹配模式 + ) +} + +// GetExcludeTables 排除的表 +func (c *DbTableFilter) GetExcludeTables(ip string, port int, user string, password string) ([]string, error) { + if c.tableFilterExcludeRegex == "" { + return []string{}, nil + } + return c.getTablesByRegexp( + ip, + port, + user, + password, + strings.Replace(c.tableFilterExcludeRegex, "!", "=", 1), + ) +} + +func (c *DbTableFilter) getDbsByRegexp(ip string, port int, user string, password string, reg string) ( + []string, + error, +) { + dbh, err := sqlx.Connect( + "mysql", + fmt.Sprintf(`%s:%s@tcp(%s:%d)/`, user, password, ip, port), + ) + if err != nil { + return nil, err + } + + rows, err := dbh.Queryx(`SHOW DATABASES`) + if err != nil { + return nil, err + } + + pattern, err := regexp2.Compile(reg, regexp2.None) + if err != nil { + return nil, err + } + + var selectedDbs []string + for rows.Next() { + var database string + err := rows.Scan(&database) + if err != nil { + return nil, err + } + + ok, err := pattern.MatchString(database) + if err != nil { + return nil, err + } + if ok { + selectedDbs = append(selectedDbs, database) + } + } + + return selectedDbs, nil +} + +func (c *DbTableFilter) getTablesByRegexp(ip string, port int, user string, password string, reg string) ( + []string, + error, +) { + dbh, err := sqlx.Connect( + "mysql", + fmt.Sprintf(`%s:%s@tcp(%s:%d)/`, user, password, ip, port), + ) + if err != nil { + return nil, err + } + + rows, err := dbh.Queryx( + `SELECT CONCAT(table_schema, ".", table_name) AS fullname` + + ` from INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE="BASE TABLE"`, + ) + if err != nil { + return nil, err + } + + pattern, err := regexp2.Compile(reg, regexp2.None) + if err != nil { + return nil, err + } + + var selectedTables []string + for rows.Next() { + var fullname string + err := rows.Scan(&fullname) + if err != nil { + return nil, err + } + + ok, err := pattern.MatchString(fullname) + if err != nil { + return nil, err + } + if ok { + selectedTables = append(selectedTables, fullname) + } + } + + return selectedTables, nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/db_table_filter_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/db_table_filter_test.go new file mode 100644 index 0000000000..99f823f109 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/db_table_filter_test.go @@ -0,0 +1,21 @@ +package db_table_filter_test + +import ( + "dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter" + "testing" +) + +func TestDbtableFilter(t *testing.T) { + t.Log("start...") + r, err := db_table_filter.NewDbTableFilter( + []string{"*"}, + []string{"*"}, + 
[]string{"sys", "information_schema"}, + []string{"*"}, + ) + if err != nil { + t.Fatal(err) + return + } + t.Log(r.TableFilterRegex()) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/mydumper_regex.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/mydumper_regex.go new file mode 100644 index 0000000000..bb7c63ef3e --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/mydumper_regex.go @@ -0,0 +1,67 @@ +package db_table_filter + +import ( + "fmt" + "strings" +) + +// MyloaderRegex TODO +func (c *DbTableFilter) MyloaderRegex(doDr bool) string { + if doDr { + sysDBExclude := `^(?!(mysql\.|sys\.|infodba_schema\.|test\.|db_infobase\.))` + return sysDBExclude + } + for i, db := range c.IncludeDbPatterns { + c.IncludeDbPatterns[i] = fmt.Sprintf(`%s\.`, ReplaceGlob(db)) + } + for i, tb := range c.IncludeTablePatterns { + if containGlob(tb) { + c.IncludeTablePatterns[i] = ReplaceGlob(tb) + } else { + c.IncludeTablePatterns[i] = fmt.Sprintf(`%s$`, tb) + } + } + for i, db := range c.ExcludeDbPatterns { + c.ExcludeDbPatterns[i] = fmt.Sprintf(`%s\.`, ReplaceGlob(db)) + } + for i, tb := range c.ExcludeTablePatterns { + if containGlob(tb) { + c.ExcludeTablePatterns[i] = ReplaceGlob(tb) + } else { + c.ExcludeTablePatterns[i] = fmt.Sprintf(`%s$`, tb) + } + } + + c.dbFilterIncludeRegex = buildRegexString(c.IncludeDbPatterns) + c.tableFilterIncludeRegex = buildRegexString(c.IncludeTablePatterns) + c.dbFilterExcludeRegex = buildRegexString(c.ExcludeDbPatterns) + c.tableFilterExcludeRegex = buildRegexString(c.ExcludeTablePatterns) + + if c.dbFilterIncludeRegex != "" && c.dbFilterExcludeRegex == "" { + dbtableInclude := fmt.Sprintf(`^(%s%s)`, c.dbFilterIncludeRegex, c.tableFilterIncludeRegex) + return dbtableInclude + } + + if c.dbFilterExcludeRegex != "" && c.dbFilterIncludeRegex == "" { + dbtableExclude := fmt.Sprintf(`^(?!(%s%s))`, c.dbFilterExcludeRegex, c.tableFilterExcludeRegex) + return dbtableExclude + } + if c.dbFilterIncludeRegex != "" && c.dbFilterExcludeRegex != "" { + dbtable := fmt.Sprintf(`^(?=(?:%s%s))(?!(?:%s%s))`, c.dbFilterIncludeRegex, c.tableFilterIncludeRegex, + c.dbFilterExcludeRegex, c.tableFilterExcludeRegex) + return dbtable + } + return "" +} + +func buildRegexString(patterns []string) string { + ss := strings.Join(patterns, "|") + if len(patterns) > 1 { + ss = fmt.Sprintf(`(%s)`, ss) + } + return ss +} + +func isAllPattern(ss string) bool { + return ss == `.*\.` +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/tools.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/tools.go new file mode 100644 index 0000000000..5dcfd04cb6 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/db_table_filter/tools.go @@ -0,0 +1,87 @@ +package db_table_filter + +import ( + "fmt" + "strings" + + "github.com/dlclark/regexp2" +) + +func containGlob(p string) bool { + return strings.Contains(p, "*") || + strings.Contains(p, "?") || + strings.Contains(p, "%") +} + +func cleanIt(s []string) []string { + var r []string + for _, e := range s { + te := strings.TrimSpace(e) + if len(te) > 0 { + r = append(r, strings.TrimSpace(e)) + } + } + return r +} + +// ReplaceGlob 通配符替换为正则 +// todo . -> \. ? 
+func ReplaceGlob(p string) string {
+	return strings.Replace(
+		strings.Replace(
+			strings.Replace(p, "*", ".*", -1),
+			"%", ".*", -1,
+		),
+		"?", ".", -1,
+	)
+}
+
+// HasGlobPattern reports whether any pattern contains a glob character
+func HasGlobPattern(patterns []string) bool {
+	for _, p := range patterns {
+		if strings.Contains(p, "%") || strings.Contains(p, "?") || strings.Contains(p, "*") {
+			return true
+		}
+	}
+	return false
+}
+
+func buildIncludeRegexp(parts []string) string {
+	return buildRegexp(parts, `(?=(?:(%s)))`)
+}
+
+func buildExcludeRegexp(parts []string) string {
+	return buildRegexp(parts, `(?!(?:(%s)))`)
+}
+
+func buildRegexp(parts []string, template string) string {
+	var res string
+
+	if len(parts) > 0 {
+		res += fmt.Sprintf(template, strings.Join(parts, "|"))
+	}
+	return res
+}
+
+func globCheck(patterns []string) error {
+	r1 := regexp2.MustCompile(`^[%?]+$`, regexp2.None)
+	r2 := regexp2.MustCompile(`^\*+$`, regexp2.None)
+	for _, p := range patterns {
+		if containGlob(p) {
+			if len(patterns) > 1 {
+				return fmt.Errorf("%s: multiple patterns are not allowed when a glob is present", patterns)
+			}
+
+			m1, _ := r1.MatchString(p)
+			if (strings.Contains(p, "%") || strings.Contains(p, "?")) && m1 {
+				return fmt.Errorf(`%% or ? cannot be used alone`)
+			}
+
+			m2, _ := r2.MatchString(p)
+			if strings.Contains(p, "*") && !m2 {
+				return fmt.Errorf("* must be used alone")
+			}
+		}
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/dbcnf.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/dbcnf.go
new file mode 100644
index 0000000000..e91ea39d38
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/dbcnf.go
@@ -0,0 +1,614 @@
+package util
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/dbactuator/pkg/core/cst"
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"reflect"
+	"regexp"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/pkg/errors"
+	"gopkg.in/ini.v1"
+)
+
+const (
+	// SecTag is the struct tag naming a my.cnf section
+	SecTag = "sectag"
+	// KeyTag is the struct tag naming a my.cnf key
+	KeyTag = "keytag"
+)
+
+const (
+	// MysqldSec is the [mysqld] section name
+	MysqldSec = "mysqld"
+)
+
+// CnfFile wraps an ini-parsed my.cnf together with its file name
+type CnfFile struct {
+	FileName string
+	Cfg      *ini.File
+	mu       *sync.Mutex
+}
+
+// CnfUint holds one section's worth of parsed config
+type CnfUint struct {
+	KvMap map[string]string
+	// keys that may appear more than once
+	ShadowKvMap map[string]string
+	// bare boolean keys such as skip_symbolic_links
+	BoolKey []string
+}
+
+// MycnfIniObject is the section-map form of a parsed my.cnf
+type MycnfIniObject struct {
+	Section map[string]*CnfUint
+}
+
+var iniLoadOption = ini.LoadOptions{
+	PreserveSurroundedQuote: true,
+	IgnoreInlineComment:     true,
+	AllowBooleanKeys:        true,
+	AllowShadows:            true,
+}
+
+// NewMyCnfObject renders a complete my.cnf from a sectag-tagged template struct
+//
+// @receiver c
+// @receiver myfileName
+// @return nf
+// @return err
+func NewMyCnfObject(c interface{}, myfileName string) (nf *CnfFile, err error) {
+	nf = NewEmptyCnfObj(myfileName)
+	t := reflect.TypeOf(c)
+	if t.Kind() != reflect.Struct {
+		return nil, fmt.Errorf("mf reflect is not struct")
+	}
+	var isMysqldSectionExists bool // the [mysqld] section must exist
+	for i := 0; i < t.NumField(); i++ { // iterate the sectag-tagged map fields: [map{client} map{mysqld} ...]
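		// Editor's note (illustration, not from the patch): `c` is expected to be
		// a struct of sectag-tagged map fields, shaped like the MycnfObject type
		// that appears later in this patch, e.g.
		//
		//	type MycnfObject struct {
		//		Client map[string]string `json:"client" sectag:"client"`
		//		Mysqld map[string]string `json:"mysqld" sectag:"mysqld"`
		//	}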
+ var sectionName = t.Field(i).Tag.Get(SecTag) + m := reflect.ValueOf(c).FieldByName(t.Field(i).Name) + if _, err := nf.Cfg.NewSection(sectionName); err != nil { + return nil, err + } + for _, k := range m.MapKeys() { + if err = nf.RenderSection(sectionName, k.String(), m.MapIndex(k).String(), false); err != nil { + return nil, err + } + } + if sectionName == MysqldSec { + isMysqldSectionExists = true + } + } + if !isMysqldSectionExists { + return nil, fmt.Errorf("must Include Sections [mysqld]") + } + return +} + +// ReplaceMyconfigsObjects TODO +func ReplaceMyconfigsObjects(f *CnfFile, c interface{}) error { + t := reflect.TypeOf(c) + v := reflect.ValueOf(c) + if t.Kind() != reflect.Struct { + return fmt.Errorf("mycnf object reflect is not struct") + } + for i := 0; i < t.NumField(); i++ { + var sectionName = t.Field(i).Tag.Get(SecTag) + if v.Field(i).Type().Kind() == reflect.Struct { + structField := v.Field(i).Type() + for j := 0; j < structField.NumField(); j++ { + keyName := structField.Field(j).Tag.Get(KeyTag) + val := v.Field(i).Field(j).String() + f.ReplaceValue(sectionName, string(keyName), false, val) + } + } + } + return nil +} + +// NewEmptyCnfObj TODO +// NewCnfFile 生成ini empty 用于外部传参的配置中渲染新的my.cnf +// +// @receiver mycnf +// @return *CnfFile +// @return error +func NewEmptyCnfObj(mycnf string) *CnfFile { + return &CnfFile{ + FileName: mycnf, + mu: &sync.Mutex{}, + Cfg: ini.Empty(iniLoadOption), + } +} + +// LoadMyCnfForFile 读取一个已经存在的配置文件,将配置文件的内容解析,用于程序读取、修改my.cnf +// +// @receiver mycnf +// @return *CnfFile +// @return error +func LoadMyCnfForFile(mycnf string) (*CnfFile, error) { + if err := cmutil.FileExistsErr(mycnf); err != nil { + return nil, err + } + cfg, err := ini.LoadSources(iniLoadOption, mycnf) + if err != nil { + return nil, err + } + return &CnfFile{ + FileName: mycnf, + mu: &sync.Mutex{}, + Cfg: cfg, + }, nil +} + +func newMyCnfUint() *CnfUint { + return &CnfUint{ + KvMap: make(map[string]string), + ShadowKvMap: make(map[string]string), + BoolKey: make([]string, 0), + } +} + +// Load load m.FileName to CnfOj +func (m *CnfFile) Load() error { + if obj, err := LoadMyCnfForFile(m.FileName); err != nil { + return err + } else { + m.Cfg = obj.Cfg + if m.mu == nil { + m.mu = &sync.Mutex{} + } + } + return nil +} + +// GetMySQLDataDir 从my.cnf 获取datadir +// +// @receiver m +// @return datadir +// @return err +// +// e.g: datadir=/data1/mysqldata/20000/data +func (m *CnfFile) GetMySQLDataDir() (datadir string, err error) { + if m.Cfg.Section(MysqldSec).HasKey("datadir") { + return filepath.Dir(m.Cfg.Section(MysqldSec).Key("datadir").String()), nil + } + return "", fmt.Errorf("在配置中没找到datadir的配置项") +} + +// GetMySQLLogDir 从配置中获取mysql logdir +// +// @receiver m +// @return logdir +// @return err +func (m *CnfFile) GetMySQLLogDir() (logdir string, err error) { + // 先从 log_bin 配置项获取logdir + // 但是可能存在历史的实例并没有开始binlog + // log_bin = ON + // log_bin_basename = /data/mysqllog/20000/binlog/binlog20000 + // log_bin_index = /data/mysqllog/20000/binlog/binlog20000.index + // 或者 log_bin = /data/mysqllog/20000/binlog/binlog20000.bin + // 或者 slow_query_log_file = /data/mysqllog/20000/slow-query.log + keys := []string{"log_bin", "log_bin_basename", "slow_query_log_file"} + + for _, k := range keys { + if val, err := m.GetMySQLCnfByKey(MysqldSec, k); err == nil { + if filepath.IsAbs(val) { + return val, nil + } + } + } + return "", fmt.Errorf("在配置中没找到 log_bin 的配置项") +} + +// GetBinLogDir 获取 binlog dir +// 这里只从 my.cnf 获取,有可能没有设置选项,外部需要考虑再次从 global variables 获取 +// 返回 binlog 目录和 binlog 
文件名前缀
+func (m *CnfFile) GetBinLogDir() (binlogDir, namePrefix string, err error) {
+	// log_bin = ON
+	// log_bin_basename = /data/mysqllog/20000/binlog/binlog20000 // with no explicit path, binlog defaults to datadir
+	// log_bin_index = /data/mysqllog/20000/binlog/binlog20000.index
+	// or: log_bin = /data/mysqllog/20000/binlog/binlog20000.bin
+	keys := []string{"log_bin", "log_bin_basename"}
+	for _, k := range keys {
+		if val, err := m.GetMySQLCnfByKey(MysqldSec, k); err == nil {
+			if filepath.IsAbs(val) {
+				if binlogDir, namePrefix, err = m.ParseLogBinBasename(val); err == nil {
+					return binlogDir, namePrefix, err
+				}
+			}
+		}
+	}
+	return "", "", fmt.Errorf("binlog dir not found or parse failed")
+}
+
+// ParseLogBinBasename splits a log_bin_basename value into its directory and file-name prefix
+func (m *CnfFile) ParseLogBinBasename(val string) (binlogDir, namePrefix string, err error) {
+	binlogDir, namePrefix = path.Split(val)
+	if cmutil.IsDirectory(binlogDir) && !cmutil.IsDirectory(val) {
+		if strings.Contains(namePrefix, ".") {
+			binlogFilename := strings.Split(namePrefix, ".")
+			namePrefix = binlogFilename[0]
+		}
+		return binlogDir, namePrefix, nil
+	}
+	logger.Error("expect %s to be a directory and %s not to be one", binlogDir, val)
+	errStr := fmt.Sprintf("%s is not a valid log_bin_basename", val)
+	logger.Warn(errStr)
+	return "", "", errors.New(errStr)
+}
+
+// GetRelayLogDir TODO
+func (m *CnfFile) GetRelayLogDir() (string, error) {
+	// relay-log = /data1/mysqldata/20000/relay-log/relay-log.bin
+	// or: relay_log_basename = /data1/mysqldata/20000/relay-log/relay-bin
+	keys := []string{"relay_log", "relay_log_basename"}
+	for _, k := range keys {
+		if val, err := m.GetMySQLCnfByKey(MysqldSec, k); err == nil {
+			if filepath.IsAbs(val) { // must be an absolute path
+				return val, nil
+			}
+		}
+	}
+	return "", fmt.Errorf("在配置中没找到 relay 的配置项")
+}
+
+// GetMySQLSocket reads the socket value from my.cnf
+//
+// @receiver m
+// @return socket
+// @return err
+func (m *CnfFile) GetMySQLSocket() (socket string, err error) {
+	if m.Cfg.Section(MysqldSec).HasKey("socket") {
+		return m.Cfg.Section(MysqldSec).Key("socket").String(), nil
+	}
+	return "", fmt.Errorf("在配置中没找到socket的配置项")
+}
+
+// GetMySQLCnfByKey 从 my.cnf 获取 key 对应的 value
+// 允许替换 _, -
+// 如果 section 为空,会尝试从 key 中以 . 
切分 section +func (m *CnfFile) GetMySQLCnfByKey(section, key string) (string, error) { + if section == "" { + sk := GetSectionFromKey(key, false) + key = sk.Key + section = sk.Section + } + key = m.GetKeyFromFile(section, key) + if m.Cfg.Section(section).HasKey(key) { + } else { + return "", fmt.Errorf("在配置中没找到 %s 的配置项", key) + } + return m.Cfg.Section(section).Key(key).String(), nil +} + +// GetMyCnfByKeyWithDefault TODO +func (m *CnfFile) GetMyCnfByKeyWithDefault(section, key string, valueDefault string) string { + if val, err := m.GetMySQLCnfByKey(section, key); err != nil { + return valueDefault + } else { + return val + } +} + +// GetProxyLogFilePath 获取 Proxy log-file 的value +// +// @receiver m +// @return logFile +// @return err +func (m *CnfFile) GetProxyLogFilePath() (logFile string, err error) { + if m.Cfg.Section("mysql-proxy").HasKey("log-file") { + return m.Cfg.Section("mysql-proxy").Key("log-file").String(), nil + } + return "", fmt.Errorf("在配置中没找到log-file的配置项") +} + +// SaveMySQLConfig2Object 将 my.cnf 变成 key map +func (m *CnfFile) SaveMySQLConfig2Object() MycnfIniObject { + var object MycnfIniObject + object.Section = make(map[string]*CnfUint) + for _, section := range m.Cfg.SectionStrings() { + object.Section[section] = newMyCnfUint() + for _, keyName := range m.Cfg.Section(section).KeyStrings() { + if kv, err := m.Cfg.Section(section).GetKey(keyName); err == nil { + object.Section[section].KvMap[keyName] = kv.Value() + } + } + } + return object +} + +// FastSaveChange 快速修改一个配置项,并持久化到文件 +func (m *CnfFile) FastSaveChange(port int, section, key, value string) (err error) { + m.mu.Lock() + defer m.mu.Unlock() + // 假如删除一个不存在的Key,不会抛出异常 + m.Cfg.Section(section).DeleteKey(key) + if _, err = m.Cfg.Section(section).NewKey(key, value); err != nil { + return + } + err = m.Cfg.SaveTo(fmt.Sprintf("my.cnf.%d", port)) + return +} + +// SafeSaveFile 全量持久化配置文件 +func (m *CnfFile) SafeSaveFile(isProxy bool) (err error) { + m.mu.Lock() + defer m.mu.Unlock() + nf, err := m.sortAllkeys(isProxy) + if err != nil { + return err + } + err = nf.Cfg.SaveTo(m.FileName) + return +} + +// sortAllkeys 对写入的key进行排序 +// +// @receiver m +// @return *CnfFile +// @return error +func (m *CnfFile) sortAllkeys(isProxy bool) (*CnfFile, error) { + f := CnfFile{ + Cfg: ini.Empty(iniLoadOption), + mu: &sync.Mutex{}, + } + for _, sec := range m.Cfg.Sections() { + secName := sec.Name() + if _, err := f.Cfg.NewSection(secName); err != nil { + return nil, err + } + keys := m.Cfg.Section(secName).KeyStrings() + sort.Strings(keys) + for _, key := range keys { + if m.isShadowKey(key) { + for _, val := range m.Cfg.Section(secName).Key(key).ValueWithShadows() { + f.RenderSection(secName, key, val, isProxy) + } + } + f.RenderSection(secName, key, m.Cfg.Section(secName).Key(key).Value(), isProxy) + } + } + return &f, nil +} + +// isShadowKey TODO +// 表示下面的key,可以在配置文件重复出现 +func (m *CnfFile) isShadowKey(key string) bool { + key = strings.ReplaceAll(key, "-", "_") + sk := []string{ + "replicate_do_db", "replicate_ignore_db", "replicate_do_table", "replicate_wild_do_table", + "replicate_ignore_table", "replicate_wild_ignore_table", + } + return cmutil.HasElem(key, sk) +} + +// GetInitDirItemTpl TODO +func (m *CnfFile) GetInitDirItemTpl(initDirs map[string]string) (err error) { + mysqld, err := m.Cfg.GetSection(MysqldSec) + if err != nil { + return + } + for key := range initDirs { + initDirs[key] = mysqld.Key(key).String() + } + return +} + +// RenderSection 替换渲染配置,proxy keepalive=true 不能和mysql的bool一样进行渲染 +// +// @receiver f +// 
@receiver sectionName
+// @receiver key
+// @receiver val
+// @receiver isProxy
+// @return err
+func (m *CnfFile) RenderSection(sectionName, key, val string, isProxy bool) (err error) {
+	if m.isShadowKey(key) {
+		for _, shadowv := range strings.Split(val, ",") {
+			if _, err := m.Cfg.Section(sectionName).NewKey(key, shadowv); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+	// proxy.cnf needs its boolean keys rendered as key=value,
+	// so only my.cnf treats a literal "true" as a bare boolean key
+	if !isProxy {
+		if strings.TrimSpace(val) == "true" {
+			if _, err = m.Cfg.Section(sectionName).NewBooleanKey(key); err != nil {
+				return err
+			}
+			return nil
+		}
+	}
+	// anything else is written as a normal key = value entry
+	if _, err = m.Cfg.Section(sectionName).NewKey(key, val); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ReplaceKeyName renames oldKey to newKey when oldKey exists; otherwise it does nothing.
+// Useful e.g. for turning default_charset_server into default-charset-server.
+func (m *CnfFile) ReplaceKeyName(section string, oldKey string, newKey string) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	sel := m.Cfg.Section(section)
+	if sel.HasKey(oldKey) {
+		k, _ := sel.GetKey(oldKey)
+		sel.NewKey(newKey, k.Value())
+		sel.DeleteKey(oldKey)
+	}
+}
+
+// ReplaceMoreKv applies a batch of per-section key/value replacements
+func (m *CnfFile) ReplaceMoreKv(pairs map[string]CnfUint) error {
+	if len(pairs) <= 0 {
+		return nil
+	}
+	for section, mu := range pairs {
+		for k, v := range mu.KvMap {
+			m.ReplaceValue(section, k, false, v)
+		}
+	}
+	return nil
+}
+
+// GetKeyFromFile godoc
+// my.cnf allows both _ and - as separators in variable names, so try both
+// spellings before giving up and returning the original key
+func (m *CnfFile) GetKeyFromFile(section string, key string) string {
+	if !m.Cfg.Section(section).HasKey(key) {
+		oldKey := key
+		key = strings.ReplaceAll(key, "_", "-")
+		if !m.Cfg.Section(section).HasKey(key) {
+			key = strings.ReplaceAll(key, "-", "_")
+			if !m.Cfg.Section(section).HasKey(key) {
+				key = oldKey // never found; fall back to the input
+			}
+		}
+	}
+	return key
+}
+
+// ReplaceValue inserts the kv pair when missing and updates it when present.
+// Shadow keys are honoured, and both _ and - spellings of the key are tried.
+func (m *CnfFile) ReplaceValue(section string, key string, isBool bool, value string) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	key = m.GetKeyFromFile(section, key)
+	if !m.isShadowKey(key) {
+		m.Cfg.Section(section).DeleteKey(key)
+	}
+	if isBool {
+		m.Cfg.Section(section).NewBooleanKey(key)
+		return
+	}
+	m.Cfg.Section(section).NewKey(key, value)
+}
+
+// UpdateKeyValue updates one config item, honouring shadow keys
+func (m *CnfFile) UpdateKeyValue(section, key, value string) (err error) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	if m.isShadowKey(key) {
+		// repeatable keys get an extra occurrence instead of an overwrite
+		err = m.Cfg.Section(section).Key(key).AddShadow(value)
+		return
+	}
+	m.Cfg.Section(section).Key(key).SetValue(value)
+	return
+}
+
+// GetMyCnfFileName returns the default my.cnf path for a port; existence is not checked
+func GetMyCnfFileName(port int) string {
+	return fmt.Sprintf("%s.%d", cst.DefaultMyCnfName, port)
+}
+
+// GetProxyCnfName TODO
+/**
+ * @description: compute the proxy cnf name
+ * @receiver {int} port
+ * @return {*}
+ */
+func GetProxyCnfName(port int) string {
+	return fmt.Sprintf("%s.%d", cst.DefaultProxyCnfName, port)
+}
+
+// ReplaceValuesToFile textually replaces values in my.cnf; keys not found
+// anywhere are appended right after [mysqld]
+func (m *CnfFile) ReplaceValuesToFile(newItems map[string]string) error {
+	f, err := os.ReadFile(m.FileName)
+	if err != nil {
+		return err
+	}
+	lines := strings.Split(string(f), "\n")
+	itemsNotFound := make(map[string]string, len(newItems))
+	for k, v := range newItems {
+		itemsNotFound[k] = v
+	}
+	for i, lineText := range lines {
+		for k, v := range newItems {
+			reg := regexp.MustCompile(fmt.Sprintf(`^\s*%s\s*=(.*)`, k))
+			if reg.MatchString(lineText) {
+				lines[i] = fmt.Sprintf(`%s = %s`, k, v)
+				delete(itemsNotFound, k) // found; keep it removed on later lines too
+			}
+		}
+	}
+	for k, v := range itemsNotFound {
+		// assumes StringsInsertAfter returns the grown slice; appending cannot happen in place
+		lines = StringsInsertAfter(lines, "[mysqld]", fmt.Sprintf(`%s = %s`, k, v))
+	}
+	if err = os.WriteFile(m.FileName, []byte(strings.Join(lines, "\n")), 0644); err != nil {
+		return err
+	}
+	return nil
+}
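
// Editor's sketch (not part of this patch) of how the helpers above compose;
// the config path is hypothetical:
//
//	func exampleTuneMaxConnections() error {
//		cnf, err := LoadMyCnfForFile("/etc/my.cnf.20000")
//		if err != nil {
//			return err
//		}
//		cnf.ReplaceValue(MysqldSec, "max_connections", false, "4000")
//		return cnf.SafeSaveFile(false) // false: my.cnf-style bool rendering
//	}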
+
+// GetMysqldKeyVaule fetches one [mysqld] value from my.cnf, for pre-change comparisons
+func (m *CnfFile) GetMysqldKeyVaule(keyName string) (value string, err error) {
+	mysqld, err := m.Cfg.GetSection(MysqldSec)
+	if err != nil {
+		return "", err
+	}
+	return mysqld.Key(keyName).String(), nil
+}
+
+// CnfKey describes a my.cnf key in section.key form
+type CnfKey struct {
+	Section   string
+	Key       string
+	IsBool    bool
+	Separator string
+}
+
+// GetSectionFromKey splits "section.key" on the separator;
+// replace controls whether - is rewritten to _ (for set global use)
+func GetSectionFromKey(key string, replace bool) *CnfKey {
+	sk := &CnfKey{Separator: "."}
+	ss := strings.Split(key, sk.Separator)
+	if len(ss) == 2 {
+		sk.Section = ss[0]
+		sk.Key = ss[1]
+	} else {
+		sk.Section = ""
+		sk.Key = key
+	}
+	if replace {
+		sk.Key = strings.ReplaceAll(sk.Key, "-", "_")
+	}
+	return sk
+}
+
+// MycnfItemsMap maps mysqld variable names to their my.cnf option names
+var MycnfItemsMap = map[string]string{
+	"time_zone":            "default-time-zone",
+	"character_set_system": "character_set_server",
+}
+
+// CreateExporterConf writes a small exporter config file holding client credentials for a port
+func CreateExporterConf(fileName string, host string, port string, user string, password string) (err error) {
+	cfg := ini.Empty()
+
+	exporterSection, err := cfg.NewSection("client")
+	if err != nil {
+		return err
+	}
+	exporterSection.NewKey("user", user)
+	exporterSection.NewKey("password", password)
+	exporterSection.NewKey("host", host)
+	exporterSection.NewKey("port", port)
+	return cfg.SaveTo(fileName)
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/dbcnf_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/dbcnf_test.go
new file mode 100644
index 0000000000..41de672e58
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/dbcnf_test.go
@@ -0,0 +1,24 @@
+package util_test
+
+import (
+	"dbm-services/mysql/db-tools/dbactuator/pkg/util"
+	"testing"
+)
+
+func TestGetMySQLDatadir(t *testing.T) {
+	t.Log("start")
+	c, err := util.LoadMyCnfForFile("/etc/my.cnf.20000")
+	if err != nil {
+		t.Fatal(err)
+	}
+	datadir, err := c.GetMySQLDataDir()
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log("datadir path:", datadir)
+	logdir, err := c.GetMySQLLogDir()
+	if err != nil {
+		t.Fatal(err)
+	}
+	t.Log("logdir path:", logdir)
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/filelock.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/filelock.go
new file mode 100644
index 0000000000..3a44ed004a
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/filelock.go
@@ -0,0 +1,199 @@
+package util
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/gofrs/flock"
+	"github.com/pkg/errors"
+)
+
+// FLock is a file lock plus an optional ".data" sidecar used as a concurrency counter
+type FLock struct {
+	fileName     string
+	file         *os.File
+	filedataName string
+	filedata     *os.File
+	filedataMax  int
+}
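
// Editor's note (inferred from SetFileLockIncr below, not stated in the patch):
// the ".data" sidecar holds a single "max:current" pair, e.g.
//
//	4:2   // up to 4 concurrent holders, 2 slots currently taken
//
// FileIncrSafe bumps "current" under the flock and reports "full" (0) once the
// increment would push current past max.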
+
+// NewFlock builds a FLock; maxConn caps the concurrent holders tracked in the sidecar
+func NewFlock(filename string, maxConn int) (*FLock, error) {
+	// maxConn cannot be 0
+
+	// if the sidecar file was last modified more than an hour ago,
+	// treat it as stale and remove it
+
+	if strings.ContainsAny(filename, " ;'\"") {
+		// reject shell-unsafe file names
+		return nil, fmt.Errorf("illegal filename:%s", filename)
+	} else if maxConn == 0 {
+		return nil, fmt.Errorf("illegal maxConn:%d", maxConn)
+	}
+
+	filedataName := filename + ".data"
+	if ok, mtimeInt := GetFileModifyTime(filedataName); ok {
+		curTime := time.Now()
+		mtime := time.Unix(mtimeInt, 0)
+		timeDiff := curTime.Sub(mtime)
+		if timeDiff.Minutes() > 60 {
+			os.Remove(filedataName)
+		}
+	}
+
+	fl := &FLock{
+		fileName:     filename,
+		filedataName: filedataName,
+		filedataMax:  maxConn,
+	}
+
+	return fl, nil
+}
+
+// FileFlock tries to take the file lock without blocking
+func (fl *FLock) FileFlock() (locked bool, err error) {
+	if fl.fileName == "" {
+		return false, errors.New("fileLock filename cannot be empty")
+	}
+	fileLock := flock.New(fl.fileName)
+	return fileLock.TryLock()
+}
+
+// FileUnlock releases the file lock
+func (fl *FLock) FileUnlock() error {
+	fileLock := flock.New(fl.fileName)
+	return fileLock.Unlock()
+}
+
+// SetFileLockIncr adjusts the "max:current" counter in the sidecar by incr
+func (fl *FLock) SetFileLockIncr(incr int) (succ int, err error) {
+	f, err := os.OpenFile(fl.filedataName, os.O_CREATE|os.O_RDWR, 0644)
+	if err != nil {
+		return -1, fmt.Errorf(`open error:%v`, err.Error())
+	}
+	defer f.Close()
+	content, err := ioutil.ReadAll(f)
+	if err != nil {
+		return -1, fmt.Errorf(`io error:%v`, err.Error())
+	}
+	contentStr := strings.Trim(strings.ReplaceAll(string(content), " ", ""), "\n")
+	if contentStr == "" {
+		contentStr = fmt.Sprintf(`%d:0`, fl.filedataMax)
+	}
+	concurrent := strings.Split(contentStr, ":")
+	if len(concurrent) != 2 {
+		return -1, fmt.Errorf(`error:contentStr=%s`, contentStr)
+	}
+	maxNum, err1 := strconv.Atoi(concurrent[0])
+	curNum, err2 := strconv.Atoi(concurrent[1])
+	if err1 == nil && err2 == nil {
+		curNum += incr
+		if curNum > maxNum && incr > 0 {
+			// all slots taken: lock fail
+			return 0, nil
+		}
+		if curNum < 0 {
+			curNum = 0
+		}
+		contentStr = fmt.Sprintf(`%d:%d`, maxNum, curNum)
+		f.Seek(0, 0)
+		f.Truncate(0)
+		f.WriteString(contentStr)
+		return 1, nil
+	}
+	return -1, fmt.Errorf(`error:contentStr=%s`, contentStr)
+}
+
+// FileIncrSafe increments the counter under the flock.
+// retryInterval: seconds to wait before retrying when all slots are taken;
+// 0 means do not retry, return the failure immediately.
+// retcode:
+// 1: success incr
+// 0: full
+// -1: operation failed
+func (fl *FLock) FileIncrSafe(incr int, retryInterval int) (succ int, err error) {
+	intvl := time.Duration(retryInterval)
+
+	fileLock := flock.New(fl.fileName)
+	locked, err := fileLock.TryLock()
+
+	if err != nil {
+		// handle locking error
+		return -1, errors.Errorf("failed to get lock: %s", err.Error())
+	}
+	if locked {
+		// open, increment and close
+		succ, err2 := fl.SetFileLockIncr(incr)
+		fileLock.Unlock()
+		if succ == 1 {
+			return 1, nil
+		} else if succ == 0 {
+			if retryInterval == 0 {
+				return 0, nil
+			}
+			time.Sleep(intvl * time.Second)
+			return fl.FileIncrSafe(incr, retryInterval)
+		}
+		return -1, errors.Errorf("failed to incr: %s", err2.Error())
+	}
+	// the lock is held elsewhere: wait and retry
+	if retryInterval == 0 {
+		return 0, nil
+	}
+	time.Sleep(time.Duration(IntnRange(500, 3000)) * time.Millisecond)
+	return fl.FileIncrSafe(incr, retryInterval)
+}
+
+// FileUnlockIncr TODO
+func (fl *FLock) FileUnlockIncr(filename string) error {
+	fileLock := flock.New(filename)
+	return fileLock.Unlock()
+}
+
+// Test exercises FLock with more goroutines than slots
+func Test() {
+	filename := "flashback.lock"
+	maxConn := 4
+	fl, err := NewFlock(filename, maxConn)
+	if err != nil {
+		fmt.Println(err)
+		return
+	}
+
+	wg := &sync.WaitGroup{}
+	for i := 0; i <= 8; i++ {
+		fmt.Println(i)
+		wg.Add(1)
+		go func(i int) {
+			
time.Sleep(time.Duration(IntnRange(100, 2000)) * time.Millisecond) + defer wg.Done() + // 这个 retryInterval 尽量跟单个任务处理时间接近 + if succ, err := fl.FileIncrSafe(1, 20); succ == 1 { + // do + fmt.Printf("id=%d\n", i) + time.Sleep(20 * time.Second) + fl.FileIncrSafe(-1, 1) + } else if err != nil { + fmt.Printf("id=%d err=%v\n", i, err.Error()) + } + }(i) + } + wg.Wait() +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/helpers.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/helpers.go new file mode 100644 index 0000000000..9b4f985342 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/helpers.go @@ -0,0 +1,52 @@ +package util + +import ( + "dbm-services/common/go-pubpkg/logger" + "fmt" + "os" + "strings" +) + +const ( + // DefaultErrorExitCode TODO + DefaultErrorExitCode = 1 +) + +// CheckErr TODO +func CheckErr(err error) { + if err == nil { + return + } + msg, ok := StandardErrorMessage(err) + if !ok { + msg = err.Error() + if !strings.HasPrefix(msg, "error: ") { + msg = fmt.Sprintf("error: %s", msg) + } + } + LoggerErrorStack(logger.Error, err) + fatal(msg, DefaultErrorExitCode) +} + +func fatal(msg string, code int) { + if len(msg) > 0 { + // add newline if needed + if !strings.HasSuffix(msg, "\n") { + msg += "\n" + } + fmt.Fprint(os.Stderr, msg) + } + os.Exit(code) +} + +type debugError interface { + DebugError() (msg string, args []interface{}) +} + +// StandardErrorMessage TODO +func StandardErrorMessage(err error) (string, bool) { + if debugErr, ok := err.(debugError); ok { + logger.Info(debugErr.DebugError()) + } + return "", false +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/httpclient/client.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/httpclient/client.go new file mode 100644 index 0000000000..11a7f3d32d --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/httpclient/client.go @@ -0,0 +1,73 @@ +package httpclient + +import ( + "dbm-services/common/go-pubpkg/cmutil" + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "encoding/base64" + "fmt" + "io" + "net/http" + "os" +) + +// Download TODO +func Download(server, dstDir string, fileName string, authUser, authPass string, bwlimitMB int64) error { + srcFile := fmt.Sprintf("%s%s", server, fileName) + tgtFile := fmt.Sprintf("%s/%s", dstDir, fileName) + if fileName == "" { + return fmt.Errorf("fileName to download cannot be empty") + // tgtFile = fmt.Sprintf("%s/%s", dstDir, "__empty_file_list__") + } + logger.Info("start download to %s", tgtFile) + f, err := os.Create(tgtFile) + if err != nil { + return err + } + defer f.Close() + resp, err := DoWithBA(http.MethodGet, srcFile, nil, authUser, authPass) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("bad status: %s", resp.Status) + } + done := make(chan int, 1) + defer close(done) + go func(chan int) { + osutil.PrintFileSizeIncr(tgtFile, 1, 10, logger.Info, done) + }(done) + _, err = cmutil.IOLimitRate(f, resp.Body, bwlimitMB) // util.IOLimitRate(f, resp.Body, bwlimitMB) + if err != nil { + return err + } + return nil +} + +func basicAuth(username, password string) string { + auth := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(auth)) +} + +func redirectPolicyFunc(req *http.Request, via []*http.Request) error { + req.Header.Add("Authorization", "Basic "+basicAuth("username1", "password123")) + return nil +} + +// DoWithBA TODO +// http do with basic auth +func 
DoWithBA(method string, url string, payload io.Reader, username, password string) (*http.Response, error) { + req, err := http.NewRequest(method, url, payload) + if err != nil { + return nil, err + } + // Set the auth for the request. + req.SetBasicAuth(username, password) + + client := &http.Client{ + CheckRedirect: redirectPolicyFunc, + } + return client.Do(req) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/httpclient/httpclient.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/httpclient/httpclient.go new file mode 100644 index 0000000000..8a8bb26c42 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/httpclient/httpclient.go @@ -0,0 +1,130 @@ +// Package httpclient TODO +package httpclient + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "time" + + "github.com/pkg/errors" +) + +// New TODO +func New() *http.Client { + client := &http.Client{Timeout: 10 * time.Second} + return client +} + +// HttpClient TODO +type HttpClient struct { + Url string + Params interface{} + Headers map[string]string + Client *http.Client +} + +// Get TODO +func Get(client *http.Client, url string, params map[string]string, headers map[string]string) ([]byte, error) { + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, errors.Wrap(err, "make request") + } + q := req.URL.Query() + for pk, pv := range params { + q.Add(pk, pv) + } + req.URL.RawQuery = q.Encode() + if headers != nil { + for hk, hv := range headers { + req.Header.Add(hk, hv) + } + } + + resp, err := client.Do(req) + if err != nil { + return nil, errors.Wrap(err, "do request") + } + defer resp.Body.Close() + respBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "read resp body") + } + return respBody, nil +} + +// Post TODO +func Post(client *http.Client, url string, params interface{}, contentType string, headers map[string]string) ([]byte, + error) { + jsonData, err := json.Marshal(params) + if err != nil { + return nil, errors.Wrap(err, "param marshal to json") + } + req, err := http.NewRequest(http.MethodPost, url, bytes.NewBuffer(jsonData)) + if err != nil { + return nil, errors.Wrap(err, "make request") + } + + if headers != nil { + for hk, hv := range headers { + req.Header.Add(hk, hv) + } + } + if contentType != "" { + req.Header.Add("Content-Type", contentType) + } + resp, err := client.Do(req) + if err != nil { + return nil, errors.Wrap(err, "do request") + } + defer resp.Body.Close() + respBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "read resp body") + } + if !(resp.StatusCode >= 200 && resp.StatusCode < 300) { + return nil, errors.Errorf("response code %d. 
body: %s", resp.StatusCode, respBody) + } + return respBody, nil +} + +// IBSHttpApiResp TODO +type IBSHttpApiResp struct { + Code string `json:"code"` + Message string `json:"message"` + Data json.RawMessage `json:"data"` +} + +// BaseHttpApiResp TODO +type BaseHttpApiResp struct { + Code int `json:"code"` + Message string `json:"message"` + Data interface{} `json:"data"` +} + +// PostJson TODO +func (c *HttpClient) PostJson(url string, params interface{}, headers map[string]string) (json.RawMessage, error) { + respBytes, err := Post(c.Client, url, params, "application/json", headers) + if err != nil { + return nil, err + } + return respBytes, nil +} + +// PostJsonWithServers TODO +func (c *HttpClient) PostJsonWithServers(client *http.Client, servers []string, params interface{}, + headers map[string]string) (*IBSHttpApiResp, error) { + + return nil, nil +} + +// PostForm TODO +func PostForm(client *http.Client, url string, params interface{}, headers map[string]string) ([]byte, error) { + return Post(client, url, params, "application/x-www-form-urlencoded", headers) +} + +// PostFile TODO +func PostFile(client *http.Client, url string, params interface{}, headers map[string]string) ([]byte, error) { + return Post(client, url, params, "multipart/form-data", headers) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/logger.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/logger.go new file mode 100644 index 0000000000..b76aad8a27 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/logger.go @@ -0,0 +1,10 @@ +package util + +// LoggerErrorStack 在最外层遇到 error 时打印 stack 信息到日志 +// err == nil 时不打印 +// output 是个 logger,避免在 util 里引入 logger导致循环 import +func LoggerErrorStack(output func(format string, args ...interface{}), err error) { + if err != nil { + output("%+v", err) + } +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/change_master.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/change_master.go new file mode 100644 index 0000000000..bbb258efce --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/change_master.go @@ -0,0 +1,100 @@ +package mysqlutil + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "fmt" + "regexp" + "strconv" + + "github.com/pkg/errors" +) + +// ChangeMaster TODO +type ChangeMaster struct { + MasterHost string `json:"master_host" validate:"required,ip" ` // 主库ip + MasterPort int `json:"master_port" validate:"required,lt=65536,gte=3306"` // 主库端口 + MasterUser string `json:"master_user" validate:"required"` + MasterPassword string `json:"master_password" validate:"required"` + MasterLogFile string `json:"master_log_file"` // binlog 文件名称 + MasterLogPos int64 `json:"master_log_pos"` // binlog 位点信息 + + MasterAutoPosition int `json:"master_auto_position"` + Channel string `json:"channel"` + IsGtid bool `json:"is_gtid"` // 是否启动GID方式进行建立主从 + ExecutedGtidSet string `json:"executed_gtid_set"` + MaxTolerateDelay int `json:"max_tolerate_delay"` // 最大容忍延迟,即主从延迟小于该值,认为建立主从关系成功 + Force bool `json:"force"` // 如果当前实例存在主从关系是否直接reset slave后,强制change master + + ChangeSQL string `json:"change_sql"` +} + +// Validate TODO +func (c *ChangeMaster) Validate() error { + return nil +} + +// GetSQL 根据各个字段组合成 change master to +func (c *ChangeMaster) GetSQL() string { + var sql string + if c.IsGtid { + sql = fmt.Sprintf( + `CHANGE MASTER TO MASTER_HOST='%s',MASTER_PORT=%d, MASTER_USER = '%s', MASTER_PASSWORD = '%s'`, + c.MasterHost, c.MasterPort, 
c.MasterUser, c.MasterPassword, + ) + } else { + sql = fmt.Sprintf( + `CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, MASTER_USER ='%s', MASTER_PASSWORD='%s',MASTER_LOG_FILE='%s', MASTER_LOG_POS=%d`, + c.MasterHost, + c.MasterPort, + c.MasterUser, + c.MasterPassword, + c.MasterLogFile, + c.MasterLogPos, + ) + } + c.ChangeSQL = sql + return sql +} + +// ParseChangeSQL 根据 change_sql 字段拆解成各个字段 +func (c *ChangeMaster) ParseChangeSQL() error { + // 移除 = 号前后的空格 + c.ChangeSQL = util.RegexReplaceSubString(c.ChangeSQL, `\s+=`, "=") + c.ChangeSQL = util.RegexReplaceSubString(c.ChangeSQL, `=\s+`, "=") + + reHost := regexp.MustCompile(`(?iU)master_host=['"](.*)['"]`) + rePort := regexp.MustCompile(`(?i)master_port=(\d+)`) + reLogFile := regexp.MustCompile(`(?iU)master_log_file=['"](.*)['"]`) + reLogPos := regexp.MustCompile(`(?i)master_log_pos=(\d+)`) + if m := reLogFile.FindStringSubmatch(c.ChangeSQL); len(m) == 2 { + c.MasterLogFile = m[1] + } + if m := reLogPos.FindStringSubmatch(c.ChangeSQL); len(m) == 2 { + c.MasterLogPos, _ = strconv.ParseInt(m[1], 10, 64) + } + if m := reHost.FindStringSubmatch(c.ChangeSQL); len(m) == 2 { + c.MasterHost = m[1] + } + if m := rePort.FindStringSubmatch(c.ChangeSQL); len(m) == 2 { + c.MasterPort, _ = strconv.Atoi(m[1]) + } + logger.Warn("parsed items %+v", c) + return nil +} + +// ParseXtraBinlogInfo 从 xtrabackup_binlog_info 中解析出 file,pos,无 host,port 信息 +func ParseXtraBinlogInfo(binlogInfo string) (*ChangeMaster, error) { + // binlog20000.005986 54045 + reg := regexp.MustCompile(`(.+\.\d+)\s+(\d+)`) + if m := reg.FindStringSubmatch(binlogInfo); len(m) != 3 { + return nil, errors.Errorf("fail to get binlog_info from %s", binlogInfo) + } else { + pos, _ := strconv.Atoi(m[2]) + cm := &ChangeMaster{ + MasterLogFile: m[1], + MasterLogPos: int64(pos), + } + return cm, nil + } +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/change_master_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/change_master_test.go new file mode 100644 index 0000000000..ab1189bfb5 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/change_master_test.go @@ -0,0 +1,23 @@ +package mysqlutil + +import ( + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestChangeMasterParseSQL(t *testing.T) { + Convey("Parse change master sql", func() { + changeSQL1 := "-- CHANGE MASTER TO MASTER_LOG_FILE='binlog20000.004450', MASTER_LOG_POS=63779;" + cm1 := ChangeMaster{ChangeSQL: changeSQL1} + cm1.ParseChangeSQL() + So("binlog20000.004450", ShouldEqual, cm1.MasterLogFile) + So(63779, ShouldEqual, cm1.MasterLogPos) + + changeSQL2 := "change master to master_log_file='xxx.100', master_log_pos = 123, master_host= \"1.1.1.1\", master_port =3306" + cm2 := ChangeMaster{ChangeSQL: changeSQL2} + cm2.ParseChangeSQL() + So("1.1.1.1", ShouldEqual, cm2.MasterHost) + So(3306, ShouldEqual, cm2.MasterPort) + }) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/hide_passowrd.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/hide_passowrd.go new file mode 100644 index 0000000000..1df291a6fd --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/hide_passowrd.go @@ -0,0 +1,76 @@ +package mysqlutil + +import ( + "regexp" + "strings" +) + +var ( + mysqlRegex = regexp.MustCompile(`mysql.*-u\w+.*\s-p(\w+).*`) + mysqlAdminRegex = regexp.MustCompile(`mysqladmin.*-u\w+.*\s-p(\w+).*`) + mysqlPasswordRegex = regexp.MustCompile(`\s-p[^\s]+`) + masterPasswordRegexp = regexp.MustCompile(`master_password="[^\s]*"`) + identifyByRegex = regexp.MustCompile(`identified by '[^\s]*'`) + userPasswordRegex = regexp.MustCompile(`\s-u\w+.*\s-p(\w+).*`) + dsnRegex = regexp.MustCompile(`\w+:[^\s]*@tcp\([^\s]+\)`) + dsnPasswordRegex = regexp.MustCompile(`:[^\s]*@tcp\(`) +) + +// ClearSensitiveInformation clear sensitive information from input +func ClearSensitiveInformation(input string) string { + output := RemoveMysqlCommandPassword(input) + output = ClearMasterPasswordInSQL(output) + output = RemoveMysqlAdminCommandPassword(output) + output = ClearIdentifyByInSQL(output) + output = RemovePasswordInDSN(output) + return output +} + +// ClearIdentifyByInSQL TODO +func ClearIdentifyByInSQL(input string) string { + output := identifyByRegex.ReplaceAllString(input, `identified by 'xxxx'`) + return output +} + +// ClearIdentifyByInSQLs TODO +func ClearIdentifyByInSQLs(input []string) []string { + output := make([]string, len(input)) + for i, s := range input { + output[i] = identifyByRegex.ReplaceAllString(strings.ToLower(s), `identified by 'xxxx'`) + } + return output +} + +// ClearMasterPasswordInSQL TODO +func ClearMasterPasswordInSQL(input string) string { + output := masterPasswordRegexp.ReplaceAllString(input, `master_password="xxxx"`) + return output +} + +// RemoveMysqlCommandPassword replace password field +func RemoveMysqlCommandPassword(input string) string { + return mysqlRegex.ReplaceAllStringFunc(input, func(sub string) string { + return mysqlPasswordRegex.ReplaceAllString(sub, " -pxxxx") + }) +} + +// RemoveMysqlAdminCommandPassword replace password field +func RemoveMysqlAdminCommandPassword(input string) string { + return mysqlAdminRegex.ReplaceAllStringFunc(input, func(sub string) string { + return mysqlPasswordRegex.ReplaceAllString(sub, " -pxxxx") + }) +} + +// RemovePassword replace password in -u -p pattern +func RemovePassword(input string) string { + return userPasswordRegex.ReplaceAllStringFunc(input, func(sub string) string { + return mysqlPasswordRegex.ReplaceAllString(sub, " -pxxxx") + }) +} + +// RemovePasswordInDSN TODO +func RemovePasswordInDSN(input string) string { + return dsnRegex.ReplaceAllStringFunc(input, func(sub string) string { + return 
dsnPasswordRegex.ReplaceAllString(sub, `:xxxx@tcp(`)
+	})
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_cnf.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_cnf.go
new file mode 100644
index 0000000000..7a1e00abe9
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_cnf.go
@@ -0,0 +1,23 @@
+package mysqlutil
+
+import (
+	"gopkg.in/ini.v1"
+)
+
+// MysqlCnfFile parses my.cnf as an ini file
+type MysqlCnfFile struct {
+	FileName string
+	Cfg      *ini.File
+}
+
+// MycnfObject TODO
+type MycnfObject struct {
+	Client    map[string]string `json:"client" sectag:"client"`
+	Mysql     map[string]string `json:"mysql" sectag:"mysql"`
+	Mysqld    map[string]string `json:"mysqld" sectag:"mysqld"`
+	Mysqldump map[string]string `json:"mysqldump" sectag:"mysqldump"`
+	Mysqld55  map[string]string `json:"mysqld-5.5" sectag:"mysqld-5.5"`
+	Mysqld56  map[string]string `json:"mysqld-5.6" sectag:"mysqld-5.6"`
+	Mysqld57  map[string]string `json:"mysqld-5.7" sectag:"mysqld-5.7"`
+	Mysqld80  map[string]string `json:"mysqld-8.0" sectag:"mysqld-8.0"`
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_cnf_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_cnf_test.go
new file mode 100644
index 0000000000..21c1730e5e
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_cnf_test.go
@@ -0,0 +1,84 @@
+package mysqlutil_test
+
+import (
+	"testing"
+)
+
+// go test -v pkg/util/mysqlutil/mysql_cnf*
+func TestGenerateMycnf(t *testing.T) {
+	t.Logf("start ...")
+	input := `{
+    "client": {
+        "port": "3306",
+        "socket": "mysqldata/mysql.sock"
+    },
+    "mysql": {
+        "default-character-set": "utf-8",
+        "no_auto_rehash": "ON",
+        "port": "3306",
+        "socket": "mysqldata/mysql.sock"
+    },
+    "mysqld": {
+        "max_connect_errors":"99999",
+        "skip_symbolic_links":"ON",
+        "init_connect":"\"set @user=user(),@cur_user=current_user(); insert into test.conn_log values(connection_id(),now(),@user,@cur_user,'127.0.0.1');\"",
+        "skip_name_resolve":"ON",
+        "bind_address":"127.0.0.1",
+        "binlog_format":"ROW",
+        "character_set_server":"utf8",
+        "datadir":"/data1/mysqldata/20001/data",
+        "default_storage_engine":"innodb",
+        "innodb_buffer_pool_size":"2150M",
+        "innodb_data_home_dir":"/data1/mysqldata/20001/innodb/data",
+        "innodb_file_format":"Barracuda",
+        "innodb_file_per_table":"1",
+        "innodb_flush_log_at_trx_commit":"0",
+        "innodb_flush_method":"O_DIRECT",
+        "innodb_io_capacity":"1000",
+        "innodb_lock_wait_timeout":"50",
+        "innodb_log_buffer_size":"32M",
+        "innodb_log_file_size":"268435456",
+        "innodb_log_files_in_group":"4",
+        "innodb_log_group_home_dir":"/data1/mysqldata/20001/innodb/log",
+        "innodb_read_io_threads":"4",
+        "innodb_thread_concurrency":"16",
+        "innodb_write_io_threads":"4",
+        "interactive_timeout":"86400",
+        "key_buffer_size":"64M",
+        "log_bin":"/data/mysqllog/20001/binlog/binlog20001.bin",
+        "log_bin_trust_function_creators":"1",
+        "log_slave_updates":"1",
+        "log_warnings":"0",
+        "long_query_time":"1",
+        "max_allowed_packet":"64M",
+        "max_binlog_size":"256M",
+        "max_connect_errors":"99999999",
+        "max_connections":"3000",
+        "myisam_sort_buffer_size":"64M",
+        "port":"20001",
+        "query_cache_size":"0",
+        "query_cache_type":"0",
+        "read_buffer_size":"2M",
+        "relay_log":"/data1/mysqldata/20001/relay-log/relay-log.bin",
+        "replicate_wild_ignore_table":"mysql.%;test.conn_log",
+        "server_id":"104544287",
+        "skip_external_locking":"ON",
+        "skip_symbolic_links":"ON",
+        "slow_query_log":"1",
+        
"slow_query_log_file":"/data/mysqllog/20001/slow-query.log", + "socket":"/data1/mysqldata/20001/mysql.sock", + "sort_buffer_size":"2M", + "tmpdir":"/data1/mysqldata/20001/tmp", + "wait_timeout":"86400", + "sql_mode":"" + }, + "mysqldump": { + "max_allowed_packet": "64M", + "quick": "ON" + }, + "mysqld-5.5": { + "innodb_additional_mem_pool_size": "20M" + } + }` + t.Log(input) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_dumper.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_dumper.go new file mode 100644 index 0000000000..b2a4113286 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_dumper.go @@ -0,0 +1,280 @@ +package mysqlutil + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "fmt" + "path" + "regexp" + "runtime" + "strings" + "sync" + + "github.com/panjf2000/ants/v2" +) + +var dumpCompleteReg = regexp.MustCompile("Dump completed on") + +// Dumper TODO +type Dumper interface { + Dump() error +} + +// MySQLDumpOption TODO +type MySQLDumpOption struct { + /* DumpSchema bool + DumpData bool */ + NoData bool + AddDropTable bool // 默认 false 代表添加 --skip-add-drop-table 选项 + NeedUseDb bool + NoCreateDb bool + NoCreateTb bool + DumpRoutine bool // 默认 false 代表添加不导出存储过程,True导出存储过程 + DumpTrigger bool // 默认 false 代表添加不导出触发器 + DumpEvent bool // 默认 false 导出 event + +} + +type runtimectx struct { + maxConcurrency int + maxResourceUsePercent int +} + +// MySQLDumper TODO +type MySQLDumper struct { + MySQLDumpOption + DumpDir string // 备份到哪个目录 + DbBackupUser string + DbBackupPwd string + Ip string + Port int + Charset string + DumpCmdFile string // mysqldump 的绝对路径 + DbNames []string + IsMaster bool + // Todo + // SelfDefineArgs []string ... + // Precheck ... 
+	runtimectx
+}
+
+// MySQLDumperTogether dumps several databases into one output file
+type MySQLDumperTogether struct {
+	MySQLDumper
+	OutputfileName string
+	UseTMySQLDump  bool // whether to use the in-house tmysqldump, usually shipped in the backup directory
+}
+
+// checkDumpComplete checks whether the dump finished successfully
+//
+// @receiver file  absolute path of the dumped SQL file
+// @return err
+func checkDumpComplete(file string) (err error) {
+	// read the last 10 lines
+	res, err := util.ReverseRead(file, 10)
+	if err != nil {
+		return err
+	}
+	for _, l := range res {
+		// a match means the dump file is complete
+		if dumpCompleteReg.MatchString(l) {
+			return nil
+		}
+	}
+	return fmt.Errorf("备份文件没有匹配到Dump completed on")
+}
+
+// init sets the runtime limits
+//
+// @receiver m
+func (m *MySQLDumper) init() {
+	m.maxConcurrency = runtime.NumCPU() / 2
+	m.maxResourceUsePercent = 50
+	if m.IsMaster || m.maxConcurrency == 0 {
+		// never dump concurrently on a master
+		m.maxConcurrency = 1
+	}
+}
+
+// Dump Together: the db names are passed space separated, e.g. --databases db1 db2 > just_one_file.sql
+//
+// @receiver m
+// @return err
+func (m *MySQLDumperTogether) Dump() (err error) {
+	m.init()
+	outputFile := path.Join(m.DumpDir, m.OutputfileName)
+	errFile := path.Join(m.DumpDir, m.OutputfileName+".err")
+	dumpOption := ""
+	if m.UseTMySQLDump {
+		dumpOption = m.getTMySQLDumpOption()
+	}
+	dumpCmd := m.getDumpCmd(strings.Join(m.DbNames, " "), outputFile, errFile, dumpOption)
+	logger.Info("mysqldump cmd:%s", RemovePassword(dumpCmd))
+	output, err := osutil.ExecShellCommand(false, dumpCmd)
+	if err != nil {
+		return fmt.Errorf("execute %s get an error:%s,%w", dumpCmd, output, err)
+	}
+	if err := checkDumpComplete(outputFile); err != nil {
+		logger.Error("checkDumpComplete failed %s", err.Error())
+		return err
+	}
+	return
+}
+
+// Dump OneByOne: each db goes to its own file, with bounded concurrency
+//
+// @receiver m
+// @return err
+func (m *MySQLDumper) Dump() (err error) {
+	var wg sync.WaitGroup
+	var errs []string
+	m.init()
+	errChan := make(chan error, 1)
+	done := make(chan struct{})
+	logger.Info("mysqldump data:%+v", *m)
+	pool, _ := ants.NewPool(m.maxConcurrency)
+	defer pool.Release()
+	f := func(db string) func() {
+		return func() {
+			defer wg.Done()
+			outputFile := path.Join(m.DumpDir, fmt.Sprintf("%s.sql", db))
+			errFile := path.Join(m.DumpDir, fmt.Sprintf("%s.err", db))
+			dumpCmd := m.getDumpCmd(db, outputFile, errFile, "")
+			logger.Info("mysqldump cmd:%s", RemovePassword(dumpCmd))
+			output, err := osutil.ExecShellCommand(false, dumpCmd)
+			if err != nil {
+				errChan <- fmt.Errorf("execute %s get an error:%s,%w", dumpCmd, output, err)
+				return
+			}
+			if err := checkDumpComplete(outputFile); err != nil {
+				errChan <- err
+				return
+			}
+		}
+	}
+
+	for _, db := range m.DbNames {
+		wg.Add(1)
+		pool.Submit(f(db))
+	}
+	go func() {
+		for err := range errChan {
+			logger.Error("dump db failed: %s", err.Error())
+			errs = append(errs, err.Error())
+		}
+		close(done) // signal that errs is fully collected
+	}()
+	wg.Wait()
+	close(errChan)
+	<-done // wait for the collector to drain errChan before reading errs
+	if len(errs) > 0 {
+		return fmt.Errorf("errors: %s", strings.Join(errs, "\n"))
+	}
+	return err
+}
+
+/*
+Notes on mysqldump options:
+-B --databases: everything after it is treated as database names, space separated, e.g. --databases db1 db2 >> aaa.sql
+
+-d, --no-data: do not dump row data; commonly used for schema-only dumps, e.g. --databases testdb -d > testdb_d.sql .
+Note that together with -B the output also carries create-database statements:
+CREATE DATABASE testdb ...
+USE `testdb`;
+--skip-add-drop-table: do not emit DROP TABLE IF EXISTS table_name;
+(the default is --add-drop-table: Add a DROP TABLE before each create)
+Keeping the skip variant is generally advisable, otherwise a carelessly used dump can drop a table that was still correct.
+*/
+
+// getDumpCmd TODO
+/*
+mysqldump --skip-add-drop-table -d testdb > testdb.sql
+
+What DumpSchema does:
+1. one schema file per DB
+2. file name: DumpDir/$dump_file.$old_db_name.$SUBJOB_ID
+3. 
$mysqldump_file +-h$SOURCE_IP +-P $SOURCE_PORT +-u$dbbackup_user +-p$dbbackup_pass $dump_schema_opt +--skip-foreign-key-check +--skip-opt +--create-option +--single-transaction +-q +--no-autocommit +--default-character-set=$charset_server +-R $create_db_opt $old_db_name +>/data/dbbak/$dump_file.$old_db_name 2>/data/dbbak/$dump_file.$old_db_name.$SUBJOB_ID.err; +*/ +func (m *MySQLDumper) getDumpCmd(dbName, outputFile, errFile, dumpOption string) (dumpCmd string) { + if m.NoData { + dumpOption += " -d " + } + if m.AddDropTable { + dumpOption += " --add-drop-table " + } else { + dumpOption += "--skip-add-drop-table" + } + if m.NeedUseDb { + dumpOption += " -B " + } + if m.NoCreateDb { + dumpOption += " -n " + } + if m.NoCreateTb { + dumpOption += " -t " + } + if m.DumpRoutine { + dumpOption += " -R " + } + if m.DumpTrigger { + dumpOption += " --triggers " + } else { + dumpOption += " --skip-triggers " + } + if m.DumpEvent { + dumpOption += " --events" + } + dumpCmd = fmt.Sprintf( + `%s + -h%s + -P%d + -u%s + -p%s + --skip-opt + --create-options + --single-transaction + --max-allowed-packet=1G + -q + --no-autocommit + --default-character-set=%s %s %s > %s 2>%s`, + m.DumpCmdFile, + m.Ip, + m.Port, + m.DbBackupUser, + m.DbBackupPwd, + m.Charset, + dumpOption, + dbName, + outputFile, + errFile, + ) + return strings.ReplaceAll(dumpCmd, "\n", " ") +} + +// getTMySQLDumpOption 自研mysqldump +// +// @receiver m +// @return dumpCmd +func (m *MySQLDumper) getTMySQLDumpOption() (dumpOption string) { + return fmt.Sprintf( + ` + --ignore-show-create-table-error + --skip-foreign-key-check + --max-concurrency=%d + --max-resource-use-percent=%d + `, m.maxConcurrency, m.maxResourceUsePercent, + ) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_dumper_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_dumper_test.go new file mode 100644 index 0000000000..79b9ca5fbb --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_dumper_test.go @@ -0,0 +1,61 @@ +package mysqlutil_test + +import ( + "dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil" + "testing" +) + +func TestDump(t *testing.T) { + t.Log("start testing dump") + var dumper mysqlutil.Dumper + dumper = &mysqlutil.MySQLDumper{ + DumpDir: "/data/", + Ip: "127.0.0.1", + Port: 3306, + DbBackupUser: "make", + DbBackupPwd: "make", + DbNames: []string{"bk-dbm", "test", "cmdb"}, + Charset: "utf8mb4", + DumpCmdFile: "/usr/local/mysql/bin/mysqldump", + MySQLDumpOption: mysqlutil.MySQLDumpOption{ + NoData: true, + AddDropTable: true, + NeedUseDb: true, + DumpRoutine: true, + DumpTrigger: false, + }, + } + if err := dumper.Dump(); err != nil { + t.Fatal("dump failed: ", err.Error()) + } + t.Log("ending backup...") +} + +func TestDumpTogether(t *testing.T) { + t.Log("start testing dump") + var dumper mysqlutil.Dumper + dumper = &mysqlutil.MySQLDumperTogether{ + MySQLDumper: mysqlutil.MySQLDumper{ + DumpDir: "/data/", + Ip: "127.0.0.1", + Port: 3306, + DbBackupUser: "make", + DbBackupPwd: "make", + DbNames: []string{"bk-dbm", "test", "cmdb"}, + DumpCmdFile: "/usr/local/mysql/bin/mysqldump", + Charset: "utf8", + MySQLDumpOption: mysqlutil.MySQLDumpOption{ + NoData: true, + AddDropTable: true, + NeedUseDb: true, + DumpRoutine: true, + DumpTrigger: false, + }, + }, + OutputfileName: "make.sql", + } + if err := dumper.Dump(); err != nil { + t.Fatal("dump failed: ", err.Error()) + } + t.Log("ending backup...") +} diff --git 
a/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_os.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_os.go new file mode 100644 index 0000000000..0c0c18e238 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_os.go @@ -0,0 +1,249 @@ +package mysqlutil + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "fmt" + "regexp" + "strconv" + "strings" + + "github.com/pkg/errors" + "github.com/shirou/gopsutil/v3/mem" +) + +// GetInstMemByIP 返回的内存单位是 MB +func GetInstMemByIP(instCount uint64) (uint64, error) { + vMem, err := mem.VirtualMemory() + if err != nil { + return 0, err + } + kilo := uint64(1024) + totalMemInMi := vMem.Total / kilo / kilo + + var availMem int64 + switch { + case totalMemInMi <= 2*kilo: + if int64(float64(totalMemInMi)*0.3) < 256 { + availMem = int64(float64(totalMemInMi) * 0.3) + } else { + availMem = 256 + } + case totalMemInMi <= 4*kilo: + if int64(float64(totalMemInMi)*0.5) < (1024 + 512) { + availMem = int64(float64(totalMemInMi) * 0.5) + } else { + availMem = 1024 + 512 + } + case totalMemInMi <= 18*kilo: + availMem = int64(float64(totalMemInMi-1*kilo) * 0.6) + default: + availMem = int64(float64(totalMemInMi) * 0.7) + } + mysqlTotalMem := float64(availMem) * ratio(instCount) + return uint64(mysqlTotalMem) / instCount, nil +} + +func ratio(instNum uint64) float64 { + switch { + case instNum <= 8: + return 1 + case instNum <= 16: + return 0.8 + case instNum <= 32: + return 0.7 + default: + return 1 + } +} + +// IsSudo TODO +func IsSudo() bool { + result, err := osutil.ExecShellCommand(false, "/usr/bin/id |grep 30019") + if err != nil { + logger.Warn("/usr/bin/id |grep 30019 error: %s", err.Error()) + return false + } + if strings.Contains(string(result), "30019") { + return true + } + return false +} + +// InnodbDataFilePathValue TODO +func InnodbDataFilePathValue(value string) string { + result := regexp.MustCompile(`(\d+)(m|M|g|G)`).FindStringSubmatch(value) + if len(result) > 0 && + regexp.MustCompile("(?i)M").MatchString(result[2]) { + return fmt.Sprintf("%sM:autoextend\n", result[1]) + } else if len(result) > 0 && + regexp.MustCompile("(?i)G").MatchString(result[2]) { + size, err := strconv.Atoi(result[1]) + if err != nil { + logger.Info("%s convert to int get an error:%s", result[1], err.Error()) + return "" + } + var ( + ibDataStr = "" + index = 1 + ) + for ; size > 10; size -= 10 { + ibDataStr += fmt.Sprintf("ibdata%d:10G;", index) + index++ + } + ibDataStr += fmt.Sprintf("ibdata%d:%dG:autoextend", index, size) + return ibDataStr + } + + return "" +} + +// MySQLVersionParse (): +// input: select version() 获取到的string +// output: 获取tmysql中的mysql前缀版本 +// example: +// 5.7.20-tmysql-3.1.5-log ==> 5*1000000 + 7*1000 + 20 ==> 5007020 +// MySQL5.1.13 ==> 5*1000000+1*1000+13 ==> 5001013 +func MySQLVersionParse(version string) uint64 { + re := regexp.MustCompile(`([\d]+).?([\d]+)?.?([\d]+)?`) + return mysqlVersionParse(re, version) +} + +// TmysqlVersionParse tmysql version parse +// +// @receiver mysqlVersion +// @return uint64 +func TmysqlVersionParse(mysqlVersion string) uint64 { + re := regexp.MustCompile(`tmysql-([\d]+).?([\d]+)?.?([\d]+)?`) + return mysqlVersionParse(re, mysqlVersion) +} + +// GetMajorVerNum TODO +// GetIntMajorVersion +// 获取主版本 int 类型 +// 用于版本比较 +func GetMajorVerNum(versionNu uint64) uint64 { + return versionNu / 1000000 +} + +// VersionCompare TODO +// InsVersionCompare +// 切换主从版本比较 +// 现在的版本前置检查条件:不论是mysql 还是 tmysql 
,大版本一样都可以进行主从切换 +// (mysql:5.5 5.6 5.7 tmysql: 2.2 ) +func VersionCompare(masterVer, slaveVer string) (err error) { + if strings.TrimSpace(masterVer) == "" || strings.TrimSpace(slaveVer) == "" { + return errors.New("Compare Version Is Empty String!!!") + } + masterMajVer := GetMajorVerNum(MySQLVersionParse(masterVer)) + slaveMajVer := GetMajorVerNum(MySQLVersionParse(slaveVer)) + if masterMajVer > slaveMajVer { + err = fmt.Errorf("master version(%s) must less than or equal to slave version(%s)", masterVer, slaveVer) + return err + } + return nil +} + +// GetMajorVersion 获取mysql的大版本号 From MySQL Version Parse 返回的大版本号码 +// +// @receiver versionNu +// @return MajorVersion +func GetMajorVersion(versionNu uint64) (majorVersion string) { + first := versionNu / 1000000 + second := (versionNu % 1000000) / 1000 + return fmt.Sprintf("%d.%d", first, second) +} + +func mysqlVersionParse(re *regexp.Regexp, mysqlVersion string) uint64 { + result := re.FindStringSubmatch(mysqlVersion) + var ( + total uint64 + billion string + thousand string + single string + // 2.1.5 => 2 * 1000000 + 1 * 1000 + 5 + ) + switch len(result) { + case 0: + return 0 + case 4: + billion = result[1] + thousand = result[2] + single = result[3] + if billion != "" { + b, err := strconv.ParseUint(billion, 10, 64) + if err != nil { + // log.Printf("%s", err) + b = 0 + } + total += b * 1000000 + } + if thousand != "" { + t, err := strconv.ParseUint(thousand, 10, 64) + if err != nil { + // log.Printf("%s", err) + t = 0 + } + total += t * 1000 + } + if single != "" { + s, err := strconv.ParseUint(single, 10, 64) + if err != nil { + s = 0 + } + total += s + } + default: + return 0 + } + return total +} + +// GenMysqlServerId 生成my.cnf 里面的server_id +// +// @receiver ip +// @receiver port +// @return uint64 +// @return error +func GenMysqlServerId(ip string, port int) (uint64, error) { + var ( + ips = strings.Split(ip, ".") + err error + first int + ) + if len(ips) != 4 { + err = fmt.Errorf("len(ips) is not 4. 
ips:%+v", ips) + return 0, err + } + switch { + case ips[0] == "172": + first = 1 + case ips[0] == "10": + first = 2 + case ips[0] == "192": + first = 3 + default: + first = 4 + } + + first += (port % 10000 % 64) * 4 + two, err := strconv.ParseInt(ips[1], 10, 64) + if err != nil { + return 0, err + } + + three, err := strconv.ParseInt(ips[2], 10, 64) + if err != nil { + return 0, err + } + four, err := strconv.ParseInt(ips[3], 10, 64) + if err != nil { + return 0, err + } + + logger.Info("one:%d,two:%d,three:%d,four:%d", first, two, three, four) + + serverId := fmt.Sprintf("%08b%08b%08b%08b", first, two, three, four) + logger.Info("serverID:%s\n", serverId) + return strconv.ParseUint(serverId, 2, 64) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_os_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_os_test.go new file mode 100644 index 0000000000..1d1b0daa4d --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysql_os_test.go @@ -0,0 +1,20 @@ +package mysqlutil_test + +import ( + "dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil" + "testing" +) + +func TestMySQLVersionParse(t *testing.T) { + t.Log(" start mysql version Parse") + ver := "mysql-5.6.24-linux-x86_64-tmysql-2.2.3-gcs.tar.gz" + verNu := mysqlutil.MySQLVersionParse(ver) + t.Logf("%s parse version is:%d", ver, verNu) + ver_maria := "mariadb-10.3.7-linux-x86_64-tspider-3.7.6-gcs.tar.gz" + verNu = mysqlutil.MySQLVersionParse(ver_maria) + t.Logf("%s parse version is:%d", ver_maria, verNu) +} + +func TestMajorVersion(t *testing.T) { + t.Logf("major Version:%s", mysqlutil.GetMajorVersion(10003007)) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysqlclient_exec.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysqlclient_exec.go new file mode 100644 index 0000000000..4144e8ec6a --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysqlclient_exec.go @@ -0,0 +1,192 @@ +package mysqlutil + +import ( + "database/sql" + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "fmt" + "io" + "os" + "os/exec" + "path" + "regexp" + "sync" + "time" +) + +// ExecuteSqlAtLocal TODO +type ExecuteSqlAtLocal struct { + MySQLBinPath string + WorkDir string `json:"workdir"` + IsForce bool `json:"isForce"` + Charset string `json:"charset"` + NeedShowWarnings bool `json:"needShowWarnings"` + Host string `json:"host"` + Port int `json:"port"` + Socket string `json:"socket"` + User string `json:"user"` + Password string `json:"password"` + ErrFile string +} + +// CreateLoadSQLCommand TODO +func (e ExecuteSqlAtLocal) CreateLoadSQLCommand() (command string) { + var forceStr, mysqlclient string + if e.IsForce { + forceStr = " -f " + } + mysqlclient = e.MySQLBinPath + if util.StrIsEmpty(e.MySQLBinPath) { + mysqlclient = cst.MySQLClientPath + } + connCharset := "" + if !util.StrIsEmpty(e.Charset) { + connCharset = fmt.Sprintf(" --default-character-set=%s ", e.Charset) + } + passwd := "" + if !util.StrIsEmpty(e.Password) { + passwd = fmt.Sprintf("-p%s", e.Password) + } + // 如果socket不存在的话的,选择连接tcp的方式导入 + if util.StrIsEmpty(e.Socket) { + return fmt.Sprintf( + `%s %s --safe_updates=0 -u %s %s -h%s -P %d %s -vvv `, + mysqlclient, forceStr, e.User, passwd, e.Host, e.Port, e.Charset, + ) + } + return fmt.Sprintf( + `%s %s --safe_updates=0 -u %s %s --socket=%s %s 
-vvv `, + mysqlclient, forceStr, e.User, passwd, e.Socket, connCharset, + ) +} + +// ExcuteSqlByMySQLClient TODO +func (e ExecuteSqlAtLocal) ExcuteSqlByMySQLClient(sqlfile string, targetdbs []string) (err error) { + for _, db := range targetdbs { + if err = e.ExcuteSqlByMySQLClientOne(sqlfile, db); err != nil { + return err + } + } + return nil +} + +// ExcuteSqlByMySQLClientOne 使用本地mysqlclient 去执行sql +// +// @receiver e +// @receiver sqlfile +// @receiver targetdbs +// @return err +func (e ExecuteSqlAtLocal) ExcuteSqlByMySQLClientOne(sqlfile string, db string) (err error) { + command := e.CreateLoadSQLCommand() + command = command + " " + db + "<" + path.Join(e.WorkDir, sqlfile) + e.ErrFile = path.Join(e.WorkDir, fmt.Sprintf("%s.%s.%s.err", sqlfile, db, time.Now().Format(cst.TimeLayoutDir))) + err = e.ExcuteCommand(command) + if err != nil { + return err + } + return nil +} + +// ExcuteCommand TODO +func (e ExecuteSqlAtLocal) ExcuteCommand(command string) (err error) { + var errStdout, errStderr error + logger.Info("The Command Is %s", ClearSensitiveInformation(command)) + cmd := exec.Command("/bin/bash", "-c", command) + stdoutIn, _ := cmd.StdoutPipe() + stderrIn, _ := cmd.StderrPipe() + stdout := osutil.NewCapturingPassThroughWriter(os.Stdout) + stderr := osutil.NewCapturingPassThroughWriter(os.Stderr) + defer func() { + // 写入error 文件 + ef, errO := os.OpenFile(e.ErrFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) + defer ef.Close() + if errO != nil { + logger.Warn("打开日志时失败! %s", errO.Error()) + return + } + _, errW := ef.Write(stderr.Bytes()) + if errW != nil { + logger.Warn("写错误日志时失败! %s", err.Error()) + } + }() + if err = cmd.Start(); err != nil { + logger.Error("start command failed:%s", err.Error()) + return + } + var wg sync.WaitGroup + wg.Add(1) + + go func() { + _, errStdout = io.Copy(stdout, stdoutIn) + wg.Done() + }() + + _, errStderr = io.Copy(stderr, stderrIn) + wg.Wait() + + if err = cmd.Wait(); err != nil { + logger.Error("cmd.wait failed:%s", err.Error()) + return + } + + if errStdout != nil || errStderr != nil { + logger.Error("failed to capture stdout or stderr\n") + return + } + outStr, errStr := string(stdout.Bytes()), string(stderr.Bytes()) + re, err := regexp.Compile(`((?i)\s*error\s+\d+)|No such file or directory`) + if err != nil { + return err + } + logger.Info("outstr:%s,errstr:%s", outStr, errStr) + if re.MatchString(outStr + errStr) { // @todo 这里的写法不够细致,可能匹配表结构里的关键字 + return fmt.Errorf("执行sql的输出含有error") + } + return nil +} + +// ExcutePartitionByMySQLClient TODO +func (e ExecuteSqlAtLocal) ExcutePartitionByMySQLClient( + dbw *sql.DB, partitionsql string, + lock *sync.Mutex, +) (err error) { + logger.Info("The partitionsql is %s", ClearSensitiveInformation(partitionsql)) + err = util.Retry( + util.RetryConfig{Times: 2, DelayTime: 2 * time.Second}, func() error { + _, err = dbw.Exec(partitionsql) + return err + }, + ) + if err != nil { + logger.Error("分区执行失败!%s", err) + lock.Lock() + errFile := path.Join(e.WorkDir, e.ErrFile) + ef, errO := os.OpenFile(errFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) + defer lock.Unlock() + defer ef.Close() + if errO != nil { + logger.Warn("打开日志时失败! %s", errO.Error()) + return + } + if err != nil { + _, errW := ef.Write([]byte(fmt.Sprintf("%s\n", err.Error()))) + if errW != nil { + logger.Warn("写错误日志时失败! 
%s", err.Error()) + } + } + return err + } + return nil +} + +// ExcuteInitPartition TODO +func (e ExecuteSqlAtLocal) ExcuteInitPartition(command string) (err error) { + e.ErrFile = path.Join(e.WorkDir, e.ErrFile) + err = e.ExcuteCommand(command) + if err != nil { + return err + } + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysqlclient_exec_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysqlclient_exec_test.go new file mode 100644 index 0000000000..49f51f258d --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysqlclient_exec_test.go @@ -0,0 +1,66 @@ +package mysqlutil_test + +import ( + "database/sql" + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil" + "fmt" + "sync" + "testing" + + _ "github.com/go-sql-driver/mysql" +) + +func TestExcutePartitionByMySQLClient(t *testing.T) { + host := "1.1.1.1" + port := 20000 + user := "xfan" + pwd := "xfan" + param := "" + tcpdsn := fmt.Sprintf("%s:%d", host, port) + dsn := fmt.Sprintf( + "%s:%s@tcp(%s)/?charset=utf8&parseTime=True&loc=Local&timeout=30s&readTimeout=30s&lock_wait_timeout=5%s", user, + pwd, + tcpdsn, param, + ) + sqlDB, err := sql.Open("mysql", dsn) + if err != nil { + t.Fatal("数据库连接失败!", err.Error()) + } + partitionsql := "alter table `db1`.`tb1` drop partition p20230217,p20230218,p20230219,p20230220" + lock := &sync.Mutex{} + e := mysqlutil.ExecuteSqlAtLocal{ + WorkDir: "/data/install/partition", + ErrFile: "partitionTest.err", + } + err = e.ExcutePartitionByMySQLClient(sqlDB, partitionsql, lock) + if err != nil { + t.Fatal("分区执行失败!", err.Error()) + } + t.Log("分区执行成功!") +} + +func TestExcuteInitPartition(t *testing.T) { + e := mysqlutil.ExecuteSqlAtLocal{ + ErrFile: "/data/install/partition/partitionTest.err", + } + sql := "--alter \"partition by RANGE (TO_DAYS(b)) (partition p20230221 values less than (to_days('2023-02-22')))\" D=db1,t=tb000 --charset=utf8 --recursion-method=NONE --alter-foreign-keys-method=auto --max-load Threads_running=80 --critical-load=Threads_running=0 --set-vars lock_wait_timeout=5 --print --pause-file=/tmp/partition_osc_pause_db1_tb000 --execute" + command := fmt.Sprintf("%s/%s %s", cst.DBAToolkitPath, "percona-toolkit-3.5.0", sql) + err := e.ExcuteCommand(command) + if err != nil { + t.Fatal("初始化分区失败!", err.Error()) + } + t.Log("初始化分区成功!") +} + +func TestExcuteCommand(t *testing.T) { + e := mysqlutil.ExecuteSqlAtLocal{ + ErrFile: "/data/install/partition/commandTest.err", + } + command := "df -h" + err := e.ExcuteCommand(command) + if err != nil { + t.Fatal("命令执行失败!", err.Error()) + } + t.Log("命令执行成功!") +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysqlutil.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysqlutil.go new file mode 100644 index 0000000000..8e5a9dcdcb --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/mysqlutil.go @@ -0,0 +1,2 @@ +// Package mysqlutil TODO +package mysqlutil diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/sql_builder.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/sql_builder.go new file mode 100644 index 0000000000..189089287f --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil/sql_builder.go @@ -0,0 +1,67 @@ +package mysqlutil + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// UnsafeIn UnsafeBuilderStringIn ignore error +func UnsafeIn(in []string, quote string) string { + 
inStr, _ := UnsafeBuilderStringIn(in, quote) + return inStr +} + +// UnsafeEqual UnsafeBuilderStringEqual ignore error +func UnsafeEqual(val string, quote string) string { + inStr, _ := UnsafeBuilderStringEqual(val, quote) + return inStr +} + +// UnsafeBuilderStringIn godoc +// 将 string list 转成 in (?) 格式 +// ['a', 'b'] to string 'a', 'b'. quote: '或者" +// 限制: 值不能包含特定字符, 输入只能是[]string, []int +func UnsafeBuilderStringIn(in []string, quote string) (string, error) { + if len(in) == 0 { + return "", nil + } + newIn := make([]string, len(in)) + unSafeRunes := "\"\\'`,;()" + unSafeStrings := regexp.MustCompile("(?i)sleep|delimiter|call") + for i, val := range in { + if strings.ContainsAny(val, unSafeRunes) || unSafeStrings.MatchString(val) { + return "", fmt.Errorf("unsafe value %s", val) + } + newIn[i] = quote + val + quote + } + return strings.Join(newIn, ","), nil +} + +// UnsafeBuilderIntIn godoc +func UnsafeBuilderIntIn(in []int, quote string) string { + if len(in) == 0 { + return "" + } + newIn := make([]string, len(in)) + for i, val := range in { + valStr := strconv.Itoa(val) + newIn[i] = valStr + } + return strings.Join(newIn, ",") +} + +// UnsafeBuilderStringEqual godoc +// convert a to 'a' +func UnsafeBuilderStringEqual(val, quote string) (string, error) { + if val == "" { + return quote + quote, nil // "''" + } + unSafeRunes := "\"\\'`,;()" + unSafeStrings := regexp.MustCompile("(?i)sleep|delimiter|call") + if strings.ContainsAny(val, unSafeRunes) || unSafeStrings.MatchString(val) { + return "", fmt.Errorf("unsafe value %s", val) + } + return quote + val + quote, nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/cmdexec.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/cmdexec.go new file mode 100644 index 0000000000..73c1ab3f4c --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/cmdexec.go @@ -0,0 +1,169 @@ +package osutil + +import ( + "bytes" + "dbm-services/common/go-pubpkg/logger" + "fmt" + "os" + "os/exec" + "strings" + + "github.com/pkg/errors" +) + +// FileOutputCmd 封装exec.Command,用于执行命令并输出到文件的场景,支持自动将输出文件上传到文件服务器(尽可能上传,如果上传失败则返回原文件) +type FileOutputCmd struct { + exec.Cmd + StdOutFile string + StdErrFile string + + stdOutFile *os.File + stdErrFile *os.File + stdOutDownloadLink string + stdErrDownloadLink string +} + +// GetStdOutDownloadLink TODO +func (c *FileOutputCmd) GetStdOutDownloadLink() string { + return c.stdOutDownloadLink +} + +// GetStdErrDownloadLink TODO +func (c *FileOutputCmd) GetStdErrDownloadLink() string { + return c.stdErrDownloadLink +} + +func (c *FileOutputCmd) initOutputFile() error { + if c.StdErrFile == "" { + c.StdErrFile = c.StdOutFile + } + if c.StdOutFile != "" { + stdOutFile, err := os.OpenFile(c.StdOutFile, os.O_CREATE|os.O_WRONLY, os.ModePerm) + if err != nil { + return errors.Wrapf(err, "open std out log file %s failed", c.StdOutFile) + } + c.stdOutFile = stdOutFile + c.Cmd.Stdout = stdOutFile + } + + if c.StdOutFile == c.StdErrFile { + c.stdErrFile = nil + c.Cmd.Stderr = c.stdOutFile + return nil + } + + if c.StdErrFile != "" { + stdErrFile, err := os.OpenFile(c.StdErrFile, os.O_CREATE|os.O_WRONLY, os.ModePerm) + if err != nil { + return errors.Wrapf(err, "open std err log file %s failed", c.StdErrFile) + } + c.stdErrFile = stdErrFile + c.Cmd.Stderr = stdErrFile + } + return nil +} + +func (c *FileOutputCmd) closeOutputFile() { + if c.stdOutFile != nil { + if err := c.stdOutFile.Close(); err != nil { + logger.Warn("close %s failed, err:%s", c.StdOutFile, err.Error()) + } + } + if 
c.stdErrFile != nil { + if err := c.stdErrFile.Close(); err != nil { + logger.Warn("close %s failed, err:%s", c.StdErrFile, err.Error()) + } + } + // UploadPath? + return +} + +// Run TODO +func (c *FileOutputCmd) Run() error { + if err := c.initOutputFile(); err != nil { + return err + } + + defer func() { + c.closeOutputFile() + }() + + return c.Cmd.Run() +} + +// Start TODO +func (c *FileOutputCmd) Start() error { + if err := c.initOutputFile(); err != nil { + return err + } + + return c.Cmd.Start() +} + +// Wait TODO +func (c *FileOutputCmd) Wait() error { + defer func() { + c.closeOutputFile() + }() + + return c.Cmd.Wait() +} + +// RunInBG TODO +func RunInBG(isSudo bool, param string) (pid int, err error) { + if isSudo { + param = "sudo " + param + } + cmd := exec.Command("bash", "-c", param) + err = cmd.Start() + if err != nil { + return -1, err + } + return cmd.Process.Pid, nil +} + +// ExecShellCommand 执行 shell 命令 +// 如果有 err, 返回 stderr; 如果没有 err 返回的是 stdout +// 后续尽量不要用这个方法,因为通过标准错误来判断有点不靠谱 +func ExecShellCommand(isSudo bool, param string) (stdoutStr string, err error) { + if isSudo { + param = "sudo " + param + } + cmd := exec.Command("bash", "-c", param) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err = cmd.Run() + if err != nil { + // return stderr.String(), err + return stderr.String(), errors.WithMessage(err, stderr.String()) + } + + if len(stderr.String()) > 0 { + err = fmt.Errorf("execute shell command(%s) error:%s", param, stderr.String()) + return stderr.String(), err + } + + return stdout.String(), nil +} + +// CleanExecShellOutput TODO +func CleanExecShellOutput(s string) string { + return strings.ReplaceAll(strings.TrimSpace(s), "\n", "") +} + +// StandardShellCommand TODO +func StandardShellCommand(isSudo bool, param string) (stdoutStr string, err error) { + var stdout, stderr bytes.Buffer + if isSudo { + param = "sudo " + param + } + cmd := exec.Command("bash", "-c", param) + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err = cmd.Run() + if err != nil { + return stderr.String(), errors.WithMessage(err, stderr.String()) + } + return stdout.String(), nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/cmdexec_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/cmdexec_test.go new file mode 100644 index 0000000000..5bd50172cd --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/cmdexec_test.go @@ -0,0 +1,15 @@ +package osutil_test + +import ( + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "testing" +) + +func TestExecShellCommand(t *testing.T) { + t.Log("start..") + out, err := osutil.StandardShellCommand(false, "usermod -d /home/mysql mysql") + if err != nil { + t.Fatal(err) + } + t.Log(out) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/crontab.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/crontab.go new file mode 100644 index 0000000000..85fc8b0164 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/crontab.go @@ -0,0 +1,246 @@ +/* + * @Description: 主机 crontab 操作 + */ + +package osutil + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "fmt" + "math/rand" + "os" + "regexp" + "strings" + "time" +) + +// CrontabLockFile TODO +const CrontabLockFile = "/home/mysql/.crontab_lock" + +// RemoveUserCrontab TODO +func RemoveUserCrontab(user string) error { + cmd := fmt.Sprintf("crontab -u %s -r ", user) + output, err := ExecShellCommand(false, cmd) 
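+	// `crontab -u <user> -r` drops the user's entire crontab in one shot;
+	// it fails with "no crontab for <user>" when nothing is installed.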
+ if err != nil { + logger.Info("%s. %s", output, err.Error()) + return err + } + return nil +} + +// GetStatusCrontab TODO +func GetStatusCrontab(user string) string { + newCrontab := make([]string, 0) + newCrontab = append( + newCrontab, fmt.Sprintf( + "#get_status.pl: mysql monitor and report status to tnm, distribute at %s by %s", + time.Now().Format(cst.TIMELAYOUT), + user, + ), + ) + newCrontab = append(newCrontab, "*/5 * * * * /home/mysql/monitor/get_status.pl 1>/dev/null 2>&1 \n") + return strings.Join(newCrontab, "\n") +} + +// RemoveSystemCrontab TODO +/** + * @description: 删除 mysql 用户下的crontab任务 + * @receiver {string} removeKey + * @return {*} + */ +func RemoveSystemCrontab(removeKey string) (err error) { + var ( + crontabs = make([]string, 0) + output string + crontabList string + ) + + output, err = ListCrontb("mysql") + if err != nil { + return err + } + + formerCrontab := strings.Split(output, "\n") + logger.Info("formerCrontab:%#v \n len(formerCrontab):%d", formerCrontab, len(formerCrontab)) + for _, crontab := range formerCrontab { + if regexp.MustCompile(`^\\s*$`).MatchString(crontab) || strings.Contains(crontab, `#.*DO NOT EDIT THIS FILE`) || + strings.Contains(crontab, "#.*cron installed") || strings.Contains(crontab, "#.*Cron version") || + strings.Contains(crontab, "#.*installed on") || strings.Contains(crontab, removeKey) { + continue + } + crontabs = append(crontabs, crontab) + } + // return crontabs, nil + crontabStr := strings.Join(crontabs, "\n") + err = ExecCrontab(crontabStr) + if err != nil { + return err + } + result, err := IsCrontabKeyExist(removeKey) + if err != nil { + return err + } + if result { + err = fmt.Errorf("remove %s failed ,pls execute %s to check it mannually", removeKey, crontabList) + return err + } + return nil +} + +// ListCrontb TODO +/** + * @description: 查看user下的crontab + * @receiver {string} user + * @return {*} + */ +func ListCrontb(user string) (output string, err error) { + crontabList := fmt.Sprintf("crontab -u %s -l|egrep -v ^$ || true", user) + // "crontab -u " + user + " -l" + output, err = ExecShellCommand(false, crontabList) + if err != nil { + err = fmt.Errorf("execute [%s] get an error:%w,%s", crontabList, err, output) + if strings.Contains(output, "no crontab for") { + return "", nil + } else { + return "", err + } + } + return output, err +} + +// AddCrontab TODO +/** + * @description: 追加添加crontab + * @receiver {string} crontab 表达式 + * @return {*} + */ +func AddCrontab(crontab string) error { + output, err := ListCrontb("mysql") + if err != nil { + return err + } + + crontab = output + "\n" + crontab + err = ExecCrontab(crontab) + if err != nil { + return err + } + return nil +} + +// ExecCrontab TODO +/** + * @description: 添加crontab + * @receiver {string} crontab 表达式 + * @return {*} + */ +func ExecCrontab(crontab string) error { + cmd := fmt.Sprintf("echo -e '%s' | crontab - -u mysql", crontab) + output, err := ExecShellCommand(false, cmd) + if err != nil { + logger.Info("%s. 
%s", output, err.Error()) + return err + } + return nil +} + +// IsCrontabKeyExist TODO +/** + * @description: grep crontab + * @receiver {string} key + * @return {*} + */ +func IsCrontabKeyExist(key string) (bool, error) { + var ( + output string + err error + ) + output, err = ListCrontb("mysql") + if err != nil { + return false, err + } + if strings.Contains(output, key) { + return true, nil + } + return false, nil +} + +// CrontabsExist 检查存在哪些Crontab +// +// @receiver crontabKeys +// @return existCrontabs +// @return err +func CrontabsExist(crontabKeys []string) (existCrontabs []string, err error) { + output, err := ListCrontb("mysql") + if err != nil { + return nil, err + } + for _, key := range crontabKeys { + if strings.Contains(output, key) { + existCrontabs = append(existCrontabs, key) + } + } + return +} + +// CleanLocalCrontab 通过 导出 -> grep -v -> 导入 的方式实现清理crontab任务 +func CleanLocalCrontab() error { + var ( + randnum = rand.Intn(10000) + tmpCronFile = fmt.Sprintf("/tmp/cron_%s_%d.crd", time.Now().Format(cst.TIMELAYOUTSEQ), randnum) + getStatusPL = "/home/mysql/monitor/get_status.pl" + dbBackupOld = "/home/mysql/dbbackup/dbbackup.sh" + dbBackupNew = "/home/mysql/dbbackup/dbbackup.pl" + dbBackupMulti = "/home/mysql/dbbackup/dbbackup_main.sh" + dbBackupMultiGo = "/home/mysql/dbbackup-go/dbbackup_main.sh" + dbBackupXtrabackup = "/home/mysql/dbbackup/xtrabackup/xtrabackup_main.sh" + rotateLog = "/home/mysql/rotate_logbin/rotate_logbin.pl" + rotateLogGo = "/home/mysql/rotate_binlog/rotatebinlog" + proxyStatus = "/home/mysql/proxy_monitor/get_proxy_status.pl" + slaveSync = "/home/mysql/monitor/master_slave_sync_check.pl" + tbinlodumperStatus = "tbinlogdumper_status.pl" + prometheus = "prometheus" + ) + cleanCrontabs := []string{ + getStatusPL, dbBackupOld, dbBackupNew, dbBackupMulti, dbBackupXtrabackup, rotateLog, + proxyStatus, slaveSync, tbinlodumperStatus, prometheus, rotateLogGo, dbBackupMultiGo, + } + + existCrontabs, err := CrontabsExist(cleanCrontabs) + if err != nil { + return err + } + // 如果不存在需要清理的crontab 直接返回成功 + if len(existCrontabs) <= 0 { + return nil + } + logger.Info("还存在的Crontabs %v", existCrontabs) + // 导出mysql用户的crontab任务,并过滤掉要清理的任务 + shellCMD := fmt.Sprintf("/usr/bin/crontab -u mysql -l") + for _, v := range existCrontabs { + shellCMD += fmt.Sprintf("|grep -v %s", v) + } + shellCMD += fmt.Sprintf(">%s 2>&1", tmpCronFile) + output, err := ExecShellCommand(false, shellCMD) + if err != nil { + err = fmt.Errorf("execute [%s] get an error:%s,%s", shellCMD, output, err.Error()) + logger.Warn(err.Error()) + // grep 没有找到结果也认为是失败的,这个地方不能当做错误返回。 + } + // 重新导入crontab文件 + shellCMD = fmt.Sprintf("/usr/bin/crontab -u mysql %s 2>&1", tmpCronFile) + output, err = ExecShellCommand(false, shellCMD) + if err != nil { + err = fmt.Errorf("execute [%s] get an error:%s", shellCMD, output) + logger.Error(err.Error()) + return err + } + // crontab延时1分钟,所以直接删掉监控执行文件,保证监控执行不成功,就不会有告警了 + if err := os.RemoveAll(getStatusPL); err != nil { + err = fmt.Errorf("rm %s failed, err:%s", getStatusPL, err.Error()) + logger.Error(err.Error()) + return err + } + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/crontab_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/crontab_test.go new file mode 100644 index 0000000000..5bd2eea7da --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/crontab_test.go @@ -0,0 +1,6 @@ +package osutil + +import "testing" + +func TestAddCrontab(t *testing.T) { +} diff --git 
a/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/mountpoint.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/mountpoint.go new file mode 100644 index 0000000000..781909cda7 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/mountpoint.go @@ -0,0 +1,72 @@ +package osutil + +import ( + "strconv" + "strings" +) + +// IsDataDirOk TODO +func IsDataDirOk(filepath string) bool { + mountPaths := GetMountPathInfo() + if m, ok := mountPaths[filepath]; ok { + // 如果 /data 在根分区,并且根分区大于 60G,通过 + if m.AvailSizeMB > 6144 { + return true + } else { + // not large enough + return false + } + } + // no mount point found + return false +} + +// MountPath TODO +type MountPath struct { + Filesystem string + FileSystemType string + TotalSizeMB int64 + UsedSizeMB int64 + AvailSizeMB int64 + UsePct int + Path string +} + +// ParseDfOutput TODO +func ParseDfOutput(rawOutput string) map[string]*MountPath { + mountPaths := make(map[string]*MountPath) + lines := strings.Split(rawOutput, "\n") + for i, line := range lines { + // skip headers + if i == 0 { + continue + } + + fields := strings.Fields(line) + if len(fields) == 0 || len(fields) != 7 { + continue + } + mountPath := &MountPath{ + Path: fields[5], + Filesystem: fields[0], + } + mountPath.FileSystemType = fields[1] + mountPath.TotalSizeMB, _ = strconv.ParseInt(fields[2], 10, 64) + mountPath.UsedSizeMB, _ = strconv.ParseInt(fields[3], 10, 64) + mountPath.AvailSizeMB, _ = strconv.ParseInt(fields[4], 10, 64) + mountPath.UsePct, _ = strconv.Atoi(strings.TrimSuffix(fields[5], "%")) + + mountPaths[fields[6]] = mountPath + } + return mountPaths +} + +// GetMountPathInfo TODO +func GetMountPathInfo() map[string]*MountPath { + cmdDfm, err := ExecShellCommand(false, "df -Thm") + mountPaths := ParseDfOutput(cmdDfm) + if err != nil { + return nil + } + return mountPaths +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/netutil.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/netutil.go new file mode 100644 index 0000000000..7587cd442e --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/netutil.go @@ -0,0 +1,57 @@ +package osutil + +import ( + "net" + "strconv" + "time" + + "github.com/pkg/errors" +) + +// IsPortUp 判断端口是否开启监听 +func IsPortUp(host string, ports ...int) bool { + for _, port := range ports { + timeout := time.Second + hostPort := net.JoinHostPort(host, strconv.Itoa(port)) + conn, err := net.DialTimeout("tcp", hostPort, timeout) + if err != nil { + // fmt.Println("Connecting error:", err) + return false + } + if conn != nil { + defer conn.Close() + return true + // fmt.Println("Opened", net.JoinHostPort(host, port)) + } + } + return false +} + +// GetLocalIPAddrs TODO +func GetLocalIPAddrs() ([]string, error) { + ifaces, err := net.Interfaces() + if err != nil { + return nil, errors.Wrap(err, "get local ipaddrs") + } + ipAddrs := []string{} + for _, i := range ifaces { + addrs, err := i.Addrs() + if err != nil { + return nil, errors.Wrap(err, "get local ipaddr") + } + for _, addr := range addrs { + var ip net.IP + switch v := addr.(type) { + case *net.IPNet: + ip = v.IP + case *net.IPAddr: + ip = v.IP + } + ipAddrs = append(ipAddrs, ip.String()) + } + } + if len(ipAddrs) == 0 { + return nil, errors.New("failed to get any local ipaddr") + } + return ipAddrs, nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/osutil.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/osutil.go new file mode 100644 index 0000000000..54d20f3da2 --- 
/dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/osutil.go @@ -0,0 +1,670 @@ +// Package osutil TODO +package osutil + +import ( + "bufio" + "bytes" + "dbm-services/common/go-pubpkg/logger" + "fmt" + "io" + "math" + "math/rand" + "net" + "os" + "os/exec" + "os/user" + "path" + "path/filepath" + "reflect" + "regexp" + "strconv" + "strings" + "time" + "unicode" + + "github.com/dustin/go-humanize" + "github.com/pkg/errors" +) + +// Ucfirst TODO +func Ucfirst(str string) string { + for i, v := range str { + return string(unicode.ToUpper(v)) + str[i+1:] + } + return "" +} + +// HasElem TODO +func HasElem(elem interface{}, slice interface{}) bool { + defer func() { + if err := recover(); err != nil { + logger.Error("HasElem error %s", err) + } + }() + arrV := reflect.ValueOf(slice) + if arrV.Kind() == reflect.Slice || arrV.Kind() == reflect.Array { + for i := 0; i < arrV.Len(); i++ { + // XXX - panics if slice element points to an unexported struct field + // see https://golang.org/pkg/reflect/#Value.Interface + if reflect.DeepEqual(arrV.Index(i).Interface(), elem) { + return true + } + } + } + return false +} + +// 描述: +// 把任何类型的值转换成字符串类型 +// 目前暂时支持的类型为:string,int,int64,float64,bool + +// ChangeValueToString TODO +func ChangeValueToString(value interface{}) (string, error) { + var result string + if item, ok := value.(string); ok { + result = item + } else if item1, ok := value.(int); ok { + result = strconv.Itoa(item1) + } else if item2, ok := value.(int64); ok { + result = strconv.FormatInt(item2, 10) + } else if item3, ok := value.(float64); ok { + result = strconv.FormatFloat(item3, 'f', -1, 64) + } else if item4, ok := value.(bool); ok { + result = strconv.FormatBool(item4) + } else { + return result, errors.New("[ChangeValueToString]value type unknow,not in (string,int,int64,float64,bool)") + } + return result, nil +} + +// GetLocalIP 获得本地IP +func GetLocalIP() (string, error) { + var localIP string + var err error + addrs, err := net.InterfaceAddrs() + if err != nil { + return localIP, err + } + for _, addr := range addrs { + if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { + if ipnet.IP.To4() != nil { + localIP = ipnet.IP.String() + return localIP, nil + } + } + } + err = fmt.Errorf("can't find local ip") + return localIP, err +} + +// StringToMap 字符串 TO map +// 如db1,,db2,db3,db2 ,等去重并转换成db1,db2,db3 +func StringToMap(srcStr string, seq string) map[string]struct{} { + splitReg := regexp.MustCompile(seq) + strList := splitReg.Split(srcStr, -1) + strMap := make(map[string]struct{}) + for _, str := range strList { + if len(strings.TrimSpace(str)) == 0 { + continue + } + strMap[strings.TrimSpace(str)] = struct{}{} + } + return strMap +} + +// StrSliceToMap 字符串slice to map,目标是去重 +func StrSliceToMap(srcStrSlice []string) map[string]struct{} { + strMap := make(map[string]struct{}) + for _, str := range srcStrSlice { + if len(strings.TrimSpace(str)) == 0 { + continue + } + strMap[strings.TrimSpace(str)] = struct{}{} + } + return strMap +} + +// MapKeysToSlice TODO +func MapKeysToSlice(mapObj map[string]struct{}) []string { + keys := make([]string, len(mapObj)) + + i := 0 + for k := range mapObj { + keys[i] = k + i++ + } + return keys +} + +// IntnRange TODO +func IntnRange(min, max int) int { + rand.Seed(time.Now().Unix()) + return rand.Intn(max-min) + min +} + +// GetFileModifyTime TODO +func GetFileModifyTime(filename string) (bool, int64) { + if _, err := os.Stat(filename); !os.IsNotExist(err) { + f, err1 := os.Open(filename) + if err1 != nil { + return 
true, 0 + } + fi, err2 := f.Stat() + if err2 != nil { + return true, 0 + } + return true, fi.ModTime().Unix() + } + return false, 0 +} + +// GetMySQLBaseDir TODO +func GetMySQLBaseDir(grepstr string) (string, error) { + strCmd := fmt.Sprintf(`ps -ef | grep 'mysqld '|grep basedir | grep %s| grep -v grep`, grepstr) + data, err := ExecShellCommand(false, strCmd) + reg := regexp.MustCompile(`--basedir=[/A-Za-z_]*`) + tmparr := reg.FindAllString(data, -1) + if len(tmparr) != 1 { + return "", errors.New("get basedir unexpected") + } + basedir := strings.Split(strings.TrimSpace(tmparr[0]), "=") + if len(basedir) != 2 || strings.TrimSpace(basedir[1]) == "" { + return "", fmt.Errorf("get base dir error:%v", basedir) + } + return strings.TrimSpace(basedir[1]), err +} + +// GetMySQLBinDir TODO +func GetMySQLBinDir(getstr string) (string, error) { + basedir, err := GetMySQLBaseDir(getstr) + if err != nil { + return "", err + } + if !strings.HasPrefix(basedir, "/") { + return "", fmt.Errorf("basedir must start at /") + } + return strings.TrimRight(basedir, "/") + "/bin", nil +} + +// MakeSoftLink src and dest are absolute path with filename +func MakeSoftLink(src string, dest string, force bool) error { + if !FileExist(src) { + return errors.Errorf("src file does not exists: %s", src) + } + if src == dest { + return nil + } + if FileExist(dest) { + if !force { + return errors.Errorf("dest file exists: %s", dest) + } + if err := os.Remove(dest); err != nil { + logger.Warn("remove file %s failed, err:%s", dest, err.Error()) + } + } + // os.Symlink(src, dest) + cmd := exec.Command("ln", "-s", src, dest) + out, err := cmd.CombinedOutput() + if err != nil { + logger.Error("ln -s failed, output:%s, err:%s", string(out), err.Error()) + } + return err +} + +// MakeHardLink TODO +func MakeHardLink(src string, dest string) error { + if !FileExist(src) { + return errors.New("src file does not exists") + } else if FileExist(dest) { + return errors.New("dest file already exists") + } + if err := os.Link(src, dest); err != nil { + return err + } + return nil +} + +// CheckFileExistWithPath TODO +func CheckFileExistWithPath(filename, dirname string) bool { + var destFile string + if strings.HasPrefix(filename, "/") { + destFile = filename + } else { + destFile = fmt.Sprintf(`%s/%s`, dirname, filename) // app_149/ulog/xxxx.ulog + } + + if _, err := os.Stat(destFile); err != nil { + if os.IsNotExist(err) { + return false + } + return false + } + return true +} + +// CheckAndMkdir mkdir ppathname/pathname +func CheckAndMkdir(pathname, ppathname string) error { + if !CheckFileExistWithPath(pathname, ppathname) { + return os.MkdirAll(ppathname+"/"+pathname, 0755) + } + return nil +} + +// ParsePsOutput TODO +// for ps command, output should skip first line, which +// refer to cmd string itself(catch by ps after bash -c) +func ParsePsOutput(rawOutput string) string { + var output []string + lines := strings.Split(rawOutput, "\n") + for i, line := range lines { + // skip headers + if i == 0 { + continue + } + + fields := strings.Fields(line) + if len(fields) == 0 { + continue + } + output = append(output, fields[0]) + } + return strings.Join(output, "\n") +} + +// FileExist TODO +func FileExist(fileName string) bool { + _, err := os.Stat(fileName) + if err != nil { + if os.IsExist(err) { + return true + } + return false + } + return true +} + +// GetFileMd5 TODO +func GetFileMd5(file string) (string, error) { + cmd := "md5sum " + file + data, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput() + if err != nil { + 
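+		// md5sum itself failed (e.g. file missing); CombinedOutput put its
+		// stderr message into data, which is discarded here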
return "", err + } + reg, err := regexp.Compile(`\s+`) + if err != nil { + return "", err + } + array := reg.Split(string(data), -1) + if len(array) != 3 { + return "", errors.New("data result len wrong ,not 3,is " + strconv.Itoa(len(array))) + } + return array[0], nil +} + +// GetLinuxDisksInfo TODO +func GetLinuxDisksInfo() ([]DiskInfo, error) { + var res []DiskInfo + cmd := "df -l|grep -vE 'Filesystem|overlay|tmpfs'" + data, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput() + if err != nil { + return res, err + } + reg, err := regexp.Compile(`\n+`) + if err != nil { + return res, err + } + strs := reg.Split(string(data), -1) + + for _, row := range strs { + if strings.TrimSpace(row) == "" { + continue + } + result := DiskInfo{} + reg, err := regexp.Compile(`\s+`) + if err != nil { + return res, err + } + array := reg.Split(row, -1) + if len(array) == 6 { + result.Filesystem = array[0] + result.Blocks_1K = array[1] + result.Used, err = strconv.Atoi(array[2]) + if err != nil { + return res, err + } + result.Available, err = strconv.ParseInt(array[3], 10, 64) + if err != nil { + return res, err + } + result.UsedRate = array[4] + result.MountedOn = array[5] + + res = append(res, result) + } else { + return res, errors.New("data result len wrong ,not 6,is " + strconv.Itoa(len(array))) + } + } + + return res, nil +} + +// GetCurrentUser TODO +func GetCurrentUser() (string, error) { + var currentUser = "" + cmd := `whoami` + data, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput() + if err != nil { + return currentUser, fmt.Errorf(err.Error() + ",cmd:" + cmd) + } + reg, err := regexp.Compile(`\n+`) + if err != nil { + return currentUser, err + } + array := reg.Split(string(data), -1) + if len(array) == 2 { + currentUser = array[0] + } else { + return currentUser, fmt.Errorf("get currentUser fail,len not 2,array:%s", strings.Join(array, ";")) + } + + return currentUser, nil +} + +// GetLinuxDirDiskInfo TODO +func GetLinuxDirDiskInfo(dir string) (DiskInfo, error) { + result := DiskInfo{} + cmd := fmt.Sprintf("df -l %s|grep -v Filesystem", dir) + data, err := exec.Command("/bin/bash", "-c", cmd).CombinedOutput() + if err != nil { + return result, err + } + reg, err := regexp.Compile(`\s+`) + if err != nil { + return result, err + } + array := reg.Split(string(data), -1) + if len(array) == 7 { + result.Filesystem = array[0] + result.Blocks_1K = array[1] + result.Used, err = strconv.Atoi(array[2]) + if err != nil { + return result, err + } + result.Available, err = strconv.ParseInt(array[3], 10, 64) + if err != nil { + return result, err + } + result.UsedRate = array[4] + result.MountedOn = array[5] + } else { + return result, errors.New("data result len wrong ,not 7,is " + strconv.Itoa(len(array))) + } + + return result, nil +} + +// DiskInfo TODO +type DiskInfo struct { + Filesystem string `json:"filesystem"` + Blocks_1K string `json:"blocks_1K"` + Used int `json:"used"` + Available int64 `json:"available"` + UsedRate string `json:"usedRate"` + MountedOn string `json:"MountedOn"` +} + +// SplitName 切分用户传过来的IP字符串列表等 +// 切分规则: +// 把\r+|\s+|;+|\n+|,+这些分隔符,转成字符串数组 +// 返回字符串数组 +func SplitName(input string) ([]string, error) { + if reg, err := regexp.Compile(`\r+|\s+|;+|\n+`); err != nil { + return nil, err + } else { + input = reg.ReplaceAllString(input, ",") + } + if reg, err := regexp.Compile(`^,+|,+$`); err != nil { + return nil, err + } else { + input = reg.ReplaceAllString(input, "") + } + if reg, err := regexp.Compile(`,+`); err != nil { + return nil, err + } else { + input 
= reg.ReplaceAllString(input, ",") + } + result := strings.Split(input, ",") + return result, nil +} + +// Uniq 对字符串数组进行去重 +func Uniq(input []string) []string { + var newData []string + if len(input) > 0 { + temp := map[string]bool{} + for _, value := range input { + temp[value] = true + } + for k := range temp { + newData = append(newData, k) + } + } + return newData +} + +// GetUidGid TODO +func GetUidGid(osuser string) (int, int, error) { + group, err := user.Lookup(osuser) + if err != nil { + logger.Info("Failed to lookup user %s", osuser) + return 0, 0, err + } + + uid, err := strconv.Atoi(group.Uid) + if err != nil { + logger.Info("Convert Uid for %s : `%s` failed", osuser, group.Uid) + return 0, 0, err + } + + gid, err := strconv.Atoi(group.Gid) + if err != nil { + logger.Info("Convert Gid for %s : `%s` failed", osuser, group.Gid) + return 0, 0, err + } + + return uid, gid, err +} + +// FileLineCounter 计算文件行数 +// 参考: https://stackoverflow.com/questions/24562942/golang-how-do-i-determine-the-number-of-lines-in-a-file-efficiently +func FileLineCounter(filename string) (lineCnt uint64, err error) { + _, err = os.Stat(filename) + if err != nil && os.IsNotExist(err) { + return 0, fmt.Errorf("file:%s not exists", filename) + } + file, err := os.Open(filename) + if err != nil { + return 0, fmt.Errorf("file:%s open fail,err:%w", filename, err) + } + defer func() { + if err := file.Close(); err != nil { + logger.Warn("close file %s failed, err:%s", filename, err.Error()) + } + }() + reader01 := bufio.NewReader(file) + buf := make([]byte, 32*1024) + lineCnt = 0 + lineSep := []byte{'\n'} + + for { + c, err := reader01.Read(buf) + lineCnt += uint64(bytes.Count(buf[:c], lineSep)) + + switch { + case err == io.EOF: + return lineCnt, nil + + case err != nil: + return lineCnt, fmt.Errorf("file:%s read fail,err:%w", filename, err) + } + } +} + +// WrapFileLink TODO +func WrapFileLink(link string) string { + name := filepath.Base(link) + return fmt.Sprintf(`%s`, link, name) +} + +// SetOSUserPassword run set user password by chpasswd +func SetOSUserPassword(user, password string) error { + exec.Command("/bin/bash", "-c", "") + cmd := exec.Command("chpasswd") + stdin, err := cmd.StdinPipe() + if err != nil { + return fmt.Errorf("new pipe failed, err:%w", err) + } + go func() { + _, err := io.WriteString(stdin, fmt.Sprintf("%s:%s", user, password)) + if err != nil { + logger.Warn("write into pipe failed, err:%s", err.Error()) + } + if err := stdin.Close(); err != nil { + logger.Warn("colse stdin failed, err:%s", err.Error()) + } + }() + if output, err := cmd.CombinedOutput(); err != nil { + return fmt.Errorf("run chpasswd failed, output:%s, err:%w", string(output), err) + } + return nil +} + +// GetNumaStr TODO +func GetNumaStr() string { + numaCmd := "numactl --show | grep policy" + output, err := ExecShellCommand(false, numaCmd) + if err != nil { + logger.Error(err.Error()) + return "" + } + if len(output) > 0 { + return "numactl --interleave=all " + } + return "" +} + +// SafeRmDir TODO +func SafeRmDir(dir string) (err error) { + if strings.TrimSpace(dir) == "/" { + return fmt.Errorf("禁止删除系统根目录") + } + return os.RemoveAll(dir) +} + +func getFileSize(f string) (int64, error) { + fd, err := os.Stat(f) + if err != nil { + return 0, err + } + return fd.Size(), nil +} + +// CalcFileSizeIncr TODO +func CalcFileSizeIncr(f string, secs uint64) string { + var err error + var t1Size, t2Size int64 + if t1Size, err = getFileSize(f); err != nil { + return "0" + } + time.Sleep(time.Duration(secs) * time.Second) + 
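+	// take the second sample; |t2Size-t1Size| / secs approximates the
+	// bytes written per second during the sleep window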
if t2Size, err = getFileSize(f); err != nil { + return "0" + } + + bytesIncr := uint64(math.Abs(float64(t2Size-t1Size))) / secs + return humanize.Bytes(bytesIncr) +} + +// PrintFileSizeIncr 后台计算文件变化 +// ch 通知退出,外层需要 close(ch) +// 2 hour 超时 +func PrintFileSizeIncr( + f string, secs uint64, printInterval uint64, + output func(format string, args ...interface{}), ch chan int, +) { + for true { + speed := CalcFileSizeIncr(f, secs) + if speed != "0" { + output("file %s change speed %s", f, speed) + } else { + break + } + select { + case _, beforeClosed := <-ch: + if !beforeClosed { + return + } + case <-time.After(2 * time.Hour): + return + default: + time.Sleep(time.Duration(printInterval) * time.Second) + } + /* + ch <- 1 // 这里为了不阻塞,我们只关注外面的 close 信号 + if _, beforeClosed := <-ch; !beforeClosed { + return + } + */ + } +} + +// CapturingPassThroughWriter is a writer that remembers +// data written to it and passes it to w +type CapturingPassThroughWriter struct { + buf bytes.Buffer + w io.Writer +} + +// NewCapturingPassThroughWriter creates new CapturingPassThroughWriter +func NewCapturingPassThroughWriter(w io.Writer) *CapturingPassThroughWriter { + return &CapturingPassThroughWriter{ + w: w, + } +} + +// Write 用于常见IO +func (w *CapturingPassThroughWriter) Write(d []byte) (int, error) { + w.buf.Write(d) + return w.w.Write(d) +} + +// Bytes returns bytes written to the writer +func (w *CapturingPassThroughWriter) Bytes() []byte { + return w.buf.Bytes() +} + +// ReadFileString TODO +func ReadFileString(filename string) (string, error) { + if body, err := os.ReadFile(filename); err != nil { + return "", err + } else { + return string(body), nil + } +} + +// CreateSoftLink TODO +// sourceFile : 绝对路径 +// linkName: 觉得路径 +func CreateSoftLink(sourceFile string, linkFile string) (err error) { + if !(path.IsAbs(sourceFile) && path.IsAbs(linkFile)) { + return fmt.Errorf("源文件和目标链接文件传参必须是绝对路径") + } + // try del origin link file + if FileExist(linkFile) { + if err := os.Remove(linkFile); err != nil { + logger.Error("del %s failed", linkFile) + return err + } + } + return os.Symlink(sourceFile, linkFile) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/osutil_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/osutil_test.go new file mode 100644 index 0000000000..a5ee0830c9 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/osutil_test.go @@ -0,0 +1,26 @@ +package osutil_test + +import ( + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "testing" +) + +func TestIsFileExist(t *testing.T) { + f := "/tmp/1.txt" + d := "/tmp/asdad/" + exist_f := osutil.FileExist(f) + exist_d := osutil.FileExist(d) + t.Log("f exist", exist_f) + t.Log("d exist", exist_d) + return +} + +func TestCreateLink(t *testing.T) { + t.Log("start..") + err := osutil.CreateSoftLink("/data/mysql/3306/mysql.sock", "/tmp/mysql.sock") + if err != nil { + t.Log(err.Error()) + return + } + t.Log("end..") +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/sysctl.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/sysctl.go new file mode 100644 index 0000000000..67a5aef9fc --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/sysctl.go @@ -0,0 +1,59 @@ +// Package osutil TODO +/* + * @Author: your name + * @Date: 2022-04-21 15:07:16 + * @LastEditTime: 2022-04-21 15:07:16 + * @LastEditors: your name + * @Description: 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE + * @FilePath: 
/bk-dbactuator/pkg/util/osutil/sysctl.go + */ +package osutil + +import ( + "dbm-services/common/go-pubpkg/logger" + "fmt" +) + +// ClearTcpRecycle TODO +// +// logger.Warn +func ClearTcpRecycle() error { + twRecycleCmd := "grep 'net.ipv4.tcp_tw_recycle=1' /etc/sysctl.conf" + result, err := ExecShellCommand(false, twRecycleCmd) + if err != nil { + err = fmt.Errorf("execute [%s] get an error:%w", twRecycleCmd, err) + logger.Warn(err.Error()) + } + if len(result) > 0 { + insertTwRecycle := "sed -i -e 's/net.ipv4.tcp_tw_recycle=1/net.ipv4.tcp_tw_recycle=0/g' /etc/sysctl.conf" + _, err := ExecShellCommand(false, insertTwRecycle) + if err != nil { + err = fmt.Errorf("execute [%s] get an error:%w", insertTwRecycle, err) + logger.Info(err.Error()) + return err + } + } + twReuseCmd := "grep 'net.ipv4.tcp_tw_reuse=1' /etc/sysctl.conf" + result, err = ExecShellCommand(false, twReuseCmd) + if err != nil { + err = fmt.Errorf("execute [%s] get an error:%w", twReuseCmd, err) + logger.Warn(err.Error()) + } + if len(result) > 0 { + insertTwReuse := "sed -i -e 's/net.ipv4.tcp_tw_reuse=1/net.ipv4.tcp_tw_reuse=0/g' /etc/sysctl.conf" + _, err := ExecShellCommand(false, insertTwReuse) + if err != nil { + err = fmt.Errorf("execute [%s] get an error:%w", insertTwReuse, err) + logger.Info(err.Error()) + return err + } + } + + // Linux kernel 那边只有 t-linux2-0044 开始才支持上面的 2 个参数,下面执行报错的话,给出一个warning。 + _, err = ExecShellCommand(false, "/sbin/sysctl -p") + if err != nil { + err = fmt.Errorf("execute [/sbin/sysctl -p] get an error:%w", err) + logger.Warn(err.Error()) + } + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/unix_only.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/unix_only.go new file mode 100644 index 0000000000..074479411a --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/unix_only.go @@ -0,0 +1,241 @@ +//go:build !windows +// +build !windows + +package osutil + +import ( + "bytes" + "dbm-services/common/go-pubpkg/logger" + "fmt" + "math/rand" + "os" + "os/exec" + "strings" + "syscall" + "time" +) + +// IsMountPoint TODO +// Determine if a directory is a mountpoint, by comparing the device for the directory +// with the device for it's parent. If they are the same, it's not a mountpoint, if they're +// different, it is. +// reference: https://github.com/cnaize/kubernetes/blob/master/pkg/util/mount/mountpoint_unix.go#L29 +func IsMountPoint(file string) (bool, error) { + stat, err := os.Stat(file) + if err != nil { + return false, err + } + rootStat, err := os.Lstat(file + "/..") + if err != nil { + return false, err + } + // If the directory has the same device as parent, then it's not a mountpoint. 
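+	// e.g. when /data is a separate mount, stat("/data").Dev differs from
+	// stat("/data/..").Dev reported for the parent (root) device.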
+ return stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev, nil +} + +// FindFirstMountPoint find first mountpoint in prefer order +func FindFirstMountPoint(paths ...string) (string, error) { + for _, path := range paths { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + continue + } + } + isMountPoint, err := IsMountPoint(path) + if err != nil { + return "", fmt.Errorf("check whether mountpoint failed, path: %s, err: %w", path, err) + } + if isMountPoint { + return path, nil + } + } + return "", fmt.Errorf("no available mountpoint found, choices: %#v", paths) +} + +// FindFirstMountPointProxy TODO +func FindFirstMountPointProxy(paths ...string) (string, error) { + for _, path := range paths { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + continue + } + } + isMountPoint, err := IsMountPoint(path) + if err != nil { + return "", fmt.Errorf("check whether mountpoint failed, path: %s, err: %w", path, err) + } + if isMountPoint { + return path, nil + } else { + // 如果目录不是独立挂载点,获取它的父目录,判断父目录所在挂载点是否 > 80GB + // 使用 df -hm 来替代 + // /data1 -> [, data1] -> / + path = strings.TrimSuffix(path, "/") + path_slice := strings.Split(path, "/") + if len(path_slice) < 2 { + return "", fmt.Errorf("wrong patch %s", path) + } + path_slice = path_slice[:len(path_slice)-1] + parentPath := "" + if len(path_slice) == 1 { + parentPath = "/" + } else { + parentPath = strings.Join(path_slice, "/") + } + if IsDataDirOk(parentPath) { + return path, nil + } + } + } + return "", fmt.Errorf("no available mountpoint found, choices: %#v", paths) +} + +// RunShellCmdAsUser a simple wrapper of Cmd +// NOTE(wangqingping) len(strings.Join(args, " ")) cannot +// exceed MAX_ARG_STRLEN, checkout: +// https://www.linuxjournal.com/article/6060 +func RunShellCmdAsUser(args []string, osuser string) (string, error) { + cmd := exec.Command("bash", "-c", strings.Join(args, " ")) + var outbuff, errbuff bytes.Buffer + cmd.Stdout = &outbuff + cmd.Stderr = &errbuff + uid, gid, err := GetUidGid(osuser) + if err != nil { + return "", err + } + cmd.SysProcAttr = &syscall.SysProcAttr{} + cmd.SysProcAttr.Credential = &syscall.Credential{Uid: uint32(uid), Gid: uint32(gid)} + if err := cmd.Run(); err != nil { + logger.Info( + "Run command failed, cmd `%s` error %s, %s", + strings.Join(args, " "), errbuff.String(), err, + ) + return "", err + } else { + logger.Info("Run command `%s` successfully", strings.Join(args, " ")) + } + return outbuff.String(), nil +} + +// RunShellCmdNoWaitAsUser TODO +// starts the specified command but does not wait for it to complete. 
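+// Note: since only cmd.Start() is issued, outbuff is read back immediately
+// and is normally still empty; callers should not rely on the returned stdout.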
+func RunShellCmdNoWaitAsUser(args []string, osuser string) (string, error) { + cmd := exec.Command("bash", "-c", strings.Join(args, " ")) + var outbuff, errbuff bytes.Buffer + cmd.Stdout = &outbuff + cmd.Stderr = &errbuff + uid, gid, err := GetUidGid(osuser) + if err != nil { + return "", err + } + cmd.SysProcAttr = &syscall.SysProcAttr{} + cmd.SysProcAttr.Credential = &syscall.Credential{Uid: uint32(uid), Gid: uint32(gid)} + if err := cmd.Start(); err != nil { + logger.Info( + "Run command failed, cmd `%s` error %s, %s", + strings.Join(args, " "), errbuff.String(), err, + ) + return "", err + } else { + logger.Info("Run command `%s` successfully", strings.Join(args, " ")) + } + + return outbuff.String(), nil +} + +// Lock TODO +func (l *DirLock) Lock() error { + f, err := os.Open(l.dir) + if err != nil { + return err + } + l.f = f + err = syscall.Flock(int(l.f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB) + if err != nil { + return fmt.Errorf("cannot flock directory %s - %w", l.dir, err) + } + return nil +} + +// Unlock TODO +func (l *DirLock) Unlock() error { + defer func() { + if err := l.f.Close(); err != nil { + logger.Warn("close lock file failed, err:%s", err.Error()) + } + }() + return syscall.Flock(int(l.f.Fd()), syscall.LOCK_UN) +} + +// GetDirLock TODO +/* + GetDirLock 获取 crontab lock. + set waitTime = 0 if you don't want to wait crontab lock +*/ +func GetDirLock(waitTime time.Duration, l *DirLock) error { + var ( + flockErr = make(chan error, 1) + timeoutChan = make(chan struct{}) + err error + ) + + if waitTime == 0 { + return l.Lock() + } + + go func() { + var deadline = time.Now().Add(waitTime) + for { + err := l.Lock() + if err == nil { + flockErr <- err + return + } + logger.Error("get file lock error:%s,continue to wait", err) + if time.Until(deadline) < 0 { + timeoutChan <- struct{}{} + return + } + time.Sleep(time.Duration(7+rand.Intn(7)) * time.Second) + } + }() + + select { + case err := <-flockErr: + return err + case <-timeoutChan: + err = fmt.Errorf("lock file(%s) timeout", l.GetDirName()) + return err + } +} + +// ReleaseDirLock TODO +func ReleaseDirLock(l *DirLock) error { + return l.Unlock() +} + +// DirLock TODO +// from https://github.com/nsqio/nsq/blob/master/internal/dirlock/dirlock.go +type DirLock struct { + dir string + f *os.File +} + +// NewDirLock TODO +func NewDirLock(dir string) *DirLock { + isExist := FileExist(dir) + if !isExist { + _, err := os.OpenFile(dir, os.O_RDWR|os.O_CREATE, 0755) + if err != nil { + logger.Warn("openFile(%s) error:%s", dir, err) + } + } + return &DirLock{ + dir: dir, + } +} + +// GetDirName TODO +func (l *DirLock) GetDirName() string { + return l.dir +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/windows_only.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/windows_only.go new file mode 100644 index 0000000000..3a8de4e525 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil/windows_only.go @@ -0,0 +1,16 @@ +//go:build windows +// +build windows + +package osutil + +// 这里只是为了能在 windows 编译成功,不一定可以使用 + +// FindFirstMountPoint find first mountpoint in prefer order +func FindFirstMountPoint(paths ...string) (string, error) { + return "/data", nil +} + +// FindFirstMountPointProxy TODO +func FindFirstMountPointProxy(paths ...string) (string, error) { + return "/data", nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxy.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxy.go new file mode 100644 index 0000000000..045720cb88 --- 
/dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxy.go @@ -0,0 +1,160 @@ +package proxyutil + +import ( + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "fmt" + "path" + "regexp" + "strconv" + "strings" + "time" +) + +// StartProxyParam TODO +type StartProxyParam struct { + InstallPath string // /usr/local/mysql-proxy/bin/mysql-proxy + ProxyCnf string // "/etc/proxy.cnf." + fmt.Sprint(startPort) + Host string + Port int + ProxyUser string // check connect + ProxyPwd string // check connect +} + +// Start TODO +func (s StartProxyParam) Start() (err error) { + scmd := fmt.Sprintf( + "su - mysql -c \"cd %s && ./mysql-proxy --defaults-file=%s &>/dev/null &\"", + path.Join(s.InstallPath, "bin"), s.ProxyCnf, + ) + logger.Info("start mysql-proxy commands: [%s]", scmd) + if _, err = osutil.ExecShellCommand(false, scmd); err != nil { + return err + } + return util.Retry(util.RetryConfig{Times: 6, DelayTime: 5 * time.Second}, func() error { return s.checkStart() }) +} + +// checkStart 检查mysql proxy 是否启成功 +func (s StartProxyParam) checkStart() (err error) { + shellCmd := fmt.Sprintf("ps -efwww|grep 'mysql-proxy'|grep '%s'|grep -v grep", s.ProxyCnf) + out, err := osutil.ExecShellCommand(false, shellCmd) + if err != nil { + logger.Error("invoke shellCmd[%s] error:%s", shellCmd, err.Error()) + return err + } + if !strings.Contains(string(out), s.ProxyCnf) { + return fmt.Errorf("proxyStartCmd:%s not contain proxyCnf:[%s]", out, s.ProxyCnf) + } + // Test Conn ... + pc, err := native.NewDbWorkerNoPing(fmt.Sprintf("%s:%d", s.Host, s.Port), s.ProxyUser, s.ProxyPwd) + if err != nil { + return err + } + var ver string + if ver, err = pc.SelectVersion(); err != nil { + return err + } + logger.Info("Proxy Version %s", ver) + return +} + +// KillDownProxy 停止MySQL Proxy 实例,目前是kill的方式 +// +// @receiver port +// @return err +func KillDownProxy(port int) (err error) { + shellCMD := fmt.Sprintf("ps -ef | grep mysql-proxy |grep %s |grep -v grep|wc -l", util.GetProxyCnfName(port)) + output, err := osutil.ExecShellCommand(false, shellCMD) + if err != nil { + return err + } + if strings.Compare(output, "0") == 0 { + logger.Info("没有发现proxy进程~") + return nil + } + shellCMD = fmt.Sprintf( + "ps -ef | grep mysql-proxy | grep %s |grep -v grep |awk '{print $2}'", + util.GetProxyCnfName(port), + ) + output, err = osutil.ExecShellCommand(false, shellCMD) + if err != nil { + return fmt.Errorf("execute [%s] get an error:%w.output:%s", shellCMD, err, output) + } + tmpPids := strings.Split(output, "\n") + logger.Info("proxy output:%s, tmpPids:%s", output, tmpPids) + for _, pid := range tmpPids { + if pid != "" { + killCMD := fmt.Sprintf("kill -9 %s", pid) + logger.Info("kill proxy cmd %s", killCMD) + output, err = osutil.ExecShellCommand(false, killCMD) + if err != nil { + return fmt.Errorf("execute [%s] get an error:%w.output:%s", killCMD, err, output) + } + } + } + return nil +} + +// ProxyVersionParse proxy version 解析 +// +// @receiver proxyVersion +// @return uint64 +func ProxyVersionParse(proxyVersion string) uint64 { + re := regexp.MustCompile(`mysql-proxy ([\d]+).([\d]+).([\d]+).([\d]+)`) + result := re.FindStringSubmatch(proxyVersion) + // [mysql-proxy 0.8.2.4] + var ( + total uint64 + billion string + million string + thousand string + single string + // 0.8.2.4 => 0 * 1000000000 + 8 * 1000000 + 2*1000 + 4 + ) + if len(result) == 0 { + 
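+		// the version string carries no "mysql-proxy a.b.c.d" token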
return 0 + } else if len(result) == 5 { + billion = result[1] + million = result[2] + thousand = result[3] + single = result[4] + if billion != "" { + b, err := strconv.ParseUint(billion, 10, 64) + if err != nil { + logger.Info(err.Error()) + b = 0 + } + total += b * 1000000000 + } + if million != "" { + b, err := strconv.ParseUint(million, 10, 64) + if err != nil { + logger.Info(err.Error()) + b = 0 + } + total += b * 1000000 + } + if thousand != "" { + t, err := strconv.ParseUint(thousand, 10, 64) + if err != nil { + logger.Info(err.Error()) + t = 0 + } + total += t * 1000 + } + if single != "" { + s, err := strconv.ParseUint(single, 10, 64) + if err != nil { + logger.Info(err.Error()) + s = 0 + } + total += s + } + } else { + // impossible condition,just for safe. + return 0 + } + return total +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxy_cnf.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxy_cnf.go new file mode 100644 index 0000000000..27279545b4 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxy_cnf.go @@ -0,0 +1,52 @@ +package proxyutil + +import ( + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + "fmt" + "reflect" +) + +// PROXY_SEC_NAME TODO +const PROXY_SEC_NAME = "mysql-proxy" + +// ProxyCnfObject TODO +type ProxyCnfObject struct { + MysqlProxy map[string]string `json:"mysql-proxy" sectag:"mysql-proxy"` +} + +// ReplaceProxyConfigs TODO +type ReplaceProxyConfigs struct { + AdminAddress string `keytag:"admin-address"` + AdminUserName string `keytag:"admin-username"` + AdminPassWord string `keytag:"admin-password"` + AdminLuaScript string `keytag:"admin-lua-script"` + AdminUsersFile string `keytag:"admin-users-file"` // 配置文件 + ProxyAddress string `keytag:"proxy-address"` + ProxyBackendAddress string `keytag:"proxy-backend-addresses"` + BaseDir string `keytag:"basedir"` + LogFile string `keytag:"log-file"` +} + +// NewProxyCnfObject TODO +func (c ProxyCnfObject) NewProxyCnfObject(proxycnffile string) (pf *util.CnfFile, err error) { + pf = util.NewEmptyCnfObj(proxycnffile) + for key, v := range c.MysqlProxy { + pf.RenderSection(PROXY_SEC_NAME, key, v, true) + } + return +} + +// ReplaceProxyConfigsObjects TODO +func ReplaceProxyConfigsObjects(f *util.CnfFile, c ReplaceProxyConfigs) error { + t := reflect.TypeOf(c) + v := reflect.ValueOf(c) + if t.Kind() != reflect.Struct { + return fmt.Errorf("proxycnf object reflect is not struct") + } + for i := 0; i < t.NumField(); i++ { + keyName := t.Field(i).Tag.Get(util.KeyTag) + val := v.Field(i).String() + f.ReplaceValue(PROXY_SEC_NAME, string(keyName), false, val) + } + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxy_cnf_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxy_cnf_test.go new file mode 100644 index 0000000000..12fdce366d --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxy_cnf_test.go @@ -0,0 +1,35 @@ +package proxyutil + +import ( + "encoding/json" + "testing" +) + +func TestNewProxyCnf(t *testing.T) { + var c ProxyCnfObject + proxyConfigJson := `{ + "mysql-proxy": + { + "ignore-user": "MONITOR,proxy", + "conn_log": "true", + "keepalive": "true", + "daemon": "true", + "interactive_timeout": "86400", + "admin-username": "proxy" + } + }` + if err := json.Unmarshal([]byte(proxyConfigJson), &c); err != nil { + t.Fatalf("unmarshal failed %s", err.Error()) + return + } + nf, err := c.NewProxyCnfObject("proxy.cnf") + if err != nil { + 
t.Fatalf("NewProxyCnfObject failed %s", err.Error()) + return + } + nf.FileName = "proxy.cnf.10000" + if err := nf.SafeSaveFile(true); err != nil { + t.Fatalf("save file error %s", err.Error()) + return + } +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxyutil.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxyutil.go new file mode 100644 index 0000000000..cfeb20aa84 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/proxyutil/proxyutil.go @@ -0,0 +1,2 @@ +// Package proxyutil TODO +package proxyutil diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/sftp/init.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/sftp/init.go new file mode 100644 index 0000000000..be4739a431 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/sftp/init.go @@ -0,0 +1,77 @@ +package sftp + +import ( + "dbm-services/common/go-pubpkg/cmutil" + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + "fmt" + "log" + "os" +) + +// user:pass@host:port:/data/dbbak +// /data/dbbak + +// Download TODO +func Download(src Config, srcDir, dstDir string, fileName string, bwlimitMB int64) error { + remote, err := New(src) + if err != nil { + return err + } + defer remote.Close() + + srcFile := fmt.Sprintf(`%s/%s`, srcDir, fileName) + dstFile := fmt.Sprintf(`%s/%s`, dstDir, fileName) + if fileName == "" { + srcFile = srcDir + dstFile = dstDir + } + logger.Info("start download to %s", dstFile) + // Get remote file stats. + info, err := remote.Info(srcFile) + if err != nil { + return err + } + fmt.Printf("%+v\n", info) + + // Download remote file. + r, err := remote.Download(srcFile) + if err != nil { + return err + } + defer r.Close() + + // create local file + f, err := os.Create(dstFile) + if err != nil { + log.Fatal(err) + } + defer f.Close() + + done := make(chan int, 1) + defer close(done) + go func(chan int) { + osutil.PrintFileSizeIncr(dstFile, 1, 10, logger.Info, done) + /* + for true { + speed := osutil.CalcFileSizeIncr(dstFile, 1) + if speed != "0" { + logger.Info("file %s download current speed %s", dstFile, speed) + } else { + break + } + time.Sleep(10 * time.Second) + } + */ + }(done) + + // Read downloaded file. + // data, err := ioutil.ReadAll(file) + // fmt.Println(string(data)) + // _, err = io.Copy(f, ratelimit.Reader(r, srcBucket)) + _, err = cmutil.IOLimitRate(f, r, bwlimitMB) + if err != nil { + return err + } + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/sftp/sftp.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/sftp/sftp.go new file mode 100644 index 0000000000..7053da19f6 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/sftp/sftp.go @@ -0,0 +1,203 @@ +// Package sftp TODO +package sftp + +import ( + "fmt" + "io" + "os" + "regexp" + "time" + + "github.com/pkg/sftp" + "golang.org/x/crypto/ssh" +) + +// Config represents SSH connection parameters. +type Config struct { + Username string + Password string + PrivateKey string + Server string + KeyExchanges []string + + Timeout time.Duration +} + +// Client provides basic functionality to interact with a SFTP server. +type Client struct { + config Config + sshClient *ssh.Client + sftpClient *sftp.Client +} + +// New initialises SSH and SFTP clients and returns Client type to use. 
+func New(config Config) (*Client, error) { + c := &Client{ + config: config, + } + + if err := c.connect(); err != nil { + return nil, err + } + + return c, nil +} + +// Create creates a remote/destination file for I/O. +func (c *Client) Create(filePath string) (io.ReadWriteCloser, error) { + if err := c.connect(); err != nil { + return nil, fmt.Errorf("connect: %w", err) + } + + return c.sftpClient.Create(filePath) +} + +// Upload writes local/source file data streams to remote/destination file. +func (c *Client) Upload(source io.Reader, destination io.Writer, size int) error { + if err := c.connect(); err != nil { + return fmt.Errorf("connect: %w", err) + } + + chunk := make([]byte, size) + + for { + num, err := source.Read(chunk) + if err == io.EOF { + tot, err := destination.Write(chunk[:num]) + if err != nil { + return err + } + + if tot != len(chunk[:num]) { + return fmt.Errorf("failed to write stream") + } + + return nil + } + + if err != nil { + return err + } + + tot, err := destination.Write(chunk[:num]) + if err != nil { + return err + } + + if tot != len(chunk[:num]) { + return fmt.Errorf("failed to write stream") + } + } +} + +// Download returns remote/destination file for reading. +func (c *Client) Download(filePath string) (io.ReadCloser, error) { + if err := c.connect(); err != nil { + return nil, fmt.Errorf("connect: %w", err) + } + + return c.sftpClient.Open(filePath) +} + +// Info gets the details of a file. If the file was not found, an error is returned. +func (c *Client) Info(filePath string) (os.FileInfo, error) { + if err := c.connect(); err != nil { + return nil, fmt.Errorf("connect: %w", err) + } + + info, err := c.sftpClient.Lstat(filePath) + if err != nil { + return nil, fmt.Errorf("file stats: %w", err) + } + + return info, nil +} + +// Close closes open connections. +func (c *Client) Close() { + if c.sftpClient != nil { + c.sftpClient.Close() + } + if c.sshClient != nil { + c.sshClient.Close() + } +} + +// GetAuthMethods TODO +func (c *Config) GetAuthMethods(password string) []ssh.AuthMethod { + auth := ssh.Password(password) + /* + if c.config.PrivateKey != "" { + signer, err := ssh.ParsePrivateKey([]byte(c.config.PrivateKey)) + if err != nil { + return fmt.Errorf("ssh parse private key: %w", err) + } + auth = ssh.PublicKeys(signer) + } + */ + keyboardInteractiveChallenge := func( + user, + instruction string, + questions []string, + echos []bool, + ) (answers []string, err error) { + if len(questions) == 0 { + return []string{}, nil + } + /* + for i, question := range questions { + log.Debug("SSH Question %d: %s", i+1, question) + } + */ + answers = make([]string, len(questions)) + for i := range questions { + yes, _ := regexp.MatchString("*yes*", questions[i]) + if yes { + answers[i] = "yes" + + } else { + answers[i] = password + } + } + return answers, nil + } + auth2 := ssh.KeyboardInteractive(keyboardInteractiveChallenge) + + methods := []ssh.AuthMethod{auth2, auth} + return methods +} + +// connect initialises a new SSH and SFTP client only if they were not +// initialised before at all and, they were initialised but the SSH +// connection was lost for any reason. 
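+// Liveness of an existing connection is probed with an SSH "keepalive"
+// request before it is reused; on failure both clients are re-created.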
+func (c *Client) connect() error { + if c.sshClient != nil { + _, _, err := c.sshClient.SendRequest("keepalive", false, nil) + if err == nil { + return nil + } + } + + cfg := &ssh.ClientConfig{ + User: c.config.Username, + Auth: c.config.GetAuthMethods(c.config.Password), + // HostKeyCallback: func(string, net.Addr, ssh.PublicKey) error { return nil }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + // HostKeyCallback: ssh.FixedHostKey(hostKey), + Timeout: c.config.Timeout, + } + + sshClient, err := ssh.Dial("tcp", c.config.Server, cfg) + if err != nil { + return fmt.Errorf("ssh dial: %w", err) + } + c.sshClient = sshClient + + sftpClient, err := sftp.NewClient(sshClient) + if err != nil { + return fmt.Errorf("sftp new client: %w", err) + } + c.sftpClient = sftpClient + + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/sftp/sftp_test.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/sftp/sftp_test.go new file mode 100644 index 0000000000..1206b1c40f --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/sftp/sftp_test.go @@ -0,0 +1,18 @@ +package sftp + +import ( + "testing" + "time" +) + +func TestDownloadFile(t *testing.T) { + config := Config{ + Username: "mysql", + Password: "xxx", // required only if password authentication is to be used + Server: "a.b.c.d:22", + // KeyExchanges: []string{"diffie-hellman-group-exchange-sha256", "diffie-hellman-group14-sha256"}, // optional + Timeout: time.Second * 10, // 0 for not timeout + } + + Download(config, "/data/dbbak/", "/data/dbbak", "xxxx.info", 1) +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/slice.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/slice.go new file mode 100644 index 0000000000..32e9926704 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/slice.go @@ -0,0 +1,273 @@ +package util + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +// IntsHas check the []int contains the given value +func IntsHas(ints []int, val int) bool { + for _, ele := range ints { + if ele == val { + return true + } + } + return false +} + +// Int64sHas check the []int64 contains the given value +func Int64sHas(ints []int64, val int64) bool { + for _, ele := range ints { + if ele == val { + return true + } + } + return false +} + +// StringsHas check the []string contains the given element +func StringsHas(ss []string, val string) bool { + for _, ele := range ss { + if ele == val { + return true + } + } + return false +} + +// StringsHasICase check the []string contains the given element. 
(case-insensitive)
+func StringsHasICase(ss []string, val string) bool {
+	val = strings.ToLower(val)
+	for _, ele := range ss {
+		if strings.ToLower(ele) == val {
+			return true
+		}
+	}
+	return false
+}
+
+// UniqueStrings returns the unique items in a slice
+func UniqueStrings(slice []string) []string {
+	// create a map with all the values as key
+	uniqMap := make(map[string]struct{})
+	for _, v := range slice {
+		uniqMap[v] = struct{}{}
+	}
+
+	// turn the map keys into a slice
+	uniqSlice := make([]string, 0, len(uniqMap))
+	for v := range uniqMap {
+		uniqSlice = append(uniqSlice, v)
+	}
+	return uniqSlice
+}
+
+// UniqueInts returns the unique items in a slice
+func UniqueInts(slice []int) []int {
+	// create a map with all the values as key
+	uniqMap := make(map[int]struct{})
+	for _, v := range slice {
+		uniqMap[v] = struct{}{}
+	}
+
+	// turn the map keys into a slice
+	uniqSlice := make([]int, 0, len(uniqMap))
+	for v := range uniqMap {
+		uniqSlice = append(uniqSlice, v)
+	}
+	return uniqSlice
+}
+
+// IsConsecutiveStrings reports whether the non-empty strList forms a consecutive run.
+// An empty element is an error.
+// If isNumber=false the strings are compared by their character codes.
+func IsConsecutiveStrings(strList []string, isNumber bool) error {
+	err := errors.New("not consecutive numbers")
+	intList := make([]int, len(strList))
+	if !isNumber {
+		// map each string to the concatenation of its character codes
+		// .aa .ab .ac => 469797 469798 469799
+		for i, s := range strList {
+			ss := ""
+			for _, si := range []rune(s) {
+				ss += strconv.FormatInt(int64(si), 10)
+			}
+			// TODO: ss must not exceed 20 digits, or Atoi below overflows
+			strList[i] = ss
+		}
+	}
+	for i, s := range strList {
+		if d, e := strconv.Atoi(s); e != nil {
+			return errors.Errorf("illegal number %s", s)
+		} else {
+			intList[i] = d
+		}
+	}
+	intList = UniqueInts(intList)
+	sort.Ints(intList)
+	count := len(intList)
+	if (intList[count-1] - intList[0] + 1) != count {
+		return err
+	}
+	return nil
+}
+
+// RemoveEmpty filters out blank strings
+func RemoveEmpty(input []string) []string {
+	var result []string
+	for _, item := range input {
+		if strings.TrimSpace(item) != "" {
+			result = append(result, item)
+		}
+	}
+	return result
+}
+
+// StringSliceToInterfaceSlice converts a string slice to an interface{} slice
+func StringSliceToInterfaceSlice(ids []string) []interface{} {
+	result := make([]interface{}, 0, len(ids))
+	for i := 0; i < len(ids); i++ {
+		result = append(result, ids[i])
+	}
+	return result
+}
+
+// StringsRemove removes a value from a string slice
+func StringsRemove(ss []string, s string) []string {
+	var ns []string
+	for _, v := range ss {
+		if v != s {
+			ns = append(ns, v)
+		}
+	}
+
+	return ns
+}
+
+// StringsInsertAfter inserts new after the first occurrence of old;
+// if old is not found the slice is returned unchanged
+func StringsInsertAfter(ss []string, old string, new string) []string {
+	var ssNew = make([]string, len(ss)+1)
+	var found bool
+	for i, v := range ss {
+		if found {
+			ssNew[i+1] = v
+		} else if v == old {
+			ssNew[i] = v
+			ssNew[i+1] = new
+			found = true
+		} else {
+			ssNew[i] = v
+		}
+	}
+	if !found {
+		return ssNew[:len(ss)]
+	}
+	return ssNew
+}
+
+// StringsInsertIndex inserts new at position index;
+// an out-of-range index is ignored
+func StringsInsertIndex(ss []string, index int, new string) []string {
+	if index < 0 || index > len(ss)-1 {
+		return ss
+	}
+	var ssNew = make([]string, len(ss)+1)
+	for i, v := range ss {
+		if i > index {
+			ssNew[i+1] = v
+		} else if i < index {
+			ssNew[i] = v
+		} else {
+			ssNew[i] = new
+			ssNew[i+1] = v
+		}
+	}
+	return ssNew
+}
+
+// FilterOutStringSlice returns the elements of src that are not in filters
+//
+//	@receiver src
+//	@receiver filters
+//	@return dst
+func FilterOutStringSlice(src []string, filters []string) (dst
[]string) { + for _, v := range src { + if !StringsHas(filters, v) { + dst = append(dst, v) + } + } + return +} + +// RemoveNilElements TODO +func RemoveNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +// StrVal TODO +func StrVal(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +// StrSlice TODO +func StrSlice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, StrVal(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, StrVal(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{StrVal(v)} + } + } +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/str.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/str.go new file mode 100644 index 0000000000..f306057802 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/str.go @@ -0,0 +1,45 @@ +package util + +import ( + "regexp" + "strings" +) + +// SplitAny TODO +// util.SplitAny("ab##cd$$ef", "(##|\$\$)") +func SplitAny(s string, delimiters string) []string { + // seps := fmt.Sprintf() + // splitRegex := regexp.MustCompile(`[;,\n\t ]+`) + // delimiters=[;,\t\s ]+ + splitRegex := regexp.MustCompile(delimiters) + splitResults := splitRegex.Split(s, -1) + results := make([]string, 0) + for _, s := range splitResults { + if strings.TrimSpace(s) != "" { + results = append(results, strings.TrimSpace(s)) + } + } + return results +} + +// SplitAnyRune TODO +// util.SplitAnyRune("a,b c", ", ") +// if s is empty, return [], not [""] +func SplitAnyRune(s string, seps string) []string { + splitter := func(r rune) bool { + return strings.ContainsRune(seps, r) + } + return strings.FieldsFunc(s, splitter) +} + +// SplitAnyRuneTrim 分隔字符串,并去除空字符 +func SplitAnyRuneTrim(s string, seps string) []string { + ss := SplitAnyRune(s, seps) + for i, el := range ss { + if sss := strings.TrimSpace(el); sss != "" { + ss[i] = sss + } + // 忽略空字符 + } + return ss +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/templates/cmd_groups.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/templates/cmd_groups.go new file mode 100644 index 0000000000..e153e2ee69 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/templates/cmd_groups.go @@ -0,0 +1,21 @@ +package templates + +import ( + "github.com/spf13/cobra" +) + +// CommandGroup TODO +type CommandGroup struct { + Message string + Commands []*cobra.Command +} + +// CommandGroups TODO +type CommandGroups []CommandGroup + +// Add TODO +func (g CommandGroups) Add(c *cobra.Command) { + for _, group := range g { + c.AddCommand(group.Commands...) 
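+		// all commands of every group are attached flat onto c;
+		// the group Message is not used when adding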
+	}
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/templates/normallizers.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/templates/normallizers.go
new file mode 100644
index 0000000000..b1ec2db878
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/templates/normallizers.go
@@ -0,0 +1,32 @@
+package templates
+
+import (
+	"strings"
+
+	"github.com/MakeNowJust/heredoc"
+)
+
+// Indentation is the indent unit used in command descriptions
+const Indentation = `  `
+
+// LongDesc normalizes a command's long description: heredoc-dedent, then trim
+func LongDesc(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+	return normalizer{s}.heredoc().trim().string
+}
+
+type normalizer struct {
+	string
+}
+
+func (s normalizer) heredoc() normalizer {
+	s.string = heredoc.Doc(s.string)
+	return s
+}
+
+func (s normalizer) trim() normalizer {
+	s.string = strings.TrimSpace(s.string)
+	return s
+}
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/templates/templates.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/templates/templates.go
new file mode 100644
index 0000000000..08870591a3
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/templates/templates.go
@@ -0,0 +1,2 @@
+// Package templates provides helpers for formatting cobra command help
+package templates
diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/util.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/util.go
new file mode 100644
index 0000000000..ca01610bd0
--- /dev/null
+++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/util.go
@@ -0,0 +1,335 @@
+// Package util provides assorted helper functions
+package util
+
+import (
+	"crypto/md5"
+	"dbm-services/common/go-pubpkg/logger"
+	"encoding/json"
+	"fmt"
+	"io"
+	"math/rand"
+	"net"
+	"net/url"
+	"os"
+	"path"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strings"
+	"time"
+
+	"github.com/TylerBrock/colorjson"
+	"github.com/pkg/errors"
+)
+
+// RetryConfig controls Retry
+type RetryConfig struct {
+	Times     int           // max retry times
+	DelayTime time.Duration // delay between attempts
+}
+
+// Retry runs f up to r.Times;
+// note that the first attempt is also delayed by DelayTime
+func Retry(r RetryConfig, f func() error) (err error) {
+	for i := 0; i < r.Times; i++ {
+		time.Sleep(r.DelayTime)
+		if err = f(); err == nil {
+			return nil
+		}
+		logger.Warn("retry %d failed: %s", i, err.Error())
+	}
+	return
+}
+
+// AtWhere returns the caller's file:line, trimmed at the bk-dbactuator root
+func AtWhere() string {
+	pc, _, _, ok := runtime.Caller(1)
+	if ok {
+		fileName, line := runtime.FuncForPC(pc).FileLine(pc)
+		result := strings.Index(fileName, "/bk-dbactuator/")
+		if result > 1 {
+			preStr := fileName[0:result]
+			fileName = strings.Replace(fileName, preStr, "", 1)
+		}
+		return fmt.Sprintf("%s:%d", fileName, line)
+	} else {
+		return "Method not Found!"
+ } +} + +const ( + tcpDialTimeout = 3 * time.Second +) + +// HostCheck TODO +func HostCheck(host string) bool { + _, err := net.DialTimeout("tcp", host, time.Duration(tcpDialTimeout)) + if err != nil { + logger.Info(err.Error()) + return false + } + return true +} + +// GetFileMd5 TODO +func GetFileMd5(fileAbPath string) (md5sum string, err error) { + rFile, err := os.Open(fileAbPath) + if err != nil { + return "", err + } + defer rFile.Close() + h := md5.New() + if _, err := io.Copy(h, rFile); err != nil { + return "", err + } + return fmt.Sprintf("%x", h.Sum(nil)), nil +} + +// Struct2Map TODO +func Struct2Map(s interface{}, tag string) (map[string]interface{}, error) { + out := make(map[string]interface{}) + v := reflect.ValueOf(s) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("only accept struct or pointer, got %T", v) + } + t := v.Type() + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if tagValue := f.Tag.Get(tag); tagValue != "" { + out[tagValue] = v.Field(i).Interface() + } + } + return out, nil +} + +// SetField TODO +func SetField(obj interface{}, name string, value interface{}) error { + structValue := reflect.ValueOf(obj).Elem() + structFieldValue := structValue.FieldByName(name) + + if !structFieldValue.IsValid() { + return fmt.Errorf("no such field: %s in obj", name) + } + + if !structFieldValue.CanSet() { + return fmt.Errorf("cannot set %s field value", name) + } + + structFieldType := structFieldValue.Type() + val := reflect.ValueOf(value) + if structFieldType != val.Type() { + return errors.New("provided value type didn't match obj field type") + } + + structFieldValue.Set(val) + return nil +} + +// Convert2Map TODO +func Convert2Map(m interface{}) map[string]string { + ret := make(map[string]string) + v := reflect.ValueOf(m) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + var fd string + for i := 0; i < v.NumField(); i++ { + f := v.Field(i) + switch f.Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + Convert2Map(f.Interface()) + default: + fd = f.String() + } + ret[v.Type().Field(i).Tag.Get("json")] = fd + } + return ret +} + +// StrIsEmpty TODO +func StrIsEmpty(str string) bool { + return strings.TrimSpace(str) == "" +} + +// OutputPrettyJson 直接传一个空结构体过来 +func OutputPrettyJson(p interface{}) { + var inInterface map[string]interface{} + inrec, _ := json.Marshal(p) + json.Unmarshal(inrec, &inInterface) + // Make a custom formatter with indent set + f := colorjson.NewFormatter() + f.Indent = 4 + pp, err := f.Marshal(inInterface) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("Payload Example: ") + fmt.Println("") + fmt.Println(string(pp)) + fmt.Println("") +} + +// IntSlice2String 效果:[]int{1,2,3,4} -> "1,2,3,4" +func IntSlice2String(elements []int, sep string) string { + elemStr := "" + if len(elements) > 0 { + for i, elem := range elements { + if i == (len(elements) - 1) { + elemStr += fmt.Sprintf("%d", elem) + break + } + elemStr += fmt.Sprintf("%d%s", elem, sep) + } + } + return elemStr +} + +// ConverMapInterface2MapString TODO +func ConverMapInterface2MapString(mi map[string]interface{}) (ms map[string]string, err error) { + ms = make(map[string]string) + for key, v := range mi { + dv, ok := v.(string) + if !ok { + return nil, fmt.Errorf("key:%s 断言string 失败", key) + } + ms[key] = dv + } + return +} + +// RegexReplaceSubString TODO +func RegexReplaceSubString(str, old, new string) string { + re := regexp.MustCompile(fmt.Sprintf(`(%s)`, old)) + return 
re.ReplaceAllString(str, new) +} + +// GetSuffixWithLenAndSep 获取后缀 +// 先截取后面 maxlen 长度字符串,再根据 separator 分隔取后缀 +func GetSuffixWithLenAndSep(strList []string, separator string, maxlen int) []string { + if maxlen > 0 { + for i, s := range strList { + l := len(s) + if l-maxlen > 0 { + strList[i] = s[l-maxlen:] + } + } + } + seqList := make([]string, len(strList)) + for i, s := range strList { + seqList[i] = LastElement(strings.Split(s, separator)) + } + return seqList +} + +// LastElement TODO +func LastElement(arr []string) string { + return arr[len(arr)-1] +} + +// ReverseRead · 逆序读取文件,类型tail -n 10 +// +// @receiver name +// @receiver lineNum 读取最后多少上内容 +// @return []string 返回逆序读取的文件内容 +// @return error +func ReverseRead(name string, lineNum uint) ([]string, error) { + // 打开文件 + file, err := os.Open(name) + if err != nil { + return nil, err + } + defer file.Close() + // 获取文件大小 + fs, err := file.Stat() + if err != nil { + return nil, err + } + fileSize := fs.Size() + + var offset int64 = -1 // 偏移量,初始化为-1,若为0则会读到EOF + char := make([]byte, 1) // 用于读取单个字节 + lineStr := "" // 存放一行的数据 + buff := make([]string, 0, 100) + for (-offset) <= fileSize { + // 通过Seek函数从末尾移动游标然后每次读取一个字节 + file.Seek(offset, io.SeekEnd) + _, err := file.Read(char) + if err != nil { + return buff, err + } + if char[0] == '\n' { + offset-- // windows跳过'\r' + lineNum-- // 到此读取完一行 + buff = append(buff, lineStr) + lineStr = "" + if lineNum == 0 { + return buff, nil + } + } else { + lineStr = string(char) + lineStr + } + offset-- + } + buff = append(buff, lineStr) + return buff, nil +} + +// SliceErrorsToError TODO +func SliceErrorsToError(errs []error) error { + var errStrs []string + for _, e := range errs { + errStrs = append(errStrs, e.Error()) + } + errString := strings.Join(errStrs, "\n") + return errors.New(errString) +} + +// IntnRange TODO +func IntnRange(min, max int) int { + rand.Seed(time.Now().Unix()) + return rand.Intn(max-min) + min +} + +// GetFileModifyTime TODO +func GetFileModifyTime(filename string) (bool, int64) { + if _, err := os.Stat(filename); !os.IsNotExist(err) { + f, err1 := os.Open(filename) + if err1 != nil { + return true, 0 + } + fi, err2 := f.Stat() + if err2 != nil { + return true, 0 + } + return true, fi.ModTime().Unix() + } + return false, 0 +} + +// UrlJoinPath utl.JoinPath go1.919 +func UrlJoinPath(p, subPath string) (string, error) { + u, err := url.Parse(p) + if err != nil { + return "", err + } + u.Path = path.Join(u.Path, subPath) + return u.String(), nil +} + +// FileIsEmpty TODO +func FileIsEmpty(path string) error { + fileInfo, err := os.Stat(path) + if err != nil { + return err + } + if fileInfo.Size() <= 0 { + return fmt.Errorf("文件为空") + } + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/xmlutil/xml.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/xmlutil/xml.go new file mode 100644 index 0000000000..d83fee7966 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/xmlutil/xml.go @@ -0,0 +1,60 @@ +package xmlutil + +import "encoding/xml" + +// GenericMap TODO +type GenericMap map[string]interface{} + +// MarshalXML TODO +func (g GenericMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name.Local = "performance_status" + tokens := []xml.Token{start} + tokens = ScanXMLNode(g, tokens) + tokens = append(tokens, xml.EndElement{Name: start.Name}) + + for _, t := range tokens { + err := e.EncodeToken(t) + if err != nil { + return err + } + } + // flush to ensure tokens are written + err := e.Flush() + if err != nil { + return 
err + } + return nil +} + +// ScanXMLNode TODO +func ScanXMLNode(g map[string]interface{}, tokens []xml.Token) []xml.Token { + for key, value := range g { + t := xml.StartElement{Name: xml.Name{Space: "", Local: key}} + if mapInterface, ok := value.(map[string]interface{}); ok { + haveAttr := false + for k, v := range mapInterface { // k:check,expire_days v: + if str, innerOk := v.(string); innerOk { + t.Attr = append(t.Attr, xml.Attr{Name: xml.Name{Space: "", Local: k}, Value: str}) + haveAttr = true + } + // 暂时不考虑既有 child 是 map[string]string, 又是 map[string]map[string]interface{} 这种。 + } + if haveAttr { + tokens = append(tokens, t) + } else { + tokens = append(tokens, t) + tokens = ScanXMLNode(mapInterface, tokens) + } + } else if mapString, ok := value.(map[string]string); ok { + for k, v := range mapString { + t.Attr = append(t.Attr, xml.Attr{Name: xml.Name{Space: "", Local: k}, Value: v}) + } + tokens = append(tokens, t) + } else { + return nil + } + // fmt.Println("key end:", key) + tokens = append(tokens, xml.EndElement{Name: xml.Name{Space: "", Local: key}}) + } + return tokens +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/util/xmlutil/xmlutil.go b/dbm-services/mysql/db-tools/dbactuator/pkg/util/xmlutil/xmlutil.go new file mode 100644 index 0000000000..b71255c936 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/util/xmlutil/xmlutil.go @@ -0,0 +1,2 @@ +// Package xmlutil TODO +package xmlutil diff --git a/dbm-services/mysql/db-tools/mysql-crond/.ci/codecc.yml b/dbm-services/mysql/db-tools/mysql-crond/.ci/codecc.yml new file mode 100644 index 0000000000..c824dddd90 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/.ci/codecc.yml @@ -0,0 +1,29 @@ +version: v2.0 +resources: + repositories: + - repository: ci_templates/public/codecc + name: codecc +on: + mr: + target-branches: [ "*" ] +stages: + - name: "代码检查" + check-out: + gates: + - template: commonGate.yml@codecc + timeout-hours: 10 + jobs: + codecc: + name: "CodeCC代码检查" + runs-on: + pool-name: docker #docker-on-devcloud、docker、local、agentless + container: + image: mirrors.tencent.com/ci/tlinux3_ci:2.0.0 + steps: + - checkout: self + - uses: CodeccCheckAtomDebug@4.* + name: 腾讯代码分析 + with: + beAutoLang: true # 自动检测项目语言 + checkerSetType: "openScan" # 规则集类型,normal对应自主配置规则集,openScan对应按开源治理要求配置 + toolScanType: "2" # 扫描方式。快速全量扫描[1] | 全量扫描[0] | 差异扫描[6] | MR/PR扫描[2],默认为1 diff --git a/dbm-services/mysql/db-tools/mysql-crond/.ci/open_source_check.yml b/dbm-services/mysql/db-tools/mysql-crond/.ci/open_source_check.yml new file mode 100644 index 0000000000..f421f315f3 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/.ci/open_source_check.yml @@ -0,0 +1,84 @@ +version: "v2.0" +name: "开源检查" +label: [] +variables: {} +stages: +- name: "开源检查" + label: + - "Build" + jobs: + job_AfK: + name: "构建环境-LINUX" + runs-on: + pool-name: "docker" + container: + image: "mirrors.tencent.com/ci/tlinux3_ci:2.3.0" + needs: {} + steps: + - checkout: self + - name: "敏感信息检查-部门RTX" + uses: "SensitiveRtxChecker@3.*" + - name: "腾讯代码分析(官方-代码分析工作组)" + uses: "CodeccCheckAtomDebug@4.*" + with: + beAutoLang: true + languages: + - "GOLANG" + checkerSetType: "communityOpenScan" + tools: + - "WOODPECKER_COMMITSCAN" + - "SCC" + - "PECKER_SECURITY" + - "SENSITIVE" + - "DUPC" + - "IP_CHECK" + - "WOODPECKER_SENSITIVE" + - "HORUSPY" + - "XCHECK" + - "CCN" + asyncTask: false + asyncTaskId: "" + scriptType: "SHELL" + script: |- + # Coverity/Klocwork将通过调用编译脚本来编译您的代码,以追踪深层次的缺陷 + # 请使用依赖的构建工具如maven/cmake等写一个编译脚本build.sh + # 确保build.sh能够编译代码 + 
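+        # (hypothetical example for this Go repo: build.sh could simply run `go build ./...`)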
# cd path/to/build.sh + # sh build.sh + languageRuleSetMap: {} + checkerSetEnvType: "prod" + multiPipelineMark: "" + rtxReceiverType: "1" + botWebhookUrl: "" + botRemindRange: "2" + botRemindSeverity: "7" + botRemaindTools: [] + emailReceiverType: "1" + emailCCReceiverList: [] + instantReportStatus: "2" + reportDate: [] + reportTime: "" + reportTools: [] + toolScanType: "1" + diffBranch: "" + byFile: false + mrCommentEnable: true + prohibitIgnore: false + newDefectJudgeFromDate: "" + transferAuthorList: [] + path: [] + customPath: [] + scanTestSource: false + openScanPrj: false + openScanFilterEnable: false + issueSystem: "TAPD" + issueSubSystem: "" + issueResolvers: [] + issueReceivers: [] + issueFindByVersion: "" + maxIssue: 1000 + issueAutoCommit: false + check-out: + gates: + - template: open_source_gate.yml + timeout-hours: 10 \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-crond/.ci/templates/open_source_gate.yml b/dbm-services/mysql/db-tools/mysql-crond/.ci/templates/open_source_gate.yml new file mode 100644 index 0000000000..d14127e08c --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/.ci/templates/open_source_gate.yml @@ -0,0 +1,26 @@ +parameters: + - name: receivers + type: array + default: [ "${{ ci.actor }}" ] + +gates: + - name: open-source-gate + rule: + - "CodeccCheckAtomDebug.all_risk <= 0" + - "CodeccCheckAtomDebug.high_med_new_issue <= 0" + - "CodeccCheckAtomDebug.ccn_new_max_value <= 40" + - "CodeccCheckAtomDebug.sensitive_defect <= 0" + - "CodeccCheckAtomDebug.dupc_average <= 15" + - "CodeccCheckAtomDebug.ccn_average <= 3" + - "CodeccCheckAtomDebug.ccn_new_defect <= 0" + - "CodeccCheckAtomDebug.ccn_funcmax <= 20" + - "CodeccCheckAtomDebug.woodpecker_all_defect <= 0" + - "CodeccCheckAtomDebug.horuspy_all_defect <= 0" + - "CodeccCheckAtomDebug.go_serious_defect <= 0" + - "CodeccCheckAtomDebug.go_all_defect <= 100" + notify-on-fail: + - type: wework-message + receivers: ${{ parameters.receivers }} + continue-on-fail: + gatekeepers: + - "${{ ci.actor }}" \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-crond/.gitignore b/dbm-services/mysql/db-tools/mysql-crond/.gitignore new file mode 100644 index 0000000000..c2bddb594b --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/.gitignore @@ -0,0 +1,5 @@ +.idea +logs +*.pid +build +demo \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-crond/.golangci.yml b/dbm-services/mysql/db-tools/mysql-crond/.golangci.yml new file mode 100644 index 0000000000..b165022e4c --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/.golangci.yml @@ -0,0 +1,121 @@ +# 完整版本在 https://golangci-lint.run/usage/configuration/ +linters-settings: + funlen: + lines: 80 + statements: 80 + govet: + check-shadowing: true + lll: + line-length: 120 + errcheck: + check-type-assertions: true + goconst: + min-len: 2 + min-occurrences: 2 + gocyclo: + min-complexity: 20 + goimports: + revive: + confidence: 0 + rules: + - name: var-declaration + - name: package-comments + - name: dot-imports + - name: blank-imports + - name: exported + - name: var-naming + - name: indent-error-flow + - name: range + - name: errorf + - name: error-naming + - name: error-strings + - name: receiver-naming + - name: increment-decrement + - name: error-return + #- name: unexported-return + - name: time-naming + - name: context-keys-type + - name: context-as-argument + - name: argument-limit + severity: warning + disabled: false + arguments: [ 5 ] + gocritic: + enabled-checks: + - nestingReduce 
+ - commentFormatting + settings: + nestingReduce: + bodyWidth: 5 + +linters: + disable-all: true + enable: + - deadcode + - funlen + - goconst + - gocyclo + - gofmt + - ineffassign + - staticcheck + - structcheck # 当非导出结构嵌入另一个结构, 前一个结构被使用就不会监测到, 这个需要每个业务自己屏蔽 + - typecheck + - goimports + - revive + - gosimple + - govet + - lll + - rowserrcheck + - errcheck + - unused + - varcheck + - sqlclosecheck + - gocritic + # - bodyclose https://github.com/timakin/bodyclose/issues 问题太多了,屏蔽都屏蔽不过来,显式不使用它 + +run: + # default concurrency is a available CPU number + concurrency: 4 + # timeout for analysis, e.g. 30s, 5m, default is 1m + timeout: 2m + # exit code when at least one issue was found, default is 1 + issues-exit-code: 1 + # include test files or not, default is true + tests: false + # default is true. Enables skipping of directories: + # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$ + skip-dirs-use-default: true + skip-files: + - ".*/mock/.*.go" + - ".*testing.go" + - "docs/*.go" + +issues: + exclude-use-default: true + # The list of ids of default excludes to include or disable. By default it's empty. + # 下面的规则,golangci-lint认为应该屏蔽,但是我们选择不屏蔽。所以,`exclude-use-default: true`屏蔽一部分,把下面的再捞出来。 + # golanglint-ci维护的忽略列表里有一些是我们不想屏蔽的,捞出来。这里说一下,使用白名单是好于黑名单的。名单随着golanglint-ci引入更多工具,我们跟进享受好处。我们搞黑名单,就变成自己维护,不如golanglint-ci去维护,更好。 + include: + - EXC0004 # govet (possible misuse of unsafe.Pointer|should have signature) + - EXC0005 # staticcheck ineffective break statement. Did you mean to break out of the outer loop + - EXC0012 # revive exported (method|function|type|const) (.+) should have comment or be unexported + - EXC0013 # revive package comment should be of the form "(.+)... + - EXC0014 # revive comment on exported (.+) should be of the form "(.+)..." 
+ - EXC0015 # revive should have a package comment, unless it's in another file for this package + exclude-rules: + - path: _test\.go + linters: + - funlen # 规范说单测函数,单个函数可以到160行,但是工具不好做区分处理,这里就直接不检查单测的函数长度 + - linters: + - staticcheck + text: "SA6002: argument should be pointer-like to avoid allocations" # sync.pool.Put(buf), slice `var buf []byte` will tiger this + - linters: + - lll + source: "^//go:generate " # Exclude lll issues for long lines with go:generate + max-same-issues: 0 + new: false + max-issues-per-linter: 0 +output: + sort-results: true +service: + golangci-lint-version: 1.28.x diff --git a/dbm-services/mysql/db-tools/mysql-crond/Makefile b/dbm-services/mysql/db-tools/mysql-crond/Makefile new file mode 100644 index 0000000000..cc273ef54c --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/Makefile @@ -0,0 +1,25 @@ +PROJ="mysql-crond" +MODULE="dbm-services/mysql/db-tools/mysql-crond" +VERSION = $(error please set VERSION flag) +#VERSION=$(shell date +'%y%m%d.%H.%M') +PKG=${PROJ}.tar.gz +OUTPUT_DIR=build +RELEASE_BUILD_FLAG = "-X ${MODULE}/cmd.version=${VERSION} -X ${MODULE}/cmd.buildStamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X ${MODULE}/cmd.gitHash=`git rev-parse HEAD` " +DEV_BUILD_FLAG = "-X ${MODULE}/cmd.version="develop" -X ${MODULE}/cmd.buildStamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X ${MODULE}/cmd.gitHash="" " + +.PHONY: release-bin +release-bin: + @CGO_ENABLE=0 GOARCH=amd64 GOOS=linux go build -ldflags ${RELEASE_BUILD_FLAG} -o ${OUTPUT_DIR}/${$PROJ} + @cp mysql-crond.conf.go.tpl $(OUTPUT_DIR) + @cp start.sh $(OUTPUT_DIR) + @tar -C ${OUTPUT_DIR} -zcf ${OUTPUT_DIR}/${PKG} mysql-crond mysql-crond.conf.go.tpl start.sh + +.PHONY: dev-bin +dev-bin: + @CGO_ENABLE=0 go build -ldflags ${DEV_BUILD_FLAG} -o ${OUTPUT_DIR}/${$PROJ} + @cp mysql-crond.conf.go.tpl $(OUTPUT_DIR) + @cp start.sh $(OUTPUT_DIR) + @tar -C ${OUTPUT_DIR} -zcf ${OUTPUT_DIR}/${PKG} mysql-crond mysql-crond.conf.go.tpl start.sh + +clean: + @rm -rf ${OUTPUT_DIR} \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-crond/README.md b/dbm-services/mysql/db-tools/mysql-crond/README.md new file mode 100644 index 0000000000..b047434d12 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/README.md @@ -0,0 +1,364 @@ +# 使用方法 + +1. 打包 `make linux-pkg VERSION=0.0.4` + * _VERSION_ 必须给定值, 这个值是设计给蓝盾流水线用的. 人工使用随便给个 _x.y.z_ 形式的就行 +2. 命令行参数 + * `-c/--config` 指定运行时参数文件 + * `--without-heart-beat` 关闭心跳, 专门给开发测试用的 +3. 测试运行 `sh run_local.sh -c /path_to/runtime.yaml --without-heart-beat` + +* _runtime.yaml_ 部署后几乎不会有变化 +* _jobs-config.yaml_ 是任务定义, 可以被 _api_ 影响 + * 所有修改 _jobs_ 的 _api_ 在使用 _permanent=true_ 时会将配置持久化 + * 持久化时原文件会被整个重写, 所有注释全都删除 + +## 查看注册的任务列表 + +1. 方法一: http api: +``` +curl http://127.0.0.1:9999/entries |jq +``` +2. 方法二: list 命令 +``` +./mysql-crond -c runtime.yaml list +``` + +# 事件 + +1. 所有注册的任务执行失败时会自动发送蓝鲸告警通知, 要求是 + * 任务命令必须严格遵守 `0/1 exit code` 的标准 + * 执行失败时要向 _stderr_ 打印必要的错误信息 +2. 所有任务都遵循 `SkipIfStillRunning` 的调度策略 + * 上一轮任务未结束时, 本次调度会跳过, 等待下一次调度 + * 产生这种情况时会发送蓝鲸告警 +3. 
事件名由 _runtime config_ 中的 _bk_monitor_beat.inner_event_name_ 指定 + +# 心跳 +* 程序本身会默认启动一个 `@every 1m` 的任务发送心跳指标到蓝鲸监控 +* 指标名由 _runtime config_ 中的 _bk_monitor_beat.inner_metrics_name_ 指定, 添加对应的监控策略就可以监控任务调度是否正常 + +# 部署 +* 程序部署依赖蓝鲸节点管理, _runtime config_ 由蓝鲸生成 +* 初次部署后 _jobs config_ 文件为空 +* 初次部署后需要编辑 _jobs config_ 添加 `bk_biz_id, immute_domain, machine_type, role` 信息 +* 完成修改 _jobs config_ 后调用 `/config/reload GET` 加载配置 + +# 配置文件 + +## 运行时配置 _--config_ +```yaml +ip: 127.0.0.1 +port: 9999 +bk_cloud_id: 0 +bk_monitor_beat: + inner_event_name: mysql_crond_event + inner_metrics_name: mysql_crond_beat + custom_event: + bk_data_id: 542898 + access_token: xxxx + report_type: agent + message_kind: event + custom_metrics: + bk_data_id: 543957 + access_token: xxxx + report_type: agent + message_kind: timeseries + beat_path: /usr/local/gse_bkte/plugins/bin/bkmonitorbeat + agent_address: /usr/local/gse_bkte/agent/data/ipc.state.report +log: + console: true + log_file_dir: /Users/xfwduke/mysql-crond/logs + debug: true + source: true + json: false +pid_path: /Users/xfwduke/mysql-crond +jobs_user: xfwduke +jobs_config: /Users/xfwduke/mysql-crond/jobs-config.yaml +``` + +1. `ip` 为本机 _ip_ 地址 +2. `port` 为 _mysql-crond http_ 服务监听端口, 服务强制 _bind 127.0.0.1_ +3. `log_file_dir, pid_path, jobs_config` 必须为绝对路径 +4. `jobs_user` 设置为机器上存在的用户名, 所有任务及日志文件都属于这个用户 +5. `custom_event` 为蓝鲸自定义事件配置 + * `bk_data_id, access_token` 按需修改 + * 其他的不要动 +6. `custom_metrics` 为蓝鲸自定义指标配置 + * `bk_data_id, access_token` 按需修改 + * 其他的不要动 +7. `inner_event_name` 指定本程序内部发送的事件名, 用于监控任务调度是否有延迟 +8. `inner_metrics_name` 指定本程序自身的心跳指标名, 用于监控任务调度是否正常 + + +## 任务定义 _--jobs-config_ +```yaml +jobs: + - name: bb + enable: true + command: echo + args: + - dd + schedule: '@every 1m' + creator: ob + work_dir: "" +bk_biz_id: 404 +immute_domain: aaa.bbbb.ccc +machine_type: backend +role: master +``` + +* `work_dir`: 默认情况下 `mysql-crond` 调度的作业 _cwd_ 是 `mysql-crond` 的所在目录, 在注册作业使用 _cwd_ 时可能会出现异常. 
可以使用这个参数指定作业自己的 _cwd_ + +# _http api_ + +## `/entries GET` + +返回活跃的任务 + +### _response_ +```json +{ + "entries": []cron.Entry +} +``` + +```go +type Entry struct { + ID EntryID + Schedule Schedule + Next time.Time + Prev time.Time + WrappedJob Job + Job Job +} +``` + +`Entry.Job` 是一个 `interface` , 所以实际是自定义的任务类型 + +```go +type ExternalJob struct { + Name string `yaml:"name" binding:"required" validate:"required"` + Enable *bool `yaml:"enable" binding:"required" validate:"required"` + Command string `yaml:"command" binding:"required" validate:"required"` + Args []string `yaml:"args" binding:"required" validate:"required"` + Schedule string `yaml:"schedule" binding:"required" validate:"required"` + Creator string `yaml:"creator" binding:"required" validate:"required"` + WorkDir string `yaml:"work_dir" json:"work_dir" form:"work_dir"` +} +``` + +## `/disabled GET` +返回被停止的任务 + +### _response_ +```json +{ + "jobs": []ExternalJob +} +``` + +## `/disable POST` + +停止处于活跃的任务 + +### _request_ + +```json +{ + "name": string, + "permanent": bool, +} +``` + +* _name_ : 任务名称 +* _permanent_ : 是否持久化到配置文件 + +### _response_ +```json +{ + "entry_id": int +} +``` + +* _entry_id_ : 成功操作的作业 _id_ + +## `/resume POST` +恢复被停止的任务 + +### _request_ + +```json +{ + "name": string, + "permanent": bool, +} +``` + +* _name_ : 任务名称 +* _permanent_ : 是否持久化到配置文件 + +### _response_ +```json +{ + "entry_id": int +} +``` + + +## `/pause POST` +暂停处于活跃的任务一段事件, 超时后自动恢复 + +### _request_ +```json +{ + "name": string, + "duration": time.Duration, +} +``` + +* _name_ : 任务名称 +* _duration_ : _golang_ 形式的 _time.Duration_ 字符串, 如 _1s, 1h30m_ + +### _response_ +```json +{ + "entry_id": int +} +``` + +* _entry_id_ : 成功操作的作业 _id_ + + + +## `/create_or_replace POST` +新增或者替换一个任务的定义 + + +### _request_ +```json +{ + "job": { + "name": string + "command": string + "args": []string, + "schedule": string, + "creator": string, + "work_dir": string, # optional + "enable": bool + }, + "permanent": bool +} +``` + +* _job_ : 任务描述 + * _name_ : 任务名称, 全局唯一 + * _command_ : 可以正常运行的命令 + * _args_ : 命令参数列表 + * _schedule_ : 支持秒的调度配置, 如 _@every 2s_ , _@every 1h10m_ , _*/30 * * * * *_ + * _creator_ : 创建人 + * _enable_ : 是否启用 +* _permanent_: 是否持久化到配置文件 + +## `/delete POST` +删除一个任务 +### _request_ + +```json +{ + "name": string, + "permanent": bool, +} +``` + +* _name_ : 任务名称 +* _permanent_ : 是否持久化到配置文件 +### _response_ +```json +{ + "entry_id": int +} +``` + +* 删除的是 _activity_ 任务时为真实 _id_ +* 删除的是 _disabled_ 任务时恒为 _0_ + +## `/beat/event POST` +发送一个自定义事件 +### _request_ +```json +{ + "name": string + "content": string + "dimension": dict[string]:string or int +} +``` + +* `name`: 自定义事件名称 +* `content`: 事件内容 +* `dimension`: 附加的维度 + +默认就强制添加的维度 +```go +dimension["bk_biz_id"] = JobsConfig.BkBizId +dimension["bk_cloud_id"] = *RuntimeConfig.BkCloudID +dimension["server_ip"] = RuntimeConfig.Ip +dimension["immute_domain"] = JobsConfig.ImmuteDomain +dimension["machine_type"] = JobsConfig.MachineType + +if JobsConfig.Role != nil { + dimension["role"] = *JobsConfig.Role +} +``` + +## _response_ +无 + +## `/beat/metrics POST` +发送一个自定义指标 +### _request_ +```json +{ + "name": string + "value": int + "dimension": dict[string]:string or int +} +``` + +* `name`: 自定义指标名称 +* `value`: 指标的值 +* `dimension`: 附加的维度 + +带有和 `/beat/event` 一样的默认维度 + +### _response_ +无 + +## `/config/jobs-config GET` +返回配置的 _jobs config file_ 的路径 + +### _response_ +```json +{ + "path": string +} +``` + +## `/config/reload GET` +重新加载 _jobs config_ , 用于人肉修改配置文件后的加载 + +# _sdk_ + +```go +import ma 
"dbm-services/mysql/db-tools/mysql-crond/api" +``` + +1. 用 `func NewManager(apiUrl string) *Manager` 获得管理器 +2. 提供了所有的 _http api_ 操作 +3. _entries_ 的返回是 + ```go + type SimpleEntry struct { + ID int `json:"ID"` + Job config.ExternalJob `json:"Job"` + } + ``` + 比 _http api_ 要简单 \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-crond/api/api.go b/dbm-services/mysql/db-tools/mysql-crond/api/api.go new file mode 100644 index 0000000000..2536080e0d --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/api/api.go @@ -0,0 +1,20 @@ +// Package api TODO +package api + +import ( + "net/http" +) + +// Manager TODO +type Manager struct { + apiUrl string + client *http.Client +} + +// NewManager TODO +func NewManager(apiUrl string) *Manager { + return &Manager{ + apiUrl: apiUrl, + client: &http.Client{}, + } +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/api/create_or_replace.go b/dbm-services/mysql/db-tools/mysql-crond/api/create_or_replace.go new file mode 100644 index 0000000000..a2ee89c808 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/api/create_or_replace.go @@ -0,0 +1,43 @@ +package api + +import ( + "encoding/json" + + "github.com/pkg/errors" +) + +// JobDefine TODO +type JobDefine struct { + Name string `json:"name" validate:"required"` + Command string `json:"command" validate:"required"` + Args []string `json:"args"` + Schedule string `json:"schedule" validate:"required"` + Creator string `json:"creator"` + Enable bool `json:"enable"` + WorkDir string `json:"work_dir"` +} + +// CreateOrReplace TODO +func (m *Manager) CreateOrReplace(job JobDefine, permanent bool) (int, error) { + body := struct { + Job JobDefine `json:"job"` + Permanent bool `json:"permanent"` + }{ + Job: job, + Permanent: permanent, + } + resp, err := m.do("/create_or_replace", "POST", body) + if err != nil { + return 0, errors.Wrap(err, "manager call /create_or_replace") + } + + res := struct { + EntryId int `json:"entry_id"` + }{} + err = json.Unmarshal(resp, &res) + if err != nil { + return 0, errors.Wrap(err, "manager unmarshal /create_or_replace response") + } + + return res.EntryId, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/api/delete.go b/dbm-services/mysql/db-tools/mysql-crond/api/delete.go new file mode 100644 index 0000000000..a273ab885b --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/api/delete.go @@ -0,0 +1,33 @@ +package api + +import ( + "encoding/json" + + "github.com/pkg/errors" +) + +// Delete TODO +func (m *Manager) Delete(name string, permanent bool) (int, error) { + body := struct { + Name string `json:"name"` + Permanent bool `json:"permanent"` + }{ + Name: name, + Permanent: permanent, + } + + resp, err := m.do("/delete", "POST", body) + if err != nil { + return 0, errors.Wrap(err, "manager call /delete") + } + + res := struct { + EntryId int `json:"entry_id"` + }{} + err = json.Unmarshal(resp, &res) + if err != nil { + return 0, errors.Wrap(err, "manager unmarshal /delete response") + } + + return res.EntryId, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/api/disable.go b/dbm-services/mysql/db-tools/mysql-crond/api/disable.go new file mode 100644 index 0000000000..6df9d60e61 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/api/disable.go @@ -0,0 +1,33 @@ +package api + +import ( + "encoding/json" + + "github.com/pkg/errors" +) + +// Disable TODO +func (m *Manager) Disable(name string, permanent bool) (int, error) { + body := struct { + Name string `json:"name"` + Permanent bool `json:"permanent"` + 
}{ + Name: name, + Permanent: permanent, + } + + resp, err := m.do("/disable", "POST", body) + if err != nil { + return 0, errors.Wrap(err, "manager call /disable") + } + + res := struct { + EntryId int `json:"entry_id"` + }{} + err = json.Unmarshal(resp, &res) + if err != nil { + return 0, errors.Wrap(err, "manager unmarshal /delete response") + } + + return res.EntryId, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/api/do.go b/dbm-services/mysql/db-tools/mysql-crond/api/do.go new file mode 100644 index 0000000000..fee02a6db8 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/api/do.go @@ -0,0 +1,53 @@ +package api + +import ( + "bytes" + "encoding/json" + "io" + "net/http" + "net/url" + "strings" + + "github.com/pkg/errors" +) + +func (m *Manager) do(action string, method string, payLoad interface{}) ([]byte, error) { + apiUrl, err := url.JoinPath(m.apiUrl, action) + if err != nil { + return nil, errors.Wrap(err, "join api url") + } + + body, err := json.Marshal(payLoad) + if err != nil { + return nil, errors.Wrap(err, "marshal payload") + } + + req, err := http.NewRequest( + strings.ToUpper(method), + apiUrl, + bytes.NewReader(body), + ) + if err != nil { + return nil, errors.Wrap(err, "new request") + } + + resp, err := m.client.Do(req) + if err != nil { + return nil, errors.Wrap(err, "call http api") + } + defer func() { + _ = resp.Body.Close() + }() + + if resp.StatusCode != http.StatusOK { + respBody, _ := io.ReadAll(resp.Body) + return nil, errors.Errorf("http code: %d, status: %s, resp body: %s", + resp.StatusCode, resp.Status, string(respBody)) + } + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "read resp body") + } + return respBody, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/api/entries.go b/dbm-services/mysql/db-tools/mysql-crond/api/entries.go new file mode 100644 index 0000000000..4ba6e161f9 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/api/entries.go @@ -0,0 +1,59 @@ +package api + +import ( + "dbm-services/mysql/db-tools/mysql-crond/pkg/config" + "encoding/json" + "strings" + + "github.com/pkg/errors" +) + +// SimpleEntry TODO +type SimpleEntry struct { + ID int `json:"ID"` + Job config.ExternalJob `json:"Job"` +} + +// Entries TODO +func (m *Manager) Entries() ([]*SimpleEntry, error) { + resp, err := m.do("/entries", "GET", nil) + if err != nil { + return nil, errors.Wrap(err, "manager call /entries") + } + + var res struct { + Entries []*SimpleEntry `json:"entries"` + } + err = json.Unmarshal(resp, &res) + if err != nil { + return nil, errors.Wrap(err, "manager unmarshal /entries response") + } + + return res.Entries, nil +} + +// SimpleEntryList 用于自定义排序 +type SimpleEntryList []*SimpleEntry + +// Len 用于排序 +func (e SimpleEntryList) Len() int { + return len(e) +} + +// Less 用于排序 +func (e SimpleEntryList) Less(i, j int) bool { + if e[i].Job.Command > e[j].Job.Command { + return true + } else if e[i].Job.Command == e[j].Job.Command && e[i].Job.Schedule > e[j].Job.Schedule { + return true + } else if e[i].Job.Command == e[j].Job.Command && e[i].Job.Schedule == e[j].Job.Schedule && + strings.Join(e[i].Job.Args, " ") > strings.Join(e[j].Job.Args, " ") { + return true + } + return false +} + +// Swap 用于排序 +func (e SimpleEntryList) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/api/jobs_config.go b/dbm-services/mysql/db-tools/mysql-crond/api/jobs_config.go new file mode 100644 index 0000000000..1e5a1c9a2d --- /dev/null +++ 
b/dbm-services/mysql/db-tools/mysql-crond/api/jobs_config.go @@ -0,0 +1,25 @@ +package api + +import ( + "encoding/json" + + "github.com/pkg/errors" +) + +// JobConfig TODO +func (m *Manager) JobConfig() (string, error) { + resp, err := m.do("/config/jobs-config", "GET", nil) + if err != nil { + return "", errors.Wrap(err, "manager call /config/jobs-config") + } + + res := struct { + Path string `json:"path"` + }{} + err = json.Unmarshal(resp, &res) + if err != nil { + return "", errors.Wrap(err, "manager unmarshal /config/jobs-config response") + } + + return res.Path, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/api/pause.go b/dbm-services/mysql/db-tools/mysql-crond/api/pause.go new file mode 100644 index 0000000000..ff9032cbeb --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/api/pause.go @@ -0,0 +1,34 @@ +package api + +import ( + "encoding/json" + "time" + + "github.com/pkg/errors" +) + +// Pause TODO +func (m *Manager) Pause(name string, duration time.Duration) (int, error) { + body := struct { + Name string `json:"name"` + Duration time.Duration `json:"duration"` + }{ + Name: name, + Duration: duration, + } + + resp, err := m.do("/pause", "POST", body) + if err != nil { + return 0, errors.Wrap(err, "manager call /pause") + } + + res := struct { + EntryId int `json:"entry_id"` + }{} + err = json.Unmarshal(resp, &res) + if err != nil { + return 0, errors.Wrap(err, "manager unmarshal /pause response") + } + + return res.EntryId, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/api/quit.go b/dbm-services/mysql/db-tools/mysql-crond/api/quit.go new file mode 100644 index 0000000000..5e628993ee --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/api/quit.go @@ -0,0 +1,12 @@ +package api + +import "github.com/pkg/errors" + +// Quit TODO +func (m *Manager) Quit() error { + _, err := m.do("/quit", "GET", nil) + if err != nil { + return errors.Wrap(err, "manager call /quit") + } + return nil +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/api/reload.go b/dbm-services/mysql/db-tools/mysql-crond/api/reload.go new file mode 100644 index 0000000000..eb749925e5 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/api/reload.go @@ -0,0 +1,12 @@ +package api + +import "github.com/pkg/errors" + +// Reload TODO +func (m *Manager) Reload() error { + _, err := m.do("/config/reload", "GET", nil) + if err != nil { + return errors.Wrap(err, "manager call /reload") + } + return nil +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/api/resume.go b/dbm-services/mysql/db-tools/mysql-crond/api/resume.go new file mode 100644 index 0000000000..3dd847022e --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/api/resume.go @@ -0,0 +1,33 @@ +package api + +import ( + "encoding/json" + + "github.com/pkg/errors" +) + +// Resume TODO +func (m *Manager) Resume(name string, permanent bool) (int, error) { + body := struct { + Name string `json:"name"` + Permanent bool `json:"permanent"` + }{ + Name: name, + Permanent: permanent, + } + + resp, err := m.do("/resume", "POST", body) + if err != nil { + return 0, errors.Wrap(err, "manager call /resume") + } + + res := struct { + EntryId int `json:"entry_id"` + }{} + err = json.Unmarshal(resp, &res) + if err != nil { + return 0, errors.Wrap(err, "manager unmarshal /resume response") + } + + return res.EntryId, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/api/send_event.go b/dbm-services/mysql/db-tools/mysql-crond/api/send_event.go new file mode 100644 index 0000000000..44643d3788 --- /dev/null 
diff --git a/dbm-services/mysql/db-tools/mysql-crond/api/send_event.go b/dbm-services/mysql/db-tools/mysql-crond/api/send_event.go
new file mode 100644
index 0000000000..44643d3788
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/api/send_event.go
@@ -0,0 +1,24 @@
+package api
+
+import (
+	"github.com/pkg/errors"
+)
+
+// SendEvent reports a named event with dimensions to the daemon's beat endpoint
+func (m *Manager) SendEvent(name string, content string, dimension map[string]interface{}) error {
+	body := struct {
+		Name      string                 `json:"name"`
+		Content   string                 `json:"content"`
+		Dimension map[string]interface{} `json:"dimension"`
+	}{
+		Name:      name,
+		Content:   content,
+		Dimension: dimension,
+	}
+
+	_, err := m.do("/beat/event", "POST", body)
+	if err != nil {
+		return errors.Wrap(err, "manager call /beat/event")
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/api/send_metrics.go b/dbm-services/mysql/db-tools/mysql-crond/api/send_metrics.go
new file mode 100644
index 0000000000..ba36fa0baf
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/api/send_metrics.go
@@ -0,0 +1,24 @@
+package api
+
+import (
+	"github.com/pkg/errors"
+)
+
+// SendMetrics reports a named metric value with dimensions to the daemon's beat endpoint
+func (m *Manager) SendMetrics(name string, value int64, dimension map[string]interface{}) error {
+	body := struct {
+		Name      string                 `json:"name"`
+		Value     int64                  `json:"value"`
+		Dimension map[string]interface{} `json:"dimension"`
+	}{
+		Name:      name,
+		Value:     value,
+		Dimension: dimension,
+	}
+
+	_, err := m.do("/beat/metrics", "POST", body)
+	if err != nil {
+		return errors.Wrap(err, "manager call /beat/metrics")
+	}
+	return nil
+}
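[Editor's note: a similarly hedged sketch of pushing a metric and an event through the two beat endpoints just defined; the metric name, value and dimensions are made up for illustration.]

package main

import "dbm-services/mysql/db-tools/mysql-crond/api"

func main() {
	manager := api.NewManager("http://127.0.0.1:9999") // hypothetical address

	// Dimensions travel as an arbitrary JSON object alongside the value.
	dim := map[string]interface{}{"cluster": "example", "role": "master"}

	if err := manager.SendMetrics("slave_delay_example", 42, dim); err != nil {
		panic(err)
	}
	if err := manager.SendEvent("slave_delay_example", "replication delay over threshold", dim); err != nil {
		panic(err)
	}
}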
diff --git a/dbm-services/mysql/db-tools/mysql-crond/cmd/cmd.go b/dbm-services/mysql/db-tools/mysql-crond/cmd/cmd.go
new file mode 100644
index 0000000000..5b729f1814
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/cmd/cmd.go
@@ -0,0 +1,2 @@
+// Package cmd implements the mysql-crond command line
+package cmd
diff --git a/dbm-services/mysql/db-tools/mysql-crond/cmd/init.go b/dbm-services/mysql/db-tools/mysql-crond/cmd/init.go
new file mode 100644
index 0000000000..71aa14845e
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/cmd/init.go
@@ -0,0 +1,22 @@
+package cmd
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/spf13/viper"
+)
+
+// ExecutableName is the base name of the running binary, used for pid and log file names
+var ExecutableName string
+
+func init() {
+	rootCmd.Flags().StringP("config", "c", "", "runtime config file")
+	rootCmd.Flags().BoolP("without-heart-beat", "", false, "disable heart beat")
+	_ = rootCmd.MarkFlagRequired("config")
+	_ = viper.BindPFlag("config", rootCmd.Flags().Lookup("config"))
+	_ = viper.BindPFlag("without-heart-beat", rootCmd.Flags().Lookup("without-heart-beat"))
+
+	ex, _ := os.Executable()
+	ExecutableName = filepath.Base(ex)
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/cmd/root.go b/dbm-services/mysql/db-tools/mysql-crond/cmd/root.go
new file mode 100644
index 0000000000..9b91ca1f13
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/cmd/root.go
@@ -0,0 +1,154 @@
+package cmd
+
+import (
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/config"
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/crond"
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/service"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"sync"
+	"time"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+	"gopkg.in/natefinch/lumberjack.v2"
+)
+
+var (
+	m sync.Mutex
+)
+
+var rootCmd = &cobra.Command{
+	Use:   "mysql-crond",
+	Short: "mysql-crond",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		err := config.InitConfig(viper.GetString("config"))
+		if err != nil {
+			slog.Error("start crond", err)
+			return err
+		}
+
+		initLogger()
+
+		pidFile := path.Join(
+			config.RuntimeConfig.PidPath, fmt.Sprintf("%s.pid", ExecutableName),
+		)
+		pf, err := os.Create(pidFile)
+		if err != nil {
+			slog.Error("start crond", err)
+			return err
+		}
+		err = os.Chown(pidFile, config.JobsUserUid, config.JobsUserGid)
+		if err != nil {
+			slog.Error("start crond", err)
+			return err
+		}
+		err = os.Truncate(pidFile, 0)
+		if err != nil {
+			slog.Error("start crond", err)
+			return err
+		}
+		_, err = io.WriteString(pf, fmt.Sprintf("%d\n", os.Getpid()))
+		if err != nil {
+			slog.Error("start crond", err)
+			return err
+		}
+
+		quit := make(chan struct{})
+
+		go func() {
+			<-quit
+			time.Sleep(10 * time.Second)
+			crond.Stop()
+			slog.Info("quit mysql-crond")
+			os.Exit(0)
+		}()
+
+		err = crond.Start()
+		if err != nil {
+			slog.Error("start crond", err)
+			return err
+		}
+
+		err = service.Start(version, buildStamp, gitHash, quit, &m)
+		if err != nil {
+			slog.Error("start http server", err)
+			return err
+		}
+
+		return nil
+	},
+}
+
+// Execute runs the root command and exits non-zero on error
+func Execute() {
+	err := rootCmd.Execute()
+	if err != nil {
+		slog.Error("start", err)
+		os.Exit(1)
+	}
+}
+
+func initLogger() {
+	var ioWriters []io.Writer
+
+	if config.RuntimeConfig.Log.Console {
+		ioWriters = append(ioWriters, os.Stdout)
+	}
+
+	if config.RuntimeConfig.Log.LogFileDir != nil {
+		if !path.IsAbs(*config.RuntimeConfig.Log.LogFileDir) {
+			err := fmt.Errorf("log_file_dir must be an absolute path")
+			panic(err)
+		}
+
+		err := os.MkdirAll(*config.RuntimeConfig.Log.LogFileDir, 0755)
+		if err != nil {
+			panic(err)
+		}
+		err = os.Chown(*config.RuntimeConfig.Log.LogFileDir, config.JobsUserUid, config.JobsUserGid)
+		if err != nil {
+			panic(err)
+		}
+
+		logFile := path.Join(*config.RuntimeConfig.Log.LogFileDir, fmt.Sprintf("%s.log", ExecutableName))
+		_, err = os.Stat(logFile)
+		if err != nil {
+			if os.IsNotExist(err) {
+				_, err := os.Create(logFile)
+				if err != nil {
+					panic(err)
+				}
+				err = os.Chown(logFile, config.JobsUserUid, config.JobsUserGid)
+				if err != nil {
+					panic(err)
+				}
+			} else {
+				panic(err)
+			}
+		}
+
+		ioWriters = append(ioWriters, &lumberjack.Logger{Filename: logFile})
+	}
+
+	handleOpt := slog.HandlerOptions{
+		AddSource: config.RuntimeConfig.Log.Source,
+	}
+
+	if config.RuntimeConfig.Log.Debug {
+		handleOpt.Level = slog.LevelDebug
+	} else {
+		handleOpt.Level = slog.LevelInfo
+	}
+
+	var logger *slog.Logger
+	if config.RuntimeConfig.Log.Json {
+		logger = slog.New(handleOpt.NewJSONHandler(io.MultiWriter(ioWriters...)))
+	} else {
+		logger = slog.New(handleOpt.NewTextHandler(io.MultiWriter(ioWriters...)))
+	}
+	slog.SetDefault(logger)
+}
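[Editor's note: root.go hands the quit channel to the HTTP service, whose /quit handler fires it; a hedged sketch of asking a running daemon to reload its jobs or shut down from another process, using the client calls earlier in this patch. The address is hypothetical.]

package main

import "dbm-services/mysql/db-tools/mysql-crond/api"

func main() {
	manager := api.NewManager("http://127.0.0.1:9999") // hypothetical address

	// Re-read the jobs config without restarting the daemon.
	if err := manager.Reload(); err != nil {
		panic(err)
	}

	// Ask the daemon to stop; per root.go it sleeps ~10s, stops crond, then exits.
	if err := manager.Quit(); err != nil {
		panic(err)
	}
}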
diff --git a/dbm-services/mysql/db-tools/mysql-crond/cmd/subcmd_list.go b/dbm-services/mysql/db-tools/mysql-crond/cmd/subcmd_list.go
new file mode 100644
index 0000000000..66274b3bae
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/cmd/subcmd_list.go
@@ -0,0 +1,66 @@
+package cmd
+
+import (
+	"dbm-services/mysql/db-tools/mysql-crond/api"
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/config"
+	"fmt"
+	"os"
+	"sort"
+	"strings"
+
+	"github.com/olekukonko/tablewriter"
+	"github.com/spf13/cast"
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+)
+
+// listEntriesCmd represents the list command
+var listEntriesCmd = &cobra.Command{
+	Use:   "list",
+	Short: "list active crond entries",
+	Long:  `list active crond entries`,
+	Run: func(cmd *cobra.Command, args []string) {
+		listEntries()
+	},
+}
+
+func init() {
+	listEntriesCmd.PersistentFlags().StringP("config", "c", "", "config file")
+	_ = listEntriesCmd.MarkPersistentFlagRequired("config")
+	_ = viper.BindPFlag("list-config", listEntriesCmd.PersistentFlags().Lookup("config"))
+
+	rootCmd.AddCommand(listEntriesCmd)
+}
+
+func listEntries() {
+	// init config to get listen ip:port
+	var err error
+	apiUrl := ""
+	if apiUrl, err = config.GetApiUrlFromConfig(viper.GetString("list-config")); err != nil {
+		fmt.Fprintln(os.Stderr, "read config error", err.Error())
+		os.Exit(1)
+	}
+
+	manager := api.NewManager(apiUrl)
+	entries, err := manager.Entries()
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "fail to list entries", err.Error())
+		os.Exit(1)
+	}
+	sort.Sort(api.SimpleEntryList(entries)) // custom sort order for display
+	table := tablewriter.NewWriter(os.Stdout)
+	table.SetAutoWrapText(false)
+	table.SetRowLine(false)
+	table.SetAutoFormatHeaders(false)
+	table.SetHeader([]string{"ID", "Schedule", "Command", "Args", "WorkDir", "Enable"})
+	for _, e := range entries {
+		table.Append([]string{
+			cast.ToString(e.ID),
+			e.Job.Schedule,
+			e.Job.Command,
+			strings.Join(e.Job.Args, " "),
+			e.Job.WorkDir,
+			cast.ToString(e.Job.Enable)})
+	}
+	table.Render()
+}
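[Editor's note: the same Entries endpoint can be consumed programmatically; a hedged sketch using the entry fields referenced above (the concrete SimpleEntry type lives in the api package, and the address is hypothetical).]

package main

import (
	"fmt"

	"dbm-services/mysql/db-tools/mysql-crond/api"
)

func main() {
	manager := api.NewManager("http://127.0.0.1:9999") // hypothetical address
	entries, err := manager.Entries()
	if err != nil {
		panic(err)
	}
	// Mirror the first three columns rendered by `mysql-crond list`.
	for _, e := range entries {
		fmt.Printf("%v\t%s\t%s\n", e.ID, e.Job.Schedule, e.Job.Command)
	}
}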
diff --git a/dbm-services/mysql/db-tools/mysql-crond/cmd/subcmd_version.go b/dbm-services/mysql/db-tools/mysql-crond/cmd/subcmd_version.go
new file mode 100644
index 0000000000..a48f577194
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/cmd/subcmd_version.go
@@ -0,0 +1,37 @@
+package cmd
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// versionCmd represents the version command
+var versionCmd = &cobra.Command{
+	Use:   "version",
+	Short: "print version information",
+	Long:  `print the version, git hash and build time of this binary`,
+	Run: func(cmd *cobra.Command, args []string) {
+		printVersion()
+	},
+}
+var version = ""
+var buildStamp = ""
+var gitHash = ""
+
+func init() {
+	rootCmd.AddCommand(versionCmd)
+
+	// Here you will define your flags and configuration settings.
+
+	// Cobra supports Persistent Flags which will work for this command
+	// and all subcommands, e.g.:
+	// versionCmd.PersistentFlags().String("foo", "", "A help for foo")
+
+	// Cobra supports local flags which will only run when this command
+	// is called directly, e.g.:
+	// versionCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
+}
+func printVersion() {
+	fmt.Printf("Version: %s, GitHash: %s, BuildAt: %s\n", version, gitHash, buildStamp)
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/go.mod b/dbm-services/mysql/db-tools/mysql-crond/go.mod
new file mode 100644
index 0000000000..a5e3b08108
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/go.mod
@@ -0,0 +1,55 @@
+module dbm-services/mysql/db-tools/mysql-crond
+
+go 1.19
+
+require (
+	github.com/gin-gonic/gin v1.9.0
+	github.com/go-playground/validator/v10 v10.12.0
+	github.com/olekukonko/tablewriter v0.0.5
+	github.com/pkg/errors v0.9.1
+	github.com/robfig/cron/v3 v3.0.1
+	github.com/spf13/cast v1.5.0
+	github.com/spf13/cobra v1.7.0
+	github.com/spf13/viper v1.15.0
+	golang.org/x/exp v0.0.0-20230418202329-0354be287a23
+	gopkg.in/natefinch/lumberjack.v2 v2.2.1
+	gopkg.in/yaml.v2 v2.4.0
+	gopkg.in/yaml.v3 v3.0.1
+)
+
+require (
+	github.com/bytedance/sonic v1.8.8 // indirect
+	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/gin-contrib/sse v0.1.0 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/goccy/go-json v0.10.2 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
+	github.com/leodido/go-urn v1.2.3 // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/mattn/go-isatty v0.0.18 // indirect
+	github.com/mattn/go-runewidth v0.0.9 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.7 // indirect
+	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	github.com/spf13/afero v1.9.5 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/subosito/gotenv v1.4.2 // indirect
+	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
+	github.com/ugorji/go/codec v1.2.11 // indirect
+	golang.org/x/arch v0.3.0 // indirect
+	golang.org/x/crypto v0.8.0 // indirect
+	golang.org/x/net v0.9.0 // indirect
+	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+)
diff --git a/dbm-services/mysql/db-tools/mysql-crond/go.sum b/dbm-services/mysql/db-tools/mysql-crond/go.sum
new file mode 100644
index 0000000000..9698ec997f
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/go.sum
@@ -0,0 +1,559 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod
h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic v1.8.8 h1:Kj4AYbZSeENfyXicsYppYKO0K2YWab+i2UTSY7Ukz9Q= +github.com/bytedance/sonic v1.8.8/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= 
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8= +github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator 
v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI= +github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/json-iterator/go v1.1.12 
h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= +github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA= +github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= +github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/yuin/goldmark v1.1.25/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= +golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230418202329-0354be287a23 h1:4NKENAGIctmZYLK9W+X1kDK8ObBFqOSCJM6WE7CvkJY= +golang.org/x/exp v0.0.0-20230418202329-0354be287a23/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint 
v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto 
v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto 
v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/dbm-services/mysql/db-tools/mysql-crond/jobs-config.yaml b/dbm-services/mysql/db-tools/mysql-crond/jobs-config.yaml new file mode 100644 index 0000000000..7a7ee80710 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/jobs-config.yaml @@ -0,0 +1,14 @@ +jobs: + - name: mysql-monitor-20000-@every 10s + enable: true + command: /Users/xfwduke/mysql-monitor/mysql-monitor + args: + - run + - -j + - character_consistency + - -c + - /Users/xfwduke/mysql-monitor/config.yaml + schedule: '@every 10s' + creator: xfwduke + work_dir: +bk_biz_id: 404 diff --git a/dbm-services/mysql/db-tools/mysql-crond/main.go b/dbm-services/mysql/db-tools/mysql-crond/main.go new file mode 100644 index 0000000000..9ef1d2c1f5 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/main.go @@ -0,0 +1,7 @@ +package main + +import "dbm-services/mysql/db-tools/mysql-crond/cmd" + +func main() { + cmd.Execute() +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/mysql-crond.conf.go.tpl b/dbm-services/mysql/db-tools/mysql-crond/mysql-crond.conf.go.tpl new file mode 100644 index 0000000000..f4544a6e74 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/mysql-crond.conf.go.tpl @@ -0,0 +1,27 @@ +ip: {{ .IP }} +port: 9999 +bk_cloud_id: {{ .BkCloudId }} +bk_monitor_beat: + custom_event: + bk_data_id: 
{{ .EventDataId }} + access_token: {{ .EventDataToken }} + report_type: agent + message_kind: event + custom_metrics: + bk_data_id: {{ .MetricsDataId }} + access_token: {{ .MetricsDataToken }} + report_type: agent + message_kind: timeseries + beat_path: {{ .BeatPath }} + agent_address: {{ .AgentAddress }} +log: + console: false + log_file_dir: {{ .LogPath }} + debug: false + source: true + json: true +pid_path: {{ .PidPath }} +jobs_user: mysql +jobs_config: {{ .InstallPath }}/jobs-config.yaml + + diff --git a/dbm-services/mysql/db-tools/mysql-crond/mysql-crond.conf.tpl b/dbm-services/mysql/db-tools/mysql-crond/mysql-crond.conf.tpl new file mode 100644 index 0000000000..3b9cf4de6e --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/mysql-crond.conf.tpl @@ -0,0 +1,27 @@ +ip: {{ cmdb_instance.host.bk_host_innerip }} +port: 9999 +bk_cloud_id: {{ cmdb_instance.host.bk_cloud_id }} +bk_monitor_beat: + custom_event: + bk_data_id: 542898 + access_token: xxxx + report_type: agent + message_kind: event + custom_metrics: + bk_data_id: 543957 + access_token: xxxx + report_type: agent + message_kind: timeseries + beat_path: {{ plugin_path.setup_path }}/plugins/bin/bkmonitorbeat + agent_address: {{ plugin_path.endpoint }} +log: + console: true + log_file_dir: {{ plugin_path.log_path }} + debug: false + source: true + json: true +pid_path: {{ plugin_path.pid_path }} +jobs_user: mysql +jobs_config: {{ plugin_path.setup_path }}/plugins/etc/jobs-config.yaml + + diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/config/bk_monitor_beat_config.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/config/bk_monitor_beat_config.go new file mode 100644 index 0000000000..619cca510d --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/config/bk_monitor_beat_config.go @@ -0,0 +1,22 @@ +package config + +type bkCustom struct { + BkDataId int `yaml:"bk_data_id" validate:"required"` + AccessToken string `yaml:"access_token" validate:"required"` + ReportType string `yaml:"report_type" validate:"required"` + MessageKind string `yaml:"message_kind" validate:"required"` +} + +// BkMonitorBeat TODO +type BkMonitorBeat struct { + // CustomEvent struct { + // bkCustom `yaml:",inline"` + // //Name string `yaml:"name" validate:"required"` + // } `yaml:"custom_event" validate:"required"` + CustomMetrics bkCustom `yaml:"custom_metrics" validate:"required"` + CustomEvent bkCustom `yaml:"custom_event" validate:"required"` + // InnerEventName string `yaml:"inner_event_name" validate:"required"` + // InnerMetricsName string `yaml:"inner_metrics_name" validate:"required"` + BeatPath string `yaml:"beat_path" validate:"required,file"` + AgentAddress string `yaml:"agent_address" validate:"required,file"` +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/config/config.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/config/config.go new file mode 100644 index 0000000000..1a83d63ca2 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/config/config.go @@ -0,0 +1,91 @@ +// Package config TODO +package config + +import ( + "fmt" + "os" + "os/user" + "strconv" + + "github.com/go-playground/validator/v10" + "golang.org/x/exp/slog" + "gopkg.in/yaml.v2" +) + +// RuntimeConfig TODO +var RuntimeConfig *runtimeConfig + +// JobsConfig TODO +var JobsConfig *jobsConfig + +var jobsUser *user.User +var currentUser *user.User + +// JobsUserUid TODO +var JobsUserUid int + +// JobsUserGid TODO +var JobsUserGid int + +var mysqlCrondEventName = "mysql-crond-event" + +// InitConfig TODO +func InitConfig(configFilePath 
string) error { + err := initConfig(configFilePath) + if err != nil { + return err + } + + jobsUser, err = user.Lookup(RuntimeConfig.JobsUser) + if err != nil { + slog.Error("init runtimeConfig find jobs user", err) + return err + } + + JobsUserUid, _ = strconv.Atoi(jobsUser.Uid) + JobsUserGid, _ = strconv.Atoi(jobsUser.Gid) + + currentUser, err = user.Current() + if err != nil { + slog.Error("init runtimeConfig get current user", err) + return err + } + + err = InitJobsConfig() + if err != nil { + return err + } + + return nil +} + +func initConfig(configFilePath string) error { + content, err := os.ReadFile(configFilePath) + if err != nil { + slog.Error("init runtimeConfig", err) + return err + } + + RuntimeConfig = &runtimeConfig{} + err = yaml.UnmarshalStrict(content, RuntimeConfig) + if err != nil { + slog.Error("init runtimeConfig", err) + return err + } + + validate := validator.New() + err = validate.Struct(RuntimeConfig) + if err != nil { + panic(err) + } + + return nil +} + +// GetApiUrlFromConfig TODO +func GetApiUrlFromConfig(configFilePath string) (string, error) { + if err := initConfig(configFilePath); err != nil { + return "", err + } + return fmt.Sprintf(`http://127.0.0.1:%d`, RuntimeConfig.Port), nil +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/config/job_config.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/config/job_config.go new file mode 100644 index 0000000000..fc39013425 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/config/job_config.go @@ -0,0 +1,166 @@ +package config + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path" + "syscall" + + "github.com/go-playground/validator/v10" + "golang.org/x/exp/slog" + "gopkg.in/yaml.v2" +) + +type jobsConfig struct { + Jobs []*ExternalJob `yaml:"jobs"` + BkBizId int `yaml:"bk_biz_id"` + // ImmuteDomain string `yaml:"immute_domain"` + // MachineType string `yaml:"machine_type"` + // Role *string `yaml:"role,omitempty"` +} + +// ExternalJob TODO +type ExternalJob struct { + Name string `yaml:"name" json:"name" binding:"required" validate:"required"` + Enable *bool `yaml:"enable" json:"enable" binding:"required" validate:"required"` + Command string `yaml:"command" json:"command" binding:"required" validate:"required"` + Args []string `yaml:"args" json:"args" binding:"required" validate:"required"` + Schedule string `yaml:"schedule" json:"schedule" binding:"required" validate:"required"` + Creator string `yaml:"creator" json:"creator" binding:"required" validate:"required"` + WorkDir string `yaml:"work_dir" json:"work_dir"` + ch chan struct{} +} + +func (j *ExternalJob) run() { + cmd := exec.Command(j.Command, j.Args...) 
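+	// what follows runs a single round of the external job: honor the
+	// configured work_dir, capture stdout/stderr, drop privileges to
+	// jobs_user when mysql-crond itself runs as a different user, and turn
+	// a failed run into a bk monitor custom event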
+	if j.WorkDir != "" {
+		cmd.Dir = j.WorkDir
+	}
+
+	var stdout, stderr bytes.Buffer
+	cmd.Stdout = &stdout
+	cmd.Stderr = &stderr
+
+	if currentUser.Uid != jobsUser.Uid {
+		cmd.SysProcAttr = &syscall.SysProcAttr{
+			Credential: &syscall.Credential{
+				Uid: uint32(JobsUserUid),
+				Gid: uint32(JobsUserGid),
+			},
+		}
+	}
+
+	err := cmd.Run()
+	if err != nil {
+		slog.Error(
+			"external job",
+			err,
+			slog.String("name", j.Name),
+			slog.String("stderr", stderr.String()),
+		)
+		err = SendEvent(
+			mysqlCrondEventName,
+			fmt.Sprintf(
+				"execute job %s failed: %s [%s]",
+				j.Name, err.Error(), stderr.String(),
+			),
+			map[string]interface{}{
+				"job_name": j.Name,
+			},
+		)
+		if err != nil {
+			slog.Error("send event", err)
+		}
+	} else {
+		slog.Info(
+			"external job",
+			slog.String("name", j.Name),
+			slog.String("stdout", stdout.String()),
+		)
+	}
+}
+
+// Run implements cron.Job; the single-slot channel serializes rounds, so an
+// overlapping round is skipped (and reported) instead of piling up
+func (j *ExternalJob) Run() {
+	select {
+	case v := <-j.ch:
+		j.run()
+		j.ch <- v
+	default:
+		slog.Warn("skip job", slog.String("name", j.Name))
+		err := SendEvent(
+			mysqlCrondEventName,
+			fmt.Sprintf("%s skipped because last round took too much time", j.Name),
+			map[string]interface{}{
+				"job_name": j.Name,
+			},
+		)
+		if err != nil {
+			slog.Error("send event", err)
+		}
+	}
+}
+
+// SetupChannel initializes the single-slot channel used by Run
+func (j *ExternalJob) SetupChannel( /*ip string*/ ) {
+	j.ch = make(chan struct{}, 1)
+	j.ch <- struct{}{}
+}
+
+func (j *ExternalJob) validate() error {
+	validate := validator.New()
+	return validate.Struct(j)
+}
+
+// InitJobsConfig loads the jobs-config file, creating an empty one owned by
+// jobs_user on first run
+func InitJobsConfig() error {
+	if !path.IsAbs(RuntimeConfig.JobsConfigFile) {
+		err := fmt.Errorf("jobs-config needs an absolute path")
+		slog.Error("init jobs config", err)
+		return err
+	}
+
+	_, err := os.Stat(RuntimeConfig.JobsConfigFile)
+	if err != nil {
+		if os.IsNotExist(err) {
+			slog.Info("init jobs config jobs-config file not found, try to create it")
+			_, err := os.Create(RuntimeConfig.JobsConfigFile)
+			if err != nil {
+				slog.Error("init jobs config create empty jobs-config file", err)
+				return err
+			}
+
+			err = os.Chown(RuntimeConfig.JobsConfigFile, JobsUserUid, JobsUserGid)
+			if err != nil {
+				slog.Error("init jobs config chown for jobs config", err)
+				return err
+			}
+		} else {
+			slog.Error("init jobs config get jobs-config file stat", err)
+			return err
+		}
+	}
+
+	content, err := os.ReadFile(RuntimeConfig.JobsConfigFile)
+	if err != nil {
+		slog.Error("init jobs config", err)
+		return err
+	}
+
+	JobsConfig = &jobsConfig{}
+	err = yaml.Unmarshal(content, &JobsConfig)
+	if err != nil {
+		slog.Error("init jobs config", err)
+		return err
+	}
+
+	for _, j := range JobsConfig.Jobs {
+		err := j.validate()
+		if err != nil {
+			panic(err)
+		}
+
+		j.SetupChannel()
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/config/log_config.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/config/log_config.go
new file mode 100644
index 0000000000..21db398b32
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/config/log_config.go
@@ -0,0 +1,10 @@
+package config
+
+// LogConfig log output options for mysql-crond itself
+type LogConfig struct {
+	Console    bool    `yaml:"console"`
+	LogFileDir *string `yaml:"log_file_dir"`
+	Debug      bool    `yaml:"debug"`
+	Source     bool    `yaml:"source"`
+	Json       bool    `yaml:"json"`
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/config/runtime_config.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/config/runtime_config.go
new file mode 100644
index 0000000000..cc7a18c799
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/config/runtime_config.go
@@ -0,0 +1,12 @@
+package config
+
+type runtimeConfig
struct { + Ip string `yaml:"ip" validate:"required,ipv4"` + Port int `yaml:"port" validate:"required,gt=1024,lte=65535"` + BkCloudID *int `yaml:"bk_cloud_id" validate:"required,gte=0"` + BkMonitorBeat *BkMonitorBeat `yaml:"bk_monitor_beat" validate:"required"` + Log *LogConfig `yaml:"log"` + PidPath string `yaml:"pid_path" validate:"required,dir"` + JobsUser string `yaml:"jobs_user" validate:"required"` + JobsConfigFile string `yaml:"jobs_config" validate:"required"` +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/config/send_bk_monitor_beat.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/config/send_bk_monitor_beat.go new file mode 100644 index 0000000000..a39faf37d6 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/config/send_bk_monitor_beat.go @@ -0,0 +1,186 @@ +package config + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "strconv" + "time" + + "golang.org/x/exp/slog" +) + +type commonData struct { + Target string `json:"target"` + Timestamp int64 `json:"timestamp"` + Dimension map[string]interface{} `json:"dimension"` + Metrics map[string]int64 `json:"metrics"` +} + +type eventData struct { + EventName string `json:"event_name"` + Event map[string]interface{} `json:"event"` + commonData +} + +type metricsData struct { + commonData +} + +type commonBody struct { + DataId int `json:"bk_data_id"` + AccessToken string `json:"access_token"` +} + +type eventBody struct { + commonBody + Data []eventData `json:"data"` +} + +type metricsBody struct { + commonBody + Data []metricsData `json:"data"` +} + +// SendBkMonitorBeat TODO +func SendBkMonitorBeat( + dataId int, reportType string, + messageKind string, body interface{}, +) error { + output, err := json.Marshal(body) + if err != nil { + slog.Error( + "send bk monitor heart beat encode body", + err, slog.Any("body", body), + ) + return err + } + + cmd := exec.Command( + RuntimeConfig.BkMonitorBeat.BeatPath, []string{ + "-report", + "-report.bk_data_id", fmt.Sprintf("%d", dataId), + "-report.type", reportType, + "-report.message.kind", messageKind, + "-report.agent.address", RuntimeConfig.BkMonitorBeat.AgentAddress, + "-report.message.body", string(output), + }..., + ) + slog.Info("send bk monitor", slog.String("command", cmd.String())) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err = cmd.Run() + if err != nil { + slog.Error( + "send bk monitor beat", + err, + slog.String("std out", stdout.String()), + slog.String("std err", stderr.String()), + ) + return err + } + + return nil +} + +// SendEvent TODO +func SendEvent(name string, content string, additionDimension map[string]interface{}) error { + l, _ := time.LoadLocation("Local") + + body := eventBody{ + commonBody: commonBody{ + DataId: RuntimeConfig.BkMonitorBeat.CustomEvent.BkDataId, + AccessToken: RuntimeConfig.BkMonitorBeat.CustomEvent.AccessToken, + }, + Data: []eventData{ + { + EventName: name, // RuntimeConfig.BkMonitorBeat.CustomEvent.Name, + Event: map[string]interface{}{ + "content": content, + }, + commonData: commonData{ + Target: RuntimeConfig.Ip, + Timestamp: time.Now().In(l).UnixMilli(), + Dimension: buildDimension(additionDimension), + Metrics: nil, + }, + }, + }, + } + + err := SendBkMonitorBeat( + RuntimeConfig.BkMonitorBeat.CustomEvent.BkDataId, + RuntimeConfig.BkMonitorBeat.CustomEvent.ReportType, + RuntimeConfig.BkMonitorBeat.CustomEvent.MessageKind, + body, + ) + if err != nil { + slog.Error("send event", err) + return err + } + + return nil +} + +// SendMetrics TODO +func 
SendMetrics(mKey string, mValue int64, additionDimension map[string]interface{}) error {
+	l, _ := time.LoadLocation("Local")
+
+	body := metricsBody{
+		commonBody: commonBody{
+			DataId:      RuntimeConfig.BkMonitorBeat.CustomMetrics.BkDataId,
+			AccessToken: RuntimeConfig.BkMonitorBeat.CustomMetrics.AccessToken,
+		},
+		Data: []metricsData{
+			{
+				commonData: commonData{
+					Target:    RuntimeConfig.Ip,
+					Timestamp: time.Now().In(l).UnixMilli(),
+					Dimension: buildDimension(additionDimension),
+					Metrics: map[string]int64{
+						mKey: mValue,
+					},
+				},
+			},
+		},
+	}
+
+	err := SendBkMonitorBeat(
+		RuntimeConfig.BkMonitorBeat.CustomMetrics.BkDataId,
+		RuntimeConfig.BkMonitorBeat.CustomMetrics.ReportType,
+		RuntimeConfig.BkMonitorBeat.CustomMetrics.MessageKind,
+		body,
+	)
+
+	if err != nil {
+		slog.Error("send metrics", err)
+		return err
+	}
+
+	return nil
+}
+
+func buildDimension(addition map[string]interface{}) map[string]interface{} {
+	dimension := make(map[string]interface{})
+	dimension["bk_biz_id"] = strconv.Itoa(JobsConfig.BkBizId)
+	dimension["app_id"] = strconv.Itoa(JobsConfig.BkBizId)
+	dimension["bk_cloud_id"] = strconv.Itoa(*RuntimeConfig.BkCloudID)
+	dimension["server_ip"] = RuntimeConfig.Ip
+	dimension["bk_target_ip"] = RuntimeConfig.Ip
+
+	// dimension["immute_domain"] = JobsConfig.ImmuteDomain
+	// dimension["machine_type"] = JobsConfig.MachineType
+	//
+	// if JobsConfig.Role != nil {
+	//	dimension["role"] = *JobsConfig.Role
+	// }
+
+	for k, v := range addition {
+		dimension[k] = v
+	}
+
+	return dimension
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/config/sync.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/config/sync.go
new file mode 100644
index 0000000000..e612db62d8
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/config/sync.go
@@ -0,0 +1,133 @@
+package config
+
+import (
+	"fmt"
+	"os"
+
+	"golang.org/x/exp/slog"
+	"gopkg.in/yaml.v3"
+)
+
+// SyncAddJob appends a job to the on-disk jobs-config file
+func SyncAddJob(newJob *ExternalJob) error {
+	content, err := os.ReadFile(RuntimeConfig.JobsConfigFile)
+	if err != nil {
+		slog.Error("sync add read config from disk", err)
+		return err
+	}
+
+	// var jobs []*ExternalJob
+	err = yaml.Unmarshal(content, &JobsConfig)
+	if err != nil {
+		slog.Error("sync add decode config", err)
+		return err
+	}
+
+	// jobs = append(jobs, newJob)
+	JobsConfig.Jobs = append(JobsConfig.Jobs, newJob)
+
+	// output, err := yaml.Marshal(jobs)
+	output, err := yaml.Marshal(JobsConfig)
+	if err != nil {
+		slog.Error("sync add encode updated config", err)
+		return err
+	}
+
+	err = os.WriteFile(RuntimeConfig.JobsConfigFile, output, 0644)
+	if err != nil {
+		slog.Error("sync add write to disk", err)
+		return err
+	}
+	return nil
+}
+
+// SyncJobEnable flips a job's enable flag in the on-disk jobs-config file
+func SyncJobEnable(name string, enable bool) error {
+	content, err := os.ReadFile(RuntimeConfig.JobsConfigFile)
+	if err != nil {
+		slog.Error("sync job enable read config from disk", err)
+		return err
+	}
+
+	// var jobs []*ExternalJob
+	err = yaml.Unmarshal(content, &JobsConfig)
+	if err != nil {
+		slog.Error("sync job enable decode config", err)
+		return err
+	}
+
+	idx := -1
+	for i, j := range JobsConfig.Jobs {
+		if j.Name == name {
+			idx = i
+			*j.Enable = enable
+		}
+	}
+	if idx < 0 {
+		err := fmt.Errorf(
+			"target job %s not found in %s",
+			name, RuntimeConfig.JobsConfigFile,
+		)
+		slog.Error("sync job enable seek target job", err)
+		return err
+	}
+
+	output, err := yaml.Marshal(JobsConfig)
+	if err != nil {
+		slog.Error("sync enable encode updated config", err)
+		return err
+	}
+
+	err = os.WriteFile(RuntimeConfig.JobsConfigFile, output, 0644)
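+	// the jobs file is rewritten wholesale on every change; callers are
+	// serialized by the shared mutex held by the HTTP handlers in pkg/service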
+	if err != nil {
+		slog.Error("sync enable write to disk", err)
+		return err
+	}
+	return nil
+}
+
+// SyncDelete removes a job from the on-disk jobs-config file
+func SyncDelete(name string) error {
+	content, err := os.ReadFile(RuntimeConfig.JobsConfigFile)
+	if err != nil {
+		slog.Error("sync delete read config from disk", err)
+		return err
+	}
+
+	// var jobs []*ExternalJob
+	err = yaml.Unmarshal(content, &JobsConfig)
+	if err != nil {
+		slog.Error("sync delete decode config", err)
+		return err
+	}
+
+	idx := -1
+	for i, j := range JobsConfig.Jobs {
+		if j.Name == name {
+			idx = i
+		}
+	}
+	if idx < 0 {
+		err := fmt.Errorf(
+			"target job %s not found in %s",
+			name, RuntimeConfig.JobsConfigFile,
+		)
+		slog.Error("sync delete seek target job", err)
+		return err
+	}
+
+	JobsConfig.Jobs = append(JobsConfig.Jobs[:idx], JobsConfig.Jobs[idx+1:]...)
+
+	output, err := yaml.Marshal(JobsConfig)
+	if err != nil {
+		slog.Error("sync delete encode updated config", err)
+		return err
+	}
+
+	err = os.WriteFile(RuntimeConfig.JobsConfigFile, output, 0644)
+	if err != nil {
+		slog.Error("sync delete write to disk", err)
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/crond.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/crond.go
new file mode 100644
index 0000000000..5a320a4e79
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/crond.go
@@ -0,0 +1,120 @@
+// Package crond manages the embedded cron scheduler and its jobs
+package crond
+
+import (
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/config"
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/schedule"
+	"sync"
+
+	"github.com/robfig/cron/v3"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+// DisabledJobs holds registered jobs that are currently not scheduled
+var DisabledJobs sync.Map
+
+var cronJob *cron.Cron
+
+func init() {
+	cronJob = cron.New(
+		cron.WithParser(
+			cron.NewParser(
+				cron.SecondOptional |
+					cron.Minute |
+					cron.Hour |
+					cron.Dom |
+					cron.Month |
+					cron.Dow |
+					cron.Descriptor,
+			),
+		),
+	)
+}
+
+// Stop halts the scheduler
+func Stop() {
+	cronJob.Stop()
+}
+
+// Start loads all configured jobs, adds the clearance and heartbeat entries,
+// then starts the scheduler
+func Start() error {
+	for _, j := range config.JobsConfig.Jobs {
+		entryID, err := Add(j, false)
+		if err != nil {
+			slog.Error("load job from config", err)
+			return err
+		}
+		slog.Info(
+			"load job from config",
+			slog.Int("entry id", entryID),
+			slog.String("name", j.Name),
+		)
+	}
+
+	// dedicated cleanup of one-shot entries that have already fired
+	entryID, err := cronJob.AddFunc(
+		"@every 1s", func() {
+			for _, entry := range cronJob.Entries() {
+				if s, ok := entry.Schedule.(*schedule.OnceSchedule); ok {
+					if s.IsExecuted() {
+						cronJob.Remove(entry.ID)
+					}
+				}
+			}
+		},
+	)
+	if err != nil {
+		slog.Error("add clearance job", err)
+		return err
+	}
+	slog.Info("add clearance job", slog.Int("entry id", int(entryID)))
+
+	// heartbeat
+	if !viper.GetBool("without-heart-beat") {
+		entryID, err = cronJob.AddFunc(
+			"@every 1m", func() {
+				err := config.SendMetrics(
+					"mysql_crond_heart_beat",
+					1,
+					map[string]interface{}{},
+				)
+				if err != nil {
+					slog.Error("heart beat", err)
+				} else {
+					slog.Info("heart beat success")
+				}
+			},
+		)
+		slog.Info("add heart beat job", slog.Int("entry id", int(entryID)))
+	}
+
+	cronJob.Start()
+	slog.Info("crond start")
+	return nil
+}
+
+// Reload drops every entry, re-reads jobs-config and starts over
+func Reload() error {
+	cronJob.Stop()
+	for _, e := range cronJob.Entries() {
+		cronJob.Remove(e.ID)
+	}
+	DisabledJobs.Range(
+		func(key, value any) bool {
+			DisabledJobs.Delete(key)
+			return true
+		},
+	)
+
+	err := config.InitJobsConfig()
+	if err != nil {
+		slog.Error("reload re-init jobs-config", err)
+		return err
+	}
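+	// re-register every entry from the just-reloaded jobs-config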
+	err = Start()
+	if err != nil {
+		slog.Error("reload start crond", err)
+	}
+	return err
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/error.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/error.go
new file mode 100644
index 0000000000..cd6eacd65c
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/error.go
@@ -0,0 +1,9 @@
+package crond
+
+// NotFoundError signals that a named job does not exist
+type NotFoundError string
+
+// Error implements the error interface
+func (r NotFoundError) Error() string {
+	return string(r)
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/find_entry.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/find_entry.go
new file mode 100644
index 0000000000..5378433e47
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/find_entry.go
@@ -0,0 +1,20 @@
+package crond
+
+import (
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/config"
+
+	"github.com/robfig/cron/v3"
+)
+
+func findEntry(name string) *cron.Entry {
+	for _, entry := range ListEntry() {
+		if j, ok := entry.Job.(*config.ExternalJob); ok && j.Name == name {
+			return &entry
+		}
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_add.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_add.go
new file mode 100644
index 0000000000..9c3cb62527
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_add.go
@@ -0,0 +1,71 @@
+package crond
+
+import (
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/config"
+	"fmt"
+
+	"golang.org/x/exp/slog"
+)
+
+// Add registers a job as active or disabled; permanent also syncs it to jobs-config
+func Add(j *config.ExternalJob, permanent bool) (int, error) {
+	existEntry := findEntry(j.Name)
+	if existEntry != nil {
+		err := fmt.Errorf("duplicate active job name: %s", j.Name)
+		slog.Error("add job", err)
+		return 0, err
+	}
+
+	if _, ok := DisabledJobs.Load(j.Name); ok {
+		err := fmt.Errorf("duplicate disabled job name: %s", j.Name)
+		slog.Error("add job", err)
+		return 0, err
+	}
+
+	if *j.Enable {
+		return addActivate(j, permanent)
+	} else {
+		return addDisabled(j, permanent)
+	}
+}
+
+func addActivate(j *config.ExternalJob, permanent bool) (int, error) {
+	entryID, err := cronJob.AddJob(j.Schedule, j)
+	if err != nil {
+		slog.Error("add job", err)
+		return 0, err
+	}
+	slog.Info(
+		"add job",
+		slog.String("name", j.Name),
+		slog.Int("entry id", int(entryID)),
+	)
+
+	if permanent {
+		err := config.SyncAddJob(j)
+		if err != nil {
+			cronJob.Remove(entryID)
+			return 0, err
+		}
+	}
+
+	return int(entryID), nil
+}
+
+func addDisabled(j *config.ExternalJob, permanent bool) (int, error) {
+	DisabledJobs.Store(j.Name, j)
+	slog.Info(
+		"add disabled job",
+		slog.String("name", j.Name),
+	)
+
+	if permanent {
+		err := config.SyncAddJob(j)
+		if err != nil {
+			DisabledJobs.Delete(j.Name)
+			return 0, err
+		}
+	}
+
+	return 0, nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_delete.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_delete.go
new file mode 100644
index 0000000000..6dd43692a6
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_delete.go
@@ -0,0 +1,65 @@
+package crond
+
+import (
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/config"
+	"fmt"
+
+	"github.com/robfig/cron/v3"
+	"golang.org/x/exp/slog"
+)
+
+// Delete removes a job whether it is active or disabled; permanent also syncs jobs-config
+func Delete(name string, permanent bool) (int, error) {
+	existEntry := findEntry(name)
+	if existEntry != nil {
+		return deleteActivate(existEntry, permanent)
+	}
+
+	if _, ok := DisabledJobs.Load(name); ok {
+		return deleteDisabled(name, permanent)
+	}
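+	// fall through: the job exists neither as an active entry nor as a disabled one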
+
+	err := NotFoundError(fmt.Sprintf("job %s not found", name))
+	slog.Error("delete job", err)
+	return 0, err
+}
+
+func deleteActivate(entry *cron.Entry, permanent bool) (int, error) {
+	j, ok := entry.Job.(*config.ExternalJob)
+	if !ok {
+		err := fmt.Errorf("convert %v to ExternalJob failed", entry)
+		slog.Error("delete activate", err)
+		return 0, err
+	}
+
+	cronJob.Remove(entry.ID)
+	if permanent {
+		err := config.SyncDelete(j.Name)
+		if err != nil {
+			_, _ = cronJob.AddJob(j.Schedule, j)
+			return 0, err
+		}
+	}
+	slog.Info("delete active job success", slog.String("name", j.Name))
+	return 0, nil
+}
+
+func deleteDisabled(name string, permanent bool) (int, error) {
+	v, _ := DisabledJobs.LoadAndDelete(name)
+	job, ok := v.(*config.ExternalJob)
+	if !ok {
+		err := fmt.Errorf("convert %v to ExternalJob failed", v)
+		slog.Error("delete disabled", err)
+		return 0, err
+	}
+
+	if permanent {
+		err := config.SyncDelete(name)
+		if err != nil {
+			DisabledJobs.Store(name, job)
+			return 0, err
+		}
+	}
+	slog.Info("delete disabled success", slog.String("name", name))
+	return 0, nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_disable.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_disable.go
new file mode 100644
index 0000000000..f0d285966a
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_disable.go
@@ -0,0 +1,49 @@
+package crond
+
+import (
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/config"
+	"fmt"
+
+	"golang.org/x/exp/slog"
+)
+
+// Disable unschedules a job but keeps it registered in DisabledJobs
+func Disable(name string, permanent bool) (int, error) {
+	existEntry := findEntry(name)
+	if existEntry == nil {
+		err := fmt.Errorf("entry %s not found", name)
+		slog.Error("disable job", err)
+		return 0, err
+	}
+
+	j, ok := existEntry.Job.(*config.ExternalJob)
+	if !ok {
+		err := fmt.Errorf("convert %v to ExternalJob failed", existEntry)
+		slog.Error("disable job", err)
+		return 0, err
+	}
+	*j.Enable = false
+
+	cronJob.Remove(existEntry.ID)
+	slog.Info(
+		"disable job",
+		slog.String("name", name),
+		slog.Int("entry id", int(existEntry.ID)),
+	)
+
+	DisabledJobs.Store(j.Name, j)
+
+	if permanent {
+		err := config.SyncJobEnable(name, false)
+		if err != nil {
+			*j.Enable = true
+			// we are already handling an error; if this fails too there is
+			// not much left to do, but it is unlikely to fail
+			_, _ = cronJob.AddJob(j.Schedule, j)
+			DisabledJobs.Delete(j.Name)
+			return 0, err
+		}
+	}
+
+	return int(existEntry.ID), nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_list.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_list.go
new file mode 100644
index 0000000000..45049c7ca1
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_list.go
@@ -0,0 +1,29 @@
+package crond
+
+import (
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/config"
+
+	"github.com/robfig/cron/v3"
+)
+
+// ListEntry returns all scheduled external-job entries
+func ListEntry() (res []cron.Entry) {
+	for _, entry := range cronJob.Entries() {
+		if _, ok := entry.Job.(*config.ExternalJob); ok {
+			res = append(res, entry)
+		}
+	}
+	return res
+}
+
+// ListDisabledJob returns all registered but unscheduled jobs
+func ListDisabledJob() (res []*config.ExternalJob) {
+	DisabledJobs.Range(
+		func(key, value any) bool {
+			j, _ := value.(*config.ExternalJob)
+			res = append(res, j)
+			return true
+		},
+	)
+	return res
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_pause.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_pause.go
new file mode 100644
index 0000000000..ca09169463
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_pause.go
@@ -0,0 +1,33 @@
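+// job_pause.go: Pause takes a job out of the schedule and uses a one-shot
+// schedule.OnceSchedule entry to put it back once the pause duration elapses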
+package crond
+
+import (
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/config"
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/schedule"
+	"fmt"
+	"time"
+
+	"github.com/robfig/cron/v3"
+	"golang.org/x/exp/slog"
+)
+
+// Pause unschedules a job for du; a one-shot entry re-adds it afterwards
+func Pause(name string, du time.Duration) (int, error) {
+	existEntry := findEntry(name)
+	if existEntry == nil {
+		err := fmt.Errorf("entry %s not found", name)
+		slog.Error("pause job", err)
+		return 0, err
+	}
+
+	j, _ := existEntry.Job.(*config.ExternalJob)
+	// take the job out of the schedule for the pause window; without this
+	// the one-shot below would register a duplicate entry
+	cronJob.Remove(existEntry.ID)
+	cronJob.Schedule(
+		schedule.NewOnceSchedule(time.Now().Add(du)),
+		cron.FuncJob(
+			func() {
+				_, _ = cronJob.AddJob(j.Schedule, j)
+			},
+		),
+	)
+
+	return int(existEntry.ID), nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_replace.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_replace.go
new file mode 100644
index 0000000000..e89913e683
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_replace.go
@@ -0,0 +1,26 @@
+package crond
+
+import (
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/config"
+
+	"golang.org/x/exp/slog"
+)
+
+// CreateOrReplace deletes any existing job with the same name, then adds the new one
+func CreateOrReplace(j *config.ExternalJob, permanent bool) (int, error) {
+	_, err := Delete(j.Name, permanent)
+
+	if err != nil {
+		if _, ok := err.(NotFoundError); !ok {
+			slog.Error("create or replace job", err, slog.Any("job", j))
+			return 0, err
+		}
+	}
+
+	entryID, err := Add(j, permanent)
+	if err != nil {
+		slog.Error("create or replace job", err, slog.Any("job", j))
+		return 0, err
+	}
+	return entryID, nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_resume.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_resume.go
new file mode 100644
index 0000000000..1aad57e5fd
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/crond/job_resume.go
@@ -0,0 +1,47 @@
+package crond
+
+import (
+	"dbm-services/mysql/db-tools/mysql-crond/pkg/config"
+	"fmt"
+
+	"golang.org/x/exp/slog"
+)
+
+// Resume moves a disabled job back into the schedule
+func Resume(name string, permanent bool) (int, error) {
+	if value, ok := DisabledJobs.LoadAndDelete(name); ok {
+		if j, ok := value.(*config.ExternalJob); ok {
+			*j.Enable = true
+
+			entryID, err := cronJob.AddJob(j.Schedule, j)
+			if err != nil {
+				slog.Error("resume job", err)
+				return 0, err
+			}
+			slog.Info(
+				"resume job",
+				slog.String("name", name),
+				slog.Int("entry id", int(entryID)),
+			)
+
+			if permanent {
+				err := config.SyncJobEnable(name, true)
+				if err != nil {
+					cronJob.Remove(entryID)
+					*j.Enable = false
+					DisabledJobs.Store(name, j)
+					return 0, err
+				}
+			}
+			return int(entryID), nil
+		} else {
+			err := fmt.Errorf("convert %v to ExternalJob failed", value)
+			slog.Error("resume job", err)
+			return 0, err
+		}
+	} else {
+		err := fmt.Errorf("%s not found", name)
+		slog.Error("resume job", err)
+		return 0, err
+	}
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/schedule/once_schedule.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/schedule/once_schedule.go
new file mode 100644
index 0000000000..7b231f0d91
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/schedule/once_schedule.go
@@ -0,0 +1,31 @@
+package schedule
+
+import "time"
+
+// OnceSchedule fires at most once, at the configured time
+type OnceSchedule struct {
+	next     time.Time
+	executed bool
+}
+
+// Next returns the scheduled time until it has passed, then the zero time
+func (s *OnceSchedule) Next(t time.Time) time.Time {
+	if t.Before(s.next) {
+		return s.next
+	}
+	s.executed = true
+	return time.Time{}
+}
+
+// NewOnceSchedule builds a schedule that fires once at next
+func NewOnceSchedule(next time.Time) *OnceSchedule {
+	return &OnceSchedule{
+		next:     next,
+		executed: false,
+	}
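+	// a fired schedule keeps executed == true, so the @every 1s clearance
+	// loop in crond.Start can drop the spent entry via IsExecuted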
+} + +// IsExecuted TODO +func (s *OnceSchedule) IsExecuted() bool { + return s.executed +} diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/schedule/schedule.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/schedule/schedule.go new file mode 100644 index 0000000000..3087a3b345 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/schedule/schedule.go @@ -0,0 +1,2 @@ +// Package schedule TODO +package schedule diff --git a/dbm-services/mysql/db-tools/mysql-crond/pkg/service/service.go b/dbm-services/mysql/db-tools/mysql-crond/pkg/service/service.go new file mode 100644 index 0000000000..cee6132a2c --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/pkg/service/service.go @@ -0,0 +1,291 @@ +// Package service TODO +package service + +import ( + "dbm-services/mysql/db-tools/mysql-crond/pkg/config" + "dbm-services/mysql/db-tools/mysql-crond/pkg/crond" + "fmt" + "net/http" + "sync" + "time" + + "github.com/gin-gonic/gin" + "golang.org/x/exp/slog" +) + +// Start TODO +func Start(version string, buildStamp string, gitHash string, quit chan struct{}, m *sync.Mutex) error { + r := gin.New() + + r.Use( + func(context *gin.Context) { + start := time.Now() + path := context.Request.URL.Path + query := context.Request.URL.RawQuery + + context.Next() + + cost := time.Since(start) + slog.Info( + path, + slog.Int("status", context.Writer.Status()), + slog.String("method", context.Request.Method), + slog.String("path", path), + slog.String("query", query), + slog.String("ip", context.ClientIP()), + slog.String("user-agent", context.Request.UserAgent()), + slog.String("errors", context.Errors.ByType(gin.ErrorTypePrivate).String()), + slog.Duration("cost", cost), + ) + }, + gin.Recovery(), + ) + + r.GET( + "/version", func(context *gin.Context) { + context.JSON( + http.StatusOK, gin.H{ + "version": version, + "build_timestamp": buildStamp, + "git_hash": gitHash, + }, + ) + }, + ) + + r.GET( + "/entries", func(context *gin.Context) { + context.JSON( + http.StatusOK, gin.H{ + "entries": crond.ListEntry(), + }, + ) + }, + ) + r.POST( + "/disable", func(context *gin.Context) { + body := struct { + Name string `json:"name" binding:"required"` + Permanent *bool `json:"permanent" binding:"required"` + }{} + err := context.BindJSON(&body) + if err != nil { + _ = context.AbortWithError(http.StatusBadRequest, err) + return + } + + m.Lock() + defer func() { + m.Unlock() + }() + entryID, err := crond.Disable(body.Name, *body.Permanent) + if err != nil { + _ = context.AbortWithError(http.StatusInternalServerError, err) + return + } + + context.JSON( + http.StatusOK, gin.H{ + "entry_id": entryID, + }, + ) + }, + ) + r.POST( + "/pause", func(context *gin.Context) { + body := struct { + Name string `json:"name" binding:"required"` + Duration time.Duration `json:"duration" binding:"required"` + }{} + err := context.BindJSON(&body) + if err != nil { + _ = context.AbortWithError(http.StatusBadRequest, err) + return + } + + m.Lock() + defer func() { + m.Unlock() + }() + entryID, err := crond.Pause(body.Name, body.Duration) + if err != nil { + _ = context.AbortWithError(http.StatusInternalServerError, err) + return + } + + context.JSON( + http.StatusOK, gin.H{ + "entry_id": entryID, + }, + ) + }, + ) + r.POST( + "/create_or_replace", func(context *gin.Context) { + body := struct { + Job *config.ExternalJob `json:"job" binding:"required"` + Permanent *bool `json:"permanent" binding:"required"` + }{} + err := context.BindJSON(&body) + if err != nil { + context.AbortWithStatusJSON(http.StatusBadRequest, 
gin.H{"message": err.Error()})
+				return
+			}
+			m.Lock()
+			defer func() {
+				m.Unlock()
+			}()
+			body.Job.SetupChannel( /*config.RuntimeConfig.Ip*/ )
+			entryID, err := crond.CreateOrReplace(body.Job, *body.Permanent)
+			if err != nil {
+				_ = context.AbortWithError(http.StatusInternalServerError, err)
+				return
+			}
+
+			context.JSON(
+				http.StatusOK, gin.H{
+					"entry_id": entryID,
+				},
+			)
+		},
+	)
+	r.POST(
+		"/resume", func(context *gin.Context) {
+			body := struct {
+				Name      string `json:"name" binding:"required"`
+				Permanent *bool  `json:"permanent" binding:"required"`
+			}{}
+			err := context.BindJSON(&body)
+			if err != nil {
+				_ = context.AbortWithError(http.StatusBadRequest, err)
+				return
+			}
+
+			m.Lock()
+			defer func() {
+				m.Unlock()
+			}()
+			entryID, err := crond.Resume(body.Name, *body.Permanent)
+			if err != nil {
+				_ = context.AbortWithError(http.StatusInternalServerError, err)
+				return
+			}
+
+			context.JSON(
+				http.StatusOK, gin.H{
+					"entry_id": entryID,
+				},
+			)
+		},
+	)
+	r.GET(
+		"/disabled", func(context *gin.Context) {
+			context.JSON(
+				http.StatusOK, gin.H{
+					"jobs": crond.ListDisabledJob(),
+				},
+			)
+		},
+	)
+	r.POST(
+		"/delete", func(context *gin.Context) {
+			body := struct {
+				Name      string `json:"name" binding:"required"`
+				Permanent *bool  `json:"permanent" binding:"required"`
+			}{}
+			err := context.BindJSON(&body)
+			if err != nil {
+				_ = context.AbortWithError(http.StatusBadRequest, err)
+				return
+			}
+
+			m.Lock()
+			defer func() {
+				m.Unlock()
+			}()
+			entryID, err := crond.Delete(body.Name, *body.Permanent)
+			if err != nil {
+				_ = context.AbortWithError(http.StatusInternalServerError, err)
+				return
+			}
+
+			context.JSON(
+				http.StatusOK, gin.H{
+					"entry_id": entryID,
+				},
+			)
+		},
+	)
+	r.POST(
+		"/beat/event", func(context *gin.Context) {
+			body := struct {
+				Name      string                 `json:"name" binding:"required"`
+				Content   string                 `json:"content" binding:"required"`
+				Dimension map[string]interface{} `json:"dimension,omitempty"`
+			}{}
+			err := context.BindJSON(&body)
+			if err != nil {
+				_ = context.AbortWithError(http.StatusBadRequest, err)
+				return
+			}
+			slog.Debug("api beat event", slog.Any("body", body))
+			err = config.SendEvent(body.Name, body.Content, body.Dimension)
+			if err != nil {
+				_ = context.AbortWithError(http.StatusInternalServerError, err)
+				return
+			}
+			context.JSON(http.StatusOK, gin.H{})
+		},
+	)
+	r.POST(
+		"/beat/metrics", func(context *gin.Context) {
+			body := struct {
+				Name      string                 `json:"name" binding:"required"`
+				Value     int64                  `json:"value" binding:"required"`
+				Dimension map[string]interface{} `json:"dimension,omitempty"`
+			}{}
+			err := context.BindJSON(&body)
+			if err != nil {
+				_ = context.AbortWithError(http.StatusBadRequest, err)
+				return
+			}
+			slog.Debug("api beat metrics", slog.Any("body", body))
+			err = config.SendMetrics(body.Name, body.Value, body.Dimension)
+			if err != nil {
+				_ = context.AbortWithError(http.StatusInternalServerError, err)
+				return
+			}
+			context.JSON(http.StatusOK, gin.H{})
+		},
+	)
+	r.GET(
+		"/config/jobs-config", func(context *gin.Context) {
+			context.JSON(
+				http.StatusOK, gin.H{
+					"path": config.RuntimeConfig.JobsConfigFile,
+				},
+			)
+		},
+	)
+	r.GET(
+		"/config/reload", func(context *gin.Context) {
+			m.Lock()
+			defer func() {
+				m.Unlock()
+			}()
+			err := crond.Reload()
+			if err != nil {
+				_ = context.AbortWithError(http.StatusInternalServerError, err)
+				return
+			}
+			context.JSON(http.StatusOK, gin.H{})
+		},
+	)
+	r.GET(
+		"/quit", func(context *gin.Context) {
+			quit <- struct{}{}
+			context.JSON(http.StatusOK, gin.H{})
+		},
+	)
+	return r.Run(fmt.Sprintf("127.0.0.1:%d", config.RuntimeConfig.Port))
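+	// the admin API binds to loopback only, so it is reachable just from the local host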
+}
diff --git a/dbm-services/mysql/db-tools/mysql-crond/project.yaml b/dbm-services/mysql/db-tools/mysql-crond/project.yaml
new file mode 100644
index 0000000000..8a9f249ae7
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/project.yaml
@@ -0,0 +1,25 @@
+name: mysql-crond
+version: ${MYSQL_CROND_VERSION}
+description: MySQL crontab management plugin
+scenario: MySQL crontab management plugin
+category: official
+config_file: mysql-crond.conf
+config_format: yaml
+launch_node: all
+auto_launch: 0
+is_binary: 1
+use_db: 0
+config_templates:
+  - plugin_version: "*"
+    name: mysql-crond.conf
+    version: 1
+    file_path: etc
+    format: yaml
+    is_main_config: 1
+    source_path: etc/mysql-crond.conf.tpl
+control:
+  start: "./start.sh mysql-crond"
+  stop: "./stop.sh mysql-crond"
+  restart: "./restart.sh mysql-crond"
+  reload: "./reload.sh mysql-crond"
+  version: ""
diff --git a/dbm-services/mysql/db-tools/mysql-crond/run_local.sh b/dbm-services/mysql/db-tools/mysql-crond/run_local.sh
new file mode 100644
index 0000000000..13dd8ff883
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/run_local.sh
@@ -0,0 +1,6 @@
+VERSION_FLAG="-X main.version=`date +'%y%m%d.%H.%M'`"
+STAMP_FLAG="-X main.buildStamp='`date -u '+%Y-%m-%d_%I:%M:%S%p'`'"
+GIT_FLAG="-X main.gitHash='`git rev-parse HEAD`'"
+
+
+go run -ldflags "${VERSION_FLAG} ${STAMP_FLAG} ${GIT_FLAG}" main.go ${@: 1}
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/mysql-crond/runtime-local.yaml b/dbm-services/mysql/db-tools/mysql-crond/runtime-local.yaml
new file mode 100644
index 0000000000..b926993738
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/runtime-local.yaml
@@ -0,0 +1,27 @@
+ip: 127.0.0.1
+port: 9999
+bk_cloud_id: 0
+bk_monitor_beat:
+  inner_event_name: mysql_crond_event
+  inner_metrics_name: mysql_crond_beat
+  custom_event:
+    bk_data_id: 542898
+    access_token: xxxx
+    report_type: agent
+    message_kind: event
+  custom_metrics:
+    bk_data_id: 543957
+    access_token: xxxx
+    report_type: agent
+    message_kind: timeseries
+  beat_path: /usr/local/gse_bkte/plugins/bin/bkmonitorbeat
+  agent_address: /usr/local/gse_bkte/agent/data/ipc.state.report
+log:
+  console: true
+  log_file_dir: /Users/xfwduke/mysql-crond/logs
+  debug: true
+  source: true
+  json: false
+pid_path: /Users/xfwduke/mysql-crond
+jobs_user: xfwduke
+jobs_config: /Users/xfwduke/mysql-crond/jobs-config.yaml
diff --git a/dbm-services/mysql/db-tools/mysql-crond/runtime.yaml b/dbm-services/mysql/db-tools/mysql-crond/runtime.yaml
new file mode 100644
index 0000000000..e87fd26483
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-crond/runtime.yaml
@@ -0,0 +1,27 @@
+ip: $LOCALHOST
+port: 9999
+bk_cloud_id: 0
+bk_monitor_beat:
+#  inner_event_name: mysql-crond-event
+#  inner_metrics_name: mysql-crond-beat
+  custom_event:
+    bk_data_id: 542898
+    access_token: xxxx
+    report_type: agent
+    message_kind: event
+  custom_metrics:
+    bk_data_id: 543957
+    access_token: xxxx
+    report_type: agent
+    message_kind: timeseries
+  beat_path: /usr/local/gse_bkte/plugins/bin/bkmonitorbeat
+  agent_address: /usr/local/gse_bkte/agent/data/ipc.state.report
+log:
+  console: true
+  log_file_dir: /home/mysql/mysql-crond/logs
+  debug: true
+  source: true
+  json: false
+pid_path: /home/mysql/mysql-crond
+jobs_user: mysql
+jobs_config: /home/mysql/mysql-crond/jobs-config.yaml
diff --git a/dbm-services/mysql/db-tools/mysql-crond/start.sh b/dbm-services/mysql/db-tools/mysql-crond/start.sh
new file mode 100755
index 0000000000..f1b1cb341e
---
/dev/null +++ b/dbm-services/mysql/db-tools/mysql-crond/start.sh @@ -0,0 +1,2 @@ +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) +cd $SCRIPT_DIR && nohup ./mysql-crond ${@:1} & \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-monitor/.ci/codecc.yml b/dbm-services/mysql/db-tools/mysql-monitor/.ci/codecc.yml new file mode 100644 index 0000000000..f6ec8f4fa8 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/.ci/codecc.yml @@ -0,0 +1,29 @@ +version: v2.0 +resources: + repositories: + - repository: ci_templates/public/codecc + name: codecc +on: + mr: + target-branches: [ "*" ] +stages: + - name: "代码检查" + check-out: + gates: + - template: commonGate.yml@codecc + timeout-hours: 10 + jobs: + codecc: + name: "CodeCC代码检查" + runs-on: + pool-name: docker #docker-on-devcloud、docker、local、agentless + container: + image: mirrors.tencent.com/ci/tlinux3_ci:2.0.0 + steps: + - checkout: self + - uses: CodeccCheckAtomDebug@4.* + name: 腾讯代码分析 + with: + beAutoLang: true # 自动检测项目语言 + checkerSetType: "openScan" # 规则集类型,normal对应自主配置规则集,openScan对应按开源治理要求配置 + toolScanType: "2" # 扫描方式。快速全量扫描[1] | 全量扫描[0] | 差异扫描[6] | MR/PR扫描[2],默认为1 \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-monitor/.ci/open_source_check.yml b/dbm-services/mysql/db-tools/mysql-monitor/.ci/open_source_check.yml new file mode 100644 index 0000000000..f421f315f3 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/.ci/open_source_check.yml @@ -0,0 +1,84 @@ +version: "v2.0" +name: "开源检查" +label: [] +variables: {} +stages: +- name: "开源检查" + label: + - "Build" + jobs: + job_AfK: + name: "构建环境-LINUX" + runs-on: + pool-name: "docker" + container: + image: "mirrors.tencent.com/ci/tlinux3_ci:2.3.0" + needs: {} + steps: + - checkout: self + - name: "敏感信息检查-部门RTX" + uses: "SensitiveRtxChecker@3.*" + - name: "腾讯代码分析(官方-代码分析工作组)" + uses: "CodeccCheckAtomDebug@4.*" + with: + beAutoLang: true + languages: + - "GOLANG" + checkerSetType: "communityOpenScan" + tools: + - "WOODPECKER_COMMITSCAN" + - "SCC" + - "PECKER_SECURITY" + - "SENSITIVE" + - "DUPC" + - "IP_CHECK" + - "WOODPECKER_SENSITIVE" + - "HORUSPY" + - "XCHECK" + - "CCN" + asyncTask: false + asyncTaskId: "" + scriptType: "SHELL" + script: |- + # Coverity/Klocwork将通过调用编译脚本来编译您的代码,以追踪深层次的缺陷 + # 请使用依赖的构建工具如maven/cmake等写一个编译脚本build.sh + # 确保build.sh能够编译代码 + # cd path/to/build.sh + # sh build.sh + languageRuleSetMap: {} + checkerSetEnvType: "prod" + multiPipelineMark: "" + rtxReceiverType: "1" + botWebhookUrl: "" + botRemindRange: "2" + botRemindSeverity: "7" + botRemaindTools: [] + emailReceiverType: "1" + emailCCReceiverList: [] + instantReportStatus: "2" + reportDate: [] + reportTime: "" + reportTools: [] + toolScanType: "1" + diffBranch: "" + byFile: false + mrCommentEnable: true + prohibitIgnore: false + newDefectJudgeFromDate: "" + transferAuthorList: [] + path: [] + customPath: [] + scanTestSource: false + openScanPrj: false + openScanFilterEnable: false + issueSystem: "TAPD" + issueSubSystem: "" + issueResolvers: [] + issueReceivers: [] + issueFindByVersion: "" + maxIssue: 1000 + issueAutoCommit: false + check-out: + gates: + - template: open_source_gate.yml + timeout-hours: 10 \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-monitor/.ci/templates/open_source_gate.yml b/dbm-services/mysql/db-tools/mysql-monitor/.ci/templates/open_source_gate.yml new file mode 100644 index 0000000000..5a7b8c0c13 --- /dev/null +++ 
b/dbm-services/mysql/db-tools/mysql-monitor/.ci/templates/open_source_gate.yml @@ -0,0 +1,26 @@ +parameters: +- name: receivers + type: array + default: [ "${{ ci.actor }}" ] + +gates: +- name: open-source-gate + rule: + - "CodeccCheckAtomDebug.all_risk <= 0" + - "CodeccCheckAtomDebug.high_med_new_issue <= 0" + - "CodeccCheckAtomDebug.ccn_new_max_value <= 40" + - "CodeccCheckAtomDebug.sensitive_defect <= 0" + - "CodeccCheckAtomDebug.dupc_average <= 15" + - "CodeccCheckAtomDebug.ccn_average <= 3" + - "CodeccCheckAtomDebug.ccn_new_defect <= 0" + - "CodeccCheckAtomDebug.ccn_funcmax <= 20" + - "CodeccCheckAtomDebug.woodpecker_all_defect <= 0" + - "CodeccCheckAtomDebug.horuspy_all_defect <= 0" + - "CodeccCheckAtomDebug.go_serious_defect <= 0" + - "CodeccCheckAtomDebug.go_all_defect <= 100" + notify-on-fail: + - type: wework-message + receivers: ${{ parameters.receivers }} + continue-on-fail: + gatekeepers: + - "${{ ci.actor }}" \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-monitor/.gitignore b/dbm-services/mysql/db-tools/mysql-monitor/.gitignore new file mode 100644 index 0000000000..cef0d0d82b --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/.gitignore @@ -0,0 +1,4 @@ +build +.idea +logs +.DS_Store \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-monitor/Makefile b/dbm-services/mysql/db-tools/mysql-monitor/Makefile new file mode 100644 index 0000000000..fb28551f69 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/Makefile @@ -0,0 +1,30 @@ +PROJ="mysql-monitor" +MODULE="dbm-services/mysql/db-tools/mysql-monitor" +VERSION = $(error please set VERSION flag) +PKG = ${PROJ}.tar.gz +OUTPUT_DIR = build +RELEASE_BUILD_FLAG = "-X ${MODULE}/cmd.version=${VERSION} -X ${MODULE}/cmd.buildStamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X ${MODULE}/cmd.gitHash=`git rev-parse HEAD` " +DEV_BUILD_FLAG = "-X ${MODULE}/cmd.version="develop" -X ${MODULE}/cmd.buildStamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X ${MODULE}/cmd.gitHash="" " + + +.PHONY: release-bin +release-bin: + @CGO_ENABLE=0 GOARCH=amd64 GOOS=linux go build -ldflags ${RELEASE_BUILD_FLAG} -o ${OUTPUT_DIR}/${$PROJ} + @cp config.yaml.go.tpl ${OUTPUT_DIR}/config.yaml.go.tpl + @cp pt-config-diff ${OUTPUT_DIR}/pt-config-diff + @cp pt-summary ${OUTPUT_DIR}/pt-summary + @tar -C ${OUTPUT_DIR} -zcf ${OUTPUT_DIR}/${PKG} mysql-monitor config.yaml.go.tpl pt-config-diff pt-summary + +.PHONY: dev-bin +dev-bin: + @go build -ldflags ${DEV_BUILD_FLAG} -o ${OUTPUT_DIR}/${PROJ} + @cp config.yaml.go.tpl ${OUTPUT_DIR}/config.yaml.go.tpl + @cp pt-config-diff ${OUTPUT_DIR}/pt-config-diff + @cp pt-summary ${OUTPUT_DIR}/pt-summary + @tar -C ${OUTPUT_DIR} -zcf ${OUTPUT_DIR}/${PKG} mysql-monitor config.yaml.go.tpl pt-config-diff pt-summary + +.PHONY: clean +clean: + @rm -rf $(OUTPUT_DIR) + + diff --git a/dbm-services/mysql/db-tools/mysql-monitor/README.md b/dbm-services/mysql/db-tools/mysql-monitor/README.md new file mode 100644 index 0000000000..d7919fdc3c --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/README.md @@ -0,0 +1,186 @@ + + + + +# 使用 +* 必须先部署 `mysql-crond` +* 配置文件分为 _runtime_ 配置 和 监控项配置 +* _runtime_ 配置需要作为命令行参数传入, 如 `mysql-monitor -c runtime.yaml` +* 监控项配置在 _runtime_ 配置中指定 + +## _reschedule_ +* 部署监控后, 执行 `mysql-monitor -c runtime.yaml reschedule --staff who-r-u` 来注册 `mysql-crond entry` +* 任何时候修改了监控项配置都需要 `reschedule` + +## _clean_ +* 执行 `mysql-monitor -c runtime.yaml clean` 会删除所有相关的 `mysql-crond entry` + +## 硬编码项 +目前有两个硬编码项 +1. 执行心跳 +2. 
+
+# Monitoring items configuration
+
+## Semantics
+```yaml
+- name: character_consistency
+  enable: true
+  machine_type: backend
+  role: ["master", "slave", "repeater"]
+  schedule: "1 1 13 * * 1"
+- name: routine-definer
+  enable: true
+  machine_type: backend
+  role: ["master", "slave", "repeater"]
+- name: master-slave-heartbeat
+  enable: true
+  machine_type: backend
+```
+
+```go
+type MonitorItem struct {
+	Name        string   `yaml:"name" validate:"required"`
+	Enable      *bool    `yaml:"enable" validate:"required"`
+	Schedule    *string  `yaml:"schedule"`
+	MachineType string   `yaml:"machine_type"`
+	Role        []string `yaml:"role"`
+}
+```
+
+* `name`: monitor item name, matching the event on the BlueKing monitoring platform
+* `enable`: whether the item is enabled
+* `schedule`: optional; defaults to the value in the _runtime_ config, changing it is not recommended
+* `machine_type`: filter by machine type
+* `role`: filter by role; if omitted, the item applies to every role of that machine type
+
+## Grouping
+When registering `mysql-crond entry`, all monitor items are grouped by _schedule_
+and registered per group, like this:
+```json
+{
+    "entries": [
+        {
+            "ID": 5,
+            "Job": {
+                "Name": "mysql-monitor-20000-@every 5m",
+                "Enable": true,
+                "Command": "/home/mysql/mysql-monitor/mysql-monitor",
+                "Args": [
+                    "run",
+                    "--items",
+                    "routine-definer,view-definer,trigger-definer,engine,ext3_check,master_slave_heartbeat",
+                    "-c",
+                    "/home/mysql/mysql-monitor/monitor-config_20000.yaml"
+                ],
+                "Schedule": "@every 5m",
+                "Creator": "xxx"
+            }
+        },
+        {
+            "ID": 6,
+            "Job": {
+                "Name": "mysql-monitor-20000-hardcode",
+                "Enable": true,
+                "Command": "/home/mysql/mysql-monitor/mysql-monitor",
+                "Args": [
+                    "hardcode-run",
+                    "--items",
+                    "db-up,mysql-monitor-heart-beat",
+                    "-c",
+                    "/home/mysql/mysql-monitor/monitor-config_20000.yaml"
+                ],
+                "Schedule": "@every 5m",
+                "Creator": "xxx"
+            }
+        },
+        {
+            "ID": 4,
+            "Job": {
+                "Name": "mysql-monitor-20000-1 1 13 * * 1",
+                "Enable": true,
+                "Command": "/home/mysql/mysql-monitor/mysql-monitor",
+                "Args": [
+                    "run",
+                    "--items",
+                    "character_consistency",
+                    "-c",
+                    "/home/mysql/mysql-monitor/monitor-config_20000.yaml"
+                ],
+                "Schedule": "1 1 13 * * 1",
+                "Creator": "xxx"
+            }
+        }
+    ]
+}
+```
+
+# Development
+
+1. add a directory for the new item under `items_collect`, e.g. _some_new_item_
+2. implement `monitor_item_interface.MonitorItemInterface` in _some_new_item_
+3. also provide
+   * `func New(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface`
+   * `func Register() (string, monitor_item_interface.MonitorItemConstructorFuncType)`
+4. register the new item in `items_collect.init`
+5. add the new item's configuration to _items-config.yaml_ (a sketch of such an item follows this list)
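+
+A minimal sketch of such an item. The `New` and `Register` signatures come from
+the steps above; everything else (the `Name` method, the check itself, the
+package layout) is an assumption, not the actual interface definition. The
+`ConnectionCollect` handle set is described in the next section:
+
+```go
+package some_new_item
+
+import (
+	"fmt"
+
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface"
+)
+
+var name = "some-new-item"
+
+type Checker struct {
+	cc *monitor_item_interface.ConnectionCollect
+}
+
+// Run returns a non-empty msg when it finds something worth reporting,
+// and a non-nil err when the check itself fails to execute.
+func (c *Checker) Run() (msg string, err error) {
+	var n int
+	err = c.cc.MySqlDB.Get(&n, `SELECT COUNT(*) FROM mysql.user WHERE user = ''`)
+	if err != nil {
+		return "", err
+	}
+	if n > 0 {
+		return fmt.Sprintf("found %d anonymous accounts", n), nil
+	}
+	return "", nil
+}
+
+// Name reports the item name used for scheduling and events (assumed method).
+func (c *Checker) Name() string {
+	return name
+}
+
+// New is the constructor required by step 3.
+func New(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface {
+	return &Checker{cc: cc}
+}
+
+// Register is what items_collect.init picks up (step 4).
+func Register() (string, monitor_item_interface.MonitorItemConstructorFuncType) {
+	return name, New
+}
+```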
+
+## _monitor_item_interface.ConnectionCollect_
+
+```go
+type ConnectionCollect struct {
+	MySqlDB      *sqlx.DB
+	ProxyDB      *sqlx.DB
+	ProxyAdminDB *sqlx.DB
+}
+```
+This is the collection of connection handles; the _monitor_ initializes the members
+matching the _machine_type_ in the _runtime_config_ (a sketch follows the list):
+* _backend_: initializes `MySqlDB`
+* _proxy_: initializes `ProxyDB` and `ProxyAdminDB`, where `ProxyAdminDB` connects to `service port + 1000`
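+
+How those members might get initialized, as a sketch: the constructor name, the
+package clause, the DSN format and the error handling are assumptions; only the
+machine-type switch and the `service port + 1000` rule come from the description
+above.
+
+```go
+package monitor_item_interface
+
+import (
+	"fmt"
+
+	_ "github.com/go-sql-driver/mysql" // register the mysql driver
+	"github.com/jmoiron/sqlx"
+)
+
+// newConnectionCollect opens only the handles this machine type needs. Sketch only.
+func newConnectionCollect(machineType, user, password, ip string, port int) (*ConnectionCollect, error) {
+	dsn := func(p int) string {
+		return fmt.Sprintf("%s:%s@tcp(%s:%d)/", user, password, ip, p)
+	}
+	cc := &ConnectionCollect{}
+	switch machineType {
+	case "backend":
+		db, err := sqlx.Connect("mysql", dsn(port))
+		if err != nil {
+			return nil, err
+		}
+		cc.MySqlDB = db
+	case "proxy":
+		proxyDB, err := sqlx.Connect("mysql", dsn(port))
+		if err != nil {
+			return nil, err
+		}
+		// the proxy admin interface listens on service port + 1000
+		adminDB, err := sqlx.Connect("mysql", dsn(port+1000))
+		if err != nil {
+			return nil, err
+		}
+		cc.ProxyDB, cc.ProxyAdminDB = proxyDB, adminDB
+	}
+	return cc, nil
+}
+```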
+
+## _MonitorItemInterface::Run() (msg string, err error)_
+
+* `msg != "" && err == nil`: the item ran fine and found an event worth reporting
+* `err != nil`: the item failed to execute
+
+Both cases produce a reported event.
+
+# Monitor items
+
+| Item | Schedule | Machine type | Instance role | Severity | Description | Customizable |
+|-----|-----|-----|-----|-----|-----|-----|
+| slave-status | @every 1m | backend | repeater, slave | fatal | MySQL replication status | schedule, enable |
+| character-consistency | 0 0 14 \* \* 1 | backend | | warning | consistency between the mysqld character set and database default character sets | schedule, enable |
+| ext3-check | 0 0 16 \* \* 1 | backend | | fatal | 1T+ data files on an ext3 filesystem | schedule, enable |
+| rotate-slowlog | 0 55 23 \* \* \* | backend | | none | slow query log rotation | schedule, enable |
+| master-slave-heartbeat | @every 10s | backend | | none | replication heartbeat | schedule, enable |
+| routine-definer | 0 0 15 \* \* 1 | backend | | warning | stored procedure definer exists and its host must be localhost | schedule, enable |
+| view-definer | 0 0 15 \* \* 1 | backend | | warning | view definer exists and its host must be localhost | schedule, enable |
+| trigger-definer | 0 0 15 \* \* 1 | backend | | warning | trigger definer exists and its host must be localhost | schedule, enable |
+| engine | 0 0 12 \* \* 1 | backend | | warning | mixed-engine check and MyISAM usage in non-system tables | schedule, enable |
+| mysql-config-diff | @every 10m | backend | | warning | consistency between config file and runtime variables | schedule, enable |
+| mysql-inject | @every 1m | backend | | fatal | injection check | schedule, enable |
+| mysql-lock | @every 1m | backend | | fatal | lock waits | schedule, enable |
+| mysql-err-critical | @every 1m | backend | | fatal | fatal entries in the error log | schedule, enable |
+| mysql-err-notice | @every 1m | backend | | warning | warning entries in the error log | schedule, enable |
+| mysql-connlog-size | 0 0 12 \* \* \* | backend | | warning | disable connection logging once the connection log exceeds 4G | schedule, enable |
+| mysql-connlog-rotate | 0 30 23 \* \* \* | backend | | none | rotate the connection log table | schedule, enable |
+| mysql-connlog-report | 0 40 23 \* \* \* | backend | | none | report the connection log | schedule, enable |
+| proxy-user-list | 0 0 14 \* \* 1 | proxy | | fatal | consistency between the proxy whitelist file and runtime | schedule, enable |
+| proxy-backend | 0 0 14 \* \* 1 | proxy | | fatal | consistency between the proxy backend file and runtime | schedule, enable |
+| db-up | @every 10s | backend, proxy | | fatal | DB connectivity; hardcoded, not configurable, no need to enter it in the config system | enable |
+| mysql_monitor_heart_beat | @every 10s | backend, proxy | | none | monitor heartbeat; hardcoded, not configurable, no need to enter it in the config system | enable |
+
+
+## Important
+* do not run `character-consistency, ext3-check, *-definer, engine` on _spider_
+* `ext3-check` is pointless there
+* `engine` is also pointless as a standalone check there
+* the other _2_ must be compared cluster-wide against the central control node's results; they belong to peripheral tooling
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/mysql-monitor/cmd/cmd.go b/dbm-services/mysql/db-tools/mysql-monitor/cmd/cmd.go
new file mode 100644
index 0000000000..5b729f1814
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-monitor/cmd/cmd.go
@@ -0,0 +1,2 @@
+// Package cmd implements the mysql-monitor command line
+package cmd
diff --git a/dbm-services/mysql/db-tools/mysql-monitor/cmd/init.go b/dbm-services/mysql/db-tools/mysql-monitor/cmd/init.go
new file mode 100644
index 0000000000..b9b096af45
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-monitor/cmd/init.go
@@ -0,0 +1,75 @@
+package cmd
+
+import (
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/config"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"golang.org/x/exp/slog"
+	"gopkg.in/natefinch/lumberjack.v2"
+)
+
+var executable string
+var executableName string
+var executableDir string
+
+func init() {
+	executable, _ = os.Executable()
+	executableName = filepath.Base(executable)
+	executableDir = filepath.Dir(executable)
+}
+
+func initLogger(cfg *config.LogConfig) {
+	var ioWriters []io.Writer
+
+	if cfg.Console {
+		ioWriters = append(ioWriters, os.Stdout)
+	}
+
+	if cfg.LogFileDir != nil {
+		if !filepath.IsAbs(*cfg.LogFileDir) {
+			*cfg.LogFileDir = filepath.Join(executableDir, *cfg.LogFileDir)
+		}
+
+		err := os.MkdirAll(*cfg.LogFileDir, 0755)
+		if err != nil {
+			panic(err)
+		}
+
+		// TODO: chown the directory to the service user
+
+		logFile := filepath.Join(*cfg.LogFileDir, fmt.Sprintf("%s.log", executableName))
+		_, err = os.Stat(logFile)
+		if err != nil {
+			if os.IsNotExist(err) {
+				_, err := os.Create(logFile)
+				if err != nil {
+					panic(err)
+				}
+				// TODO: chown the log file to the service user
+			} else {
+				panic(err)
+			}
+		}
+
+		ioWriters = append(ioWriters, &lumberjack.Logger{Filename: logFile})
+	}
+
+	handleOpt := slog.HandlerOptions{AddSource: cfg.Source}
+	if cfg.Debug {
+		handleOpt.Level = slog.LevelDebug
+	} else {
+		handleOpt.Level = slog.LevelInfo
+	}
+
+	var logger *slog.Logger
+	if cfg.Json {
+		logger = slog.New(handleOpt.NewJSONHandler(io.MultiWriter(ioWriters...)))
+	} else {
+		logger = slog.New(handleOpt.NewTextHandler(io.MultiWriter(ioWriters...)))
+	}
+
+	slog.SetDefault(logger)
+}
diff --git a/dbm-services/mysql/db-tools/mysql-monitor/cmd/root.go b/dbm-services/mysql/db-tools/mysql-monitor/cmd/root.go
new file mode 100644
index 0000000000..6953d85079
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-monitor/cmd/root.go
@@ -0,0 +1,27 @@
+package cmd
+
+import (
+	"os"
+
+	"github.com/spf13/cobra"
+	"golang.org/x/exp/slog"
+)
+
+var rootCmd = &cobra.Command{
+	Use:   "mysql-monitor",
+	Short: "mysql-monitor",
+}
+
+func init() {
+	// rootCmd.PersistentFlags().StringP("config", "c", "", "config file")
+	// _ = viper.BindPFlag("config", rootCmd.PersistentFlags().Lookup("config"))
+}
+
+// Execute runs the root command and exits non-zero on failure
+func Execute() {
+	err := rootCmd.Execute()
+	if err != nil {
+		slog.Error("start", err)
+		os.Exit(1)
+	}
+}
"github.com/spf13/viper" + "golang.org/x/exp/slog" +) + +var subCmdClean = &cobra.Command{ + Use: "clean", + Short: "clean all mysql-crond entry", + Long: "clean all mysql-crond entry", + RunE: func(cmd *cobra.Command, args []string) error { + err := config.InitConfig(viper.GetString("clean-config")) + if err != nil { + return err + } + initLogger(config.MonitorConfig.Log) + + manager := ma.NewManager(config.MonitorConfig.ApiUrl) + entries, err := manager.Entries() + if err != nil { + slog.Error("clean list entries", err) + return err + } + + for _, entry := range entries { + if strings.HasPrefix( + entry.Job.Name, + fmt.Sprintf("mysql-monitor-%d", config.MonitorConfig.Port), + ) { + eid, err := manager.Delete(entry.Job.Name, true) + if err != nil { + slog.Error( + "reschedule delete entry", err, + slog.String("name", entry.Job.Name), + ) + return err + } + slog.Info( + "reschedule delete entry", + slog.String("name", entry.Job.Name), + slog.Int("ID", eid), + ) + } + } + return nil + }, +} + +func init() { + subCmdClean.PersistentFlags().StringP("config", "c", "", "config file") + _ = subCmdClean.MarkPersistentFlagRequired("config") + _ = viper.BindPFlag("clean-config", subCmdClean.PersistentFlags().Lookup("config")) + + rootCmd.AddCommand(subCmdClean) + +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_hardcode_run.go b/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_hardcode_run.go new file mode 100644 index 0000000000..23ce0c7075 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_hardcode_run.go @@ -0,0 +1,48 @@ +package cmd + +import ( + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/main_loop" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + "golang.org/x/exp/slog" +) + +var subCmdHardCodeRun = &cobra.Command{ + Use: "hardcode-run", + Short: "run hardcode monitor items", + Long: "run hardcode monitor items", + RunE: func(cmd *cobra.Command, args []string) error { + err := config.InitConfig(viper.GetString("hard-run-config")) + if err != nil { + return err + } + initLogger(config.MonitorConfig.Log) + + err = config.LoadMonitorItemsConfig() + if err != nil { + slog.Error("run hardcode monitor load items", err) + return err + } + + err = main_loop.Run(true) + if err != nil { + slog.Error("run monitor hardcode items", err) + return err + } + return nil + }, +} + +func init() { + subCmdHardCodeRun.PersistentFlags().StringP("config", "c", "", "config file") + _ = subCmdHardCodeRun.MarkPersistentFlagRequired("config") + _ = viper.BindPFlag("hard-run-config", subCmdHardCodeRun.PersistentFlags().Lookup("config")) + + subCmdHardCodeRun.PersistentFlags().StringSliceP("items", "", nil, "run items") + _ = subCmdHardCodeRun.MarkPersistentFlagRequired("items") + _ = viper.BindPFlag("hardcode-items", subCmdHardCodeRun.PersistentFlags().Lookup("items")) + + rootCmd.AddCommand(subCmdHardCodeRun) +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_list.go b/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_list.go new file mode 100644 index 0000000000..1240d58776 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_list.go @@ -0,0 +1,23 @@ +package cmd + +// var subCmdList = &cobra.Command{ +// Use: "list", +// Short: "list registered monitor items", +// Long: "list registered monitor items", +// RunE: func(cmd *cobra.Command, args []string) error { +// err := config.InitConfig() +// if err != nil { +// return err +// } +// 
diff --git a/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_list.go b/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_list.go
new file mode 100644
index 0000000000..1240d58776
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_list.go
@@ -0,0 +1,23 @@
+package cmd
+
+// var subCmdList = &cobra.Command{
+// 	Use:   "list",
+// 	Short: "list registered monitor items",
+// 	Long:  "list registered monitor items",
+// 	RunE: func(cmd *cobra.Command, args []string) error {
+// 		err := config.InitConfig()
+// 		if err != nil {
+// 			return err
+// 		}
+// 		initLogger(config.MonitorConfig.Log)
+//
+// 		for k, _ := range items_collect.RegisteredItemConstructor() {
+// 			fmt.Println(k)
+// 		}
+// 		return nil
+// 	},
+// }
+//
+// func init() {
+// 	rootCmd.AddCommand(subCmdList)
+// }
diff --git a/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_reschedule.go b/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_reschedule.go
new file mode 100644
index 0000000000..88ddf5cfbe
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_reschedule.go
@@ -0,0 +1,179 @@
+package cmd
+
+import (
+	ma "dbm-services/mysql/db-tools/mysql-crond/api"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/config"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+var subCmdReschedule = &cobra.Command{
+	Use:   "reschedule",
+	Short: "reschedule mysql-crond entry",
+	Long:  "reschedule mysql-crond entry",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		/*
+			Only this subcommand needs to turn the config path into an absolute one,
+			because once registered in crond the cwd is a different directory.
+		*/
+		configPath := viper.GetString("reschedule-config")
+		if !filepath.IsAbs(configPath) {
+			cwd, err := os.Getwd()
+			if err != nil {
+				slog.Error("reschedule get config abs path", err)
+				return err
+			}
+			configPath = filepath.Join(cwd, configPath)
+		}
+		configFileDir, configFileName := filepath.Split(configPath)
+
+		err := config.InitConfig(configPath)
+		if err != nil {
+			return err
+		}
+		initLogger(config.MonitorConfig.Log)
+
+		err = config.LoadMonitorItemsConfig()
+		if err != nil {
+			slog.Error("reschedule load items", err)
+			return err
+		}
+
+		config.InjectHardCodeItem()
+
+		err = config.WriteMonitorItemsBack()
+		if err != nil {
+			slog.Error("reschedule write back items", err)
+			return err
+		}
+
+		manager := ma.NewManager(config.MonitorConfig.ApiUrl)
+		entries, err := manager.Entries()
+		if err != nil {
+			slog.Error("reschedule list entries", err)
+			return err
+		}
+
+		for _, entry := range entries {
+			if strings.HasPrefix(
+				entry.Job.Name,
+				fmt.Sprintf("mysql-monitor-%d", config.MonitorConfig.Port),
+			) {
+				eid, err := manager.Delete(entry.Job.Name, true)
+				if err != nil {
+					slog.Error(
+						"reschedule delete entry", err,
+						slog.String("name", entry.Job.Name),
+					)
+					return err
+				}
+				slog.Info(
+					"reschedule delete entry",
+					slog.String("name", entry.Job.Name),
+					slog.Int("ID", eid),
+				)
+			}
+		}
+
+		var hardCodeItems []*config.MonitorItem
+		itemGroups := make(map[string][]*config.MonitorItem)
+		for _, ele := range config.ItemsConfig {
+			// set the hardcoded items aside first
+			if ele.Name == "db-up" || ele.Name == config.HeartBeatName {
+				if ele.IsEnable() {
+					hardCodeItems = append(hardCodeItems, ele)
+				}
+				continue
+			}
+
+			if ele.IsEnable() && ele.IsMatchMachineType() && ele.IsMatchRole() {
+				var key string
+
+				if ele.Schedule == nil {
+					key = config.MonitorConfig.DefaultSchedule
+				} else {
+					key = *ele.Schedule
+				}
+
+				if _, ok := itemGroups[key]; !ok {
+					itemGroups[key] = []*config.MonitorItem{}
+				}
+				itemGroups[key] = append(itemGroups[key], ele)
+			}
+		}
+
+		for k, v := range itemGroups {
+			var itemNames []string
+			for _, j := range v {
+				itemNames = append(itemNames, j.Name)
+			}
+			args := []string{
+				"run",
+				"--items", strings.Join(itemNames, ","),
+				"-c", configFileName, // use WorkDir
+			}
+			eid, err := manager.CreateOrReplace(
+				ma.JobDefine{
+					Name:     fmt.Sprintf("mysql-monitor-%d-%s", config.MonitorConfig.Port, k),
+					Command:  executable,
+					Args:     args,
+					Schedule: k,
+					Creator:  viper.GetString("staff"),
+					Enable:   true,
+					WorkDir:  configFileDir,
+				}, true,
+			)
+			if err != nil {
slog.Error("reschedule add entry", err) + return err + } + slog.Info("reschedule add entry", slog.Int("entry id", eid)) + } + + // 注册 hardcode + var itemNames []string + for _, j := range hardCodeItems { + itemNames = append(itemNames, j.Name) + } + args = []string{ + "hardcode-run", + "--items", strings.Join(itemNames, ","), + "-c", configPath, + } + eid, err := manager.CreateOrReplace( + ma.JobDefine{ + Name: fmt.Sprintf("mysql-monitor-%d-hardcode", config.MonitorConfig.Port), + Command: executable, + Args: args, + Schedule: config.HardCodeSchedule, + Creator: viper.GetString("staff"), + Enable: true, + }, true, + ) + if err != nil { + slog.Error("reschedule add hardcode entry", err) + return err + } + slog.Info("reschedule add hardcode entry", slog.Int("entry id", eid)) + + return nil + }, +} + +func init() { + subCmdReschedule.PersistentFlags().StringP("config", "c", "", "config file") + _ = subCmdReschedule.MarkPersistentFlagRequired("config") + _ = viper.BindPFlag("reschedule-config", subCmdReschedule.PersistentFlags().Lookup("config")) + + subCmdReschedule.PersistentFlags().StringP("staff", "", "", "staff name") + _ = subCmdReschedule.MarkPersistentFlagRequired("staff") + _ = viper.BindPFlag("staff", subCmdReschedule.PersistentFlags().Lookup("staff")) + + rootCmd.AddCommand(subCmdReschedule) +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_run.go b/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_run.go new file mode 100644 index 0000000000..2d9c7b7e5e --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_run.go @@ -0,0 +1,48 @@ +package cmd + +import ( + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/main_loop" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + "golang.org/x/exp/slog" +) + +var subCmdRun = &cobra.Command{ + Use: "run", + Short: "run monitor items", + Long: "run monitor items", + RunE: func(cmd *cobra.Command, args []string) error { + err := config.InitConfig(viper.GetString("run-config")) + if err != nil { + return err + } + initLogger(config.MonitorConfig.Log) + + err = config.LoadMonitorItemsConfig() + if err != nil { + slog.Error("run monitor load items", err) + return err + } + + err = main_loop.Run(false) + if err != nil { + slog.Error("run monitor items", err) + return err + } + return nil + }, +} + +func init() { + subCmdRun.PersistentFlags().StringP("config", "c", "", "config file") + _ = subCmdRun.MarkPersistentFlagRequired("config") + _ = viper.BindPFlag("run-config", subCmdRun.PersistentFlags().Lookup("config")) + + subCmdRun.PersistentFlags().StringSliceP("items", "", nil, "run items") + _ = subCmdRun.MarkPersistentFlagRequired("items") + _ = viper.BindPFlag("run-items", subCmdRun.PersistentFlags().Lookup("items")) + + rootCmd.AddCommand(subCmdRun) +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_version.go b/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_version.go new file mode 100644 index 0000000000..eeb516b6df --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/cmd/subcmd_version.go @@ -0,0 +1,42 @@ +package cmd + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +// versionCmd represents the version command +var subCmdVersion = &cobra.Command{ + Use: "version", + Short: "A brief description of your command", + Long: `A longer description that spans multiple lines and likely contains examples +and usage of using your command. For example: + +Cobra is a CLI library for Go that empowers applications. 
+This application is a tool to generate the needed files +to quickly create a Cobra application.`, + Run: func(cmd *cobra.Command, args []string) { + printVersion() + }, +} +var version = "" +var buildStamp = "" +var gitHash = "" + +func init() { + rootCmd.AddCommand(subCmdVersion) + + // Here you will define your flags and configuration settings. + + // Cobra supports Persistent Flags which will work for this command + // and all subcommands, e.g.: + // versionCmd.PersistentFlags().String("foo", "", "A help for foo") + + // Cobra supports local flags which will only run when this command + // is called directly, e.g.: + // versionCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} +func printVersion() { + fmt.Printf("Version: %s, GitHash: %s, BuildAt: %s\n", version, gitHash, buildStamp) +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/config.yaml b/dbm-services/mysql/db-tools/mysql-monitor/config.yaml new file mode 100644 index 0000000000..d866da2e87 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/config.yaml @@ -0,0 +1,20 @@ +ip: 127.0.0.1 +port: 20000 +immute_domain: aaa.bbb.ccc +machine_type: storage +role: master +bk_cloud_id: 1 +log: + console: true + log_file_dir: /Users/xfwduke/mysql-monitor/logs + debug: true + source: true + json: false +api_url: http://127.0.0.1:9999 +items_config_file: /Users/xfwduke/mysql-monitor/items-config.yaml +auth: + user: root + password: 123 +dba_sys_dbs: ["mysql", "test", "information_schema", "performance_schema", "db_infobase", "sys"] +interact_timeout: 2s +default_schedule: '@every 1m' \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-monitor/config.yaml.go.tpl b/dbm-services/mysql/db-tools/mysql-monitor/config.yaml.go.tpl new file mode 100644 index 0000000000..733de08750 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/config.yaml.go.tpl @@ -0,0 +1,20 @@ +ip: {{ .IP }} +port: {{ .Port }} +immute_domain: {{ .ImmuteDomain }} +machine_type: {{ .MachineType }} +role: {{ .Role }} +bk_cloud_id: {{ .BkCloudId }} +log: + console: true + log_file_dir: {{ .LogPath }} + debug: true + source: true + json: false +api_url: http://127.0.0.1:9999 +items_config_file: {{ .ItemsConfigPath }} +auth: + user: {{ .User }} + password: {{ .Password }} +dba_sys_dbs: {{ .DbaSysDbs }} +interact_timeout: 2s +default_schedule: '@every 1m' \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-monitor/go.mod b/dbm-services/mysql/db-tools/mysql-monitor/go.mod new file mode 100644 index 0000000000..1b5d503287 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/go.mod @@ -0,0 +1,43 @@ +module dbm-services/mysql/db-tools/mysql-monitor + +go 1.19 + +require ( + github.com/dlclark/regexp2 v1.8.1 + github.com/go-playground/validator/v10 v10.12.0 + github.com/go-sql-driver/mysql v1.7.1 + github.com/jmoiron/sqlx v1.3.5 + github.com/juju/ratelimit v1.0.2 + github.com/pkg/errors v0.9.1 + github.com/spf13/cobra v1.7.0 + github.com/spf13/viper v1.15.0 + golang.org/x/exp v0.0.0-20230418202329-0354be287a23 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 + gopkg.in/yaml.v2 v2.4.0 +) + +require ( + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/leodido/go-urn v1.2.3 // indirect + github.com/lib/pq v1.10.0 // indirect + github.com/magiconair/properties 
v1.8.7 // indirect + github.com/mattn/go-sqlite3 v1.14.16 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/pelletier/go-toml/v2 v2.0.7 // indirect + github.com/rogpeppe/go-internal v1.8.0 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + golang.org/x/crypto v0.8.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/dbm-services/mysql/db-tools/mysql-monitor/go.sum b/dbm-services/mysql/db-tools/mysql-monitor/go.sum new file mode 100644 index 0000000000..7c3fbe72df --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/go.sum @@ -0,0 +1,525 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod 
h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dlclark/regexp2 v1.8.1 h1:6Lcdwya6GjPUNsBct8Lg/yRPwMhABj269AAzdGSiR+0= +github.com/dlclark/regexp2 v1.8.1/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI= +github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= 
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle 
v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI= +github.com/juju/ratelimit v1.0.2/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA= +github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E= +github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= +github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.8.0 
h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230418202329-0354be287a23 h1:4NKENAGIctmZYLK9W+X1kDK8ObBFqOSCJM6WE7CvkJY= +golang.org/x/exp v0.0.0-20230418202329-0354be287a23/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod 
h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod 
h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto 
v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod 
h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/dbm-services/mysql/db-tools/mysql-monitor/items-config.tpl.yaml b/dbm-services/mysql/db-tools/mysql-monitor/items-config.tpl.yaml new file mode 100644 index 0000000000..33428239f9 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/items-config.tpl.yaml @@ -0,0 +1,92 @@ +- name: character-consistency + enable: true + schedule: 0 0 14 * * 1 + machine_type: backend + role: [] +- name: routine-definer + enable: true + schedule: 0 0 15 * * 1 + machine_type: backend + role: [] +- name: view-definer + enable: true + schedule: 0 0 15 * * 1 + machine_type: backend + role: [] +- name: trigger-definer + enable: true + schedule: 0 0 15 * * 1 + machine_type: backend + role: [] +- name: engine + enable: true + schedule: 0 0 12 * * * + machine_type: backend + role: [] +- name: ext3-check + enable: true + schedule: 0 0 16 * * 1 + machine_type: backend + role: [] +- name: slave-status + enable: true + schedule: "@every 1m" + machine_type: backend + role: + - slave + - repeater +- name: mysql-err-notice + enable: true + schedule: "@every 1m" + machine_type: backend + role: [] +- name: mysql-err-critical + enable: true + schedule: "@every 1m" + machine_type: backend + role: [] +- name: mysql-lock + enable: true + schedule: "@every 1m" + machine_type: backend + role: [] +- name: mysql-inject + enable: true + schedule: "@every 1m" + machine_type: backend + role: [] +- name: rotate-slowlog + enable: true + schedule: 0 55 23 * * * + machine_type: backend + role: [] +- name: mysql-connlog-size + enable: true + schedule: 0 0 12 * * * + machine_type: backend + role: [] +- name: mysql-connlog-rotate + enable: true + schedule: 0 30 23 * * * + machine_type: backend + role: [] +- name: mysql-connlog-report + enable: true + schedule: 0 40 23 * * * + machine_type: backend + role: [] +- name: mysql-config-diff + enable: true + schedule: "@every 1m" + machine_type: backend + role: [] +- name: db-up + enable: true + schedule: "@every 10s" + machine_type: backend + role: [] +- name: mysql_monitor_heart_beat + enable: true + schedule: "@every 10s" + machine_type: backend + role: [] \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-monitor/items-config.yaml b/dbm-services/mysql/db-tools/mysql-monitor/items-config.yaml new file mode 100644 index 0000000000..03d4b3233f --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/items-config.yaml @@ -0,0 +1,48 @@ +- name: character-consistency + enable: true + schedule: 0 0 14 * * 1 + machine_type: backend + role: [] +- name: routine-definer + enable: true + schedule: 0 0 15 * * 1 + machine_type: backend + role: + - master + - slave + - repeater +- name: view-definer + enable: true + schedule: 0 0 15 * * 1 + machine_type: backend + role: [] +- name: trigger-definer + enable: true + schedule: 0 0 15 * * 1 + machine_type: [backend, remote] + role: [] +- name: engine + enable: true + schedule: 0 0 12 * * * + machine_type: backend + role: [] +- name: ext3_check + enable: true + schedule: 0 0 16 * * 1 + machine_type: backend + role: [] +- name: master-slave-heartbeat + enable: true + schedule: null + machine_type: backend + role: [] +- name: db-up + enable: true 
+ schedule: '@every 10s' + machine_type: storage + role: [] +- name: mysql-monitor-heart-beat + enable: true + schedule: '@every 10s' + machine_type: storage + role: [] diff --git a/dbm-services/mysql/db-tools/mysql-monitor/main.go b/dbm-services/mysql/db-tools/mysql-monitor/main.go new file mode 100644 index 0000000000..4d2b8e89c6 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/main.go @@ -0,0 +1,7 @@ +package main + +import "dbm-services/mysql/db-tools/mysql-monitor/cmd" + +func main() { + cmd.Execute() +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/config.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/config.go new file mode 100644 index 0000000000..0579a3a16d --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/config.go @@ -0,0 +1,2 @@ +// Package config TODO +package config diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/init.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/init.go new file mode 100644 index 0000000000..2cd7acc33f --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/init.go @@ -0,0 +1,150 @@ +package config + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/go-playground/validator/v10" + "golang.org/x/exp/slog" + "gopkg.in/yaml.v2" +) + +// HeartBeatName TODO +var HeartBeatName = "mysql_monitor_heart_beat" + +// MonitorConfig TODO +var MonitorConfig *monitorConfig + +// ItemsConfig TODO +var ItemsConfig []*MonitorItem + +// HardCodeSchedule TODO +var HardCodeSchedule = "@every 10s" + +// InitConfig TODO +func InitConfig(configPath string) error { + fmt.Printf("config flag: %s\n", configPath) + if !filepath.IsAbs(configPath) { + cwd, err := os.Getwd() + if err != nil { + slog.Error("init config", err) + return err + } + + configPath = filepath.Join(cwd, configPath) + } + fmt.Printf("config path: %s\n", configPath) + + content, err := os.ReadFile(configPath) + if err != nil { + slog.Error("init config", err) + return err + } + + MonitorConfig = &monitorConfig{} + err = yaml.UnmarshalStrict(content, MonitorConfig) + if err != nil { + slog.Error("init config", err) + return err + } + + validate := validator.New() + err = validate.Struct(MonitorConfig) + if err != nil { + slog.Error("validate monitor config", err) + return err + } + + return nil +} + +// LoadMonitorItemsConfig TODO +func LoadMonitorItemsConfig() error { + ItemsConfig = make([]*MonitorItem, 0) + + content, err := os.ReadFile(MonitorConfig.ItemsConfigFile) + if err != nil { + slog.Error("load monitor items config", err) + return err + } + + err = yaml.UnmarshalStrict(content, &ItemsConfig) + if err != nil { + slog.Error("unmarshal monitor items config", err) + return err + } + + validate := validator.New() + for _, ele := range ItemsConfig { + err := validate.Struct(ele) + if err != nil { + slog.Error("validate monitor items config", err) + return err + } + } + + return nil +} + +// InjectHardCodeItem TODO +func InjectHardCodeItem() { + enable := true + dbUpItem := &MonitorItem{ + Name: "db-up", + Enable: &enable, + Schedule: &HardCodeSchedule, // &MonitorConfig.DefaultSchedule, + MachineType: []string{MonitorConfig.MachineType}, + Role: nil, + } + heartBeatItem := &MonitorItem{ + Name: HeartBeatName, + Enable: &enable, + Schedule: &HardCodeSchedule, // &MonitorConfig.DefaultSchedule, + MachineType: []string{MonitorConfig.MachineType}, + Role: nil, + } + slog.Debug("load monitor item", slog.Any("items", ItemsConfig)) + + ItemsConfig = injectItem(dbUpItem, ItemsConfig) + 
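// Editor's note: a minimal sketch of what the injectItem call above (and the
// function defined just below) does. Values are hypothetical, and yes/no stand
// for *bool helpers: a hard-coded item replaces the same-named entry from the
// items file but inherits that entry's enable flag, while every other field
// reverts to the hard-coded default.
//
//	user := []*MonitorItem{{Name: "db-up", Enable: &no}, {Name: "engine", Enable: &yes}}
//	merged := injectItem(&MonitorItem{Name: "db-up", Enable: &yes, Schedule: &HardCodeSchedule}, user)
//	// merged: [{engine enable=true}, {db-up enable=false schedule="@every 10s"}]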
slog.Debug("inject hardcode", slog.Any("items", ItemsConfig)) + + ItemsConfig = injectItem(heartBeatItem, ItemsConfig) + slog.Debug("inject hardcode", slog.Any("items", ItemsConfig)) +} + +func injectItem(item *MonitorItem, collect []*MonitorItem) (res []*MonitorItem) { + for i, ele := range collect { + if ele.Name == item.Name { + // 如果已经在配置文件, 保留 enable 配置, 其他覆盖为默认配置 + res = append(collect[:i], collect[i+1:]...) + item.Enable = ele.Enable + return append(res, item) + } + } + + return append(collect, item) +} + +// WriteMonitorItemsBack TODO +func WriteMonitorItemsBack() error { + // 注入硬编码监控项后回写items文件 + content, err := yaml.Marshal(ItemsConfig) + if err != nil { + slog.Error("marshal items config", err) + return err + } + + f, err := os.OpenFile(MonitorConfig.ItemsConfigFile, os.O_TRUNC|os.O_WRONLY, 0755) + if err != nil { + slog.Error("open items config file", err) + return err + } + + _, err = f.Write(content) + if err != nil { + slog.Error("write items config file", err) + return err + } + return nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/items_config.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/items_config.go new file mode 100644 index 0000000000..51e2269737 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/items_config.go @@ -0,0 +1,39 @@ +package config + +import "golang.org/x/exp/slices" + +// MonitorItem TODO +type MonitorItem struct { + Name string `yaml:"name" validate:"required"` + Enable *bool `yaml:"enable" validate:"required"` + Schedule *string `yaml:"schedule"` + MachineType []string `yaml:"machine_type"` + Role []string `yaml:"role"` +} + +// IsEnable TODO +func (c *MonitorItem) IsEnable() bool { + return c.Enable != nil && *c.Enable +} + +// IsMatchMachineType TODO +func (c *MonitorItem) IsMatchMachineType() bool { + return slices.Index(c.MachineType, MonitorConfig.MachineType) >= 0 +} + +// IsMatchRole TODO +func (c *MonitorItem) IsMatchRole() bool { + if MonitorConfig.Role == nil { + return true + } + + if c.Role == nil || len(c.Role) < 1 { + return true + } + + if *MonitorConfig.Role == "repeater" { + return true + } + + return slices.Index(c.Role, *MonitorConfig.Role) >= 0 +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/log_config.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/log_config.go new file mode 100644 index 0000000000..21db398b32 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/log_config.go @@ -0,0 +1,10 @@ +package config + +// LogConfig TODO +type LogConfig struct { + Console bool `yaml:"console"` + LogFileDir *string `yaml:"log_file_dir"` + Debug bool `yaml:"debug"` + Source bool `yaml:"source"` + Json bool `yaml:"json"` +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/monitor_config.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/monitor_config.go new file mode 100644 index 0000000000..2db7e02d43 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/config/monitor_config.go @@ -0,0 +1,35 @@ +package config + +import ( + "time" +) + +// ConnectAuth TODO +type ConnectAuth struct { + User string `yaml:"user" validate:"required"` + Password string `yaml:"password" validate:"required"` +} + +type authCollect struct { + Mysql *ConnectAuth `yaml:"mysql"` // spider, ctl 也是这一套 + Proxy *ConnectAuth `yaml:"proxy"` + ProxyAdmin *ConnectAuth `yaml:"proxy_admin"` +} + +type monitorConfig struct { + BkBizId int `yaml:"bk_biz_id"` + Ip string `yaml:"ip" validate:"required,ipv4"` + Port int `yaml:"port" 
validate:"required,gt=1024,lte=65535"` + BkInstanceId int64 `yaml:"bk_instance_id" validate:"required,gt=0"` + ImmuteDomain string `yaml:"immute_domain"` + MachineType string `yaml:"machine_type"` + Role *string `yaml:"role"` + BkCloudID *int `yaml:"bk_cloud_id" validate:"required,gte=0"` + Log *LogConfig `yaml:"log"` + ItemsConfigFile string `yaml:"items_config_file" validate:"required"` + ApiUrl string `yaml:"api_url" validate:"required"` + Auth authCollect `yaml:"auth"` + DBASysDbs []string `yaml:"dba_sys_dbs" validate:"required"` + InteractTimeout time.Duration `yaml:"interact_timeout" validate:"required"` + DefaultSchedule string `yaml:"default_schedule" validate:"required"` +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/internal/cst/const.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/internal/cst/const.go new file mode 100644 index 0000000000..a43b69b973 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/internal/cst/const.go @@ -0,0 +1,8 @@ +package cst + +const ( + // DBASchema TODO + DBASchema = "infodba_schema" + // DBAReportBase TODO + DBAReportBase = "/home/mysql/dbareport" +) diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/internal/cst/cst.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/internal/cst/cst.go new file mode 100644 index 0000000000..f558488b06 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/internal/cst/cst.go @@ -0,0 +1,2 @@ +// Package cst TODO +package cst diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/character_consistency/character_consistency.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/character_consistency/character_consistency.go new file mode 100644 index 0000000000..1b232d6f4e --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/character_consistency/character_consistency.go @@ -0,0 +1,2 @@ +// Package character_consistency TODO +package character_consistency diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/character_consistency/checker.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/character_consistency/checker.go new file mode 100644 index 0000000000..9164b9b174 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/character_consistency/checker.go @@ -0,0 +1,71 @@ +package character_consistency + +import ( + "context" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + "fmt" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" +) + +var name = "character-consistency" + +// Checker TODO +type Checker struct { + db *sqlx.DB +} + +// Run TODO +func (c *Checker) Run() (msg string, err error) { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + var characterSetServer string + err = c.db.GetContext(ctx, &characterSetServer, `SELECT @@character_set_server`) + if err != nil { + return "", errors.Wrap(err, "get character_set_server") // ToDo 这里需要发告警么? + } + + q, args, err := sqlx.In( + `SELECT SCHEMA_NAME, DEFAULT_CHARACTER_SET_NAME + FROM INFORMATION_SCHEMA.SCHEMATA + WHERE DEFAULT_CHARACTER_SET_NAME <> ? 
AND SCHEMA_NAME NOT IN (?)`, + characterSetServer, + config.MonitorConfig.DBASysDbs, + ) + if err != nil { + return "", errors.Wrap(err, "build IN query db charset") + } + + var res []struct { + SchemaName string `db:"SCHEMA_NAME"` + SchemaCharset string `db:"DEFAULT_CHARACTER_SET_NAME"` + } + err = c.db.SelectContext(ctx, &res, c.db.Rebind(q), args...) + if err != nil { + return "", errors.Wrap(err, "query charset inconsistent dbs") + } + + if len(res) > 0 { + return fmt.Sprintf("%v charset inconsistent with server charset", res), nil + } else { + return "", nil + } +} + +// Name TODO +func (c *Checker) Name() string { + return name +} + +// New TODO +func New(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{db: cc.MySqlDB} +} + +// Register TODO +func Register() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return name, New +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/check_definer.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/check_definer.go new file mode 100644 index 0000000000..f6659140bf --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/check_definer.go @@ -0,0 +1,42 @@ +package definer + +import ( + "fmt" + "strings" + + "golang.org/x/exp/slices" + "golang.org/x/exp/slog" +) + +func checkDefiner(ownerFinger string, definer string) string { + slog.Debug( + "check definer", + slog.String("owner", ownerFinger), slog.String("definer", definer), + ) + + splitDefiner := strings.Split(definer, `@`) + definerUserName := splitDefiner[0] + definerHost := splitDefiner[1] + + var msgSlice []string + if slices.Index(mysqlUsers, definerUserName) < 0 { + msgSlice = append( + msgSlice, + fmt.Sprintf("username %s not exists", definerUserName), + ) + } + if definerHost != "localhost" { + msgSlice = append( + msgSlice, + fmt.Sprintf("host %s not localhost", definerHost), + ) + } + if len(msgSlice) > 0 { + return fmt.Sprintf( + "%s definer %s", + ownerFinger, + strings.Join(msgSlice, ","), + ) + } + return "" +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/definer.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/definer.go new file mode 100644 index 0000000000..12667b06b6 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/definer.go @@ -0,0 +1,2 @@ +// Package definer TODO +package definer diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/init.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/init.go new file mode 100644 index 0000000000..83fe3c7047 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/init.go @@ -0,0 +1,99 @@ +package definer + +import ( + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + "fmt" + "strings" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" +) + +var nameRoutine = "routine-definer" +var nameView = "view-definer" +var nameTrigger = "trigger-definer" + +var mysqlUsers []string +var snapped bool + +func init() { + snapped = false +} + +// Checker TODO +type Checker struct { + db *sqlx.DB + name string + f func(*sqlx.DB) ([]string, error) +} + +// Run TODO +func (c *Checker) Run() (msg string, err error) { + err = snapshot(c.db) + if err != nil { + return "", err + } + + msgSlice, err := c.f(c.db) + if err != nil { + return "", errors.Wrap(err, fmt.Sprintf("run %s", c.name)) + } 
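// Editor's note: msgSlice holds one entry per offending object, each built by
// checkDefiner in check_definer.go; a joined result (hypothetical names) looks
// like: "PROCEDURE db1.p1 definer username bob not exists,host 10.0.0.1 not
// localhost. view db1.v1 definer host % not localhost"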
+ + return strings.Join(msgSlice, ". "), nil +} + +// Name TODO +func (c *Checker) Name() string { + return c.name +} + +// NewCheckRoutineDefiner TODO +func NewCheckRoutineDefiner(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{ + db: cc.MySqlDB, + name: nameRoutine, + f: routines, + } +} + +// NewCheckViewDefiner TODO +func NewCheckViewDefiner(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{ + db: cc.MySqlDB, + name: nameView, + f: views, + } +} + +// NewCheckTriggerDefiner TODO +func NewCheckTriggerDefiner(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{ + db: cc.MySqlDB, + name: nameTrigger, + f: triggers, + } +} + +// RegisterCheckRoutineDefiner TODO +func RegisterCheckRoutineDefiner() ( + string, + monitor_item_interface.MonitorItemConstructorFuncType, +) { + return nameRoutine, NewCheckRoutineDefiner +} + +// RegisterCheckViewDefiner TODO +func RegisterCheckViewDefiner() ( + string, + monitor_item_interface.MonitorItemConstructorFuncType, +) { + return nameView, NewCheckViewDefiner +} + +// RegisterCheckTriggerDefiner TODO +func RegisterCheckTriggerDefiner() ( + string, + monitor_item_interface.MonitorItemConstructorFuncType, +) { + return nameTrigger, NewCheckTriggerDefiner +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/routine.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/routine.go new file mode 100644 index 0000000000..58e6ef5461 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/routine.go @@ -0,0 +1,49 @@ +package definer + +import ( + "context" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "fmt" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "golang.org/x/exp/slog" +) + +func routines(db *sqlx.DB) (msg []string, err error) { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + q, args, err := sqlx.In( + `SELECT ROUTINE_TYPE, ROUTINE_NAME, ROUTINE_SCHEMA, DEFINER + FROM information_schema.ROUTINES + WHERE ROUTINE_SCHEMA NOT IN (?)`, + config.MonitorConfig.DBASysDbs, + ) + if err != nil { + return nil, errors.Wrap(err, "build In query routine") + } + + var res []struct { + RoutineType string `db:"ROUTINE_TYPE"` + RoutineName string `db:"ROUTINE_NAME"` + RoutineSchema string `db:"ROUTINE_SCHEMA"` + Definer string `db:"DEFINER"` + } + err = db.SelectContext(ctx, &res, db.Rebind(q), args...) 
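// Editor's note: a self-contained sketch of the sqlx.In + Rebind idiom used
// above (table and column names are hypothetical). sqlx.In expands a slice
// argument into one placeholder per element; Rebind then rewrites the
// placeholders into the driver's bindvar style:
//
//	q, args, err := sqlx.In(`SELECT name FROM t WHERE db IN (?)`, []string{"a", "b"})
//	// q == "SELECT name FROM t WHERE db IN (?, ?)", args == []interface{}{"a", "b"}
//	err = db.SelectContext(ctx, &res, db.Rebind(q), args...)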
+ if err != nil { + return nil, errors.Wrap(err, "query routines") + } + slog.Debug("query routines", slog.Any("routines", res)) + + for _, ele := range res { + owner := fmt.Sprintf( + "%s %s.%s", + ele.RoutineType, ele.RoutineSchema, ele.RoutineName, + ) + if r := checkDefiner(owner, ele.Definer); r != "" { + msg = append(msg, r) + } + } + return msg, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/trigger.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/trigger.go new file mode 100644 index 0000000000..7a483dd21b --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/trigger.go @@ -0,0 +1,48 @@ +package definer + +import ( + "context" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "fmt" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "golang.org/x/exp/slog" +) + +func triggers(db *sqlx.DB) (msg []string, err error) { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + q, args, err := sqlx.In( + `SELECT TRIGGER_NAME, TRIGGER_SCHEMA, DEFINER + FROM information_schema.TRIGGERS + WHERE TRIGGER_SCHEMA NOT IN (?)`, + config.MonitorConfig.DBASysDbs, + ) + if err != nil { + return nil, errors.Wrap(err, "build In query trigger") + } + + var res []struct { + TriggerName string `db:"TRIGGER_NAME"` + TriggerSchema string `db:"TRIGGER_SCHEMA"` + Definer string `db:"DEFINER"` + } + err = db.SelectContext(ctx, &res, db.Rebind(q), args...) + if err != nil { + return nil, errors.Wrap(err, "query triggers") + } + slog.Debug("query triggers", slog.Any("triggers", res)) + + for _, ele := range res { + owner := fmt.Sprintf( + "trigger %s.%s", + ele.TriggerSchema, ele.TriggerName, + ) + if r := checkDefiner(owner, ele.Definer); r != "" { + msg = append(msg, r) + } + } + return msg, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/user_list_snap_shot.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/user_list_snap_shot.go new file mode 100644 index 0000000000..c16b1b51d8 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/user_list_snap_shot.go @@ -0,0 +1,25 @@ +package definer + +import ( + "context" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + + "github.com/jmoiron/sqlx" + "golang.org/x/exp/slog" +) + +func snapshot(db *sqlx.DB) error { + if !snapped { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + err := db.SelectContext(ctx, &mysqlUsers, `SELECT user FROM mysql.user`) + if err != nil { + slog.Error("query users", err) + return err + } + slog.Debug("query users", slog.Any("users", mysqlUsers)) + snapped = true + } + return nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/view.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/view.go new file mode 100644 index 0000000000..2ff3e6266e --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer/view.go @@ -0,0 +1,48 @@ +package definer + +import ( + "context" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "fmt" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "golang.org/x/exp/slog" +) + +func views(db *sqlx.DB) (msg []string, err error) { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + q, args, err := sqlx.In( + `SELECT 
TABLE_NAME, TABLE_SCHEMA, DEFINER + FROM information_schema.VIEWS + WHERE TABLE_SCHEMA NOT IN (?)`, + config.MonitorConfig.DBASysDbs, + ) + if err != nil { + return nil, errors.Wrap(err, "build In query view") + } + + var res []struct { + ViewName string `db:"TABLE_NAME"` + ViewSchema string `db:"TABLE_SCHEMA"` + Definer string `db:"DEFINER"` + } + err = db.SelectContext(ctx, &res, db.Rebind(q), args...) + if err != nil { + return nil, errors.Wrap(err, "query views") + } + slog.Debug("query views", slog.Any("views", res)) + + for _, ele := range res { + owner := fmt.Sprintf( + "view %s.%s", + ele.ViewSchema, ele.ViewName, + ) + if r := checkDefiner(owner, ele.Definer); r != "" { + msg = append(msg, r) + } + } + return msg, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/engine.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/engine.go new file mode 100644 index 0000000000..91201ba7e9 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/engine.go @@ -0,0 +1,2 @@ +// Package engine TODO +package engine diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/hyper_engine.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/hyper_engine.go new file mode 100644 index 0000000000..37c1da793e --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/hyper_engine.go @@ -0,0 +1,17 @@ +package engine + +import "strings" + +func (c *Checker) hyperEngine() map[string]int { + engineCount := make(map[string]int) + for _, ele := range c.infos { + engine := strings.ToLower(ele.Engine) + if !strings.HasPrefix(engine, "myisam") { + if _, ok := engineCount[engine]; !ok { + engineCount[engine] = 0 + } + engineCount[engine] += 1 + } + } + return engineCount +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/init.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/init.go new file mode 100644 index 0000000000..bf64e09ddb --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/init.go @@ -0,0 +1,86 @@ +package engine + +import ( + "context" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + "fmt" + "strings" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" +) + +var name = "engine" + +type tableEngineInfo struct { + TableSchema string `db:"TABLE_SCHEMA"` + TableName string `db:"TABLE_NAME"` + Engine string `db:"ENGINE"` +} + +// Checker TODO +type Checker struct { + db *sqlx.DB + infos []tableEngineInfo +} + +// Run TODO +func (c *Checker) Run() (msg string, err error) { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + q, args, err := sqlx.In( + `SELECT TABLE_SCHEMA, TABLE_NAME, ENGINE + FROM information_schema.TABLES + WHERE TABLE_SCHEMA NOT IN (?) + AND + TABLE_TYPE = ?`, + config.MonitorConfig.DBASysDbs, + "BASE TABLE", + ) + if err != nil { + return "", errors.Wrap(err, "build IN query table engine") + } + + var infos []tableEngineInfo + err = c.db.SelectContext(ctx, &infos, c.db.Rebind(q), args...) 
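// Editor's note: given hypothetical rows {db1,t1,MyISAM}, {db1,t2,InnoDB} and
// {db1,t3,InnoDB}, c.myisam() (myisam.go) yields ["db1.t1"] and
// c.hyperEngine() (hyper_engine.go) yields {"innodb": 2}; the
// "hyper engine found" suffix is only appended when more than one distinct
// non-MyISAM engine is present.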
+ if err != nil { + return "", errors.Wrap(err, "query table engine") + } + c.infos = infos + + myisamTables := c.myisam() + if len(myisamTables) > 0 { + msg = fmt.Sprintf("%d myisam-like table(s) found", len(myisamTables)) + } + + engineCountMap := c.hyperEngine() + var engineCountSlice []string + for k, v := range engineCountMap { + engineCountSlice = append(engineCountSlice, fmt.Sprintf("%d %s tables", v, k)) + } + if len(engineCountSlice) > 1 { + msg = fmt.Sprintf( + "%s. hyper engine found: %s", + msg, strings.Join(engineCountSlice, ","), + ) + } + + return msg, nil +} + +// Name TODO +func (c *Checker) Name() string { + return name +} + +// New TODO +func New(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{db: cc.MySqlDB} +} + +// Register TODO +func Register() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return name, New +}
diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/myisam.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/myisam.go new file mode 100644 index 0000000000..13f593247a --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine/myisam.go @@ -0,0 +1,19 @@ +package engine + +import ( + "fmt" + "strings" +) + +func (c *Checker) myisam() (tables []string) { + var myisamTables []string + for _, ele := range c.infos { + if strings.HasPrefix(strings.ToLower(ele.Engine), "myisam") { + myisamTables = append( + myisamTables, + fmt.Sprintf("%s.%s", ele.TableSchema, ele.TableName), + ) + } + } + return myisamTables +}
diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/ext3_check.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/ext3_check.go new file mode 100644 index 0000000000..9305ba8099 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/ext3_check.go @@ -0,0 +1,2 @@ +// Package ext3_check TODO +package ext3_check
diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/filter_dir_fs.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/filter_dir_fs.go new file mode 100644 index 0000000000..b66105912e --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/filter_dir_fs.go @@ -0,0 +1,54 @@ +package ext3_check + +import ( + "bufio" + "bytes" + "os/exec" + "regexp" + "strings" + + "github.com/pkg/errors" + "golang.org/x/exp/slices" +) + +func filterDirFs(dirs []string, filterFs ...string) (ftDirs []string, err error) { + splitR := regexp.MustCompile(`\s+`) + + for _, dir := range dirs { + var stdout, stderr bytes.Buffer + cmd := exec.Command("df", "-P", "-T", dir) + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err = cmd.Run() + if err != nil { + return nil, errors.Wrapf(err, "df -P -T %s: %s", dir, stderr.String()) + } + + var lines []string + scanner := bufio.NewScanner(strings.NewReader(stdout.String())) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + err := scanner.Err() + if err != nil { + return nil, errors.Wrap(err, "scan failed") + } + } + + if len(lines) != 2 { + err = errors.Errorf("parse df result failed: %s", stdout.String()) + return nil, err + } + + splitLine := splitR.Split(lines[1], -1) + if len(splitLine) != 7 { + err = errors.Errorf("unexpected df output line: %s", lines[1]) + return nil, err + } + + if slices.Index(filterFs, splitLine[1]) >= 0 { + ftDirs = append(ftDirs, dir) + } + } + + return ftDirs, nil
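// Editor's note: `df -P -T <dir>` prints a header plus exactly one data line,
// e.g. (hypothetical output):
//
//	Filesystem     Type 1024-blocks  Used Available Capacity Mounted on
//	/dev/sda1      ext3   103081248 54321  98765432      42% /data
//
// hence the len(lines) != 2 guard above, the expectation of seven
// whitespace-separated fields, and the filesystem type at splitLine[1].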
+} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/find_huge_file.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/find_huge_file.go new file mode 100644 index 0000000000..eadd4212c4 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/find_huge_file.go @@ -0,0 +1,32 @@ +package ext3_check + +import ( + "io/fs" + "os" + "path/filepath" +) + +func findHugeFile(dirs []string, threshold int64) (files []string, err error) { + for _, dir := range dirs { + err = filepath.WalkDir( + dir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return filepath.SkipDir + } + + st, sterr := os.Stat(path) + if sterr != nil { + return filepath.SkipDir + } + if !d.IsDir() && st.Size() >= threshold { + files = append(files, path) + } + return nil + }, + ) + if err != nil { + return nil, err + } + } + return files, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/init.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/init.go new file mode 100644 index 0000000000..898e21fdac --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/init.go @@ -0,0 +1,77 @@ +package ext3_check + +import ( + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + "fmt" + "strings" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" +) + +/* +1. 先拿到 mysql 的 日志目录 和 数据目录 +2. 看看是不是 ext3 +3. 如果是就找有没有接近 2T 的文件 +4. 是不是还要检查下文件的 +e 属性呢? +5. 是不是还要检查磁盘的 huge file 属性呢? +*/ + +var name = "ext3-check" + +var mysqlDirVariables []string +var hugeSize int64 + +func init() { + mysqlDirVariables = []string{ + "datadir", + "innodb_data_home_dir", + "slow_query_log_file", + "tmpdir", + } + + hugeSize = 1024 * 1024 * 1024 * 1024 // 1T +} + +type ext3Check struct { + db *sqlx.DB +} + +// Run TODO +func (e *ext3Check) Run() (msg string, err error) { + dirs, err := mysqlDirs(e.db, mysqlDirVariables) + if err != nil { + return "", errors.Wrap(err, "get mysql variable dirs") + } + + ftDirs, err := filterDirFs(uniqueDirs(dirs), "ext3") + if err != nil { + return "", errors.Wrap(err, "filter dirs by fs") + } + + hugeFiles, err := findHugeFile(ftDirs, hugeSize) + if err != nil { + return "", errors.Wrap(err, "find huge file") + } + + if len(hugeFiles) > 0 { + return fmt.Sprintf("ext3 FS huge file found: %s", strings.Join(hugeFiles, ",")), nil + } else { + return "", nil + } +} + +// Name TODO +func (e *ext3Check) Name() string { + return name +} + +// New TODO +func New(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &ext3Check{db: cc.MySqlDB} +} + +// Register TODO +func Register() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return name, New +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/query_mysql_dirs.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/query_mysql_dirs.go new file mode 100644 index 0000000000..972a438b19 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/query_mysql_dirs.go @@ -0,0 +1,64 @@ +package ext3_check + +import ( + "context" + "database/sql" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "fmt" + "path/filepath" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" +) + +func mysqlDirs(db *sqlx.DB, variables []string) (dirs []string, err error) { + ctx, cancel := 
context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + var datadir string + + for _, v := range variables { + var dir sql.NullString + err = db.GetContext(ctx, &dir, fmt.Sprintf(`SELECT @@%s`, v)) + if err != nil && err != sql.ErrNoRows { + return nil, errors.Wrap(err, fmt.Sprintf(`SELECT @@%s`, v)) + } + + // mysql其他的目录可能是以 datadir 为 base, 所以要单独存一下 + if dir.Valid { + dirs = append(dirs, dir.String) + if v == "datadir" { + datadir = dir.String + } + } + } + + var binlogBase sql.NullString + err = db.GetContext(ctx, &binlogBase, `SELECT @@log_bin_basename`) + if err != nil && err != sql.ErrNoRows { + return nil, errors.Wrap(err, `SELECT @@log_bin_basename`) + } + + if binlogBase.Valid { + dirs = append(dirs, filepath.Dir(binlogBase.String)) + } + + var relaylogBase sql.NullString + err = db.GetContext(ctx, &relaylogBase, `SELECT @@relay_log_basename`) + if err != nil && err != sql.ErrNoRows { + return nil, errors.Wrap(err, `SELECT @@relay_log_basename`) + } + + if relaylogBase.Valid { + // fmt.Printf("relay-log: %s\n", filepath.Dir(relaylogBase.String)) + dirs = append(dirs, filepath.Dir(relaylogBase.String)) + } + + for i, dir := range dirs { + if !filepath.IsAbs(dir) { + dirs[i] = filepath.Join(datadir, dir) + } + } + + return dirs, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/unique_dirs.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/unique_dirs.go new file mode 100644 index 0000000000..6cca141866 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check/unique_dirs.go @@ -0,0 +1,44 @@ +package ext3_check + +import "strings" + +// uniqueDirs TODO +/* +在某些情况下, mysql 的工作目录可能是这样子 +datadir: /data/mysqldata/data +tmpdir: /data/mysqldata/data/tmp +如果不做去重, tmp会被扫描两遍 +*/ +func uniqueDirs(dirs []string) []string { + if len(dirs) <= 1 { + return dirs + } + + for i, d := range dirs { + for j, ld := range dirs { + if i == j { + continue + } + + if strings.HasPrefix(d, ld) { // ld is base, replace d with ld + dirs[i] = ld + break + } + if strings.HasPrefix(ld, d) { // d is base, replace ld with d + dirs[j] = d + continue + } + } + } + um := make(map[string]int) + for _, d := range dirs { + um[d] = 1 + } + + var res []string + for k := range um { + res = append(res, k) + } + + return res +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/collect_result.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/collect_result.go new file mode 100644 index 0000000000..e8396e265c --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/collect_result.go @@ -0,0 +1,60 @@ +package ibd_statistic + +import ( + "io/fs" + "os" + "path/filepath" + "strings" + + "golang.org/x/exp/slog" +) + +func collectResult(dataDir string) (map[string]map[string]int64, error) { + result := make(map[string]map[string]int64) + + err := filepath.WalkDir( + dataDir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return fs.SkipDir + } + + if !d.IsDir() && strings.ToLower(filepath.Ext(d.Name())) == ibdExt { + dir := filepath.Dir(path) + dbName := filepath.Base(dir) + + var tableName string + + match := partitionPattern.FindStringSubmatch(d.Name()) + + if match == nil { + tableName = strings.TrimSuffix(d.Name(), ibdExt) + } else { + tableName = match[1] + } + + st, err := os.Stat(path) + if err != nil { + slog.Error("ibd-statistic collect result", err) + return 
err + } + + if _, ok := result[dbName]; !ok { + result[dbName] = make(map[string]int64) + } + if _, ok := result[dbName][tableName]; !ok { + result[dbName][tableName] = 0 + } + + result[dbName][tableName] += st.Size() + } + return nil + }, + ) + + if err != nil { + slog.Error("ibd-statistic collect result", err) + return nil, err + } + + return result, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/ibd_statistic.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/ibd_statistic.go new file mode 100644 index 0000000000..9091f7cb76 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/ibd_statistic.go @@ -0,0 +1,2 @@ +// Package ibd_statistic TODO +package ibd_statistic diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/init.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/init.go new file mode 100644 index 0000000000..0eb63778f4 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/init.go @@ -0,0 +1,84 @@ +package ibd_statistic + +import ( + "context" + "database/sql" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + "regexp" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "golang.org/x/exp/slog" +) + +/* +以扫描磁盘文件的方式统计 innodb 库表大小 +本来计划同时实现 .frm 和 .par 文件丢失的告警 +但是在 8.0 里面已经没有这两个文件了 +所以就只做一个单纯统计表大小的功能 +虽然都是磁盘文件扫描, 但还是没办法和 ext3_check 整合 +因为不太好把文件信息缓存下来共享使用, 可能会比较大 +同时经过实际测试, 50w 表的统计耗时 2s, 所以独立扫描一次问题应该也不大 +*/ + +var name = "ibd-statistic" + +var ibdExt string +var partitionPattern *regexp.Regexp + +func init() { + ibdExt = ".ibd" + partitionPattern = regexp.MustCompile(`^(.*)#P#.*\.ibd`) + +} + +type ibdStatistic struct { + db *sqlx.DB +} + +// Run TODO +func (c *ibdStatistic) Run() (msg string, err error) { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + var dataDir sql.NullString + err = c.db.GetContext(ctx, &dataDir, `SELECT @@datadir`) + if err != nil { + slog.Error("ibd-statistic", err) + return "", err + } + + if !dataDir.Valid { + err := errors.Errorf("invalid datadir: '%s'", dataDir.String) + slog.Error("ibd-statistic", err) + return "", err + } + + result, err := collectResult(dataDir.String) + if err != nil { + return "", err + } + + err = reportMetrics(result) + if err != nil { + return "", err + } + + return "", nil +} + +// Name TODO +func (c *ibdStatistic) Name() string { + return name +} + +// New TODO +func New(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &ibdStatistic{db: cc.MySqlDB} +} + +// Register TODO +func Register() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return name, New +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/report_metrics.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/report_metrics.go new file mode 100644 index 0000000000..c94ca68bf8 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic/report_metrics.go @@ -0,0 +1,66 @@ +package ibd_statistic + +import ( + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/utils" + "regexp" + + "github.com/pkg/errors" + "golang.org/x/exp/slog" +) + +var tableSizeMetricName string +var dbSizeMetricName 
+
+var tendbClusterDbNamePattern *regexp.Regexp
+
+func init() {
+	tableSizeMetricName = "mysql_table_size"
+	dbSizeMetricName = "mysql_database_size"
+
+	tendbClusterDbNamePattern = regexp.MustCompile(`^(.*)_[0-9]+$`)
+}
+
+func reportMetrics(result map[string]map[string]int64) error {
+	for dbName, dbInfo := range result {
+		var dbSize int64
+		originalDbName := dbName
+
+		// by dbm's enum convention, "remote" is the storage machine type of tendbcluster
+		if config.MonitorConfig.MachineType == "remote" {
+			match := tendbClusterDbNamePattern.FindStringSubmatch(dbName)
+			if match == nil {
+				err := errors.Errorf(
+					"invalid dbname: '%s' on %s",
+					dbName, config.MonitorConfig.MachineType,
+				)
+				slog.Error("ibd-statistic report", err)
+				return err
+			}
+			dbName = match[1]
+		}
+
+		for tableName, tableSize := range dbInfo {
+			utils.SendMonitorMetrics(
+				tableSizeMetricName,
+				tableSize,
+				map[string]interface{}{
+					"table_name":             tableName,
+					"database_name":          dbName,
+					"original_database_name": originalDbName,
+				},
+			)
+
+			dbSize += tableSize
+		}
+		utils.SendMonitorMetrics(
+			dbSizeMetricName,
+			dbSize,
+			map[string]interface{}{
+				"database_name":          dbName,
+				"original_database_name": originalDbName,
+			},
+		)
+	}
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/init.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/init.go
new file mode 100644
index 0000000000..d35ee8372d
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/init.go
@@ -0,0 +1,73 @@
+package items_collect
+
+import (
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/character_consistency"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/definer"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/engine"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ext3_check"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/ibd_statistic"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/master_slave_heartbeat"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_config_diff"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/proxy_backend"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/proxy_user_list"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/rotate_slowlog"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/slave_status"
+	mi "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface"
+	"fmt"
+
+	"golang.org/x/exp/slog"
+)
+
+// TODO a short description string per item would make the list command more useful
+var registeredItemConstructor map[string]func(*mi.ConnectionCollect) mi.MonitorItemInterface
+
+func registerItemConstructor(
+	name string, f func(*mi.ConnectionCollect) mi.MonitorItemInterface,
+) error {
+	if _, ok := registeredItemConstructor[name]; ok {
+		err := fmt.Errorf("%s already registered", name)
+		slog.Error("register item creator", err)
+		return err
+	}
+	registeredItemConstructor[name] = f
+	return nil
+}
+
+// RegisteredItemConstructor TODO
+func RegisteredItemConstructor() map[string]func(*mi.ConnectionCollect) mi.MonitorItemInterface {
+	return registeredItemConstructor
+}
+
+func init() {
+	registeredItemConstructor = make(map[string]func(*mi.ConnectionCollect) mi.MonitorItemInterface)
+	/*
+		register monitor items
+	*/
+	_ =
registerItemConstructor(character_consistency.Register()) + _ = registerItemConstructor(definer.RegisterCheckTriggerDefiner()) + _ = registerItemConstructor(definer.RegisterCheckViewDefiner()) + _ = registerItemConstructor(definer.RegisterCheckRoutineDefiner()) + _ = registerItemConstructor(engine.Register()) + _ = registerItemConstructor(ext3_check.Register()) + _ = registerItemConstructor(master_slave_heartbeat.Register()) + _ = registerItemConstructor(slave_status.RegisterSlaveStatusChecker()) + _ = registerItemConstructor(mysql_errlog.RegisterMySQLErrNotice()) + _ = registerItemConstructor(mysql_errlog.RegisterMySQLErrCritical()) + _ = registerItemConstructor(mysql_errlog.RegisterSpiderErrNotice()) + _ = registerItemConstructor(mysql_errlog.RegisterSpiderErrWarn()) + _ = registerItemConstructor(mysql_errlog.RegisterSpiderErrCritical()) + _ = registerItemConstructor(mysql_processlist.RegisterMySQLLock()) + _ = registerItemConstructor(mysql_processlist.RegisterMySQLInject()) + _ = registerItemConstructor(rotate_slowlog.RegisterRotateSlowLog()) + _ = registerItemConstructor(mysql_connlog.RegisterMySQLConnLogSize()) + _ = registerItemConstructor(mysql_connlog.RegisterMySQLConnLogRotate()) + _ = registerItemConstructor(mysql_connlog.RegisterMySQLConnLogReport()) + _ = registerItemConstructor(mysql_config_diff.Register()) + _ = registerItemConstructor(proxy_user_list.Register()) + _ = registerItemConstructor(proxy_backend.Register()) + _ = registerItemConstructor(ibd_statistic.Register()) + _ = registerItemConstructor(slave_status.RegisterCtlReplicateChecker()) +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/items_collect.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/items_collect.go new file mode 100644 index 0000000000..dca3b3280e --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/items_collect.go @@ -0,0 +1,2 @@ +// Package items_collect TODO +package items_collect diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/master_slave_heartbeat/heartbeat.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/master_slave_heartbeat/heartbeat.go new file mode 100644 index 0000000000..3fe7507813 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/master_slave_heartbeat/heartbeat.go @@ -0,0 +1,152 @@ +package master_slave_heartbeat + +import ( + "context" + "database/sql" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/internal/cst" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + "fmt" + + "github.com/go-sql-driver/mysql" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "golang.org/x/exp/slog" +) + +var ( + name = "master-slave-heartbeat" + checkTable = "master_slave_heartbeat" +) + +// Checker TODO +type Checker struct { + db *sqlx.DB + heartBeatTable string +} + +func (c *Checker) updateHeartbeat() error { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + masterServerId := "" + binlogFormatOld := "" + err := c.db.QueryRow("select @@server_id, @@binlog_format"). 
+		Scan(&masterServerId, &binlogFormatOld)
+	if err != nil {
+		slog.Error("master-slave-heartbeat query server_id, binlog_format", err)
+		return err
+	}
+	slog.Debug(
+		"master-slave-heartbeat",
+		slog.String("server_id", masterServerId),
+		slog.String("binlog_format", binlogFormatOld),
+	)
+
+	// session variables will be set, so take a dedicated connection from the pool
+	conn, err := c.db.DB.Conn(context.Background())
+	if err != nil {
+		slog.Error("master-slave-heartbeat get conn from db", err)
+		return err
+	}
+	defer func() {
+		_ = conn.Close()
+	}()
+
+	binlogSQL := "SET SESSION binlog_format='STATEMENT'"
+	updateSQL := fmt.Sprintf(
+		`UPDATE %s
+       SET master_time=now(),slave_time=sysdate(),delay_sec=timestampdiff(SECOND, now(),sysdate())
+       WHERE slave_server_id=@@server_id and master_server_id= '%s'`,
+		c.heartBeatTable, masterServerId)
+	insertSQL := fmt.Sprintf(
+		`REPLACE INTO %s(master_server_id, slave_server_id, master_time, slave_time, delay_sec)
+       VALUES('%s', @@server_id, now(), sysdate(), timestampdiff(SECOND, now(),sysdate()))`,
+		c.heartBeatTable, masterServerId)
+
+	if _, err := conn.ExecContext(ctx, binlogSQL); err != nil {
+		err := errors.WithMessage(err, "update heartbeat need binlog_format=STATEMENT")
+		slog.Error("master-slave-heartbeat", err)
+		return err
+	}
+
+	res, err := conn.ExecContext(ctx, updateSQL)
+	if err != nil {
+		merr, ok := err.(*mysql.MySQLError)
+		// only a missing table or column is recoverable; returning on anything else
+		// also avoids calling RowsAffected on a nil result below
+		if !ok || (merr.Number != 1146 && merr.Number != 1054) {
+			slog.Error("master-slave-heartbeat update", err)
+			return err
+		}
+		slog.Debug("master-slave-heartbeat table not found") // ERROR 1146 (42S02): table doesn't exist, ERROR 1054 (42S22): unknown column
+		res, err = c.initTableHeartbeat()
+		if err != nil {
+			slog.Error("master-slave-heartbeat init table", err)
+			return err
+		}
+		slog.Debug("master-slave-heartbeat init table success")
+	}
+
+	num, _ := res.RowsAffected()
+	slog.Debug("master-slave-heartbeat", slog.Int64("update rows", num))
+	if num == 0 {
+		if _, err = conn.ExecContext(ctx, insertSQL); err != nil {
+			slog.Error("master-slave-heartbeat insert", err)
+			return err
+		}
+		slog.Debug("master-slave-heartbeat insert success")
+	}
+	/*
+		// normally only a slave needs to update beat_sec, but a repeater needs it too, so the role is simply ignored
+		updateSlave := fmt.Sprintf(
+			`UPDATE %s
+       SET beat_sec = timestampdiff(SECOND, master_time, now()) WHERE slave_server_id=@@server_id and master_server_id='%s'`,
+			c.heartBeatTable, masterServerId)
+		if _, err := conn.ExecContext(ctx, updateSlave); err != nil {
+			slog.Error("master-slave-heartbeat update slave", err)
+			return err
+		}
+	*/
+	slog.Debug("master-slave-heartbeat update slave success")
+	return nil
+}
+
+func (c *Checker) initTableHeartbeat() (sql.Result, error) {
+	dropTable := fmt.Sprintf("DROP TABLE IF EXISTS %s", c.heartBeatTable)
+	_, _ = c.db.Exec(dropTable) // we do not care whether the drop succeeds, only whether the create does
+	createTable := fmt.Sprintf(
+		`CREATE TABLE IF NOT EXISTS %s (
+       master_server_id varchar(40) COMMENT 'server_id that run this update',
+       slave_server_id varchar(40) COMMENT 'slave server_id',
+       master_time varchar(32) COMMENT 'the time on master',
+       slave_time varchar(32) COMMENT 'the time on slave',
+       delay_sec int DEFAULT 0 COMMENT 'the slave delay to master',
+       PRIMARY KEY (master_server_id)
+    ) ENGINE=InnoDB`,
+		c.heartBeatTable,
+	)
+	// beat_sec int DEFAULT 0 COMMENT 'the beat since master heartbeat:timestampdiff(SECOND, master_time, now())',
+	return c.db.Exec(createTable)
+}
+
+// Run TODO
+func (c *Checker) Run() (msg string, err error) {
+	err = c.updateHeartbeat()
+	return "", err
+}
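Why now() vs sysdate() works here: under binlog_format=STATEMENT, now() replays on the slave with the master's statement timestamp, while sysdate() evaluates when the slave actually applies the row, so delay_sec approximates replication lag. A minimal read-side sketch, separate from the patch; the schema-qualified table name comes from cst.DBASchema at runtime, and "infodba_schema" below is only an illustrative placeholder:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// delay_sec was written on the slave as timestampdiff(SECOND, now(), sysdate())
	var delay sql.NullInt64
	err = db.QueryRow(
		`SELECT delay_sec FROM infodba_schema.master_slave_heartbeat
         WHERE slave_server_id = @@server_id`,
	).Scan(&delay)
	if err != nil {
		panic(err)
	}
	fmt.Println("replication delay (s):", delay.Int64)
}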
"", err +} + +// Name TODO +func (c *Checker) Name() string { + return name +} + +// New TODO +func New(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{db: cc.MySqlDB, heartBeatTable: fmt.Sprintf("%s.%s", cst.DBASchema, checkTable)} +} + +// Register TODO +func Register() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return name, New +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/master_slave_heartbeat/master_slave_heartbeat.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/master_slave_heartbeat/master_slave_heartbeat.go new file mode 100644 index 0000000000..6bffa6f89c --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/master_slave_heartbeat/master_slave_heartbeat.go @@ -0,0 +1,2 @@ +// Package master_slave_heartbeat TODO +package master_slave_heartbeat diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_config_diff/mysql_config_diff.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_config_diff/mysql_config_diff.go new file mode 100644 index 0000000000..840a6ee317 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_config_diff/mysql_config_diff.go @@ -0,0 +1,127 @@ +// Package mysql_config_diff TODO +package mysql_config_diff + +import ( + "bytes" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/pkg/errors" + "golang.org/x/exp/slices" + "golang.org/x/exp/slog" +) + +var name = "mysql-config-diff" +var executable string +var importantVariables []string + +func init() { + executable, _ = os.Executable() + importantVariables = []string{ + "init_connect", + } +} + +// Checker TODO +type Checker struct { +} + +// Run TODO +func (c *Checker) Run() (msg string, err error) { + var cnfFile string + if config.MonitorConfig.Port == 3306 { + cnfFile = "/etc/my.cnf" + } else { + cnfFile = fmt.Sprintf("/etc/my.cnf.%d", config.MonitorConfig.Port) + } + + diffCmd := exec.Command( + filepath.Join(filepath.Dir(executable), "pt-config-diff"), + "--no-version-check", + "--json-report", + cnfFile, + fmt.Sprintf( + `h=%s,P=%d,u=%s,p=%s`, + config.MonitorConfig.Ip, + config.MonitorConfig.Port, + config.MonitorConfig.Auth.Mysql.User, + config.MonitorConfig.Auth.Mysql.Password, + ), + ) + + var stdout, stderr bytes.Buffer + diffCmd.Stdout = &stdout + diffCmd.Stderr = &stderr + + err = diffCmd.Run() + if err == nil { + return "", nil + } + + var exitError *exec.ExitError + var ok bool + if exitError, ok = err.(*exec.ExitError); !ok { + slog.Error("compare mysql config", err) + return "", err + } + + if exitError.ExitCode() != 1 { + unexpectErr := errors.Errorf("unexpect error: %s, stderr: %s", err.Error(), stderr.String()) + slog.Error("compare mysql config", unexpectErr) + return "", unexpectErr + } + + diffs := make(map[string]map[string]interface{}) + jerr := json.Unmarshal(stdout.Bytes(), &diffs) + if jerr != nil { + slog.Error("unmarshal variables diffs", err) + return "", jerr + } + + var res []string + for variableName, detail := range diffs { + if slices.Index(importantVariables, variableName) < 0 { + continue + } + + var runtimeValue string + var cnfValue string + for k, v := range detail { + if k == cnfFile { + cnfValue = v.(string) + } else { + runtimeValue = v.(string) + } + } + res = append( + 
diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/connlog_report.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/connlog_report.go
new file mode 100644
index 0000000000..be2da63175
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/connlog_report.go
@@ -0,0 +1,118 @@
+package mysql_connlog
+
+import (
+	"context"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/config"
+	"dbm-services/mysql/db-tools/mysql-monitor/pkg/internal/cst"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/jmoiron/sqlx"
+	"github.com/juju/ratelimit"
+	"golang.org/x/exp/slog"
+)
+
+type connRecord struct {
+	ConnId       int64     `db:"conn_id" json:"conn_id"`
+	ConnTime     time.Time `db:"conn_time" json:"conn_time"`
+	UserName     string    `db:"user_name" json:"user_name"`
+	CurUserName  string    `db:"cur_user_name" json:"cur_user_name"`
+	Ip           string    `db:"ip" json:"ip"`
+	BkBizId      int       `json:"bk_biz_id"`
+	BkCloudId    *int      `json:"bk_cloud_id"`
+	ImmuteDomain string    `json:"immute_domain"`
+	// Ip string `json:"ip"`
+	Port        int     `json:"port"`
+	MachineType string  `json:"machine_type"`
+	Role        *string `json:"role"`
+}
+
+func mysqlConnLogReport(db *sqlx.DB) (string, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout)
+	defer cancel()
+
+	conn, err := db.Connx(ctx)
+	if err != nil {
+		slog.Error("connlog report get conn from db", err)
+		return "", err
+	}
+	defer func() {
+		_ = conn.Close()
+	}()
+
+	// the report drops the old table, so binlog must be disabled here too,
+	// to keep the drop off the master-to-slave replication stream
+	_, err = conn.ExecContext(ctx, `SET SQL_LOG_BIN=0`)
+	if err != nil {
+		slog.Error("disable binlog", err)
+		return "", err
+	}
+	slog.Info("report conn log disable binlog success")
+
+	reportFilePath := filepath.Join(cst.DBAReportBase, "mysql", "conn_log", "report.log")
+	err = os.MkdirAll(filepath.Dir(reportFilePath), 0755)
+	if err != nil {
+		slog.Error("make report dir", err)
+		return "", err
+	}
+	slog.Info("make report dir", slog.String("dir", filepath.Dir(reportFilePath)))
+
+	f, err := os.OpenFile(
+		reportFilePath,
+		os.O_CREATE|os.O_TRUNC|os.O_RDWR,
+		0755,
+	)
+	if err != nil {
+		slog.Error("open conn log report file", err)
+		return "", err
+	}
+	defer func() {
+		_ = f.Close()
+	}()
+	slog.Info("open conn report file", slog.String("file path", f.Name()))
+
+	lf := ratelimit.Writer(f, ratelimit.NewBucketWithRate(float64(speedLimit), speedLimit))
+
+	rows, err := conn.QueryxContext(ctx, fmt.Sprintf(`SELECT * FROM %s.conn_log_old`, cst.DBASchema))
+	if err != nil {
+		slog.Error("connlog report query conn_log_old", err)
+		return "", err
+	}
+	defer func() {
+		_ = rows.Close()
+	}()
+
+	for rows.Next() {
+		cr := connRecord{
+			BkBizId:      config.MonitorConfig.BkBizId,
+			BkCloudId:    config.MonitorConfig.BkCloudID,
+			ImmuteDomain: config.MonitorConfig.ImmuteDomain,
+			// Ip: config.MonitorConfig.Ip,
+			Port:        config.MonitorConfig.Port,
+			MachineType: config.MonitorConfig.MachineType,
+			Role:        config.MonitorConfig.Role,
+		}
+		err := rows.StructScan(&cr)
+		if err != nil {
+			slog.Error("scan conn_log record", err)
+			return "", err
+		}
+
+		content, err := json.Marshal(cr)
+		if err != nil {
+			slog.Error("marshal conn record", err)
+			return "", err
+		}
+
+		_, err = lf.Write(append(content, []byte("\n")...))
+		if err != nil {
+			slog.Error("write conn report", err)
+			return "", err
+		}
+	}
+
+	// use the same session so SQL_LOG_BIN=0 still applies to the drop
+	_, err = conn.ExecContext(ctx, fmt.Sprintf(`DROP TABLE IF EXISTS %s.conn_log_old`, cst.DBASchema))
+	if err != nil {
+		slog.Error("drop conn_log_old", err)
+		return "", err
+	}
+	slog.Info("drop conn_log_old")
+
+	return "", nil
+}
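A standalone sketch of how github.com/juju/ratelimit caps write throughput here: a token bucket refilled at speedLimit bytes per second wraps the report file, so each Write blocks until enough tokens are available. The 10 MiB/s value mirrors speedLimit in this package's init.go; the payload is invented:

package main

import (
	"io"
	"os"

	"github.com/juju/ratelimit"
)

func main() {
	var speedLimit int64 = 10 * 1024 * 1024

	// refill at speedLimit bytes/s with equal burst capacity
	bucket := ratelimit.NewBucketWithRate(float64(speedLimit), speedLimit)
	w := ratelimit.Writer(os.Stdout, bucket)

	_, _ = io.WriteString(w, "rate-limited payload\n")
}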
"", err + } + + content, err := json.Marshal(cr) + if err != nil { + slog.Error("marshal conn record", err) + return "", err + } + + _, err = lf.Write(append(content, []byte("\n")...)) + if err != nil { + slog.Error("write conn report", err) + return "", err + } + } + + _, err = db.ExecContext(ctx, fmt.Sprintf(`DROP TABLE IF EXISTS %s.conn_log_old`, cst.DBASchema)) + if err != nil { + slog.Error("drop conn_log_old", err) + return "", err + } + slog.Info("drop conn_log_old") + + return "", nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/connlog_rotate.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/connlog_rotate.go new file mode 100644 index 0000000000..aa551762d6 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/connlog_rotate.go @@ -0,0 +1,129 @@ +package mysql_connlog + +import ( + "context" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/internal/cst" + "fmt" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "golang.org/x/exp/slog" +) + +// mysqlConnLogRotate TODO +/* +1. binlog 是 session 变量, 所以只需要禁用就行了 +2. 首先禁用了 init_connect, 后续表 rotate 失败会 return, 不会恢复 init_connect. 所以不会影响连接 +*/ +func mysqlConnLogRotate(db *sqlx.DB) (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + conn, err := db.Connx(ctx) + if err != nil { + slog.Error("connlog rotate get conn from db", err) + return "", err + } + defer func() { + _ = conn.Close() + }() + + _, err = conn.ExecContext(ctx, `SET SQL_LOG_BIN=0`) + if err != nil { + slog.Error("disable binlog", err) + return "", err + } + slog.Info("rotate conn log disable binlog success") + + var initConn string + err = conn.QueryRowxContext(ctx, "SELECT @@INIT_CONNECT").Scan(&initConn) + if err != nil { + slog.Error("query init connect", err) + return "", err + } + slog.Info("rotate conn log", slog.String("init connect", initConn)) + + _, err = conn.ExecContext(ctx, `SET @OLD_INIT_CONNECT=@@INIT_CONNECT`) + if err != nil { + slog.Error("save init_connect", err) + return "", err + } + + var oldInitConn string + err = conn.QueryRowxContext(ctx, "SELECT @OLD_INIT_CONNECT").Scan(&oldInitConn) + if err != nil { + slog.Error("query old init connect", err) + return "", err + } + slog.Info("rotate conn log", slog.String("old init connect", oldInitConn)) + + if initConn != oldInitConn { + err = errors.Errorf("save init_connect failed") + slog.Error("check save init connect", err) + return "", err + } + + _, err = conn.ExecContext(ctx, `SET GLOBAL INIT_CONNECT = ''`) + if err != nil { + slog.Error("disable init_connect", err) + return "", err + } + + _, err = conn.ExecContext( + ctx, + fmt.Sprintf( + `DROP TABLE IF EXISTS %s.conn_log_old`, cst.DBASchema, + ), + ) + if err != nil { + slog.Error("drop conn_log_old", err) + return "", err + } + + _, err = conn.ExecContext( + ctx, + fmt.Sprintf( + `RENAME TABLE %[1]s.conn_log to %[1]s.conn_log_old`, + cst.DBASchema, + ), + ) + if err != nil { + slog.Error("rename conn_log", err) + return "", err + } + slog.Info("rotate conn log", "rename conn_log success") + + _, err = conn.ExecContext( + ctx, + fmt.Sprintf( + `CREATE TABLE IF NOT EXISTS %[1]s.conn_log like %[1]s.conn_log_old`, + cst.DBASchema, + ), + ) + if err != nil { + slog.Error("recreate conn_log", err) + return "", err + } + slog.Info("rotate conn log", "recreate conn_log success") + + _, err = 
conn.ExecContext(ctx, `SET GLOBAL INIT_CONNECT = @OLD_INIT_CONNECT`) + if err != nil { + slog.Error("restore init_connect", err) + return "", err + } + initConn = "" + err = conn.QueryRowxContext(ctx, "SELECT @@INIT_CONNECT").Scan(&initConn) + if err != nil { + slog.Error("query init connect", err) + return "", err + } + slog.Info("rotate conn log", slog.String("init connect", initConn)) + if initConn != oldInitConn { + err = errors.Errorf("restore init_connect failed") + slog.Error("check restore init_connect", err) + return "", err + } + + return "", nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/connlog_size.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/connlog_size.go new file mode 100644 index 0000000000..ce0b96f7eb --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/connlog_size.go @@ -0,0 +1,71 @@ +package mysql_connlog + +import ( + "context" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/internal/cst" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + + "github.com/jmoiron/sqlx" + "golang.org/x/exp/slog" +) + +func mysqlConnLogSize(db *sqlx.DB) (string, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + var dataDir string + err := db.QueryRowxContext(ctx, `SELECT @@datadir`).Scan(&dataDir) + if err != nil { + slog.Error("select @@datadir", err) + return "", err + } + + var logSize int64 + + slog.Debug("statistic conn log", slog.String("path", filepath.Join(dataDir, cst.DBASchema))) + err = filepath.WalkDir( + filepath.Join(dataDir, cst.DBASchema), + func(path string, d fs.DirEntry, err error) error { + if err != nil { + slog.Error("statistic conn log size", err, slog.String("path", path)) + return filepath.SkipDir + } + + slog.Debug("statistic conn log size", slog.String("path", path)) + if strings.HasPrefix(filepath.Base(path), "conn_log") { + st, sterr := os.Stat(path) + if sterr != nil { + return filepath.SkipDir + } + if !st.IsDir() { + slog.Debug( + "statistic conn log size", + slog.Any("status", st), + ) + logSize += st.Size() + } + } + return nil + }, + ) + if err != nil { + slog.Error("statistic conn log size", err) + return "", err + } + slog.Info("statistic conn log size", slog.Int64("size", logSize)) + + if logSize >= sizeLimit { + _, err = db.ExecContext(ctx, `SET GLOBAL INIT_CONNECT = ''`) + if err != nil { + slog.Error("disable init_connect", err, slog.Int64("size", logSize)) + return "", err + } + return fmt.Sprintf("too big connlog table size %d", logSize), nil + } + return "", nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/init.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/init.go new file mode 100644 index 0000000000..4e045667e1 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/init.go @@ -0,0 +1,92 @@ +package mysql_connlog + +import ( + "context" + "database/sql" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + + "github.com/jmoiron/sqlx" + "golang.org/x/exp/slog" +) + +var nameMySQLConnLogSize = "mysql-connlog-size" +var nameMySQLConnLogRotate = "mysql-connlog-rotate" +var nameMySQLConnLogReport = "mysql-connlog-report" + +var sizeLimit int64 = 1024 * 1024 * 1024 * 2 +var speedLimit int64 = 1024 * 
1024 * 10 + +// Checker TODO +type Checker struct { + db *sqlx.DB + name string + f func(*sqlx.DB) (string, error) +} + +// Run TODO +func (c *Checker) Run() (msg string, err error) { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + var initConnLog sql.NullString + err = c.db.QueryRowxContext(ctx, `SELECT @@init_connect`).Scan(&initConnLog) + if err != nil { + slog.Error("select @@init_connect", err) + return "", err + } + + if !initConnLog.Valid { + slog.Info("init_connect disabled") + return "", nil + } + + return c.f(c.db) +} + +// Name TODO +func (c *Checker) Name() string { + return c.name +} + +// NewMySQLConnLogSize TODO +func NewMySQLConnLogSize(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{ + db: cc.MySqlDB, + name: nameMySQLConnLogSize, + f: mysqlConnLogSize, + } +} + +// NewMySQLConnLogRotate TODO +func NewMySQLConnLogRotate(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{ + db: cc.MySqlDB, + name: nameMySQLConnLogRotate, + f: mysqlConnLogRotate, + } +} + +// NewMySQLConnLogReport TODO +func NewMySQLConnLogReport(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{ + db: cc.MySqlDB, + name: nameMySQLConnLogReport, + f: mysqlConnLogReport, + } +} + +// RegisterMySQLConnLogSize TODO +func RegisterMySQLConnLogSize() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return nameMySQLConnLogSize, NewMySQLConnLogSize +} + +// RegisterMySQLConnLogRotate TODO +func RegisterMySQLConnLogRotate() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return nameMySQLConnLogRotate, NewMySQLConnLogRotate +} + +// RegisterMySQLConnLogReport TODO +func RegisterMySQLConnLogReport() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return nameMySQLConnLogReport, NewMySQLConnLogReport +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/mysql_connlog.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/mysql_connlog.go new file mode 100644 index 0000000000..dde74b27fe --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_connlog/mysql_connlog.go @@ -0,0 +1,2 @@ +// Package mysql_connlog TODO +package mysql_connlog diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/errlog_snapshot.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/errlog_snapshot.go new file mode 100644 index 0000000000..4f4416d374 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/errlog_snapshot.go @@ -0,0 +1,172 @@ +package mysql_errlog + +import ( + "bufio" + "context" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "os" + "path/filepath" + "strconv" + + "github.com/jmoiron/sqlx" + "golang.org/x/exp/slog" +) + +func snapShot(db *sqlx.DB) error { + slog.Debug("snap shot err log", slog.Bool("scanned", scanned)) + if scanned { + return nil + } + + errLogPath, err := findErrLogFile(db) + if err != nil { + return err + } + slog.Info("snap shot err log", slog.String("path", errLogPath)) + + scanner, offset, err := newScanner(errLogPath) + if err != nil { + return err + } + + regFile, err := os.OpenFile( + errLogRegFile, + os.O_CREATE|os.O_TRUNC|os.O_RDWR, + 0755, + ) + if err != nil { + slog.Error("create reg file", err) + return 
err
+	}
+	defer func() {
+		_ = regFile.Close()
+	}()
+
+	for scanner.Scan() {
+		content := scanner.Bytes()
+		err := scanner.Err()
+		if err != nil {
+			slog.Error("scan err log", err)
+			return err
+		}
+		offset += int64(len(content)) + 1
+
+		startMatch, err := rowStartPattern.MatchString(string(content))
+		if err != nil {
+			slog.Error("apply row start pattern", err)
+			return err
+		}
+
+		baseErrTokenMatch, err := baseErrTokenPattern.MatchString(string(content))
+		if err != nil {
+			slog.Error("apply base error token pattern", err)
+			return err
+		}
+
+		if startMatch && baseErrTokenMatch {
+			_, err = regFile.Write(append(content, []byte("\n")...))
+			if err != nil {
+				slog.Error("write errlog.reg", err)
+				return err
+			}
+		}
+	}
+
+	f, err := os.OpenFile(offsetRegFile, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0755)
+	if err != nil {
+		slog.Error("open offset reg", err)
+		return err
+	}
+	defer func() {
+		_ = f.Close()
+	}()
+	_, err = f.WriteString(strconv.FormatInt(offset, 10))
+	if err != nil {
+		slog.Error("update offset reg", err)
+		return err
+	}
+
+	scanned = true
+	return nil
+}
+
+func loadSnapShot() (*bufio.Scanner, error) {
+	f, err := os.Open(errLogRegFile)
+	if err != nil {
+		slog.Error("open err log reg", err)
+		return nil, err
+	}
+
+	return bufio.NewScanner(f), nil
+}
+
+func findErrLogFile(db *sqlx.DB) (string, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout)
+	defer cancel()
+
+	var errLogPath, dataDir string
+	err := db.QueryRowxContext(ctx, `SELECT @@LOG_ERROR, @@DATADIR`).Scan(&errLogPath, &dataDir)
+	if err != nil {
+		slog.Error("query log_error, datadir", err)
+		return "", err
+	}
+
+	if !filepath.IsAbs(errLogPath) {
+		errLogPath = filepath.Join(dataDir, errLogPath)
+	}
+	return errLogPath, nil
+}
+
+func newScanner(logPath string) (*bufio.Scanner, int64, error) {
+	f, err := os.Open(logPath)
+	if err != nil {
+		slog.Error("open err log", err)
+		return nil, 0, err
+	}
+
+	st, err := f.Stat()
+	if err != nil {
+		slog.Error("stat of err log", err)
+		return nil, 0, err
+	}
+	errLogSize := st.Size()
+	slog.Debug("snap shot err log", slog.Int64("err log size", errLogSize))
+
+	lastOffset, err := lastRoundOffset()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	slog.Debug("snap shot err log", slog.Int64("last offset", lastOffset))
+
+	// the err log has probably been rotated
+	if errLogSize < lastOffset {
+		lastOffset = 0
+	}
+
+	if errLogSize-lastOffset > maxScanSize {
+		lastOffset = errLogSize - maxScanSize - 1
+	}
+
+	offset, err := f.Seek(lastOffset, 0)
+	if err != nil {
+		slog.Error("seek err log", err)
+		return nil, 0, err
+	}
+
+	return bufio.NewScanner(f), offset, nil
+}
+
+func lastRoundOffset() (int64, error) {
+	content, err := os.ReadFile(offsetRegFile)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return 0, nil
+		}
+		slog.Error("read offset reg", err, slog.String("file", offsetRegFile))
+		return 0, err
+	}
+
+	r, err := strconv.ParseInt(string(content), 10, 64)
+	if err != nil {
+		slog.Error("parse last offset", err)
+		return 0, err
+	}
+
+	return r, nil
+}
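The offset register above implements a simple incremental-scan pattern: persist how far the last run read, seek there next time, and start over if the file shrank (rotation). A standalone sketch, with invented /tmp paths:

package main

import (
	"fmt"
	"os"
	"strconv"
)

func nextStart(logPath, offsetPath string) (int64, error) {
	var last int64
	if content, err := os.ReadFile(offsetPath); err == nil {
		last, _ = strconv.ParseInt(string(content), 10, 64)
	} else if !os.IsNotExist(err) {
		return 0, err
	}

	st, err := os.Stat(logPath)
	if err != nil {
		return 0, err
	}
	if st.Size() < last { // file shrank: assume rotation, rescan from the start
		last = 0
	}
	return last, nil
}

func main() {
	off, err := nextStart("/tmp/demo.log", "/tmp/demo_offset.reg")
	fmt.Println(off, err)
}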
err log", slog.String("name", name), slog.String("pattern", pattern.String())) + scanner, err := loadSnapShot() + if err != nil { + return "", err + } + + var lines []string + for scanner.Scan() { + line := scanner.Text() + err := scanner.Err() + if err != nil { + slog.Error("scan err log", err, slog.String("item", name)) + return "", err + } + slog.Debug("scan err log", slog.String("line", line)) + + match, err := pattern.MatchString(line) + if err != nil { + slog.Error( + "apply pattern", err, + slog.String("item", name), slog.String("pattern", pattern.String()), + ) + } + slog.Debug("scan err log", slog.Any("match", match)) + + if match { + lines = append(lines, line) + } + } + + return strings.Join(lines, "\n"), nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/init.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/init.go new file mode 100644 index 0000000000..43099716b6 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/init.go @@ -0,0 +1,191 @@ +package mysql_errlog + +import ( + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/dlclark/regexp2" + "github.com/jmoiron/sqlx" + "golang.org/x/exp/slog" +) + +var executable string +var maxScanSize int64 = 50 * 1024 * 1024 +var offsetRegFile string +var errLogRegFile string +var scanned bool + +var rowStartPattern *regexp2.Regexp +var baseErrTokenPattern *regexp2.Regexp + +var nameMySQLErrNotice = "mysql-err-notice" +var nameMySQLErrCritical = "mysql-err-critical" +var nameSpiderErrNotice = "spider-err-notice" +var nameSpiderErrWarn = "spider-err-warn" +var nameSpiderErrCritical = "spider-err-critical" + +var mysqlNoticePattern *regexp2.Regexp +var mysqlCriticalExcludePattern *regexp2.Regexp +var spiderNoticePattern *regexp2.Regexp +var spiderWarnPattern *regexp2.Regexp +var spiderCriticalPattern *regexp2.Regexp + +func init() { + executable, _ = os.Executable() + offsetRegFile = filepath.Join(filepath.Dir(executable), "errlog_offset.reg") + errLogRegFile = filepath.Join(filepath.Dir(executable), "errlog.reg") + + now := time.Now() + rowStartPattern = regexp2.MustCompile( + fmt.Sprintf( + `^(?=(?:(%s|%s|%s)))`, + now.Format("060102"), + now.Format("20060102"), + now.Format("2006-01-02"), + ), + regexp2.None, + ) + + baseErrTokenPattern = regexp2.MustCompile( + fmt.Sprintf( + `(?=(?:(%s)))`, + strings.Join( + []string{"error", "warn", "fail", "restarted", "hanging", "locked"}, + "|", + ), + ), + regexp2.IgnoreCase, + ) + + spiderNoticePattern = regexp2.MustCompile( + fmt.Sprintf( + `(?=(?:(%s)))`, + strings.Join( + []string{"Got error 12701", "Got error 1159"}, + "|", + ), + ), + regexp2.IgnoreCase, + ) + + spiderWarnPattern = regexp2.MustCompile( + fmt.Sprintf( + `(?=(?:(%s)))`, + strings.Join( + []string{"ERROR SPIDER RESULT", "Got error 1317", "Got error 1146"}, + "|", + ), + ), + regexp2.IgnoreCase, + ) + + spiderCriticalPattern = regexp2.MustCompile( + fmt.Sprintf( + `(?=(?:(%s)))`, + strings.Join( + []string{"2014 Commands out of sync", "Table has no partition"}, + "|", + ), + ), + regexp2.IgnoreCase, + ) + + scanned = false +} + +// Checker TODO +type Checker struct { + db *sqlx.DB + name string + f func() (string, error) +} + +// Run TODO +func (c *Checker) Run() (msg string, err error) { + err = snapShot(c.db) + if err != nil { + slog.Error(c.name, err) + return "", err + } + + return c.f() +} + +// Name TODO +func (c *Checker) 
Name() string { + return c.name +} + +// NewMySQLErrNotice TODO +func NewMySQLErrNotice(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{ + db: cc.MySqlDB, + name: nameMySQLErrNotice, + f: mysqlNotice, + } +} + +// NewMySQLErrCritical TODO +func NewMySQLErrCritical(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{ + db: cc.MySqlDB, + name: nameMySQLErrCritical, + f: mysqlCritical, + } +} + +// NewSpiderErrNotice TODO +func NewSpiderErrNotice(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{ + db: cc.MySqlDB, + name: nameSpiderErrNotice, + f: spiderNotice, + } +} + +// NewSpiderErrWarn TODO +func NewSpiderErrWarn(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{ + db: cc.MySqlDB, + name: nameSpiderErrWarn, + f: spiderWarn, + } +} + +// NewSpiderErrCritical TODO +func NewSpiderErrCritical(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{ + db: cc.MySqlDB, + name: nameSpiderErrCritical, + f: spiderCritical, + } +} + +// RegisterMySQLErrNotice TODO +func RegisterMySQLErrNotice() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return nameMySQLErrNotice, NewMySQLErrNotice +} + +// RegisterMySQLErrCritical TODO +func RegisterMySQLErrCritical() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return nameMySQLErrCritical, NewMySQLErrCritical +} + +// RegisterSpiderErrNotice TODO +func RegisterSpiderErrNotice() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return nameSpiderErrNotice, NewSpiderErrNotice +} + +// RegisterSpiderErrWarn TODO +func RegisterSpiderErrWarn() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return nameSpiderErrWarn, NewSpiderErrWarn +} + +// RegisterSpiderErrCritical TODO +func RegisterSpiderErrCritical() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return nameSpiderErrCritical, NewSpiderErrCritical +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/mysql_critical.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/mysql_critical.go new file mode 100644 index 0000000000..9a7ddd8338 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/mysql_critical.go @@ -0,0 +1,32 @@ +package mysql_errlog + +import ( + "fmt" + "strings" + + "github.com/dlclark/regexp2" +) + +func init() { + mysqlCriticalExcludePattern = regexp2.MustCompile( + fmt.Sprintf( + `^(?!.*(?:(%s)))`, + strings.Join( + []string{ + "checkpoint", + "server_errno=2013", + "sort aborted", + "restarting transaction", + "slave SQL thread was killed", + `\[Warning\]`, + }, + "|", + ), + ), + regexp2.IgnoreCase, + ) +} + +func mysqlCritical() (string, error) { + return scanSnapShot(nameMySQLErrCritical, mysqlCriticalExcludePattern) +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/mysql_errlog.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/mysql_errlog.go new file mode 100644 index 0000000000..ee01232b4e --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/mysql_errlog.go @@ -0,0 +1,2 @@ +// Package mysql_errlog TODO +package mysql_errlog diff --git 
a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/mysql_notice.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/mysql_notice.go new file mode 100644 index 0000000000..55302c55f0 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/mysql_notice.go @@ -0,0 +1,26 @@ +package mysql_errlog + +import ( + "fmt" + "strings" + + "github.com/dlclark/regexp2" +) + +func init() { + mysqlNoticePattern = regexp2.MustCompile( + fmt.Sprintf( + `(?=(?:(%s)))`, + strings.Join( + []string{"slave SQL thread was killed"}, + "|", + ), + ), + regexp2.IgnoreCase, + ) + +} + +func mysqlNotice() (string, error) { + return scanSnapShot(nameMySQLErrNotice, mysqlNoticePattern) +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/spider_critical.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/spider_critical.go new file mode 100644 index 0000000000..e2ff9af416 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/spider_critical.go @@ -0,0 +1,5 @@ +package mysql_errlog + +func spiderCritical() (string, error) { + return scanSnapShot(nameSpiderErrCritical, spiderCriticalPattern) +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/spider_notice.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/spider_notice.go new file mode 100644 index 0000000000..45c2091866 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/spider_notice.go @@ -0,0 +1,5 @@ +package mysql_errlog + +func spiderNotice() (string, error) { + return scanSnapShot(nameSpiderErrNotice, spiderNoticePattern) +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/spider_warn.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/spider_warn.go new file mode 100644 index 0000000000..fa49a0c96f --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_errlog/spider_warn.go @@ -0,0 +1,5 @@ +package mysql_errlog + +func spiderWarn() (string, error) { + return scanSnapShot(nameSpiderErrWarn, spiderWarnPattern) +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/init.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/init.go new file mode 100644 index 0000000000..c7d4d5619e --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/init.go @@ -0,0 +1,67 @@ +package mysql_processlist + +import ( + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + "os" + + "github.com/jmoiron/sqlx" +) + +var stored = false +var executable string + +var nameMySQLLock = "mysql-lock" +var nameMySQLInject = "mysql-inject" + +func init() { + executable, _ = os.Executable() +} + +// Checker TODO +type Checker struct { + db *sqlx.DB + name string + f func() (string, error) +} + +// Run TODO +func (c *Checker) Run() (msg string, err error) { + err = snapShot(c.db) + if err != nil { + return "", err + } + return c.f() +} + +// Name TODO +func (c *Checker) Name() string { + return c.name +} + +// NewMySQLLock TODO +func NewMySQLLock(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{ + db: cc.MySqlDB, + name: nameMySQLLock, + f: mysqlLock, + } +} + +// NewMySQLInject TODO +func NewMySQLInject(cc 
*monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface {
+	return &Checker{
+		db:   cc.MySqlDB,
+		name: nameMySQLInject,
+		f:    mysqlInject,
+	}
+}
+
+// RegisterMySQLLock TODO
+func RegisterMySQLLock() (string, monitor_item_interface.MonitorItemConstructorFuncType) {
+	return nameMySQLLock, NewMySQLLock
+}
+
+// RegisterMySQLInject TODO
+func RegisterMySQLInject() (string, monitor_item_interface.MonitorItemConstructorFuncType) {
+	return nameMySQLInject, NewMySQLInject
+}
diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/mysql_inject.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/mysql_inject.go
new file mode 100644
index 0000000000..f7154f1127
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/mysql_inject.go
@@ -0,0 +1,74 @@
+package mysql_processlist
+
+import (
+	"strings"
+
+	"github.com/dlclark/regexp2"
+	"golang.org/x/exp/slog"
+)
+
+func mysqlInject() (string, error) {
+	processList, err := loadSnapShot()
+	if err != nil {
+		return "", err
+	}
+
+	var injects []string
+	for _, p := range processList {
+		pstr, err := p.JsonString()
+		if err != nil {
+			return "", err
+		}
+
+		slog.Debug("mysql inject check process", slog.Any("process", pstr))
+
+		if strings.ToLower(p.User.String) == "system user" {
+			continue
+		}
+
+		hasSleep, err := hasLongUserSleep(p)
+		if err != nil {
+			return "", err
+		}
+		slog.Debug("mysql inject check process", slog.Bool("has user sleep", hasSleep))
+
+		isLongSleep := hasSleep && p.Time.Int64 > 300
+		slog.Debug("mysql inject check process", slog.Bool("is long sleep", isLongSleep))
+
+		hasComment, err := hasCommentInQuery(p)
+		if err != nil {
+			return "", err
+		}
+		slog.Debug("mysql inject check process", slog.Bool("has inline comment", hasComment))
+
+		if isLongSleep || hasComment {
+			injects = append(injects, pstr)
+		}
+	}
+	return strings.Join(injects, ","), nil
+}
+
+func hasLongUserSleep(p *mysqlProcess) (bool, error) {
+	re := regexp2.MustCompile(`User sleep`, regexp2.IgnoreCase)
+	match, err := re.MatchString(p.State.String)
+	if err != nil {
+		slog.Error("check long user sleep", err)
+		return false, err
+	}
+
+	return match, nil
+}
+
+func hasCommentInQuery(p *mysqlProcess) (bool, error) {
+	re := regexp2.MustCompile(`\s+#`, regexp2.IgnoreCase)
+	// the SQL text lives in Info; Command only holds values like "Query" or "Sleep"
+	match, err := re.MatchString(p.Info.String)
+	if err != nil {
+		slog.Error("check comment in query", err)
+		return false, err
+	}
+
+	return match &&
+			(strings.HasPrefix(strings.ToLower(p.Info.String), "update") ||
+				strings.HasPrefix(strings.ToLower(p.Info.String), "delete")),
+		nil
+}
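To make the two inject predicates above concrete, a standalone sketch applying them to a hand-made processlist row; the row values are invented, and the stdlib regexp stands in for regexp2 since these patterns need no lookarounds:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

type row struct {
	State string
	Info  string
	Time  int64
}

func main() {
	p := row{State: "User sleep", Info: "update t set a=1 where id=1 #", Time: 600}

	// predicate 1: a long "User sleep" state (> 300s)
	longSleep := regexp.MustCompile(`(?i)user sleep`).MatchString(p.State) && p.Time > 300

	// predicate 2: an inline '#' comment in an UPDATE/DELETE statement
	comment := regexp.MustCompile(`\s+#`).MatchString(p.Info) &&
		(strings.HasPrefix(strings.ToLower(p.Info), "update") ||
			strings.HasPrefix(strings.ToLower(p.Info), "delete"))

	fmt.Println(longSleep || comment) // true: flagged as a suspicious query
}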
++------+------+-------------------+------+---------+------+----------+------------------+ +perl 版监控的一些逻辑 +1. 会抓取 FLUSH TABLE WITH READ LOCK 操作然后干掉, 不应该这样 +2. Status 包含 lock 时, 忽略掉 Status == 'System lock' && Command =~ 'LOAD DATA' | '^BINLOG' +3. 接上面, 夜间锁表评估更加宽容, Time < 300 不告警 +*/ + +func mysqlLock() (string, error) { + processList, err := loadSnapShot() + if err != nil { + return "", err + } + + var locks []string + for _, p := range processList { + pstr, err := p.JsonString() + if err != nil { + return "", err + } + + slog.Debug("mysql lock check process", slog.Any("process", pstr)) + + if strings.ToLower(p.User.String) == "system user" { + continue + } + + hasLongWait, err := hasLongWaitingForTableFlush(p) + if err != nil { + return "", err + } + slog.Debug("mysql lock check process", slog.Bool("has long wait for table flush", hasLongWait)) + + hasNormal, err := hasNormalLock(p) + if err != nil { + return "", err + } + slog.Debug("mysql lock check process", slog.Bool("has normal lock", hasNormal)) + + if hasLongWait || hasNormal { + locks = append(locks, pstr) + } + } + return strings.Join(locks, ","), nil +} + +func hasLongWaitingForTableFlush(p *mysqlProcess) (bool, error) { + return p.Time.Int64 > 60 && + strings.Contains(strings.ToLower(p.State.String), "waiting for table flush"), + nil +} + +func hasNormalLock(p *mysqlProcess) (bool, error) { + reLockPattern := regexp2.MustCompile(`lock`, regexp2.IgnoreCase) + match, err := reLockPattern.MatchString(p.State.String) + if err != nil { + slog.Error("apply lock pattern", err) + return false, err + } + if !match { + return false, nil + } + + reSystemLockPattern := regexp2.MustCompile(`system lock`, regexp2.IgnoreCase) + match, err = reSystemLockPattern.MatchString(p.State.String) + if err != nil { + slog.Error("apply system lock pattern", err) + return false, err + } + + slog.Debug("check normal lock", slog.Bool("match status lock", match)) + if match { + return false, nil + } + + // reExcludeCommands := regexp2.MustCompile(`^(?!.*(?:(^binlog|load data)))`, regexp2.IgnoreCase) + reExcludeSql := regexp2.MustCompile(`(?=(?:(^binlog|load data)))`, regexp2.IgnoreCase) + match, err = reExcludeSql.MatchString(p.Info.String) + if err != nil { + slog.Error("apply exclude commands pattern", err) + return false, err + } + + slog.Debug("check normal lock", slog.Bool("exclude command binlog|load data", match)) + if match { + return false, nil + } + + now := time.Now() + if now.Hour() >= 21 && now.Hour() < 9 { + if p.Time.Int64 > 300 { + return true, nil + } + } else { + if p.Time.Int64 > 5 { + return true, nil + } + } + return false, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/mysql_processlist.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/mysql_processlist.go new file mode 100644 index 0000000000..fc67105529 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/mysql_processlist.go @@ -0,0 +1,2 @@ +// Package mysql_processlist TODO +package mysql_processlist diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/processlist_snapshot.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/processlist_snapshot.go new file mode 100644 index 0000000000..9027b94b58 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/mysql_processlist/processlist_snapshot.go @@ -0,0 +1,148 @@ +package mysql_processlist + +import ( + "context" + 
"database/sql" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "encoding/json" + "os" + "path/filepath" + + "github.com/jmoiron/sqlx" + "golang.org/x/exp/slog" +) + +type mysqlProcess struct { + Id sql.NullInt64 `db:"ID" json:"id"` + User sql.NullString `db:"USER" json:"user"` + Host sql.NullString `db:"HOST" json:"host"` + Db sql.NullString `db:"DB" json:"db"` + Command sql.NullString `db:"COMMAND" json:"command"` + Time sql.NullInt64 `db:"TIME" json:"time"` + State sql.NullString `db:"STATE" json:"state"` + Info sql.NullString `db:"INFO" json:"info"` + // RowsSent sql.NullInt64 `db:"Rows_sent" json:"rows_sent"` + // RowsExamined sql.NullInt64 `db:"Rows_examined" json:"rows_examined"` +} + +// JsonString TODO +func (c *mysqlProcess) JsonString() (string, error) { + content, err := json.Marshal( + struct { + Id int64 `json:"id"` + User string `json:"user"` + Host string `json:"host"` + Db string `json:"db"` + Command string `json:"command"` + Time int64 `json:"time"` + State string `json:"state"` + Info string `json:"info"` + // RowsSent int64 `json:"rows_sent"` + // RowsExamined int64 `json:"rows_examined"` + }{ + Id: c.Id.Int64, + User: c.User.String, + Host: c.Host.String, + Db: c.Db.String, + Command: c.Command.String, + Time: c.Time.Int64, + State: c.State.String, + Info: c.Info.String, + // RowsSent: c.RowsSent.Int64, + // RowsExamined: c.RowsExamined.Int64, + }, + ) + + if err != nil { + slog.Error("marshal process list", err) + return "", err + } + + return string(content), nil +} + +func snapShot(db *sqlx.DB) error { + if stored { + return nil + } + + processList, err := queryProcessList(db) + if err != nil { + return err + } + + f, err := os.OpenFile( + filepath.Join(filepath.Dir(executable), "processlist.reg"), + os.O_CREATE|os.O_TRUNC|os.O_RDWR, + 0755, + ) + if err != nil { + slog.Error("create processlist.reg", err) + return err + } + + content, err := json.Marshal(processList) + if err != nil { + slog.Error("marshal processlist", err) + return err + } + + _, err = f.Write(content) + if err != nil { + slog.Error("write processlist.reg", err) + return err + } + + stored = true + return nil +} + +func loadSnapShot() ([]*mysqlProcess, error) { + content, err := os.ReadFile( + filepath.Join( + filepath.Dir(executable), "processlist.reg", + ), + ) + if err != nil { + slog.Error("read processlist.reg", err) + return nil, err + } + + var res []*mysqlProcess + err = json.Unmarshal(content, &res) + if err != nil { + slog.Error("unmarshal processlist", err) + return nil, err + } + + return res, nil +} + +func queryProcessList(db *sqlx.DB) ([]mysqlProcess, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + rows, err := db.QueryxContext( + ctx, + `SELECT ID, USER, HOST, DB, COMMAND, TIME, STATE, INFO FROM INFORMATION_SCHEMA.PROCESSLIST`) + if err != nil { + slog.Error("show full processlist", err) + return nil, err + } + defer func() { + _ = rows.Close() + }() + + var res []mysqlProcess + for rows.Next() { + p := mysqlProcess{} + err := rows.StructScan(&p) + if err != nil { + slog.Error("scan processlist", err) + return nil, err + } + res = append(res, p) + } + + return res, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/proxy_backend/proxy_backend.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/proxy_backend/proxy_backend.go new file mode 100644 index 0000000000..a715422184 --- /dev/null +++ 
b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/proxy_backend/proxy_backend.go @@ -0,0 +1,109 @@ +// Package proxy_backend TODO +package proxy_backend + +import ( + "bufio" + "context" + "database/sql" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "golang.org/x/exp/slog" +) + +var name = "proxy-backend" + +// Checker TODO +type Checker struct { + db *sqlx.DB +} + +// Run TODO +func (c *Checker) Run() (msg string, err error) { + cnfPath := filepath.Join("/etc", fmt.Sprintf(`proxy.cnf.%d`, config.MonitorConfig.Port)) + f, err := os.Open(cnfPath) + if err != nil { + slog.Error("open proxy cnf file", err) + return "", err + } + defer func() { + _ = f.Close() + }() + + var backendLine string + pattern := regexp.MustCompile(`^proxy-backend-addresses.*`) + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + if err := scanner.Err(); err != nil { + slog.Error("scan proxy cnf file", err) + return "", err + } + + if pattern.MatchString(line) { + backendLine = strings.TrimSpace(line) + break + } + } + + if backendLine == "" { + err := errors.Errorf("proxy-backend-addresses not found in cnf") + slog.Error("find backend in cnf", err) + return "", nil + } + + splitPattern := regexp.MustCompile(`\s*=\s*`) + splitLine := splitPattern.Split(backendLine, -1) + if len(splitLine) != 2 { + err := errors.Errorf("invalid config: %s", backendLine) + slog.Error("split proxy-backend-addresses", err) + return "", nil + } + + backendAddr := splitLine[1] + + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + var backendInfo struct { + Ndx sql.NullInt32 `db:"backend_ndx"` + Address sql.NullString `db:"address"` + Stat sql.NullString `db:"state"` + Type sql.NullString `db:"type"` + Uuid sql.NullString `db:"uuid"` + ClientCount sql.NullInt32 `db:"connected_clients"` + } + err = c.db.QueryRowxContext(ctx, `SELECT * FROM BACKENDS`).StructScan(&backendInfo) + if err != nil { + slog.Error("query backends", err) + return "", err + } + + if backendAddr == "" || !backendInfo.Address.Valid || backendAddr != backendInfo.Address.String { + msg = fmt.Sprintf("cnf.backend=%s, mem.backend=%s", backendAddr, backendInfo.Address.String) + } + + return msg, nil +} + +// Name TODO +func (c *Checker) Name() string { + return name +} + +// New TODO +func New(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{db: cc.ProxyAdminDB} +} + +// Register TODO +func Register() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return name, New +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/proxy_user_list/proxy_user_list.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/proxy_user_list/proxy_user_list.go new file mode 100644 index 0000000000..58c83fd96a --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/proxy_user_list/proxy_user_list.go @@ -0,0 +1,128 @@ +// Package proxy_user_list TODO +package proxy_user_list + +import ( + "bufio" + "context" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/jmoiron/sqlx" + "golang.org/x/exp/slog" +) + +var name = 
"proxy-user-list" + +// Checker TODO +type Checker struct { + db *sqlx.DB +} + +// Run TODO +func (c *Checker) Run() (msg string, err error) { + userListFilePath := filepath.Join( + "/etc", + fmt.Sprintf(`proxy_user.cnf.%d`, config.MonitorConfig.Port), + ) + f, err := os.Open(userListFilePath) + if err != nil { + slog.Error("read proxy user list file", err) + return "", err + } + defer func() { + _ = f.Close() + }() + + var usersFromFile []string + scanner := bufio.NewScanner(f) + for scanner.Scan() { + usersFromFile = append(usersFromFile, scanner.Text()) + err := scanner.Err() + if err != nil { + slog.Error("scan proxy user list file", err) + return "", err + } + } + + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + var usersFromQuery []string + err = c.db.SelectContext(ctx, &usersFromQuery, `SELECT * FROM USERS`) + if err != nil { + slog.Error("query user list", err) + return "", err + } + + /* + 这种比较两个 slice, 并抽取各自独有的 element 的算法会更快, 理论上是 O(2(m+n)) + 如果用传统的以一个 slice 做循环, 查找另一个 slice, 理论上是 O(mlog(n)) + + BenchmarkMapIter-6 1000000000 0.001124 ns/op 0 B/op 0 allocs/op + BenchmarkMapIter-6 1000000000 0.001228 ns/op 0 B/op 0 allocs/op + BenchmarkMapIter-6 1000000000 0.001189 ns/op 0 B/op 0 allocs/op + BenchmarkMapIter-6 1000000000 0.002286 ns/op 0 B/op 0 allocs/op + BenchmarkMapIter-6 1000000000 0.001106 ns/op 0 B/op 0 allocs/op + BenchmarkNormalIter-6 1000000000 0.1990 ns/op 0 B/op 0 allocs/op + BenchmarkNormalIter-6 1000000000 0.1922 ns/op 0 B/op 0 allocs/op + BenchmarkNormalIter-6 1000000000 0.1985 ns/op 0 B/op 0 allocs/op + BenchmarkNormalIter-6 1000000000 0.1944 ns/op 0 B/op 0 allocs/op + BenchmarkNormalIter-6 1000000000 0.1928 ns/op 0 B/op 0 allocs/op + + 测试性能差别还是蛮大的 + */ + stage := make(map[string]int) + for _, u := range usersFromFile { + stage[u] = 1 + } + for _, u := range usersFromQuery { + if _, ok := stage[u]; !ok { + stage[u] = 0 + } + stage[u] -= 1 + } + + var onlyFile, onlyQuery []string + for k, v := range stage { + if v == 1 { + onlyFile = append(onlyFile, k) + } + if v == -1 { + onlyQuery = append(onlyQuery, k) + } + } + + var msgs []string + var onlyFileMsg string + var onlyQueryMsg string + + if len(onlyFile) > 0 { + onlyFileMsg = fmt.Sprintf("user only in file: %s", strings.Join(onlyFile, ",")) + msgs = append(msgs, onlyFileMsg) + } + if len(onlyQuery) > 0 { + onlyQueryMsg = fmt.Sprintf("user only in mem: %s", strings.Join(onlyQuery, ",")) + msgs = append(msgs, onlyQueryMsg) + } + + return strings.Join(msgs, "\n"), nil +} + +// Name TODO +func (c *Checker) Name() string { + return name +} + +// New TODO +func New(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Checker{db: cc.ProxyAdminDB} +} + +// Register TODO +func Register() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return name, New +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/rotate_slowlog/roate_slowlog.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/rotate_slowlog/roate_slowlog.go new file mode 100644 index 0000000000..5f25255076 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/rotate_slowlog/roate_slowlog.go @@ -0,0 +1,142 @@ +package rotate_slowlog + +import ( + "bytes" + "context" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" + + 
"github.com/jmoiron/sqlx" + "golang.org/x/exp/slog" +) + +var name = "rotate-slowlog" + +// Dummy TODO +type Dummy struct { + db *sqlx.DB +} + +/* +perl 版本中执行了一系列的操作 + my @sqls = ( + qq{select \@\@global.slow_query_log into \@sq_log_save}, + qq{set global slow_query_log=off}, + qq{select sleep(2)}, + qq{FLUSH SLOW LOGS}, + qq{select sleep(3)}, + qq{set global slow_query_log=\@sq_log_save}, + ); + +但是似乎只需要 FLUSH SLOW LOGS +*/ + +// Run TODO +func (d *Dummy) Run() (msg string, err error) { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + var slowLogPath string + var slowLogOn bool + err = d.db.QueryRowxContext( + ctx, + `SELECT @@slow_query_log, @@slow_query_log_file`, + ).Scan(&slowLogOn, &slowLogPath) + if err != nil { + slog.Error("query slow_query_log, slow_query_log_file", err) + return "", err + } + slog.Info( + "rotate slow log", + slog.Bool("slow_query_log", slowLogOn), + slog.String("slow_query_log_file", slowLogPath), + ) + + if !slowLogOn { + return "", nil + } + + slowLogDir := filepath.Dir(slowLogPath) + slowLogFile := filepath.Base(slowLogPath) + + historySlowLogFilePath := filepath.Join( + slowLogDir, + fmt.Sprintf("%s.%d", slowLogFile, time.Now().Weekday()), + ) + + /* + 1. 文件不存在, st == nil, err != nil && os.IsNotExist(err) == true + 2. 文件存在, st != nil, err == nil + */ + st, err := os.Stat(historySlowLogFilePath) + if err != nil { + if !os.IsNotExist(err) { // 文件存在 + slog.Error("get history slow log file stat", err, slog.String("history file path", historySlowLogFilePath)) + return "", nil + } + // 文件不存在 + } else { + // 3 天只是为了方便, 实际控制的是 1 周 rotate 1 次 + // 短时间连续执行不会重复 rotate + if time.Now().Sub(st.ModTime()) < 3*24*time.Hour { + slog.Info( + "rotate slow log skip too frequency call", + slog.Time("now", time.Now()), + slog.Time("history file mod time", st.ModTime()), + slog.String("history file", historySlowLogFilePath), + ) + return "", nil + } + } + + mvCmd := exec.Command( + "mv", + slowLogPath, + historySlowLogFilePath, + ) + + var stderr bytes.Buffer + mvCmd.Stderr = &stderr + err = mvCmd.Run() + if err != nil { + slog.Error("mv slow log", err, slog.String("stderr", stderr.String())) + return "", err + } + + touchCmd := exec.Command("touch", slowLogPath) + stderr.Reset() + touchCmd.Stderr = &stderr + err = touchCmd.Run() + if err != nil { + slog.Error("touch slow log", err, slog.String("stderr", stderr.String())) + return "", err + } + + _, err = d.db.ExecContext(ctx, `FLUSH SLOW LOGS`) + if err != nil { + slog.Error("flush slow logs", err) + return "", err + } + + return "", nil +} + +// Name TODO +func (d *Dummy) Name() string { + return name +} + +// NewRotateSlowLog TODO +func NewRotateSlowLog(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &Dummy{db: cc.MySqlDB} +} + +// RegisterRotateSlowLog TODO +func RegisterRotateSlowLog() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return name, NewRotateSlowLog +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/rotate_slowlog/rotate_slowlog.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/rotate_slowlog/rotate_slowlog.go new file mode 100644 index 0000000000..b9163809c5 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/rotate_slowlog/rotate_slowlog.go @@ -0,0 +1,2 @@ +// Package rotate_slowlog TODO +package rotate_slowlog diff --git 
diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/slave_status/ctl_replicate.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/slave_status/ctl_replicate.go new file mode 100644 index 0000000000..64a09d19f7 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect/slave_status/ctl_replicate.go @@ -0,0 +1,87 @@ +package slave_status + +import ( + "context" + "database/sql" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + "fmt" + + "github.com/pkg/errors" + "golang.org/x/exp/slog" +) + +var ctlReplicateName = "ctl-replicate" + +type ctlReplicateChecker struct { + slaveStatusChecker +} + +// Run TODO +func (c *ctlReplicateChecker) Run() (msg string, err error) { + isPrimary, err := c.isPrimary() + if err != nil { + return "", err + } + + if !isPrimary { + return "", nil + } + + err = c.fetchSlaveStatus() + if err != nil { + return "", err + } + + if c.slaveStatus == nil || len(c.slaveStatus) == 0 { + return "empty slave status", nil + } + + if !c.isOk() { + slaveErr, err := c.collectError() + if err != nil { + return "", err + } + return fmt.Sprintf("IO/SQL thread not running: %s", slaveErr), nil + } + + return "", nil +} + +func (c *ctlReplicateChecker) isPrimary() (bool, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + var tcIsPrimary sql.NullInt32 + err := c.db.GetContext(ctx, &tcIsPrimary, `SELECT @@tc_is_primary`) + if err != nil { + slog.Error("select @@tc_is_primary", err) + return false, err + } + + if !tcIsPrimary.Valid { + err := errors.Errorf("invalid tc_is_primary: %v", tcIsPrimary) + slog.Error("select @@tc_is_primary", err) + return false, err + } + + return tcIsPrimary.Int32 == 1, nil +} + +// Name TODO +func (c *ctlReplicateChecker) Name() string { + return ctlReplicateName +} + +// NewCtlReplicateChecker TODO +func NewCtlReplicateChecker(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &ctlReplicateChecker{slaveStatusChecker{ + db: cc.CtlDB, + slaveStatus: make(map[string]interface{}), + }} +} + +// RegisterCtlReplicateChecker TODO +func RegisterCtlReplicateChecker() (string, monitor_item_interface.MonitorItemConstructorFuncType) {
 + return ctlReplicateName, NewCtlReplicateChecker +}
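ctlReplicateChecker reuses slave_status by struct embedding: it inherits fetchSlaveStatus, isOk and collectError from slaveStatusChecker (defined in slave_status.go, the next file) and only layers the tc_is_primary gate on top. In miniature, the composition pattern is plain embedding (names here are hypothetical):

    type sharedChecker struct{ db *sqlx.DB }

    func (s *sharedChecker) fetch() error { return nil } // shared query logic lives here

    type gatedChecker struct {
    	sharedChecker // embedding promotes fetch() onto gatedChecker
    }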
fmt.Sprintf("IO/SQL thread not running: %s", slaveErr), nil + } + + return "", nil +} + +func (s *slaveStatusChecker) isOk() bool { + return strings.ToUpper(s.slaveStatus["Slave_IO_Running"].(string)) == "YES" && + strings.ToUpper(s.slaveStatus["Slave_SQL_Running"].(string)) == "YES" +} + +func (s *slaveStatusChecker) collectError() (string, error) { + var slaveErrors []string + for _, ek := range []struct { + ErrKey string + ErrnoKey string + }{ + {ErrKey: "Last_Error", ErrnoKey: "Last_Errno"}, + {ErrKey: "Last_SQL_Error", ErrnoKey: "Last_SQL_Errno"}, + {ErrKey: "Last_IO_Error", ErrnoKey: "Last_IO_Errno"}, + } { + // 反射出来的都是字符串, 所以这里要做字符串对比 + if errNo, ok := s.slaveStatus[ek.ErrnoKey]; !ok { + err := errors.Errorf("%s not found in slave status", ek.ErrnoKey) + return "", err + } else { + slog.Debug( + "collect slave errors", + slog.String("key", ek.ErrnoKey), slog.String("value", errNo.(string)), + ) + if errNo.(string) != "0" { + if errMsg, ok := s.slaveStatus[ek.ErrKey]; !ok { + err := errors.Errorf("%s not found in slave status", ek.ErrnoKey) + slog.Error("collect slave errors", err) + return "", err + } else { + slaveErr := fmt.Sprintf( + `%s: %s [%s]`, + ek.ErrKey, + errMsg, + errNo, + ) + slaveErrors = append( + slaveErrors, + slaveErr, + ) + slog.Debug("collect slave errors", slog.String("slave error", slaveErr)) + } + } + } + } + + return strings.Join(slaveErrors, ","), nil +} + +func (s *slaveStatusChecker) fetchSlaveStatus() error { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + rows, err := s.db.QueryxContext(ctx, `SHOW SLAVE STATUS`) + if err != nil { + slog.Error("show slave status", err) + return err + } + defer func() { + _ = rows.Close() + }() + + for rows.Next() { + err := rows.MapScan(s.slaveStatus) + if err != nil { + slog.Error("scan slave status", err) + return err + } + break + } + + for k, v := range s.slaveStatus { + if value, ok := v.([]byte); ok { + s.slaveStatus[k] = strings.TrimSpace(string(value)) + } + } + slog.Debug("slave status", slog.Any("status", s.slaveStatus)) + + return nil +} + +// Name TODO +func (s *slaveStatusChecker) Name() string { + return slaveStatusName +} + +// NewSlaveStatusChecker TODO +func NewSlaveStatusChecker(cc *monitor_item_interface.ConnectionCollect) monitor_item_interface.MonitorItemInterface { + return &slaveStatusChecker{ + db: cc.MySqlDB, + slaveStatus: make(map[string]interface{}), + } +} + +// RegisterSlaveStatusChecker TODO +func RegisterSlaveStatusChecker() (string, monitor_item_interface.MonitorItemConstructorFuncType) { + return slaveStatusName, NewSlaveStatusChecker +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/main_loop/main_loop.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/main_loop/main_loop.go new file mode 100644 index 0000000000..9833ff881a --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/main_loop/main_loop.go @@ -0,0 +1,2 @@ +// Package main_loop TODO +package main_loop diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/main_loop/monitor.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/main_loop/monitor.go new file mode 100644 index 0000000000..986e6334ff --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/main_loop/monitor.go @@ -0,0 +1,82 @@ +package main_loop + +import ( + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/items_collect" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface" + 
"dbm-services/mysql/db-tools/mysql-monitor/pkg/utils" + "fmt" + "strings" + + _ "github.com/go-sql-driver/mysql" // mysql TODO + "github.com/pkg/errors" + "github.com/spf13/viper" + "golang.org/x/exp/slices" + "golang.org/x/exp/slog" +) + +// Run TODO +func Run(hardcode bool) error { + var iNames []string + if hardcode { + iNames = viper.GetStringSlice("hardcode-items") + } else { + iNames = viper.GetStringSlice("run-items") + } + slog.Info("main loop", slog.String("items", strings.Join(iNames, ","))) + slog.Info("main loop", slog.Bool("hardcode", hardcode)) + + if hardcode && slices.Index(iNames, config.HeartBeatName) >= 0 { + utils.SendMonitorMetrics(config.HeartBeatName, 1, nil) + } + + cc, err := monitor_item_interface.NewConnectionCollect() + if err != nil { + if hardcode && slices.Index(iNames, "db-up") >= 0 { + utils.SendMonitorEvent("db-up", err.Error()) + } + return nil + } + defer func() { + cc.Close() + }() + + slog.Debug("make connection collect", slog.Any("connection collect", cc)) + + if hardcode { + return nil + } + + for _, iName := range iNames { + + if constructor, ok := items_collect.RegisteredItemConstructor()[iName]; ok { + msg, err := constructor(cc).Run() + if err != nil { + slog.Error("run monitor item", err, slog.String("name", iName)) + utils.SendMonitorEvent( + "monitor-internal-error", + fmt.Sprintf("run monitor item %s failed: %s", iName, err.Error()), + ) + continue + } + + if msg != "" { + slog.Info( + "run monitor items", + slog.String("name", iName), + slog.String("msg", msg), + ) + utils.SendMonitorEvent(iName, msg) + continue + } + + slog.Info("run monitor item pass", slog.String("name", iName)) + + } else { + err := errors.Errorf("%s not registered", iName) + slog.Error("run monitor item", err) + continue + } + } + return nil +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/connection_collect.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/connection_collect.go new file mode 100644 index 0000000000..10633d2fc9 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/connection_collect.go @@ -0,0 +1,152 @@ +package monitor_item_interface + +import ( + "context" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "fmt" + "time" + + "github.com/go-sql-driver/mysql" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "golang.org/x/exp/slog" +) + +// ConnectionCollect TODO +type ConnectionCollect struct { + MySqlDB *sqlx.DB // spider 也用这个 + ProxyDB *sqlx.DB + ProxyAdminDB *sqlx.DB + CtlDB *sqlx.DB +} + +// Close TODO +func (c *ConnectionCollect) Close() { + if c.MySqlDB != nil { + _ = c.MySqlDB.Close() + } + + if c.ProxyDB != nil { + _ = c.ProxyDB.Close() + } + + if c.ProxyAdminDB != nil { + _ = c.ProxyAdminDB.Close() + } + + if c.CtlDB != nil { + _ = c.CtlDB.Close() + } +} + +// NewConnectionCollect TODO +func NewConnectionCollect() (*ConnectionCollect, error) { + switch config.MonitorConfig.MachineType { + case "backend", "remote": + db, err := connectDB( + config.MonitorConfig.Ip, + config.MonitorConfig.Port, + config.MonitorConfig.Auth.Mysql, + ) + if err != nil { + slog.Error( + fmt.Sprintf("connect %s", config.MonitorConfig.MachineType), err, + slog.String("ip", config.MonitorConfig.Ip), + slog.Int("port", config.MonitorConfig.Port), + ) + return nil, err + } + return &ConnectionCollect{MySqlDB: db}, nil + case "proxy": + db1, err := connectDB( + config.MonitorConfig.Ip, + config.MonitorConfig.Port, + config.MonitorConfig.Auth.Proxy, + ) + if err != 
diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/connection_collect.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/connection_collect.go new file mode 100644 index 0000000000..10633d2fc9 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/connection_collect.go @@ -0,0 +1,152 @@ +package monitor_item_interface + +import ( + "context" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "fmt" + "time" + + "github.com/go-sql-driver/mysql" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" + "golang.org/x/exp/slog" +) + +// ConnectionCollect TODO +type ConnectionCollect struct { + MySqlDB *sqlx.DB // also used by spider + ProxyDB *sqlx.DB + ProxyAdminDB *sqlx.DB + CtlDB *sqlx.DB +} + +// Close TODO +func (c *ConnectionCollect) Close() { + if c.MySqlDB != nil { + _ = c.MySqlDB.Close() + } + + if c.ProxyDB != nil { + _ = c.ProxyDB.Close() + } + + if c.ProxyAdminDB != nil { + _ = c.ProxyAdminDB.Close() + } + + if c.CtlDB != nil { + _ = c.CtlDB.Close() + } +} + +// NewConnectionCollect TODO +func NewConnectionCollect() (*ConnectionCollect, error) { + switch config.MonitorConfig.MachineType { + case "backend", "remote": + db, err := connectDB( + config.MonitorConfig.Ip, + config.MonitorConfig.Port, + config.MonitorConfig.Auth.Mysql, + ) + if err != nil { + slog.Error( + fmt.Sprintf("connect %s", config.MonitorConfig.MachineType), err, + slog.String("ip", config.MonitorConfig.Ip), + slog.Int("port", config.MonitorConfig.Port), + ) + return nil, err + } + return &ConnectionCollect{MySqlDB: db}, nil + case "proxy": + db1, err := connectDB( + config.MonitorConfig.Ip, + config.MonitorConfig.Port, + config.MonitorConfig.Auth.Proxy, + ) + if err != nil { + slog.Error( + "connect proxy", err, + slog.String("ip", config.MonitorConfig.Ip), + slog.Int("port", config.MonitorConfig.Port), + ) + return nil, err + } + + adminPort := config.MonitorConfig.Port + 1000 + db2, err := connectDB( + config.MonitorConfig.Ip, + adminPort, + config.MonitorConfig.Auth.ProxyAdmin, + ) + if err != nil { + if merr, ok := err.(*mysql.MySQLError); ok { + if merr.Number == 1105 { + // connecting to the proxy admin port always returns from this branch + return &ConnectionCollect{ProxyDB: db1, ProxyAdminDB: db2}, nil + } + } + slog.Error( + "connect proxy admin", err, + slog.String("ip", config.MonitorConfig.Ip), + slog.Int("port", adminPort), + ) + return nil, err + } + // in practice this line is unreachable: a mysql-protocol connection to the proxy admin port always ends with err != nil + return &ConnectionCollect{ProxyDB: db1, ProxyAdminDB: db2}, nil + case "spider": + db1, err := connectDB( + config.MonitorConfig.Ip, + config.MonitorConfig.Port, + config.MonitorConfig.Auth.Mysql, + ) + if err != nil { + slog.Error( + "connect spider", err, + slog.String("ip", config.MonitorConfig.Ip), + slog.Int("port", config.MonitorConfig.Port), + ) + return nil, err + } + + ctlPort := config.MonitorConfig.Port + 1000 + db2, err := connectDB( + config.MonitorConfig.Ip, + ctlPort, + config.MonitorConfig.Auth.Mysql, + ) + if err != nil { + slog.Error( + "connect ctl", err, + slog.String("ip", config.MonitorConfig.Ip), + slog.Int("port", ctlPort), + ) + return nil, err + } + + return &ConnectionCollect{MySqlDB: db1, CtlDB: db2}, nil + default: + err := errors.Errorf( + "unsupported machine type: %s", + config.MonitorConfig.MachineType, + ) + slog.Error("new connect", err) + return nil, err + } +} + +func connectDB(ip string, port int, ca *config.ConnectAuth) (*sqlx.DB, error) { + ctx, cancel := context.WithTimeout(context.Background(), config.MonitorConfig.InteractTimeout) + defer cancel() + + return sqlx.ConnectContext( + ctx, + "mysql", fmt.Sprintf( + "%s:%s@tcp(%s:%d)/%s?parseTime=true&loc=%s&timeout=%s", + ca.User, ca.Password, ip, port, + "", + time.Local.String(), + config.MonitorConfig.InteractTimeout, + ), + ) +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/interface_define.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/interface_define.go new file mode 100644 index 0000000000..018dde6647 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/interface_define.go @@ -0,0 +1,10 @@ +package monitor_item_interface + +// MonitorItemInterface TODO +type MonitorItemInterface interface { + Run() (msg string, err error) + Name() string +} + +// MonitorItemConstructorFuncType TODO +type MonitorItemConstructorFuncType func(cc *ConnectionCollect) MonitorItemInterface diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/monitor_item_interface.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/monitor_item_interface.go new file mode 100644 index 0000000000..4d50c63ca3 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/monitor_item_interface/monitor_item_interface.go @@ -0,0 +1,2 @@ +// Package monitor_item_interface TODO +package monitor_item_interface
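With the format string in connectDB above, a connection to a hypothetical instance at 127.0.0.1:20000 with an InteractTimeout of 5s produces a go-sql-driver DSN of this shape (credentials invented):

    monitor:secret@tcp(127.0.0.1:20000)/?parseTime=true&loc=Local&timeout=5s

loc=Local is the rendering of time.Local.String(), and timeout is the fmt rendering of the time.Duration; go-sql-driver accepts both forms.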
"dbm-services/mysql/db-tools/mysql-crond/api" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "strconv" + + "golang.org/x/exp/slog" +) + +// SendMonitorEvent TODO +func SendMonitorEvent(name string, msg string) { + crondManager := ma.NewManager(config.MonitorConfig.ApiUrl) + + additionDimension := map[string]interface{}{ + "immute_domain": config.MonitorConfig.ImmuteDomain, + "machine_type": config.MonitorConfig.MachineType, + "bk_cloud_id": *config.MonitorConfig.BkCloudID, + "port": config.MonitorConfig.Port, + "bk_target_service_instance_id": strconv.FormatInt(config.MonitorConfig.BkInstanceId, 10), + } + + if config.MonitorConfig.Role != nil { + additionDimension["role"] = *config.MonitorConfig.Role + } + + err := crondManager.SendEvent( + name, + msg, + additionDimension, + ) + if err != nil { + slog.Error( + "send event", err, + slog.String("name", name), slog.String("msg", msg), + ) + } + + slog.Info( + "send event", + slog.String("name", name), slog.String("msg", msg), + ) +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/utils/send_monitor_metrics.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/utils/send_monitor_metrics.go new file mode 100644 index 0000000000..b716e56923 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/utils/send_monitor_metrics.go @@ -0,0 +1,48 @@ +package utils + +import ( + ma "dbm-services/mysql/db-tools/mysql-crond/api" + "dbm-services/mysql/db-tools/mysql-monitor/pkg/config" + "strconv" + + "golang.org/x/exp/maps" + "golang.org/x/exp/slog" +) + +// SendMonitorMetrics TODO +func SendMonitorMetrics(name string, value int64, customDimension map[string]interface{}) { + crondManager := ma.NewManager(config.MonitorConfig.ApiUrl) + + additionDimension := map[string]interface{}{ + "immute_domain": config.MonitorConfig.ImmuteDomain, + "machine_type": config.MonitorConfig.MachineType, + "bk_cloud_id": strconv.Itoa(*config.MonitorConfig.BkCloudID), + "port": strconv.Itoa(config.MonitorConfig.Port), + "bk_target_service_instance_id": strconv.FormatInt(config.MonitorConfig.BkInstanceId, 10), + } + + if customDimension != nil { + maps.Copy(additionDimension, customDimension) + } + + if config.MonitorConfig.Role != nil { + additionDimension["role"] = *config.MonitorConfig.Role + } + + err := crondManager.SendMetrics( + name, + value, + additionDimension, + ) + if err != nil { + slog.Error( + "send metrics", err, + slog.String("name", name), slog.Int64("value", value), + ) + } + + slog.Info( + "send metrics", + slog.String("name", name), slog.Int64("msg", value), + ) +} diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pkg/utils/utils.go b/dbm-services/mysql/db-tools/mysql-monitor/pkg/utils/utils.go new file mode 100644 index 0000000000..8fecf0f06c --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pkg/utils/utils.go @@ -0,0 +1,2 @@ +// Package utils TODO +package utils diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pt-config-diff b/dbm-services/mysql/db-tools/mysql-monitor/pt-config-diff new file mode 100755 index 0000000000..99b2c122c4 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-monitor/pt-config-diff @@ -0,0 +1,6026 @@ +#!/usr/bin/env perl + +# This program is part of Percona Toolkit: http://www.percona.com/software/ +# See "COPYRIGHT, LICENSE, AND WARRANTY" at the end of this file for legal +# notices and disclaimers. + +use strict; +use warnings FATAL => 'all'; + +# This tool is "fat-packed": most of its dependent modules are embedded +# in this file. 
Setting %INC to this file for each module makes Perl aware +# of this so it will not try to load the module from @INC. See the tool's +# documentation for a full list of dependencies. +BEGIN { + $INC{$_} = __FILE__ for map { (my $pkg = "$_.pm") =~ s!::!/!g; $pkg } (qw( + Percona::Toolkit + Lmo::Utils + Lmo::Meta + Lmo::Object + Lmo::Types + Lmo + OptionParser + DSNParser + Cxn + Daemon + TextResultSetParser + MySQLConfig + MySQLConfigComparer + ReportFormatter + HTTP::Micro + VersionCheck + )); +} + +# ########################################################################### +# Percona::Toolkit package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/Percona/Toolkit.pm +# t/lib/Percona/Toolkit.t +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### +{ +package Percona::Toolkit; + +our $VERSION = '3.5.1'; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Carp qw(carp cluck); +use Data::Dumper qw(); + +require Exporter; +our @ISA = qw(Exporter); +our @EXPORT_OK = qw( + have_required_args + Dumper + _d +); + +sub have_required_args { + my ($args, @required_args) = @_; + my $have_required_args = 1; + foreach my $arg ( @required_args ) { + if ( !defined $args->{$arg} ) { + $have_required_args = 0; + carp "Argument $arg is not defined"; + } + } + cluck unless $have_required_args; # print backtrace + return $have_required_args; +} + +sub Dumper { + local $Data::Dumper::Indent = 1; + local $Data::Dumper::Sortkeys = 1; + local $Data::Dumper::Quotekeys = 0; + Data::Dumper::Dumper(@_); +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Percona::Toolkit package +# ########################################################################### + +# ########################################################################### +# Lmo::Utils package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/Lmo/Utils.pm +# t/lib/Lmo/Utils.t +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### +{ +package Lmo::Utils; + +use strict; +use warnings qw( FATAL all ); +require Exporter; +our (@ISA, @EXPORT, @EXPORT_OK); + +BEGIN { + @ISA = qw(Exporter); + @EXPORT = @EXPORT_OK = qw( + _install_coderef + _unimport_coderefs + _glob_for + _stash_for + ); +} + +{ + no strict 'refs'; + sub _glob_for { + return \*{shift()} + } + + sub _stash_for { + return \%{ shift() . 
"::" }; + } +} + +sub _install_coderef { + my ($to, $code) = @_; + + return *{ _glob_for $to } = $code; +} + +sub _unimport_coderefs { + my ($target, @names) = @_; + return unless @names; + my $stash = _stash_for($target); + foreach my $name (@names) { + if ($stash->{$name} and defined(&{$stash->{$name}})) { + delete $stash->{$name}; + } + } +} + +1; +} +# ########################################################################### +# End Lmo::Utils package +# ########################################################################### + +# ########################################################################### +# Lmo::Meta package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/Lmo/Meta.pm +# t/lib/Lmo/Meta.t +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### +{ +package Lmo::Meta; +use strict; +use warnings qw( FATAL all ); + +my %metadata_for; + +sub new { + my $class = shift; + return bless { @_ }, $class +} + +sub metadata_for { + my $self = shift; + my ($class) = @_; + + return $metadata_for{$class} ||= {}; +} + +sub class { shift->{class} } + +sub attributes { + my $self = shift; + return keys %{$self->metadata_for($self->class)} +} + +sub attributes_for_new { + my $self = shift; + my @attributes; + + my $class_metadata = $self->metadata_for($self->class); + while ( my ($attr, $meta) = each %$class_metadata ) { + if ( exists $meta->{init_arg} ) { + push @attributes, $meta->{init_arg} + if defined $meta->{init_arg}; + } + else { + push @attributes, $attr; + } + } + return @attributes; +} + +1; +} +# ########################################################################### +# End Lmo::Meta package +# ########################################################################### + +# ########################################################################### +# Lmo::Object package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/Lmo/Object.pm +# t/lib/Lmo/Object.t +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### +{ +package Lmo::Object; + +use strict; +use warnings qw( FATAL all ); + +use Carp (); +use Scalar::Util qw(blessed); + +use Lmo::Meta; +use Lmo::Utils qw(_glob_for); + +sub new { + my $class = shift; + my $args = $class->BUILDARGS(@_); + + my $class_metadata = Lmo::Meta->metadata_for($class); + + my @args_to_delete; + while ( my ($attr, $meta) = each %$class_metadata ) { + next unless exists $meta->{init_arg}; + my $init_arg = $meta->{init_arg}; + + if ( defined $init_arg ) { + $args->{$attr} = delete $args->{$init_arg}; + } + else { + push @args_to_delete, $attr; + } + } + + delete $args->{$_} for @args_to_delete; + + for my $attribute ( keys %$args ) { + if ( my $coerce = $class_metadata->{$attribute}{coerce} ) { + $args->{$attribute} = $coerce->($args->{$attribute}); + } + if ( my $isa_check = $class_metadata->{$attribute}{isa} ) { + my ($check_name, $check_sub) = @$isa_check; + $check_sub->($args->{$attribute}); + } + } + + while ( my ($attribute, $meta) = each %$class_metadata ) { + next unless $meta->{required}; + Carp::confess("Attribute ($attribute) is required for $class") + if ! 
exists $args->{$attribute} + } + + my $self = bless $args, $class; + + my @build_subs; + my $linearized_isa = mro::get_linear_isa($class); + + for my $isa_class ( @$linearized_isa ) { + unshift @build_subs, *{ _glob_for "${isa_class}::BUILD" }{CODE}; + } + my @args = %$args; + for my $sub (grep { defined($_) && exists &$_ } @build_subs) { + $sub->( $self, @args); + } + return $self; +} + +sub BUILDARGS { + shift; # No need for the classname + if ( @_ == 1 && ref($_[0]) ) { + Carp::confess("Single parameters to new() must be a HASH ref, not $_[0]") + unless ref($_[0]) eq ref({}); + return {%{$_[0]}} # We want a new reference, always + } + else { + return { @_ }; + } +} + +sub meta { + my $class = shift; + $class = Scalar::Util::blessed($class) || $class; + return Lmo::Meta->new(class => $class); +} + +1; +} +# ########################################################################### +# End Lmo::Object package +# ########################################################################### + +# ########################################################################### +# Lmo::Types package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/Lmo/Types.pm +# t/lib/Lmo/Types.t +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### +{ +package Lmo::Types; + +use strict; +use warnings qw( FATAL all ); + +use Carp (); +use Scalar::Util qw(looks_like_number blessed); + + +our %TYPES = ( + Bool => sub { !$_[0] || (defined $_[0] && looks_like_number($_[0]) && $_[0] == 1) }, + Num => sub { defined $_[0] && looks_like_number($_[0]) }, + Int => sub { defined $_[0] && looks_like_number($_[0]) && $_[0] == int($_[0]) }, + Str => sub { defined $_[0] }, + Object => sub { defined $_[0] && blessed($_[0]) }, + FileHandle => sub { local $@; require IO::Handle; fileno($_[0]) && $_[0]->opened }, + + map { + my $type = /R/ ? $_ : uc $_; + $_ . "Ref" => sub { ref $_[0] eq $type } + } qw(Array Code Hash Regexp Glob Scalar) +); + +sub check_type_constaints { + my ($attribute, $type_check, $check_name, $val) = @_; + ( ref($type_check) eq 'CODE' + ? $type_check->($val) + : (ref $val eq $type_check + || ($val && $val eq $type_check) + || (exists $TYPES{$type_check} && $TYPES{$type_check}->($val))) + ) + || Carp::confess( + qq<Attribute ($attribute) does not pass the type constraint because: > + . qq<Validation failed for '$check_name' with value > + . (defined $val ? Lmo::Dumper($val) : 'undef') ) +} + +sub _nested_constraints { + my ($attribute, $aggregate_type, $type) = @_; + + my $inner_types; + if ( $type =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) { + $inner_types = _nested_constraints($1, $2); + } + else { + $inner_types = $TYPES{$type}; + } + + if ( $aggregate_type eq 'ArrayRef' ) { + return sub { + my ($val) = @_; + return unless ref($val) eq ref([]); + + if ($inner_types) { + for my $value ( @{$val} ) { + return unless $inner_types->($value) + } + } + else { + for my $value ( @{$val} ) { + return unless $value && ($value eq $type + || (Scalar::Util::blessed($value) && $value->isa($type))); + } + } + return 1; + }; + } + elsif ( $aggregate_type eq 'Maybe' ) { + return sub { + my ($value) = @_; + return 1 if !
defined($value); + if ($inner_types) { + return unless $inner_types->($value) + } + else { + return unless $value eq $type + || (Scalar::Util::blessed($value) && $value->isa($type)); + } + return 1; + } + } + else { + Carp::confess("Nested aggregate types are only implemented for ArrayRefs and Maybe"); + } +} + +1; +} +# ########################################################################### +# End Lmo::Types package +# ########################################################################### + +# ########################################################################### +# Lmo package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/Lmo.pm +# t/lib/Lmo.t +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### +{ +BEGIN { +$INC{"Lmo.pm"} = __FILE__; +package Lmo; +our $VERSION = '0.30_Percona'; # Forked from 0.30 of Mo. + + +use strict; +use warnings qw( FATAL all ); + +use Carp (); +use Scalar::Util qw(looks_like_number blessed); + +use Lmo::Meta; +use Lmo::Object; +use Lmo::Types; + +use Lmo::Utils; + +my %export_for; +sub import { + warnings->import(qw(FATAL all)); + strict->import(); + + my $caller = scalar caller(); # Caller's package + my %exports = ( + extends => \&extends, + has => \&has, + with => \&with, + override => \&override, + confess => \&Carp::confess, + ); + + $export_for{$caller} = \%exports; + + for my $keyword ( keys %exports ) { + _install_coderef "${caller}::$keyword" => $exports{$keyword}; + } + + if ( !@{ *{ _glob_for "${caller}::ISA" }{ARRAY} || [] } ) { + @_ = "Lmo::Object"; + goto *{ _glob_for "${caller}::extends" }{CODE}; + } +} + +sub extends { + my $caller = scalar caller(); + for my $class ( @_ ) { + _load_module($class); + } + _set_package_isa($caller, @_); + _set_inherited_metadata($caller); +} + +sub _load_module { + my ($class) = @_; + + (my $file = $class) =~ s{::|'}{/}g; + $file .= '.pm'; + { local $@; eval { require "$file" } } # or warn $@; + return; +} + +sub with { + my $package = scalar caller(); + require Role::Tiny; + for my $role ( @_ ) { + _load_module($role); + _role_attribute_metadata($package, $role); + } + Role::Tiny->apply_roles_to_package($package, @_); +} + +sub _role_attribute_metadata { + my ($package, $role) = @_; + + my $package_meta = Lmo::Meta->metadata_for($package); + my $role_meta = Lmo::Meta->metadata_for($role); + + %$package_meta = (%$role_meta, %$package_meta); +} + +sub has { + my $names = shift; + my $caller = scalar caller(); + + my $class_metadata = Lmo::Meta->metadata_for($caller); + + for my $attribute ( ref $names ? @$names : $names ) { + my %args = @_; + my $method = ($args{is} || '') eq 'ro' + ? sub { + Carp::confess("Cannot assign a value to a read-only accessor at reader ${caller}::${attribute}") + if $#_; + return $_[0]{$attribute}; + } + : sub { + return $#_ + ? 
$_[0]{$attribute} = $_[1] + : $_[0]{$attribute}; + }; + + $class_metadata->{$attribute} = (); + + if ( my $type_check = $args{isa} ) { + my $check_name = $type_check; + + if ( my ($aggregate_type, $inner_type) = $type_check =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) { + $type_check = Lmo::Types::_nested_constraints($attribute, $aggregate_type, $inner_type); + } + + my $check_sub = sub { + my ($new_val) = @_; + Lmo::Types::check_type_constaints($attribute, $type_check, $check_name, $new_val); + }; + + $class_metadata->{$attribute}{isa} = [$check_name, $check_sub]; + my $orig_method = $method; + $method = sub { + $check_sub->($_[1]) if $#_; + goto &$orig_method; + }; + } + + if ( my $builder = $args{builder} ) { + my $original_method = $method; + $method = sub { + $#_ + ? goto &$original_method + : ! exists $_[0]{$attribute} + ? $_[0]{$attribute} = $_[0]->$builder + : goto &$original_method + }; + } + + if ( my $code = $args{default} ) { + Carp::confess("${caller}::${attribute}'s default is $code, but should be a coderef") + unless ref($code) eq 'CODE'; + my $original_method = $method; + $method = sub { + $#_ + ? goto &$original_method + : ! exists $_[0]{$attribute} + ? $_[0]{$attribute} = $_[0]->$code + : goto &$original_method + }; + } + + if ( my $role = $args{does} ) { + my $original_method = $method; + $method = sub { + if ( $#_ ) { + Carp::confess(qq<Attribute ($attribute) does not pass the type constraint because it does not do the '$role' role>) + unless Scalar::Util::blessed($_[1]) && eval { $_[1]->does($role) } + } + goto &$original_method + }; + } + + if ( my $coercion = $args{coerce} ) { + $class_metadata->{$attribute}{coerce} = $coercion; + my $original_method = $method; + $method = sub { + if ( $#_ ) { + return $original_method->($_[0], $coercion->($_[1])) + } + goto &$original_method; + } + } + + _install_coderef "${caller}::$attribute" => $method; + + if ( $args{required} ) { + $class_metadata->{$attribute}{required} = 1; + } + + if ($args{clearer}) { + _install_coderef "${caller}::$args{clearer}" + => sub { delete shift->{$attribute} } + } + + if ($args{predicate}) { + _install_coderef "${caller}::$args{predicate}" + => sub { exists shift->{$attribute} } + } + + if ($args{handles}) { + _has_handles($caller, $attribute, \%args); + } + + if (exists $args{init_arg}) { + $class_metadata->{$attribute}{init_arg} = $args{init_arg}; + } + } +} + +sub _has_handles { + my ($caller, $attribute, $args) = @_; + my $handles = $args->{handles}; + + my $ref = ref $handles; + my $kv; + if ( $ref eq ref [] ) { + $kv = { map { $_,$_ } @{$handles} }; + } + elsif ( $ref eq ref {} ) { + $kv = $handles; + } + elsif ( $ref eq ref qr// ) { + Carp::confess("Cannot delegate methods based on a Regexp without a type constraint (isa)") + unless $args->{isa}; + my $target_class = $args->{isa}; + $kv = { + map { $_, $_ } + grep { $_ =~ $handles } + grep { !exists $Lmo::Object::{$_} && $target_class->can($_) } + grep { !$export_for{$target_class}->{$_} } + keys %{ _stash_for $target_class } + }; + } + else { + Carp::confess("handles for $ref not yet implemented"); + } + + while ( my ($method, $target) = each %{$kv} ) { + my $name = _glob_for "${caller}::$method"; + Carp::confess("You cannot overwrite a locally defined method ($method) with a delegation") + if defined &$name; + + my ($target, @curried_args) = ref($target) ?
@$target : $target; + *$name = sub { + my $self = shift; + my $delegate_to = $self->$attribute(); + my $error = "Cannot delegate $method to $target because the value of $attribute"; + Carp::confess("$error is not defined") unless $delegate_to; + Carp::confess("$error is not an object (got '$delegate_to')") + unless Scalar::Util::blessed($delegate_to) || (!ref($delegate_to) && $delegate_to->can($target)); + return $delegate_to->$target(@curried_args, @_); + } + } +} + +sub _set_package_isa { + my ($package, @new_isa) = @_; + my $package_isa = \*{ _glob_for "${package}::ISA" }; + @{*$package_isa} = @new_isa; +} + +sub _set_inherited_metadata { + my $class = shift; + my $class_metadata = Lmo::Meta->metadata_for($class); + my $linearized_isa = mro::get_linear_isa($class); + my %new_metadata; + + for my $isa_class (reverse @$linearized_isa) { + my $isa_metadata = Lmo::Meta->metadata_for($isa_class); + %new_metadata = ( + %new_metadata, + %$isa_metadata, + ); + } + %$class_metadata = %new_metadata; +} + +sub unimport { + my $caller = scalar caller(); + my $target = caller; + _unimport_coderefs($target, keys %{$export_for{$caller}}); +} + +sub Dumper { + require Data::Dumper; + local $Data::Dumper::Indent = 0; + local $Data::Dumper::Sortkeys = 0; + local $Data::Dumper::Quotekeys = 0; + local $Data::Dumper::Terse = 1; + + Data::Dumper::Dumper(@_) +} + +BEGIN { + if ($] >= 5.010) { + { local $@; require mro; } + } + else { + local $@; + eval { + require MRO::Compat; + } or do { + *mro::get_linear_isa = *mro::get_linear_isa_dfs = sub { + no strict 'refs'; + + my $classname = shift; + + my @lin = ($classname); + my %stored; + foreach my $parent (@{"$classname\::ISA"}) { + my $plin = mro::get_linear_isa_dfs($parent); + foreach (@$plin) { + next if exists $stored{$_}; + push(@lin, $_); + $stored{$_} = 1; + } + } + return \@lin; + }; + } + } +} + +sub override { + my ($methods, $code) = @_; + my $caller = scalar caller; + + for my $method ( ref($methods) ? @$methods : $methods ) { + my $full_method = "${caller}::${method}"; + *{_glob_for $full_method} = $code; + } +} + +} +1; +} +# ########################################################################### +# End Lmo package +# ########################################################################### + +# ########################################################################### +# OptionParser package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/OptionParser.pm +# t/lib/OptionParser.t +# See https://github.com/percona/percona-toolkit for more information. 
+# ########################################################################### +{ +package OptionParser; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use List::Util qw(max); +use Getopt::Long; +use Data::Dumper; + +my $POD_link_re = '[LC]<"?([^">]+)"?>'; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + + my ($program_name) = $PROGRAM_NAME =~ m/([.A-Za-z-]+)$/; + $program_name ||= $PROGRAM_NAME; + my $home = $ENV{HOME} || $ENV{HOMEPATH} || $ENV{USERPROFILE} || '.'; + + my %attributes = ( + 'type' => 1, + 'short form' => 1, + 'group' => 1, + 'default' => 1, + 'cumulative' => 1, + 'negatable' => 1, + 'repeatable' => 1, # means it can be specified more than once + ); + + my $self = { + head1 => 'OPTIONS', # These args are used internally + skip_rules => 0, # to instantiate another Option- + item => '--(.*)', # Parser obj that parses the + attributes => \%attributes, # DSN OPTIONS section. Tools + parse_attributes => \&_parse_attribs, # don't tinker with these args. + + %args, + + strict => 1, # disabled by a special rule + program_name => $program_name, + opts => {}, + got_opts => 0, + short_opts => {}, + defaults => {}, + groups => {}, + allowed_groups => {}, + errors => [], + rules => [], # desc of rules for --help + mutex => [], # rule: opts are mutually exclusive + atleast1 => [], # rule: at least one opt is required + disables => {}, # rule: opt disables other opts + defaults_to => {}, # rule: opt defaults to value of other opt + DSNParser => undef, + default_files => [ + "/etc/percona-toolkit/percona-toolkit.conf", + "/etc/percona-toolkit/$program_name.conf", + "$home/.percona-toolkit.conf", + "$home/.$program_name.conf", + ], + types => { + string => 's', # standard Getopt type + int => 'i', # standard Getopt type + float => 'f', # standard Getopt type + Hash => 'H', # hash, formed from a comma-separated list + hash => 'h', # hash as above, but only if a value is given + Array => 'A', # array, similar to Hash + array => 'a', # array, similar to hash + DSN => 'd', # DSN + size => 'z', # size with kMG suffix (powers of 2^10) + time => 'm', # time, with an optional suffix of s/h/m/d + }, + }; + + return bless $self, $class; +} + +sub get_specs { + my ( $self, $file ) = @_; + $file ||= $self->{file} || __FILE__; + my @specs = $self->_pod_to_specs($file); + $self->_parse_specs(@specs); + + open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; + my $contents = do { local $/ = undef; <$fh> }; + close $fh; + if ( $contents =~ m/^=head1 DSN OPTIONS/m ) { + PTDEBUG && _d('Parsing DSN OPTIONS'); + my $dsn_attribs = { + dsn => 1, + copy => 1, + }; + my $parse_dsn_attribs = sub { + my ( $self, $option, $attribs ) = @_; + map { + my $val = $attribs->{$_}; + if ( $val ) { + $val = $val eq 'yes' ? 1 + : $val eq 'no' ? 0 + : $val; + $attribs->{$_} = $val; + } + } keys %$attribs; + return { + key => $option, + %$attribs, + }; + }; + my $dsn_o = new OptionParser( + description => 'DSN OPTIONS', + head1 => 'DSN OPTIONS', + dsn => 0, # XXX don't infinitely recurse! 
+ item => '\* (.)', # key opts are a single character + skip_rules => 1, # no rules before opts + attributes => $dsn_attribs, + parse_attributes => $parse_dsn_attribs, + ); + my @dsn_opts = map { + my $opts = { + key => $_->{spec}->{key}, + dsn => $_->{spec}->{dsn}, + copy => $_->{spec}->{copy}, + desc => $_->{desc}, + }; + $opts; + } $dsn_o->_pod_to_specs($file); + $self->{DSNParser} = DSNParser->new(opts => \@dsn_opts); + } + + if ( $contents =~ m/^=head1 VERSION\n\n^(.+)$/m ) { + $self->{version} = $1; + PTDEBUG && _d($self->{version}); + } + + return; +} + +sub DSNParser { + my ( $self ) = @_; + return $self->{DSNParser}; +}; + +sub get_defaults_files { + my ( $self ) = @_; + return @{$self->{default_files}}; +} + +sub _pod_to_specs { + my ( $self, $file ) = @_; + $file ||= $self->{file} || __FILE__; + open my $fh, '<', $file or die "Cannot open $file: $OS_ERROR"; + + my @specs = (); + my @rules = (); + my $para; + + local $INPUT_RECORD_SEPARATOR = ''; + while ( $para = <$fh> ) { + next unless $para =~ m/^=head1 $self->{head1}/; + last; + } + + while ( $para = <$fh> ) { + last if $para =~ m/^=over/; + next if $self->{skip_rules}; + chomp $para; + $para =~ s/\s+/ /g; + $para =~ s/$POD_link_re/$1/go; + PTDEBUG && _d('Option rule:', $para); + push @rules, $para; + } + + die "POD has no $self->{head1} section" unless $para; + + do { + if ( my ($option) = $para =~ m/^=item $self->{item}/ ) { + chomp $para; + PTDEBUG && _d($para); + my %attribs; + + $para = <$fh>; # read next paragraph, possibly attributes + + if ( $para =~ m/: / ) { # attributes + $para =~ s/\s+\Z//g; + %attribs = map { + my ( $attrib, $val) = split(/: /, $_); + die "Unrecognized attribute for --$option: $attrib" + unless $self->{attributes}->{$attrib}; + ($attrib, $val); + } split(/; /, $para); + if ( $attribs{'short form'} ) { + $attribs{'short form'} =~ s/-//; + } + $para = <$fh>; # read next paragraph, probably short help desc + } + else { + PTDEBUG && _d('Option has no attributes'); + } + + $para =~ s/\s+\Z//g; + $para =~ s/\s+/ /g; + $para =~ s/$POD_link_re/$1/go; + + $para =~ s/\.(?:\n.*| [A-Z].*|\Z)//s; + PTDEBUG && _d('Short help:', $para); + + die "No description after option spec $option" if $para =~ m/^=item/; + + if ( my ($base_option) = $option =~ m/^\[no\](.*)/ ) { + $option = $base_option; + $attribs{'negatable'} = 1; + } + + push @specs, { + spec => $self->{parse_attributes}->($self, $option, \%attribs), + desc => $para + . (defined $attribs{default} ? " (default $attribs{default})" : ''), + group => ($attribs{'group'} ? $attribs{'group'} : 'default'), + attributes => \%attribs + }; + } + while ( $para = <$fh> ) { + last unless $para; + if ( $para =~ m/^=head1/ ) { + $para = undef; # Can't 'last' out of a do {} block. + last; + } + last if $para =~ m/^=item /; + } + } while ( $para ); + + die "No valid specs in $self->{head1}" unless @specs; + + close $fh; + return @specs, @rules; +} + +sub _parse_specs { + my ( $self, @specs ) = @_; + my %disables; # special rule that requires deferred checking + + foreach my $opt ( @specs ) { + if ( ref $opt ) { # It's an option spec, not a rule. 
+ PTDEBUG && _d('Parsing opt spec:', + map { ($_, '=>', $opt->{$_}) } keys %$opt); + + my ( $long, $short ) = $opt->{spec} =~ m/^([\w-]+)(?:\|([^!+=]*))?/; + if ( !$long ) { + die "Cannot parse long option from spec $opt->{spec}"; + } + $opt->{long} = $long; + + die "Duplicate long option --$long" if exists $self->{opts}->{$long}; + $self->{opts}->{$long} = $opt; + + if ( length $long == 1 ) { + PTDEBUG && _d('Long opt', $long, 'looks like short opt'); + $self->{short_opts}->{$long} = $long; + } + + if ( $short ) { + die "Duplicate short option -$short" + if exists $self->{short_opts}->{$short}; + $self->{short_opts}->{$short} = $long; + $opt->{short} = $short; + } + else { + $opt->{short} = undef; + } + + $opt->{is_negatable} = $opt->{spec} =~ m/!/ ? 1 : 0; + $opt->{is_cumulative} = $opt->{spec} =~ m/\+/ ? 1 : 0; + $opt->{is_repeatable} = $opt->{attributes}->{repeatable} ? 1 : 0; + $opt->{is_required} = $opt->{desc} =~ m/required/ ? 1 : 0; + + $opt->{group} ||= 'default'; + $self->{groups}->{ $opt->{group} }->{$long} = 1; + + $opt->{value} = undef; + $opt->{got} = 0; + + my ( $type ) = $opt->{spec} =~ m/=(.)/; + $opt->{type} = $type; + PTDEBUG && _d($long, 'type:', $type); + + + $opt->{spec} =~ s/=./=s/ if ( $type && $type =~ m/[HhAadzm]/ ); + + if ( (my ($def) = $opt->{desc} =~ m/default\b(?: ([^)]+))?/) ) { + $self->{defaults}->{$long} = defined $def ? $def : 1; + PTDEBUG && _d($long, 'default:', $def); + } + + if ( $long eq 'config' ) { + $self->{defaults}->{$long} = join(',', $self->get_defaults_files()); + } + + if ( (my ($dis) = $opt->{desc} =~ m/(disables .*)/) ) { + $disables{$long} = $dis; + PTDEBUG && _d('Deferring check of disables rule for', $opt, $dis); + } + + $self->{opts}->{$long} = $opt; + } + else { # It's an option rule, not a spec. 
+ PTDEBUG && _d('Parsing rule:', $opt); + push @{$self->{rules}}, $opt; + my @participants = $self->_get_participants($opt); + my $rule_ok = 0; + + if ( $opt =~ m/mutually exclusive|one and only one/ ) { + $rule_ok = 1; + push @{$self->{mutex}}, \@participants; + PTDEBUG && _d(@participants, 'are mutually exclusive'); + } + if ( $opt =~ m/at least one|one and only one/ ) { + $rule_ok = 1; + push @{$self->{atleast1}}, \@participants; + PTDEBUG && _d(@participants, 'require at least one'); + } + if ( $opt =~ m/default to/ ) { + $rule_ok = 1; + $self->{defaults_to}->{$participants[0]} = $participants[1]; + PTDEBUG && _d($participants[0], 'defaults to', $participants[1]); + } + if ( $opt =~ m/restricted to option groups/ ) { + $rule_ok = 1; + my ($groups) = $opt =~ m/groups ([\w\s\,]+)/; + my @groups = split(',', $groups); + %{$self->{allowed_groups}->{$participants[0]}} = map { + s/\s+//; + $_ => 1; + } @groups; + } + if( $opt =~ m/accepts additional command-line arguments/ ) { + $rule_ok = 1; + $self->{strict} = 0; + PTDEBUG && _d("Strict mode disabled by rule"); + } + + die "Unrecognized option rule: $opt" unless $rule_ok; + } + } + + foreach my $long ( keys %disables ) { + my @participants = $self->_get_participants($disables{$long}); + $self->{disables}->{$long} = \@participants; + PTDEBUG && _d('Option', $long, 'disables', @participants); + } + + return; +} + +sub _get_participants { + my ( $self, $str ) = @_; + my @participants; + foreach my $long ( $str =~ m/--(?:\[no\])?([\w-]+)/g ) { + die "Option --$long does not exist while processing rule $str" + unless exists $self->{opts}->{$long}; + push @participants, $long; + } + PTDEBUG && _d('Participants for', $str, ':', @participants); + return @participants; +} + +sub opts { + my ( $self ) = @_; + my %opts = %{$self->{opts}}; + return %opts; +} + +sub short_opts { + my ( $self ) = @_; + my %short_opts = %{$self->{short_opts}}; + return %short_opts; +} + +sub set_defaults { + my ( $self, %defaults ) = @_; + $self->{defaults} = {}; + foreach my $long ( keys %defaults ) { + die "Cannot set default for nonexistent option $long" + unless exists $self->{opts}->{$long}; + $self->{defaults}->{$long} = $defaults{$long}; + PTDEBUG && _d('Default val for', $long, ':', $defaults{$long}); + } + return; +} + +sub get_defaults { + my ( $self ) = @_; + return $self->{defaults}; +} + +sub get_groups { + my ( $self ) = @_; + return $self->{groups}; +} + +sub _set_option { + my ( $self, $opt, $val ) = @_; + my $long = exists $self->{opts}->{$opt} ? $opt + : exists $self->{short_opts}->{$opt} ? $self->{short_opts}->{$opt} + : die "Getopt::Long gave a nonexistent option: $opt"; + $opt = $self->{opts}->{$long}; + if ( $opt->{is_cumulative} ) { + $opt->{value}++; + } + elsif ( ($opt->{type} || '') eq 's' && $val =~ m/^--?(.+)/ ) { + my $next_opt = $1; + if ( exists $self->{opts}->{$next_opt} + || exists $self->{short_opts}->{$next_opt} ) { + $self->save_error("--$long requires a string value"); + return; + } + else { + if ($opt->{is_repeatable}) { + push @{$opt->{value}} , $val; + } + else { + $opt->{value} = $val; + } + } + } + else { + if ($opt->{is_repeatable}) { + push @{$opt->{value}} , $val; + } + else { + $opt->{value} = $val; + } + } + $opt->{got} = 1; + PTDEBUG && _d('Got option', $long, '=', $val); +} + +sub get_opts { + my ( $self ) = @_; + + foreach my $long ( keys %{$self->{opts}} ) { + $self->{opts}->{$long}->{got} = 0; + $self->{opts}->{$long}->{value} + = exists $self->{defaults}->{$long} ? 
$self->{defaults}->{$long} + : $self->{opts}->{$long}->{is_cumulative} ? 0 + : undef; + } + $self->{got_opts} = 0; + + $self->{errors} = []; + + if ( @ARGV && $ARGV[0] =~/^--config=/ ) { + $ARGV[0] = substr($ARGV[0],9); + $ARGV[0] =~ s/^'(.*)'$/$1/; + $ARGV[0] =~ s/^"(.*)"$/$1/; + $self->_set_option('config', shift @ARGV); + } + if ( @ARGV && $ARGV[0] eq "--config" ) { + shift @ARGV; + $self->_set_option('config', shift @ARGV); + } + if ( $self->has('config') ) { + my @extra_args; + foreach my $filename ( split(',', $self->get('config')) ) { + eval { + push @extra_args, $self->_read_config_file($filename); + }; + if ( $EVAL_ERROR ) { + if ( $self->got('config') ) { + die $EVAL_ERROR; + } + elsif ( PTDEBUG ) { + _d($EVAL_ERROR); + } + } + } + unshift @ARGV, @extra_args; + } + + Getopt::Long::Configure('no_ignore_case', 'bundling'); + GetOptions( + map { $_->{spec} => sub { $self->_set_option(@_); } } + grep { $_->{long} ne 'config' } # --config is handled specially above. + values %{$self->{opts}} + ) or $self->save_error('Error parsing options'); + + if ( exists $self->{opts}->{version} && $self->{opts}->{version}->{got} ) { + if ( $self->{version} ) { + print $self->{version}, "\n"; + exit 0; + } + else { + print "Error parsing version. See the VERSION section of the tool's documentation.\n"; + exit 1; + } + } + + if ( @ARGV && $self->{strict} ) { + $self->save_error("Unrecognized command-line options @ARGV"); + } + + foreach my $mutex ( @{$self->{mutex}} ) { + my @set = grep { $self->{opts}->{$_}->{got} } @$mutex; + if ( @set > 1 ) { + my $err = join(', ', map { "--$self->{opts}->{$_}->{long}" } + @{$mutex}[ 0 .. scalar(@$mutex) - 2] ) + . ' and --'.$self->{opts}->{$mutex->[-1]}->{long} + . ' are mutually exclusive.'; + $self->save_error($err); + } + } + + foreach my $required ( @{$self->{atleast1}} ) { + my @set = grep { $self->{opts}->{$_}->{got} } @$required; + if ( @set == 0 ) { + my $err = join(', ', map { "--$self->{opts}->{$_}->{long}" } + @{$required}[ 0 .. scalar(@$required) - 2] ) + .' or --'.$self->{opts}->{$required->[-1]}->{long}; + $self->save_error("Specify at least one of $err"); + } + } + + $self->_check_opts( keys %{$self->{opts}} ); + $self->{got_opts} = 1; + return; +} + +sub _check_opts { + my ( $self, @long ) = @_; + my $long_last = scalar @long; + while ( @long ) { + foreach my $i ( 0..$#long ) { + my $long = $long[$i]; + next unless $long; + my $opt = $self->{opts}->{$long}; + if ( $opt->{got} ) { + if ( exists $self->{disables}->{$long} ) { + my @disable_opts = @{$self->{disables}->{$long}}; + map { $self->{opts}->{$_}->{value} = undef; } @disable_opts; + PTDEBUG && _d('Unset options', @disable_opts, + 'because', $long,'disables them'); + } + + if ( exists $self->{allowed_groups}->{$long} ) { + + my @restricted_groups = grep { + !exists $self->{allowed_groups}->{$long}->{$_} + } keys %{$self->{groups}}; + + my @restricted_opts; + foreach my $restricted_group ( @restricted_groups ) { + RESTRICTED_OPT: + foreach my $restricted_opt ( + keys %{$self->{groups}->{$restricted_group}} ) + { + next RESTRICTED_OPT if $restricted_opt eq $long; + push @restricted_opts, $restricted_opt + if $self->{opts}->{$restricted_opt}->{got}; + } + } + + if ( @restricted_opts ) { + my $err; + if ( @restricted_opts == 1 ) { + $err = "--$restricted_opts[0]"; + } + else { + $err = join(', ', + map { "--$self->{opts}->{$_}->{long}" } + grep { $_ } + @restricted_opts[0..scalar(@restricted_opts) - 2] + ) + . 
' or --'.$self->{opts}->{$restricted_opts[-1]}->{long}; + } + $self->save_error("--$long is not allowed with $err"); + } + } + + } + elsif ( $opt->{is_required} ) { + $self->save_error("Required option --$long must be specified"); + } + + $self->_validate_type($opt); + if ( $opt->{parsed} ) { + delete $long[$i]; + } + else { + PTDEBUG && _d('Temporarily failed to parse', $long); + } + } + + die "Failed to parse options, possibly due to circular dependencies" + if @long == $long_last; + $long_last = @long; + } + + return; +} + +sub _validate_type { + my ( $self, $opt ) = @_; + return unless $opt; + + if ( !$opt->{type} ) { + $opt->{parsed} = 1; + return; + } + + my $val = $opt->{value}; + + if ( $val && $opt->{type} eq 'm' ) { # type time + PTDEBUG && _d('Parsing option', $opt->{long}, 'as a time value'); + my ( $prefix, $num, $suffix ) = $val =~ m/([+-]?)(\d+)([a-z])?$/; + if ( !$suffix ) { + my ( $s ) = $opt->{desc} =~ m/\(suffix (.)\)/; + $suffix = $s || 's'; + PTDEBUG && _d('No suffix given; using', $suffix, 'for', + $opt->{long}, '(value:', $val, ')'); + } + if ( $suffix =~ m/[smhd]/ ) { + $val = $suffix eq 's' ? $num # Seconds + : $suffix eq 'm' ? $num * 60 # Minutes + : $suffix eq 'h' ? $num * 3600 # Hours + : $num * 86400; # Days + $opt->{value} = ($prefix || '') . $val; + PTDEBUG && _d('Setting option', $opt->{long}, 'to', $val); + } + else { + $self->save_error("Invalid time suffix for --$opt->{long}"); + } + } + elsif ( $val && $opt->{type} eq 'd' ) { # type DSN + PTDEBUG && _d('Parsing option', $opt->{long}, 'as a DSN'); + my $prev = {}; + my $from_key = $self->{defaults_to}->{ $opt->{long} }; + if ( $from_key ) { + PTDEBUG && _d($opt->{long}, 'DSN copies from', $from_key, 'DSN'); + if ( $self->{opts}->{$from_key}->{parsed} ) { + $prev = $self->{opts}->{$from_key}->{value}; + } + else { + PTDEBUG && _d('Cannot parse', $opt->{long}, 'until', + $from_key, 'parsed'); + return; + } + } + my $defaults = $self->{DSNParser}->parse_options($self); + if (!$opt->{attributes}->{repeatable}) { + $opt->{value} = $self->{DSNParser}->parse($val, $prev, $defaults); + } else { + my $values = []; + for my $dsn_string (@$val) { + push @$values, $self->{DSNParser}->parse($dsn_string, $prev, $defaults); + } + $opt->{value} = $values; + } + } + elsif ( $val && $opt->{type} eq 'z' ) { # type size + PTDEBUG && _d('Parsing option', $opt->{long}, 'as a size value'); + $self->_parse_size($opt, $val); + } + elsif ( $opt->{type} eq 'H' || (defined $val && $opt->{type} eq 'h') ) { + $opt->{value} = { map { $_ => 1 } split(/(?<!\\),\s*/, ($val || '')) }; + } + elsif ( $opt->{type} eq 'A' || (defined $val && $opt->{type} eq 'a') ) { + $opt->{value} = [ split(/(?<!\\),\s*/, ($val || '')) ]; + } + else { + PTDEBUG && _d('Nothing to validate for option', + $opt->{long}, 'type', $opt->{type}, 'value', $val); + } + + $opt->{parsed} = 1; + return; +} + +sub get { + my ( $self, $opt ) = @_; + my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt); + die "Option $opt does not exist" + unless $long && exists $self->{opts}->{$long}; + return $self->{opts}->{$long}->{value}; +} + +sub got { + my ( $self, $opt ) = @_; + my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt); + die "Option $opt does not exist" + unless $long && exists $self->{opts}->{$long}; + return $self->{opts}->{$long}->{got}; +} + +sub has { + my ( $self, $opt ) = @_; + my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt); + return defined $long ? exists $self->{opts}->{$long} : 0; +} + +sub set { + my ( $self, $opt, $val ) = @_; + my $long = (length $opt == 1 ?
$self->{short_opts}->{$opt} : $opt); + die "Option $opt does not exist" + unless $long && exists $self->{opts}->{$long}; + $self->{opts}->{$long}->{value} = $val; + return; +} + +sub save_error { + my ( $self, $error ) = @_; + push @{$self->{errors}}, $error; + return; +} + +sub errors { + my ( $self ) = @_; + return $self->{errors}; +} + +sub usage { + my ( $self ) = @_; + warn "No usage string is set" unless $self->{usage}; # XXX + return "Usage: " . ($self->{usage} || '') . "\n"; +} + +sub descr { + my ( $self ) = @_; + warn "No description string is set" unless $self->{description}; # XXX + my $descr = ($self->{description} || $self->{program_name} || '') + . " For more details, please use the --help option, " + . "or try 'perldoc $PROGRAM_NAME' " + . "for complete documentation."; + $descr = join("\n", $descr =~ m/(.{0,80})(?:\s+|$)/g) + unless $ENV{DONT_BREAK_LINES}; + $descr =~ s/ +$//mg; + return $descr; +} + +sub usage_or_errors { + my ( $self, $file, $return ) = @_; + $file ||= $self->{file} || __FILE__; + + if ( !$self->{description} || !$self->{usage} ) { + PTDEBUG && _d("Getting description and usage from SYNOPSIS in", $file); + my %synop = $self->_parse_synopsis($file); + $self->{description} ||= $synop{description}; + $self->{usage} ||= $synop{usage}; + PTDEBUG && _d("Description:", $self->{description}, + "\nUsage:", $self->{usage}); + } + + if ( $self->{opts}->{help}->{got} ) { + print $self->print_usage() or die "Cannot print usage: $OS_ERROR"; + exit 0 unless $return; + } + elsif ( scalar @{$self->{errors}} ) { + print $self->print_errors() or die "Cannot print errors: $OS_ERROR"; + exit 1 unless $return; + } + + return; +} + +sub print_errors { + my ( $self ) = @_; + my $usage = $self->usage() . "\n"; + if ( (my @errors = @{$self->{errors}}) ) { + $usage .= join("\n * ", 'Errors in command-line arguments:', @errors) + . "\n"; + } + return $usage . "\n" . $self->descr(); +} + +sub print_usage { + my ( $self ) = @_; + die "Run get_opts() before print_usage()" unless $self->{got_opts}; + my @opts = values %{$self->{opts}}; + + my $maxl = max( + map { + length($_->{long}) # option long name + + ($_->{is_negatable} ? 4 : 0) # "[no]" if opt is negatable + + ($_->{type} ? 2 : 0) # "=x" where x is the opt type + } + @opts); + + my $maxs = max(0, + map { + length($_) + + ($self->{opts}->{$_}->{is_negatable} ? 4 : 0) + + ($self->{opts}->{$_}->{type} ? 2 : 0) + } + values %{$self->{short_opts}}); + + my $lcol = max($maxl, ($maxs + 3)); + my $rcol = 80 - $lcol - 6; + my $rpad = ' ' x ( 80 - $rcol ); + + $maxs = max($lcol - 3, $maxs); + + my $usage = $self->descr() . "\n" . $self->usage(); + + my @groups = reverse sort grep { $_ ne 'default'; } keys %{$self->{groups}}; + push @groups, 'default'; + + foreach my $group ( reverse @groups ) { + $usage .= "\n".($group eq 'default' ? 'Options' : $group).":\n\n"; + foreach my $opt ( + sort { $a->{long} cmp $b->{long} } + grep { $_->{group} eq $group } + @opts ) + { + my $long = $opt->{is_negatable} ? "[no]$opt->{long}" : $opt->{long}; + my $short = $opt->{short}; + my $desc = $opt->{desc}; + + $long .= $opt->{type} ? "=$opt->{type}" : ""; + + if ( $opt->{type} && $opt->{type} eq 'm' ) { + my ($s) = $desc =~ m/\(suffix (.)\)/; + $s ||= 's'; + $desc =~ s/\s+\(suffix .\)//; + $desc .= ". Optional suffix s=seconds, m=minutes, h=hours, " + . 
"d=days; if no suffix, $s is used."; + } + $desc = join("\n$rpad", grep { $_ } $desc =~ m/(.{0,$rcol}(?!\W))(?:\s+|(?<=\W)|$)/g); + $desc =~ s/ +$//mg; + if ( $short ) { + $usage .= sprintf(" --%-${maxs}s -%s %s\n", $long, $short, $desc); + } + else { + $usage .= sprintf(" --%-${lcol}s %s\n", $long, $desc); + } + } + } + + $usage .= "\nOption types: s=string, i=integer, f=float, h/H/a/A=comma-separated list, d=DSN, z=size, m=time\n"; + + if ( (my @rules = @{$self->{rules}}) ) { + $usage .= "\nRules:\n\n"; + $usage .= join("\n", map { " $_" } @rules) . "\n"; + } + if ( $self->{DSNParser} ) { + $usage .= "\n" . $self->{DSNParser}->usage(); + } + $usage .= "\nOptions and values after processing arguments:\n\n"; + foreach my $opt ( sort { $a->{long} cmp $b->{long} } @opts ) { + my $val = $opt->{value}; + my $type = $opt->{type} || ''; + my $bool = $opt->{spec} =~ m/^[\w-]+(?:\|[\w-])?!?$/; + $val = $bool ? ( $val ? 'TRUE' : 'FALSE' ) + : !defined $val ? '(No value)' + : $type eq 'd' ? $self->{DSNParser}->as_string($val) + : $type =~ m/H|h/ ? join(',', sort keys %$val) + : $type =~ m/A|a/ ? join(',', @$val) + : $val; + $usage .= sprintf(" --%-${lcol}s %s\n", $opt->{long}, $val); + } + return $usage; +} + +sub prompt_noecho { + shift @_ if ref $_[0] eq __PACKAGE__; + my ( $prompt ) = @_; + local $OUTPUT_AUTOFLUSH = 1; + print STDERR $prompt + or die "Cannot print: $OS_ERROR"; + my $response; + eval { + require Term::ReadKey; + Term::ReadKey::ReadMode('noecho'); + chomp($response = ); + Term::ReadKey::ReadMode('normal'); + print "\n" + or die "Cannot print: $OS_ERROR"; + }; + if ( $EVAL_ERROR ) { + die "Cannot read response; is Term::ReadKey installed? $EVAL_ERROR"; + } + return $response; +} + +sub _read_config_file { + my ( $self, $filename ) = @_; + open my $fh, "<", $filename or die "Cannot open $filename: $OS_ERROR\n"; + my @args; + my $prefix = '--'; + my $parse = 1; + + LINE: + while ( my $line = <$fh> ) { + chomp $line; + next LINE if $line =~ m/^\s*(?:\#|\;|$)/; + $line =~ s/\s+#.*$//g; + $line =~ s/^\s+|\s+$//g; + if ( $line eq '--' ) { + $prefix = ''; + $parse = 0; + next LINE; + } + + if ( $parse + && !$self->has('version-check') + && $line =~ /version-check/ + ) { + next LINE; + } + + if ( $parse + && (my($opt, $arg) = $line =~ m/^\s*([^=\s]+?)(?:\s*=\s*(.*?)\s*)?$/) + ) { + push @args, grep { defined $_ } ("$prefix$opt", $arg); + } + elsif ( $line =~ m/./ ) { + push @args, $line; + } + else { + die "Syntax error in file $filename at line $INPUT_LINE_NUMBER"; + } + } + close $fh; + return @args; +} + +sub read_para_after { + my ( $self, $file, $regex ) = @_; + open my $fh, "<", $file or die "Can't open $file: $OS_ERROR"; + local $INPUT_RECORD_SEPARATOR = ''; + my $para; + while ( $para = <$fh> ) { + next unless $para =~ m/^=pod$/m; + last; + } + while ( $para = <$fh> ) { + next unless $para =~ m/$regex/; + last; + } + $para = <$fh>; + chomp($para); + close $fh or die "Can't close $file: $OS_ERROR"; + return $para; +} + +sub clone { + my ( $self ) = @_; + + my %clone = map { + my $hashref = $self->{$_}; + my $val_copy = {}; + foreach my $key ( keys %$hashref ) { + my $ref = ref $hashref->{$key}; + $val_copy->{$key} = !$ref ? $hashref->{$key} + : $ref eq 'HASH' ? { %{$hashref->{$key}} } + : $ref eq 'ARRAY' ? 
[ @{$hashref->{$key}} ] + : $hashref->{$key}; + } + $_ => $val_copy; + } qw(opts short_opts defaults); + + foreach my $scalar ( qw(got_opts) ) { + $clone{$scalar} = $self->{$scalar}; + } + + return bless \%clone; +} + +sub _parse_size { + my ( $self, $opt, $val ) = @_; + + if ( lc($val || '') eq 'null' ) { + PTDEBUG && _d('NULL size for', $opt->{long}); + $opt->{value} = 'null'; + return; + } + + my %factor_for = (k => 1_024, M => 1_048_576, G => 1_073_741_824); + my ($pre, $num, $factor) = $val =~ m/^([+-])?(\d+)([kMG])?$/; + if ( defined $num ) { + if ( $factor ) { + $num *= $factor_for{$factor}; + PTDEBUG && _d('Setting option', $opt->{y}, + 'to num', $num, '* factor', $factor); + } + $opt->{value} = ($pre || '') . $num; + } + else { + $self->save_error("Invalid size for --$opt->{long}: $val"); + } + return; +} + +sub _parse_attribs { + my ( $self, $option, $attribs ) = @_; + my $types = $self->{types}; + return $option + . ($attribs->{'short form'} ? '|' . $attribs->{'short form'} : '' ) + . ($attribs->{'negatable'} ? '!' : '' ) + . ($attribs->{'cumulative'} ? '+' : '' ) + . ($attribs->{'type'} ? '=' . $types->{$attribs->{type}} : '' ); +} + +sub _parse_synopsis { + my ( $self, $file ) = @_; + $file ||= $self->{file} || __FILE__; + PTDEBUG && _d("Parsing SYNOPSIS in", $file); + + local $INPUT_RECORD_SEPARATOR = ''; # read paragraphs + open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; + my $para; + 1 while defined($para = <$fh>) && $para !~ m/^=head1 SYNOPSIS/; + die "$file does not contain a SYNOPSIS section" unless $para; + my @synop; + for ( 1..2 ) { # 1 for the usage, 2 for the description + my $para = <$fh>; + push @synop, $para; + } + close $fh; + PTDEBUG && _d("Raw SYNOPSIS text:", @synop); + my ($usage, $desc) = @synop; + die "The SYNOPSIS section in $file is not formatted properly" + unless $usage && $desc; + + $usage =~ s/^\s*Usage:\s+(.+)/$1/; + chomp $usage; + + $desc =~ s/\n/ /g; + $desc =~ s/\s{2,}/ /g; + $desc =~ s/\. ([A-Z][a-z])/. $1/g; + $desc =~ s/\s+$//; + + return ( + description => $desc, + usage => $usage, + ); +}; + +sub set_vars { + my ($self, $file) = @_; + $file ||= $self->{file} || __FILE__; + + my %user_vars; + my $user_vars = $self->has('set-vars') ? $self->get('set-vars') : undef; + if ( $user_vars ) { + foreach my $var_val ( @$user_vars ) { + my ($var, $val) = $var_val =~ m/([^\s=]+)=(\S+)/; + die "Invalid --set-vars value: $var_val\n" unless $var && defined $val; + $user_vars{$var} = { + val => $val, + default => 0, + }; + } + } + + my %default_vars; + my $default_vars = $self->read_para_after($file, qr/MAGIC_set_vars/); + if ( $default_vars ) { + %default_vars = map { + my $var_val = $_; + my ($var, $val) = $var_val =~ m/([^\s=]+)=(\S+)/; + die "Invalid --set-vars value: $var_val\n" unless $var && defined $val; + $var => { + val => $val, + default => 1, + }; + } split("\n", $default_vars); + } + + my %vars = ( + %default_vars, # first the tool's defaults + %user_vars, # then the user's which overwrite the defaults + ); + PTDEBUG && _d('--set-vars:', Dumper(\%vars)); + return \%vars; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' }
+        @_;
+   print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
+}
+
+if ( PTDEBUG ) {
+   print STDERR '# ', $^X, ' ', $], "\n";
+   if ( my $uname = `uname -a` ) {
+      $uname =~ s/\s+/ /g;
+      print STDERR "# $uname\n";
+   }
+   print STDERR '# Arguments: ',
+      join(' ', map { my $a = "_[$_]_"; $a =~ s/\n/\n# /g; $a; } @ARGV), "\n";
+}
+
+1;
+}
+# ###########################################################################
+# End OptionParser package
+# ###########################################################################
+
+# ###########################################################################
+# DSNParser package
+# This package is a copy without comments from the original. The original
+# with comments and its test file can be found in the GitHub repository at,
+#   lib/DSNParser.pm
+#   t/lib/DSNParser.t
+# See https://github.com/percona/percona-toolkit for more information.
+# ###########################################################################
+{
+package DSNParser;
+
+use strict;
+use warnings FATAL => 'all';
+use English qw(-no_match_vars);
+use constant PTDEBUG => $ENV{PTDEBUG} || 0;
+
+use Data::Dumper;
+$Data::Dumper::Indent    = 0;
+$Data::Dumper::Quotekeys = 0;
+
+my $dsn_sep = qr/(?<!\\),/;
+
+eval {
+   require DBI;
+};
+my $have_dbi = $EVAL_ERROR ? 0 : 1;
+
+sub new {
+   my ( $class, %args ) = @_;
+   foreach my $arg ( qw(opts) ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my $self = {
+      opts => {}  # h, P, u, etc. Should come from DSN OPTIONS section in POD.
+   };
+   foreach my $opt ( @{$args{opts}} ) {
+      if ( !$opt->{key} || !$opt->{desc} ) {
+         die "Invalid DSN option: ", Dumper($opt);
+      }
+      PTDEBUG && _d('DSN option:',
+         join(', ',
+            map { "$_=" . (defined $opt->{$_} ? ($opt->{$_} || '') : 'undef') }
+               keys %$opt
+         )
+      );
+      $self->{opts}->{$opt->{key}} = {
+         dsn  => $opt->{dsn},
+         desc => $opt->{desc},
+         copy => $opt->{copy} || 0,
+      };
+   }
+   return bless $self, $class;
+}
+
+sub prop {
+   my ( $self, $prop, $value ) = @_;
+   if ( @_ > 2 ) {
+      PTDEBUG && _d('Setting', $prop, 'property');
+      $self->{$prop} = $value;
+   }
+   return $self->{$prop};
+}
+
+sub parse {
+   my ( $self, $dsn, $prev, $defaults ) = @_;
+   if ( !$dsn ) {
+      PTDEBUG && _d('No DSN to parse');
+      return;
+   }
+   PTDEBUG && _d('Parsing', $dsn);
+   $prev     ||= {};
+   $defaults ||= {};
+   my %given_props;
+   my %final_props;
+   my $opts = $self->{opts};
+
+   foreach my $dsn_part ( split($dsn_sep, $dsn) ) {
+      $dsn_part =~ s/\\,/,/g;
+      if ( my ($prop_key, $prop_val) = $dsn_part =~ m/^(.)=(.*)$/ ) {
+         $given_props{$prop_key} = $prop_val;
+      }
+      else {
+         PTDEBUG && _d('Interpreting', $dsn_part, 'as h=', $dsn_part);
+         $given_props{h} = $dsn_part;
+      }
+   }
+
+   foreach my $key ( keys %$opts ) {
+      PTDEBUG && _d('Finding value for', $key);
+      $final_props{$key} = $given_props{$key};
+      if ( !defined $final_props{$key}
+           && defined $prev->{$key} && $opts->{$key}->{copy} )
+      {
+         $final_props{$key} = $prev->{$key};
+         PTDEBUG && _d('Copying value for', $key, 'from previous DSN');
+      }
+      if ( !defined $final_props{$key} ) {
+         $final_props{$key} = $defaults->{$key};
+         PTDEBUG && _d('Copying value for', $key, 'from defaults');
+      }
+   }
+
+   foreach my $key ( keys %given_props ) {
+      die "Unknown DSN option '$key' in '$dsn'. For more details, "
+         . "please use the --help option, or try 'perldoc $PROGRAM_NAME' "
+         . "for complete documentation."
+         unless exists $opts->{$key};
+   }
+   if ( (my $required = $self->prop('required')) ) {
+      foreach my $key ( keys %$required ) {
+         die "Missing required DSN option '$key' in '$dsn'. For more details, "
+            . "please use the --help option, or try 'perldoc $PROGRAM_NAME' "
+            . "for complete documentation."
+ unless $final_props{$key}; + } + } + + return \%final_props; +} + +sub parse_options { + my ( $self, $o ) = @_; + die 'I need an OptionParser object' unless ref $o eq 'OptionParser'; + my $dsn_string + = join(',', + map { "$_=".$o->get($_); } + grep { $o->has($_) && $o->get($_) } + keys %{$self->{opts}} + ); + PTDEBUG && _d('DSN string made from options:', $dsn_string); + return $self->parse($dsn_string); +} + +sub as_string { + my ( $self, $dsn, $props ) = @_; + return $dsn unless ref $dsn; + my @keys = $props ? @$props : sort keys %$dsn; + return join(',', + map { "$_=" . ($_ eq 'p' ? '...' : $dsn->{$_}) } + grep { + exists $self->{opts}->{$_} + && exists $dsn->{$_} + && defined $dsn->{$_} + } @keys); +} + +sub usage { + my ( $self ) = @_; + my $usage + = "DSN syntax is key=value[,key=value...] Allowable DSN keys:\n\n" + . " KEY COPY MEANING\n" + . " === ==== =============================================\n"; + my %opts = %{$self->{opts}}; + foreach my $key ( sort keys %opts ) { + $usage .= " $key " + . ($opts{$key}->{copy} ? 'yes ' : 'no ') + . ($opts{$key}->{desc} || '[No description]') + . "\n"; + } + $usage .= "\n If the DSN is a bareword, the word is treated as the 'h' key.\n"; + return $usage; +} + +sub get_cxn_params { + my ( $self, $info ) = @_; + my $dsn; + my %opts = %{$self->{opts}}; + my $driver = $self->prop('dbidriver') || ''; + if ( $driver eq 'Pg' ) { + $dsn = 'DBI:Pg:dbname=' . ( $info->{D} || '' ) . ';' + . join(';', map { "$opts{$_}->{dsn}=$info->{$_}" } + grep { defined $info->{$_} } + qw(h P)); + } + else { + $dsn = 'DBI:mysql:' . ( $info->{D} || '' ) . ';' + . join(';', map { "$opts{$_}->{dsn}=$info->{$_}" } + grep { defined $info->{$_} } + qw(F h P S A)) + . ';mysql_read_default_group=client' + . ($info->{L} ? ';mysql_local_infile=1' : ''); + } + PTDEBUG && _d($dsn); + return ($dsn, $info->{u}, $info->{p}); +} + +sub fill_in_dsn { + my ( $self, $dbh, $dsn ) = @_; + my $vars = $dbh->selectall_hashref('SHOW VARIABLES', 'Variable_name'); + my ($user, $db) = $dbh->selectrow_array('SELECT USER(), DATABASE()'); + $user =~ s/@.*//; + $dsn->{h} ||= $vars->{hostname}->{Value}; + $dsn->{S} ||= $vars->{'socket'}->{Value}; + $dsn->{P} ||= $vars->{port}->{Value}; + $dsn->{u} ||= $user; + $dsn->{D} ||= $db; +} + +sub get_dbh { + my ( $self, $cxn_string, $user, $pass, $opts ) = @_; + $opts ||= {}; + my $defaults = { + AutoCommit => 0, + RaiseError => 1, + PrintError => 0, + ShowErrorStatement => 1, + mysql_enable_utf8 => ($cxn_string =~ m/charset=utf8/i ? 1 : 0), + }; + @{$defaults}{ keys %$opts } = values %$opts; + if (delete $defaults->{L}) { # L for LOAD DATA LOCAL INFILE, our own extension + $defaults->{mysql_local_infile} = 1; + } + + if ( $opts->{mysql_use_result} ) { + $defaults->{mysql_use_result} = 1; + } + + if ( !$have_dbi ) { + die "Cannot connect to MySQL because the Perl DBI module is not " + . "installed or not found. Run 'perl -MDBI' to see the directories " + . "that Perl searches for DBI. If DBI is not installed, try:\n" + . " Debian/Ubuntu apt-get install libdbi-perl\n" + . " RHEL/CentOS yum install perl-DBI\n" + . 
" OpenSolaris pkg install pkg:/SUNWpmdbi\n"; + + } + + my $dbh; + my $tries = 2; + while ( !$dbh && $tries-- ) { + PTDEBUG && _d($cxn_string, ' ', $user, ' ', $pass, + join(', ', map { "$_=>$defaults->{$_}" } keys %$defaults )); + + $dbh = eval { DBI->connect($cxn_string, $user, $pass, $defaults) }; + + if ( !$dbh && $EVAL_ERROR ) { + if ( $EVAL_ERROR =~ m/locate DBD\/mysql/i ) { + die "Cannot connect to MySQL because the Perl DBD::mysql module is " + . "not installed or not found. Run 'perl -MDBD::mysql' to see " + . "the directories that Perl searches for DBD::mysql. If " + . "DBD::mysql is not installed, try:\n" + . " Debian/Ubuntu apt-get install libdbd-mysql-perl\n" + . " RHEL/CentOS yum install perl-DBD-MySQL\n" + . " OpenSolaris pgk install pkg:/SUNWapu13dbd-mysql\n"; + } + elsif ( $EVAL_ERROR =~ m/not a compiled character set|character set utf8/ ) { + PTDEBUG && _d('Going to try again without utf8 support'); + delete $defaults->{mysql_enable_utf8}; + } + if ( !$tries ) { + die $EVAL_ERROR; + } + } + } + + if ( $cxn_string =~ m/mysql/i ) { + my $sql; + + if ( my ($charset) = $cxn_string =~ m/charset=([\w]+)/ ) { + $sql = qq{/*!40101 SET NAMES "$charset"*/}; + PTDEBUG && _d($dbh, $sql); + eval { $dbh->do($sql) }; + if ( $EVAL_ERROR ) { + die "Error setting NAMES to $charset: $EVAL_ERROR"; + } + PTDEBUG && _d('Enabling charset for STDOUT'); + if ( $charset eq 'utf8' ) { + binmode(STDOUT, ':utf8') + or die "Can't binmode(STDOUT, ':utf8'): $OS_ERROR"; + } + else { + binmode(STDOUT) or die "Can't binmode(STDOUT): $OS_ERROR"; + } + } + + if ( my $vars = $self->prop('set-vars') ) { + $self->set_vars($dbh, $vars); + } + + $sql = 'SELECT @@SQL_MODE'; + PTDEBUG && _d($dbh, $sql); + my ($sql_mode) = eval { $dbh->selectrow_array($sql) }; + if ( $EVAL_ERROR ) { + die "Error getting the current SQL_MODE: $EVAL_ERROR"; + } + + $sql = 'SET @@SQL_QUOTE_SHOW_CREATE = 1' + . '/*!40101, @@SQL_MODE=\'NO_AUTO_VALUE_ON_ZERO' + . ($sql_mode ? ",$sql_mode" : '') + . '\'*/'; + PTDEBUG && _d($dbh, $sql); + eval { $dbh->do($sql) }; + if ( $EVAL_ERROR ) { + die "Error setting SQL_QUOTE_SHOW_CREATE, SQL_MODE" + . ($sql_mode ? " and $sql_mode" : '') + . ": $EVAL_ERROR"; + } + } + my ($mysql_version) = eval { $dbh->selectrow_array('SELECT VERSION()') }; + if ($EVAL_ERROR) { + die "Cannot get MySQL version: $EVAL_ERROR"; + } + + my (undef, $character_set_server) = eval { $dbh->selectrow_array("SHOW VARIABLES LIKE 'character_set_server'") }; + if ($EVAL_ERROR) { + die "Cannot get MySQL var character_set_server: $EVAL_ERROR"; + } + + if ($mysql_version =~ m/^(\d+)\.(\d)\.(\d+).*/) { + if ($1 >= 8 && $character_set_server =~ m/^utf8/) { + $dbh->{mysql_enable_utf8} = 1; + my $msg = "MySQL version $mysql_version >= 8 and character_set_server = $character_set_server\n". 
+ "Setting: SET NAMES $character_set_server"; + PTDEBUG && _d($msg); + eval { $dbh->do("SET NAMES 'utf8mb4'") }; + if ($EVAL_ERROR) { + die "Cannot SET NAMES $character_set_server: $EVAL_ERROR"; + } + } + } + + PTDEBUG && _d('DBH info: ', + $dbh, + Dumper($dbh->selectrow_hashref( + 'SELECT DATABASE(), CONNECTION_ID(), VERSION()/*!50038 , @@hostname*/')), + 'Connection info:', $dbh->{mysql_hostinfo}, + 'Character set info:', Dumper($dbh->selectall_arrayref( + "SHOW VARIABLES LIKE 'character_set%'", { Slice => {}})), + '$DBD::mysql::VERSION:', $DBD::mysql::VERSION, + '$DBI::VERSION:', $DBI::VERSION, + ); + + return $dbh; +} + +sub get_hostname { + my ( $self, $dbh ) = @_; + if ( my ($host) = ($dbh->{mysql_hostinfo} || '') =~ m/^(\w+) via/ ) { + return $host; + } + my ( $hostname, $one ) = $dbh->selectrow_array( + 'SELECT /*!50038 @@hostname, */ 1'); + return $hostname; +} + +sub disconnect { + my ( $self, $dbh ) = @_; + PTDEBUG && $self->print_active_handles($dbh); + $dbh->disconnect; +} + +sub print_active_handles { + my ( $self, $thing, $level ) = @_; + $level ||= 0; + printf("# Active %sh: %s %s %s\n", ($thing->{Type} || 'undef'), "\t" x $level, + $thing, (($thing->{Type} || '') eq 'st' ? $thing->{Statement} || '' : '')) + or die "Cannot print: $OS_ERROR"; + foreach my $handle ( grep {defined} @{ $thing->{ChildHandles} } ) { + $self->print_active_handles( $handle, $level + 1 ); + } +} + +sub copy { + my ( $self, $dsn_1, $dsn_2, %args ) = @_; + die 'I need a dsn_1 argument' unless $dsn_1; + die 'I need a dsn_2 argument' unless $dsn_2; + my %new_dsn = map { + my $key = $_; + my $val; + if ( $args{overwrite} ) { + $val = defined $dsn_1->{$key} ? $dsn_1->{$key} : $dsn_2->{$key}; + } + else { + $val = defined $dsn_2->{$key} ? $dsn_2->{$key} : $dsn_1->{$key}; + } + $key => $val; + } keys %{$self->{opts}}; + return \%new_dsn; +} + +sub set_vars { + my ($self, $dbh, $vars) = @_; + + return unless $vars; + + foreach my $var ( sort keys %$vars ) { + my $val = $vars->{$var}->{val}; + + (my $quoted_var = $var) =~ s/_/\\_/; + my ($var_exists, $current_val); + eval { + ($var_exists, $current_val) = $dbh->selectrow_array( + "SHOW VARIABLES LIKE '$quoted_var'"); + }; + my $e = $EVAL_ERROR; + if ( $e ) { + PTDEBUG && _d($e); + } + + if ( $vars->{$var}->{default} && !$var_exists ) { + PTDEBUG && _d('Not setting default var', $var, + 'because it does not exist'); + next; + } + + if ( $current_val && $current_val eq $val ) { + PTDEBUG && _d('Not setting var', $var, 'because its value', + 'is already', $val); + next; + } + + my $sql = "SET SESSION $var=$val"; + PTDEBUG && _d($dbh, $sql); + eval { $dbh->do($sql) }; + if ( my $set_error = $EVAL_ERROR ) { + chomp($set_error); + $set_error =~ s/ at \S+ line \d+//; + my $msg = "Error setting $var: $set_error"; + if ( $current_val ) { + $msg .= " The current value for $var is $current_val. " + . "If the variable is read only (not dynamic), specify " + . "--set-vars $var=$current_val to avoid this warning, " + . "else manually set the variable and restart MySQL."; + } + warn $msg . "\n\n"; + } + } + + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End DSNParser package +# ########################################################################### + +# ########################################################################### +# Cxn package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/Cxn.pm +# t/lib/Cxn.t +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### +{ +package Cxn; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Scalar::Util qw(blessed); +use constant { + PTDEBUG => $ENV{PTDEBUG} || 0, + PERCONA_TOOLKIT_TEST_USE_DSN_NAMES => $ENV{PERCONA_TOOLKIT_TEST_USE_DSN_NAMES} || 0, +}; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(DSNParser OptionParser); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my ($dp, $o) = @args{@required_args}; + + my $dsn_defaults = $dp->parse_options($o); + my $prev_dsn = $args{prev_dsn}; + my $dsn = $args{dsn}; + if ( !$dsn ) { + $args{dsn_string} ||= 'h=' . ($dsn_defaults->{h} || 'localhost'); + + $dsn = $dp->parse( + $args{dsn_string}, $prev_dsn, $dsn_defaults); + } + elsif ( $prev_dsn ) { + $dsn = $dp->copy($prev_dsn, $dsn); + } + + my $dsn_name = $dp->as_string($dsn, [qw(h P S)]) + || $dp->as_string($dsn, [qw(F)]) + || ''; + + my $self = { + dsn => $dsn, + dbh => $args{dbh}, + dsn_name => $dsn_name, + hostname => '', + set => $args{set}, + NAME_lc => defined($args{NAME_lc}) ? 
$args{NAME_lc} : 1, + dbh_set => 0, + ask_pass => $o->get('ask-pass'), + DSNParser => $dp, + is_cluster_node => undef, + parent => $args{parent}, + }; + + return bless $self, $class; +} + +sub connect { + my ( $self, %opts ) = @_; + my $dsn = $opts{dsn} || $self->{dsn}; + my $dp = $self->{DSNParser}; + + my $dbh = $self->{dbh}; + if ( !$dbh || !$dbh->ping() ) { + if ( $self->{ask_pass} && !$self->{asked_for_pass} && !defined $dsn->{p} ) { + $dsn->{p} = OptionParser::prompt_noecho("Enter MySQL password: "); + $self->{asked_for_pass} = 1; + } + $dbh = $dp->get_dbh( + $dp->get_cxn_params($dsn), + { + AutoCommit => 1, + %opts, + }, + ); + } + + $dbh = $self->set_dbh($dbh); + if ( $opts{dsn} ) { + $self->{dsn} = $dsn; + $self->{dsn_name} = $dp->as_string($dsn, [qw(h P S)]) + || $dp->as_string($dsn, [qw(F)]) + || ''; + + } + PTDEBUG && _d($dbh, 'Connected dbh to', $self->{hostname},$self->{dsn_name}); + return $dbh; +} + +sub set_dbh { + my ($self, $dbh) = @_; + + if ( $self->{dbh} && $self->{dbh} == $dbh && $self->{dbh_set} ) { + PTDEBUG && _d($dbh, 'Already set dbh'); + return $dbh; + } + + PTDEBUG && _d($dbh, 'Setting dbh'); + + $dbh->{FetchHashKeyName} = 'NAME_lc' if $self->{NAME_lc}; + + my $sql = 'SELECT @@server_id /*!50038 , @@hostname*/'; + PTDEBUG && _d($dbh, $sql); + my ($server_id, $hostname) = $dbh->selectrow_array($sql); + PTDEBUG && _d($dbh, 'hostname:', $hostname, $server_id); + if ( $hostname ) { + $self->{hostname} = $hostname; + } + + if ( $self->{parent} ) { + PTDEBUG && _d($dbh, 'Setting InactiveDestroy=1 in parent'); + $dbh->{InactiveDestroy} = 1; + } + + if ( my $set = $self->{set}) { + $set->($dbh); + } + + $self->{dbh} = $dbh; + $self->{dbh_set} = 1; + return $dbh; +} + +sub lost_connection { + my ($self, $e) = @_; + return 0 unless $e; + return $e =~ m/MySQL server has gone away/ + || $e =~ m/Lost connection to MySQL server/ + || $e =~ m/Server shutdown in progress/; +} + +sub dbh { + my ($self) = @_; + return $self->{dbh}; +} + +sub dsn { + my ($self) = @_; + return $self->{dsn}; +} + +sub name { + my ($self) = @_; + return $self->{dsn_name} if PERCONA_TOOLKIT_TEST_USE_DSN_NAMES; + return $self->{hostname} || $self->{dsn_name} || 'unknown host'; +} + +sub description { + my ($self) = @_; + return sprintf("%s -> %s:%s", $self->name(), $self->{dsn}->{h} || 'localhost' , $self->{dsn}->{P} || 'socket'); +} + +sub get_id { + my ($self, $cxn) = @_; + + $cxn ||= $self; + + my $unique_id; + if ($cxn->is_cluster_node()) { # for cluster we concatenate various variables to maximize id 'uniqueness' across versions + my $sql = q{SHOW STATUS LIKE 'wsrep\_local\_index'}; + my (undef, $wsrep_local_index) = $cxn->dbh->selectrow_array($sql); + PTDEBUG && _d("Got cluster wsrep_local_index: ",$wsrep_local_index); + $unique_id = $wsrep_local_index."|"; + foreach my $val ('server\_id', 'wsrep\_sst\_receive\_address', 'wsrep\_node\_name', 'wsrep\_node\_address') { + my $sql = "SHOW VARIABLES LIKE '$val'"; + PTDEBUG && _d($cxn->name, $sql); + my (undef, $val) = $cxn->dbh->selectrow_array($sql); + $unique_id .= "|$val"; + } + } else { + my $sql = 'SELECT @@SERVER_ID'; + PTDEBUG && _d($sql); + $unique_id = $cxn->dbh->selectrow_array($sql); + } + PTDEBUG && _d("Generated unique id for cluster:", $unique_id); + return $unique_id; +} + + +sub is_cluster_node { + my ($self, $cxn) = @_; + + $cxn ||= $self; + + my $sql = "SHOW VARIABLES LIKE 'wsrep\_on'"; + + my $dbh; + if ($cxn->isa('DBI::db')) { + $dbh = $cxn; + PTDEBUG && _d($sql); #don't invoke name() if it's not a Cxn! 
+ } + else { + $dbh = $cxn->dbh(); + PTDEBUG && _d($cxn->name, $sql); + } + + my $row = $dbh->selectrow_arrayref($sql); + return $row && $row->[1] && ($row->[1] eq 'ON' || $row->[1] eq '1') ? 1 : 0; + +} + +sub remove_duplicate_cxns { + my ($self, %args) = @_; + my @cxns = @{$args{cxns}}; + my $seen_ids = $args{seen_ids} || {}; + PTDEBUG && _d("Removing duplicates from ", join(" ", map { $_->name } @cxns)); + my @trimmed_cxns; + + for my $cxn ( @cxns ) { + + my $id = $cxn->get_id(); + PTDEBUG && _d('Server ID for ', $cxn->name, ': ', $id); + + if ( ! $seen_ids->{$id}++ ) { + push @trimmed_cxns, $cxn + } + else { + PTDEBUG && _d("Removing ", $cxn->name, + ", ID ", $id, ", because we've already seen it"); + } + } + + return \@trimmed_cxns; +} + +sub DESTROY { + my ($self) = @_; + + PTDEBUG && _d('Destroying cxn'); + + if ( $self->{parent} ) { + PTDEBUG && _d($self->{dbh}, 'Not disconnecting dbh in parent'); + } + elsif ( $self->{dbh} + && blessed($self->{dbh}) + && $self->{dbh}->can("disconnect") ) + { + PTDEBUG && _d($self->{dbh}, 'Disconnecting dbh on', $self->{hostname}, + $self->{dsn_name}); + $self->{dbh}->disconnect(); + } + + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Cxn package +# ########################################################################### + +# ########################################################################### +# Daemon package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/Daemon.pm +# t/lib/Daemon.t +# See https://github.com/percona/percona-toolkit for more information. 
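+#
+# Usage sketch (not part of the original module; the paths are illustrative
+# assumptions, and the real values come from this tool's own command-line
+# handling elsewhere in this file):
+#
+#   my $daemon = Daemon->new(
+#      daemonize => 1,                   # fork, setsid, chdir /, redirect IO
+#      pid_file  => '/var/run/tool.pid', # hypothetical path
+#      log_file  => '/var/log/tool.log', # hypothetical path
+#   );
+#   $daemon->run();  # parent exits; the child owns the PID file and
+#                    # removes it again in DESTROY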
+# ########################################################################### +{ +package Daemon; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use POSIX qw(setsid); +use Fcntl qw(:DEFAULT); + +sub new { + my ($class, %args) = @_; + my $self = { + log_file => $args{log_file}, + pid_file => $args{pid_file}, + daemonize => $args{daemonize}, + force_log_file => $args{force_log_file}, + parent_exit => $args{parent_exit}, + pid_file_owner => 0, + }; + return bless $self, $class; +} + +sub run { + my ($self) = @_; + + my $daemonize = $self->{daemonize}; + my $pid_file = $self->{pid_file}; + my $log_file = $self->{log_file}; + my $force_log_file = $self->{force_log_file}; + my $parent_exit = $self->{parent_exit}; + + PTDEBUG && _d('Starting daemon'); + + if ( $pid_file ) { + eval { + $self->_make_pid_file( + pid => $PID, # parent's pid + pid_file => $pid_file, + ); + }; + die "$EVAL_ERROR\n" if $EVAL_ERROR; + if ( !$daemonize ) { + $self->{pid_file_owner} = $PID; # parent's pid + } + } + + if ( $daemonize ) { + defined (my $child_pid = fork()) or die "Cannot fork: $OS_ERROR"; + if ( $child_pid ) { + PTDEBUG && _d('Forked child', $child_pid); + $parent_exit->($child_pid) if $parent_exit; + exit 0; + } + + POSIX::setsid() or die "Cannot start a new session: $OS_ERROR"; + chdir '/' or die "Cannot chdir to /: $OS_ERROR"; + + if ( $pid_file ) { + $self->_update_pid_file( + pid => $PID, # child's pid + pid_file => $pid_file, + ); + $self->{pid_file_owner} = $PID; + } + } + + if ( $daemonize || $force_log_file ) { + PTDEBUG && _d('Redirecting STDIN to /dev/null'); + close STDIN; + open STDIN, '/dev/null' + or die "Cannot reopen STDIN to /dev/null: $OS_ERROR"; + if ( $log_file ) { + PTDEBUG && _d('Redirecting STDOUT and STDERR to', $log_file); + close STDOUT; + open STDOUT, '>>', $log_file + or die "Cannot open log file $log_file: $OS_ERROR"; + + close STDERR; + open STDERR, ">&STDOUT" + or die "Cannot dupe STDERR to STDOUT: $OS_ERROR"; + } + else { + if ( -t STDOUT ) { + PTDEBUG && _d('No log file and STDOUT is a terminal;', + 'redirecting to /dev/null'); + close STDOUT; + open STDOUT, '>', '/dev/null' + or die "Cannot reopen STDOUT to /dev/null: $OS_ERROR"; + } + if ( -t STDERR ) { + PTDEBUG && _d('No log file and STDERR is a terminal;', + 'redirecting to /dev/null'); + close STDERR; + open STDERR, '>', '/dev/null' + or die "Cannot reopen STDERR to /dev/null: $OS_ERROR"; + } + } + + $OUTPUT_AUTOFLUSH = 1; + } + + PTDEBUG && _d('Daemon running'); + return; +} + +sub _make_pid_file { + my ($self, %args) = @_; + my @required_args = qw(pid pid_file); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my $pid = $args{pid}; + my $pid_file = $args{pid_file}; + + eval { + sysopen(PID_FH, $pid_file, O_RDWR|O_CREAT|O_EXCL) or die $OS_ERROR; + print PID_FH $PID, "\n"; + close PID_FH; + }; + if ( my $e = $EVAL_ERROR ) { + if ( $e =~ m/file exists/i ) { + my $old_pid = $self->_check_pid_file( + pid_file => $pid_file, + pid => $PID, + ); + if ( $old_pid ) { + warn "Overwriting PID file $pid_file because PID $old_pid " + . 
"is not running.\n"; + } + $self->_update_pid_file( + pid => $PID, + pid_file => $pid_file + ); + } + else { + die "Error creating PID file $pid_file: $e\n"; + } + } + + return; +} + +sub _check_pid_file { + my ($self, %args) = @_; + my @required_args = qw(pid_file pid); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my $pid_file = $args{pid_file}; + my $pid = $args{pid}; + + PTDEBUG && _d('Checking if PID in', $pid_file, 'is running'); + + if ( ! -f $pid_file ) { + PTDEBUG && _d('PID file', $pid_file, 'does not exist'); + return; + } + + open my $fh, '<', $pid_file + or die "Error opening $pid_file: $OS_ERROR"; + my $existing_pid = do { local $/; <$fh> }; + chomp($existing_pid) if $existing_pid; + close $fh + or die "Error closing $pid_file: $OS_ERROR"; + + if ( $existing_pid ) { + if ( $existing_pid == $pid ) { + warn "The current PID $pid already holds the PID file $pid_file\n"; + return; + } + else { + PTDEBUG && _d('Checking if PID', $existing_pid, 'is running'); + my $pid_is_alive = kill 0, $existing_pid; + if ( $pid_is_alive ) { + die "PID file $pid_file exists and PID $existing_pid is running\n"; + } + } + } + else { + die "PID file $pid_file exists but it is empty. Remove the file " + . "if the process is no longer running.\n"; + } + + return $existing_pid; +} + +sub _update_pid_file { + my ($self, %args) = @_; + my @required_args = qw(pid pid_file); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my $pid = $args{pid}; + my $pid_file = $args{pid_file}; + + open my $fh, '>', $pid_file + or die "Cannot open $pid_file: $OS_ERROR"; + print { $fh } $pid, "\n" + or die "Cannot print to $pid_file: $OS_ERROR"; + close $fh + or warn "Cannot close $pid_file: $OS_ERROR"; + + return; +} + +sub remove_pid_file { + my ($self, $pid_file) = @_; + $pid_file ||= $self->{pid_file}; + if ( $pid_file && -f $pid_file ) { + unlink $self->{pid_file} + or warn "Cannot remove PID file $pid_file: $OS_ERROR"; + PTDEBUG && _d('Removed PID file'); + } + else { + PTDEBUG && _d('No PID to remove'); + } + return; +} + +sub DESTROY { + my ($self) = @_; + + if ( $self->{pid_file_owner} == $PID ) { + $self->remove_pid_file(); + } + + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Daemon package +# ########################################################################### + +# ########################################################################### +# TextResultSetParser package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/TextResultSetParser.pm +# t/lib/TextResultSetParser.t +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### +{ +package TextResultSetParser; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +sub new { + my ( $class, %args ) = @_; + my %value_for = ( + 'NULL' => undef, # DBI::selectall_arrayref() does this + ($args{value_for} ? 
%{$args{value_for}} : ()), + ); + my $self = { + %args, + value_for => \%value_for, + }; + return bless $self, $class; +} + +sub _parse_tabular { + my ( $text, @cols ) = @_; + my %row; + my @vals = $text =~ m/\| +([^\|]*?)(?= +\|)/msg; + return (undef, \@vals) unless @cols; + @row{@cols} = @vals; + return (\%row, undef); +} + +sub _parse_tab_sep { + my ( $text, @cols ) = @_; + my %row; + my @vals = split(/\t/, $text); + return (undef, \@vals) unless @cols; + @row{@cols} = @vals; + return (\%row, undef); +} + +sub parse_vertical_row { + my ( $self, $text ) = @_; + my %row = $text =~ m/^\s*(\w+):(?: ([^\n]*))?/msg; + if ( $self->{NAME_lc} ) { + my %lc_row = map { + my $key = lc $_; + $key => $row{$_}; + } keys %row; + return \%lc_row; + } + else { + return \%row; + } +} + +sub parse { + my ( $self, $text ) = @_; + my $result_set; + + if ( $text =~ m/^\+---/m ) { # standard "tabular" output + PTDEBUG && _d('Result set text is standard tabular'); + my $line_pattern = qr/^(\| .*)[\r\n]+/m; + $result_set + = $self->parse_horizontal_row($text, $line_pattern, \&_parse_tabular); + } + elsif ( $text =~ m/^\w+\t\w+/m ) { # tab-separated + PTDEBUG && _d('Result set text is tab-separated'); + my $line_pattern = qr/^(.*?\t.*)[\r\n]+/m; + $result_set + = $self->parse_horizontal_row($text, $line_pattern, \&_parse_tab_sep); + } + elsif ( $text =~ m/\*\*\* \d+\. row/ ) { # "vertical" output + PTDEBUG && _d('Result set text is vertical (\G)'); + foreach my $row ( split_vertical_rows($text) ) { + push @$result_set, $self->parse_vertical_row($row); + } + } + else { + my $text_sample = substr $text, 0, 300; + my $remaining = length $text > 300 ? (length $text) - 300 : 0; + chomp $text_sample; + die "Cannot determine if text is tabular, tab-separated or vertical:\n" + . "$text_sample\n" + . ($remaining ? "(not showing last $remaining bytes of text)\n" : ""); + } + + if ( $self->{value_for} ) { + foreach my $result_set ( @$result_set ) { + foreach my $key ( keys %$result_set ) { + next unless defined $result_set->{$key}; + $result_set->{$key} = $self->{value_for}->{ $result_set->{$key} } + if exists $self->{value_for}->{ $result_set->{$key} }; + } + } + } + + return $result_set; +} + + +sub parse_horizontal_row { + my ( $self, $text, $line_pattern, $sub ) = @_; + my @result_sets = (); + my @cols = (); + foreach my $line ( $text =~ m/$line_pattern/g ) { + my ( $row, $cols ) = $sub->($line, @cols); + if ( $row ) { + push @result_sets, $row; + } + else { + @cols = map { $self->{NAME_lc} ? lc $_ : $_ } @$cols; + } + } + return \@result_sets; +} + +sub split_vertical_rows { + my ( $text ) = @_; + my $ROW_HEADER = '\*{3,} \d+\. row \*{3,}'; + my @rows = $text =~ m/($ROW_HEADER.*?)(?=$ROW_HEADER|\z)/omgs; + return @rows; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End TextResultSetParser package +# ########################################################################### + +# ########################################################################### +# MySQLConfig package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/MySQLConfig.pm +# t/lib/MySQLConfig.t +# See https://github.com/percona/percona-toolkit for more information. 
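+#
+# Usage sketch (not part of the original module; the file name below is an
+# illustrative assumption). A config can be built from a live dbh, or from
+# saved output/files, which additionally need a TextResultSetParser:
+#
+#   my $cfg = MySQLConfig->new(dbh => $dbh);  # runs SHOW GLOBAL VARIABLES
+#   # or:
+#   my $cfg = MySQLConfig->new(
+#      file                => '/etc/my.cnf',
+#      TextResultSetParser => TextResultSetParser->new(),
+#   );
+#   my $max = $cfg->value_of('max_connections');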
+# ###########################################################################
+{
+package MySQLConfig;
+
+use strict;
+use warnings FATAL => 'all';
+use English qw(-no_match_vars);
+use constant PTDEBUG => $ENV{PTDEBUG} || 0;
+
+my %can_be_duplicate = (
+   replicate_wild_do_table     => 1,
+   replicate_wild_ignore_table => 1,
+   replicate_rewrite_db        => 1,
+   replicate_ignore_table      => 1,
+   replicate_ignore_db         => 1,
+   replicate_do_table          => 1,
+   replicate_do_db             => 1,
+);
+
+sub new {
+   my ( $class, %args ) = @_;
+   my @requires_one_of = qw(file output result_set dbh);
+   my $required_arg = grep { $args{$_} } @requires_one_of;
+   if ( !$required_arg ) {
+      die "I need a " . join(', ', @requires_one_of[0..$#requires_one_of-1])
+         . " or " . $requires_one_of[-1] . " argument";
+   }
+   if ( $required_arg > 1 ) {
+      die "Specify only one "
+         . join(', ', @requires_one_of[0..$#requires_one_of-1])
+         . " or " . $requires_one_of[-1] . " argument";
+   }
+   if ( $args{file} || $args{output} ) {
+      die "I need a TextResultSetParser argument"
+         unless $args{TextResultSetParser};
+   }
+
+   if ( $args{file} ) {
+      $args{output} = _slurp_file($args{file});
+   }
+
+   my %config_data = _parse_config(%args);
+
+   my $self = {
+      %args,
+      %config_data,
+   };
+
+   return bless $self, $class;
+}
+
+sub _parse_config {
+   my ( %args ) = @_;
+
+   my %config_data;
+   if ( $args{output} ) {
+      %config_data = _parse_config_output(%args);
+   }
+   elsif ( my $rows = $args{result_set} ) {
+      $config_data{format} = $args{format} || 'show_variables';
+      $config_data{vars}   = { map { @$_ } @$rows };
+   }
+   elsif ( my $dbh = $args{dbh} ) {
+      $config_data{format} = $args{format} || 'show_variables';
+      my $sql = "SHOW /*!40103 GLOBAL*/ VARIABLES";
+      PTDEBUG && _d($dbh, $sql);
+      my $rows = $dbh->selectall_arrayref($sql);
+      $config_data{vars} = { map { @$_ } @$rows };
+      $config_data{mysql_version} = _get_version($dbh);
+   }
+   else {
+      die "Unknown config source";
+   }
+
+   handle_special_vars(\%config_data);
+
+   return %config_data;
+}
+
+sub handle_special_vars {
+   my ($config_data) = @_;
+
+   if ( $config_data->{vars}->{wsrep_provider_options} ) {
+      my $vars  = $config_data->{vars};
+      my $dupes = $config_data->{duplicate_vars};
+      for my $wpo ( $vars->{wsrep_provider_options}, @{$dupes->{wsrep_provider_options} || [] } ) {
+         my %opts = $wpo =~ /(\S+)\s*=\s*(\S*)(?:;|;?$)/g;
+         while ( my ($var, $val) = each %opts ) {
+            $val =~ s/;$//;
+            if ( exists $vars->{$var} ) {
+               push @{$dupes->{$var} ||= []}, $val;
+            }
+            $vars->{$var} = $val;
+         }
+      }
+      delete $vars->{wsrep_provider_options};
+   }
+
+   return;
+}
+
+sub _parse_config_output {
+   my ( %args ) = @_;
+   my @required_args = qw(output TextResultSetParser);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($output) = @args{@required_args};
+   PTDEBUG && _d("Parsing config output");
+
+   my $format = $args{format} || detect_config_output_format(%args);
+   if ( !$format ) {
+      die "Cannot auto-detect the MySQL config format";
+   }
+
+   my $vars;      # variables hashref
+   my $dupes;     # duplicate vars hashref
+   my $opt_files; # option files arrayref
+   if ( $format eq 'show_variables' ) {
+      $vars = parse_show_variables(%args);
+   }
+   elsif ( $format eq 'mysqld' ) {
+      ($vars, $opt_files) = parse_mysqld(%args);
+   }
+   elsif ( $format eq 'my_print_defaults' ) {
+      ($vars, $dupes) = parse_my_print_defaults(%args);
+   }
+   elsif ( $format eq 'option_file' ) {
+      ($vars, $dupes) = parse_option_file(%args);
+   }
+   else {
+      die "Invalid MySQL config format: $format";
+   }
+
+   die "Failed to parse MySQL config" unless $vars && keys %$vars;
+
+   if ( $format ne 'show_variables' ) {
+      _mimic_show_variables(
+         %args,
+         format => $format,
+         vars   => $vars,
+      );
+   }
+
+   return (
+      format         => $format,
+      vars           => $vars,
+      option_files   => $opt_files,
+      duplicate_vars => $dupes,
+   );
+}
+
+sub detect_config_output_format {
+   my ( %args ) = @_;
+   my @required_args = qw(output);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($output) = @args{@required_args};
+
+   my $format;
+   if (    $output =~ m/\|\s+\w+\s+\|\s+.+?\|/
+        || $output =~ m/\*+ \d/
+        || $output =~ m/Variable_name:\s+\w+/
+        || $output =~ m/Variable_name\s+Value$/m )
+   {
+      PTDEBUG && _d('show variables format');
+      $format = 'show_variables';
+   }
+   elsif (    $output =~ m/Starts the MySQL database server/
+           || $output =~ m/Default options are read from /
+           || $output =~ m/^help\s+TRUE /m )
+   {
+      PTDEBUG && _d('mysqld format');
+      $format = 'mysqld';
+   }
+   elsif ( $output =~ m/^--\w+/m ) {
+      PTDEBUG && _d('my_print_defaults format');
+      $format = 'my_print_defaults';
+   }
+   elsif ( $output =~ m/^\s*\[[a-zA-Z]+\]\s*$/m ) {
+      PTDEBUG && _d('option file format');
+      $format = 'option_file';
+   }
+
+   return $format;
+}
+
+sub parse_show_variables {
+   my ( %args ) = @_;
+   my @required_args = qw(output TextResultSetParser);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($output, $trp) = @args{@required_args};
+
+   my %config = map {
+      $_->{Variable_name} => $_->{Value}
+   } @{ $trp->parse($output) };
+
+   return \%config;
+}
+
+sub parse_mysqld {
+   my ( %args ) = @_;
+   my @required_args = qw(output);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($output) = @args{@required_args};
+
+   my @opt_files;
+   if ( $output =~ m/^Default options are read.+\n/mg ) {
+      my ($opt_files) = $output =~ m/\G^(.+)\n/m;
+      my %seen;
+      @opt_files = grep { !$seen{$_}++ } split(' ', $opt_files);
+      PTDEBUG && _d('Option files:', @opt_files);
+   }
+   else {
+      PTDEBUG && _d("mysqld help output doesn't list option files");
+   }
+
+   if ( $output !~ m/^-+ -+$(.+?)(?:\n\n.+)?\z/sm ) {
+      PTDEBUG && _d("mysqld help output doesn't list vars and vals");
+      return;
+   }
+
+   my $varvals = $1;
+
+   my ($config, undef) = _parse_varvals(
+      qr/^(\S+)(.*)$/,
+      $varvals,
+   );
+
+   return $config, \@opt_files;
+}
+
+sub parse_my_print_defaults {
+   my ( %args ) = @_;
+   my @required_args = qw(output);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($output) = @args{@required_args};
+
+   my ($config, $dupes) = _parse_varvals(
+      qr/^--([^=]+)(?:=(.*))?$/,
+      $output,
+   );
+
+   return $config, $dupes;
+}
+
+sub parse_option_file {
+   my ( %args ) = @_;
+   my @required_args = qw(output);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($output) = @args{@required_args};
+
+   my ($mysqld_section) = $output =~ m/\[mysqld\](.+?)(?:^\s*\[\w+\]|\Z)/xms;
+   die "Failed to parse the [mysqld] section" unless $mysqld_section;
+
+   my ($config, $dupes) = _parse_varvals(
+      qr/^([^=]+)(?:=(.*))?$/,
+      $mysqld_section,
+   );
+
+   return $config, $dupes;
+}
+
+sub _preprocess_varvals {
+   my ($re, $to_parse) = @_;
+
+   my %vars;
+   LINE:
+   foreach my $line ( split /\n/, $to_parse ) {
+      next LINE if $line =~ m/^\s*$/;   # no empty lines
+      next LINE if $line =~ /^\s*[#;]/; # no # or ; comment lines
+
+      if ( $line !~ $re ) {
+         PTDEBUG && _d("Line <", $line, "> didn't match $re");
+         next LINE;
+      }
+
+      my ($var, $val) = ($1, $2);
+
+      $var =~ tr/-/_/;
+
+      $var =~ s/\s*#.*$//;
+
+      if ( !defined $val ) {
+         $val = '';
+      }
+
+      for my $item ($var, $val) {
+         $item =~ s/^\s+//;
+         $item =~ s/\s+$//;
+      }
+
+      push @{$vars{$var} ||= []}, $val
+   }
+
+   return \%vars;
+}
+
+sub _parse_varvals {
+   my ( $vars ) = _preprocess_varvals(@_);
+
+   my %config;
+
+   my %duplicates;
+
+   while ( my ($var, $vals) = each %$vars ) {
+      my $val = _process_val( pop @$vals );
+      if ( @$vals && !$can_be_duplicate{$var} ) {
+         PTDEBUG && _d("Duplicate var:", $var);
+         foreach my $current_val ( map { _process_val($_) } @$vals ) {
+            push @{$duplicates{$var} ||= []}, $current_val;
+         }
+      }
+
+      PTDEBUG && _d("Var:", $var, "val:", $val);
+
+      $config{$var} = $val;
+   }
+
+   return \%config, \%duplicates;
+}
+
+my $quote_re = qr/
+   \A             # Start of value
+   (['"])         # Opening quote
+   (.*)           # Value
+   \1             # Closing quote
+   \s*(?:\#.*)?   # End of line comment
+   [\n\r]*\z      # End of value
+/x;
+sub _process_val {
+   my ($val) = @_;
+
+   if ( $val =~ $quote_re ) {
+      $val = $2;
+   }
+   else {
+      $val =~ s/\s*#.*//;
+   }
+
+   if ( my ($num, $factor) = $val =~ m/(\d+)([KMGT])b?$/i ) {
+      my %factor_for = (
+         k => 1_024,
+         m => 1_048_576,
+         g => 1_073_741_824,
+         t => 1_099_511_627_776,
+      );
+      $val = $num * $factor_for{lc $factor};
+   }
+   elsif ( $val =~ m/No default/ ) {
+      $val = '';
+   }
+   return $val;
+}
+
+sub _mimic_show_variables {
+   my ( %args ) = @_;
+   my @required_args = qw(vars format);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($vars, $format) = @args{@required_args};
+
+   foreach my $var ( keys %$vars ) {
+      if ( $vars->{$var} eq '' ) {
+         if ( $format eq 'mysqld' ) {
+            if ( $var ne 'log_error' && $var =~ m/^(?:log|skip|ignore)/ ) {
+               $vars->{$var} = 'OFF';
+            }
+         }
+         else {
+            $vars->{$var} = 'ON';
+         }
+      }
+   }
+
+   return;
+}
+
+sub _slurp_file {
+   my ( $file ) = @_;
+   die "I need a file argument" unless $file;
+   PTDEBUG && _d("Reading", $file);
+   open my $fh, '<', $file or die "Cannot open $file: $OS_ERROR";
+   my $contents = do { local $/ = undef; <$fh> };
+   close $fh;
+   return $contents;
+}
+
+sub _get_version {
+   my ( $dbh ) = @_;
+   return unless $dbh;
+   my $version = $dbh->selectrow_arrayref('SELECT VERSION()')->[0];
+   $version =~ s/(\d\.\d{1,2}.\d{1,2})/$1/;
+   PTDEBUG && _d('MySQL version', $version);
+   return $version;
+}
+
+
+sub has {
+   my ( $self, $var ) = @_;
+   return exists $self->{vars}->{$var};
+}
+
+sub value_of {
+   my ( $self, $var ) = @_;
+   return unless $var;
+   return $self->{vars}->{$var};
+}
+
+sub variables {
+   my ( $self, %args ) = @_;
+   return $self->{vars};
+}
+
+sub duplicate_variables {
+   my ( $self ) = @_;
+   return $self->{duplicate_vars};
+}
+
+sub option_files {
+   my ( $self ) = @_;
+   return $self->{option_files};
+}
+
+sub mysql_version {
+   my ( $self ) = @_;
+   return $self->{mysql_version};
+}
+
+sub format {
+   my ( $self ) = @_;
+   return $self->{format};
+}
+
+sub is_active {
+   my ( $self ) = @_;
+   return $self->{dbh} ? 1 : 0;
+}
+
+sub has_engine {
+   my ($self, $engine) = @_;
+   if (!$self->{dbh}) {
+      die "invalid dbh in has_engine method";
+   }
+
+   my $rows = $self->{dbh}->selectall_arrayref('SHOW ENGINES', {Slice=>{}});
+   my $is_enabled;
+   for my $row (@$rows) {
+      if (lc($row->{engine}) eq lc($engine)) {  # compare against the requested engine
+         $is_enabled = 1;
+         last;
+      }
+   }
+   return $is_enabled;
+}
+
+sub _d {
+   my ($package, undef, $line) = caller 0;
+   @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
+        map { defined $_ ?
$_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End MySQLConfig package +# ########################################################################### + +# ########################################################################### +# MySQLConfigComparer package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/MySQLConfigComparer.pm +# t/lib/MySQLConfigComparer.t +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### +{ +package MySQLConfigComparer; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +my %alt_val_for = ( + ON => 1, + YES => 1, + TRUE => 1, + OFF => 0, + NO => 0, + FALSE => 0, +); + +sub new { + my ( $class, %args ) = @_; + + my %ignore_vars = ( + date_format => 1, + datetime_format => 1, + ft_stopword_file => 1, + timestamp => 1, + time_format => 1, + ($args{ignore_variables} + ? map { $_ => 1 } @{$args{ignore_variables}} + : ()), + ); + + my %is_numeric = ( + long_query_time => 1, + ($args{numeric_variables} + ? map { $_ => 1 } @{$args{numeric_variables}} + : ()), + ); + + my %value_is_optional = ( + log_error => 1, + log_isam => 1, + secure_file_priv => 1, + ($args{optional_value_variables} + ? map { $_ => 1 } @{$args{optional_value_variables}} + : ()), + ); + + my %any_value_is_true = ( + log => 1, + log_bin => 1, + log_slow_queries => 1, + ($args{any_value_is_true_variables} + ? map { $_ => 1 } @{$args{any_value_is_true_variables}} + : ()), + ); + + my %base_path = ( + character_sets_dir => 'basedir', + datadir => 'basedir', + general_log_file => 'datadir', + language => 'basedir', + log_error => 'datadir', + pid_file => 'datadir', + plugin_dir => 'basedir', + slow_query_log_file => 'datadir', + socket => 'datadir', + ($args{base_paths} + ? map { $_ => 1 } @{$args{base_paths}} + : ()), + ); + + my $self = { + ignore_vars => \%ignore_vars, + is_numeric => \%is_numeric, + value_is_optional => \%value_is_optional, + any_value_is_true => \%any_value_is_true, + base_path => \%base_path, + ignore_case => exists $args{ignore_case} + ? 
$args{ignore_case} + : 1, + }; + + return bless $self, $class; +} + +sub diff { + my ( $self, %args ) = @_; + my @required_args = qw(configs); + foreach my $arg( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($configs) = @args{@required_args}; + + if ( @$configs < 2 ) { + PTDEBUG && _d("Less than two MySQLConfig objects; nothing to compare"); + return; + } + + my $base_path = $self->{base_path}; + my $is_numeric = $self->{is_numeric}; + my $any_value_is_true = $self->{any_value_is_true}; + my $value_is_optional = $self->{value_is_optional}; + + my $config0 = $configs->[0]; + my $last_config = @$configs - 1; + my $vars = $self->_get_shared_vars(%args); + my $ignore_case = $self->{ignore_case}; + + my $diffs; + VARIABLE: + foreach my $var ( @$vars ) { + my $is_dir = $var =~ m/dir$/ || $var eq 'language'; + my $val0 = $self->_normalize_value( # config0 value + value => $config0->value_of($var), + is_directory => $is_dir, + base_path => $config0->value_of($base_path->{$var}) || "", + ); + + eval { + CONFIG: + foreach my $configN ( @$configs[1..$last_config] ) { + my $valN = $self->_normalize_value( # configN value + value => $configN->value_of($var), + is_directory => $is_dir, + base_path => $configN->value_of($base_path->{$var}) || "", + ); + + if ( $is_numeric->{$var} ) { + next CONFIG if $val0 == $valN; + } + else { + next CONFIG if $ignore_case + ? lc($val0) eq lc($valN) + : $val0 eq $valN; + + if ( $config0->format() ne $configN->format() ) { + if ( $any_value_is_true->{$var} ) { + next CONFIG if $val0 && $valN; + } + if ( $value_is_optional->{$var} ) { + next CONFIG if (!$val0 && $valN) || ($val0 && !$valN); + } + } + } + + PTDEBUG && _d("Different", $var, "values:", $val0, $valN); + $diffs->{$var} = [ map { $_->value_of($var) } @$configs ]; + last CONFIG; + } # CONFIG + }; + if ( $EVAL_ERROR ) { + my $vals = join(', ', + map { + my $val = $_->value_of($var); + defined $val ? $val : 'undef' + } @$configs); + warn "Comparing $var values ($vals) caused an error: $EVAL_ERROR"; + } + } # VARIABLE + + return $diffs; +} + +sub missing { + my ( $self, %args ) = @_; + my @required_args = qw(configs); + foreach my $arg( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($configs) = @args{@required_args}; + + if ( @$configs < 2 ) { + PTDEBUG && _d("Less than two MySQLConfig objects; nothing to compare"); + return; + } + + my %vars = map { $_ => 1 } map { keys %{$_->variables()} } @$configs; + my $missing; + foreach my $var ( keys %vars ) { + my $n_configs_having_var = grep { $_->has($var) } @$configs; + if ( $n_configs_having_var < @$configs ) { + $missing->{$var} = [ map { $_->has($var) ? 1 : 0 } @$configs ]; + } + } + + return $missing; +} + +sub _normalize_value { + my ( $self, %args ) = @_; + my ($val, $is_dir, $base_path) = @args{qw(value is_directory base_path)}; + + $val = defined $val ? 
$val : ''; + $val = $alt_val_for{$val} if exists $alt_val_for{$val}; + + if ( $val ) { + if ( $is_dir ) { + $val .= '/' unless $val =~ m/\/$/; + } + if ( $base_path && $val !~ m/^\// ) { + $val =~ s/^\.?(.+)/$base_path\/$1/; # prepend base path + $val =~ s/\/{2,}/\//g; # make redundant // single / + } + } + return $val; +} + +sub _get_shared_vars { + my ( $self, %args ) = @_; + my ($configs) = @args{qw(configs)}; + my $ignore_vars = $self->{ignore_vars}; + my $config0 = $configs->[0]; + my $last_config = @$configs - 1; + my @vars + = grep { !$ignore_vars->{$_} } + map { + my $config = $_; + my $vars = $config->variables(); + grep { $config0->has($_); } keys %$vars; + } @$configs[1..$last_config]; + return \@vars; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End MySQLConfigComparer package +# ########################################################################### + +# ########################################################################### +# ReportFormatter package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/ReportFormatter.pm +# t/lib/ReportFormatter.t +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### +{ +package ReportFormatter; + +use Lmo; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use List::Util qw(min max); +use POSIX qw(ceil); + +eval { require Term::ReadKey }; +my $have_term = $EVAL_ERROR ? 0 : 1; + + +has underline_header => ( + is => 'ro', + isa => 'Bool', + default => sub { 1 }, +); +has line_prefix => ( + is => 'ro', + isa => 'Str', + default => sub { '# ' }, +); +has line_width => ( + is => 'ro', + isa => 'Int', + default => sub { 78 }, +); +has column_spacing => ( + is => 'ro', + isa => 'Str', + default => sub { ' ' }, +); +has extend_right => ( + is => 'ro', + isa => 'Bool', + default => sub { '' }, +); +has truncate_line_mark => ( + is => 'ro', + isa => 'Str', + default => sub { '...' }, +); +has column_errors => ( + is => 'ro', + isa => 'Str', + default => sub { 'warn' }, +); +has truncate_header_side => ( + is => 'ro', + isa => 'Str', + default => sub { 'left' }, +); +has strip_whitespace => ( + is => 'ro', + isa => 'Bool', + default => sub { 1 }, +); +has title => ( + is => 'rw', + isa => 'Str', + predicate => 'has_title', +); + + +has n_cols => ( + is => 'rw', + isa => 'Int', + default => sub { 0 }, + init_arg => undef, +); + +has cols => ( + is => 'ro', + isa => 'ArrayRef', + init_arg => undef, + default => sub { [] }, + clearer => 'clear_cols', +); + +has lines => ( + is => 'ro', + isa => 'ArrayRef', + init_arg => undef, + default => sub { [] }, + clearer => 'clear_lines', +); + +has truncate_headers => ( + is => 'rw', + isa => 'Bool', + default => sub { undef }, + init_arg => undef, + clearer => 'clear_truncate_headers', +); + +sub BUILDARGS { + my $class = shift; + my $args = $class->SUPER::BUILDARGS(@_); + + if ( ($args->{line_width} || '') eq 'auto' ) { + die "Cannot auto-detect line width because the Term::ReadKey module " + . 
"is not installed" unless $have_term; + ($args->{line_width}) = GetTerminalSize(); + PTDEBUG && _d('Line width:', $args->{line_width}); + } + + return $args; +} + +sub set_columns { + my ( $self, @cols ) = @_; + my $min_hdr_wid = 0; # check that header fits on line + my $used_width = 0; + my @auto_width_cols; + + for my $i ( 0..$#cols ) { + my $col = $cols[$i]; + my $col_name = $col->{name}; + my $col_len = length $col_name; + die "Column does not have a name" unless defined $col_name; + + if ( $col->{width} ) { + $col->{width_pct} = ceil(($col->{width} * 100) / $self->line_width()); + PTDEBUG && _d('col:', $col_name, 'width:', $col->{width}, 'chars =', + $col->{width_pct}, '%'); + } + + if ( $col->{width_pct} ) { + $used_width += $col->{width_pct}; + } + else { + PTDEBUG && _d('Auto width col:', $col_name); + $col->{auto_width} = 1; + push @auto_width_cols, $i; + } + + $col->{truncate} = 1 unless defined $col->{truncate}; + $col->{truncate_mark} = '...' unless defined $col->{truncate_mark}; + $col->{truncate_side} ||= 'right'; + $col->{undef_value} = '' unless defined $col->{undef_value}; + + $col->{min_val} = 0; + $col->{max_val} = 0; + + $min_hdr_wid += $col_len; + $col->{header_width} = $col_len; + + $col->{right_most} = 1 if $i == $#cols; + + push @{$self->cols}, $col; + } + + $self->n_cols( scalar @cols ); + + if ( ($used_width || 0) > 100 ) { + die "Total width_pct for all columns is >100%"; + } + + if ( @auto_width_cols ) { + my $wid_per_col = int((100 - $used_width) / scalar @auto_width_cols); + PTDEBUG && _d('Line width left:', (100-$used_width), '%;', + 'each auto width col:', $wid_per_col, '%'); + map { $self->cols->[$_]->{width_pct} = $wid_per_col } @auto_width_cols; + } + + $min_hdr_wid += ($self->n_cols() - 1) * length $self->column_spacing(); + PTDEBUG && _d('min header width:', $min_hdr_wid); + if ( $min_hdr_wid > $self->line_width() ) { + PTDEBUG && _d('Will truncate headers because min header width', + $min_hdr_wid, '> line width', $self->line_width()); + $self->truncate_headers(1); + } + + return; +} + +sub add_line { + my ( $self, @vals ) = @_; + my $n_vals = scalar @vals; + if ( $n_vals != $self->n_cols() ) { + $self->_column_error("Number of values $n_vals does not match " + . "number of columns " . $self->n_cols()); + } + for my $i ( 0..($n_vals-1) ) { + my $col = $self->cols->[$i]; + my $val = defined $vals[$i] ? $vals[$i] : $col->{undef_value}; + if ( $self->strip_whitespace() ) { + $val =~ s/^\s+//g; + $val =~ s/\s+$//; + $vals[$i] = $val; + } + my $width = length $val; + $col->{min_val} = min($width, ($col->{min_val} || $width)); + $col->{max_val} = max($width, ($col->{max_val} || $width)); + } + push @{$self->lines}, \@vals; + return; +} + +sub get_report { + my ( $self, %args ) = @_; + + $self->_calculate_column_widths(); + if ( $self->truncate_headers() ) { + $self->_truncate_headers(); + } + $self->_truncate_line_values(%args); + + my @col_fmts = $self->_make_column_formats(); + my $fmt = $self->line_prefix() + . join($self->column_spacing(), @col_fmts); + PTDEBUG && _d('Format:', $fmt); + + (my $hdr_fmt = $fmt) =~ s/%([^-])/%-$1/g; + + my @lines; + push @lines, $self->line_prefix() . 
$self->title() if $self->has_title(); + push @lines, $self->_truncate_line( + sprintf($hdr_fmt, map { $_->{name} } @{$self->cols}), + strip => 1, + mark => '', + ); + + if ( $self->underline_header() ) { + my @underlines = map { '=' x $_->{print_width} } @{$self->cols}; + push @lines, $self->_truncate_line( + sprintf($fmt, map { $_ || '' } @underlines), + mark => '', + ); + } + + push @lines, map { + my $vals = $_; + my $i = 0; + my @vals = map { + my $val = defined $_ ? $_ : $self->cols->[$i++]->{undef_value}; + $val = '' if !defined $val; + $val =~ s/\n/ /g; + $val; + } @$vals; + my $line = sprintf($fmt, @vals); + if ( $self->extend_right() ) { + $line; + } + else { + $self->_truncate_line($line); + } + } @{$self->lines}; + + $self->clear_cols(); + $self->clear_lines(); + $self->clear_truncate_headers(); + + return join("\n", @lines) . "\n"; +} + +sub truncate_value { + my ( $self, $col, $val, $width, $side ) = @_; + return $val if length $val <= $width; + return $val if $col->{right_most} && $self->extend_right(); + $side ||= $col->{truncate_side}; + my $mark = $col->{truncate_mark}; + if ( $side eq 'right' ) { + $val = substr($val, 0, $width - length $mark); + $val .= $mark; + } + elsif ( $side eq 'left') { + $val = $mark . substr($val, -1 * $width + length $mark); + } + else { + PTDEBUG && _d("I don't know how to", $side, "truncate values"); + } + return $val; +} + +sub _calculate_column_widths { + my ( $self ) = @_; + + my $extra_space = 0; + foreach my $col ( @{$self->cols} ) { + my $print_width = int($self->line_width() * ($col->{width_pct} / 100)); + + PTDEBUG && _d('col:', $col->{name}, 'width pct:', $col->{width_pct}, + 'char width:', $print_width, + 'min val:', $col->{min_val}, 'max val:', $col->{max_val}); + + if ( $col->{auto_width} ) { + if ( $col->{min_val} && $print_width < $col->{min_val} ) { + PTDEBUG && _d('Increased to min val width:', $col->{min_val}); + $print_width = $col->{min_val}; + } + elsif ( $col->{max_val} && $print_width > $col->{max_val} ) { + PTDEBUG && _d('Reduced to max val width:', $col->{max_val}); + $extra_space += $print_width - $col->{max_val}; + $print_width = $col->{max_val}; + } + } + + $col->{print_width} = $print_width; + PTDEBUG && _d('print width:', $col->{print_width}); + } + + PTDEBUG && _d('Extra space:', $extra_space); + while ( $extra_space-- ) { + foreach my $col ( @{$self->cols} ) { + if ( $col->{auto_width} + && ( $col->{print_width} < $col->{max_val} + || $col->{print_width} < $col->{header_width}) + ) { + $col->{print_width}++; + } + } + } + + return; +} + +sub _truncate_headers { + my ( $self, $col ) = @_; + my $side = $self->truncate_header_side(); + foreach my $col ( @{$self->cols} ) { + my $col_name = $col->{name}; + my $print_width = $col->{print_width}; + next if length $col_name <= $print_width; + $col->{name} = $self->truncate_value($col, $col_name, $print_width, $side); + PTDEBUG && _d('Truncated hdr', $col_name, 'to', $col->{name}, + 'max width:', $print_width); + } + return; +} + +sub _truncate_line_values { + my ( $self, %args ) = @_; + my $n_vals = $self->n_cols() - 1; + foreach my $vals ( @{$self->lines} ) { + for my $i ( 0..$n_vals ) { + my $col = $self->cols->[$i]; + my $val = defined $vals->[$i] ? $vals->[$i] : $col->{undef_value}; + my $width = length $val; + + if ( $col->{print_width} && $width > $col->{print_width} ) { + if ( !$col->{truncate} ) { + $self->_column_error("Value '$val' is too wide for column " + . 
$col->{name}); + } + + my $callback = $args{truncate_callback}; + my $print_width = $col->{print_width}; + $val = $callback ? $callback->($col, $val, $print_width) + : $self->truncate_value($col, $val, $print_width); + PTDEBUG && _d('Truncated val', $vals->[$i], 'to', $val, + '; max width:', $print_width); + $vals->[$i] = $val; + } + } + } + return; +} + +sub _make_column_formats { + my ( $self ) = @_; + my @col_fmts; + my $n_cols = $self->n_cols() - 1; + for my $i ( 0..$n_cols ) { + my $col = $self->cols->[$i]; + + my $width = $col->{right_most} && !$col->{right_justify} ? '' + : $col->{print_width}; + + my $col_fmt = '%' . ($col->{right_justify} ? '' : '-') . $width . 's'; + push @col_fmts, $col_fmt; + } + return @col_fmts; +} + +sub _truncate_line { + my ( $self, $line, %args ) = @_; + my $mark = defined $args{mark} ? $args{mark} : $self->truncate_line_mark(); + if ( $line ) { + $line =~ s/\s+$// if $args{strip}; + my $len = length($line); + if ( $len > $self->line_width() ) { + $line = substr($line, 0, $self->line_width() - length $mark); + $line .= $mark if $mark; + } + } + return $line; +} + +sub _column_error { + my ( $self, $err ) = @_; + my $msg = "Column error: $err"; + $self->column_errors() eq 'die' ? die $msg : warn $msg; + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +no Lmo; +1; +} +# ########################################################################### +# End ReportFormatter package +# ########################################################################### + +# ########################################################################### +# HTTP::Micro package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/HTTP/Micro.pm +# t/lib/HTTP/Micro.t +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### +{ +package HTTP::Micro; + +our $VERSION = '0.01'; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Carp (); + +my @attributes; +BEGIN { + @attributes = qw(agent timeout); + no strict 'refs'; + for my $accessor ( @attributes ) { + *{$accessor} = sub { + @_ > 1 ? $_[0]->{$accessor} = $_[1] : $_[0]->{$accessor}; + }; + } +} + +sub new { + my($class, %args) = @_; + (my $agent = $class) =~ s{::}{-}g; + my $self = { + agent => $agent . "/" . ($class->VERSION || 0), + timeout => 60, + }; + for my $key ( @attributes ) { + $self->{$key} = $args{$key} if exists $args{$key} + } + return bless $self, $class; +} + +my %DefaultPort = ( + http => 80, + https => 443, +); + +sub request { + my ($self, $method, $url, $args) = @_; + @_ == 3 || (@_ == 4 && ref $args eq 'HASH') + or Carp::croak(q/Usage: $http->request(METHOD, URL, [HASHREF])/); + $args ||= {}; # we keep some state in this during _request + + my $response; + for ( 0 .. 
1 ) { + $response = eval { $self->_request($method, $url, $args) }; + last unless $@ && $method eq 'GET' + && $@ =~ m{^(?:Socket closed|Unexpected end)}; + } + + if (my $e = "$@") { + $response = { + success => q{}, + status => 599, + reason => 'Internal Exception', + content => $e, + headers => { + 'content-type' => 'text/plain', + 'content-length' => length $e, + } + }; + } + return $response; +} + +sub _request { + my ($self, $method, $url, $args) = @_; + + my ($scheme, $host, $port, $path_query) = $self->_split_url($url); + + my $request = { + method => $method, + scheme => $scheme, + host_port => ($port == $DefaultPort{$scheme} ? $host : "$host:$port"), + uri => $path_query, + headers => {}, + }; + + my $handle = HTTP::Micro::Handle->new(timeout => $self->{timeout}); + + $handle->connect($scheme, $host, $port); + + $self->_prepare_headers_and_cb($request, $args); + $handle->write_request_header(@{$request}{qw/method uri headers/}); + $handle->write_content_body($request) if $request->{content}; + + my $response; + do { $response = $handle->read_response_header } + until (substr($response->{status},0,1) ne '1'); + + if (!($method eq 'HEAD' || $response->{status} =~ /^[23]04/)) { + $response->{content} = ''; + $handle->read_content_body(sub { $_[1]->{content} .= $_[0] }, $response); + } + + $handle->close; + $response->{success} = substr($response->{status},0,1) eq '2'; + return $response; +} + +sub _prepare_headers_and_cb { + my ($self, $request, $args) = @_; + + for ($args->{headers}) { + next unless defined; + while (my ($k, $v) = each %$_) { + $request->{headers}{lc $k} = $v; + } + } + $request->{headers}{'host'} = $request->{host_port}; + $request->{headers}{'connection'} = "close"; + $request->{headers}{'user-agent'} ||= $self->{agent}; + + if (defined $args->{content}) { + $request->{headers}{'content-type'} ||= "application/octet-stream"; + utf8::downgrade($args->{content}, 1) + or Carp::croak(q/Wide character in request message body/); + $request->{headers}{'content-length'} = length $args->{content}; + $request->{content} = $args->{content}; + } + return; +} + +sub _split_url { + my $url = pop; + + my ($scheme, $authority, $path_query) = $url =~ m<\A([^:/?#]+)://([^/?#]*)([^#]*)> + or Carp::croak(qq/Cannot parse URL: '$url'/); + + $scheme = lc $scheme; + $path_query = "/$path_query" unless $path_query =~ m<\A/>; + + my $host = (length($authority)) ? lc $authority : 'localhost'; + $host =~ s/\A[^@]*@//; # userinfo + my $port = do { + $host =~ s/:([0-9]*)\z// && length $1 + ? 
$1 + : $DefaultPort{$scheme} + }; + + return ($scheme, $host, $port, $path_query); +} + +} # HTTP::Micro + +{ + package HTTP::Micro::Handle; + + use strict; + use warnings FATAL => 'all'; + use English qw(-no_match_vars); + + use Carp qw(croak); + use Errno qw(EINTR EPIPE); + use IO::Socket qw(SOCK_STREAM); + + sub BUFSIZE () { 32768 } + + my $Printable = sub { + local $_ = shift; + s/\r/\\r/g; + s/\n/\\n/g; + s/\t/\\t/g; + s/([^\x20-\x7E])/sprintf('\\x%.2X', ord($1))/ge; + $_; + }; + + sub new { + my ($class, %args) = @_; + return bless { + rbuf => '', + timeout => 60, + max_line_size => 16384, + %args + }, $class; + } + + my $ssl_verify_args = { + check_cn => "when_only", + wildcards_in_alt => "anywhere", + wildcards_in_cn => "anywhere" + }; + + sub connect { + @_ == 4 || croak(q/Usage: $handle->connect(scheme, host, port)/); + my ($self, $scheme, $host, $port) = @_; + + if ( $scheme eq 'https' ) { + eval "require IO::Socket::SSL" + unless exists $INC{'IO/Socket/SSL.pm'}; + croak(qq/IO::Socket::SSL must be installed for https support\n/) + unless $INC{'IO/Socket/SSL.pm'}; + } + elsif ( $scheme ne 'http' ) { + croak(qq/Unsupported URL scheme '$scheme'\n/); + } + + $self->{fh} = IO::Socket::INET->new( + PeerHost => $host, + PeerPort => $port, + Proto => 'tcp', + Type => SOCK_STREAM, + Timeout => $self->{timeout} + ) or croak(qq/Could not connect to '$host:$port': $@/); + + binmode($self->{fh}) + or croak(qq/Could not binmode() socket: '$!'/); + + if ( $scheme eq 'https') { + IO::Socket::SSL->start_SSL($self->{fh}); + ref($self->{fh}) eq 'IO::Socket::SSL' + or die(qq/SSL connection failed for $host\n/); + if ( $self->{fh}->can("verify_hostname") ) { + $self->{fh}->verify_hostname( $host, $ssl_verify_args ) + or die(qq/SSL certificate not valid for $host\n/); + } + else { + my $fh = $self->{fh}; + _verify_hostname_of_cert($host, _peer_certificate($fh), $ssl_verify_args) + or die(qq/SSL certificate not valid for $host\n/); + } + } + + $self->{host} = $host; + $self->{port} = $port; + + return $self; + } + + sub close { + @_ == 1 || croak(q/Usage: $handle->close()/); + my ($self) = @_; + CORE::close($self->{fh}) + or croak(qq/Could not close socket: '$!'/); + } + + sub write { + @_ == 2 || croak(q/Usage: $handle->write(buf)/); + my ($self, $buf) = @_; + + my $len = length $buf; + my $off = 0; + + local $SIG{PIPE} = 'IGNORE'; + + while () { + $self->can_write + or croak(q/Timed out while waiting for socket to become ready for writing/); + my $r = syswrite($self->{fh}, $buf, $len, $off); + if (defined $r) { + $len -= $r; + $off += $r; + last unless $len > 0; + } + elsif ($! == EPIPE) { + croak(qq/Socket closed by remote server: $!/); + } + elsif ($! != EINTR) { + croak(qq/Could not write to socket: '$!'/); + } + } + return $off; + } + + sub read { + @_ == 2 || @_ == 3 || croak(q/Usage: $handle->read(len)/); + my ($self, $len) = @_; + + my $buf = ''; + my $got = length $self->{rbuf}; + + if ($got) { + my $take = ($got < $len) ? $got : $len; + $buf = substr($self->{rbuf}, 0, $take, ''); + $len -= $take; + } + + while ($len > 0) { + $self->can_read + or croak(q/Timed out while waiting for socket to become ready for reading/); + my $r = sysread($self->{fh}, $buf, $len, length $buf); + if (defined $r) { + last unless $r; + $len -= $r; + } + elsif ($! 
!= EINTR) {
+                croak(qq/Could not read from socket: '$!'/);
+            }
+        }
+        if ($len) {
+            croak(q/Unexpected end of stream/);
+        }
+        return $buf;
+    }
+
+    sub readline {
+        @_ == 1 || croak(q/Usage: $handle->readline()/);
+        my ($self) = @_;
+
+        while () {
+            if ($self->{rbuf} =~ s/\A ([^\x0D\x0A]* \x0D?\x0A)//x) {
+                return $1;
+            }
+            $self->can_read
+              or croak(q/Timed out while waiting for socket to become ready for reading/);
+            my $r = sysread($self->{fh}, $self->{rbuf}, BUFSIZE, length $self->{rbuf});
+            if (defined $r) {
+                last unless $r;
+            }
+            elsif ($! != EINTR) {
+                croak(qq/Could not read from socket: '$!'/);
+            }
+        }
+        croak(q/Unexpected end of stream while looking for line/);
+    }
+
+    sub read_header_lines {
+        @_ == 1 || @_ == 2 || croak(q/Usage: $handle->read_header_lines([headers])/);
+        my ($self, $headers) = @_;
+        $headers ||= {};
+        my $lines = 0;
+        my $val;
+
+        while () {
+            my $line = $self->readline;
+
+            if ($line =~ /\A ([^\x00-\x1F\x7F:]+) : [\x09\x20]* ([^\x0D\x0A]*)/x) {
+                my ($field_name) = lc $1;
+                $val = \($headers->{$field_name} = $2);
+            }
+            elsif ($line =~ /\A [\x09\x20]+ ([^\x0D\x0A]*)/x) {
+                $val
+                  or croak(q/Unexpected header continuation line/);
+                next unless length $1;
+                $$val .= ' ' if length $$val;
+                $$val .= $1;
+            }
+            elsif ($line =~ /\A \x0D?\x0A \z/x) {
+                last;
+            }
+            else {
+                croak(q/Malformed header line: / . $Printable->($line));
+            }
+        }
+        return $headers;
+    }
+
+    sub write_header_lines {
+        (@_ == 2 && ref $_[1] eq 'HASH') || croak(q/Usage: $handle->write_header_lines(headers)/);
+        my($self, $headers) = @_;
+
+        my $buf = '';
+        while (my ($k, $v) = each %$headers) {
+            my $field_name = lc $k;
+            $field_name =~ /\A [\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5A\x5E-\x7A\x7C\x7E]+ \z/x
+              or croak(q/Invalid HTTP header field name: / . $Printable->($field_name));
+            $field_name =~ s/\b(\w)/\u$1/g;
+            $buf .= "$field_name: $v\x0D\x0A";
+        }
+        $buf .= "\x0D\x0A";
+        return $self->write($buf);
+    }
+
+    sub read_content_body {
+        @_ == 3 || @_ == 4 || croak(q/Usage: $handle->read_content_body(callback, response, [read_length])/);
+        my ($self, $cb, $response, $len) = @_;
+        $len ||= $response->{headers}{'content-length'};
+
+        croak("No content-length in the returned response, and this "
+            . "UA doesn't implement chunking") unless defined $len;
+
+        while ($len > 0) {
+            my $read = ($len > BUFSIZE) ? BUFSIZE : $len;
+            $cb->($self->read($read), $response);
+            $len -= $read;
+        }
+
+        return;
+    }
+
+    sub write_content_body {
+        @_ == 2 || croak(q/Usage: $handle->write_content_body(request)/);
+        my ($self, $request) = @_;
+        my ($len, $content_length) = (0, $request->{headers}{'content-length'});
+
+        $len += $self->write($request->{content});
+
+        $len == $content_length
+          or croak(qq/Content-Length mismatch (got: $len expected: $content_length)/);
+
+        return $len;
+    }
+
+    sub read_response_header {
+        @_ == 1 || croak(q/Usage: $handle->read_response_header()/);
+        my ($self) = @_;
+
+        my $line = $self->readline;
+
+        $line =~ /\A (HTTP\/(0*\d+\.0*\d+)) [\x09\x20]+ ([0-9]{3}) [\x09\x20]+ ([^\x0D\x0A]*) \x0D?\x0A/x
+          or croak(q/Malformed Status-Line: / .
$Printable->($line)); + + my ($protocol, $version, $status, $reason) = ($1, $2, $3, $4); + + return { + status => $status, + reason => $reason, + headers => $self->read_header_lines, + protocol => $protocol, + }; + } + + sub write_request_header { + @_ == 4 || croak(q/Usage: $handle->write_request_header(method, request_uri, headers)/); + my ($self, $method, $request_uri, $headers) = @_; + + return $self->write("$method $request_uri HTTP/1.1\x0D\x0A") + + $self->write_header_lines($headers); + } + + sub _do_timeout { + my ($self, $type, $timeout) = @_; + $timeout = $self->{timeout} + unless defined $timeout && $timeout >= 0; + + my $fd = fileno $self->{fh}; + defined $fd && $fd >= 0 + or croak(q/select(2): 'Bad file descriptor'/); + + my $initial = time; + my $pending = $timeout; + my $nfound; + + vec(my $fdset = '', $fd, 1) = 1; + + while () { + $nfound = ($type eq 'read') + ? select($fdset, undef, undef, $pending) + : select(undef, $fdset, undef, $pending) ; + if ($nfound == -1) { + $! == EINTR + or croak(qq/select(2): '$!'/); + redo if !$timeout || ($pending = $timeout - (time - $initial)) > 0; + $nfound = 0; + } + last; + } + $! = 0; + return $nfound; + } + + sub can_read { + @_ == 1 || @_ == 2 || croak(q/Usage: $handle->can_read([timeout])/); + my $self = shift; + return $self->_do_timeout('read', @_) + } + + sub can_write { + @_ == 1 || @_ == 2 || croak(q/Usage: $handle->can_write([timeout])/); + my $self = shift; + return $self->_do_timeout('write', @_) + } +} # HTTP::Micro::Handle + +my $prog = <<'EOP'; +BEGIN { + if ( defined &IO::Socket::SSL::CAN_IPV6 ) { + *CAN_IPV6 = \*IO::Socket::SSL::CAN_IPV6; + } + else { + constant->import( CAN_IPV6 => '' ); + } + my %const = ( + NID_CommonName => 13, + GEN_DNS => 2, + GEN_IPADD => 7, + ); + while ( my ($name,$value) = each %const ) { + no strict 'refs'; + *{$name} = UNIVERSAL::can( 'Net::SSLeay', $name ) || sub { $value }; + } +} +{ + use Carp qw(croak); + my %dispatcher = ( + issuer => sub { Net::SSLeay::X509_NAME_oneline( Net::SSLeay::X509_get_issuer_name( shift )) }, + subject => sub { Net::SSLeay::X509_NAME_oneline( Net::SSLeay::X509_get_subject_name( shift )) }, + ); + if ( $Net::SSLeay::VERSION >= 1.30 ) { + $dispatcher{commonName} = sub { + my $cn = Net::SSLeay::X509_NAME_get_text_by_NID( + Net::SSLeay::X509_get_subject_name( shift ), NID_CommonName); + $cn =~s{\0$}{}; # work around Bug in Net::SSLeay <1.33 + $cn; + } + } else { + $dispatcher{commonName} = sub { + croak "you need at least Net::SSLeay version 1.30 for getting commonName" + } + } + + if ( $Net::SSLeay::VERSION >= 1.33 ) { + $dispatcher{subjectAltNames} = sub { Net::SSLeay::X509_get_subjectAltNames( shift ) }; + } else { + $dispatcher{subjectAltNames} = sub { + return; + }; + } + + $dispatcher{authority} = $dispatcher{issuer}; + $dispatcher{owner} = $dispatcher{subject}; + $dispatcher{cn} = $dispatcher{commonName}; + + sub _peer_certificate { + my ($self, $field) = @_; + my $ssl = $self->_get_ssl_object or return; + + my $cert = ${*$self}{_SSL_certificate} + ||= Net::SSLeay::get_peer_certificate($ssl) + or return $self->error("Could not retrieve peer certificate"); + + if ($field) { + my $sub = $dispatcher{$field} or croak + "invalid argument for peer_certificate, valid are: ".join( " ",keys %dispatcher ). 
+ "\nMaybe you need to upgrade your Net::SSLeay"; + return $sub->($cert); + } else { + return $cert + } + } + + + my %scheme = ( + ldap => { + wildcards_in_cn => 0, + wildcards_in_alt => 'leftmost', + check_cn => 'always', + }, + http => { + wildcards_in_cn => 'anywhere', + wildcards_in_alt => 'anywhere', + check_cn => 'when_only', + }, + smtp => { + wildcards_in_cn => 0, + wildcards_in_alt => 0, + check_cn => 'always' + }, + none => {}, # do not check + ); + + $scheme{www} = $scheme{http}; # alias + $scheme{xmpp} = $scheme{http}; # rfc 3920 + $scheme{pop3} = $scheme{ldap}; # rfc 2595 + $scheme{imap} = $scheme{ldap}; # rfc 2595 + $scheme{acap} = $scheme{ldap}; # rfc 2595 + $scheme{nntp} = $scheme{ldap}; # rfc 4642 + $scheme{ftp} = $scheme{http}; # rfc 4217 + + + sub _verify_hostname_of_cert { + my $identity = shift; + my $cert = shift; + my $scheme = shift || 'none'; + if ( ! ref($scheme) ) { + $scheme = $scheme{$scheme} or croak "scheme $scheme not defined"; + } + + return 1 if ! %$scheme; # 'none' + + my $commonName = $dispatcher{cn}->($cert); + my @altNames = $dispatcher{subjectAltNames}->($cert); + + if ( my $sub = $scheme->{callback} ) { + return $sub->($identity,$commonName,@altNames); + } + + + my $ipn; + if ( CAN_IPV6 and $identity =~m{:} ) { + $ipn = IO::Socket::SSL::inet_pton(IO::Socket::SSL::AF_INET6,$identity) + or croak "'$identity' is not IPv6, but neither IPv4 nor hostname"; + } elsif ( $identity =~m{^\d+\.\d+\.\d+\.\d+$} ) { + $ipn = IO::Socket::SSL::inet_aton( $identity ) or croak "'$identity' is not IPv4, but neither IPv6 nor hostname"; + } else { + if ( $identity =~m{[^a-zA-Z0-9_.\-]} ) { + $identity =~m{\0} and croak("name '$identity' has \\0 byte"); + $identity = IO::Socket::SSL::idn_to_ascii($identity) or + croak "Warning: Given name '$identity' could not be converted to IDNA!"; + } + } + + my $check_name = sub { + my ($name,$identity,$wtyp) = @_; + $wtyp ||= ''; + my $pattern; + if ( $wtyp eq 'anywhere' and $name =~m{^([a-zA-Z0-9_\-]*)\*(.+)} ) { + $pattern = qr{^\Q$1\E[a-zA-Z0-9_\-]*\Q$2\E$}i; + } elsif ( $wtyp eq 'leftmost' and $name =~m{^\*(\..+)$} ) { + $pattern = qr{^[a-zA-Z0-9_\-]*\Q$1\E$}i; + } else { + $pattern = qr{^\Q$name\E$}i; + } + return $identity =~ $pattern; + }; + + my $alt_dnsNames = 0; + while (@altNames) { + my ($type, $name) = splice (@altNames, 0, 2); + if ( $ipn and $type == GEN_IPADD ) { + return 1 if $ipn eq $name; + + } elsif ( ! $ipn and $type == GEN_DNS ) { + $name =~s/\s+$//; $name =~s/^\s+//; + $alt_dnsNames++; + $check_name->($name,$identity,$scheme->{wildcards_in_alt}) + and return 1; + } + } + + if ( ! $ipn and ( + $scheme->{check_cn} eq 'always' or + $scheme->{check_cn} eq 'when_only' and !$alt_dnsNames)) { + $check_name->($commonName,$identity,$scheme->{wildcards_in_cn}) + and return 1; + } + + return 0; # no match + } +} +EOP + +eval { require IO::Socket::SSL }; +if ( $INC{"IO/Socket/SSL.pm"} ) { + eval $prog; + die $@ if $@; +} + +1; +# ########################################################################### +# End HTTP::Micro package +# ########################################################################### + +# ########################################################################### +# VersionCheck package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/VersionCheck.pm +# t/lib/VersionCheck.t +# See https://github.com/percona/percona-toolkit for more information. 
+# ########################################################################### +{ +package VersionCheck; + + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +local $Data::Dumper::Indent = 1; +local $Data::Dumper::Sortkeys = 1; +local $Data::Dumper::Quotekeys = 0; + +use Digest::MD5 qw(md5_hex); +use Sys::Hostname qw(hostname); +use File::Basename qw(); +use File::Spec; +use FindBin qw(); + +eval { + require Percona::Toolkit; + require HTTP::Micro; +}; + +my $home = $ENV{HOME} || $ENV{HOMEPATH} || $ENV{USERPROFILE} || '.'; +my @vc_dirs = ( + '/etc/percona', + '/etc/percona-toolkit', + '/tmp', + "$home", +); + +{ + my $file = 'percona-version-check'; + + sub version_check_file { + foreach my $dir ( @vc_dirs ) { + if ( -d $dir && -w $dir ) { + PTDEBUG && _d('Version check file', $file, 'in', $dir); + return $dir . '/' . $file; + } + } + PTDEBUG && _d('Version check file', $file, 'in', $ENV{PWD}); + return $file; # in the CWD + } +} + +sub version_check_time_limit { + return 60 * 60 * 24; # one day +} + + +sub version_check { + my (%args) = @_; + + my $instances = $args{instances} || []; + my $instances_to_check; + + PTDEBUG && _d('FindBin::Bin:', $FindBin::Bin); + if ( !$args{force} ) { + if ( $FindBin::Bin + && (-d "$FindBin::Bin/../.bzr" || + -d "$FindBin::Bin/../../.bzr" || + -d "$FindBin::Bin/../.git" || + -d "$FindBin::Bin/../../.git" + ) + ) { + PTDEBUG && _d("$FindBin::Bin/../.bzr disables --version-check"); + return; + } + } + + eval { + foreach my $instance ( @$instances ) { + my ($name, $id) = get_instance_id($instance); + $instance->{name} = $name; + $instance->{id} = $id; + } + + push @$instances, { name => 'system', id => 0 }; + + $instances_to_check = get_instances_to_check( + instances => $instances, + vc_file => $args{vc_file}, # testing + now => $args{now}, # testing + ); + PTDEBUG && _d(scalar @$instances_to_check, 'instances to check'); + return unless @$instances_to_check; + + my $protocol = 'https'; + eval { require IO::Socket::SSL; }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + PTDEBUG && _d("SSL not available, won't run version_check"); + return; + } + PTDEBUG && _d('Using', $protocol); + my $url = $args{url} # testing + || $ENV{PERCONA_VERSION_CHECK_URL} # testing + || "$protocol://v.percona.com"; + PTDEBUG && _d('API URL:', $url); + + my $advice = pingback( + instances => $instances_to_check, + protocol => $protocol, + url => $url, + ); + if ( $advice ) { + PTDEBUG && _d('Advice:', Dumper($advice)); + if ( scalar @$advice > 1) { + print "\n# " . scalar @$advice . " software updates are " + . "available:\n"; + } + else { + print "\n# A software update is available:\n"; + } + print join("\n", map { "# * $_" } @$advice), "\n\n"; + } + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Version check failed:', $EVAL_ERROR); + } + + if ( @$instances_to_check ) { + eval { + update_check_times( + instances => $instances_to_check, + vc_file => $args{vc_file}, # testing + now => $args{now}, # testing + ); + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Error updating version check file:', $EVAL_ERROR); + } + } + + if ( $ENV{PTDEBUG_VERSION_CHECK} ) { + warn "Exiting because the PTDEBUG_VERSION_CHECK " + . 
"environment variable is defined.\n"; + exit 255; + } + + return; +} + +sub get_instances_to_check { + my (%args) = @_; + + my $instances = $args{instances}; + my $now = $args{now} || int(time); + my $vc_file = $args{vc_file} || version_check_file(); + + if ( !-f $vc_file ) { + PTDEBUG && _d('Version check file', $vc_file, 'does not exist;', + 'version checking all instances'); + return $instances; + } + + open my $fh, '<', $vc_file or die "Cannot open $vc_file: $OS_ERROR"; + chomp(my $file_contents = do { local $/ = undef; <$fh> }); + PTDEBUG && _d('Version check file', $vc_file, 'contents:', $file_contents); + close $fh; + my %last_check_time_for = $file_contents =~ /^([^,]+),(.+)$/mg; + + my $check_time_limit = version_check_time_limit(); + my @instances_to_check; + foreach my $instance ( @$instances ) { + my $last_check_time = $last_check_time_for{ $instance->{id} }; + PTDEBUG && _d('Instance', $instance->{id}, 'last checked', + $last_check_time, 'now', $now, 'diff', $now - ($last_check_time || 0), + 'hours until next check', + sprintf '%.2f', + ($check_time_limit - ($now - ($last_check_time || 0))) / 3600); + if ( !defined $last_check_time + || ($now - $last_check_time) >= $check_time_limit ) { + PTDEBUG && _d('Time to check', Dumper($instance)); + push @instances_to_check, $instance; + } + } + + return \@instances_to_check; +} + +sub update_check_times { + my (%args) = @_; + + my $instances = $args{instances}; + my $now = $args{now} || int(time); + my $vc_file = $args{vc_file} || version_check_file(); + PTDEBUG && _d('Updating last check time:', $now); + + my %all_instances = map { + $_->{id} => { name => $_->{name}, ts => $now } + } @$instances; + + if ( -f $vc_file ) { + open my $fh, '<', $vc_file or die "Cannot read $vc_file: $OS_ERROR"; + my $contents = do { local $/ = undef; <$fh> }; + close $fh; + + foreach my $line ( split("\n", ($contents || '')) ) { + my ($id, $ts) = split(',', $line); + if ( !exists $all_instances{$id} ) { + $all_instances{$id} = { ts => $ts }; # original ts, not updated + } + } + } + + open my $fh, '>', $vc_file or die "Cannot write to $vc_file: $OS_ERROR"; + foreach my $id ( sort keys %all_instances ) { + PTDEBUG && _d('Updated:', $id, Dumper($all_instances{$id})); + print { $fh } $id . ',' . $all_instances{$id}->{ts} . "\n"; + } + close $fh; + + return; +} + +sub get_instance_id { + my ($instance) = @_; + + my $dbh = $instance->{dbh}; + my $dsn = $instance->{dsn}; + + my $sql = q{SELECT CONCAT(@@hostname, @@port)}; + PTDEBUG && _d($sql); + my ($name) = eval { $dbh->selectrow_array($sql) }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + $sql = q{SELECT @@hostname}; + PTDEBUG && _d($sql); + ($name) = eval { $dbh->selectrow_array($sql) }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + $name = ($dsn->{h} || 'localhost') . ($dsn->{P} || 3306); + } + else { + $sql = q{SHOW VARIABLES LIKE 'port'}; + PTDEBUG && _d($sql); + my (undef, $port) = eval { $dbh->selectrow_array($sql) }; + PTDEBUG && _d('port:', $port); + $name .= $port || ''; + } + } + my $id = md5_hex($name); + + PTDEBUG && _d('MySQL instance:', $id, $name, Dumper($dsn)); + + return $name, $id; +} + + +sub get_uuid { + my $uuid_file = '/.percona-toolkit.uuid'; + foreach my $dir (@vc_dirs) { + my $filename = $dir.$uuid_file; + my $uuid=_read_uuid($filename); + return $uuid if $uuid; + } + + my $filename = $ENV{"HOME"} . 
$uuid_file;
+   my $uuid = _generate_uuid();
+
+   my $fh;
+   eval {
+      open($fh, '>', $filename);
+   };
+   if (!$EVAL_ERROR) {
+      print $fh $uuid;
+      close $fh;
+   }
+
+   return $uuid;
+}
+
+sub _generate_uuid {
+   return sprintf+($}="%04x")."$}-$}-$}-$}-".$}x3,map rand 65537,0..7;
+}
+
+sub _read_uuid {
+   my $filename = shift;
+   my $fh;
+
+   eval {
+      open($fh, '<:encoding(UTF-8)', $filename);
+   };
+   return if ($EVAL_ERROR);
+
+   my $uuid;
+   eval { $uuid = <$fh>; };
+   return if ($EVAL_ERROR);
+
+   chomp $uuid;
+   return $uuid;
+}
+
+
+sub pingback {
+   my (%args) = @_;
+   my @required_args = qw(url instances);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my $url       = $args{url};
+   my $instances = $args{instances};
+
+   my $ua = $args{ua} || HTTP::Micro->new( timeout => 3 );
+
+   my $response = $ua->request('GET', $url);
+   PTDEBUG && _d('Server response:', Dumper($response));
+   die "No response from GET $url"
+      if !$response;
+   die("GET on $url returned HTTP status $response->{status}; expected 200\n",
+       ($response->{content} || '')) if $response->{status} != 200;
+   die("GET on $url did not return any programs to check")
+      if !$response->{content};
+
+   my $items = parse_server_response(
+      response => $response->{content}
+   );
+   die "Failed to parse server requested programs: $response->{content}"
+      if !scalar keys %$items;
+
+   my $versions = get_versions(
+      items     => $items,
+      instances => $instances,
+   );
+   die "Failed to get any program versions; should have at least gotten Perl"
+      if !scalar keys %$versions;
+
+   my $client_content = encode_client_response(
+      items      => $items,
+      versions   => $versions,
+      general_id => get_uuid(),
+   );
+
+   my $tool_name = $ENV{XTRABACKUP_VERSION} ? "Percona XtraBackup" : File::Basename::basename($0);
+   my $client_response = {
+      headers => { "X-Percona-Toolkit-Tool" => $tool_name },
+      content => $client_content,
+   };
+   PTDEBUG && _d('Client response:', Dumper($client_response));
+
+   $response = $ua->request('POST', $url, $client_response);
+   PTDEBUG && _d('Server suggestions:', Dumper($response));
+   die "No response from POST $url $client_response"
+      if !$response;
+   die "POST $url returned HTTP status $response->{status}; expected 200"
+      if $response->{status} != 200;
+
+   return unless $response->{content};
+
+   $items = parse_server_response(
+      response   => $response->{content},
+      split_vars => 0,
+   );
+   die "Failed to parse server suggestions: $response->{content}"
+      if !scalar keys %$items;
+   my @suggestions = map { $_->{vars} }
+                     sort { $a->{item} cmp $b->{item} }
+                     values %$items;
+
+   return \@suggestions;
+}
+
+sub encode_client_response {
+   my (%args) = @_;
+   my @required_args = qw(items versions general_id);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($items, $versions, $general_id) = @args{@required_args};
+
+   my @lines;
+   foreach my $item ( sort keys %$items ) {
+      next unless exists $versions->{$item};
+      if ( ref($versions->{$item}) eq 'HASH' ) {
+         my $mysql_versions = $versions->{$item};
+         for my $id ( sort keys %$mysql_versions ) {
+            push @lines, join(';', $id, $item, $mysql_versions->{$id});
+         }
+      }
+      else {
+         push @lines, join(';', $general_id, $item, $versions->{$item});
+      }
+   }
+
+   my $client_response = join("\n", @lines) . "\n";
+   return $client_response;
+}
+
+sub parse_server_response {
+   my (%args) = @_;
+   my @required_args = qw(response);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($response) = @args{@required_args};
+
+   my %items = map {
+      my ($item, $type, $vars) = split(";", $_);
+      if ( !defined $args{split_vars} || $args{split_vars} ) {
+         $vars = [ split(",", ($vars || '')) ];
+      }
+      $item => {
+         item => $item,
+         type => $type,
+         vars => $vars,
+      };
+   } split("\n", $response);
+
+   PTDEBUG && _d('Items:', Dumper(\%items));
+
+   return \%items;
+}
+
+my %sub_for_type = (
+   os_version          => \&get_os_version,
+   perl_version        => \&get_perl_version,
+   perl_module_version => \&get_perl_module_version,
+   mysql_variable      => \&get_mysql_variable,
+   xtrabackup          => \&get_xtrabackup_version,
+);
+
+sub valid_item {
+   my ($item) = @_;
+   return unless $item;
+   if ( !exists $sub_for_type{ $item->{type} } ) {
+      PTDEBUG && _d('Invalid type:', $item->{type});
+      return 0;
+   }
+   return 1;
+}
+
+sub get_versions {
+   my (%args) = @_;
+   my @required_args = qw(items);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($items) = @args{@required_args};
+
+   my %versions;
+   foreach my $item ( values %$items ) {
+      next unless valid_item($item);
+      eval {
+         my $version = $sub_for_type{ $item->{type} }->(
+            item      => $item,
+            instances => $args{instances},
+         );
+         if ( $version ) {
+            chomp $version unless ref($version);
+            $versions{$item->{item}} = $version;
+         }
+      };
+      if ( $EVAL_ERROR ) {
+         PTDEBUG && _d('Error getting version for', Dumper($item), $EVAL_ERROR);
+      }
+   }
+
+   return \%versions;
+}
+
+
+sub get_os_version {
+   if ( $OSNAME eq 'MSWin32' ) {
+      require Win32;
+      return Win32::GetOSDisplayName();
+   }
+
+   chomp(my $platform = `uname -s`);
+   PTDEBUG && _d('platform:', $platform);
+   return $OSNAME unless $platform;
+
+   chomp(my $lsb_release
+            = `which lsb_release 2>/dev/null | awk '{print \$1}'` || '');
+   PTDEBUG && _d('lsb_release:', $lsb_release);
+
+   my $release = "";
+
+   if ( $platform eq 'Linux' ) {
+      if ( -f "/etc/fedora-release" ) {
+         $release = `cat /etc/fedora-release`;
+      }
+      elsif ( -f "/etc/redhat-release" ) {
+         $release = `cat /etc/redhat-release`;
+      }
+      elsif ( -f "/etc/system-release" ) {
+         $release = `cat /etc/system-release`;
+      }
+      elsif ( $lsb_release ) {
+         $release = `$lsb_release -ds`;
+      }
+      elsif ( -f "/etc/lsb-release" ) {
+         $release = `grep DISTRIB_DESCRIPTION /etc/lsb-release`;
+         $release =~ s/^\w+="([^"]+)".+/$1/;
+      }
+      elsif ( -f "/etc/debian_version" ) {
+         chomp(my $rel = `cat /etc/debian_version`);
+         $release = "Debian $rel";
+         if ( -f "/etc/apt/sources.list" ) {
+            chomp(my $code_name = `awk '/^deb/ {print \$3}' /etc/apt/sources.list | awk -F/ '{print \$1}'| awk 'BEGIN {FS="|"} {print \$1}' | sort | uniq -c | sort -rn | head -n1 | awk '{print \$2}'`);
+            $release .= " ($code_name)" if $code_name;
+         }
+      }
+      elsif ( -f "/etc/os-release" ) { # openSUSE
+         chomp($release = `grep PRETTY_NAME /etc/os-release`);
+         $release =~ s/^PRETTY_NAME="(.+)"$/$1/;
+      }
+      elsif ( `ls /etc/*release 2>/dev/null` ) {
+         if ( `grep DISTRIB_DESCRIPTION /etc/*release 2>/dev/null` ) {
+            $release = `grep DISTRIB_DESCRIPTION /etc/*release | head -n1`;
+         }
+         else {
+            $release = `cat /etc/*release | head -n1`;
+         }
+      }
+   }
+   elsif ( $platform =~ m/(?:BSD|^Darwin)$/ ) {
+      my $rel = `uname -r`;
+      $release = "$platform $rel";
+   }
+   elsif ( $platform eq "SunOS" ) {
+      my $rel = `head -n1 /etc/release` || `uname -r`;
+      $release = "$platform $rel";
} + + if ( !$release ) { + PTDEBUG && _d('Failed to get the release, using platform'); + $release = $platform; + } + chomp($release); + + $release =~ s/^"|"$//g; + + PTDEBUG && _d('OS version =', $release); + return $release; +} + +sub get_perl_version { + my (%args) = @_; + my $item = $args{item}; + return unless $item; + + my $version = sprintf '%vd', $PERL_VERSION; + PTDEBUG && _d('Perl version', $version); + return $version; +} + +sub get_xtrabackup_version { + return $ENV{XTRABACKUP_VERSION}; +} + +sub get_perl_module_version { + my (%args) = @_; + my $item = $args{item}; + return unless $item; + + my $var = '$' . $item->{item} . '::VERSION'; + my $version = eval "use $item->{item}; $var;"; + PTDEBUG && _d('Perl version for', $var, '=', $version); + return $version; +} + +sub get_mysql_variable { + return get_from_mysql( + show => 'VARIABLES', + @_, + ); +} + +sub get_from_mysql { + my (%args) = @_; + my $show = $args{show}; + my $item = $args{item}; + my $instances = $args{instances}; + return unless $show && $item; + + if ( !$instances || !@$instances ) { + PTDEBUG && _d('Cannot check', $item, + 'because there are no MySQL instances'); + return; + } + + if ($item->{item} eq 'MySQL' && $item->{type} eq 'mysql_variable') { + @{$item->{vars}} = grep { $_ eq 'version' || $_ eq 'version_comment' } @{$item->{vars}}; + } + + + my @versions; + my %version_for; + foreach my $instance ( @$instances ) { + next unless $instance->{id}; # special system instance has id=0 + my $dbh = $instance->{dbh}; + local $dbh->{FetchHashKeyName} = 'NAME_lc'; + my $sql = qq/SHOW $show/; + PTDEBUG && _d($sql); + my $rows = $dbh->selectall_hashref($sql, 'variable_name'); + + my @versions; + foreach my $var ( @{$item->{vars}} ) { + $var = lc($var); + my $version = $rows->{$var}->{value}; + PTDEBUG && _d('MySQL version for', $item->{item}, '=', $version, + 'on', $instance->{name}); + push @versions, $version; + } + $version_for{ $instance->{id} } = join(' ', @versions); + } + + return \%version_for; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End VersionCheck package +# ########################################################################### + +# ########################################################################### +# This is a combination of modules and programs in one -- a runnable module. +# http://www.perl.com/pub/a/2006/07/13/lightning-articles.html?page=last +# Or, look it up in the Camel book on pages 642 and 643 in the 3rd edition. +# +# Check at the end of this package for the call to main() which actually runs +# the program. +# ########################################################################### +package pt_config_diff; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Data::Dumper; +use JSON; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +use Percona::Toolkit; +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +sub main { + local @ARGV = @_; # set global ARGV for this package + + # ######################################################################## + # Get configuration information. 
+   # ########################################################################
+   my $o = new OptionParser();
+   $o->get_specs();
+   $o->get_opts();
+
+   my $dp = $o->DSNParser();
+   $dp->prop('set-vars', $o->set_vars());
+
+   if ( !$o->get('help') ) {
+      if ( @ARGV < 1 ) {
+         $o->save_error("Specify at least one file or DSN on the command line");
+      }
+   }
+
+   $o->usage_or_errors();
+
+   # #########################################################################
+   # Make common modules.
+   # #########################################################################
+   my $trp = new TextResultSetParser();
+   my $config_cmp = new MySQLConfigComparer(
+      ignore_variables => $o->get('ignore-variables'),
+      ignore_case      => $o->get('ignore-case'),
+   );
+   my %common_modules = (
+      DSNParser           => $dp,
+      OptionParser        => $o,
+      MySQLConfigComparer => $config_cmp,
+      TextResultSetParser => $trp,
+   );
+
+   # #########################################################################
+   # Make MySQLConfig objs for each FILE|DSN.
+   # #########################################################################
+   my $dsn_defaults = $dp->parse_options($o);
+   my $last_dsn;
+   my @configs;      # MySQLConfig objects
+   my @config_names; # Human-readable names for those ^ objs
+   my @cxn;
+   foreach my $config_src ( @ARGV ) {
+      if ( -f $config_src ) {
+         PTDEBUG && _d('Config source', $config_src, 'is a file');
+         push @configs, new MySQLConfig(
+            file => $config_src,
+            %common_modules,
+         );
+         push @config_names, $config_src; # filename
+      }
+      else {
+         PTDEBUG && _d('Config source', $config_src, 'is a DSN');
+         my $cxn = new Cxn(
+            dsn_string   => $config_src,
+            prev_dsn     => $last_dsn,
+            DSNParser    => $dp,
+            OptionParser => $o,
+         );
+         $cxn->connect();
+         $last_dsn = $cxn->dsn();
+
+         push @configs, new MySQLConfig(
+            dbh => $cxn->dbh(),
+            dsn => $cxn->dsn(),
+            %common_modules,
+         );
+         push @config_names, $cxn->name();
+         push @cxn, $cxn;
+      }
+   }
+
+   # ########################################################################
+   # Daemonize now that everything is set up and ready to work.
+   # ########################################################################
+   my $daemon;
+   if ( $o->get('pid') ) {
+      # We're not daemonizing; this just handles PID stuff.
+      $daemon = new Daemon(o=>$o);
+      $daemon->make_PID_file();
+   }
+
+   # ########################################################################
+   # Do the version-check
+   # ########################################################################
+   if ( $o->get('version-check') && (!$o->has('quiet') || !$o->get('quiet')) ) {
+      VersionCheck::version_check(
+         force     => $o->got('version-check'),
+         instances => [ map({ +{ dbh => $_->dbh, dsn => $_->dsn } } @cxn) ],
+      );
+   }
+
+   # #########################################################################
+   # Diff the given configs.
+   # #########################################################################
+   my $report;
+   my $truncate_callback;
+   if ( $o->get('report') ) {
+      $report = new ReportFormatter(
+         line_prefix => '',
+         line_width  => $o->get('report-width'),
+      );
+      $report->set_columns(
+         { name => 'Variable', width=>25, },
+         map { { name => $_ } } @config_names,
+      );
+
+      # This is difficult. Ideally, we want to know which var this
+      # val applies to (i.e. first column, same row). But that's
+      # not how ReportFormatter works. Plus, even if we truncate a
+      # path on the left side, that might be where the difference is.
+      # So there's no easy solution here.
+      # $truncate_callback = sub {
+      # };
+   }
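+
+   # For illustration only (sample values, not part of the original code):
+   # diff() below returns a hashref keyed by variable name, holding one
+   # value per CONFIG in command-line order, e.g.
+   #    { datadir => ['/tmp/12345/data', '/tmp/12346/data'] }
+   # The report and JSON branches that follow render that structure.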
+
+   PTDEBUG && _d("Comparing", scalar @configs, "configs");
+   my $diffs   = $config_cmp->diff(configs=>\@configs);
+   my $n_diffs = scalar keys %$diffs;
+   PTDEBUG && _d($n_diffs, "differences found:", Dumper($diffs));
+   if ( $n_diffs ) {
+      if ( $o->get('report') ) {
+         if ( $o->get('json-report')) {
+            my $JSON = JSON->new->utf8;
+            $JSON->convert_blessed(1);
+            my $json_diffs = {};
+            foreach my $key (keys %$diffs) {
+               $json_diffs->{$key} = {
+                  $config_names[0] => $diffs->{$key}[0],
+                  $config_names[1] => $diffs->{$key}[1],
+               };
+            }
+            PTDEBUG && _d("json_diffs", Dumper($json_diffs));
+            my $json_report = $JSON->encode($json_diffs);
+            print "$json_report\n";
+         } else {
+            foreach my $var ( sort keys %$diffs ) {
+               $report->add_line($var, @{$diffs->{$var}});
+            }
+            $report->title(
+               "$n_diffs config difference" . ($n_diffs > 1 ? 's' : ''));
+            print $report->get_report();
+         }
+      }
+      return 1;
+   }
+
+   # No differences.
+   return 0;
+}
+
+# ##########################################################################
+# Subroutines
+# ##########################################################################
+
+sub _d {
+   my ($package, undef, $line) = caller 0;
+   @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
+        map { defined $_ ? $_ : 'undef' }
+        @_;
+   print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
+}
+
+# ############################################################################
+# Run the program.
+# ############################################################################
+if ( !caller ) { exit main(@ARGV); }
+
+1; # Because this is a module as well as a script.
+
+# ############################################################################
+# Documentation
+# ############################################################################
+
+=pod
+
+=head1 NAME
+
+pt-config-diff - Diff MySQL configuration files and server variables.
+
+=head1 SYNOPSIS
+
+Usage: pt-config-diff [OPTIONS] CONFIG CONFIG [CONFIG...]
+
+pt-config-diff diffs MySQL configuration files and server variables.
+CONFIG can be a filename or a DSN. At least two CONFIG sources must be given.
+Like standard Unix diff, there is no output if there are no differences.
+
+Diff host1 config from SHOW VARIABLES against host2:
+
+   pt-config-diff h=host1 h=host2
+
+Diff config from [mysqld] section in my.cnf against host1 config:
+
+   pt-config-diff /etc/my.cnf h=host1
+
+Diff the [mysqld] section of two option files:
+
+   pt-config-diff /etc/my-small.cnf /etc/my-large.cnf
+
+=head1 RISKS
+
+Percona Toolkit is mature, proven in the real world, and well tested,
+but all database tools can pose a risk to the system and the database
+server. Before using this tool, please:
+
+=over
+
+=item * Read the tool's documentation
+
+=item * Review the tool's known L<"BUGS">
+
+=item * Test the tool on a non-production server
+
+=item * Backup your production server and verify the backups
+
+=back
+
+=head1 DESCRIPTION
+
+pt-config-diff diffs MySQL configurations by examining the values of server
+system variables from two or more CONFIG sources specified on the command
+line. A CONFIG source can be a DSN or a filename containing the output of
+C<mysqld --help --verbose>, C<my_print_defaults>, C<SHOW VARIABLES>, or
+an option file (e.g. my.cnf).
+
+For each DSN CONFIG, pt-config-diff connects to MySQL and gets variables
+and values by executing C<SHOW VARIABLES>. This is
+an "active config" because it shows what server values MySQL is
+actively (currently) running with.
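+
+For example, to check whether a running server still matches the option
+file it was started from, diff the file against a DSN for that server
+(host and login details below are illustrative):
+
+   pt-config-diff /etc/my.cnf h=db1,u=percona --ask-pass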
+ +Only variables that all CONFIG sources have are compared because if a +variable is not present then we cannot know or safely guess its value. +For example, if you compare an option file (e.g. my.cnf) to an active config +(i.e. SHOW VARIABLES from a DSN CONFIG), the option file will probably +only have a few variables, whereas the active config has every variable. +Only values of the variables present in both configs are compared. + +Option file and DSN configs provide the best results. + +=head1 OUTPUT + +There is no output when there are no differences. When there are differences, +pt-config-diff prints a report to STDOUT that looks similar to the following: + + 2 config differences + Variable my.master.cnf my.slave.cnf + ========================= =============== =============== + datadir /tmp/12345/data /tmp/12346/data + port 12345 12346 + +Comparing MySQL variables is difficult because there are many variations and +subtleties across the many versions and distributions of MySQL. When a +comparison fails, the tool prints a warning to STDERR, such as the following: + + Comparing log_error values (mysqld.log, /tmp/12345/data/mysqld.log) + caused an error: Argument "/tmp/12345/data/mysqld.log" isn't numeric + in numeric eq (==) at ./pt-config-diff line 2311. + +Please report these warnings so the comparison functions can be improved. + +=head1 EXIT STATUS + +pt-config-diff exits with a zero exit status when there are no differences, and +1 if there are. + +=head1 OPTIONS + +This tool accepts additional command-line arguments. Refer to the +L<"SYNOPSIS"> and usage information for details. + +=over + +=item --ask-pass + +Prompt for a password when connecting to MySQL. + +=item --charset + +short form: -A; type: string + +Default character set. If the value is utf8, sets Perl's binmode on +STDOUT to utf8, passes the mysql_enable_utf8 option to DBD::mysql, and +runs SET NAMES UTF8 after connecting to MySQL. Any other value sets +binmode on STDOUT without the utf8 layer, and runs SET NAMES after +connecting to MySQL. + +=item --config + +type: Array + +Read this comma-separated list of config files; if specified, this must be the +first option on the command line. (This option does not specify a CONFIG; +it's equivalent to C<--defaults-file>.) + +=item --database + +short form: -D; type: string + +Connect to this database. + +=item --defaults-file + +short form: -F; type: string + +Only read mysql options from the given file. You must give an absolute +pathname. + +=item --help + +Show help and exit. + +=item --host + +short form: -h; type: string + +Connect to host. + +=item --[no]ignore-case + +default: yes + +Compare the variables case-insensitively. + +=item --ignore-variables + +type: array + +Ignore, do not compare, these variables. + +=item --password + +short form: -p; type: string + +Password to use for connection. + +=item --pid + +type: string + +Create the given PID file. The tool won't start if the PID file already +exists and the PID it contains is different than the current PID. However, +if the PID file exists and the PID it contains is no longer running, the +tool will overwrite the PID file with the current PID. The PID file is +removed automatically when the tool exits. + +=item --port + +short form: -P; type: int + +Port number to use for connection. + +=item --[no]report + +default: yes + +Print the MySQL config diff report to STDOUT. 
If you just want to check
+if the given configs are different or not by examining the tool's exit
+status, then specify C<--no-report> to suppress the report.
+
+=item --report-width
+
+type: int; default: 78
+
+Truncate report lines to this many characters. Since some variable values can
+be long, or when comparing multiple configs, it may help to increase the
+report width so values are not truncated beyond readability.
+
+=item --[no]json-report
+
+default: no
+
+Print the report in JSON format.
+
+=item --set-vars
+
+type: Array
+
+Set the MySQL variables in this comma-separated list of C<variable=value> pairs.
+
+By default, the tool sets:
+
+=for comment ignore-pt-internal-value
+MAGIC_set_vars
+
+   wait_timeout=10000
+
+Variables specified on the command line override these defaults. For
+example, specifying C<--set-vars wait_timeout=500> overrides the default
+value of C<10000>.
+
+The tool prints a warning and continues if a variable cannot be set.
+
+=item --socket
+
+short form: -S; type: string
+
+Socket file to use for connection.
+
+=item --user
+
+short form: -u; type: string
+
+MySQL user if not current user.
+
+=item --version
+
+Show version and exit.
+
+=item --[no]version-check
+
+default: yes
+
+Check for the latest version of Percona Toolkit, MySQL, and other programs.
+
+This is a standard "check for updates automatically" feature, with two
+additional features. First, the tool checks its own version and also the
+versions of the following software: operating system, Percona Monitoring and
+Management (PMM), MySQL, Perl, MySQL driver for Perl (DBD::mysql), and
+Percona Toolkit. Second, it checks for and warns about versions with known
+problems. For example, MySQL 5.5.25 had a critical bug and was re-released
+as 5.5.25a.
+
+A secure connection to Percona's Version Check database server is done to
+perform these checks. Each request is logged by the server, including software
+version numbers and unique ID of the checked system. The ID is generated by the
+Percona Toolkit installation script or when the Version Check database call is
+done for the first time.
+
+Any updates or known problems are printed to STDOUT before the tool's normal
+output. This feature should never interfere with the normal operation of the
+tool.
+
+For more information, visit L<https://www.percona.com/doc/percona-toolkit/LATEST/version-check.html>.
+
+=back
+
+=head1 DSN OPTIONS
+
+These DSN options are used to create a DSN. Each option is given like
+C<option=value>. The options are case-sensitive, so P and p are not the
+same option. There cannot be whitespace before or after the C<=> and
+if the value contains whitespace it must be quoted. DSN options are
+comma-separated. See the L<percona-toolkit> manpage for full details.
+
+=over
+
+=item * A
+
+dsn: charset; copy: yes
+
+Default character set.
+
+=item * D
+
+dsn: database; copy: yes
+
+Default database.
+
+=item * F
+
+dsn: mysql_read_default_file; copy: yes
+
+Only read default options from the given file.
+
+=item * h
+
+dsn: host; copy: yes
+
+Connect to host.
+
+=item * p
+
+dsn: password; copy: yes
+
+Password to use when connecting.
+If password contains commas they must be escaped with a backslash: "exam\,ple"
+
+=item * P
+
+dsn: port; copy: yes
+
+Port number to use for connection.
+
+=item * S
+
+dsn: mysql_socket; copy: yes
+
+Socket file to use for connection.
+
+=item * u
+
+dsn: user; copy: yes
+
+User for login if not current user.
+
+=back
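+
+For example (an illustrative DSN, not taken from the examples above):
+C<h=db1,P=3307,u=percona> connects to host db1 on port 3307 as user
+percona; combined with L<"--ask-pass">, the tool prompts for the password.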
+
+=head1 ENVIRONMENT
+
+The environment variable C<PTDEBUG> enables verbose debugging output to STDERR.
+To enable debugging and capture all output to a file, run the tool like:
+
+   PTDEBUG=1 pt-config-diff ... > FILE 2>&1
+
+Be careful: debugging output is voluminous and can generate several megabytes
+of output.
+
+=head1 ATTENTION
+
+Using C<PTDEBUG> might expose passwords. When debug is enabled, all command line
+parameters are shown in the output.
+
+=head1 SYSTEM REQUIREMENTS
+
+You need Perl, DBI, DBD::mysql, and some core packages that ought to be
+installed in any reasonably new version of Perl.
+
+=head1 BUGS
+
+For a list of known bugs, see L<http://www.percona.com/bugs/pt-config-diff>.
+
+Please report bugs at L<https://jira.percona.com/projects/PT>.
+Include the following information in your bug report:
+
+=over
+
+=item * Complete command-line used to run the tool
+
+=item * Tool L<"--version">
+
+=item * MySQL version of all servers involved
+
+=item * Output from the tool including STDERR
+
+=item * Input files (log/dump/config files, etc.)
+
+=back
+
+If possible, include debugging output by running the tool with C<PTDEBUG>;
+see L<"ENVIRONMENT">.
+
+=head1 DOWNLOADING
+
+Visit L<http://www.percona.com/software/percona-toolkit/> to download the
+latest release of Percona Toolkit. Or, get the latest release from the
+command line:
+
+   wget percona.com/get/percona-toolkit.tar.gz
+
+   wget percona.com/get/percona-toolkit.rpm
+
+   wget percona.com/get/percona-toolkit.deb
+
+You can also get individual tools from the latest release:
+
+   wget percona.com/get/TOOL
+
+Replace C<TOOL> with the name of any tool.
+
+=head1 AUTHORS
+
+Baron Schwartz and Daniel Nichter
+
+=head1 ABOUT PERCONA TOOLKIT
+
+This tool is part of Percona Toolkit, a collection of advanced command-line
+tools for MySQL developed by Percona. Percona Toolkit was forked from two
+projects in June, 2011: Maatkit and Aspersa. Those projects were created by
+Baron Schwartz and primarily developed by him and Daniel Nichter. Visit
+L<http://www.percona.com/software/> to learn about other free, open-source
+software from Percona.
+
+=head1 COPYRIGHT, LICENSE, AND WARRANTY
+
+This program is copyright 2011-2021 Percona LLC and/or its affiliates.
+
+THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation, version 2; OR the Perl Artistic License. On UNIX and similar
+systems, you can issue `man perlgpl' or `man perlartistic' to read these
+licenses.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+Place, Suite 330, Boston, MA 02111-1307 USA.
+
+=head1 VERSION
+
+pt-config-diff 3.5.1
+
+=cut
diff --git a/dbm-services/mysql/db-tools/mysql-monitor/pt-summary b/dbm-services/mysql/db-tools/mysql-monitor/pt-summary
new file mode 100755
index 0000000000..b28557273a
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-monitor/pt-summary
@@ -0,0 +1,2776 @@
+#!/bin/sh
+
+# This program is part of Percona Toolkit: http://www.percona.com/software/
+# See "COPYRIGHT, LICENSE, AND WARRANTY" at the end of this file for legal
+# notices and disclaimers.
+
+set -u
+
+# ########################################################################
+# Globals, settings, helper functions
+# ########################################################################
+TOOL="pt-summary"
+POSIXLY_CORRECT=1
+export POSIXLY_CORRECT
+
+# ###########################################################################
+# log_warn_die package
+# This package is a copy without comments from the original.
The original +# with comments and its test file can be found in the GitHub repository at, +# lib/bash/log_warn_die.sh +# t/lib/bash/log_warn_die.sh +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### + + +set -u + +PTFUNCNAME="" +PTDEBUG="${PTDEBUG:-""}" +EXIT_STATUS=0 + +ts() { + TS=$(date +%F-%T | tr ':-' '_') + echo "$TS $*" +} + +info() { + [ ${OPT_VERBOSE:-3} -ge 3 ] && ts "$*" +} + +log() { + [ ${OPT_VERBOSE:-3} -ge 2 ] && ts "$*" +} + +warn() { + [ ${OPT_VERBOSE:-3} -ge 1 ] && ts "$*" >&2 + EXIT_STATUS=1 +} + +die() { + ts "$*" >&2 + EXIT_STATUS=1 + exit 1 +} + +_d () { + [ "$PTDEBUG" ] && echo "# $PTFUNCNAME: $(ts "$*")" >&2 +} + +# ########################################################################### +# End log_warn_die package +# ########################################################################### + +# ########################################################################### +# parse_options package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/bash/parse_options.sh +# t/lib/bash/parse_options.sh +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### + + + + + +set -u + +ARGV="" # Non-option args (probably input files) +EXT_ARGV="" # Everything after -- (args for an external command) +HAVE_EXT_ARGV="" # Got --, everything else is put into EXT_ARGV +OPT_ERRS=0 # How many command line option errors +OPT_VERSION="" # If --version was specified +OPT_HELP="" # If --help was specified +OPT_ASK_PASS="" # If --ask-pass was specified +PO_DIR="" # Directory with program option spec files + +usage() { + local file="$1" + + local usage="$(grep '^Usage: ' "$file")" + echo $usage + echo + echo "For more information, 'man $TOOL' or 'perldoc $file'." +} + +usage_or_errors() { + local file="$1" + local version="" + + if [ "$OPT_VERSION" ]; then + version=$(grep '^pt-[^ ]\+ [0-9]' "$file") + echo "$version" + return 1 + fi + + if [ "$OPT_HELP" ]; then + usage "$file" + echo + echo "Command line options:" + echo + perl -e ' + use strict; + use warnings FATAL => qw(all); + my $lcol = 20; # Allow this much space for option names. + my $rcol = 80 - $lcol; # The terminal is assumed to be 80 chars wide. + my $name; + while ( <> ) { + my $line = $_; + chomp $line; + if ( $line =~ s/^long:/ --/ ) { + $name = $line; + } + elsif ( $line =~ s/^desc:// ) { + $line =~ s/ +$//mg; + my @lines = grep { $_ } + $line =~ m/(.{0,$rcol})(?:\s+|\Z)/g; + if ( length($name) >= $lcol ) { + print $name, "\n", (q{ } x $lcol); + } + else { + printf "%-${lcol}s", $name; + } + print join("\n" . (q{ } x $lcol), @lines); + print "\n"; + } + } + ' "$PO_DIR"/* + echo + echo "Options and values after processing arguments:" + echo + ( + cd "$PO_DIR" + for opt in *; do + local varname="OPT_$(echo "$opt" | tr a-z- A-Z_)" + eval local varvalue=\$$varname + if ! 
grep -q "type:" "$PO_DIR/$opt" >/dev/null; then + if [ "$varvalue" -a "$varvalue" = "yes" ]; + then varvalue="TRUE" + else + varvalue="FALSE" + fi + fi + printf -- " --%-30s %s" "$opt" "${varvalue:-(No value)}" + echo + done + ) + return 1 + fi + + if [ $OPT_ERRS -gt 0 ]; then + echo + usage "$file" + return 1 + fi + + return 0 +} + +option_error() { + local err="$1" + OPT_ERRS=$(($OPT_ERRS + 1)) + echo "$err" >&2 +} + +parse_options() { + local file="$1" + shift + + ARGV="" + EXT_ARGV="" + HAVE_EXT_ARGV="" + OPT_ERRS=0 + OPT_VERSION="" + OPT_HELP="" + OPT_ASK_PASS="" + PO_DIR="$PT_TMPDIR/po" + + if [ ! -d "$PO_DIR" ]; then + mkdir "$PO_DIR" + if [ $? -ne 0 ]; then + echo "Cannot mkdir $PO_DIR" >&2 + exit 1 + fi + fi + + rm -rf "$PO_DIR"/* + if [ $? -ne 0 ]; then + echo "Cannot rm -rf $PO_DIR/*" >&2 + exit 1 + fi + + _parse_pod "$file" # Parse POD into program option (po) spec files + _eval_po # Eval po into existence with default values + + if [ $# -ge 2 ] && [ "$1" = "--config" ]; then + shift # --config + local user_config_files="$1" + shift # that ^ + local IFS="," + for user_config_file in $user_config_files; do + _parse_config_files "$user_config_file" + done + else + _parse_config_files "/etc/percona-toolkit/percona-toolkit.conf" "/etc/percona-toolkit/$TOOL.conf" + if [ "${HOME:-}" ]; then + _parse_config_files "$HOME/.percona-toolkit.conf" "$HOME/.$TOOL.conf" + fi + fi + + _parse_command_line "${@:-""}" +} + +_parse_pod() { + local file="$1" + + PO_FILE="$file" PO_DIR="$PO_DIR" perl -e ' + $/ = ""; + my $file = $ENV{PO_FILE}; + open my $fh, "<", $file or die "Cannot open $file: $!"; + while ( defined(my $para = <$fh>) ) { + next unless $para =~ m/^=head1 OPTIONS/; + while ( defined(my $para = <$fh>) ) { + last if $para =~ m/^=head1/; + chomp; + if ( $para =~ m/^=item --(\S+)/ ) { + my $opt = $1; + my $file = "$ENV{PO_DIR}/$opt"; + open my $opt_fh, ">", $file or die "Cannot open $file: $!"; + print $opt_fh "long:$opt\n"; + $para = <$fh>; + chomp; + if ( $para =~ m/^[a-z ]+:/ ) { + map { + chomp; + my ($attrib, $val) = split(/: /, $_); + print $opt_fh "$attrib:$val\n"; + } split(/; /, $para); + $para = <$fh>; + chomp; + } + my ($desc) = $para =~ m/^([^?.]+)/; + print $opt_fh "desc:$desc.\n"; + close $opt_fh; + } + } + last; + } + ' +} + +_eval_po() { + local IFS=":" + for opt_spec in "$PO_DIR"/*; do + local opt="" + local default_val="" + local neg=0 + local size=0 + while read key val; do + case "$key" in + long) + opt=$(echo $val | sed 's/-/_/g' | tr '[:lower:]' '[:upper:]') + ;; + default) + default_val="$val" + ;; + "short form") + ;; + type) + [ "$val" = "size" ] && size=1 + ;; + desc) + ;; + negatable) + if [ "$val" = "yes" ]; then + neg=1 + fi + ;; + *) + echo "Invalid attribute in $opt_spec: $line" >&2 + exit 1 + esac + done < "$opt_spec" + + if [ -z "$opt" ]; then + echo "No long attribute in option spec $opt_spec" >&2 + exit 1 + fi + + if [ $neg -eq 1 ]; then + if [ -z "$default_val" ] || [ "$default_val" != "yes" ]; then + echo "Option $opt_spec is negatable but not default: yes" >&2 + exit 1 + fi + fi + + if [ $size -eq 1 -a -n "$default_val" ]; then + default_val=$(size_to_bytes $default_val) + fi + + eval "OPT_${opt}"="$default_val" + done +} + +_parse_config_files() { + + for config_file in "${@:-""}"; do + test -f "$config_file" || continue + + while read config_opt; do + + echo "$config_opt" | grep '^[ ]*[^#]' >/dev/null 2>&1 || continue + + config_opt="$(echo "$config_opt" | sed -e 's/^ *//g' -e 's/ *$//g' -e 's/[ ]*=[ ]*/=/' -e 's/[ ]+#.*$//')" + + [ "$config_opt" 
= "" ] && continue + + echo "$config_opt" | grep -v 'version-check' >/dev/null 2>&1 || continue + + if ! [ "$HAVE_EXT_ARGV" ]; then + config_opt="--$config_opt" + fi + + _parse_command_line "$config_opt" + + done < "$config_file" + + HAVE_EXT_ARGV="" # reset for each file + + done +} + +_parse_command_line() { + local opt="" + local val="" + local next_opt_is_val="" + local opt_is_ok="" + local opt_is_negated="" + local real_opt="" + local required_arg="" + local spec="" + + for opt in "${@:-""}"; do + if [ "$opt" = "--" -o "$opt" = "----" ]; then + HAVE_EXT_ARGV=1 + continue + fi + if [ "$HAVE_EXT_ARGV" ]; then + if [ "$EXT_ARGV" ]; then + EXT_ARGV="$EXT_ARGV $opt" + else + EXT_ARGV="$opt" + fi + continue + fi + + if [ "$next_opt_is_val" ]; then + next_opt_is_val="" + if [ $# -eq 0 ] || [ $(expr "$opt" : "\-") -eq 1 ]; then + option_error "$real_opt requires a $required_arg argument" + continue + fi + val="$opt" + opt_is_ok=1 + else + if [ $(expr "$opt" : "\-") -eq 0 ]; then + if [ -z "$ARGV" ]; then + ARGV="$opt" + else + ARGV="$ARGV $opt" + fi + continue + fi + + real_opt="$opt" + + if $(echo $opt | grep '^--no[^-]' >/dev/null); then + local base_opt=$(echo $opt | sed 's/^--no//') + if [ -f "$PT_TMPDIR/po/$base_opt" ]; then + opt_is_negated=1 + opt="$base_opt" + else + opt_is_negated="" + opt=$(echo $opt | sed 's/^-*//') + fi + else + if $(echo $opt | grep '^--no-' >/dev/null); then + opt_is_negated=1 + opt=$(echo $opt | sed 's/^--no-//') + else + opt_is_negated="" + opt=$(echo $opt | sed 's/^-*//') + fi + fi + + if $(echo $opt | grep '^[a-z-][a-z-]*=' >/dev/null 2>&1); then + val="$(echo $opt | awk -F= '{print $2}')" + opt="$(echo $opt | awk -F= '{print $1}')" + fi + + if [ -f "$PT_TMPDIR/po/$opt" ]; then + spec="$PT_TMPDIR/po/$opt" + else + spec=$(grep "^short form:-$opt\$" "$PT_TMPDIR"/po/* | cut -d ':' -f 1) + if [ -z "$spec" ]; then + continue + fi + fi + + required_arg=$(cat "$spec" | awk -F: '/^type:/{print $2}') + if [ "$required_arg" ]; then + if [ "$val" ]; then + opt_is_ok=1 + else + next_opt_is_val=1 + fi + else + if [ "$val" ]; then + option_error "Option $real_opt does not take a value" + continue + fi + if [ "$opt_is_negated" ]; then + val="" + else + val="yes" + fi + opt_is_ok=1 + fi + fi + + if [ "$opt_is_ok" ]; then + opt=$(cat "$spec" | grep '^long:' | cut -d':' -f2 | sed 's/-/_/g' | tr '[:lower:]' '[:upper:]') + + if grep "^type:size" "$spec" >/dev/null; then + val=$(size_to_bytes $val) + fi + + eval "OPT_$opt"="'$val'" + + opt="" + val="" + next_opt_is_val="" + opt_is_ok="" + opt_is_negated="" + real_opt="" + required_arg="" + spec="" + fi + done +} + +size_to_bytes() { + local size="$1" + echo $size | perl -ne '%f=(B=>1, K=>1_024, M=>1_048_576, G=>1_073_741_824, T=>1_099_511_627_776); m/^(\d+)([kMGT])?/i; print $1 * $f{uc($2 || "B")};' +} + +# ########################################################################### +# End parse_options package +# ########################################################################### + +# ########################################################################### +# tmpdir package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/bash/tmpdir.sh +# t/lib/bash/tmpdir.sh +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### + + +set -u + +PT_TMPDIR="" + +mk_tmpdir() { + local dir="${1:-""}" + + if [ -n "$dir" ]; then + if [ ! 
-d "$dir" ]; then + mkdir "$dir" || die "Cannot make tmpdir $dir" + fi + PT_TMPDIR="$dir" + else + local tool="${0##*/}" + local pid="$$" + PT_TMPDIR=`mktemp -d -t "${tool}.${pid}.XXXXXX"` \ + || die "Cannot make secure tmpdir" + fi +} + +rm_tmpdir() { + if [ -n "$PT_TMPDIR" ] && [ -d "$PT_TMPDIR" ]; then + rm -rf "$PT_TMPDIR" + fi + PT_TMPDIR="" +} + +# ########################################################################### +# End tmpdir package +# ########################################################################### + +# ########################################################################### +# alt_cmds package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/bash/alt_cmds.sh +# t/lib/bash/alt_cmds.sh +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### + + +set -u + +_seq() { + local i="$1" + awk "BEGIN { for(i=1; i<=$i; i++) print i; }" +} + +_pidof() { + local cmd="$1" + if ! pidof "$cmd" 2>/dev/null; then + ps -eo pid,ucomm | awk -v comm="$cmd" '$2 == comm { print $1 }' + fi +} + +_lsof() { + local pid="$1" + if ! lsof -p $pid 2>/dev/null; then + /bin/ls -l /proc/$pid/fd 2>/dev/null + fi +} + + + +_which() { + if [ -x /usr/bin/which ]; then + /usr/bin/which "$1" 2>/dev/null | awk '{print $1}' + elif which which 1>/dev/null 2>&1; then + which "$1" 2>/dev/null | awk '{print $1}' + else + echo "$1" + fi +} + +# ########################################################################### +# End alt_cmds package +# ########################################################################### + +# ########################################################################### +# summary_common package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/bash/summary_common.sh +# t/lib/bash/summary_common.sh +# See https://github.com/percona/percona-toolkit for more information. +# ########################################################################### + + +set -u + +CMD_FILE="$( _which file 2>/dev/null )" +CMD_NM="$( _which nm 2>/dev/null )" +CMD_OBJDUMP="$( _which objdump 2>/dev/null )" + +get_nice_of_pid () { + local pid="$1" + local niceness="$(ps -p $pid -o nice | awk '$1 !~ /[^0-9]/ {print $1; exit}')" + + if [ -n "${niceness}" ]; then + echo $niceness + else + local tmpfile="$PT_TMPDIR/nice_through_c.tmp.c" + _d "Getting the niceness from ps failed, somehow. We are about to try this:" + cat < "$tmpfile" + +int main(void) { + int priority = getpriority(PRIO_PROCESS, $pid); + if ( priority == -1 && errno == ESRCH ) { + return 1; + } + else { + printf("%d\\n", priority); + return 0; + } +} + +EOC + local c_comp=$(_which gcc) + if [ -z "${c_comp}" ]; then + c_comp=$(_which cc) + fi + _d "$tmpfile: $( cat "$tmpfile" )" + _d "$c_comp -xc \"$tmpfile\" -o \"$tmpfile\" && eval \"$tmpfile\"" + $c_comp -xc "$tmpfile" -o "$tmpfile" 2>/dev/null && eval "$tmpfile" 2>/dev/null + if [ $? -ne 0 ]; then + echo "?" 
+ _d "Failed to get a niceness value for $pid" + fi + fi +} + +get_oom_of_pid () { + local pid="$1" + local oom_adj="" + + if [ -n "${pid}" -a -e /proc/cpuinfo ]; then + if [ -s "/proc/$pid/oom_score_adj" ]; then + oom_adj=$(cat "/proc/$pid/oom_score_adj" 2>/dev/null) + _d "For $pid, the oom value is $oom_adj, retreived from oom_score_adj" + else + oom_adj=$(cat "/proc/$pid/oom_adj" 2>/dev/null) + _d "For $pid, the oom value is $oom_adj, retreived from oom_adj" + fi + fi + + if [ -n "${oom_adj}" ]; then + echo "${oom_adj}" + else + echo "?" + _d "Can't find the oom value for $pid" + fi +} + +has_symbols () { + local executable="$(_which "$1")" + local has_symbols="" + + if [ "${CMD_FILE}" ] \ + && [ "$($CMD_FILE "${executable}" | grep 'not stripped' )" ]; then + has_symbols=1 + elif [ "${CMD_NM}" ] \ + || [ "${CMD_OBJDMP}" ]; then + if [ "${CMD_NM}" ] \ + && [ !"$("${CMD_NM}" -- "${executable}" 2>&1 | grep 'File format not recognized' )" ]; then + if [ -z "$( $CMD_NM -- "${executable}" 2>&1 | grep ': no symbols' )" ]; then + has_symbols=1 + fi + elif [ -z "$("${CMD_OBJDUMP}" -t -- "${executable}" | grep '^no symbols$' )" ]; then + has_symbols=1 + fi + fi + + if [ "${has_symbols}" ]; then + echo "Yes" + else + echo "No" + fi +} + +setup_data_dir () { + local existing_dir="$1" + local data_dir="" + if [ -z "$existing_dir" ]; then + mkdir "$PT_TMPDIR/data" || die "Cannot mkdir $PT_TMPDIR/data" + data_dir="$PT_TMPDIR/data" + else + if [ ! -d "$existing_dir" ]; then + mkdir "$existing_dir" || die "Cannot mkdir $existing_dir" + elif [ "$( ls -A "$existing_dir" )" ]; then + die "--save-samples directory isn't empty, halting." + fi + touch "$existing_dir/test" || die "Cannot write to $existing_dir" + rm "$existing_dir/test" || die "Cannot rm $existing_dir/test" + data_dir="$existing_dir" + fi + echo "$data_dir" +} + +get_var () { + local varname="$1" + local file="$2" + awk -v pattern="${varname}" '$1 == pattern { if (length($2)) { len = length($1); print substr($0, len+index(substr($0, len+1), $2)) } }' "${file}" +} + +# ########################################################################### +# End summary_common package +# ########################################################################### + +# ########################################################################### +# report_formatting package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/bash/report_formatting.sh +# t/lib/bash/report_formatting.sh +# See https://github.com/percona/percona-toolkit for more information. 
+# ########################################################################### + + +set -u + +POSIXLY_CORRECT=1 +export POSIXLY_CORRECT + +fuzzy_formula=' + rounded = 0; + if (fuzzy_var <= 10 ) { + rounded = 1; + } + factor = 1; + while ( rounded == 0 ) { + if ( fuzzy_var <= 50 * factor ) { + fuzzy_var = sprintf("%.0f", fuzzy_var / (5 * factor)) * 5 * factor; + rounded = 1; + } + else if ( fuzzy_var <= 100 * factor) { + fuzzy_var = sprintf("%.0f", fuzzy_var / (10 * factor)) * 10 * factor; + rounded = 1; + } + else if ( fuzzy_var <= 250 * factor) { + fuzzy_var = sprintf("%.0f", fuzzy_var / (25 * factor)) * 25 * factor; + rounded = 1; + } + factor = factor * 10; + }' + +fuzz () { + awk -v fuzzy_var="$1" "BEGIN { ${fuzzy_formula} print fuzzy_var;}" +} + +fuzzy_pct () { + local pct="$(awk -v one="$1" -v two="$2" 'BEGIN{ if (two > 0) { printf "%d", one/two*100; } else {print 0} }')"; + echo "$(fuzz "${pct}")%" +} + +section () { + local str="$1" + awk -v var="${str} _" 'BEGIN { + line = sprintf("# %-60s", var); + i = index(line, "_"); + x = substr(line, i); + gsub(/[_ \t]/, "#", x); + printf("%s%s\n", substr(line, 1, i-1), x); + }' +} + +NAME_VAL_LEN=12 +name_val () { + printf "%+*s | %s\n" "${NAME_VAL_LEN}" "$1" "$2" +} + +shorten() { + local num="$1" + local prec="${2:-2}" + local div="${3:-1024}" + + echo "$num" | awk -v prec="$prec" -v div="$div" ' + { + num = $1; + unit = num >= 1125899906842624 ? "P" \ + : num >= 1099511627776 ? "T" \ + : num >= 1073741824 ? "G" \ + : num >= 1048576 ? "M" \ + : num >= 1024 ? "k" \ + : ""; + while ( num >= div ) { + num /= div; + } + printf "%.*f%s", prec, num, unit; + } + ' +} + +group_concat () { + sed -e '{H; $!d;}' -e 'x' -e 's/\n[[:space:]]*\([[:digit:]]*\)[[:space:]]*/, \1x/g' -e 's/[[:space:]][[:space:]]*/ /g' -e 's/, //' "${1}" +} + +# ########################################################################### +# End report_formatting package +# ########################################################################### + +# ########################################################################### +# collect_system_info package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/bash/collect_system_info.sh +# t/lib/bash/collect_system_info.sh +# See https://github.com/percona/percona-toolkit for more information. 
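+#
+# In brief: collect_system_data writes one raw file per data source (sysctl,
+# dmesg_file, lspci_file, memory, ...) into the data directory and appends
+# "key value" pairs to $data_dir/summary, which the report code reads back
+# later with get_var.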
+# ########################################################################### + + + +set -u + +setup_commands () { + CMD_SYSCTL="$(_which sysctl 2>/dev/null )" + CMD_DMIDECODE="$(_which dmidecode 2>/dev/null )" + CMD_ZONENAME="$(_which zonename 2>/dev/null )" + CMD_DMESG="$(_which dmesg 2>/dev/null )" + CMD_FILE="$(_which file 2>/dev/null )" + CMD_LSPCI="$(_which lspci 2>/dev/null )" + CMD_PRTDIAG="$(_which prtdiag 2>/dev/null )" + CMD_SMBIOS="$(_which smbios 2>/dev/null )" + CMD_GETENFORCE="$(_which getenforce 2>/dev/null )" + CMD_PRTCONF="$(_which prtconf 2>/dev/null )" + CMD_LVS="$(_which lvs 2>/dev/null)" + CMD_VGS="$(_which vgs 2>/dev/null)" + CMD_PRSTAT="$(_which prstat 2>/dev/null)" + CMD_ISAINFO="$(_which isainfo 2>/dev/null)" + CMD_TOP="$(_which top 2>/dev/null)" + CMD_ARCCONF="$( _which arcconf 2>/dev/null )" + CMD_HPACUCLI="$( _which hpacucli 2>/dev/null )" + CMD_MEGACLI64="$( _which MegaCli64 2>/dev/null )" + CMD_VMSTAT="$(_which vmstat 2>/dev/null)" + CMD_IP="$( _which ip 2>/dev/null )" + CMD_NETSTAT="$( _which netstat 2>/dev/null )" + CMD_PSRINFO="$( _which psrinfo 2>/dev/null )" + CMD_SWAPCTL="$( _which swapctl 2>/dev/null )" + CMD_LSB_RELEASE="$( _which lsb_release 2>/dev/null )" + CMD_ETHTOOL="$( _which ethtool 2>/dev/null )" + CMD_GETCONF="$( _which getconf 2>/dev/null )" + CMD_FIO_STATUS="$( _which fio-status 2>/dev/null )" +} + +collect_system_data () { local PTFUNCNAME=collect_system_data; + local data_dir="$1" + + if [ -r /var/log/dmesg -a -s /var/log/dmesg ]; then + cat "/var/log/dmesg" > "$data_dir/dmesg_file" + fi + + $CMD_SYSCTL -a > "$data_dir/sysctl" 2>/dev/null + + if [ "${CMD_LSPCI}" ]; then + $CMD_LSPCI > "$data_dir/lspci_file" 2>/dev/null + fi + + local platform="$(uname -s)" + echo "platform $platform" >> "$data_dir/summary" + echo "hostname $(uname -n)" >> "$data_dir/summary" + uptime >> "$data_dir/uptime" + + processor_info "$data_dir" + find_release_and_kernel "$platform" >> "$data_dir/summary" + cpu_and_os_arch "$platform" >> "$data_dir/summary" + find_virtualization "$platform" "$data_dir/dmesg_file" "$data_dir/lspci_file" >> "$data_dir/summary" + dmidecode_system_info >> "$data_dir/summary" + + if [ "${platform}" = "SunOS" -a "${CMD_ZONENAME}" ]; then + echo "zonename $($CMD_ZONENAME)" >> "$data_dir/summary" + fi + + if [ -x /lib/libc.so.6 ]; then + echo "compiler $(/lib/libc.so.6 | grep 'Compiled by' | cut -c13-)" >> "$data_dir/summary" + fi + + local rss=$(ps -eo rss 2>/dev/null | awk '/[0-9]/{total += $1 * 1024} END {print total}') + echo "rss ${rss}" >> "$data_dir/summary" + + [ "$CMD_DMIDECODE" ] && $CMD_DMIDECODE > "$data_dir/dmidecode" 2>/dev/null + + find_memory_stats "$platform" > "$data_dir/memory" + find_numa_stats > "$data_dir/numactl" + [ "$OPT_SUMMARIZE_MOUNTS" ] && mounted_fs_info "$platform" > "$data_dir/mounted_fs" + raid_controller "$data_dir/dmesg_file" "$data_dir/lspci_file" >> "$data_dir/summary" + + local controller="$(get_var raid_controller "$data_dir/summary")" + propietary_raid_controller "$data_dir/raid-controller" "$data_dir/summary" "$data_dir" "$controller" + + [ "${platform}" = "Linux" ] && linux_exclusive_collection "$data_dir" + + if [ "$CMD_IP" -a "$OPT_SUMMARIZE_NETWORK" ]; then + $CMD_IP -s link > "$data_dir/ip" + network_device_info "$data_dir/ip" > "$data_dir/network_devices" + fi + + [ "$CMD_SWAPCTL" ] && $CMD_SWAPCTL -s > "$data_dir/swapctl" + + if [ "$OPT_SUMMARIZE_PROCESSES" ]; then + top_processes > "$data_dir/processes" + notable_processes_info > "$data_dir/notable_procs" + + if [ "$CMD_VMSTAT" ]; then + 
touch "$data_dir/vmstat" + ( + $CMD_VMSTAT 1 $OPT_SLEEP > "$data_dir/vmstat" + ) & + fi + fi + + fio_status_minus_a "$data_dir/fusion-io_card" + + for file in $data_dir/*; do + [ "$file" = "vmstat" ] && continue + [ ! -s "$file" ] && rm "$file" + done +} + +fio_status_minus_a () { + local file="$1" + local full_output="${file}_original_output" + [ -z "$CMD_FIO_STATUS" ] && return; + $CMD_FIO_STATUS -a > "$full_output" + + cat <<'EOP' > "$PT_TMPDIR/fio_status_format.pl" + my $tmp_adapter; + while (<>) { + if ( /Fusion-io driver version:\s*(.+)/ ) { + print "driver_version $1" + } + next unless /^Adapter:(.+)/; + $tmp_adapter = $1; + last; + } + + $/ = "\nAdapter: "; + $_ = $tmp_adapter . "\n" . scalar(<>); + my @adapters; + do { + my ($adapter, $adapter_general) = /\s*(.+)\s*\n\s*(.+)/m; + $adapter =~ tr/ /:/; + $adapter .= "::" . scalar(@adapters); # To differentiate two adapters with the same name + push @adapters, $adapter; + my ($connected_modules) = /Connected \S+ modules?:\s*\n(.+?\n)\n/smg; + my @connected_modules = $connected_modules =~ /\s+([^:]+):.+\n/g; + + print "${adapter}_general $adapter_general"; + print "${adapter}_modules @connected_modules"; + + for my $module (@connected_modules) { + my ($rest, $attached, $general, $firmware, $temperature, $media_status) = /( + ^ \s* $module \s+ (Attached[^\n]+) \n + \s+ ([^\n]+) \n # All the second line + .+? (Firmware\s+[^\n]+) \n + .+? (Internal \s+ temperature:[^\n]+) \n + .+? ((?:Media | Reserve \s+ space) \s+ status:[^\n]+) \n + .+?(?:\n\n|\z) + )/xsm; + my ($pbw) = $rest =~ /.+?(Rated \s+ PBW:[^\n]+)/xsm; + print "${adapter}_${module}_attached_as $attached"; + print "${adapter}_${module}_general $general"; + print "${adapter}_${module}_firmware $firmware"; + print "${adapter}_${module}_media_status $media_status"; + print "${adapter}_${module}_temperature $temperature"; + print "${adapter}_${module}_rated_pbw $pbw" if $pbw; + } + } while <>; + + print "adapters @adapters\n"; + + exit; +EOP + + perl -wln "$PT_TMPDIR/fio_status_format.pl" "$full_output" > "$file" +} + +linux_exclusive_collection () { local PTFUNCNAME=linux_exclusive_collection; + local data_dir="$1" + + echo "threading $(getconf GNU_LIBPTHREAD_VERSION)" >> "$data_dir/summary" + + local getenforce="" + [ "$CMD_GETENFORCE" ] && getenforce="$($CMD_GETENFORCE 2>&1)" + echo "getenforce ${getenforce:-"No SELinux detected"}" >> "$data_dir/summary" + + if [ -e "$data_dir/sysctl" ]; then + echo "swappiness $(awk '/vm.swappiness/{print $3}' "$data_dir/sysctl")" >> "$data_dir/summary" + + local dirty_ratio="$(awk '/vm.dirty_ratio/{print $3}' "$data_dir/sysctl")" + local dirty_bg_ratio="$(awk '/vm.dirty_background_ratio/{print $3}' "$data_dir/sysctl")" + if [ "$dirty_ratio" -a "$dirty_bg_ratio" ]; then + echo "dirtypolicy $dirty_ratio, $dirty_bg_ratio" >> "$data_dir/summary" + fi + + local dirty_bytes="$(awk '/vm.dirty_bytes/{print $3}' "$data_dir/sysctl")" + if [ "$dirty_bytes" ]; then + echo "dirtystatus $(awk '/vm.dirty_bytes/{print $3}' "$data_dir/sysctl"), $(awk '/vm.dirty_background_bytes/{print $3}' "$data_dir/sysctl")" >> "$data_dir/summary" + fi + fi + + if [ -e "$data_dir/numactl" ]; then + echo "numa-available $(awk '/available/{print $2}' "$data_dir/numactl")" >> "$data_dir/summary" + echo "numa-policy $(awk '/policy/{print $2}' "$data_dir/numactl")" >> "$data_dir/summary" + echo "numa-preferred-node $(awk '/preferred node/{print $3}' "$data_dir/numactl")" >> "$data_dir/summary" + fi + + schedulers_and_queue_size "$data_dir/summary" > "$data_dir/partitioning" + + 
for file in dentry-state file-nr inode-nr; do + echo "${file} $(cat /proc/sys/fs/${file} 2>&1)" >> "$data_dir/summary" + done + + [ "$CMD_LVS" -a -x "$CMD_LVS" ] && $CMD_LVS 1>"$data_dir/lvs" 2>"$data_dir/lvs.stderr" + + [ "$CMD_VGS" -a -x "$CMD_VGS" ] && \ + $CMD_VGS -o vg_name,vg_size,vg_free 2>/dev/null > "$data_dir/vgs" + + [ "$CMD_NETSTAT" -a "$OPT_SUMMARIZE_NETWORK" ] && \ + $CMD_NETSTAT -antp > "$data_dir/netstat" 2>/dev/null +} + +network_device_info () { + local ip_minus_s_file="$1" + + if [ "$CMD_ETHTOOL" ]; then + local tempfile="$PT_TMPDIR/ethtool_output_temp" + for device in $( awk '/^[1-9]/{ print $2 }' "$ip_minus_s_file" \ + | awk -F: '{print $1}' \ + | grep -v '^lo\|^in\|^gr' \ + | sort -u ); do + ethtool $device > "$tempfile" 2>/dev/null + + if ! grep -q 'No data available' "$tempfile"; then + cat "$tempfile" + fi + done + fi +} + +find_release_and_kernel () { local PTFUNCNAME=find_release_and_kernel; + local platform="$1" + + local kernel="" + local release="" + if [ "${platform}" = "Linux" ]; then + kernel="$(uname -r)" + if [ -e /etc/fedora-release ]; then + release=$(cat /etc/fedora-release); + elif [ -e /etc/redhat-release ]; then + release=$(cat /etc/redhat-release); + elif [ -e /etc/system-release ]; then + release=$(cat /etc/system-release); + elif [ "$CMD_LSB_RELEASE" ]; then + release="$($CMD_LSB_RELEASE -ds) ($($CMD_LSB_RELEASE -cs))" + elif [ -e /etc/lsb-release ]; then + release=$(grep DISTRIB_DESCRIPTION /etc/lsb-release |awk -F'=' '{print $2}' |sed 's#"##g'); + elif [ -e /etc/debian_version ]; then + release="Debian-based version $(cat /etc/debian_version)"; + if [ -e /etc/apt/sources.list ]; then + local code=` awk '/^deb/ {print $3}' /etc/apt/sources.list \ + | awk -F/ '{print $1}'| awk 'BEGIN {FS="|"}{print $1}' \ + | sort | uniq -c | sort -rn | head -n1 | awk '{print $2}'` + release="${release} (${code})" + fi + elif ls /etc/*release >/dev/null 2>&1; then + if grep -q DISTRIB_DESCRIPTION /etc/*release; then + release=$(grep DISTRIB_DESCRIPTION /etc/*release | head -n1); + else + release=$(cat /etc/*release | head -n1); + fi + fi + elif [ "${platform}" = "FreeBSD" ] \ + || [ "${platform}" = "NetBSD" ] \ + || [ "${platform}" = "OpenBSD" ]; then + release="$(uname -r)" + kernel="$($CMD_SYSCTL -n "kern.osrevision")" + elif [ "${platform}" = "SunOS" ]; then + release="$(head -n1 /etc/release)" + if [ -z "${release}" ]; then + release="$(uname -r)" + fi + kernel="$(uname -v)" + fi + echo "kernel $kernel" + echo "release $release" +} + +cpu_and_os_arch () { local PTFUNCNAME=cpu_and_os_arch; + local platform="$1" + + local CPU_ARCH='32-bit' + local OS_ARCH='32-bit' + if [ "${platform}" = "Linux" ]; then + if grep -q ' lm ' /proc/cpuinfo; then + CPU_ARCH='64-bit' + fi + elif [ "${platform}" = "FreeBSD" ] || [ "${platform}" = "NetBSD" ]; then + if $CMD_SYSCTL "hw.machine_arch" | grep -v 'i[36]86' >/dev/null; then + CPU_ARCH='64-bit' + fi + elif [ "${platform}" = "OpenBSD" ]; then + if $CMD_SYSCTL "hw.machine" | grep -v 'i[36]86' >/dev/null; then + CPU_ARCH='64-bit' + fi + elif [ "${platform}" = "SunOS" ]; then + if $CMD_ISAINFO -b | grep 64 >/dev/null ; then + CPU_ARCH="64-bit" + fi + fi + if [ -z "$CMD_FILE" ]; then + if [ "$CMD_GETCONF" ] && $CMD_GETCONF LONG_BIT 1>/dev/null 2>&1; then + OS_ARCH="$($CMD_GETCONF LONG_BIT 2>/dev/null)-bit" + else + OS_ARCH='N/A' + fi + elif $CMD_FILE /bin/sh | grep '64-bit' >/dev/null; then + OS_ARCH='64-bit' + fi + + echo "CPU_ARCH $CPU_ARCH" + echo "OS_ARCH $OS_ARCH" +} + +find_virtualization () { local 
PTFUNCNAME=find_virtualization; + local platform="$1" + local dmesg_file="$2" + local lspci_file="$3" + + local tempfile="$PT_TMPDIR/find_virtualziation.tmp" + + local virt="" + if [ -s "$dmesg_file" ]; then + virt="$(find_virtualization_dmesg "$dmesg_file")" + fi + if [ -z "${virt}" ] && [ -s "$lspci_file" ]; then + if grep -qi "virtualbox" "$lspci_file" ; then + virt="VirtualBox" + elif grep -qi "vmware" "$lspci_file" ; then + virt="VMWare" + fi + elif [ "${platform}" = "FreeBSD" ]; then + if ps -o stat | grep J ; then + virt="FreeBSD Jail" + fi + elif [ "${platform}" = "SunOS" ]; then + if [ "$CMD_PRTDIAG" ] && $CMD_PRTDIAG > "$tempfile" 2>/dev/null; then + virt="$(find_virtualization_generic "$tempfile" )" + elif [ "$CMD_SMBIOS" ] && $CMD_SMBIOS > "$tempfile" 2>/dev/null; then + virt="$(find_virtualization_generic "$tempfile" )" + fi + elif [ -e /proc/user_beancounters ]; then + virt="OpenVZ/Virtuozzo" + fi + echo "virt ${virt:-"No virtualization detected"}" +} + +find_virtualization_generic() { local PTFUNCNAME=find_virtualization_generic; + local file="$1" + if grep -i -e "virtualbox" "$file" >/dev/null; then + echo "VirtualBox" + elif grep -i -e "vmware" "$file" >/dev/null; then + echo "VMWare" + fi +} + +find_virtualization_dmesg () { local PTFUNCNAME=find_virtualization_dmesg; + local file="$1" + if grep -qi -e "vmware" -e "vmxnet" -e 'paravirtualized kernel on vmi' "${file}"; then + echo "VMWare"; + elif grep -qi -e 'paravirtualized kernel on xen' -e 'Xen virtual console' "${file}"; then + echo "Xen"; + elif grep -qi "qemu" "${file}"; then + echo "QEmu"; + elif grep -qi 'paravirtualized kernel on KVM' "${file}"; then + echo "KVM"; + elif grep -q "VBOX" "${file}"; then + echo "VirtualBox"; + elif grep -qi 'hd.: Virtual .., ATA.*drive' "${file}"; then + echo "Microsoft VirtualPC"; + fi +} + +dmidecode_system_info () { local PTFUNCNAME=dmidecode_system_info; + if [ "${CMD_DMIDECODE}" ]; then + local vendor="$($CMD_DMIDECODE -s "system-manufacturer" 2>/dev/null | sed 's/ *$//g')" + echo "vendor ${vendor}" + if [ "${vendor}" ]; then + local product="$($CMD_DMIDECODE -s "system-product-name" 2>/dev/null | sed 's/ *$//g')" + local version="$($CMD_DMIDECODE -s "system-version" 2>/dev/null | sed 's/ *$//g')" + local chassis="$($CMD_DMIDECODE -s "chassis-type" 2>/dev/null | sed 's/ *$//g')" + local servicetag="$($CMD_DMIDECODE -s "system-serial-number" 2>/dev/null | sed 's/ *$//g')" + local system="${vendor}; ${product}; v${version} (${chassis})" + + echo "system ${system}" + echo "servicetag ${servicetag:-"Not found"}" + fi + fi +} + +find_memory_stats () { local PTFUNCNAME=find_memory_stats; + local platform="$1" + + if [ "${platform}" = "Linux" ]; then + free -b + cat /proc/meminfo + elif [ "${platform}" = "SunOS" ]; then + $CMD_PRTCONF | awk -F: '/Memory/{print $2}' + fi +} + +find_numa_stats () { local PTFUNCNAME=find_numa_stats; + if command -v numactl >/dev/null; then + numactl --hardware + numactl --show + fi +} + +mounted_fs_info () { local PTFUNCNAME=mounted_fs_info; + local platform="$1" + + if [ "${platform}" != "SunOS" ]; then + local cmd="df -h" + if [ "${platform}" = "Linux" ]; then + cmd="df -h -P" + fi + $cmd | sort > "$PT_TMPDIR/mounted_fs_info.tmp" + mount | sort | join "$PT_TMPDIR/mounted_fs_info.tmp" - + fi +} + +raid_controller () { local PTFUNCNAME=raid_controller; + local dmesg_file="$1" + local lspci_file="$2" + + local tempfile="$PT_TMPDIR/raid_controller.tmp" + + local controller="" + if [ -s "$lspci_file" ]; then + controller="$(find_raid_controller_lspci 
"$lspci_file")" + fi + if [ -z "${controller}" ] && [ -s "$dmesg_file" ]; then + controller="$(find_raid_controller_dmesg "$dmesg_file")" + fi + + echo "raid_controller ${controller:-"No RAID controller detected"}" +} + +find_raid_controller_dmesg () { local PTFUNCNAME=find_raid_controller_dmesg; + local file="$1" + local pat='scsi[0-9].*: .*' + if grep -qi "${pat}megaraid" "${file}"; then + echo 'LSI Logic MegaRAID SAS' + elif grep -q "Fusion MPT SAS" "${file}"; then + echo 'Fusion-MPT SAS' + elif grep -q "${pat}aacraid" "${file}"; then + echo 'AACRAID' + elif grep -q "${pat}3ware [0-9]* Storage Controller" "${file}"; then + echo '3Ware' + fi +} + +find_raid_controller_lspci () { local PTFUNCNAME=find_raid_controller_lspci; + local file="$1" + if grep -q "RAID bus controller: LSI Logic / Symbios Logic MegaRAID SAS" "${file}" \ + || grep -q "RAID bus controller: LSI Logic / Symbios Logic LSI MegaSAS" $file; then + echo 'LSI Logic MegaRAID SAS' + elif grep -q "Fusion-MPT SAS" "${file}"; then + echo 'Fusion-MPT SAS' + elif grep -q "RAID bus controller: LSI Logic / Symbios Logic Unknown" "${file}"; then + echo 'LSI Logic Unknown' + elif grep -q "RAID bus controller: Adaptec AAC-RAID" "${file}"; then + echo 'AACRAID' + elif grep -q "3ware [0-9]* Storage Controller" "${file}"; then + echo '3Ware' + elif grep -q "Hewlett-Packard Company Smart Array" "${file}"; then + echo 'HP Smart Array' + elif grep -q " RAID bus controller: " "${file}"; then + awk -F: '/RAID bus controller\:/ {print $3" "$5" "$6}' "${file}" + fi +} + +schedulers_and_queue_size () { local PTFUNCNAME=schedulers_and_queue_size; + local file="$1" + + local disks="$(ls /sys/block/ | grep -v -e ram -e loop -e 'fd[0-9]' | xargs echo)" + echo "internal::disks $disks" >> "$file" + + for disk in $disks; do + if [ -e "/sys/block/${disk}/queue/scheduler" ]; then + echo "internal::${disk} $(cat /sys/block/${disk}/queue/scheduler | grep -o '\[.*\]') $(cat /sys/block/${disk}/queue/nr_requests)" >> "$file" + fdisk -l "/dev/${disk}" 2>/dev/null + fi + done +} + +top_processes () { local PTFUNCNAME=top_processes; + if [ "$CMD_PRSTAT" ]; then + $CMD_PRSTAT | head + elif [ "$CMD_TOP" ]; then + local cmd="$CMD_TOP -bn 1" + if [ "${platform}" = "FreeBSD" ] \ + || [ "${platform}" = "NetBSD" ] \ + || [ "${platform}" = "OpenBSD" ]; then + cmd="$CMD_TOP -b -d 1" + fi + $cmd \ + | sed -e 's# *$##g' -e '/./{H;$!d;}' -e 'x;/PID/!d;' \ + | grep . \ + | head + fi +} + +notable_processes_info () { local PTFUNCNAME=notable_processes_info; + local format="%5s %+2d %s\n" + local sshd_pid=$(ps -eo pid,args | awk '$2 ~ /\/usr\/sbin\/sshd/ { print $1; exit }') + + echo " PID OOM COMMAND" + + if [ "$sshd_pid" ]; then + printf "$format" "$sshd_pid" "$(get_oom_of_pid $sshd_pid)" "sshd" + else + printf "%5s %3s %s\n" "?" "?" "sshd doesn't appear to be running" + fi + + local PTDEBUG="" + ps -eo pid,ucomm | grep '^[0-9]' | while read pid proc; do + [ "$sshd_pid" ] && [ "$sshd_pid" = "$pid" ] && continue + local oom="$(get_oom_of_pid $pid)" + if [ "$oom" ] && [ "$oom" != "?" 
] && [ "$oom" = "-17" ]; then + printf "$format" "$pid" "$oom" "$proc" + fi + done +} + +processor_info () { local PTFUNCNAME=processor_info; + local data_dir="$1" + if [ -f /proc/cpuinfo ]; then + cat /proc/cpuinfo > "$data_dir/proc_cpuinfo_copy" 2>/dev/null + elif [ "${platform}" = "SunOS" ]; then + $CMD_PSRINFO -v > "$data_dir/psrinfo_minus_v" + fi +} + +propietary_raid_controller () { local PTFUNCNAME=propietary_raid_controller; + local file="$1" + local variable_file="$2" + local data_dir="$3" + local controller="$4" + + notfound="" + if [ "${controller}" = "AACRAID" ]; then + if [ -z "$CMD_ARCCONF" ]; then + notfound="e.g. http://www.adaptec.com/en-US/support/raid/scsi_raid/ASR-2120S/" + elif $CMD_ARCCONF getconfig 1 > "$file" 2>/dev/null; then + echo "internal::raid_opt 1" >> "$variable_file" + fi + elif [ "${controller}" = "HP Smart Array" ]; then + if [ -z "$CMD_HPACUCLI" ]; then + notfound="your package repository or the manufacturer's website" + elif $CMD_HPACUCLI ctrl all show config > "$file" 2>/dev/null; then + echo "internal::raid_opt 2" >> "$variable_file" + fi + elif [ "${controller}" = "LSI Logic MegaRAID SAS" ]; then + if [ -z "$CMD_MEGACLI64" ]; then + notfound="your package repository or the manufacturer's website" + else + echo "internal::raid_opt 3" >> "$variable_file" + $CMD_MEGACLI64 -AdpAllInfo -aALL -NoLog > "$data_dir/lsi_megaraid_adapter_info.tmp" 2>/dev/null + $CMD_MEGACLI64 -AdpBbuCmd -GetBbuStatus -aALL -NoLog > "$data_dir/lsi_megaraid_bbu_status.tmp" 2>/dev/null + $CMD_MEGACLI64 -LdPdInfo -aALL -NoLog > "$data_dir/lsi_megaraid_devices.tmp" 2>/dev/null + fi + fi + + if [ "${notfound}" ]; then + echo "internal::raid_opt 0" >> "$variable_file" + echo " RAID controller software not found; try getting it from" > "$file" + echo " ${notfound}" >> "$file" + fi +} + +# ########################################################################### +# End collect_system_info package +# ########################################################################### + +# ########################################################################### +# report_system_info package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the GitHub repository at, +# lib/bash/report_system_info.sh +# t/lib/bash/report_system_info.sh +# See https://github.com/percona/percona-toolkit for more information. 
+# ########################################################################### + + +set -u + + +parse_proc_cpuinfo () { local PTFUNCNAME=parse_proc_cpuinfo; + local file="$1" + local virtual="$(grep -c ^processor "${file}")"; + local physical="$(grep 'physical id' "${file}" | sort -u | wc -l)"; + local cores="$(grep 'cpu cores' "${file}" | head -n 1 | cut -d: -f2)"; + + [ "${physical}" = "0" ] && physical="${virtual}" + [ -z "${cores}" ] && cores=0 + + cores=$((${cores} * ${physical})); + local htt="" + if [ ${cores} -gt 0 -a $cores -lt $virtual ]; then htt=yes; else htt=no; fi + + name_val "Processors" "physical = ${physical}, cores = ${cores}, virtual = ${virtual}, hyperthreading = ${htt}" + + awk -F: '/cpu MHz/{print $2}' "${file}" \ + | sort | uniq -c > "$PT_TMPDIR/parse_proc_cpuinfo_cpu.unq" + name_val "Speeds" "$(group_concat "$PT_TMPDIR/parse_proc_cpuinfo_cpu.unq")" + + awk -F: '/model name/{print $2}' "${file}" \ + | sort | uniq -c > "$PT_TMPDIR/parse_proc_cpuinfo_model.unq" + name_val "Models" "$(group_concat "$PT_TMPDIR/parse_proc_cpuinfo_model.unq")" + + awk -F: '/cache size/{print $2}' "${file}" \ + | sort | uniq -c > "$PT_TMPDIR/parse_proc_cpuinfo_cache.unq" + name_val "Caches" "$(group_concat "$PT_TMPDIR/parse_proc_cpuinfo_cache.unq")" +} + +parse_sysctl_cpu_freebsd() { local PTFUNCNAME=parse_sysctl_cpu_freebsd; + local file="$1" + [ -e "$file" ] || return; + local virtual="$(awk '/hw.ncpu/{print $2}' "$file")" + name_val "Processors" "virtual = ${virtual}" + name_val "Speeds" "$(awk '/hw.clockrate/{print $2}' "$file")" + name_val "Models" "$(awk -F: '/hw.model/{print substr($2, 2)}' "$file")" +} + +parse_sysctl_cpu_netbsd() { local PTFUNCNAME=parse_sysctl_cpu_netbsd; + local file="$1" + + [ -e "$file" ] || return + + local virtual="$(awk '/hw.ncpu /{print $NF}' "$file")" + name_val "Processors" "virtual = ${virtual}" + name_val "Models" "$(awk -F: '/hw.model/{print $3}' "$file")" +} + +parse_sysctl_cpu_openbsd() { local PTFUNCNAME=parse_sysctl_cpu_openbsd; + local file="$1" + + [ -e "$file" ] || return + + name_val "Processors" "$(awk -F= '/hw.ncpu=/{print $2}' "$file")" + name_val "Speeds" "$(awk -F= '/hw.cpuspeed/{print $2}' "$file")" + name_val "Models" "$(awk -F= '/hw.model/{print substr($2, 1, index($2, " "))}' "$file")" +} + +parse_psrinfo_cpus() { local PTFUNCNAME=parse_psrinfo_cpus; + local file="$1" + + [ -e "$file" ] || return + + name_val "Processors" "$(grep -c 'Status of .* processor' "$file")" + awk '/operates at/ { + start = index($0, " at ") + 4; + end = length($0) - start - 4 + print substr($0, start, end); + }' "$file" | sort | uniq -c > "$PT_TMPDIR/parse_psrinfo_cpus.tmp" + name_val "Speeds" "$(group_concat "$PT_TMPDIR/parse_psrinfo_cpus.tmp")" +} + +parse_free_minus_b () { local PTFUNCNAME=parse_free_minus_b; + local file="$1" + + [ -e "$file" ] || return + + local physical=$(awk '/Mem:/{print $3}' "${file}") + local swap_alloc=$(awk '/Swap:/{print $2}' "${file}") + local swap_used=$(awk '/Swap:/{print $3}' "${file}") + local virtual=$(shorten $(($physical + $swap_used)) 1) + + name_val "Total" $(shorten $(awk '/Mem:/{print $2}' "${file}") 1) + name_val "Free" $(shorten $(awk '/Mem:/{print $4}' "${file}") 1) + name_val "Used" "physical = $(shorten ${physical} 1), swap allocated = $(shorten ${swap_alloc} 1), swap used = $(shorten ${swap_used} 1), virtual = ${virtual}" + name_val "Shared" $(shorten $(awk '/Mem:/{print $5}' "${file}") 1) + name_val "Buffers" $(shorten $(awk '/Mem:/{print $6}' "${file}") 1) + name_val "Caches" $(shorten $(awk '/Mem:/{print 
$7}' "${file}") 1) + name_val "Dirty" "$(awk '/Dirty:/ {print $2, $3}' "${file}")" +} + +parse_memory_sysctl_freebsd() { local PTFUNCNAME=parse_memory_sysctl_freebsd; + local file="$1" + + [ -e "$file" ] || return + + local physical=$(awk '/hw.realmem:/{print $2}' "${file}") + local mem_hw=$(awk '/hw.physmem:/{print $2}' "${file}") + local mem_used=$(awk ' + /hw.physmem/ { mem_hw = $2; } + /vm.stats.vm.v_inactive_count/ { mem_inactive = $2; } + /vm.stats.vm.v_cache_count/ { mem_cache = $2; } + /vm.stats.vm.v_free_count/ { mem_free = $2; } + /hw.pagesize/ { pagesize = $2; } + END { + mem_inactive *= pagesize; + mem_cache *= pagesize; + mem_free *= pagesize; + print mem_hw - mem_inactive - mem_cache - mem_free; + } + ' "$file"); + name_val "Total" $(shorten ${mem_hw} 1) + name_val "Virtual" $(shorten ${physical} 1) + name_val "Used" $(shorten ${mem_used} 1) +} + +parse_memory_sysctl_netbsd() { local PTFUNCNAME=parse_memory_sysctl_netbsd; + local file="$1" + local swapctl_file="$2" + + [ -e "$file" -a -e "$swapctl_file" ] || return + + local swap_mem="$(awk '{print $2*512}' "$swapctl_file")" + name_val "Total" $(shorten "$(awk '/hw.physmem /{print $NF}' "$file")" 1) + name_val "User" $(shorten "$(awk '/hw.usermem /{print $NF}' "$file")" 1) + name_val "Swap" $(shorten ${swap_mem} 1) +} + +parse_memory_sysctl_openbsd() { local PTFUNCNAME=parse_memory_sysctl_openbsd; + local file="$1" + local swapctl_file="$2" + + [ -e "$file" -a -e "$swapctl_file" ] || return + + local swap_mem="$(awk '{print $2*512}' "$swapctl_file")" + name_val "Total" $(shorten "$(awk -F= '/hw.physmem/{print $2}' "$file")" 1) + name_val "User" $(shorten "$(awk -F= '/hw.usermem/{print $2}' "$file")" 1) + name_val "Swap" $(shorten ${swap_mem} 1) +} + +parse_dmidecode_mem_devices () { local PTFUNCNAME=parse_dmidecode_mem_devices; + local file="$1" + + [ -e "$file" ] || return + + echo " Locator Size Speed Form Factor Type Type Detail" + echo " ========= ======== ================= ============= ============= ===========" + sed -e '/./{H;$!d;}' \ + -e 'x;/Memory Device\n/!d;' \ + -e 's/: /:/g' \ + -e 's//}/g' \ + -e 's/[ \t]*\n/\n/g' \ + "${file}" \ + | awk -F: '/Size|Type|Form.Factor|Type.Detail|^[\t ]+Locator|^[\t ]+Speed/{printf("|%s", $2)}/^$/{print}' \ + | sed '/^$/d' \ + | sed -e 's/No Module Installed/{EMPTY}/' \ + | sort \ + | awk -F'|' '{printf(" %-9s %-8s %-17s %-13s %-13s %-8s\n", $4, $2, $7, $3, $5, $6);}' +} + +parse_numactl () { local PTFUNCNAME=parse_numactl; + local file="$1" + + [ -e "$file" ] || return + + echo " Node Size Free CPUs" + echo " ==== ==== ==== ====" + + sed -n -e 's/node /node/g' \ + -e '/node[[:digit:]]/p' \ + "${file}" \ + | sort -r \ + | awk '$1 == cnode { + if (NF > 4) { for(i=3;i<=NF;i++){printf("%s ", $i)} printf "\n" } + else { printf("%-12s", $3" "$4); } + } + $1 != cnode { cnode = $1; printf(" %-8s", $1); printf("%-12s", $3" "$4); }' + + echo +} + +parse_ip_s_link () { local PTFUNCNAME=parse_ip_s_link; + local file="$1" + + [ -e "$file" ] || return + + echo " interface rx_bytes rx_packets rx_errors tx_bytes tx_packets tx_errors" + echo " ========= ========= ========== ========== ========== ========== ==========" + + awk "/^[1-9][0-9]*:/ { + save[\"iface\"] = substr(\$2, 1, index(\$2, \":\") - 1); + new = 1; + } + \$0 !~ /[^0-9 ]/ { + if ( new == 1 ) { + new = 0; + fuzzy_var = \$1; ${fuzzy_formula} save[\"bytes\"] = fuzzy_var; + fuzzy_var = \$2; ${fuzzy_formula} save[\"packs\"] = fuzzy_var; + fuzzy_var = \$3; ${fuzzy_formula} save[\"errs\"] = fuzzy_var; + } + else { + fuzzy_var = \$1; 
${fuzzy_formula} tx_bytes = fuzzy_var; + fuzzy_var = \$2; ${fuzzy_formula} tx_packets = fuzzy_var; + fuzzy_var = \$3; ${fuzzy_formula} tx_errors = fuzzy_var; + printf \" %-8s %10.0f %10.0f %10.0f %10.0f %10.0f %10.0f\\n\", save[\"iface\"], save[\"bytes\"], save[\"packs\"], save[\"errs\"], tx_bytes, tx_packets, tx_errors; + } + }" "$file" +} + +parse_ethtool () { + local file="$1" + + [ -e "$file" ] || return + + echo " Device Speed Duplex" + echo " ========= ========= =========" + + + awk ' + /^Settings for / { + device = substr($3, 1, index($3, ":") ? index($3, ":")-1 : length($3)); + device_names[device] = device; + } + /Speed:/ { devices[device ",speed"] = $2 } + /Duplex:/ { devices[device ",duplex"] = $2 } + END { + for ( device in device_names ) { + printf(" %-10s %-10s %-10s\n", + device, + devices[device ",speed"], + devices[device ",duplex"]); + } + } + ' "$file" + +} + +parse_netstat () { local PTFUNCNAME=parse_netstat; + local file="$1" + + [ -e "$file" ] || return + + echo " Connections from remote IP addresses" + awk '$1 ~ /^tcp/ && $5 ~ /^[1-9]/ { + print substr($5, 1, index($5, ":") - 1); + }' "${file}" | sort | uniq -c \ + | awk "{ + fuzzy_var=\$1; + ${fuzzy_formula} + printf \" %-15s %5d\\n\", \$2, fuzzy_var; + }" \ + | sort -n -t . -k 1,1 -k 2,2 -k 3,3 -k 4,4 + echo " Connections to local IP addresses" + awk '$1 ~ /^tcp/ && $5 ~ /^[1-9]/ { + print substr($4, 1, index($4, ":") - 1); + }' "${file}" | sort | uniq -c \ + | awk "{ + fuzzy_var=\$1; + ${fuzzy_formula} + printf \" %-15s %5d\\n\", \$2, fuzzy_var; + }" \ + | sort -n -t . -k 1,1 -k 2,2 -k 3,3 -k 4,4 + echo " Connections to top 10 local ports" + awk '$1 ~ /^tcp/ && $5 ~ /^[1-9]/ { + print substr($4, index($4, ":") + 1); + }' "${file}" | sort | uniq -c | sort -rn | head -n10 \ + | awk "{ + fuzzy_var=\$1; + ${fuzzy_formula} + printf \" %-15s %5d\\n\", \$2, fuzzy_var; + }" | sort + echo " States of connections" + awk '$1 ~ /^tcp/ { + print $6; + }' "${file}" | sort | uniq -c | sort -rn \ + | awk "{ + fuzzy_var=\$1; + ${fuzzy_formula} + printf \" %-15s %5d\\n\", \$2, fuzzy_var; + }" | sort +} + +parse_filesystems () { local PTFUNCNAME=parse_filesystems; + local file="$1" + local platform="$2" + + [ -e "$file" ] || return + + local spec="$(awk " + BEGIN { + device = 10; + fstype = 4; + options = 4; + } + /./ { + f_device = \$1; + f_fstype = \$10; + f_options = substr(\$11, 2, length(\$11) - 2); + if ( \"$2\" ~ /(Free|Open|Net)BSD/ ) { + f_fstype = substr(\$9, 2, length(\$9) - 2); + f_options = substr(\$0, index(\$0, \",\") + 2); + f_options = substr(f_options, 1, length(f_options) - 1); + } + if ( length(f_device) > device ) { + device=length(f_device); + } + if ( length(f_fstype) > fstype ) { + fstype=length(f_fstype); + } + if ( length(f_options) > options ) { + options=length(f_options); + } + } + END{ + print \"%-\" device \"s %5s %4s %-\" fstype \"s %-\" options \"s %s\"; + } + " "${file}")" + + awk " + BEGIN { + spec=\" ${spec}\\n\"; + printf spec, \"Filesystem\", \"Size\", \"Used\", \"Type\", \"Opts\", \"Mountpoint\"; + } + { + f_fstype = \$10; + f_options = substr(\$11, 2, length(\$11) - 2); + if ( \"$2\" ~ /(Free|Open|Net)BSD/ ) { + f_fstype = substr(\$9, 2, length(\$9) - 2); + f_options = substr(\$0, index(\$0, \",\") + 2); + f_options = substr(f_options, 1, length(f_options) - 1); + } + printf spec, \$1, \$2, \$5, f_fstype, f_options, \$6; + } + " "${file}" +} + +parse_fdisk () { local PTFUNCNAME=parse_fdisk; + local file="$1" + + [ -e "$file" -a -s "$file" ] || return + + awk ' + BEGIN { + format="%-12s %4s 
%10s %10s %18s\n"; + printf(format, "Device", "Type", "Start", "End", "Size"); + printf(format, "============", "====", "==========", "==========", "=================="); + } + /Disk.*bytes/ { + disk = substr($2, 1, length($2) - 1); + size = $5; + printf(format, disk, "Disk", "", "", size); + } + /Units/ { + units = $9; + } + /^\/dev/ { + if ( $2 == "*" ) { + start = $3; + end = $4; + } + else { + start = $2; + end = $3; + } + printf(format, $1, "Part", start, end, sprintf("%.0f", (end - start) * units)); + } + ' "${file}" +} + +parse_ethernet_controller_lspci () { local PTFUNCNAME=parse_ethernet_controller_lspci; + local file="$1" + + [ -e "$file" ] || return + + grep -i ethernet "${file}" | cut -d: -f3 | while read line; do + name_val "Controller" "${line}" + done +} + +parse_hpacucli () { local PTFUNCNAME=parse_hpacucli; + local file="$1" + [ -e "$file" ] || return + grep 'logicaldrive\|physicaldrive' "${file}" +} + +parse_arcconf () { local PTFUNCNAME=parse_arcconf; + local file="$1" + + [ -e "$file" ] || return + + local model="$(awk -F: '/Controller Model/{print $2}' "${file}")" + local chan="$(awk -F: '/Channel description/{print $2}' "${file}")" + local cache="$(awk -F: '/Installed memory/{print $2}' "${file}")" + local status="$(awk -F: '/Controller Status/{print $2}' "${file}")" + name_val "Specs" "$(echo "$model" | sed -e 's/ //'),${chan},${cache} cache,${status}" + + local battery="" + if grep -q "ZMM" "$file"; then + battery="$(grep -A2 'Controller ZMM Information' "$file" \ + | awk '/Status/ {s=$4} + END {printf "ZMM %s", s}')" + else + battery="$(grep -A5 'Controller Battery Info' "${file}" \ + | awk '/Capacity remaining/ {c=$4} + /Status/ {s=$3} + /Time remaining/ {t=sprintf("%dd%dh%dm", $7, $9, $11)} + END {printf("%d%%, %s remaining, %s", c, t, s)}')" + fi + name_val "Battery" "${battery}" + + echo + echo " LogicalDev Size RAID Disks Stripe Status Cache" + echo " ========== ========= ==== ===== ====== ======= =======" + for dev in $(awk '/Logical device number/{print $4}' "${file}"); do + sed -n -e "/^Logical device .* ${dev}$/,/^$\|^Logical device number/p" "${file}" \ + | awk ' + /Logical device name/ {d=$5} + /Size/ {z=$3 " " $4} + /RAID level/ {r=$4} + /Group [0-9]/ {g++} + /Stripe-unit size/ {p=$4 " " $5} + /Status of logical/ {s=$6} + /Write-cache mode.*Ena.*write-back/ {c="On (WB)"} + /Write-cache mode.*Ena.*write-thro/ {c="On (WT)"} + /Write-cache mode.*Disabled/ {c="Off"} + END { + printf(" %-10s %-9s %4d %5d %-6s %-7s %-7s\n", + d, z, r, g, p, s, c); + }' + done + + echo + echo " PhysiclDev State Speed Vendor Model Size Cache" + echo " ========== ======= ============= ======= ============ =========== =======" + + local tempresult="" + sed -n -e '/Physical Device information/,/^$/p' "${file}" \ + | awk -F: ' + /Device #[0-9]/ { + device=substr($0, index($0, "#")); + devicenames[device]=device; + } + /Device is a/ { + devices[device ",isa"] = substr($0, index($0, "is a") + 5); + } + /State/ { + devices[device ",state"] = substr($2, 2); + } + /Transfer Speed/ { + devices[device ",speed"] = substr($2, 2); + } + /Vendor/ { + devices[device ",vendor"] = substr($2, 2); + } + /Model/ { + devices[device ",model"] = substr($2, 2); + } + /Size/ { + devices[device ",size"] = substr($2, 2); + } + /Write Cache/ { + if ( $2 ~ /Enabled .write-back./ ) + devices[device ",cache"] = "On (WB)"; + else + if ( $2 ~ /Enabled .write-th/ ) + devices[device ",cache"] = "On (WT)"; + else + devices[device ",cache"] = "Off"; + } + END { + for ( device in devicenames ) { + if ( 
devices[device ",isa"] ~ /Hard drive/ ) { + printf(" %-10s %-7s %-13s %-7s %-12s %-11s %-7s\n", + devices[device ",isa"], + devices[device ",state"], + devices[device ",speed"], + devices[device ",vendor"], + devices[device ",model"], + devices[device ",size"], + devices[device ",cache"]); + } + } + }' +} + +parse_fusionmpt_lsiutil () { local PTFUNCNAME=parse_fusionmpt_lsiutil; + local file="$1" + echo + awk '/LSI.*Firmware/ { print " ", $0 }' "${file}" + grep . "${file}" | sed -n -e '/B___T___L/,$ {s/^/ /; p}' +} + +parse_lsi_megaraid_adapter_info () { local PTFUNCNAME=parse_lsi_megaraid_adapter_info; + local file="$1" + + [ -e "$file" ] || return + + local name="$(awk -F: '/Product Name/{print substr($2, 2)}' "${file}")"; + local int=$(awk '/Host Interface/{print $4}' "${file}"); + local prt=$(awk '/Number of Backend Port/{print $5}' "${file}"); + local bbu=$(awk '/^BBU :/{print $3}' "${file}"); + local mem=$(awk '/Memory Size/{print $4}' "${file}"); + local vdr=$(awk '/Virtual Drives/{print $4}' "${file}"); + local dvd=$(awk '/Degraded/{print $3}' "${file}"); + local phy=$(awk '/^ Disks/{print $3}' "${file}"); + local crd=$(awk '/Critical Disks/{print $4}' "${file}"); + local fad=$(awk '/Failed Disks/{print $4}' "${file}"); + + name_val "Model" "${name}, ${int} interface, ${prt} ports" + name_val "Cache" "${mem} Memory, BBU ${bbu}" +} + +parse_lsi_megaraid_bbu_status () { local PTFUNCNAME=parse_lsi_megaraid_bbu_status; + local file="$1" + + [ -e "$file" ] || return + + local charge=$(awk '/Relative State/{print $5}' "${file}"); + local temp=$(awk '/^Temperature/{print $2}' "${file}"); + local soh=$(awk '/isSOHGood:/{print $2}' "${file}"); + name_val "BBU" "${charge}% Charged, Temperature ${temp}C, isSOHGood=${soh}" +} + +format_lvs () { local PTFUNCNAME=format_lvs; + local file="$1" + if [ -e "$file" ]; then + grep -v "open failed" "$file" + else + echo "Unable to collect information"; + fi +} + +parse_lsi_megaraid_devices () { local PTFUNCNAME=parse_lsi_megaraid_devices; + local file="$1" + + [ -e "$file" ] || return + + echo + echo " PhysiclDev Type State Errors Vendor Model Size" + echo " ========== ==== ======= ====== ======= ============ ===========" + for dev in $(awk '/Device Id/{print $3}' "${file}"); do + sed -e '/./{H;$!d;}' -e "x;/Device Id: ${dev}/!d;" "${file}" \ + | awk ' + /Media Type/ {d=substr($0, index($0, ":") + 2)} + /PD Type/ {t=$3} + /Firmware state/ {s=$3} + /Media Error Count/ {me=$4} + /Other Error Count/ {oe=$4} + /Predictive Failure Count/ {pe=$4} + /Inquiry Data/ {v=$3; m=$4;} + /Raw Size/ {z=$3} + END { + printf(" %-10s %-4s %-7s %6s %-7s %-12s %-7s\n", + substr(d, 1, 10), t, s, me "/" oe "/" pe, v, m, z); + }' + done +} + +parse_lsi_megaraid_virtual_devices () { local PTFUNCNAME=parse_lsi_megaraid_virtual_devices; + local file="$1" + + [ -e "$file" ] || return + + echo + echo " VirtualDev Size RAID Level Disks SpnDpth Stripe Status Cache" + echo " ========== ========= ========== ===== ======= ====== ======= =========" + awk ' + /^Virtual (Drive|Disk):/ { + device = $3; + devicenames[device] = device; + } + /Number Of Drives/ { + devices[device ",numdisks"] = substr($0, index($0, ":") + 1); + } + /^Name/ { + devices[device ",name"] = substr($0, index($0, ":") + 1) > "" ? 
substr($0, index($0, ":") + 1) : "(no name)"; + } + /RAID Level/ { + devices[device ",primary"] = substr($3, index($3, "-") + 1, 1); + devices[device ",secondary"] = substr($4, index($4, "-") + 1, 1); + devices[device ",qualifier"] = substr($NF, index($NF, "-") + 1, 1); + } + /Span Depth/ { + devices[device ",spandepth"] = substr($2, index($2, ":") + 1); + } + /Number of Spans/ { + devices[device ",numspans"] = $4; + } + /^Size/ { + devices[device ",size"] = substr($0, index($0, ":") + 1); + } + /^State/ { + devices[device ",state"] = substr($0, index($0, ":") + 2); + } + /^Stripe? Size/ { + devices[device ",stripe"] = substr($0, index($0, ":") + 1); + } + /^Current Cache Policy/ { + devices[device ",wpolicy"] = $4 ~ /WriteBack/ ? "WB" : "WT"; + devices[device ",rpolicy"] = $5 ~ /ReadAheadNone/ ? "no RA" : "RA"; + } + END { + for ( device in devicenames ) { + raid = 0; + if ( devices[device ",primary"] == 1 ) { + raid = 1; + if ( devices[device ",secondary"] == 3 ) { + raid = 10; + } + } + else { + if ( devices[device ",primary"] == 5 ) { + raid = 5; + } + } + printf(" %-10s %-9s %-10s %5d %7s %6s %-7s %s\n", + device devices[device ",name"], + devices[device ",size"], + raid " (" devices[device ",primary"] "-" devices[device ",secondary"] "-" devices[device ",qualifier"] ")", + devices[device ",numdisks"], + devices[device ",spandepth"] "-" devices[device ",numspans"], + devices[device ",stripe"], devices[device ",state"], + devices[device ",wpolicy"] ", " devices[device ",rpolicy"]); + } + }' "${file}" +} + +format_vmstat () { local PTFUNCNAME=format_vmstat; + local file="$1" + + [ -e "$file" ] || return + + awk " + BEGIN { + format = \" %2s %2s %4s %4s %5s %5s %6s %6s %3s %3s %3s %3s %3s\n\"; + } + /procs/ { + print \" procs ---swap-- -----io---- ---system---- --------cpu--------\"; + } + /bo/ { + printf format, \"r\", \"b\", \"si\", \"so\", \"bi\", \"bo\", \"ir\", \"cs\", \"us\", \"sy\", \"il\", \"wa\", \"st\"; + } + \$0 !~ /r/ { + fuzzy_var = \$1; ${fuzzy_formula} r = fuzzy_var; + fuzzy_var = \$2; ${fuzzy_formula} b = fuzzy_var; + fuzzy_var = \$7; ${fuzzy_formula} si = fuzzy_var; + fuzzy_var = \$8; ${fuzzy_formula} so = fuzzy_var; + fuzzy_var = \$9; ${fuzzy_formula} bi = fuzzy_var; + fuzzy_var = \$10; ${fuzzy_formula} bo = fuzzy_var; + fuzzy_var = \$11; ${fuzzy_formula} ir = fuzzy_var; + fuzzy_var = \$12; ${fuzzy_formula} cs = fuzzy_var; + fuzzy_var = \$13; us = fuzzy_var; + fuzzy_var = \$14; sy = fuzzy_var; + fuzzy_var = \$15; il = fuzzy_var; + fuzzy_var = \$16; wa = fuzzy_var; + fuzzy_var = \$17; st = fuzzy_var; + printf format, r, b, si, so, bi, bo, ir, cs, us, sy, il, wa, st; + } + " "${file}" +} + +processes_section () { local PTFUNCNAME=processes_section; + local top_process_file="$1" + local notable_procs_file="$2" + local vmstat_file="$3" + local platform="$4" + + section "Top Processes" + cat "$top_process_file" + section "Notable Processes" + cat "$notable_procs_file" + if [ -e "$vmstat_file" ]; then + section "Simplified and fuzzy rounded vmstat (wait please)" + wait # For the process we forked that was gathering vmstat samples + if [ "${platform}" = "Linux" ]; then + format_vmstat "$vmstat_file" + else + cat "$vmstat_file" + fi + fi +} + +section_Processor () { + local platform="$1" + local data_dir="$2" + + section "Processor" + + if [ -e "$data_dir/proc_cpuinfo_copy" ]; then + parse_proc_cpuinfo "$data_dir/proc_cpuinfo_copy" + elif [ "${platform}" = "FreeBSD" ]; then + parse_sysctl_cpu_freebsd "$data_dir/sysctl" + elif [ "${platform}" = "NetBSD" ]; then + 
parse_sysctl_cpu_netbsd "$data_dir/sysctl" + elif [ "${platform}" = "OpenBSD" ]; then + parse_sysctl_cpu_openbsd "$data_dir/sysctl" + elif [ "${platform}" = "SunOS" ]; then + parse_psrinfo_cpus "$data_dir/psrinfo_minus_v" + fi +} + +section_Memory () { + local platform="$1" + local data_dir="$2" + + local name_val_len_orig=$NAME_VAL_LEN; + local NAME_VAL_LEN=14 + + section "Memory" + if [ "${platform}" = "Linux" ]; then + parse_free_minus_b "$data_dir/memory" + elif [ "${platform}" = "FreeBSD" ]; then + parse_memory_sysctl_freebsd "$data_dir/sysctl" + elif [ "${platform}" = "NetBSD" ]; then + parse_memory_sysctl_netbsd "$data_dir/sysctl" "$data_dir/swapctl" + elif [ "${platform}" = "OpenBSD" ]; then + parse_memory_sysctl_openbsd "$data_dir/sysctl" "$data_dir/swapctl" + elif [ "${platform}" = "SunOS" ]; then + name_val "Memory" "$(cat "$data_dir/memory")" + fi + + local rss=$( get_var "rss" "$data_dir/summary" ) + name_val "UsedRSS" "$(shorten ${rss} 1)" + + if [ "${platform}" = "Linux" ]; then + name_val "Swappiness" "$(get_var "swappiness" "$data_dir/summary")" + name_val "DirtyPolicy" "$(get_var "dirtypolicy" "$data_dir/summary")" + local dirty_status="$(get_var "dirtystatus" "$data_dir/summary")" + if [ -n "$dirty_status" ]; then + name_val "DirtyStatus" "$dirty_status" + fi + fi + + if [ -s "$data_dir/numactl" ]; then + name_val "Numa Nodes" "$(get_var "numa-available" "$data_dir/summary")" + name_val "Numa Policy" "$(get_var "numa-policy" "$data_dir/summary")" + name_val "Preferred Node" "$(get_var "numa-preferred-node" "$data_dir/summary")" + + parse_numactl "$data_dir/numactl" + fi + + local NAME_VAL_LEN=$name_val_len_orig; + + if [ -s "$data_dir/dmidecode" ]; then + parse_dmidecode_mem_devices "$data_dir/dmidecode" + fi +} + +parse_uptime () { + local file="$1" + + awk ' / up / { + printf substr($0, index($0, " up ")+4 ); + } + !/ up / { + printf $0; + } +' "$file" +} + +report_fio_minus_a () { + local file="$1" + + name_val "fio Driver" "$(get_var driver_version "$file")" + + local adapters="$( get_var "adapters" "$file" )" + for adapter in $( echo $adapters | awk '{for (i=1; i<=NF; i++) print $i;}' ); do + local adapter_for_output="$(echo "$adapter" | sed 's/::[0-9]*$//' | tr ':' ' ')" + name_val "$adapter_for_output" "$(get_var "${adapter}_general" "$file")" + + local modules="$(get_var "${adapter}_modules" "$file")" + for module in $( echo $modules | awk '{for (i=1; i<=NF; i++) print $i;}' ); do + local name_val_len_orig=$NAME_VAL_LEN; + local NAME_VAL_LEN=16 + name_val "$module" "$(get_var "${adapter}_${module}_attached_as" "$file")" + name_val "" "$(get_var "${adapter}_${module}_general" "$file")" + name_val "" "$(get_var "${adapter}_${module}_firmware" "$file")" + name_val "" "$(get_var "${adapter}_${module}_temperature" "$file")" + name_val "" "$(get_var "${adapter}_${module}_media_status" "$file")" + if [ "$(get_var "${adapter}_${module}_rated_pbw" "$file")" ]; then + name_val "" "$(get_var "${adapter}_${module}_rated_pbw" "$file")" + fi + local NAME_VAL_LEN=$name_val_len_orig; + done + done +} + +report_system_summary () { local PTFUNCNAME=report_system_summary; + local data_dir="$1" + + section "Percona Toolkit System Summary Report" + + + [ -e "$data_dir/summary" ] \ + || die "The data directory doesn't have a summary file, exiting." 
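+
+   # get_var pulls "name: value" pairs out of the files that the collect phase
+   # wrote under $data_dir; each name_val call below renders one aligned
+   # "Name | value" row of the report.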
+ + local platform="$(get_var "platform" "$data_dir/summary")" + name_val "Date" "`date -u +'%F %T UTC'` (local TZ: `date +'%Z %z'`)" + name_val "Hostname" "$(get_var hostname "$data_dir/summary")" + name_val "Uptime" "$(parse_uptime "$data_dir/uptime")" + + if [ "$(get_var "vendor" "$data_dir/summary")" ]; then + name_val "System" "$(get_var "system" "$data_dir/summary")"; + name_val "Service Tag" "$(get_var "servicetag" "$data_dir/summary")"; + fi + + name_val "Platform" "${platform}" + local zonename="$(get_var zonename "$data_dir/summary")"; + [ -n "${zonename}" ] && name_val "Zonename" "$zonename" + + name_val "Release" "$(get_var "release" "$data_dir/summary")" + name_val "Kernel" "$(get_var "kernel" "$data_dir/summary")" + + name_val "Architecture" "CPU = $(get_var "CPU_ARCH" "$data_dir/summary"), OS = $(get_var "OS_ARCH" "$data_dir/summary")" + + local threading="$(get_var threading "$data_dir/summary")" + local compiler="$(get_var compiler "$data_dir/summary")" + [ -n "$threading" ] && name_val "Threading" "$threading" + [ -n "$compiler" ] && name_val "Compiler" "$compiler" + + local getenforce="$(get_var getenforce "$data_dir/summary")" + [ -n "$getenforce" ] && name_val "SELinux" "${getenforce}"; + + name_val "Virtualized" "$(get_var "virt" "$data_dir/summary")" + + section_Processor "$platform" "$data_dir" + + section_Memory "$platform" "$data_dir" + + + if [ -s "$data_dir/fusion-io_card" ]; then + section "Fusion-io Card" + report_fio_minus_a "$data_dir/fusion-io_card" + fi + + if [ -s "$data_dir/mounted_fs" ]; then + section "Mounted Filesystems" + parse_filesystems "$data_dir/mounted_fs" "${platform}" + fi + + if [ "${platform}" = "Linux" ]; then + + section "Disk Schedulers And Queue Size" + local disks="$( get_var "internal::disks" "$data_dir/summary" )" + for disk in $disks; do + local scheduler="$( get_var "internal::${disk}" "$data_dir/summary" )" + name_val "${disk}" "${scheduler:-"UNREADABLE"}" + done + + section "Disk Partioning" + parse_fdisk "$data_dir/partitioning" + + section "Kernel Inode State" + for file in dentry-state file-nr inode-nr; do + name_val "${file}" "$(get_var "${file}" "$data_dir/summary")" + done + + section "LVM Volumes" + format_lvs "$data_dir/lvs" + section "LVM Volume Groups" + format_lvs "$data_dir/vgs" + fi + + section "RAID Controller" + local controller="$(get_var "raid_controller" "$data_dir/summary")" + name_val "Controller" "$controller" + local key="$(get_var "internal::raid_opt" "$data_dir/summary")" + case "$key" in + 0) + cat "$data_dir/raid-controller" + ;; + 1) + parse_arcconf "$data_dir/raid-controller" + ;; + 2) + parse_hpacucli "$data_dir/raid-controller" + ;; + 3) + [ -e "$data_dir/lsi_megaraid_adapter_info.tmp" ] && \ + parse_lsi_megaraid_adapter_info "$data_dir/lsi_megaraid_adapter_info.tmp" + [ -e "$data_dir/lsi_megaraid_bbu_status.tmp" ] && \ + parse_lsi_megaraid_bbu_status "$data_dir/lsi_megaraid_bbu_status.tmp" + if [ -e "$data_dir/lsi_megaraid_devices.tmp" ]; then + parse_lsi_megaraid_virtual_devices "$data_dir/lsi_megaraid_devices.tmp" + parse_lsi_megaraid_devices "$data_dir/lsi_megaraid_devices.tmp" + fi + ;; + esac + + if [ "${OPT_SUMMARIZE_NETWORK}" ]; then + if [ "${platform}" = "Linux" ]; then + section "Network Config" + if [ -s "$data_dir/lspci_file" ]; then + parse_ethernet_controller_lspci "$data_dir/lspci_file" + fi + if grep "net.ipv4.tcp_fin_timeout" "$data_dir/sysctl" > /dev/null 2>&1; then + name_val "FIN Timeout" "$(awk '/net.ipv4.tcp_fin_timeout/{print $NF}' "$data_dir/sysctl")" + name_val "Port Range" 
"$(awk '/net.ipv4.ip_local_port_range/{print $NF}' "$data_dir/sysctl")" + fi + fi + + + if [ -s "$data_dir/ip" ]; then + section "Interface Statistics" + parse_ip_s_link "$data_dir/ip" + fi + + if [ -s "$data_dir/network_devices" ]; then + section "Network Devices" + parse_ethtool "$data_dir/network_devices" + fi + + if [ "${platform}" = "Linux" -a -e "$data_dir/netstat" ]; then + section "Network Connections" + parse_netstat "$data_dir/netstat" + fi + fi + + [ "$OPT_SUMMARIZE_PROCESSES" ] && processes_section \ + "$data_dir/processes" \ + "$data_dir/notable_procs" \ + "$data_dir/vmstat" \ + "$platform" + + section "Memory management" + report_transparent_huge_pages + + section "The End" +} + +report_transparent_huge_pages () { + + if [ -f /sys/kernel/mm/transparent_hugepage/enabled ]; then + CONTENT_TRANSHP=$("$data_dir/collect.err" + fi + + report_system_summary "$data_dir" + + rm_tmpdir +} + +sigtrap() { local PTFUNCNAME=sigtrap; + warn "Caught signal, forcing exit" + rm_tmpdir + exit $EXIT_STATUS +} + +# Execute the program if it was not included from another file. This makes it +# possible to include without executing, and thus test. +if [ "${0##*/}" = "$TOOL" ] \ + || [ "${0##*/}" = "bash" -a "${_:-""}" = "$0" ]; then + + # Set up temporary dir. + mk_tmpdir + # Parse command line options. + parse_options "$0" "${@:-""}" + usage_or_errors "$0" + po_status=$? + rm_tmpdir + + if [ $po_status -ne 0 ]; then + exit $po_status + fi + + main "${@:-""}" +fi + + +# ############################################################################ +# Documentation +# ############################################################################ +:<<'DOCUMENTATION' +=pod + +=head1 NAME + +pt-summary - Summarize system information nicely. + +=head1 SYNOPSIS + +Usage: pt-summary + +pt-summary conveniently summarizes the status and configuration of a server. +It is not a tuning tool or diagnosis tool. It produces a report that is easy +to diff and can be pasted into emails without losing the formatting. This +tool works well on many types of Unix systems. + +Download and run: + + wget http://percona.com/get/pt-summary + bash ./pt-summary + +=head1 RISKS + +Percona Toolkit is mature, proven in the real world, and well tested, +but all database tools can pose a risk to the system and the database +server. Before using this tool, please: + +=over + +=item * Read the tool's documentation + +=item * Review the tool's known L<"BUGS"> + +=item * Test the tool on a non-production server + +=item * Backup your production server and verify the backups + +=back + +=head1 DESCRIPTION + +pt-summary runs a large variety of commands to inspect system status and +configuration, saves the output into files in a temporary directory, and +then runs Unix commands on these results to format them nicely. It works +best when executed as a privileged user, but will also work without privileges, +although some output might not be possible to generate without root. + +=head1 OUTPUT + +Many of the outputs from this tool are deliberately rounded to show their +magnitude but not the exact detail. This is called fuzzy-rounding. The idea is +that it doesn't matter whether a particular counter is 918 or 921; such a small +variation is insignificant, and only makes the output hard to compare to other +servers. Fuzzy-rounding rounds in larger increments as the input grows. It +begins by rounding to the nearest 5, then the nearest 10, nearest 25, and then +repeats by a factor of 10 larger (50, 100, 250), and so on, as the input grows. 
+
+The following is a simple report generated from a CentOS virtual machine,
+broken into sections with commentary following each section. Some long lines
+are reformatted for clarity when reading this documentation as a manual page in
+a terminal.
+
+ # Percona Toolkit System Summary Report ######################
+ Date | 2012-03-30 00:58:07 UTC (local TZ: EDT -0400)
+ Hostname | localhost.localdomain
+ Uptime | 20:58:06 up 1 day, 20 min, 1 user,
+ load average: 0.14, 0.18, 0.18
+ System | innotek GmbH; VirtualBox; v1.2 ()
+ Service Tag | 0
+ Platform | Linux
+ Release | CentOS release 5.5 (Final)
+ Kernel | 2.6.18-194.el5
+ Architecture | CPU = 32-bit, OS = 32-bit
+ Threading | NPTL 2.5
+ Compiler | GNU CC version 4.1.2 20080704 (Red Hat 4.1.2-48).
+ SELinux | Enforcing
+ Virtualized | VirtualBox
+
+This section shows the current date and time, and a synopsis of the server and
+operating system.
+
+ # Processor ##################################################
+ Processors | physical = 1, cores = 0, virtual = 1, hyperthreading = no
+ Speeds | 1x2510.626
+ Models | 1xIntel(R) Core(TM) i5-2400S CPU @ 2.50GHz
+ Caches | 1x6144 KB
+
+This section is derived from F</proc/cpuinfo>.
+
+ # Memory #####################################################
+ Total | 503.2M
+ Free | 29.0M
+ Used | physical = 474.2M, swap allocated = 1.0M,
+ swap used = 16.0k, virtual = 474.3M
+ Buffers | 33.9M
+ Caches | 262.6M
+ Dirty | 396 kB
+ UsedRSS | 201.9M
+ Swappiness | 60
+ DirtyPolicy | 40, 10
+ Locator Size Speed Form Factor Type Type Detail
+ ======= ==== ===== =========== ==== ===========
+
+Information about memory is gathered from C<free>. The Used statistic is the
+total of the rss sizes displayed by C<ps>. The Dirty statistic for the cached
+value comes from F</proc/meminfo>. On Linux, the swappiness settings are
+gathered from C<sysctl>. The final portion of this section is a table of the
+DIMMs, which comes from C<dmidecode>. In this example there is no output.
+
+ # Mounted Filesystems ########################################
+ Filesystem Size Used Type Opts Mountpoint
+ /dev/mapper/VolGroup00-LogVol00 15G 17% ext3 rw /
+ /dev/sda1 99M 13% ext3 rw /boot
+ tmpfs 252M 0% tmpfs rw /dev/shm
+
+The mounted filesystem section is a combination of information from C<mount>
+and C<df>. This section is skipped if you disable L<"--summarize-mounts">.
+
+ # Disk Schedulers And Queue Size #############################
+ dm-0 | UNREADABLE
+ dm-1 | UNREADABLE
+ hdc | [cfq] 128
+ md0 | UNREADABLE
+ sda | [cfq] 128
+
+The disk scheduler information is extracted from the F</sys> filesystem in
+Linux.
+
+ # Disk Partitioning ######################################
+ Device Type Start End Size
+ ============ ==== ========== ========== ==================
+ /dev/sda Disk 17179869184
+ /dev/sda1 Part 1 13 98703360
+ /dev/sda2 Part 14 2088 17059230720
+
+Information about disk partitioning comes from C<fdisk -l>.
+
+ # Kernel Inode State #########################################
+ dentry-state | 10697 8559 45 0 0 0
+ file-nr | 960 0 50539
+ inode-nr | 14059 8139
+
+These lines are from the files of the same name in the F</proc/sys/fs>
+directory on Linux. Read the C<proc> man page to learn about the meaning of
+these files on your system.
+
+ # LVM Volumes ################################################
+ LV VG Attr LSize Origin Snap% Move Log Copy% Convert
+ LogVol00 VolGroup00 -wi-ao 269.00G
+ LogVol01 VolGroup00 -wi-ao 9.75G
+
+This section shows the output of C<lvs>.
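As an aside on the Linux-specific sections above: the scheduler and queue-size values come straight from F</sys>, and the lookup is easy to reproduce. A minimal Go sketch, assuming a mounted /sys (this is not pt-summary's own code, which does the same thing in shell):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// read returns the trimmed content of a sysfs file, or "UNREADABLE" when
// the kernel exposes no value for the device, as in the report above.
func read(path string) string {
	data, err := os.ReadFile(path)
	if err != nil {
		return "UNREADABLE"
	}
	return strings.TrimSpace(string(data))
}

func main() {
	queues, _ := filepath.Glob("/sys/block/*/queue")
	for _, q := range queues {
		disk := filepath.Base(filepath.Dir(q))
		// scheduler reads like "noop deadline [cfq]"; nr_requests is the
		// queue size printed next to it in the report (e.g. "[cfq] 128").
		fmt.Printf("%10s | %s %s\n", disk,
			read(filepath.Join(q, "scheduler")),
			read(filepath.Join(q, "nr_requests")))
	}
}
```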
+
+ # RAID Controller ############################################
+ Controller | No RAID controller detected
+
+The tool can detect a variety of RAID controllers by examining C<lspci> and
+C<dmesg> information. If the controller software is installed on the system, in
+many cases it is able to execute status commands and show a summary of the RAID
+controller's status and configuration. If your system is not supported, please
+file a bug report.
+
+ # Network Config #############################################
+ Controller | Intel Corporation 82540EM Gigabit Ethernet Controller
+ FIN Timeout | 60
+ Port Range | 61000
+
+The network controllers attached to the system are detected from C<lspci>. The
+TCP/IP protocol configuration parameters are extracted from C<sysctl>. You can
+skip this section by disabling the L<"--summarize-network"> option.
+
+ # Interface Statistics #######################################
+ interface rx_bytes rx_packets rx_errors tx_bytes tx_packets tx_errors
+ ========= ======== ========== ========= ======== ========== =========
+ lo 60000000 12500 0 60000000 12500 0
+ eth0 15000000 80000 0 1500000 10000 0
+ sit0 0 0 0 0 0 0
+
+Interface statistics are gathered from C<ip -s link> and are fuzzy-rounded. The
+columns are received and transmitted bytes, packets, and errors. You can skip
+this section by disabling the L<"--summarize-network"> option.
+
+ # Network Connections ########################################
+ Connections from remote IP addresses
+ 127.0.0.1 2
+ Connections to local IP addresses
+ 127.0.0.1 2
+ Connections to top 10 local ports
+ 38346 1
+ 60875 1
+ States of connections
+ ESTABLISHED 5
+ LISTEN 8
+
+This section shows a summary of network connections, retrieved from C<netstat>
+and "fuzzy-rounded" to make them easier to compare when the numbers grow large.
+There are two sub-sections showing how many connections there are per origin
+and destination IP address, and a sub-section showing the count of ports in
+use. The section ends with the count of the network connections' states. You
+can skip this section by disabling the L<"--summarize-network"> option.
+
+ # Top Processes ##############################################
+ PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND
+ 1 root 15 0 2072 628 540 S 0.0 0.1 0:02.55 init
+ 2 root RT -5 0 0 0 S 0.0 0.0 0:00.00 migration/0
+ 3 root 34 19 0 0 0 S 0.0 0.0 0:00.03 ksoftirqd/0
+ 4 root RT -5 0 0 0 S 0.0 0.0 0:00.00 watchdog/0
+ 5 root 10 -5 0 0 0 S 0.0 0.0 0:00.97 events/0
+ 6 root 10 -5 0 0 0 S 0.0 0.0 0:00.00 khelper
+ 7 root 10 -5 0 0 0 S 0.0 0.0 0:00.00 kthread
+ 10 root 10 -5 0 0 0 S 0.0 0.0 0:00.13 kblockd/0
+ 11 root 20 -5 0 0 0 S 0.0 0.0 0:00.00 kacpid
+ # Notable Processes ##########################################
+ PID OOM COMMAND
+ 2028 +0 sshd
+
+This section shows the first few lines of C<top> so that you can see what
+processes are actively using CPU time. The notable processes include the SSH
+daemon and any process whose out-of-memory-killer priority is set to 17. You
+can skip this section by disabling the L<"--summarize-processes"> option.
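To make the state counting concrete, here is a small Go sketch that tallies connection states the way the last sub-section does, reading `netstat -ant` output from stdin (a hedged illustration; the tool itself post-processes a saved netstat sample with awk):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// Usage (hypothetical): netstat -ant | go run states.go
func main() {
	states := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		fields := strings.Fields(sc.Text())
		// tcp/tcp6 rows put the state in the sixth column;
		// header lines are skipped by the prefix check.
		if len(fields) >= 6 && strings.HasPrefix(fields[0], "tcp") {
			states[fields[5]]++
		}
	}
	fmt.Println("  States of connections")
	for s, n := range states {
		fmt.Printf("    %-12s %d\n", s, n)
	}
}
```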
+
+ # Simplified and fuzzy rounded vmstat (wait please) ##########
+ procs ---swap-- -----io---- ---system---- --------cpu--------
+ r b si so bi bo ir cs us sy il wa st
+ 2 0 0 0 3 15 30 125 0 0 99 0 0
+ 0 0 0 0 0 0 1250 800 6 10 84 0 0
+ 0 0 0 0 0 0 1000 125 0 0 100 0 0
+ 0 0 0 0 0 0 1000 125 0 0 100 0 0
+ 0 0 0 0 0 450 1000 125 0 1 88 11 0
+ # The End ####################################################
+
+This section is a trimmed-down sample of C<vmstat 1 5>, so you can see the
+general status of the system at present. The values in the table are
+fuzzy-rounded, except for the CPU columns. You can skip this section by
+disabling the L<"--summarize-processes"> option.
+
+=head1 OPTIONS
+
+=over
+
+=item --config
+
+type: string
+
+Read this comma-separated list of config files. If specified, this must be the
+first option on the command line.
+
+=item --help
+
+Print help and exit.
+
+=item --read-samples
+
+type: string
+
+Create a report from the files in this directory.
+
+=item --save-samples
+
+type: string
+
+Save the collected data in this directory.
+
+=item --sleep
+
+type: int; default: 5
+
+How long to sleep when gathering samples from vmstat.
+
+=item --summarize-mounts
+
+default: yes; negatable: yes
+
+Report on mounted filesystems and disk usage.
+
+=item --summarize-network
+
+default: yes; negatable: yes
+
+Report on network controllers and configuration.
+
+=item --summarize-processes
+
+default: yes; negatable: yes
+
+Report on top processes and C<vmstat> output.
+
+=item --version
+
+Print tool's version and exit.
+
+=back
+
+=head1 ENVIRONMENT
+
+This tool does not use any environment variables.
+
+=head1 SYSTEM REQUIREMENTS
+
+This tool requires the Bourne shell (F</bin/sh>).
+
+=head1 BUGS
+
+For a list of known bugs, see L<http://www.percona.com/bugs/pt-summary>.
+
+Please report bugs at L<https://jira.percona.com/projects/PT>.
+Include the following information in your bug report:
+
+=over
+
+=item * Complete command-line used to run the tool
+
+=item * Tool L<"--version">
+
+=item * MySQL version of all servers involved
+
+=item * Output from the tool including STDERR
+
+=item * Input files (log/dump/config files, etc.)
+
+=back
+
+If possible, include debugging output by running the tool with C<PTDEBUG>;
+see L<"ENVIRONMENT">.
+
+=head1 ATTENTION
+
+Using <PTDEBUG> might expose passwords. When debug is enabled, all command line
+parameters are shown in the output.
+
+=head1 DOWNLOADING
+
+Visit L<http://www.percona.com/software/percona-toolkit/> to download the
+latest release of Percona Toolkit. Or, get the latest release from the
+command line:
+
+ wget percona.com/get/percona-toolkit.tar.gz
+
+ wget percona.com/get/percona-toolkit.rpm
+
+ wget percona.com/get/percona-toolkit.deb
+
+You can also get individual tools from the latest release:
+
+ wget percona.com/get/TOOL
+
+Replace C<TOOL> with the name of any tool.
+
+=head1 AUTHORS
+
+Baron Schwartz, Kevin van Zonneveld, and Brian Fraser
+
+=head1 ABOUT PERCONA TOOLKIT
+
+This tool is part of Percona Toolkit, a collection of advanced command-line
+tools for MySQL developed by Percona. Percona Toolkit was forked from two
+projects in June, 2011: Maatkit and Aspersa. Those projects were created by
+Baron Schwartz and primarily developed by him and Daniel Nichter. Visit
+L<http://www.percona.com/software/> to learn about other free, open-source
+software from Percona.
+
+=head1 COPYRIGHT, LICENSE, AND WARRANTY
+
+This program is copyright 2011-2021 Percona LLC and/or its affiliates,
+2010-2011 Baron Schwartz.
+
+THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation, version 2; OR the Perl Artistic License. On UNIX and similar
+systems, you can issue `man perlgpl' or `man perlartistic' to read these
+licenses.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+Place, Suite 330, Boston, MA 02111-1307 USA.
+
+=head1 VERSION
+
+pt-summary 3.5.1
+
+=cut
+
+DOCUMENTATION
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/Makefile b/dbm-services/mysql/db-tools/mysql-rotatebinlog/Makefile
new file mode 100644
index 0000000000..4ac38012e2
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/Makefile
@@ -0,0 +1,27 @@
+PROJ="mysql-rotatebinlog"
+PROJ_BIN="rotatebinlog"
+MODULE="dbm-services/mysql/db-tools/mysql-rotatebinlog"
+VERSION = $(error please set VERSION flag)
+PROJ_PKG = ${PROJ}.tar.gz
+OUTPUT_DIR = build
+RELEASE_BUILD_FLAG = "-X ${MODULE}/cmd.version=${VERSION} -X ${MODULE}/cmd.buildStamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X ${MODULE}/cmd.gitHash=`git rev-parse HEAD` "
+BETA_BUILD_FLAG = "-X ${MODULE}/cmd.version="develop" -X ${MODULE}/cmd.buildStamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X ${MODULE}/cmd.gitHash="" "
+BASE_DIR = $(shell pwd)
+
+.PHONY: release
+release:
+	@CGO_ENABLED=0 GOARCH=amd64 GOOS=linux go build -ldflags ${RELEASE_BUILD_FLAG} -o ${OUTPUT_DIR}/${PROJ}/${PROJ_BIN}
+	@cp config.example.yaml ${OUTPUT_DIR}/${PROJ}/config.yaml.example
+	@tar -C ${OUTPUT_DIR} -zcf ${OUTPUT_DIR}/${PROJ_PKG} ${PROJ}/
+
+.PHONY: beta
+beta:
+	@cd ${BASE_DIR}/cmd && go build -ldflags ${BETA_BUILD_FLAG} -o ${OUTPUT_DIR}/${PROJ}/${PROJ_BIN}
+	@cp config.example.yaml ${OUTPUT_DIR}/${PROJ}/config.yaml.example
+	@tar -C ${OUTPUT_DIR} -zcf ${OUTPUT_DIR}/${PROJ_PKG} ${PROJ}/
+
+.PHONY: clean
+clean:
+	@rm -rf $(OUTPUT_DIR)
+
+
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/README.md b/dbm-services/mysql/db-tools/mysql-rotatebinlog/README.md
new file mode 100644
index 0000000000..e2b33117db
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/README.md
@@ -0,0 +1,99 @@
+
+# Development
+## Build
+```
+make release VERSION=0.2.10
+```
+
+# Usage
+## Rotate frequency
+rotate_binlog is invoked periodically by a job scheduler such as crontab.
+
+How often `flush binary logs` is issued is controlled by `rotate_interval`; normally it should be >= the interval at which rotate_binlog itself runs.
+
+## Adaptive binlog directories
+The tool works out by itself how much space must be freed to satisfy the space requirement.
+
+With multiple instances on one machine, it tries to keep the total binlog size of each instance roughly equal, deleting from the largest one first.
+
+When the binlogs of different instances live on different mount points, each partition's capacity is evaluated separately to decide which binlog instances it holds.
+
+
+## One process rotates all instances on a host
+Because rotation has to compute the total amount of space to free in order to meet the disk-usage threshold, instances on the same machine cannot rotate fully independently; the decision must be made globally.
+
+That is why config.yaml is rendered with all instances in a single configuration file.
+
+## purge index
+To avoid stalling an instance by purging binlogs directly, rotate_binlog first deletes the binlog files and then purges the already-deleted files, letting `purge` maintain the binlog.index file.
+
+The purge frequency is controlled by `purge_interval`; note that when `purge_interval` is smaller than the rotate run interval, a purge happens on every run.
+
+## Uploading to the backup system
+Binlogs older than `max_keep_duration` are deleted from the local host outright.
+
+Currently only instances with db_role = master upload their binlogs.
+
+## Removing a binlog instance from rotation
+```
+./rotate_binlog -c config.yaml --removeConfig 20000,20001
+```
+If every instance on the machine has been removed, the scheduled job must be stopped externally.
+
+## Managing the schedule
+rotate_binlog can run as a standalone crontab job, or it can be registered with mysql-crond and be scheduled by it.
+```
+./rotate_binlog -c config.yaml --addSchedule
+./rotate_binlog -c config.yaml --delSchedule
+```
+The schedule frequency is defined by `crond.schedule` in config.yaml.
+
+## sqlite
+rotate_binlog records the state of processed binlogs in a local sqlite db, replacing the earlier plain-text file approach.
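A minimal Go sketch of the idea, using the modernc.org/sqlite driver this module already depends on; the schema here is a cut-down subset of the real binlog_rotate table (see the migration example in the next section), not the tool's actual code:

```go
package main

import (
	"database/sql"
	"log"

	_ "modernc.org/sqlite" // registers the "sqlite" driver
)

func main() {
	db, err := sql.Open("sqlite", "binlog_rotate.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// -2 mirrors the schema's backup_status default: "not yet handled".
	if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS binlog_rotate (
		filename      TEXT PRIMARY KEY,
		backup_status INTEGER DEFAULT -2
	)`); err != nil {
		log.Fatal(err)
	}
	// Recording a processed binlog is idempotent, so reruns are safe.
	if _, err := db.Exec(
		`INSERT OR IGNORE INTO binlog_rotate (filename) VALUES (?)`,
		"binlog.000001"); err != nil {
		log.Fatal(err)
	}
}
```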
+
+## sqlite migrations
+A sqlite migration runs on every invocation. This protects existing DB instances:
+when the binlog_rotate sqlite table structure is updated, migrating in place avoids
+rebuilding the sqlite database, which would scramble the record of already-processed binlogs.
+Because sqlite supports neither `drop column` nor `change column` syntax, any column
+change means the migration has to rebuild the table, for example:
+```
+PRAGMA foreign_keys=off;
+
+BEGIN TRANSACTION;
+
+ALTER TABLE binlog_rotate RENAME TO binlog_rotate_old;
+
+CREATE TABLE IF NOT EXISTS binlog_rotate (
+ bk_biz_id integer not null,
+ cluster_id integer not null,
+ cluster_domain varchar(120) default '',
+ db_role varchar(20) not null default '',
+ host varchar(64) not null,
+ port integer default 0,
+ filename varchar(64) not null,
+ filesize integer not null,
+ file_mtime varchar(32) not null,
+ start_time varchar(32) default '',
+ stop_time varchar(32) default '',
+ backup_status integer default -2,
+ backup_status_info varchar(120) not null default '',
+ backup_taskid varchar(64) default '',
+ created_at varchar(32) default '',
+ updated_at varchar(32) default '',
+ PRIMARY KEY(cluster_id,filename,host,port)
+);
+
+CREATE INDEX idx_status
+ ON binlog_rotate (backup_status);
+
+INSERT INTO binlog_rotate (
+ bk_biz_id, cluster_id, db_role,host,port,filename,filesize,file_mtime,start_time,stop_time,backup_status,backup_status_info,backup_taskid,created_at,updated_at
+ )
+ SELECT
+ bk_biz_id, cluster_id, db_role,host,port,filename,filesize,file_mtime,start_time,stop_time,backup_status,backup_status_info,backup_taskid,created_at,updated_at
+ FROM binlog_rotate_old;
+
+DROP TABLE IF EXISTS binlog_rotate_old;
+
+COMMIT;
+
+PRAGMA foreign_keys=on;
+```
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/cmd/cmd.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/cmd/cmd.go
new file mode 100644
index 0000000000..5b729f1814
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/cmd/cmd.go
@@ -0,0 +1,2 @@
+// Package cmd implements the rotate_binlog command-line entry points.
+package cmd
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/cmd/root.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/cmd/root.go
new file mode 100644
index 0000000000..8f71e252ea
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/cmd/root.go
@@ -0,0 +1,64 @@
+package cmd
+
+import (
+	"dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate"
+	"fmt"
+	"log"
+	"os"
+	"runtime/debug"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+)
+
+// Execute runs the root command and exits non-zero on error or panic.
+func Execute() {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Println(err)
+			log.Println("panic goroutine inner error", err, string(debug.Stack()))
+			os.Exit(1)
+			return
+		}
+	}()
+	err := rootCmd.Execute()
+	if err != nil {
+		os.Exit(1)
+	}
+}
+
+var rootCmd = &cobra.Command{
+	Use:   "rotate_binlog",
+	Short: "rotate binlogs and back them up to remote",
+	Long: `rotate binlog files and back them up to the remote
+	backup system`,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		comp := rotate.RotateBinlogComp{Config: viper.GetString("config")}
+		if removeConfigs, err := cmd.PersistentFlags().GetStringSlice("removeConfig"); err != nil {
+			return err
+		} else if len(removeConfigs) > 0 {
+			return comp.RemoveConfig(removeConfigs)
+		}
+		addSchedule, _ := cmd.PersistentFlags().GetBool("addSchedule")
+		delSchedule, _ := cmd.PersistentFlags().GetBool("delSchedule")
+		if isSchedule, err := comp.HandleScheduler(addSchedule, delSchedule); err != nil {
+			return err
+		} else if isSchedule {
+			return nil
+		}
+
+		return comp.Start()
+	},
+	PreRun: func(cmd *cobra.Command, args []string) {
+		// subcmd.SetLogger(nil, &subcmd.BaseOptions{Uid: "rotate_binlog", NodeId: "0"})
+	},
+}
+
+func init() {
+	var Config string
rootCmd.PersistentFlags().StringVarP(&Config, "config", "c", "config.yaml", "config file")
+	rootCmd.PersistentFlags().StringSlice("removeConfig", nil, "remove binlog instance rotate config from config file")
+	rootCmd.PersistentFlags().Bool("addSchedule", false, "add schedule to crond")
+	rootCmd.PersistentFlags().Bool("delSchedule", false, "del schedule from crond")
+	viper.BindPFlag("config", rootCmd.PersistentFlags().Lookup("config"))
+}
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/config.example.yaml b/dbm-services/mysql/db-tools/mysql-rotatebinlog/config.example.yaml
new file mode 100644
index 0000000000..abc77c2880
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/config.example.yaml
@@ -0,0 +1,60 @@
+---
+public:
+  # binlog retention policy; least: keep as few as possible, most: keep as many as possible
+  # typically the master uses most and slaves use least
+  keep_policy: most
+  # maximum binlog directory size allowed for each instance
+  max_binlog_total_size: 200g
+  # upper limit, in percent (0-100), on space usage of the mount point holding the binlogs
+  max_disk_used_pct: 80
+  # binlogs older than the maximum retention period are deleted outright, without being backed up
+  max_keep_duration: 61d
+  # how often to run `purge binary logs` to remove already-deleted binlog files from binlog.index
+  purge_interval: 5m
+  rotate_interval: 10m
+  rotate_timeout: 4m
+
+# the crond job that schedules rotate_binlog
+crond:
+  api_url: "http://127.0.0.1:9999"
+  item_name: rotate_binlog
+  schedule: "*/5 * * * *"
+
+servers:
+  - port: 20000
+    host: "x.x.x.x"
+    username: "MONITOR"
+    password: "MONITOR"
+    tags:
+      bk_biz_id: 100
+      cluster_domain: testdb.my.app.db
+      cluster_id: 11111
+      db_role: master
+  - port: 20001
+    host: "x.x.x.x"
+    username: "MONITOR"
+    password: "MONITOR"
+    tags:
+      bk_biz_id: 100
+      cluster_domain: testdb.my.app.db
+      cluster_id: 11111
+      db_role: master
+
+report:
+  enable: true
+  filepath: "/home/mysql/dbareport/mysql/binlog"
+  log_maxsize: 5
+  log_maxbackups: 10
+  log_maxage: 30
+
+encrypt:
+  enable: false
+  key_prefix: "bkdbm"
+
+backup_client:
+  ibs:
+    enable: true
+    ibs_mode: hdfs
+    with_md5: true
+    file_tag: INCREMENT_BACKUP
+    tool_path: backup_client
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/go.mod b/dbm-services/mysql/db-tools/mysql-rotatebinlog/go.mod
new file mode 100644
index 0000000000..2e6409f6da
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/go.mod
@@ -0,0 +1,65 @@
+module dbm-services/mysql/db-tools/mysql-rotatebinlog
+
+go 1.19
+
+require (
+	github.com/Masterminds/squirrel v1.5.4
+	github.com/ghodss/yaml v1.0.0
+	github.com/go-mysql-org/go-mysql v1.7.0
+	github.com/golang-migrate/migrate/v4 v4.15.2
+	github.com/jmoiron/sqlx v1.3.5
+	github.com/mitchellh/go-homedir v1.1.0
+	github.com/mitchellh/mapstructure v1.5.0
+	github.com/pkg/errors v0.9.1
+	github.com/spf13/cast v1.5.0
+	github.com/spf13/cobra v1.7.0
+	github.com/spf13/viper v1.15.0
+	github.com/stretchr/testify v1.8.2
+	modernc.org/sqlite v1.22.1
+)
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/dustin/go-humanize v1.0.1 // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/go-sql-driver/mysql v1.7.1 // indirect
+	github.com/google/uuid v1.3.0 // indirect
+	github.com/hashicorp/errwrap v1.1.0 // indirect
+	github.com/hashicorp/go-multierror v1.1.1 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
+	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
+	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
+	github.com/magiconair/properties v1.8.7 //
indirect + github.com/mattn/go-isatty v0.0.18 // indirect + github.com/pelletier/go-toml/v2 v2.0.7 // indirect + github.com/pingcap/errors v0.11.5-0.20210425183316-da1aaba5fb63 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + github.com/rogpeppe/go-internal v1.8.0 // indirect + github.com/shopspring/decimal v1.2.0 // indirect + github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 // indirect + github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + go.uber.org/atomic v1.9.0 // indirect + golang.org/x/mod v0.8.0 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + golang.org/x/tools v0.6.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + lukechampine.com/uint128 v1.2.0 // indirect + modernc.org/cc/v3 v3.40.0 // indirect + modernc.org/ccgo/v3 v3.16.13 // indirect + modernc.org/libc v1.22.5 // indirect + modernc.org/mathutil v1.5.0 // indirect + modernc.org/memory v1.5.0 // indirect + modernc.org/opt v0.1.3 // indirect + modernc.org/strutil v1.1.3 // indirect + modernc.org/token v1.0.1 // indirect +) diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/go.sum b/dbm-services/mysql/db-tools/mysql-rotatebinlog/go.sum new file mode 100644 index 0000000000..19beefad46 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/go.sum @@ -0,0 +1,1942 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go 
v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/spanner v1.28.0/go.mod h1:7m6mtQZn/hMbMfx62ct5EWrGND4DNqkXyrmBPRS+OJo= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= +github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod 
h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= +github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= +github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod 
h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/arrow v0.0.0-20210818145353-234c94e4ce64/go.mod h1:2qMFB56yOP3KzkB3PbYZ4AlUFg3a88F67TIx5lB/WwY= +github.com/apache/arrow/go/arrow v0.0.0-20211013220434-5962184e7a30/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= 
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go-v2 v1.8.0/go.mod h1:xEFuWz+3TYdlPRuo+CqATbeDWIWyaT5uAPwPaWtgse0= +github.com/aws/aws-sdk-go-v2 v1.9.2/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= +github.com/aws/aws-sdk-go-v2/config v1.6.0/go.mod h1:TNtBVmka80lRPk5+S9ZqVfFszOQAGJJ9KbT3EM3CHNU= +github.com/aws/aws-sdk-go-v2/config v1.8.3/go.mod h1:4AEiLtAb8kLs7vgw2ZV3p2VZ1+hBavOc84hqxVNpCyw= +github.com/aws/aws-sdk-go-v2/credentials v1.3.2/go.mod h1:PACKuTJdt6AlXvEq8rFI4eDmoqDFC5DpVKQbWysaDgM= +github.com/aws/aws-sdk-go-v2/credentials v1.4.3/go.mod h1:FNNC6nQZQUuyhq5aE5c7ata8o9e4ECGmS4lAXC7o1mQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.4.0/go.mod h1:Mj/U8OpDbcVcoctrYwA2bak8k/HFPdcLzI/vaiXMwuM= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.6.0/go.mod h1:gqlclDEZp4aqJOancXK6TN24aKhT0W0Ae9MHk3wzTMM= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.4.0/go.mod h1:eHwXu2+uE/T6gpnYWwBwqoeqRf9IXyCcolyOWDRAErQ= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.5.4/go.mod h1:Ex7XQmbFmgFHrjUX6TN3mApKW5Hglyga+F7wZHTtYhA= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.0/go.mod h1:Q5jATQc+f1MfZp3PDMhn6ry18hGvE0i8yvbXoKbnZaE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.2.4/go.mod h1:ZcBrrI3zBKlhGFNYWvju0I3TR93I7YIgAfy82Fh4lcQ= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.2.2/go.mod h1:EASdTcM1lGhUe1/p4gkojHwlGJkeoRjjr1sRCzup3Is= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.3.0/go.mod h1:v8ygadNyATSm6elwJ/4gzJwcFhri9RqS8skgHKiwXPU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.2.2/go.mod h1:NXmNI41bdEsJMrD0v9rUvbGCB5GwdBEpKvUvIY3vTFg= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.3.2/go.mod h1:72HRZDLMtmVQiLG2tLfQcaWLCssELvGl+Zf2WVxMmR8= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.5.2/go.mod h1:QuL2Ym8BkrLmN4lUofXYq6000/i5jPjosCNK//t6gak= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.7.2/go.mod h1:np7TMuJNT83O0oDOSF8i4dF3dvGqA6hPYYo6YYkzgRA= +github.com/aws/aws-sdk-go-v2/service/s3 v1.12.0/go.mod h1:6J++A5xpo7QDsIeSqPK4UHqMSyPOCopa+zKtqAMhqVQ= +github.com/aws/aws-sdk-go-v2/service/s3 v1.16.1/go.mod h1:CQe/KvWV1AqRc65KqeJjrLzr5X2ijnFTTVzJW0VBRCI= +github.com/aws/aws-sdk-go-v2/service/sso v1.3.2/go.mod h1:J21I6kF+d/6XHVk7kp/cx9YVD2TMD2TbLwtRGVcinXo= +github.com/aws/aws-sdk-go-v2/service/sso v1.4.2/go.mod h1:NBvT9R1MEF+Ud6ApJKM0G+IkPchKS7p7c2YPKwHmBOk= +github.com/aws/aws-sdk-go-v2/service/sts v1.6.1/go.mod h1:hLZ/AnkIKHLuPGjEiyghNEdvJ2PP0MgOxcmv9EBJ4xs= +github.com/aws/aws-sdk-go-v2/service/sts v1.7.2/go.mod h1:8EzeIqfWt2wWT4rJVu3f21TfrhJ8AEMzVybRNSb/b4g= +github.com/aws/smithy-go v1.7.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks 
v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf 
v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/cockroachdb/cockroach-go/v2 v2.1.1/go.mod h1:7NtUnP6eK+l6k483WSYNrq3Kb23bWV10IRV1TyeSpwM=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
+github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
+github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
+github.com/cznic/sortutil v0.0.0-20181122101858-f5f958428db8/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ=
+github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dhui/dktest v0.3.10/go.mod h1:h5Enh0nG3Qbo9WjNFRrwmKUaePEBhXMOygbz3Ww7Sz0=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.13+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
+github.com/gabriel-vasile/mimetype v1.4.0/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
+github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
+github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
+github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-mysql-org/go-mysql v1.7.0 h1:qE5FTRb3ZeTQmlk3pjE+/m2ravGxxRDrVDTyDe9tvqI=
+github.com/go-mysql-org/go-mysql v1.7.0/go.mod h1:9cRWLtuXNKhamUPMkrDVzBhaomGvqLRLtBiyjvjc4pk=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
+github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/gocql/gocql v0.0.0-20210515062232-b7ef815b4556/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-migrate/migrate/v4 v4.15.2 h1:vU+M05vs6jWHKDdmE1Ecwj0BznygFc4QsdRe2E/L7kc=
+github.com/golang-migrate/migrate/v4 v4.15.2/go.mod h1:f2toGLkYqD3JH+Todi4aZ2ZdbeUNx4sIwiOK96rE9Lw=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
+github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
+github.com/google/go-github/v39 v39.2.0/go.mod h1:C1s8C5aCC9L+JXIYpJM5GYytdX52vC1bLvHEF1IhBrE=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
+github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk=
+github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
+github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
+github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451/go.mod h1:a/s9Lp5W7n/DD0VrVoyJ00FbP2ytTPDVOivvn2bMlds=
+github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.0.7/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0=
+github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po=
+github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ=
+github.com/jackc/pgtype v1.6.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA=
+github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o=
+github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg=
+github.com/jackc/pgx/v4 v4.10.1/go.mod h1:QlrWebbs3kqEZPHCTGyxecvzG6tvIsYu+A5b1raylkA=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
+github.com/jmoiron/sqlx v1.3.1/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
+github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
+github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
+github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
+github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
+github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
+github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
+github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
+github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
+github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
+github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4=
+github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
+github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
+github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
+github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E=
+github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
+github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
+github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
+github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.10/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
+github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= +github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v4 v4.1.8/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8 h1:USx2/E1bX46VG32FIw034Au6seQ2fY9NEILmNh/UlQg= +github.com/pingcap/check v0.0.0-20190102082844-67f458068fc8/go.mod h1:B1+S9LNcuMyLH/4HMTViQOJevkGiik3wW2AN9zb2fNQ= +github.com/pingcap/errors v0.11.0/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pingcap/errors v0.11.5-0.20210425183316-da1aaba5fb63 h1:+FZIDR/D97YOPik4N4lPDaUcLDF/EQPogxtlHB2ZZRM= +github.com/pingcap/errors v0.11.5-0.20210425183316-da1aaba5fb63/go.mod h1:X2r9ueLEUZgtx2cIogM0v4Zj5uvvzhuuiu7Pn8HzMPg= +github.com/pingcap/log v0.0.0-20210625125904-98ed8e2eb1c7/go.mod h1:8AanEdAHATuRurdGxZXBz0At+9avep+ub7U1AGYLIMM= +github.com/pingcap/tidb/parser v0.0.0-20221126021158-6b02a5d8ba7d/go.mod h1:ElJiub4lRy6UZDb+0JHDkGEdr6aOli+ykhyej7VCLoI= +github.com/pkg/browser v0.0.0-20210706143420-7d21f8c997e2/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod 
h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod 
h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726 h1:xT+JlYxNGqyT+XcU8iUrN18JYed2TvG9yN5ULG2jATM= +github.com/siddontang/go v0.0.0-20180604090527-bdc77568d726/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw= +github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07 h1:oI+RNwuC9jF2g2lP0u0cVEEZrc/AYBCuFdvwrLWM/6Q= +github.com/siddontang/go-log v0.0.0-20180807004314-8d05993dda07/go.mod h1:yFdBgwXP24JziuRl2NMUahT7nGLNOKi1SIiFxMttVD4= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= 
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snowflakedb/gosnowflake v1.6.3/go.mod h1:6hLajn6yxuJ4xUHZegMekpq9rnQbGJ7TMwXjgTmA6lg= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.15.0 
h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= 
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= +github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= +go.etcd.io/bbolt 
v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.mongodb.org/mongo-driver v1.7.0/go.mod h1:Q4oFMbo1+MSNqICAdYMlC/zSTrwCogR4R8NzkI+yfU8= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod 
h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20181106170214-d68db9428509/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191002040644-a1355ae1e2c3/go.mod h1:NOZ3BPKG0ec/BKJQgnvsSFpcKLM5xXVWnvZS97DWHgE= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190225153610-fe579d43d832/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net 
v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210505024714-0287a6fb4125/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220111093109-d55c255bac03/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201126233918-771906719818/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220317061510-51cd9980dadf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.9.0 
h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools 
v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201125231158-b5590deeca9b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum 
v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= +google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.3.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= 
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= 
+google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220314164441-57ef72a4c106/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= 
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/postgres v1.0.8/go.mod h1:4eOzrI1MUfm6ObJU/UcmbXyiHSs8jSwH95G5P5dxcAg= +gorm.io/gorm v1.20.12/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= +gorm.io/gorm v1.21.4/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= 
+gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 
v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= +modernc.org/cc/v3 v3.32.4/go.mod h1:0R6jl1aZlIl2avnYfbfHBS1QB6/f+16mihBObaBC878= +modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw= +modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0= +modernc.org/ccgo/v3 v3.9.2/go.mod h1:gnJpy6NIVqkETT+L5zPsQFj7L2kkhfPMzOghRNv/CFo= +modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw= +modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY= +modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk= +modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8= +modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw= +modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/golex v1.0.1/go.mod h1:QCA53QtsT1NdGkaZZkF5ezFwk4IXh4BGNafAARTC254= +modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM= +modernc.org/lex v1.0.0/go.mod h1:G6rxMTy3cH2iA0iXL/HRRv4Znu8MK4higxph/lE7ypk= +modernc.org/lexer v1.0.0/go.mod h1:F/Dld0YKYdZCLQ7bD0USbWL4YKCyTDRDHiDTOs0q0vk= +modernc.org/libc v1.7.13-0.20210308123627-12f642a52bb8/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/libc v1.9.5/go.mod h1:U1eq8YWr/Kc1RWCMFUWEdkTg8OTcfLw2kY8EDwl039w= +modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE= +modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY= +modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/mathutil v1.1.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.0.4/go.mod 
h1:nV2OApxradM3/OVbs2/0OsP6nPfakXpi50C7dcoHXlc= +modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds= +modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/parser v1.0.0/go.mod h1:H20AntYJ2cHHL6MHthJ8LZzXCdDCHMWt1KZXtIMjejA= +modernc.org/parser v1.0.2/go.mod h1:TXNq3HABP3HMaqLK7brD1fLA/LfN0KS6JxZn71QdDqs= +modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY= +modernc.org/scanner v1.0.1/go.mod h1:OIzD2ZtjYk6yTuyqZr57FmifbM9fIH74SumloSsajuE= +modernc.org/sortutil v1.0.0/go.mod h1:1QO0q8IlIlmjBIwm6t/7sof874+xCfZouyqZMLIAtxM= +modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k= +modernc.org/sqlite v1.10.6/go.mod h1:Z9FEjUtZP4qFEg6/SiADg9XCER7aYy9a/j7Pg9P7CPs= +modernc.org/sqlite v1.22.1 h1:P2+Dhp5FR1RlVRkQ3dDfCiv3Ok8XPxqpe70IjYVA9oE= +modernc.org/sqlite v1.22.1/go.mod h1:OrDj17Mggn6MhE+iPbBNf7RGKODDE9NFT0f3EwDzJqk= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.5.2/go.mod h1:pmJYOLgpiys3oI4AeAafkcUfE+TKKilminxNyU/+Zlo= +modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY= +modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg= +modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/y v1.0.1/go.mod h1:Ho86I+LVHEI+LYXoUKlmOMAM1JTXOCfj8qi1T8PsClE= +modernc.org/z v1.0.1-0.20210308123920-1f282aa71362/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= +modernc.org/z v1.0.1/go.mod h1:8/SRk5C/HgiQWCgXdfpb+1RvhORdkz5sw72d3jjtyqA= +modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY= +modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git 
a/dbm-services/mysql/db-tools/mysql-rotatebinlog/main.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/main.go
new file mode 100644
index 0000000000..4c43e0eb04
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/main.go
@@ -0,0 +1,7 @@
+package main
+
+import "dbm-services/mysql/db-tools/mysql-rotatebinlog/cmd"
+
+func main() {
+	cmd.Execute()
+}
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/backup.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/backup.go
new file mode 100644
index 0000000000..eb8d0ac513
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/backup.go
@@ -0,0 +1,9 @@
+// Package backup wraps the backup-system clients used to upload binlog files
+package backup
+
+// BackupClient is the common interface of backup storage backends
+type BackupClient interface {
+	Init() error
+	Upload(fileName string) (string, error)
+	Query(taskId string) (int, error)
+}
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/backup_cos.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/backup_cos.go
new file mode 100644
index 0000000000..7ff47c2e3b
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/backup_cos.go
@@ -0,0 +1,20 @@
+package backup
+
+// COSBackupClient placeholder client for COS object storage, not yet implemented
+type COSBackupClient struct {
+}
+
+// Init does nothing for the placeholder COS client
+func (o *COSBackupClient) Init() error {
+	return nil
+}
+
+// Upload does nothing for the placeholder COS client
+func (o *COSBackupClient) Upload(fileName string) (string, error) {
+	return "", nil
+}
+
+// Query does nothing for the placeholder COS client
+func (o *COSBackupClient) Query(taskId string) (int, error) {
+	return 0, nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/backup_ibs.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/backup_ibs.go
new file mode 100644
index 0000000000..2e7fcc8618
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/backup_ibs.go
@@ -0,0 +1,104 @@
+package backup
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/common/go-pubpkg/validate"
+	"dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/util"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+const (
+	// IBSModeCos IBS backed by COS storage
+	IBSModeCos = "cos"
+	// IBSModeHdfs IBS backed by HDFS storage
+	IBSModeHdfs = "hdfs"
+)
+
+// IBSBackupClient backup client that shells out to the IBS backup tool
+type IBSBackupClient struct {
+	Enable   bool   `mapstructure:"enable" json:"enable"`
+	ToolPath string `mapstructure:"tool_path" json:"tool_path" validate:"required"`
+	WithMD5  bool   `mapstructure:"with_md5" json:"with_md5"`
+	FileTag  string `mapstructure:"file_tag" json:"file_tag" validate:"required"`
+	IBSMode  string `mapstructure:"ibs_mode" json:"ibs_mode" validate:"required" enums:"hdfs,cos"`
+
+	// ibsBackup string
+	ibsQueryCmd  string
+	ibsUploadCmd string
+}
+
+// Init validates the config and builds the upload/query command lines
+func (o *IBSBackupClient) Init() error {
+	if err := validate.GoValidateStruct(o, false, false); err != nil {
+		return err
+	}
+	o.ibsQueryCmd = o.ToolPath
+	o.ibsUploadCmd = fmt.Sprintf("%s -n --tag %s", o.ToolPath, o.FileTag)
+	if o.WithMD5 {
+		o.ibsUploadCmd += " --with-md5"
+	}
+	if o.IBSMode == IBSModeCos {
+		o.ibsUploadCmd += " -c"
+	}
+	return nil
+}
+
+// Upload submits an upload task; the actual transfer is scheduled and runs asynchronously
+func (o *IBSBackupClient) Upload(fileName string) (taskId string, err error) {
+	logger.Info("backup upload to ibs: %s", fileName)
+	backupCmd := fmt.Sprintf(`%s -f %s`, o.ibsUploadCmd, fileName)
+	var stdout, stderr string
+	if stdout, stderr, err = util.ExecCommand(true, "", backupCmd); err != nil {
+		return "", errors.Wrapf(err, "upload failed:%s", stderr)
+	}
+	reTaskId := regexp.MustCompile(`taskid:(\d+)`)
+	if matches := reTaskId.FindStringSubmatch(stdout); len(matches) == 2 {
+		if _, err = strconv.ParseInt(matches[1], 10, 64); err != nil {
+			return "", errors.Errorf("parse taskid failed for %s: %v", fileName, matches)
+		}
+		taskId = matches[1]
+		return taskId, nil
+	} else {
+		return "", errors.Errorf("failed to match backup taskid for %s", fileName)
+	}
+}
+
+// Query returns the status of an upload task by its task id
+func (o *IBSBackupClient) Query(taskid string) (taskStatus int, err error) {
+	// queryCmd := fmt.Sprintf(`%s -q --taskid=%s`, o.IBSBackup, taskid)
+	queryCmd := fmt.Sprintf(`%s -q --taskid %s`, o.ibsQueryCmd, taskid)
+	var stdout, stderr string
+	if stdout, stderr, err = util.ExecCommand(true, "", queryCmd); err != nil {
+		return 0, errors.Wrapf(err, "query failed:%s", stderr)
+	}
+	outLines := strings.Split(stdout, "\n")
+	lineMap := map[string]string{
+		"sendup datetime": "",
+		"status":          "",
+		"status info":     "",
+		"expire_time":     "",
+		"complete_time":   "",
+	}
+	for _, l := range outLines {
+		if !strings.Contains(l, ":") { // only lines of the form "key : value" are valid output
+			continue
+		}
+		if lkv := strings.SplitN(l, ":", 2); len(lkv) != 2 {
+			return 0, errors.Errorf("error format parsing backup query result:%s", l)
+		} else {
+			k := strings.TrimSpace(lkv[0])
+			lineMap[k] = strings.TrimSpace(lkv[1])
+		}
+	}
+	// logger.Info("query task[%s]: %+v", queryCmd, lineMap)
+	if taskStatus, err = strconv.Atoi(lineMap["status"]); err != nil {
+		return 0, errors.Errorf("invalid backup task status %s. queryCmd:%s", lineMap["status"], queryCmd)
+	} else {
+		return taskStatus, nil
+	}
+}
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/init.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/init.go
new file mode 100644
index 0000000000..8bfb5c962b
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup/init.go
@@ -0,0 +1,41 @@
+package backup
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+
+	"github.com/mitchellh/mapstructure"
+	"github.com/pkg/errors"
+	"github.com/spf13/viper"
+)
+
+// InitBackupClient builds the enabled backup client from the backup_client config section
+func InitBackupClient() (backupClient BackupClient, err error) {
+	backupClients := viper.GetStringMap("backup_client")
+	for name, cfgClient := range backupClients {
+		if name == "ibs" && viper.GetBool("backup_client.ibs.enable") {
+			var ibsClient IBSBackupClient
+			if err := mapstructure.Decode(cfgClient, &ibsClient); err != nil {
+				return nil, err
+			} else {
+				backupClient = &ibsClient
+			}
+		} else if name == "cos" && viper.GetBool("backup_client.cos.enable") {
+			var cosClient COSBackupClient
+			if err := mapstructure.Decode(cfgClient, &cosClient); err != nil {
+				return nil, err
+			} else {
+				backupClient = &cosClient
+			}
+		} else {
+			logger.Warn("unknown backup_client: %s", name)
+			// return nil, errors.Errorf("unknown backup_client: %s", name)
+		}
+	}
+	if backupClient == nil {
+		logger.Warn("no backup_client is enabled in config")
+	} else if err = backupClient.Init(); err != nil {
+		// wrap before discarding the client so the error still carries its config
+		return nil, errors.Wrapf(err, "backup_client init failed: %+v", backupClient)
+	}
+	return backupClient, nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/binlog-parser/parse_binlog_time.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/binlog-parser/parse_binlog_time.go
new file mode 100644
index 0000000000..b96af4acac
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/binlog-parser/parse_binlog_time.go
@@ -0,0 +1,291 @@
+package binlog_parser
+
+import (
+	"bytes"
+	"dbm-services/common/go-pubpkg/cmutil"
+	"encoding/json"
+	"fmt"
+	"io"
"os" + "path" + "path/filepath" + "strings" + "time" + + "github.com/go-mysql-org/go-mysql/replication" + "github.com/pkg/errors" +) + +// BinlogTimeComp TODO +type BinlogTimeComp struct { + Params BinlogTimeParam `json:"extend"` +} + +// Example TODO +func (t *BinlogTimeComp) Example() interface{} { + return &BinlogTimeComp{ + Params: BinlogTimeParam{ + BinlogDir: "/data/dbbak", + BinlogFiles: []string{"binlog20000.00001", "binlog20000.00002"}, + Format: "json", + }, + } +} + +// BinlogTimeParam TODO +type BinlogTimeParam struct { + BinlogDir string `json:"binlog_dir" validate:"required"` + BinlogFiles []string `json:"binlog_files" validate:"required"` + Format string `json:"format" enums:",json,dump"` + parser *BinlogParse +} + +// Init TODO +func (t *BinlogTimeComp) Init() error { + bp, err := NewBinlogParse("mysql", 0) + if err != nil { + return err + } + t.Params.parser = bp + return nil +} + +// Start TODO +func (t *BinlogTimeComp) Start() error { + for _, f := range t.Params.BinlogFiles { + filename := filepath.Join(t.Params.BinlogDir, f) + if err := cmutil.FileExistsErr(filename); err != nil { + fmt.Printf("%s: %v\n", filename, err) + continue + } + if events, err := t.Params.parser.GetTime(filename, true, true); err != nil { + fmt.Printf("%s: %v\n", filename, err) + } else { + b, _ := json.Marshal(events) + fmt.Printf("%s: %s\n", filename, b) + } + } + return nil +} + +const ( + // MaxFirstBufSize FormatDescriptionEvent 最大长度 + MaxFirstBufSize = 150 + // MaxLastBufSize 我们认为的 RotateEvent 最大长度 + MaxLastBufSize = 100 + // RotateEventPosLen 下一个 binlog 时间开始位置,8 bytes, 现在固定是 4 + RotateEventPosLen = 8 + // MaxTimestamp 我们认为的最大合法的 timestamp 值 + MaxTimestamp = 2177424000 +) + +// BinlogParse 解析选项 +type BinlogParse struct { + // binlog full filename with path + FileName string `json:"file_name"` + // 第一个 event FormatDescriptionEvent 的大小 + FirstBufSize int `json:"first_buf_size"` + // 取最后的多少个字节来获取 RotateEvent,最大取 100 + LastBufSize int `json:"last_buf_size"` + // 输出格式,json 或者 其它 + Output string `json:"output" enums:",json,dump"` + // 输出的日期格式 + TimeLayout string `json:"time_layout"` + // mysql or mariadb + Flavor string `json:"flavor" enums:"mysql,mariadb"` + + parser *replication.BinlogParser + // 第一个 event 开始位置,4。前 4 个字节是 [0xfe 0x62 0x69 0x6e] + firstEventPos int64 + // event header 固定大小 19 + eventHeaderLen int +} + +// NewBinlogParse godoc +// lastBufSize 默认会根据当前 binlog filename 来算,见 GetRotateEvent +func NewBinlogParse(flavor string, lastBufSize int) (*BinlogParse, error) { + bp := BinlogParse{ + Flavor: flavor, + LastBufSize: lastBufSize, + } + if err := bp.init(); err != nil { + return nil, err + } + return &bp, nil +} + +func (b *BinlogParse) init() error { + b.eventHeaderLen = replication.EventHeaderSize + b.firstEventPos = int64(len(replication.BinLogFileHeader)) // int64(4) // fe 62 69 6e + b.FirstBufSize = MaxFirstBufSize + /* + if b.LastBufSize == 0 { + b.LastBufSize = 49 // 这里默认第一次取 49 + } + */ + b.parser = replication.NewBinlogParser() + if b.Flavor == "" { + b.parser.SetFlavor("mysql") + } + if b.TimeLayout == "" { + b.TimeLayout = "2006-01-02 15:04:05" + } + if b.Output == "" { + b.Output = "json" + } + return nil +} + +// GetTime 获取binlog 开始或结束时间 +func (b *BinlogParse) GetTime(fileName string, start, stop bool) ([]BinlogEventHeaderWrapper, error) { + b.FileName = fileName + if err := cmutil.FileExistsErr(b.FileName); err != nil { + return nil, err + } + f, err := os.Open(b.FileName) + if err != nil { + return nil, errors.Wrap(err, "get time from binlog") + } + defer f.Close() + 
+ var events []*replication.EventHeader + if start { + if evh, err := b.GetFormatDescriptionEvent(f); err != nil { + return nil, err + } else { + events = append(events, evh) + } + } + if stop { + evh, err := b.GetRotateEvent(f) + if err != nil { + b.LastBufSize = MaxLastBufSize + if evh, err = b.GetRotateEvent(f); err != nil { + return nil, err + } + } + events = append(events, evh) + } + var evhWrappers []BinlogEventHeaderWrapper + for _, header := range events { + w := b.NewBinlogEventHeaderWrapper(header) + evhWrappers = append(evhWrappers, w) + } + return evhWrappers, nil +} + +// BinlogEventHeaderWrapper 输出结果的格式 +type BinlogEventHeaderWrapper struct { + // replication.BinlogEvent + evHeader replication.EventHeader + EventType string `json:"event_type"` + EventTime string `json:"event_time"` + Timestamp uint32 `json:"timestamp"` + ServerID uint32 `json:"server_id"` + EventSize uint32 `json:"event_size"` +} + +// NewBinlogEventHeaderWrapper 封装EventHeader用于输出 +func (b *BinlogParse) NewBinlogEventHeaderWrapper(evh *replication.EventHeader) BinlogEventHeaderWrapper { + w := BinlogEventHeaderWrapper{ + evHeader: *evh, + EventType: replication.EventType(evh.EventType).String(), + EventTime: time.Unix(int64(evh.Timestamp), 0).Format(b.TimeLayout), + Timestamp: evh.Timestamp, + ServerID: evh.ServerID, + EventSize: evh.EventSize, + } + // evh.Dump(os.Stdout) + return w +} + +// GetFormatDescriptionEvent 获取 header +func (b *BinlogParse) GetFormatDescriptionEvent(f *os.File) (*replication.EventHeader, error) { + _, err := f.Seek(b.firstEventPos, io.SeekStart) + desc := make([]byte, b.FirstBufSize) + if _, err = f.ReadAt(desc, b.firstEventPos); err != nil { + return nil, errors.Wrap(err, b.FileName) + } + r := bytes.NewReader(desc) + evHeader := &replication.EventHeader{} + if err := evHeader.Decode(desc); err != nil { + return nil, errors.Wrap(err, b.FileName) + } + + _, err = b.parser.ParseSingleEvent( + r, func(e *replication.BinlogEvent) error { + if e.Header.EventType == replication.FORMAT_DESCRIPTION_EVENT { + // evHeader = e.Header + return nil + } else { + return errors.Errorf("%s: failed to find FormatDescriptionEvent at pos 4", b.FileName) + } + }, + ) + if err != nil { + return nil, errors.Wrap(err, b.FileName) + } + return evHeader, nil +} + +// GetRotateEvent 获取 rotate event header +func (b *BinlogParse) GetRotateEvent(f *os.File) (*replication.EventHeader, error) { + // RotateEvent event size: 19(header size) + 8(pos length) + name_len + 4 + // 暂且估计下一个 binlog filename 长度与当前分析的相同(且是ascii字符,即一个字符占一个字节),所以避免手动 rename 文件名 + filename := path.Base(b.FileName) + rotateEventSize := b.eventHeaderLen + RotateEventPosLen + len(filename) + len(replication.BinLogFileHeader) + if b.LastBufSize < rotateEventSize && rotateEventSize <= MaxLastBufSize { + b.LastBufSize = rotateEventSize + } + // https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_replication_binlog_event.html#sect_protocol_replication_event_rotate + bufSize := b.LastBufSize // 取最后的多少个字节 + buf := make([]byte, bufSize) + _, err := f.Seek(-int64(bufSize), io.SeekEnd) + if _, err = f.Read(buf); err != nil { + return nil, errors.Wrap(err, b.FileName) + } + minBegin := b.eventHeaderLen // StopEvent 最小长度 + stopWhere := bufSize - minBegin + + evHeader := &replication.EventHeader{} + for i := 0; i <= stopWhere; i++ { // i is startPos + endPos := i + b.eventHeaderLen + if err := evHeader.Decode(buf[i:endPos]); err != nil { + if evHeader.EventSize < uint32(b.eventHeaderLen) || strings.Contains(err.Error(), "invalid event size") { + 
+				// decoded garbage at this offset: not a real event header, move one byte forward and retry
+				continue
+			} else {
+				return nil, errors.Wrap(err, b.FileName)
+			}
+		}
+		if evHeader.EventType == replication.UNKNOWN_EVENT { // invalid event
+			continue
+		} else if evHeader.EventType == replication.ROTATE_EVENT {
+			if evHeader.EventSize > MaxLastBufSize { // not a RotateEvent
+				continue
+			} else if evHeader.Timestamp > MaxTimestamp { // invalid timestamp
+				continue
+			}
+			r := bytes.NewReader(buf[i:])
+			_, err = b.parser.ParseSingleEvent(
+				r, func(e *replication.BinlogEvent) error {
+					// valid event
+					return nil
+				},
+			)
+			if err != nil {
+				return nil, errors.Wrap(err, b.FileName)
+			}
+			return evHeader, nil
+		} else if evHeader.EventType == replication.STOP_EVENT {
+			// stopEventLen := []int{19, 23} // a StopEvent has length 19 or 23, see replication.parser_test.go
+			if evHeader.EventSize <= 23 && evHeader.Timestamp < MaxTimestamp {
+				// treat this as a valid StopEvent
+				return evHeader, nil
+			}
+		}
+	}
+	return nil, errors.Errorf("%s: get RotateEvent or StopEvent failed", b.FileName)
+}
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/binlog-parser/parse_binlog_time_test.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/binlog-parser/parse_binlog_time_test.go
new file mode 100644
index 0000000000..d326ae465b
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/binlog-parser/parse_binlog_time_test.go
@@ -0,0 +1,30 @@
+package binlog_parser
+
+import (
+	"dbm-services/common/go-pubpkg/cmutil"
+	"encoding/hex"
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGetTime(t *testing.T) {
+	binlogContent := "fe62696eb1db64630f70003604890000008d00000000000400352e362e32342d746d7973716c2d322e322e322d6c6f670000000000000000000000000000000000000000000000000000000000000013380d0008001200040404041200007100041a08000000080808020000000a0a0a19190000000000000000000000000000000d0808080a0a0a0102311b69e1dc6463047000360431000000cdee1a010000040000000000000062696e6c6f6732303030302e333530363738776eb630"
+	testFile := "/tmp/binlog_testfile.00001"
+	b, err := hex.DecodeString(binlogContent)
+	assert.Nil(t, err)
+	cmutil.ExecShellCommand(false, fmt.Sprintf("rm -f %s", testFile))
+	f, err := os.OpenFile(testFile, os.O_RDWR|os.O_CREATE, 0644)
+	assert.Nil(t, err)
+	defer f.Close()
+	_, err = f.Write(b)
+	assert.Nil(t, err)
+
+	// testFile = "./binlog20000.000002"
+	binParse, _ := NewBinlogParse("mysql", 0)
+	_, err = binParse.GetTime(testFile, true, true)
+	assert.Nil(t, err)
+	// fmt.Printf("%+v\n%+v\n", v[0], v[1])
+}
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/cst/cst.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/cst/cst.go
new file mode 100644
index 0000000000..5f4d2de22a
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/cst/cst.go
@@ -0,0 +1,21 @@
+// Package cst holds shared constants
+package cst
+
+import "time"
+
+// DBTimeLayout is the datetime layout used throughout this tool
+const DBTimeLayout = "2006-01-02 15:04:05"
+
+// ReBinlogFilename matches binlog file names
+const ReBinlogFilename = `binlog\d*\.\d+$`
+
+const (
+	// ReserveMinSizeMB minimum binlog size to keep
+	ReserveMinSizeMB = 5 * 1
+	// ReserveMinBinlogNum minimum number of binlog files to keep, so a slow slave can still fetch them
+	ReserveMinBinlogNum = 10
+	// ReduceStepSizeMB smallest deletion unit
+	ReduceStepSizeMB = 5 * 1
+	// MaxKeepDurationMin lower bound enforced on max_keep_duration
+	MaxKeepDurationMin = 10 * time.Minute
+)
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/log/log.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/log/log.go
new file mode 100644
index 0000000000..0adc2fc0a8
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/log/log.go
@@ -0,0
+1,2 @@
+// Package log wires up logging and report files for rotatebinlog
+package log
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/log/logger.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/log/logger.go
new file mode 100644
index 0000000000..42bfb9c31c
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/log/logger.go
@@ -0,0 +1,32 @@
+package log
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"os"
+	"path/filepath"
+)
+
+// InitLogger changes to the executable's directory and opens logs/rotatebinlog.log
+func InitLogger() error {
+	executable, _ := os.Executable()
+	executeDir := filepath.Dir(executable)
+	if err := os.Chdir(executeDir); err != nil {
+		os.Stderr.WriteString(err.Error())
+		os.Exit(1)
+	}
+	logFileDir := filepath.Join(executeDir, "logs")
+	_ = os.MkdirAll(logFileDir, 0755)
+	fileName := filepath.Join(logFileDir, "rotatebinlog.log")
+	fi, err := os.OpenFile(fileName, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
+	if err != nil {
+		os.Stderr.WriteString(err.Error())
+		os.Exit(1)
+		// return errors.WithMessage(err, "init logger")
+	}
+
+	extMap := map[string]string{}
+	l := logger.New(fi, true, logger.InfoLevel, extMap)
+	logger.ResetDefault(l)
+	logger.Sync()
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/log/reporter.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/log/reporter.go
new file mode 100644
index 0000000000..fcddb14827
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/log/reporter.go
@@ -0,0 +1,54 @@
+package log
+
+import (
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/common/go-pubpkg/reportlog"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/viper"
+)
+
+// RotateReporters holds the result and status report writers
+type RotateReporters struct {
+	Result reportlog.Reporter
+	Status reportlog.Reporter
+}
+
+var reporter *RotateReporters
+
+// InitReporter initializes the package-level reporter
+func InitReporter() (err error) {
+	reporter, err = NewRotateReporter()
+	return err
+}
+
+// Reporter returns the package-level reporter
+func Reporter() *RotateReporters {
+	return reporter
+}
+
+// NewRotateReporter creates reporters for binlog_result.log and binlog_status.log
+func NewRotateReporter() (*RotateReporters, error) {
+	reportDir := viper.GetString("report.filepath")
+	logOpt := reportlog.LoggerOption{
+		MaxSize:    viper.GetInt("report.log_maxsize"), // MB
+		MaxBackups: viper.GetInt("report.log_maxbackups"),
+		MaxAge:     viper.GetInt("report.log_maxage"),
+	}
+	resultReport, err := reportlog.NewReporter(reportDir, "binlog_result.log", &logOpt)
+	if err != nil {
+		logger.Warn("fail to init resultReporter:%s", err.Error())
+		// do not dereference resultReport here: it may be nil when err != nil
+		return nil, errors.WithMessage(err, "fail to init resultReporter")
+	}
+	statusReport, err := reportlog.NewReporter(reportDir, "binlog_status.log", &logOpt)
+	if err != nil {
+		logger.Warn("fail to init statusReporter:%s", err.Error())
+		// same as above: statusReport may be nil on error
+		return nil, errors.WithMessage(err, "fail to init statusReporter")
+	}
+	return &RotateReporters{
+		Result: *resultReport,
+		Status: *statusReport,
+	}, nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/dbmodel.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/dbmodel.go
new file mode 100644
index 0000000000..649e213430
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/dbmodel.go
@@ -0,0 +1,444 @@
+package models
+
+import (
+	"database/sql"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/cst"
+	"fmt"
+	"log"
+	"os"
+	"path/filepath"
+	"time"
+
+	sq "github.com/Masterminds/squirrel"
+	"github.com/golang-migrate/migrate/v4"
+	"github.com/jmoiron/sqlx"
+
"github.com/mitchellh/mapstructure" + "github.com/pkg/errors" + _ "modernc.org/sqlite" // sqlite TODO +) + +// DBO db object wrapper +type DBO struct { + Conn *sqlx.DB +} + +// DB TODO +var DB *DBO + +/* +func init() { + // //_ "github.com/glebarez/go-sqlite" + // sql.Register("go-sqlite", &sqlite.Driver{}) +} +*/ + +// InitDB godoc +// 因为下面的 migrate 库用的 sqlite driver 是 modernc.org/sqlite (gitlab.com/cznic/sqlite) +// 不好再用其它 sqlite 库,否则会报 panic: sql: Register called twice for driver sqlite +// 当然一种处理方法是重新注册一个 driver name,如上。但没必要了 +func InitDB() error { + homeDir, _ := os.Executable() + if err := os.Chdir(filepath.Dir(homeDir)); err != nil { + return err + } + dbFile := "binlog_rotate.db" + dsName := fmt.Sprintf( + `%s?_pragma=busy_timeout(5000)&_pragma=journal_mode(WAL)`, + filepath.Join(".", dbFile), + ) + if dbConn, err := sqlx.Open("sqlite", dsName); err != nil { + return err + } else { + DB = &DBO{ + Conn: dbConn, + } + } + return nil +} + +// SetupTable create table +func SetupTable() (err error) { + if err := DoMigrate(DB.Conn); err != nil && err != migrate.ErrNoChange { + log.Fatal(err) + // logger.Error(err.Error()) + return err + } + return nil +} + +// BinlogFileModel TODO +type BinlogFileModel struct { + BkBizId int `json:"bk_biz_id,omitempty" db:"bk_biz_id"` + ClusterId int `json:"cluster_id,omitempty" db:"cluster_id"` + // immutable domain, 如果是从库,也使用主域名。cluster_domain 至少作为备注信息,一般不作为查询条件 + ClusterDomain string `json:"cluster_domain" db:"cluster_domain"` + DBRole string `json:"db_role" db:"db_role"` + Host string `json:"host,omitempty" db:"host"` + Port int `json:"port,omitempty" db:"port"` + Filename string `json:"filename,omitempty" db:"filename"` + Filesize int64 `json:"size" db:"filesize"` + FileMtime string `json:"file_mtime" db:"file_mtime"` + StartTime string `json:"start_time" db:"start_time"` + StopTime string `json:"stop_time" db:"stop_time"` + BackupStatus int `json:"backup_status,omitempty" db:"backup_status"` + BackupStatusInfo string `json:"backup_status_info" db:"backup_status_info"` + BackupTaskid string `json:"backup_taskid,omitempty" db:"backup_taskid"` + *ModelAutoDatetime +} + +// String 用于打印 +func (m *BinlogFileModel) String() string { + return fmt.Sprintf( + "{filename:%s, start_time: %s, stop_time: %s, backup_status:%d, backup_taskid: %s}", + m.Filename, m.StartTime, m.StopTime, m.BackupStatus, m.BackupTaskid, + ) +} + +// ModelAutoDatetime TODO +type ModelAutoDatetime struct { + CreatedAt string `json:"created_at,omitempty" db:"created_at"` + UpdatedAt string `json:"updated_at,omitempty" db:"updated_at"` +} + +func (m *BinlogFileModel) instanceWhere() map[string]interface{} { + return map[string]interface{}{ + "bk_biz_id": m.BkBizId, + "cluster_id": m.ClusterId, + "host": m.Host, + "port": m.Port, + } +} + +func (m *BinlogFileModel) autoTime() { + if m.ModelAutoDatetime == nil { + m.ModelAutoDatetime = &ModelAutoDatetime{} + } + m.ModelAutoDatetime.autoTime() +} + +func (d *ModelAutoDatetime) autoTime() { + nowTime := time.Now() + if d.CreatedAt == "" { + d.CreatedAt = nowTime.Format(cst.DBTimeLayout) + } + d.UpdatedAt = nowTime.Format(cst.DBTimeLayout) +} + +// TableName TODO +func (m *BinlogFileModel) TableName() string { + return "binlog_rotate" +} + +// Save TODO +func (m *BinlogFileModel) Save(db *sqlx.DB) error { + m.autoTime() + sqlBuilder := sq.Insert("").Into(m.TableName()). 
+		Columns(
+			"bk_biz_id", "cluster_id", "cluster_domain", "db_role", "host", "port", "filename",
+			"filesize", "start_time", "stop_time", "file_mtime", "backup_status", "backup_taskid",
+			"created_at", "updated_at",
+		).
+		Values(
+			m.BkBizId, m.ClusterId, m.ClusterDomain, m.DBRole, m.Host, m.Port, m.Filename,
+			m.Filesize, m.StartTime, m.StopTime, m.FileMtime, m.BackupStatus, m.BackupTaskid,
+			m.CreatedAt, m.UpdatedAt,
+		)
+	sqlStr, args, err := sqlBuilder.ToSql()
+	if err != nil {
+		return err
+	}
+	if res, err := db.Exec(sqlStr, args...); err != nil {
+		return err
+	} else {
+		if num, _ := res.RowsAffected(); num != 1 {
+			return errors.Errorf("rows_affected expect 1 but got %d", num)
+		}
+	}
+	return nil
+}
+
+// BatchSave inserts multiple binlog file records in one statement
+func (m *BinlogFileModel) BatchSave(models []*BinlogFileModel, db *sqlx.DB) error {
+	if len(models) == 0 {
+		return nil
+	}
+	sqlBuilder := sq.Insert("").Into(m.TableName()).
+		Columns(
+			"bk_biz_id", "cluster_id", "cluster_domain", "db_role", "host", "port", "filename",
+			"filesize", "start_time", "stop_time", "file_mtime", "backup_status", "backup_taskid",
+			"created_at", "updated_at",
+		)
+	for _, o := range models {
+		o.autoTime()
+		sqlBuilder = sqlBuilder.Values(
+			o.BkBizId, o.ClusterId, o.ClusterDomain, o.DBRole, o.Host, o.Port, o.Filename,
+			o.Filesize, o.StartTime, o.StopTime, o.FileMtime, o.BackupStatus, o.BackupTaskid,
+			o.CreatedAt, o.UpdatedAt,
+		)
+	}
+	sqlStr, args, err := sqlBuilder.ToSql()
+	if err != nil {
+		return err
+	}
+	if _, err = db.Exec(sqlStr, args...); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Update updates a row identified by host, port and filename
+func (m *BinlogFileModel) Update(db *sqlx.DB) error {
+	m.autoTime()
+	if m.BackupStatusInfo == "" {
+		m.BackupStatusInfo = IBStatusMap[m.BackupStatus]
+	}
+	sqlBuilder := sq.Update("").Table(m.TableName()).
+		Set("backup_status", m.BackupStatus).
+		Set("backup_status_info", m.BackupStatusInfo).
+		Set("updated_at", m.UpdatedAt)
+	if m.BackupTaskid != "" {
+		sqlBuilder = sqlBuilder.Set("backup_taskid", m.BackupTaskid)
+	}
+	if m.StartTime != "" {
+		sqlBuilder = sqlBuilder.Set("start_time", m.StartTime)
+	}
+	if m.StopTime != "" {
+		sqlBuilder = sqlBuilder.Set("stop_time", m.StopTime)
+	}
+	sqlBuilder = sqlBuilder.Where(
+		"host = ? and port = ? and filename = ? 
and cluster_id=?", + m.Host, m.Port, m.Filename, m.ClusterId, + ) + + sqlStr, args, err := sqlBuilder.ToSql() + if err != nil { + return err + } + // logger.Info("update sql:%s, args:%v", sqlStr, args) + if res, err := db.Exec(sqlStr, args...); err != nil { + return err + } else { + if num, _ := res.RowsAffected(); num != 1 { + return errors.Errorf("rows_affected expect 1 but got %d", num) + } + } + return nil +} + +const ( + // IBStatusNew TODO + IBStatusNew = -2 // 文件尚未提交 + // IBStatusClientFail TODO + IBStatusClientFail = -1 // 文件上传 提交失败 + // IBStatusClientFailRetry TODO + IBStatusClientFailRetry = -3 // 客户端错误,需要重新提交 + // IBStatusWaiting TODO + IBStatusWaiting = 1 // 等待调度上传 + // IBStatusUploading TODO + IBStatusUploading = 3 // <= 3 备份上传中 + // IBStatusSuccess TODO + IBStatusSuccess = 4 + // IBStatusFileNotFound TODO + IBStatusFileNotFound = 5 + // IBStatusFail TODO + IBStatusFail = 6 // >= 6 fail + // IBStatusExpired TODO + IBStatusExpired = 44 // 文件已过期 + // FileStatusRemoved TODO + FileStatusRemoved = 201 + // FileStatusAbnormal TODO + FileStatusAbnormal = 202 + // FileStatusNoNeedUpload binlog无需上传 + FileStatusNoNeedUpload = 203 +) + +const ( + // RoleMaster TODO + RoleMaster = "master" + // RoleSlave TODO + RoleSlave = "slave" + // RoleRepeater TODO + RoleRepeater = "repeater" +) + +// IBStatusMap TODO +var IBStatusMap = map[int]string{ + IBStatusClientFail: "submit failed", + 0: "todo, submitted", // 等待确认主机信息 + IBStatusWaiting: "todo, waiting", // 等待备份 + 2: "todo, locking", // Locking + IBStatusUploading: "doing", // 正在备份中 + IBStatusSuccess: "done, success", // 备份完成 + IBStatusFileNotFound: "Fail: file not found", // 源上找不到文件 + IBStatusFail: "Fail: unknown", // 其它错误 + IBStatusExpired: "done, expired", // 备份系统文件已过期 + + FileStatusRemoved: "local removed", + FileStatusAbnormal: "file abnormal", + FileStatusNoNeedUpload: "no need to backup", +} + +// IBStatusUnfinish TODO +var IBStatusUnfinish = []int{IBStatusNew, IBStatusClientFail, 0, IBStatusWaiting, 2, IBStatusUploading} + +// DeleteExpired godoc +// 删除过期记录 +func (m *BinlogFileModel) DeleteExpired(db *sqlx.DB, mTime string) (int64, error) { + sqlBuilder := sq.Delete("").From(m.TableName()). + Where(m.instanceWhere()).Where("file_mtime < ?", mTime) + sqlStr, args, err := sqlBuilder.ToSql() + if err != nil { + return 0, err + } + if res, err := db.Exec(sqlStr, args...); err != nil { + return 0, err + } else { + num, _ := res.RowsAffected() + return num, nil + } +} + +func mapStructureDecodeJson(input interface{}, output interface{}) error { + msCfg := &mapstructure.DecoderConfig{TagName: "json", Result: output} + mapStruct, _ := mapstructure.NewDecoder(msCfg) + return mapStruct.Decode(input) +} + +// QueryUnfinished 查询待上传、上传未完成的列表 +func (m *BinlogFileModel) QueryUnfinished(db *sqlx.DB) ([]*BinlogFileModel, error) { + // 在发生切换场景,slave 变成 master,在变之前的 binlog 是不需要上传的 + return m.Query( + db, "backup_status < ? 
and db_role=?", + IBStatusSuccess, RoleMaster, + ) +} + +// QuerySuccess 查询上传成功的文件,或者不需要上传的文件 +func (m *BinlogFileModel) QuerySuccess(db *sqlx.DB) ([]*BinlogFileModel, error) { + inWhere := sq.Eq{"backup_status": []int{IBStatusSuccess, FileStatusNoNeedUpload}} + return m.Query(db, inWhere) + // return m.Query(db, "backup_status IN ?", []int{IBStatusSuccess, FileStatusNoNeedUpload}) +} + +// QueryFailed 查询上传失败、过期的文件 +func (m *BinlogFileModel) QueryFailed(db *sqlx.DB) ([]*BinlogFileModel, error) { + return m.Query(db, "backup_status > ?", IBStatusSuccess) +} + +// Query 返回 binlog files 以文件名排序 +func (m *BinlogFileModel) Query(db *sqlx.DB, pred interface{}, params ...interface{}) ([]*BinlogFileModel, error) { + var files []*BinlogFileModel + sqlBuilder := sq.Select( + "bk_biz_id", "cluster_id", "cluster_domain", "db_role", "host", "port", "filename", + "filesize", "start_time", "stop_time", "file_mtime", "backup_status", "backup_taskid", + ). + From(m.TableName()).Where(m.instanceWhere()) + sqlBuilder = sqlBuilder.Where(pred, params...).OrderBy("filename asc") + sqlStr, args, err := sqlBuilder.ToSql() + if err != nil { + return nil, err + } + logger.Info("Query sqlStr: %s, args: %v", sqlStr, args) + if err = db.Select(&files, sqlStr, args...); err != nil { + return nil, err + } + return files, nil +} + +// QueryLastFileReport 获取上一轮最后被处理的文件 +func (m *BinlogFileModel) QueryLastFileReport(db *sqlx.DB) (*BinlogFileModel, error) { + sqlBuilder := sq.Select("filename", "backup_status").From(m.TableName()). + Where(m.instanceWhere()).OrderBy("filename desc").Limit(1) + sqlStr, args, err := sqlBuilder.ToSql() + if err != nil { + return nil, err + } + logger.Info("sqlStr: %s, args: %v", sqlStr, args) + bf := &BinlogFileModel{} + if err = db.Get(bf, sqlStr, args...); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return bf, nil + } + return nil, errors.Wrap(err, "QueryLastFileReport") + } + return bf, nil +} + +// TimeInterval TODO +type TimeInterval struct { + TaskName string `json:"task_name" db:"task_name"` + Tag string `json:"tag" db:"tag"` + // 上一次运行时间 + LastRunAt string `json:"last_run_at" db:"last_run_at"` +} + +// TableName TODO +func (t *TimeInterval) TableName() string { + return "time_interval" +} + +// Update TODO +func (t *TimeInterval) Update(db *sqlx.DB) error { + nowTime := time.Now() + t.LastRunAt = nowTime.Format(cst.DBTimeLayout) // 会吧 utc 转换成当前时区 str + replace := sq.Replace("").Into(t.TableName()). + Columns("task_name", "tag", "last_run_at"). + Values(t.TaskName, t.Tag, t.LastRunAt) + sqlStr, args, err := replace.ToSql() + if err != nil { + return err + } + if _, err = db.Exec(sqlStr, args...); err != nil { + return err + } + return nil +} + +// Query TODO +func (t *TimeInterval) Query(db *sqlx.DB) (string, error) { + selectBuilder := sq.Select("last_run_at").From(t.TableName()). + Where("task_name=? 
and tag=?", t.TaskName, t.Tag) + sqlstr, args, err := selectBuilder.ToSql() + if err != nil { + return "", err + } + var tt TimeInterval + if err = db.Get(&tt, sqlstr, args...); err != nil { + return "", err + } else { + return tt.LastRunAt, nil + } +} + +// IntervalOut 查询是否超过interval +// true: 已超过 interval, 满足执行频率,可以执行 +func (t *TimeInterval) IntervalOut(db *sqlx.DB, dura time.Duration) bool { + if dura == 0 { + return false + } + lastRunAt, err := t.Query(db) + // logger.Info("purge_interval:%s, lastRunAt:%s", dura.String(), lastRunAt) + + if err == sql.ErrNoRows { + logger.Info("no time_interval item found for %s %s", t.TaskName, t.Tag) + return true + } else if err != nil { + logger.Info("time_interval item found error for %s %s: %s", t.TaskName, t.Tag, err.Error()) + return true + } else if lastRunAt == "" { + return true + } + + nowTime := time.Now() + lastRunTime, err := time.ParseInLocation(cst.DBTimeLayout, lastRunAt, time.Local) + if err != nil { + logger.Error("error time_interval: task_name=%s, tag=%s, last_run_aat", t.TaskName, t.Tag, t.LastRunAt) + return true + } + if nowTime.Sub(lastRunTime).Seconds() > dura.Seconds() { + return true + } else { + return false + } +} diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrate.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrate.go new file mode 100644 index 0000000000..67fc8fea2a --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrate.go @@ -0,0 +1,59 @@ +package models + +import ( + "dbm-services/common/go-pubpkg/logger" + "embed" + "fmt" + + "github.com/golang-migrate/migrate/v4" + "github.com/golang-migrate/migrate/v4/database/sqlite" + "github.com/golang-migrate/migrate/v4/source/iofs" + "github.com/jmoiron/sqlx" + "github.com/pkg/errors" +) + +// Migrations TODO +// +//go:embed migrations/*.sql +var Migrations embed.FS + +// DoMigrate 从 go embed 文件系统查找 migrations +func DoMigrate(db *sqlx.DB) error { + var mig *migrate.Migrate + srcDrv, err := iofs.New(Migrations, "migrations") + if err != nil { + return errors.Wrap(err, "sqlite migrations") + } + dbDrv, err := sqlite.WithInstance(db.DB, &sqlite.Config{}) + if err != nil { + return errors.Wrap(err, "sqlite migrate init dbDriver") + } + if mig, err = migrate.NewWithInstance("iofs", srcDrv, "", dbDrv); err != nil { + return errors.Wrap(err, "sqlite migrate new instance") + } else { + return mig.Up() + } +} + +// DoMigrateWithNewConn 指定 sqlite db path. 
dbFile 所在目录必须是一个合法路径 +// no changes: return nil +func DoMigrateWithNewConn(dbFile string) error { + var mig *migrate.Migrate + // from embed + if srcDrv, err := iofs.New(Migrations, "migrations"); err != nil { + return err + } else { + dbURL := fmt.Sprintf("sqlite://%s?query", dbFile) + mig, err = migrate.NewWithSourceInstance("iofs", srcDrv, dbURL) + if err != nil { + return errors.WithMessage(err, "migrate from embed") + } + if err = mig.Up(); err == nil || err == migrate.ErrNoChange { + logger.Info("migrate source from embed success with %v", err) + return nil + } else { + logger.Error("migrate source from embed failed: %s", err.Error()) + return err + } + } +} diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000001_create_table.down.sql b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000001_create_table.down.sql new file mode 100644 index 0000000000..3eb8f0f291 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000001_create_table.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS binlog_rotate; \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000001_create_table.up.sql b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000001_create_table.up.sql new file mode 100644 index 0000000000..b3c4a2c445 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000001_create_table.up.sql @@ -0,0 +1,22 @@ +CREATE TABLE IF NOT EXISTS binlog_rotate ( + bk_biz_id integer not null, + cluster_id integer not null, + cluster_domain varchar(120) default '', + db_role varchar(20) not null default '', + host varchar(64) not null, + port integer default 0, + filename varchar(64) not null, + filesize integer not null, + file_mtime varchar(32) not null, + start_time varchar(32) default '', + stop_time varchar(32) default '', + backup_status integer default -2, + backup_status_info varchar(120) not null default '', + backup_taskid varchar(64) default '', + created_at varchar(32) default '', + updated_at varchar(32) default '', + PRIMARY KEY(cluster_id,filename,host,port) +); + +CREATE INDEX idx_status + ON binlog_rotate (backup_status); \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000002_create_time_interval.down.sql b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000002_create_time_interval.down.sql new file mode 100644 index 0000000000..28494d9cfe --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000002_create_time_interval.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS time_interval; \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000002_create_time_interval.up.sql b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000002_create_time_interval.up.sql new file mode 100644 index 0000000000..37a665459a --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/migrations/000002_create_time_interval.up.sql @@ -0,0 +1,6 @@ +CREATE TABLE IF NOT EXISTS time_interval ( + task_name varchar(64) not null, + tag varchar(128) not null default '', + last_run_at varchar(32) default '', + PRIMARY KEY(task_name,tag) +); \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/models.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/models.go new file 
mode 100644 index 0000000000..0cdf50e4ee --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models/models.go @@ -0,0 +1,2 @@ +// Package models TODO +package models diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/config.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/config.go new file mode 100644 index 0000000000..27d681aa97 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/config.go @@ -0,0 +1,89 @@ +package rotate + +import ( + "log" + "os" + + "dbm-services/common/go-pubpkg/logger" + + "github.com/mitchellh/go-homedir" + "github.com/spf13/viper" +) + +// Config rotate_binlog config +type Config struct { + Public PublicCfg `json:"public" mapstructure:"public" validate:"required"` + Servers []*ServerObj `json:"servers" mapstructure:"servers"` + Report ReportCfg `json:"report" mapstructure:"report"` + Encrypt EncryptCfg `json:"encrypt" mapstructure:"encrypt"` + Crond ScheduleCfg `json:"crond" mapstructure:"crond"` + BackupClient map[string]interface{} `json:"backup_client" mapstructure:"backup_client"` +} + +// PublicCfg public config +type PublicCfg struct { + KeepPolicy string `json:"keep_policy" mapstructure:"keep_policy"` + MaxBinlogTotalSize string `json:"max_binlog_total_size" mapstructure:"max_binlog_total_size"` + MaxDiskUsedPct int `json:"max_disk_used_pct" mapstructure:"max_disk_used_pct" validate:"required"` + // 本地 binlog 最大保留时间,超过会直接删除 + MaxKeepDuration string `json:"max_keep_duration" mapstructure:"max_keep_duration"` + // 间隔多久执行一次 purge index + PurgeInterval string `json:"purge_interval" mapstructure:"purge_interval" validate:"required"` + // 每隔多久执行一次 flush binary logs + RotateInterval string `json:"rotate_interval" mapstructure:"rotate_interval" validate:"required"` + + maxBinlogTotalSizeMB int +} + +// ReportCfg report config +type ReportCfg struct { + Enable bool `json:"enable" mapstructure:"enable"` + Filepath string `json:"filepath" mapstructure:"filepath"` + LogMaxsize int `json:"log_maxsize" mapstructure:"log_maxsize"` + LogMaxbackups int `json:"log_maxbackups" mapstructure:"log_maxbackups"` + LogMaxage int `json:"log_maxage" mapstructure:"log_maxage"` +} + +// EncryptCfg encrypt config +type EncryptCfg struct { + Enable bool `json:"enable" mapstructure:"enable"` + KeyPrefix string `json:"key_prefix" mapstructure:"key_prefix"` +} + +// ScheduleCfg schedule config +type ScheduleCfg struct { + ApiUrl string `json:"api_url" mapstructure:"api_url" validate:"required"` + ItemName string `json:"item_name" mapstructure:"item_name" validate:"required"` + Schedule string `json:"schedule" mapstructure:"schedule" validate:"required"` + Command string `json:"command" mapstructure:"command"` +} + +// InitConfig 读取 config.yaml 配置 +func InitConfig(confFile string) (*Config, error) { + viper.SetConfigType("yaml") + if confFile != "" { + viper.SetConfigFile(confFile) + } else { + viper.SetConfigName("config") + viper.AddConfigPath(".") // 搜索路径可以设置多个,viper 会根据设置顺序依次查找 + home, _ := homedir.Dir() + viper.AddConfigPath(home) + } + if err := viper.ReadInConfig(); err != nil { + log.Fatalf("read config failed: %v", err) + } + var configObj = &Config{} + configBytes, err := os.ReadFile(confFile) + if err != nil { + logger.Error(err.Error()) + return nil, err + } else { + logger.Debug("configs: %s", string(configBytes)) + } + if err = viper.Unmarshal(configObj); err != nil { + // if err = yaml.Unmarshal(configBytes, configObj); err != nil { + return nil, err + } + logger.Debug("configObj: %+v", configObj) + 
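+	// An illustrative config.yaml sketch that satisfies the validate:"required"
+	// tags above (values are invented for the example, not shipped defaults):
+	//   public:
+	//     max_disk_used_pct: 80
+	//     max_binlog_total_size: 200gb
+	//     keep_policy: most
+	//     purge_interval: 10m
+	//     rotate_interval: 5m
+	//   crond:
+	//     api_url: http://127.0.0.1:9999
+	//     item_name: mysql-rotatebinlog
+	//     schedule: '*/5 * * * *'
+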
return configObj, nil +} diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/main.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/main.go new file mode 100644 index 0000000000..bc48010a18 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/main.go @@ -0,0 +1,281 @@ +package rotate + +import ( + "dbm-services/common/go-pubpkg/cmutil" + "dbm-services/common/go-pubpkg/logger" + "dbm-services/common/go-pubpkg/validate" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + ma "dbm-services/mysql/db-tools/mysql-crond/api" + "dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup" + "dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/log" + "dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models" + "dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/util" + "fmt" + "io/ioutil" + "os" + "os/signal" + "path/filepath" + "strings" + "syscall" + + "github.com/ghodss/yaml" + "github.com/pkg/errors" + "github.com/spf13/cast" + "github.com/spf13/viper" +) + +// RotateBinlogComp TODO +type RotateBinlogComp struct { + Config string `json:"config"` + configObj *Config +} + +// Example TODO +func (c *RotateBinlogComp) Example() interface{} { + return RotateBinlogComp{ + Config: "config.yaml", + } +} + +// Init TODO +func (c *RotateBinlogComp) Init() (err error) { + + envPATH := os.Getenv("PATH") + if envPATH == "" { + envPATH = "." + } + envPATHs := []string{envPATH} + envPATHs = append(envPATHs, "/bin:/usr/bin:/usr/local/bin:/sbin:/usr/sbin:/usr/local/sbin") + _ = os.Setenv("PATH", strings.Join(envPATHs, ":")) + return nil +} + +// Start TODO +func (c *RotateBinlogComp) Start() (err error) { + if err = log.InitLogger(); err != nil { + return err + } + if c.configObj, err = InitConfig(c.Config); err != nil { + return err + } + if err = log.InitReporter(); err != nil { + return err + } + if err = models.InitDB(); err != nil { + return err + } + setupCloseHandler() + defer models.DB.Conn.Close() + if err = models.SetupTable(); err != nil { + return err + } + var backupClient backup.BackupClient + if backupClient, err = backup.InitBackupClient(); err != nil { + return err + } + + var servers []*ServerObj + if err = viper.UnmarshalKey("servers", &servers); err != nil { + return errors.Wrap(err, "parse config servers") + } else { + logger.Info("config servers: %+v", servers) + } + for _, inst := range servers { + inst.backupClient = backupClient + inst.instance = &native.InsObject{ + Host: inst.Host, + Port: inst.Port, + User: inst.Username, + Pwd: inst.Password, + Socket: inst.Socket, + } + if err = validate.GoValidateStruct(inst, true, false); err != nil { + logger.Error("validate instance failed: %s", inst) + continue + } + if err = inst.Rotate(); err != nil { + logger.Error("fail to rotate_binlog: %d, err: %+v", inst.Port, err) + continue + } + } + if err = c.decideSizeToFree(servers); err != nil { + return err + } + for _, inst := range servers { + if err = inst.FreeSpace(); err != nil { + logger.Error(err.Error()) + } + if err = inst.rotate.Backup(); err != nil { + logger.Error("%+v", err) + } + } + return nil +} + +// RemoveConfig 删除某个 binlog 实例的 rotate 配置 +func (c *RotateBinlogComp) RemoveConfig(ports []string) (err error) { + if c.configObj, err = InitConfig(c.Config); err != nil { + return err + } + for _, binlogPort := range ports { + port := cast.ToInt(binlogPort) + newServers := make([]*ServerObj, 0) + var portFound bool + for _, binlogInst := range c.configObj.Servers { + if binlogInst.Port == port { + portFound = true + } else { + 
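+					// keep server entries whose port is not being removed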
newServers = append(newServers, binlogInst) + } + } + if !portFound { + logger.Warn("port instance %d not found when running removeConfig", port) + } + c.configObj.Servers = newServers + } + yamlData, err := yaml.Marshal(c.configObj) // use json tag + if err != nil { + return err + } + cfgFile := c.Config // viper.ConfigFileUsed() + if err = cmutil.FileExistsErr(cfgFile); err != nil { + return err + } + if err := ioutil.WriteFile(cfgFile, yamlData, 0644); err != nil { + return err + } + return nil +} + +// HandleScheduler 处理调度选项,返回 handled=true 代表 add/del 选项工作中 +func (c *RotateBinlogComp) HandleScheduler(addSchedule, delSchedule bool) (handled bool, err error) { + if err = log.InitLogger(); err != nil { + return false, err + } + if c.configObj, err = InitConfig(c.Config); err != nil { + return handled, err + } + crondManager := ma.NewManager(viper.GetString("crond.api_url")) + if delSchedule == true { + handled = true + _, err = crondManager.Delete(viper.GetString("crond.item_name"), true) + if err != nil { + return handled, err + } + return handled, nil + } + if addSchedule { + handled = true + executable, _ := os.Executable() + executeDir := filepath.Dir(executable) + jobItem := ma.JobDefine{ + Name: viper.GetString("crond.item_name"), + // Command: fmt.Sprintf(`cd %s && %s`, executeDir, executable), + Command: executable, + WorkDir: executeDir, + Args: []string{"-c", "config.yaml", "1>>logs/main.log 2>&1"}, + Schedule: viper.GetString("crond.schedule"), + Creator: "sys", + Enable: true, + } + logger.Info("adding job_item to crond: %+v", jobItem) + _, err = crondManager.CreateOrReplace(jobItem, true) + if err != nil { + return handled, err + } + return handled, nil + } + return handled, nil +} + +// decideSizeToFree 根据 binlog 所在分区剩余空间 与 binlog 大小,算出需要清理的 binlog size +// 再挑选出可以删除的 binlog 进行删除 +// 计算的结果在 i.rotate.sizeToFreeMB 里,表示该实例需要释放的binlog空间 +func (c *RotateBinlogComp) decideSizeToFree(servers []*ServerObj) error { + keepPolicy := viper.GetString("public.keep_policy") + if keepPolicy == KeepPolicyLeast { + for _, inst := range servers { + inst.rotate.sizeToFreeMB = PolicyLeastMaxSize + } + logger.Info("keep_policy=%s will try to delete binlog files as much as possible", keepPolicy) + return nil + } else if keepPolicy == "" || keepPolicy == KeepPolicyMost { + logger.Info("keep_policy=%s will calculate size to delete for every binlog instance", keepPolicy) + } else { + return fmt.Errorf("unknown keep_policy %s", keepPolicy) + } + + var diskPartInst = make(map[string][]*ServerObj) // 每个挂载目录上,放了哪些binlog实例以及对应的binlog空间 + var diskParts = make(map[string]*util.DiskDfResult) // 目录对应的空间信息 + for _, inst := range servers { + diskPart, err := util.GetDiskPartitionWithDir(inst.binlogDir) + if err != nil { + logger.Warn("fail to get binlog_dir %s disk partition info", inst.binlogDir) + continue + } + mkey := diskPart.MountedOn + diskPartInst[mkey] = append(diskPartInst[mkey], inst) + diskParts[mkey] = diskPart + } + logger.Info("binlogDir location: %v, binlogDir Info:%v", diskPartInst, diskParts) + // 根据 binlog 目录大小来决定删除空间 + var maxBinlogSizeAllowedMB int64 = 1 + if maxBinlogSizeAllowed, err := cmutil.ViperGetSizeInBytesE("public.max_binlog_total_size"); err != nil { + return err + } else { + maxBinlogSizeAllowedMB = maxBinlogSizeAllowed / 1024 / 1024 + } + logger.Info( + "viper config:%s, parsed_mb:%d", + viper.GetString("public.max_binlog_total_size"), + maxBinlogSizeAllowedMB, + ) + + for diskPartName, diskPart := range diskParts { + var instBinlogSizeMB = make(map[int]int64) // binlog 端口对应的 
binlog大小信息 + for _, inst := range diskPartInst[diskPartName] { + binlogSizeMB, err := util.GetDirectorySizeMB(inst.binlogDir) + if err != nil { + return err + } + inst.rotate.binlogSizeMB = binlogSizeMB + instBinlogSizeMB[inst.Port] = binlogSizeMB + logger.Info("%d binlogDirSize:%d MB, disk %+v", inst.Port, binlogSizeMB, diskPart) + + if binlogSizeMB > maxBinlogSizeAllowedMB { + sizeToFree := binlogSizeMB - maxBinlogSizeAllowedMB + inst.rotate.sizeToFreeMB = sizeToFree + logger.Info("plan to free space: %+v", inst.rotate) + } + } + + // 根据磁盘使用率来决定删除空间 + maxDiskUsedPctAllowed := cast.ToFloat32(viper.GetFloat64("public.max_disk_used_pct") / float64(100)) + maxDiskUsedAllowedMB := cast.ToInt64(maxDiskUsedPctAllowed * float32(diskPart.TotalSizeMB)) + if diskPart.UsedPct < maxDiskUsedPctAllowed { + continue + } + diskPart.SizeToFreeMB = diskPart.UsedMB - maxDiskUsedAllowedMB + portSizeToFreeMB := util.DecideSizeToRemove(instBinlogSizeMB, diskPart.SizeToFreeMB) + logger.Info("plan to free space MB: %+v", portSizeToFreeMB) + for _, inst := range diskPartInst[diskPartName] { + if portSizeToFreeMB[inst.Port] > inst.rotate.sizeToFreeMB { + inst.rotate.sizeToFreeMB = portSizeToFreeMB[inst.Port] + logger.Info("plan to free space fixed: %+v", inst.rotate) + } + } + } + return nil +} + +// setupCloseHandler 尽可能保证正常关闭 db +func setupCloseHandler() { + c := make(chan os.Signal, 2) + signal.Notify(c, os.Interrupt, syscall.SIGTERM) + go func() { + <-c + fmt.Println("\r- Ctrl+C pressed in Terminal") + models.DB.Conn.Close() + os.Exit(0) + }() +} diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/rotate.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/rotate.go new file mode 100644 index 0000000000..43d7e149c6 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/rotate.go @@ -0,0 +1,366 @@ +// Package rotate TODO +package rotate + +import ( + "dbm-services/common/go-pubpkg/cmutil" + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup" + binlog_parser "dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/binlog-parser" + "dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/cst" + "dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/log" + "dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/pkg/errors" +) + +// BinlogRotateConfig TODO +type BinlogRotateConfig struct { +} + +// BinlogRotate TODO +type BinlogRotate struct { + backupClient backup.BackupClient + binlogDir string + binlogInst models.BinlogFileModel + sizeToFreeMB int64 // MB + binlogSizeMB int64 // MB + purgeInterval time.Duration + rotateInterval time.Duration + maxKeepDuration time.Duration +} + +// String 用于打印 +func (r *BinlogRotate) String() string { + return fmt.Sprintf( + "{binlogDir:%s, sizeToFreeMB:%dMB, binlogSizeMB:%dMB, purgeInterval:%s, rotateInterval:%s maxKeepDuration:%s}", + r.binlogDir, + r.sizeToFreeMB, + r.binlogSizeMB, + r.purgeInterval.String(), + r.rotateInterval.String(), + r.maxKeepDuration.String(), + ) +} + +// BinlogReport TODO +type BinlogReport struct { + BKBizID int `json:"bk_biz_id"` + ClusterId int `json:"cluster_id"` + ClusterDomain string `json:"cluster_domain"` + Host string `json:"host"` + Port int `json:"port"` + FileName string `json:"file_name"` + FileSize int64 `json:"file_size"` + FileMtime string `json:"file_mtime"` + MD5 string `json:"md5"` + StartTime string `json:"start_time"` + StopTime string `json:"stop_time"` + SourceDir string 
`json:"source_dir"` + FileTag string `json:"file_tag"` + + TaskId string `json:"task_id"` +} + +// BinlogBackupStatus TODO +type BinlogBackupStatus struct { + Status string `json:"status"` + Desc string `json:"desc"` + ExpireTime string `json:"expire_time"` + SubmitTime string `json:"submit_time"` + CompleteTime string `json:"complete_time"` +} + +// BinlogStatusReport TODO +type BinlogStatusReport struct { + BinlogReport + BinlogBackupStatus +} + +const ( + // FileSubmitted 备份提交成功 + FileSubmitted = "submitted" + // FileWaiting 等待备份系统调度上传 + FileWaiting = "waiting" + // FileUploading 上传中、拉取中 + FileUploading = "uploading" + // FileUploaded 备份上传成功 + FileUploaded = "uploaded" + // FileFailed 备份失败 + FileFailed = "fail" + // FileCancel 取消上传、取消检查该文件状态 + FileCancel = "cancel" + // KeepPolicyMost 尽可能多的保留binlog + KeepPolicyMost = "most" + // KeepPolicyLeast 尽可能少的保留binlog + KeepPolicyLeast = "least" + // PolicyLeastMaxSize keep_policy=least 尽可能少的保留 binlog 时,使用一个特殊常量代表需要删除的binlog大小 + PolicyLeastMaxSize int64 = 99999999 +) + +// FlushLogs TODO +func (i *ServerObj) FlushLogs() error { + var err error + _, binlogFilesObj, err := i.getBinlogFilesLocal() // todo 精简参数,是否需要改成 SHOW BINARY LOGS? + if err != nil { + return err + } + i.binlogFiles = binlogFilesObj + _ = i.RemoveMaxKeepDuration() // ignore error + + // 最后一个文件是当前正在写入的,获取倒数第二个文件的结束时间,在 5m 内,说明 mysqld 自己已经做了切换 + if len(binlogFilesObj) >= 1 { + fileName := filepath.Join(i.binlogDir, binlogFilesObj[len(binlogFilesObj)-1].Filename) + bp, _ := binlog_parser.NewBinlogParse("", 0) + events, err := bp.GetTime(fileName, true, false) // 只获取start_time + if err != nil { + logger.Warn(err.Error()) + _ = i.flushLogs() + } else { + lastRotateTime, _ := time.ParseInLocation(cst.DBTimeLayout, events[0].EventTime, time.Local) + lastRotateSince := time.Now().Sub(lastRotateTime).Seconds() - i.rotate.rotateInterval.Seconds() + if lastRotateSince > -5 { + // 留 5s 的误差。比如rotateInterval=300s, 那么实际等到 295s 也可以进行rotate,不然等到下一轮还需要 300s + _ = i.flushLogs() + } + } + } else { + _ = i.flushLogs() + } + + var lastFileBefore *models.BinlogFileModel // 之前登记处理过的最后一个文件 + if lastFileBefore, err = i.rotate.binlogInst.QueryLastFileReport(models.DB.Conn); err != nil { + return err + } + logger.Info("last binlog file processed: %s", lastFileBefore) + return i.RegisterBinlog(lastFileBefore.Filename) +} + +func (i *ServerObj) flushLogs() error { + // >= 5.5.0: flush binary logs + // < 5.5.0: flush logs + // //ti := TimeInterval{TaskName: "flush_binary_logs", Tag: cast.ToString(i.Port)} + // //if ti.IntervalOut(DB.Conn, i.rotate.rotateInterval) { + logger.Info("flush binary logs for %d", i.Port) + if _, err := i.dbWorker.ExecWithTimeout(5*time.Second, "FLUSH BINARY LOGS"); err != nil { + return errors.Wrap(err, "flush logs") + } else { + // if err = ti.Update(DB.Conn); err != nil { + // logger.Error(err.Error()) + // } + } + // } + return nil +} + +// RemoveMaxKeepDuration 超过最大保留时间的 binlogFiles 直接删除 +// 同时也会删除 sqlite 里面的元数据 +func (i *ServerObj) RemoveMaxKeepDuration() error { + if i.rotate.maxKeepDuration == 0 { + return nil + } + nowTime := time.Now() + fileTimeExpire := nowTime.Add(-1 * i.rotate.maxKeepDuration).Format(cst.DBTimeLayout) + + num := len(i.binlogFiles) + var binlogFilesNew []*BinlogFile + var binlogFilesDel []*BinlogFile + for j, f := range i.binlogFiles { + if f.Mtime < fileTimeExpire { + binlogFilesDel = append(binlogFilesDel, f) + logger.Info("%s [%s]has exceed max_keep_duration=%s", f.Filename, f.Mtime, i.rotate.maxKeepDuration) + if num-j-cst.ReserveMinBinlogNum < 0 { + 
+				binlogFilesNew = append(binlogFilesNew, f)
+				// logger.Info("RemoveMaxKeepDuration keep ReserveMinBinlogNum=%d", ReserveMinBinlogNum)
+				continue
+			}
+			if err := os.Remove(filepath.Join(i.binlogDir, f.Filename)); err != nil {
+				logger.Error(err.Error())
+			}
+		} else {
+			binlogFilesNew = append(binlogFilesNew, f)
+		}
+	}
+	if len(binlogFilesDel) > 0 {
+		if _, err := i.rotate.binlogInst.DeleteExpired(models.DB.Conn, fileTimeExpire); err != nil {
+			logger.Error("delete expired file from sqlite: %s", fileTimeExpire)
+		}
+	}
+
+	i.binlogFiles = binlogFilesNew
+	return nil
+}
+
+// RegisterBinlog stores newly produced binlogs into the local sqlite db.
+// lastFileBefore is the last file handled in the previous round.
+// The instance's newest binlog is still being written to, so it is not registered.
+func (i *ServerObj) RegisterBinlog(lastFileBefore string) error {
+	fLen := len(i.binlogFiles)
+	var filesModel []*models.BinlogFileModel
+	for j, fileObj := range i.binlogFiles {
+		if fileObj.Filename <= lastFileBefore || j == fLen-1 { // skip the last binlog, it is in use
+			continue
+		}
+		backupStatus := models.IBStatusNew
+		backupStatusInfo := ""
+		bp, _ := binlog_parser.NewBinlogParse("", 0)
+		fileName := filepath.Join(i.binlogDir, fileObj.Filename)
+		events, err := bp.GetTime(fileName, true, true)
+		if err != nil {
+			logger.Warn(err.Error())
+			backupStatus = models.IBStatusClientFail
+			backupStatusInfo = err.Error()
+		}
+		if i.Tags.DBRole == models.RoleSlave { // slaves do not back up binlogs
+			backupStatus = models.FileStatusNoNeedUpload
+		}
+		startTime, stopTime := "", ""
+		if len(events) >= 2 { // only index events when GetTime succeeded: [0]=start, [1]=stop
+			startTime = events[0].EventTime
+			stopTime = events[1].EventTime
+		}
+		ff := &models.BinlogFileModel{
+			BkBizId:          i.Tags.BkBizId,
+			ClusterId:        i.Tags.ClusterId,
+			ClusterDomain:    i.Tags.ClusterDomain,
+			DBRole:           i.Tags.DBRole,
+			Host:             i.Host,
+			Port:             i.Port,
+			Filename:         fileObj.Filename,
+			Filesize:         fileObj.Size,
+			FileMtime:        fileObj.Mtime,
+			BackupStatus:     backupStatus,
+			BackupStatusInfo: backupStatusInfo,
+			StartTime:        startTime,
+			StopTime:         stopTime,
+		}
+		filesModel = append(filesModel, ff)
+	}
+	if err := i.rotate.binlogInst.BatchSave(filesModel, models.DB.Conn); err != nil {
+		return err
+	} else {
+		logger.Info("binlog files to process: %+v", filesModel)
+	}
+	return nil
+}
+
+// Backup submits binlogs to the backup system.
+// The next run checks the status of tasks submitted in this and earlier rounds.
+func (r *BinlogRotate) Backup() error {
+	if r.backupClient == nil {
+		logger.Warn("no backup_client found. ignoring backup")
+		return nil
+	}
+	files, err := r.binlogInst.QueryUnfinished(models.DB.Conn)
+	if err != nil {
+		return errors.Wrap(err, "query unfinished")
+	}
+	logger.Info("port %d has %d unfinished binlog files", r.binlogInst.Port, len(files))
+	for _, f := range files {
+		// files not yet submitted: submit an upload task
+		if f.BackupStatus == models.IBStatusNew || f.BackupStatus == models.IBStatusClientFail {
+			filename := filepath.Join(r.binlogDir, f.Filename)
+			// the binlog file needs its permissions relaxed before upload
+			if err := os.Chmod(filename, 0655); err != nil {
+				return errors.Wrap(err, "chmod 0655")
+			}
+			if f.StartTime == "" || f.StopTime == "" {
+				bp, _ := binlog_parser.NewBinlogParse("", 0)
+				events, err := bp.GetTime(filename, true, true)
+				if err != nil {
+					logger.Warn(err.Error())
+					// f.BackupStatus = FileStatusAbnormal
+				} else {
+					f.StartTime = events[0].EventTime
+					f.StopTime = events[1].EventTime // [1] is the stop event, [0] the start event
+				}
+			}
+			if taskid, err := r.backupClient.Upload(filename); err != nil {
+				logger.Error("fail to upload file %s. 
err: %v", filename, err.Error()) + f.BackupStatus = models.IBStatusClientFail + f.BackupStatusInfo = err.Error() + } else { + f.BackupTaskid = taskid + f.BackupStatus = models.IBStatusWaiting + } + } else { // 等待上传的,查询上传结果 + if f.BackupTaskid == "" { + logger.Error("binlog backup_taskid should not empty %s", f.Filename) + f.BackupStatus = models.IBStatusFail + } else { + taskStatus, err := r.backupClient.Query(f.BackupTaskid) + if err != nil { + return err + } + if taskStatus == f.BackupStatus { // 上传状态没有进展 + continue + } else { + f.BackupStatus = taskStatus + if taskStatus == models.IBStatusSuccess { + log.Reporter().Result.Println(f) + } + log.Reporter().Status.Println(f) + } + } + } + if err = f.Update(models.DB.Conn); err != nil { + return err + } + } + return nil +} + +// Remove 删除本地 binlog +// 将本地 done,success 的超过阈值的 binlog 文件删除,更新 binlog 列表状态 +// 超过 max_keep_days 的强制删除,单位 bytes +// sizeBytesToFree=999999999 代表尽可能删除 +func (r *BinlogRotate) Remove(sizeBytesToFree int64) error { + if sizeBytesToFree == 0 { + logger.Info("no need to free %d binlog size", r.binlogInst.Port) + return nil + } + binlogFiles, err := r.binlogInst.QuerySuccess(models.DB.Conn) + if err != nil { + return err + } + // sort.Slice(binlogFiles, func(i, j int) bool { return binlogFiles[i].Filename < binlogFiles[j].Filename }) // 升序 + var sizeDeleted int64 + var fileDeleted int + stopFile := "" + num := len(binlogFiles) + for i, f := range binlogFiles { + if num-i-cst.ReserveMinBinlogNum < 0 { + logger.Info("rotate binlog keep ReserveMinBinlogNum=%d", cst.ReserveMinBinlogNum) + break + } + fileFullPath := filepath.Join(r.binlogDir, f.Filename) + logger.Info("remove file: %s", fileFullPath) + if err = os.Remove(fileFullPath); err != nil { + logger.Error(err.Error()) + // return err + } + if !cmutil.FileExists(fileFullPath) { + f.BackupStatus = models.FileStatusRemoved + if err = f.Update(models.DB.Conn); err != nil { + logger.Error(err.Error()) + // return err + } + sizeDeleted += f.Filesize + fileDeleted += 1 + stopFile = f.Filename + if sizeDeleted >= sizeBytesToFree { + break + } + } + } + if sizeDeleted < sizeBytesToFree && sizeBytesToFree != PolicyLeastMaxSize*1024*1024 { + logger.Warn( + "disk space freed does not satisfy needed after delete all allowed binlog files. "+ + "sizeDeleted:%d sizeBytesToFree:%d", + sizeDeleted, sizeBytesToFree, + ) + // todo 可能需要开始删除 备份未完成的 binlog + } + logger.Info("sizeBytesDeleted:%d, fileDeleted:%d. 
binlog lastDeleted: %s", sizeDeleted, fileDeleted, stopFile) + return nil +} diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/rotate_binlog.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/rotate_binlog.go new file mode 100644 index 0000000000..ef1d808a11 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/rotate/rotate_binlog.go @@ -0,0 +1,210 @@ +package rotate + +import ( + "dbm-services/common/go-pubpkg/cmutil" + "dbm-services/common/go-pubpkg/logger" + "dbm-services/common/go-pubpkg/timeutil" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/backup" + "dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/cst" + "dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/models" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "sort" + "strings" + "time" + + "github.com/pkg/errors" + "github.com/spf13/cast" +) + +// ServerObj rotate_binlog.yaml servers 配置 +type ServerObj struct { + Host string `json:"host" mapstructure:"host" validate:"required"` // 当前实例的主机地址 + Port int `json:"port" mapstructure:"port" validate:"required"` // 当前实例的端口 + Username string `json:"username,omitempty" mapstructure:"username"` // 连接当前实例的User + Password string `json:"password,omitempty" mapstructure:"password"` // 连接当前实例的User Pwd + Socket string `json:"socket,omitempty" mapstructure:"socket"` // 连接socket + Tags InstanceMeta `json:"tags" mapstructure:"tags" validate:"required"` + + dbWorker *native.DbWorker + binlogDir string + // 已按文件名升序排序 + binlogFiles []*BinlogFile + backupClient backup.BackupClient + instance *native.InsObject + rotate *BinlogRotate +} + +// InstanceMeta servers.tags +type InstanceMeta struct { + BkBizId int `json:"bk_biz_id" mapstructure:"bk_biz_id"` + ClusterDomain string `json:"cluster_domain" mapstructure:"cluster_domain"` + ClusterId int `json:"cluster_id" mapstructure:"cluster_id"` + DBRole string `json:"db_role" mapstructure:"db_role" validate:"required" enums:"master,slave"` +} + +// String 用于打印 +func (i *ServerObj) String() string { + return fmt.Sprintf( + "{Host:%s, Port:%d, Username:%s Tags.ClusterDomain:%s Tags.DBRole:%s}", + i.Host, i.Port, i.Username, i.Tags.ClusterDomain, i.Tags.DBRole, + ) +} + +// Rotate 实例 rotate 主逻辑 +// 如果返回有错误,该实例不参与后续binlog处理。不返回错误 nil 时,ServerObj.rotate 对象有效 +func (i *ServerObj) Rotate() (err error) { + maxKeepDuration := timeutil.ViperGetDuration("public.max_keep_duration") + if maxKeepDuration < cst.MaxKeepDurationMin { + logger.Warn("max_keep_duration=%s is too small, set to %s", maxKeepDuration, cst.MaxKeepDurationMin) + maxKeepDuration = cst.MaxKeepDurationMin + } + rotate := &BinlogRotate{ + // binlogDir: i.binlogDir, + backupClient: i.backupClient, + binlogInst: models.BinlogFileModel{ + BkBizId: i.Tags.BkBizId, + ClusterId: i.Tags.ClusterId, + Host: i.Host, + Port: i.Port, + }, + purgeInterval: timeutil.ViperGetDuration("public.purge_interval"), + rotateInterval: timeutil.ViperGetDuration("public.rotate_interval"), + maxKeepDuration: maxKeepDuration, + } + i.rotate = rotate + logger.Info("rotate obj: %+v", rotate) + if i.dbWorker, err = i.instance.Conn(); err != nil { + return err + } + if i.binlogDir, _, err = i.dbWorker.GetBinlogDir(i.Port); err != nil { + return err + } else { + i.rotate.binlogDir = i.binlogDir + } + if err := os.Chmod(i.binlogDir, 0755); err != nil { + return errors.Wrap(err, "chmod 655") + } + + if err = i.FlushLogs(); err != nil { + logger.Error(err.Error()) + logger.Error("%+v", err) + } + return nil +} + +// 
FreeSpace 实例 rotate 主逻辑 +// Remove, Backup, Purge +func (i *ServerObj) FreeSpace() (err error) { + sizeToFreeBytes := i.rotate.sizeToFreeMB * 1024 * 1024 // MB to bytes + logger.Info("plan to free port %d binlog bytes %d", i.Port, sizeToFreeBytes) + if err = i.rotate.Remove(sizeToFreeBytes); err != nil { + logger.Error("%+v", err) + } + if err = i.PurgeIndex(); err != nil { + logger.Error("%+v", err) + } + defer i.dbWorker.Stop() + return nil +} + +// GetEarliestAliveBinlog TODO +func (i *ServerObj) GetEarliestAliveBinlog() (string, error) { + if len(i.binlogFiles) == 0 { + return "", errors.Errorf("no binlog files found from binlog_dir=%s", i.binlogDir) + } + for _, f := range i.binlogFiles { + filePath := filepath.Join(i.binlogDir, f.Filename) + if cmutil.FileExists(filePath) { + return f.Filename, nil + } + } + return "", errors.Errorf("cannot get earliest binlog file from %s", i.binlogDir) +} + +// BinlogFile TODO +type BinlogFile struct { + Filename string + Mtime string + Size int64 +} + +// String 用于打印 +func (f *BinlogFile) String() string { + return fmt.Sprintf("{Filename:%s Mtime:%s Size:%d}", f.Filename, f.Mtime, f.Size) +} + +// getBinlogFilesLocal 获取实例的 本地 binlog 列表,会按文件名排序 +func (i *ServerObj) getBinlogFilesLocal() (string, []*BinlogFile, error) { + // 临时关闭 binlog 删除 + files, err := ioutil.ReadDir(i.binlogDir) // 已经按文件名排序 + if err != nil { + return "", nil, errors.Wrap(err, "read binlog dir") + } + reFilename := regexp.MustCompile(cst.ReBinlogFilename) + for _, fi := range files { + if !reFilename.MatchString(fi.Name()) { + if !strings.HasSuffix(fi.Name(), ".index") { + logger.Warn("illegal binlog file name %s", fi.Name()) + } + continue + } else { + i.binlogFiles = append( + i.binlogFiles, &BinlogFile{ + Filename: fi.Name(), + Mtime: fi.ModTime().Format(cst.DBTimeLayout), + Size: fi.Size(), + }, + ) + } + } + // 确认排序 + sort.Slice( + i.binlogFiles, + func(m, n int) bool { return i.binlogFiles[m].Filename < i.binlogFiles[n].Filename }, + ) // 升序 + logger.Info("getBinlogFilesLocal: %+v", i.binlogFiles) + return i.binlogDir, i.binlogFiles, nil +} + +// PurgeIndex purge binlog files that has been removed +func (i *ServerObj) PurgeIndex() error { + timeIntvl := models.TimeInterval{TaskName: "purge_index", Tag: cast.ToString(i.Port)} + if !timeIntvl.IntervalOut(models.DB.Conn, i.rotate.purgeInterval) { + return nil + } + fileName, err := i.GetEarliestAliveBinlog() + if err != nil { + return err + } + if err := i.PurgeLogs(fileName, ""); err != nil { + return err + } + if err = timeIntvl.Update(models.DB.Conn); err != nil { + logger.Error(err.Error()) + } + return nil +} + +// PurgeLogs godoc +func (i *ServerObj) PurgeLogs(toFile, beforeTime string) error { + if toFile != "" { + purgeCmd := fmt.Sprintf("PURGE BINARY LOGS TO '%s'", toFile) + logger.Info("purgeCmd: %s", purgeCmd) + if _, err := i.dbWorker.ExecWithTimeout(60*time.Second, purgeCmd); err != nil { + return err + } + } + if beforeTime != "" { + purgeCmd := fmt.Sprintf("PURGE BINARY LOGS BEFORE '%s'", beforeTime) + logger.Info("purgeCmd: %s", purgeCmd) + if _, err := i.dbWorker.ExecWithTimeout(60*time.Second, purgeCmd); err != nil { + return err + } + } + return nil +} diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/util/balance.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/util/balance.go new file mode 100644 index 0000000000..11caeaab16 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/util/balance.go @@ -0,0 +1,48 @@ +package util + +import ( + 
"dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/cst" +) + +// DecideSizeToRemove 尽量保证每个 port 的 binlog 大小接近 +// 输入单位 MB +func DecideSizeToRemove(ports map[int]int64, sizeToFree int64) map[int]int64 { + sizeDeleted := int64(0) // 实际计划删除的总单位数 + var portSizeToFree = make(map[int]int64) // 实际计划的每个port删除的 MB + if sizeToFree <= 0 { + return portSizeToFree + } + for { + port := reduceFromMax(ports, 1) + if port == 0 { + logger.Warn("没有找到完全满足删除条件的实例. portSizeToFree:%+v", portSizeToFree) + return portSizeToFree + } + sizeDeleted += 1 // 每次删除一个单位 + portSizeToFree[port] += cst.ReduceStepSizeMB * 1 + + if sizeDeleted*cst.ReduceStepSizeMB >= sizeToFree { + break + } + } + logger.Info("规划出每个实例删除binlog大小,portSizeToFree MB:%+v", portSizeToFree) + return portSizeToFree +} + +// reduceFromMax ports代表当前实例的binlog大小 +func reduceFromMax(ports map[int]int64, incr int) (port int) { + maxSize := int64(0) + var maxSizePort int = 0 + for p, s := range ports { + if s > maxSize && s >= cst.ReserveMinSizeMB+cst.ReduceStepSizeMB { + maxSize = s + maxSizePort = p + } + } + if ports[maxSizePort]-int64(incr) >= 0 { + return maxSizePort + } else { + return 0 + } +} diff --git a/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/util/util.go b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/util/util.go new file mode 100644 index 0000000000..e8566f81e8 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/util/util.go @@ -0,0 +1,149 @@ +// Package util TODO +package util + +import ( + "bytes" + "dbm-services/common/go-pubpkg/cmutil" + "dbm-services/common/go-pubpkg/logger" + "fmt" + "os" + "os/exec" + "regexp" + "strconv" + "strings" + + "github.com/pkg/errors" + "github.com/spf13/cast" +) + +// DiskDfResult disk df output +type DiskDfResult struct { + Filesystem string + TotalSizeMB int64 + UsedMB int64 + AvailableMB int64 + Use string + MountedOn string + + UsedPct float32 + TotalSizeMBReal int64 + + SizeToFreeMB int64 +} + +// String 用于打印 +func (d DiskDfResult) String() string { + return fmt.Sprintf( + "{Filesystem:%s, MountedOn:%s, UsedMB:%d, TotalSizeMB:%d}", + d.Filesystem, d.MountedOn, d.UsedMB, d.TotalSizeMB, + ) +} + +// GetDiskPartitionWithDir TODO +func GetDiskPartitionWithDir(dirName string) (*DiskDfResult, error) { + /* + $ df -m /data/dbbak/data1 + Filesystem 1M-blocks Used Available Use% Mounted on + /dev/vdc 3604645 175526 3246014 6% /data1 + */ + // cmd := fmt.Sprintf("-k %s", dirName) // df -k /xx/ + if dirName == "" { + return nil, errors.New("df -m dirName should not be empty") + } + cmdArgs := []string{"-m", dirName} + stdout, stderr, err := ExecCommand(false, "df", cmdArgs...) 
+ if err != nil { + return nil, errors.Wrapf(err, "dir:%s, err:%+v", dirName, stderr) + } + lines := cmutil.SplitAnyRuneTrim(stdout, "\n") + if len(lines) != 2 { + return nil, errors.Errorf("df result expect lines 2, got: %v", lines) + } + dfLine := squashSpace(lines[1]) + if dfLineVals := cmutil.SplitAnyRuneTrim(dfLine, " "); len(dfLineVals) != 6 { + return nil, errors.Errorf("df result expect line2 has 6 columns, got: %v", dfLineVals) + } else { + res := &DiskDfResult{ + Filesystem: dfLineVals[0], + TotalSizeMB: cast.ToInt64(dfLineVals[1]), + UsedMB: cast.ToInt64(dfLineVals[2]), + AvailableMB: cast.ToInt64(dfLineVals[3]), + MountedOn: dfLineVals[5], + } + res.TotalSizeMBReal = res.UsedMB + res.AvailableMB + res.UsedPct = float32(res.UsedMB) / float32(res.TotalSizeMBReal) + return res, nil + } +} + +// GetDirectorySizeMB du 获取 binlog 目录大小 +// 如果 binlog 目录有其它文件,会一起计算 +func GetDirectorySizeMB(binlogDir string) (int64, error) { + /* + du -sm /data/ + du: cannot read directory `/data/lost+found': Permission denied + 27435 /data/ + */ + // cmdArgs := fmt.Sprintf("-sm %s", binlogDir) // du -sh /xx + cmdArgs := []string{"-sm", binlogDir} + stdout, stderr, err := ExecCommand(false, "du", cmdArgs...) + errStr := strings.SplitN(stderr, "\n", 1)[0] + if err != nil { + if strings.Contains(stdout, binlogDir) && strings.Contains(stderr, "lost+found") { + // 忽略该错误 + } else { + // 错误信息只返回第一行 + return 0, errors.Wrap(err, errStr) + } + } + if strings.TrimSpace(stderr) != "" { + return 0, errors.New(errStr) + } + reSize := regexp.MustCompile(`(\d+)\s+`) + if matches := reSize.FindStringSubmatch(stdout); len(matches) != 2 { + return 0, errors.Errorf("fail to parse binlogDir du size: %s", stdout) + } else { + totalSizeMB, _ := strconv.ParseInt(matches[1], 10, 64) + return totalSizeMB, nil + } +} + +func squashSpace(ss string) string { + reSpaces := regexp.MustCompile(`\s+`) + return reSpaces.ReplaceAllString(ss, " ") +} + +// ExecCommand bash=true: bash -c 'cmdName args', bash=false: ./cmdName args list +// ExecCommand(false, "df", "-k /data") will get `df '-k /data'` error command. you need change it to (false, "df", "-k", "/data") or (true, "df -k /data") +// bash=false need PATH +func ExecCommand(bash bool, cmdName string, args ...string) (string, string, error) { + var cmd *exec.Cmd + if bash { + if cmdName != "" { + cmdName += " " + } + cmdStr := fmt.Sprintf(`%s%s`, cmdName, strings.Join(args, " ")) + cmd = exec.Command("bash", "-c", cmdStr) + } else { + if cmdName == "" { + return "", "", errors.Errorf("command name should not be empty:%v", args) + } + // args should be list + cmd = exec.Command(cmdName, args...) 
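A minimal usage sketch (not part of the patch) for the df helper above: query the partition that holds a binlog directory and work out how many MB would have to go to get usage back under a watermark. The 80% threshold is an illustrative assumption, not a project default.

package main

import (
	"fmt"

	"dbm-services/mysql/db-tools/mysql-rotatebinlog/pkg/util"
)

func main() {
	res, err := util.GetDiskPartitionWithDir("/data")
	if err != nil {
		panic(err)
	}
	fmt.Println(res) // {Filesystem:..., MountedOn:..., UsedMB:..., TotalSizeMB:...}

	const maxUsedPct = 0.80 // assumed watermark for this sketch
	if res.UsedPct > maxUsedPct {
		// free enough to bring usage back to the watermark
		res.SizeToFreeMB = res.UsedMB - int64(maxUsedPct*float32(res.TotalSizeMBReal))
		fmt.Printf("need to free %d MB\n", res.SizeToFreeMB)
	}
}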
diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/.gitignore b/dbm-services/mysql/db-tools/mysql-table-checksum/.gitignore
new file mode 100644
index 0000000000..a53dda87d5
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-table-checksum/.gitignore
@@ -0,0 +1,4 @@
+checksum.sqlite
+.idea
+*.log
+build
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/.golangci.yml b/dbm-services/mysql/db-tools/mysql-table-checksum/.golangci.yml
new file mode 100644
index 0000000000..6651690902
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-table-checksum/.golangci.yml
@@ -0,0 +1,57 @@
+linters-settings:
+  lll:
+    line-length: 120
+  funlen:
+    lines: 80
+    statements: 80
+  gocritic:
+    enabled-checks:
+      - nestingReduce
+      - commentFormatting
+
+run:
+  # default concurrency is the available CPU number
+  concurrency: 4
+  # timeout for analysis, e.g. 30s, 5m, default is 1m
+  timeout: 2m
+  # exit code when at least one issue was found, default is 1
+  issues-exit-code: 1
+  # include test files or not, default is true
+  tests: false
+  # default is true. Enables skipping of directories:
+  # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
+  skip-dirs-use-default: true
+
+  skip-files:
+    - ".*/mock/.*.go"
+    - ".*testing.go"
+
+linters:
+  # enable-all: true
+  # disable-all: true
+  disable:
+    - errcheck
+  enable:
+    - nilerr
+    - nakedret
+    - lll
+    - gofmt
+    - gocritic
+    - gocyclo
+    - whitespace
+    - sqlclosecheck
+    - deadcode
+    - govet
+    - bodyclose
+    - staticcheck
+  # - errorlint
+  # - varcheck
+  # - typecheck
+  # - nestif
+  # - gofumpt
+  # - godox
+  # - wsl
+  # - funlen
+  # - golint
+  # - cyclop
+  fast: false
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/LICENSE b/dbm-services/mysql/db-tools/mysql-table-checksum/LICENSE
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/Makefile b/dbm-services/mysql/db-tools/mysql-table-checksum/Makefile
new file mode 100644
index 0000000000..f918e27db7
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-table-checksum/Makefile
@@ -0,0 +1,25 @@
+PROJ="mysql-table-checksum"
+MODULE="dbm-services/mysql/db-tools/mysql-table-checksum"
+VERSION = $(error please set VERSION flag)
+PKG="mysql-checksum.tar.gz" # historically wrong name, hard to change now
+OUTPUT_DIR=build
+RELEASE_BUILD_FLAG = "-X ${MODULE}/cmd.version=${VERSION} -X ${MODULE}/cmd.buildStamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X ${MODULE}/cmd.gitHash=`git rev-parse HEAD` "
+DEV_BUILD_FLAG = "-X ${MODULE}/cmd.version="develop" -X ${MODULE}/cmd.buildStamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X ${MODULE}/cmd.gitHash="" "
+
+.PHONY: release-bin
+release-bin:
+	@CGO_ENABLED=0 GOARCH=amd64 GOOS=linux go build -ldflags ${RELEASE_BUILD_FLAG} -o ${OUTPUT_DIR}/${PROJ}
+	@cp pt-table-checksum ${OUTPUT_DIR}/
+	@cp pt-table-sync ${OUTPUT_DIR}/
+	@tar -C ${OUTPUT_DIR} -zcf ${OUTPUT_DIR}/${PKG} ${PROJ} pt-table-checksum pt-table-sync
+
+.PHONY: dev-bin
+dev-bin:
+	@go build -ldflags ${DEV_BUILD_FLAG} -o ${OUTPUT_DIR}/${PROJ}
+	@cp pt-table-checksum ${OUTPUT_DIR}/
+	@cp pt-table-sync ${OUTPUT_DIR}/
+	@tar -C ${OUTPUT_DIR} -zcf ${OUTPUT_DIR}/${PKG} ${PROJ} pt-table-checksum pt-table-sync
+
+.PHONY: clean
+clean:
+	@rm -rf ${OUTPUT_DIR}
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/README.md b/dbm-services/mysql/db-tools/mysql-table-checksum/README.md
new file mode 100644
index 0000000000..eec1a8123b
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-table-checksum/README.md
@@ -0,0 +1,2 @@
+Routine MySQL data checksum program.
+Deployed on DB machines as a crontab job.
\ No newline at end of file
diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/init.go b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/init.go
new file mode 100644
index 0000000000..3ae5f83d55
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/init.go
@@ -0,0 +1,101 @@
+package cmd
+
+import (
+	"dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"path/filepath"
+
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+	"gopkg.in/natefinch/lumberjack.v2"
+)
+
+var executable string
+var executableName string
+var executableDir string
+
+func init() {
+	// resolve the running executable's path, name and directory; the absolute
+	// path is needed later when registering the binary with mysql-crond
+	executable, _ = os.Executable()
+	executableName = filepath.Base(executable)
+	executableDir = filepath.Dir(executable)
+}
+
+func initLogger(cfg *config.LogConfig, mode config.CheckMode) {
+	var ioWriters []io.Writer
+
+	if cfg.Console {
+		ioWriters = append(ioWriters, os.Stdout)
+	}
+
+	if cfg.LogFileDir != nil {
+		if !path.IsAbs(*cfg.LogFileDir) {
+			err := fmt.Errorf("log_file_dir must be an absolute path")
+			panic(err)
+		}
+
+		err := os.MkdirAll(*cfg.LogFileDir, 0755)
+		if err != nil {
+			panic(err)
+		}
+
+		// TODO: change the directory owner
+		var logFile string
+		if mode == config.GeneralMode {
+			logFile = path.Join(
+				*cfg.LogFileDir,
+				fmt.Sprintf("%s_%d.log", executableName, config.ChecksumConfig.Port),
+			)
+		} else {
+			logFile = path.Join(
+				*cfg.LogFileDir,
+				fmt.Sprintf(
+					"%s_%d_%s.log",
+					executableName,
+					config.ChecksumConfig.Port,
+					viper.GetString("uuid"),
+				),
+			)
+		}
+
+		_, err = os.Stat(logFile)
+		if err != nil {
+			if os.IsNotExist(err) {
+				_, err := os.Create(logFile)
+				if err != nil {
+					panic(err)
+				}
+				// TODO: change the log file owner
+			} else {
+				panic(err)
+			}
+		}
+
+		ioWriters = append(ioWriters, &lumberjack.Logger{Filename: logFile})
+	}
+
+	handleOpt := slog.HandlerOptions{AddSource: cfg.Source}
+	if cfg.Debug {
+		handleOpt.Level = slog.LevelDebug
+	} else {
+		handleOpt.Level = slog.LevelInfo
+	}
+
+	var logger *slog.Logger
+	if cfg.Json {
+		logger = slog.New(handleOpt.NewJSONHandler(io.MultiWriter(ioWriters...)))
+	} else {
+		logger = slog.New(handleOpt.NewTextHandler(io.MultiWriter(ioWriters...)))
+	}
+
+	slog.SetDefault(logger)
+}
diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/root.go b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/root.go
new file mode 100644
index 0000000000..fe446e93f6
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/root.go
@@ -0,0 +1,26 @@
+/*
+Package cmd implements the mysql-table-checksum command line
+*/
+package cmd
+
+import (
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
+// rootCmd represents the base command when called without any subcommands
+var rootCmd = &cobra.Command{
+	Use:   "mysql-table-checksum",
+	Short: "mysql-table-checksum",
+}
+
+// Execute adds all child commands to the root command and sets flags appropriately.
+// This is called by main.main(). It only needs to happen once to the rootCmd.
+func Execute() {
+	err := rootCmd.Execute()
+	if err != nil {
+		os.Exit(1)
+	}
+}
diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/run_checksum.go b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/run_checksum.go
new file mode 100644
index 0000000000..929502a54c
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/run_checksum.go
@@ -0,0 +1,92 @@
+package cmd
+
+import (
+	"dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker"
+	"dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config"
+	"fmt"
+	"os"
+
+	"github.com/juju/fslock"
+	"golang.org/x/exp/slog"
+)
+
+func generateRun(mode config.CheckMode, configPath string) error {
+	err := config.InitConfig(configPath)
+	if err != nil {
+		return err
+	}
+
+	initLogger(config.ChecksumConfig.Log, mode)
+
+	ck, err := checker.NewChecker(mode)
+	if err != nil {
+		return err
+	}
+
+	lockFilePath := fmt.Sprintf(".%s_%d_%s.lock", ck.Config.Ip, ck.Config.Port, ck.Mode)
+	lock := fslock.New(lockFilePath)
+
+	switch ck.Config.InnerRole {
+	case config.RoleMaster:
+		err = lock.TryLock()
+		if err != nil {
+			slog.Error("another checksum already running", err)
+			return err
+		}
+		// remove the lock file only when this process actually owns it
+		defer func() {
+			_ = os.Remove(lockFilePath)
+		}()
+		slog.Info("run checksum on master start")
+		err = ck.Run()
+		if err != nil {
+			slog.Error("run checksum on master", err)
+			return err
+		}
+		slog.Info("run checksum on master finish")
+		return nil
+	case config.RoleRepeater:
+		err = lock.TryLock()
+		if err != nil {
+			slog.Error("another checksum already running", err)
+			return err
+		}
+		defer func() {
+			_ = os.Remove(lockFilePath)
+		}()
+
+		slog.Info("run checksum on repeater start")
+		err = ck.Run()
+		if err != nil {
+			slog.Error("run checksum on repeater", err)
+			return err
+		}
+		if ck.Mode == config.GeneralMode {
+			slog.Info("run checksum on repeater to report start")
+			err = ck.Report()
+			if err != nil {
+				slog.Error("run report on repeater", err)
+				return err
+			}
+			slog.Info("run checksum on repeater to report finish")
+		}
+		slog.Info("run checksum on repeater finish")
+		return nil
+	case config.RoleSlave:
+		slog.Info("run checksum on slave")
+		if ck.Mode == config.DemandMode {
+			err = fmt.Errorf("on-demand checksum should not run on slave")
+			slog.Error("role is slave", err)
+			return err
+		}
+		slog.Info("run checksum on slave to report start")
+		err = ck.Report()
+		if err != nil {
+			slog.Error("run report on slave", err)
+			return err
+		}
+		slog.Info("run checksum on slave to report finish")
+		return nil
+	default:
+		err := fmt.Errorf("unknown instance inner role: %s", ck.Config.InnerRole)
+		slog.Error("general run", err)
+		return err
+	}
+}
&cobra.Command{ + Use: "clean", + Short: "clean checksum entry", + Long: "clean checksum entry", + RunE: func(cmd *cobra.Command, args []string) error { + err := config.InitConfig(viper.GetString("clean-config")) + if err != nil { + return err + } + + manager := ma.NewManager(config.ChecksumConfig.ApiUrl) + entries, err := manager.Entries() + if err != nil { + slog.Error("clean list entries", err) + return err + } + + for _, entry := range entries { + if strings.HasPrefix( + entry.Job.Name, + fmt.Sprintf("mysql-checksum-%d", config.ChecksumConfig.Port), + ) { + eid, err := manager.Delete(entry.Job.Name, true) + if err != nil { + slog.Error( + "reschedule delete entry", err, + slog.String("name", entry.Job.Name), + ) + return err + } + slog.Info( + "reschedule delete entry", + slog.String("name", entry.Job.Name), + slog.Int("ID", eid), + ) + } + } + return nil + }, +} + +func init() { + subCmdClean.PersistentFlags().StringP("config", "c", "", "config file") + _ = subCmdClean.MarkPersistentFlagRequired("config") + _ = viper.BindPFlag("clean-config", subCmdClean.PersistentFlags().Lookup("config")) + + rootCmd.AddCommand(subCmdClean) +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_demand.go b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_demand.go new file mode 100644 index 0000000000..7ae92d1209 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_demand.go @@ -0,0 +1,29 @@ +package cmd + +import ( + "dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var subCmdDemand = &cobra.Command{ + Use: "demand", + Short: "demand checksum", + Long: "demand checksum", + RunE: func(cmd *cobra.Command, args []string) error { + return generateRun(config.DemandMode, viper.GetString("demand-config")) + }, +} + +func init() { + subCmdDemand.PersistentFlags().StringP("config", "c", "", "config file") + _ = subCmdDemand.MarkPersistentFlagRequired("config") + _ = viper.BindPFlag("demand-config", subCmdDemand.PersistentFlags().Lookup("config")) + + subCmdDemand.PersistentFlags().StringP("uuid", "", "", "unique id for each demand") + _ = subCmdDemand.MarkPersistentFlagRequired("uuid") + _ = viper.BindPFlag("uuid", subCmdDemand.PersistentFlags().Lookup("uuid")) + + rootCmd.AddCommand(subCmdDemand) +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_general.go b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_general.go new file mode 100644 index 0000000000..dfd53b9950 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_general.go @@ -0,0 +1,25 @@ +package cmd + +import ( + "dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config" + + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var subCmdGeneral = &cobra.Command{ + Use: "general", + Short: "general checksum", + Long: "general checksum", + RunE: func(cmd *cobra.Command, args []string) error { + return generateRun(config.GeneralMode, viper.GetString("general-config")) + }, +} + +func init() { + subCmdGeneral.PersistentFlags().StringP("config", "c", "", "config file") + _ = subCmdGeneral.MarkPersistentFlagRequired("config") + _ = viper.BindPFlag("general-config", subCmdGeneral.PersistentFlags().Lookup("config")) + + rootCmd.AddCommand(subCmdGeneral) +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_reschedule.go b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_reschedule.go new file mode 100644 index 
diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_reschedule.go b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_reschedule.go
new file mode 100644
index 0000000000..f8915ab39d
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_reschedule.go
@@ -0,0 +1,101 @@
+package cmd
+
+import (
+	ma "dbm-services/mysql/db-tools/mysql-crond/api"
+	"dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
+	"golang.org/x/exp/slog"
+)
+
+var subCmdReschedule = &cobra.Command{
+	Use:   "reschedule",
+	Short: "reschedule mysql-crond entry",
+	Long:  "reschedule mysql-crond entry",
+	RunE: func(cmd *cobra.Command, args []string) error {
+		/*
+			Only this subcommand needs to turn the config path into an absolute path,
+			because once registered in mysql-crond the cwd is a different directory.
+		*/
+		configPath := viper.GetString("reschedule-config")
+		if !filepath.IsAbs(configPath) {
+			cwd, err := os.Getwd()
+			if err != nil {
+				slog.Error("reschedule get config abs path", err)
+				return err
+			}
+			configPath = filepath.Join(cwd, configPath)
+		}
+
+		err := config.InitConfig(configPath)
+		if err != nil {
+			return err
+		}
+
+		manager := ma.NewManager(config.ChecksumConfig.ApiUrl)
+		entries, err := manager.Entries()
+		if err != nil {
+			slog.Error("reschedule list entries", err)
+			return err
+		}
+
+		for _, entry := range entries {
+			if strings.HasPrefix(
+				entry.Job.Name,
+				fmt.Sprintf("mysql-checksum-%d", config.ChecksumConfig.Port),
+			) {
+				eid, err := manager.Delete(entry.Job.Name, true)
+				if err != nil {
+					slog.Error(
+						"reschedule delete entry", err,
+						slog.String("name", entry.Job.Name),
+					)
+					return err
+				}
+				slog.Info(
+					"reschedule delete entry",
+					slog.String("name", entry.Job.Name),
+					slog.Int("ID", eid),
+				)
+			}
+		}
+
+		eid, err := manager.CreateOrReplace(
+			ma.JobDefine{
+				Name:    fmt.Sprintf("mysql-checksum-%d", config.ChecksumConfig.Port),
+				Command: executable,
+				Args: []string{
+					"general",
+					"--config", configPath,
+				},
+				Schedule: config.ChecksumConfig.Schedule,
+				Creator:  viper.GetString("staff"),
+				Enable:   true,
+			}, true,
+		)
+		if err != nil {
+			slog.Error("reschedule add entry", err)
+			return err
+		}
+		slog.Info("reschedule add entry", slog.Int("entry id", eid))
+
+		return nil
+	},
+}
+
+func init() {
+	subCmdReschedule.PersistentFlags().StringP("config", "c", "", "config file")
+	_ = subCmdReschedule.MarkPersistentFlagRequired("config")
+	_ = viper.BindPFlag("reschedule-config", subCmdReschedule.PersistentFlags().Lookup("config"))
+
+	subCmdReschedule.PersistentFlags().StringP("staff", "", "", "staff name")
+	_ = subCmdReschedule.MarkPersistentFlagRequired("staff")
+	_ = viper.BindPFlag("staff", subCmdReschedule.PersistentFlags().Lookup("staff"))
+
+	rootCmd.AddCommand(subCmdReschedule)
+}
diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_version.go b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_version.go
new file mode 100644
index 0000000000..a48f577194
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-table-checksum/cmd/subcmd_version.go
@@ -0,0 +1,42 @@
+package cmd
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+// versionCmd represents the version command
+var versionCmd = &cobra.Command{
+	Use:   "version",
+	Short: "A brief description of your command",
+	Long: `A longer description that spans multiple lines and likely contains examples
+and usage of using your command. For example:
+
+Cobra is a CLI library for Go that empowers applications.
+This application is a tool to generate the needed files +to quickly create a Cobra application.`, + Run: func(cmd *cobra.Command, args []string) { + printVersion() + }, +} +var version = "" +var buildStamp = "" +var gitHash = "" + +func init() { + rootCmd.AddCommand(versionCmd) + + // Here you will define your flags and configuration settings. + + // Cobra supports Persistent Flags which will work for this command + // and all subcommands, e.g.: + // versionCmd.PersistentFlags().String("foo", "", "A help for foo") + + // Cobra supports local flags which will only run when this command + // is called directly, e.g.: + // versionCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") +} +func printVersion() { + fmt.Printf("Version: %s, GitHash: %s, BuildAt: %s\n", version, gitHash, buildStamp) +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/go.mod b/dbm-services/mysql/db-tools/mysql-table-checksum/go.mod new file mode 100644 index 0000000000..75576d7c92 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/go.mod @@ -0,0 +1,38 @@ +module dbm-services/mysql/db-tools/mysql-table-checksum + +go 1.19 + +require ( + github.com/go-sql-driver/mysql v1.7.1 + github.com/jmoiron/sqlx v1.3.5 + github.com/juju/fslock v0.0.0-20160525022230-4d5c94c67b4b + github.com/pkg/errors v0.9.1 + github.com/spf13/cobra v1.7.0 + github.com/spf13/viper v1.15.0 + golang.org/x/exp v0.0.0-20230418202329-0354be287a23 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 + gopkg.in/yaml.v2 v2.4.0 +) + +require ( + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/lib/pq v1.10.0 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-sqlite3 v1.14.16 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/pelletier/go-toml/v2 v2.0.7 // indirect + github.com/rogpeppe/go-internal v1.8.0 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/testify v1.8.2 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + golang.org/x/sys v0.7.0 // indirect + golang.org/x/text v0.9.0 // indirect + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/go.sum b/dbm-services/mysql/db-tools/mysql-table-checksum/go.sum new file mode 100644 index 0000000000..6321230ad5 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/go.sum @@ -0,0 +1,512 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go 
v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod 
h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= +github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/juju/fslock v0.0.0-20160525022230-4d5c94c67b4b h1:FQ7+9fxhyp82ks9vAuyPzG0/vVbWwMwLJ+P6yJI5FN8= +github.com/juju/fslock v0.0.0-20160525022230-4d5c94c67b4b/go.mod h1:HMcgvsgd0Fjj4XXDkbjdmlbI505rUPBs6WBMYg2pXks= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E= +github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= +github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= 
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230418202329-0354be287a23 h1:4NKENAGIctmZYLK9W+X1kDK8ObBFqOSCJM6WE7CvkJY= +golang.org/x/exp v0.0.0-20230418202329-0354be287a23/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= 
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod 
h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/main.go b/dbm-services/mysql/db-tools/mysql-table-checksum/main.go new file mode 100644 index 0000000000..6cca2f477e --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/main.go @@ -0,0 +1,11 @@ +// Package main main +/* +Copyright © 2022 NAME HERE +*/ +package main + +import "dbm-services/mysql/db-tools/mysql-table-checksum/cmd" + +func main() { + cmd.Execute() +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/mysql-table-checksum.sh.tpl b/dbm-services/mysql/db-tools/mysql-table-checksum/mysql-table-checksum.sh.tpl new file mode 100644 index 0000000000..0911db2e78 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/mysql-table-checksum.sh.tpl @@ -0,0 +1,25 @@ +#!/usr/bin/env sh +wrapper_log={{.ChecksumInstallPath}}/checksum.log + +if [ -e $wrapper_log ] +then + chown mysql $wrapper_log + SIZE=$(stat $wrapper_log -c %s) + + if [ $SIZE -gt 100000000 ] + then + mv $wrapper_log $wrapper_log.old + chown mysql $wrapper_log.old + fi +fi + +echo $(date) "begin schedule checksum">>$wrapper_log +chown mysql $wrapper_log + +for PORT in "${@:1}" +do + echo $(date) "schedule port=$PORT">>$wrapper_log + {{.ChecksumPath}} --config {{.ChecksumInstallPath}}/checksum_$PORT.yaml --mode general --log-file-json --log-file-path {{.ChecksumInstallPath}}/checksum_$PORT.log 1>>$wrapper_log 2>&1 & +done + +echo $(date) "all checksum scheduled">>$wrapper_log \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/checker.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/checker.go new file mode 100644 index 0000000000..2443191a3b --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/checker.go @@ -0,0 +1,295 @@ +// Package checker 检查库 +package checker + +import ( + "database/sql" + "dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config" + "dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter" + "fmt" + "os" + "strings" + "time" + + _ "github.com/go-sql-driver/mysql" // mysql + "github.com/jmoiron/sqlx" + "golang.org/x/exp/slices" + "golang.org/x/exp/slog" +) + +// NewChecker 新建检查器 +func NewChecker(mode config.CheckMode) (*Checker, error) { + if mode == config.GeneralMode { + err := os.MkdirAll(config.ChecksumConfig.ReportPath, 0755) + if err != nil { + slog.Error("new checker create report path", err) + return nil, err + } + } + + checker := &Checker{ + Config: config.ChecksumConfig, + reporter: 
reporter.NewReporter(config.ChecksumConfig), + Mode: mode, + } + + // checker 需要一个序列化器方便打日志 + + splitR := strings.Split(checker.Config.PtChecksum.Replicate, ".") + checker.resultDB = splitR[0] + checker.resultTbl = splitR[1] + checker.resultHistoryTable = fmt.Sprintf("%s_history", splitR[1]) + + if err := checker.connect(); err != nil { + slog.Error("connect host", err) + return nil, err + } + + if err := checker.ptPrecheck(); err != nil { + return nil, err + } + + checker.applyForceSwitchStrategy(commonForceSwitchStrategies) + checker.applyDefaultSwitchStrategy(commonDefaultSwitchStrategies) + checker.applyForceKVStrategy(commonForceKVStrategies) + checker.applyDefaultKVStrategy(commonDefaultKVStrategies) + + if checker.Mode == config.GeneralMode { + checker.applyForceSwitchStrategy(generalForceSwitchStrategies) + checker.applyDefaultSwitchStrategy(generalDefaultSwitchStrategies) + checker.applyForceKVStrategy(generalForceKVStrategies) + checker.applyDefaultKVStrategy(generalDefaultKVStrategies) + + if err := checker.validateHistoryTable(); err != nil { + return nil, err + } + } else { + checker.applyForceSwitchStrategy(demandForceSwitchStrategies) + checker.applyDefaultSwitchStrategy(demandDefaultSwitchStrategies) + checker.applyForceKVStrategy(demandForceKVStrategies) + checker.applyDefaultKVStrategy(demandDefaultKVStrategies) + + if err := checker.validateSlaves(); err != nil { + return nil, err + } + + if err := checker.prepareDsnsTable(); err != nil { + return nil, err + } + } + + checker.buildCommandArgs() + + return checker, nil +} + +func (r *Checker) connect() (err error) { + r.db, err = sqlx.Connect( + "mysql", + fmt.Sprintf( + "%s:%s@tcp(%s:%d)/%s?parseTime=true&loc=%s", + r.Config.User, + r.Config.Password, + r.Config.Ip, + r.Config.Port, + r.resultDB, + time.Local.String(), + ), + ) + return err +} + +func (r *Checker) validateSlaves() error { + if len(r.Config.Slaves) < 1 { + err := fmt.Errorf("demand checksum need at least 1 slave") + slog.Error("validate slaves counts", err) + return err + } + + /* + 实际是要能 select 所有库表, 但是权限不好查 + 这里只查下能不能连接 + */ + for _, slave := range r.Config.Slaves { + _, err := sqlx.Connect( + "mysql", + fmt.Sprintf( + "%s:%s@tcp(%s:%d)/", + slave.User, + slave.Password, + slave.Ip, + slave.Port, + ), + ) + if err != nil { + slog.Error("validate slaves connect", err) + return err + } + } + return nil +} + +func (r *Checker) prepareDsnsTable() error { + _, err := r.db.Exec(`DROP TABLE IF EXISTS dsns`) + if err != nil { + slog.Error("drop exists dsns table", err) + return err + } + + _, err = r.db.Exec( + `CREATE TABLE dsns (` + + `id int NOT NULL AUTO_INCREMENT,` + + `parent_id int DEFAULT NULL,` + + `dsn varchar(255) NOT NULL,` + + `PRIMARY KEY(id)) ENGINE=InnoDB`, + ) + if err != nil { + slog.Error("create dsns table", err) + return err + } + + for _, slave := range r.Config.Slaves { + _, err := r.db.Exec( + `INSERT INTO dsns (dsn) VALUES (?)`, + fmt.Sprintf(`h=%s,u=%s,p=%s,P=%d`, slave.Ip, slave.User, slave.Password, slave.Port), + ) + if err != nil { + slog.Error("add slave dsn record", err) + return err + } + } + return nil +} + +func (r *Checker) validateHistoryTable() error { + r.hasHistoryTable = false + + var _r interface{} + err := r.db.Get( + &_r, + `SELECT 1 FROM INFORMATION_SCHEMA.TABLES `+ + `WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ? 
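In demand mode the tool later points pt-table-checksum at this `dsns` table via `--recursion-method dsn=D=<result db>,t=dsns` (see the strategy definitions further below), and each row must be a Percona key=value DSN. A minimal sketch of the row format, with hypothetical slave values:

```go
package main

import "fmt"

// host mirrors the slave entries of the checksum config (illustrative subset).
type host struct {
	IP, User, Password string
	Port               int
}

func main() {
	slaves := []host{{IP: "10.0.0.2", User: "checksum", Password: "xx", Port: 20000}}
	for _, s := range slaves {
		// One dsns-table row per slave, in pt key=value DSN syntax:
		// h(ost), u(ser), p(assword), P(ort).
		fmt.Printf("h=%s,u=%s,p=%s,P=%d\n", s.IP, s.User, s.Password, s.Port)
	}
}
```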
AND TABLE_TYPE='BASE TABLE'`, + r.resultDB, + r.resultHistoryTable, + ) + if err != nil { + if err == sql.ErrNoRows { + slog.Info("history table not found") + if r.Config.InnerRole == config.RoleSlave { + slog.Info("no need create history table", slog.String("inner role", string(r.Config.InnerRole))) + return nil + } else { + slog.Info("create history table", slog.String("inner role", string(r.Config.InnerRole))) + + err := r.db.Get( + &_r, + `SELECT 1 FROM INFORMATION_SCHEMA.TABLES `+ + `WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ? AND TABLE_TYPE='BASE TABLE'`, + r.resultDB, + r.resultTbl, + ) + + if err != nil { + if err == sql.ErrNoRows { + slog.Info("checksum result table not found") + return nil + } else { + slog.Error("try to find checksum result table failed", err) + return err + } + } + + _, err = r.db.Exec( + fmt.Sprintf( + `CREATE TABLE IF NOT EXISTS %s LIKE %s`, + r.resultHistoryTable, + r.resultTbl, + ), + ) + if err != nil { + slog.Error("create history table", err) + return err + } + _, err = r.db.Exec( + fmt.Sprintf( + `ALTER TABLE %s ADD reported int default 0, `+ + `ADD INDEX idx_reported(reported), `+ + `DROP PRIMARY KEY, `+ + `MODIFY ts timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, `+ + `ADD PRIMARY KEY(master_ip, master_port, db, tbl, chunk, ts)`, + r.resultHistoryTable, + ), + ) + if err != nil { + slog.Error("add column and index to history table", err) + return err + } + } + } else { + slog.Error("check history table exists", err) + return err + } + } + r.hasHistoryTable = true + + /* + 1. Compare the result table and the history table structures; the history table should have exactly one extra column: reported int default 0 + 2. Check the history table primary key + */ + var diffColumn struct { + TableName string `db:"TABLE_NAME"` + ColumnName string `db:"COLUMN_NAME"` + OrdinalPosition int `db:"ORDINAL_POSITION"` + DataType string `db:"DATA_TYPE"` + ColumnType string `db:"COLUMN_TYPE"` + RowCount int `db:"ROW_COUNT"` + } + err = r.db.Get( + &diffColumn, + fmt.Sprintf( + `SELECT `+ + `TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, DATA_TYPE, COLUMN_TYPE, COUNT(1) as ROW_COUNT `+ + `FROM INFORMATION_SCHEMA.COLUMNS WHERE `+ + `TABLE_SCHEMA = '%s' AND TABLE_NAME in ('%s', '%s') `+ + `GROUP BY COLUMN_NAME, ORDINAL_POSITION, DATA_TYPE, COLUMN_TYPE HAVING ROW_COUNT <> 2`, + r.resultDB, + r.resultTbl, + r.resultHistoryTable, + ), + ) + if err != nil { + slog.Error("compare result table column", err) + return err + } + + if diffColumn.TableName != r.resultHistoryTable || + diffColumn.ColumnName != "reported" || + diffColumn.DataType != "int" { + err = fmt.Errorf("%s needs a column defined as 'reported int default 0'", r.resultHistoryTable) + slog.Error("check history table reported column", err) + return err + } + + var pkColumns []string + err = r.db.Select( + &pkColumns, + fmt.Sprintf( + `SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.STATISTICS `+ + `WHERE TABLE_SCHEMA = '%s' AND TABLE_NAME = '%s' AND INDEX_NAME = 'PRIMARY' `+ + `ORDER BY SEQ_IN_INDEX`, + r.resultDB, + r.resultHistoryTable, + ), + ) + if err != nil { + slog.Error("check history table primary key", err) + return err + } + + if slices.Compare(pkColumns, []string{"master_ip", "master_port", "db", "tbl", "chunk", "ts"}) != 0 { + err = fmt.Errorf("history table must have its primary key defined as (master_ip, master_port, db, tbl, chunk, ts)") + slog.Error("check history table primary key", err) + return err + } + + return nil +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/command_args.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/command_args.go new file mode 100644 index 0000000000..d72b80cd1a --- /dev/null +++
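Both invariants that validateHistoryTable enforces can be probed directly from INFORMATION_SCHEMA. A sketch of the primary-key check, assuming a placeholder DSN and placeholder schema/table names:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

// Expected primary key of the *_history table, in index order.
var wantPK = []string{"master_ip", "master_port", "db", "tbl", "chunk", "ts"}

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	rows, err := db.Query(
		`SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.STATISTICS
		 WHERE TABLE_SCHEMA = ? AND TABLE_NAME = ? AND INDEX_NAME = 'PRIMARY'
		 ORDER BY SEQ_IN_INDEX`,
		"test", "checksums_history", // placeholder schema and table
	)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	var got []string
	for rows.Next() {
		var col string
		if err := rows.Scan(&col); err != nil {
			log.Fatal(err)
		}
		got = append(got, col)
	}
	fmt.Println("pk matches:", fmt.Sprint(got) == fmt.Sprint(wantPK))
}
```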
b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/command_args.go @@ -0,0 +1,125 @@ +package checker + +import ( + "fmt" + "os" + "strings" + + "golang.org/x/exp/slog" +) + +func (r *Checker) ptPrecheck() error { + if _, err := os.Stat(r.Config.PtChecksum.Path); err != nil { + slog.Error("pt pre check", err) + return err + } + return nil +} + +func (r *Checker) buildCommandArgs() { + r.args = append(r.args, r.ptArgsFilters()...) + r.args = append(r.args, r.ptArgsConnectInfo()...) + r.args = append(r.args, r.ptArgsReplicate()...) + r.args = append(r.args, r.ptArgsSwitches()...) + r.args = append(r.args, r.ptArgsKV()...) +} + +func (r *Checker) ptArgsFilters() []string { + var res []string + + if len(r.Config.Filter.Databases) > 0 { + res = append( + res, []string{ + fmt.Sprintf("--databases=%s", strings.Join(r.Config.Filter.Databases, ",")), + }..., + ) + } + if len(r.Config.Filter.Tables) > 0 { + res = append( + res, []string{ + fmt.Sprintf("--tables=%s", strings.Join(r.Config.Filter.Tables, ",")), + }..., + ) + } + if len(r.Config.Filter.IgnoreDatabases) > 0 { + res = append( + res, []string{ + fmt.Sprintf("--ignore-databases=%s", strings.Join(r.Config.Filter.IgnoreDatabases, ",")), + }..., + ) + } + if len(r.Config.Filter.IgnoreTables) > 0 { + res = append( + res, []string{ + fmt.Sprintf("--ignore-tables=%s", strings.Join(r.Config.Filter.IgnoreTables, ",")), + }..., + ) + } + if r.Config.Filter.DatabasesRegex != "" { + res = append( + res, []string{ + fmt.Sprintf("--databases-regex=%s", r.Config.Filter.DatabasesRegex), + }..., + ) + } + if r.Config.Filter.TablesRegex != "" { + res = append( + res, []string{ + fmt.Sprintf("--tables-regex=%s", r.Config.Filter.TablesRegex), + }..., + ) + } + if r.Config.Filter.IgnoreDatabasesRegex != "" { + res = append( + res, []string{ + fmt.Sprintf("--ignore-databases-regex=%s", r.Config.Filter.IgnoreDatabasesRegex), + }..., + ) + } + if r.Config.Filter.IgnoreTablesRegex != "" { + res = append( + res, []string{ + fmt.Sprintf("--ignore-tables-regex=%s", r.Config.Filter.IgnoreTablesRegex), + }..., + ) + } + return res +} + +func (r *Checker) ptArgsConnectInfo() []string { + return []string{ + fmt.Sprintf("--host=%s", r.Config.Ip), + fmt.Sprintf("--port=%d", r.Config.Port), + fmt.Sprintf("--user=%s", r.Config.User), + fmt.Sprintf("--password=%s", r.Config.Password), + } +} + +func (r *Checker) ptArgsReplicate() []string { + return []string{ + fmt.Sprintf("--replicate=%s", r.Config.PtChecksum.Replicate), + } +} + +func (r *Checker) ptArgsSwitches() []string { + var res []string + for _, sw := range r.Config.PtChecksum.Switches { + res = append(res, fmt.Sprintf(`--%s`, sw)) + } + return res +} + +func (r *Checker) ptArgsKV() []string { + var res []string + for _, arg := range r.Config.PtChecksum.Args { + key := arg["name"] + value := arg["value"] + switch value := value.(type) { + case int: + res = append(res, fmt.Sprintf(`--%s=%d`, key, value)) + default: + res = append(res, fmt.Sprintf(`--%s=%s`, key, value)) + } + } + return res +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/define.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/define.go new file mode 100644 index 0000000000..9e14ce271f --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/define.go @@ -0,0 +1,52 @@ +package checker + +import ( + "context" + "dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config" + "dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter" + "time" + + 
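Every non-empty filter field maps one-to-one onto a pt-table-checksum flag, so the builder is pure string assembly. A runnable sketch over a hypothetical filter (an illustrative subset of the fields above):

```go
package main

import (
	"fmt"
	"strings"
)

// filter is an illustrative subset of the config's database/table filters.
type filter struct {
	Databases      []string
	IgnoreTables   []string
	DatabasesRegex string
}

// ptArgs emits a flag only when the corresponding filter field is set.
func ptArgs(f filter) []string {
	var args []string
	if len(f.Databases) > 0 {
		args = append(args, fmt.Sprintf("--databases=%s", strings.Join(f.Databases, ",")))
	}
	if len(f.IgnoreTables) > 0 {
		args = append(args, fmt.Sprintf("--ignore-tables=%s", strings.Join(f.IgnoreTables, ",")))
	}
	if f.DatabasesRegex != "" {
		args = append(args, fmt.Sprintf("--databases-regex=%s", f.DatabasesRegex))
	}
	return args
}

func main() {
	f := filter{Databases: []string{"db1", "db2"}, IgnoreTables: []string{"db1.tmp"}}
	// Prints: pt-table-checksum --databases=db1,db2 --ignore-tables=db1.tmp
	fmt.Println("pt-table-checksum", strings.Join(ptArgs(f), " "))
}
```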
"github.com/jmoiron/sqlx" +) + +// Checker 检查器 +type Checker struct { + Config *config.Config + Mode config.CheckMode + db *sqlx.DB + args []string + cancel context.CancelFunc + startTS time.Time + resultHistoryTable string + resultDB string + resultTbl string + hasHistoryTable bool + reporter *reporter.Reporter +} + +// ChecksumSummary 结果汇总报表 +type ChecksumSummary struct { + Ts time.Time `json:"ts"` + Errors int `json:"errors"` + Diffs int `json:"diffs"` + Rows int `json:"rows"` + DiffRows int `json:"diff_rows"` + Chunks int `json:"chunks"` + Skipped int `json:"skipped"` + Time int `json:"time"` + Table string `json:"table"` +} + +// PtExitFlag 退出位 +type PtExitFlag struct { + Flag string `json:"flag"` + Meaning string `json:"meaning"` + BitValue int `json:"bit_value"` +} + +// Output pt checksum输出 +type Output struct { + PtStderr string `json:"pt_stderr"` + Summaries []ChecksumSummary `json:"summaries"` + PtExitFlags []PtExitFlag `json:"pt_exit_flags"` +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/init.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/init.go new file mode 100644 index 0000000000..b0e264a516 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/init.go @@ -0,0 +1,146 @@ +package checker + +import ( + "dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config" + "fmt" + "time" +) + +// PtExitFlagMap pt-table-check 退出位映射 +var PtExitFlagMap map[int]PtExitFlag + +// Mode 模式变量 +var Mode config.CheckMode + +var commonForceSwitchStrategies []switchStrategy +var commonDefaultSwitchStrategies []switchStrategy +var generalForceSwitchStrategies []switchStrategy +var generalDefaultSwitchStrategies []switchStrategy +var demandForceSwitchStrategies []switchStrategy +var demandDefaultSwitchStrategies []switchStrategy + +var commonDefaultKVStrategies []kvStrategy +var commonForceKVStrategies []kvStrategy +var generalDefaultKVStrategies []kvStrategy +var generalForceKVStrategies []kvStrategy +var demandDefaultKVStrategies []kvStrategy +var demandForceKVStrategies []kvStrategy + +func init() { + PtExitFlagMap = map[int]PtExitFlag{ + 1: {Flag: "ERROR", BitValue: 1, Meaning: "A non-fatal error occurred"}, + 2: {Flag: "ALREADY_RUNNING", BitValue: 2, Meaning: "--pid file exists and the PID is running"}, + 4: {Flag: "CAUGHT_SIGNAL", BitValue: 4, Meaning: "Caught SIGHUP, SIGINT, SIGPIPE, or SIGTERM"}, + 8: {Flag: "NO_SLAVES_FOUND", BitValue: 8, Meaning: "No replicas or cluster nodes were found"}, + 16: {Flag: "TABLE_DIFF", BitValue: 16, Meaning: "At least one diff was found"}, + 32: {Flag: "SKIP_CHUNK", BitValue: 32, Meaning: "At least one chunk was skipped"}, + 64: {Flag: "SKIP_TABLE", BitValue: 64, Meaning: "At least one table was skipped"}, + } + + /* + 各种场景下的参数约束 + 没有做什么优先级, 所以 general, demand 的 配置不要和 common 重复 + 有很多和安全相关的参数这里没有出现, 不代表那些参数没有用, 而是默认值刚好非常合适 + */ + commonForceSwitchStrategies = []switchStrategy{ + {Name: "check-binlog-format", Value: false, HasOpposite: true}, + {Name: "check-replication-filters", Value: false, HasOpposite: true}, + {Name: "quiet", Value: false, HasOpposite: false}, + {Name: "binary-index", Value: true, HasOpposite: false}, + {Name: "version-check", Value: false, HasOpposite: true}, + } + commonDefaultSwitchStrategies = []switchStrategy{} + commonForceKVStrategies = []kvStrategy{ + // kv 中不允许出现 replicate, 只能在 pt_checksum.replicate 中指定 + {Name: "replicate", Value: nil, Enable: false}, + // kv 中不允许出现库表过滤, 只能在 Filter 中定义 + {Name: "databases", Value: nil, Enable: false}, + {Name: "tables", 
Value: nil, Enable: false}, + {Name: "ignore-databases", Value: nil, Enable: false}, + {Name: "ignore-tables", Value: nil, Enable: false}, + {Name: "databases-regex", Value: nil, Enable: false}, + {Name: "tables-regex", Value: nil, Enable: false}, + {Name: "ignore-databases-regex", Value: nil, Enable: false}, + {Name: "ignore-tables-regex", Value: nil, Enable: false}, + } + commonDefaultKVStrategies = []kvStrategy{ + { + Name: "chunk-size-limit", + Value: func(checker *Checker) interface{} { + return 5 + }, + Enable: true, + }, + { + Name: "chunk-time", + Value: func(checker *Checker) interface{} { + return 1 + }, + Enable: true, + }, + } + + /* + 例行校验的个性化配置 + 例行校验不要增加和 slave check 相关的任何参数 + 因为目前没有可靠的办法提供从 master 以 select 访问 slave 的帐号 + */ + generalForceSwitchStrategies = []switchStrategy{ + {Name: "resume", Value: true, HasOpposite: false}, + {Name: "replicate-check", Value: false, HasOpposite: true}, + // {Name: "check-slave-tables", Value: false, HasOpposite: true}, + } + generalDefaultSwitchStrategies = []switchStrategy{} + generalForceKVStrategies = []kvStrategy{ + { + Name: "recursion-method", + Value: func(checker *Checker) interface{} { + return "none" + }, + Enable: true, + }, + } + generalDefaultKVStrategies = []kvStrategy{ + { + Name: "run-time", + Value: func(checker *Checker) interface{} { + return time.Hour * 2 + }, + Enable: true, + }, + } + + /* + 单据校验的个性化配置 + */ + demandForceSwitchStrategies = []switchStrategy{ + {Name: "resume", Value: false, HasOpposite: false}, + {Name: "replicate-check", Value: true, HasOpposite: true}, + } + demandDefaultSwitchStrategies = []switchStrategy{} + demandForceKVStrategies = []kvStrategy{ + { + Name: "recursion-method", + Value: func(checker *Checker) interface{} { + return fmt.Sprintf("dsn=D=%s,t=dsns", checker.resultDB) + }, + Enable: true, + }, + } + demandDefaultKVStrategies = []kvStrategy{ + { + Name: "run-time", + Value: func(checker *Checker) interface{} { + return time.Hour * 48 + }, + Enable: true, + }, + { + Name: "max-lag", + Value: func(checker *Checker) interface{} { + return 10 + }, + Enable: true, + }, + } +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/move_result.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/move_result.go new file mode 100644 index 0000000000..d1beb76f48 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/move_result.go @@ -0,0 +1,77 @@ +package checker + +import ( + "context" + "fmt" + "strings" + + "golang.org/x/exp/slog" +) + +func (r *Checker) moveResult() error { + // 在 master 上以这样的方式转存当次的校验结果可以让 slave 转存真实结果 + rows, err := r.db.Queryx( + `SELECT COLUMN_NAME FROM information_schema.COLUMNS WHERE TABLE_SCHEMA = ? 
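Because pt-table-checksum's exit status is a bitmask, several of the flags in PtExitFlagMap above can be set in a single run; decoding is just AND-ing each documented bit. A self-contained sketch:

```go
package main

import "fmt"

// Documented pt-table-checksum exit bits, mirroring PtExitFlagMap above.
var bits = []struct {
	bit  int
	name string
}{
	{1, "ERROR"}, {2, "ALREADY_RUNNING"}, {4, "CAUGHT_SIGNAL"},
	{8, "NO_SLAVES_FOUND"}, {16, "TABLE_DIFF"},
	{32, "SKIP_CHUNK"}, {64, "SKIP_TABLE"},
}

// decode returns the names of all flags present in the exit code.
func decode(exitCode int) (hit []string) {
	for _, b := range bits {
		if exitCode&b.bit != 0 {
			hit = append(hit, b.name)
		}
	}
	return hit
}

func main() {
	fmt.Println(decode(48)) // 16|32 -> [TABLE_DIFF SKIP_CHUNK]
}
```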
AND TABLE_NAME = ?`, + r.resultDB, + r.resultTbl, + ) + if err != nil { + slog.Error( + "fetch result table columns", + err, + slog.String("result table", r.resultTbl), + slog.String("result db", r.resultDB), + ) + return err + } + var columns []string + for rows.Next() { + var col string + err := rows.Scan(&col) + if err != nil { + slog.Error("iterator columns", err) + return err + } + + columns = append(columns, col) + } + + err = r.validateHistoryTable() + if err != nil { + slog.Error("move result validate history table again", err) + return err + } + slog.Info("move result validate history table again success") + + slog.Info("move result", slog.Time("ts", r.startTS)) + + conn, err := r.db.Conn(context.Background()) + if err != nil { + slog.Error("get connect", err) + return err + } + defer func() { + _ = conn.Close() + }() + + _, err = conn.ExecContext(context.Background(), `SET BINLOG_FORMAT = 'STATEMENT'`) + if err != nil { + slog.Error("set binlog_format = 'statement'", err) + return err + } + _, err = conn.ExecContext( + context.Background(), + fmt.Sprintf( + `INSERT INTO %s (%[2]s) SELECT %[2]s FROM %s WHERE ts >= ?`, + r.resultHistoryTable, + strings.Join(columns, ","), + r.resultTbl, + ), r.startTS, + ) + if err != nil { + slog.Error("move result", err) + return err + } + + return nil +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/report.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/report.go new file mode 100644 index 0000000000..7ac3766b58 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/report.go @@ -0,0 +1,116 @@ +package checker + +import ( + "dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter" + "fmt" + + "golang.org/x/exp/slog" +) + +// Report 只在 repeater, slave 的例行校验做上报 +func (r *Checker) Report() error { + if !r.hasHistoryTable { + slog.Info("report history table not found") + return nil + } + + // initReporter(path.Join(r.Config.ReportPath, "checksum_report.log")) + + // ToDo 清理太老的历史记录 + + rows, err := r.db.Queryx( + fmt.Sprintf( + `SELECT `+ + `master_ip, master_port, db, tbl, `+ + `chunk, chunk_time, chunk_index, lower_boundary, upper_boundary, `+ + `this_crc, this_cnt, master_crc, master_cnt, ts `+ + `FROM %s WHERE CONCAT(master_ip, ":", master_port) <> ? AND reported = 0`, + r.resultHistoryTable, + ), + fmt.Sprintf("%s:%d", r.Config.Ip, r.Config.Port), + ) + + if err != nil { + slog.Error("query unreported result", err) + return err + } + + // Todo 这里其实有个风险, 单独在 slave 上修改数据了 + stmt, err := r.db.Preparex( + fmt.Sprintf( + `UPDATE %s `+ + `SET reported = 1 `+ + `WHERE master_ip = ? AND master_port = ? AND db = ? AND tbl = ? AND chunk = ? 
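The reason moveResult forces statement-based logging is that the INSERT ... SELECT then replicates as the statement itself, so every slave re-runs it against its own result rows (whose this_crc values differ per replica) instead of receiving the master's rows. A minimal sketch of the same two-step pattern, with a hypothetical column subset and placeholder table names:

```go
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

// moveResult copies this round's checksum rows into the history table.
// Table and column names here are illustrative, not the real schema.
func moveResult(db *sql.DB, startTS time.Time) error {
	ctx := context.Background()
	conn, err := db.Conn(ctx) // both statements must share one session
	if err != nil {
		return err
	}
	defer conn.Close()

	// BINLOG_FORMAT is a session variable; statement logging makes slaves
	// re-execute the INSERT ... SELECT against their *own* result rows.
	if _, err := conn.ExecContext(ctx, `SET BINLOG_FORMAT = 'STATEMENT'`); err != nil {
		return err
	}
	_, err = conn.ExecContext(ctx,
		`INSERT INTO checksums_history (db, tbl, chunk, ts)
		 SELECT db, tbl, chunk, ts FROM checksums WHERE ts >= ?`, startTS)
	return err
}

func main() {
	db, err := sql.Open("mysql", "user:pass@tcp(127.0.0.1:3306)/test?parseTime=true")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := moveResult(db, time.Now().Add(-time.Hour)); err != nil {
		log.Fatal(err)
	}
}
```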
AND ts = ?`, + r.resultHistoryTable, + ), + ) + if err != nil { + slog.Error("prepare update statement", err) + return err + } + + for rows.Next() { + var cs reporter.ChecksumResult + err := rows.StructScan(&cs) + if err != nil { + slog.Error("scan unreported result", err) + return err + } + + slog.Debug("scan checksum history", slog.Any("checksum result", cs)) + + // err = writeReportRecord( + // ReportRecord{ + // ChecksumResult: cs, + // Ip: r.Config.Ip, + // Port: r.Config.Port, + // BKBizId: r.Config.BkBizId, + // ImmuteDomain: r.Config.Cluster.ImmuteDomain, + // ClusterId: r.Config.Cluster.Id, + // InnerRole: string(r.Config.InnerRole), + // }, + // ) + + err = r.reporter.Report(&cs) + if err != nil { + return err + } + + _, err = stmt.Exec( + cs.MasterIp, + cs.MasterPort, + cs.Db, + cs.Tbl, + cs.Chunk, + cs.Ts, + ) + + if err != nil { + slog.Error("update reported", err) + return err + } + } + return nil +} + +// /* +// json marshalling uses the default time format, +// so the reported timestamp looks slightly different from the one stored in the db +// */ +// func writeReportRecord(record ReportRecord) error { +// row, err := json.Marshal(record) +// if err != nil { +// slog.Error("marshal report", err) +// return err +// } +// +// slog.Debug("write report record", slog.String("record", string(row))) +// row = append(row, []byte("\n")...) +// +// _, err = reporter.Write(row) +// if err != nil { +// slog.Error("write report record", err) +// } +// +// return nil +// } diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/run_command.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/run_command.go new file mode 100644 index 0000000000..f238c69a79 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/run_command.go @@ -0,0 +1,147 @@ +package checker + +import ( + "bytes" + "context" + "dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config" + "encoding/json" + "fmt" + "os" + "os/exec" + "time" + + "github.com/pkg/errors" + "golang.org/x/exp/slog" +) + +// Run executes pt-table-checksum and post-processes its output +func (r *Checker) Run() error { + var stdout, stderr bytes.Buffer + + ctx, cancel := context.WithCancel(context.Background()) + r.cancel = cancel + + // stderr.Reset() + command := exec.CommandContext(ctx, r.Config.PtChecksum.Path, r.args...) + command.Stdout = &stdout + command.Stderr = &stderr + slog.Info("build command", slog.String("pt-table-checksum command", command.String())) + + r.startTS = time.Now() // .In(time.Local) + slog.Info("sleep 2s") + time.Sleep(2 * time.Second) // deliberately sleep 2s so the clock moves forward; mysql timestamp precision is limited, and starting too fast here causes problems + err := command.Run() + if err != nil { + if _, ok := err.(*exec.ExitError); !ok { + slog.Error("run pt-table-checksum got unexpected error", err) + return err + } + } + + ptErr, _ := err.(*exec.ExitError) + slog.Info( + "run pt-table-checksum finished", + slog.Any("pt err", ptErr), + ) + + /* + This is the most painful piece of logic here. According to the pt-table-checksum documentation: + + pt-table-checksum has three possible exit statuses: zero, 255, and any other value is a bitmask with flags for different problems. + + A zero exit status indicates no errors, warnings, or checksum differences, or skipped chunks or tables. + + A 255 exit status indicates a fatal error. In other words: the tool died or crashed. The error is printed to STDERR. + + If the exit status is not zero or 255, then its value functions as a bitmask with these flags: + ... and so on ... + + It looks as if every error were classified under some bit flag, but that is not the case at all: the tool's code is full of bare die calls that are covered by no documented flag, and the flags collide badly with the system errno values, so error handling written strictly from the documentation simply does not work. + The only robust approach is brute force: whenever there is any stderr output, return an error first, and only then interpret the flags. + + For reference: + FLAG BIT VALUE MEANING + ================ ========= ========================================== + ERROR 1 A non-fatal error occurred + ALREADY_RUNNING 2 --pid file exists and the PID is running + CAUGHT_SIGNAL 4 Caught SIGHUP, SIGINT, SIGPIPE, or SIGTERM + NO_SLAVES_FOUND 8 No replicas or cluster nodes were found + TABLE_DIFF 16 At least one diff was found + SKIP_CHUNK 32 At least one chunk was skipped + SKIP_TABLE 64 At least one table was skipped + REPLICATION_STOPPED 128 Replica is down or stopped + + What should be done with these flags: throw them as errors, or hand them back to the caller as a normal result to deal with? + Bits 1, 2 and 4 must be treated as errors; the rest are passed back for now. + */ + if stderr.Len() > 0 { + err = errors.New(stderr.String()) + slog.Error("run pt-table-checksum got undocumented error", err) + return err + } + + ptFlags := make([]PtExitFlag, 0) + if ptErr != nil { + ptFlags = collectFlags(ptErr) + } + + summaries, err := summary(stdout.String()) + if err != nil { + slog.Error( + "trans pt-table-checksum stdout to summary", + err, + slog.String("pt stdout", stdout.String()), + ) + return err + } + + if r.Mode == config.GeneralMode { + slog.Info("run in general mode") + err := r.moveResult() + if err != nil { + return err + } + /* + Getting here means no unexpected error occurred. + If nothing was done at all, this round is considered complete. + */ + if len(summaries) == 0 { + _, err := r.db.Exec(fmt.Sprintf(`TRUNCATE TABLE %s`, r.Config.PtChecksum.Replicate)) + if err != nil { + slog.Error( + "truncate regular result table", + err, + slog.String("table name", r.Config.PtChecksum.Replicate), + ) + return err + } + } + } else { + slog.Info("run in demand mode") + } + + output := Output{ + PtStderr: stderr.String(), + Summaries: summaries, + PtExitFlags: ptFlags, + } + ojson, err := json.Marshal(output) + if err != nil { + slog.Error("marshal output", err, slog.String("output", fmt.Sprintf("%v", output))) + return err + } + + fmt.Println(string(ojson)) + + if ptErr != nil && (ptErr.ExitCode()&1 != 0 || ptErr.ExitCode()&2 != 0 || ptErr.ExitCode()&4 != 0) { + err = errors.New(string(ojson)) + slog.Error("run pt-table-checksum bad flag found", err) + fmt.Fprintln(os.Stderr, string(ojson)) + return err + } + + return nil +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/strategy.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/strategy.go new file mode 100644 index 0000000000..b506206cd6 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/strategy.go @@ -0,0 +1,99 @@ +package checker + +import ( + "fmt" + + "golang.org/x/exp/slices" +) + +type switchStrategy struct { + Name string + Value bool + // Enable bool + HasOpposite bool +} + +type kvStrategy struct { + Name string + Value func(*Checker) interface{} + Enable bool +} + +func (r *Checker) applyForceSwitchStrategy(strategies []switchStrategy) { + for _, st := range strategies { + if idx := slices.Index(r.Config.PtChecksum.Switches, st.Name); idx > -1 { + r.Config.PtChecksum.Switches = slices.Delete(r.Config.PtChecksum.Switches, idx, idx+1) + } + if idx := slices.Index(r.Config.PtChecksum.Switches, fmt.Sprintf("no-%s", st.Name)); idx > -1 { + r.Config.PtChecksum.Switches = slices.Delete(r.Config.PtChecksum.Switches, idx, idx+1) + } + if st.Value { + r.Config.PtChecksum.Switches = append(r.Config.PtChecksum.Switches, st.Name) + } else { + if st.HasOpposite { + r.Config.PtChecksum.Switches = append(r.Config.PtChecksum.Switches, fmt.Sprintf("no-%s",
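A compact restatement of that policy (any stderr is fatal; bits 1, 2 and 4 are errors; the diff/skip bits are results for the caller), as a runnable sketch:

```go
package main

import "fmt"

// classify mirrors the policy above: stderr always wins, then the
// ERROR(1)/ALREADY_RUNNING(2)/CAUGHT_SIGNAL(4) bits are fatal, and the
// remaining bits (TABLE_DIFF, SKIP_*) are data, not failures.
func classify(exitCode int, stderr string) error {
	if stderr != "" {
		return fmt.Errorf("pt-table-checksum stderr: %s", stderr)
	}
	if exitCode&(1|2|4) != 0 {
		return fmt.Errorf("pt-table-checksum bad exit flags in code %d", exitCode)
	}
	return nil
}

func main() {
	fmt.Println(classify(16, "")) // <nil>: a diff was found, but that is a result
	fmt.Println(classify(4, ""))  // error: the tool caught a signal
}
```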
st.Name)) + } + } + } +} + +func (r *Checker) applyDefaultSwitchStrategy(strategies []switchStrategy) { + for _, st := range strategies { + if slices.Index(r.Config.PtChecksum.Switches, st.Name) == -1 && + slices.Index(r.Config.PtChecksum.Switches, fmt.Sprintf("no-%s", st.Name)) == -1 { + if st.Value { + r.Config.PtChecksum.Switches = append(r.Config.PtChecksum.Switches, st.Name) + } else { + if st.HasOpposite { + r.Config.PtChecksum.Switches = append(r.Config.PtChecksum.Switches, fmt.Sprintf("no-%s", st.Name)) + } + } + } + } +} + +func (r *Checker) applyForceKVStrategy(strategies []kvStrategy) { + for _, st := range strategies { + idx := slices.IndexFunc( + r.Config.PtChecksum.Args, func(kvArg map[string]interface{}) bool { + return kvArg["name"] == st.Name + }, + ) + if idx == -1 { + if st.Enable { + r.Config.PtChecksum.Args = append( + r.Config.PtChecksum.Args, map[string]interface{}{ + "name": st.Name, + "value": st.Value(r), + }, + ) + } + } else { + if st.Enable { + r.Config.PtChecksum.Args[idx]["value"] = st.Value(r) + } else { + r.Config.PtChecksum.Args = slices.Delete(r.Config.PtChecksum.Args, idx, idx+1) + } + } + } +} + +func (r *Checker) applyDefaultKVStrategy(strategies []kvStrategy) { + for _, st := range strategies { + idx := slices.IndexFunc( + r.Config.PtChecksum.Args, func(kvArg map[string]interface{}) bool { + return kvArg["name"] == st.Name + }, + ) + if idx == -1 { + if st.Enable { + r.Config.PtChecksum.Args = append( + r.Config.PtChecksum.Args, map[string]interface{}{ + "name": st.Name, + "value": st.Value(r), + }, + ) + } + } + } +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/summary.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/summary.go new file mode 100644 index 0000000000..86c0b7282b --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/summary.go @@ -0,0 +1,77 @@ +package checker + +import ( + "bufio" + "fmt" + "os/exec" + "regexp" + "strconv" + "strings" + "time" + + "golang.org/x/exp/slog" +) + +func summary(stdout string) (summaries []ChecksumSummary, err error) { + scanner := bufio.NewScanner(strings.NewReader(stdout)) + scanner.Split(bufio.ScanLines) + + isSummary := false + SplitRe, _ := regexp.Compile(`\s+`) + + for scanner.Scan() { + line := scanner.Text() + /* + Checking if all tables can be checksummed ... + Starting checksum ... 
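The force/default split above boils down to this: a forced switch deletes whatever the operator configured under that name (both the positive and the no- form) and re-adds the enforced value, while a default switch only fills a gap. A standalone sketch of the forced case, with hypothetical switch values:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// forceSwitch drops any user-supplied "name"/"no-name" entry, then re-adds
// the enforced form, mirroring applyForceSwitchStrategy.
func forceSwitch(switches []string, name string, on, hasOpposite bool) []string {
	for _, s := range []string{name, "no-" + name} {
		if i := slices.Index(switches, s); i > -1 {
			switches = slices.Delete(switches, i, i+1)
		}
	}
	if on {
		return append(switches, name)
	}
	if hasOpposite {
		return append(switches, "no-"+name)
	}
	return switches
}

func main() {
	got := forceSwitch([]string{"check-binlog-format", "quiet"}, "check-binlog-format", false, true)
	fmt.Println(got) // [quiet no-check-binlog-format]
}
```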
+ TS ERRORS DIFFS ROWS DIFF_ROWS CHUNKS SKIPPED TIME TABLE + 11-07T15:22:36 0 0 0 0 1 0 0.563 mysql.time_zone_leap_second + 11-07T15:22:38 0 0 1826 0 4 0 2.242 mysql.time_zone_name + + this is what pt-table-checksum's standard output looks like; once a TS line is found, + everything after it is treated as the report + */ + if strings.HasPrefix(strings.TrimSpace(line), "TS") { + isSummary = true + continue + } + if isSummary { + var cs ChecksumSummary + splitRow := SplitRe.Split(line, -1) + + // the ts column in the pt-table-checksum summary has no year, so add it ourselves + ts, err := time.ParseInLocation( + "2006-01-02T15:04:05", + fmt.Sprintf(`%d-%s`, time.Now().Year(), splitRow[0]), + time.Local, + ) + if err != nil { + slog.Error("parse time", err, slog.String("original row", line)) + return nil, err + } + + cs.Ts = ts + cs.Errors, _ = strconv.Atoi(splitRow[1]) + cs.Diffs, _ = strconv.Atoi(splitRow[2]) + cs.Rows, _ = strconv.Atoi(splitRow[3]) + cs.DiffRows, _ = strconv.Atoi(splitRow[4]) + cs.Chunks, _ = strconv.Atoi(splitRow[5]) + cs.Skipped, _ = strconv.Atoi(splitRow[6]) + ptTime, _ := strconv.ParseFloat(splitRow[7], 64) // the TIME column is fractional seconds ("2.242"); Atoi would always fail here + cs.Time = int(ptTime) + cs.Table = splitRow[8] + + summaries = append(summaries, cs) + } + } + return summaries, nil +} + +func collectFlags(exitErr *exec.ExitError) (ptFlags []PtExitFlag) { + exitCode := exitErr.ExitCode() + for k, v := range PtExitFlagMap { + if exitCode&k != 0 { + ptFlags = append(ptFlags, v) + } + } + return +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/utils.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/utils.go new file mode 100644 index 0000000000..abafb200a9 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/checker/utils.go @@ -0,0 +1,15 @@ +package checker + +import "strings" + +func rewritePattern(pattern string) string { + return strings.Replace( + strings.Replace( + strings.Replace( + strings.Replace(pattern, "%", ".*", -1), + "?", ".", -1, + ), + `\.*`, `\%`, -1, + ), `\.`, `\?`, -1, + ) +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config/config.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config/config.go new file mode 100644 index 0000000000..d9483c4748 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config/config.go @@ -0,0 +1,96 @@ +// Package config implements checksum configuration loading +package config + +import ( + "os" + "path" + "path/filepath" + + "golang.org/x/exp/slog" + "gopkg.in/yaml.v2" +) + +// ChecksumConfig is the global checksum configuration +var ChecksumConfig *Config + +type host struct { + Ip string `yaml:"ip"` + Port int `yaml:"port"` + User string `yaml:"user"` + Password string `yaml:"password"` +} + +type filter struct { + Databases []string `yaml:"databases"` + Tables []string `yaml:"tables"` + IgnoreDatabases []string `yaml:"ignore_databases"` + IgnoreTables []string `yaml:"ignore_tables"` + DatabasesRegex string `yaml:"databases_regex"` + TablesRegex string `yaml:"tables_regex"` + IgnoreDatabasesRegex string `yaml:"ignore_databases_regex"` + IgnoreTablesRegex string `yaml:"ignore_tables_regex"` +} + +type ptChecksum struct { + Path string `yaml:"path"` + Switches []string `yaml:"switches"` + Args []map[string]interface{} `yaml:"args"` + Replicate string `yaml:"replicate"` +} + +// InnerRoleEnum enumerates instance roles inside a cluster +type InnerRoleEnum string + +const ( + // RoleMaster master + RoleMaster InnerRoleEnum = "master" + // RoleRepeater repeater + RoleRepeater InnerRoleEnum = "repeater" + // RoleSlave slave + RoleSlave InnerRoleEnum = "slave" +) + +// Config is the top-level configuration structure +type Config struct { + BkBizId int `yaml:"bk_biz_id"` + Cluster struct { + Id int `yaml:"id"` + ImmuteDomain string `yaml:"immute_domain"` + }
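The year-stitching done for the TS column can be seen in isolation: take the first whitespace-separated field, prefix the current year, and parse in local time. A self-contained sketch using one sample report row:

```go
package main

import (
	"fmt"
	"log"
	"regexp"
	"time"
)

var splitRe = regexp.MustCompile(`\s+`)

func main() {
	// One body row of the pt-table-checksum report; the TS column has no year.
	line := "11-07T15:22:38 0 0 1826 0 4 0 2.242 mysql.time_zone_name"
	cols := splitRe.Split(line, -1)

	ts, err := time.ParseInLocation(
		"2006-01-02T15:04:05",
		fmt.Sprintf("%d-%s", time.Now().Year(), cols[0]),
		time.Local,
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ts, "table:", cols[8])
}
```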
`yaml:"cluster"` + host `yaml:",inline"` + InnerRole InnerRoleEnum `yaml:"inner_role"` + ReportPath string `yaml:"report_path"` + Slaves []host `yaml:"slaves"` + Filter filter `yaml:"filter"` + PtChecksum ptChecksum `yaml:"pt_checksum"` + Log *LogConfig `yaml:"log"` + Schedule string `yaml:"schedule"` + ApiUrl string `yaml:"api_url"` +} + +// InitConfig 初始化配置 +func InitConfig(configPath string) error { + if !path.IsAbs(configPath) { + cwd, err := os.Getwd() + if err != nil { + slog.Error("init config", err) + return err + } + configPath = filepath.Join(cwd, configPath) + } + + content, err := os.ReadFile(configPath) + if err != nil { + slog.Error("init config", err) + return err + } + + ChecksumConfig = &Config{} + err = yaml.UnmarshalStrict(content, ChecksumConfig) + if err != nil { + slog.Error("init config", err) + return err + } + + return nil +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config/init.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config/init.go new file mode 100644 index 0000000000..043daf3e38 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config/init.go @@ -0,0 +1,20 @@ +package config + +// CheckMode 校验模式 +type CheckMode string + +const ( + // GeneralMode 常规校验 + GeneralMode CheckMode = "general" + // DemandMode 单据校验 + DemandMode = "demand" +) + +// String 用于打印 +func (c CheckMode) String() string { + return string(c) +} + +func init() { + +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config/log_config.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config/log_config.go new file mode 100644 index 0000000000..face128c16 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config/log_config.go @@ -0,0 +1,10 @@ +package config + +// LogConfig 日志配置结构 +type LogConfig struct { + Console bool `yaml:"console"` + LogFileDir *string `yaml:"log_file_dir"` + Debug bool `yaml:"debug"` + Source bool `yaml:"source"` + Json bool `yaml:"json"` +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter/init.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter/init.go new file mode 100644 index 0000000000..74ad6f65f5 --- /dev/null +++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter/init.go @@ -0,0 +1,32 @@ +package reporter + +import "time" + +// ChecksumResult 校验结果 +type ChecksumResult struct { + MasterIp string `db:"master_ip" json:"master_ip"` + MasterPort int `db:"master_port" json:"master_port"` + Db string `db:"db" json:"db"` + Tbl string `db:"tbl" json:"tbl"` + Chunk int `db:"chunk" json:"chunk"` + ChunkTime float64 `db:"chunk_time" json:"chunk_time"` + ChunkIndex *string `db:"chunk_index" json:"chunk_index"` + LowerBoundary *string `db:"lower_boundary" json:"lower_boundary"` + UpperBoundary *string `db:"upper_boundary" json:"upper_boundary"` + ThisCrc string `db:"this_crc" json:"this_crc"` + ThisCnt int `db:"this_cnt" json:"this_cnt"` + MasterCrc string `db:"master_crc" json:"master_crc"` + MasterCnt int `db:"master_cnt" json:"master_cnt"` + Ts time.Time `db:"ts" json:"ts"` +} + +// ReportRecord 上报记录 +type ReportRecord struct { + *ChecksumResult `json:",inline"` + BKBizId int `json:"bk_biz_id"` + ImmuteDomain string `json:"immute_domain"` + ClusterId int `json:"cluster_id"` + Ip string `json:"ip"` + Port int `json:"port"` + InnerRole string `json:"inner_role"` +} diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter/report.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter/report.go 
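Each unreported history row is wrapped with the cluster identity fields and emitted as one JSON object per line, which is the shape the bk log collector ingests. A sketch with hypothetical values, using an illustrative subset of ReportRecord:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// record is an illustrative subset of the checksum report record.
type record struct {
	MasterIP   string    `json:"master_ip"`
	MasterPort int       `json:"master_port"`
	DB         string    `json:"db"`
	Tbl        string    `json:"tbl"`
	Chunk      int       `json:"chunk"`
	Ts         time.Time `json:"ts"`
	BKBizID    int       `json:"bk_biz_id"`
	ClusterID  int       `json:"cluster_id"`
	InnerRole  string    `json:"inner_role"`
}

func main() {
	row, _ := json.Marshal(record{
		MasterIP: "10.0.0.1", MasterPort: 20000,
		DB: "test", Tbl: "t1", Chunk: 3, Ts: time.Now(),
		BKBizID: 100, ClusterID: 1, InnerRole: "slave",
	})
	fmt.Println(string(row)) // one JSON object per line
}
```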
diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter/report.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter/report.go
new file mode 100644
index 0000000000..acd6485f44
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter/report.go
@@ -0,0 +1,36 @@
+package reporter
+
+import (
+	"encoding/json"
+
+	"golang.org/x/exp/slog"
+)
+
+// Report writes one checksum result to the report file
+func (r *Reporter) Report(cs *ChecksumResult) error {
+	rr := ReportRecord{
+		ChecksumResult: cs,
+		BKBizId:        r.cfg.BkBizId,
+		ImmuteDomain:   r.cfg.Cluster.ImmuteDomain,
+		ClusterId:      r.cfg.Cluster.Id,
+		Ip:             r.cfg.Ip,
+		Port:           r.cfg.Port,
+		InnerRole:      string(r.cfg.InnerRole),
+	}
+
+	row, err := json.Marshal(rr)
+	if err != nil {
+		slog.Error("marshal report", err)
+		return err
+	}
+
+	row = append(row, []byte("\n")...)
+
+	_, err = r.writer.Write(row)
+	if err != nil {
+		slog.Error("write report", err)
+		return err
+	}
+
+	return nil
+}
diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter/reporter.go b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter/reporter.go
new file mode 100644
index 0000000000..7c8eef7276
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pkg/reporter/reporter.go
@@ -0,0 +1,30 @@
+// Package reporter reports checksum results to bk log
+package reporter
+
+import (
+	"dbm-services/mysql/db-tools/mysql-table-checksum/pkg/config"
+	"path"
+
+	"gopkg.in/natefinch/lumberjack.v2"
+)
+
+// Reporter reports to bk log
+type Reporter struct {
+	writer *lumberjack.Logger
+	cfg    *config.Config
+}
+
+// var reporter *lumberjack.Logger
+
+// NewReporter creates a Reporter whose report file is rotated by lumberjack
+func NewReporter(cfg *config.Config) *Reporter {
+	return &Reporter{
+		cfg: cfg,
+		writer: &lumberjack.Logger{
+			Filename:   path.Join(cfg.ReportPath, "checksum_report.log"),
+			MaxSize:    100,
+			MaxAge:     30,
+			MaxBackups: 50,
+		},
+	}
+}
diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pt-table-checksum b/dbm-services/mysql/db-tools/mysql-table-checksum/pt-table-checksum
new file mode 100755
index 0000000000..bd9de17438
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pt-table-checksum
@@ -0,0 +1,14202 @@
+#!/usr/bin/env perl
+
+# This program is part of Percona Toolkit: http://www.percona.com/software/
+# See "COPYRIGHT, LICENSE, AND WARRANTY" at the end of this file for legal
+# notices and disclaimers.
+
+use strict;
+use warnings FATAL => 'all';
+
+# This tool is "fat-packed": most of its dependent modules are embedded
+# in this file. Setting %INC to this file for each module makes Perl aware
+# of this so it will not try to load the module from @INC. See the tool's
+# documentation for a full list of dependencies.
+BEGIN {
+   $INC{$_} = __FILE__ for map { (my $pkg = "$_.pm") =~ s!::!/!g; $pkg } (qw(
+      Percona::Toolkit
+      HTTP::Micro
+      VersionCheck
+      DSNParser
+      OptionParser
+      Lmo::Utils
+      Lmo::Meta
+      Lmo::Object
+      Lmo::Types
+      Lmo
+      Cxn
+      Percona::XtraDB::Cluster
+      Quoter
+      VersionParser
+      TableParser
+      TableNibbler
+      MasterSlave
+      RowChecksum
+      NibbleIterator
+      OobNibbleIterator
+      Daemon
+      SchemaIterator
+      Retry
+      Transformers
+      Progress
+      ReplicaLagWaiter
+      MySQLConfig
+      MySQLStatusWaiter
+      WeightedAvgRate
+      IndexLength
+      Runtime
+   ));
+}
+
+# ###########################################################################
+# Percona::Toolkit package
+# This package is a copy without comments from the original. The original
+# with comments and its test file can be found in the Bazaar repository at,
+# lib/Percona/Toolkit.pm
+# t/lib/Percona/Toolkit.t
+# See https://launchpad.net/percona-toolkit for more information.
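+# (All of the package blocks that follow are embedded copies, enabled by the
+# BEGIN block above: pre-seeding %INC with __FILE__ for every embedded
+# package makes any later "require Quoter;" a no-op, because Perl already
+# considers the module loaded from this very file and never searches @INC.)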
+# ########################################################################### +{ +package Percona::Toolkit; + +our $VERSION = '3.3.2'; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Carp qw(carp cluck); +use Data::Dumper qw(); + +require Exporter; +our @ISA = qw(Exporter); +our @EXPORT_OK = qw( + have_required_args + Dumper + _d +); + +sub have_required_args { + my ($args, @required_args) = @_; + my $have_required_args = 1; + foreach my $arg ( @required_args ) { + if ( !defined $args->{$arg} ) { + $have_required_args = 0; + carp "Argument $arg is not defined"; + } + } + cluck unless $have_required_args; # print backtrace + return $have_required_args; +} + +sub Dumper { + local $Data::Dumper::Indent = 1; + local $Data::Dumper::Sortkeys = 1; + local $Data::Dumper::Quotekeys = 0; + Data::Dumper::Dumper(@_); +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Percona::Toolkit package +# ########################################################################### + +# ########################################################################### +# HTTP::Micro package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/HTTP/Micro.pm +# t/lib/HTTP/Micro.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package HTTP::Micro; + +our $VERSION = '0.01'; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Carp (); + +my @attributes; +BEGIN { + @attributes = qw(agent timeout); + no strict 'refs'; + for my $accessor ( @attributes ) { + *{$accessor} = sub { + @_ > 1 ? $_[0]->{$accessor} = $_[1] : $_[0]->{$accessor}; + }; + } +} + +sub new { + my($class, %args) = @_; + (my $agent = $class) =~ s{::}{-}g; + my $self = { + agent => $agent . "/" . ($class->VERSION || 0), + timeout => 60, + }; + for my $key ( @attributes ) { + $self->{$key} = $args{$key} if exists $args{$key} + } + return bless $self, $class; +} + +my %DefaultPort = ( + http => 80, + https => 443, +); + +sub request { + my ($self, $method, $url, $args) = @_; + @_ == 3 || (@_ == 4 && ref $args eq 'HASH') + or Carp::croak(q/Usage: $http->request(METHOD, URL, [HASHREF])/); + $args ||= {}; # we keep some state in this during _request + + my $response; + for ( 0 .. 1 ) { + $response = eval { $self->_request($method, $url, $args) }; + last unless $@ && $method eq 'GET' + && $@ =~ m{^(?:Socket closed|Unexpected end)}; + } + + if (my $e = "$@") { + $response = { + success => q{}, + status => 599, + reason => 'Internal Exception', + content => $e, + headers => { + 'content-type' => 'text/plain', + 'content-length' => length $e, + } + }; + } + return $response; +} + +sub _request { + my ($self, $method, $url, $args) = @_; + + my ($scheme, $host, $port, $path_query) = $self->_split_url($url); + + my $request = { + method => $method, + scheme => $scheme, + host_port => ($port == $DefaultPort{$scheme} ? 
$host : "$host:$port"), + uri => $path_query, + headers => {}, + }; + + my $handle = HTTP::Micro::Handle->new(timeout => $self->{timeout}); + + $handle->connect($scheme, $host, $port); + + $self->_prepare_headers_and_cb($request, $args); + $handle->write_request_header(@{$request}{qw/method uri headers/}); + $handle->write_content_body($request) if $request->{content}; + + my $response; + do { $response = $handle->read_response_header } + until (substr($response->{status},0,1) ne '1'); + + if (!($method eq 'HEAD' || $response->{status} =~ /^[23]04/)) { + $response->{content} = ''; + $handle->read_content_body(sub { $_[1]->{content} .= $_[0] }, $response); + } + + $handle->close; + $response->{success} = substr($response->{status},0,1) eq '2'; + return $response; +} + +sub _prepare_headers_and_cb { + my ($self, $request, $args) = @_; + + for ($args->{headers}) { + next unless defined; + while (my ($k, $v) = each %$_) { + $request->{headers}{lc $k} = $v; + } + } + $request->{headers}{'host'} = $request->{host_port}; + $request->{headers}{'connection'} = "close"; + $request->{headers}{'user-agent'} ||= $self->{agent}; + + if (defined $args->{content}) { + $request->{headers}{'content-type'} ||= "application/octet-stream"; + utf8::downgrade($args->{content}, 1) + or Carp::croak(q/Wide character in request message body/); + $request->{headers}{'content-length'} = length $args->{content}; + $request->{content} = $args->{content}; + } + return; +} + +sub _split_url { + my $url = pop; + + my ($scheme, $authority, $path_query) = $url =~ m<\A([^:/?#]+)://([^/?#]*)([^#]*)> + or Carp::croak(qq/Cannot parse URL: '$url'/); + + $scheme = lc $scheme; + $path_query = "/$path_query" unless $path_query =~ m<\A/>; + + my $host = (length($authority)) ? lc $authority : 'localhost'; + $host =~ s/\A[^@]*@//; # userinfo + my $port = do { + $host =~ s/:([0-9]*)\z// && length $1 + ? 
$1 + : $DefaultPort{$scheme} + }; + + return ($scheme, $host, $port, $path_query); +} + +} # HTTP::Micro + +{ + package HTTP::Micro::Handle; + + use strict; + use warnings FATAL => 'all'; + use English qw(-no_match_vars); + + use Carp qw(croak); + use Errno qw(EINTR EPIPE); + use IO::Socket qw(SOCK_STREAM); + + sub BUFSIZE () { 32768 } + + my $Printable = sub { + local $_ = shift; + s/\r/\\r/g; + s/\n/\\n/g; + s/\t/\\t/g; + s/([^\x20-\x7E])/sprintf('\\x%.2X', ord($1))/ge; + $_; + }; + + sub new { + my ($class, %args) = @_; + return bless { + rbuf => '', + timeout => 60, + max_line_size => 16384, + %args + }, $class; + } + + my $ssl_verify_args = { + check_cn => "when_only", + wildcards_in_alt => "anywhere", + wildcards_in_cn => "anywhere" + }; + + sub connect { + @_ == 4 || croak(q/Usage: $handle->connect(scheme, host, port)/); + my ($self, $scheme, $host, $port) = @_; + + if ( $scheme eq 'https' ) { + eval "require IO::Socket::SSL" + unless exists $INC{'IO/Socket/SSL.pm'}; + croak(qq/IO::Socket::SSL must be installed for https support\n/) + unless $INC{'IO/Socket/SSL.pm'}; + } + elsif ( $scheme ne 'http' ) { + croak(qq/Unsupported URL scheme '$scheme'\n/); + } + + $self->{fh} = IO::Socket::INET->new( + PeerHost => $host, + PeerPort => $port, + Proto => 'tcp', + Type => SOCK_STREAM, + Timeout => $self->{timeout} + ) or croak(qq/Could not connect to '$host:$port': $@/); + + binmode($self->{fh}) + or croak(qq/Could not binmode() socket: '$!'/); + + if ( $scheme eq 'https') { + IO::Socket::SSL->start_SSL($self->{fh}); + ref($self->{fh}) eq 'IO::Socket::SSL' + or die(qq/SSL connection failed for $host\n/); + if ( $self->{fh}->can("verify_hostname") ) { + $self->{fh}->verify_hostname( $host, $ssl_verify_args ) + or die(qq/SSL certificate not valid for $host\n/); + } + else { + my $fh = $self->{fh}; + _verify_hostname_of_cert($host, _peer_certificate($fh), $ssl_verify_args) + or die(qq/SSL certificate not valid for $host\n/); + } + } + + $self->{host} = $host; + $self->{port} = $port; + + return $self; + } + + sub close { + @_ == 1 || croak(q/Usage: $handle->close()/); + my ($self) = @_; + CORE::close($self->{fh}) + or croak(qq/Could not close socket: '$!'/); + } + + sub write { + @_ == 2 || croak(q/Usage: $handle->write(buf)/); + my ($self, $buf) = @_; + + my $len = length $buf; + my $off = 0; + + local $SIG{PIPE} = 'IGNORE'; + + while () { + $self->can_write + or croak(q/Timed out while waiting for socket to become ready for writing/); + my $r = syswrite($self->{fh}, $buf, $len, $off); + if (defined $r) { + $len -= $r; + $off += $r; + last unless $len > 0; + } + elsif ($! == EPIPE) { + croak(qq/Socket closed by remote server: $!/); + } + elsif ($! != EINTR) { + croak(qq/Could not write to socket: '$!'/); + } + } + return $off; + } + + sub read { + @_ == 2 || @_ == 3 || croak(q/Usage: $handle->read(len)/); + my ($self, $len) = @_; + + my $buf = ''; + my $got = length $self->{rbuf}; + + if ($got) { + my $take = ($got < $len) ? $got : $len; + $buf = substr($self->{rbuf}, 0, $take, ''); + $len -= $take; + } + + while ($len > 0) { + $self->can_read + or croak(q/Timed out while waiting for socket to become ready for reading/); + my $r = sysread($self->{fh}, $buf, $len, length $buf); + if (defined $r) { + last unless $r; + $len -= $r; + } + elsif ($! 
!= EINTR) { + croak(qq/Could not read from socket: '$!'/); + } + } + if ($len) { + croak(q/Unexpected end of stream/); + } + return $buf; + } + + sub readline { + @_ == 1 || croak(q/Usage: $handle->readline()/); + my ($self) = @_; + + while () { + if ($self->{rbuf} =~ s/\A ([^\x0D\x0A]* \x0D?\x0A)//x) { + return $1; + } + $self->can_read + or croak(q/Timed out while waiting for socket to become ready for reading/); + my $r = sysread($self->{fh}, $self->{rbuf}, BUFSIZE, length $self->{rbuf}); + if (defined $r) { + last unless $r; + } + elsif ($! != EINTR) { + croak(qq/Could not read from socket: '$!'/); + } + } + croak(q/Unexpected end of stream while looking for line/); + } + + sub read_header_lines { + @_ == 1 || @_ == 2 || croak(q/Usage: $handle->read_header_lines([headers])/); + my ($self, $headers) = @_; + $headers ||= {}; + my $lines = 0; + my $val; + + while () { + my $line = $self->readline; + + if ($line =~ /\A ([^\x00-\x1F\x7F:]+) : [\x09\x20]* ([^\x0D\x0A]*)/x) { + my ($field_name) = lc $1; + $val = \($headers->{$field_name} = $2); + } + elsif ($line =~ /\A [\x09\x20]+ ([^\x0D\x0A]*)/x) { + $val + or croak(q/Unexpected header continuation line/); + next unless length $1; + $$val .= ' ' if length $$val; + $$val .= $1; + } + elsif ($line =~ /\A \x0D?\x0A \z/x) { + last; + } + else { + croak(q/Malformed header line: / . $Printable->($line)); + } + } + return $headers; + } + + sub write_header_lines { + (@_ == 2 && ref $_[1] eq 'HASH') || croak(q/Usage: $handle->write_header_lines(headers)/); + my($self, $headers) = @_; + + my $buf = ''; + while (my ($k, $v) = each %$headers) { + my $field_name = lc $k; + $field_name =~ /\A [\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5A\x5E-\x7A\x7C\x7E]+ \z/x + or croak(q/Invalid HTTP header field name: / . $Printable->($field_name)); + $field_name =~ s/\b(\w)/\u$1/g; + $buf .= "$field_name: $v\x0D\x0A"; + } + $buf .= "\x0D\x0A"; + return $self->write($buf); + } + + sub read_content_body { + @_ == 3 || @_ == 4 || croak(q/Usage: $handle->read_content_body(callback, response, [read_length])/); + my ($self, $cb, $response, $len) = @_; + $len ||= $response->{headers}{'content-length'}; + + croak("No content-length in the returned response, and this " + . "UA doesn't implement chunking") unless defined $len; + + while ($len > 0) { + my $read = ($len > BUFSIZE) ? BUFSIZE : $len; + $cb->($self->read($read), $response); + $len -= $read; + } + + return; + } + + sub write_content_body { + @_ == 2 || croak(q/Usage: $handle->write_content_body(request)/); + my ($self, $request) = @_; + my ($len, $content_length) = (0, $request->{headers}{'content-length'}); + + $len += $self->write($request->{content}); + + $len == $content_length + or croak(qq/Content-Length missmatch (got: $len expected: $content_length)/); + + return $len; + } + + sub read_response_header { + @_ == 1 || croak(q/Usage: $handle->read_response_header()/); + my ($self) = @_; + + my $line = $self->readline; + + $line =~ /\A (HTTP\/(0*\d+\.0*\d+)) [\x09\x20]+ ([0-9]{3}) [\x09\x20]+ ([^\x0D\x0A]*) \x0D?\x0A/x + or croak(q/Malformed Status-Line: / . 
$Printable->($line)); + + my ($protocol, $version, $status, $reason) = ($1, $2, $3, $4); + + return { + status => $status, + reason => $reason, + headers => $self->read_header_lines, + protocol => $protocol, + }; + } + + sub write_request_header { + @_ == 4 || croak(q/Usage: $handle->write_request_header(method, request_uri, headers)/); + my ($self, $method, $request_uri, $headers) = @_; + + return $self->write("$method $request_uri HTTP/1.1\x0D\x0A") + + $self->write_header_lines($headers); + } + + sub _do_timeout { + my ($self, $type, $timeout) = @_; + $timeout = $self->{timeout} + unless defined $timeout && $timeout >= 0; + + my $fd = fileno $self->{fh}; + defined $fd && $fd >= 0 + or croak(q/select(2): 'Bad file descriptor'/); + + my $initial = time; + my $pending = $timeout; + my $nfound; + + vec(my $fdset = '', $fd, 1) = 1; + + while () { + $nfound = ($type eq 'read') + ? select($fdset, undef, undef, $pending) + : select(undef, $fdset, undef, $pending) ; + if ($nfound == -1) { + $! == EINTR + or croak(qq/select(2): '$!'/); + redo if !$timeout || ($pending = $timeout - (time - $initial)) > 0; + $nfound = 0; + } + last; + } + $! = 0; + return $nfound; + } + + sub can_read { + @_ == 1 || @_ == 2 || croak(q/Usage: $handle->can_read([timeout])/); + my $self = shift; + return $self->_do_timeout('read', @_) + } + + sub can_write { + @_ == 1 || @_ == 2 || croak(q/Usage: $handle->can_write([timeout])/); + my $self = shift; + return $self->_do_timeout('write', @_) + } +} # HTTP::Micro::Handle + +my $prog = <<'EOP'; +BEGIN { + if ( defined &IO::Socket::SSL::CAN_IPV6 ) { + *CAN_IPV6 = \*IO::Socket::SSL::CAN_IPV6; + } + else { + constant->import( CAN_IPV6 => '' ); + } + my %const = ( + NID_CommonName => 13, + GEN_DNS => 2, + GEN_IPADD => 7, + ); + while ( my ($name,$value) = each %const ) { + no strict 'refs'; + *{$name} = UNIVERSAL::can( 'Net::SSLeay', $name ) || sub { $value }; + } +} +{ + use Carp qw(croak); + my %dispatcher = ( + issuer => sub { Net::SSLeay::X509_NAME_oneline( Net::SSLeay::X509_get_issuer_name( shift )) }, + subject => sub { Net::SSLeay::X509_NAME_oneline( Net::SSLeay::X509_get_subject_name( shift )) }, + ); + if ( $Net::SSLeay::VERSION >= 1.30 ) { + $dispatcher{commonName} = sub { + my $cn = Net::SSLeay::X509_NAME_get_text_by_NID( + Net::SSLeay::X509_get_subject_name( shift ), NID_CommonName); + $cn =~s{\0$}{}; # work around Bug in Net::SSLeay <1.33 + $cn; + } + } else { + $dispatcher{commonName} = sub { + croak "you need at least Net::SSLeay version 1.30 for getting commonName" + } + } + + if ( $Net::SSLeay::VERSION >= 1.33 ) { + $dispatcher{subjectAltNames} = sub { Net::SSLeay::X509_get_subjectAltNames( shift ) }; + } else { + $dispatcher{subjectAltNames} = sub { + return; + }; + } + + $dispatcher{authority} = $dispatcher{issuer}; + $dispatcher{owner} = $dispatcher{subject}; + $dispatcher{cn} = $dispatcher{commonName}; + + sub _peer_certificate { + my ($self, $field) = @_; + my $ssl = $self->_get_ssl_object or return; + + my $cert = ${*$self}{_SSL_certificate} + ||= Net::SSLeay::get_peer_certificate($ssl) + or return $self->error("Could not retrieve peer certificate"); + + if ($field) { + my $sub = $dispatcher{$field} or croak + "invalid argument for peer_certificate, valid are: ".join( " ",keys %dispatcher ). 
+ "\nMaybe you need to upgrade your Net::SSLeay"; + return $sub->($cert); + } else { + return $cert + } + } + + + my %scheme = ( + ldap => { + wildcards_in_cn => 0, + wildcards_in_alt => 'leftmost', + check_cn => 'always', + }, + http => { + wildcards_in_cn => 'anywhere', + wildcards_in_alt => 'anywhere', + check_cn => 'when_only', + }, + smtp => { + wildcards_in_cn => 0, + wildcards_in_alt => 0, + check_cn => 'always' + }, + none => {}, # do not check + ); + + $scheme{www} = $scheme{http}; # alias + $scheme{xmpp} = $scheme{http}; # rfc 3920 + $scheme{pop3} = $scheme{ldap}; # rfc 2595 + $scheme{imap} = $scheme{ldap}; # rfc 2595 + $scheme{acap} = $scheme{ldap}; # rfc 2595 + $scheme{nntp} = $scheme{ldap}; # rfc 4642 + $scheme{ftp} = $scheme{http}; # rfc 4217 + + + sub _verify_hostname_of_cert { + my $identity = shift; + my $cert = shift; + my $scheme = shift || 'none'; + if ( ! ref($scheme) ) { + $scheme = $scheme{$scheme} or croak "scheme $scheme not defined"; + } + + return 1 if ! %$scheme; # 'none' + + my $commonName = $dispatcher{cn}->($cert); + my @altNames = $dispatcher{subjectAltNames}->($cert); + + if ( my $sub = $scheme->{callback} ) { + return $sub->($identity,$commonName,@altNames); + } + + + my $ipn; + if ( CAN_IPV6 and $identity =~m{:} ) { + $ipn = IO::Socket::SSL::inet_pton(IO::Socket::SSL::AF_INET6,$identity) + or croak "'$identity' is not IPv6, but neither IPv4 nor hostname"; + } elsif ( $identity =~m{^\d+\.\d+\.\d+\.\d+$} ) { + $ipn = IO::Socket::SSL::inet_aton( $identity ) or croak "'$identity' is not IPv4, but neither IPv6 nor hostname"; + } else { + if ( $identity =~m{[^a-zA-Z0-9_.\-]} ) { + $identity =~m{\0} and croak("name '$identity' has \\0 byte"); + $identity = IO::Socket::SSL::idn_to_ascii($identity) or + croak "Warning: Given name '$identity' could not be converted to IDNA!"; + } + } + + my $check_name = sub { + my ($name,$identity,$wtyp) = @_; + $wtyp ||= ''; + my $pattern; + if ( $wtyp eq 'anywhere' and $name =~m{^([a-zA-Z0-9_\-]*)\*(.+)} ) { + $pattern = qr{^\Q$1\E[a-zA-Z0-9_\-]*\Q$2\E$}i; + } elsif ( $wtyp eq 'leftmost' and $name =~m{^\*(\..+)$} ) { + $pattern = qr{^[a-zA-Z0-9_\-]*\Q$1\E$}i; + } else { + $pattern = qr{^\Q$name\E$}i; + } + return $identity =~ $pattern; + }; + + my $alt_dnsNames = 0; + while (@altNames) { + my ($type, $name) = splice (@altNames, 0, 2); + if ( $ipn and $type == GEN_IPADD ) { + return 1 if $ipn eq $name; + + } elsif ( ! $ipn and $type == GEN_DNS ) { + $name =~s/\s+$//; $name =~s/^\s+//; + $alt_dnsNames++; + $check_name->($name,$identity,$scheme->{wildcards_in_alt}) + and return 1; + } + } + + if ( ! $ipn and ( + $scheme->{check_cn} eq 'always' or + $scheme->{check_cn} eq 'when_only' and !$alt_dnsNames)) { + $check_name->($commonName,$identity,$scheme->{wildcards_in_cn}) + and return 1; + } + + return 0; # no match + } +} +EOP + +eval { require IO::Socket::SSL }; +if ( $INC{"IO/Socket/SSL.pm"} ) { + eval $prog; + die $@ if $@; +} + +1; +# ########################################################################### +# End HTTP::Micro package +# ########################################################################### + +# ########################################################################### +# VersionCheck package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/VersionCheck.pm +# t/lib/VersionCheck.t +# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package VersionCheck; + + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +local $Data::Dumper::Indent = 1; +local $Data::Dumper::Sortkeys = 1; +local $Data::Dumper::Quotekeys = 0; + +use Digest::MD5 qw(md5_hex); +use Sys::Hostname qw(hostname); +use File::Basename qw(); +use File::Spec; +use FindBin qw(); + +eval { + require Percona::Toolkit; + require HTTP::Micro; +}; + +my $home = $ENV{HOME} || $ENV{HOMEPATH} || $ENV{USERPROFILE} || '.'; +my @vc_dirs = ( + '/etc/percona', + '/etc/percona-toolkit', + '/tmp', + "$home", +); + +{ + my $file = 'percona-version-check'; + + sub version_check_file { + foreach my $dir ( @vc_dirs ) { + if ( -d $dir && -w $dir ) { + PTDEBUG && _d('Version check file', $file, 'in', $dir); + return $dir . '/' . $file; + } + } + PTDEBUG && _d('Version check file', $file, 'in', $ENV{PWD}); + return $file; # in the CWD + } +} + +sub version_check_time_limit { + return 60 * 60 * 24; # one day +} + + +sub version_check { + my (%args) = @_; + + my $instances = $args{instances} || []; + my $instances_to_check; + + PTDEBUG && _d('FindBin::Bin:', $FindBin::Bin); + if ( !$args{force} ) { + if ( $FindBin::Bin + && (-d "$FindBin::Bin/../.bzr" || + -d "$FindBin::Bin/../../.bzr" || + -d "$FindBin::Bin/../.git" || + -d "$FindBin::Bin/../../.git" + ) + ) { + PTDEBUG && _d("$FindBin::Bin/../.bzr disables --version-check"); + return; + } + } + + eval { + foreach my $instance ( @$instances ) { + my ($name, $id) = get_instance_id($instance); + $instance->{name} = $name; + $instance->{id} = $id; + } + + push @$instances, { name => 'system', id => 0 }; + + $instances_to_check = get_instances_to_check( + instances => $instances, + vc_file => $args{vc_file}, # testing + now => $args{now}, # testing + ); + PTDEBUG && _d(scalar @$instances_to_check, 'instances to check'); + return unless @$instances_to_check; + + my $protocol = 'https'; + eval { require IO::Socket::SSL; }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + PTDEBUG && _d("SSL not available, won't run version_check"); + return; + } + PTDEBUG && _d('Using', $protocol); + + my $advice = pingback( + instances => $instances_to_check, + protocol => $protocol, + url => $args{url} # testing + || $ENV{PERCONA_VERSION_CHECK_URL} # testing + || "$protocol://v.percona.com", + ); + if ( $advice ) { + PTDEBUG && _d('Advice:', Dumper($advice)); + if ( scalar @$advice > 1) { + print "\n# " . scalar @$advice . " software updates are " + . "available:\n"; + } + else { + print "\n# A software update is available:\n"; + } + print join("\n", map { "# * $_" } @$advice), "\n\n"; + } + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Version check failed:', $EVAL_ERROR); + } + + if ( @$instances_to_check ) { + eval { + update_check_times( + instances => $instances_to_check, + vc_file => $args{vc_file}, # testing + now => $args{now}, # testing + ); + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Error updating version check file:', $EVAL_ERROR); + } + } + + if ( $ENV{PTDEBUG_VERSION_CHECK} ) { + warn "Exiting because the PTDEBUG_VERSION_CHECK " + . 
"environment variable is defined.\n"; + exit 255; + } + + return; +} + +sub get_instances_to_check { + my (%args) = @_; + + my $instances = $args{instances}; + my $now = $args{now} || int(time); + my $vc_file = $args{vc_file} || version_check_file(); + + if ( !-f $vc_file ) { + PTDEBUG && _d('Version check file', $vc_file, 'does not exist;', + 'version checking all instances'); + return $instances; + } + + open my $fh, '<', $vc_file or die "Cannot open $vc_file: $OS_ERROR"; + chomp(my $file_contents = do { local $/ = undef; <$fh> }); + PTDEBUG && _d('Version check file', $vc_file, 'contents:', $file_contents); + close $fh; + my %last_check_time_for = $file_contents =~ /^([^,]+),(.+)$/mg; + + my $check_time_limit = version_check_time_limit(); + my @instances_to_check; + foreach my $instance ( @$instances ) { + my $last_check_time = $last_check_time_for{ $instance->{id} }; + PTDEBUG && _d('Intsance', $instance->{id}, 'last checked', + $last_check_time, 'now', $now, 'diff', $now - ($last_check_time || 0), + 'hours until next check', + sprintf '%.2f', + ($check_time_limit - ($now - ($last_check_time || 0))) / 3600); + if ( !defined $last_check_time + || ($now - $last_check_time) >= $check_time_limit ) { + PTDEBUG && _d('Time to check', Dumper($instance)); + push @instances_to_check, $instance; + } + } + + return \@instances_to_check; +} + +sub update_check_times { + my (%args) = @_; + + my $instances = $args{instances}; + my $now = $args{now} || int(time); + my $vc_file = $args{vc_file} || version_check_file(); + PTDEBUG && _d('Updating last check time:', $now); + + my %all_instances = map { + $_->{id} => { name => $_->{name}, ts => $now } + } @$instances; + + if ( -f $vc_file ) { + open my $fh, '<', $vc_file or die "Cannot read $vc_file: $OS_ERROR"; + my $contents = do { local $/ = undef; <$fh> }; + close $fh; + + foreach my $line ( split("\n", ($contents || '')) ) { + my ($id, $ts) = split(',', $line); + if ( !exists $all_instances{$id} ) { + $all_instances{$id} = { ts => $ts }; # original ts, not updated + } + } + } + + open my $fh, '>', $vc_file or die "Cannot write to $vc_file: $OS_ERROR"; + foreach my $id ( sort keys %all_instances ) { + PTDEBUG && _d('Updated:', $id, Dumper($all_instances{$id})); + print { $fh } $id . ',' . $all_instances{$id}->{ts} . "\n"; + } + close $fh; + + return; +} + +sub get_instance_id { + my ($instance) = @_; + + my $dbh = $instance->{dbh}; + my $dsn = $instance->{dsn}; + + my $sql = q{SELECT CONCAT(@@hostname, @@port)}; + PTDEBUG && _d($sql); + my ($name) = eval { $dbh->selectrow_array($sql) }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + $sql = q{SELECT @@hostname}; + PTDEBUG && _d($sql); + ($name) = eval { $dbh->selectrow_array($sql) }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + $name = ($dsn->{h} || 'localhost') . ($dsn->{P} || 3306); + } + else { + $sql = q{SHOW VARIABLES LIKE 'port'}; + PTDEBUG && _d($sql); + my (undef, $port) = eval { $dbh->selectrow_array($sql) }; + PTDEBUG && _d('port:', $port); + $name .= $port || ''; + } + } + my $id = md5_hex($name); + + PTDEBUG && _d('MySQL instance:', $id, $name, Dumper($dsn)); + + return $name, $id; +} + + +sub get_uuid { + my $uuid_file = '/.percona-toolkit.uuid'; + foreach my $dir (@vc_dirs) { + my $filename = $dir.$uuid_file; + my $uuid=_read_uuid($filename); + return $uuid if $uuid; + } + + my $filename = $ENV{"HOME"} . 
$uuid_file; + my $uuid = _generate_uuid(); + + open(my $fh, '>', $filename) or die "Could not open file '$filename' $!"; + print $fh $uuid; + close $fh; + + return $uuid; +} + +sub _generate_uuid { + return sprintf+($}="%04x")."$}-$}-$}-$}-".$}x3,map rand 65537,0..7; +} + +sub _read_uuid { + my $filename = shift; + my $fh; + + eval { + open($fh, '<:encoding(UTF-8)', $filename); + }; + return if ($EVAL_ERROR); + + my $uuid; + eval { $uuid = <$fh>; }; + return if ($EVAL_ERROR); + + chomp $uuid; + return $uuid; +} + + +sub pingback { + my (%args) = @_; + my @required_args = qw(url instances); + foreach my $arg ( @required_args ) { + die "I need a $arg arugment" unless $args{$arg}; + } + my $url = $args{url}; + my $instances = $args{instances}; + + my $ua = $args{ua} || HTTP::Micro->new( timeout => 3 ); + + my $response = $ua->request('GET', $url); + PTDEBUG && _d('Server response:', Dumper($response)); + die "No response from GET $url" + if !$response; + die("GET on $url returned HTTP status $response->{status}; expected 200\n", + ($response->{content} || '')) if $response->{status} != 200; + die("GET on $url did not return any programs to check") + if !$response->{content}; + + my $items = parse_server_response( + response => $response->{content} + ); + die "Failed to parse server requested programs: $response->{content}" + if !scalar keys %$items; + + my $versions = get_versions( + items => $items, + instances => $instances, + ); + die "Failed to get any program versions; should have at least gotten Perl" + if !scalar keys %$versions; + + my $client_content = encode_client_response( + items => $items, + versions => $versions, + general_id => get_uuid(), + ); + + my $client_response = { + headers => { "X-Percona-Toolkit-Tool" => File::Basename::basename($0) }, + content => $client_content, + }; + PTDEBUG && _d('Client response:', Dumper($client_response)); + + $response = $ua->request('POST', $url, $client_response); + PTDEBUG && _d('Server suggestions:', Dumper($response)); + die "No response from POST $url $client_response" + if !$response; + die "POST $url returned HTTP status $response->{status}; expected 200" + if $response->{status} != 200; + + return unless $response->{content}; + + $items = parse_server_response( + response => $response->{content}, + split_vars => 0, + ); + die "Failed to parse server suggestions: $response->{content}" + if !scalar keys %$items; + my @suggestions = map { $_->{vars} } + sort { $a->{item} cmp $b->{item} } + values %$items; + + return \@suggestions; +} + +sub encode_client_response { + my (%args) = @_; + my @required_args = qw(items versions general_id); + foreach my $arg ( @required_args ) { + die "I need a $arg arugment" unless $args{$arg}; + } + my ($items, $versions, $general_id) = @args{@required_args}; + + my @lines; + foreach my $item ( sort keys %$items ) { + next unless exists $versions->{$item}; + if ( ref($versions->{$item}) eq 'HASH' ) { + my $mysql_versions = $versions->{$item}; + for my $id ( sort keys %$mysql_versions ) { + push @lines, join(';', $id, $item, $mysql_versions->{$id}); + } + } + else { + push @lines, join(';', $general_id, $item, $versions->{$item}); + } + } + + my $client_response = join("\n", @lines) . 
"\n"; + return $client_response; +} + +sub parse_server_response { + my (%args) = @_; + my @required_args = qw(response); + foreach my $arg ( @required_args ) { + die "I need a $arg arugment" unless $args{$arg}; + } + my ($response) = @args{@required_args}; + + my %items = map { + my ($item, $type, $vars) = split(";", $_); + if ( !defined $args{split_vars} || $args{split_vars} ) { + $vars = [ split(",", ($vars || '')) ]; + } + $item => { + item => $item, + type => $type, + vars => $vars, + }; + } split("\n", $response); + + PTDEBUG && _d('Items:', Dumper(\%items)); + + return \%items; +} + +my %sub_for_type = ( + os_version => \&get_os_version, + perl_version => \&get_perl_version, + perl_module_version => \&get_perl_module_version, + mysql_variable => \&get_mysql_variable, +); + +sub valid_item { + my ($item) = @_; + return unless $item; + if ( !exists $sub_for_type{ $item->{type} } ) { + PTDEBUG && _d('Invalid type:', $item->{type}); + return 0; + } + return 1; +} + +sub get_versions { + my (%args) = @_; + my @required_args = qw(items); + foreach my $arg ( @required_args ) { + die "I need a $arg arugment" unless $args{$arg}; + } + my ($items) = @args{@required_args}; + + my %versions; + foreach my $item ( values %$items ) { + next unless valid_item($item); + eval { + my $version = $sub_for_type{ $item->{type} }->( + item => $item, + instances => $args{instances}, + ); + if ( $version ) { + chomp $version unless ref($version); + $versions{$item->{item}} = $version; + } + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Error getting version for', Dumper($item), $EVAL_ERROR); + } + } + + return \%versions; +} + + +sub get_os_version { + if ( $OSNAME eq 'MSWin32' ) { + require Win32; + return Win32::GetOSDisplayName(); + } + + chomp(my $platform = `uname -s`); + PTDEBUG && _d('platform:', $platform); + return $OSNAME unless $platform; + + chomp(my $lsb_release + = `which lsb_release 2>/dev/null | awk '{print \$1}'` || ''); + PTDEBUG && _d('lsb_release:', $lsb_release); + + my $release = ""; + + if ( $platform eq 'Linux' ) { + if ( -f "/etc/fedora-release" ) { + $release = `cat /etc/fedora-release`; + } + elsif ( -f "/etc/redhat-release" ) { + $release = `cat /etc/redhat-release`; + } + elsif ( -f "/etc/system-release" ) { + $release = `cat /etc/system-release`; + } + elsif ( $lsb_release ) { + $release = `$lsb_release -ds`; + } + elsif ( -f "/etc/lsb-release" ) { + $release = `grep DISTRIB_DESCRIPTION /etc/lsb-release`; + $release =~ s/^\w+="([^"]+)".+/$1/; + } + elsif ( -f "/etc/debian_version" ) { + chomp(my $rel = `cat /etc/debian_version`); + $release = "Debian $rel"; + if ( -f "/etc/apt/sources.list" ) { + chomp(my $code_name = `awk '/^deb/ {print \$3}' /etc/apt/sources.list | awk -F/ '{print \$1}'| awk 'BEGIN {FS="|"} {print \$1}' | sort | uniq -c | sort -rn | head -n1 | awk '{print \$2}'`); + $release .= " ($code_name)" if $code_name; + } + } + elsif ( -f "/etc/os-release" ) { # openSUSE + chomp($release = `grep PRETTY_NAME /etc/os-release`); + $release =~ s/^PRETTY_NAME="(.+)"$/$1/; + } + elsif ( `ls /etc/*release 2>/dev/null` ) { + if ( `grep DISTRIB_DESCRIPTION /etc/*release 2>/dev/null` ) { + $release = `grep DISTRIB_DESCRIPTION /etc/*release | head -n1`; + } + else { + $release = `cat /etc/*release | head -n1`; + } + } + } + elsif ( $platform =~ m/(?:BSD|^Darwin)$/ ) { + my $rel = `uname -r`; + $release = "$platform $rel"; + } + elsif ( $platform eq "SunOS" ) { + my $rel = `head -n1 /etc/release` || `uname -r`; + $release = "$platform $rel"; + } + + if ( !$release ) { + PTDEBUG && 
_d('Failed to get the release, using platform'); + $release = $platform; + } + chomp($release); + + $release =~ s/^"|"$//g; + + PTDEBUG && _d('OS version =', $release); + return $release; +} + +sub get_perl_version { + my (%args) = @_; + my $item = $args{item}; + return unless $item; + + my $version = sprintf '%vd', $PERL_VERSION; + PTDEBUG && _d('Perl version', $version); + return $version; +} + +sub get_perl_module_version { + my (%args) = @_; + my $item = $args{item}; + return unless $item; + + my $var = '$' . $item->{item} . '::VERSION'; + my $version = eval "use $item->{item}; $var;"; + PTDEBUG && _d('Perl version for', $var, '=', $version); + return $version; +} + +sub get_mysql_variable { + return get_from_mysql( + show => 'VARIABLES', + @_, + ); +} + +sub get_from_mysql { + my (%args) = @_; + my $show = $args{show}; + my $item = $args{item}; + my $instances = $args{instances}; + return unless $show && $item; + + if ( !$instances || !@$instances ) { + PTDEBUG && _d('Cannot check', $item, + 'because there are no MySQL instances'); + return; + } + + if ($item->{item} eq 'MySQL' && $item->{type} eq 'mysql_variable') { + @{$item->{vars}} = grep { $_ eq 'version' || $_ eq 'version_comment' } @{$item->{vars}}; + } + + + my @versions; + my %version_for; + foreach my $instance ( @$instances ) { + next unless $instance->{id}; # special system instance has id=0 + my $dbh = $instance->{dbh}; + local $dbh->{FetchHashKeyName} = 'NAME_lc'; + my $sql = qq/SHOW $show/; + PTDEBUG && _d($sql); + my $rows = $dbh->selectall_hashref($sql, 'variable_name'); + + my @versions; + foreach my $var ( @{$item->{vars}} ) { + $var = lc($var); + my $version = $rows->{$var}->{value}; + PTDEBUG && _d('MySQL version for', $item->{item}, '=', $version, + 'on', $instance->{name}); + push @versions, $version; + } + $version_for{ $instance->{id} } = join(' ', @versions); + } + + return \%version_for; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End VersionCheck package +# ########################################################################### + +# ########################################################################### +# DSNParser package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/DSNParser.pm +# t/lib/DSNParser.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package DSNParser; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 0; +$Data::Dumper::Quotekeys = 0; + +my $dsn_sep = qr/(? {} # h, P, u, etc. Should come from DSN OPTIONS section in POD. + }; + foreach my $opt ( @{$args{opts}} ) { + if ( !$opt->{key} || !$opt->{desc} ) { + die "Invalid DSN option: ", Dumper($opt); + } + PTDEBUG && _d('DSN option:', + join(', ', + map { "$_=" . (defined $opt->{$_} ? 
($opt->{$_} || '') : 'undef') } + keys %$opt + ) + ); + $self->{opts}->{$opt->{key}} = { + dsn => $opt->{dsn}, + desc => $opt->{desc}, + copy => $opt->{copy} || 0, + }; + } + return bless $self, $class; +} + +sub prop { + my ( $self, $prop, $value ) = @_; + if ( @_ > 2 ) { + PTDEBUG && _d('Setting', $prop, 'property'); + $self->{$prop} = $value; + } + return $self->{$prop}; +} + +sub parse { + my ( $self, $dsn, $prev, $defaults ) = @_; + if ( !$dsn ) { + PTDEBUG && _d('No DSN to parse'); + return; + } + PTDEBUG && _d('Parsing', $dsn); + $prev ||= {}; + $defaults ||= {}; + my %given_props; + my %final_props; + my $opts = $self->{opts}; + + foreach my $dsn_part ( split($dsn_sep, $dsn) ) { + $dsn_part =~ s/\\,/,/g; + if ( my ($prop_key, $prop_val) = $dsn_part =~ m/^(.)=(.*)$/ ) { + $given_props{$prop_key} = $prop_val; + } + else { + PTDEBUG && _d('Interpreting', $dsn_part, 'as h=', $dsn_part); + $given_props{h} = $dsn_part; + } + } + + foreach my $key ( keys %$opts ) { + PTDEBUG && _d('Finding value for', $key); + $final_props{$key} = $given_props{$key}; + if ( !defined $final_props{$key} + && defined $prev->{$key} && $opts->{$key}->{copy} ) + { + $final_props{$key} = $prev->{$key}; + PTDEBUG && _d('Copying value for', $key, 'from previous DSN'); + } + if ( !defined $final_props{$key} ) { + $final_props{$key} = $defaults->{$key}; + PTDEBUG && _d('Copying value for', $key, 'from defaults'); + } + } + + foreach my $key ( keys %given_props ) { + die "Unknown DSN option '$key' in '$dsn'. For more details, " + . "please use the --help option, or try 'perldoc $PROGRAM_NAME' " + . "for complete documentation." + unless exists $opts->{$key}; + } + if ( (my $required = $self->prop('required')) ) { + foreach my $key ( keys %$required ) { + die "Missing required DSN option '$key' in '$dsn'. For more details, " + . "please use the --help option, or try 'perldoc $PROGRAM_NAME' " + . "for complete documentation." + unless $final_props{$key}; + } + } + + return \%final_props; +} + +sub parse_options { + my ( $self, $o ) = @_; + die 'I need an OptionParser object' unless ref $o eq 'OptionParser'; + my $dsn_string + = join(',', + map { "$_=".$o->get($_); } + grep { $o->has($_) && $o->get($_) } + keys %{$self->{opts}} + ); + PTDEBUG && _d('DSN string made from options:', $dsn_string); + return $self->parse($dsn_string); +} + +sub as_string { + my ( $self, $dsn, $props ) = @_; + return $dsn unless ref $dsn; + my @keys = $props ? @$props : sort keys %$dsn; + return join(',', + map { "$_=" . ($_ eq 'p' ? '...' : $dsn->{$_}) } + grep { + exists $self->{opts}->{$_} + && exists $dsn->{$_} + && defined $dsn->{$_} + } @keys); +} + +sub usage { + my ( $self ) = @_; + my $usage + = "DSN syntax is key=value[,key=value...] Allowable DSN keys:\n\n" + . " KEY COPY MEANING\n" + . " === ==== =============================================\n"; + my %opts = %{$self->{opts}}; + foreach my $key ( sort keys %opts ) { + $usage .= " $key " + . ($opts{$key}->{copy} ? 'yes ' : 'no ') + . ($opts{$key}->{desc} || '[No description]') + . "\n"; + } + $usage .= "\n If the DSN is a bareword, the word is treated as the 'h' key.\n"; + return $usage; +} + +sub get_cxn_params { + my ( $self, $info ) = @_; + my $dsn; + my %opts = %{$self->{opts}}; + my $driver = $self->prop('dbidriver') || ''; + if ( $driver eq 'Pg' ) { + $dsn = 'DBI:Pg:dbname=' . ( $info->{D} || '' ) . ';' + . join(';', map { "$opts{$_}->{dsn}=$info->{$_}" } + grep { defined $info->{$_} } + qw(h P)); + } + else { + $dsn = 'DBI:mysql:' . ( $info->{D} || '' ) . ';' + . 
join(';', map { "$opts{$_}->{dsn}=$info->{$_}" } + grep { defined $info->{$_} } + qw(F h P S A)) + . ';mysql_read_default_group=client' + . ($info->{L} ? ';mysql_local_infile=1' : ''); + } + PTDEBUG && _d($dsn); + return ($dsn, $info->{u}, $info->{p}); +} + +sub fill_in_dsn { + my ( $self, $dbh, $dsn ) = @_; + my $vars = $dbh->selectall_hashref('SHOW VARIABLES', 'Variable_name'); + my ($user, $db) = $dbh->selectrow_array('SELECT USER(), DATABASE()'); + $user =~ s/@.*//; + $dsn->{h} ||= $vars->{hostname}->{Value}; + $dsn->{S} ||= $vars->{'socket'}->{Value}; + $dsn->{P} ||= $vars->{port}->{Value}; + $dsn->{u} ||= $user; + $dsn->{D} ||= $db; +} + +sub get_dbh { + my ( $self, $cxn_string, $user, $pass, $opts ) = @_; + $opts ||= {}; + my $defaults = { + AutoCommit => 0, + RaiseError => 1, + PrintError => 0, + ShowErrorStatement => 1, + mysql_enable_utf8 => ($cxn_string =~ m/charset=utf8/i ? 1 : 0), + }; + @{$defaults}{ keys %$opts } = values %$opts; + if (delete $defaults->{L}) { # L for LOAD DATA LOCAL INFILE, our own extension + $defaults->{mysql_local_infile} = 1; + } + + if ( $opts->{mysql_use_result} ) { + $defaults->{mysql_use_result} = 1; + } + + if ( !$have_dbi ) { + die "Cannot connect to MySQL because the Perl DBI module is not " + . "installed or not found. Run 'perl -MDBI' to see the directories " + . "that Perl searches for DBI. If DBI is not installed, try:\n" + . " Debian/Ubuntu apt-get install libdbi-perl\n" + . " RHEL/CentOS yum install perl-DBI\n" + . " OpenSolaris pkg install pkg:/SUNWpmdbi\n"; + + } + + my $dbh; + my $tries = 2; + while ( !$dbh && $tries-- ) { + PTDEBUG && _d($cxn_string, ' ', $user, ' ', $pass, + join(', ', map { "$_=>$defaults->{$_}" } keys %$defaults )); + + $dbh = eval { DBI->connect($cxn_string, $user, $pass, $defaults) }; + + if ( !$dbh && $EVAL_ERROR ) { + if ( $EVAL_ERROR =~ m/locate DBD\/mysql/i ) { + die "Cannot connect to MySQL because the Perl DBD::mysql module is " + . "not installed or not found. Run 'perl -MDBD::mysql' to see " + . "the directories that Perl searches for DBD::mysql. If " + . "DBD::mysql is not installed, try:\n" + . " Debian/Ubuntu apt-get install libdbd-mysql-perl\n" + . " RHEL/CentOS yum install perl-DBD-MySQL\n" + . " OpenSolaris pgk install pkg:/SUNWapu13dbd-mysql\n"; + } + elsif ( $EVAL_ERROR =~ m/not a compiled character set|character set utf8/ ) { + PTDEBUG && _d('Going to try again without utf8 support'); + delete $defaults->{mysql_enable_utf8}; + } + if ( !$tries ) { + die $EVAL_ERROR; + } + } + } + + if ( $cxn_string =~ m/mysql/i ) { + my $sql; + + if ( my ($charset) = $cxn_string =~ m/charset=([\w]+)/ ) { + $sql = qq{/*!40101 SET NAMES "$charset"*/}; + PTDEBUG && _d($dbh, $sql); + eval { $dbh->do($sql) }; + if ( $EVAL_ERROR ) { + die "Error setting NAMES to $charset: $EVAL_ERROR"; + } + PTDEBUG && _d('Enabling charset for STDOUT'); + if ( $charset eq 'utf8' ) { + binmode(STDOUT, ':utf8') + or die "Can't binmode(STDOUT, ':utf8'): $OS_ERROR"; + } + else { + binmode(STDOUT) or die "Can't binmode(STDOUT): $OS_ERROR"; + } + } + + if ( my $vars = $self->prop('set-vars') ) { + $self->set_vars($dbh, $vars); + } + + $sql = 'SELECT @@SQL_MODE'; + PTDEBUG && _d($dbh, $sql); + my ($sql_mode) = eval { $dbh->selectrow_array($sql) }; + if ( $EVAL_ERROR ) { + die "Error getting the current SQL_MODE: $EVAL_ERROR"; + } + + $sql = 'SET @@SQL_QUOTE_SHOW_CREATE = 1' + . '/*!40101, @@SQL_MODE=\'NO_AUTO_VALUE_ON_ZERO' + . ($sql_mode ? ",$sql_mode" : '') + . 
'\'*/'; + PTDEBUG && _d($dbh, $sql); + eval { $dbh->do($sql) }; + if ( $EVAL_ERROR ) { + die "Error setting SQL_QUOTE_SHOW_CREATE, SQL_MODE" + . ($sql_mode ? " and $sql_mode" : '') + . ": $EVAL_ERROR"; + } + } + my ($mysql_version) = eval { $dbh->selectrow_array('SELECT VERSION()') }; + if ($EVAL_ERROR) { + die "Cannot get MySQL version: $EVAL_ERROR"; + } + + my (undef, $character_set_server) = eval { $dbh->selectrow_array("SHOW VARIABLES LIKE 'character_set_server'") }; + if ($EVAL_ERROR) { + die "Cannot get MySQL var character_set_server: $EVAL_ERROR"; + } + + if ($mysql_version =~ m/^(\d+)\.(\d)\.(\d+).*/) { + if ($1 >= 8 && $character_set_server =~ m/^utf8/) { + $dbh->{mysql_enable_utf8} = 1; + my $msg = "MySQL version $mysql_version >= 8 and character_set_server = $character_set_server\n". + "Setting: SET NAMES $character_set_server"; + PTDEBUG && _d($msg); + eval { $dbh->do("SET NAMES 'utf8mb4'") }; + if ($EVAL_ERROR) { + die "Cannot SET NAMES $character_set_server: $EVAL_ERROR"; + } + } + } + + PTDEBUG && _d('DBH info: ', + $dbh, + Dumper($dbh->selectrow_hashref( + 'SELECT DATABASE(), CONNECTION_ID(), VERSION()/*!50038 , @@hostname*/')), + 'Connection info:', $dbh->{mysql_hostinfo}, + 'Character set info:', Dumper($dbh->selectall_arrayref( + "SHOW VARIABLES LIKE 'character_set%'", { Slice => {}})), + '$DBD::mysql::VERSION:', $DBD::mysql::VERSION, + '$DBI::VERSION:', $DBI::VERSION, + ); + + return $dbh; +} + +sub get_hostname { + my ( $self, $dbh ) = @_; + if ( my ($host) = ($dbh->{mysql_hostinfo} || '') =~ m/^(\w+) via/ ) { + return $host; + } + my ( $hostname, $one ) = $dbh->selectrow_array( + 'SELECT /*!50038 @@hostname, */ 1'); + return $hostname; +} + +sub disconnect { + my ( $self, $dbh ) = @_; + PTDEBUG && $self->print_active_handles($dbh); + $dbh->disconnect; +} + +sub print_active_handles { + my ( $self, $thing, $level ) = @_; + $level ||= 0; + printf("# Active %sh: %s %s %s\n", ($thing->{Type} || 'undef'), "\t" x $level, + $thing, (($thing->{Type} || '') eq 'st' ? $thing->{Statement} || '' : '')) + or die "Cannot print: $OS_ERROR"; + foreach my $handle ( grep {defined} @{ $thing->{ChildHandles} } ) { + $self->print_active_handles( $handle, $level + 1 ); + } +} + +sub copy { + my ( $self, $dsn_1, $dsn_2, %args ) = @_; + die 'I need a dsn_1 argument' unless $dsn_1; + die 'I need a dsn_2 argument' unless $dsn_2; + my %new_dsn = map { + my $key = $_; + my $val; + if ( $args{overwrite} ) { + $val = defined $dsn_1->{$key} ? $dsn_1->{$key} : $dsn_2->{$key}; + } + else { + $val = defined $dsn_2->{$key} ? 
$dsn_2->{$key} : $dsn_1->{$key}; + } + $key => $val; + } keys %{$self->{opts}}; + return \%new_dsn; +} + +sub set_vars { + my ($self, $dbh, $vars) = @_; + + return unless $vars; + + foreach my $var ( sort keys %$vars ) { + my $val = $vars->{$var}->{val}; + + (my $quoted_var = $var) =~ s/_/\\_/; + my ($var_exists, $current_val); + eval { + ($var_exists, $current_val) = $dbh->selectrow_array( + "SHOW VARIABLES LIKE '$quoted_var'"); + }; + my $e = $EVAL_ERROR; + if ( $e ) { + PTDEBUG && _d($e); + } + + if ( $vars->{$var}->{default} && !$var_exists ) { + PTDEBUG && _d('Not setting default var', $var, + 'because it does not exist'); + next; + } + + if ( $current_val && $current_val eq $val ) { + PTDEBUG && _d('Not setting var', $var, 'because its value', + 'is already', $val); + next; + } + + my $sql = "SET SESSION $var=$val"; + PTDEBUG && _d($dbh, $sql); + eval { $dbh->do($sql) }; + if ( my $set_error = $EVAL_ERROR ) { + chomp($set_error); + $set_error =~ s/ at \S+ line \d+//; + my $msg = "Error setting $var: $set_error"; + if ( $current_val ) { + $msg .= " The current value for $var is $current_val. " + . "If the variable is read only (not dynamic), specify " + . "--set-vars $var=$current_val to avoid this warning, " + . "else manually set the variable and restart MySQL."; + } + warn $msg . "\n\n"; + } + } + + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End DSNParser package +# ########################################################################### + +# ########################################################################### +# OptionParser package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/OptionParser.pm +# t/lib/OptionParser.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package OptionParser; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use List::Util qw(max); +use Getopt::Long; +use Data::Dumper; + +my $POD_link_re = '[LC]<"?([^">]+)"?>'; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + + my ($program_name) = $PROGRAM_NAME =~ m/([.A-Za-z-]+)$/; + $program_name ||= $PROGRAM_NAME; + my $home = $ENV{HOME} || $ENV{HOMEPATH} || $ENV{USERPROFILE} || '.'; + + my %attributes = ( + 'type' => 1, + 'short form' => 1, + 'group' => 1, + 'default' => 1, + 'cumulative' => 1, + 'negatable' => 1, + 'repeatable' => 1, # means it can be specified more than once + ); + + my $self = { + head1 => 'OPTIONS', # These args are used internally + skip_rules => 0, # to instantiate another Option- + item => '--(.*)', # Parser obj that parses the + attributes => \%attributes, # DSN OPTIONS section. Tools + parse_attributes => \&_parse_attribs, # don't tinker with these args. 
+ + %args, + + strict => 1, # disabled by a special rule + program_name => $program_name, + opts => {}, + got_opts => 0, + short_opts => {}, + defaults => {}, + groups => {}, + allowed_groups => {}, + errors => [], + rules => [], # desc of rules for --help + mutex => [], # rule: opts are mutually exclusive + atleast1 => [], # rule: at least one opt is required + disables => {}, # rule: opt disables other opts + defaults_to => {}, # rule: opt defaults to value of other opt + DSNParser => undef, + default_files => [ + "/etc/percona-toolkit/percona-toolkit.conf", + "/etc/percona-toolkit/$program_name.conf", + "$home/.percona-toolkit.conf", + "$home/.$program_name.conf", + ], + types => { + string => 's', # standard Getopt type + int => 'i', # standard Getopt type + float => 'f', # standard Getopt type + Hash => 'H', # hash, formed from a comma-separated list + hash => 'h', # hash as above, but only if a value is given + Array => 'A', # array, similar to Hash + array => 'a', # array, similar to hash + DSN => 'd', # DSN + size => 'z', # size with kMG suffix (powers of 2^10) + time => 'm', # time, with an optional suffix of s/h/m/d + }, + }; + + return bless $self, $class; +} + +sub get_specs { + my ( $self, $file ) = @_; + $file ||= $self->{file} || __FILE__; + my @specs = $self->_pod_to_specs($file); + $self->_parse_specs(@specs); + + open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; + my $contents = do { local $/ = undef; <$fh> }; + close $fh; + if ( $contents =~ m/^=head1 DSN OPTIONS/m ) { + PTDEBUG && _d('Parsing DSN OPTIONS'); + my $dsn_attribs = { + dsn => 1, + copy => 1, + }; + my $parse_dsn_attribs = sub { + my ( $self, $option, $attribs ) = @_; + map { + my $val = $attribs->{$_}; + if ( $val ) { + $val = $val eq 'yes' ? 1 + : $val eq 'no' ? 0 + : $val; + $attribs->{$_} = $val; + } + } keys %$attribs; + return { + key => $option, + %$attribs, + }; + }; + my $dsn_o = new OptionParser( + description => 'DSN OPTIONS', + head1 => 'DSN OPTIONS', + dsn => 0, # XXX don't infinitely recurse! 
+ item => '\* (.)', # key opts are a single character + skip_rules => 1, # no rules before opts + attributes => $dsn_attribs, + parse_attributes => $parse_dsn_attribs, + ); + my @dsn_opts = map { + my $opts = { + key => $_->{spec}->{key}, + dsn => $_->{spec}->{dsn}, + copy => $_->{spec}->{copy}, + desc => $_->{desc}, + }; + $opts; + } $dsn_o->_pod_to_specs($file); + $self->{DSNParser} = DSNParser->new(opts => \@dsn_opts); + } + + if ( $contents =~ m/^=head1 VERSION\n\n^(.+)$/m ) { + $self->{version} = $1; + PTDEBUG && _d($self->{version}); + } + + return; +} + +sub DSNParser { + my ( $self ) = @_; + return $self->{DSNParser}; +}; + +sub get_defaults_files { + my ( $self ) = @_; + return @{$self->{default_files}}; +} + +sub _pod_to_specs { + my ( $self, $file ) = @_; + $file ||= $self->{file} || __FILE__; + open my $fh, '<', $file or die "Cannot open $file: $OS_ERROR"; + + my @specs = (); + my @rules = (); + my $para; + + local $INPUT_RECORD_SEPARATOR = ''; + while ( $para = <$fh> ) { + next unless $para =~ m/^=head1 $self->{head1}/; + last; + } + + while ( $para = <$fh> ) { + last if $para =~ m/^=over/; + next if $self->{skip_rules}; + chomp $para; + $para =~ s/\s+/ /g; + $para =~ s/$POD_link_re/$1/go; + PTDEBUG && _d('Option rule:', $para); + push @rules, $para; + } + + die "POD has no $self->{head1} section" unless $para; + + do { + if ( my ($option) = $para =~ m/^=item $self->{item}/ ) { + chomp $para; + PTDEBUG && _d($para); + my %attribs; + + $para = <$fh>; # read next paragraph, possibly attributes + + if ( $para =~ m/: / ) { # attributes + $para =~ s/\s+\Z//g; + %attribs = map { + my ( $attrib, $val) = split(/: /, $_); + die "Unrecognized attribute for --$option: $attrib" + unless $self->{attributes}->{$attrib}; + ($attrib, $val); + } split(/; /, $para); + if ( $attribs{'short form'} ) { + $attribs{'short form'} =~ s/-//; + } + $para = <$fh>; # read next paragraph, probably short help desc + } + else { + PTDEBUG && _d('Option has no attributes'); + } + + $para =~ s/\s+\Z//g; + $para =~ s/\s+/ /g; + $para =~ s/$POD_link_re/$1/go; + + $para =~ s/\.(?:\n.*| [A-Z].*|\Z)//s; + PTDEBUG && _d('Short help:', $para); + + die "No description after option spec $option" if $para =~ m/^=item/; + + if ( my ($base_option) = $option =~ m/^\[no\](.*)/ ) { + $option = $base_option; + $attribs{'negatable'} = 1; + } + + push @specs, { + spec => $self->{parse_attributes}->($self, $option, \%attribs), + desc => $para + . (defined $attribs{default} ? " (default $attribs{default})" : ''), + group => ($attribs{'group'} ? $attribs{'group'} : 'default'), + attributes => \%attribs + }; + } + while ( $para = <$fh> ) { + last unless $para; + if ( $para =~ m/^=head1/ ) { + $para = undef; # Can't 'last' out of a do {} block. + last; + } + last if $para =~ m/^=item /; + } + } while ( $para ); + + die "No valid specs in $self->{head1}" unless @specs; + + close $fh; + return @specs, @rules; +} + +sub _parse_specs { + my ( $self, @specs ) = @_; + my %disables; # special rule that requires deferred checking + + foreach my $opt ( @specs ) { + if ( ref $opt ) { # It's an option spec, not a rule. 
+ PTDEBUG && _d('Parsing opt spec:', + map { ($_, '=>', $opt->{$_}) } keys %$opt); + + my ( $long, $short ) = $opt->{spec} =~ m/^([\w-]+)(?:\|([^!+=]*))?/; + if ( !$long ) { + die "Cannot parse long option from spec $opt->{spec}"; + } + $opt->{long} = $long; + + die "Duplicate long option --$long" if exists $self->{opts}->{$long}; + $self->{opts}->{$long} = $opt; + + if ( length $long == 1 ) { + PTDEBUG && _d('Long opt', $long, 'looks like short opt'); + $self->{short_opts}->{$long} = $long; + } + + if ( $short ) { + die "Duplicate short option -$short" + if exists $self->{short_opts}->{$short}; + $self->{short_opts}->{$short} = $long; + $opt->{short} = $short; + } + else { + $opt->{short} = undef; + } + + $opt->{is_negatable} = $opt->{spec} =~ m/!/ ? 1 : 0; + $opt->{is_cumulative} = $opt->{spec} =~ m/\+/ ? 1 : 0; + $opt->{is_repeatable} = $opt->{attributes}->{repeatable} ? 1 : 0; + $opt->{is_required} = $opt->{desc} =~ m/required/ ? 1 : 0; + + $opt->{group} ||= 'default'; + $self->{groups}->{ $opt->{group} }->{$long} = 1; + + $opt->{value} = undef; + $opt->{got} = 0; + + my ( $type ) = $opt->{spec} =~ m/=(.)/; + $opt->{type} = $type; + PTDEBUG && _d($long, 'type:', $type); + + + $opt->{spec} =~ s/=./=s/ if ( $type && $type =~ m/[HhAadzm]/ ); + + if ( (my ($def) = $opt->{desc} =~ m/default\b(?: ([^)]+))?/) ) { + $self->{defaults}->{$long} = defined $def ? $def : 1; + PTDEBUG && _d($long, 'default:', $def); + } + + if ( $long eq 'config' ) { + $self->{defaults}->{$long} = join(',', $self->get_defaults_files()); + } + + if ( (my ($dis) = $opt->{desc} =~ m/(disables .*)/) ) { + $disables{$long} = $dis; + PTDEBUG && _d('Deferring check of disables rule for', $opt, $dis); + } + + $self->{opts}->{$long} = $opt; + } + else { # It's an option rule, not a spec. 
+ PTDEBUG && _d('Parsing rule:', $opt); + push @{$self->{rules}}, $opt; + my @participants = $self->_get_participants($opt); + my $rule_ok = 0; + + if ( $opt =~ m/mutually exclusive|one and only one/ ) { + $rule_ok = 1; + push @{$self->{mutex}}, \@participants; + PTDEBUG && _d(@participants, 'are mutually exclusive'); + } + if ( $opt =~ m/at least one|one and only one/ ) { + $rule_ok = 1; + push @{$self->{atleast1}}, \@participants; + PTDEBUG && _d(@participants, 'require at least one'); + } + if ( $opt =~ m/default to/ ) { + $rule_ok = 1; + $self->{defaults_to}->{$participants[0]} = $participants[1]; + PTDEBUG && _d($participants[0], 'defaults to', $participants[1]); + } + if ( $opt =~ m/restricted to option groups/ ) { + $rule_ok = 1; + my ($groups) = $opt =~ m/groups ([\w\s\,]+)/; + my @groups = split(',', $groups); + %{$self->{allowed_groups}->{$participants[0]}} = map { + s/\s+//; + $_ => 1; + } @groups; + } + if( $opt =~ m/accepts additional command-line arguments/ ) { + $rule_ok = 1; + $self->{strict} = 0; + PTDEBUG && _d("Strict mode disabled by rule"); + } + + die "Unrecognized option rule: $opt" unless $rule_ok; + } + } + + foreach my $long ( keys %disables ) { + my @participants = $self->_get_participants($disables{$long}); + $self->{disables}->{$long} = \@participants; + PTDEBUG && _d('Option', $long, 'disables', @participants); + } + + return; +} + +sub _get_participants { + my ( $self, $str ) = @_; + my @participants; + foreach my $long ( $str =~ m/--(?:\[no\])?([\w-]+)/g ) { + die "Option --$long does not exist while processing rule $str" + unless exists $self->{opts}->{$long}; + push @participants, $long; + } + PTDEBUG && _d('Participants for', $str, ':', @participants); + return @participants; +} + +sub opts { + my ( $self ) = @_; + my %opts = %{$self->{opts}}; + return %opts; +} + +sub short_opts { + my ( $self ) = @_; + my %short_opts = %{$self->{short_opts}}; + return %short_opts; +} + +sub set_defaults { + my ( $self, %defaults ) = @_; + $self->{defaults} = {}; + foreach my $long ( keys %defaults ) { + die "Cannot set default for nonexistent option $long" + unless exists $self->{opts}->{$long}; + $self->{defaults}->{$long} = $defaults{$long}; + PTDEBUG && _d('Default val for', $long, ':', $defaults{$long}); + } + return; +} + +sub get_defaults { + my ( $self ) = @_; + return $self->{defaults}; +} + +sub get_groups { + my ( $self ) = @_; + return $self->{groups}; +} + +sub _set_option { + my ( $self, $opt, $val ) = @_; + my $long = exists $self->{opts}->{$opt} ? $opt + : exists $self->{short_opts}->{$opt} ? $self->{short_opts}->{$opt} + : die "Getopt::Long gave a nonexistent option: $opt"; + $opt = $self->{opts}->{$long}; + if ( $opt->{is_cumulative} ) { + $opt->{value}++; + } + elsif ( ($opt->{type} || '') eq 's' && $val =~ m/^--?(.+)/ ) { + my $next_opt = $1; + if ( exists $self->{opts}->{$next_opt} + || exists $self->{short_opts}->{$next_opt} ) { + $self->save_error("--$long requires a string value"); + return; + } + else { + if ($opt->{is_repeatable}) { + push @{$opt->{value}} , $val; + } + else { + $opt->{value} = $val; + } + } + } + else { + if ($opt->{is_repeatable}) { + push @{$opt->{value}} , $val; + } + else { + $opt->{value} = $val; + } + } + $opt->{got} = 1; + PTDEBUG && _d('Got option', $long, '=', $val); +} + +sub get_opts { + my ( $self ) = @_; + + foreach my $long ( keys %{$self->{opts}} ) { + $self->{opts}->{$long}->{got} = 0; + $self->{opts}->{$long}->{value} + = exists $self->{defaults}->{$long} ? 
$self->{defaults}->{$long} + : $self->{opts}->{$long}->{is_cumulative} ? 0 + : undef; + } + $self->{got_opts} = 0; + + $self->{errors} = []; + + if ( @ARGV && $ARGV[0] =~/^--config=/ ) { + $ARGV[0] = substr($ARGV[0],9); + $ARGV[0] =~ s/^'(.*)'$/$1/; + $ARGV[0] =~ s/^"(.*)"$/$1/; + $self->_set_option('config', shift @ARGV); + } + if ( @ARGV && $ARGV[0] eq "--config" ) { + shift @ARGV; + $self->_set_option('config', shift @ARGV); + } + if ( $self->has('config') ) { + my @extra_args; + foreach my $filename ( split(',', $self->get('config')) ) { + eval { + push @extra_args, $self->_read_config_file($filename); + }; + if ( $EVAL_ERROR ) { + if ( $self->got('config') ) { + die $EVAL_ERROR; + } + elsif ( PTDEBUG ) { + _d($EVAL_ERROR); + } + } + } + unshift @ARGV, @extra_args; + } + + Getopt::Long::Configure('no_ignore_case', 'bundling'); + GetOptions( + map { $_->{spec} => sub { $self->_set_option(@_); } } + grep { $_->{long} ne 'config' } # --config is handled specially above. + values %{$self->{opts}} + ) or $self->save_error('Error parsing options'); + + if ( exists $self->{opts}->{version} && $self->{opts}->{version}->{got} ) { + if ( $self->{version} ) { + print $self->{version}, "\n"; + exit 0; + } + else { + print "Error parsing version. See the VERSION section of the tool's documentation.\n"; + exit 1; + } + } + + if ( @ARGV && $self->{strict} ) { + $self->save_error("Unrecognized command-line options @ARGV"); + } + + foreach my $mutex ( @{$self->{mutex}} ) { + my @set = grep { $self->{opts}->{$_}->{got} } @$mutex; + if ( @set > 1 ) { + my $err = join(', ', map { "--$self->{opts}->{$_}->{long}" } + @{$mutex}[ 0 .. scalar(@$mutex) - 2] ) + . ' and --'.$self->{opts}->{$mutex->[-1]}->{long} + . ' are mutually exclusive.'; + $self->save_error($err); + } + } + + foreach my $required ( @{$self->{atleast1}} ) { + my @set = grep { $self->{opts}->{$_}->{got} } @$required; + if ( @set == 0 ) { + my $err = join(', ', map { "--$self->{opts}->{$_}->{long}" } + @{$required}[ 0 .. scalar(@$required) - 2] ) + .' or --'.$self->{opts}->{$required->[-1]}->{long}; + $self->save_error("Specify at least one of $err"); + } + } + + $self->_check_opts( keys %{$self->{opts}} ); + $self->{got_opts} = 1; + return; +} + +sub _check_opts { + my ( $self, @long ) = @_; + my $long_last = scalar @long; + while ( @long ) { + foreach my $i ( 0..$#long ) { + my $long = $long[$i]; + next unless $long; + my $opt = $self->{opts}->{$long}; + if ( $opt->{got} ) { + if ( exists $self->{disables}->{$long} ) { + my @disable_opts = @{$self->{disables}->{$long}}; + map { $self->{opts}->{$_}->{value} = undef; } @disable_opts; + PTDEBUG && _d('Unset options', @disable_opts, + 'because', $long,'disables them'); + } + + if ( exists $self->{allowed_groups}->{$long} ) { + + my @restricted_groups = grep { + !exists $self->{allowed_groups}->{$long}->{$_} + } keys %{$self->{groups}}; + + my @restricted_opts; + foreach my $restricted_group ( @restricted_groups ) { + RESTRICTED_OPT: + foreach my $restricted_opt ( + keys %{$self->{groups}->{$restricted_group}} ) + { + next RESTRICTED_OPT if $restricted_opt eq $long; + push @restricted_opts, $restricted_opt + if $self->{opts}->{$restricted_opt}->{got}; + } + } + + if ( @restricted_opts ) { + my $err; + if ( @restricted_opts == 1 ) { + $err = "--$restricted_opts[0]"; + } + else { + $err = join(', ', + map { "--$self->{opts}->{$_}->{long}" } + grep { $_ } + @restricted_opts[0..scalar(@restricted_opts) - 2] + ) + . 
' or --'.$self->{opts}->{$restricted_opts[-1]}->{long}; + } + $self->save_error("--$long is not allowed with $err"); + } + } + + } + elsif ( $opt->{is_required} ) { + $self->save_error("Required option --$long must be specified"); + } + + $self->_validate_type($opt); + if ( $opt->{parsed} ) { + delete $long[$i]; + } + else { + PTDEBUG && _d('Temporarily failed to parse', $long); + } + } + + die "Failed to parse options, possibly due to circular dependencies" + if @long == $long_last; + $long_last = @long; + } + + return; +} + +sub _validate_type { + my ( $self, $opt ) = @_; + return unless $opt; + + if ( !$opt->{type} ) { + $opt->{parsed} = 1; + return; + } + + my $val = $opt->{value}; + + if ( $val && $opt->{type} eq 'm' ) { # type time + PTDEBUG && _d('Parsing option', $opt->{long}, 'as a time value'); + my ( $prefix, $num, $suffix ) = $val =~ m/([+-]?)(\d+)([a-z])?$/; + if ( !$suffix ) { + my ( $s ) = $opt->{desc} =~ m/\(suffix (.)\)/; + $suffix = $s || 's'; + PTDEBUG && _d('No suffix given; using', $suffix, 'for', + $opt->{long}, '(value:', $val, ')'); + } + if ( $suffix =~ m/[smhd]/ ) { + $val = $suffix eq 's' ? $num # Seconds + : $suffix eq 'm' ? $num * 60 # Minutes + : $suffix eq 'h' ? $num * 3600 # Hours + : $num * 86400; # Days + $opt->{value} = ($prefix || '') . $val; + PTDEBUG && _d('Setting option', $opt->{long}, 'to', $val); + } + else { + $self->save_error("Invalid time suffix for --$opt->{long}"); + } + } + elsif ( $val && $opt->{type} eq 'd' ) { # type DSN + PTDEBUG && _d('Parsing option', $opt->{long}, 'as a DSN'); + my $prev = {}; + my $from_key = $self->{defaults_to}->{ $opt->{long} }; + if ( $from_key ) { + PTDEBUG && _d($opt->{long}, 'DSN copies from', $from_key, 'DSN'); + if ( $self->{opts}->{$from_key}->{parsed} ) { + $prev = $self->{opts}->{$from_key}->{value}; + } + else { + PTDEBUG && _d('Cannot parse', $opt->{long}, 'until', + $from_key, 'parsed'); + return; + } + } + my $defaults = $self->{DSNParser}->parse_options($self); + if (!$opt->{attributes}->{repeatable}) { + $opt->{value} = $self->{DSNParser}->parse($val, $prev, $defaults); + } else { + my $values = []; + for my $dsn_string (@$val) { + push @$values, $self->{DSNParser}->parse($dsn_string, $prev, $defaults); + } + $opt->{value} = $values; + } + } + elsif ( $val && $opt->{type} eq 'z' ) { # type size + PTDEBUG && _d('Parsing option', $opt->{long}, 'as a size value'); + $self->_parse_size($opt, $val); + } + elsif ( $opt->{type} eq 'H' || (defined $val && $opt->{type} eq 'h') ) { + $opt->{value} = { map { $_ => 1 } split(/(?<!\\),\s*/, ($val || '')) }; + } + elsif ( $opt->{type} eq 'A' || (defined $val && $opt->{type} eq 'a') ) { + $opt->{value} = [ split(/(?<!\\),\s*/, ($val || '')) ]; + } + else { + PTDEBUG && _d('Nothing to validate for option', + $opt->{long}, 'type', $opt->{type}, 'value', $val); + } + + $opt->{parsed} = 1; + return; +} + +sub get { + my ( $self, $opt ) = @_; + my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt); + die "Option $opt does not exist" + unless $long && exists $self->{opts}->{$long}; + return $self->{opts}->{$long}->{value}; +} + +sub got { + my ( $self, $opt ) = @_; + my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt); + die "Option $opt does not exist" + unless $long && exists $self->{opts}->{$long}; + return $self->{opts}->{$long}->{got}; +} + +sub has { + my ( $self, $opt ) = @_; + my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt); + return defined $long ? exists $self->{opts}->{$long} : 0; +} + +sub set { + my ( $self, $opt, $val ) = @_; + my $long = (length $opt == 1 ? 
$self->{short_opts}->{$opt} : $opt); + die "Option $opt does not exist" + unless $long && exists $self->{opts}->{$long}; + $self->{opts}->{$long}->{value} = $val; + return; +} + +sub save_error { + my ( $self, $error ) = @_; + push @{$self->{errors}}, $error; + return; +} + +sub errors { + my ( $self ) = @_; + return $self->{errors}; +} + +sub usage { + my ( $self ) = @_; + warn "No usage string is set" unless $self->{usage}; # XXX + return "Usage: " . ($self->{usage} || '') . "\n"; +} + +sub descr { + my ( $self ) = @_; + warn "No description string is set" unless $self->{description}; # XXX + my $descr = ($self->{description} || $self->{program_name} || '') + . " For more details, please use the --help option, " + . "or try 'perldoc $PROGRAM_NAME' " + . "for complete documentation."; + $descr = join("\n", $descr =~ m/(.{0,80})(?:\s+|$)/g) + unless $ENV{DONT_BREAK_LINES}; + $descr =~ s/ +$//mg; + return $descr; +} + +sub usage_or_errors { + my ( $self, $file, $return ) = @_; + $file ||= $self->{file} || __FILE__; + + if ( !$self->{description} || !$self->{usage} ) { + PTDEBUG && _d("Getting description and usage from SYNOPSIS in", $file); + my %synop = $self->_parse_synopsis($file); + $self->{description} ||= $synop{description}; + $self->{usage} ||= $synop{usage}; + PTDEBUG && _d("Description:", $self->{description}, + "\nUsage:", $self->{usage}); + } + + if ( $self->{opts}->{help}->{got} ) { + print $self->print_usage() or die "Cannot print usage: $OS_ERROR"; + exit 0 unless $return; + } + elsif ( scalar @{$self->{errors}} ) { + print $self->print_errors() or die "Cannot print errors: $OS_ERROR"; + exit 1 unless $return; + } + + return; +} + +sub print_errors { + my ( $self ) = @_; + my $usage = $self->usage() . "\n"; + if ( (my @errors = @{$self->{errors}}) ) { + $usage .= join("\n * ", 'Errors in command-line arguments:', @errors) + . "\n"; + } + return $usage . "\n" . $self->descr(); +} + +sub print_usage { + my ( $self ) = @_; + die "Run get_opts() before print_usage()" unless $self->{got_opts}; + my @opts = values %{$self->{opts}}; + + my $maxl = max( + map { + length($_->{long}) # option long name + + ($_->{is_negatable} ? 4 : 0) # "[no]" if opt is negatable + + ($_->{type} ? 2 : 0) # "=x" where x is the opt type + } + @opts); + + my $maxs = max(0, + map { + length($_) + + ($self->{opts}->{$_}->{is_negatable} ? 4 : 0) + + ($self->{opts}->{$_}->{type} ? 2 : 0) + } + values %{$self->{short_opts}}); + + my $lcol = max($maxl, ($maxs + 3)); + my $rcol = 80 - $lcol - 6; + my $rpad = ' ' x ( 80 - $rcol ); + + $maxs = max($lcol - 3, $maxs); + + my $usage = $self->descr() . "\n" . $self->usage(); + + my @groups = reverse sort grep { $_ ne 'default'; } keys %{$self->{groups}}; + push @groups, 'default'; + + foreach my $group ( reverse @groups ) { + $usage .= "\n".($group eq 'default' ? 'Options' : $group).":\n\n"; + foreach my $opt ( + sort { $a->{long} cmp $b->{long} } + grep { $_->{group} eq $group } + @opts ) + { + my $long = $opt->{is_negatable} ? "[no]$opt->{long}" : $opt->{long}; + my $short = $opt->{short}; + my $desc = $opt->{desc}; + + $long .= $opt->{type} ? "=$opt->{type}" : ""; + + if ( $opt->{type} && $opt->{type} eq 'm' ) { + my ($s) = $desc =~ m/\(suffix (.)\)/; + $s ||= 's'; + $desc =~ s/\s+\(suffix .\)//; + $desc .= ". Optional suffix s=seconds, m=minutes, h=hours, " + . "d=days; if no suffix, $s is used."; + } + $desc = join("\n$rpad", grep { $_ } $desc =~ m/(.{0,$rcol}(?!\W))(?:\s+|(?<=\W)|$)/g); + $desc =~ s/ +$//mg; + if ( $short ) { + $usage .= sprintf(" --%-${maxs}s -%s %s\n", $long, $short, $desc); + } + else { + $usage .= sprintf(" --%-${lcol}s %s\n", $long, $desc); + } + } + } + + $usage .= "\nOption types: s=string, i=integer, f=float, h/H/a/A=comma-separated list, d=DSN, z=size, m=time\n"; + + if ( (my @rules = @{$self->{rules}}) ) { + $usage .= "\nRules:\n\n"; + $usage .= join("\n", map { " $_" } @rules) . "\n"; + } + if ( $self->{DSNParser} ) { + $usage .= "\n" . $self->{DSNParser}->usage(); + } + $usage .= "\nOptions and values after processing arguments:\n\n"; + foreach my $opt ( sort { $a->{long} cmp $b->{long} } @opts ) { + my $val = $opt->{value}; + my $type = $opt->{type} || ''; + my $bool = $opt->{spec} =~ m/^[\w-]+(?:\|[\w-])?!?$/; + $val = $bool ? ( $val ? 'TRUE' : 'FALSE' ) + : !defined $val ? '(No value)' + : $type eq 'd' ? $self->{DSNParser}->as_string($val) + : $type =~ m/H|h/ ? join(',', sort keys %$val) + : $type =~ m/A|a/ ? join(',', @$val) + : $val; + $usage .= sprintf(" --%-${lcol}s %s\n", $opt->{long}, $val); + } + return $usage; +} + +sub prompt_noecho { + shift @_ if ref $_[0] eq __PACKAGE__; + my ( $prompt ) = @_; + local $OUTPUT_AUTOFLUSH = 1; + print STDERR $prompt + or die "Cannot print: $OS_ERROR"; + my $response; + eval { + require Term::ReadKey; + Term::ReadKey::ReadMode('noecho'); + chomp($response = <STDIN>); + Term::ReadKey::ReadMode('normal'); + print "\n" + or die "Cannot print: $OS_ERROR"; + }; + if ( $EVAL_ERROR ) { + die "Cannot read response; is Term::ReadKey installed? $EVAL_ERROR"; + } + return $response; +} + +sub _read_config_file { + my ( $self, $filename ) = @_; + open my $fh, "<", $filename or die "Cannot open $filename: $OS_ERROR\n"; + my @args; + my $prefix = '--'; + my $parse = 1; + + LINE: + while ( my $line = <$fh> ) { + chomp $line; + next LINE if $line =~ m/^\s*(?:\#|\;|$)/; + $line =~ s/\s+#.*$//g; + $line =~ s/^\s+|\s+$//g; + if ( $line eq '--' ) { + $prefix = ''; + $parse = 0; + next LINE; + } + + if ( $parse + && !$self->has('version-check') + && $line =~ /version-check/ + ) { + next LINE; + } + + if ( $parse + && (my($opt, $arg) = $line =~ m/^\s*([^=\s]+?)(?:\s*=\s*(.*?)\s*)?$/) + ) { + push @args, grep { defined $_ } ("$prefix$opt", $arg); + } + elsif ( $line =~ m/./ ) { + push @args, $line; + } + else { + die "Syntax error in file $filename at line $INPUT_LINE_NUMBER"; + } + } + close $fh; + return @args; +} + +sub read_para_after { + my ( $self, $file, $regex ) = @_; + open my $fh, "<", $file or die "Can't open $file: $OS_ERROR"; + local $INPUT_RECORD_SEPARATOR = ''; + my $para; + while ( $para = <$fh> ) { + next unless $para =~ m/^=pod$/m; + last; + } + while ( $para = <$fh> ) { + next unless $para =~ m/$regex/; + last; + } + $para = <$fh>; + chomp($para); + close $fh or die "Can't close $file: $OS_ERROR"; + return $para; +} + +sub clone { + my ( $self ) = @_; + + my %clone = map { + my $hashref = $self->{$_}; + my $val_copy = {}; + foreach my $key ( keys %$hashref ) { + my $ref = ref $hashref->{$key}; + $val_copy->{$key} = !$ref ? $hashref->{$key} + : $ref eq 'HASH' ? { %{$hashref->{$key}} } + : $ref eq 'ARRAY' ? 
[ @{$hashref->{$key}} ] + : $hashref->{$key}; + } + $_ => $val_copy; + } qw(opts short_opts defaults); + + foreach my $scalar ( qw(got_opts) ) { + $clone{$scalar} = $self->{$scalar}; + } + + return bless \%clone; +} + +sub _parse_size { + my ( $self, $opt, $val ) = @_; + + if ( lc($val || '') eq 'null' ) { + PTDEBUG && _d('NULL size for', $opt->{long}); + $opt->{value} = 'null'; + return; + } + + my %factor_for = (k => 1_024, M => 1_048_576, G => 1_073_741_824); + my ($pre, $num, $factor) = $val =~ m/^([+-])?(\d+)([kMG])?$/; + if ( defined $num ) { + if ( $factor ) { + $num *= $factor_for{$factor}; + PTDEBUG && _d('Setting option', $opt->{y}, + 'to num', $num, '* factor', $factor); + } + $opt->{value} = ($pre || '') . $num; + } + else { + $self->save_error("Invalid size for --$opt->{long}: $val"); + } + return; +} + +sub _parse_attribs { + my ( $self, $option, $attribs ) = @_; + my $types = $self->{types}; + return $option + . ($attribs->{'short form'} ? '|' . $attribs->{'short form'} : '' ) + . ($attribs->{'negatable'} ? '!' : '' ) + . ($attribs->{'cumulative'} ? '+' : '' ) + . ($attribs->{'type'} ? '=' . $types->{$attribs->{type}} : '' ); +} + +sub _parse_synopsis { + my ( $self, $file ) = @_; + $file ||= $self->{file} || __FILE__; + PTDEBUG && _d("Parsing SYNOPSIS in", $file); + + local $INPUT_RECORD_SEPARATOR = ''; # read paragraphs + open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; + my $para; + 1 while defined($para = <$fh>) && $para !~ m/^=head1 SYNOPSIS/; + die "$file does not contain a SYNOPSIS section" unless $para; + my @synop; + for ( 1..2 ) { # 1 for the usage, 2 for the description + my $para = <$fh>; + push @synop, $para; + } + close $fh; + PTDEBUG && _d("Raw SYNOPSIS text:", @synop); + my ($usage, $desc) = @synop; + die "The SYNOPSIS section in $file is not formatted properly" + unless $usage && $desc; + + $usage =~ s/^\s*Usage:\s+(.+)/$1/; + chomp $usage; + + $desc =~ s/\n/ /g; + $desc =~ s/\s{2,}/ /g; + $desc =~ s/\. ([A-Z][a-z])/. $1/g; + $desc =~ s/\s+$//; + + return ( + description => $desc, + usage => $usage, + ); +}; + +sub set_vars { + my ($self, $file) = @_; + $file ||= $self->{file} || __FILE__; + + my %user_vars; + my $user_vars = $self->has('set-vars') ? $self->get('set-vars') : undef; + if ( $user_vars ) { + foreach my $var_val ( @$user_vars ) { + my ($var, $val) = $var_val =~ m/([^\s=]+)=(\S+)/; + die "Invalid --set-vars value: $var_val\n" unless $var && defined $val; + $user_vars{$var} = { + val => $val, + default => 0, + }; + } + } + + my %default_vars; + my $default_vars = $self->read_para_after($file, qr/MAGIC_set_vars/); + if ( $default_vars ) { + %default_vars = map { + my $var_val = $_; + my ($var, $val) = $var_val =~ m/([^\s=]+)=(\S+)/; + die "Invalid --set-vars value: $var_val\n" unless $var && defined $val; + $var => { + val => $val, + default => 1, + }; + } split("\n", $default_vars); + } + + my %vars = ( + %default_vars, # first the tool's defaults + %user_vars, # then the user's which overwrite the defaults + ); + PTDEBUG && _d('--set-vars:', Dumper(\%vars)); + return \%vars; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +if ( PTDEBUG ) { + print STDERR '# ', $^X, ' ', $], "\n"; + if ( my $uname = `uname -a` ) { + $uname =~ s/\s+/ /g; + print STDERR "# $uname\n"; + } + print STDERR '# Arguments: ', + join(' ', map { my $a = "_[$_]_"; $a =~ s/\n/\n# /g; $a; } @ARGV), "\n"; +} + +1; +} +# ########################################################################### +# End OptionParser package +# ########################################################################### + +# ########################################################################### +# Lmo::Utils package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Lmo/Utils.pm +# t/lib/Lmo/Utils.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Lmo::Utils; + +use strict; +use warnings qw( FATAL all ); +require Exporter; +our (@ISA, @EXPORT, @EXPORT_OK); + +BEGIN { + @ISA = qw(Exporter); + @EXPORT = @EXPORT_OK = qw( + _install_coderef + _unimport_coderefs + _glob_for + _stash_for + ); +} + +{ + no strict 'refs'; + sub _glob_for { + return \*{shift()} + } + + sub _stash_for { + return \%{ shift() . "::" }; + } +} + +sub _install_coderef { + my ($to, $code) = @_; + + return *{ _glob_for $to } = $code; +} + +sub _unimport_coderefs { + my ($target, @names) = @_; + return unless @names; + my $stash = _stash_for($target); + foreach my $name (@names) { + if ($stash->{$name} and defined(&{$stash->{$name}})) { + delete $stash->{$name}; + } + } +} + +1; +} +# ########################################################################### +# End Lmo::Utils package +# ########################################################################### + +# ########################################################################### +# Lmo::Meta package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Lmo/Meta.pm +# t/lib/Lmo/Meta.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Lmo::Meta; +use strict; +use warnings qw( FATAL all ); + +my %metadata_for; + +sub new { + my $class = shift; + return bless { @_ }, $class +} + +sub metadata_for { + my $self = shift; + my ($class) = @_; + + return $metadata_for{$class} ||= {}; +} + +sub class { shift->{class} } + +sub attributes { + my $self = shift; + return keys %{$self->metadata_for($self->class)} +} + +sub attributes_for_new { + my $self = shift; + my @attributes; + + my $class_metadata = $self->metadata_for($self->class); + while ( my ($attr, $meta) = each %$class_metadata ) { + if ( exists $meta->{init_arg} ) { + push @attributes, $meta->{init_arg} + if defined $meta->{init_arg}; + } + else { + push @attributes, $attr; + } + } + return @attributes; +} + +1; +} +# ########################################################################### +# End Lmo::Meta package +# ########################################################################### + +# ########################################################################### +# Lmo::Object package +# This package is a copy without comments from the original. 
The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Lmo/Object.pm +# t/lib/Lmo/Object.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Lmo::Object; + +use strict; +use warnings qw( FATAL all ); + +use Carp (); +use Scalar::Util qw(blessed); + +use Lmo::Meta; +use Lmo::Utils qw(_glob_for); + +sub new { + my $class = shift; + my $args = $class->BUILDARGS(@_); + + my $class_metadata = Lmo::Meta->metadata_for($class); + + my @args_to_delete; + while ( my ($attr, $meta) = each %$class_metadata ) { + next unless exists $meta->{init_arg}; + my $init_arg = $meta->{init_arg}; + + if ( defined $init_arg ) { + $args->{$attr} = delete $args->{$init_arg}; + } + else { + push @args_to_delete, $attr; + } + } + + delete $args->{$_} for @args_to_delete; + + for my $attribute ( keys %$args ) { + if ( my $coerce = $class_metadata->{$attribute}{coerce} ) { + $args->{$attribute} = $coerce->($args->{$attribute}); + } + if ( my $isa_check = $class_metadata->{$attribute}{isa} ) { + my ($check_name, $check_sub) = @$isa_check; + $check_sub->($args->{$attribute}); + } + } + + while ( my ($attribute, $meta) = each %$class_metadata ) { + next unless $meta->{required}; + Carp::confess("Attribute ($attribute) is required for $class") + if ! exists $args->{$attribute} + } + + my $self = bless $args, $class; + + my @build_subs; + my $linearized_isa = mro::get_linear_isa($class); + + for my $isa_class ( @$linearized_isa ) { + unshift @build_subs, *{ _glob_for "${isa_class}::BUILD" }{CODE}; + } + my @args = %$args; + for my $sub (grep { defined($_) && exists &$_ } @build_subs) { + $sub->( $self, @args); + } + return $self; +} + +sub BUILDARGS { + shift; # No need for the classname + if ( @_ == 1 && ref($_[0]) ) { + Carp::confess("Single parameters to new() must be a HASH ref, not $_[0]") + unless ref($_[0]) eq ref({}); + return {%{$_[0]}} # We want a new reference, always + } + else { + return { @_ }; + } +} + +sub meta { + my $class = shift; + $class = Scalar::Util::blessed($class) || $class; + return Lmo::Meta->new(class => $class); +} + +1; +} +# ########################################################################### +# End Lmo::Object package +# ########################################################################### + +# ########################################################################### +# Lmo::Types package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Lmo/Types.pm +# t/lib/Lmo/Types.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Lmo::Types; + +use strict; +use warnings qw( FATAL all ); + +use Carp (); +use Scalar::Util qw(looks_like_number blessed); + + +our %TYPES = ( + Bool => sub { !$_[0] || (defined $_[0] && looks_like_number($_[0]) && $_[0] == 1) }, + Num => sub { defined $_[0] && looks_like_number($_[0]) }, + Int => sub { defined $_[0] && looks_like_number($_[0]) && $_[0] == int($_[0]) }, + Str => sub { defined $_[0] }, + Object => sub { defined $_[0] && blessed($_[0]) }, + FileHandle => sub { local $@; require IO::Handle; fileno($_[0]) && $_[0]->opened }, + + map { + my $type = /R/ ? $_ : uc $_; + $_ . "Ref" => sub { ref $_[0] eq $type } + } qw(Array Code Hash Regexp Glob Scalar) +); + +sub check_type_constaints { + my ($attribute, $type_check, $check_name, $val) = @_; + ( ref($type_check) eq 'CODE' + ? $type_check->($val) + : (ref $val eq $type_check + || ($val && $val eq $type_check) + || (exists $TYPES{$type_check} && $TYPES{$type_check}->($val))) + ) + || Carp::confess( + qq<Attribute ($attribute) does not pass the type constraint because: > + . qq<Validation failed for '$check_name' with value > + . (defined $val ? Lmo::Dumper($val) : 'undef') ) +} + +sub _nested_constraints { + my ($attribute, $aggregate_type, $type) = @_; + + my $inner_types; + if ( $type =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) { + $inner_types = _nested_constraints($1, $2); + } + else { + $inner_types = $TYPES{$type}; + } + + if ( $aggregate_type eq 'ArrayRef' ) { + return sub { + my ($val) = @_; + return unless ref($val) eq ref([]); + + if ($inner_types) { + for my $value ( @{$val} ) { + return unless $inner_types->($value) + } + } + else { + for my $value ( @{$val} ) { + return unless $value && ($value eq $type + || (Scalar::Util::blessed($value) && $value->isa($type))); + } + } + return 1; + }; + } + elsif ( $aggregate_type eq 'Maybe' ) { + return sub { + my ($value) = @_; + return 1 if ! defined($value); + if ($inner_types) { + return unless $inner_types->($value) + } + else { + return unless $value eq $type + || (Scalar::Util::blessed($value) && $value->isa($type)); + } + return 1; + } + } + else { + Carp::confess("Nested aggregate types are only implemented for ArrayRefs and Maybe"); + } +} + +1; +} +# ########################################################################### +# End Lmo::Types package +# ########################################################################### + +# ########################################################################### +# Lmo package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Lmo.pm +# t/lib/Lmo.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +BEGIN { +$INC{"Lmo.pm"} = __FILE__; +package Lmo; +our $VERSION = '0.30_Percona'; # Forked from 0.30 of Mo. 
+ + +use strict; +use warnings qw( FATAL all ); + +use Carp (); +use Scalar::Util qw(looks_like_number blessed); + +use Lmo::Meta; +use Lmo::Object; +use Lmo::Types; + +use Lmo::Utils; + +my %export_for; +sub import { + warnings->import(qw(FATAL all)); + strict->import(); + + my $caller = scalar caller(); # Caller's package + my %exports = ( + extends => \&extends, + has => \&has, + with => \&with, + override => \&override, + confess => \&Carp::confess, + ); + + $export_for{$caller} = \%exports; + + for my $keyword ( keys %exports ) { + _install_coderef "${caller}::$keyword" => $exports{$keyword}; + } + + if ( !@{ *{ _glob_for "${caller}::ISA" }{ARRAY} || [] } ) { + @_ = "Lmo::Object"; + goto *{ _glob_for "${caller}::extends" }{CODE}; + } +} + +sub extends { + my $caller = scalar caller(); + for my $class ( @_ ) { + _load_module($class); + } + _set_package_isa($caller, @_); + _set_inherited_metadata($caller); +} + +sub _load_module { + my ($class) = @_; + + (my $file = $class) =~ s{::|'}{/}g; + $file .= '.pm'; + { local $@; eval { require "$file" } } # or warn $@; + return; +} + +sub with { + my $package = scalar caller(); + require Role::Tiny; + for my $role ( @_ ) { + _load_module($role); + _role_attribute_metadata($package, $role); + } + Role::Tiny->apply_roles_to_package($package, @_); +} + +sub _role_attribute_metadata { + my ($package, $role) = @_; + + my $package_meta = Lmo::Meta->metadata_for($package); + my $role_meta = Lmo::Meta->metadata_for($role); + + %$package_meta = (%$role_meta, %$package_meta); +} + +sub has { + my $names = shift; + my $caller = scalar caller(); + + my $class_metadata = Lmo::Meta->metadata_for($caller); + + for my $attribute ( ref $names ? @$names : $names ) { + my %args = @_; + my $method = ($args{is} || '') eq 'ro' + ? sub { + Carp::confess("Cannot assign a value to a read-only accessor at reader ${caller}::${attribute}") + if $#_; + return $_[0]{$attribute}; + } + : sub { + return $#_ + ? $_[0]{$attribute} = $_[1] + : $_[0]{$attribute}; + }; + + $class_metadata->{$attribute} = (); + + if ( my $type_check = $args{isa} ) { + my $check_name = $type_check; + + if ( my ($aggregate_type, $inner_type) = $type_check =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) { + $type_check = Lmo::Types::_nested_constraints($attribute, $aggregate_type, $inner_type); + } + + my $check_sub = sub { + my ($new_val) = @_; + Lmo::Types::check_type_constaints($attribute, $type_check, $check_name, $new_val); + }; + + $class_metadata->{$attribute}{isa} = [$check_name, $check_sub]; + my $orig_method = $method; + $method = sub { + $check_sub->($_[1]) if $#_; + goto &$orig_method; + }; + } + + if ( my $builder = $args{builder} ) { + my $original_method = $method; + $method = sub { + $#_ + ? goto &$original_method + : ! exists $_[0]{$attribute} + ? $_[0]{$attribute} = $_[0]->$builder + : goto &$original_method + }; + } + + if ( my $code = $args{default} ) { + Carp::confess("${caller}::${attribute}'s default is $code, but should be a coderef") + unless ref($code) eq 'CODE'; + my $original_method = $method; + $method = sub { + $#_ + ? goto &$original_method + : ! exists $_[0]{$attribute} + ? 
$_[0]{$attribute} = $_[0]->$code + : goto &$original_method + }; + } + + if ( my $role = $args{does} ) { + my $original_method = $method; + $method = sub { + if ( $#_ ) { + Carp::confess(qq<Attribute ($attribute) doesn't consume a '$role' role>) + unless Scalar::Util::blessed($_[1]) && eval { $_[1]->does($role) } + } + goto &$original_method + }; + } + + if ( my $coercion = $args{coerce} ) { + $class_metadata->{$attribute}{coerce} = $coercion; + my $original_method = $method; + $method = sub { + if ( $#_ ) { + return $original_method->($_[0], $coercion->($_[1])) + } + goto &$original_method; + } + } + + _install_coderef "${caller}::$attribute" => $method; + + if ( $args{required} ) { + $class_metadata->{$attribute}{required} = 1; + } + + if ($args{clearer}) { + _install_coderef "${caller}::$args{clearer}" + => sub { delete shift->{$attribute} } + } + + if ($args{predicate}) { + _install_coderef "${caller}::$args{predicate}" + => sub { exists shift->{$attribute} } + } + + if ($args{handles}) { + _has_handles($caller, $attribute, \%args); + } + + if (exists $args{init_arg}) { + $class_metadata->{$attribute}{init_arg} = $args{init_arg}; + } + } +} + +sub _has_handles { + my ($caller, $attribute, $args) = @_; + my $handles = $args->{handles}; + + my $ref = ref $handles; + my $kv; + if ( $ref eq ref [] ) { + $kv = { map { $_,$_ } @{$handles} }; + } + elsif ( $ref eq ref {} ) { + $kv = $handles; + } + elsif ( $ref eq ref qr// ) { + Carp::confess("Cannot delegate methods based on a Regexp without a type constraint (isa)") + unless $args->{isa}; + my $target_class = $args->{isa}; + $kv = { + map { $_, $_ } + grep { $_ =~ $handles } + grep { !exists $Lmo::Object::{$_} && $target_class->can($_) } + grep { !$export_for{$target_class}->{$_} } + keys %{ _stash_for $target_class } + }; + } + else { + Carp::confess("handles for $ref not yet implemented"); + } + + while ( my ($method, $target) = each %{$kv} ) { + my $name = _glob_for "${caller}::$method"; + Carp::confess("You cannot overwrite a locally defined method ($method) with a delegation") + if defined &$name; + + my ($target, @curried_args) = ref($target) ? 
@$target : $target; + *$name = sub { + my $self = shift; + my $delegate_to = $self->$attribute(); + my $error = "Cannot delegate $method to $target because the value of $attribute"; + Carp::confess("$error is not defined") unless $delegate_to; + Carp::confess("$error is not an object (got '$delegate_to')") + unless Scalar::Util::blessed($delegate_to) || (!ref($delegate_to) && $delegate_to->can($target)); + return $delegate_to->$target(@curried_args, @_); + } + } +} + +sub _set_package_isa { + my ($package, @new_isa) = @_; + my $package_isa = \*{ _glob_for "${package}::ISA" }; + @{*$package_isa} = @new_isa; +} + +sub _set_inherited_metadata { + my $class = shift; + my $class_metadata = Lmo::Meta->metadata_for($class); + my $linearized_isa = mro::get_linear_isa($class); + my %new_metadata; + + for my $isa_class (reverse @$linearized_isa) { + my $isa_metadata = Lmo::Meta->metadata_for($isa_class); + %new_metadata = ( + %new_metadata, + %$isa_metadata, + ); + } + %$class_metadata = %new_metadata; +} + +sub unimport { + my $caller = scalar caller(); + my $target = caller; + _unimport_coderefs($target, keys %{$export_for{$caller}}); +} + +sub Dumper { + require Data::Dumper; + local $Data::Dumper::Indent = 0; + local $Data::Dumper::Sortkeys = 0; + local $Data::Dumper::Quotekeys = 0; + local $Data::Dumper::Terse = 1; + + Data::Dumper::Dumper(@_) +} + +BEGIN { + if ($] >= 5.010) { + { local $@; require mro; } + } + else { + local $@; + eval { + require MRO::Compat; + } or do { + *mro::get_linear_isa = *mro::get_linear_isa_dfs = sub { + no strict 'refs'; + + my $classname = shift; + + my @lin = ($classname); + my %stored; + foreach my $parent (@{"$classname\::ISA"}) { + my $plin = mro::get_linear_isa_dfs($parent); + foreach (@$plin) { + next if exists $stored{$_}; + push(@lin, $_); + $stored{$_} = 1; + } + } + return \@lin; + }; + } + } +} + +sub override { + my ($methods, $code) = @_; + my $caller = scalar caller; + + for my $method ( ref($methods) ? @$methods : $methods ) { + my $full_method = "${caller}::${method}"; + *{_glob_for $full_method} = $code; + } +} + +} +1; +} +# ########################################################################### +# End Lmo package +# ########################################################################### + +# ########################################################################### +# Cxn package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Cxn.pm +# t/lib/Cxn.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Cxn; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Scalar::Util qw(blessed); +use constant { + PTDEBUG => $ENV{PTDEBUG} || 0, + PERCONA_TOOLKIT_TEST_USE_DSN_NAMES => $ENV{PERCONA_TOOLKIT_TEST_USE_DSN_NAMES} || 0, +}; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(DSNParser OptionParser); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my ($dp, $o) = @args{@required_args}; + + my $dsn_defaults = $dp->parse_options($o); + my $prev_dsn = $args{prev_dsn}; + my $dsn = $args{dsn}; + if ( !$dsn ) { + $args{dsn_string} ||= 'h=' . 
($dsn_defaults->{h} || 'localhost'); + + $dsn = $dp->parse( + $args{dsn_string}, $prev_dsn, $dsn_defaults); + } + elsif ( $prev_dsn ) { + $dsn = $dp->copy($prev_dsn, $dsn); + } + + my $dsn_name = $dp->as_string($dsn, [qw(h P S)]) + || $dp->as_string($dsn, [qw(F)]) + || ''; + + my $self = { + dsn => $dsn, + dbh => $args{dbh}, + dsn_name => $dsn_name, + hostname => '', + set => $args{set}, + NAME_lc => defined($args{NAME_lc}) ? $args{NAME_lc} : 1, + dbh_set => 0, + ask_pass => $o->get('ask-pass'), + DSNParser => $dp, + is_cluster_node => undef, + parent => $args{parent}, + }; + + return bless $self, $class; +} + +sub connect { + my ( $self, %opts ) = @_; + my $dsn = $opts{dsn} || $self->{dsn}; + my $dp = $self->{DSNParser}; + + my $dbh = $self->{dbh}; + if ( !$dbh || !$dbh->ping() ) { + if ( $self->{ask_pass} && !$self->{asked_for_pass} && !defined $dsn->{p} ) { + $dsn->{p} = OptionParser::prompt_noecho("Enter MySQL password: "); + $self->{asked_for_pass} = 1; + } + $dbh = $dp->get_dbh( + $dp->get_cxn_params($dsn), + { + AutoCommit => 1, + %opts, + }, + ); + } + + $dbh = $self->set_dbh($dbh); + if ( $opts{dsn} ) { + $self->{dsn} = $dsn; + $self->{dsn_name} = $dp->as_string($dsn, [qw(h P S)]) + || $dp->as_string($dsn, [qw(F)]) + || ''; + + } + PTDEBUG && _d($dbh, 'Connected dbh to', $self->{hostname},$self->{dsn_name}); + return $dbh; +} + +sub set_dbh { + my ($self, $dbh) = @_; + + if ( $self->{dbh} && $self->{dbh} == $dbh && $self->{dbh_set} ) { + PTDEBUG && _d($dbh, 'Already set dbh'); + return $dbh; + } + + PTDEBUG && _d($dbh, 'Setting dbh'); + + $dbh->{FetchHashKeyName} = 'NAME_lc' if $self->{NAME_lc}; + + my $sql = 'SELECT @@server_id /*!50038 , @@hostname*/'; + PTDEBUG && _d($dbh, $sql); + my ($server_id, $hostname) = $dbh->selectrow_array($sql); + PTDEBUG && _d($dbh, 'hostname:', $hostname, $server_id); + if ( $hostname ) { + $self->{hostname} = $hostname; + } + + if ( $self->{parent} ) { + PTDEBUG && _d($dbh, 'Setting InactiveDestroy=1 in parent'); + $dbh->{InactiveDestroy} = 1; + } + + if ( my $set = $self->{set}) { + $set->($dbh); + } + + $self->{dbh} = $dbh; + $self->{dbh_set} = 1; + return $dbh; +} + +sub lost_connection { + my ($self, $e) = @_; + return 0 unless $e; + return $e =~ m/MySQL server has gone away/ + || $e =~ m/Lost connection to MySQL server/ + || $e =~ m/Server shutdown in progress/; +} + +sub dbh { + my ($self) = @_; + return $self->{dbh}; +} + +sub dsn { + my ($self) = @_; + return $self->{dsn}; +} + +sub name { + my ($self) = @_; + return $self->{dsn_name} if PERCONA_TOOLKIT_TEST_USE_DSN_NAMES; + return $self->{hostname} || $self->{dsn_name} || 'unknown host'; +} + +sub description { + my ($self) = @_; + return sprintf("%s -> %s:%s", $self->name(), $self->{dsn}->{h}, $self->{dsn}->{P} || 'socket'); +} + +sub get_id { + my ($self, $cxn) = @_; + + $cxn ||= $self; + + my $unique_id; + if ($cxn->is_cluster_node()) { # for cluster we concatenate various variables to maximize id 'uniqueness' across versions + my $sql = q{SHOW STATUS LIKE 'wsrep\_local\_index'}; + my (undef, $wsrep_local_index) = $cxn->dbh->selectrow_array($sql); + PTDEBUG && _d("Got cluster wsrep_local_index: ",$wsrep_local_index); + $unique_id = $wsrep_local_index."|"; + foreach my $val ('server\_id', 'wsrep\_sst\_receive\_address', 'wsrep\_node\_name', 'wsrep\_node\_address') { + my $sql = "SHOW VARIABLES LIKE '$val'"; + PTDEBUG && _d($cxn->name, $sql); + my (undef, $val) = $cxn->dbh->selectrow_array($sql); + $unique_id .= "|$val"; + } + } else { + my $sql = 'SELECT @@SERVER_ID'; + PTDEBUG && 
_d($sql); + $unique_id = $cxn->dbh->selectrow_array($sql); + } + PTDEBUG && _d("Generated unique id for cluster:", $unique_id); + return $unique_id; +} + + +sub is_cluster_node { + my ($self, $cxn) = @_; + + $cxn ||= $self; + + my $sql = "SHOW VARIABLES LIKE 'wsrep\_on'"; + + my $dbh; + if ($cxn->isa('DBI::db')) { + $dbh = $cxn; + PTDEBUG && _d($sql); #don't invoke name() if it's not a Cxn! + } + else { + $dbh = $cxn->dbh(); + PTDEBUG && _d($cxn->name, $sql); + } + + my $row = $dbh->selectrow_arrayref($sql); + return $row && $row->[1] && ($row->[1] eq 'ON' || $row->[1] eq '1') ? 1 : 0; + +} + +sub remove_duplicate_cxns { + my ($self, %args) = @_; + my @cxns = @{$args{cxns}}; + my $seen_ids = $args{seen_ids} || {}; + PTDEBUG && _d("Removing duplicates from ", join(" ", map { $_->name } @cxns)); + my @trimmed_cxns; + + for my $cxn ( @cxns ) { + + my $id = $cxn->get_id(); + PTDEBUG && _d('Server ID for ', $cxn->name, ': ', $id); + + if ( ! $seen_ids->{$id}++ ) { + push @trimmed_cxns, $cxn + } + else { + PTDEBUG && _d("Removing ", $cxn->name, + ", ID ", $id, ", because we've already seen it"); + } + } + + return \@trimmed_cxns; +} + +sub DESTROY { + my ($self) = @_; + + PTDEBUG && _d('Destroying cxn'); + + if ( $self->{parent} ) { + PTDEBUG && _d($self->{dbh}, 'Not disconnecting dbh in parent'); + } + elsif ( $self->{dbh} + && blessed($self->{dbh}) + && $self->{dbh}->can("disconnect") ) + { + PTDEBUG && _d($self->{dbh}, 'Disconnecting dbh on', $self->{hostname}, + $self->{dsn_name}); + $self->{dbh}->disconnect(); + } + + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Cxn package +# ########################################################################### + +# ########################################################################### +# Percona::XtraDB::Cluster package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Percona/XtraDB/Cluster.pm +# t/lib/Percona/XtraDB/Cluster.t +# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package Percona::XtraDB::Cluster; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Lmo; +use Data::Dumper; + +{ local $EVAL_ERROR; eval { require Cxn } }; + +sub get_cluster_name { + my ($self, $cxn) = @_; + my $sql = "SHOW VARIABLES LIKE 'wsrep\_cluster\_name'"; + PTDEBUG && _d($cxn->name, $sql); + my (undef, $cluster_name) = $cxn->dbh->selectrow_array($sql); + return $cluster_name; +} + +sub is_cluster_node { + my ($self, $cxn) = @_; + + my $sql = "SHOW VARIABLES LIKE 'wsrep\_on'"; + PTDEBUG && _d($cxn->name, $sql); + my $row = $cxn->dbh->selectrow_arrayref($sql); + PTDEBUG && _d(Dumper($row)); + return unless $row && $row->[1] && ($row->[1] eq 'ON' || $row->[1] eq '1'); + + my $cluster_name = $self->get_cluster_name($cxn); + return $cluster_name; +} + +sub same_node { + my ($self, $cxn1, $cxn2) = @_; + + foreach my $val ('wsrep\_sst\_receive\_address', 'wsrep\_node\_name', 'wsrep\_node\_address') { + my $sql = "SHOW VARIABLES LIKE '$val'"; + PTDEBUG && _d($cxn1->name, $cxn2->name, $sql); + my (undef, $val1) = $cxn1->dbh->selectrow_array($sql); + my (undef, $val2) = $cxn2->dbh->selectrow_array($sql); + + return unless ($val1 || '') eq ($val2 || ''); + } + + return 1; +} + +sub find_cluster_nodes { + my ($self, %args) = @_; + + my $dbh = $args{dbh}; + my $dsn = $args{dsn}; + my $dp = $args{DSNParser}; + my $make_cxn = $args{make_cxn}; + + + my $sql = q{SHOW STATUS LIKE 'wsrep\_incoming\_addresses'}; + PTDEBUG && _d($sql); + my (undef, $addresses) = $dbh->selectrow_array($sql); + PTDEBUG && _d("Cluster nodes found: ", $addresses); + return unless $addresses; + + my @addresses = grep { !/\Aunspecified\z/i } + split /,\s*/, $addresses; + + my @nodes; + foreach my $address ( @addresses ) { + my ($host, $port) = split /:/, $address; + my $spec = "h=$host" + . ($port ? ",P=$port" : ""); + my $node_dsn = $dp->parse($spec, $dsn); + my $node_dbh = eval { $dp->get_dbh( + $dp->get_cxn_params($node_dsn), { AutoCommit => 1 }) }; + if ( $EVAL_ERROR ) { + print STDERR "Cannot connect to ", $dp->as_string($node_dsn), + ", discovered through $sql: $EVAL_ERROR\n"; + if ( !$port && $dsn->{P} != 3306 ) { + $address .= ":3306"; + redo; + } + next; + } + PTDEBUG && _d('Connected to', $dp->as_string($node_dsn)); + $node_dbh->disconnect(); + + push @nodes, $make_cxn->(dsn => $node_dsn); + } + + return \@nodes; +} + +sub remove_duplicate_cxns { + my ($self, %args) = @_; + my @cxns = @{$args{cxns}}; + my $seen_ids = $args{seen_ids} || {}; + PTDEBUG && _d("Removing duplicates nodes from ", join(" ", map { $_->name } @cxns)); + my @trimmed_cxns; + + for my $cxn ( @cxns ) { + my $id = $cxn->get_id(); + PTDEBUG && _d('Server ID for ', $cxn->name, ': ', $id); + + if ( ! 
$seen_ids->{$id}++ ) { + push @trimmed_cxns, $cxn + } + else { + PTDEBUG && _d("Removing ", $cxn->name, + ", ID ", $id, ", because we've already seen it"); + } + } + return \@trimmed_cxns; +} + +sub same_cluster { + my ($self, $cxn1, $cxn2) = @_; + + return 0 if !$self->is_cluster_node($cxn1) || !$self->is_cluster_node($cxn2); + + my $cluster1 = $self->get_cluster_name($cxn1); + my $cluster2 = $self->get_cluster_name($cxn2); + + return ($cluster1 || '') eq ($cluster2 || ''); +} + +sub autodetect_nodes { + my ($self, %args) = @_; + my $ms = $args{MasterSlave}; + my $dp = $args{DSNParser}; + my $make_cxn = $args{make_cxn}; + my $nodes = $args{nodes}; + my $seen_ids = $args{seen_ids}; + + my $new_nodes = []; + + return $new_nodes unless @$nodes; + + for my $node ( @$nodes ) { + my $nodes_found = $self->find_cluster_nodes( + dbh => $node->dbh(), + dsn => $node->dsn(), + make_cxn => $make_cxn, + DSNParser => $dp, + ); + push @$new_nodes, @$nodes_found; + } + + $new_nodes = $self->remove_duplicate_cxns( + cxns => $new_nodes, + seen_ids => $seen_ids + ); + + my $new_slaves = []; + foreach my $node (@$new_nodes) { + my $node_slaves = $ms->get_slaves( + dbh => $node->dbh(), + dsn => $node->dsn(), + make_cxn => $make_cxn, + ); + push @$new_slaves, @$node_slaves; + } + + $new_slaves = $self->remove_duplicate_cxns( + cxns => $new_slaves, + seen_ids => $seen_ids + ); + + my @new_slave_nodes = grep { $self->is_cluster_node($_) } @$new_slaves; + + my $slaves_of_slaves = $self->autodetect_nodes( + %args, + nodes => \@new_slave_nodes, + ); + + my @autodetected_nodes = ( @$new_nodes, @$new_slaves, @$slaves_of_slaves ); + return \@autodetected_nodes; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Percona::XtraDB::Cluster package +# ########################################################################### + +# ########################################################################### +# Quoter package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Quoter.pm +# t/lib/Quoter.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Quoter; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +sub new { + my ( $class, %args ) = @_; + return bless {}, $class; +} + +sub quote { + my ( $self, @vals ) = @_; + foreach my $val ( @vals ) { + $val =~ s/`/``/g; + } + return join('.', map { '`' . $_ . 
'`' } @vals); +} + +sub quote_val { + my ( $self, $val, %args ) = @_; + + return 'NULL' unless defined $val; # undef = NULL + return "''" if $val eq ''; # blank string = '' + return $val if $val =~ m/^0x[0-9a-fA-F]+$/ # quote hex data + && !$args{is_char}; # unless is_char is true + + $val =~ s/(['\\])/\\$1/g; + return "'$val'"; +} + +sub split_unquote { + my ( $self, $db_tbl, $default_db ) = @_; + my ( $db, $tbl ) = split(/[.]/, $db_tbl); + if ( !$tbl ) { + $tbl = $db; + $db = $default_db; + } + for ($db, $tbl) { + next unless $_; + s/\A`//; + s/`\z//; + s/``/`/g; + } + + return ($db, $tbl); +} + +sub literal_like { + my ( $self, $like ) = @_; + return unless $like; + $like =~ s/([%_])/\\$1/g; + return "'$like'"; +} + +sub join_quote { + my ( $self, $default_db, $db_tbl ) = @_; + return unless $db_tbl; + my ($db, $tbl) = split(/[.]/, $db_tbl); + if ( !$tbl ) { + $tbl = $db; + $db = $default_db; + } + $db = "`$db`" if $db && $db !~ m/^`/; + $tbl = "`$tbl`" if $tbl && $tbl !~ m/^`/; + return $db ? "$db.$tbl" : $tbl; +} + +sub serialize_list { + my ( $self, @args ) = @_; + PTDEBUG && _d('Serializing', Dumper(\@args)); + return unless @args; + + my @parts; + foreach my $arg ( @args ) { + if ( defined $arg ) { + $arg =~ s/,/\\,/g; # escape commas + $arg =~ s/\\N/\\\\N/g; # escape literal \N + push @parts, $arg; + } + else { + push @parts, '\N'; + } + } + + my $string = join(',', @parts); + PTDEBUG && _d('Serialized: <', $string, '>'); + return $string; +} + +sub deserialize_list { + my ( $self, $string ) = @_; + PTDEBUG && _d('Deserializing <', $string, '>'); + die "Cannot deserialize an undefined string" unless defined $string; + + my @parts; + foreach my $arg ( split(/(?<!\\),/, $string) ) { + if ( $arg eq '\N' ) { + $arg = undef; + } + else { + $arg =~ s/\\,/,/g; + $arg =~ s/\\\\N/\\N/g; + } + push @parts, $arg; + } + + PTDEBUG && _d('Deserialized', Dumper(\@parts)); + return @parts; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Quoter package +# ########################################################################### + +# ########################################################################### +# VersionParser package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/VersionParser.pm +# t/lib/VersionParser.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package VersionParser; + +use Lmo; +use Scalar::Util qw(blessed); +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use overload ( + '""' => "version", + '<=>' => "cmp", + 'cmp' => "cmp", + fallback => 1, +); + +use Carp (); + +our $VERSION = 0.01; + +has major => ( + is => 'ro', + isa => 'Int', + required => 1, +); + +has [qw( minor revision )] => ( + is => 'ro', + isa => 'Num', +); + +has flavor => ( + is => 'ro', + isa => 'Str', + default => sub { 'Unknown' }, +); + +has innodb_version => ( + is => 'ro', + isa => 'Str', + default => sub { 'NO' }, +); + +sub series { + my $self = shift; + return $self->_join_version($self->major, $self->minor); +} + +sub version { + my $self = shift; + return $self->_join_version($self->major, $self->minor, $self->revision); +} + +sub is_in { + my ($self, $target) = @_; + + return $self eq $target; +} + +sub _join_version { + my ($self, @parts) = @_; + + return join ".", map { my $c = $_; $c =~ s/^0\./0/; $c } grep defined, @parts; +} +sub _split_version { + my ($self, $str) = @_; + my @version_parts = map { s/^0(?=\d)/0./; $_ } $str =~ m/(\d+)/g; + return @version_parts[0..2]; +} + +sub normalized_version { + my ( $self ) = @_; + my $result = sprintf('%d%02d%02d', map { $_ || 0 } $self->major, + $self->minor, + $self->revision); + PTDEBUG && _d($self->version, 'normalizes to', $result); + return $result; +} + +sub comment { + my ( $self, $cmd ) = @_; + my $v = $self->normalized_version(); + + return "/*!$v $cmd */" +} + +my @methods = qw(major minor revision); +sub cmp { + my ($left, $right) = @_; + my $right_obj = (blessed($right) && $right->isa(ref($left))) + ? 
$right + : ref($left)->new($right); + + my $retval = 0; + for my $m ( @methods ) { + last unless defined($left->$m) && defined($right_obj->$m); + $retval = $left->$m <=> $right_obj->$m; + last if $retval; + } + return $retval; +} + +sub BUILDARGS { + my $self = shift; + + if ( @_ == 1 ) { + my %args; + if ( blessed($_[0]) && $_[0]->can("selectrow_hashref") ) { + PTDEBUG && _d("VersionParser got a dbh, trying to get the version"); + my $dbh = $_[0]; + local $dbh->{FetchHashKeyName} = 'NAME_lc'; + my $query = eval { + $dbh->selectall_arrayref(q/SHOW VARIABLES LIKE 'version%'/, { Slice => {} }) + }; + if ( $query ) { + $query = { map { $_->{variable_name} => $_->{value} } @$query }; + @args{@methods} = $self->_split_version($query->{version}); + $args{flavor} = delete $query->{version_comment} + if $query->{version_comment}; + } + elsif ( eval { ($query) = $dbh->selectrow_array(q/SELECT VERSION()/) } ) { + @args{@methods} = $self->_split_version($query); + } + else { + Carp::confess("Couldn't get the version from the dbh while " + . "creating a VersionParser object: $@"); + } + $args{innodb_version} = eval { $self->_innodb_version($dbh) }; + } + elsif ( !ref($_[0]) ) { + @args{@methods} = $self->_split_version($_[0]); + } + + for my $method (@methods) { + delete $args{$method} unless defined $args{$method}; + } + @_ = %args if %args; + } + + return $self->SUPER::BUILDARGS(@_); +} + +sub _innodb_version { + my ( $self, $dbh ) = @_; + return unless $dbh; + my $innodb_version = "NO"; + + my ($innodb) = + grep { $_->{engine} =~ m/InnoDB/i } + map { + my %hash; + @hash{ map { lc $_ } keys %$_ } = values %$_; + \%hash; + } + @{ $dbh->selectall_arrayref("SHOW ENGINES", {Slice=>{}}) }; + if ( $innodb ) { + PTDEBUG && _d("InnoDB support:", $innodb->{support}); + if ( $innodb->{support} =~ m/YES|DEFAULT/i ) { + my $vars = $dbh->selectrow_hashref( + "SHOW VARIABLES LIKE 'innodb_version'"); + $innodb_version = !$vars ? "BUILTIN" + : ($vars->{Value} || $vars->{value}); + } + else { + $innodb_version = $innodb->{support}; # probably DISABLED or NO + } + } + + PTDEBUG && _d("InnoDB version:", $innodb_version); + return $innodb_version; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +no Lmo; +1; +} +# ########################################################################### +# End VersionParser package +# ########################################################################### + +# ########################################################################### +# TableParser package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/TableParser.pm +# t/lib/TableParser.t +# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package TableParser; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +local $EVAL_ERROR; +eval { + require Quoter; +}; + +sub new { + my ( $class, %args ) = @_; + my $self = { %args }; + $self->{Quoter} ||= Quoter->new(); + return bless $self, $class; +} + +sub Quoter { shift->{Quoter} } + +sub get_create_table { + my ( $self, $dbh, $db, $tbl ) = @_; + die "I need a dbh parameter" unless $dbh; + die "I need a db parameter" unless $db; + die "I need a tbl parameter" unless $tbl; + my $q = $self->{Quoter}; + + my $new_sql_mode + = q{/*!40101 SET @OLD_SQL_MODE := @@SQL_MODE, } + . q{@@SQL_MODE := '', } + . q{@OLD_QUOTE := @@SQL_QUOTE_SHOW_CREATE, } + . q{@@SQL_QUOTE_SHOW_CREATE := 1 */}; + + my $old_sql_mode + = q{/*!40101 SET @@SQL_MODE := @OLD_SQL_MODE, } + . q{@@SQL_QUOTE_SHOW_CREATE := @OLD_QUOTE */}; + + PTDEBUG && _d($new_sql_mode); + eval { $dbh->do($new_sql_mode); }; + PTDEBUG && $EVAL_ERROR && _d($EVAL_ERROR); + + my $use_sql = 'USE ' . $q->quote($db); + PTDEBUG && _d($dbh, $use_sql); + $dbh->do($use_sql); + + my $show_sql = "SHOW CREATE TABLE " . $q->quote($db, $tbl); + PTDEBUG && _d($show_sql); + my $href; + eval { $href = $dbh->selectrow_hashref($show_sql); }; + if ( my $e = $EVAL_ERROR ) { + PTDEBUG && _d($old_sql_mode); + $dbh->do($old_sql_mode); + + die $e; + } + + PTDEBUG && _d($old_sql_mode); + $dbh->do($old_sql_mode); + + my ($key) = grep { m/create (?:table|view)/i } keys %$href; + if ( !$key ) { + die "Error: no 'Create Table' or 'Create View' in result set from " + . "$show_sql: " . Dumper($href); + } + + return $href->{$key}; +} + +sub parse { + my ( $self, $ddl, $opts ) = @_; + return unless $ddl; + + if ( $ddl =~ m/CREATE (?:TEMPORARY )?TABLE "/ ) { + $ddl = $self->ansi_to_legacy($ddl); + } + elsif ( $ddl !~ m/CREATE (?:TEMPORARY )?TABLE `/ ) { + die "TableParser doesn't handle CREATE TABLE without quoting."; + } + + my ($name) = $ddl =~ m/CREATE (?:TEMPORARY )?TABLE\s+(`.+?`)/; + (undef, $name) = $self->{Quoter}->split_unquote($name) if $name; + + $ddl =~ s/(`[^`\n]+`)/\L$1/gm; + + my $engine = $self->get_engine($ddl); + + my @defs = $ddl =~ m/^(\s+`.*?),?$/gm; + my @cols = map { $_ =~ m/`([^`]+)`/ } @defs; + PTDEBUG && _d('Table cols:', join(', ', map { "`$_`" } @cols)); + + my %def_for; + @def_for{@cols} = @defs; + + my (@nums, @null, @non_generated); + my (%type_for, %is_nullable, %is_numeric, %is_autoinc, %is_generated); + foreach my $col ( @cols ) { + my $def = $def_for{$col}; + + $def =~ s/``//g; + + my ( $type ) = $def =~ m/`[^`]+`\s([a-z]+)/; + die "Can't determine column type for $def" unless $type; + $type_for{$col} = $type; + if ( $type =~ m/(?:(?:tiny|big|medium|small)?int|float|double|decimal|year)/ ) { + push @nums, $col; + $is_numeric{$col} = 1; + } + if ( $def !~ m/NOT NULL/ ) { + push @null, $col; + $is_nullable{$col} = 1; + } + if ( remove_quoted_text($def) =~ m/\WGENERATED\W/i ) { + $is_generated{$col} = 1; + } else { + push @non_generated, $col; + } + $is_autoinc{$col} = $def =~ m/AUTO_INCREMENT/i ? 
1 : 0; + } + + my ($keys, $clustered_key) = $self->get_keys($ddl, $opts, \%is_nullable); + + my ($charset) = $ddl =~ m/DEFAULT CHARSET=(\w+)/; + + return { + name => $name, + cols => \@cols, + col_posn => { map { $cols[$_] => $_ } 0..$#cols }, + is_col => { map { $_ => 1 } @non_generated }, + null_cols => \@null, + is_nullable => \%is_nullable, + non_generated_cols => \@non_generated, + is_autoinc => \%is_autoinc, + is_generated => \%is_generated, + clustered_key => $clustered_key, + keys => $keys, + defs => \%def_for, + numeric_cols => \@nums, + is_numeric => \%is_numeric, + engine => $engine, + type_for => \%type_for, + charset => $charset, + }; +} + +sub remove_quoted_text { + my ($string) = @_; + $string =~ s/\\['"]//g; + $string =~ s/`[^`]*?`//g; + $string =~ s/"[^"]*?"//g; + $string =~ s/'[^']*?'//g; + return $string; +} + +sub sort_indexes { + my ( $self, $tbl ) = @_; + + my @indexes + = sort { + (($a ne 'PRIMARY') <=> ($b ne 'PRIMARY')) + || ( !$tbl->{keys}->{$a}->{is_unique} <=> !$tbl->{keys}->{$b}->{is_unique} ) + || ( $tbl->{keys}->{$a}->{is_nullable} <=> $tbl->{keys}->{$b}->{is_nullable} ) + || ( scalar(@{$tbl->{keys}->{$a}->{cols}}) <=> scalar(@{$tbl->{keys}->{$b}->{cols}}) ) + } + grep { + $tbl->{keys}->{$_}->{type} eq 'BTREE' + } + sort keys %{$tbl->{keys}}; + + PTDEBUG && _d('Indexes sorted best-first:', join(', ', @indexes)); + return @indexes; +} + +sub find_best_index { + my ( $self, $tbl, $index ) = @_; + my $best; + if ( $index ) { + ($best) = grep { uc $_ eq uc $index } keys %{$tbl->{keys}}; + } + if ( !$best ) { + if ( $index ) { + die "Index '$index' does not exist in table"; + } + else { + ($best) = $self->sort_indexes($tbl); + } + } + PTDEBUG && _d('Best index found is', $best); + return $best; +} + +sub find_possible_keys { + my ( $self, $dbh, $database, $table, $quoter, $where ) = @_; + return () unless $where; + my $sql = 'EXPLAIN SELECT * FROM ' . $quoter->quote($database, $table) + . ' WHERE ' . $where; + PTDEBUG && _d($sql); + my $expl = $dbh->selectrow_hashref($sql); + $expl = { map { lc($_) => $expl->{$_} } keys %$expl }; + if ( $expl->{possible_keys} ) { + PTDEBUG && _d('possible_keys =', $expl->{possible_keys}); + my @candidates = split(',', $expl->{possible_keys}); + my %possible = map { $_ => 1 } @candidates; + if ( $expl->{key} ) { + PTDEBUG && _d('MySQL chose', $expl->{key}); + unshift @candidates, grep { $possible{$_} } split(',', $expl->{key}); + PTDEBUG && _d('Before deduping:', join(', ', @candidates)); + my %seen; + @candidates = grep { !$seen{$_}++ } @candidates; + } + PTDEBUG && _d('Final list:', join(', ', @candidates)); + return @candidates; + } + else { + PTDEBUG && _d('No keys in possible_keys'); + return (); + } +} + +sub check_table { + my ( $self, %args ) = @_; + my @required_args = qw(dbh db tbl); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $db, $tbl) = @args{@required_args}; + my $q = $self->{Quoter} || 'Quoter'; + my $db_tbl = $q->quote($db, $tbl); + PTDEBUG && _d('Checking', $db_tbl); + + $self->{check_table_error} = undef; + + my $sql = "SHOW TABLES FROM " . $q->quote($db) + . ' LIKE ' . 
$q->literal_like($tbl); + PTDEBUG && _d($sql); + my $row; + eval { + $row = $dbh->selectrow_arrayref($sql); + }; + if ( my $e = $EVAL_ERROR ) { + PTDEBUG && _d($e); + $self->{check_table_error} = $e; + return 0; + } + if ( !$row->[0] || $row->[0] ne $tbl ) { + PTDEBUG && _d('Table does not exist'); + return 0; + } + + PTDEBUG && _d('Table', $db, $tbl, 'exists'); + return 1; + +} + +sub get_engine { + my ( $self, $ddl, $opts ) = @_; + my ( $engine ) = $ddl =~ m/\).*?(?:ENGINE|TYPE)=(\w+)/; + PTDEBUG && _d('Storage engine:', $engine); + return $engine || undef; +} + +sub get_keys { + my ( $self, $ddl, $opts, $is_nullable ) = @_; + my $engine = $self->get_engine($ddl); + my $keys = {}; + my $clustered_key = undef; + + KEY: + foreach my $key ( $ddl =~ m/^ ((?:[A-Z]+ )?KEY .*)$/gm ) { + + next KEY if $key =~ m/FOREIGN/; + + my $key_ddl = $key; + PTDEBUG && _d('Parsed key:', $key_ddl); + + if ( !$engine || $engine !~ m/MEMORY|HEAP/ ) { + $key =~ s/USING HASH/USING BTREE/; + } + + my ( $type, $cols ) = $key =~ m/(?:USING (\w+))? \((.+)\)/; + my ( $special ) = $key =~ m/(FULLTEXT|SPATIAL)/; + $type = $type || $special || 'BTREE'; + my ($name) = $key =~ m/(PRIMARY|`[^`]*`)/; + my $unique = $key =~ m/PRIMARY|UNIQUE/ ? 1 : 0; + my @cols; + my @col_prefixes; + foreach my $col_def ( $cols =~ m/`[^`]+`(?:\(\d+\))?/g ) { + my ($name, $prefix) = $col_def =~ m/`([^`]+)`(?:\((\d+)\))?/; + push @cols, $name; + push @col_prefixes, $prefix; + } + $name =~ s/`//g; + + PTDEBUG && _d( $name, 'key cols:', join(', ', map { "`$_`" } @cols)); + + $keys->{$name} = { + name => $name, + type => $type, + colnames => $cols, + cols => \@cols, + col_prefixes => \@col_prefixes, + is_unique => $unique, + is_nullable => scalar(grep { $is_nullable->{$_} } @cols), + is_col => { map { $_ => 1 } @cols }, + ddl => $key_ddl, + }; + + if ( ($engine || '') =~ m/InnoDB/i && !$clustered_key ) { + my $this_key = $keys->{$name}; + if ( $this_key->{name} eq 'PRIMARY' ) { + $clustered_key = 'PRIMARY'; + } + elsif ( $this_key->{is_unique} && !$this_key->{is_nullable} ) { + $clustered_key = $this_key->{name}; + } + PTDEBUG && $clustered_key && _d('This key is the clustered key'); + } + } + + return $keys, $clustered_key; +} + +sub get_fks { + my ( $self, $ddl, $opts ) = @_; + my $q = $self->{Quoter}; + my $fks = {}; + + foreach my $fk ( + $ddl =~ m/CONSTRAINT .* FOREIGN KEY .* REFERENCES [^\)]*\)/mg ) + { + my ( $name ) = $fk =~ m/CONSTRAINT `(.*?)`/; + my ( $cols ) = $fk =~ m/FOREIGN KEY \(([^\)]+)\)/; + my ( $parent, $parent_cols ) = $fk =~ m/REFERENCES (\S+) \(([^\)]+)\)/; + + my ($db, $tbl) = $q->split_unquote($parent, $opts->{database}); + my %parent_tbl = (tbl => $tbl); + $parent_tbl{db} = $db if $db; + + if ( $parent !~ m/\./ && $opts->{database} ) { + $parent = $q->quote($opts->{database}) . ".$parent"; + } + + $fks->{$name} = { + name => $name, + colnames => $cols, + cols => [ map { s/[ `]+//g; $_; } split(',', $cols) ], + parent_tbl => \%parent_tbl, + parent_tblname => $parent, + parent_cols => [ map { s/[ `]+//g; $_; } split(',', $parent_cols) ], + parent_colnames=> $parent_cols, + ddl => $fk, + }; + } + + return $fks; +} + +sub remove_auto_increment { + my ( $self, $ddl ) = @_; + $ddl =~ s/(^\).*?) AUTO_INCREMENT=\d+\b/$1/m; + return $ddl; +} + +sub get_table_status { + my ( $self, $dbh, $db, $like ) = @_; + my $q = $self->{Quoter}; + my $sql = "SHOW TABLE STATUS FROM " . 
$q->quote($db); + my @params; + if ( $like ) { + $sql .= ' LIKE ?'; + push @params, $like; + } + PTDEBUG && _d($sql, @params); + my $sth = $dbh->prepare($sql); + eval { $sth->execute(@params); }; + if ($EVAL_ERROR) { + PTDEBUG && _d($EVAL_ERROR); + return; + } + my @tables = @{$sth->fetchall_arrayref({})}; + @tables = map { + my %tbl; # Make a copy with lowercased keys + @tbl{ map { lc $_ } keys %$_ } = values %$_; + $tbl{engine} ||= $tbl{type} || $tbl{comment}; + delete $tbl{type}; + \%tbl; + } @tables; + return @tables; +} + +my $ansi_quote_re = qr/" [^"]* (?: "" [^"]* )* (?<=.) "/ismx; +sub ansi_to_legacy { + my ($self, $ddl) = @_; + $ddl =~ s/($ansi_quote_re)/ansi_quote_replace($1)/ge; + return $ddl; +} + +sub ansi_quote_replace { + my ($val) = @_; + $val =~ s/^"|"$//g; + $val =~ s/`/``/g; + $val =~ s/""/"/g; + return "`$val`"; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End TableParser package +# ########################################################################### + +# ########################################################################### +# TableNibbler package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/TableNibbler.pm +# t/lib/TableNibbler.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package TableNibbler; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(TableParser Quoter); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $self = { %args }; + return bless $self, $class; +} + +sub generate_asc_stmt { + my ( $self, %args ) = @_; + my @required_args = qw(tbl_struct index); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my ($tbl_struct, $index) = @args{@required_args}; + my @cols = $args{cols} ? 
@{$args{cols}} : @{$tbl_struct->{cols}}; + my $q = $self->{Quoter}; + + die "Index '$index' does not exist in table" + unless exists $tbl_struct->{keys}->{$index}; + PTDEBUG && _d('Will ascend index', $index); + + my @asc_cols = @{$tbl_struct->{keys}->{$index}->{cols}}; + if ( $args{asc_first} ) { + PTDEBUG && _d('Ascending only first column'); + @asc_cols = $asc_cols[0]; + } + elsif ( my $n = $args{n_index_cols} ) { + $n = scalar @asc_cols if $n > @asc_cols; + PTDEBUG && _d('Ascending only first', $n, 'columns'); + @asc_cols = @asc_cols[0..($n-1)]; + } + PTDEBUG && _d('Will ascend columns', join(', ', @asc_cols)); + + my @asc_slice; + my %col_posn = do { my $i = 0; map { $_ => $i++ } @cols }; + foreach my $col ( @asc_cols ) { + if ( !exists $col_posn{$col} ) { + push @cols, $col; + $col_posn{$col} = $#cols; + } + push @asc_slice, $col_posn{$col}; + } + PTDEBUG && _d('Will ascend, in ordinal position:', join(', ', @asc_slice)); + + my $asc_stmt = { + cols => \@cols, + index => $index, + where => '', + slice => [], + scols => [], + }; + + if ( @asc_slice ) { + my $cmp_where; + foreach my $cmp ( qw(< <= >= >) ) { + $cmp_where = $self->generate_cmp_where( + type => $cmp, + slice => \@asc_slice, + cols => \@cols, + quoter => $q, + is_nullable => $tbl_struct->{is_nullable}, + type_for => $tbl_struct->{type_for}, + ); + $asc_stmt->{boundaries}->{$cmp} = $cmp_where->{where}; + } + my $cmp = $args{asc_only} ? '>' : '>='; + $asc_stmt->{where} = $asc_stmt->{boundaries}->{$cmp}; + $asc_stmt->{slice} = $cmp_where->{slice}; + $asc_stmt->{scols} = $cmp_where->{scols}; + } + + return $asc_stmt; +} + +sub generate_cmp_where { + my ( $self, %args ) = @_; + foreach my $arg ( qw(type slice cols is_nullable) ) { + die "I need a $arg arg" unless defined $args{$arg}; + } + my @slice = @{$args{slice}}; + my @cols = @{$args{cols}}; + my $is_nullable = $args{is_nullable}; + my $type_for = $args{type_for}; + my $type = $args{type}; + my $q = $self->{Quoter}; + + (my $cmp = $type) =~ s/=//; + + my @r_slice; # Resulting slice columns, by ordinal + my @r_scols; # Ditto, by name + + my @clauses; + foreach my $i ( 0 .. $#slice ) { + my @clause; + + foreach my $j ( 0 .. $i - 1 ) { + my $ord = $slice[$j]; + my $col = $cols[$ord]; + my $quo = $q->quote($col); + my $val = ($col && ($type_for->{$col} || '')) eq 'enum' ? "CAST(? AS UNSIGNED)" : "?"; + if ( $is_nullable->{$col} ) { + push @clause, "(($val IS NULL AND $quo IS NULL) OR ($quo = $val))"; + push @r_slice, $ord, $ord; + push @r_scols, $col, $col; + } + else { + push @clause, "$quo = $val"; + push @r_slice, $ord; + push @r_scols, $col; + } + } + + my $ord = $slice[$i]; + my $col = $cols[$ord]; + my $quo = $q->quote($col); + my $end = $i == $#slice; # Last clause of the whole group. + my $val = ($col && ($type_for->{$col} || '')) eq 'enum' ? "CAST(? AS UNSIGNED)" : "?"; + if ( $is_nullable->{$col} ) { + if ( $type =~ m/=/ && $end ) { + push @clause, "($val IS NULL OR $quo $type $val)"; + } + elsif ( $type =~ m/>/ ) { + push @clause, "($val IS NULL AND $quo IS NOT NULL) OR ($quo $cmp $val)"; + } + else { # $type is a less-than comparison + push @clause, "(($val IS NULL AND $quo IS NOT NULL) OR ($quo $cmp $val))"; + } + push @r_slice, $ord, $ord; + push @r_scols, $col, $col; + } + else { + push @clause, ($type =~ m/=/ && $end ? "$quo $type $val" : "$quo $cmp $val"); + push @r_slice, $ord; + push @r_scols, $col; + } + + push @clauses, '(' . join(' AND ', @clause) . ')'; + } + my $result = '(' . join(' OR ', @clauses) . ')'; + my $where = { + slice => \@r_slice, + scols => \@r_scols, + where => $result, + }; + return $where; +} + +sub generate_del_stmt { + my ( $self, %args ) = @_; + + my $tbl = $args{tbl_struct}; + my @cols = $args{cols} ?
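+ # Note: generate_del_stmt() builds the WHERE template that identifies exactly one row for DELETE: it prefers the columns of a unique index chosen via find_best_index(), and falls back to all columns when no unique index exists; nullable columns get the "(? IS NULL AND col IS NULL) OR (col = ?)" form.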
@{$args{cols}} : (); + my $tp = $self->{TableParser}; + my $q = $self->{Quoter}; + + my @del_cols; + my @del_slice; + + my $index = $tp->find_best_index($tbl, $args{index}); + die "Cannot find an ascendable index in table" unless $index; + + if ( $index && $tbl->{keys}->{$index}->{is_unique}) { + @del_cols = @{$tbl->{keys}->{$index}->{cols}}; + } + else { + @del_cols = @{$tbl->{cols}}; + } + PTDEBUG && _d('Columns needed for DELETE:', join(', ', @del_cols)); + + my %col_posn = do { my $i = 0; map { $_ => $i++ } @cols }; + foreach my $col ( @del_cols ) { + if ( !exists $col_posn{$col} ) { + push @cols, $col; + $col_posn{$col} = $#cols; + } + push @del_slice, $col_posn{$col}; + } + PTDEBUG && _d('Ordinals needed for DELETE:', join(', ', @del_slice)); + + my $del_stmt = { + cols => \@cols, + index => $index, + where => '', + slice => [], + scols => [], + }; + + my @clauses; + foreach my $i ( 0 .. $#del_slice ) { + my $ord = $del_slice[$i]; + my $col = $cols[$ord]; + my $quo = $q->quote($col); + if ( $tbl->{is_nullable}->{$col} ) { + push @clauses, "((? IS NULL AND $quo IS NULL) OR ($quo = ?))"; + push @{$del_stmt->{slice}}, $ord, $ord; + push @{$del_stmt->{scols}}, $col, $col; + } + else { + push @clauses, "$quo = ?"; + push @{$del_stmt->{slice}}, $ord; + push @{$del_stmt->{scols}}, $col; + } + } + + $del_stmt->{where} = '(' . join(' AND ', @clauses) . ')'; + + return $del_stmt; +} + +sub generate_ins_stmt { + my ( $self, %args ) = @_; + foreach my $arg ( qw(ins_tbl sel_cols) ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $ins_tbl = $args{ins_tbl}; + my @sel_cols = @{$args{sel_cols}}; + + die "You didn't specify any SELECT columns" unless @sel_cols; + + my @ins_cols; + my @ins_slice; + for my $i ( 0..$#sel_cols ) { + next unless $ins_tbl->{is_col}->{$sel_cols[$i]}; + push @ins_cols, $sel_cols[$i]; + push @ins_slice, $i; + } + + return { + cols => \@ins_cols, + slice => \@ins_slice, + }; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End TableNibbler package +# ########################################################################### + +# ########################################################################### +# MasterSlave package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/MasterSlave.pm +# t/lib/MasterSlave.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package MasterSlave; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +sub check_recursion_method { + my ($methods) = @_; + if ( @$methods != 1 ) { + if ( grep({ !m/processlist|hosts/i } @$methods) + && $methods->[0] !~ /^dsn=/i ) + { + die "Invalid combination of recursion methods: " + . join(", ", map { defined($_) ? $_ : 'undef' } @$methods) . ". " + . "Only hosts and processlist may be combined.\n" + } + } + else { + my ($method) = @$methods; + die "Invalid recursion method: " . 
( $method || 'undef' ) + unless $method && $method =~ m/^(?:processlist$|hosts$|none$|cluster$|dsn=)/i; + } +} + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(OptionParser DSNParser Quoter); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $self = { + %args, + replication_thread => {}, + }; + return bless $self, $class; +} + +sub get_slaves { + my ($self, %args) = @_; + my @required_args = qw(make_cxn); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($make_cxn) = @args{@required_args}; + + my $slaves = []; + my $dp = $self->{DSNParser}; + my $methods = $self->_resolve_recursion_methods($args{dsn}); + + return $slaves unless @$methods; + + if ( grep { m/processlist|hosts/i } @$methods ) { + my @required_args = qw(dbh dsn); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $dsn) = @args{@required_args}; + my $o = $self->{OptionParser}; + + $self->recurse_to_slaves( + { dbh => $dbh, + dsn => $dsn, + slave_user => $o->got('slave-user') ? $o->get('slave-user') : '', + slave_password => $o->got('slave-password') ? $o->get('slave-password') : '', + callback => sub { + my ( $dsn, $dbh, $level, $parent ) = @_; + return unless $level; + PTDEBUG && _d('Found slave:', $dp->as_string($dsn)); + my $slave_dsn = $dsn; + if ($o->got('slave-user')) { + $slave_dsn->{u} = $o->get('slave-user'); + PTDEBUG && _d("Using slave user ".$o->get('slave-user')." on ".$slave_dsn->{h}.":".$slave_dsn->{P}); + } + if ($o->got('slave-password')) { + $slave_dsn->{p} = $o->get('slave-password'); + PTDEBUG && _d("Slave password set"); + } + push @$slaves, $make_cxn->(dsn => $slave_dsn, dbh => $dbh); + return; + }, + } + ); + } elsif ( $methods->[0] =~ m/^dsn=/i ) { + (my $dsn_table_dsn = join ",", @$methods) =~ s/^dsn=//i; + $slaves = $self->get_cxn_from_dsn_table( + %args, + dsn_table_dsn => $dsn_table_dsn, + ); + } + elsif ( $methods->[0] =~ m/none/i ) { + PTDEBUG && _d('Not getting to slaves'); + } + else { + die "Unexpected recursion methods: @$methods"; + } + + return $slaves; +} + +sub _resolve_recursion_methods { + my ($self, $dsn) = @_; + my $o = $self->{OptionParser}; + if ( $o->got('recursion-method') ) { + return $o->get('recursion-method'); + } + elsif ( $dsn && ($dsn->{P} || 3306) != 3306 ) { + PTDEBUG && _d('Port number is non-standard; using only hosts method'); + return [qw(hosts)]; + } + else { + return $o->get('recursion-method'); + } +} + +sub recurse_to_slaves { + my ( $self, $args, $level ) = @_; + $level ||= 0; + my $dp = $self->{DSNParser}; + my $recurse = $args->{recurse} || $self->{OptionParser}->get('recurse'); + my $dsn = $args->{dsn}; + my $slave_user = $args->{slave_user} || ''; + my $slave_password = $args->{slave_password} || ''; + + my $methods = $self->_resolve_recursion_methods($dsn); + PTDEBUG && _d('Recursion methods:', @$methods); + if ( lc($methods->[0]) eq 'none' ) { + PTDEBUG && _d('Not recursing to slaves'); + return; + } + + my $slave_dsn = $dsn; + if ($slave_user) { + $slave_dsn->{u} = $slave_user; + PTDEBUG && _d("Using slave user $slave_user on ".$slave_dsn->{h}.":".$slave_dsn->{P}); + } + if ($slave_password) { + $slave_dsn->{p} = $slave_password; + PTDEBUG && _d("Slave password set"); + } + + my $dbh; + eval { + $dbh = $args->{dbh} || $dp->get_dbh( + $dp->get_cxn_params($slave_dsn), { AutoCommit => 1 }); + PTDEBUG && _d('Connected to', $dp->as_string($slave_dsn)); + }; + if ( $EVAL_ERROR ) { + print STDERR 
"Cannot connect to ", $dp->as_string($slave_dsn), "\n" + or die "Cannot print: $OS_ERROR"; + return; + } + + my $sql = 'SELECT @@SERVER_ID'; + PTDEBUG && _d($sql); + my ($id) = $dbh->selectrow_array($sql); + PTDEBUG && _d('Working on server ID', $id); + my $master_thinks_i_am = $dsn->{server_id}; + if ( !defined $id + || ( defined $master_thinks_i_am && $master_thinks_i_am != $id ) + || $args->{server_ids_seen}->{$id}++ + ) { + PTDEBUG && _d('Server ID seen, or not what master said'); + if ( $args->{skip_callback} ) { + $args->{skip_callback}->($dsn, $dbh, $level, $args->{parent}); + } + return; + } + + $args->{callback}->($dsn, $dbh, $level, $args->{parent}); + + if ( !defined $recurse || $level < $recurse ) { + + my @slaves = + grep { !$_->{master_id} || $_->{master_id} == $id } # Only my slaves. + $self->find_slave_hosts($dp, $dbh, $dsn, $methods); + + foreach my $slave ( @slaves ) { + PTDEBUG && _d('Recursing from', + $dp->as_string($dsn), 'to', $dp->as_string($slave)); + $self->recurse_to_slaves( + { %$args, dsn => $slave, dbh => undef, parent => $dsn, slave_user => $slave_user, $slave_password => $slave_password }, $level + 1 ); + } + } +} + +sub find_slave_hosts { + my ( $self, $dsn_parser, $dbh, $dsn, $methods ) = @_; + + PTDEBUG && _d('Looking for slaves on', $dsn_parser->as_string($dsn), + 'using methods', @$methods); + + my @slaves; + METHOD: + foreach my $method ( @$methods ) { + my $find_slaves = "_find_slaves_by_$method"; + PTDEBUG && _d('Finding slaves with', $find_slaves); + @slaves = $self->$find_slaves($dsn_parser, $dbh, $dsn); + last METHOD if @slaves; + } + + PTDEBUG && _d('Found', scalar(@slaves), 'slaves'); + return @slaves; +} + +sub _find_slaves_by_processlist { + my ( $self, $dsn_parser, $dbh, $dsn ) = @_; + my @connected_slaves = $self->get_connected_slaves($dbh); + my @slaves = $self->_process_slaves_list($dsn_parser, $dsn, \@connected_slaves); + return @slaves; +} + +sub _process_slaves_list { + my ($self, $dsn_parser, $dsn, $connected_slaves) = @_; + my @slaves = map { + my $slave = $dsn_parser->parse("h=$_", $dsn); + $slave->{source} = 'processlist'; + $slave; + } + grep { $_ } + map { + my ( $host ) = $_->{host} =~ m/^(.*):\d+$/; + if ( $host eq 'localhost' ) { + $host = '127.0.0.1'; # Replication never uses sockets. + } + if ($host =~ m/::/) { + $host = '['.$host.']'; + } + $host; + } @$connected_slaves; + + return @slaves; +} + +sub _find_slaves_by_hosts { + my ( $self, $dsn_parser, $dbh, $dsn ) = @_; + + my @slaves; + my $sql = 'SHOW SLAVE HOSTS'; + PTDEBUG && _d($dbh, $sql); + @slaves = @{$dbh->selectall_arrayref($sql, { Slice => {} })}; + + if ( @slaves ) { + PTDEBUG && _d('Found some SHOW SLAVE HOSTS info'); + @slaves = map { + my %hash; + @hash{ map { lc $_ } keys %$_ } = values %$_; + my $spec = "h=$hash{host},P=$hash{port}" + . ( $hash{user} ? ",u=$hash{user}" : '') + . ( $hash{password} ? ",p=$hash{password}" : ''); + my $dsn = $dsn_parser->parse($spec, $dsn); + $dsn->{server_id} = $hash{server_id}; + $dsn->{master_id} = $hash{master_id}; + $dsn->{source} = 'hosts'; + $dsn; + } @slaves; + } + + return @slaves; +} + +sub get_connected_slaves { + my ( $self, $dbh ) = @_; + + my $show = "SHOW GRANTS FOR "; + my $user = 'CURRENT_USER()'; + my $sql = $show . 
$user; + PTDEBUG && _d($dbh, $sql); + + my $proc; + eval { + $proc = grep { + m/ALL PRIVILEGES.*?\*\.\*|PROCESS/ + } @{$dbh->selectcol_arrayref($sql)}; + }; + if ( $EVAL_ERROR ) { + + if ( $EVAL_ERROR =~ m/no such grant defined for user/ ) { + PTDEBUG && _d('Retrying SHOW GRANTS without host; error:', + $EVAL_ERROR); + ($user) = split('@', $user); + $sql = $show . $user; + PTDEBUG && _d($sql); + eval { + $proc = grep { + m/ALL PRIVILEGES.*?\*\.\*|PROCESS/ + } @{$dbh->selectcol_arrayref($sql)}; + }; + } + + die "Failed to $sql: $EVAL_ERROR" if $EVAL_ERROR; + } + if ( !$proc ) { + die "You do not have the PROCESS privilege"; + } + + $sql = 'SHOW FULL PROCESSLIST'; + PTDEBUG && _d($dbh, $sql); + grep { $_->{command} =~ m/Binlog Dump/i } + map { # Lowercase the column names + my %hash; + @hash{ map { lc $_ } keys %$_ } = values %$_; + \%hash; + } + @{$dbh->selectall_arrayref($sql, { Slice => {} })}; +} + +sub is_master_of { + my ( $self, $master, $slave ) = @_; + my $master_status = $self->get_master_status($master) + or die "The server specified as a master is not a master"; + my $slave_status = $self->get_slave_status($slave) + or die "The server specified as a slave is not a slave"; + my @connected = $self->get_connected_slaves($master) + or die "The server specified as a master has no connected slaves"; + my (undef, $port) = $master->selectrow_array("SHOW VARIABLES LIKE 'port'"); + + if ( $port != $slave_status->{master_port} ) { + die "The slave is connected to $slave_status->{master_port} " + . "but the master's port is $port"; + } + + if ( !grep { $slave_status->{master_user} eq $_->{user} } @connected ) { + die "I don't see any slave I/O thread connected with user " + . $slave_status->{master_user}; + } + + if ( ($slave_status->{slave_io_state} || '') + eq 'Waiting for master to send event' ) + { + my ( $master_log_name, $master_log_num ) + = $master_status->{file} =~ m/^(.*?)\.0*([1-9][0-9]*)$/; + my ( $slave_log_name, $slave_log_num ) + = $slave_status->{master_log_file} =~ m/^(.*?)\.0*([1-9][0-9]*)$/; + if ( $master_log_name ne $slave_log_name + || abs($master_log_num - $slave_log_num) > 1 ) + { + die "The slave thinks it is reading from " + . "$slave_status->{master_log_file}, but the " + . 
"master is writing to $master_status->{file}"; + } + } + return 1; +} + +sub get_master_dsn { + my ( $self, $dbh, $dsn, $dsn_parser ) = @_; + my $master = $self->get_slave_status($dbh) or return undef; + my $spec = "h=$master->{master_host},P=$master->{master_port}"; + return $dsn_parser->parse($spec, $dsn); +} + +sub get_slave_status { + my ( $self, $dbh ) = @_; + + if ( !$self->{not_a_slave}->{$dbh} ) { + my $sth = $self->{sths}->{$dbh}->{SLAVE_STATUS} + ||= $dbh->prepare('SHOW SLAVE STATUS'); + PTDEBUG && _d($dbh, 'SHOW SLAVE STATUS'); + $sth->execute(); + my ($sss_rows) = $sth->fetchall_arrayref({}); # Show Slave Status rows + + my $ss; + if ( $sss_rows && @$sss_rows ) { + if (scalar @$sss_rows > 1) { + if (!$self->{channel}) { + die 'This server returned more than one row for SHOW SLAVE STATUS but "channel" was not specified on the command line'; + } + my $slave_use_channels; + for my $row (@$sss_rows) { + $row = { map { lc($_) => $row->{$_} } keys %$row }; # lowercase the keys + if ($row->{channel_name}) { + $slave_use_channels = 1; + } + if ($row->{channel_name} eq $self->{channel}) { + $ss = $row; + last; + } + } + if (!$ss && $slave_use_channels) { + die 'This server is using replication channels but "channel" was not specified on the command line'; + } + } else { + if ($sss_rows->[0]->{channel_name} && $sss_rows->[0]->{channel_name} ne $self->{channel}) { + die 'This server is using replication channels but "channel" was not specified on the command line'; + } else { + $ss = $sss_rows->[0]; + } + } + + if ( $ss && %$ss ) { + $ss = { map { lc($_) => $ss->{$_} } keys %$ss }; # lowercase the keys + return $ss; + } + if (!$ss && $self->{channel}) { + die "Specified channel name is invalid"; + } + } + + PTDEBUG && _d('This server returns nothing for SHOW SLAVE STATUS'); + $self->{not_a_slave}->{$dbh}++; + } +} + +sub get_master_status { + my ( $self, $dbh ) = @_; + + if ( $self->{not_a_master}->{$dbh} ) { + PTDEBUG && _d('Server on dbh', $dbh, 'is not a master'); + return; + } + + my $sth = $self->{sths}->{$dbh}->{MASTER_STATUS} + ||= $dbh->prepare('SHOW MASTER STATUS'); + PTDEBUG && _d($dbh, 'SHOW MASTER STATUS'); + $sth->execute(); + my ($ms) = @{$sth->fetchall_arrayref({})}; + PTDEBUG && _d( + $ms ? map { "$_=" . (defined $ms->{$_} ? $ms->{$_} : '') } keys %$ms + : ''); + + if ( !$ms || scalar keys %$ms < 2 ) { + PTDEBUG && _d('Server on dbh', $dbh, 'does not seem to be a master'); + $self->{not_a_master}->{$dbh}++; + } + + return { map { lc($_) => $ms->{$_} } keys %$ms }; # lowercase the keys +} + +sub wait_for_master { + my ( $self, %args ) = @_; + my @required_args = qw(master_status slave_dbh); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($master_status, $slave_dbh) = @args{@required_args}; + my $timeout = $args{timeout} || 60; + + my $result; + my $waited; + if ( $master_status ) { + my $slave_status; + eval { + $slave_status = $self->get_slave_status($slave_dbh); + }; + if ($EVAL_ERROR) { + return { + result => undef, + waited => 0, + error =>'Wait for master: this is a multi-master slave but "channel" was not specified on the command line', + }; + } + my $server_version = VersionParser->new($slave_dbh); + my $channel_sql = $server_version > '5.6' && $self->{channel} ? 
", '$self->{channel}'" : ''; + my $sql = "SELECT MASTER_POS_WAIT('$master_status->{file}', $master_status->{position}, $timeout $channel_sql)"; + PTDEBUG && _d($slave_dbh, $sql); + my $start = time; + ($result) = $slave_dbh->selectrow_array($sql); + + $waited = time - $start; + + PTDEBUG && _d('Result of waiting:', $result); + PTDEBUG && _d("Waited", $waited, "seconds"); + } + else { + PTDEBUG && _d('Not waiting: this server is not a master'); + } + + return { + result => $result, + waited => $waited, + }; +} + +sub stop_slave { + my ( $self, $dbh ) = @_; + my $sth = $self->{sths}->{$dbh}->{STOP_SLAVE} + ||= $dbh->prepare('STOP SLAVE'); + PTDEBUG && _d($dbh, $sth->{Statement}); + $sth->execute(); +} + +sub start_slave { + my ( $self, $dbh, $pos ) = @_; + if ( $pos ) { + my $sql = "START SLAVE UNTIL MASTER_LOG_FILE='$pos->{file}', " + . "MASTER_LOG_POS=$pos->{position}"; + PTDEBUG && _d($dbh, $sql); + $dbh->do($sql); + } + else { + my $sth = $self->{sths}->{$dbh}->{START_SLAVE} + ||= $dbh->prepare('START SLAVE'); + PTDEBUG && _d($dbh, $sth->{Statement}); + $sth->execute(); + } +} + +sub catchup_to_master { + my ( $self, $slave, $master, $timeout ) = @_; + $self->stop_slave($master); + $self->stop_slave($slave); + my $slave_status = $self->get_slave_status($slave); + my $slave_pos = $self->repl_posn($slave_status); + my $master_status = $self->get_master_status($master); + my $master_pos = $self->repl_posn($master_status); + PTDEBUG && _d('Master position:', $self->pos_to_string($master_pos), + 'Slave position:', $self->pos_to_string($slave_pos)); + + my $result; + if ( $self->pos_cmp($slave_pos, $master_pos) < 0 ) { + PTDEBUG && _d('Waiting for slave to catch up to master'); + $self->start_slave($slave, $master_pos); + + $result = $self->wait_for_master( + master_status => $master_status, + slave_dbh => $slave, + timeout => $timeout, + master_status => $master_status + ); + if ($result->{error}) { + die $result->{error}; + } + if ( !defined $result->{result} ) { + $slave_status = $self->get_slave_status($slave); + if ( !$self->slave_is_running($slave_status) ) { + PTDEBUG && _d('Master position:', + $self->pos_to_string($master_pos), + 'Slave position:', $self->pos_to_string($slave_pos)); + $slave_pos = $self->repl_posn($slave_status); + if ( $self->pos_cmp($slave_pos, $master_pos) != 0 ) { + die "MASTER_POS_WAIT() returned NULL but slave has not " + . 
"caught up to master"; + } + PTDEBUG && _d('Slave is caught up to master and stopped'); + } + else { + die "Slave has not caught up to master and it is still running"; + } + } + } + else { + PTDEBUG && _d("Slave is already caught up to master"); + } + + return $result; +} + +sub catchup_to_same_pos { + my ( $self, $s1_dbh, $s2_dbh ) = @_; + $self->stop_slave($s1_dbh); + $self->stop_slave($s2_dbh); + my $s1_status = $self->get_slave_status($s1_dbh); + my $s2_status = $self->get_slave_status($s2_dbh); + my $s1_pos = $self->repl_posn($s1_status); + my $s2_pos = $self->repl_posn($s2_status); + if ( $self->pos_cmp($s1_pos, $s2_pos) < 0 ) { + $self->start_slave($s1_dbh, $s2_pos); + } + elsif ( $self->pos_cmp($s2_pos, $s1_pos) < 0 ) { + $self->start_slave($s2_dbh, $s1_pos); + } + + $s1_status = $self->get_slave_status($s1_dbh); + $s2_status = $self->get_slave_status($s2_dbh); + $s1_pos = $self->repl_posn($s1_status); + $s2_pos = $self->repl_posn($s2_status); + + if ( $self->slave_is_running($s1_status) + || $self->slave_is_running($s2_status) + || $self->pos_cmp($s1_pos, $s2_pos) != 0) + { + die "The servers aren't both stopped at the same position"; + } + +} + +sub slave_is_running { + my ( $self, $slave_status ) = @_; + return ($slave_status->{slave_sql_running} || 'No') eq 'Yes'; +} + +sub has_slave_updates { + my ( $self, $dbh ) = @_; + my $sql = q{SHOW VARIABLES LIKE 'log_slave_updates'}; + PTDEBUG && _d($dbh, $sql); + my ($name, $value) = $dbh->selectrow_array($sql); + return $value && $value =~ m/^(1|ON)$/; +} + +sub repl_posn { + my ( $self, $status ) = @_; + if ( exists $status->{file} && exists $status->{position} ) { + return { + file => $status->{file}, + position => $status->{position}, + }; + } + else { + return { + file => $status->{relay_master_log_file}, + position => $status->{exec_master_log_pos}, + }; + } +} + +sub get_slave_lag { + my ( $self, $dbh ) = @_; + my $stat = $self->get_slave_status($dbh); + return unless $stat; # server is not a slave + return $stat->{seconds_behind_master}; +} + +sub pos_cmp { + my ( $self, $a, $b ) = @_; + return $self->pos_to_string($a) cmp $self->pos_to_string($b); +} + +sub short_host { + my ( $self, $dsn ) = @_; + my ($host, $port); + if ( $dsn->{master_host} ) { + $host = $dsn->{master_host}; + $port = $dsn->{master_port}; + } + else { + $host = $dsn->{h}; + $port = $dsn->{P}; + } + return ($host || '[default]') . ( ($port || 3306) == 3306 ? '' : ":$port" ); +} + +sub is_replication_thread { + my ( $self, $query, %args ) = @_; + return unless $query; + + my $type = lc($args{type} || 'all'); + die "Invalid type: $type" + unless $type =~ m/^binlog_dump|slave_io|slave_sql|all$/i; + + my $match = 0; + if ( $type =~ m/binlog_dump|all/i ) { + $match = 1 + if ($query->{Command} || $query->{command} || '') eq "Binlog Dump"; + } + if ( !$match ) { + if ( ($query->{User} || $query->{user} || '') eq "system user" ) { + PTDEBUG && _d("Slave replication thread"); + if ( $type ne 'all' ) { + my $state = $query->{State} || $query->{state} || ''; + + if ( $state =~ m/^init|end$/ ) { + PTDEBUG && _d("Special state:", $state); + $match = 1; + } + else { + my ($slave_sql) = $state =~ m/ + ^(Waiting\sfor\sthe\snext\sevent + |Reading\sevent\sfrom\sthe\srelay\slog + |Has\sread\sall\srelay\slog;\swaiting + |Making\stemp\sfile + |Waiting\sfor\sslave\smutex\son\sexit)/xi; + + $match = $type eq 'slave_sql' && $slave_sql ? 1 + : $type eq 'slave_io' && !$slave_sql ? 
1 + : 0; + } + } + else { + $match = 1; + } + } + else { + PTDEBUG && _d('Not system user'); + } + + if ( !defined $args{check_known_ids} || $args{check_known_ids} ) { + my $id = $query->{Id} || $query->{id}; + if ( $match ) { + $self->{replication_thread}->{$id} = 1; + } + else { + if ( $self->{replication_thread}->{$id} ) { + PTDEBUG && _d("Thread ID is a known replication thread ID"); + $match = 1; + } + } + } + } + + PTDEBUG && _d('Matches', $type, 'replication thread:', + ($match ? 'yes' : 'no'), '; match:', $match); + + return $match; +} + + +sub get_replication_filters { + my ( $self, %args ) = @_; + my @required_args = qw(dbh); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh) = @args{@required_args}; + + my %filters = (); + + my $status = $self->get_master_status($dbh); + if ( $status ) { + map { $filters{$_} = $status->{$_} } + grep { defined $status->{$_} && $status->{$_} ne '' } + qw( + binlog_do_db + binlog_ignore_db + ); + } + + $status = $self->get_slave_status($dbh); + if ( $status ) { + map { $filters{$_} = $status->{$_} } + grep { defined $status->{$_} && $status->{$_} ne '' } + qw( + replicate_do_db + replicate_ignore_db + replicate_do_table + replicate_ignore_table + replicate_wild_do_table + replicate_wild_ignore_table + ); + + my $sql = "SHOW VARIABLES LIKE 'slave_skip_errors'"; + PTDEBUG && _d($dbh, $sql); + my $row = $dbh->selectrow_arrayref($sql); + $filters{slave_skip_errors} = $row->[1] if $row->[1] && $row->[1] ne 'OFF'; + } + + return \%filters; +} + + +sub pos_to_string { + my ( $self, $pos ) = @_; + my $fmt = '%s/%020d'; + return sprintf($fmt, @{$pos}{qw(file position)}); +} + +sub reset_known_replication_threads { + my ( $self ) = @_; + $self->{replication_thread} = {}; + return; +} + +sub get_cxn_from_dsn_table { + my ($self, %args) = @_; + my @required_args = qw(dsn_table_dsn make_cxn); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dsn_table_dsn, $make_cxn) = @args{@required_args}; + PTDEBUG && _d('DSN table DSN:', $dsn_table_dsn); + + my $dp = $self->{DSNParser}; + my $q = $self->{Quoter}; + + my $dsn = $dp->parse($dsn_table_dsn); + my $dsn_table; + if ( $dsn->{D} && $dsn->{t} ) { + $dsn_table = $q->quote($dsn->{D}, $dsn->{t}); + } + elsif ( $dsn->{t} && $dsn->{t} =~ m/\./ ) { + $dsn_table = $q->quote($q->split_unquote($dsn->{t})); + } + else { + die "DSN table DSN does not specify a database (D) " + . "or a database-qualified table (t)"; + } + + my $dsn_tbl_cxn = $make_cxn->(dsn => $dsn); + my $dbh = $dsn_tbl_cxn->connect(); + my $sql = "SELECT dsn FROM $dsn_table ORDER BY id"; + PTDEBUG && _d($sql); + my $dsn_strings = $dbh->selectcol_arrayref($sql); + my @cxn; + if ( $dsn_strings ) { + foreach my $dsn_string ( @$dsn_strings ) { + PTDEBUG && _d('DSN from DSN table:', $dsn_string); + push @cxn, $make_cxn->(dsn_string => $dsn_string); + } + } + return \@cxn; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End MasterSlave package +# ########################################################################### + +# ########################################################################### +# RowChecksum package +# This package is a copy without comments from the original. 
The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/RowChecksum.pm +# t/lib/RowChecksum.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package RowChecksum; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use List::Util qw(max); +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +sub new { + my ( $class, %args ) = @_; + foreach my $arg ( qw(OptionParser Quoter) ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my $self = { %args }; + return bless $self, $class; +} + +sub make_row_checksum { + my ( $self, %args ) = @_; + my @required_args = qw(tbl); + foreach my $arg( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($tbl) = @args{@required_args}; + + my $o = $self->{OptionParser}; + my $q = $self->{Quoter}; + my $tbl_struct = $tbl->{tbl_struct}; + my $func = $args{func} || uc($o->get('function')); + my $cols = $self->get_checksum_columns(%args); + + die "all columns are excluded by --columns or --ignore-columns" + unless @{$cols->{select}}; + + my $query; + if ( !$args{no_cols} ) { + $query = join(', ', + map { + my $col = $_; + if ( $col =~ m/UNIX_TIMESTAMP/ ) { + my ($real_col) = /^UNIX_TIMESTAMP\((.+?)\)/; + $col .= " AS $real_col"; + } + elsif ( $col =~ m/TRIM/ ) { + my ($real_col) = m/TRIM\(([^\)]+)\)/; + $col .= " AS $real_col"; + } + $col; + } @{$cols->{select}}) + . ', '; + } + + if ( uc $func ne 'FNV_64' && uc $func ne 'FNV1A_64' ) { + my $sep = $o->get('separator') || '#'; + $sep =~ s/'//g; + $sep ||= '#'; + + my @converted_cols; + for my $col(@{$cols->{select}}) { + my $colname = $col; + $colname =~ s/`//g; + my $type = $tbl_struct->{type_for}->{$colname} || ''; + if ($type =~ m/^(CHAR|VARCHAR|BINARY|VARBINARY|BLOB|TEXT|ENUM|SET|JSON)$/i) { + push @converted_cols, "convert($col using utf8mb4)"; + } else { + push @converted_cols, "$col"; + } + } + + my @nulls = grep { $cols->{allowed}->{$_} } @{$tbl_struct->{null_cols}}; + if ( @nulls ) { + my $bitmap = "CONCAT(" + . join(', ', map { 'ISNULL(' . $q->quote($_) . ')' } @nulls) + . ")"; + push @converted_cols, $bitmap; + } + + $query .= scalar @converted_cols > 1 + ? "$func(CONCAT_WS('$sep', " . join(', ', @converted_cols) . '))' + : "$func($converted_cols[0])"; + } + else { + my $fnv_func = uc $func; + $query .= "$fnv_func(" . join(', ', @{$cols->{select}}) . ')'; + } + + PTDEBUG && _d('Row checksum:', $query); + return $query; +} + +sub make_chunk_checksum { + my ( $self, %args ) = @_; + my @required_args = qw(tbl); + foreach my $arg( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + if ( !$args{dbh} && !($args{func} && $args{crc_width} && $args{crc_type}) ) { + die "I need a dbh argument" + } + my ($tbl) = @args{@required_args}; + my $o = $self->{OptionParser}; + my $q = $self->{Quoter}; + + my %crc_args = $self->get_crc_args(%args); + PTDEBUG && _d("Checksum strat:", Dumper(\%crc_args)); + + my $row_checksum = $self->make_row_checksum( + %args, + %crc_args, + no_cols => 1 + ); + my $crc; + if ( $crc_args{crc_type} =~ m/int$/ ) { + $crc = "COALESCE(LOWER(CONV(BIT_XOR(CAST($row_checksum AS UNSIGNED)), " + . 
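+ # For hash functions whose result fits in an integer (CRC32, FNV_64, FNV1A_64), the per-row checksums are folded with BIT_XOR, which is commutative and associative, so a chunk's checksum does not depend on row order; CONV(..., 10, 16) renders the aggregate as hex.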
"10, 16)), 0)"; + } + else { + my $slices = $self->_make_xor_slices( + row_checksum => $row_checksum, + %crc_args, + ); + $crc = "COALESCE(LOWER(CONCAT($slices)), 0)"; + } + + my $select = "COUNT(*) AS cnt, $crc AS crc"; + PTDEBUG && _d('Chunk checksum:', $select); + return $select; +} + +sub get_checksum_columns { + my ($self, %args) = @_; + my @required_args = qw(tbl); + foreach my $arg( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($tbl) = @args{@required_args}; + my $o = $self->{OptionParser}; + my $q = $self->{Quoter}; + + my $trim = $o->get('trim'); + my $float_precision = $o->get('float-precision'); + + my $tbl_struct = $tbl->{tbl_struct}; + my $ignore_col = $o->get('ignore-columns') || {}; + my $all_cols = $o->get('columns') || $tbl_struct->{cols}; + my %cols = map { lc($_) => 1 } grep { !$ignore_col->{$_} } @$all_cols; + my %seen; + my @cols = + map { + my $type = $tbl_struct->{type_for}->{$_}; + my $result = $q->quote($_); + if ( $type eq 'timestamp' ) { + $result = "UNIX_TIMESTAMP($result)"; + } + elsif ( $float_precision && $type =~ m/float|double/ ) { + $result = "ROUND($result, $float_precision)"; + } + elsif ( $trim && $type =~ m/varchar/ ) { + $result = "TRIM($result)"; + } + elsif ( $type =~ m/blob|text|binary/ ) { + $result = "CRC32($result)"; + } + $result; + } + grep { + $cols{$_} && !$seen{$_}++ + } + @{$tbl_struct->{cols}}; + + return { + select => \@cols, + allowed => \%cols, + }; +} + +sub get_crc_args { + my ($self, %args) = @_; + my $func = $args{func} || $self->_get_hash_func(%args); + my $crc_width = $args{crc_width}|| $self->_get_crc_width(%args, func=>$func); + my $crc_type = $args{crc_type} || $self->_get_crc_type(%args, func=>$func); + my $opt_slice; + if ( $args{dbh} && $crc_type !~ m/int$/ ) { + $opt_slice = $self->_optimize_xor(%args, func=>$func); + } + + return ( + func => $func, + crc_width => $crc_width, + crc_type => $crc_type, + opt_slice => $opt_slice, + ); +} + +sub _get_hash_func { + my ( $self, %args ) = @_; + my @required_args = qw(dbh); + foreach my $arg( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh) = @args{@required_args}; + my $o = $self->{OptionParser}; + my @funcs = qw(CRC32 FNV1A_64 FNV_64 MURMUR_HASH MD5 SHA1); + + if ( my $func = $o->get('function') ) { + unshift @funcs, $func; + } + + my $error; + foreach my $func ( @funcs ) { + eval { + my $sql = "SELECT $func('test-string')"; + PTDEBUG && _d($sql); + $args{dbh}->do($sql); + }; + if ( $EVAL_ERROR && $EVAL_ERROR =~ m/failed: (.*?) at \S+ line/ ) { + $error .= qq{$func cannot be used because "$1"\n}; + PTDEBUG && _d($func, 'cannot be used because', $1); + next; + } + PTDEBUG && _d('Chosen hash func:', $func); + return $func; + } + die($error || 'No hash functions (CRC32, MD5, etc.) 
are available'); +} + +sub _get_crc_width { + my ( $self, %args ) = @_; + my @required_args = qw(dbh func); + foreach my $arg( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $func) = @args{@required_args}; + + my $crc_width = 16; + if ( uc $func ne 'FNV_64' && uc $func ne 'FNV1A_64' ) { + eval { + my ($val) = $dbh->selectrow_array("SELECT $func('a')"); + $crc_width = max(16, length($val)); + }; + } + return $crc_width; +} + +sub _get_crc_type { + my ( $self, %args ) = @_; + my @required_args = qw(dbh func); + foreach my $arg( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $func) = @args{@required_args}; + + my $type = ''; + my $length = 0; + my $sql = "SELECT $func('a')"; + my $sth = $dbh->prepare($sql); + eval { + $sth->execute(); + $type = $sth->{mysql_type_name}->[0]; + $length = $sth->{mysql_length}->[0]; + PTDEBUG && _d($sql, $type, $length); + if ( $type eq 'integer' && $length < 11 ) { + $type = 'int'; + } + elsif ( $type eq 'bigint' && $length < 20 ) { + $type = 'int'; + } + }; + $sth->finish; + PTDEBUG && _d('crc_type:', $type, 'length:', $length); + return $type; +} + +sub _optimize_xor { + my ( $self, %args ) = @_; + my @required_args = qw(dbh func); + foreach my $arg( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $func) = @args{@required_args}; + + die "$func never needs BIT_XOR optimization" + if $func =~ m/^(?:FNV1A_64|FNV_64|CRC32)$/i; + + my $opt_slice = 0; + my $unsliced = uc $dbh->selectall_arrayref("SELECT $func('a')")->[0]->[0]; + my $sliced = ''; + my $start = 1; + my $crc_width = length($unsliced) < 16 ? 16 : length($unsliced); + + do { # Try different positions till sliced result equals non-sliced. + PTDEBUG && _d('Trying slice', $opt_slice); + $dbh->do(q{SET @crc := '', @cnt := 0}); + my $slices = $self->_make_xor_slices( + row_checksum => "\@crc := $func('a')", + crc_width => $crc_width, + opt_slice => $opt_slice, + ); + + my $sql = "SELECT CONCAT($slices) AS TEST FROM (SELECT NULL) AS x"; + $sliced = ($dbh->selectrow_array($sql))[0]; + if ( $sliced ne $unsliced ) { + PTDEBUG && _d('Slice', $opt_slice, 'does not work'); + $start += 16; + ++$opt_slice; + } + } while ( $start < $crc_width && $sliced ne $unsliced ); + + if ( $sliced eq $unsliced ) { + PTDEBUG && _d('Slice', $opt_slice, 'works'); + return $opt_slice; + } + else { + PTDEBUG && _d('No slice works'); + return undef; + } +} + +sub _make_xor_slices { + my ( $self, %args ) = @_; + my @required_args = qw(row_checksum crc_width); + foreach my $arg( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($row_checksum, $crc_width) = @args{@required_args}; + my ($opt_slice) = $args{opt_slice}; + + my @slices; + for ( my $start = 1; $start <= $crc_width; $start += 16 ) { + my $len = $crc_width - $start + 1; + if ( $len > 16 ) { + $len = 16; + } + push @slices, + "LPAD(CONV(BIT_XOR(" + . "CAST(CONV(SUBSTRING(\@crc, $start, $len), 16, 10) AS UNSIGNED))" + . 
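+ # Wide hex digests (MD5 is 32 hex digits, SHA1 is 40) do not fit in a BIGINT, so the digest is cut into slices of at most 16 hex digits; each slice is BIT_XOR-aggregated on its own and the slices are concatenated back together, with LPAD preserving leading zeros.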
", 10, 16), $len, '0')"; + } + + if ( defined $opt_slice && $opt_slice < @slices ) { + $slices[$opt_slice] =~ s/\@crc/\@crc := $row_checksum/; + } + else { + map { s/\@crc/$row_checksum/ } @slices; + } + + return join(', ', @slices); +} + +sub find_replication_differences { + my ($self, %args) = @_; + my @required_args = qw(dbh repl_table); + foreach my $arg( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $repl_table) = @args{@required_args}; + + my $tries = $self->{'OptionParser'}->get('replicate-check-retries') || 1; + my $diffs; + + while ($tries--) { + my $sql + = "SELECT CONCAT(db, '.', tbl) AS `table`, " + . "chunk, chunk_index, lower_boundary, upper_boundary, " + . "COALESCE(this_cnt-master_cnt, 0) AS cnt_diff, " + . "COALESCE(" + . "this_crc <> master_crc OR ISNULL(master_crc) <> ISNULL(this_crc), 0" + . ") AS crc_diff, this_cnt, master_cnt, this_crc, master_crc " + . "FROM $repl_table " + . "WHERE (master_cnt <> this_cnt OR master_crc <> this_crc " + . "OR ISNULL(master_crc) <> ISNULL(this_crc)) " + . ($args{where} ? " AND ($args{where})" : ""); + PTDEBUG && _d($sql); + $diffs = $dbh->selectall_arrayref($sql, { Slice => {} }); + if (!@$diffs || !$tries) { # if no differences are found OR we are out of tries left... + last; # get out now + } + sleep 1; + } + return $diffs; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End RowChecksum package +# ########################################################################### + +# ########################################################################### +# NibbleIterator package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/NibbleIterator.pm +# t/lib/NibbleIterator.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package NibbleIterator; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(Cxn tbl chunk_size OptionParser Quoter TableNibbler TableParser); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($cxn, $tbl, $chunk_size, $o, $q) = @args{@required_args}; + + my $nibble_params = can_nibble(%args); + + my %comments = ( + bite => "bite table", + nibble => "nibble table", + ); + if ( $args{comments} ) { + map { $comments{$_} = $args{comments}->{$_} } + grep { defined $args{comments}->{$_} } + keys %{$args{comments}}; + } + + my $where = $o->has('where') ? $o->get('where') : ''; + my $tbl_struct = $tbl->{tbl_struct}; + my $ignore_col = $o->has('ignore-columns') + ? ($o->get('ignore-columns') || {}) + : {}; + my $all_cols = $o->has('columns') + ? 
($o->get('columns') || $tbl_struct->{cols}) + : $tbl_struct->{cols}; + my @cols = grep { !$ignore_col->{$_} } @$all_cols; + my $self; + if ( $nibble_params->{one_nibble} ) { + my $params = _one_nibble(\%args, \@cols, $where, $tbl, \%comments); + $self = { + %args, + one_nibble => 1, + limit => 0, + nibble_sql => $params->{nibble_sql}, + explain_nibble_sql => $params->{explain_nibble_sql}, + }; + } else { + my $params = _nibble_params($nibble_params, $tbl, \%args, \@cols, $chunk_size, $where, \%comments, $q); + $self = { + %args, + index => $params->{index}, + limit => $params->{limit}, + first_lb_sql => $params->{first_lb_sql}, + last_ub_sql => $params->{last_ub_sql}, + ub_sql => $params->{ub_sql}, + nibble_sql => $params->{nibble_sql}, + explain_first_lb_sql => $params->{explain_first_lb_sql}, + explain_ub_sql => $params->{explain_ub_sql}, + explain_nibble_sql => $params->{explain_nibble_sql}, + resume_lb_sql => $params->{resume_lb_sql}, + sql => $params->{sql}, + }; + } + + $self->{row_est} = $nibble_params->{row_est}, + $self->{nibbleno} = 0; + $self->{have_rows} = 0; + $self->{rowno} = 0; + $self->{oktonibble} = 1; + $self->{pause_file} = $nibble_params->{pause_file}; + $self->{sleep} = $args{sleep} || 60; + + $self->{nibble_params} = $nibble_params; + $self->{tbl} = $tbl; + $self->{args} = \%args; + $self->{cols} = \@cols; + $self->{chunk_size} = $chunk_size; + $self->{where} = $where; + $self->{comments} = \%comments; + + return bless $self, $class; +} + +sub switch_to_nibble { + my $self = shift; + my $params = _nibble_params($self->{nibble_params}, $self->{tbl}, $self->{args}, $self->{cols}, + $self->{chunk_size}, $self->{where}, $self->{comments}, $self->{Quoter}); + + $self->{one_nibble} = 0; + $self->{index} = $params->{index}; + $self->{limit} = $params->{limit}; + $self->{first_lb_sql} = $params->{first_lb_sql}; + $self->{last_ub_sql} = $params->{last_ub_sql}; + $self->{ub_sql} = $params->{ub_sql}; + $self->{nibble_sql} = $params->{nibble_sql}; + $self->{explain_first_lb_sql} = $params->{explain_first_lb_sql}; + $self->{explain_ub_sql} = $params->{explain_ub_sql}; + $self->{explain_nibble_sql} = $params->{explain_nibble_sql}; + $self->{resume_lb_sql} = $params->{resume_lb_sql}; + $self->{sql} = $params->{sql}; + $self->_get_bounds(); + $self->_prepare_sths(); +} + +sub _one_nibble { + my ($args, $cols, $where, $tbl, $comments) = @_; + my $q = new Quoter(); + + my $nibble_sql + = ($args->{dml} ? "$args->{dml} " : "SELECT ") + . ($args->{select} ? $args->{select} + : join(', ', map{ $tbl->{tbl_struct}->{type_for}->{$_} eq 'enum' ? + "CAST(".$q->quote($_)." AS UNSIGNED)" : $q->quote($_) } @$cols)) + . " FROM $tbl->{name}" + . ($where ? " WHERE $where" : '') + . ($args->{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "") + . " /*$comments->{bite}*/"; + PTDEBUG && _d('One nibble statement:', $nibble_sql); + + my $explain_nibble_sql + = "EXPLAIN SELECT " + . ($args->{select} ? $args->{select} + : join(', ', map{ $tbl->{tbl_struct}->{type_for}->{$_} eq 'enum' + ? "CAST(".$q->quote($_)." AS UNSIGNED)" : $q->quote($_) } @$cols)) + . " FROM $tbl->{name}" + . ($where ? " WHERE $where" : '') + . ($args->{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "") + . 
" /*explain $comments->{bite}*/"; + PTDEBUG && _d('Explain one nibble statement:', $explain_nibble_sql); + + return { + one_nibble => 1, + limit => 0, + nibble_sql => $nibble_sql, + explain_nibble_sql => $explain_nibble_sql, + }; +} + +sub _nibble_params { + my ($nibble_params, $tbl, $args, $cols, $chunk_size, $where, $comments, $q) = @_; + my $index = $nibble_params->{index}; # brevity + my $index_cols = $tbl->{tbl_struct}->{keys}->{$index}->{cols}; + + my $asc = $args->{TableNibbler}->generate_asc_stmt( + %$args, + tbl_struct => $tbl->{tbl_struct}, + index => $index, + n_index_cols => $args->{n_chunk_index_cols}, + cols => $cols, + asc_only => 1, + ); + PTDEBUG && _d('Ascend params:', Dumper($asc)); + + my $from = "$tbl->{name} FORCE INDEX(`$index`)"; + my $order_by = join(', ', map {$q->quote($_)} @{$index_cols}); + my $order_by_dec = join(' DESC,', map {$q->quote($_)} @{$index_cols}); + + my $first_lb_sql + = "SELECT /*!40001 SQL_NO_CACHE */ " + . join(', ', map { $tbl->{tbl_struct}->{type_for}->{$_} eq 'enum' ? "CAST(".$q->quote($_)." AS UNSIGNED)" : $q->quote($_)} @{$asc->{scols}}) + . " FROM $from" + . ($where ? " WHERE $where" : '') + . " ORDER BY $order_by" + . " LIMIT 1" + . " /*first lower boundary*/"; + PTDEBUG && _d('First lower boundary statement:', $first_lb_sql); + + my $resume_lb_sql; + if ( $args->{resume} ) { + $resume_lb_sql + = "SELECT /*!40001 SQL_NO_CACHE */ " + . join(', ', map { $tbl->{tbl_struct}->{type_for}->{$_} eq 'enum' ? "CAST(".$q->quote($_)." AS UNSIGNED)" : $q->quote($_)} @{$asc->{scols}}) + . " FROM $from" + . " WHERE " . $asc->{boundaries}->{'>'} + . ($where ? " AND ($where)" : '') + . " ORDER BY $order_by" + . " LIMIT 1" + . " /*resume lower boundary*/"; + PTDEBUG && _d('Resume lower boundary statement:', $resume_lb_sql); + } + + my $last_ub_sql + = "SELECT /*!40001 SQL_NO_CACHE */ " + . join(', ', map { $tbl->{tbl_struct}->{type_for}->{$_} eq 'enum' ? "CAST(".$q->quote($_)." AS UNSIGNED)" : $q->quote($_)} @{$asc->{scols}}) + . " FROM $from" + . ($where ? " WHERE $where" : '') + . " ORDER BY " + . $order_by_dec . ' DESC' + . " LIMIT 1" + . " /*last upper boundary*/"; + PTDEBUG && _d('Last upper boundary statement:', $last_ub_sql); + + my $ub_sql + = "SELECT /*!40001 SQL_NO_CACHE */ " + . join(', ', map { $tbl->{tbl_struct}->{type_for}->{$_} eq 'enum' ? "CAST(".$q->quote($_)." AS UNSIGNED)" : $q->quote($_)} @{$asc->{scols}}) + . " FROM $from" + . " WHERE " . $asc->{boundaries}->{'>='} + . ($where ? " AND ($where)" : '') + . " ORDER BY $order_by" + . " LIMIT ?, 2" + . " /*next chunk boundary*/"; + PTDEBUG && _d('Upper boundary statement:', $ub_sql); + + my $nibble_sql + = ($args->{dml} ? "$args->{dml} " : "SELECT ") + . ($args->{select} ? $args->{select} + : join(', ', map { $tbl->{tbl_struct}->{type_for}->{$_} eq 'enum' ? "CAST(".$q->quote($_)." AS UNSIGNED)" : $q->quote($_)} @{$asc->{cols}})) + . " FROM $from" + . " WHERE " . $asc->{boundaries}->{'>='} # lower boundary + . " AND " . $asc->{boundaries}->{'<='} # upper boundary + . ($where ? " AND ($where)" : '') + . ($args->{order_by} ? " ORDER BY $order_by" : "") + . ($args->{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "") + . " /*$comments->{nibble}*/"; + PTDEBUG && _d('Nibble statement:', $nibble_sql); + + my $explain_nibble_sql + = "EXPLAIN SELECT " + . ($args->{select} ? $args->{select} + : join(', ', map { $q->quote($_) } @{$asc->{cols}})) + . " FROM $from" + . " WHERE " . $asc->{boundaries}->{'>='} # lower boundary + . " AND " . $asc->{boundaries}->{'<='} # upper boundary + . ($where ? 
" AND ($where)" : '') + . ($args->{order_by} ? " ORDER BY $order_by" : "") + . ($args->{lock_in_share_mode} ? " LOCK IN SHARE MODE" : "") + . " /*explain $comments->{nibble}*/"; + PTDEBUG && _d('Explain nibble statement:', $explain_nibble_sql); + + my $limit = $chunk_size - 1; + PTDEBUG && _d('Initial chunk size (LIMIT):', $limit); + + my $params = { + one_nibble => 0, + index => $index, + limit => $limit, + first_lb_sql => $first_lb_sql, + last_ub_sql => $last_ub_sql, + ub_sql => $ub_sql, + nibble_sql => $nibble_sql, + explain_first_lb_sql => "EXPLAIN $first_lb_sql", + explain_ub_sql => "EXPLAIN $ub_sql", + explain_nibble_sql => $explain_nibble_sql, + resume_lb_sql => $resume_lb_sql, + sql => { + columns => $asc->{scols}, + from => $from, + where => $where, + boundaries => $asc->{boundaries}, + order_by => $order_by, + }, + }; + return $params; +} + +sub next { + my ($self) = @_; + + if ( !$self->{oktonibble} ) { + PTDEBUG && _d('Not ok to nibble'); + return; + } + + my %callback_args = ( + Cxn => $self->{Cxn}, + tbl => $self->{tbl}, + NibbleIterator => $self, + ); + + if ($self->{nibbleno} == 0) { + $self->_prepare_sths(); + $self->_get_bounds(); + if ( my $callback = $self->{callbacks}->{init} ) { + $self->{oktonibble} = $callback->(%callback_args); + PTDEBUG && _d('init callback returned', $self->{oktonibble}); + if ( !$self->{oktonibble} ) { + $self->{no_more_boundaries} = 1; + return; + } + } + if ( !$self->{one_nibble} && !$self->{first_lower} ) { + PTDEBUG && _d('No first lower boundary, table must be empty'); + $self->{no_more_boundaries} = 1; + return; + } + } + + NIBBLE: + while ( $self->{have_rows} || $self->_next_boundaries() ) { + if ($self->{pause_file}) { + while(-f $self->{pause_file}) { + print "Sleeping $self->{sleep} seconds because $self->{pause_file} exists\n"; + my $dbh = $self->{Cxn}->dbh(); + if ( !$dbh || !$dbh->ping() ) { + eval { $dbh = $self->{Cxn}->connect() }; # connect or die trying + if ( $EVAL_ERROR ) { + chomp $EVAL_ERROR; + die "Lost connection to " . $self->{Cxn}->name() . " while waiting for " + . 
"replica lag ($EVAL_ERROR)\n"; + } + } + $dbh->do("SELECT 'nibble iterator keepalive'"); + sleep($self->{sleep}); + } + } + + if ( !$self->{have_rows} ) { + $self->{nibbleno}++; + PTDEBUG && _d('Nibble:', $self->{nibble_sth}->{Statement}, 'params:', + join(', ', (@{$self->{lower} || []}, @{$self->{upper} || []}))); + if ( my $callback = $self->{callbacks}->{exec_nibble} ) { + $self->{have_rows} = $callback->(%callback_args); + } + else { + $self->{nibble_sth}->execute(@{$self->{lower}}, @{$self->{upper}}); + $self->{have_rows} = $self->{nibble_sth}->rows(); + } + PTDEBUG && _d($self->{have_rows}, 'rows in nibble', $self->{nibbleno}); + } + + if ( $self->{have_rows} ) { + my $row = $self->{nibble_sth}->fetchrow_arrayref(); + if ( $row ) { + $self->{rowno}++; + PTDEBUG && _d('Row', $self->{rowno}, 'in nibble',$self->{nibbleno}); + return [ @$row ]; + } + } + + PTDEBUG && _d('No rows in nibble or nibble skipped'); + if ( my $callback = $self->{callbacks}->{after_nibble} ) { + $callback->(%callback_args); + } + $self->{rowno} = 0; + $self->{have_rows} = 0; + + } + + PTDEBUG && _d('Done nibbling'); + if ( my $callback = $self->{callbacks}->{done} ) { + $callback->(%callback_args); + } + + return; +} + +sub nibble_number { + my ($self) = @_; + return $self->{nibbleno}; +} + +sub set_nibble_number { + my ($self, $n) = @_; + die "I need a number" unless $n; + $self->{nibbleno} = $n; + PTDEBUG && _d('Set new nibble number:', $n); + return; +} + +sub nibble_index { + my ($self) = @_; + return $self->{index}; +} + +sub statements { + my ($self) = @_; + return { + explain_first_lower_boundary => $self->{explain_first_lb_sth}, + nibble => $self->{nibble_sth}, + explain_nibble => $self->{explain_nibble_sth}, + upper_boundary => $self->{ub_sth}, + explain_upper_boundary => $self->{explain_ub_sth}, + } +} + +sub boundaries { + my ($self) = @_; + return { + first_lower => $self->{first_lower}, + lower => $self->{lower}, + upper => $self->{upper}, + next_lower => $self->{next_lower}, + last_upper => $self->{last_upper}, + }; +} + +sub set_boundary { + my ($self, $boundary, $values) = @_; + die "I need a boundary parameter" + unless $boundary; + die "Invalid boundary: $boundary" + unless $boundary =~ m/^(?:lower|upper|next_lower|last_upper)$/; + die "I need a values arrayref parameter" + unless $values && ref $values eq 'ARRAY'; + $self->{$boundary} = $values; + PTDEBUG && _d('Set new', $boundary, 'boundary:', Dumper($values)); + return; +} + +sub one_nibble { + my ($self) = @_; + return $self->{one_nibble}; +} + +sub limit { + my ($self) = @_; + return $self->{limit}; +} + +sub set_chunk_size { + my ($self, $limit) = @_; + return if $self->{one_nibble}; + die "Chunk size must be > 0" unless $limit; + $self->{limit} = $limit - 1; + PTDEBUG && _d('Set new chunk size (LIMIT):', $limit); + return; +} + +sub sql { + my ($self) = @_; + return $self->{sql}; +} + +sub more_boundaries { + my ($self) = @_; + return !$self->{no_more_boundaries}; +} + +sub row_estimate { + my ($self) = @_; + return $self->{row_est}; +} + +sub can_nibble { + my (%args) = @_; + my @required_args = qw(Cxn tbl chunk_size OptionParser TableParser); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($cxn, $tbl, $chunk_size, $o) = @args{@required_args}; + + my $where = $o->has('where') ? 
$o->get('where') : ''; + + my ($row_est, $mysql_index) = get_row_estimate( + Cxn => $cxn, + tbl => $tbl, + where => $where, + ); + + if ( !$where ) { + $mysql_index = undef; + } + + my $chunk_size_limit = $o->get('chunk-size-limit') || 1; + my $one_nibble = !defined $args{one_nibble} || $args{one_nibble} + ? $row_est <= $chunk_size * $chunk_size_limit + : 0; + PTDEBUG && _d('One nibble:', $one_nibble ? 'yes' : 'no'); + + if ( $args{resume} + && !defined $args{resume}->{lower_boundary} + && !defined $args{resume}->{upper_boundary} ) { + PTDEBUG && _d('Resuming from one nibble table'); + $one_nibble = 1; + } + + my $index = _find_best_index(%args, mysql_index => $mysql_index); + if ( !$index && !$one_nibble ) { + die "There is no good index and the table is oversized."; + } + + my $pause_file = ($o->has('pause-file') && $o->get('pause-file')) || undef; + + return { + row_est => $row_est, # nibble about this many rows + index => $index, # using this index + one_nibble => $one_nibble, # if the table fits in one nibble/chunk + pause_file => $pause_file, + }; +} + +sub _find_best_index { + my (%args) = @_; + my @required_args = qw(Cxn tbl TableParser); + my ($cxn, $tbl, $tp) = @args{@required_args}; + my $tbl_struct = $tbl->{tbl_struct}; + my $indexes = $tbl_struct->{keys}; + + my $best_index; + my $want_index = $args{chunk_index}; + if ( $want_index ) { + PTDEBUG && _d('User wants to use index', $want_index); + if ( !exists $indexes->{$want_index} ) { + PTDEBUG && _d('Cannot use user index because it does not exist'); + $want_index = undef; + } else { + $best_index = $want_index; + } + } + + if ( !$best_index && !$want_index && $args{mysql_index} ) { + PTDEBUG && _d('MySQL wants to use index', $args{mysql_index}); + $want_index = $args{mysql_index}; + } + + + my @possible_indexes; + if ( !$best_index && $want_index ) { + if ( $indexes->{$want_index}->{is_unique} ) { + PTDEBUG && _d('Will use wanted index'); + $best_index = $want_index; + } + else { + PTDEBUG && _d('Wanted index is a possible index'); + push @possible_indexes, $want_index; + } + } + + if (!$best_index) { + PTDEBUG && _d('Auto-selecting best index'); + foreach my $index ( $tp->sort_indexes($tbl_struct) ) { + if ( $index eq 'PRIMARY' || $indexes->{$index}->{is_unique} ) { + $best_index = $index; + last; + } + else { + push @possible_indexes, $index; + } + } + } + + if ( !$best_index && @possible_indexes ) { + PTDEBUG && _d('No PRIMARY or unique indexes;', + 'will use index with highest cardinality'); + foreach my $index ( @possible_indexes ) { + $indexes->{$index}->{cardinality} = _get_index_cardinality( + %args, + index => $index, + ); + } + @possible_indexes = sort { + my $cmp + = $indexes->{$b}->{cardinality} <=> $indexes->{$a}->{cardinality}; + if ( $cmp == 0 ) { + $cmp = scalar @{$indexes->{$b}->{cols}} + <=> scalar @{$indexes->{$a}->{cols}}; + } + $cmp; + } @possible_indexes; + $best_index = $possible_indexes[0]; + } + + PTDEBUG && _d('Best index:', $best_index); + return $best_index; +} + +sub _get_index_cardinality { + my (%args) = @_; + my @required_args = qw(Cxn tbl index); + my ($cxn, $tbl, $index) = @args{@required_args}; + + my $sql = "SHOW INDEXES FROM $tbl->{name} " + . "WHERE Key_name = '$index'"; + PTDEBUG && _d($sql); + my $cardinality = 1; + my $dbh = $cxn->dbh(); + my $key_name = $dbh && ($dbh->{FetchHashKeyName} || '') eq 'NAME_lc' + ? 
'key_name' + : 'Key_name'; + my $rows = $dbh->selectall_hashref($sql, $key_name); + foreach my $row ( values %$rows ) { + $cardinality *= $row->{cardinality} if $row->{cardinality}; + } + PTDEBUG && _d('Index', $index, 'cardinality:', $cardinality); + return $cardinality; +} + +sub get_row_estimate { + my (%args) = @_; + my @required_args = qw(Cxn tbl); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($cxn, $tbl) = @args{@required_args}; + + my $sql = "EXPLAIN SELECT * FROM $tbl->{name} " + . "WHERE " . ($args{where} || '1=1'); + PTDEBUG && _d($sql); + my $expl = $cxn->dbh()->selectrow_hashref($sql); + PTDEBUG && _d(Dumper($expl)); + my $mysql_index = $expl->{key} || ''; + if ( $mysql_index ne 'PRIMARY' ) { + $mysql_index = lc($mysql_index); + } + return ($expl->{rows} || 0), $mysql_index; +} + +sub _prepare_sths { + my ($self) = @_; + PTDEBUG && _d('Preparing statement handles'); + + my $dbh = $self->{Cxn}->dbh(); + + $self->{nibble_sth} = $dbh->prepare($self->{nibble_sql}); + $self->{explain_nibble_sth} = $dbh->prepare($self->{explain_nibble_sql}); + + if ( !$self->{one_nibble} ) { + $self->{explain_first_lb_sth} = $dbh->prepare($self->{explain_first_lb_sql}); + $self->{ub_sth} = $dbh->prepare($self->{ub_sql}); + $self->{explain_ub_sth} = $dbh->prepare($self->{explain_ub_sql}); + } + + return; +} + +sub _get_bounds { + my ($self) = @_; + + if ( $self->{one_nibble} ) { + if ( $self->{resume} ) { + $self->{no_more_boundaries} = 1; + } + return; + } + + my $dbh = $self->{Cxn}->dbh(); + + $self->{first_lower} = $dbh->selectrow_arrayref($self->{first_lb_sql}); + PTDEBUG && _d('First lower boundary:', Dumper($self->{first_lower})); + + if ( my $nibble = $self->{resume} ) { + if ( defined $nibble->{lower_boundary} + && defined $nibble->{upper_boundary} ) { + my $sth = $dbh->prepare($self->{resume_lb_sql}); + my @ub = split ',', $nibble->{upper_boundary}; + PTDEBUG && _d($sth->{Statement}, 'params:', @ub); + $sth->execute(@ub); + $self->{next_lower} = $sth->fetchrow_arrayref(); + $sth->finish(); + } + } + else { + $self->{next_lower} = $self->{first_lower}; + } + PTDEBUG && _d('Next lower boundary:', Dumper($self->{next_lower})); + + if ( !$self->{next_lower} ) { + PTDEBUG && _d('At end of table, or no more boundaries to resume'); + $self->{no_more_boundaries} = 1; + + $self->{last_upper} = $dbh->selectrow_arrayref($self->{last_ub_sql}); + PTDEBUG && _d('Last upper boundary:', Dumper($self->{last_upper})); + } + + return; +} + +sub _next_boundaries { + my ($self) = @_; + + if ( $self->{no_more_boundaries} ) { + PTDEBUG && _d('No more boundaries'); + return; # stop nibbling + } + + if ( $self->{one_nibble} ) { + $self->{lower} = $self->{upper} = []; + $self->{no_more_boundaries} = 1; # for next call + return 1; # continue nibbling + } + + + + if ( $self->identical_boundaries($self->{lower}, $self->{next_lower}) ) { + PTDEBUG && _d('Infinite loop detected'); + my $tbl = $self->{tbl}; + my $index = $tbl->{tbl_struct}->{keys}->{$self->{index}}; + my $n_cols = scalar @{$index->{cols}}; + my $chunkno = $self->{nibbleno}; + + die "Possible infinite loop detected! " + . "The lower boundary for chunk $chunkno is " + . "<" . join(', ', @{$self->{lower}}) . "> and the lower " + . "boundary for chunk " . ($chunkno + 1) . " is also " + . "<" . join(', ', @{$self->{next_lower}}) . ">. " + . "This usually happens when using a non-unique single " + . "column index. The current chunk index for table " + . "$tbl->{db}.$tbl->{tbl} is $self->{index} which is" + . 
($index->{is_unique} ? '' : ' not') . " unique and covers " + . ($n_cols > 1 ? "$n_cols columns" : "1 column") . ".\n"; + } + $self->{lower} = $self->{next_lower}; + + if ( my $callback = $self->{callbacks}->{next_boundaries} ) { + my $oktonibble = $callback->( + Cxn => $self->{Cxn}, + tbl => $self->{tbl}, + NibbleIterator => $self, + ); + PTDEBUG && _d('next_boundaries callback returned', $oktonibble); + if ( !$oktonibble ) { + $self->{no_more_boundaries} = 1; + return; # stop nibbling + } + } + + + PTDEBUG && _d($self->{ub_sth}->{Statement}, 'params:', + join(', ', @{$self->{lower}}), $self->{limit}); + $self->{ub_sth}->execute(@{$self->{lower}}, $self->{limit}); + my $boundary = $self->{ub_sth}->fetchall_arrayref(); + PTDEBUG && _d('Next boundary:', Dumper($boundary)); + if ( $boundary && @$boundary ) { + $self->{upper} = $boundary->[0]; + + if ( $boundary->[1] ) { + $self->{next_lower} = $boundary->[1]; + } + else { + PTDEBUG && _d('End of table boundary:', Dumper($boundary->[0])); + $self->{no_more_boundaries} = 1; # for next call + + $self->{last_upper} = $boundary->[0]; + } + } + else { + my $dbh = $self->{Cxn}->dbh(); + $self->{upper} = $dbh->selectrow_arrayref($self->{last_ub_sql}); + PTDEBUG && _d('Last upper boundary:', Dumper($self->{upper})); + $self->{no_more_boundaries} = 1; # for next call + + $self->{last_upper} = $self->{upper}; + } + $self->{ub_sth}->finish(); + + return 1; # continue nibbling +} + +sub identical_boundaries { + my ($self, $b1, $b2) = @_; + + return 0 if ($b1 && !$b2) || (!$b1 && $b2); + + return 1 if !$b1 && !$b2; + + die "Boundaries have different numbers of values" + if scalar @$b1 != scalar @$b2; # shouldn't happen + my $n_vals = scalar @$b1; + for my $i ( 0..($n_vals-1) ) { + return 0 if ($b1->[$i] || '') ne ($b2->[$i] || ''); # diff + } + return 1; +} + +sub DESTROY { + my ( $self ) = @_; + foreach my $key ( keys %$self ) { + if ( $key =~ m/_sth$/ ) { + PTDEBUG && _d('Finish', $key); + $self->{$key}->finish(); + } + } + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End NibbleIterator package +# ########################################################################### + +# ########################################################################### +# OobNibbleIterator package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/OobNibbleIterator.pm +# t/lib/OobNibbleIterator.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package OobNibbleIterator; +use base 'NibbleIterator'; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + + my $self = $class->SUPER::new(%args); + + my $q = $self->{Quoter}; + my $o = $self->{OptionParser}; + my $where = $o->has('where') ? 
$o->get('where') : undef; + + if ( !$self->one_nibble() ) { + my $head_sql + = ($args{past_dml} || "SELECT ") + . ($args{past_select} + || join(', ', map { $q->quote($_) } @{$self->{sql}->{columns}})) + . " FROM " . $self->{sql}->{from}; + + my $tail_sql + = ($where ? " AND ($where)" : '') + . " ORDER BY " . $self->{sql}->{order_by}; + + my $past_lower_sql + = $head_sql + . " WHERE " . $self->{sql}->{boundaries}->{'<'} + . $tail_sql + . " /*past lower chunk*/"; + PTDEBUG && _d('Past lower statement:', $past_lower_sql); + + my $explain_past_lower_sql + = "EXPLAIN SELECT " + . ($args{past_select} + || join(', ', map { $q->quote($_) } @{$self->{sql}->{columns}})) + . " FROM " . $self->{sql}->{from} + . " WHERE " . $self->{sql}->{boundaries}->{'<'} + . $tail_sql + . " /*explain past lower chunk*/"; + PTDEBUG && _d('Explain past lower statement:', $explain_past_lower_sql); + + my $past_upper_sql + = $head_sql + . " WHERE " . $self->{sql}->{boundaries}->{'>'} + . $tail_sql + . " /*past upper chunk*/"; + PTDEBUG && _d('Past upper statement:', $past_upper_sql); + + my $explain_past_upper_sql + = "EXPLAIN SELECT " + . ($args{past_select} + || join(', ', map { $q->quote($_) } @{$self->{sql}->{columns}})) + . " FROM " . $self->{sql}->{from} + . " WHERE " . $self->{sql}->{boundaries}->{'>'} + . $tail_sql + . " /*explain past upper chunk*/"; + PTDEBUG && _d('Explain past upper statement:', $explain_past_upper_sql); + + $self->{past_lower_sql} = $past_lower_sql; + $self->{past_upper_sql} = $past_upper_sql; + $self->{explain_past_lower_sql} = $explain_past_lower_sql; + $self->{explain_past_upper_sql} = $explain_past_upper_sql; + + $self->{past_nibbles} = [qw(lower upper)]; + if ( my $nibble = $args{resume} ) { + if ( !defined $nibble->{lower_boundary} + || !defined $nibble->{upper_boundary} ) { + $self->{past_nibbles} = !defined $nibble->{lower_boundary} + ? ['upper'] + : []; + } + } + PTDEBUG && _d('Nibble past', @{$self->{past_nibbles}}); + + } # not one nibble + + return bless $self, $class; +} + +sub more_boundaries { + my ($self) = @_; + return $self->SUPER::more_boundaries() if $self->{one_nibble}; + return scalar @{$self->{past_nibbles}} ?
1 : 0; +} + +sub statements { + my ($self) = @_; + + my $sths = $self->SUPER::statements(); + + $sths->{past_lower_boundary} = $self->{past_lower_sth}; + $sths->{past_upper_boundary} = $self->{past_upper_sth}; + + return $sths; +} + +sub _prepare_sths { + my ($self) = @_; + PTDEBUG && _d('Preparing out-of-bound statement handles'); + + if ( !$self->{one_nibble} ) { + my $dbh = $self->{Cxn}->dbh(); + $self->{past_lower_sth} = $dbh->prepare($self->{past_lower_sql}); + $self->{past_upper_sth} = $dbh->prepare($self->{past_upper_sql}); + $self->{explain_past_lower_sth} = $dbh->prepare($self->{explain_past_lower_sql}); + $self->{explain_past_upper_sth} = $dbh->prepare($self->{explain_past_upper_sql}); + } + + return $self->SUPER::_prepare_sths(); +} + +sub _next_boundaries { + my ($self) = @_; + + return $self->SUPER::_next_boundaries() unless $self->{no_more_boundaries}; + + if ( my $past = shift @{$self->{past_nibbles}} ) { + if ( $past eq 'lower' ) { + PTDEBUG && _d('Nibbling values below lower boundary'); + $self->{nibble_sth} = $self->{past_lower_sth}; + $self->{explain_nibble_sth} = $self->{explain_past_lower_sth}; + $self->{lower} = []; + $self->{upper} = $self->boundaries()->{first_lower}; + $self->{next_lower} = undef; + } + elsif ( $past eq 'upper' ) { + PTDEBUG && _d('Nibbling values above upper boundary'); + $self->{nibble_sth} = $self->{past_upper_sth}; + $self->{explain_nibble_sth} = $self->{explain_past_upper_sth}; + $self->{lower} = $self->boundaries()->{last_upper}; + $self->{upper} = []; + $self->{next_lower} = undef; + } + else { + die "Invalid past nibble: $past"; + } + return 1; # continue nibbling + } + + PTDEBUG && _d('Done nibbling past boundaries'); + return; # stop nibbling +} + +sub DESTROY { + my ( $self ) = @_; + foreach my $key ( keys %$self ) { + if ( $key =~ m/_sth$/ ) { + PTDEBUG && _d('Finish', $key); + $self->{$key}->finish(); + } + } + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End OobNibbleIterator package +# ########################################################################### + +# ########################################################################### +# Daemon package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Daemon.pm +# t/lib/Daemon.t +# See https://launchpad.net/percona-toolkit for more information. 
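+#
+# Editor's note: the following usage sketch is not part of the upstream
+# module; it only illustrates the constructor and run() defined below,
+# and the file paths shown are hypothetical:
+#
+#   my $daemon = Daemon->new(
+#      daemonize => 1,                   # fork, setsid, chdir to /
+#      pid_file  => '/tmp/example.pid',  # hypothetical path
+#      log_file  => '/tmp/example.log',  # child's STDOUT/STDERR append here
+#   );
+#   $daemon->run();   # parent exits here; the child carries on as a daemon
+#   # ... daemon work ...
+#   $daemon->remove_pid_file();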
+# ########################################################################### +{ +package Daemon; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use POSIX qw(setsid); +use Fcntl qw(:DEFAULT); + +sub new { + my ($class, %args) = @_; + my $self = { + log_file => $args{log_file}, + pid_file => $args{pid_file}, + daemonize => $args{daemonize}, + force_log_file => $args{force_log_file}, + parent_exit => $args{parent_exit}, + pid_file_owner => 0, + }; + return bless $self, $class; +} + +sub run { + my ($self) = @_; + + my $daemonize = $self->{daemonize}; + my $pid_file = $self->{pid_file}; + my $log_file = $self->{log_file}; + my $force_log_file = $self->{force_log_file}; + my $parent_exit = $self->{parent_exit}; + + PTDEBUG && _d('Starting daemon'); + + if ( $pid_file ) { + eval { + $self->_make_pid_file( + pid => $PID, # parent's pid + pid_file => $pid_file, + ); + }; + die "$EVAL_ERROR\n" if $EVAL_ERROR; + if ( !$daemonize ) { + $self->{pid_file_owner} = $PID; # parent's pid + } + } + + if ( $daemonize ) { + defined (my $child_pid = fork()) or die "Cannot fork: $OS_ERROR"; + if ( $child_pid ) { + PTDEBUG && _d('Forked child', $child_pid); + $parent_exit->($child_pid) if $parent_exit; + exit 0; + } + + POSIX::setsid() or die "Cannot start a new session: $OS_ERROR"; + chdir '/' or die "Cannot chdir to /: $OS_ERROR"; + + if ( $pid_file ) { + $self->_update_pid_file( + pid => $PID, # child's pid + pid_file => $pid_file, + ); + $self->{pid_file_owner} = $PID; + } + } + + if ( $daemonize || $force_log_file ) { + PTDEBUG && _d('Redirecting STDIN to /dev/null'); + close STDIN; + open STDIN, '/dev/null' + or die "Cannot reopen STDIN to /dev/null: $OS_ERROR"; + if ( $log_file ) { + PTDEBUG && _d('Redirecting STDOUT and STDERR to', $log_file); + close STDOUT; + open STDOUT, '>>', $log_file + or die "Cannot open log file $log_file: $OS_ERROR"; + + close STDERR; + open STDERR, ">&STDOUT" + or die "Cannot dupe STDERR to STDOUT: $OS_ERROR"; + } + else { + if ( -t STDOUT ) { + PTDEBUG && _d('No log file and STDOUT is a terminal;', + 'redirecting to /dev/null'); + close STDOUT; + open STDOUT, '>', '/dev/null' + or die "Cannot reopen STDOUT to /dev/null: $OS_ERROR"; + } + if ( -t STDERR ) { + PTDEBUG && _d('No log file and STDERR is a terminal;', + 'redirecting to /dev/null'); + close STDERR; + open STDERR, '>', '/dev/null' + or die "Cannot reopen STDERR to /dev/null: $OS_ERROR"; + } + } + + $OUTPUT_AUTOFLUSH = 1; + } + + PTDEBUG && _d('Daemon running'); + return; +} + +sub _make_pid_file { + my ($self, %args) = @_; + my @required_args = qw(pid pid_file); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my $pid = $args{pid}; + my $pid_file = $args{pid_file}; + + eval { + sysopen(PID_FH, $pid_file, O_RDWR|O_CREAT|O_EXCL) or die $OS_ERROR; + print PID_FH $PID, "\n"; + close PID_FH; + }; + if ( my $e = $EVAL_ERROR ) { + if ( $e =~ m/file exists/i ) { + my $old_pid = $self->_check_pid_file( + pid_file => $pid_file, + pid => $PID, + ); + if ( $old_pid ) { + warn "Overwriting PID file $pid_file because PID $old_pid " + . 
"is not running.\n"; + } + $self->_update_pid_file( + pid => $PID, + pid_file => $pid_file + ); + } + else { + die "Error creating PID file $pid_file: $e\n"; + } + } + + return; +} + +sub _check_pid_file { + my ($self, %args) = @_; + my @required_args = qw(pid_file pid); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my $pid_file = $args{pid_file}; + my $pid = $args{pid}; + + PTDEBUG && _d('Checking if PID in', $pid_file, 'is running'); + + if ( ! -f $pid_file ) { + PTDEBUG && _d('PID file', $pid_file, 'does not exist'); + return; + } + + open my $fh, '<', $pid_file + or die "Error opening $pid_file: $OS_ERROR"; + my $existing_pid = do { local $/; <$fh> }; + chomp($existing_pid) if $existing_pid; + close $fh + or die "Error closing $pid_file: $OS_ERROR"; + + if ( $existing_pid ) { + if ( $existing_pid == $pid ) { + warn "The current PID $pid already holds the PID file $pid_file\n"; + return; + } + else { + PTDEBUG && _d('Checking if PID', $existing_pid, 'is running'); + my $pid_is_alive = kill 0, $existing_pid; + if ( $pid_is_alive ) { + die "PID file $pid_file exists and PID $existing_pid is running\n"; + } + } + } + else { + die "PID file $pid_file exists but it is empty. Remove the file " + . "if the process is no longer running.\n"; + } + + return $existing_pid; +} + +sub _update_pid_file { + my ($self, %args) = @_; + my @required_args = qw(pid pid_file); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my $pid = $args{pid}; + my $pid_file = $args{pid_file}; + + open my $fh, '>', $pid_file + or die "Cannot open $pid_file: $OS_ERROR"; + print { $fh } $pid, "\n" + or die "Cannot print to $pid_file: $OS_ERROR"; + close $fh + or warn "Cannot close $pid_file: $OS_ERROR"; + + return; +} + +sub remove_pid_file { + my ($self, $pid_file) = @_; + $pid_file ||= $self->{pid_file}; + if ( $pid_file && -f $pid_file ) { + unlink $self->{pid_file} + or warn "Cannot remove PID file $pid_file: $OS_ERROR"; + PTDEBUG && _d('Removed PID file'); + } + else { + PTDEBUG && _d('No PID to remove'); + } + return; +} + +sub DESTROY { + my ($self) = @_; + + if ( $self->{pid_file_owner} == $PID ) { + $self->remove_pid_file(); + } + + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Daemon package +# ########################################################################### + +# ########################################################################### +# SchemaIterator package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/SchemaIterator.pm +# t/lib/SchemaIterator.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package SchemaIterator; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +my $open_comment = qr{/\*!\d{5} }; +my $tbl_name = qr{ + CREATE\s+ + (?:TEMPORARY\s+)? + TABLE\s+ + (?:IF NOT EXISTS\s+)? 
+ ([^\(]+) +}x; + + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(OptionParser TableParser Quoter); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + + my ($file_itr, $dbh) = @args{qw(file_itr dbh)}; + die "I need either a dbh or file_itr argument" + if (!$dbh && !$file_itr) || ($dbh && $file_itr); + + my %resume; + if ( my $table = $args{resume} ) { + PTDEBUG && _d('Will resume from or after', $table); + my ($db, $tbl) = $args{Quoter}->split_unquote($table); + die "Resume table must be database-qualified: $table" + unless $db && $tbl; + $resume{db} = $db; + $resume{tbl} = $tbl; + } + + my $self = { + %args, + resume => \%resume, + filters => _make_filters(%args), + }; + return bless $self, $class; +} + +sub _make_filters { + my ( %args ) = @_; + my @required_args = qw(OptionParser Quoter); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($o, $q) = @args{@required_args}; + + my %filters; + + + my @simple_filters = qw( + databases tables engines + ignore-databases ignore-tables ignore-engines); + FILTER: + foreach my $filter ( @simple_filters ) { + if ( $o->has($filter) ) { + my $objs = $o->get($filter); + next FILTER unless $objs && scalar keys %$objs; + my $is_table = $filter =~ m/table/ ? 1 : 0; + foreach my $obj ( keys %$objs ) { + die "Undefined value for --$filter" unless $obj; + $obj = lc $obj; + if ( $is_table ) { + my ($db, $tbl) = $q->split_unquote($obj); + $db ||= '*'; + PTDEBUG && _d('Filter', $filter, 'value:', $db, $tbl); + $filters{$filter}->{$db}->{$tbl} = 1; + } + else { # database + PTDEBUG && _d('Filter', $filter, 'value:', $obj); + $filters{$filter}->{$obj} = 1; + } + } + } + } + + my @regex_filters = qw( + databases-regex tables-regex + ignore-databases-regex ignore-tables-regex); + REGEX_FILTER: + foreach my $filter ( @regex_filters ) { + if ( $o->has($filter) ) { + my $pat = $o->get($filter); + next REGEX_FILTER unless $pat; + $filters{$filter} = qr/$pat/; + PTDEBUG && _d('Filter', $filter, 'value:', $filters{$filter}); + } + } + + PTDEBUG && _d('Schema object filters:', Dumper(\%filters)); + return \%filters; +} + +sub next { + my ( $self ) = @_; + + if ( !$self->{initialized} ) { + $self->{initialized} = 1; + if ( $self->{resume}->{tbl} ) { + if ( !$self->table_is_allowed(@{$self->{resume}}{qw(db tbl)}) ) { + PTDEBUG && _d('Will resume after', + join('.', @{$self->{resume}}{qw(db tbl)})); + $self->{resume}->{after}->{tbl} = 1; + } + if ( !$self->database_is_allowed($self->{resume}->{db}) ) { + PTDEBUG && _d('Will resume after', $self->{resume}->{db}); + $self->{resume}->{after}->{db} = 1; + } + } + } + + my $schema_obj; + if ( $self->{file_itr} ) { + $schema_obj= $self->_iterate_files(); + } + else { # dbh + $schema_obj= $self->_iterate_dbh(); + } + + if ( $schema_obj ) { + if ( my $schema = $self->{Schema} ) { + $schema->add_schema_object($schema_obj); + } + PTDEBUG && _d('Next schema object:', + $schema_obj->{db}, $schema_obj->{tbl}); + } + + return $schema_obj; +} + +sub _iterate_files { + my ( $self ) = @_; + + if ( !$self->{fh} ) { + my ($fh, $file) = $self->{file_itr}->(); + if ( !$fh ) { + PTDEBUG && _d('No more files to iterate'); + return; + } + $self->{fh} = $fh; + $self->{file} = $file; + } + my $fh = $self->{fh}; + PTDEBUG && _d('Getting next schema object from', $self->{file}); + + local $INPUT_RECORD_SEPARATOR = ''; + CHUNK: + while (defined(my $chunk = <$fh>)) { + if ($chunk =~ m/Database: (\S+)/) { + my $db = $1; # XXX + $db =~ s/^`//; 
# strip leading ` + $db =~ s/`$//; # and trailing ` + if ( $self->database_is_allowed($db) + && $self->_resume_from_database($db) ) { + $self->{db} = $db; + } + } + elsif ($self->{db} && $chunk =~ m/CREATE TABLE/) { + if ($chunk =~ m/DROP VIEW IF EXISTS/) { + PTDEBUG && _d('Table is a VIEW, skipping'); + next CHUNK; + } + + my ($tbl) = $chunk =~ m/$tbl_name/; + $tbl =~ s/^\s*`//; + $tbl =~ s/`\s*$//; + if ( $self->_resume_from_table($tbl) + && $self->table_is_allowed($self->{db}, $tbl) ) { + my ($ddl) = $chunk =~ m/^(?:$open_comment)?(CREATE TABLE.+?;)$/ms; + if ( !$ddl ) { + warn "Failed to parse CREATE TABLE from\n" . $chunk; + next CHUNK; + } + $ddl =~ s/ \*\/;\Z/;/; # remove end of version comment + my $tbl_struct = $self->{TableParser}->parse($ddl); + if ( $self->engine_is_allowed($tbl_struct->{engine}) ) { + return { + db => $self->{db}, + tbl => $tbl, + name => $self->{Quoter}->quote($self->{db}, $tbl), + ddl => $ddl, + tbl_struct => $tbl_struct, + }; + } + } + } + } # CHUNK + + PTDEBUG && _d('No more schema objects in', $self->{file}); + close $self->{fh}; + $self->{fh} = undef; + + return $self->_iterate_files(); +} + +sub _iterate_dbh { + my ( $self ) = @_; + my $q = $self->{Quoter}; + my $tp = $self->{TableParser}; + my $dbh = $self->{dbh}; + PTDEBUG && _d('Getting next schema object from dbh', $dbh); + + if ( !defined $self->{dbs} ) { + my $sql = 'SHOW DATABASES'; + PTDEBUG && _d($sql); + my @dbs = grep { + $self->_resume_from_database($_) + && + $self->database_is_allowed($_) + } @{$dbh->selectcol_arrayref($sql)}; + PTDEBUG && _d('Found', scalar @dbs, 'databases'); + $self->{dbs} = \@dbs; + } + + DATABASE: + while ( $self->{db} || defined(my $db = shift @{$self->{dbs}}) ) { + if ( !$self->{db} ) { + PTDEBUG && _d('Next database:', $db); + $self->{db} = $db; + } + + if ( !$self->{tbls} ) { + my $sql = 'SHOW /*!50002 FULL*/ TABLES FROM ' . 
$q->quote($self->{db}); + PTDEBUG && _d($sql); + my @tbls = map { + $_->[0]; # (tbl, type) + } + grep { + my ($tbl, $type) = @$_; + (!$type || ($type ne 'VIEW')) + && $self->_resume_from_table($tbl) + && $self->table_is_allowed($self->{db}, $tbl); + } + + eval { @{$dbh->selectall_arrayref($sql)}; }; + if ($EVAL_ERROR) { + warn "Skipping $self->{db}..."; + $self->{db} = undef; + next; + } + + PTDEBUG && _d('Found', scalar @tbls, 'tables in database',$self->{db}); + $self->{tbls} = \@tbls; + } + + TABLE: + while ( my $tbl = shift @{$self->{tbls}} ) { + my $ddl = eval { $tp->get_create_table($dbh, $self->{db}, $tbl) }; + if ( my $e = $EVAL_ERROR ) { + my $table_name = "$self->{db}.$tbl"; + if ( $e =~ /\QTable '$table_name' doesn't exist/ ) { + PTDEBUG && _d("$table_name no longer exists"); + } + else { + warn "Skipping $table_name because SHOW CREATE TABLE failed: $e"; + } + next TABLE; + } + + my $tbl_struct = $tp->parse($ddl); + if ( $self->engine_is_allowed($tbl_struct->{engine}) ) { + return { + db => $self->{db}, + tbl => $tbl, + name => $q->quote($self->{db}, $tbl), + ddl => $ddl, + tbl_struct => $tbl_struct, + }; + } + } + + PTDEBUG && _d('No more tables in database', $self->{db}); + $self->{db} = undef; + $self->{tbls} = undef; + } # DATABASE + + PTDEBUG && _d('No more databases'); + return; +} + +sub database_is_allowed { + my ( $self, $db ) = @_; + die "I need a db argument" unless $db; + + $db = lc $db; + + my $filter = $self->{filters}; + + if ( $db =~ m/^(information_schema|performance_schema|lost\+found|percona_schema)$/ ) { + PTDEBUG && _d('Database', $db, 'is a system database, ignoring'); + return 0; + } + + if ( $self->{filters}->{'ignore-databases'}->{$db} ) { + PTDEBUG && _d('Database', $db, 'is in --ignore-databases list'); + return 0; + } + + if ( $filter->{'ignore-databases-regex'} + && $db =~ $filter->{'ignore-databases-regex'} ) { + PTDEBUG && _d('Database', $db, 'matches --ignore-databases-regex'); + return 0; + } + + if ( $filter->{'databases'} + && !$filter->{'databases'}->{$db} ) { + PTDEBUG && _d('Database', $db, 'is not in --databases list, ignoring'); + return 0; + } + + if ( $filter->{'databases-regex'} + && $db !~ $filter->{'databases-regex'} ) { + PTDEBUG && _d('Database', $db, 'does not match --databases-regex, ignoring'); + return 0; + } + + return 1; +} + +sub table_is_allowed { + my ( $self, $db, $tbl ) = @_; + die "I need a db argument" unless $db; + die "I need a tbl argument" unless $tbl; + + $db = lc $db; + $tbl = lc $tbl; + + my $filter = $self->{filters}; + + return 0 if $db eq 'mysql' && $tbl =~ m/^(?: + general_log + |gtid_executed + |innodb_index_stats + |innodb_table_stats + |slave_master_info + |slave_relay_log_info + |slave_worker_info + |slow_log + )$/x; + + if ( $filter->{'ignore-tables'}->{'*'}->{$tbl} + || $filter->{'ignore-tables'}->{$db}->{$tbl}) { + PTDEBUG && _d('Table', $tbl, 'is in --ignore-tables list'); + return 0; + } + + if ( $filter->{'ignore-tables-regex'} + && $tbl =~ $filter->{'ignore-tables-regex'} ) { + PTDEBUG && _d('Table', $tbl, 'matches --ignore-tables-regex'); + return 0; + } + + if ( $filter->{'tables'} + && (!$filter->{'tables'}->{'*'}->{$tbl} && !$filter->{'tables'}->{$db}->{$tbl}) ) { + PTDEBUG && _d('Table', $tbl, 'is not in --tables list, ignoring'); + return 0; + } + + if ( $filter->{'tables-regex'} + && $tbl !~ $filter->{'tables-regex'} ) { + PTDEBUG && _d('Table', $tbl, 'does not match --tables-regex, ignoring'); + return 0; + } + + if ( $filter->{'tables'} + && $filter->{'tables'}->{$tbl} + && 
$filter->{'tables'}->{$tbl} ne '*' + && $filter->{'tables'}->{$tbl} ne $db ) { + PTDEBUG && _d('Table', $tbl, 'is only allowed in database', + $filter->{'tables'}->{$tbl}); + return 0; + } + + return 1; +} + +sub engine_is_allowed { + my ( $self, $engine ) = @_; + + if ( !$engine ) { + PTDEBUG && _d('No engine specified; allowing the table'); + return 1; + } + + $engine = lc $engine; + + my $filter = $self->{filters}; + + if ( $filter->{'ignore-engines'}->{$engine} ) { + PTDEBUG && _d('Engine', $engine, 'is in --ignore-engines list'); + return 0; + } + + if ( $filter->{'engines'} + && !$filter->{'engines'}->{$engine} ) { + PTDEBUG && _d('Engine', $engine, 'is not in --engines list, ignoring'); + return 0; + } + + return 1; +} + +sub _resume_from_database { + my ($self, $db) = @_; + + return 1 unless $self->{resume}->{db}; + if ( $db eq $self->{resume}->{db} ) { + if ( !$self->{resume}->{after}->{db} ) { + PTDEBUG && _d('Resuming from db', $db); + delete $self->{resume}->{db}; + return 1; + } + else { + PTDEBUG && _d('Resuming after db', $db); + delete $self->{resume}->{db}; + delete $self->{resume}->{tbl}; + } + } + + return 0; +} + +sub _resume_from_table { + my ($self, $tbl) = @_; + + return 1 unless $self->{resume}->{tbl}; + + if ( $tbl eq $self->{resume}->{tbl} ) { + if ( !$self->{resume}->{after}->{tbl} ) { + PTDEBUG && _d('Resuming from table', $tbl); + delete $self->{resume}->{tbl}; + return 1; + } + else { + PTDEBUG && _d('Resuming after table', $tbl); + delete $self->{resume}->{tbl}; + } + } + + return 0; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End SchemaIterator package +# ########################################################################### + +# ########################################################################### +# Retry package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Retry.pm +# t/lib/Retry.t +# See https://launchpad.net/percona-toolkit for more information.
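+#
+# Editor's note: the following usage sketch is not part of the upstream
+# module; it only illustrates the callback contract of retry() defined
+# below ($dbh and the statement are hypothetical):
+#
+#   my $retry  = Retry->new();
+#   my $result = $retry->retry(
+#      tries      => 3,                              # default is 3
+#      wait       => sub { sleep 1; },               # between attempts
+#      try        => sub { $dbh->do("SELECT 1") },   # may die; retried
+#      fail       => sub { return 1; },              # true means try again
+#      final_fail => sub { my (%args) = @_; die $args{error}; },
+#   );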
+# ########################################################################### +{ +package Retry; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Time::HiRes qw(sleep); + +sub new { + my ( $class, %args ) = @_; + my $self = { + %args, + }; + return bless $self, $class; +} + +sub retry { + my ( $self, %args ) = @_; + my @required_args = qw(try fail final_fail); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my ($try, $fail, $final_fail) = @args{@required_args}; + my $wait = $args{wait} || sub { sleep 1; }; + my $tries = $args{tries} || 3; + + my $last_error; + my $tryno = 0; + TRY: + while ( ++$tryno <= $tries ) { + PTDEBUG && _d("Try", $tryno, "of", $tries); + my $result; + eval { + $result = $try->(tryno=>$tryno); + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d("Try code failed:", $EVAL_ERROR); + $last_error = $EVAL_ERROR; + + if ( $tryno < $tries ) { # more retries + my $retry = $fail->(tryno=>$tryno, error=>$last_error); + last TRY unless $retry; + PTDEBUG && _d("Calling wait code"); + $wait->(tryno=>$tryno); + } + } + else { + PTDEBUG && _d("Try code succeeded"); + return $result; + } + } + + PTDEBUG && _d('Try code did not succeed'); + return $final_fail->(error=>$last_error); +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Retry package +# ########################################################################### + +# ########################################################################### +# Transformers package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Transformers.pm +# t/lib/Transformers.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Transformers; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Time::Local qw(timegm timelocal); +use Digest::MD5 qw(md5_hex); +use B qw(); + +BEGIN { + require Exporter; + our @ISA = qw(Exporter); + our %EXPORT_TAGS = (); + our @EXPORT = (); + our @EXPORT_OK = qw( + micro_t + percentage_of + secs_to_time + time_to_secs + shorten + ts + parse_timestamp + unix_timestamp + any_unix_timestamp + make_checksum + crc32 + encode_json + ); +} + +our $mysql_ts = qr/(\d\d)(\d\d)(\d\d) +(\d+):(\d+):(\d+)(\.\d+)?/; +our $proper_ts = qr/(\d\d\d\d)-(\d\d)-(\d\d)[T ](\d\d):(\d\d):(\d\d)(\.\d+)?/; +our $n_ts = qr/(\d{1,5})([shmd]?)/; # Limit \d{1,5} because \d{6} looks + +sub micro_t { + my ( $t, %args ) = @_; + my $p_ms = defined $args{p_ms} ? $args{p_ms} : 0; # precision for ms vals + my $p_s = defined $args{p_s} ? $args{p_s} : 0; # precision for s vals + my $f; + + $t = 0 if $t < 0; + + $t = sprintf('%.17f', $t) if $t =~ /e/; + + $t =~ s/\.(\d{1,6})\d*/\.$1/; + + if ($t > 0 && $t <= 0.000999) { + $f = ($t * 1000000) . 'us'; + } + elsif ($t >= 0.001000 && $t <= 0.999999) { + $f = sprintf("%.${p_ms}f", $t * 1000); + $f = ($f * 1) . 'ms'; # * 1 to remove insignificant zeros + } + elsif ($t >= 1) { + $f = sprintf("%.${p_s}f", $t); + $f = ($f * 1) . 
's'; # * 1 to remove insignificant zeros + } + else { + $f = 0; # $t should = 0 at this point + } + + return $f; +} + +sub percentage_of { + my ( $is, $of, %args ) = @_; + my $p = $args{p} || 0; # float precision + my $fmt = $p ? "%.${p}f" : "%d"; + return sprintf $fmt, ($is * 100) / ($of ||= 1); +} + +sub secs_to_time { + my ( $secs, $fmt ) = @_; + $secs ||= 0; + return '00:00' unless $secs; + + $fmt ||= $secs >= 86_400 ? 'd' + : $secs >= 3_600 ? 'h' + : 'm'; + + return + $fmt eq 'd' ? sprintf( + "%d+%02d:%02d:%02d", + int($secs / 86_400), + int(($secs % 86_400) / 3_600), + int(($secs % 3_600) / 60), + $secs % 60) + : $fmt eq 'h' ? sprintf( + "%02d:%02d:%02d", + int(($secs % 86_400) / 3_600), + int(($secs % 3_600) / 60), + $secs % 60) + : sprintf( + "%02d:%02d", + int(($secs % 3_600) / 60), + $secs % 60); +} + +sub time_to_secs { + my ( $val, $default_suffix ) = @_; + die "I need a val argument" unless defined $val; + my $t = 0; + my ( $prefix, $num, $suffix ) = $val =~ m/([+-]?)(\d+)([a-z])?$/; + $suffix = $suffix || $default_suffix || 's'; + if ( $suffix =~ m/[smhd]/ ) { + $t = $suffix eq 's' ? $num * 1 # Seconds + : $suffix eq 'm' ? $num * 60 # Minutes + : $suffix eq 'h' ? $num * 3600 # Hours + : $num * 86400; # Days + + $t *= -1 if $prefix && $prefix eq '-'; + } + else { + die "Invalid suffix for $val: $suffix"; + } + return $t; +} + +sub shorten { + my ( $num, %args ) = @_; + my $p = defined $args{p} ? $args{p} : 2; # float precision + my $d = defined $args{d} ? $args{d} : 1_024; # divisor + my $n = 0; + my @units = ('', qw(k M G T P E Z Y)); + while ( $num >= $d && $n < @units - 1 ) { + $num /= $d; + ++$n; + } + return sprintf( + $num =~ m/\./ || $n + ? '%1$.'.$p.'f%2$s' + : '%1$d', + $num, $units[$n]); +} + +sub ts { + my ( $time, $gmt ) = @_; + my ( $sec, $min, $hour, $mday, $mon, $year ) + = $gmt ? gmtime($time) : localtime($time); + $mon += 1; + $year += 1900; + my $val = sprintf("%d-%02d-%02dT%02d:%02d:%02d", + $year, $mon, $mday, $hour, $min, $sec); + if ( my ($us) = $time =~ m/(\.\d+)$/ ) { + $us = sprintf("%.6f", $us); + $us =~ s/^0\././; + $val .= $us; + } + return $val; +} + +sub parse_timestamp { + my ( $val ) = @_; + if ( my($y, $m, $d, $h, $i, $s, $f) + = $val =~ m/^$mysql_ts$/ ) + { + return sprintf "%d-%02d-%02d %02d:%02d:" + . (defined $f ? '%09.6f' : '%02d'), + $y + 2000, $m, $d, $h, $i, (defined $f ? $s + $f : $s); + } + elsif ( $val =~ m/^$proper_ts$/ ) { + return $val; + } + return $val; +} + +sub unix_timestamp { + my ( $val, $gmt ) = @_; + if ( my($y, $m, $d, $h, $i, $s, $us) = $val =~ m/^$proper_ts$/ ) { + $val = $gmt + ? timegm($s, $i, $h, $d, $m - 1, $y) + : timelocal($s, $i, $h, $d, $m - 1, $y); + if ( defined $us ) { + $us = sprintf('%.6f', $us); + $us =~ s/^0\././; + $val .= $us; + } + } + return $val; +} + +sub any_unix_timestamp { + my ( $val, $callback ) = @_; + + if ( my ($n, $suffix) = $val =~ m/^$n_ts$/ ) { + $n = $suffix eq 's' ? $n # Seconds + : $suffix eq 'm' ? $n * 60 # Minutes + : $suffix eq 'h' ? $n * 3600 # Hours + : $suffix eq 'd' ? 
$n * 86400 # Days + : $n; # default: Seconds + PTDEBUG && _d('ts is now - N[shmd]:', $n); + return time - $n; + } + elsif ( $val =~ m/^\d{9,}/ ) { + PTDEBUG && _d('ts is already a unix timestamp'); + return $val; + } + elsif ( my ($ymd, $hms) = $val =~ m/^(\d{6})(?:\s+(\d+:\d+:\d+))?/ ) { + PTDEBUG && _d('ts is MySQL slow log timestamp'); + $val .= ' 00:00:00' unless $hms; + return unix_timestamp(parse_timestamp($val)); + } + elsif ( ($ymd, $hms) = $val =~ m/^(\d{4}-\d\d-\d\d)(?:[T ](\d+:\d+:\d+))?/) { + PTDEBUG && _d('ts is properly formatted timestamp'); + $val .= ' 00:00:00' unless $hms; + return unix_timestamp($val); + } + else { + PTDEBUG && _d('ts is MySQL expression'); + return $callback->($val) if $callback && ref $callback eq 'CODE'; + } + + PTDEBUG && _d('Unknown ts type:', $val); + return; +} + +sub make_checksum { + my ( $val ) = @_; + my $checksum = uc substr(md5_hex($val), -16); + PTDEBUG && _d($checksum, 'checksum for', $val); + return $checksum; +} + +sub crc32 { + my ( $string ) = @_; + return unless $string; + my $poly = 0xEDB88320; + my $crc = 0xFFFFFFFF; + foreach my $char ( split(//, $string) ) { + my $comp = ($crc ^ ord($char)) & 0xFF; + for ( 1 .. 8 ) { + $comp = $comp & 1 ? $poly ^ ($comp >> 1) : $comp >> 1; + } + $crc = (($crc >> 8) & 0x00FFFFFF) ^ $comp; + } + return $crc ^ 0xFFFFFFFF; +} + +my $got_json = eval { require JSON }; +sub encode_json { + return JSON::encode_json(@_) if $got_json; + my ( $data ) = @_; + return (object_to_json($data) || ''); +} + + +sub object_to_json { + my ($obj) = @_; + my $type = ref($obj); + + if($type eq 'HASH'){ + return hash_to_json($obj); + } + elsif($type eq 'ARRAY'){ + return array_to_json($obj); + } + else { + return value_to_json($obj); + } +} + +sub hash_to_json { + my ($obj) = @_; + my @res; + for my $k ( sort { $a cmp $b } keys %$obj ) { + push @res, string_to_json( $k ) + . ":" + . ( object_to_json( $obj->{$k} ) || value_to_json( $obj->{$k} ) ); + } + return '{' . ( @res ? join( ",", @res ) : '' ) . '}'; +} + +sub array_to_json { + my ($obj) = @_; + my @res; + + for my $v (@$obj) { + push @res, object_to_json($v) || value_to_json($v); + } + + return '[' . ( @res ? join( ",", @res ) : '' ) . ']'; +} + +sub value_to_json { + my ($value) = @_; + + return 'null' if(!defined $value); + + my $b_obj = B::svref_2object(\$value); # for round trip problem + my $flags = $b_obj->FLAGS; + return $value # as is + if $flags & ( B::SVp_IOK | B::SVp_NOK ) and !( $flags & B::SVp_POK ); # SvTYPE is IV or NV? + + my $type = ref($value); + + if( !$type ) { + return string_to_json($value); + } + else { + return 'null'; + } + +} + +my %esc = ( + "\n" => '\n', + "\r" => '\r', + "\t" => '\t', + "\f" => '\f', + "\b" => '\b', + "\"" => '\"', + "\\" => '\\\\', + "\'" => '\\\'', +); + +sub string_to_json { + my ($arg) = @_; + + $arg =~ s/([\x22\x5c\n\r\t\f\b])/$esc{$1}/g; + $arg =~ s/\//\\\//g; + $arg =~ s/([\x00-\x08\x0b\x0e-\x1f])/'\\u00' . unpack('H2', $1)/eg; + + utf8::upgrade($arg); + utf8::encode($arg); + + return '"' . $arg . '"'; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Transformers package +# ########################################################################### + +# ########################################################################### +# Progress package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Progress.pm +# t/lib/Progress.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Progress; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +sub new { + my ( $class, %args ) = @_; + foreach my $arg (qw(jobsize)) { + die "I need a $arg argument" unless defined $args{$arg}; + } + if ( (!$args{report} || !$args{interval}) ) { + if ( $args{spec} && @{$args{spec}} == 2 ) { + @args{qw(report interval)} = @{$args{spec}}; + } + else { + die "I need either report and interval arguments, or a spec"; + } + } + + my $name = $args{name} || "Progress"; + $args{start} ||= time(); + my $self; + $self = { + last_reported => $args{start}, + fraction => 0, # How complete the job is + callback => sub { + my ($fraction, $elapsed, $remaining) = @_; + printf STDERR "$name: %3d%% %s remain\n", + $fraction * 100, + Transformers::secs_to_time($remaining); + }, + %args, + }; + return bless $self, $class; +} + +sub validate_spec { + shift @_ if $_[0] eq 'Progress'; # Permit calling as Progress-> or Progress:: + my ( $spec ) = @_; + if ( @$spec != 2 ) { + die "spec array requires a two-part argument\n"; + } + if ( $spec->[0] !~ m/^(?:percentage|time|iterations)$/ ) { + die "spec array's first element must be one of " + . "percentage,time,iterations\n"; + } + if ( $spec->[1] !~ m/^\d+$/ ) { + die "spec array's second element must be an integer\n"; + } +} + +sub set_callback { + my ( $self, $callback ) = @_; + $self->{callback} = $callback; +} + +sub start { + my ( $self, $start ) = @_; + $self->{start} = $self->{last_reported} = $start || time(); + $self->{first_report} = 0; +} + +sub update { + my ( $self, $callback, %args ) = @_; + my $jobsize = $self->{jobsize}; + my $now ||= $args{now} || time; + + $self->{iterations}++; # How many updates have happened; + + if ( !$self->{first_report} && $args{first_report} ) { + $args{first_report}->(); + $self->{first_report} = 1; + } + + if ( $self->{report} eq 'time' + && $self->{interval} > $now - $self->{last_reported} + ) { + return; + } + elsif ( $self->{report} eq 'iterations' + && ($self->{iterations} - 1) % $self->{interval} > 0 + ) { + return; + } + $self->{last_reported} = $now; + + my $completed = $callback->(); + $self->{updates}++; # How many times we have run the update callback + + return if $completed > $jobsize; + + my $fraction = $completed > 0 ? 
$completed / $jobsize : 0; + + if ( $self->{report} eq 'percentage' + && $self->fraction_modulo($self->{fraction}) + >= $self->fraction_modulo($fraction) + ) { + $self->{fraction} = $fraction; + return; + } + $self->{fraction} = $fraction; + + my $elapsed = $now - $self->{start}; + my $remaining = 0; + my $eta = $now; + if ( $completed > 0 && $completed <= $jobsize && $elapsed > 0 ) { + my $rate = $completed / $elapsed; + if ( $rate > 0 ) { + $remaining = ($jobsize - $completed) / $rate; + $eta = $now + int($remaining); + } + } + $self->{callback}->($fraction, $elapsed, $remaining, $eta, $completed); +} + +sub fraction_modulo { + my ( $self, $num ) = @_; + $num *= 100; # Convert from fraction to percentage + return sprintf('%d', + sprintf('%d', $num / $self->{interval}) * $self->{interval}); +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Progress package +# ########################################################################### + +# ########################################################################### +# ReplicaLagWaiter package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/ReplicaLagWaiter.pm +# t/lib/ReplicaLagWaiter.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package ReplicaLagWaiter; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Time::HiRes qw(sleep time); +use Data::Dumper; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(oktorun get_lag sleep max_lag slaves); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + + my $self = { + %args, + }; + + return bless $self, $class; +} + +sub wait { + my ( $self, %args ) = @_; + my @required_args = qw(); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $pr = $args{Progress}; + + my $oktorun = $self->{oktorun}; + my $get_lag = $self->{get_lag}; + my $sleep = $self->{sleep}; + my $slaves = $self->{slaves}; + my $max_lag = $self->{max_lag}; + + my $worst; # most lagging slave + my $pr_callback; + my $pr_first_report; + if ( $pr ) { + $pr_callback = sub { + my ($fraction, $elapsed, $remaining, $eta, $completed) = @_; + my $dsn_name = $worst->{cxn}->name(); + if ( defined $worst->{lag} ) { + print STDERR "Replica lag is " . ($worst->{lag} || '?') + . " seconds on $dsn_name. Waiting.\n"; + } + else { + if ($self->{fail_on_stopped_replication}) { + die 'replication is stopped'; + } + print STDERR "Replica $dsn_name is stopped. Waiting.\n"; + } + return; + }; + $pr->set_callback($pr_callback); + + $pr_first_report = sub { + my $dsn_name = $worst->{cxn}->name(); + if ( !defined $worst->{lag} ) { + if ($self->{fail_on_stopped_replication}) { + die 'replication is stopped'; + } + print STDERR "Replica $dsn_name is stopped. 
Waiting.\n"; + } + return; + }; + } + + my @lagged_slaves = map { {cxn=>$_, lag=>undef} } @$slaves; + while ( $oktorun->() && @lagged_slaves ) { + PTDEBUG && _d('Checking slave lag'); + for my $i ( 0..$#lagged_slaves ) { + my $lag = $get_lag->($lagged_slaves[$i]->{cxn}); + PTDEBUG && _d($lagged_slaves[$i]->{cxn}->name(), + 'slave lag:', $lag); + if ( !defined $lag || $lag > $max_lag ) { + $lagged_slaves[$i]->{lag} = $lag; + } + else { + delete $lagged_slaves[$i]; + } + } + + @lagged_slaves = grep { defined $_ } @lagged_slaves; + if ( @lagged_slaves ) { + @lagged_slaves = reverse sort { + defined $a->{lag} && defined $b->{lag} ? $a->{lag} <=> $b->{lag} + : defined $a->{lag} ? -1 + : 1; + } @lagged_slaves; + $worst = $lagged_slaves[0]; + PTDEBUG && _d(scalar @lagged_slaves, 'slaves are lagging, worst:', + $worst->{lag}, 'on', Dumper($worst->{cxn}->dsn())); + + if ( $pr ) { + $pr->update( + sub { return 0; }, + first_report => $pr_first_report, + ); + } + + PTDEBUG && _d('Calling sleep callback'); + $sleep->($worst->{cxn}, $worst->{lag}); + } + } + + PTDEBUG && _d('All slaves caught up'); + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End ReplicaLagWaiter package +# ########################################################################### + +# This program is copyright 2010-2011 Percona Ireland Ltd. +# Feedback and improvements are welcome. +# +# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. +# +# This program is free software; you can redistribute it and/or modify it under +# the terms of the GNU General Public License as published by the Free Software +# Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +# systems, you can issue `man perlgpl' or `man perlartistic' to read these +# licenses. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, write to the Free Software Foundation, Inc., 59 Temple +# Place, Suite 330, Boston, MA 02111-1307 USA. +# ########################################################################### +# MySQLConfig package +# ########################################################################### +{ +# Package: MySQLConfig +# MySQLConfig parses and encapsulates system variables and values from +# SHOW VARIABLES, option files, mysqld --help --verbose or my_print_defaults. +# A MySQLConfig object represents how MySQL is or would be configured given +# one of those inputs. If the input is SHOW VARIABLES, then the config is +# acive, i.e. MySQL's running config. All other inputs are inactive, i.e. +# how MySQL should or would be running if started with the config. +# +# Inactive configs are made to mimic SHOW VARIABLES so that MySQLConfig +# objects can be reliably compared with MySQLConfigComparer. This is +# necessary because the inputs are different in how they list values, +# how they treat variables with optional values, etc. +# +# Only variables present in the input are saved in the MySQLConfig object. +# So if returns false, then the variable did not appear in the input. 
+package MySQLConfig; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +my %can_be_duplicate = ( + replicate_wild_do_table => 1, + replicate_wild_ignore_table => 1, + replicate_rewrite_db => 1, + replicate_ignore_table => 1, + replicate_ignore_db => 1, + replicate_do_table => 1, + replicate_do_db => 1, +); + +# Sub: new +# +# Parameters: +# %args - Arguments +# +# Arguments: +# file - Filename of an option file, or containing output of +# mysqld --help --verbose, my_print_defaults or SHOW VARIABLES +# output - Text output of one of ^ if you want to slurp the file manually +# result_set - Arrayref of SHOW VARIABLES +# dbh - dbh to get SHOW VARIABLES from +# TextResultSetParser - object if file or output +# arg is given +# +# Returns: +# MySQLConfig object +sub new { + my ( $class, %args ) = @_; + my @requires_one_of = qw(file output result_set dbh); + my $required_arg = grep { $args{$_} } @requires_one_of; + if ( !$required_arg ) { + die "I need a " . join(', ', @requires_one_of[0..$#requires_one_of-1]) + . " or " . $requires_one_of[-1] . " argument"; + } + if ( $required_arg > 1 ) { + die "Specify only one " + . join(', ', @requires_one_of[0..$#requires_one_of-1]) + . " or " . $requires_one_of[-1] . " argument"; + } + if ( $args{file} || $args{output} ) { + die "I need a TextResultSetParser argument" + unless $args{TextResultSetParser}; + } + + if ( $args{file} ) { + $args{output} = _slurp_file($args{file}); + } + + my %config_data = _parse_config(%args); + + my $self = { + %args, + %config_data, + }; + + return bless $self, $class; +} + +sub _parse_config { + my ( %args ) = @_; + + my %config_data; + if ( $args{output} ) { + %config_data = _parse_config_output(%args); + } + elsif ( my $rows = $args{result_set} ) { + $config_data{format} = $args{format} || 'show_variables'; + $config_data{vars} = { map { @$_ } @$rows }; + } + elsif ( my $dbh = $args{dbh} ) { + $config_data{format} = $args{format} || 'show_variables'; + my $sql = "SHOW /*!40103 GLOBAL*/ VARIABLES"; + PTDEBUG && _d($dbh, $sql); + my $rows = $dbh->selectall_arrayref($sql); + $config_data{vars} = { map { @$_ } @$rows }; + $config_data{mysql_version} = _get_version($dbh); + } + else { + die "Unknown config source"; + } + + handle_special_vars(\%config_data); + + return %config_data; +} + +sub handle_special_vars { + my ($config_data) = @_; + + if ( $config_data->{vars}->{wsrep_provider_options} ) { + my $vars = $config_data->{vars}; + my $dupes = $config_data->{duplicate_vars}; + for my $wpo ( $vars->{wsrep_provider_options}, @{$dupes->{wsrep_provider_options} || [] } ) { + my %opts = $wpo =~ /(\S+)\s*=\s*(\S*)(?:;|;?$)/g; + while ( my ($var, $val) = each %opts ) { + $val =~ s/;$//; + if ( exists $vars->{$var} ) { + push @{$dupes->{$var} ||= []}, $val; + } + $vars->{$var} = $val; + } + } + # Delete from vars, but not from dupes, since we still want that + delete $vars->{wsrep_provider_options}; + } + + return; +} + +sub _parse_config_output { + my ( %args ) = @_; + my @required_args = qw(output TextResultSetParser); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($output) = @args{@required_args}; + PTDEBUG && _d("Parsing config output"); + + my $format = $args{format} || detect_config_output_format(%args); + if ( !$format ) { + die "Cannot auto-detect the MySQL config format"; + } + + my $vars; # variables hashref + my $dupes; # duplicate vars hashref + my $opt_files; # option files arrayref + if (
+      $vars = parse_show_variables(%args);
+   }
+   elsif ( $format eq 'mysqld' ) {
+      ($vars, $opt_files) = parse_mysqld(%args);
+   }
+   elsif ( $format eq 'my_print_defaults' ) {
+      ($vars, $dupes) = parse_my_print_defaults(%args);
+   }
+   elsif ( $format eq 'option_file' ) {
+      ($vars, $dupes) = parse_option_file(%args);
+   }
+   else {
+      die "Invalid MySQL config format: $format";
+   }
+
+   die "Failed to parse MySQL config" unless $vars && keys %$vars;
+
+   if ( $format ne 'show_variables' ) {
+      _mimic_show_variables(
+         %args,
+         format => $format,
+         vars   => $vars,
+      );
+   }
+
+   return (
+      format         => $format,
+      vars           => $vars,
+      option_files   => $opt_files,
+      duplicate_vars => $dupes,
+   );
+}
+
+sub detect_config_output_format {
+   my ( %args ) = @_;
+   my @required_args = qw(output);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($output) = @args{@required_args};
+
+   my $format;
+   if (    $output =~ m/\|\s+\w+\s+\|\s+.+?\|/
+        || $output =~ m/\*+ \d/
+        || $output =~ m/Variable_name:\s+\w+/
+        || $output =~ m/Variable_name\s+Value$/m )
+   {
+      PTDEBUG && _d('show variables format');
+      $format = 'show_variables';
+   }
+   elsif (    $output =~ m/Starts the MySQL database server/
+           || $output =~ m/Default options are read from /
+           || $output =~ m/^help\s+TRUE /m )
+   {
+      PTDEBUG && _d('mysqld format');
+      $format = 'mysqld';
+   }
+   elsif ( $output =~ m/^--\w+/m ) {
+      PTDEBUG && _d('my_print_defaults format');
+      $format = 'my_print_defaults';
+   }
+   elsif ( $output =~ m/^\s*\[[a-zA-Z]+\]\s*$/m ) {
+      PTDEBUG && _d('option file format');
+      $format = 'option_file';
+   }
+
+   return $format;
+}
+
+sub parse_show_variables {
+   my ( %args ) = @_;
+   my @required_args = qw(output TextResultSetParser);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($output, $trp) = @args{@required_args};
+
+   my %config = map {
+      $_->{Variable_name} => $_->{Value}
+   } @{ $trp->parse($output) };
+
+   return \%config;
+}
+
+# Parse "mysqld --help --verbose" and return a hashref of variable=>values
+# and an arrayref of default defaults files if possible.  The "default
+# defaults files" are the defaults files that mysqld reads by default if no
+# defaults file is explicitly given by --default-file.
+sub parse_mysqld {
+   my ( %args ) = @_;
+   my @required_args = qw(output);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($output) = @args{@required_args};
+
+   # First look for the list of option files like
+   #   Default options are read from the following files in the given order:
+   #   /etc/my.cnf /usr/local/mysql/etc/my.cnf ~/.my.cnf
+   my @opt_files;
+   if ( $output =~ m/^Default options are read.+\n/mg ) {
+      my ($opt_files) = $output =~ m/\G^(.+)\n/m;
+      my %seen;
+      # Don't re-declare @opt_files here: a second "my" would shadow the
+      # outer array and the sub would always return an empty list.
+      @opt_files = grep { !$seen{$_}++ } split(' ', $opt_files);
+      PTDEBUG && _d('Option files:', @opt_files);
+   }
+   else {
+      PTDEBUG && _d("mysqld help output doesn't list option files");
+   }
+
+   # The list of sys vars and their default vals begins like:
+   #   Variables (--variable-name=value)
+   #   and boolean options {FALSE|TRUE}  Value (after reading options)
+   #   --------------------------------- -----------------------------
+   #   help                              TRUE
+   #   abort-slave-event-count           0
+   # So we search for that line of hyphens.
+   #
+   # It also ends with something like
+   #
+   #   wait_timeout                      28800
+   #
+   #   To see what values a running MySQL server is using, type
+   #   'mysqladmin variables' instead of 'mysqld --verbose --help'.
+   #
+   # So try to find it by locating a double newline, and strip it away.
+   if ( $output !~ m/^-+ -+$(.+?)(?:\n\n.+)?\z/sm ) {
+      PTDEBUG && _d("mysqld help output doesn't list vars and vals");
+      return;
+   }
+
+   # Grab the varval list.
+   my $varvals = $1;
+
+   # Parse the "var val" lines.  2nd retval is duplicates but there
+   # shouldn't be any with mysqld.
+   my ($config, undef) = _parse_varvals(
+      qr/^(\S+)(.*)$/,
+      $varvals,
+   );
+
+   return $config, \@opt_files;
+}
+
+# Parse "my_print_defaults" output and return a hashref of variable=>values
+# and a hashref of any duplicated variables.
+sub parse_my_print_defaults {
+   my ( %args ) = @_;
+   my @required_args = qw(output);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($output) = @args{@required_args};
+
+   # Parse the "--var=val" lines.
+   my ($config, $dupes) = _parse_varvals(
+      qr/^--([^=]+)(?:=(.*))?$/,
+      $output,
+   );
+
+   return $config, $dupes;
+}
+
+# Parse the [mysqld] section of an option file and return a hashref of
+# variable=>values and a hashref of any duplicated variables.
+sub parse_option_file {
+   my ( %args ) = @_;
+   my @required_args = qw(output);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($output) = @args{@required_args};
+
+   my ($mysqld_section) = $output =~ m/\[mysqld\](.+?)(?:^\s*\[\w+\]|\Z)/xms;
+   die "Failed to parse the [mysqld] section" unless $mysqld_section;
+
+   # Parse the "var=val" lines.
+   my ($config, $dupes) = _parse_varvals(
+      qr/^([^=]+)(?:=(.*))?$/,
+      $mysqld_section,
+   );
+
+   return $config, $dupes;
+}
+
+# Called by _parse_varvals(), takes two arguments: a regex, and
+# a string to match against.  The string will be split in lines,
+# and each line will be matched against the regex.
+# The regex must return two captures, although the second doesn't
+# have to match anything.
+# Returns a hashref of arrayrefs a la
+# { port => [ 12345, 12346 ], key_buffer_size => [ "16M" ] }
+sub _preprocess_varvals {
+   my ($re, $to_parse) = @_;
+
+   my %vars;
+   LINE:
+   foreach my $line ( split /\n/, $to_parse ) {
+      next LINE if $line =~ m/^\s*$/;    # no empty lines
+      next LINE if $line =~ /^\s*[#;]/;  # no # or ; comment lines
+
+      if ( $line !~ $re ) {
+         PTDEBUG && _d("Line <", $line, "> didn't match $re");
+         next LINE;
+      }
+
+      my ($var, $val) = ($1, $2);
+
+      # Variable names are usually specified like "log-bin"
+      # but in SHOW VARIABLES they're all like "log_bin".
+      $var =~ tr/-/_/;
+
+      # Remove trailing comments
+      $var =~ s/\s*#.*$//;
+
+      if ( !defined $val ) {
+         $val = '';
+      }
+
+      # Strip leading and trailing whitespace.
+      for my $item ($var, $val) {
+         $item =~ s/^\s+//;
+         $item =~ s/\s+$//;
+      }
+
+      push @{$vars{$var} ||= []}, $val;
+   }
+
+   return \%vars;
+}
+
+# Parses a string of variables and their values ("varvals"), returns two
+# hashrefs: one with normalized variable=>value, the other with duplicate
+# vars.
+sub _parse_varvals {
+   my ( $vars ) = _preprocess_varvals(@_);
+
+   # Config built from parsing the given varvals.
+   my %config;
+
+   # Discover duplicate vars.
+   my %duplicates;
+
+   while ( my ($var, $vals) = each %$vars ) {
+      my $val = _process_val( pop @$vals );
+      # If the variable has duplicates, then @$vals will contain
+      # the rest of the values
+      if ( @$vals && !$can_be_duplicate{$var} ) {
+         # The var is a duplicate (in the bad sense, i.e. where the user is
+         # probably unaware that there are two different values for this var
+         # but only the last is used).
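+         # For example, an option file that sets "max_connections = 100"
+         # and later "max_connections = 200" yields 200 in the config and
+         # records 100 here as a duplicate.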
+ PTDEBUG && _d("Duplicate var:", $var); + foreach my $current_val ( map { _process_val($_) } @$vals ) { + push @{$duplicates{$var} ||= []}, $current_val; + } + } + + PTDEBUG && _d("Var:", $var, "val:", $val); + + # Save this var-val. + $config{$var} = $val; + } + + return \%config, \%duplicates; +} + +my $quote_re = qr/ + \A # Start of value + (['"]) # Opening quote + (.*) # Value + \1 # Closing quote + \s*(?:\#.*)? # End of line comment + [\n\r]*\z # End of value +/x; +sub _process_val { + my ($val) = @_; + + if ( $val =~ $quote_re ) { + # If it matches the quote re, then $2 holds the value + $val = $2; + } + else { + # Otherwise, remove possible trailing comments + $val =~ s/\s*#.*//; + } + + if ( my ($num, $factor) = $val =~ m/(\d+)([KMGT])b?$/i ) { + # value is a size like 1k, 16M, etc. + my %factor_for = ( + k => 1_024, + m => 1_048_576, + g => 1_073_741_824, + t => 1_099_511_627_776, + ); + $val = $num * $factor_for{lc $factor}; + } + elsif ( $val =~ m/No default/ ) { + $val = ''; + } + return $val; +} + +# Sub: _mimic_show_variables +# Make the variables' values mimic SHOW VARIABLES. Different output formats +# list values differently. To make comparisons easier, outputs are made to +# mimic SHOW VARIABLES. +# +# Parameters: +# %args - Arguments +# +# Required Arguments: +# vars - Hashref of variables-values +# format - Config output format (mysqld, option_file, etc.) +sub _mimic_show_variables { + my ( %args ) = @_; + my @required_args = qw(vars format); + foreach my $arg ( @required_args ) { + die "I need a $arg arugment" unless $args{$arg}; + } + my ($vars, $format) = @args{@required_args}; + + foreach my $var ( keys %$vars ) { + if ( $vars->{$var} eq '' ) { + if ( $format eq 'mysqld' ) { + # mysqld lists "(No default value)" for certain variables + # that are not set/configured. _parse_varvals() turns this + # into a blank string. For most vars this means there's no + # value and SHOW VARIABLES will similarly show no value. + # But for log*, skip* and ignore* vars, SHOW VARIABLES will + # show OFF. But, log_error is an exception--it's practically + # always on. + if ( $var ne 'log_error' && $var =~ m/^(?:log|skip|ignore)/ ) { + $vars->{$var} = 'OFF'; + } + } + else { + # Output formats other than mysqld (e.g. option file), if + # a variable is listed then it's enabled, like --skip-federated. + # SHOW VARIBLES will show ON for these. + $vars->{$var} = 'ON'; + } + } + } + + return; +} + +sub _slurp_file { + my ( $file ) = @_; + die "I need a file argument" unless $file; + PTDEBUG && _d("Reading", $file); + open my $fh, '<', $file or die "Cannot open $file: $OS_ERROR"; + my $contents = do { local $/ = undef; <$fh> }; + close $fh; + return $contents; +} + +sub _get_version { + my ( $dbh ) = @_; + return unless $dbh; + my $version = $dbh->selectrow_arrayref('SELECT VERSION()')->[0]; + $version =~ s/(\d\.\d{1,2}.\d{1,2})/$1/; + PTDEBUG && _d('MySQL version', $version); + return $version; +} + +# ############################################################################# +# Accessor methods. +# ############################################################################# + +# Returns true if this MySQLConfig obj has the given variable. +sub has { + my ( $self, $var ) = @_; + return exists $self->{vars}->{$var}; +} + +# Return the value of the given variable. +sub value_of { + my ( $self, $var ) = @_; + return unless $var; + return $self->{vars}->{$var}; +} + +# Return hashref of all variables. 
+sub variables {
+   my ( $self, %args ) = @_;
+   return $self->{vars};
+}
+
+# Return hashref of duplicate variables.
+sub duplicate_variables {
+   my ( $self ) = @_;
+   return $self->{duplicate_vars};
+}
+
+# Return arrayref of option files.
+sub option_files {
+   my ( $self ) = @_;
+   return $self->{option_files};
+}
+
+# Return MySQL version.
+sub mysql_version {
+   my ( $self ) = @_;
+   return $self->{mysql_version};
+}
+
+# Return the config file format (mysqld, option file, etc.)
+sub format {
+   my ( $self ) = @_;
+   return $self->{format};
+}
+
+# Return true if the config is active (i.e. the effective config
+# that MySQL is using; only true if config is from SHOW VARIABLES).
+sub is_active {
+   my ( $self ) = @_;
+   return $self->{dbh} ? 1 : 0;
+}
+
+# Return true if the server has the given storage engine (e.g. 'ROCKSDB').
+sub has_engine {
+   my ($self, $engine) = @_;
+   die "I need an engine argument" unless $engine;
+   if (!$self->{dbh}) {
+      die "invalid dbh in has_engine method";
+   }
+
+   my $rows = $self->{dbh}->selectall_arrayref('SHOW ENGINES', {Slice=>{}});
+   my $is_enabled;
+   for my $row (@$rows) {
+      # SHOW ENGINES returns the column named "Engine", so don't rely on a
+      # lowercase key; compare the requested engine case-insensitively.
+      my $name = defined $row->{Engine} ? $row->{Engine} : $row->{engine};
+      if ( defined $name && uc($name) eq uc($engine) ) {
+         $is_enabled = 1;
+         last;
+      }
+   }
+   return $is_enabled;
+}
+
+sub _d {
+   my ($package, undef, $line) = caller 0;
+   @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
+        map { defined $_ ? $_ : 'undef' }
+        @_;
+   print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
+}
+
+1;
+}
+# ###########################################################################
+# End MySQLConfig package
+# ###########################################################################
+# ###########################################################################
+# MySQLStatusWaiter package
+# This package is a copy without comments from the original.  The original
+# with comments and its test file can be found in the Bazaar repository at,
+#   lib/MySQLStatusWaiter.pm
+#   t/lib/MySQLStatusWaiter.t
+# See https://launchpad.net/percona-toolkit for more information.
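+#
+# A minimal usage sketch (the callbacks here are illustrative assumptions;
+# the real ones are wired up in main() below):
+#
+#   my $waiter = MySQLStatusWaiter->new(
+#      max_spec   => [ 'Threads_running=25' ],
+#      get_status => sub { my ($var) = @_; ... },  # SHOW GLOBAL STATUS lookup
+#      sleep      => sub { sleep 1 },
+#      oktorun    => sub { return 1 },
+#   );
+#   $waiter->wait();  # returns once all watched values are below their max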
+# ###########################################################################
+{
+package MySQLStatusWaiter;
+
+use strict;
+use warnings FATAL => 'all';
+use POSIX qw( ceil );
+use English qw(-no_match_vars);
+use constant PTDEBUG => $ENV{PTDEBUG} || 0;
+
+sub new {
+   my ( $class, %args ) = @_;
+   my @required_args = qw(max_spec get_status sleep oktorun);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless defined $args{$arg};
+   }
+
+   PTDEBUG && _d('Parsing spec for max thresholds');
+   my $max_val_for = _parse_spec($args{max_spec});
+   if ( $max_val_for ) {
+      _check_and_set_vals(
+         vars             => $max_val_for,
+         get_status       => $args{get_status},
+         threshold_factor => 0.2,  # +20%
+      );
+   }
+
+   PTDEBUG && _d('Parsing spec for critical thresholds');
+   my $critical_val_for = _parse_spec($args{critical_spec} || []);
+   if ( $critical_val_for ) {
+      _check_and_set_vals(
+         vars             => $critical_val_for,
+         get_status       => $args{get_status},
+         threshold_factor => 1.0,  # double (x2; +100%)
+      );
+   }
+
+   my $self = {
+      get_status       => $args{get_status},
+      sleep            => $args{sleep},
+      oktorun          => $args{oktorun},
+      max_val_for      => $max_val_for,
+      critical_val_for => $critical_val_for,
+   };
+
+   return bless $self, $class;
+}
+
+sub _parse_spec {
+   my ($spec) = @_;
+
+   return unless $spec && scalar @$spec;
+
+   my %max_val_for;
+   foreach my $var_val ( @$spec ) {
+      die "Empty or undefined spec\n" unless $var_val;
+      $var_val =~ s/^\s+//;
+      $var_val =~ s/\s+$//;
+
+      my ($var, $val) = split /[:=]/, $var_val;
+      die "$var_val does not contain a variable\n" unless $var;
+      die "$var is not a variable name\n" unless $var =~ m/^[a-zA-Z_]+$/;
+
+      if ( !$val ) {
+         PTDEBUG && _d('Will get initial value for', $var, 'later');
+         $max_val_for{$var} = undef;
+      }
+      else {
+         die "The value for $var must be a number\n"
+            unless $val =~ m/^[\d\.]+$/;
+         $max_val_for{$var} = $val;
+      }
+   }
+
+   return \%max_val_for;
+}
+
+sub max_values {
+   my ($self) = @_;
+   return $self->{max_val_for};
+}
+
+sub critical_values {
+   my ($self) = @_;
+   return $self->{critical_val_for};
+}
+
+sub wait {
+   my ( $self, %args ) = @_;
+
+   return unless $self->{max_val_for};
+
+   my $pr = $args{Progress};  # optional
+
+   my $oktorun    = $self->{oktorun};
+   my $get_status = $self->{get_status};
+   my $sleep      = $self->{sleep};
+
+   my %vals_too_high = %{$self->{max_val_for}};
+   my $pr_callback;
+   if ( $pr ) {
+      $pr_callback = sub {
+         print STDERR "Pausing because "
+            . join(', ',
+                 map {
+                    "$_="
+                    . (defined $vals_too_high{$_} ? $vals_too_high{$_}
+                                                  : 'unknown')
+                 } sort keys %vals_too_high
+              )
+            . ".\n";
+         return;
+      };
+      $pr->set_callback($pr_callback);
+   }
+
+   while ( $oktorun->() ) {
+      PTDEBUG && _d('Checking status variables');
+      foreach my $var ( sort keys %vals_too_high ) {
+         my $val = $get_status->($var);
+         PTDEBUG && _d($var, '=', $val);
+         if ( $val
+              && exists $self->{critical_val_for}->{$var}
+              && $val >= $self->{critical_val_for}->{$var} ) {
+            die "$var=$val exceeds its critical threshold "
+               .
"$self->{critical_val_for}->{$var}\n"; + } + if ( $val >= $self->{max_val_for}->{$var} ) { + $vals_too_high{$var} = $val; + } + else { + delete $vals_too_high{$var}; + } + } + + last unless scalar keys %vals_too_high; + + PTDEBUG && _d(scalar keys %vals_too_high, 'values are too high:', + %vals_too_high); + if ( $pr ) { + $pr->update(sub { return 0; }); + } + PTDEBUG && _d('Calling sleep callback'); + $sleep->(); + %vals_too_high = %{$self->{max_val_for}}; # recheck all vars + } + + PTDEBUG && _d('All var vals are low enough'); + return; +} + +sub _check_and_set_vals { + my (%args) = @_; + my @required_args = qw(vars get_status threshold_factor); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my ($vars, $get_status, $threshold_factor) = @args{@required_args}; + + PTDEBUG && _d('Checking and setting values'); + return unless $vars && scalar %$vars; + + foreach my $var ( keys %$vars ) { + my $init_val = $get_status->($var); + die "Variable $var does not exist or its value is undefined\n" + unless defined $init_val; + my $val; + if ( defined $vars->{$var} ) { + $val = $vars->{$var}; + } + else { + PTDEBUG && _d('Initial', $var, 'value:', $init_val); + $val = ($init_val * $threshold_factor) + $init_val; + $vars->{$var} = int(ceil($val)); + } + PTDEBUG && _d('Wait if', $var, '>=', $val); + } +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End MySQLStatusWaiter package +# ########################################################################### + +# ########################################################################### +# WeightedAvgRate package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/WeightedAvgRate.pm +# t/lib/WeightedAvgRate.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package WeightedAvgRate; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(target_t); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + + my $self = { + %args, + avg_n => 0, + avg_t => 0, + weight => $args{weight} || 0.75, + }; + + return bless $self, $class; +} + +sub update { + my ($self, $n, $t) = @_; + PTDEBUG && _d('Master op time:', $n, 'n /', $t, 's'); + + if ( $self->{avg_n} && $self->{avg_t} ) { + $self->{avg_n} = ($self->{avg_n} * $self->{weight}) + $n; + $self->{avg_t} = ($self->{avg_t} * $self->{weight}) + $t; + $self->{avg_rate} = $self->{avg_n} / $self->{avg_t}; + PTDEBUG && _d('Weighted avg rate:', $self->{avg_rate}, 'n/s'); + } + else { + $self->{avg_n} = $n; + $self->{avg_t} = $t; + $self->{avg_rate} = $self->{avg_n} / $self->{avg_t}; + PTDEBUG && _d('Initial avg rate:', $self->{avg_rate}, 'n/s'); + } + + my $new_n = int($self->{avg_rate} * $self->{target_t}); + PTDEBUG && _d('Adjust n to', $new_n); + return $new_n; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End WeightedAvgRate package +# ########################################################################### + +# ########################################################################### +# IndexLength package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/IndexLength.pm +# t/lib/IndexLength.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ + +package IndexLength; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +use Carp; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(Quoter); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + + my $self = { + Quoter => $args{Quoter}, + }; + + return bless $self, $class; +} + +sub index_length { + my ($self, %args) = @_; + my @required_args = qw(Cxn tbl index); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($cxn) = @args{@required_args}; + + die "The tbl argument does not have a tbl_struct" + unless exists $args{tbl}->{tbl_struct}; + die "Index $args{index} does not exist in table $args{tbl}->{name}" + unless $args{tbl}->{tbl_struct}->{keys}->{$args{index}}; + + my $index_struct = $args{tbl}->{tbl_struct}->{keys}->{$args{index}}; + my $index_cols = $index_struct->{cols}; + my $n_index_cols = $args{n_index_cols}; + if ( !$n_index_cols || $n_index_cols > @$index_cols ) { + $n_index_cols = scalar @$index_cols; + } + + my $vals = $self->_get_first_values( + %args, + n_index_cols => $n_index_cols, + ); + + my $sql = $self->_make_range_query( + %args, + n_index_cols => $n_index_cols, + vals => $vals, + ); + my $sth = $cxn->dbh()->prepare($sql); + PTDEBUG && _d($sth->{Statement}, 'params:', @$vals); + $sth->execute(@$vals); + my $row = $sth->fetchrow_hashref(); + $sth->finish(); + PTDEBUG && _d('Range scan:', Dumper($row)); + return $row->{key_len}, $row->{key}; +} + +sub _get_first_values { + my ($self, %args) = @_; + my @required_args = qw(Cxn tbl index n_index_cols); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($cxn, $tbl, $index, $n_index_cols) = @args{@required_args}; + + my $q = $self->{Quoter}; + + my $index_struct = $tbl->{tbl_struct}->{keys}->{$index}; + my $index_cols = $index_struct->{cols}; + my $index_columns; + eval { + $index_columns = join (', ', + map { $q->quote($_) } @{$index_cols}[0..($n_index_cols - 1)]); + }; + if ($EVAL_ERROR) { + confess "$EVAL_ERROR"; + } + + + + my @where; + foreach my $col ( @{$index_cols}[0..($n_index_cols - 1)] ) { + push @where, $q->quote($col) . " IS NOT NULL" + } + + my $sql = "SELECT /*!40001 SQL_NO_CACHE */ $index_columns " + . "FROM $tbl->{name} FORCE INDEX (" . $q->quote($index) . ") " + . "WHERE " . join(' AND ', @where) + . " ORDER BY $index_columns " + . 
"LIMIT 1 /*key_len*/"; # only need 1 row + PTDEBUG && _d($sql); + my $vals = $cxn->dbh()->selectrow_arrayref($sql); + return $vals; +} + +sub _make_range_query { + my ($self, %args) = @_; + my @required_args = qw(tbl index n_index_cols vals); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($tbl, $index, $n_index_cols, $vals) = @args{@required_args}; + + my $q = $self->{Quoter}; + + my $index_struct = $tbl->{tbl_struct}->{keys}->{$index}; + my $index_cols = $index_struct->{cols}; + + my @where; + if ( $n_index_cols > 1 ) { + foreach my $n ( 0..($n_index_cols - 2) ) { + my $col = $index_cols->[$n]; + my $val = $tbl->{tbl_struct}->{type_for}->{$col} eq 'enum' ? "CAST(? AS UNSIGNED)" : "?"; + push @where, $q->quote($col) . " = " . $val; + } + } + + my $col = $index_cols->[$n_index_cols - 1]; + my $val = $vals->[-1]; # should only be as many vals as cols + my $condition = $tbl->{tbl_struct}->{type_for}->{$col} eq 'enum' ? "CAST(? AS UNSIGNED)" : "?"; + push @where, $q->quote($col) . " >= " . $condition; + + my $sql = "EXPLAIN SELECT /*!40001 SQL_NO_CACHE */ * " + . "FROM $tbl->{name} FORCE INDEX (" . $q->quote($index) . ") " + . "WHERE " . join(' AND ', @where) + . " /*key_len*/"; + return $sql; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End IndexLength package +# ########################################################################### + +# ########################################################################### +# Runtime package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Runtime.pm +# t/lib/Runtime.t +# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package Runtime; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(now); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless exists $args{$arg}; + } + + my $run_time = $args{run_time}; + if ( defined $run_time ) { + die "run_time must be > 0" if $run_time <= 0; + } + + my $now = $args{now}; + die "now must be a callback" unless ref $now eq 'CODE'; + + my $self = { + run_time => $run_time, + now => $now, + start_time => undef, + end_time => undef, + time_left => undef, + stop => 0, + }; + + return bless $self, $class; +} + +sub time_left { + my ( $self, %args ) = @_; + + if ( $self->{stop} ) { + PTDEBUG && _d("No time left because stop was called"); + return 0; + } + + my $now = $self->{now}->(%args); + PTDEBUG && _d("Current time:", $now); + + if ( !defined $self->{start_time} ) { + $self->{start_time} = $now; + } + + return unless defined $now; + + my $run_time = $self->{run_time}; + return unless defined $run_time; + + if ( !$self->{end_time} ) { + $self->{end_time} = $now + $run_time; + PTDEBUG && _d("End time:", $self->{end_time}); + } + + $self->{time_left} = $self->{end_time} - $now; + PTDEBUG && _d("Time left:", $self->{time_left}); + return $self->{time_left}; +} + +sub have_time { + my ( $self, %args ) = @_; + my $time_left = $self->time_left(%args); + return 1 if !defined $time_left; # run forever + return $time_left <= 0 ? 0 : 1; # <=0s means run time has elapsed +} + +sub time_elapsed { + my ( $self, %args ) = @_; + + my $start_time = $self->{start_time}; + return 0 unless $start_time; + + my $now = $self->{now}->(%args); + PTDEBUG && _d("Current time:", $now); + + my $time_elapsed = $now - $start_time; + PTDEBUG && _d("Time elapsed:", $time_elapsed); + if ( $time_elapsed < 0 ) { + warn "Current time $now is earlier than start time $start_time"; + } + return $time_elapsed; +} + +sub reset { + my ( $self ) = @_; + $self->{start_time} = undef; + $self->{end_time} = undef; + $self->{time_left} = undef; + $self->{stop} = 0; + PTDEBUG && _d("Reset run time"); + return; +} + +sub stop { + my ( $self ) = @_; + $self->{stop} = 1; + return; +} + +sub start { + my ( $self ) = @_; + $self->{stop} = 0; + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Runtime package +# ########################################################################### + +# ########################################################################### +# This is a combination of modules and programs in one -- a runnable module. +# http://www.perl.com/pub/a/2006/07/13/lightning-articles.html?page=last +# Or, look it up in the Camel book on pages 642 and 643 in the 3rd edition. +# +# Check at the end of this package for the call to main() which actually runs +# the program. 
+# ########################################################################### +package pt_table_checksum; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); + +use Percona::Toolkit; +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use POSIX qw(signal_h); +use List::Util qw(max); +use Time::HiRes qw(sleep time); +use Data::Dumper; +use Carp; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +use sigtrap 'handler', \&sig_int, 'normal-signals'; + +my $oktorun = 1; +my $print_header = 1; +my $exit_status = 0; +my $original_qrt_plugin_master_status = undef; + +# "exit codes 1 - 2, 126 - 165, and 255 [1] have special meanings, +# and should therefore be avoided for user-specified exit parameters" +# http://www.tldp.org/LDP/abs/html/exitcodes.html +our %PTC_EXIT_STATUS = ( + # General flags: + ERROR => 1, + ALREADY_RUNNING => 2, + CAUGHT_SIGNAL => 4, + NO_SLAVES_FOUND => 8, + # Tool-specific flags: + TABLE_DIFF => 16, + SKIP_CHUNK => 32, + SKIP_TABLE => 64, + REPLICATION_STOPPED => 128, +); + +# The following two hashes are used in exec_nibble(). +# They're static, so they do not need to be reset in main(). +# See also https://bugs.launchpad.net/percona-toolkit/+bug/919499 + +# Completely ignore these error codes. +my %ignore_code = ( + # Error: 1592 SQLSTATE: HY000 (ER_BINLOG_UNSAFE_STATEMENT) + # Message: Statement may not be safe to log in statement format. + # Ignore this warning because we have purposely set statement-based + # replication. + 1592 => 1, + 1300 => 1, +); + +# Warn once per-table for these error codes if the error message +# matches the pattern. +my %warn_code = ( + # Error: 1265 SQLSTATE: 01000 (WARN_DATA_TRUNCATED) + # Message: Data truncated for column '%s' at row %ld + 1265 => { + # any pattern + # use MySQL's message for this warning + }, + 1406 => { + # any pattern + # use MySQL's message for this warning + }, +); + +sub main { + # Reset global vars else tests will fail in strange ways. + local @ARGV = @_; + $oktorun = 1; + $print_header = 1; + $exit_status = 0; + + + # ######################################################################## + # Get configuration information. + # ######################################################################## + my $o = new OptionParser(); + $o->get_specs(); + $o->get_opts(); + + my $dp = $o->DSNParser(); + $dp->prop('set-vars', $o->set_vars()); + + # Add the --replicate table to --ignore-tables. + my %ignore_tables = ( + %{$o->get('ignore-tables')}, + $o->get('replicate') => 1, + ); + $o->set('ignore-tables', \%ignore_tables); + + $o->set('chunk-time', 0) if $o->got('chunk-size'); + + foreach my $opt ( qw(max-load critical-load) ) { + next unless $o->has($opt); + my $spec = $o->get($opt); + eval { + MySQLStatusWaiter::_parse_spec($o->get($opt)); + }; + if ( $EVAL_ERROR ) { + chomp $EVAL_ERROR; + $o->save_error("Invalid --$opt: $EVAL_ERROR"); + } + } + + # https://bugs.launchpad.net/percona-toolkit/+bug/1010232 + my $n_chunk_index_cols = $o->get('chunk-index-columns'); + if ( defined $n_chunk_index_cols + && (!$n_chunk_index_cols + || $n_chunk_index_cols =~ m/\D/ + || $n_chunk_index_cols < 1) ) { + $o->save_error('Invalid number of --chunk-index columns: ' + . 
$n_chunk_index_cols);
+   }
+
+   if ( !$o->get('help') ) {
+      if ( @ARGV > 1 ) {
+         $o->save_error("More than one host specified; only one allowed");
+      }
+
+      if ( ($o->get('replicate') || '') !~ m/[\w`]\.[\w`]/ ) {
+         $o->save_error('The --replicate table must be database-qualified');
+      }
+
+      if ( my $limit = $o->get('chunk-size-limit') ) {
+         if ( $limit < 0 || ($limit > 0 && $limit < 1) ) {
+            $o->save_error('--chunk-size-limit must be >= 1 or 0 to disable');
+         }
+      }
+
+      if ( $o->get('progress') ) {
+         eval { Progress->validate_spec($o->get('progress')) };
+         if ( $EVAL_ERROR ) {
+            chomp $EVAL_ERROR;
+            $o->save_error("--progress $EVAL_ERROR");
+         }
+      }
+   }
+
+   my $autodiscover_cluster;
+   my $recursion_method = [];
+   foreach my $method ( @{$o->get('recursion-method')} ) {
+      if ( $method eq 'cluster' ) {
+         $autodiscover_cluster = 1;
+      }
+      else {
+         push @$recursion_method, $method;
+      }
+   }
+   $o->set('recursion-method', $recursion_method);
+   eval {
+      MasterSlave::check_recursion_method($o->get('recursion-method'));
+   };
+   if ( $EVAL_ERROR ) {
+      $o->save_error($EVAL_ERROR);
+   }
+
+   $o->usage_or_errors();
+
+   if ( $o->get('truncate-replicate-table') && $o->get('resume') ) {
+      die "--resume and --truncate-replicate-table are mutually exclusive";
+   }
+
+   if ( $o->get('truncate-replicate-table') && !$o->get('empty-replicate-table') ) {
+      die "--truncate-replicate-table and --no-empty-replicate-table "
+         . "are mutually exclusive";
+   }
+
+   # ########################################################################
+   # If --pid, check it first since we'll die if it already exists.
+   # ########################################################################
+   # We're not daemonizing; this just handles PID stuff.  Keep $daemon
+   # in the scope of main() because when it's destroyed it automatically
+   # removes the PID file.
+   my $pid_file = $o->get('pid');
+   my $daemon = new Daemon(
+      pid_file => $pid_file,
+   );
+   eval {
+      $daemon->run();
+   };
+   if ( my $e = $EVAL_ERROR ) {
+      # TODO: quite hackish, but it should work for now
+      if ( $e =~ m/PID file $pid_file exists/ ) {
+         $exit_status |= $PTC_EXIT_STATUS{ALREADY_RUNNING};
+         warn "$e\n";
+         return $exit_status;
+      }
+      else {
+         die $e;
+      }
+   }
+
+   # ########################################################################
+   # Connect to the master.
+   # ########################################################################
+
+   my $set_on_connect = sub {
+      my ($dbh) = @_;
+      return if $o->get('explain');
+      my $sql;
+
+      # https://bugs.launchpad.net/percona-toolkit/+bug/1019479
+      # sql_mode ONLY_FULL_GROUP_BY often raises an error even when a query
+      # is safe and deterministic.  It's best to turn it off for the session
+      # at this point.
+      $sql = 'SELECT @@SQL_MODE';
+      PTDEBUG && _d($dbh, $sql);
+      my ($sql_mode) = eval { $dbh->selectrow_array($sql) };
+      if ( $EVAL_ERROR ) {
+         die "Error getting the current SQL_MODE: $EVAL_ERROR";
+      }
+      $sql_mode =~ s/ONLY_FULL_GROUP_BY//i;
+      $sql = qq[SET SQL_MODE='$sql_mode'];
+      PTDEBUG && _d($dbh, $sql);
+      eval { $dbh->do($sql) };
+      if ( $EVAL_ERROR ) {
+         die "Error setting SQL_MODE"
+            . ": $EVAL_ERROR";
+      }
+
+      # https://bugs.launchpad.net/percona-toolkit/+bug/919352
+      # The tool shouldn't blindly attempt to change binlog_format;
+      # instead, it should check if it's already set to STATEMENT.
+      # This is because starting with MySQL 5.1.29, changing the format
+      # requires a SUPER user.
+ if ( VersionParser->new($dbh) >= '5.1.5' ) { + $sql = 'SELECT @@binlog_format'; + PTDEBUG && _d($dbh, $sql); + my ($original_binlog_format) = $dbh->selectrow_array($sql); + PTDEBUG && _d('Original binlog_format:', $original_binlog_format); + if ( $original_binlog_format !~ /STATEMENT/i ) { + $sql = q{/*!50108 SET @@binlog_format := 'STATEMENT'*/}; + eval { + PTDEBUG && _d($dbh, $sql); + $dbh->do($sql); + }; + if ( $EVAL_ERROR ) { + die "Failed to $sql: $EVAL_ERROR\n" + . "This tool requires binlog_format=STATEMENT, " + . "but the current binlog_format is set to " + ."$original_binlog_format and an error occurred while " + . "attempting to change it. If running MySQL 5.1.29 or newer, " + . "setting binlog_format requires the SUPER privilege. " + . "You will need to manually set binlog_format to 'STATEMENT' " + . "before running this tool.\n"; + } + } + } + + # Set transaction isolation level. We set binlog_format to STATEMENT, + # but if the transaction isolation level is set to READ COMMITTED and the + # --replicate table is in InnoDB format, the tool fails with the following + # message: + # + # Binary logging not possible. Message: Transaction level 'READ-COMMITTED' + # in InnoDB is not safe for binlog mode 'STATEMENT' + # + # See also http://code.google.com/p/maatkit/issues/detail?id=720 + $sql = 'SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ'; + eval { + PTDEBUG && _d($dbh, $sql); + $dbh->do($sql); + }; + if ( $EVAL_ERROR ) { + die "Failed to $sql: $EVAL_ERROR\n" + . "If the --replicate table is InnoDB and the default server " + . "transaction isolation level is not REPEATABLE-READ then " + . "checksumming may fail with errors such as \"Binary logging not " + . "possible. Message: Transaction level 'READ-COMMITTED' in " + . "InnoDB is not safe for binlog mode 'STATEMENT'\". In that " + . "case you will need to manually set the transaction isolation " + . "level to REPEATABLE-READ.\n"; + } + + + return; + }; + + # Do not call "new Cxn(" directly; use this sub so that set_on_connect + # is applied to every cxn. + # TODO: maybe this stuff only needs to be set on master cxn? + my $make_cxn = sub { + my (%args) = @_; + my $cxn = new Cxn( + %args, + DSNParser => $dp, + OptionParser => $o, + set => $args{set_vars} ? $set_on_connect : undef, + ); + eval { $cxn->connect() }; # connect or die trying + if ( $EVAL_ERROR ) { + die ts($EVAL_ERROR); + } + return $cxn; + }; + + # The dbh and dsn can be used before checksumming starts, but once + # inside the main TABLE loop, only use the master cxn because its + # dbh may be recreated. 
+   my $master_cxn = $make_cxn->(set_vars => 1, dsn_string => shift @ARGV);
+   my $master_dbh = $master_cxn->dbh();  # just for brevity
+   my $master_dsn = $master_cxn->dsn();  # just for brevity
+
+   if ($o->get('disable-qrt-plugin')) {
+      eval {
+         $master_dbh->selectrow_arrayref('SELECT @@query_response_time_session_stats');
+      };
+      if ($EVAL_ERROR) {
+         $original_qrt_plugin_master_status = undef;
+         PTDEBUG && _d('QRT plugin is not installed: '.$EVAL_ERROR);
+      } else {
+         ($original_qrt_plugin_master_status) = $master_dbh->selectrow_arrayref('SELECT @@query_response_time_stats');
+         PTDEBUG && _d("Disabling qrt plugin on master server");
+         $master_dbh->do('SET GLOBAL query_response_time_stats = off');
+      }
+   }
+
+   my @ignored_engines = keys %{$o->get('ignore-engines')};
+   my @rocksdb_ignored = grep(/^ROCKSDB$/i, @ignored_engines);
+   if (!@rocksdb_ignored) {
+      print STDOUT "Checking if all tables can be checksummed ...\n";
+      my $mysql_config = MySQLConfig->new(dbh => $master_dbh);
+      my $has_rocksdb  = $mysql_config->has_engine('ROCKSDB');
+      if ($has_rocksdb) {
+         my $sql = "SELECT DISTINCT `table_name`, `table_schema`, `engine` FROM `information_schema`.`tables` "
+                 . " WHERE `table_schema` NOT IN ('mysql', 'information_schema', 'performance_schema') "
+                 . " AND `engine` LIKE 'ROCKSDB'";
+         my $rows = $master_dbh->selectall_arrayref($sql, {Slice=>{}});
+         my $not_ignored_rocks_db_tables_count = scalar @$rows;
+         if (@$rows) {
+            my ($tables_list, $separator) = ('', '');
+            for my $row (@$rows) {
+               $tables_list .= $separator.$row->{table_schema}.".".$row->{table_name};
+               $separator = ", ";
+               if ($o->get('ignore-tables')->{"$row->{table_schema}.$row->{table_name}"}) {
+                  $not_ignored_rocks_db_tables_count--;
+               }
+            }
+            if ($not_ignored_rocks_db_tables_count > 0) {
+               print STDERR "\nThe RocksDB storage engine is not supported with pt-table-checksum "
+                          . "since RocksDB does not support binlog_format=STATEMENT.\n"
+                          . "We have identified the following tables using the MyRocks storage engine:\n";
+               for my $row (@$rows) {
+                  print STDERR "$row->{table_schema}.$row->{table_name}\n";
+               }
+               print STDERR "\nPlease add ROCKSDB to the list of --ignore-engines\n";
+               print STDERR "--ignore-engines=FEDERATED,MRG_MyISAM,RocksDB\n";
+               print STDERR "\nAlternatively, exclude the MyRocks tables explicitly:\n";
+               print STDERR "--ignore-tables=$tables_list\n\n";
+               print STDERR "Aborting.\n";
+               exit($PTC_EXIT_STATUS{SKIP_TABLE});
+            }
+         }
+      }
+      print STDOUT "Starting checksum ...\n";
+   }
+   # ########################################################################
+   # Set up the run time, if any.  Anything that waits should check this
+   # between waits, else this will happen:
+   # https://bugs.launchpad.net/percona-toolkit/+bug/1043438
+   # ########################################################################
+   my $have_time;
+   if ( my $run_time = $o->get('run-time') ) {
+      my $rt = Runtime->new(
+         now      => sub { return time; },
+         run_time => $run_time,
+      );
+      $have_time = sub { return $rt->have_time(); };
+   }
+   else {
+      $have_time = sub { return 1; };
+   }
+
+   # ########################################################################
+   # Set up PXC stuff.
+   # ########################################################################
+   my $cluster = Percona::XtraDB::Cluster->new();
+   my %cluster_name_for;
+   $cluster_name_for{$master_cxn} = $cluster->is_cluster_node($master_cxn);
+
+   if ( $cluster_name_for{$master_cxn} ) {
+      # Because of https://bugs.launchpad.net/codership-mysql/+bug/1040108
+      # ptc and pt-osc check Threads_running by default for --max-load.
+      # Strictly speaking, they can run on 5.5.27 as long as that bug doesn't
+      # manifest itself.  If it does, however, then the tools will wait forever.
+      my $pxc_version = VersionParser->new($master_dbh);
+      if ( $pxc_version < '5.5.28' ) {
+         die "Percona XtraDB Cluster 5.5.28 or newer is required to run "
+            . "this tool on a cluster, but node " . $master_cxn->name
+            . " is running version " . $pxc_version->version
+            . ".  Please upgrade the node, or run the tool on a newer node, "
+            . "or contact Percona for support.\n";
+      }
+   }
+
+   # ########################################################################
+   # If this is not a dry run (--explain was not specified), then we're
+   # going to checksum the tables, so do the necessary preparations and
+   # checks.  Else, this all can be skipped because all we need for a
+   # dry run is a connection to the master.
+   # ########################################################################
+   my $q  = new Quoter();
+   my $tp = new TableParser(Quoter => $q);
+   my $rc = new RowChecksum(Quoter => $q, OptionParser => $o);
+   my $ms = new MasterSlave(
+      OptionParser => $o,
+      DSNParser    => $dp,
+      Quoter       => $q,
+      channel      => $o->get('channel')
+   );
+
+   my $slaves = [];     # all slaves (that we can find)
+   my $slave_lag_cxns;  # slaves whose lag we'll check
+
+   # ########################################################################
+   # Create --plugin.
+   # ########################################################################
+   my $plugin;
+   if ( my $file = $o->get('plugin') ) {
+      die "--plugin file $file does not exist\n" unless -f $file;
+      eval {
+         require $file;
+      };
+      die "Error loading --plugin $file: $EVAL_ERROR" if $EVAL_ERROR;
+      eval {
+         $plugin = pt_table_checksum_plugin->new(
+            master_cxn  => $master_cxn,
+            explain     => $o->get('explain'),
+            quiet       => $o->get('quiet'),
+            resume      => $o->get('resume'),
+            Quoter      => $q,
+            TableParser => $tp,
+         );
+      };
+      die "Error creating --plugin: $EVAL_ERROR" if $EVAL_ERROR;
+      print "Created plugin from $file.\n";
+   }
+
+   my $replica_lag;     # ReplicaLagWaiter object
+   my $replica_lag_pr;  # Progress for ReplicaLagWaiter
+   my $sys_load;        # MySQLStatusWaiter object
+   my $sys_load_pr;     # Progress for MySQLStatusWaiter object
+
+   my $repl_table = $q->quote($q->split_unquote($o->get('replicate')));
+   my $fetch_sth;   # fetch chunk from repl table
+   my $update_sth;  # update master_crc and master_cnt in repl table
+   my $delete_sth;  # delete checksums for one db.tbl from repl table
+
+   if ( $o->get('truncate-replicate-table') ) {
+      eval {
+         $master_dbh->do("TRUNCATE TABLE $repl_table");
+      };
+      if ($EVAL_ERROR) {
+         PTDEBUG && _d("Cannot truncate replicate table $repl_table. $EVAL_ERROR");
+      }
+   }
+
+   if ( !$o->get('explain') ) {
+      # #####################################################################
+      # Find and connect to slaves.
+      # #####################################################################
+      my $make_cxn_cluster = sub {
+         my $cxn = $make_cxn->(@_, prev_dsn => $master_cxn->dsn());
+         $cluster_name_for{$cxn} = $cluster->is_cluster_node($cxn);
+         return $cxn;
+      };
+
+      $slaves = $ms->get_slaves(
+         dbh      => $master_dbh,
+         dsn      => $master_dsn,
+         make_cxn => $make_cxn_cluster,
+      );
+
+      my %seen_ids;
+      for my $cxn ($master_cxn, @$slaves) {
+         my $dbh = $cxn->dbh();
+         # get server/node unique id
+         # ( https://bugs.launchpad.net/percona-toolkit/+bug/1217466 )
+         my $id = $cxn->get_id();
+         $seen_ids{$id}++;
+      }
+
+      if ( $autodiscover_cluster ) {
+         my @known_nodes = grep { $cluster_name_for{$_} } $master_cxn, @$slaves;
+         my $new_cxns = $cluster->autodetect_nodes(
+            nodes       => \@known_nodes,
+            MasterSlave => $ms,
+            DSNParser   => $dp,
+            make_cxn    => $make_cxn_cluster,
+            seen_ids    => \%seen_ids,
+         );
+         push @$slaves, @$new_cxns;
+      }
+
+      my $trimmed_nodes = Cxn->remove_duplicate_cxns(
+         cxns => [ $master_cxn, @$slaves ],
+      );
+      ($master_cxn, @$slaves) = @$trimmed_nodes;
+
+      # If no slaves or nodes were found, and a recursion method was given
+      # (implicitly or explicitly), and that method is not none, then warn
+      # and continue but exit non-zero because there won't be any diffs but
+      # this could be a false-positive from having no slaves/nodes to check.
+      # https://bugs.launchpad.net/percona-toolkit/+bug/1210537
+      PTDEBUG && _d(scalar @$slaves, 'slaves found');
+      if ( !@$slaves
+           && (($o->get('recursion-method')->[0] || '') ne 'none'
+               || $autodiscover_cluster))
+      {
+         $exit_status |= $PTC_EXIT_STATUS{NO_SLAVES_FOUND};
+         if ( $o->get('quiet') < 2 ) {
+            my $type = $autodiscover_cluster ? 'cluster nodes' : 'slaves';
+            warn "Diffs cannot be detected because no $type were found.  "
+               . "Please read the --recursion-method documentation for "
+               . "information.\n";
+         }
+      }
+
+      # https://bugs.launchpad.net/percona-toolkit/+bug/938068
+      if ( $o->get('check-binlog-format') ) {
+         my $master_binlog = 'STATEMENT';
+         if ( VersionParser->new($master_dbh) >= '5.1.5' ) {
+            ($master_binlog) = $master_dbh->selectrow_array(
+               'SELECT @@binlog_format');
+         }
+
+         my $err = '';
+         for my $slave_cxn ( @$slaves ) {
+            # https://bugs.launchpad.net/percona-toolkit/+bug/1080385
+            next if $cluster_name_for{$slave_cxn};
+
+            my $slave_binlog = 'STATEMENT';
+            if ( VersionParser->new($slave_cxn->dbh) >= '5.1.5' ) {
+               ($slave_binlog) = $slave_cxn->dbh->selectrow_array(
+                  'SELECT @@binlog_format');
+            }
+
+            if ( $master_binlog ne $slave_binlog ) {
+               $err .= "Replica " . $slave_cxn->name()
+                  . qq{ has binlog_format $slave_binlog which could cause }
+                  . qq{pt-table-checksum to break replication.  Please read }
+                  . qq{"Replicas using row-based replication" in the }
+                  . qq{LIMITATIONS section of the tool's documentation.  }
+                  . qq{If you understand the risks, specify }
+                  . qq{--no-check-binlog-format to disable this check.\n};
+            }
+         }
+         die $err if $err;
+      }
+
+      if ( $cluster_name_for{$master_cxn} ) {
+         if ( !@$slaves ) {
+            if ( ($o->get('recursion-method')->[0] || '') ne 'none' ) {
+               die $master_cxn->name() . " is a cluster node but no other nodes "
+                  . "or regular replicas were found.  Use --recursion-method=dsn "
+                  . "to specify the other nodes in the cluster.\n";
+            }
+         }
+
+         # Make sure the master and all nodes are in the same cluster.
+ my @other_cluster; + foreach my $slave ( @$slaves ) { + next unless $cluster_name_for{$slave}; + if ( $cluster_name_for{$master_cxn} ne $cluster_name_for{$slave}) { + push @other_cluster, $slave; + } + } + if ( @other_cluster ) { + die $master_cxn->name . " is in cluster " + . $cluster_name_for{$master_cxn} . " but these nodes are " + . "in other clusters:\n" + . join("\n", + map {' ' . $_->name . " is in cluster $cluster_name_for{$_}"} + @other_cluster) . "\n" + . "All nodes must be in the same cluster. " + . "For more information, please read the Percona XtraDB " + . "Cluster section of the tool's documentation.\n"; + } + } + elsif ( @$slaves ) { + # master is not a cluster node, but what about the slaves? + my $direct_slave; # master -> direct_slave + my @slaves; # traditional slaves + my @nodes; # cluster nodes + foreach my $slave ( @$slaves ) { + if ( !$cluster_name_for{$slave} ) { + push @slaves, $slave; + next; + } + + my $is_master_of = eval { + $ms->is_master_of($master_cxn->dbh, $slave->dbh); + }; + if ( $EVAL_ERROR && $EVAL_ERROR =~ m/is not a slave/ ) { + push @nodes, $slave; + } + elsif ( $is_master_of ) { + $direct_slave = $slave; + } + else { + # Another error could have happened but we don't really + # care. We know for sure the slave is a node, so just + # presume that and carry on. + push @nodes, $slave; + } + } + + my $err = ''; + if ( @nodes ) { + if ( $direct_slave ) { + warn "Diffs will only be detected if the cluster is " + . "consistent with " . $direct_slave->name . " because " + . $master_cxn->name . " is a traditional replication master " + . "but these replicas are cluster nodes:\n" + . join("\n", map { ' ' . $_->name } @nodes) . "\n" + . "For more information, please read the Percona XtraDB " + . "Cluster section of the tool's documentation.\n"; + } + else { + warn "Diffs may not be detected on these cluster nodes " + . "because the direct replica of " . $master_cxn->name + . " was not found or specified:\n" + . join("\n", map { ' ' . $_->name } @nodes) . "\n" + . "For more information, please read the Percona XtraDB " + . "Cluster section of the tool's documentation.\n"; + } + + if ( @slaves ) { + warn "Diffs will only be detected on these replicas if " + . "they replicate from " . $master_cxn->name . ":\n" + . join("\n", map { ' ' . $_->name } @slaves) . "\n" + . "For more information, please read the Percona XtraDB " + . "Cluster section of the tool's documentation.\n"; + } + } + } + + for my $slave (@$slaves) { + my $qrt_plugin_status; + eval { + ($qrt_plugin_status) = $slave->{dbh}->selectrow_arrayref('SELECT @@QUERY_RESPONSE_TIME_SESSION_STATS' ); + }; + if ($EVAL_ERROR) { + PTDEBUG && _d('QRT plugin is not installed on slave '.$slave->{dsn_name}); + $slave->{qrt_plugin_status} = undef; + next; + } + $slave->{qrt_plugin_status} = $qrt_plugin_status->[0]; + if ($slave->{qrt_plugin_status}) { + PTDEBUG && _d("Disabling qrt plugin state on slave ".$slave->{dsn_name}); + $slave->{dbh}->do('SET GLOBAL query_response_time_stats = off'); + } + } + + if ( $o->get('check-slave-lag') ) { + PTDEBUG && _d('Will use --check-slave-lag to check for slave lag'); + my $cxn = $make_cxn->( + dsn_string => $o->get('check-slave-lag'), + prev_dsn => $master_cxn->dsn(), + ); + $slave_lag_cxns = [ $cxn ]; + } + else { + PTDEBUG && _d('Will check slave lag on all slaves'); + $slave_lag_cxns = [ map { $_ } @$slaves ]; + } + + # Cluster nodes aren't slaves, so SHOW SLAVE STATUS doesn't work. 
+ # Nodes shouldn't be out of sync anyway because the cluster is + # (virtually) synchronous, so waiting for the last checksum chunk + # to appear should be sufficient. + @$slave_lag_cxns = grep { + my $slave_cxn = $_; + if ( $cluster_name_for{$slave_cxn} ) { + warn "Not checking replica lag on " . $slave_cxn->name() + . " because it is a cluster node.\n"; + 0; + } + else { + PTDEBUG && _d('May check slave lag on', $slave_cxn->name()); + $slave_cxn; + } + } @$slave_lag_cxns; + + if ( $slave_lag_cxns && scalar @$slave_lag_cxns ) { + if ($o->get('skip-check-slave-lag')) { + my $slaves_to_skip = $o->get('skip-check-slave-lag'); + my $filtered_slaves = []; + for my $slave (@$slave_lag_cxns) { + my $found=0; + for my $slave_to_skip (@$slaves_to_skip) { + my $h_eq_h = $slave->{dsn}->{h} eq $slave_to_skip->{h}; + my $p_eq_p; + if (defined($slave->{dsn}->{P}) || defined($slave_to_skip->{P})) { + $p_eq_p = $slave->{dsn}->{P} eq $slave_to_skip->{P}; + } else { + PTDEBUG && _d("Both port DSNs are undefined, setting p_eq_p to true"); + $p_eq_p = 1; + } + if ($h_eq_h && $p_eq_p) { + $found=1; + } + } + if ($found) { + printf("Skipping slave %s\n", $slave->name()); + } else { + push @$filtered_slaves, $slave; + } + } + $slave_lag_cxns = $filtered_slaves; + } + } + + # ##################################################################### + # Possibly check replication slaves and exit. + # ##################################################################### + if ( $o->get('replicate-check') && $o->get('replicate-check-only') ) { + PTDEBUG && _d('Will --replicate-check and exit'); + + # --plugin hook + if ( $plugin && $plugin->can('before_replicate_check') ) { + $plugin->before_replicate_check(); + } + + foreach my $slave ( @$slaves ) { + my $diffs = $rc->find_replication_differences( + dbh => $slave->dbh(), + repl_table => $repl_table, + ); + PTDEBUG && _d(scalar @$diffs, 'checksum diffs on', + $slave->name()); + $diffs = filter_tables_replicate_check_only($diffs, $o); + if ( @$diffs ) { + $exit_status |= $PTC_EXIT_STATUS{TABLE_DIFF}; + if ( $o->get('quiet') < 2 ) { + print_checksum_diffs( + cxn => $slave, + diffs => $diffs, + ); + } + } + } + + # --plugin hook + if ( $plugin && $plugin->can('after_replicate_check') ) { + $plugin->after_replicate_check(); + } + + PTDEBUG && _d('Exit status', $exit_status, 'oktorun', $oktorun); + return $exit_status; + } + + # ##################################################################### + # Check for replication filters. + # ##################################################################### + if ( $o->get('check-replication-filters') ) { + PTDEBUG && _d("Checking slave replication filters"); + my @all_repl_filters; + foreach my $slave ( @$slaves ) { + my $repl_filters = $ms->get_replication_filters( + dbh => $slave->dbh(), + ); + if ( keys %$repl_filters ) { + push @all_repl_filters, + { name => $slave->name(), + filters => $repl_filters, + }; + } + } + if ( @all_repl_filters ) { + my $msg = "Replication filters are set on these hosts:\n"; + foreach my $host ( @all_repl_filters ) { + my $filters = $host->{filters}; + $msg .= " $host->{name}\n" + . join("\n", map { " $_ = $host->{filters}->{$_}" } + keys %{$host->{filters}}) + . "\n"; + } + $msg .= "Please read the --check-replication-filters documentation " + . "to learn how to solve this problem."; + die ts($msg); + } + } + + # ##################################################################### + # Check that the replication table exists, or possibly create it. 
+ # ##################################################################### + eval { + check_repl_table( + dbh => $master_dbh, + repl_table => $repl_table, + slaves => $slaves, + have_time => $have_time, + OptionParser => $o, + TableParser => $tp, + Quoter => $q, + ); + }; + if ( $EVAL_ERROR ) { + die ts($EVAL_ERROR); + } + + # ##################################################################### + # Make a ReplicaLagWaiter to help wait for slaves after each chunk. + # ##################################################################### + my $sleep = sub { + # Don't let the master dbh die while waiting for slaves because we + # may wait a very long time for slaves. + + # This is called from within the main TABLE loop, so use the + # master cxn; do not use $master_dbh. + my $dbh = $master_cxn->dbh(); + if ( !$dbh || !$dbh->ping() ) { + PTDEBUG && _d('Lost connection to master while waiting for slave lag'); + eval { $dbh = $master_cxn->connect() }; # connect or die trying + if ( $EVAL_ERROR ) { + $oktorun = 0; # Fatal error + chomp $EVAL_ERROR; + die "Lost connection to master while waiting for replica lag " + . "($EVAL_ERROR)"; + } + } + $dbh->do("SELECT 'pt-table-checksum keepalive'"); + sleep $o->get('check-interval'); + return; + }; + + my $get_lag; + # The plugin is able to override the slavelag check so tools like + # pt-heartbeat or other replicators (Tungsten...) can be used to + # measure replication lag + if ( $plugin && $plugin->can('get_slave_lag') ) { + $get_lag = $plugin->get_slave_lag(oktorun => \$oktorun); + } else { + $get_lag = sub { + my ($cxn) = @_; + my $dbh = $cxn->dbh(); + if ( !$dbh || !$dbh->ping() ) { + PTDEBUG && _d('Lost connection to slave', $cxn->name(), + 'while waiting for slave lag'); + eval { $dbh = $cxn->connect() }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Failed to connect to slave', $cxn->name(), + ':', $EVAL_ERROR); + return; # keep waiting and trying to reconnect + } + } + my $slave_lag; + eval { + $slave_lag = $ms->get_slave_lag($dbh); + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Error getting slave lag', $cxn->name(), + ':', $EVAL_ERROR); + return; # keep waiting and trying to reconnect + } + return $slave_lag; + }; + } + + $replica_lag = new ReplicaLagWaiter( + slaves => $slave_lag_cxns, + max_lag => $o->get('max-lag'), + oktorun => sub { return $oktorun && $have_time->(); }, + get_lag => $get_lag, + sleep => $sleep, + fail_on_stopped_replication => $o->get('fail-on-stopped-replication'), + ); + + my $get_status; + { + my $sql = "SHOW GLOBAL STATUS LIKE ?"; + my $sth = $master_cxn->dbh()->prepare($sql); + + $get_status = sub { + my ($var) = @_; + PTDEBUG && _d($sth->{Statement}, $var); + $sth->execute($var); + my (undef, $val) = $sth->fetchrow_array(); + return $val; + }; + } + + eval { + $sys_load = new MySQLStatusWaiter( + max_spec => $o->get('max-load'), + get_status => $get_status, + oktorun => sub { return $oktorun && $have_time->(); }, + sleep => $sleep, + ); + }; + if ( $EVAL_ERROR ) { + chomp $EVAL_ERROR; + die "Error checking --max-load: $EVAL_ERROR. " + . "Check that the variables specified for --max-load " + . "are spelled correctly and exist in " + . "SHOW GLOBAL STATUS. Current value for this option is:\n" + . " --max-load " . (join(',', @{$o->get('max-load')})) . 
"\n"; + } + + if ( $o->get('progress') ) { + $replica_lag_pr = new Progress( + jobsize => scalar @$slaves, + spec => $o->get('progress'), + name => "Waiting for replicas to catch up", # not used + ); + + $sys_load_pr = new Progress( + jobsize => scalar @{$o->get('max-load')}, + spec => $o->get('progress'), + name => "Waiting for --max-load", # not used + ); + } + + # ##################################################################### + # Prepare statement handles to update the repl table on the master. + # ##################################################################### + $fetch_sth = $master_dbh->prepare( + "SELECT this_crc, this_cnt FROM $repl_table " + . "WHERE master_ip = ? AND master_port = ? AND db = ? AND tbl = ? AND chunk = ?"); + $update_sth = $master_dbh->prepare( + "UPDATE $repl_table SET chunk_time = ?, master_crc = ?, master_cnt = ? " + . "WHERE master_ip = ? AND master_port = ? AND db = ? AND tbl = ? AND chunk = ?"); + $delete_sth = $master_dbh->prepare( + "DELETE FROM $repl_table WHERE master_ip = ? AND master_port = ? AND db = ? AND tbl = ?"); + } # !$o->get('explain') + + # ######################################################################## + # Do the version-check + # ######################################################################## + if ( $o->get('version-check') && (!$o->has('quiet') || !$o->get('quiet')) ) { + VersionCheck::version_check( + force => $o->got('version-check'), + instances => [ + { dbh => $master_dbh, dsn => $master_dsn }, + map({ +{ dbh => $_->dbh(), dsn => $_->dsn() } } @$slaves) + ], + ); + } + + # ######################################################################## + # Checksum args and the DMS part of the checksum query for each table. + # ######################################################################## + my %crc_args = $rc->get_crc_args(dbh => $master_dbh); + my $checksum_dml = "REPLACE INTO $repl_table " + . "(master_ip, master_port, db, tbl, chunk, chunk_index," + . " lower_boundary, upper_boundary, this_cnt, this_crc) " + . "SELECT" + . ($cluster->is_cluster_node($master_cxn) ? ' /*!99997*/' : '') + . " ?, ?, ?, ?, ?, ?, ?, ?,"; + my $past_cols = " COUNT(*), '0'"; + + # ######################################################################## + # Get last chunk for --resume. + # ######################################################################## + my $last_chunk; + if ( $o->get('resume') ) { + $last_chunk = last_chunk( + dbh => $master_dbh, + repl_table => $repl_table, + master_ip => $o->get('host'), + master_port => $o->get('port'), + ); + } + + my $schema_iter = new SchemaIterator( + dbh => $master_dbh, + resume => $last_chunk ? $q->quote(@{$last_chunk}{qw(db tbl)}) + : "", + OptionParser => $o, + TableParser => $tp, + Quoter => $q, + ); + + if ( $last_chunk && + !$schema_iter->table_is_allowed(@{$last_chunk}{qw(db tbl)}) ) { + PTDEBUG && _d('Ignoring last table', @{$last_chunk}{qw(db tbl)}, + 'and resuming from next table'); + $last_chunk = undef; + } + + # ######################################################################## + # Various variables and modules for checksumming the tables. + # ######################################################################## + my $total_rows = 0; + my $total_time = 0; + my $total_rate = 0; + my $tn = new TableNibbler(TableParser => $tp, Quoter => $q); + my $retry = new Retry(); + + # --chunk-size-limit has two purposes. The 1st, as documented, is + # to prevent oversized chunks when the chunk index is not unique. 
+ # The 2nd is to determine if the table can be processed in one chunk
+ # (WHERE 1=1 instead of nibbling). This creates a problem when
+ # the user does --chunk-size-limit=0 to disable the 1st, documented
+ # purpose because, apparently, they're using non-unique indexes and
+ # they don't care about potentially large chunks. But disabling the
+ # 1st purpose adversely affects the 2nd purpose because 0 * the chunk size
+ # will always be zero, so tables will only be single-chunked if EXPLAIN
+ # says there are 0 rows, but sometimes EXPLAIN says there is 1 row
+ # even when the table is empty. This wouldn't matter except that nibbling
+ # an empty table doesn't currently work because there are no boundaries,
+ # so no checksum is written for the empty table. To fix this and
+ # preserve the two purposes of this option, usages of the 2nd purpose
+ # do || 1 so the limit is never 0 and empty tables are single-chunked.
+ # See:
+ # https://bugs.launchpad.net/percona-toolkit/+bug/987393
+ # https://bugs.launchpad.net/percona-toolkit/+bug/938660
+ # https://bugs.launchpad.net/percona-toolkit/+bug/987495
+ # This is used for the 2nd purpose:
+ my $chunk_size_limit = $o->get('chunk-size-limit') || 1;
+
+ # ########################################################################
+ # Callbacks for each table's nibble iterator. All checksum work is done
+ # in these callbacks and the subs that they call.
+ # ########################################################################
+ my $callbacks = {
+ init => sub {
+ my (%args) = @_;
+ my $tbl = $args{tbl};
+ my $nibble_iter = $args{NibbleIterator};
+ my $statements = $nibble_iter->statements();
+ my $oktonibble = 1;
+
+ if ( $last_chunk ) { # resuming
+ if ( have_more_chunks(%args, last_chunk => $last_chunk) ) {
+ $nibble_iter->set_nibble_number($last_chunk->{chunk});
+ PTDEBUG && _d('Have more chunks; resuming from',
+ $last_chunk->{chunk}, 'at', $last_chunk->{ts});
+ if ( !$o->get('quiet') ) {
+ print "Resuming from $tbl->{db}.$tbl->{tbl} chunk "
+ . "$last_chunk->{chunk}, timestamp $last_chunk->{ts}\n";
+ }
+ }
+ else {
+ # Problem resuming or no next lower boundary.
+ PTDEBUG && _d('No more chunks; resuming from next table');
+ $oktonibble = 0; # don't nibble table; next table
+ }
+
+ # Just need to call us once to kick-start the resume process.
+ $last_chunk = undef;
+ }
+
+ if ( $o->get('check-slave-tables') ) {
+ eval {
+ check_slave_tables(
+ slaves => $slaves,
+ db => $tbl->{db},
+ tbl => $tbl->{tbl},
+ checksum_cols => $tbl->{checksum_cols},
+ have_time => $have_time,
+ TableParser => $tp,
+ OptionParser => $o,
+ );
+ };
+ if ( $EVAL_ERROR ) {
+ my $msg
+ = "Skipping table $tbl->{db}.$tbl->{tbl} because it has "
+ . "problems on these replicas:\n"
+ . $EVAL_ERROR
+ . "This can break replication. If you understand the risks, "
+ . "specify --no-check-slave-tables to disable this check.\n";
+ warn ts($msg);
+ $exit_status |= $PTC_EXIT_STATUS{SKIP_TABLE};
+ $oktonibble = 0;
+ }
+ }
+
+ if ( $o->get('explain') ) {
+ # --explain level 1: print the checksum and next boundary
+ # statements. 
+ print "--\n", + "-- $tbl->{db}.$tbl->{tbl}\n", + "--\n\n"; + + foreach my $sth ( sort keys %$statements ) { + next if $sth =~ m/^explain/; + if ( $statements->{$sth} ) { + print $statements->{$sth}->{Statement}, "\n\n"; + } + } + + if ( $o->get('explain') < 2 ) { + $oktonibble = 0; # don't nibble table; next table + } + } + else { + if ( $nibble_iter->one_nibble() ) { + my @too_large; + SLAVE: + foreach my $slave ( @$slaves ) { + PTDEBUG && _d('Getting table row estimate on', $slave->name()); + my $have_warned = 0; + while ( $oktorun && $have_time->() ) { + my $n_rows; + eval { + # TODO: This duplicates NibbleIterator::can_nibble(); + # probably best to have 1 code path to determine if + # a given table is oversized on a given host. + ($n_rows) = NibbleIterator::get_row_estimate( + Cxn => $slave, + tbl => $tbl, + where => $o->get('where'), + ); + }; + if ( my $e = $EVAL_ERROR ) { + if ( $slave->lost_connection($e) ) { + PTDEBUG && _d($e); + eval { $slave->connect() }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Failed to connect to slave', $slave->name(), + ':', $EVAL_ERROR); + if ( !$have_warned && $o->get('quiet') < 2 ) { + my $msg = "Trying to connect to replica " + . $slave->name() . " to get row count of" + . " table $tbl->{db}.$tbl->{tbl}...\n"; + warn ts($msg); + $have_warned = 1; + } + sleep 2; + } + next; # try again + } + die "Error getting row count estimate of table" + . " $tbl->{db}.$tbl->{tbl} on replica " + . $slave->name() . ": $e"; + } + PTDEBUG && _d('Table on', $slave->name(), 'has', $n_rows, 'rows'); + my $slave_skip_tolerance = $o->get('slave-skip-tolerance') || 1; + if ( $n_rows + && $n_rows > ($tbl->{chunk_size} * $chunk_size_limit) * $slave_skip_tolerance ) + { + PTDEBUG && _d('Table too large on', $slave->name()); + push @too_large, [$slave->name(), $n_rows || 0]; + } + next SLAVE; + } + } + if ( @too_large ) { + if ( $o->get('quiet') < 2 ) { + my $msg + = "Skipping table $tbl->{db}.$tbl->{tbl} because" + . " on the master it would be checksummed in one chunk" + . " but on these replicas it has too many rows:\n"; + foreach my $info ( @too_large ) { + $msg .= " $info->[1] rows on $info->[0]\n"; + } + $msg .= "The current chunk size limit is " + . ($tbl->{chunk_size} * $chunk_size_limit) + . " rows (chunk size=$tbl->{chunk_size}" + . " * chunk size limit=$chunk_size_limit).\n"; + warn ts($msg); + } + $exit_status |= $PTC_EXIT_STATUS{SKIP_TABLE}; + $oktonibble = 0; + } + } + else { # chunking the table + if ( $o->get('check-plan') ) { + my $idx_len = new IndexLength(Quoter => $q); + my ($key_len, $key) = $idx_len->index_length( + Cxn => $args{Cxn}, + tbl => $tbl, + index => $nibble_iter->nibble_index(), + n_index_cols => $o->get('chunk-index-columns'), + ); + if ( !$key || lc($key) ne lc($nibble_iter->nibble_index()) ) { + die "Cannot determine the key_len of the chunk index " + . "because MySQL chose " + . ($key ? "the $key" : "no") . " index " + . "instead of the " . $nibble_iter->nibble_index() + . " index for the first lower boundary statement. " + . "See --[no]check-plan in the documentation for more " + . "information."; + } + elsif ( !$key_len ) { + die "The key_len of the $key index is " + . (defined $key_len ? "zero" : "NULL") + . ", but this should not be possible. " + . "See --[no]check-plan in the documentation for more " + . 
"information."; + } + $tbl->{key_len} = $key_len; + } + } + + if ( $oktonibble && $o->get('empty-replicate-table') ) { + use_repl_db( + dbh => $master_cxn->dbh(), + repl_table => $repl_table, + OptionParser => $o, + Quoter => $q, + ); + PTDEBUG && _d($delete_sth->{Statement}); + $delete_sth->execute($o->get('host'), $o->get('port'), $tbl->{db}, $tbl->{tbl}); + } + + # USE the correct db while checksumming this table. The "correct" + # db is a complicated subject; see sub for comments. + use_repl_db( + dbh => $master_cxn->dbh(), + tbl => $tbl, # XXX working on this table + repl_table => $repl_table, + OptionParser => $o, + Quoter => $q, + ); + # ######################################################### + # XXX DO NOT CHANGE THE DB UNTIL THIS TABLE IS FINISHED XXX + # ######################################################### + } + + return $oktonibble; # continue nibbling table? + }, + next_boundaries => sub { + my (%args) = @_; + my $tbl = $args{tbl}; + my $nibble_iter = $args{NibbleIterator}; + my $sth = $nibble_iter->statements(); + my $boundary = $nibble_iter->boundaries(); + + return 1 if $nibble_iter->one_nibble(); + + # Check that MySQL will use the nibble index for the next upper + # boundary sql. This check applies to the next nibble. So if + # the current nibble number is 5, then nibble 5 is already done + # and we're checking nibble number 6. + + # XXX This call and others like it are relying on a Perl oddity. + # See https://bugs.launchpad.net/percona-toolkit/+bug/987393 + my $expl = explain_statement( + tbl => $tbl, + sth => $sth->{explain_upper_boundary}, + vals => [ @{$boundary->{lower}}, $nibble_iter->limit() ], + ); + if ( lc($expl->{key} || '') + ne lc($nibble_iter->nibble_index() || '') ) { + PTDEBUG && _d('Cannot nibble next chunk, aborting table'); + if ( $o->get('quiet') < 2 ) { + warn ts("Aborting table $tbl->{db}.$tbl->{tbl} at chunk " + . ($nibble_iter->nibble_number() + 1) + . " because it is not safe to chunk. Chunking should " + . "use the " + . ($nibble_iter->nibble_index() || '?') + . " index, but MySQL chose " + . ($expl->{key} ? "the $expl->{key}" : "no") + . " index.\n"); + } + $tbl->{checksum_results}->{errors}++; + return 0; # stop nibbling table + } + + # Once nibbling begins for a table, control does not return to this + # tool until nibbling is done because, as noted above, all work is + # done in these callbacks. This callback is the only place where we + # can prematurely stop nibbling by returning false. This allows + # Ctrl-C to stop the tool between nibbles instead of between tables. + return $oktorun && $have_time->(); # continue nibbling table? + }, + exec_nibble => sub { + my (%args) = @_; + my $tbl = $args{tbl}; + my $nibble_iter = $args{NibbleIterator}; + my $sth = $nibble_iter->statements(); + my $boundary = $nibble_iter->boundaries(); + + # Count every chunk, even if it's ultimately skipped, etc. + $tbl->{checksum_results}->{n_chunks}++; + + # Reset the nibble_time because this nibble hasn't been + # executed yet. If nibble_time is undef, then it's marked + # as skipped in after_nibble. + $tbl->{nibble_time} = undef; + + # --explain level 2: print chunk,lower boundary values,upper + # boundary values. + if ( $o->get('explain') > 1 ) { + my $chunk = $nibble_iter->nibble_number(); + if ( $nibble_iter->one_nibble() ) { + printf "%d 1=1\n", $chunk; + } + else { + # XXX This call and others like it are relying on a Perl oddity. + # See https://bugs.launchpad.net/percona-toolkit/+bug/987393 + my $lb_quoted = join( + ',', map { defined $_ ? 
$_ : 'NULL'} @{$boundary->{lower}}); + my $ub_quoted = join( + ',', map { defined $_ ? $_ : 'NULL'} @{$boundary->{upper}}); + printf "%d %s %s\n", $chunk, $lb_quoted, $ub_quoted; + } + if ( !$nibble_iter->more_boundaries() ) { + print "\n"; # blank line between this table and the next table + } + return 0; # next boundary + } + + # Skip this nibble unless it's safe. + return 0 unless nibble_is_safe( + %args, + OptionParser => $o, + ); + + # Exec and time the nibble. + $tbl->{nibble_time} = exec_nibble( + %args, + Retry => $retry, + Quoter => $q, + OptionParser => $o, + ); + PTDEBUG && _d('Nibble time:', $tbl->{nibble_time}); + + # We're executing REPLACE queries which don't return rows. + # Returning 0 from this callback causes the nibble iter to + # get the next boundaries/nibble. + return 0; + }, + after_nibble => sub { + my (%args) = @_; + my $tbl = $args{tbl}; + my $nibble_iter = $args{NibbleIterator}; + + # Don't need to do anything here if we're just --explain'ing. + return if $o->get('explain'); + + # Chunk/nibble number that we just inserted or skipped. + my $chunk = $nibble_iter->nibble_number(); + + # Nibble time will be zero if the chunk was skipped. + if ( !defined $tbl->{nibble_time} ) { + PTDEBUG && _d('Skipping chunk', $chunk); + $exit_status |= $PTC_EXIT_STATUS{SKIP_CHUNK}; + $tbl->{checksum_results}->{skipped}++; + return; + } + + # Max chunk number that worked. This may be less than the total + # number of chunks if, for example, chunk 16 of 16 times out, but + # chunk 15 worked. The max chunk is used for checking for diffs + # on the slaves, in the done callback. + $tbl->{max_chunk} = $chunk; + + # Fetch the checksum that we just executed from the replicate table. + $fetch_sth->execute($o->get('host'), $o->get('port'), @{$tbl}{qw(db tbl)}, $chunk); + my ($crc, $cnt) = $fetch_sth->fetchrow_array(); + + $tbl->{checksum_results}->{n_rows} += $cnt || 0; + + # We're working on the master, so update the checksum's master_cnt + # and master_crc. + $update_sth->execute( + # UPDATE repl_table SET + sprintf('%.6f', $tbl->{nibble_time}), # chunk_time + $crc, # master_crc + $cnt, # master_cnt + # WHERE + $o->get('host'), + $o->get('port'), + $tbl->{db}, + $tbl->{tbl}, + $chunk, + ); + + # Should be done automatically, but I like to be explicit. + $fetch_sth->finish(); + $update_sth->finish(); + $delete_sth->finish(); + + # Update rate, chunk size, and progress if the nibble actually + # selected some rows. + if ( ($cnt || 0) > 0 ) { + # Update the rate of rows per second for the entire server. + # This is used for the initial chunk size of the next table. + $total_rows += $cnt; + $total_time += ($tbl->{nibble_time} || 0); + $total_rate = $total_time ? int($total_rows / $total_time) : 0; + PTDEBUG && _d('Total avg rate:', $total_rate); + + # Adjust chunk size. This affects the next chunk. + if ( $o->get('chunk-time') ) { + $tbl->{chunk_size} = $tbl->{nibble_time} + ? $tbl->{rate}->update($cnt, $tbl->{nibble_time}) + : $o->get('chunk-time'); + + if ( $tbl->{chunk_size} < 1 ) { + # This shouldn't happen. WeightedAvgRate::update() may return + # a value < 1, but minimum chunk size is 1. + $tbl->{chunk_size} = 1; + + # This warning is printed once per table. + if ( !$tbl->{warned}->{slow}++ && $o->get('quiet') < 2 ) { + warn ts("Checksum queries for table " + . "$tbl->{db}.$tbl->{tbl} are executing very slowly. " + . "--chunk-size has been automatically reduced to 1. " + . "Check that the server is not being overloaded, " + . "or increase --chunk-time. The last chunk, number " + . 
"$chunk of table $tbl->{db}.$tbl->{tbl}, " + . "selected $cnt rows and took " + . sprintf('%.3f', $tbl->{nibble_time} || 0) + . " seconds to execute.\n"); + } + } + + # Update chunk-size based on rows/s checksum rate. + $nibble_iter->set_chunk_size($tbl->{chunk_size}); + PTDEBUG && _d('Updated chunk size: '.$tbl->{chunk_size}); + } + + # Every table should have a Progress obj; update it. + if ( my $tbl_pr = $tbl->{progress} ) { + $tbl_pr->update(sub {return $tbl->{checksum_results}->{n_rows}}); + } + } + + # Wait forever for slaves to catch up. + $replica_lag_pr->start() if $replica_lag_pr; + $replica_lag->wait(Progress => $replica_lag_pr); + + # Wait forever for system load to abate. + $sys_load_pr->start() if $sys_load_pr; + $sys_load->wait(Progress => $sys_load_pr); + + return; + }, + done => sub { # done nibbling table + my (%args) = @_; + my $tbl = $args{tbl}; + my $nibble_iter = $args{NibbleIterator}; + my $max_chunk = $tbl->{max_chunk}; + + # Don't need to do anything here if we're just --explain'ing. + return if $o->get('explain'); + + # Wait for all slaves to run all checksum chunks, + # then check for differences. + if ( $max_chunk && $o->get('replicate-check') && scalar @$slaves ) { + PTDEBUG && _d('Checking slave diffs'); + + my $check_pr; + if ( $o->get('progress') ) { + $check_pr = new Progress( + jobsize => $max_chunk, + spec => $o->get('progress'), + name => "Waiting to check replicas for differences", + ); + } + + # Wait for the last checksum of this table to replicate + # to each slave. + # MySQL 8+ replication is slower than 5.7 and the old wait_for_last_checksum alone + # was failing. The new wait_for_slaves checks that Read_Master_Log_Pos on slaves is + # greather or equal Position in the master + if (!$args{Cxn}->is_cluster_node()) { + wait_for_slaves(master_dbh => $args{Cxn}->dbh(), master_slave => $ms, slaves => $slaves); + } + wait_for_last_checksum( + tbl => $tbl, + repl_table => $repl_table, + slaves => $slaves, + max_chunk => $max_chunk, + check_pr => $check_pr, + have_time => $have_time, + OptionParser => $o, + ); + + # Check each slave for checksum diffs. + my %diff_chunks; + foreach my $slave ( @$slaves ) { + eval { + my $diffs = $rc->find_replication_differences( + dbh => $slave->dbh(), + repl_table => $repl_table, + where => "db='$tbl->{db}' AND tbl='$tbl->{tbl}'", + ); + PTDEBUG && _d(scalar @$diffs, 'checksum diffs on', + $slave->name()); + # Save unique chunks that differ. + # https://bugs.launchpad.net/percona-toolkit/+bug/1030031 + if ( scalar @$diffs ) { + # "chunk" is the chunk number. See the SELECT + # statement in RowChecksum::find_replication_differences() + # for the full list of columns. + map { $diff_chunks{ $_->{chunk} }++ } @$diffs; + $exit_status |= $PTC_EXIT_STATUS{TABLE_DIFF}; + } + + my $max_cnt_diff=0; + for my $diff (@$diffs) { + if (abs($diff->{cnt_diff}) > $max_cnt_diff) { + $tbl->{checksum_results}->{max_rows_cnt_diff} = abs($diff->{cnt_diff}); + } + } + }; + if ($EVAL_ERROR) { + if ( $o->get('quiet') < 2 ) { + warn ts("Error checking for checksum differences of table " + . "$tbl->{db}.$tbl->{tbl} on replica " . $slave->name() + . ": $EVAL_ERROR\n" + . "Check that the replica is running and has the " + . "replicate table $repl_table.\n"); + } + $tbl->{checksum_results}->{errors}++; + } + } + $tbl->{checksum_results}->{diffs} = scalar keys %diff_chunks; + } + + # Print table's checksum results if we're not being quiet, + # else print if table has diffs and we're not being completely + # quiet. 
+ if ( !$o->get('quiet')
+ || $o->get('quiet') < 2 && $tbl->{checksum_results}->{diffs} ) {
+ print_checksum_results(tbl => $tbl);
+ }
+
+ return;
+ },
+ };
+
+ # ########################################################################
+ # Init the --plugin.
+ # ########################################################################
+
+ # --plugin hook
+ if ( $plugin && $plugin->can('init') ) {
+ $plugin->init(
+ slaves => $slaves,
+ slave_lag_cxns => $slave_lag_cxns,
+ repl_table => $repl_table,
+ );
+ }
+
+ # ########################################################################
+ # Checksum each table.
+ # ########################################################################
+
+ TABLE:
+ while ( $oktorun && $have_time->() && (my $tbl = $schema_iter->next()) ) {
+ eval {
+ # Results, stats, and info related to checksumming this table can
+ # be saved here. print_checksum_results() uses this info.
+ $tbl->{checksum_results} = {};
+
+ # Set table's initial chunk size. If this is the first table,
+ # then total rate will be zero, so use --chunk-size. Or, if
+ # --chunk-time=0, then only use --chunk-size for every table.
+ # Else, the initial chunk size is based on the total rates of
+ # rows/s from all previous tables. If --chunk-time is really
+ # small, like 0.001, then Perl int() will probably round the
+ # chunk size to zero, which is invalid, so we default to 1.
+ my $chunk_time = $o->get('chunk-time');
+ my $chunk_size = $chunk_time && $total_rate
+ ? int($total_rate * $chunk_time) || 1
+ : $o->get('chunk-size');
+ $tbl->{chunk_size} = $chunk_size;
+
+ # Make a nibble iterator for this table. This should only fail
+ # if the table has no indexes and is too large to checksum in
+ # one chunk.
+ my $checksum_cols = eval {
+ $rc->make_chunk_checksum(
+ dbh => $master_cxn->dbh(),
+ tbl => $tbl,
+ %crc_args
+ );
+ };
+
+ if ( $EVAL_ERROR ) {
+ warn ts("Skipping table $tbl->{db}.$tbl->{tbl} because "
+ . "$EVAL_ERROR\n");
+ $exit_status |= $PTC_EXIT_STATUS{SKIP_TABLE};
+ return;
+ }
+
+ my $nibble_iter;
+ eval {
+ $nibble_iter = new OobNibbleIterator(
+ Cxn => $master_cxn,
+ tbl => $tbl,
+ chunk_size => $tbl->{chunk_size},
+ chunk_index => $o->get('chunk-index'),
+ n_chunk_index_cols => $o->get('chunk-index-columns'),
+ dml => $checksum_dml,
+ select => $checksum_cols,
+ past_dml => $checksum_dml,
+ past_select => $past_cols,
+ callbacks => $callbacks,
+ resume => $last_chunk,
+ OptionParser => $o,
+ Quoter => $q,
+ TableNibbler => $tn,
+ TableParser => $tp,
+ RowChecksum => $rc,
+ comments => {
+ bite => "checksum table",
+ nibble => "checksum chunk",
+ },
+ );
+ };
+ if ( $EVAL_ERROR ) {
+ if ( $o->get('quiet') < 2 ) {
+ warn ts("Cannot checksum table $tbl->{db}.$tbl->{tbl}: "
+ . "$EVAL_ERROR\n");
+ }
+ $tbl->{checksum_results}->{errors}++;
+ }
+ else {
+ # Init a new weighted avg rate calculator for the table.
+ $tbl->{rate} = new WeightedAvgRate(target_t => $chunk_time);
+
+ # Make a Progress obj for this table. It may not be used;
+ # depends on how many rows, chunk size, how fast the server
+ # is, etc. But just in case, all tables have a Progress obj.
+ if ( $o->get('progress')
+ && !$nibble_iter->one_nibble()
+ && $nibble_iter->row_estimate() )
+ {
+ $tbl->{progress} = new Progress(
+ jobsize => $nibble_iter->row_estimate(),
+ spec => $o->get('progress'),
+ name => "Checksumming $tbl->{db}.$tbl->{tbl}",
+ );
+ }
+
+ # Make a list of the columns being checksummed. 
As the option's
+ # docs note, this really only makes sense when checksumming one
+ # table, unless the tables have a common set of columns.
+ # TODO: this now happens in 3 places, search for 'columns'.
+ my $tbl_struct = $tbl->{tbl_struct};
+ my $ignore_col = $o->get('ignore-columns') || {};
+ my $all_cols = $o->get('columns') || $tbl_struct->{non_generated_cols};
+ my @cols = map { lc $_ }
+ grep { !$ignore_col->{$_} }
+ @$all_cols;
+ $tbl->{checksum_cols} = \@cols;
+
+ # --plugin hook
+ if ( $plugin && $plugin->can('before_checksum_table') ) {
+ $plugin->before_checksum_table(
+ tbl => $tbl);
+ }
+
+ # Finally, checksum the table.
+ # The "1 while" loop is necessary because we're executing REPLACE
+ # statements which don't return rows and NibbleIterator only
+ # returns if it has rows to return. So all the work is done via
+ # the callbacks. -- print_checksum_results(), which is called
+ # from the done callback, uses this start time.
+ $tbl->{checksum_results}->{start_time} = time;
+ 1 while $nibble_iter->next();
+
+ # --plugin hook
+ if ( $plugin && $plugin->can('after_checksum_table') ) {
+ $plugin->after_checksum_table();
+ }
+ }
+ };
+ if ( $EVAL_ERROR ) {
+ if ($EVAL_ERROR =~ m/replication/) {
+ exit($PTC_EXIT_STATUS{REPLICATION_STOPPED});
+ }
+ # This should not happen. If it does, it's probably some bug
+ # or error that we're not catching.
+ warn ts(($oktorun ? "Error " : "Fatal error ")
+ . "checksumming table $tbl->{db}.$tbl->{tbl}: "
+ . "$EVAL_ERROR\n");
+ $tbl->{checksum_results}->{errors}++;
+
+ # Print whatever checksum results we got before dying, regardless
+ # of --quiet because at this point we need all the info we can get.
+ print_checksum_results(tbl => $tbl);
+ }
+
+ # Update the tool's exit status.
+ if ( $tbl->{checksum_results}->{errors} ) {
+ $exit_status |= $PTC_EXIT_STATUS{ERROR};
+ }
+ }
+
+ # Restore original QRT plugin state
+ if ($o->get('disable-qrt-plugin')) {
+ eval {
+ if ($original_qrt_plugin_master_status) {
+ PTDEBUG && _d("Restoring qrt plugin state on master server");
+ $master_dbh->do("SET GLOBAL query_response_time_stats = $original_qrt_plugin_master_status->[0]");
+ }
+ for my $slave (@$slaves) {
+ if ($slave->{qrt_plugin_status}) {
+ PTDEBUG && _d("Restoring qrt plugin state on slave ".$slave->{dsn_name});
+ $slave->{dbh}->do("SET GLOBAL query_response_time_stats = $slave->{qrt_plugin_status}");
+ }
+ }
+ };
+ if ($EVAL_ERROR) {
+ warn "Cannot restore qrt_plugin status: $EVAL_ERROR";
+ }
+ }
+
+ PTDEBUG && _d('Exit status', $exit_status,
+ 'oktorun', $oktorun,
+ 'have time', $have_time->());
+ return $exit_status;
+}
+
+# ############################################################################
+# Subroutines
+# ############################################################################
+sub ts {
+ my ($msg) = @_;
+ my ($s, $m, $h, $d, $M) = localtime;
+ my $ts = sprintf('%02d-%02dT%02d:%02d:%02d', $M+1, $d, $h, $m, $s);
+ return $msg ? "$ts $msg" : $ts;
+}
+
+
+sub nibble_is_safe {
+ my (%args) = @_;
+ my @required_args = qw(Cxn tbl NibbleIterator OptionParser);
+ foreach my $arg ( @required_args ) {
+ die "I need a $arg argument" unless $args{$arg};
+ }
+ my ($cxn, $tbl, $nibble_iter, $o)= @args{@required_args};
+
+ # EXPLAIN the checksum chunk query to get its row estimate and index.
+ # XXX This call and others like it are relying on a Perl oddity. 
+ # See https://bugs.launchpad.net/percona-toolkit/+bug/987393
+ my $sth = $nibble_iter->statements();
+ my $boundary = $nibble_iter->boundaries();
+ if (!defined($boundary) || !$boundary || (!$boundary->{lower} || !$boundary->{upper})) {
+ return 0;
+ }
+ my $expl = explain_statement(
+ tbl => $tbl,
+ sth => $sth->{explain_nibble},
+ vals => [ @{$boundary->{lower}}, @{$boundary->{upper}} ],
+ );
+
+ # Ensure that MySQL is using the chunk index if the table is being chunked.
+ if ( !$nibble_iter->one_nibble()
+ && lc($expl->{key} || '') ne lc($nibble_iter->nibble_index() || '') ) {
+ if ( !$tbl->{warned}->{not_using_chunk_index}++
+ && $o->get('quiet') < 2 ) {
+ warn ts("Skipping chunk " . $nibble_iter->nibble_number()
+ . " of $tbl->{db}.$tbl->{tbl} because MySQL chose "
+ . ($expl->{key} ? "the $expl->{key}" : "no") . " index"
+ . " instead of the " . $nibble_iter->nibble_index() . " index.\n");
+ }
+ $exit_status |= $PTC_EXIT_STATUS{SKIP_CHUNK};
+ return 0; # not safe
+ }
+
+ # Ensure that the chunk isn't too large if there's a --chunk-size-limit.
+ # If single-chunking the table, this has already been checked, so it
+ # shouldn't have changed. If chunking the table with a non-unique key,
+ # oversize chunks are possible.
+ if ( my $limit = $o->get('chunk-size-limit') ) {
+ my $oversize_chunk = ($expl->{rows} || 0) >= $tbl->{chunk_size} * $limit;
+ if ( $oversize_chunk
+ && $nibble_iter->identical_boundaries($boundary->{upper},
+ $boundary->{next_lower}) ) {
+ if ( !$tbl->{warned}->{oversize_chunk}++
+ && $o->get('quiet') < 2 ) {
+ warn ts("Skipping chunk " . $nibble_iter->nibble_number()
+ . " of $tbl->{db}.$tbl->{tbl} because it is oversized. "
+ . "The current chunk size limit is "
+ . ($tbl->{chunk_size} * $limit)
+ . " rows (chunk size=$tbl->{chunk_size}"
+ . " * chunk size limit=$limit), but MySQL estimates "
+ . "that there are " . ($expl->{rows} || 0)
+ . " rows in the chunk.\n");
+ }
+ $exit_status |= $PTC_EXIT_STATUS{SKIP_CHUNK};
+ return 0; # not safe
+ }
+ }
+
+ # Ensure that MySQL is still using the entire index.
+ # https://bugs.launchpad.net/percona-toolkit/+bug/1010232
+ if ( !$nibble_iter->one_nibble()
+ && $tbl->{key_len}
+ && ($expl->{key_len} || 0) < $tbl->{key_len} ) {
+ if ( !$tbl->{warned}->{key_len}++
+ && $o->get('quiet') < 2 ) {
+ warn ts("Skipping chunk " . $nibble_iter->nibble_number()
+ . " of $tbl->{db}.$tbl->{tbl} because MySQL used "
+ . "only " . ($expl->{key_len} || 0) . " bytes "
+ . "of the " . ($expl->{key} || '?') . " index instead of "
+ . $tbl->{key_len} . ". See the --[no]check-plan documentation "
+ . "for more information.\n");
+ }
+ $exit_status |= $PTC_EXIT_STATUS{SKIP_CHUNK};
+ return 0; # not safe
+ }
+
+ return 1; # safe
+}
+
+sub exec_nibble {
+ my (%args) = @_;
+ my @required_args = qw(Cxn tbl NibbleIterator Retry Quoter OptionParser);
+ foreach my $arg ( @required_args ) {
+ die "I need a $arg argument" unless $args{$arg};
+ }
+ my ($cxn, $tbl, $nibble_iter, $retry, $q, $o)= @args{@required_args};
+
+ my $dbh = $cxn->dbh();
+ my $sth = $nibble_iter->statements();
+ my $boundary = $nibble_iter->boundaries();
+ # XXX This call and others like it are relying on a Perl oddity. 
+ # See https://bugs.launchpad.net/percona-toolkit/+bug/987393
+ my $lb_quoted = $q->serialize_list(@{$boundary->{lower}});
+ my $ub_quoted = $q->serialize_list(@{$boundary->{upper}});
+ my $chunk = $nibble_iter->nibble_number();
+ my $chunk_index = $nibble_iter->nibble_index();
+
+ return $retry->retry(
+ tries => $o->get('retries'),
+ wait => sub { return; },
+ try => sub {
+ # ###################################################################
+ # Start timing the checksum query.
+ # ###################################################################
+ my $t_start = time;
+
+ # Execute the REPLACE...SELECT checksum query.
+ # XXX This call and others like it are relying on a Perl oddity.
+ # See https://bugs.launchpad.net/percona-toolkit/+bug/987393
+ PTDEBUG && _d($sth->{nibble}->{Statement},
+ 'lower boundary:', @{$boundary->{lower}},
+ 'upper boundary:', @{$boundary->{upper}});
+ $sth->{nibble}->execute(
+ # REPLACE INTO repl_table SELECT
+ $o->get('host'), # master ip
+ $o->get('port'), # master port
+ $tbl->{db}, # db
+ $tbl->{tbl}, # tbl
+ $chunk, # chunk (number)
+ $chunk_index, # chunk_index
+ $lb_quoted, # lower_boundary
+ $ub_quoted, # upper_boundary
+ # this_cnt, this_crc WHERE
+ @{$boundary->{lower}}, # lower boundary values
+ @{$boundary->{upper}}, # upper boundary values
+ );
+
+ my $t_end = time;
+ # ###################################################################
+ # End timing the checksum query.
+ # ###################################################################
+
+ # Check if checksum query caused any warnings.
+ my $sql_warn = 'SHOW WARNINGS';
+ PTDEBUG && _d($sql_warn);
+ my $warnings = $dbh->selectall_arrayref($sql_warn, { Slice => {} } );
+ foreach my $warning ( @$warnings ) {
+ my $code = ($warning->{code} || 0);
+ my $message = $warning->{message};
+ if ( $ignore_code{$code} ) {
+ PTDEBUG && _d('Ignoring warning:', $code, $message);
+ next;
+ }
+ elsif ( $warn_code{$code}
+ && (!$warn_code{$code}->{pattern}
+ || $message =~ m/$warn_code{$code}->{pattern}/) )
+ {
+ if ( !$tbl->{warned}->{$code}++ ) { # warn once per table
+ if ( $o->get('quiet') < 2 ) {
+ warn ts("Checksum query for table $tbl->{db}.$tbl->{tbl} "
+ . "caused MySQL error $code: "
+ . ($warn_code{$code}->{message}
+ ? $warn_code{$code}->{message}
+ : $message)
+ . "\n");
+ }
+ $tbl->{checksum_results}->{errors}++;
+ }
+ }
+ else {
+ # This die will propagate to fail which will return 0
+ # and propagate it to final_fail which will die with
+ # this error message. (So don't wrap it in ts().)
+ die "Checksum query for table $tbl->{db}.$tbl->{tbl} "
+ . "caused MySQL error $code:\n"
+ . " Level: " . ($warning->{level} || '') . "\n"
+ . " Code: " . ($warning->{code} || '') . "\n"
+ . " Message: " . ($warning->{message} || '') . "\n"
+ . " Query: " . $sth->{nibble}->{Statement} . "\n";
+ }
+ }
+
+ # Success: no warnings, no errors. Return nibble time.
+ return $t_end - $t_start;
+ },
+ fail => sub {
+ my (%args) = @_;
+ my $error = $args{error};
+
+ if ( $error =~ m/Lock wait timeout exceeded/
+ || $error =~ m/Query execution was interrupted/
+ || $error =~ m/Deadlock found/
+ ) {
+ # These errors/warnings can be retried, so don't print
+ # a warning yet; do that in final_fail.
+ return 1;
+ }
+ elsif ( $error =~ m/MySQL server has gone away/
+ || $error =~ m/Lost connection to MySQL server/
+ ) {
+ # The 1st pattern means that MySQL itself died or was stopped.
+ # The 2nd pattern means that our cxn was killed (KILL <id>). 
+ eval { $dbh = $cxn->connect(); }; + return 1 unless $EVAL_ERROR; # reconnected, retry checksum query + $oktorun = 0; # failed to reconnect, exit tool + } + + # At this point, either the error/warning cannot be retried, + # or we failed to reconnect. So stop trying and call final_fail. + return 0; + }, + final_fail => sub { + my (%args) = @_; + my $error = $args{error}; + + if ( $error =~ m/Lock wait timeout exceeded/ + || $error =~ m/Query execution was interrupted/ + || $error =~ m/Deadlock found/ + ) { + # These errors/warnings are not fatal but only cause this + # nibble to be skipped. + my $err = $error =~ /Lock wait timeout exceeded/ + ? 'lock_wait_timeout' + : 'query_interrupted'; + if ( !$tbl->{warned}->{$err}++ && $o->get('quiet') < 2 ) { + my $msg = "Skipping chunk " . ($nibble_iter->nibble_number() || '?') + . " of $tbl->{db}.$tbl->{tbl} because $error.\n"; + warn ts($msg); + } + $exit_status |= $PTC_EXIT_STATUS{SKIP_CHUNK}; + return; # skip this nibble + } + + # This die will be caught by the eval inside the TABLE loop. + # Checksumming for this table will stop, which is probably + # good because by this point the error or warning indicates + # that something fundamental is broken or wrong. Checksumming + # will continue with the next table, unless the fail code set + # oktorun=0, in which case the error/warning is fatal. + die "Error executing checksum query: $args{error}\n"; + } + ); +} + +{ +my $line_fmt = "%14s %6s %6s %8s % 10s %7s %7s %7s %-s\n"; +my @headers = qw(TS ERRORS DIFFS ROWS DIFF_ROWS CHUNKS SKIPPED TIME TABLE); + +sub print_checksum_results { + my (%args) = @_; + my @required_args = qw(tbl); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($tbl) = @args{@required_args}; + + if ($print_header) { + printf $line_fmt, @headers; + $print_header = 0; + } + + my $res = $tbl->{checksum_results}; + printf $line_fmt, + ts(), + $res->{errors} || 0, + $res->{diffs} || 0, + $res->{n_rows} || 0, + $tbl->{checksum_results}->{max_rows_cnt_diff} || 0, + $res->{n_chunks} || 0, + $res->{skipped} || 0, + sprintf('%.3f', $res->{start_time} ? time - $res->{start_time} : 0), + "$tbl->{db}.$tbl->{tbl}"; + + return; +} +} + +{ +my @headers = qw(table chunk cnt_diff crc_diff chunk_index lower_boundary upper_boundary); + +sub print_checksum_diffs { + my ( %args ) = @_; + my @required_args = qw(cxn diffs); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($cxn, $diffs) = @args{@required_args}; + + print "Differences on ", $cxn->name(), "\n"; + print join(' ', map { uc $_ } @headers), "\n"; + foreach my $diff ( @$diffs ) { + print join(' ', map { defined $_ ? 
$_ : '' } @{$diff}{@headers}), "\n";
+ }
+ print "\n";
+
+ return;
+}
+}
+
+sub filter_tables_replicate_check_only {
+ my ($diffs, $o) = @_;
+ my @filtered_diffs;
+
+ # TODO: SchemaIterator has the methods to filter the dbs & tables,
+ # but we don't actually need a real iterator beyond that
+ my $filter = new SchemaIterator(
+ file_itr => "Fake",
+ OptionParser => $o,
+ Quoter => "Quoter",
+ TableParser => "TableParser",
+ );
+
+ for my $diff (@$diffs) {
+ my ($db, $table) = Quoter->split_unquote($diff->{table});
+ next unless $filter->database_is_allowed($db)
+ && $filter->table_is_allowed($db, $table);
+ push @filtered_diffs, $diff;
+ }
+
+ return \@filtered_diffs;
+}
+
+sub check_repl_table {
+ my ( %args ) = @_;
+ my @required_args = qw(dbh repl_table slaves have_time
+ OptionParser TableParser Quoter);
+ foreach my $arg ( @required_args ) {
+ die "I need a $arg argument" unless $args{$arg};
+ }
+ my ($dbh, $repl_table, $slaves, $have_time, $o, $tp, $q) = @args{@required_args};
+
+ PTDEBUG && _d('Checking --replicate table', $repl_table);
+
+ # ########################################################################
+ # Create the --replicate database.
+ # ########################################################################
+
+ # If the repl db doesn't exist, auto-create it, maybe.
+ my ($db, $tbl) = $q->split_unquote($repl_table);
+ my $show_db_sql = "SHOW DATABASES LIKE '$db'";
+ PTDEBUG && _d($show_db_sql);
+ my @db_exists = $dbh->selectrow_array($show_db_sql);
+ if ( !@db_exists && !$o->get('create-replicate-table') ) {
+ die "--replicate database $db does not exist and "
+ . "--no-create-replicate-table was specified. You need "
+ . "to create the database.\n";
+ }
+
+ if ( $o->get('create-replicate-table') ) {
+ # Even if the db already exists, do this in case it does not exist
+ # on a slave.
+ my $create_db_sql
+ = "CREATE DATABASE IF NOT EXISTS "
+ . $q->quote($db)
+ . " /* pt-table-checksum */";
+ PTDEBUG && _d($create_db_sql);
+ eval {
+ $dbh->do($create_db_sql);
+ };
+ if ( $EVAL_ERROR ) {
+ # CREATE DATABASE IF NOT EXISTS failed but the db could already
+ # exist and the error could be due, for example, to the user not
+ # having privs to create it, but they still have privs to use it.
+
+ if ( @db_exists ) {
+ # Repl db already exists on the master, so check if it's also
+ # on all slaves. If not, and given that creating it failed,
+ # we'll die because we can't be sure if it's ok on all slaves.
+ # The user can verify and disable this check if it's ok.
+ my $e = $EVAL_ERROR; # CREATE DATABASE error
+ my @slaves_missing_db;
+ foreach my $slave ( @$slaves ) {
+ PTDEBUG && _d($show_db_sql, 'on', $slave->name());
+ my @db_exists_in_slave
+ = $slave->dbh->selectrow_array($show_db_sql);
+ if ( !@db_exists_in_slave ) {
+ push @slaves_missing_db, $slave;
+ }
+ }
+ if ( @slaves_missing_db ) {
+ warn $e; # CREATE DATABASE error
+ die "The --replicate database $db exists on the master but "
+ . "$create_db_sql on the master failed (see the error above) "
+ . "and the database does not exist on these replicas:\n"
+ . join("\n", map { " " . $_->name() } @slaves_missing_db)
+ . "\nThis can break replication. If you understand "
+ . "the risks, specify --no-create-replicate-table to disable "
+ . "this check.\n";
+ }
+ }
+ else {
+ warn $EVAL_ERROR;
+ die "--replicate database $db does not exist and it cannot be "
+ . "created automatically. You need to create the database.\n";
+ }
+ }
+ }
+
+
+ # USE the correct db (probably the repl db, but maybe --replicate-database). 
+ use_repl_db(%args);
+
+ # ########################################################################
+ # Create the --replicate table.
+ # ########################################################################
+
+ # Check if the repl table exists; if not, create it, maybe.
+ my $tbl_exists = $tp->check_table(
+ dbh => $dbh,
+ db => $db,
+ tbl => $tbl,
+ );
+ PTDEBUG && _d('--replicate table exists:', $tbl_exists ? 'yes' : 'no');
+
+ if ( !$tbl_exists && !$o->get('create-replicate-table') ) {
+ die "--replicate table $repl_table does not exist and "
+ . "--no-create-replicate-table was specified. "
+ . "You need to create the table.\n";
+ }
+
+
+ # We used to check the table privs here, but:
+ # https://bugs.launchpad.net/percona-toolkit/+bug/916168
+
+ # Always create the table, unless --no-create-replicate-table
+ # was passed in; see https://bugs.launchpad.net/percona-toolkit/+bug/950294
+ if ( $o->get('create-replicate-table') ) {
+ eval {
+ create_repl_table(%args);
+ };
+ if ( $EVAL_ERROR ) {
+ # CREATE TABLE IF NOT EXISTS failed but the table could already
+ # exist and the error could be due, for example, to the user not
+ # having privs to create it, but they still have privs to use it.
+
+ if ( $tbl_exists ) {
+ # Repl table already exists on the master, so check if it's also
+ # on all slaves. If not, and given that creating it failed,
+ # we'll die because we can't be sure if it's ok on all slaves.
+ # The user can verify and disable this check if it's ok.
+ my $e = $EVAL_ERROR; # CREATE TABLE error
+ my $ddl = $tp->get_create_table($dbh, $db, $tbl);
+ my $tbl_struct = $tp->parse($ddl);
+ eval {
+ check_slave_tables(
+ slaves => $slaves,
+ db => $db,
+ tbl => $tbl,
+ checksum_cols => $tbl_struct->{cols},
+ have_time => $have_time,
+ TableParser => $tp,
+ OptionParser => $o,
+ );
+ };
+ if ( $EVAL_ERROR ) {
+ warn $e; # CREATE TABLE error
+ die "The --replicate table $repl_table exists on the master "
+ . "but it has problems on these replicas:\n"
+ . $EVAL_ERROR
+ . "\nThis can break replication. If you understand "
+ . "the risks, specify --no-create-replicate-table to disable "
+ . "this check.\n";
+ }
+ }
+ else {
+ warn $EVAL_ERROR;
+ die "--replicate table $tbl does not exist and it cannot be "
+ . "created automatically. You need to create the table.\n";
+ }
+ }
+ }
+
+ # Check and wait for the repl table to appear on all slaves.
+ # https://bugs.launchpad.net/percona-toolkit/+bug/1008778
+ if ( scalar @$slaves ) {
+ my $waiting_for;
+ my $pr;
+ if ( $o->get('progress') ) {
+ $pr = new Progress(
+ jobsize => scalar @$slaves,
+ spec => $o->get('progress'),
+ callback => sub {
+ print STDERR "Waiting for the --replicate table to replicate to "
+ . $waiting_for->name() . 
"...\n"; + }, + ); + $pr->start(); + } + + foreach my $slave ( @$slaves ) { + PTDEBUG && _d('Checking if', $slave->name(), 'has repl table'); + $waiting_for = $slave; + my $slave_has_repl_table = $tp->check_table( + dbh => $slave->dbh(), + db => $db, + tbl => $tbl, + ); + while ( !$slave_has_repl_table ) { + $pr->update(sub { return 0; }) if $pr; + sleep 0.5; + $slave_has_repl_table = $tp->check_table( + dbh => $slave->dbh(), + db => $db, + tbl => $tbl, + ); + } + } + } + + if ( $o->get('binary-index') ) { + PTDEBUG && _d('--binary-index : checking if replicate table has binary type columns'); + my $create_table = $tp->get_create_table( $dbh, $db, $tbl ); + if ( $create_table !~ /lower_boundary`?\s+BLOB/si + || $create_table !~ /upper_boundary`?\s+BLOB/si ) + { + die "--binary-index was specified but the current checksum table ($db.$tbl) uses" + ." TEXT columns. To use BLOB columns, drop the current checksum table, then recreate" + ." it by specifying --create-replicate-table --binary-index."; + } + } + + return; # success, repl table is ready to go +} + +# Check that db.tbl exists on all slaves and has the checksum cols, +# else when we check for diffs we'll break replication by selecting +# a nonexistent column. +sub check_slave_tables { + my (%args) = @_; + my @required_args = qw(slaves db tbl checksum_cols have_time TableParser OptionParser); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($slaves, $db, $tbl, $checksum_cols, $have_time, $tp, $o) = @args{@required_args}; + + my @problems; + SLAVE: + foreach my $slave ( @$slaves ) { + my $slave_has_table = 0; + my $have_warned = 0; + while ( $oktorun && $have_time->() ) { + eval { + # TableParser::check_table() does not die on error, it sets + # check_table_error and return 0. + $slave_has_table = $tp->check_table( + dbh => $slave->dbh, + db => $db, + tbl => $tbl, + ); + die $tp->{check_table_error} if defined $tp->{check_table_error}; + if ( !$slave_has_table ) { + push @problems, "Table $db.$tbl does not exist on replica " + . $slave->name; + } + else { + # TableParser::get_create_table() will die on error. + my $slave_tbl_struct = $tp->parse( + $tp->get_create_table($slave->dbh, $db, $tbl) + ); + my @slave_missing_cols; + foreach my $col ( @$checksum_cols ) { + if ( !$slave_tbl_struct->{is_col}->{$col} ) { + push @slave_missing_cols, $col; + } + } + if ( @slave_missing_cols ) { + push @problems, "Table $db.$tbl on replica " . $slave->name + . " is missing these columns: " + . join(", ", @slave_missing_cols); + } + } + }; + if ( my $e = $EVAL_ERROR ) { + PTDEBUG && _d($e); + if ( !$slave->lost_connection($e) ) { + push @problems, "Error checking table $db.$tbl on replica " + . $slave->name . ": $e"; + next SLAVE; + } + + # Lost connection to slave. Reconnect and try again. + eval { $slave->connect() }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Failed to connect to slave', $slave->name(), + ':', $EVAL_ERROR); + if ( !$have_warned && $o->get('quiet') < 2 ) { + my $msg = "Trying to connect to replica " + . $slave->name() . " to check $db.$tbl...\n"; + warn ts($msg); + $have_warned = 1; + } + sleep 2; # wait between failed reconnect attempts + } + next; # try again + } # eval error + + # No error, so we successfully queried this slave. + next SLAVE; + + } # while oktorun && have_time + } # foreach slave + + die join("\n", @problems) . "\n" if @problems; + + return; +} + +# Sub: use_repl_db +# USE the correct database for the --replicate table. 
+# This sub must be called before any work is done with the --replicate
+# table because replication filters can really complicate replicating the
+# checksums. The original issue is
+# http://code.google.com/p/maatkit/issues/detail?id=982,
+# but here's what you need to know:
+# - If there is no active DB, then if there's any do-db or ignore-db
+# settings, the checksums will get filtered out of replication. So we
+# have to have some DB be the current one.
+# - Other places in the code may change the DB and we might not know it.
+# Opportunity for bugs. The SHOW CREATE TABLE, for example. In the
+# end, a bunch of USE statements isn't a big deal, it just looks noisy
+# when you analyze the logs this tool creates. But it's better to just
+# have them even if they're no-op.
+# - We need to always let the user specify, because there are so many
+# possibilities that the tool can't guess the right thing in all of
+# them.
+# - The right default behavior, which the user can override, is:
+# * When running queries on the --replicate table itself, such as
+# emptying it, USE that table's database.
+# * When running checksum queries, USE the database of the table that's
+# being checksummed.
+# * When the user specifies --replicate-database, in contrast, always
+# USE that database.
+# - This behavior is the best compromise by default, because users who
+# explicitly replicate some databases and filter out others will be
+# very likely to run pt-table-checksum and limit its checksumming to
+# only the databases that are replicated. I've seen people do this,
+# including Peter. In this case, the tool will work okay even without
+# an explicit --replicate-database setting.
+#
+# Required Arguments:
+# dbh - dbh
+# repl_table - Full quoted --replicate table name
+# OptionParser -
+# Quoter -
+#
+# Optional Arguments:
+# tbl - Standard tbl hashref of table being checksummed
+#
+# Returns:
+# Nothing or dies on error
+sub use_repl_db {
+ my ( %args ) = @_;
+ my @required_args = qw(dbh repl_table OptionParser Quoter);
+ foreach my $arg ( @required_args ) {
+ die "I need a $arg argument" unless $args{$arg};
+ }
+ my ($dbh, $repl_table, $o, $q) = @args{@required_args};
+ PTDEBUG && _d('use_repl_db');
+
+ my ($db, $tbl) = $q->split_unquote($repl_table);
+ if ( my $tbl = $args{tbl} ) {
+ # If there's a tbl arg then its db will be used unless
+ # --replicate-database was specified. A tbl arg means
+ # we're checksumming that table. Other callers won't
+ # pass a tbl arg when they're just doing something to
+ # the --replicate table.
+ $db = $o->get('replicate-database') ? $o->get('replicate-database')
+ : $tbl->{db};
+ }
+ else {
+ # Caller is doing something just to the --replicate table.
+ # Use the db from --replicate db.tbl (gotten earlier) unless
+ # --replicate-database is in effect.
+ $db = $o->get('replicate-database') if $o->get('replicate-database');
+ }
+
+ eval {
+ my $sql = "USE " . $q->quote($db);
+ PTDEBUG && _d($sql);
+ $dbh->do($sql);
+ };
+ if ( $EVAL_ERROR ) {
+ # Report which option db really came from.
+ my $opt = $o->get('replicate-database') ? "--replicate-database"
+ : "--replicate database";
+ if ( $EVAL_ERROR =~ m/unknown database/i ) {
+ die "$opt $db does not exist. You need to create the "
+ . 
"database or specify a database for $opt that exists.\n"; + } + else { + die "Error using $opt $db: $EVAL_ERROR\n"; + } + } + + return; +} + +sub create_repl_table { + my ( %args ) = @_; + my @required_args = qw(dbh repl_table OptionParser); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $repl_table, $o) = @args{@required_args}; + PTDEBUG && _d('Creating --replicate table', $repl_table); + my $sql = $o->read_para_after(__FILE__, qr/MAGIC_create_replicate/); + $sql =~ s/CREATE TABLE checksums/CREATE TABLE IF NOT EXISTS $repl_table/; + $sql =~ s/;$//; + if ( $o->get('binary-index') ) { + $sql =~ s/`?lower_boundary`?\s+TEXT/`lower_boundary` BLOB/is; + $sql =~ s/`?upper_boundary`?\s+TEXT/`upper_boundary` BLOB/is; + } + PTDEBUG && _d($dbh, $sql); + eval { + $dbh->do($sql); + }; + if ( $EVAL_ERROR ) { + die ts("--create-replicate-table failed: $EVAL_ERROR"); + } + + return; +} + +# Sub: explain_statement +# EXPLAIN a statement. +# +# Required Arguments: +# * tbl - Standard tbl hashref +# * sth - Sth with EXLAIN +# * vals - Values for sth, if any +# +# Returns: +# Hashref with EXPLAIN plan +sub explain_statement { + my ( %args ) = @_; + my @required_args = qw(tbl sth vals); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my ($tbl, $sth, $vals) = @args{@required_args}; + + my $expl; + eval { + PTDEBUG && _d($sth->{Statement}, 'params:', @$vals); + $sth->execute(@$vals); + $expl = $sth->fetchrow_hashref(); + $sth->finish(); + }; + if ( $EVAL_ERROR ) { + # This shouldn't happen. + warn ts("Error executing " . $sth->{Statement} . ": $EVAL_ERROR\n"); + $tbl->{checksum_results}->{errors}++; + } + PTDEBUG && _d('EXPLAIN plan:', Dumper($expl)); + return $expl; +} + +sub last_chunk { + my (%args) = @_; + my @required_args = qw(dbh repl_table master_ip master_port); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $repl_table, $master_ip, $master_port, $q) = @args{@required_args}; + PTDEBUG && _d('Getting last chunk for --resume'); + + my $sql = "SELECT * FROM $repl_table FORCE INDEX (ts_db_tbl) " + . "WHERE master_cnt IS NOT NULL AND master_ip = ? AND master_port = ? " + . "ORDER BY ts DESC, db DESC, tbl DESC LIMIT 1"; + PTDEBUG && _d($sql); + my $sth = $dbh->prepare($sql); + $sth->execute($master_ip, $master_port); + my $last_chunk = $sth->fetchrow_hashref(); + $sth->finish(); + PTDEBUG && _d('Last chunk:', Dumper($last_chunk)); + + if ( !$last_chunk || !$last_chunk->{ts} ) { + PTDEBUG && _d('Replicate table is empty; will not resume'); + return; + } + + return $last_chunk; +} + +sub have_more_chunks { + my (%args) = @_; + my @required_args = qw(tbl last_chunk NibbleIterator); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($tbl, $last_chunk, $nibble_iter) = @args{@required_args}; + PTDEBUG && _d('Checking for more chunks beyond last chunk'); + + # If there's no next lower boundary, then this is the last + # chunk of the table. + if ( !$nibble_iter->more_boundaries() ) { + PTDEBUG && _d('No more boundaries'); + return 0; + } + + # The previous chunk index must match the current chunk index, + # else we don't know what to do. + my $chunk_index = lc($nibble_iter->nibble_index() || ''); + if (lc($last_chunk->{chunk_index} || '') ne $chunk_index) { + warn ts("Cannot resume from table $tbl->{db}.$tbl->{tbl} chunk " + . "$last_chunk->{chunk} because the chunk indexes are different: " + . 
($last_chunk->{chunk_index} ? $last_chunk->{chunk_index} + : "no index") + . " was used originally but " + . ($chunk_index ? $chunk_index : "no index") + . " is used now. If the table has not changed significantly, " + . "this may be caused by running the tool with different command " + . "line options. This table will be skipped and checksumming " + . "will resume with the next table.\n"); + $tbl->{checksum_results}->{errors}++; + return 0; + } + + return 1; # more chunks +} + +sub wait_for_slaves { + my (%args) = @_; + my @required_args = qw(master_dbh master_slave slaves); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($master_dbh, $ms, $slaves) = @args{@required_args}; + + my $master_status = $ms->get_master_status($master_dbh); + foreach my $slave ( @$slaves ) { + $ms->wait_for_master(master_status => $master_status, slave_dbh => $slave->dbh()); + } +} + +sub wait_for_last_checksum { + my (%args) = @_; + my @required_args = qw(tbl repl_table slaves max_chunk have_time OptionParser); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my ($tbl, $repl_table, $slaves, $max_chunk, $have_time, $o) = @args{@required_args}; + my $check_pr = $args{check_pr}; + my $master_ip = $o->get('host'); + my $master_port = $o->get('port'); + # Requiring "AND master_crc IS NOT NULL" avoids a race condition + # when the system is fast but replication is slow. In such cases, + # we can select on the slave before the update for $update_sth + # replicates; this causes a false-positive diff. + my $sql = "SELECT MAX(chunk) FROM $repl_table " + . "WHERE db='$tbl->{db}' AND tbl='$tbl->{tbl}' " + . "AND master_crc IS NOT NULL AND master_ip='$master_ip' AND master_port=$master_port"; + PTDEBUG && _d($sql); + + my $sleep_time = 0; + my $n_slaves = scalar @$slaves - 1; + my @chunks; + my %skip_slave; + my %have_warned; + my $checked_all; + while ( $oktorun && $have_time->() && (!$checked_all || (($chunks[0] || 0) < $max_chunk)) ) { + @chunks = (); + $checked_all = 1; + for my $i ( 0..$n_slaves ) { + my $slave = $slaves->[$i]; + if ( $skip_slave{$i} ) { + PTDEBUG && _d('Skipping slave', $slave->name(), + 'due to previous error it caused'); + next; + } + PTDEBUG && _d('Getting last checksum on', $slave->name()); + eval { + my ($chunk) = $slave->dbh()->selectrow_array($sql); + PTDEBUG && _d($slave->name(), 'max chunk:', $chunk); + push @chunks, $chunk || 0; + }; + if (my $e = $EVAL_ERROR) { + PTDEBUG && _d($e); + if ( $slave->lost_connection($e) ) { + if ( !$have_warned{$i} && $o->get('quiet') < 2 ) { + warn ts("Lost connection to " . $slave->name() . " while " + . "waiting for the last checksum of table " + . "$tbl->{db}.$tbl->{tbl} to replicate. Will reconnect " + . "and try again. No more warnings for this replica will " + . "be printed.\n"); + $have_warned{$i}++; + } + eval { $slave->connect() }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + sleep 1; # wait between failed reconnect attempts + } + $checked_all = 0; + } + else { + if ( $o->get('quiet') < 2 ) { + warn ts("Error waiting for the last checksum of table " + . "$tbl->{db}.$tbl->{tbl} to replicate to " + . "replica " . $slave->name() . ": $e\n" + . "Check that the replica is running and has the " + . "replicate table $repl_table. Checking the replica " + . "for checksum differences will probably cause " + . 
"another error.\n"); + } + $tbl->{checksum_results}->{errors}++; + $skip_slave{$i} = 1; + } + next; + } + } + + # If we have no chunks, which can happen if the slaves + # were skipped due to errors, then @chunks will be empty + # and nothing of the following applies. In fact, it + # leads to an uninit warning because of $chunks[0]; See + # https://bugs.launchpad.net/percona-toolkit/+bug/1052475 + next unless @chunks; + @chunks = sort { $a <=> $b } @chunks; + if ( $chunks[0] < $max_chunk ) { + if ( $check_pr ) { + $check_pr->update(sub { return $chunks[0]; }); + } + + # We shouldn't wait long here because we already waited + # for all slaves to catchup at least until --max-lag. + $sleep_time += 0.25 if $sleep_time <= $o->get('max-lag'); + PTDEBUG && _d('Sleep', $sleep_time, 'waiting for chunks'); + sleep $sleep_time; + } + } + return; +} + +# Catches signals so we can exit gracefully. +sub sig_int { + my ( $signal ) = @_; + $exit_status |= $PTC_EXIT_STATUS{CAUGHT_SIGNAL}; + if ( $oktorun ) { + warn "# Caught SIG$signal.\n"; + $oktorun = 0; + } + else { + warn "# Exiting on SIG$signal.\n"; + exit $exit_status; + } +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +# ############################################################################ +# Run the program. +# ############################################################################ + +# https://bugs.launchpad.net/percona-toolkit/+bug/916999 +# http://www.mysqlperformanceblog.com/2012/02/21/dbd-mysql-4-014-breaks-pt-table-checksum-2-0/ +eval { + require DBD::mysql; +}; +if ( !$EVAL_ERROR && $DBD::mysql::VERSION eq "4.014" ) { + die "DBD::mysql v4.014 is installed, but it has as bug which causes " + . "pt-table-checksum to fail. Please upgrade DBD::mysql to any " + . "newer version.\n" +} + +if ( !caller ) { exit main(@ARGV); } + +1; # Because this is a module as well as a script. + +# ############################################################################ +# Documentation +# ############################################################################ +=pod + +=head1 NAME + +pt-table-checksum - Verify MySQL replication integrity. + +=head1 SYNOPSIS + +Usage: pt-table-checksum [OPTIONS] [DSN] + +pt-table-checksum performs an online replication consistency check by executing +checksum queries on the master, which produces different results on replicas +that are inconsistent with the master. The optional DSN specifies the master +host. The tool's L<"EXIT STATUS"> is non-zero if any differences are found, +or if any warnings or errors occur. + +The following command will connect to the replication master on localhost, +checksum every table, and report the results on every detected replica: + + pt-table-checksum + +This tool is focused on finding data differences efficiently. If any data is +different, you can resolve the problem with pt-table-sync. + +=head1 RISKS + +Percona Toolkit is mature, proven in the real world, and well tested, +but all database tools can pose a risk to the system and the database +server. Before using this tool, please: + +=over + +=item * Read the tool's documentation + +=item * Review the tool's known L<"BUGS"> + +=item * Test the tool on a non-production server + +=item * Backup your production server and verify the backups + +=back + +See also L<"LIMITATIONS">. 
+ +=head1 DESCRIPTION + +pt-table-checksum is designed to do the right thing by default in almost every +case. When in doubt, use L<"--explain"> to see how the tool will checksum a +table. The following is a high-level overview of how the tool functions. + +In contrast to older versions of pt-table-checksum, this tool is focused on a +single purpose, and does not have a lot of complexity or support many different +checksumming techniques. It executes checksum queries on only one server, and +these flow through replication to re-execute on replicas. If you need the older +behavior, you can use Percona Toolkit version 1.0. + +pt-table-checksum connects to the server you specify, and finds databases and +tables that match the filters you specify (if any). It works one table at a +time, so it does not accumulate large amounts of memory or do a lot of work +before beginning to checksum. This makes it usable on very large servers. We +have used it on servers with hundreds of thousands of databases and tables, and +trillions of rows. No matter how large the server is, pt-table-checksum works +equally well. + +One reason it can work on very large tables is that it divides each table into +chunks of rows, and checksums each chunk with a single REPLACE..SELECT query. +It varies the chunk size to make the checksum queries run in the desired amount +of time. The goal of chunking the tables, instead of doing each table with a +single big query, is to ensure that checksums are unintrusive and don't cause +too much replication lag or load on the server. That's why the target time for +each chunk is 0.5 seconds by default. + +The tool keeps track of how quickly the server is able to execute the queries, +and adjusts the chunks as it learns more about the server's performance. It +uses an exponentially decaying weighted average to keep the chunk size stable, +yet remain responsive if the server's performance changes during checksumming +for any reason. This means that the tool will quickly throttle itself if your +server becomes heavily loaded during a traffic spike or a background task, for +example. + +Chunking is accomplished by a technique that we used to call "nibbling" in other +tools in Percona Toolkit. It is the same technique used for pt-archiver, for +example. The legacy chunking algorithms used in older versions of +pt-table-checksum are removed, because they did not result in predictably sized +chunks, and didn't work well on many tables. All that is required to divide a +table into chunks is an index of some sort (preferably a primary key or unique +index). If there is no index, and the table contains a suitably small number of +rows, the tool will checksum the table in a single chunk. + +pt-table-checksum has many other safeguards to ensure that it does not interfere +with any server's operation, including replicas. To accomplish this, +pt-table-checksum detects replicas and connects to them automatically. (If this +fails, you can give it a hint with the L<"--recursion-method"> option.) + +The tool monitors replicas continually. If any replica falls too far behind in +replication, pt-table-checksum pauses to allow it to catch up. If any replica +has an error, or replication stops, pt-table-checksum pauses and waits. In +addition, pt-table-checksum looks for common causes of problems, such as +replication filters, and refuses to operate unless you force it to. 
Replication +filters are dangerous, because the queries that pt-table-checksum executes could +potentially conflict with them and cause replication to fail. + +pt-table-checksum verifies that chunks are not too large to checksum safely. It +performs an EXPLAIN query on each chunk, and skips chunks that might be larger +than the desired number of rows. You can configure the sensitivity of this +safeguard with the L<"--chunk-size-limit"> option. If a table will be +checksummed in a single chunk because it has a small number of rows, then +pt-table-checksum additionally verifies that the table isn't oversized on +replicas. This avoids the following scenario: a table is empty on the master +but is very large on a replica, and is checksummed in a single large query, +which causes a very long delay in replication. + +There are several other safeguards. For example, pt-table-checksum sets its +session-level innodb_lock_wait_timeout to 1 second, so that if there is a lock +wait, it will be the victim instead of causing other queries to time out. +Another safeguard checks the load on the database server, and pauses if the load +is too high. There is no single right answer for how to do this, but by default +pt-table-checksum will pause if there are more than 25 concurrently executing +queries. You should probably set a sane value for your server with the +L<"--max-load"> option. + +Checksumming usually is a low-priority task that should yield to other work on +the server. However, a tool that must be restarted constantly is difficult to +use. Thus, pt-table-checksum is very resilient to errors. For example, if the +database administrator needs to kill pt-table-checksum's queries for any reason, +that is not a fatal error. Users often run pt-kill to kill any long-running +checksum queries. The tool will retry a killed query once, and if it fails +again, it will move on to the next chunk of that table. The same behavior +applies if there is a lock wait timeout. The tool will print a warning if such +an error happens, but only once per table. If the connection to any server +fails, pt-table-checksum will attempt to reconnect and continue working. + +If pt-table-checksum encounters a condition that causes it to stop completely, +it is easy to resume it with the L<"--resume"> option. It will begin from the +last chunk of the last table that it processed. You can also safely stop the +tool with CTRL-C. It will finish the chunk it is currently processing, and then +exit. You can resume it as usual afterwards. + +After pt-table-checksum finishes checksumming all of the chunks in a table, it +pauses and waits for all detected replicas to finish executing the checksum +queries. Once that is finished, it checks all of the replicas to see if they +have the same data as the master, and then prints a line of output with the +results. You can see a sample of its output later in this documentation. + +The tool prints progress indicators during time-consuming operations. It prints +a progress indicator as each table is checksummed. The progress is computed by +the estimated number of rows in the table. It will also print a progress report +when it pauses to wait for replication to catch up, and when it is waiting to +check replicas for differences from the master. You can make the output less +verbose with the L<"--quiet"> option. + +If you wish, you can query the checksum tables manually to get a report of which +tables and chunks have differences from the master. 
The following query will
+report every database and table with differences, along with a summary of the
+number of chunks and rows possibly affected:
+
+  SELECT db, tbl, SUM(this_cnt) AS total_rows, COUNT(*) AS chunks
+  FROM percona.checksums
+  WHERE (
+   master_cnt <> this_cnt
+   OR master_crc <> this_crc
+   OR ISNULL(master_crc) <> ISNULL(this_crc))
+  GROUP BY db, tbl;
+
+The table referenced in that query is the checksum table, where the checksums
+are stored.  Each row in the table contains the checksum of one chunk of data
+from some table in the server.
+
+Version 2.0 of pt-table-checksum is not backwards compatible with pt-table-sync
+version 1.0.  In some cases this is not a serious problem.  Adding a
+"boundaries" column to the table, and then updating it with a manually generated
+WHERE clause, may suffice to let pt-table-sync version 1.0 interoperate with
+pt-table-checksum version 2.0.  Assuming an integer primary key named 'id', you
+can try something like the following:
+
+  ALTER TABLE checksums ADD boundaries VARCHAR(500);
+  UPDATE checksums
+   SET boundaries = COALESCE(CONCAT('id BETWEEN ', lower_boundary,
+      ' AND ', upper_boundary), '1=1');
+
+Take into consideration that by default, pt-table-checksum uses C<CRC32>
+checksums.  C<CRC32> is not a cryptographic algorithm, and for that reason it
+is prone to collisions.  On the other hand, the C<CRC32> algorithm is faster
+and less CPU-intensive than C<SHA1> and C<MD5>.
+
+Related reading material:
+Percona Toolkit UDFs: L
+How to avoid hash collisions when using MySQL's CRC32 function: L
+
+=head1 LIMITATIONS
+
+=over
+
+=item Replicas using row-based replication
+
+pt-table-checksum requires statement-based replication, and it sets
+C<binlog_format=STATEMENT> on the master, but due to a MySQL limitation
+replicas do not honor this change.  Therefore, checksums will not replicate
+past any replicas using row-based replication that are masters for
+further replicas.
+
+The tool automatically checks the C<binlog_format> on all servers.
+See L<"--[no]check-binlog-format">.
+
+(L)
+
+=item Schema and table differences
+
+The tool presumes that schemas and tables are identical on the master and
+all replicas.  Replication will break if, for example, a replica does not
+have a schema that exists on the master (and that schema is checksummed),
+or if the structure of a table on a replica is different than on the master.
+
+=back
+
+=head1 Percona XtraDB Cluster
+
+pt-table-checksum works with Percona XtraDB Cluster (PXC) 5.5.28-23.7 and newer.
+The number of possible Percona XtraDB Cluster setups is large given that
+it can be used with regular replication as well.  Therefore, only the setups
+listed below are supported and known to work.  Other setups, like cluster
+to cluster, are not supported and probably don't work.
+
+Except where noted, all of the following supported setups require that you
+use the C<dsn> method for L<"--recursion-method"> to specify cluster nodes.
+Also, the lag check (see L<"REPLICA CHECKS">) is not performed for cluster
+nodes.
+
+=over
+
+=item Single cluster
+
+The simplest PXC setup is a single cluster: all servers are cluster nodes,
+and there are no regular replicas.  If all nodes are specified in the
+DSN table (see L<"--recursion-method">), then you can run the tool on any
+node and any diffs on any other nodes will be detected.
+
+All nodes must be in the same cluster (have the same C<wsrep_cluster_name>
+value), else the tool exits with an error.  Although it's possible to have
+different clusters with the same name, this should not be done and is not
+supported.  This applies to all supported setups.
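+
+For example, a minimal sketch of this setup (the hostnames node1-node3 and the
+percona.dsns location are illustrative; the DSN table structure is shown under
+L<"--recursion-method">): register every node in the DSN table, then run the
+tool from any one of them:
+
+  mysql -e "INSERT INTO percona.dsns (dsn) VALUES ('h=node1'), ('h=node2'), ('h=node3')"
+  pt-table-checksum --recursion-method dsn=h=node1,D=percona,t=dsns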
+
+=item Single cluster with replicas
+
+Cluster nodes can also be regular masters and replicate to regular replicas.
+However, the tool can only detect diffs on a replica if run on the replica's
+"master node".  For example, if the cluster setup is,
+
+  node1 <-> node2 <-> node3
+              |         |
+              |         +-> replica3
+              +-> replica2
+
+you can detect diffs on replica3 by running the tool on node3, but to detect
+diffs on replica2 you must run the tool again on node2.  If you run the tool
+on node1, it will not detect diffs on either replica.
+
+Currently, the tool does not detect this setup or warn about replicas that
+cannot be checked (e.g. replica2 when running on node3).
+
+Replicas in this setup are still subject to L<"--[no]check-binlog-format">.
+
+=item Master to single cluster
+
+It is possible for a regular master to replicate to a cluster, as if the
+cluster were one logical slave, like:
+
+  master -> node1 <-> node2 <-> node3
+
+The tool supports this setup but only if run on the master and if all nodes
+in the cluster are consistent with the "direct replica" (node1 in this example)
+of the master.  For example, if all nodes have value "foo" for row 1 but
+the master has value "bar" for the same row, this diff will be detected.
+Or if only node1 has this diff, it will also be detected.  But if only node2
+or node3 has this diff, it will not be detected.  Therefore, this setup is
+used to check that the master and the cluster as a whole are consistent.
+
+In this setup, the tool can automatically detect the "direct replica" (node1)
+when run on the master, so you do not have to use the C<dsn> method for
+L<"--recursion-method">, because node1 will represent the entire cluster,
+which is why all other nodes must be consistent with it.
+
+The tool warns when it detects this setup to remind you that it only works
+when used as described above.  These warnings do not affect the exit status
+of the tool; they're only reminders to help avoid false-positive results.
+
+=item RocksDB support
+
+Due to limitations in the RocksDB engine, such as not supporting
+binlog_format=STATEMENT and the way RocksDB handles gap locks,
+pt-table-checksum will skip tables that use the RocksDB engine.
+More information: (L)
+
+=back
+
+=head1 OUTPUT
+
+The tool prints tabular results, one line per table:
+
+  TS             ERRORS DIFFS ROWS DIFF_ROWS CHUNKS SKIPPED  TIME TABLE
+  10-20T08:36:50      0     0  200         0      1       0 0.005 db1.tbl1
+  10-20T08:36:50      0     0  603         3      7       0 0.035 db1.tbl2
+  10-20T08:36:50      0     0   16         0      1       0 0.003 db2.tbl3
+  10-20T08:36:50      0     0  600         0      6       0 0.024 db2.tbl4
+
+Errors, warnings, and progress reports are printed to standard error.  See also
+L<"--quiet">.
+
+Each table's results are printed when the tool finishes checksumming the table.
+The columns are as follows:
+
+=over
+
+=item TS
+
+The timestamp (without the year) when the tool finished checksumming the table.
+
+=item ERRORS
+
+The number of errors and warnings that occurred while checksumming the table.
+Errors and warnings are printed to standard error while the table is in
+progress.
+
+=item DIFFS
+
+The number of chunks that differ from the master on one or more replicas.
+If C<--no-replicate-check> is specified, this column will always have zeros.
+If L<"--replicate-check-only"> is specified, then only tables with differences
+are printed.
+
+=item ROWS
+
+The number of rows selected and checksummed from the table.  It might be
+different from the number of rows in the table if you use the --where option.
+
+=item DIFF_ROWS
+
+The maximum number of differences per chunk.
If a chunk has 2 different rows and
+another chunk has 3 different rows, this value will be 3.
+
+=item CHUNKS
+
+The number of chunks into which the table was divided.
+
+=item SKIPPED
+
+The number of chunks that were skipped due to one or more of these problems:
+
+  * MySQL not using the --chunk-index
+  * MySQL not using the full chunk index (--[no]check-plan)
+  * Chunk size is greater than --chunk-size * --chunk-size-limit
+  * Lock wait timeout exceeded (--retries)
+  * Checksum query killed (--retries)
+
+As of pt-table-checksum 2.2.5, skipped chunks cause a non-zero L<"EXIT STATUS">.
+
+=item TIME
+
+The time elapsed while checksumming the table.
+
+=item TABLE
+
+The database and table that was checksummed.
+
+=back
+
+If L<"--replicate-check-only"> is specified, only checksum differences on
+detected replicas are printed.  The output is different: one paragraph per
+replica, one checksum difference per line, and values are separated by spaces:
+
+  Differences on h=127.0.0.1,P=12346
+  TABLE CHUNK CNT_DIFF CRC_DIFF CHUNK_INDEX LOWER_BOUNDARY UPPER_BOUNDARY
+  db1.tbl1 1 0 1 PRIMARY 1 100
+  db1.tbl1 6 0 1 PRIMARY 501 600
+
+  Differences on h=127.0.0.1,P=12347
+  TABLE CHUNK CNT_DIFF CRC_DIFF CHUNK_INDEX LOWER_BOUNDARY UPPER_BOUNDARY
+  db1.tbl1 1 0 1 PRIMARY 1 100
+  db2.tbl2 9 5 0 PRIMARY 101 200
+
+The first line of a paragraph indicates the replica with differences.
+In this example there are two: h=127.0.0.1,P=12346 and h=127.0.0.1,P=12347.
+The columns are as follows:
+
+=over
+
+=item TABLE
+
+The database and table that differs from the master.
+
+=item CHUNK
+
+The chunk number of the table that differs from the master.
+
+=item CNT_DIFF
+
+The number of chunk rows on the replica minus the number of chunk rows
+on the master.
+
+=item CRC_DIFF
+
+1 if the CRC of the chunk on the replica is different than the CRC of the
+chunk on the master, else 0.
+
+=item CHUNK_INDEX
+
+The index used to chunk the table.
+
+=item LOWER_BOUNDARY
+
+The index values that define the lower boundary of the chunk.
+
+=item UPPER_BOUNDARY
+
+The index values that define the upper boundary of the chunk.
+
+=back
+
+=head1 EXIT STATUS
+
+pt-table-checksum has three kinds of exit status: zero, 255, and any other
+value, which is a bitmask with flags for different problems.
+
+A zero exit status indicates no errors, warnings, or checksum differences,
+or skipped chunks or tables.
+
+A 255 exit status indicates a fatal error.  In other words: the tool died
+or crashed.  The error is printed to C<STDERR>.
+
+If the exit status is not zero or 255, then its value functions as a bitmask
+with these flags:
+
+  FLAG              BIT VALUE  MEANING
+  ================  =========  ==========================================
+  ERROR             1          A non-fatal error occurred
+  ALREADY_RUNNING   2          --pid file exists and the PID is running
+  CAUGHT_SIGNAL     4          Caught SIGHUP, SIGINT, SIGPIPE, or SIGTERM
+  NO_SLAVES_FOUND   8          No replicas or cluster nodes were found
+  TABLE_DIFF        16         At least one diff was found
+  SKIP_CHUNK        32         At least one chunk was skipped
+  SKIP_TABLE        64         At least one table was skipped
+
+If any flag is set, the exit status will be non-zero.  Use the bitwise C<AND>
+operation to check for a particular flag.  For example, if C<$exit_status & 16>
+is true, then at least one diff was found.
+
+As of pt-table-checksum 2.2.5, skipped chunks cause a non-zero exit status.
+An exit status of zero or 32 is equivalent to a zero exit status with skipped
+chunks in previous versions of the tool.
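+
+For example, a minimal POSIX shell sketch (the flag values are those listed
+above) that reacts when at least one diff was found:
+
+  pt-table-checksum
+  status=$?
+  if [ $(( status & 16 )) -ne 0 ]; then
+    echo "At least one checksum difference was found"
+  fi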
+
+=head1 OPTIONS
+
+This tool accepts additional command-line arguments.  Refer to the
+L<"SYNOPSIS"> and usage information for details.
+
+=over
+
+=item --ask-pass
+
+group: Connection
+
+Prompt for a password when connecting to MySQL.
+
+=item --channel
+
+type: string
+
+Channel name used when connected to a server using replication channels.
+Suppose you have two masters, master_a at port 12345 and master_b at port 1236,
+and a slave connected to both masters using channels chan_master_a and
+chan_master_b.  If you want to run pt-table-checksum against master_a, the tool
+won't be able to determine which is the correct master, since SHOW SLAVE STATUS
+will return 2 rows.  In this case, you can use --channel=chan_master_a to
+specify the channel name to use in the SHOW SLAVE STATUS command.
+
+=item --[no]check-binlog-format
+
+default: yes
+
+Check that the C<binlog_format> is the same on all servers.
+
+See "Replicas using row-based replication" under L<"LIMITATIONS">.
+
+=item --binary-index
+
+This option modifies the behavior of L<"--create-replicate-table"> such that the
+replicate table's upper and lower boundary columns are created with the BLOB
+data type.
+This is useful in cases where you have trouble checksumming tables with keys that
+include a binary data type or that have non-standard character sets.
+See L<"--replicate">.
+
+=item --check-interval
+
+type: time; default: 1; group: Throttle
+
+Sleep time between checks for L<"--max-lag">.
+
+=item --[no]check-plan
+
+default: yes
+
+Check query execution plans for safety.  By default, this option causes
+pt-table-checksum to run EXPLAIN before running queries that are meant to access
+a small amount of data, but which could access many rows if MySQL chooses a bad
+execution plan.  These include the queries to determine chunk boundaries and the
+chunk queries themselves.  If it appears that MySQL will use a bad query
+execution plan, the tool will skip the chunk of the table.
+
+The tool uses several heuristics to determine whether an execution plan is bad.
+The first is whether EXPLAIN reports that MySQL intends to use the desired index
+to access the rows.  If MySQL chooses a different index, the tool considers the
+query unsafe.
+
+The tool also checks how much of the index MySQL reports that it will use for
+the query.  The EXPLAIN output shows this in the key_len column.  The tool
+remembers the largest key_len seen, and skips chunks where MySQL reports that it
+will use a smaller prefix of the index.  This heuristic can be understood as
+skipping chunks that have a worse execution plan than other chunks.
+
+The tool prints a warning the first time a chunk is skipped due to
+a bad execution plan in each table.  Subsequent chunks are skipped silently,
+although you can see the count of skipped chunks in the SKIPPED column in
+the tool's output.
+
+This option adds some setup work to each table and chunk.  Although the work is
+not intrusive for MySQL, it results in more round-trips to the server, which
+consumes time.  Making chunks too small will cause the overhead to become
+relatively larger.  It is therefore recommended that you not make chunks too
+small, because the tool may take a very long time to complete if you do.
+
+=item --[no]check-replication-filters
+
+default: yes; group: Safety
+
+Do not checksum if any replication filters are set on any replicas.
+The tool looks for server options that filter replication, such as
+binlog_ignore_db and replicate_do_db.  If it finds any such filters,
+it aborts with an error.
+
+If the replicas are configured with any filtering options, you should be careful
+not to checksum any databases or tables that exist on the master and not the
+replicas.  Changes to such tables might normally be skipped on the replicas
+because of the filtering options, but the checksum queries modify the contents
+of the table that stores the checksums, not the tables whose data you are
+checksumming.  Therefore, these queries will be executed on the replica, and if
+the table or database you're checksumming does not exist, the queries will cause
+replication to fail.  For more information on replication rules, see
+L.
+
+Replication filtering makes it impossible to be sure that the checksum queries
+won't break replication (or simply fail to replicate).  If you are sure that
+it's OK to run the checksum queries, you can negate this option to disable the
+checks.  See also L<"--replicate-database">.
+
+See also L<"REPLICA CHECKS">.
+
+=item --check-slave-lag
+
+type: string; group: Throttle
+
+Pause checksumming until this replica's lag is less than L<"--max-lag">.  The
+value is a DSN that inherits properties from the master host and the connection
+options (L<"--port">, L<"--user">, etc.).  By default, pt-table-checksum
+monitors lag on all connected replicas, but this option limits lag monitoring
+to the specified replica.  This is useful if certain replicas are intentionally
+lagged (with L for example), in which case you can specify
+a normal replica to monitor.
+
+See also L<"REPLICA CHECKS">.
+
+=item --[no]check-slave-tables
+
+default: yes; group: Safety
+
+Checks that tables on slaves exist and have all the checksum L<"--columns">.
+Tables missing on slaves or not having all the checksum L<"--columns"> can
+cause the tool to break replication when it tries to check for differences.
+Only disable this check if you are aware of the risks and are sure that all
+tables on all slaves exist and are identical to the master.
+
+=item --chunk-index
+
+type: string
+
+Prefer this index for chunking tables.  By default, pt-table-checksum chooses
+the most appropriate index for chunking.  This option lets you specify the index
+that you prefer.  If the index doesn't exist, then pt-table-checksum will fall
+back to its default behavior of choosing an index.  pt-table-checksum adds the
+index to the checksum SQL statements in a C<FORCE INDEX> clause.  Be careful
+when using this option; a poor choice of index could cause bad performance.
+This is probably best to use when you are checksumming only a single table, not
+an entire server.
+
+=item --chunk-index-columns
+
+type: int
+
+Use only this many left-most columns of a L<"--chunk-index">.  This works
+only for compound indexes, and is useful in cases where a bug in the MySQL
+query optimizer (planner) causes it to scan a large range of rows instead
+of using the index to locate starting and ending points precisely.  This
+problem sometimes occurs on indexes with many columns, such as 4 or more.
+If this happens, the tool might print a warning related to the
+L<"--[no]check-plan"> option.  Instructing the tool to use only the first
+N columns of the index is a workaround for the bug in some cases.
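+
+For example, a sketch with an illustrative index name: to chunk using only the
+first two columns of a four-column index, you might run:
+
+  pt-table-checksum --chunk-index idx_a_b_c_d --chunk-index-columns 2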
+
+=item --chunk-size
+
+type: size; default: 1000
+
+Number of rows to select for each checksum query.  Allowable suffixes are
+k, M, G.  You should not use this option in most cases; prefer L<"--chunk-time">
+instead.
+
+This option can override the default behavior, which is to adjust chunk size
+dynamically to try to make chunks run in exactly L<"--chunk-time"> seconds.
+When this option isn't set explicitly, its default value is used as a starting
+point, but after that, the tool ignores this option's value.  If you set this
+option explicitly, however, then it disables the dynamic adjustment behavior and
+tries to make all chunks exactly the specified number of rows.
+
+There is a subtlety: if the chunk index is not unique, then it's possible that
+chunks will be larger than desired.  For example, if a table is chunked by an
+index that contains 10,000 of a given value, there is no way to write a WHERE
+clause that matches only 1,000 of the values, and that chunk will be at least
+10,000 rows large.  Such a chunk will probably be skipped because of
+L<"--chunk-size-limit">.
+
+Selecting a small chunk size will cause the tool to become much slower, in part
+because of the setup work required for L<"--[no]check-plan">.
+
+=item --chunk-size-limit
+
+type: float; default: 2.0; group: Safety
+
+Do not checksum chunks this much larger than the desired chunk size.
+
+When a table has no unique indexes, chunk sizes can be inaccurate.  This option
+specifies a maximum tolerable limit to the inaccuracy.  The tool uses EXPLAIN
+to estimate how many rows are in the chunk.  If that estimate exceeds the
+desired chunk size times the limit (twice as large, by default), then the tool
+skips the chunk.
+
+The minimum value for this option is 1, which means that no chunk can be larger
+than L<"--chunk-size">.  You probably don't want to specify 1, because rows
+reported by EXPLAIN are estimates, which can be different from the real number
+of rows in the chunk.  If the tool skips too many chunks because they are
+oversized, you might want to specify a value larger than the default of 2.
+
+You can disable oversized chunk checking by specifying a value of 0.
+
+=item --chunk-time
+
+type: float; default: 0.5
+
+Adjust the chunk size dynamically so each checksum query takes this long to execute.
+
+The tool tracks the checksum rate (rows per second) for all tables and each
+table individually.  It uses these rates to adjust the chunk size after each
+checksum query, so that the next checksum query takes this amount of time (in
+seconds) to execute.
+
+The algorithm is as follows: at the beginning of each table, the chunk size is
+initialized from the overall average rows per second since the tool began
+working, or the value of L<"--chunk-size"> if the tool hasn't started working
+yet.  For each subsequent chunk of a table, the tool adjusts the chunk size to
+try to make queries run in the desired amount of time.  It keeps an
+exponentially decaying moving average of queries per second, so that if the
+server's performance changes due to changes in server load, the tool adapts
+quickly.  This allows the tool to achieve predictably timed queries for each
+table, and for the server overall.
+
+If this option is set to zero, the chunk size doesn't auto-adjust, so query
+checksum times will vary, but query checksum sizes will not.  Another way to do
+the same thing is to specify a value for L<"--chunk-size"> explicitly, instead
+of leaving it at the default.
+
+=item --columns
+
+short form: -c; type: array; group: Filter
+
+Checksum only this comma-separated list of columns.  If a table doesn't have
+any of the specified columns it will be skipped.
+
+This option applies to all tables, so it really only makes sense when
+checksumming one table unless the tables have a common set of columns.
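+
+For example (the database, table, and column names are illustrative):
+
+  pt-table-checksum --tables db1.users --columns id,email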
+
+=item --config
+
+type: Array; group: Config
+
+Read this comma-separated list of config files; if specified, this must be the
+first option on the command line.
+
+See the L<"--help"> output for a list of default config files.
+
+=item --[no]create-replicate-table
+
+default: yes
+
+Create the L<"--replicate"> database and table if they do not exist.
+The structure of the replicate table is the same as the suggested table
+mentioned in L<"--replicate">.
+
+=item --databases
+
+short form: -d; type: hash; group: Filter
+
+Only checksum this comma-separated list of databases.
+
+=item --databases-regex
+
+type: string; group: Filter
+
+Only checksum databases whose names match this Perl regex.
+
+=item --defaults-file
+
+short form: -F; type: string; group: Connection
+
+Only read mysql options from the given file.  You must give an absolute
+pathname.
+
+=item --disable-qrt-plugin
+
+Disable the QRT (Query Response Time) plugin if it is enabled.
+
+=item --[no]empty-replicate-table
+
+default: yes
+
+Delete previous checksums for each table before checksumming the table.  This
+option does not truncate the entire table, it only deletes rows (checksums) for
+each table just before checksumming the table.  Therefore, if checksumming stops
+prematurely and there was preexisting data, there will still be rows for tables
+that were not checksummed before the tool was stopped.
+
+If you're resuming from a previous checksum run, then the checksum records for
+the table from which the tool resumes won't be emptied.
+
+To empty the entire replicate table, you must manually execute C<TRUNCATE TABLE>
+before running the tool.
+
+=item --engines
+
+short form: -e; type: hash; group: Filter
+
+Only checksum tables which use these storage engines.
+
+=item --explain
+
+cumulative: yes; default: 0; group: Output
+
+Show, but do not execute, checksum queries (disables
+L<"--[no]empty-replicate-table">).  If specified twice, the tool actually
+iterates through the chunking algorithm, printing the upper and lower boundary
+values for each chunk, but not executing the checksum queries.
+
+=item --fail-on-stopped-replication
+
+If replication is stopped, fail with an error (exit status 128) instead of waiting
+until replication is restarted.
+
+=item --float-precision
+
+type: int
+
+Precision for FLOAT and DOUBLE number-to-string conversion.  Causes FLOAT
+and DOUBLE values to be rounded to the specified number of digits after the
+decimal point, with the ROUND() function in MySQL.  This can help avoid
+checksum mismatches due to different floating-point representations of the same
+values on different MySQL versions and hardware.  The default is no rounding;
+the values are converted to strings by the CONCAT() function, and MySQL chooses
+the string representation.  If you specify a value of 2, for example, then the
+values 1.008 and 1.009 will be rounded to 1.01, and will checksum as equal.
+
+=item --function
+
+type: string
+
+Hash function for checksums (FNV1A_64, MURMUR_HASH, SHA1, MD5, CRC32, etc.).
+
+The default is to use CRC32(), but MD5() and SHA1() also work, and you
+can use your own function, such as a compiled UDF, if you wish.  The
+function you specify is run in SQL, not in Perl, so it must be available
+to MySQL.
+
+MySQL doesn't have good built-in hash functions that are fast.  CRC32() is too
+prone to hash collisions, and MD5() and SHA1() are very CPU-intensive.  The
+FNV1A_64() UDF that is distributed with Percona Server is a faster alternative.
+It is very simple to compile and install; look at the header in the source code
+for instructions.  If it is installed, it is preferred over MD5().  You can also
+use the MURMUR_HASH() function if you compile and install that as a UDF; the
+source is also distributed with Percona Server, and it might be better than
+FNV1A_64().
+
+=item --help
+
+group: Help
+
+Show help and exit.
+
+=item --host
+
+short form: -h; type: string; default: localhost; group: Connection
+
+Host to connect to.
+
+=item --ignore-columns
+
+type: Hash; group: Filter
+
+Ignore this comma-separated list of columns when calculating the checksum.
+If a table has all of its columns filtered by --ignore-columns, it will
+be skipped.
+
+=item --ignore-databases
+
+type: Hash; group: Filter
+
+Ignore this comma-separated list of databases.
+
+=item --ignore-databases-regex
+
+type: string; group: Filter
+
+Ignore databases whose names match this Perl regex.
+
+=item --ignore-engines
+
+type: Hash; default: FEDERATED,MRG_MyISAM; group: Filter
+
+Ignore this comma-separated list of storage engines.
+
+=item --ignore-tables
+
+type: Hash; group: Filter
+
+Ignore this comma-separated list of tables.  Table names may be qualified with
+the database name.  The L<"--replicate"> table is always automatically ignored.
+
+=item --ignore-tables-regex
+
+type: string; group: Filter
+
+Ignore tables whose names match this Perl regex.
+
+=item --max-lag
+
+type: time; default: 1s; group: Throttle
+
+Pause checksumming until all replicas' lag is less than this value.  After each
+checksum query (each chunk), pt-table-checksum looks at the replication lag of
+all replicas to which it connects, using Seconds_Behind_Master.  If any replica
+is lagging more than the value of this option, then pt-table-checksum will sleep
+for L<"--check-interval"> seconds, then check all replicas again.  If you
+specify L<"--check-slave-lag">, then the tool only examines that server for
+lag, not all servers.
+
+The tool waits forever for replicas to stop lagging.  If any replica is
+stopped, the tool waits forever until the replica is started.  Checksumming
+continues once all replicas are running and not lagging too much.
+
+The tool prints progress reports while waiting.  If a replica is stopped, it
+prints a progress report immediately, then again at every progress report
+interval.
+
+See also L<"REPLICA CHECKS">.
+
+=item --max-load
+
+type: Array; default: Threads_running=25; group: Throttle
+
+Examine SHOW GLOBAL STATUS after every chunk, and pause if any status variables
+are higher than the threshold.  The option accepts a comma-separated list of
+MySQL status variables to check for a threshold.  An optional C<=MAX_VALUE> (or
+C<:MAX_VALUE>) can follow each variable.  If not given, the tool determines a
+threshold by examining the current value and increasing it by 20%.
+
+For example, if you want the tool to pause when Threads_connected gets too high,
+you can specify "Threads_connected", and the tool will check the current value
+when it starts working and add 20% to that value.  If the current value is 100,
+then the tool will pause when Threads_connected exceeds 120, and resume working
+when it is below 120 again.  If you want to specify an explicit threshold, such
+as 110, you can use either "Threads_connected:110" or "Threads_connected=110".
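+
+For example, to keep the default Threads_running threshold and add an explicit
+Threads_connected threshold (the values shown are illustrative):
+
+  pt-table-checksum --max-load Threads_running=25,Threads_connected=110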
+
+The purpose of this option is to prevent the tool from adding too much load to
+the server.  If the checksum queries are intrusive, or if they cause lock waits,
+then other queries on the server will tend to block and queue.  This will
+typically cause Threads_running to increase, and the tool can detect that by
+running SHOW GLOBAL STATUS immediately after each checksum query finishes.  If
+you specify a threshold for this variable, then you can instruct the tool to
+wait until queries are running normally again.  This will not prevent queueing,
+however; it will only give the server a chance to recover from the queueing.  If
+you notice queueing, it is best to decrease the chunk time.
+
+=item --password
+
+short form: -p; type: string; group: Connection
+
+Password to use when connecting.
+If the password contains commas, they must be escaped with a backslash: "exam\,ple"
+
+=item --pause-file
+
+type: string
+
+Execution is paused while the file specified by this parameter exists.
+
+=item --pid
+
+type: string
+
+Create the given PID file.  The tool won't start if the PID file already
+exists and the PID it contains is different than the current PID.  However,
+if the PID file exists and the PID it contains is no longer running, the
+tool will overwrite the PID file with the current PID.  The PID file is
+removed automatically when the tool exits.
+
+=item --plugin
+
+type: string
+
+Perl module file that defines a C<pt_table_checksum_plugin> class.
+A plugin allows you to write a Perl module that can hook into many parts
+of pt-table-checksum.  This requires a good knowledge of Perl and
+Percona Toolkit conventions, which are beyond the scope of this
+documentation.  Please contact Percona if you have questions or need help.
+
+See L<"PLUGIN"> for more information.
+
+=item --port
+
+short form: -P; type: int; group: Connection
+
+Port number to use for connection.
+
+=item --progress
+
+type: array; default: time,30
+
+Print progress reports to STDERR.
+
+The value is a comma-separated list with two parts.  The first part can be
+percentage, time, or iterations; the second part specifies how often an update
+should be printed, in percentage, seconds, or number of iterations.  The tool
+prints progress reports for a variety of time-consuming operations, including
+waiting for replicas to catch up if they become lagged.
+
+=item --quiet
+
+short form: -q; cumulative: yes; default: 0
+
+Print only the most important information (disables L<"--progress">).
+Specifying this option once causes the tool to print only errors, warnings, and
+tables that have checksum differences.
+
+Specifying this option twice causes the tool to print only errors.  In this
+case, you can use the tool's exit status to determine if there were any warnings
+or checksum differences.
+
+=item --recurse
+
+type: int
+
+Number of levels to recurse in the hierarchy when discovering replicas.
+Default is infinite.  See also L<"--recursion-method"> and L<"REPLICA CHECKS">.
+
+=item --recursion-method
+
+type: array; default: processlist,hosts
+
+Preferred recursion method for discovering replicas.  pt-table-checksum
+performs several L<"REPLICA CHECKS"> before and while running.
+
+Although replicas are not required to run pt-table-checksum, the tool
+cannot detect diffs on slaves that it cannot discover.  Therefore,
+a warning is printed and the L<"EXIT STATUS"> is non-zero if no replicas
+are found and the method is not C<none>.  If this happens, try a different
+recursion method, or use the C<dsn> method to specify the replicas to check.
+
+Possible methods are:
+
+  METHOD       USES
+  ===========  =============================================
+  processlist  SHOW PROCESSLIST
+  hosts        SHOW SLAVE HOSTS
+  cluster      SHOW STATUS LIKE 'wsrep\_incoming\_addresses'
+  dsn=DSN      DSNs from a table
+  none         Do not find slaves
+
+The C<processlist> method is the default, because C<SHOW SLAVE HOSTS> is not
+reliable.  However, if the server uses a non-standard port (not 3306), then
+the C<hosts> method becomes the default because it works better in this case.
+
+The C<hosts> method requires replicas to be configured with C<report_host>,
+C<report_port>, etc.
+
+The C<cluster> method requires a cluster based on Galera 23.7.3 or newer,
+such as Percona XtraDB Cluster versions 5.5.29 and above.  This will
+auto-discover nodes in a cluster using
+C<SHOW STATUS LIKE 'wsrep\_incoming\_addresses'>.  You can combine C<cluster>
+with C<processlist> and C<hosts> to auto-discover cluster nodes and replicas,
+but this functionality is experimental.
+
+The C<dsn> method is special: rather than automatically discovering replicas,
+this method specifies a table with replica DSNs.  The tool will only connect
+to these replicas.  This method works best when replicas do not use the same
+MySQL username or password as the master, or when you want to prevent the tool
+from connecting to certain replicas.  The C<dsn> method is specified like:
+C<--recursion-method dsn=h=host,D=percona,t=dsns>.  The specified DSN must
+have D and t parts, or just a database-qualified t part, which specify the
+DSN table.  The DSN table must have the following structure:
+
+  CREATE TABLE `dsns` (
+    `id` int(11) NOT NULL AUTO_INCREMENT,
+    `parent_id` int(11) DEFAULT NULL,
+    `dsn` varchar(255) NOT NULL,
+    PRIMARY KEY (`id`)
+  );
+
+DSNs are ordered by C<id>, but C<id> and C<parent_id> are otherwise ignored.
+The C<dsn> column contains a replica DSN like it would be given on the command
+line, for example: C<"h=replica_host,u=repl_user,p=repl_pass">.
+
+The C<none> method makes the tool ignore all slaves and cluster nodes.  This
+method is not recommended because it effectively disables the
+L<"REPLICA CHECKS"> and no differences can be found.  It is useful, however, if
+you only need to write checksums on the master or a single cluster node.  The
+safer alternative is C<--no-replicate-check>: the tool finds replicas and
+cluster nodes, performs the L<"REPLICA CHECKS">, but does not check for
+differences.  See L<"--[no]replicate-check">.
+
+=item --replicate
+
+type: string; default: percona.checksums
+
+Write checksum results to this table.  The replicate table must have this
+structure (MAGIC_create_replicate):
+
+  CREATE TABLE checksums (
+    master_ip      CHAR(32) default '0.0.0.0',
+    master_port    INT default 3306,
+    db             CHAR(64)     NOT NULL,
+    tbl            CHAR(64)     NOT NULL,
+    chunk          INT          NOT NULL,
+    chunk_time     FLOAT            NULL,
+    chunk_index    VARCHAR(200)     NULL,
+    lower_boundary TEXT             NULL,
+    upper_boundary TEXT             NULL,
+    this_crc       CHAR(40)     NOT NULL,
+    this_cnt       INT          NOT NULL,
+    master_crc     CHAR(40)         NULL,
+    master_cnt     INT              NULL,
+    ts             TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
+    PRIMARY KEY (master_ip, master_port, db, tbl, chunk),
+    INDEX ts_db_tbl (ts, db, tbl)
+  ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+Note: the lower_boundary and upper_boundary columns can use the BLOB data type.
+See L<"--binary-index">.
+
+By default, L<"--create-replicate-table"> is true, so the database and
+the table specified by this option are created automatically if they do not
+exist.
+
+Be sure to choose an appropriate storage engine for the replicate table.
+If you are checksumming InnoDB tables, and you use MyISAM for this table, a
+deadlock will break replication, because the mixture of transactional and
+non-transactional tables in the checksum statements will cause it to be written
+to the binlog even though it had an error.  It will then replay without a
+deadlock on the replicas, and break replication with "different error on master
+and slave."  This is not a problem with pt-table-checksum; it's a problem with
+MySQL replication, and you can read more about it in the MySQL manual.
+
+The replicate table is never checksummed (the tool automatically adds this
+table to L<"--ignore-tables">).
+
+=item --[no]replicate-check
+
+default: yes
+
+Check replicas for data differences after finishing each table.  The tool finds
+differences by executing a simple SELECT statement on all detected replicas.
+The query compares the replica's checksum results to the master's checksum
+results.  It reports differences in the DIFFS column of the output.
+
+=item --replicate-check-only
+
+Check replicas for consistency without executing checksum queries.
+This option is used only with L<"--[no]replicate-check">.  If specified,
+pt-table-checksum doesn't checksum any tables.  It checks replicas for
+differences found by previous checksumming, and then exits.  It might be useful
+if you run pt-table-checksum quietly in a cron job, for example, and later want
+a report on the results of the cron job, perhaps to implement a Nagios check.
+
+=item --replicate-check-retries
+
+type: int; default: 1
+
+Retry checksum comparison this many times when a difference is encountered.
+Only when a difference persists after this number of checks is it considered valid.
+Using this option with a value of 2 or more alleviates spurious differences that
+arise when using the --resume option.
+
+=item --replicate-database
+
+type: string
+
+USE only this database.  By default, pt-table-checksum executes USE to select
+the database that contains the table it's currently working on.  This is a
+best-effort attempt to avoid problems with replication filters such as
+binlog_ignore_db and replicate_ignore_db.  However, replication filters can
+create a situation where there simply is no one right way to do things.  Some
+statements might not be replicated, and others might cause replication to fail.
+In such cases, you can use this option to specify a default database that
+pt-table-checksum selects with USE, and never changes.  See also
+L<"--[no]check-replication-filters">.
+
+=item --resume
+
+Resume checksumming from the last completed chunk (disables
+L<"--[no]empty-replicate-table">).  If the tool stops before it checksums all
+tables, this option makes checksumming resume from the last chunk of the last
+table that it finished.
+
+=item --retries
+
+type: int; default: 2
+
+Retry a chunk this many times when there is a nonfatal error.  Nonfatal errors
+are problems such as a lock wait timeout or the query being killed.
+
+=item --run-time
+
+type: time
+
+How long to run.  Default is to run until all tables have been checksummed.
+These time value suffixes are allowed: s (seconds), m (minutes), h (hours),
+and d (days).  Combine this option with L<"--resume"> to checksum as many
+tables as possible within an allotted time, resuming from where the tool left
+off the next time it is run, as shown below.
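+
+For example, a sketch of a nightly job that checksums for at most two hours per
+run and resumes where it left off the following night:
+
+  pt-table-checksum --run-time 2h --resume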
+
+=item --separator
+
+type: string; default: #
+
+The separator character used for CONCAT_WS().  This character is used to join
+the values of columns when checksumming.
+
+=item --skip-check-slave-lag
+
+type: DSN; repeatable: yes
+
+Replica DSN to skip when checking slave lag.  This option can be given multiple times.
+Example: --skip-check-slave-lag h=127.1,P=12345 --skip-check-slave-lag h=127.1,P=12346
+
+=item --slave-user
+
+type: string
+
+Sets the user to be used to connect to the slaves.
+This parameter allows you to have a different user with fewer privileges on the
+slaves, but that user must exist on all slaves.
+
+=item --slave-password
+
+type: string
+
+Sets the password to be used to connect to the slaves.
+It can be used with --slave-user, and the password for the user must be the same
+on all slaves.
+
+=item --set-vars
+
+type: Array; group: Connection
+
+Set the MySQL variables in this comma-separated list of C<variable=value> pairs.
+
+By default, the tool sets:
+
+=for comment ignore-pt-internal-value
+MAGIC_set_vars
+
+  wait_timeout=10000
+  innodb_lock_wait_timeout=1
+
+Variables specified on the command line override these defaults.  For
+example, specifying C<--set-vars wait_timeout=500> overrides the default
+value of C<10000>.
+
+The tool prints a warning and continues if a variable cannot be set.
+
+=item --socket
+
+short form: -S; type: string; group: Connection
+
+Socket file to use for connection.
+
+=item --slave-skip-tolerance
+
+type: float; default: 1.0
+
+When a master table is marked to be checksummed in only one chunk but a slave
+table exceeds the maximum accepted size for this, the table is skipped.
+Since row counts are often rough estimates, tables are frequently skipped
+needlessly for very small differences.
+This option provides a maximum tolerance for excess rows to prevent this.
+For example, a value of 1.2 will tolerate slave tables with up to 20% excess rows.
+
+=item --tables
+
+short form: -t; type: hash; group: Filter
+
+Checksum only this comma-separated list of tables.
+Table names may be qualified with the database name.
+
+=item --tables-regex
+
+type: string; group: Filter
+
+Checksum only tables whose names match this Perl regex.
+
+=item --trim
+
+Add TRIM() to VARCHAR columns (helps when comparing 4.1 to >= 5.0).
+This is useful when you don't care about the trailing space differences between
+MySQL versions that vary in their handling of trailing spaces.  MySQL 5.0 and
+later all retain trailing spaces in VARCHAR, while previous versions would
+remove them.  These differences will cause false checksum differences.
+
+=item --truncate-replicate-table
+
+Truncate the replicate table before starting the checksum.
+This parameter differs from L<--empty-replicate-table>, which only deletes the rows
+for the table being checksummed when starting the checksum for that table, while
+L<--truncate-replicate-table> truncates the replicate table at the beginning of the
+process; thus, all previous checksum information will be lost, even if the process
+stops due to an error.
+
+=item --user
+
+short form: -u; type: string; group: Connection
+
+User for login if not current user.
+
+=item --version
+
+group: Help
+
+Show version and exit.
+
+=item --[no]version-check
+
+default: yes
+
+Check for the latest version of Percona Toolkit, MySQL, and other programs.
+
+This is a standard "check for updates automatically" feature, with two
+additional features.  First, the tool checks its own version and also the
+versions of the following software: operating system, Percona Monitoring and
+Management (PMM), MySQL, Perl, MySQL driver for Perl (DBD::mysql), and
+Percona Toolkit.  Second, it checks for and warns about versions with known
+problems.
+For example, MySQL 5.5.25 had a critical bug and was re-released as 5.5.25a.
+
+A secure connection to Percona's Version Check database server is made to
+perform these checks.  Each request is logged by the server, including software
+version numbers and the unique ID of the checked system.  The ID is generated
+by the Percona Toolkit installation script or when the Version Check database
+call is done for the first time.
+
+Any updates or known problems are printed to STDOUT before the tool's normal
+output.  This feature should never interfere with the normal operation of the
+tool.
+
+For more information, visit L.
+
+=item --where
+
+type: string
+
+Checksum only rows matching this WHERE clause.  You can use this option to limit
+the checksum to only part of the table.  This is particularly useful if you have
+append-only tables and don't want to constantly re-check all rows; you could run
+a daily job to just check yesterday's rows, for instance.
+
+This option is much like the -w option to mysqldump.  Do not specify the WHERE
+keyword.  You might need to quote the value.  Here is an example:
+
+  pt-table-checksum --where "ts > CURRENT_DATE - INTERVAL 1 DAY"
+
+=back
+
+=head1 REPLICA CHECKS
+
+By default, pt-table-checksum attempts to find and connect to all replicas
+connected to the master host.  This automated process is called
+"slave recursion" and is controlled by the L<"--recursion-method"> and
+L<"--recurse"> options.  The tool performs these checks on all replicas:
+
+=over
+
+=item 1. L<"--[no]check-replication-filters">
+
+pt-table-checksum checks for replication filters on all replicas because
+they can complicate or break the checksum process.  By default, the tool
+will exit if any replication filters are found, but this check can be
+disabled by specifying C<--no-check-replication-filters>.
+
+=item 2. L<"--replicate"> table
+
+pt-table-checksum checks that the L<"--replicate"> table exists on all
+replicas, else checksumming can break replication when updates to the table
+on the master replicate to a replica that doesn't have the table.  This
+check cannot be disabled, and the tool waits forever until the table
+exists on all replicas, printing L<"--progress"> messages while it waits.
+
+=item 3. Single chunk size
+
+If a table can be checksummed in a single chunk on the master,
+pt-table-checksum will check that the table size on all replicas is less than
+L<"--chunk-size"> * L<"--chunk-size-limit">.  This prevents a rare problem
+where the table on the master is empty or small, but on a replica it is much
+larger.  In this case, the single chunk checksum on the master would overload
+the replica.
+
+Another rare problem occurs when the table size on a replica is close to
+L<"--chunk-size"> * L<"--chunk-size-limit">.  In such cases, the table is more
+likely to be skipped even though it's safe to checksum in a single chunk.
+This happens because table sizes are estimates.  When those estimates and
+L<"--chunk-size"> * L<"--chunk-size-limit"> are almost equal, this check
+becomes more sensitive to the estimates' margin of error rather than actual
+significant differences in table sizes.  Specifying a larger value for
+L<"--chunk-size-limit"> helps avoid this problem.
+
+This check cannot be disabled.
+
+=item 4. Lag
+
+After each chunk, pt-table-checksum checks the lag on all replicas, or only
+the replica specified by L<"--check-slave-lag">.  This helps keep the tool
+from overloading the replicas with checksum data.
+There is no way to disable this check, but you can specify a single replica to
+check with L<"--check-slave-lag">, and if that replica is the fastest, it will
+help prevent the tool from waiting too long for replica lag to abate.
+
+=item 5. Checksum chunks
+
+When pt-table-checksum finishes checksumming a table, it waits for the last
+checksum chunk to replicate to all replicas so it can perform the
+L<"--[no]replicate-check">.  Disabling that option by specifying
+L<--no-replicate-check> disables this check, but it also disables
+immediate reporting of checksum differences, thereby requiring a second run
+of the tool with L<"--replicate-check-only"> to find and print checksum
+differences.
+
+=back
+
+=head1 PLUGIN
+
+The file specified by L<"--plugin"> must define a class (i.e. a package)
+called C<pt_table_checksum_plugin> with a C<new()> subroutine.
+The tool will create an instance of this class and call any hooks that
+it defines.  No hooks are required, but a plugin isn't very useful without
+them.
+
+These hooks, in this order, are called if defined:
+
+  init
+  before_replicate_check
+  after_replicate_check
+  get_slave_lag
+  before_checksum_table
+  after_checksum_table
+
+Each hook is passed different arguments.  To see which arguments are passed
+to a hook, search for the hook's name in the tool's source code, like:
+
+  # --plugin hook
+  if ( $plugin && $plugin->can('init') ) {
+     $plugin->init(
+        slaves         => $slaves,
+        slave_lag_cxns => $slave_lag_cxns,
+        repl_table     => $repl_table,
+     );
+  }
+
+The comment C<# --plugin hook> precedes every hook call.
+
+Please contact Percona if you have questions or need help.
+
+=head1 DSN OPTIONS
+
+These DSN options are used to create a DSN.  Each option is given like
+C<option=value>.  The options are case-sensitive, so P and p are not the
+same option.  There cannot be whitespace before or after the C<=> and
+if the value contains whitespace it must be quoted.  DSN options are
+comma-separated.  See the L<percona-toolkit> manpage for full details.
+
+=over
+
+=item * A
+
+dsn: charset; copy: yes
+
+Default character set.
+
+=item * D
+
+copy: no
+
+DSN table database.
+
+=item * F
+
+dsn: mysql_read_default_file; copy: yes
+
+Defaults file for connection values.
+
+=item * h
+
+dsn: host; copy: yes
+
+Connect to host.
+
+=item * p
+
+dsn: password; copy: yes
+
+Password to use when connecting.
+If the password contains commas, they must be escaped with a backslash: "exam\,ple"
+
+=item * P
+
+dsn: port; copy: yes
+
+Port number to use for connection.
+
+=item * S
+
+dsn: mysql_socket; copy: no
+
+Socket file to use for connection.
+
+=item * t
+
+copy: no
+
+DSN table table.
+
+=item * u
+
+dsn: user; copy: yes
+
+User for login if not current user.
+
+=back
+
+=head1 ENVIRONMENT
+
+The environment variable C<PTDEBUG> enables verbose debugging output to STDERR.
+To enable debugging and capture all output to a file, run the tool like:
+
+  PTDEBUG=1 pt-table-checksum ... > FILE 2>&1
+
+Be careful: debugging output is voluminous and can generate several megabytes
+of output.
+
+=head1 ATTENTION
+
+Using C<PTDEBUG> might expose passwords.  When debugging is enabled, all
+command-line parameters are shown in the output.
+
+=head1 SYSTEM REQUIREMENTS
+
+You need Perl, DBI, DBD::mysql, and some core packages that ought to be
+installed in any reasonably new version of Perl.
+
+=head1 BUGS
+
+For a list of known bugs, see L.
+
+Please report bugs at L.
+Include the following information in your bug report:
+
+=over
+
+=item * Complete command-line used to run the tool
+
+=item * Tool L<"--version">
+
+=item * MySQL version of all servers involved
+
+=item * Output from the tool including STDERR
+
+=item * Input files (log/dump/config files, etc.)
+
+=back
+
+If possible, include debugging output by running the tool with C<PTDEBUG>;
+see L<"ENVIRONMENT">.
+
+=head1 DOWNLOADING
+
+Visit L to download the
+latest release of Percona Toolkit.  Or, get the latest release from the
+command line:
+
+  wget percona.com/get/percona-toolkit.tar.gz
+
+  wget percona.com/get/percona-toolkit.rpm
+
+  wget percona.com/get/percona-toolkit.deb
+
+You can also get individual tools from the latest release:
+
+  wget percona.com/get/TOOL
+
+Replace C<TOOL> with the name of any tool.
+
+=head1 AUTHORS
+
+Baron Schwartz and Daniel Nichter
+
+=head1 ACKNOWLEDGMENTS
+
+Claus Jeppesen, Francois Saint-Jacques, Giuseppe Maxia, Heikki Tuuri,
+James Briggs, Martin Friebe, and Sergey Zhuravlev
+
+=head1 ABOUT PERCONA TOOLKIT
+
+This tool is part of Percona Toolkit, a collection of advanced command-line
+tools for MySQL developed by Percona.  Percona Toolkit was forked from two
+projects in June, 2011: Maatkit and Aspersa.  Those projects were created by
+Baron Schwartz and primarily developed by him and Daniel Nichter.  Visit
+L to learn about other free, open-source
+software from Percona.
+
+=head1 COPYRIGHT, LICENSE, AND WARRANTY
+
+This program is copyright 2011-2021 Percona LLC and/or its affiliates,
+2007-2011 Baron Schwartz.
+
+THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation, version 2; OR the Perl Artistic License.  On UNIX and similar
+systems, you can issue `man perlgpl' or `man perlartistic' to read these
+licenses.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+Place, Suite 330, Boston, MA 02111-1307 USA.
+
+=head1 VERSION
+
+pt-table-checksum 3.4.0
+
+=cut
diff --git a/dbm-services/mysql/db-tools/mysql-table-checksum/pt-table-sync b/dbm-services/mysql/db-tools/mysql-table-checksum/pt-table-sync
new file mode 100755
index 0000000000..4680f7f430
--- /dev/null
+++ b/dbm-services/mysql/db-tools/mysql-table-checksum/pt-table-sync
@@ -0,0 +1,13106 @@
+#!/usr/bin/env perl
+
+# This program is part of Percona Toolkit: http://www.percona.com/software/
+# See "COPYRIGHT, LICENSE, AND WARRANTY" at the end of this file for legal
+# notices and disclaimers.
+
+use strict;
+use warnings FATAL => 'all';
+
+# This tool is "fat-packed": most of its dependent modules are embedded
+# in this file.  Setting %INC to this file for each module makes Perl aware
+# of this so it will not try to load the module from @INC.  See the tool's
+# documentation for a full list of dependencies.
+BEGIN { + $INC{$_} = __FILE__ for map { (my $pkg = "$_.pm") =~ s!::!/!g; $pkg } (qw( + Percona::Toolkit + OptionParser + Lmo::Utils + Lmo::Meta + Lmo::Object + Lmo::Types + Lmo + Quoter + DSNParser + VersionParser + TableSyncStream + TableParser + RowDiff + ChangeHandler + TableChunker + TableChecksum + TableSyncChunk + TableSyncNibble + TableSyncGroupBy + TableSyncer + TableNibbler + MasterSlave + Daemon + SchemaIterator + Transformers + Retry + HTTP::Micro + VersionCheck + )); +} + +# ########################################################################### +# Percona::Toolkit package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Percona/Toolkit.pm +# t/lib/Percona/Toolkit.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Percona::Toolkit; + +our $VERSION = '3.3.2'; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Carp qw(carp cluck); +use Data::Dumper qw(); + +require Exporter; +our @ISA = qw(Exporter); +our @EXPORT_OK = qw( + have_required_args + Dumper + _d +); + +sub have_required_args { + my ($args, @required_args) = @_; + my $have_required_args = 1; + foreach my $arg ( @required_args ) { + if ( !defined $args->{$arg} ) { + $have_required_args = 0; + carp "Argument $arg is not defined"; + } + } + cluck unless $have_required_args; # print backtrace + return $have_required_args; +} + +sub Dumper { + local $Data::Dumper::Indent = 1; + local $Data::Dumper::Sortkeys = 1; + local $Data::Dumper::Quotekeys = 0; + Data::Dumper::Dumper(@_); +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Percona::Toolkit package +# ########################################################################### + +# ########################################################################### +# OptionParser package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/OptionParser.pm +# t/lib/OptionParser.t +# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package OptionParser; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use List::Util qw(max); +use Getopt::Long; +use Data::Dumper; + +my $POD_link_re = '[LC]<"?([^">]+)"?>'; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + + my ($program_name) = $PROGRAM_NAME =~ m/([.A-Za-z-]+)$/; + $program_name ||= $PROGRAM_NAME; + my $home = $ENV{HOME} || $ENV{HOMEPATH} || $ENV{USERPROFILE} || '.'; + + my %attributes = ( + 'type' => 1, + 'short form' => 1, + 'group' => 1, + 'default' => 1, + 'cumulative' => 1, + 'negatable' => 1, + 'repeatable' => 1, # means it can be specified more than once + ); + + my $self = { + head1 => 'OPTIONS', # These args are used internally + skip_rules => 0, # to instantiate another Option- + item => '--(.*)', # Parser obj that parses the + attributes => \%attributes, # DSN OPTIONS section. Tools + parse_attributes => \&_parse_attribs, # don't tinker with these args. + + %args, + + strict => 1, # disabled by a special rule + program_name => $program_name, + opts => {}, + got_opts => 0, + short_opts => {}, + defaults => {}, + groups => {}, + allowed_groups => {}, + errors => [], + rules => [], # desc of rules for --help + mutex => [], # rule: opts are mutually exclusive + atleast1 => [], # rule: at least one opt is required + disables => {}, # rule: opt disables other opts + defaults_to => {}, # rule: opt defaults to value of other opt + DSNParser => undef, + default_files => [ + "/etc/percona-toolkit/percona-toolkit.conf", + "/etc/percona-toolkit/$program_name.conf", + "$home/.percona-toolkit.conf", + "$home/.$program_name.conf", + ], + types => { + string => 's', # standard Getopt type + int => 'i', # standard Getopt type + float => 'f', # standard Getopt type + Hash => 'H', # hash, formed from a comma-separated list + hash => 'h', # hash as above, but only if a value is given + Array => 'A', # array, similar to Hash + array => 'a', # array, similar to hash + DSN => 'd', # DSN + size => 'z', # size with kMG suffix (powers of 2^10) + time => 'm', # time, with an optional suffix of s/h/m/d + }, + }; + + return bless $self, $class; +} + +sub get_specs { + my ( $self, $file ) = @_; + $file ||= $self->{file} || __FILE__; + my @specs = $self->_pod_to_specs($file); + $self->_parse_specs(@specs); + + open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; + my $contents = do { local $/ = undef; <$fh> }; + close $fh; + if ( $contents =~ m/^=head1 DSN OPTIONS/m ) { + PTDEBUG && _d('Parsing DSN OPTIONS'); + my $dsn_attribs = { + dsn => 1, + copy => 1, + }; + my $parse_dsn_attribs = sub { + my ( $self, $option, $attribs ) = @_; + map { + my $val = $attribs->{$_}; + if ( $val ) { + $val = $val eq 'yes' ? 1 + : $val eq 'no' ? 0 + : $val; + $attribs->{$_} = $val; + } + } keys %$attribs; + return { + key => $option, + %$attribs, + }; + }; + my $dsn_o = new OptionParser( + description => 'DSN OPTIONS', + head1 => 'DSN OPTIONS', + dsn => 0, # XXX don't infinitely recurse! 
+ item => '\* (.)', # key opts are a single character + skip_rules => 1, # no rules before opts + attributes => $dsn_attribs, + parse_attributes => $parse_dsn_attribs, + ); + my @dsn_opts = map { + my $opts = { + key => $_->{spec}->{key}, + dsn => $_->{spec}->{dsn}, + copy => $_->{spec}->{copy}, + desc => $_->{desc}, + }; + $opts; + } $dsn_o->_pod_to_specs($file); + $self->{DSNParser} = DSNParser->new(opts => \@dsn_opts); + } + + if ( $contents =~ m/^=head1 VERSION\n\n^(.+)$/m ) { + $self->{version} = $1; + PTDEBUG && _d($self->{version}); + } + + return; +} + +sub DSNParser { + my ( $self ) = @_; + return $self->{DSNParser}; +}; + +sub get_defaults_files { + my ( $self ) = @_; + return @{$self->{default_files}}; +} + +sub _pod_to_specs { + my ( $self, $file ) = @_; + $file ||= $self->{file} || __FILE__; + open my $fh, '<', $file or die "Cannot open $file: $OS_ERROR"; + + my @specs = (); + my @rules = (); + my $para; + + local $INPUT_RECORD_SEPARATOR = ''; + while ( $para = <$fh> ) { + next unless $para =~ m/^=head1 $self->{head1}/; + last; + } + + while ( $para = <$fh> ) { + last if $para =~ m/^=over/; + next if $self->{skip_rules}; + chomp $para; + $para =~ s/\s+/ /g; + $para =~ s/$POD_link_re/$1/go; + PTDEBUG && _d('Option rule:', $para); + push @rules, $para; + } + + die "POD has no $self->{head1} section" unless $para; + + do { + if ( my ($option) = $para =~ m/^=item $self->{item}/ ) { + chomp $para; + PTDEBUG && _d($para); + my %attribs; + + $para = <$fh>; # read next paragraph, possibly attributes + + if ( $para =~ m/: / ) { # attributes + $para =~ s/\s+\Z//g; + %attribs = map { + my ( $attrib, $val) = split(/: /, $_); + die "Unrecognized attribute for --$option: $attrib" + unless $self->{attributes}->{$attrib}; + ($attrib, $val); + } split(/; /, $para); + if ( $attribs{'short form'} ) { + $attribs{'short form'} =~ s/-//; + } + $para = <$fh>; # read next paragraph, probably short help desc + } + else { + PTDEBUG && _d('Option has no attributes'); + } + + $para =~ s/\s+\Z//g; + $para =~ s/\s+/ /g; + $para =~ s/$POD_link_re/$1/go; + + $para =~ s/\.(?:\n.*| [A-Z].*|\Z)//s; + PTDEBUG && _d('Short help:', $para); + + die "No description after option spec $option" if $para =~ m/^=item/; + + if ( my ($base_option) = $option =~ m/^\[no\](.*)/ ) { + $option = $base_option; + $attribs{'negatable'} = 1; + } + + push @specs, { + spec => $self->{parse_attributes}->($self, $option, \%attribs), + desc => $para + . (defined $attribs{default} ? " (default $attribs{default})" : ''), + group => ($attribs{'group'} ? $attribs{'group'} : 'default'), + attributes => \%attribs + }; + } + while ( $para = <$fh> ) { + last unless $para; + if ( $para =~ m/^=head1/ ) { + $para = undef; # Can't 'last' out of a do {} block. + last; + } + last if $para =~ m/^=item /; + } + } while ( $para ); + + die "No valid specs in $self->{head1}" unless @specs; + + close $fh; + return @specs, @rules; +} + +sub _parse_specs { + my ( $self, @specs ) = @_; + my %disables; # special rule that requires deferred checking + + foreach my $opt ( @specs ) { + if ( ref $opt ) { # It's an option spec, not a rule. 
+ PTDEBUG && _d('Parsing opt spec:', + map { ($_, '=>', $opt->{$_}) } keys %$opt); + + my ( $long, $short ) = $opt->{spec} =~ m/^([\w-]+)(?:\|([^!+=]*))?/; + if ( !$long ) { + die "Cannot parse long option from spec $opt->{spec}"; + } + $opt->{long} = $long; + + die "Duplicate long option --$long" if exists $self->{opts}->{$long}; + $self->{opts}->{$long} = $opt; + + if ( length $long == 1 ) { + PTDEBUG && _d('Long opt', $long, 'looks like short opt'); + $self->{short_opts}->{$long} = $long; + } + + if ( $short ) { + die "Duplicate short option -$short" + if exists $self->{short_opts}->{$short}; + $self->{short_opts}->{$short} = $long; + $opt->{short} = $short; + } + else { + $opt->{short} = undef; + } + + $opt->{is_negatable} = $opt->{spec} =~ m/!/ ? 1 : 0; + $opt->{is_cumulative} = $opt->{spec} =~ m/\+/ ? 1 : 0; + $opt->{is_repeatable} = $opt->{attributes}->{repeatable} ? 1 : 0; + $opt->{is_required} = $opt->{desc} =~ m/required/ ? 1 : 0; + + $opt->{group} ||= 'default'; + $self->{groups}->{ $opt->{group} }->{$long} = 1; + + $opt->{value} = undef; + $opt->{got} = 0; + + my ( $type ) = $opt->{spec} =~ m/=(.)/; + $opt->{type} = $type; + PTDEBUG && _d($long, 'type:', $type); + + + $opt->{spec} =~ s/=./=s/ if ( $type && $type =~ m/[HhAadzm]/ ); + + if ( (my ($def) = $opt->{desc} =~ m/default\b(?: ([^)]+))?/) ) { + $self->{defaults}->{$long} = defined $def ? $def : 1; + PTDEBUG && _d($long, 'default:', $def); + } + + if ( $long eq 'config' ) { + $self->{defaults}->{$long} = join(',', $self->get_defaults_files()); + } + + if ( (my ($dis) = $opt->{desc} =~ m/(disables .*)/) ) { + $disables{$long} = $dis; + PTDEBUG && _d('Deferring check of disables rule for', $opt, $dis); + } + + $self->{opts}->{$long} = $opt; + } + else { # It's an option rule, not a spec. 
+ PTDEBUG && _d('Parsing rule:', $opt); + push @{$self->{rules}}, $opt; + my @participants = $self->_get_participants($opt); + my $rule_ok = 0; + + if ( $opt =~ m/mutually exclusive|one and only one/ ) { + $rule_ok = 1; + push @{$self->{mutex}}, \@participants; + PTDEBUG && _d(@participants, 'are mutually exclusive'); + } + if ( $opt =~ m/at least one|one and only one/ ) { + $rule_ok = 1; + push @{$self->{atleast1}}, \@participants; + PTDEBUG && _d(@participants, 'require at least one'); + } + if ( $opt =~ m/default to/ ) { + $rule_ok = 1; + $self->{defaults_to}->{$participants[0]} = $participants[1]; + PTDEBUG && _d($participants[0], 'defaults to', $participants[1]); + } + if ( $opt =~ m/restricted to option groups/ ) { + $rule_ok = 1; + my ($groups) = $opt =~ m/groups ([\w\s\,]+)/; + my @groups = split(',', $groups); + %{$self->{allowed_groups}->{$participants[0]}} = map { + s/\s+//; + $_ => 1; + } @groups; + } + if( $opt =~ m/accepts additional command-line arguments/ ) { + $rule_ok = 1; + $self->{strict} = 0; + PTDEBUG && _d("Strict mode disabled by rule"); + } + + die "Unrecognized option rule: $opt" unless $rule_ok; + } + } + + foreach my $long ( keys %disables ) { + my @participants = $self->_get_participants($disables{$long}); + $self->{disables}->{$long} = \@participants; + PTDEBUG && _d('Option', $long, 'disables', @participants); + } + + return; +} + +sub _get_participants { + my ( $self, $str ) = @_; + my @participants; + foreach my $long ( $str =~ m/--(?:\[no\])?([\w-]+)/g ) { + die "Option --$long does not exist while processing rule $str" + unless exists $self->{opts}->{$long}; + push @participants, $long; + } + PTDEBUG && _d('Participants for', $str, ':', @participants); + return @participants; +} + +sub opts { + my ( $self ) = @_; + my %opts = %{$self->{opts}}; + return %opts; +} + +sub short_opts { + my ( $self ) = @_; + my %short_opts = %{$self->{short_opts}}; + return %short_opts; +} + +sub set_defaults { + my ( $self, %defaults ) = @_; + $self->{defaults} = {}; + foreach my $long ( keys %defaults ) { + die "Cannot set default for nonexistent option $long" + unless exists $self->{opts}->{$long}; + $self->{defaults}->{$long} = $defaults{$long}; + PTDEBUG && _d('Default val for', $long, ':', $defaults{$long}); + } + return; +} + +sub get_defaults { + my ( $self ) = @_; + return $self->{defaults}; +} + +sub get_groups { + my ( $self ) = @_; + return $self->{groups}; +} + +sub _set_option { + my ( $self, $opt, $val ) = @_; + my $long = exists $self->{opts}->{$opt} ? $opt + : exists $self->{short_opts}->{$opt} ? $self->{short_opts}->{$opt} + : die "Getopt::Long gave a nonexistent option: $opt"; + $opt = $self->{opts}->{$long}; + if ( $opt->{is_cumulative} ) { + $opt->{value}++; + } + elsif ( ($opt->{type} || '') eq 's' && $val =~ m/^--?(.+)/ ) { + my $next_opt = $1; + if ( exists $self->{opts}->{$next_opt} + || exists $self->{short_opts}->{$next_opt} ) { + $self->save_error("--$long requires a string value"); + return; + } + else { + if ($opt->{is_repeatable}) { + push @{$opt->{value}} , $val; + } + else { + $opt->{value} = $val; + } + } + } + else { + if ($opt->{is_repeatable}) { + push @{$opt->{value}} , $val; + } + else { + $opt->{value} = $val; + } + } + $opt->{got} = 1; + PTDEBUG && _d('Got option', $long, '=', $val); +} + +sub get_opts { + my ( $self ) = @_; + + foreach my $long ( keys %{$self->{opts}} ) { + $self->{opts}->{$long}->{got} = 0; + $self->{opts}->{$long}->{value} + = exists $self->{defaults}->{$long} ? 
$self->{defaults}->{$long} + : $self->{opts}->{$long}->{is_cumulative} ? 0 + : undef; + } + $self->{got_opts} = 0; + + $self->{errors} = []; + + if ( @ARGV && $ARGV[0] =~/^--config=/ ) { + $ARGV[0] = substr($ARGV[0],9); + $ARGV[0] =~ s/^'(.*)'$/$1/; + $ARGV[0] =~ s/^"(.*)"$/$1/; + $self->_set_option('config', shift @ARGV); + } + if ( @ARGV && $ARGV[0] eq "--config" ) { + shift @ARGV; + $self->_set_option('config', shift @ARGV); + } + if ( $self->has('config') ) { + my @extra_args; + foreach my $filename ( split(',', $self->get('config')) ) { + eval { + push @extra_args, $self->_read_config_file($filename); + }; + if ( $EVAL_ERROR ) { + if ( $self->got('config') ) { + die $EVAL_ERROR; + } + elsif ( PTDEBUG ) { + _d($EVAL_ERROR); + } + } + } + unshift @ARGV, @extra_args; + } + + Getopt::Long::Configure('no_ignore_case', 'bundling'); + GetOptions( + map { $_->{spec} => sub { $self->_set_option(@_); } } + grep { $_->{long} ne 'config' } # --config is handled specially above. + values %{$self->{opts}} + ) or $self->save_error('Error parsing options'); + + if ( exists $self->{opts}->{version} && $self->{opts}->{version}->{got} ) { + if ( $self->{version} ) { + print $self->{version}, "\n"; + exit 0; + } + else { + print "Error parsing version. See the VERSION section of the tool's documentation.\n"; + exit 1; + } + } + + if ( @ARGV && $self->{strict} ) { + $self->save_error("Unrecognized command-line options @ARGV"); + } + + foreach my $mutex ( @{$self->{mutex}} ) { + my @set = grep { $self->{opts}->{$_}->{got} } @$mutex; + if ( @set > 1 ) { + my $err = join(', ', map { "--$self->{opts}->{$_}->{long}" } + @{$mutex}[ 0 .. scalar(@$mutex) - 2] ) + . ' and --'.$self->{opts}->{$mutex->[-1]}->{long} + . ' are mutually exclusive.'; + $self->save_error($err); + } + } + + foreach my $required ( @{$self->{atleast1}} ) { + my @set = grep { $self->{opts}->{$_}->{got} } @$required; + if ( @set == 0 ) { + my $err = join(', ', map { "--$self->{opts}->{$_}->{long}" } + @{$required}[ 0 .. scalar(@$required) - 2] ) + .' or --'.$self->{opts}->{$required->[-1]}->{long}; + $self->save_error("Specify at least one of $err"); + } + } + + $self->_check_opts( keys %{$self->{opts}} ); + $self->{got_opts} = 1; + return; +} + +sub _check_opts { + my ( $self, @long ) = @_; + my $long_last = scalar @long; + while ( @long ) { + foreach my $i ( 0..$#long ) { + my $long = $long[$i]; + next unless $long; + my $opt = $self->{opts}->{$long}; + if ( $opt->{got} ) { + if ( exists $self->{disables}->{$long} ) { + my @disable_opts = @{$self->{disables}->{$long}}; + map { $self->{opts}->{$_}->{value} = undef; } @disable_opts; + PTDEBUG && _d('Unset options', @disable_opts, + 'because', $long,'disables them'); + } + + if ( exists $self->{allowed_groups}->{$long} ) { + + my @restricted_groups = grep { + !exists $self->{allowed_groups}->{$long}->{$_} + } keys %{$self->{groups}}; + + my @restricted_opts; + foreach my $restricted_group ( @restricted_groups ) { + RESTRICTED_OPT: + foreach my $restricted_opt ( + keys %{$self->{groups}->{$restricted_group}} ) + { + next RESTRICTED_OPT if $restricted_opt eq $long; + push @restricted_opts, $restricted_opt + if $self->{opts}->{$restricted_opt}->{got}; + } + } + + if ( @restricted_opts ) { + my $err; + if ( @restricted_opts == 1 ) { + $err = "--$restricted_opts[0]"; + } + else { + $err = join(', ', + map { "--$self->{opts}->{$_}->{long}" } + grep { $_ } + @restricted_opts[0..scalar(@restricted_opts) - 2] + ) + . 
' or --'.$self->{opts}->{$restricted_opts[-1]}->{long};
+               }
+               $self->save_error("--$long is not allowed with $err");
+            }
+         }
+
+         }
+         elsif ( $opt->{is_required} ) {
+            $self->save_error("Required option --$long must be specified");
+         }
+
+         $self->_validate_type($opt);
+         if ( $opt->{parsed} ) {
+            delete $long[$i];
+         }
+         else {
+            PTDEBUG && _d('Temporarily failed to parse', $long);
+         }
+      }
+
+      die "Failed to parse options, possibly due to circular dependencies"
+         if @long == $long_last;
+      $long_last = @long;
+   }
+
+   return;
+}
+
+sub _validate_type {
+   my ( $self, $opt ) = @_;
+   return unless $opt;
+
+   if ( !$opt->{type} ) {
+      $opt->{parsed} = 1;
+      return;
+   }
+
+   my $val = $opt->{value};
+
+   if ( $val && $opt->{type} eq 'm' ) {  # type time
+      PTDEBUG && _d('Parsing option', $opt->{long}, 'as a time value');
+      my ( $prefix, $num, $suffix ) = $val =~ m/([+-]?)(\d+)([a-z])?$/;
+      if ( !$suffix ) {
+         my ( $s ) = $opt->{desc} =~ m/\(suffix (.)\)/;
+         $suffix = $s || 's';
+         PTDEBUG && _d('No suffix given; using', $suffix, 'for',
+            $opt->{long}, '(value:', $val, ')');
+      }
+      if ( $suffix =~ m/[smhd]/ ) {
+         $val = $suffix eq 's' ? $num            # Seconds
+              : $suffix eq 'm' ? $num * 60       # Minutes
+              : $suffix eq 'h' ? $num * 3600     # Hours
+              :                  $num * 86400;   # Days
+         $opt->{value} = ($prefix || '') . $val;
+         PTDEBUG && _d('Setting option', $opt->{long}, 'to', $val);
+      }
+      else {
+         $self->save_error("Invalid time suffix for --$opt->{long}");
+      }
+   }
+   elsif ( $val && $opt->{type} eq 'd' ) {  # type DSN
+      PTDEBUG && _d('Parsing option', $opt->{long}, 'as a DSN');
+      my $prev = {};
+      my $from_key = $self->{defaults_to}->{ $opt->{long} };
+      if ( $from_key ) {
+         PTDEBUG && _d($opt->{long}, 'DSN copies from', $from_key, 'DSN');
+         if ( $self->{opts}->{$from_key}->{parsed} ) {
+            $prev = $self->{opts}->{$from_key}->{value};
+         }
+         else {
+            PTDEBUG && _d('Cannot parse', $opt->{long}, 'until',
+               $from_key, 'parsed');
+            return;
+         }
+      }
+      my $defaults = $self->{DSNParser}->parse_options($self);
+      if (!$opt->{attributes}->{repeatable}) {
+          $opt->{value} = $self->{DSNParser}->parse($val, $prev, $defaults);
+      } else {
+          my $values = [];
+          for my $dsn_string (@$val) {
+              push @$values, $self->{DSNParser}->parse($dsn_string, $prev, $defaults);
+          }
+          $opt->{value} = $values;
+      }
+   }
+   elsif ( $val && $opt->{type} eq 'z' ) {  # type size
+      PTDEBUG && _d('Parsing option', $opt->{long}, 'as a size value');
+      $self->_parse_size($opt, $val);
+   }
+   elsif ( $opt->{type} eq 'H' || (defined $val && $opt->{type} eq 'h') ) {
+      $opt->{value} = { map { $_ => 1 } split(/(?<!\\),\s*/, ($val || '')) };
+   }
+   elsif ( $opt->{type} eq 'A' || (defined $val && $opt->{type} eq 'a') ) {
+      $opt->{value} = [ split(/(?<!\\),\s*/, ($val || '')) ];
+   }
+   else {
+      PTDEBUG && _d('Nothing to validate for option',
+         $opt->{long}, 'type', $opt->{type}, 'value', $val);
+   }
+
+   $opt->{parsed} = 1;
+   return;
+}
+
+sub get {
+   my ( $self, $opt ) = @_;
+   my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt);
+   die "Option $opt does not exist"
+      unless $long && exists $self->{opts}->{$long};
+   return $self->{opts}->{$long}->{value};
+}
+
+sub got {
+   my ( $self, $opt ) = @_;
+   my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt);
+   die "Option $opt does not exist"
+      unless $long && exists $self->{opts}->{$long};
+   return $self->{opts}->{$long}->{got};
+}
+
+sub has {
+   my ( $self, $opt ) = @_;
+   my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt);
+   return defined $long ? exists $self->{opts}->{$long} : 0;
+}
+
+sub set {
+   my ( $self, $opt, $val ) = @_;
+   my $long = (length $opt == 1 ? $self->{short_opts}->{$opt} : $opt);
+   die "Option $opt does not exist"
+      unless $long && exists $self->{opts}->{$long};
+   $self->{opts}->{$long}->{value} = $val;
+   return;
+}
+
+sub save_error {
+   my ( $self, $error ) = @_;
+   push @{$self->{errors}}, $error;
+   return;
+}
+
+sub errors {
+   my ( $self ) = @_;
+   return $self->{errors};
+}
+
+sub usage {
+   my ( $self ) = @_;
+   warn "No usage string is set" unless $self->{usage}; # XXX
+   return "Usage: " . ($self->{usage} || '') . "\n";
+}
+
+sub descr {
+   my ( $self ) = @_;
+   warn "No description string is set" unless $self->{description}; # XXX
+   my $descr = ($self->{description} || $self->{program_name} || '')
+             . " For more details, please use the --help option, "
+             . "or try 'perldoc $PROGRAM_NAME' "
+             . "for complete documentation.";
+   $descr = join("\n", $descr =~ m/(.{0,80})(?:\s+|$)/g)
+      unless $ENV{DONT_BREAK_LINES};
+   $descr =~ s/ +$//mg;
+   return $descr;
+}
+
+sub usage_or_errors {
+   my ( $self, $file, $return ) = @_;
+   $file ||= $self->{file} || __FILE__;
+
+   if ( !$self->{description} || !$self->{usage} ) {
+      PTDEBUG && _d("Getting description and usage from SYNOPSIS in", $file);
+      my %synop = $self->_parse_synopsis($file);
+      $self->{description} ||= $synop{description};
+      $self->{usage}       ||= $synop{usage};
+      PTDEBUG && _d("Description:", $self->{description},
+         "\nUsage:", $self->{usage});
+   }
+
+   if ( $self->{opts}->{help}->{got} ) {
+      print $self->print_usage() or die "Cannot print usage: $OS_ERROR";
+      exit 0 unless $return;
+   }
+   elsif ( scalar @{$self->{errors}} ) {
+      print $self->print_errors() or die "Cannot print errors: $OS_ERROR";
+      exit 1 unless $return;
+   }
+
+   return;
+}
+
+sub print_errors {
+   my ( $self ) = @_;
+   my $usage = $self->usage() . "\n";
+   if ( (my @errors = @{$self->{errors}}) ) {
+      $usage .= join("\n  * ", 'Errors in command-line arguments:', @errors)
+              . "\n";
+   }
+   return $usage . "\n" . $self->descr();
+}
+
+sub print_usage {
+   my ( $self ) = @_;
+   die "Run get_opts() before print_usage()" unless $self->{got_opts};
+   my @opts = values %{$self->{opts}};
+
+   my $maxl = max(
+      map {
+         length($_->{long})               # option long name
+         + ($_->{is_negatable} ? 4 : 0)   # "[no]" if opt is negatable
+         + ($_->{type} ? 2 : 0)           # "=x" where x is the opt type
+      }
+      @opts);
+
+   my $maxs = max(0,
+      map {
+         length($_)
+         + ($self->{opts}->{$_}->{is_negatable} ? 4 : 0)
+         + ($self->{opts}->{$_}->{type} ? 2 : 0)
+      }
+      values %{$self->{short_opts}});
+
+   my $lcol = max($maxl, ($maxs + 3));
+   my $rcol = 80 - $lcol - 6;
+   my $rpad = ' ' x ( 80 - $rcol );
+
+   $maxs = max($lcol - 3, $maxs);
+
+   my $usage = $self->descr() . "\n" . $self->usage();
+
+   my @groups = reverse sort grep { $_ ne 'default'; } keys %{$self->{groups}};
+   push @groups, 'default';
+
+   foreach my $group ( reverse @groups ) {
+      $usage .= "\n".($group eq 'default' ? 'Options' : $group).":\n\n";
+      foreach my $opt (
+         sort { $a->{long} cmp $b->{long} }
+         grep { $_->{group} eq $group }
+         @opts )
+      {
+         my $long  = $opt->{is_negatable} ? "[no]$opt->{long}" : $opt->{long};
+         my $short = $opt->{short};
+         my $desc  = $opt->{desc};
+
+         $long .= $opt->{type} ? "=$opt->{type}" : "";
+
+         if ( $opt->{type} && $opt->{type} eq 'm' ) {
+            my ($s) = $desc =~ m/\(suffix (.)\)/;
+            $s    ||= 's';
+            $desc =~ s/\s+\(suffix .\)//;
+            $desc .= ". Optional suffix s=seconds, m=minutes, h=hours, "
+                   . "d=days; if no suffix, $s is used.";
+         }
+         $desc = join("\n$rpad", grep { $_ } $desc =~ m/(.{0,$rcol}(?!\W))(?:\s+|(?<=\W)|$)/g);
+         $desc =~ s/ +$//mg;
+         if ( $short ) {
+            $usage .= sprintf("  --%-${maxs}s -%s  %s\n", $long, $short, $desc);
+         }
+         else {
+            $usage .= sprintf("  --%-${lcol}s  %s\n", $long, $desc);
+         }
+      }
+   }
+
+   $usage .= "\nOption types: s=string, i=integer, f=float, h/H/a/A=comma-separated list, d=DSN, z=size, m=time\n";
+
+   if ( (my @rules = @{$self->{rules}}) ) {
+      $usage .= "\nRules:\n\n";
+      $usage .= join("\n", map { "  $_" } @rules) . "\n";
+   }
+   if ( $self->{DSNParser} ) {
+      $usage .= "\n" . $self->{DSNParser}->usage();
+   }
+   $usage .= "\nOptions and values after processing arguments:\n\n";
+   foreach my $opt ( sort { $a->{long} cmp $b->{long} } @opts ) {
+      my $val   = $opt->{value};
+      my $type  = $opt->{type} || '';
+      my $bool  = $opt->{spec} =~ m/^[\w-]+(?:\|[\w-])?!?$/;
+      $val      = $bool              ? ( $val ? 'TRUE' : 'FALSE' )
+                : !defined $val      ? '(No value)'
+                : $type eq 'd'       ? $self->{DSNParser}->as_string($val)
+                : $type =~ m/H|h/    ? join(',', sort keys %$val)
+                : $type =~ m/A|a/    ? join(',', @$val)
+                :                      $val;
+      $usage .= sprintf("  --%-${lcol}s  %s\n", $opt->{long}, $val);
+   }
+   return $usage;
+}
+
+sub prompt_noecho {
+   shift @_ if ref $_[0] eq __PACKAGE__;
+   my ( $prompt ) = @_;
+   local $OUTPUT_AUTOFLUSH = 1;
+   print STDERR $prompt
+      or die "Cannot print: $OS_ERROR";
+   my $response;
+   eval {
+      require Term::ReadKey;
+      Term::ReadKey::ReadMode('noecho');
+      chomp($response = <STDIN>);
+      Term::ReadKey::ReadMode('normal');
+      print "\n"
+         or die "Cannot print: $OS_ERROR";
+   };
+   if ( $EVAL_ERROR ) {
+      die "Cannot read response; is Term::ReadKey installed? $EVAL_ERROR";
+   }
+   return $response;
+}
+
+sub _read_config_file {
+   my ( $self, $filename ) = @_;
+   open my $fh, "<", $filename or die "Cannot open $filename: $OS_ERROR\n";
+   my @args;
+   my $prefix = '--';
+   my $parse  = 1;
+
+   LINE:
+   while ( my $line = <$fh> ) {
+      chomp $line;
+      next LINE if $line =~ m/^\s*(?:\#|\;|$)/;
+      $line =~ s/\s+#.*$//g;
+      $line =~ s/^\s+|\s+$//g;
+      if ( $line eq '--' ) {
+         $prefix = '';
+         $parse  = 0;
+         next LINE;
+      }
+
+      if (  $parse
+            && !$self->has('version-check')
+            && $line =~ /version-check/
+      ) {
+         next LINE;
+      }
+
+      if ( $parse
+         && (my($opt, $arg) = $line =~ m/^\s*([^=\s]+?)(?:\s*=\s*(.*?)\s*)?$/)
+      ) {
+         push @args, grep { defined $_ } ("$prefix$opt", $arg);
+      }
+      elsif ( $line =~ m/./ ) {
+         push @args, $line;
+      }
+      else {
+         die "Syntax error in file $filename at line $INPUT_LINE_NUMBER";
+      }
+   }
+   close $fh;
+   return @args;
+}
+
+sub read_para_after {
+   my ( $self, $file, $regex ) = @_;
+   open my $fh, "<", $file or die "Can't open $file: $OS_ERROR";
+   local $INPUT_RECORD_SEPARATOR = '';
+   my $para;
+   while ( $para = <$fh> ) {
+      next unless $para =~ m/^=pod$/m;
+      last;
+   }
+   while ( $para = <$fh> ) {
+      next unless $para =~ m/$regex/;
+      last;
+   }
+   $para = <$fh>;
+   chomp($para);
+   close $fh or die "Can't close $file: $OS_ERROR";
+   return $para;
+}
+
+sub clone {
+   my ( $self ) = @_;
+
+   my %clone = map {
+      my $hashref  = $self->{$_};
+      my $val_copy = {};
+      foreach my $key ( keys %$hashref ) {
+         my $ref = ref $hashref->{$key};
+         $val_copy->{$key} = !$ref           ? $hashref->{$key}
+                           : $ref eq 'HASH'  ? { %{$hashref->{$key}} }
+                           : $ref eq 'ARRAY' ?
[ @{$hashref->{$key}} ] + : $hashref->{$key}; + } + $_ => $val_copy; + } qw(opts short_opts defaults); + + foreach my $scalar ( qw(got_opts) ) { + $clone{$scalar} = $self->{$scalar}; + } + + return bless \%clone; +} + +sub _parse_size { + my ( $self, $opt, $val ) = @_; + + if ( lc($val || '') eq 'null' ) { + PTDEBUG && _d('NULL size for', $opt->{long}); + $opt->{value} = 'null'; + return; + } + + my %factor_for = (k => 1_024, M => 1_048_576, G => 1_073_741_824); + my ($pre, $num, $factor) = $val =~ m/^([+-])?(\d+)([kMG])?$/; + if ( defined $num ) { + if ( $factor ) { + $num *= $factor_for{$factor}; + PTDEBUG && _d('Setting option', $opt->{y}, + 'to num', $num, '* factor', $factor); + } + $opt->{value} = ($pre || '') . $num; + } + else { + $self->save_error("Invalid size for --$opt->{long}: $val"); + } + return; +} + +sub _parse_attribs { + my ( $self, $option, $attribs ) = @_; + my $types = $self->{types}; + return $option + . ($attribs->{'short form'} ? '|' . $attribs->{'short form'} : '' ) + . ($attribs->{'negatable'} ? '!' : '' ) + . ($attribs->{'cumulative'} ? '+' : '' ) + . ($attribs->{'type'} ? '=' . $types->{$attribs->{type}} : '' ); +} + +sub _parse_synopsis { + my ( $self, $file ) = @_; + $file ||= $self->{file} || __FILE__; + PTDEBUG && _d("Parsing SYNOPSIS in", $file); + + local $INPUT_RECORD_SEPARATOR = ''; # read paragraphs + open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; + my $para; + 1 while defined($para = <$fh>) && $para !~ m/^=head1 SYNOPSIS/; + die "$file does not contain a SYNOPSIS section" unless $para; + my @synop; + for ( 1..2 ) { # 1 for the usage, 2 for the description + my $para = <$fh>; + push @synop, $para; + } + close $fh; + PTDEBUG && _d("Raw SYNOPSIS text:", @synop); + my ($usage, $desc) = @synop; + die "The SYNOPSIS section in $file is not formatted properly" + unless $usage && $desc; + + $usage =~ s/^\s*Usage:\s+(.+)/$1/; + chomp $usage; + + $desc =~ s/\n/ /g; + $desc =~ s/\s{2,}/ /g; + $desc =~ s/\. ([A-Z][a-z])/. $1/g; + $desc =~ s/\s+$//; + + return ( + description => $desc, + usage => $usage, + ); +}; + +sub set_vars { + my ($self, $file) = @_; + $file ||= $self->{file} || __FILE__; + + my %user_vars; + my $user_vars = $self->has('set-vars') ? $self->get('set-vars') : undef; + if ( $user_vars ) { + foreach my $var_val ( @$user_vars ) { + my ($var, $val) = $var_val =~ m/([^\s=]+)=(\S+)/; + die "Invalid --set-vars value: $var_val\n" unless $var && defined $val; + $user_vars{$var} = { + val => $val, + default => 0, + }; + } + } + + my %default_vars; + my $default_vars = $self->read_para_after($file, qr/MAGIC_set_vars/); + if ( $default_vars ) { + %default_vars = map { + my $var_val = $_; + my ($var, $val) = $var_val =~ m/([^\s=]+)=(\S+)/; + die "Invalid --set-vars value: $var_val\n" unless $var && defined $val; + $var => { + val => $val, + default => 1, + }; + } split("\n", $default_vars); + } + + my %vars = ( + %default_vars, # first the tool's defaults + %user_vars, # then the user's which overwrite the defaults + ); + PTDEBUG && _d('--set-vars:', Dumper(\%vars)); + return \%vars; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +if ( PTDEBUG ) { + print STDERR '# ', $^X, ' ', $], "\n"; + if ( my $uname = `uname -a` ) { + $uname =~ s/\s+/ /g; + print STDERR "# $uname\n"; + } + print STDERR '# Arguments: ', + join(' ', map { my $a = "_[$_]_"; $a =~ s/\n/\n# /g; $a; } @ARGV), "\n"; +} + +1; +} +# ########################################################################### +# End OptionParser package +# ########################################################################### + +# ########################################################################### +# Lmo::Utils package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Lmo/Utils.pm +# t/lib/Lmo/Utils.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Lmo::Utils; + +use strict; +use warnings qw( FATAL all ); +require Exporter; +our (@ISA, @EXPORT, @EXPORT_OK); + +BEGIN { + @ISA = qw(Exporter); + @EXPORT = @EXPORT_OK = qw( + _install_coderef + _unimport_coderefs + _glob_for + _stash_for + ); +} + +{ + no strict 'refs'; + sub _glob_for { + return \*{shift()} + } + + sub _stash_for { + return \%{ shift() . "::" }; + } +} + +sub _install_coderef { + my ($to, $code) = @_; + + return *{ _glob_for $to } = $code; +} + +sub _unimport_coderefs { + my ($target, @names) = @_; + return unless @names; + my $stash = _stash_for($target); + foreach my $name (@names) { + if ($stash->{$name} and defined(&{$stash->{$name}})) { + delete $stash->{$name}; + } + } +} + +1; +} +# ########################################################################### +# End Lmo::Utils package +# ########################################################################### + +# ########################################################################### +# Lmo::Meta package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Lmo/Meta.pm +# t/lib/Lmo/Meta.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Lmo::Meta; +use strict; +use warnings qw( FATAL all ); + +my %metadata_for; + +sub new { + my $class = shift; + return bless { @_ }, $class +} + +sub metadata_for { + my $self = shift; + my ($class) = @_; + + return $metadata_for{$class} ||= {}; +} + +sub class { shift->{class} } + +sub attributes { + my $self = shift; + return keys %{$self->metadata_for($self->class)} +} + +sub attributes_for_new { + my $self = shift; + my @attributes; + + my $class_metadata = $self->metadata_for($self->class); + while ( my ($attr, $meta) = each %$class_metadata ) { + if ( exists $meta->{init_arg} ) { + push @attributes, $meta->{init_arg} + if defined $meta->{init_arg}; + } + else { + push @attributes, $attr; + } + } + return @attributes; +} + +1; +} +# ########################################################################### +# End Lmo::Meta package +# ########################################################################### + +# ########################################################################### +# Lmo::Object package +# This package is a copy without comments from the original. 
The original
+# with comments and its test file can be found in the Bazaar repository at,
+#   lib/Lmo/Object.pm
+#   t/lib/Lmo/Object.t
+# See https://launchpad.net/percona-toolkit for more information.
+# ###########################################################################
+{
+package Lmo::Object;
+
+use strict;
+use warnings qw( FATAL all );
+
+use Carp ();
+use Scalar::Util qw(blessed);
+
+use Lmo::Meta;
+use Lmo::Utils qw(_glob_for);
+
+sub new {
+   my $class = shift;
+   my $args  = $class->BUILDARGS(@_);
+
+   my $class_metadata = Lmo::Meta->metadata_for($class);
+
+   my @args_to_delete;
+   while ( my ($attr, $meta) = each %$class_metadata ) {
+      next unless exists $meta->{init_arg};
+      my $init_arg = $meta->{init_arg};
+
+      if ( defined $init_arg ) {
+         $args->{$attr} = delete $args->{$init_arg};
+      }
+      else {
+         push @args_to_delete, $attr;
+      }
+   }
+
+   delete $args->{$_} for @args_to_delete;
+
+   for my $attribute ( keys %$args ) {
+      if ( my $coerce = $class_metadata->{$attribute}{coerce} ) {
+         $args->{$attribute} = $coerce->($args->{$attribute});
+      }
+      if ( my $isa_check = $class_metadata->{$attribute}{isa} ) {
+         my ($check_name, $check_sub) = @$isa_check;
+         $check_sub->($args->{$attribute});
+      }
+   }
+
+   while ( my ($attribute, $meta) = each %$class_metadata ) {
+      next unless $meta->{required};
+      Carp::confess("Attribute ($attribute) is required for $class")
+         if ! exists $args->{$attribute}
+   }
+
+   my $self = bless $args, $class;
+
+   my @build_subs;
+   my $linearized_isa = mro::get_linear_isa($class);
+
+   for my $isa_class ( @$linearized_isa ) {
+      unshift @build_subs, *{ _glob_for "${isa_class}::BUILD" }{CODE};
+   }
+   my @args = %$args;
+   for my $sub (grep { defined($_) && exists &$_ } @build_subs) {
+      $sub->( $self, @args);
+   }
+   return $self;
+}
+
+sub BUILDARGS {
+   shift; # No need for the classname
+   if ( @_ == 1 && ref($_[0]) ) {
+      Carp::confess("Single parameters to new() must be a HASH ref, not $_[0]")
+         unless ref($_[0]) eq ref({});
+      return {%{$_[0]}} # We want a new reference, always
+   }
+   else {
+      return { @_ };
+   }
+}
+
+sub meta {
+   my $class = shift;
+   $class    = Scalar::Util::blessed($class) || $class;
+   return Lmo::Meta->new(class => $class);
+}
+
+1;
+}
+# ###########################################################################
+# End Lmo::Object package
+# ###########################################################################
+
+# ###########################################################################
+# Lmo::Types package
+# This package is a copy without comments from the original. The original
+# with comments and its test file can be found in the Bazaar repository at,
+#   lib/Lmo/Types.pm
+#   t/lib/Lmo/Types.t
+# See https://launchpad.net/percona-toolkit for more information.
+# ###########################################################################
+{
+package Lmo::Types;
+
+use strict;
+use warnings qw( FATAL all );
+
+use Carp ();
+use Scalar::Util qw(looks_like_number blessed);
+
+
+our %TYPES = (
+   Bool   => sub { !$_[0] || (defined $_[0] && looks_like_number($_[0]) && $_[0] == 1) },
+   Num    => sub { defined $_[0] && looks_like_number($_[0]) },
+   Int    => sub { defined $_[0] && looks_like_number($_[0]) && $_[0] == int($_[0]) },
+   Str    => sub { defined $_[0] },
+   Object => sub { defined $_[0] && blessed($_[0]) },
+   FileHandle => sub { local $@; require IO::Handle; fileno($_[0]) && $_[0]->opened },
+
+   map {
+      my $type = /R/ ? $_ : uc $_;
+      $_ . "Ref" => sub { ref $_[0] eq $type }
+   } qw(Array Code Hash Regexp Glob Scalar)
+);
+
+sub check_type_constaints {
+   my ($attribute, $type_check, $check_name, $val) = @_;
+   ( ref($type_check) eq 'CODE'
+      ? $type_check->($val)
+      : (ref $val eq $type_check
+         || ($val && $val eq $type_check)
+         || (exists $TYPES{$type_check} && $TYPES{$type_check}->($val)))
+   )
+   || Carp::confess(
+        qq<Attribute ($attribute) does not pass the type constraint because: >
+      . qq<Validation failed for '$check_name' with value >
+      . (defined $val ? Lmo::Dumper($val) : 'undef') )
+}
+
+sub _nested_constraints {
+   my ($attribute, $aggregate_type, $type) = @_;
+
+   my $inner_types;
+   if ( $type =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) {
+      $inner_types = _nested_constraints($1, $2);
+   }
+   else {
+      $inner_types = $TYPES{$type};
+   }
+
+   if ( $aggregate_type eq 'ArrayRef' ) {
+      return sub {
+         my ($val) = @_;
+         return unless ref($val) eq ref([]);
+
+         if ($inner_types) {
+            for my $value ( @{$val} ) {
+               return unless $inner_types->($value)
+            }
+         }
+         else {
+            for my $value ( @{$val} ) {
+               return unless $value && ($value eq $type
+                        || (Scalar::Util::blessed($value) && $value->isa($type)));
+            }
+         }
+         return 1;
+      };
+   }
+   elsif ( $aggregate_type eq 'Maybe' ) {
+      return sub {
+         my ($value) = @_;
+         return 1 if ! defined($value);
+         if ($inner_types) {
+            return unless $inner_types->($value)
+         }
+         else {
+            return unless $value eq $type
+                        || (Scalar::Util::blessed($value) && $value->isa($type));
+         }
+         return 1;
+      }
+   }
+   else {
+      Carp::confess("Nested aggregate types are only implemented for ArrayRefs and Maybe");
+   }
+}
+
+1;
+}
+# ###########################################################################
+# End Lmo::Types package
+# ###########################################################################
+
+# ###########################################################################
+# Lmo package
+# This package is a copy without comments from the original. The original
+# with comments and its test file can be found in the Bazaar repository at,
+#   lib/Lmo.pm
+#   t/lib/Lmo.t
+# See https://launchpad.net/percona-toolkit for more information.
+# ###########################################################################
+{
+BEGIN {
+$INC{"Lmo.pm"} = __FILE__;
+package Lmo;
+our $VERSION = '0.30_Percona'; # Forked from 0.30 of Mo.
+ + +use strict; +use warnings qw( FATAL all ); + +use Carp (); +use Scalar::Util qw(looks_like_number blessed); + +use Lmo::Meta; +use Lmo::Object; +use Lmo::Types; + +use Lmo::Utils; + +my %export_for; +sub import { + warnings->import(qw(FATAL all)); + strict->import(); + + my $caller = scalar caller(); # Caller's package + my %exports = ( + extends => \&extends, + has => \&has, + with => \&with, + override => \&override, + confess => \&Carp::confess, + ); + + $export_for{$caller} = \%exports; + + for my $keyword ( keys %exports ) { + _install_coderef "${caller}::$keyword" => $exports{$keyword}; + } + + if ( !@{ *{ _glob_for "${caller}::ISA" }{ARRAY} || [] } ) { + @_ = "Lmo::Object"; + goto *{ _glob_for "${caller}::extends" }{CODE}; + } +} + +sub extends { + my $caller = scalar caller(); + for my $class ( @_ ) { + _load_module($class); + } + _set_package_isa($caller, @_); + _set_inherited_metadata($caller); +} + +sub _load_module { + my ($class) = @_; + + (my $file = $class) =~ s{::|'}{/}g; + $file .= '.pm'; + { local $@; eval { require "$file" } } # or warn $@; + return; +} + +sub with { + my $package = scalar caller(); + require Role::Tiny; + for my $role ( @_ ) { + _load_module($role); + _role_attribute_metadata($package, $role); + } + Role::Tiny->apply_roles_to_package($package, @_); +} + +sub _role_attribute_metadata { + my ($package, $role) = @_; + + my $package_meta = Lmo::Meta->metadata_for($package); + my $role_meta = Lmo::Meta->metadata_for($role); + + %$package_meta = (%$role_meta, %$package_meta); +} + +sub has { + my $names = shift; + my $caller = scalar caller(); + + my $class_metadata = Lmo::Meta->metadata_for($caller); + + for my $attribute ( ref $names ? @$names : $names ) { + my %args = @_; + my $method = ($args{is} || '') eq 'ro' + ? sub { + Carp::confess("Cannot assign a value to a read-only accessor at reader ${caller}::${attribute}") + if $#_; + return $_[0]{$attribute}; + } + : sub { + return $#_ + ? $_[0]{$attribute} = $_[1] + : $_[0]{$attribute}; + }; + + $class_metadata->{$attribute} = (); + + if ( my $type_check = $args{isa} ) { + my $check_name = $type_check; + + if ( my ($aggregate_type, $inner_type) = $type_check =~ /\A(ArrayRef|Maybe)\[(.*)\]\z/ ) { + $type_check = Lmo::Types::_nested_constraints($attribute, $aggregate_type, $inner_type); + } + + my $check_sub = sub { + my ($new_val) = @_; + Lmo::Types::check_type_constaints($attribute, $type_check, $check_name, $new_val); + }; + + $class_metadata->{$attribute}{isa} = [$check_name, $check_sub]; + my $orig_method = $method; + $method = sub { + $check_sub->($_[1]) if $#_; + goto &$orig_method; + }; + } + + if ( my $builder = $args{builder} ) { + my $original_method = $method; + $method = sub { + $#_ + ? goto &$original_method + : ! exists $_[0]{$attribute} + ? $_[0]{$attribute} = $_[0]->$builder + : goto &$original_method + }; + } + + if ( my $code = $args{default} ) { + Carp::confess("${caller}::${attribute}'s default is $code, but should be a coderef") + unless ref($code) eq 'CODE'; + my $original_method = $method; + $method = sub { + $#_ + ? goto &$original_method + : ! exists $_[0]{$attribute} + ? 
$_[0]{$attribute} = $_[0]->$code
+               : goto &$original_method
+      };
+   }
+
+   if ( my $role = $args{does} ) {
+      my $original_method = $method;
+      $method = sub {
+         if ( $#_ ) {
+            Carp::confess(qq<Attribute ($attribute) doesn't consume a '$role' role>)
+               unless Scalar::Util::blessed($_[1]) && eval { $_[1]->does($role) }
+         }
+         goto &$original_method
+      };
+   }
+
+   if ( my $coercion = $args{coerce} ) {
+      $class_metadata->{$attribute}{coerce} = $coercion;
+      my $original_method = $method;
+      $method = sub {
+         if ( $#_ ) {
+            return $original_method->($_[0], $coercion->($_[1]))
+         }
+         goto &$original_method;
+      }
+   }
+
+   _install_coderef "${caller}::$attribute" => $method;
+
+   if ( $args{required} ) {
+      $class_metadata->{$attribute}{required} = 1;
+   }
+
+   if ($args{clearer}) {
+      _install_coderef "${caller}::$args{clearer}"
+         => sub { delete shift->{$attribute} }
+   }
+
+   if ($args{predicate}) {
+      _install_coderef "${caller}::$args{predicate}"
+         => sub { exists shift->{$attribute} }
+   }
+
+   if ($args{handles}) {
+      _has_handles($caller, $attribute, \%args);
+   }
+
+   if (exists $args{init_arg}) {
+      $class_metadata->{$attribute}{init_arg} = $args{init_arg};
+   }
+   }
+}
+
+sub _has_handles {
+   my ($caller, $attribute, $args) = @_;
+   my $handles = $args->{handles};
+
+   my $ref = ref $handles;
+   my $kv;
+   if ( $ref eq ref [] ) {
+      $kv = { map { $_,$_ } @{$handles} };
+   }
+   elsif ( $ref eq ref {} ) {
+      $kv = $handles;
+   }
+   elsif ( $ref eq ref qr// ) {
+      Carp::confess("Cannot delegate methods based on a Regexp without a type constraint (isa)")
+         unless $args->{isa};
+      my $target_class = $args->{isa};
+      $kv = {
+         map  { $_, $_ }
+         grep { $_ =~ $handles }
+         grep { !exists $Lmo::Object::{$_} && $target_class->can($_) }
+         grep { !$export_for{$target_class}->{$_} }
+         keys %{ _stash_for $target_class }
+      };
+   }
+   else {
+      Carp::confess("handles for $ref not yet implemented");
+   }
+
+   while ( my ($method, $target) = each %{$kv} ) {
+      my $name = _glob_for "${caller}::$method";
+      Carp::confess("You cannot overwrite a locally defined method ($method) with a delegation")
+         if defined &$name;
+
+      my ($target, @curried_args) = ref($target) ?
@$target : $target; + *$name = sub { + my $self = shift; + my $delegate_to = $self->$attribute(); + my $error = "Cannot delegate $method to $target because the value of $attribute"; + Carp::confess("$error is not defined") unless $delegate_to; + Carp::confess("$error is not an object (got '$delegate_to')") + unless Scalar::Util::blessed($delegate_to) || (!ref($delegate_to) && $delegate_to->can($target)); + return $delegate_to->$target(@curried_args, @_); + } + } +} + +sub _set_package_isa { + my ($package, @new_isa) = @_; + my $package_isa = \*{ _glob_for "${package}::ISA" }; + @{*$package_isa} = @new_isa; +} + +sub _set_inherited_metadata { + my $class = shift; + my $class_metadata = Lmo::Meta->metadata_for($class); + my $linearized_isa = mro::get_linear_isa($class); + my %new_metadata; + + for my $isa_class (reverse @$linearized_isa) { + my $isa_metadata = Lmo::Meta->metadata_for($isa_class); + %new_metadata = ( + %new_metadata, + %$isa_metadata, + ); + } + %$class_metadata = %new_metadata; +} + +sub unimport { + my $caller = scalar caller(); + my $target = caller; + _unimport_coderefs($target, keys %{$export_for{$caller}}); +} + +sub Dumper { + require Data::Dumper; + local $Data::Dumper::Indent = 0; + local $Data::Dumper::Sortkeys = 0; + local $Data::Dumper::Quotekeys = 0; + local $Data::Dumper::Terse = 1; + + Data::Dumper::Dumper(@_) +} + +BEGIN { + if ($] >= 5.010) { + { local $@; require mro; } + } + else { + local $@; + eval { + require MRO::Compat; + } or do { + *mro::get_linear_isa = *mro::get_linear_isa_dfs = sub { + no strict 'refs'; + + my $classname = shift; + + my @lin = ($classname); + my %stored; + foreach my $parent (@{"$classname\::ISA"}) { + my $plin = mro::get_linear_isa_dfs($parent); + foreach (@$plin) { + next if exists $stored{$_}; + push(@lin, $_); + $stored{$_} = 1; + } + } + return \@lin; + }; + } + } +} + +sub override { + my ($methods, $code) = @_; + my $caller = scalar caller; + + for my $method ( ref($methods) ? @$methods : $methods ) { + my $full_method = "${caller}::${method}"; + *{_glob_for $full_method} = $code; + } +} + +} +1; +} +# ########################################################################### +# End Lmo package +# ########################################################################### + +# ########################################################################### +# Quoter package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Quoter.pm +# t/lib/Quoter.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Quoter; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +sub new { + my ( $class, %args ) = @_; + return bless {}, $class; +} + +sub quote { + my ( $self, @vals ) = @_; + foreach my $val ( @vals ) { + $val =~ s/`/``/g; + } + return join('.', map { '`' . $_ . 
'`' } @vals);
+}
+
+sub quote_val {
+   my ( $self, $val, %args ) = @_;
+
+   return 'NULL' unless defined $val;          # undef = NULL
+   return "''" if $val eq '';                  # blank string = ''
+   return $val if $val =~ m/^0x[0-9a-fA-F]+$/  # quote hex data
+                  && !$args{is_char};          # unless is_char is true
+
+   return $val if $args{is_float};
+
+   $val =~ s/(['\\])/\\$1/g;
+   return "'$val'";
+}
+
+sub split_unquote {
+   my ( $self, $db_tbl, $default_db ) = @_;
+   my ( $db, $tbl ) = split(/[.]/, $db_tbl);
+   if ( !$tbl ) {
+      $tbl = $db;
+      $db  = $default_db;
+   }
+   for ($db, $tbl) {
+      next unless $_;
+      s/\A`//;
+      s/`\z//;
+      s/``/`/g;
+   }
+
+   return ($db, $tbl);
+}
+
+sub literal_like {
+   my ( $self, $like ) = @_;
+   return unless $like;
+   $like =~ s/([%_])/\\$1/g;
+   return "'$like'";
+}
+
+sub join_quote {
+   my ( $self, $default_db, $db_tbl ) = @_;
+   return unless $db_tbl;
+   my ($db, $tbl) = split(/[.]/, $db_tbl);
+   if ( !$tbl ) {
+      $tbl = $db;
+      $db  = $default_db;
+   }
+   $db  = "`$db`"  if $db  && $db  !~ m/^`/;
+   $tbl = "`$tbl`" if $tbl && $tbl !~ m/^`/;
+   return $db ? "$db.$tbl" : $tbl;
+}
+
+sub serialize_list {
+   my ( $self, @args ) = @_;
+   PTDEBUG && _d('Serializing', Dumper(\@args));
+   return unless @args;
+
+   my @parts;
+   foreach my $arg ( @args ) {
+      if ( defined $arg ) {
+         $arg =~ s/,/\\,/g;      # escape commas
+         $arg =~ s/\\N/\\\\N/g;  # escape literal \N
+         push @parts, $arg;
+      }
+      else {
+         push @parts, '\N';
+      }
+   }
+
+   my $string = join(',', @parts);
+   PTDEBUG && _d('Serialized: <', $string, '>');
+   return $string;
+}
+
+sub deserialize_list {
+   my ( $self, $string ) = @_;
+   PTDEBUG && _d('Deserializing <', $string, '>');
+   die "Cannot deserialize an undefined string" unless defined $string;
+
+   my @parts;
+   foreach my $arg ( split(/(?<!\\),/, $string) ) {
+      if ( $arg eq '\N' ) {
+         $arg = undef;
+      }
+      else {
+         $arg =~ s/\\,/,/g;      # unescape commas
+         $arg =~ s/\\\\N/\\N/g;  # unescape literal \N
+      }
+      push @parts, $arg;
+   }
+
+   PTDEBUG && _d('Deserialized', Dumper(\@parts));
+   return @parts;
+}
+
+sub _d {
+   my ($package, undef, $line) = caller 0;
+   @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
+        map { defined $_ ? $_ : 'undef' }
+        @_;
+   print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
+}
+
+1;
+}
+# ###########################################################################
+# End Quoter package
+# ###########################################################################
+
+# ###########################################################################
+# DSNParser package
+# This package is a copy without comments from the original. The original
+# with comments and its test file can be found in the Bazaar repository at,
+#   lib/DSNParser.pm
+#   t/lib/DSNParser.t
+# See https://launchpad.net/percona-toolkit for more information.
+# ###########################################################################
+{
+package DSNParser;
+
+use strict;
+use warnings FATAL => 'all';
+use English qw(-no_match_vars);
+use constant PTDEBUG => $ENV{PTDEBUG} || 0;
+
+use Data::Dumper;
+$Data::Dumper::Indent    = 0;
+$Data::Dumper::Quotekeys = 0;
+
+my $dsn_sep = qr/(?<!\\),/;
+
+eval {
+   require DBI;
+};
+my $have_dbi = $EVAL_ERROR ? 0 : 1;
+
+sub new {
+   my ( $class, %args ) = @_;
+   foreach my $arg ( qw(opts) ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my $self = {
+      opts => {} # h, P, u, etc. Should come from DSN OPTIONS section in POD.
+   };
+   foreach my $opt ( @{$args{opts}} ) {
+      if ( !$opt->{key} || !$opt->{desc} ) {
+         die "Invalid DSN option: ", Dumper($opt);
+      }
+      PTDEBUG && _d('DSN option:',
+         join(', ',
+            map { "$_=" . (defined $opt->{$_} ?
($opt->{$_} || '') : 'undef') } + keys %$opt + ) + ); + $self->{opts}->{$opt->{key}} = { + dsn => $opt->{dsn}, + desc => $opt->{desc}, + copy => $opt->{copy} || 0, + }; + } + return bless $self, $class; +} + +sub prop { + my ( $self, $prop, $value ) = @_; + if ( @_ > 2 ) { + PTDEBUG && _d('Setting', $prop, 'property'); + $self->{$prop} = $value; + } + return $self->{$prop}; +} + +sub parse { + my ( $self, $dsn, $prev, $defaults ) = @_; + if ( !$dsn ) { + PTDEBUG && _d('No DSN to parse'); + return; + } + PTDEBUG && _d('Parsing', $dsn); + $prev ||= {}; + $defaults ||= {}; + my %given_props; + my %final_props; + my $opts = $self->{opts}; + + foreach my $dsn_part ( split($dsn_sep, $dsn) ) { + $dsn_part =~ s/\\,/,/g; + if ( my ($prop_key, $prop_val) = $dsn_part =~ m/^(.)=(.*)$/ ) { + $given_props{$prop_key} = $prop_val; + } + else { + PTDEBUG && _d('Interpreting', $dsn_part, 'as h=', $dsn_part); + $given_props{h} = $dsn_part; + } + } + + foreach my $key ( keys %$opts ) { + PTDEBUG && _d('Finding value for', $key); + $final_props{$key} = $given_props{$key}; + if ( !defined $final_props{$key} + && defined $prev->{$key} && $opts->{$key}->{copy} ) + { + $final_props{$key} = $prev->{$key}; + PTDEBUG && _d('Copying value for', $key, 'from previous DSN'); + } + if ( !defined $final_props{$key} ) { + $final_props{$key} = $defaults->{$key}; + PTDEBUG && _d('Copying value for', $key, 'from defaults'); + } + } + + foreach my $key ( keys %given_props ) { + die "Unknown DSN option '$key' in '$dsn'. For more details, " + . "please use the --help option, or try 'perldoc $PROGRAM_NAME' " + . "for complete documentation." + unless exists $opts->{$key}; + } + if ( (my $required = $self->prop('required')) ) { + foreach my $key ( keys %$required ) { + die "Missing required DSN option '$key' in '$dsn'. For more details, " + . "please use the --help option, or try 'perldoc $PROGRAM_NAME' " + . "for complete documentation." + unless $final_props{$key}; + } + } + + return \%final_props; +} + +sub parse_options { + my ( $self, $o ) = @_; + die 'I need an OptionParser object' unless ref $o eq 'OptionParser'; + my $dsn_string + = join(',', + map { "$_=".$o->get($_); } + grep { $o->has($_) && $o->get($_) } + keys %{$self->{opts}} + ); + PTDEBUG && _d('DSN string made from options:', $dsn_string); + return $self->parse($dsn_string); +} + +sub as_string { + my ( $self, $dsn, $props ) = @_; + return $dsn unless ref $dsn; + my @keys = $props ? @$props : sort keys %$dsn; + return join(',', + map { "$_=" . ($_ eq 'p' ? '...' : $dsn->{$_}) } + grep { + exists $self->{opts}->{$_} + && exists $dsn->{$_} + && defined $dsn->{$_} + } @keys); +} + +sub usage { + my ( $self ) = @_; + my $usage + = "DSN syntax is key=value[,key=value...] Allowable DSN keys:\n\n" + . " KEY COPY MEANING\n" + . " === ==== =============================================\n"; + my %opts = %{$self->{opts}}; + foreach my $key ( sort keys %opts ) { + $usage .= " $key " + . ($opts{$key}->{copy} ? 'yes ' : 'no ') + . ($opts{$key}->{desc} || '[No description]') + . "\n"; + } + $usage .= "\n If the DSN is a bareword, the word is treated as the 'h' key.\n"; + return $usage; +} + +sub get_cxn_params { + my ( $self, $info ) = @_; + my $dsn; + my %opts = %{$self->{opts}}; + my $driver = $self->prop('dbidriver') || ''; + if ( $driver eq 'Pg' ) { + $dsn = 'DBI:Pg:dbname=' . ( $info->{D} || '' ) . ';' + . join(';', map { "$opts{$_}->{dsn}=$info->{$_}" } + grep { defined $info->{$_} } + qw(h P)); + } + else { + $dsn = 'DBI:mysql:' . ( $info->{D} || '' ) . ';' + . 
join(';', map { "$opts{$_}->{dsn}=$info->{$_}" } + grep { defined $info->{$_} } + qw(F h P S A)) + . ';mysql_read_default_group=client' + . ($info->{L} ? ';mysql_local_infile=1' : ''); + } + PTDEBUG && _d($dsn); + return ($dsn, $info->{u}, $info->{p}); +} + +sub fill_in_dsn { + my ( $self, $dbh, $dsn ) = @_; + my $vars = $dbh->selectall_hashref('SHOW VARIABLES', 'Variable_name'); + my ($user, $db) = $dbh->selectrow_array('SELECT USER(), DATABASE()'); + $user =~ s/@.*//; + $dsn->{h} ||= $vars->{hostname}->{Value}; + $dsn->{S} ||= $vars->{'socket'}->{Value}; + $dsn->{P} ||= $vars->{port}->{Value}; + $dsn->{u} ||= $user; + $dsn->{D} ||= $db; +} + +sub get_dbh { + my ( $self, $cxn_string, $user, $pass, $opts ) = @_; + $opts ||= {}; + my $defaults = { + AutoCommit => 0, + RaiseError => 1, + PrintError => 0, + ShowErrorStatement => 1, + mysql_enable_utf8 => ($cxn_string =~ m/charset=utf8/i ? 1 : 0), + }; + @{$defaults}{ keys %$opts } = values %$opts; + if (delete $defaults->{L}) { # L for LOAD DATA LOCAL INFILE, our own extension + $defaults->{mysql_local_infile} = 1; + } + + if ( $opts->{mysql_use_result} ) { + $defaults->{mysql_use_result} = 1; + } + + if ( !$have_dbi ) { + die "Cannot connect to MySQL because the Perl DBI module is not " + . "installed or not found. Run 'perl -MDBI' to see the directories " + . "that Perl searches for DBI. If DBI is not installed, try:\n" + . " Debian/Ubuntu apt-get install libdbi-perl\n" + . " RHEL/CentOS yum install perl-DBI\n" + . " OpenSolaris pkg install pkg:/SUNWpmdbi\n"; + + } + + my $dbh; + my $tries = 2; + while ( !$dbh && $tries-- ) { + PTDEBUG && _d($cxn_string, ' ', $user, ' ', $pass, + join(', ', map { "$_=>$defaults->{$_}" } keys %$defaults )); + + $dbh = eval { DBI->connect($cxn_string, $user, $pass, $defaults) }; + + if ( !$dbh && $EVAL_ERROR ) { + if ( $EVAL_ERROR =~ m/locate DBD\/mysql/i ) { + die "Cannot connect to MySQL because the Perl DBD::mysql module is " + . "not installed or not found. Run 'perl -MDBD::mysql' to see " + . "the directories that Perl searches for DBD::mysql. If " + . "DBD::mysql is not installed, try:\n" + . " Debian/Ubuntu apt-get install libdbd-mysql-perl\n" + . " RHEL/CentOS yum install perl-DBD-MySQL\n" + . " OpenSolaris pgk install pkg:/SUNWapu13dbd-mysql\n"; + } + elsif ( $EVAL_ERROR =~ m/not a compiled character set|character set utf8/ ) { + PTDEBUG && _d('Going to try again without utf8 support'); + delete $defaults->{mysql_enable_utf8}; + } + if ( !$tries ) { + die $EVAL_ERROR; + } + } + } + + if ( $cxn_string =~ m/mysql/i ) { + my $sql; + + if ( my ($charset) = $cxn_string =~ m/charset=([\w]+)/ ) { + $sql = qq{/*!40101 SET NAMES "$charset"*/}; + PTDEBUG && _d($dbh, $sql); + eval { $dbh->do($sql) }; + if ( $EVAL_ERROR ) { + die "Error setting NAMES to $charset: $EVAL_ERROR"; + } + PTDEBUG && _d('Enabling charset for STDOUT'); + if ( $charset eq 'utf8' ) { + binmode(STDOUT, ':utf8') + or die "Can't binmode(STDOUT, ':utf8'): $OS_ERROR"; + } + else { + binmode(STDOUT) or die "Can't binmode(STDOUT): $OS_ERROR"; + } + } + + if ( my $vars = $self->prop('set-vars') ) { + $self->set_vars($dbh, $vars); + } + + $sql = 'SELECT @@SQL_MODE'; + PTDEBUG && _d($dbh, $sql); + my ($sql_mode) = eval { $dbh->selectrow_array($sql) }; + if ( $EVAL_ERROR ) { + die "Error getting the current SQL_MODE: $EVAL_ERROR"; + } + + $sql = 'SET @@SQL_QUOTE_SHOW_CREATE = 1' + . '/*!40101, @@SQL_MODE=\'NO_AUTO_VALUE_ON_ZERO' + . ($sql_mode ? ",$sql_mode" : '') + . 
'\'*/'; + PTDEBUG && _d($dbh, $sql); + eval { $dbh->do($sql) }; + if ( $EVAL_ERROR ) { + die "Error setting SQL_QUOTE_SHOW_CREATE, SQL_MODE" + . ($sql_mode ? " and $sql_mode" : '') + . ": $EVAL_ERROR"; + } + } + my ($mysql_version) = eval { $dbh->selectrow_array('SELECT VERSION()') }; + if ($EVAL_ERROR) { + die "Cannot get MySQL version: $EVAL_ERROR"; + } + + my (undef, $character_set_server) = eval { $dbh->selectrow_array("SHOW VARIABLES LIKE 'character_set_server'") }; + if ($EVAL_ERROR) { + die "Cannot get MySQL var character_set_server: $EVAL_ERROR"; + } + + if ($mysql_version =~ m/^(\d+)\.(\d)\.(\d+).*/) { + if ($1 >= 8 && $character_set_server =~ m/^utf8/) { + $dbh->{mysql_enable_utf8} = 1; + my $msg = "MySQL version $mysql_version >= 8 and character_set_server = $character_set_server\n". + "Setting: SET NAMES $character_set_server"; + PTDEBUG && _d($msg); + eval { $dbh->do("SET NAMES 'utf8mb4'") }; + if ($EVAL_ERROR) { + die "Cannot SET NAMES $character_set_server: $EVAL_ERROR"; + } + } + } + + PTDEBUG && _d('DBH info: ', + $dbh, + Dumper($dbh->selectrow_hashref( + 'SELECT DATABASE(), CONNECTION_ID(), VERSION()/*!50038 , @@hostname*/')), + 'Connection info:', $dbh->{mysql_hostinfo}, + 'Character set info:', Dumper($dbh->selectall_arrayref( + "SHOW VARIABLES LIKE 'character_set%'", { Slice => {}})), + '$DBD::mysql::VERSION:', $DBD::mysql::VERSION, + '$DBI::VERSION:', $DBI::VERSION, + ); + + return $dbh; +} + +sub get_hostname { + my ( $self, $dbh ) = @_; + if ( my ($host) = ($dbh->{mysql_hostinfo} || '') =~ m/^(\w+) via/ ) { + return $host; + } + my ( $hostname, $one ) = $dbh->selectrow_array( + 'SELECT /*!50038 @@hostname, */ 1'); + return $hostname; +} + +sub disconnect { + my ( $self, $dbh ) = @_; + PTDEBUG && $self->print_active_handles($dbh); + $dbh->disconnect; +} + +sub print_active_handles { + my ( $self, $thing, $level ) = @_; + $level ||= 0; + printf("# Active %sh: %s %s %s\n", ($thing->{Type} || 'undef'), "\t" x $level, + $thing, (($thing->{Type} || '') eq 'st' ? $thing->{Statement} || '' : '')) + or die "Cannot print: $OS_ERROR"; + foreach my $handle ( grep {defined} @{ $thing->{ChildHandles} } ) { + $self->print_active_handles( $handle, $level + 1 ); + } +} + +sub copy { + my ( $self, $dsn_1, $dsn_2, %args ) = @_; + die 'I need a dsn_1 argument' unless $dsn_1; + die 'I need a dsn_2 argument' unless $dsn_2; + my %new_dsn = map { + my $key = $_; + my $val; + if ( $args{overwrite} ) { + $val = defined $dsn_1->{$key} ? $dsn_1->{$key} : $dsn_2->{$key}; + } + else { + $val = defined $dsn_2->{$key} ? 
$dsn_2->{$key} : $dsn_1->{$key}; + } + $key => $val; + } keys %{$self->{opts}}; + return \%new_dsn; +} + +sub set_vars { + my ($self, $dbh, $vars) = @_; + + return unless $vars; + + foreach my $var ( sort keys %$vars ) { + my $val = $vars->{$var}->{val}; + + (my $quoted_var = $var) =~ s/_/\\_/; + my ($var_exists, $current_val); + eval { + ($var_exists, $current_val) = $dbh->selectrow_array( + "SHOW VARIABLES LIKE '$quoted_var'"); + }; + my $e = $EVAL_ERROR; + if ( $e ) { + PTDEBUG && _d($e); + } + + if ( $vars->{$var}->{default} && !$var_exists ) { + PTDEBUG && _d('Not setting default var', $var, + 'because it does not exist'); + next; + } + + if ( $current_val && $current_val eq $val ) { + PTDEBUG && _d('Not setting var', $var, 'because its value', + 'is already', $val); + next; + } + + my $sql = "SET SESSION $var=$val"; + PTDEBUG && _d($dbh, $sql); + eval { $dbh->do($sql) }; + if ( my $set_error = $EVAL_ERROR ) { + chomp($set_error); + $set_error =~ s/ at \S+ line \d+//; + my $msg = "Error setting $var: $set_error"; + if ( $current_val ) { + $msg .= " The current value for $var is $current_val. " + . "If the variable is read only (not dynamic), specify " + . "--set-vars $var=$current_val to avoid this warning, " + . "else manually set the variable and restart MySQL."; + } + warn $msg . "\n\n"; + } + } + + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End DSNParser package +# ########################################################################### + +# ########################################################################### +# VersionParser package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/VersionParser.pm +# t/lib/VersionParser.t +# See https://launchpad.net/percona-toolkit for more information. 
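+#
+# Illustrative usage only (hypothetical $dbh; not part of the upstream copy):
+#   my $version = VersionParser->new($dbh);
+#   print "server too old\n" if $version < '5.1';
+#   print $version->flavor(), ' ', $version->normalized_version(), "\n";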
+# ########################################################################### +{ +package VersionParser; + +use Lmo; +use Scalar::Util qw(blessed); +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use overload ( + '""' => "version", + '<=>' => "cmp", + 'cmp' => "cmp", + fallback => 1, +); + +use Carp (); + +our $VERSION = 0.01; + +has major => ( + is => 'ro', + isa => 'Int', + required => 1, +); + +has [qw( minor revision )] => ( + is => 'ro', + isa => 'Num', +); + +has flavor => ( + is => 'ro', + isa => 'Str', + default => sub { 'Unknown' }, +); + +has innodb_version => ( + is => 'ro', + isa => 'Str', + default => sub { 'NO' }, +); + +sub series { + my $self = shift; + return $self->_join_version($self->major, $self->minor); +} + +sub version { + my $self = shift; + return $self->_join_version($self->major, $self->minor, $self->revision); +} + +sub is_in { + my ($self, $target) = @_; + + return $self eq $target; +} + +sub _join_version { + my ($self, @parts) = @_; + + return join ".", map { my $c = $_; $c =~ s/^0\./0/; $c } grep defined, @parts; +} +sub _split_version { + my ($self, $str) = @_; + my @version_parts = map { s/^0(?=\d)/0./; $_ } $str =~ m/(\d+)/g; + return @version_parts[0..2]; +} + +sub normalized_version { + my ( $self ) = @_; + my $result = sprintf('%d%02d%02d', map { $_ || 0 } $self->major, + $self->minor, + $self->revision); + PTDEBUG && _d($self->version, 'normalizes to', $result); + return $result; +} + +sub comment { + my ( $self, $cmd ) = @_; + my $v = $self->normalized_version(); + + return "/*!$v $cmd */" +} + +my @methods = qw(major minor revision); +sub cmp { + my ($left, $right) = @_; + my $right_obj = (blessed($right) && $right->isa(ref($left))) + ? $right + : ref($left)->new($right); + + my $retval = 0; + for my $m ( @methods ) { + last unless defined($left->$m) && defined($right_obj->$m); + $retval = $left->$m <=> $right_obj->$m; + last if $retval; + } + return $retval; +} + +sub BUILDARGS { + my $self = shift; + + if ( @_ == 1 ) { + my %args; + if ( blessed($_[0]) && $_[0]->can("selectrow_hashref") ) { + PTDEBUG && _d("VersionParser got a dbh, trying to get the version"); + my $dbh = $_[0]; + local $dbh->{FetchHashKeyName} = 'NAME_lc'; + my $query = eval { + $dbh->selectall_arrayref(q/SHOW VARIABLES LIKE 'version%'/, { Slice => {} }) + }; + if ( $query ) { + $query = { map { $_->{variable_name} => $_->{value} } @$query }; + @args{@methods} = $self->_split_version($query->{version}); + $args{flavor} = delete $query->{version_comment} + if $query->{version_comment}; + } + elsif ( eval { ($query) = $dbh->selectrow_array(q/SELECT VERSION()/) } ) { + @args{@methods} = $self->_split_version($query); + } + else { + Carp::confess("Couldn't get the version from the dbh while " + . 
"creating a VersionParser object: $@"); + } + $args{innodb_version} = eval { $self->_innodb_version($dbh) }; + } + elsif ( !ref($_[0]) ) { + @args{@methods} = $self->_split_version($_[0]); + } + + for my $method (@methods) { + delete $args{$method} unless defined $args{$method}; + } + @_ = %args if %args; + } + + return $self->SUPER::BUILDARGS(@_); +} + +sub _innodb_version { + my ( $self, $dbh ) = @_; + return unless $dbh; + my $innodb_version = "NO"; + + my ($innodb) = + grep { $_->{engine} =~ m/InnoDB/i } + map { + my %hash; + @hash{ map { lc $_ } keys %$_ } = values %$_; + \%hash; + } + @{ $dbh->selectall_arrayref("SHOW ENGINES", {Slice=>{}}) }; + if ( $innodb ) { + PTDEBUG && _d("InnoDB support:", $innodb->{support}); + if ( $innodb->{support} =~ m/YES|DEFAULT/i ) { + my $vars = $dbh->selectrow_hashref( + "SHOW VARIABLES LIKE 'innodb_version'"); + $innodb_version = !$vars ? "BUILTIN" + : ($vars->{Value} || $vars->{value}); + } + else { + $innodb_version = $innodb->{support}; # probably DISABLED or NO + } + } + + PTDEBUG && _d("InnoDB version:", $innodb_version); + return $innodb_version; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +no Lmo; +1; +} +# ########################################################################### +# End VersionParser package +# ########################################################################### + +# ########################################################################### +# TableSyncStream package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/TableSyncStream.pm +# t/lib/TableSyncStream.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package TableSyncStream; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +sub new { + my ( $class, %args ) = @_; + foreach my $arg ( qw(Quoter) ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $self = { %args }; + return bless $self, $class; +} + +sub name { + return 'Stream'; +} + +sub can_sync { + return 1; # We can sync anything. +} + +sub prepare_to_sync { + my ( $self, %args ) = @_; + my @required_args = qw(cols ChangeHandler); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + $self->{cols} = $args{cols}; + $self->{buffer_in_mysql} = $args{buffer_in_mysql}; + $self->{ChangeHandler} = $args{ChangeHandler}; + + $self->{done} = 0; + + return; +} + +sub uses_checksum { + return 0; # We don't need checksum queries. +} + +sub set_checksum_queries { + return; # This shouldn't be called, but just in case. +} + +sub prepare_sync_cycle { + my ( $self, $host ) = @_; + return; +} + +sub get_sql { + my ( $self, %args ) = @_; + return "SELECT " + . ($self->{buffer_in_mysql} ? 'SQL_BUFFER_RESULT ' : '') + . join(', ', map { $self->{Quoter}->quote($_) } @{$self->{cols}}) + . ' FROM ' . $self->{Quoter}->quote(@args{qw(database table)}) + . ' WHERE ' . 
( $args{where} || '1=1' ); +} + +sub same_row { + my ( $self, %args ) = @_; + return; +} + +sub not_in_right { + my ( $self, %args ) = @_; + $self->{ChangeHandler}->change('INSERT', $args{lr}, $self->key_cols()); +} + +sub not_in_left { + my ( $self, %args ) = @_; + $self->{ChangeHandler}->change('DELETE', $args{rr}, $self->key_cols()); +} + +sub done_with_rows { + my ( $self ) = @_; + $self->{done} = 1; +} + +sub done { + my ( $self ) = @_; + return $self->{done}; +} + +sub key_cols { + my ( $self ) = @_; + return $self->{cols}; +} + +sub pending_changes { + my ( $self ) = @_; + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End TableSyncStream package +# ########################################################################### + +# ########################################################################### +# TableParser package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/TableParser.pm +# t/lib/TableParser.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package TableParser; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +local $EVAL_ERROR; +eval { + require Quoter; +}; + +sub new { + my ( $class, %args ) = @_; + my $self = { %args }; + $self->{Quoter} ||= Quoter->new(); + return bless $self, $class; +} + +sub Quoter { shift->{Quoter} } + +sub get_create_table { + my ( $self, $dbh, $db, $tbl ) = @_; + die "I need a dbh parameter" unless $dbh; + die "I need a db parameter" unless $db; + die "I need a tbl parameter" unless $tbl; + my $q = $self->{Quoter}; + + my $new_sql_mode + = q{/*!40101 SET @OLD_SQL_MODE := @@SQL_MODE, } + . q{@@SQL_MODE := '', } + . q{@OLD_QUOTE := @@SQL_QUOTE_SHOW_CREATE, } + . q{@@SQL_QUOTE_SHOW_CREATE := 1 */}; + + my $old_sql_mode + = q{/*!40101 SET @@SQL_MODE := @OLD_SQL_MODE, } + . q{@@SQL_QUOTE_SHOW_CREATE := @OLD_QUOTE */}; + + PTDEBUG && _d($new_sql_mode); + eval { $dbh->do($new_sql_mode); }; + PTDEBUG && $EVAL_ERROR && _d($EVAL_ERROR); + + my $use_sql = 'USE ' . $q->quote($db); + PTDEBUG && _d($dbh, $use_sql); + $dbh->do($use_sql); + + my $show_sql = "SHOW CREATE TABLE " . $q->quote($db, $tbl); + PTDEBUG && _d($show_sql); + my $href; + eval { $href = $dbh->selectrow_hashref($show_sql); }; + if ( my $e = $EVAL_ERROR ) { + PTDEBUG && _d($old_sql_mode); + $dbh->do($old_sql_mode); + + die $e; + } + + PTDEBUG && _d($old_sql_mode); + $dbh->do($old_sql_mode); + + my ($key) = grep { m/create (?:table|view)/i } keys %$href; + if ( !$key ) { + die "Error: no 'Create Table' or 'Create View' in result set from " + . "$show_sql: " . 
Dumper($href); + } + + return $href->{$key}; +} + +sub parse { + my ( $self, $ddl, $opts ) = @_; + return unless $ddl; + + if ( $ddl =~ m/CREATE (?:TEMPORARY )?TABLE "/ ) { + $ddl = $self->ansi_to_legacy($ddl); + } + elsif ( $ddl !~ m/CREATE (?:TEMPORARY )?TABLE `/ ) { + die "TableParser doesn't handle CREATE TABLE without quoting."; + } + + my ($name) = $ddl =~ m/CREATE (?:TEMPORARY )?TABLE\s+(`.+?`)/; + (undef, $name) = $self->{Quoter}->split_unquote($name) if $name; + + $ddl =~ s/(`[^`\n]+`)/\L$1/gm; + + my $engine = $self->get_engine($ddl); + + my @defs = $ddl =~ m/^(\s+`.*?),?$/gm; + my @cols = map { $_ =~ m/`([^`]+)`/ } @defs; + PTDEBUG && _d('Table cols:', join(', ', map { "`$_`" } @cols)); + + my %def_for; + @def_for{@cols} = @defs; + + my (@nums, @null, @non_generated); + my (%type_for, %is_nullable, %is_numeric, %is_autoinc, %is_generated); + foreach my $col ( @cols ) { + my $def = $def_for{$col}; + + $def =~ s/``//g; + + my ( $type ) = $def =~ m/`[^`]+`\s([a-z]+)/; + die "Can't determine column type for $def" unless $type; + $type_for{$col} = $type; + if ( $type =~ m/(?:(?:tiny|big|medium|small)?int|float|double|decimal|year)/ ) { + push @nums, $col; + $is_numeric{$col} = 1; + } + if ( $def !~ m/NOT NULL/ ) { + push @null, $col; + $is_nullable{$col} = 1; + } + if ( remove_quoted_text($def) =~ m/\WGENERATED\W/i ) { + $is_generated{$col} = 1; + } else { + push @non_generated, $col; + } + $is_autoinc{$col} = $def =~ m/AUTO_INCREMENT/i ? 1 : 0; + } + + my ($keys, $clustered_key) = $self->get_keys($ddl, $opts, \%is_nullable); + + my ($charset) = $ddl =~ m/DEFAULT CHARSET=(\w+)/; + + return { + name => $name, + cols => \@cols, + col_posn => { map { $cols[$_] => $_ } 0..$#cols }, + is_col => { map { $_ => 1 } @non_generated }, + null_cols => \@null, + is_nullable => \%is_nullable, + non_generated_cols => \@non_generated, + is_autoinc => \%is_autoinc, + is_generated => \%is_generated, + clustered_key => $clustered_key, + keys => $keys, + defs => \%def_for, + numeric_cols => \@nums, + is_numeric => \%is_numeric, + engine => $engine, + type_for => \%type_for, + charset => $charset, + }; +} + +sub remove_quoted_text { + my ($string) = @_; + $string =~ s/\\['"]//g; + $string =~ s/`[^`]*?`//g; + $string =~ s/"[^"]*?"//g; + $string =~ s/'[^']*?'//g; + return $string; +} + +sub sort_indexes { + my ( $self, $tbl ) = @_; + + my @indexes + = sort { + (($a ne 'PRIMARY') <=> ($b ne 'PRIMARY')) + || ( !$tbl->{keys}->{$a}->{is_unique} <=> !$tbl->{keys}->{$b}->{is_unique} ) + || ( $tbl->{keys}->{$a}->{is_nullable} <=> $tbl->{keys}->{$b}->{is_nullable} ) + || ( scalar(@{$tbl->{keys}->{$a}->{cols}}) <=> scalar(@{$tbl->{keys}->{$b}->{cols}}) ) + } + grep { + $tbl->{keys}->{$_}->{type} eq 'BTREE' + } + sort keys %{$tbl->{keys}}; + + PTDEBUG && _d('Indexes sorted best-first:', join(', ', @indexes)); + return @indexes; +} + +sub find_best_index { + my ( $self, $tbl, $index ) = @_; + my $best; + if ( $index ) { + ($best) = grep { uc $_ eq uc $index } keys %{$tbl->{keys}}; + } + if ( !$best ) { + if ( $index ) { + die "Index '$index' does not exist in table"; + } + else { + ($best) = $self->sort_indexes($tbl); + } + } + PTDEBUG && _d('Best index found is', $best); + return $best; +} + +sub find_possible_keys { + my ( $self, $dbh, $database, $table, $quoter, $where ) = @_; + return () unless $where; + my $sql = 'EXPLAIN SELECT * FROM ' . $quoter->quote($database, $table) + . ' WHERE ' . 
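+ # EXPLAIN-ing the query with the user's WHERE clause makes MySQL report,
+ # via possible_keys, every index it considers usable for the filter: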
$where; + PTDEBUG && _d($sql); + my $expl = $dbh->selectrow_hashref($sql); + $expl = { map { lc($_) => $expl->{$_} } keys %$expl }; + if ( $expl->{possible_keys} ) { + PTDEBUG && _d('possible_keys =', $expl->{possible_keys}); + my @candidates = split(',', $expl->{possible_keys}); + my %possible = map { $_ => 1 } @candidates; + if ( $expl->{key} ) { + PTDEBUG && _d('MySQL chose', $expl->{key}); + unshift @candidates, grep { $possible{$_} } split(',', $expl->{key}); + PTDEBUG && _d('Before deduping:', join(', ', @candidates)); + my %seen; + @candidates = grep { !$seen{$_}++ } @candidates; + } + PTDEBUG && _d('Final list:', join(', ', @candidates)); + return @candidates; + } + else { + PTDEBUG && _d('No keys in possible_keys'); + return (); + } +} + +sub check_table { + my ( $self, %args ) = @_; + my @required_args = qw(dbh db tbl); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $db, $tbl) = @args{@required_args}; + my $q = $self->{Quoter} || 'Quoter'; + my $db_tbl = $q->quote($db, $tbl); + PTDEBUG && _d('Checking', $db_tbl); + + $self->{check_table_error} = undef; + + my $sql = "SHOW TABLES FROM " . $q->quote($db) + . ' LIKE ' . $q->literal_like($tbl); + PTDEBUG && _d($sql); + my $row; + eval { + $row = $dbh->selectrow_arrayref($sql); + }; + if ( my $e = $EVAL_ERROR ) { + PTDEBUG && _d($e); + $self->{check_table_error} = $e; + return 0; + } + if ( !$row->[0] || $row->[0] ne $tbl ) { + PTDEBUG && _d('Table does not exist'); + return 0; + } + + PTDEBUG && _d('Table', $db, $tbl, 'exists'); + return 1; + +} + +sub get_engine { + my ( $self, $ddl, $opts ) = @_; + my ( $engine ) = $ddl =~ m/\).*?(?:ENGINE|TYPE)=(\w+)/; + PTDEBUG && _d('Storage engine:', $engine); + return $engine || undef; +} + +sub get_keys { + my ( $self, $ddl, $opts, $is_nullable ) = @_; + my $engine = $self->get_engine($ddl); + my $keys = {}; + my $clustered_key = undef; + + KEY: + foreach my $key ( $ddl =~ m/^ ((?:[A-Z]+ )?KEY .*)$/gm ) { + + next KEY if $key =~ m/FOREIGN/; + + my $key_ddl = $key; + PTDEBUG && _d('Parsed key:', $key_ddl); + + if ( !$engine || $engine !~ m/MEMORY|HEAP/ ) { + $key =~ s/USING HASH/USING BTREE/; + } + + my ( $type, $cols ) = $key =~ m/(?:USING (\w+))? \((.+)\)/; + my ( $special ) = $key =~ m/(FULLTEXT|SPATIAL)/; + $type = $type || $special || 'BTREE'; + my ($name) = $key =~ m/(PRIMARY|`[^`]*`)/; + my $unique = $key =~ m/PRIMARY|UNIQUE/ ? 
1 : 0; + my @cols; + my @col_prefixes; + foreach my $col_def ( $cols =~ m/`[^`]+`(?:\(\d+\))?/g ) { + my ($name, $prefix) = $col_def =~ m/`([^`]+)`(?:\((\d+)\))?/; + push @cols, $name; + push @col_prefixes, $prefix; + } + $name =~ s/`//g; + + PTDEBUG && _d( $name, 'key cols:', join(', ', map { "`$_`" } @cols)); + + $keys->{$name} = { + name => $name, + type => $type, + colnames => $cols, + cols => \@cols, + col_prefixes => \@col_prefixes, + is_unique => $unique, + is_nullable => scalar(grep { $is_nullable->{$_} } @cols), + is_col => { map { $_ => 1 } @cols }, + ddl => $key_ddl, + }; + + if ( ($engine || '') =~ m/InnoDB/i && !$clustered_key ) { + my $this_key = $keys->{$name}; + if ( $this_key->{name} eq 'PRIMARY' ) { + $clustered_key = 'PRIMARY'; + } + elsif ( $this_key->{is_unique} && !$this_key->{is_nullable} ) { + $clustered_key = $this_key->{name}; + } + PTDEBUG && $clustered_key && _d('This key is the clustered key'); + } + } + + return $keys, $clustered_key; +} + +sub get_fks { + my ( $self, $ddl, $opts ) = @_; + my $q = $self->{Quoter}; + my $fks = {}; + + foreach my $fk ( + $ddl =~ m/CONSTRAINT .* FOREIGN KEY .* REFERENCES [^\)]*\)/mg ) + { + my ( $name ) = $fk =~ m/CONSTRAINT `(.*?)`/; + my ( $cols ) = $fk =~ m/FOREIGN KEY \(([^\)]+)\)/; + my ( $parent, $parent_cols ) = $fk =~ m/REFERENCES (\S+) \(([^\)]+)\)/; + + my ($db, $tbl) = $q->split_unquote($parent, $opts->{database}); + my %parent_tbl = (tbl => $tbl); + $parent_tbl{db} = $db if $db; + + if ( $parent !~ m/\./ && $opts->{database} ) { + $parent = $q->quote($opts->{database}) . ".$parent"; + } + + $fks->{$name} = { + name => $name, + colnames => $cols, + cols => [ map { s/[ `]+//g; $_; } split(',', $cols) ], + parent_tbl => \%parent_tbl, + parent_tblname => $parent, + parent_cols => [ map { s/[ `]+//g; $_; } split(',', $parent_cols) ], + parent_colnames=> $parent_cols, + ddl => $fk, + }; + } + + return $fks; +} + +sub remove_auto_increment { + my ( $self, $ddl ) = @_; + $ddl =~ s/(^\).*?) AUTO_INCREMENT=\d+\b/$1/m; + return $ddl; +} + +sub get_table_status { + my ( $self, $dbh, $db, $like ) = @_; + my $q = $self->{Quoter}; + my $sql = "SHOW TABLE STATUS FROM " . $q->quote($db); + my @params; + if ( $like ) { + $sql .= ' LIKE ?'; + push @params, $like; + } + PTDEBUG && _d($sql, @params); + my $sth = $dbh->prepare($sql); + eval { $sth->execute(@params); }; + if ($EVAL_ERROR) { + PTDEBUG && _d($EVAL_ERROR); + return; + } + my @tables = @{$sth->fetchall_arrayref({})}; + @tables = map { + my %tbl; # Make a copy with lowercased keys + @tbl{ map { lc $_ } keys %$_ } = values %$_; + $tbl{engine} ||= $tbl{type} || $tbl{comment}; + delete $tbl{type}; + \%tbl; + } @tables; + return @tables; +} + +my $ansi_quote_re = qr/" [^"]* (?: "" [^"]* )* (?<=.) "/ismx; +sub ansi_to_legacy { + my ($self, $ddl) = @_; + $ddl =~ s/($ansi_quote_re)/ansi_quote_replace($1)/ge; + return $ddl; +} + +sub ansi_quote_replace { + my ($val) = @_; + $val =~ s/^"|"$//g; + $val =~ s/`/``/g; + $val =~ s/""/"/g; + return "`$val`"; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End TableParser package +# ########################################################################### + +# ########################################################################### +# RowDiff package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/RowDiff.pm +# t/lib/RowDiff.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package RowDiff; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +sub new { + my ( $class, %args ) = @_; + die "I need a dbh" unless $args{dbh}; + my $self = { %args }; + return bless $self, $class; +} + +sub compare_sets { + my ( $self, %args ) = @_; + my @required_args = qw(left_sth right_sth syncer tbl_struct); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my $left_sth = $args{left_sth}; + my $right_sth = $args{right_sth}; + my $syncer = $args{syncer}; + my $tbl_struct = $args{tbl_struct}; + + my ($lr, $rr); # Current row from the left/right sths. + $args{key_cols} = $syncer->key_cols(); # for key_cmp() + + my $left_done = 0; + my $right_done = 0; + my $done = $self->{done}; + + do { + if ( !$lr && !$left_done ) { + PTDEBUG && _d('Fetching row from left'); + eval { $lr = $left_sth->fetchrow_hashref(); }; + PTDEBUG && $EVAL_ERROR && _d($EVAL_ERROR); + $left_done = !$lr || $EVAL_ERROR ? 1 : 0; + } + elsif ( PTDEBUG ) { + _d('Left still has rows'); + } + + if ( !$rr && !$right_done ) { + PTDEBUG && _d('Fetching row from right'); + eval { $rr = $right_sth->fetchrow_hashref(); }; + PTDEBUG && $EVAL_ERROR && _d($EVAL_ERROR); + $right_done = !$rr || $EVAL_ERROR ? 1 : 0; + } + elsif ( PTDEBUG ) { + _d('Right still has rows'); + } + + my $cmp; + if ( $lr && $rr ) { + $cmp = $self->key_cmp(%args, lr => $lr, rr => $rr); + PTDEBUG && _d('Key comparison on left and right:', $cmp); + } + if ( $lr || $rr ) { + if ( $lr && $rr && defined $cmp && $cmp == 0 ) { + PTDEBUG && _d('Left and right have the same key'); + $syncer->same_row(%args, lr => $lr, rr => $rr); + $self->{same_row}->(%args, lr => $lr, rr => $rr) + if $self->{same_row}; + $lr = $rr = undef; # Fetch another row from each side. 
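+ # Both sides matched, so advance both cursors; since each statement
+ # handle returns rows ordered by the key columns, this loop behaves
+ # like a sort-merge join and visits every row exactly once.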
+ } + elsif ( !$rr || ( defined $cmp && $cmp < 0 ) ) { + PTDEBUG && _d('Left is not in right'); + $syncer->not_in_right(%args, lr => $lr, rr => $rr); + $self->{not_in_right}->(%args, lr => $lr, rr => $rr) + if $self->{not_in_right}; + $lr = undef; + } + else { + PTDEBUG && _d('Right is not in left'); + $syncer->not_in_left(%args, lr => $lr, rr => $rr); + $self->{not_in_left}->(%args, lr => $lr, rr => $rr) + if $self->{not_in_left}; + $rr = undef; + } + } + $left_done = $right_done = 1 if $done && $done->(%args); + } while ( !($left_done && $right_done) ); + PTDEBUG && _d('No more rows'); + $syncer->done_with_rows(); +} + +sub key_cmp { + my ( $self, %args ) = @_; + my @required_args = qw(lr rr key_cols tbl_struct); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless exists $args{$arg}; + } + my ($lr, $rr, $key_cols, $tbl_struct) = @args{@required_args}; + PTDEBUG && _d('Comparing keys using columns:', join(',', @$key_cols)); + + my $callback = $self->{key_cmp}; + my $trf = $self->{trf}; + + foreach my $col ( @$key_cols ) { + my $l = $lr->{$col}; + my $r = $rr->{$col}; + if ( !defined $l || !defined $r ) { + PTDEBUG && _d($col, 'is not defined in both rows'); + return defined $l ? 1 : defined $r ? -1 : 0; + } + else { + if ( $tbl_struct->{is_numeric}->{$col} ) { # Numeric column + PTDEBUG && _d($col, 'is numeric'); + ($l, $r) = $trf->($l, $r, $tbl_struct, $col) if $trf; + my $cmp = $l <=> $r; + if ( $cmp ) { + PTDEBUG && _d('Column', $col, 'differs:', $l, '!=', $r); + $callback->($col, $l, $r) if $callback; + return $cmp; + } + } + elsif ( $l ne $r ) { + my $cmp; + my $coll = $tbl_struct->{collation_for}->{$col}; + if ( $coll && ( $coll ne 'latin1_swedish_ci' + || $l =~ m/[^\040-\177]/ || $r =~ m/[^\040-\177]/) ) + { + PTDEBUG && _d('Comparing', $col, 'via MySQL'); + $cmp = $self->db_cmp($coll, $l, $r); + } + else { + PTDEBUG && _d('Comparing', $col, 'in lowercase'); + $cmp = lc $l cmp lc $r; + } + if ( $cmp ) { + PTDEBUG && _d('Column', $col, 'differs:', $l, 'ne', $r); + $callback->($col, $l, $r) if $callback; + return $cmp; + } + } + } + } + return 0; +} + +sub db_cmp { + my ( $self, $collation, $l, $r ) = @_; + if ( !$self->{sth}->{$collation} ) { + if ( !$self->{charset_for} ) { + PTDEBUG && _d('Fetching collations from MySQL'); + my @collations = @{$self->{dbh}->selectall_arrayref( + 'SHOW COLLATION', {Slice => { collation => 1, charset => 1 }})}; + foreach my $collation ( @collations ) { + $self->{charset_for}->{$collation->{collation}} + = $collation->{charset}; + } + } + my $sql = "SELECT STRCMP(_$self->{charset_for}->{$collation}? COLLATE $collation, " + . "_$self->{charset_for}->{$collation}? COLLATE $collation) AS res"; + PTDEBUG && _d($sql); + $self->{sth}->{$collation} = $self->{dbh}->prepare($sql); + } + my $sth = $self->{sth}->{$collation}; + $sth->execute($l, $r); + return $sth->fetchall_arrayref()->[0]->[0]; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End RowDiff package +# ########################################################################### + +# ########################################################################### +# ChangeHandler package +# This package is a copy without comments from the original. 
The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/ChangeHandler.pm +# t/lib/ChangeHandler.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package ChangeHandler; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +my $DUPE_KEY = qr/Duplicate entry/; +our @ACTIONS = qw(DELETE REPLACE INSERT UPDATE); + +sub new { + my ( $class, %args ) = @_; + foreach my $arg ( qw(Quoter left_db left_tbl right_db right_tbl + replace queue) ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my $q = $args{Quoter}; + + my $self = { + hex_blob => 1, + %args, + left_db_tbl => $q->quote(@args{qw(left_db left_tbl)}), + right_db_tbl => $q->quote(@args{qw(right_db right_tbl)}), + }; + + $self->{src_db_tbl} = $self->{left_db_tbl}; + $self->{dst_db_tbl} = $self->{right_db_tbl}; + + map { $self->{$_} = [] } @ACTIONS; + $self->{changes} = { map { $_ => 0 } @ACTIONS }; + + return bless $self, $class; +} + +sub fetch_back { + my ( $self, $dbh ) = @_; + $self->{fetch_back} = $dbh; + PTDEBUG && _d('Set fetch back dbh', $dbh); + return; +} + +sub set_src { + my ( $self, $src, $dbh ) = @_; + die "I need a src argument" unless $src; + if ( lc $src eq 'left' ) { + $self->{src_db_tbl} = $self->{left_db_tbl}; + $self->{dst_db_tbl} = $self->{right_db_tbl}; + } + elsif ( lc $src eq 'right' ) { + $self->{src_db_tbl} = $self->{right_db_tbl}; + $self->{dst_db_tbl} = $self->{left_db_tbl}; + } + else { + die "src argument must be either 'left' or 'right'" + } + PTDEBUG && _d('Set src to', $src); + $self->fetch_back($dbh) if $dbh; + return; +} + +sub src { + my ( $self ) = @_; + return $self->{src_db_tbl}; +} + +sub dst { + my ( $self ) = @_; + return $self->{dst_db_tbl}; +} + +sub _take_action { + my ( $self, $sql, $dbh ) = @_; + PTDEBUG && _d('Calling subroutines on', $dbh, $sql); + foreach my $action ( @{$self->{actions}} ) { + $action->($sql, $dbh); + } + return; +} + +sub change { + my ( $self, $action, $row, $cols, $dbh ) = @_; + PTDEBUG && _d($dbh, $action, 'where', $self->make_where_clause($row, $cols)); + + return unless $action; + + $self->{changes}->{ + $self->{replace} && $action ne 'DELETE' ? 'REPLACE' : $action + }++; + if ( $self->{queue} ) { + $self->__queue($action, $row, $cols, $dbh); + } + else { + eval { + my $func = "make_$action"; + $self->_take_action($self->$func($row, $cols), $dbh); + }; + if ( $EVAL_ERROR =~ m/$DUPE_KEY/ ) { + PTDEBUG && _d('Duplicate key violation; will queue and rewrite'); + $self->{queue}++; + $self->{replace} = 1; + $self->__queue($action, $row, $cols, $dbh); + } + elsif ( $EVAL_ERROR ) { + die $EVAL_ERROR; + } + } + return; +} + +sub __queue { + my ( $self, $action, $row, $cols, $dbh ) = @_; + PTDEBUG && _d('Queueing change for later'); + if ( $self->{replace} ) { + $action = $action eq 'DELETE' ? $action : 'REPLACE'; + } + push @{$self->{$action}}, [ $row, $cols, $dbh ]; +} + +sub process_rows { + my ( $self, $queue_level, $trace_msg ) = @_; + my $error_count = 0; + TRY: { + if ( $queue_level && $queue_level < $self->{queue} ) { # see redo below! 
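+ # Rows are only flushed once the caller's queue level catches up with
+ # $self->{queue}; duplicate-key errors below bump that level so the
+ # offending rows are re-queued and retried at the very end.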
+ PTDEBUG && _d('Not processing now', $queue_level, '<', $self->{queue}); + return; + } + PTDEBUG && _d('Processing rows:'); + my ($row, $cur_act); + eval { + foreach my $action ( @ACTIONS ) { + my $func = "make_$action"; + my $rows = $self->{$action}; + PTDEBUG && _d(scalar(@$rows), 'to', $action); + $cur_act = $action; + while ( @$rows ) { + $row = shift @$rows; + my $sql = $self->$func(@$row); + $sql .= " /*percona-toolkit $trace_msg*/" if $trace_msg; + $self->_take_action($sql, $row->[2]); + } + } + $error_count = 0; + }; + if ( !$error_count++ && $EVAL_ERROR =~ m/$DUPE_KEY/ ) { + PTDEBUG && _d('Duplicate key violation; re-queueing and rewriting'); + $self->{queue}++; # Defer rows to the very end + $self->{replace} = 1; + $self->__queue($cur_act, @$row); + redo TRY; + } + elsif ( $EVAL_ERROR ) { + die $EVAL_ERROR; + } + } +} + +sub make_DELETE { + my ( $self, $row, $cols ) = @_; + PTDEBUG && _d('Make DELETE'); + return "DELETE FROM $self->{dst_db_tbl} WHERE " + . $self->make_where_clause($row, $cols) + . ' LIMIT 1'; +} + +sub make_UPDATE { + my ( $self, $row, $cols ) = @_; + PTDEBUG && _d('Make UPDATE'); + if ( $self->{replace} ) { + return $self->make_row('REPLACE', $row, $cols); + } + my %in_where = map { $_ => 1 } @$cols; + my $where = $self->make_where_clause($row, $cols); + my @cols; + if ( my $dbh = $self->{fetch_back} ) { + my $sql = $self->make_fetch_back_query($where); + PTDEBUG && _d('Fetching data on dbh', $dbh, 'for UPDATE:', $sql); + my $res = $dbh->selectrow_hashref($sql); + @{$row}{keys %$res} = values %$res; + @cols = $self->sort_cols($res); + } + else { + @cols = $self->sort_cols($row); + } + my $types = $self->{tbl_struct}->{type_for}; + return "UPDATE $self->{dst_db_tbl} SET " + . join(', ', map { + my $is_hex = ($types->{$_} || '') =~ m/^0x[0-9a-fA-F]+$/i; + my $is_char = ($types->{$_} || '') =~ m/char|text|enum/i; + my $is_float = ($types->{$_} || '') =~ m/float|double/i; + $self->{Quoter}->quote($_) + . '=' + . $self->{Quoter}->quote_val( + $row->{$_}, + is_char => $is_char && !$is_hex, + is_float => $is_float, + ); + } grep { !$in_where{$_} } @cols) + . " WHERE $where LIMIT 1"; +} + +sub make_INSERT { + my ( $self, $row, $cols ) = @_; + PTDEBUG && _d('Make INSERT'); + if ( $self->{replace} ) { + return $self->make_row('REPLACE', $row, $cols); + } + return $self->make_row('INSERT', $row, $cols); +} + +sub make_REPLACE { + my ( $self, $row, $cols ) = @_; + PTDEBUG && _d('Make REPLACE'); + return $self->make_row('REPLACE', $row, $cols); +} + +sub make_row { + my ( $self, $verb, $row, $cols ) = @_; + my @cols; + if ( my $dbh = $self->{fetch_back} ) { + my $where = $self->make_where_clause($row, $cols); + my $sql = $self->make_fetch_back_query($where); + PTDEBUG && _d('Fetching data on dbh', $dbh, 'for', $verb, ':', $sql); + my $res = $dbh->selectrow_hashref($sql); + @{$row}{keys %$res} = values %$res; + @cols = $self->sort_cols($res); + } + else { + @cols = $self->sort_cols($row); + } + my $q = $self->{Quoter}; + my $type_for = $self->{tbl_struct}->{type_for}; + + return "$verb INTO $self->{dst_db_tbl}(" + . join(', ', map { $q->quote($_) } @cols) + . ') VALUES (' + . join(', ', + map { + my $is_hex = ($type_for->{$_} || '') =~ m/^0x[0-9a-fA-F]+$/i; + my $is_char = ($type_for->{$_} || '') =~ m/char|text|enum/i; + my $is_float = ($type_for->{$_} || '') =~ m/float|double/i; + $q->quote_val( + $row->{$_}, + is_char => $is_char && !$is_hex, + is_float => $is_float, + ) + } @cols) + . 
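+ # The is_hex guard keeps 0x... hex literals (as fetched back for
+ # blob/binary columns by make_fetch_back_query) from being re-quoted
+ # as character strings.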
')'; + +} + +sub make_where_clause { + my ( $self, $row, $cols ) = @_; + my @clauses = map { + my $col = $_; + $col = $self->{Quoter}->quote($col); + + my $val = $row->{$_}; + my $sep = defined $val ? '=' : ' IS '; + my $is_char = ($self->{tbl_struct}->{type_for}->{$_} || '') =~ m/char|text|enum/i; + my $is_float = ($self->{tbl_struct}->{type_for}->{$_} || '') =~ m/float|double/i; + my $is_crc32 = ($self->{tbl_struct}->{type_for}->{$_} || '') =~ m/binary|text|blob/i; + $col = "CRC32($col)" if ($is_crc32); + $col . $sep . $self->{Quoter}->quote_val($val, + is_char => $is_char, + is_float => $is_float); + } @$cols; + return join(' AND ', @clauses); +} + +sub get_changes { + my ( $self ) = @_; + return %{$self->{changes}}; +} + + +sub sort_cols { + my ( $self, $row ) = @_; + my @cols; + if ( $self->{tbl_struct} ) { + my $pos = $self->{tbl_struct}->{col_posn}; + my @not_in_tbl; + @cols = sort { + $pos->{$a} <=> $pos->{$b} + } + grep { + if ( !defined $pos->{$_} ) { + push @not_in_tbl, $_; + 0; + } + else { + 1; + } + } + sort keys %$row; + push @cols, @not_in_tbl if @not_in_tbl; + } + else { + @cols = sort keys %$row; + } + return @cols; +} + +sub make_fetch_back_query { + my ( $self, $where ) = @_; + die "I need a where argument" unless $where; + my $cols = '*'; + my $tbl_struct = $self->{tbl_struct}; + if ( $tbl_struct ) { + $cols = join(', ', + map { + my $col = $_; + if ( $self->{hex_blob} + && $tbl_struct->{type_for}->{$col} =~ m/b(?:lob|inary)/ ) { + $col = "IF(BINARY(`$col`)='', '', CONCAT('0x', HEX(`$col`))) AS `$col`"; + } + else { + $col = "`$col`"; + } + $col; + } @{ $tbl_struct->{cols} } + ); + + if ( !$cols ) { + PTDEBUG && _d('Failed to make explicit columns list from tbl struct'); + $cols = '*'; + } + } + return "SELECT $cols FROM $self->{src_db_tbl} WHERE $where LIMIT 1"; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End ChangeHandler package +# ########################################################################### + +# ########################################################################### +# TableChunker package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/TableChunker.pm +# t/lib/TableChunker.t +# See https://launchpad.net/percona-toolkit for more information. 
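+#
+# Illustrative usage only (hypothetical handles; not part of the upstream
+# copy):
+#   my $chunker = TableChunker->new(Quoter => $q, TableParser => $tp);
+#   my @chunks  = $chunker->calculate_chunks(
+#      dbh => $dbh, db => 'test', tbl => 't1', tbl_struct => $tbl_struct,
+#      chunk_col => 'id', rows_in_range => 1_000_000, chunk_size => 10_000,
+#      min => 1, max => 1_000_000,
+#   );
+#   # Each element is a WHERE predicate, e.g. "`id` >= 5001 AND `id` < 10001".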
+# ########################################################################### +{ +package TableChunker; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use POSIX qw(floor ceil); +use List::Util qw(min max); +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +sub new { + my ( $class, %args ) = @_; + foreach my $arg ( qw(Quoter TableParser) ) { + die "I need a $arg argument" unless $args{$arg}; + } + + my %int_types = map { $_ => 1 } qw(bigint date datetime int mediumint smallint time timestamp tinyint year); + my %real_types = map { $_ => 1 } qw(decimal double float); + + my $self = { + %args, + int_types => \%int_types, + real_types => \%real_types, + EPOCH => '1970-01-01', + }; + + return bless $self, $class; +} + +sub find_chunk_columns { + my ( $self, %args ) = @_; + foreach my $arg ( qw(tbl_struct) ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $tbl_struct = $args{tbl_struct}; + + my @possible_indexes; + foreach my $index ( values %{ $tbl_struct->{keys} } ) { + + next unless $index->{type} eq 'BTREE'; + + next if grep { defined } @{$index->{col_prefixes}}; + + if ( $args{exact} ) { + next unless $index->{is_unique} && @{$index->{cols}} == 1; + } + + push @possible_indexes, $index; + } + PTDEBUG && _d('Possible chunk indexes in order:', + join(', ', map { $_->{name} } @possible_indexes)); + + my $can_chunk_exact = 0; + my @candidate_cols; + foreach my $index ( @possible_indexes ) { + my $col = $index->{cols}->[0]; + + my $col_type = $tbl_struct->{type_for}->{$col}; + next unless $self->{int_types}->{$col_type} + || $self->{real_types}->{$col_type} + || $col_type =~ m/char/; + + push @candidate_cols, { column => $col, index => $index->{name} }; + } + + $can_chunk_exact = 1 if $args{exact} && scalar @candidate_cols; + + if ( PTDEBUG ) { + my $chunk_type = $args{exact} ? 'Exact' : 'Inexact'; + _d($chunk_type, 'chunkable:', + join(', ', map { "$_->{column} on $_->{index}" } @candidate_cols)); + } + + my @result; + PTDEBUG && _d('Ordering columns by order in tbl, PK first'); + if ( $tbl_struct->{keys}->{PRIMARY} ) { + my $pk_first_col = $tbl_struct->{keys}->{PRIMARY}->{cols}->[0]; + @result = grep { $_->{column} eq $pk_first_col } @candidate_cols; + @candidate_cols = grep { $_->{column} ne $pk_first_col } @candidate_cols; + } + my $i = 0; + my %col_pos = map { $_ => $i++ } @{$tbl_struct->{cols}}; + push @result, sort { $col_pos{$a->{column}} <=> $col_pos{$b->{column}} } + @candidate_cols; + + if ( PTDEBUG ) { + _d('Chunkable columns:', + join(', ', map { "$_->{column} on $_->{index}" } @result)); + _d('Can chunk exactly:', $can_chunk_exact); + } + + return ($can_chunk_exact, @result); +} + +sub calculate_chunks { + my ( $self, %args ) = @_; + my @required_args = qw(dbh db tbl tbl_struct chunk_col rows_in_range chunk_size); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + PTDEBUG && _d('Calculate chunks for', + join(", ", map {"$_=".(defined $args{$_} ? 
$args{$_} : "undef")} + qw(db tbl chunk_col min max rows_in_range chunk_size zero_chunk exact) + )); + + if ( !$args{rows_in_range} ) { + PTDEBUG && _d("Empty table"); + return '1=1'; + } + + if ( $args{rows_in_range} < $args{chunk_size} ) { + PTDEBUG && _d("Chunk size larger than rows in range"); + return '1=1'; + } + + my $q = $self->{Quoter}; + my $dbh = $args{dbh}; + my $chunk_col = $args{chunk_col}; + my $tbl_struct = $args{tbl_struct}; + my $col_type = $tbl_struct->{type_for}->{$chunk_col}; + PTDEBUG && _d('chunk col type:', $col_type); + + my %chunker; + if ( $tbl_struct->{is_numeric}->{$chunk_col} || $col_type =~ /date|time/ ) { + %chunker = $self->_chunk_numeric(%args); + } + elsif ( $col_type =~ m/char/ ) { + %chunker = $self->_chunk_char(%args); + } + else { + die "Cannot chunk $col_type columns"; + } + PTDEBUG && _d("Chunker:", Dumper(\%chunker)); + my ($col, $start_point, $end_point, $interval, $range_func) + = @chunker{qw(col start_point end_point interval range_func)}; + + my @chunks; + if ( $start_point < $end_point ) { + + push @chunks, "$col = 0" if $chunker{have_zero_chunk}; + + my ($beg, $end); + my $iter = 0; + for ( my $i = $start_point; $i < $end_point; $i += $interval ) { + ($beg, $end) = $self->$range_func($dbh, $i, $interval, $end_point); + + if ( $iter++ == 0 ) { + push @chunks, + ($chunker{have_zero_chunk} ? "$col > 0 AND " : "") + ."$col < " . $q->quote_val($end); + } + else { + push @chunks, "$col >= " . $q->quote_val($beg) . " AND $col < " . $q->quote_val($end); + } + } + + my $chunk_range = lc($args{chunk_range} || 'open'); + my $nullable = $args{tbl_struct}->{is_nullable}->{$args{chunk_col}}; + pop @chunks; + if ( @chunks ) { + push @chunks, "$col >= " . $q->quote_val($beg) + . ($chunk_range eq 'openclosed' + ? " AND $col <= " . $q->quote_val($args{max}) : ""); + } + else { + push @chunks, $nullable ? "$col IS NOT NULL" : '1=1'; + } + if ( $nullable ) { + push @chunks, "$col IS NULL"; + } + } + else { + PTDEBUG && _d('No chunks; using single chunk 1=1'); + push @chunks, '1=1'; + } + + return @chunks; +} + +sub _chunk_numeric { + my ( $self, %args ) = @_; + my @required_args = qw(dbh db tbl tbl_struct chunk_col rows_in_range chunk_size); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my $q = $self->{Quoter}; + my $db_tbl = $q->quote($args{db}, $args{tbl}); + my $col_type = $args{tbl_struct}->{type_for}->{$args{chunk_col}}; + + my $range_func; + if ( $col_type =~ m/(?:int|year|float|double|decimal)$/ ) { + $range_func = 'range_num'; + } + elsif ( $col_type =~ m/^(?:timestamp|date|time)$/ ) { + $range_func = "range_$col_type"; + } + elsif ( $col_type eq 'datetime' ) { + $range_func = 'range_datetime'; + } + + my ($start_point, $end_point); + eval { + $start_point = $self->value_to_number( + value => $args{min}, + column_type => $col_type, + dbh => $args{dbh}, + ); + $end_point = $self->value_to_number( + value => $args{max}, + column_type => $col_type, + dbh => $args{dbh}, + ); + }; + if ( $EVAL_ERROR ) { + if ( $EVAL_ERROR =~ m/don't know how to chunk/ ) { + die $EVAL_ERROR; + } + else { + die "Error calculating chunk start and end points for table " + . "`$args{tbl_struct}->{name}` on column `$args{chunk_col}` " + . "with min/max values " + . join('/', + map { defined $args{$_} ? $args{$_} : 'undef' } qw(min max)) + . ":\n\n" + . $EVAL_ERROR + . "\nVerify that the min and max values are valid for the column. " + . "If they are valid, this error could be caused by a bug in the " + . 
"tool."; + } + } + + if ( !defined $start_point ) { + PTDEBUG && _d('Start point is undefined'); + $start_point = 0; + } + if ( !defined $end_point || $end_point < $start_point ) { + PTDEBUG && _d('End point is undefined or before start point'); + $end_point = 0; + } + PTDEBUG && _d("Actual chunk range:", $start_point, "to", $end_point); + + my $have_zero_chunk = 0; + if ( $args{zero_chunk} ) { + if ( $start_point != $end_point && $start_point >= 0 ) { + PTDEBUG && _d('Zero chunking'); + my $nonzero_val = $self->get_nonzero_value( + %args, + db_tbl => $db_tbl, + col => $args{chunk_col}, + col_type => $col_type, + val => $args{min} + ); + $start_point = $self->value_to_number( + value => $nonzero_val, + column_type => $col_type, + dbh => $args{dbh}, + ); + $have_zero_chunk = 1; + } + else { + PTDEBUG && _d("Cannot zero chunk"); + } + } + PTDEBUG && _d("Using chunk range:", $start_point, "to", $end_point); + + my $interval = $args{chunk_size} + * ($end_point - $start_point) + / $args{rows_in_range}; + if ( $self->{int_types}->{$col_type} ) { + $interval = ceil($interval); + } + $interval ||= $args{chunk_size}; + if ( $args{exact} ) { + $interval = $args{chunk_size}; + } + PTDEBUG && _d('Chunk interval:', $interval, 'units'); + + return ( + col => $q->quote($args{chunk_col}), + start_point => $start_point, + end_point => $end_point, + interval => $interval, + range_func => $range_func, + have_zero_chunk => $have_zero_chunk, + ); +} + +sub _chunk_char { + my ( $self, %args ) = @_; + my @required_args = qw(dbh db tbl tbl_struct chunk_col min max rows_in_range chunk_size); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my $q = $self->{Quoter}; + my $db_tbl = $q->quote($args{db}, $args{tbl}); + my $dbh = $args{dbh}; + my $chunk_col = $args{chunk_col}; + my $qchunk_col = $q->quote($args{chunk_col}); + my $row; + my $sql; + + my ($min_col, $max_col) = @{args}{qw(min max)}; + $sql = "SELECT ORD(?) AS min_col_ord, ORD(?) 
AS max_col_ord"; + PTDEBUG && _d($dbh, $sql); + my $ord_sth = $dbh->prepare($sql); # avoid quoting issues + $ord_sth->execute($min_col, $max_col); + $row = $ord_sth->fetchrow_arrayref(); + my ($min_col_ord, $max_col_ord) = ($row->[0], $row->[1]); + PTDEBUG && _d("Min/max col char code:", $min_col_ord, $max_col_ord); + + my $base; + my @chars; + PTDEBUG && _d("Table charset:", $args{tbl_struct}->{charset}); + if ( ($args{tbl_struct}->{charset} || "") eq "latin1" ) { + my @sorted_latin1_chars = ( + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, + 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, + 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, + 88, 89, 90, 91, 92, 93, 94, 95, 96, 123, 124, 125, 126, 161, + 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, + 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, + 190, 191, 215, 216, 222, 223, 247, 255); + + my ($first_char, $last_char); + for my $i ( 0..$#sorted_latin1_chars ) { + $first_char = $i and last if $sorted_latin1_chars[$i] >= $min_col_ord; + } + for my $i ( $first_char..$#sorted_latin1_chars ) { + $last_char = $i and last if $sorted_latin1_chars[$i] >= $max_col_ord; + }; + + @chars = map { chr $_; } @sorted_latin1_chars[$first_char..$last_char]; + $base = scalar @chars; + } + else { + + my $tmp_tbl = '__maatkit_char_chunking_map'; + my $tmp_db_tbl = $q->quote($args{db}, $tmp_tbl); + $sql = "DROP TABLE IF EXISTS $tmp_db_tbl"; + PTDEBUG && _d($dbh, $sql); + $dbh->do($sql); + my $col_def = $args{tbl_struct}->{defs}->{$chunk_col}; + $sql = "CREATE TEMPORARY TABLE $tmp_db_tbl ($col_def) " + . "ENGINE=MEMORY"; + PTDEBUG && _d($dbh, $sql); + $dbh->do($sql); + + $sql = "INSERT INTO $tmp_db_tbl VALUES (CHAR(?))"; + PTDEBUG && _d($dbh, $sql); + my $ins_char_sth = $dbh->prepare($sql); # avoid quoting issues + for my $char_code ( $min_col_ord..$max_col_ord ) { + $ins_char_sth->execute($char_code); + } + + $sql = "SELECT $qchunk_col FROM $tmp_db_tbl " + . "WHERE $qchunk_col BETWEEN ? AND ? " + . "ORDER BY $qchunk_col"; + PTDEBUG && _d($dbh, $sql); + my $sel_char_sth = $dbh->prepare($sql); + $sel_char_sth->execute($min_col, $max_col); + + @chars = map { $_->[0] } @{ $sel_char_sth->fetchall_arrayref() }; + $base = scalar @chars; + + $sql = "DROP TABLE $tmp_db_tbl"; + PTDEBUG && _d($dbh, $sql); + $dbh->do($sql); + } + PTDEBUG && _d("Base", $base, "chars:", @chars); + + die "Cannot chunk table $db_tbl using the character column " + . "$chunk_col, most likely because all values start with the " + . "same character. This table must be synced separately by " + . "specifying a list of --algorithms without the Chunk algorithm" + if $base == 1; + + + $sql = "SELECT MAX(LENGTH($qchunk_col)) FROM $db_tbl " + . ($args{where} ? "WHERE $args{where} " : "") + . 
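+ # MAX(LENGTH(...)) caps how many base-N character positions are
+ # considered when sizing the chunk intervals below: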
"ORDER BY $qchunk_col"; + PTDEBUG && _d($dbh, $sql); + $row = $dbh->selectrow_arrayref($sql); + my $max_col_len = $row->[0]; + PTDEBUG && _d("Max column value:", $max_col, $max_col_len); + my $n_values; + for my $n_chars ( 1..$max_col_len ) { + $n_values = $base**$n_chars; + if ( $n_values >= $args{chunk_size} ) { + PTDEBUG && _d($n_chars, "chars in base", $base, "expresses", + $n_values, "values"); + last; + } + } + + my $n_chunks = $args{rows_in_range} / $args{chunk_size}; + my $interval = floor(($n_values+0.00001) / $n_chunks) || 1; + + my $range_func = sub { + my ( $self, $dbh, $start, $interval, $max ) = @_; + my $start_char = $self->base_count( + count_to => $start, + base => $base, + symbols => \@chars, + ); + my $end_char = $self->base_count( + count_to => min($max, $start + $interval), + base => $base, + symbols => \@chars, + ); + return $start_char, $end_char; + }; + + return ( + col => $qchunk_col, + start_point => 0, + end_point => $n_values, + interval => $interval, + range_func => $range_func, + ); +} + +sub get_first_chunkable_column { + my ( $self, %args ) = @_; + foreach my $arg ( qw(tbl_struct) ) { + die "I need a $arg argument" unless $args{$arg}; + } + + my ($exact, @cols) = $self->find_chunk_columns(%args); + my $col = $cols[0]->{column}; + my $idx = $cols[0]->{index}; + + my $wanted_col = $args{chunk_column}; + my $wanted_idx = $args{chunk_index}; + PTDEBUG && _d("Preferred chunk col/idx:", $wanted_col, $wanted_idx); + + if ( $wanted_col && $wanted_idx ) { + foreach my $chunkable_col ( @cols ) { + if ( $wanted_col eq $chunkable_col->{column} + && $wanted_idx eq $chunkable_col->{index} ) { + $col = $wanted_col; + $idx = $wanted_idx; + last; + } + } + } + elsif ( $wanted_col ) { + foreach my $chunkable_col ( @cols ) { + if ( $wanted_col eq $chunkable_col->{column} ) { + $col = $wanted_col; + $idx = $chunkable_col->{index}; + last; + } + } + } + elsif ( $wanted_idx ) { + foreach my $chunkable_col ( @cols ) { + if ( $wanted_idx eq $chunkable_col->{index} ) { + $col = $chunkable_col->{column}; + $idx = $wanted_idx; + last; + } + } + } + + PTDEBUG && _d('First chunkable col/index:', $col, $idx); + return $col, $idx; +} + +sub size_to_rows { + my ( $self, %args ) = @_; + my @required_args = qw(dbh db tbl chunk_size); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $db, $tbl, $chunk_size) = @args{@required_args}; + my $q = $self->{Quoter}; + my $tp = $self->{TableParser}; + + my ($n_rows, $avg_row_length); + + my ( $num, $suffix ) = $chunk_size =~ m/^(\d+)([MGk])?$/; + if ( $suffix ) { # Convert to bytes. + $chunk_size = $suffix eq 'k' ? $num * 1_024 + : $suffix eq 'M' ? $num * 1_024 * 1_024 + : $num * 1_024 * 1_024 * 1_024; + } + elsif ( $num ) { + $n_rows = $num; + } + else { + die "Invalid chunk size $chunk_size; must be an integer " + . "with optional suffix kMG"; + } + + if ( $suffix || $args{avg_row_length} ) { + my ($status) = $tp->get_table_status($dbh, $db, $tbl); + $avg_row_length = $status->{avg_row_length}; + if ( !defined $n_rows ) { + $n_rows = $avg_row_length ? 
ceil($chunk_size / $avg_row_length) : undef; + } + } + + return $n_rows, $avg_row_length; +} + +sub get_range_statistics { + my ( $self, %args ) = @_; + my @required_args = qw(dbh db tbl chunk_col tbl_struct); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $db, $tbl, $col) = @args{@required_args}; + my $where = $args{where}; + my $q = $self->{Quoter}; + + my $col_type = $args{tbl_struct}->{type_for}->{$col}; + my $col_is_numeric = $args{tbl_struct}->{is_numeric}->{$col}; + + my $db_tbl = $q->quote($db, $tbl); + $col = $q->quote($col); + + my ($min, $max); + eval { + my $sql = "SELECT MIN($col), MAX($col) FROM $db_tbl" + . ($args{index_hint} ? " $args{index_hint}" : "") + . ($where ? " WHERE ($where)" : ''); + PTDEBUG && _d($dbh, $sql); + ($min, $max) = $dbh->selectrow_array($sql); + PTDEBUG && _d("Actual end points:", $min, $max); + + ($min, $max) = $self->get_valid_end_points( + %args, + dbh => $dbh, + db_tbl => $db_tbl, + col => $col, + col_type => $col_type, + min => $min, + max => $max, + ); + PTDEBUG && _d("Valid end points:", $min, $max); + }; + if ( $EVAL_ERROR ) { + die "Error getting min and max values for table $db_tbl " + . "on column $col: $EVAL_ERROR"; + } + + my $sql = "EXPLAIN SELECT * FROM $db_tbl" + . ($args{index_hint} ? " $args{index_hint}" : "") + . ($where ? " WHERE $where" : ''); + PTDEBUG && _d($sql); + my $expl = $dbh->selectrow_hashref($sql); + + return ( + min => $min, + max => $max, + rows_in_range => $expl->{rows}, + ); +} + +sub inject_chunks { + my ( $self, %args ) = @_; + foreach my $arg ( qw(database table chunks chunk_num query) ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + PTDEBUG && _d('Injecting chunk', $args{chunk_num}); + my $query = $args{query}; + my $comment = sprintf("/*%s.%s:%d/%d*/", + $args{database}, $args{table}, + $args{chunk_num} + 1, scalar @{$args{chunks}}); + $query =~ s!/\*PROGRESS_COMMENT\*/!$comment!; + my $where = "WHERE (" . $args{chunks}->[$args{chunk_num}] . ')'; + if ( $args{where} && grep { $_ } @{$args{where}} ) { + $where .= " AND (" + . join(" AND ", map { "($_)" } grep { $_ } @{$args{where}} ) + . ")"; + } + my $db_tbl = $self->{Quoter}->quote(@args{qw(database table)}); + my $index_hint = $args{index_hint} || ''; + + PTDEBUG && _d('Parameters:', + Dumper({WHERE => $where, DB_TBL => $db_tbl, INDEX_HINT => $index_hint})); + $query =~ s!/\*WHERE\*/! $where!; + $query =~ s!/\*DB_TBL\*/!$db_tbl!; + $query =~ s!/\*INDEX_HINT\*/! $index_hint!; + $query =~ s!/\*CHUNK_NUM\*/! 
$args{chunk_num} AS chunk_num,!; + + return $query; +} + + +sub value_to_number { + my ( $self, %args ) = @_; + my @required_args = qw(column_type dbh); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my $val = $args{value}; + my ($col_type, $dbh) = @args{@required_args}; + PTDEBUG && _d('Converting MySQL', $col_type, $val); + + return unless defined $val; # value is NULL + + my %mysql_conv_func_for = ( + timestamp => 'UNIX_TIMESTAMP', + date => 'TO_DAYS', + time => 'TIME_TO_SEC', + datetime => 'TO_DAYS', + ); + + my $num; + if ( $col_type =~ m/(?:int|year|float|double|decimal)$/ ) { + $num = $val; + } + elsif ( $col_type =~ m/^(?:timestamp|date|time)$/ ) { + my $func = $mysql_conv_func_for{$col_type}; + my $sql = "SELECT $func(?)"; + PTDEBUG && _d($dbh, $sql, $val); + my $sth = $dbh->prepare($sql); + $sth->execute($val); + ($num) = $sth->fetchrow_array(); + } + elsif ( $col_type eq 'datetime' ) { + $num = $self->timestampdiff($dbh, $val); + } + else { + die "I don't know how to chunk $col_type\n"; + } + PTDEBUG && _d('Converts to', $num); + return $num; +} + +sub range_num { + my ( $self, $dbh, $start, $interval, $max ) = @_; + my $end = min($max, $start + $interval); + + + $start = sprintf('%.17f', $start) if $start =~ /e/; + $end = sprintf('%.17f', $end) if $end =~ /e/; + + $start =~ s/\.(\d{5}).*$/.$1/; + $end =~ s/\.(\d{5}).*$/.$1/; + + if ( $end > $start ) { + return ( $start, $end ); + } + else { + die "Chunk size is too small: $end !> $start\n"; + } +} + +sub range_time { + my ( $self, $dbh, $start, $interval, $max ) = @_; + my $sql = "SELECT SEC_TO_TIME($start), SEC_TO_TIME(LEAST($max, $start + $interval))"; + PTDEBUG && _d($sql); + return $dbh->selectrow_array($sql); +} + +sub range_date { + my ( $self, $dbh, $start, $interval, $max ) = @_; + my $sql = "SELECT FROM_DAYS($start), FROM_DAYS(LEAST($max, $start + $interval))"; + PTDEBUG && _d($sql); + return $dbh->selectrow_array($sql); +} + +sub range_datetime { + my ( $self, $dbh, $start, $interval, $max ) = @_; + my $sql = "SELECT DATE_ADD('$self->{EPOCH}', INTERVAL $start SECOND), " + . "DATE_ADD('$self->{EPOCH}', INTERVAL LEAST($max, $start + $interval) SECOND)"; + PTDEBUG && _d($sql); + return $dbh->selectrow_array($sql); +} + +sub range_timestamp { + my ( $self, $dbh, $start, $interval, $max ) = @_; + my $sql = "SELECT FROM_UNIXTIME($start), FROM_UNIXTIME(LEAST($max, $start + $interval))"; + PTDEBUG && _d($sql); + return $dbh->selectrow_array($sql); +} + +sub timestampdiff { + my ( $self, $dbh, $time ) = @_; + my $sql = "SELECT (COALESCE(TO_DAYS('$time'), 0) * 86400 + TIME_TO_SEC('$time')) " + . "- TO_DAYS('$self->{EPOCH} 00:00:00') * 86400"; + PTDEBUG && _d($sql); + my ( $diff ) = $dbh->selectrow_array($sql); + $sql = "SELECT DATE_ADD('$self->{EPOCH}', INTERVAL $diff SECOND)"; + PTDEBUG && _d($sql); + my ( $check ) = $dbh->selectrow_array($sql); + die <<" EOF" + Incorrect datetime math: given $time, calculated $diff but checked to $check. + This could be due to a version of MySQL that overflows on large interval + values to DATE_ADD(), or the given datetime is not a valid date. If not, + please report this as a bug. 
+ EOF + unless $check eq $time; + return $diff; +} + + + + +sub get_valid_end_points { + my ( $self, %args ) = @_; + my @required_args = qw(dbh db_tbl col col_type); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $db_tbl, $col, $col_type) = @args{@required_args}; + my ($real_min, $real_max) = @args{qw(min max)}; + + my $err_fmt = "Error finding a valid %s value for table $db_tbl on " + . "column $col. The real %s value %s is invalid and " + . "no other valid values were found. Verify that the table " + . "has at least one valid value for this column" + . ($args{where} ? " where $args{where}." : "."); + + my $valid_min = $real_min; + if ( defined $valid_min ) { + PTDEBUG && _d("Validating min end point:", $real_min); + $valid_min = $self->_get_valid_end_point( + %args, + val => $real_min, + endpoint => 'min', + ); + die sprintf($err_fmt, 'minimum', 'minimum', + (defined $real_min ? $real_min : "NULL")) + unless defined $valid_min; + } + + my $valid_max = $real_max; + if ( defined $valid_max ) { + PTDEBUG && _d("Validating max end point:", $real_max); + $valid_max = $self->_get_valid_end_point( + %args, + val => $real_max, + endpoint => 'max', + ); + die sprintf($err_fmt, 'maximum', 'maximum', + (defined $real_max ? $real_max : "NULL")) + unless defined $valid_max; + } + + return $valid_min, $valid_max; +} + +sub _get_valid_end_point { + my ( $self, %args ) = @_; + my @required_args = qw(dbh db_tbl col col_type); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $db_tbl, $col, $col_type) = @args{@required_args}; + my $val = $args{val}; + + return $val unless defined $val; + + my $validate = $col_type =~ m/time|date/ ? \&_validate_temporal_value + : undef; + + if ( !$validate ) { + PTDEBUG && _d("No validator for", $col_type, "values"); + return $val; + } + + return $val if defined $validate->($dbh, $val); + + PTDEBUG && _d("Value is invalid, getting first valid value"); + $val = $self->get_first_valid_value( + %args, + val => $val, + validate => $validate, + ); + + return $val; +} + +sub get_first_valid_value { + my ( $self, %args ) = @_; + my @required_args = qw(dbh db_tbl col validate endpoint); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $db_tbl, $col, $validate, $endpoint) = @args{@required_args}; + my $tries = defined $args{tries} ? $args{tries} : 5; + my $val = $args{val}; + + return unless defined $val; + + my $cmp = $endpoint =~ m/min/i ? '>' + : $endpoint =~ m/max/i ? '<' + : die "Invalid endpoint arg: $endpoint"; + my $sql = "SELECT $col FROM $db_tbl " + . ($args{index_hint} ? "$args{index_hint} " : "") + . "WHERE $col $cmp ? AND $col IS NOT NULL " + . ($args{where} ? "AND ($args{where}) " : "") + . 
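+ # Probe row by row from the invalid endpoint toward the interior of
+ # the range, up to $tries rows, until a value passes the validator: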
"ORDER BY $col LIMIT 1"; + PTDEBUG && _d($dbh, $sql); + my $sth = $dbh->prepare($sql); + + my $last_val = $val; + while ( $tries-- ) { + $sth->execute($last_val); + my ($next_val) = $sth->fetchrow_array(); + PTDEBUG && _d('Next value:', $next_val, '; tries left:', $tries); + if ( !defined $next_val ) { + PTDEBUG && _d('No more rows in table'); + last; + } + if ( defined $validate->($dbh, $next_val) ) { + PTDEBUG && _d('First valid value:', $next_val); + $sth->finish(); + return $next_val; + } + $last_val = $next_val; + } + $sth->finish(); + $val = undef; # no valid value found + + return $val; +} + +sub _validate_temporal_value { + my ( $dbh, $val ) = @_; + my $sql = "SELECT IF(TIME_FORMAT(?,'%H:%i:%s')=?, TIME_TO_SEC(?), TO_DAYS(?))"; + my $res; + eval { + PTDEBUG && _d($dbh, $sql, $val); + my $sth = $dbh->prepare($sql); + $sth->execute($val, $val, $val, $val); + ($res) = $sth->fetchrow_array(); + $sth->finish(); + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + } + return $res; +} + +sub get_nonzero_value { + my ( $self, %args ) = @_; + my @required_args = qw(dbh db_tbl col col_type); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $db_tbl, $col, $col_type) = @args{@required_args}; + my $tries = defined $args{tries} ? $args{tries} : 5; + my $val = $args{val}; + + my $is_nonzero = $col_type =~ m/time|date/ ? \&_validate_temporal_value + : sub { return $_[1]; }; + + if ( !$is_nonzero->($dbh, $val) ) { # quasi-double-negative, sorry + PTDEBUG && _d('Discarding zero value:', $val); + my $sql = "SELECT $col FROM $db_tbl " + . ($args{index_hint} ? "$args{index_hint} " : "") + . "WHERE $col > ? AND $col IS NOT NULL " + . ($args{where} ? "AND ($args{where}) " : '') + . "ORDER BY $col LIMIT 1"; + PTDEBUG && _d($sql); + my $sth = $dbh->prepare($sql); + + my $last_val = $val; + while ( $tries-- ) { + $sth->execute($last_val); + my ($next_val) = $sth->fetchrow_array(); + if ( $is_nonzero->($dbh, $next_val) ) { + PTDEBUG && _d('First non-zero value:', $next_val); + $sth->finish(); + return $next_val; + } + $last_val = $next_val; + } + $sth->finish(); + $val = undef; # no non-zero value found + } + + return $val; +} + +sub base_count { + my ( $self, %args ) = @_; + my @required_args = qw(count_to base symbols); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my ($n, $base, $symbols) = @args{@required_args}; + + return $symbols->[0] if $n == 0; + + my $highest_power = floor(log($n+0.00001)/log($base)); + if ( $highest_power == 0 ){ + return $symbols->[$n]; + } + + my @base_powers; + for my $power ( 0..$highest_power ) { + push @base_powers, ($base**$power) || 1; + } + + my @base_multiples; + foreach my $base_power ( reverse @base_powers ) { + my $multiples = floor(($n+0.00001) / $base_power); + push @base_multiples, $multiples; + $n -= $multiples * $base_power; + } + return join('', map { $symbols->[$_] } @base_multiples); +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End TableChunker package +# ########################################################################### + +# ########################################################################### +# TableChecksum package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/TableChecksum.pm +# t/lib/TableChecksum.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package TableChecksum; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use List::Util qw(max); + +our %ALGOS = ( + CHECKSUM => { pref => 0, hash => 0 }, + BIT_XOR => { pref => 2, hash => 1 }, + ACCUM => { pref => 3, hash => 1 }, +); + +sub new { + my ( $class, %args ) = @_; + foreach my $arg ( qw(Quoter) ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my $self = { %args }; + return bless $self, $class; +} + +sub crc32 { + my ( $self, $string ) = @_; + my $poly = 0xEDB88320; + my $crc = 0xFFFFFFFF; + foreach my $char ( split(//, $string) ) { + my $comp = ($crc ^ ord($char)) & 0xFF; + for ( 1 .. 8 ) { + $comp = $comp & 1 ? $poly ^ ($comp >> 1) : $comp >> 1; + } + $crc = (($crc >> 8) & 0x00FFFFFF) ^ $comp; + } + return $crc ^ 0xFFFFFFFF; +} + +sub get_crc_wid { + my ( $self, $dbh, $func ) = @_; + my $crc_wid = 16; + if ( uc $func ne 'FNV_64' && uc $func ne 'FNV1A_64' ) { + eval { + my ($val) = $dbh->selectrow_array("SELECT $func('a')"); + $crc_wid = max(16, length($val)); + }; + } + return $crc_wid; +} + +sub get_crc_type { + my ( $self, $dbh, $func ) = @_; + my $type = ''; + my $length = 0; + my $sql = "SELECT $func('a')"; + my $sth = $dbh->prepare($sql); + eval { + $sth->execute(); + $type = $sth->{mysql_type_name}->[0]; + $length = $sth->{mysql_length}->[0]; + PTDEBUG && _d($sql, $type, $length); + if ( $type eq 'integer' && $length < 11 ) { + $type = 'int'; + } + elsif ( $type eq 'bigint' && $length < 20 ) { + $type = 'int'; + } + }; + $sth->finish; + PTDEBUG && _d('crc_type:', $type, 'length:', $length); + return ($type, $length); +} + +sub best_algorithm { + my ( $self, %args ) = @_; + my ( $alg, $dbh ) = @args{ qw(algorithm dbh) }; + my @choices = sort { $ALGOS{$a}->{pref} <=> $ALGOS{$b}->{pref} } keys %ALGOS; + die "Invalid checksum algorithm $alg" + if $alg && !$ALGOS{$alg}; + + if ( + $args{where} || $args{chunk} # CHECKSUM does whole table + || $args{replicate}) # CHECKSUM can't do INSERT.. 
SELECT + { + PTDEBUG && _d('Cannot use CHECKSUM algorithm'); + @choices = grep { $_ ne 'CHECKSUM' } @choices; + } + + + if ( $alg && grep { $_ eq $alg } @choices ) { + PTDEBUG && _d('User requested', $alg, 'algorithm'); + return $alg; + } + + if ( $args{count} && grep { $_ ne 'CHECKSUM' } @choices ) { + PTDEBUG && _d('Not using CHECKSUM algorithm because COUNT desired'); + @choices = grep { $_ ne 'CHECKSUM' } @choices; + } + + PTDEBUG && _d('Algorithms, in order:', @choices); + return $choices[0]; +} + +sub is_hash_algorithm { + my ( $self, $algorithm ) = @_; + return $ALGOS{$algorithm} && $ALGOS{$algorithm}->{hash}; +} + +sub choose_hash_func { + my ( $self, %args ) = @_; + my @funcs = qw(CRC32 FNV1A_64 FNV_64 MD5 SHA1); + if ( $args{function} ) { + unshift @funcs, $args{function}; + } + my ($result, $error); + do { + my $func; + eval { + $func = shift(@funcs); + my $sql = "SELECT $func('test-string')"; + PTDEBUG && _d($sql); + $args{dbh}->do($sql); + $result = $func; + }; + if ( $EVAL_ERROR && $EVAL_ERROR =~ m/failed: (.*?) at \S+ line/ ) { + $error .= qq{$func cannot be used because "$1"\n}; + PTDEBUG && _d($func, 'cannot be used because', $1); + } + } while ( @funcs && !$result ); + + die $error unless $result; + PTDEBUG && _d('Chosen hash func:', $result); + return $result; +} + +sub optimize_xor { + my ( $self, %args ) = @_; + my ($dbh, $func) = @args{qw(dbh function)}; + + die "$func never needs the BIT_XOR optimization" + if $func =~ m/^(?:FNV1A_64|FNV_64|CRC32)$/i; + + my $opt_slice = 0; + my $unsliced = uc $dbh->selectall_arrayref("SELECT $func('a')")->[0]->[0]; + my $sliced = ''; + my $start = 1; + my $crc_wid = length($unsliced) < 16 ? 16 : length($unsliced); + + do { # Try different positions till sliced result equals non-sliced. + PTDEBUG && _d('Trying slice', $opt_slice); + $dbh->do(q{SET @crc := '', @cnt := 0}); + my $slices = $self->make_xor_slices( + query => "\@crc := $func('a')", + crc_wid => $crc_wid, + opt_slice => $opt_slice, + ); + + my $sql = "SELECT CONCAT($slices) AS TEST FROM (SELECT NULL) AS x"; + $sliced = ($dbh->selectrow_array($sql))[0]; + if ( $sliced ne $unsliced ) { + PTDEBUG && _d('Slice', $opt_slice, 'does not work'); + $start += 16; + ++$opt_slice; + } + } while ( $start < $crc_wid && $sliced ne $unsliced ); + + if ( $sliced eq $unsliced ) { + PTDEBUG && _d('Slice', $opt_slice, 'works'); + return $opt_slice; + } + else { + PTDEBUG && _d('No slice works'); + return undef; + } +} + +sub make_xor_slices { + my ( $self, %args ) = @_; + foreach my $arg ( qw(query crc_wid) ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my ( $query, $crc_wid, $opt_slice ) = @args{qw(query crc_wid opt_slice)}; + + my @slices; + for ( my $start = 1; $start <= $crc_wid; $start += 16 ) { + my $len = $crc_wid - $start + 1; + if ( $len > 16 ) { + $len = 16; + } + push @slices, + "LPAD(CONV(BIT_XOR(" + . "CAST(CONV(SUBSTRING(\@crc, $start, $len), 16, 10) AS UNSIGNED))" + . ", 10, 16), $len, '0')"; + } + + if ( defined $opt_slice && $opt_slice < @slices ) { + $slices[$opt_slice] =~ s/\@crc/\@crc := $query/; + } + else { + map { s/\@crc/$query/ } @slices; + } + + return join(', ', @slices); +} + +sub make_row_checksum { + my ( $self, %args ) = @_; + my ( $tbl_struct, $func ) = @args{ qw(tbl_struct function) }; + my $q = $self->{Quoter}; + + my $sep = $args{sep} || '#'; + $sep =~ s/'//g; + $sep ||= '#'; + + my $ignorecols = $args{ignorecols} || {}; + + my %cols = map { lc($_) => 1 } + grep { !exists $ignorecols->{$_} } + ($args{cols} ? 
@{$args{cols}} : @{$tbl_struct->{cols}}); + my %seen; + my @cols = + map { + my $type = $tbl_struct->{type_for}->{$_}; + my $result = $q->quote($_); + if ( $type eq 'timestamp' ) { + $result .= ' + 0'; + } + elsif ( $args{float_precision} && $type =~ m/float|double/ ) { + $result = "ROUND($result, $args{float_precision})"; + } + elsif ( $args{trim} && $type =~ m/varchar/ ) { + $result = "TRIM($result)"; + } + elsif ( $type =~ m/binary|text|blob/ ) { + $result = "CRC32($result)"; + } + $result; + } + grep { + $cols{$_} && !$seen{$_}++ + } + @{$tbl_struct->{cols}}; + + my $query; + if ( !$args{no_cols} ) { + $query = join(', ', + map { + my $col = $_; + if ( $col =~ m/\+ 0/ ) { + my ($real_col) = /^(\S+)/; + $col .= " AS $real_col"; + } + elsif ( $col =~ m/TRIM/ ) { + my ($real_col) = m/TRIM\(([^\)]+)\)/; + $col .= " AS $real_col"; + } + elsif ( $col =~ m/CRC32/ ) { + my ($real_col) = m/CRC32\(([^\)]+)\)/; + $col .= " AS $real_col"; + } + $col; + } @cols) + . ', '; + } + + if ( uc $func ne 'FNV_64' && uc $func ne 'FNV1A_64' ) { + my @nulls = grep { $cols{$_} } @{$tbl_struct->{null_cols}}; + if ( @nulls ) { + my $bitmap = "CONCAT(" + . join(', ', map { 'ISNULL(' . $q->quote($_) . ')' } @nulls) + . ")"; + push @cols, $bitmap; + } + + $query .= @cols > 1 + ? "$func(CONCAT_WS('$sep', " . join(', ', @cols) . '))' + : "$func($cols[0])"; + } + else { + my $fnv_func = uc $func; + $query .= "$fnv_func(" . join(', ', @cols) . ')'; + } + + return $query; +} + +sub make_checksum_query { + my ( $self, %args ) = @_; + my @required_args = qw(db tbl tbl_struct algorithm crc_wid crc_type); + foreach my $arg( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ( $db, $tbl, $tbl_struct, $algorithm, + $crc_wid, $crc_type) = @args{@required_args}; + my $func = $args{function}; + my $q = $self->{Quoter}; + my $result; + + die "Invalid or missing checksum algorithm" + unless $algorithm && $ALGOS{$algorithm}; + + if ( $algorithm eq 'CHECKSUM' ) { + return "CHECKSUM TABLE " . $q->quote($db, $tbl); + } + + my $expr = $self->make_row_checksum(%args, no_cols=>1); + + if ( $algorithm eq 'BIT_XOR' ) { + if ( $crc_type =~ m/int$/ ) { + $result = "COALESCE(LOWER(CONV(BIT_XOR(CAST($expr AS UNSIGNED)), 10, 16)), 0) AS crc "; + } + else { + my $slices = $self->make_xor_slices( query => $expr, %args ); + $result = "COALESCE(LOWER(CONCAT($slices)), 0) AS crc "; + } + } + else { + if ( $crc_type =~ m/int$/ ) { + $result = "COALESCE(RIGHT(MAX(" + . "\@crc := CONCAT(LPAD(\@cnt := \@cnt + 1, 16, '0'), " + . "CONV(CAST($func(CONCAT(\@crc, $expr)) AS UNSIGNED), 10, 16))" + . "), $crc_wid), 0) AS crc "; + } + else { + $result = "COALESCE(RIGHT(MAX(" + . "\@crc := CONCAT(LPAD(\@cnt := \@cnt + 1, 16, '0'), " + . "$func(CONCAT(\@crc, $expr)))" + . "), $crc_wid), 0) AS crc "; + } + } + if ( $args{replicate} ) { + $result = "REPLACE /*PROGRESS_COMMENT*/ INTO $args{replicate} " + . "(db, tbl, chunk, boundaries, this_cnt, this_crc) " + . "SELECT ?, ?, /*CHUNK_NUM*/ ?, COUNT(*) AS cnt, $result"; + } + else { + $result = "SELECT " + . ($args{buffer} ? 'SQL_BUFFER_RESULT ' : '') + . "/*PROGRESS_COMMENT*//*CHUNK_NUM*/ COUNT(*) AS cnt, $result"; + } + return $result . "FROM /*DB_TBL*//*INDEX_HINT*//*WHERE*/"; +} + +sub find_replication_differences { + my ( $self, $dbh, $table ) = @_; + + my $sql + = "SELECT db, tbl, CONCAT(db, '.', tbl) AS `table`, " + . "chunk, chunk_index, lower_boundary, upper_boundary, " + . "COALESCE(this_cnt-master_cnt, 0) AS cnt_diff, " + . "COALESCE(" + . 
"this_crc <> master_crc OR ISNULL(master_crc) <> ISNULL(this_crc), 0" + . ") AS crc_diff, this_cnt, master_cnt, this_crc, master_crc " + . "FROM $table " + . "WHERE master_cnt <> this_cnt OR master_crc <> this_crc " + . "OR ISNULL(master_crc) <> ISNULL(this_crc)"; + PTDEBUG && _d($sql); + my $diffs = $dbh->selectall_arrayref($sql, { Slice => {} }); + return $diffs; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End TableChecksum package +# ########################################################################### + +# ########################################################################### +# TableSyncChunk package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/TableSyncChunk.pm +# t/lib/TableSyncChunk.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package TableSyncChunk; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +sub new { + my ( $class, %args ) = @_; + foreach my $arg ( qw(TableChunker Quoter) ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my $self = { %args }; + return bless $self, $class; +} + +sub name { + return 'Chunk'; +} + +sub set_callback { + my ( $self, $callback, $code ) = @_; + $self->{$callback} = $code; + return; +} + +sub can_sync { + my ( $self, %args ) = @_; + foreach my $arg ( qw(tbl_struct) ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + + my ($exact, @chunkable_cols) = $self->{TableChunker}->find_chunk_columns( + %args, + exact => 1, + ); + return unless $exact; + + my $colno; + if ( $args{chunk_col} || $args{chunk_index} ) { + PTDEBUG && _d('Checking requested col', $args{chunk_col}, + 'and/or index', $args{chunk_index}); + for my $i ( 0..$#chunkable_cols ) { + if ( $args{chunk_col} ) { + next unless $chunkable_cols[$i]->{column} eq $args{chunk_col}; + } + if ( $args{chunk_index} ) { + next unless $chunkable_cols[$i]->{index} eq $args{chunk_index}; + } + $colno = $i; + last; + } + + if ( !$colno ) { + PTDEBUG && _d('Cannot chunk on column', $args{chunk_col}, + 'and/or using index', $args{chunk_index}); + return; + } + } + else { + $colno = 0; # First, best chunkable column/index. 
+   }
+
+   PTDEBUG && _d('Can chunk on column', $chunkable_cols[$colno]->{column},
+      'using index', $chunkable_cols[$colno]->{index});
+   return (
+      1,
+      chunk_col   => $chunkable_cols[$colno]->{column},
+      chunk_index => $chunkable_cols[$colno]->{index},
+   );
+}
+
+sub prepare_to_sync {
+   my ( $self, %args ) = @_;
+   my @required_args = qw(dbh db tbl tbl_struct cols chunk_col
+                          chunk_size crc_col ChangeHandler);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless defined $args{$arg};
+   }
+   my $chunker = $self->{TableChunker};
+
+   $self->{chunk_col}       = $args{chunk_col};
+   $self->{crc_col}         = $args{crc_col};
+   $self->{index_hint}      = $args{index_hint};
+   $self->{buffer_in_mysql} = $args{buffer_in_mysql};
+   $self->{ChangeHandler}   = $args{ChangeHandler};
+
+   $self->{ChangeHandler}->fetch_back($args{dbh});
+
+   push @{$args{cols}}, $args{chunk_col};
+
+   my @chunks;
+   my %range_params = $chunker->get_range_statistics(%args);
+   if ( !grep { !defined $range_params{$_} } qw(min max rows_in_range) ) {
+      ($args{chunk_size}) = $chunker->size_to_rows(%args);
+      @chunks = $chunker->calculate_chunks(%args, %range_params);
+   }
+   else {
+      PTDEBUG && _d('No range statistics; using single chunk 1=1');
+      @chunks = '1=1';
+   }
+
+   $self->{chunks}    = \@chunks;
+   $self->{chunk_num} = 0;
+   $self->{state}     = 0;
+
+   return;
+}
+
+sub uses_checksum {
+   return 1;
+}
+
+sub set_checksum_queries {
+   my ( $self, $chunk_sql, $row_sql ) = @_;
+   die "I need a chunk_sql argument" unless $chunk_sql;
+   die "I need a row_sql argument" unless $row_sql;
+   $self->{chunk_sql} = $chunk_sql;
+   $self->{row_sql}   = $row_sql;
+   return;
+}
+
+sub prepare_sync_cycle {
+   my ( $self, $host ) = @_;
+   my $sql = q{SET @crc := '', @cnt := 0};
+   PTDEBUG && _d($sql);
+   $host->{dbh}->do($sql);
+   return;
+}
+
+sub get_sql {
+   my ( $self, %args ) = @_;
+   if ( $self->{state} ) { # select rows in a chunk
+      my $q = $self->{Quoter};
+      return 'SELECT /*rows in chunk*/ '
+         . ($self->{buffer_in_mysql} ? 'SQL_BUFFER_RESULT ' : '')
+         . $self->{row_sql} . " AS $self->{crc_col}"
+         . ' FROM ' . $self->{Quoter}->quote(@args{qw(database table)})
+         . ' ' . ($self->{index_hint} || '')
+         . ' WHERE (' . $self->{chunks}->[$self->{chunk_num}] . ')'
+         . ($args{where} ? " AND ($args{where})" : '')
+         . ' ORDER BY ' .
join(', ', map {$q->quote($_) } @{$self->key_cols()}); + } + else { # select a chunk of rows + return $self->{TableChunker}->inject_chunks( + database => $args{database}, + table => $args{table}, + chunks => $self->{chunks}, + chunk_num => $self->{chunk_num}, + query => $self->{chunk_sql}, + index_hint => $self->{index_hint}, + where => [ $args{where} ], + ); + } +} + +sub same_row { + my ( $self, %args ) = @_; + my ($lr, $rr) = @args{qw(lr rr)}; + + if ( $self->{state} ) { # checksumming rows + if ( $lr->{$self->{crc_col}} ne $rr->{$self->{crc_col}} ) { + my $action = 'UPDATE'; + my $auth_row = $lr; + my $change_dbh; + + if ( $self->{same_row} ) { + ($action, $auth_row, $change_dbh) = $self->{same_row}->(%args); + } + + $self->{ChangeHandler}->change( + $action, # Execute the action + $auth_row, # with these row values + $self->key_cols(), # identified by these key cols + $change_dbh, # on this dbh + ); + } + } + elsif ( $lr->{cnt} != $rr->{cnt} || $lr->{crc} ne $rr->{crc} ) { + PTDEBUG && _d('Rows:', Dumper($lr, $rr)); + PTDEBUG && _d('Will examine this chunk before moving to next'); + $self->{state} = 1; # Must examine this chunk row-by-row + } +} + +sub not_in_right { + my ( $self, %args ) = @_; + die "Called not_in_right in state 0" unless $self->{state}; + + my $action = 'INSERT'; + my $auth_row = $args{lr}; + my $change_dbh; + + if ( $self->{not_in_right} ) { + ($action, $auth_row, $change_dbh) = $self->{not_in_right}->(%args); + } + + $self->{ChangeHandler}->change( + $action, # Execute the action + $auth_row, # with these row values + $self->key_cols(), # identified by these key cols + $change_dbh, # on this dbh + ); + return; +} + +sub not_in_left { + my ( $self, %args ) = @_; + die "Called not_in_left in state 0" unless $self->{state}; + + my $action = 'DELETE'; + my $auth_row = $args{rr}; + my $change_dbh; + + if ( $self->{not_in_left} ) { + ($action, $auth_row, $change_dbh) = $self->{not_in_left}->(%args); + } + + $self->{ChangeHandler}->change( + $action, # Execute the action + $auth_row, # with these row values + $self->key_cols(), # identified by these key cols + $change_dbh, # on this dbh + ); + return; +} + +sub done_with_rows { + my ( $self ) = @_; + if ( $self->{state} == 1 ) { + $self->{state} = 2; + PTDEBUG && _d('Setting state =', $self->{state}); + } + else { + $self->{state} = 0; + $self->{chunk_num}++; + PTDEBUG && _d('Setting state =', $self->{state}, + 'chunk_num =', $self->{chunk_num}); + } + return; +} + +sub done { + my ( $self ) = @_; + PTDEBUG && _d('Done with', $self->{chunk_num}, 'of', + scalar(@{$self->{chunks}}), 'chunks'); + PTDEBUG && $self->{state} && _d('Chunk differs; must examine rows'); + return $self->{state} == 0 + && $self->{chunk_num} >= scalar(@{$self->{chunks}}) +} + +sub pending_changes { + my ( $self ) = @_; + if ( $self->{state} ) { + PTDEBUG && _d('There are pending changes'); + return 1; + } + else { + PTDEBUG && _d('No pending changes'); + return 0; + } +} + +sub key_cols { + my ( $self ) = @_; + my @cols; + if ( $self->{state} == 0 ) { + @cols = qw(chunk_num); + } + else { + @cols = $self->{chunk_col}; + } + PTDEBUG && _d('State', $self->{state},',', 'key cols', join(', ', @cols)); + return \@cols; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End TableSyncChunk package +# ########################################################################### + +# ########################################################################### +# TableSyncNibble package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/TableSyncNibble.pm +# t/lib/TableSyncNibble.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package TableSyncNibble; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +sub new { + my ( $class, %args ) = @_; + foreach my $arg ( qw(TableNibbler TableChunker TableParser Quoter) ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my $self = { %args }; + return bless $self, $class; +} + +sub name { + return 'Nibble'; +} + +sub can_sync { + my ( $self, %args ) = @_; + foreach my $arg ( qw(tbl_struct) ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + + my $nibble_index = $self->{TableParser}->find_best_index($args{tbl_struct}); + if ( $nibble_index ) { + PTDEBUG && _d('Best nibble index:', Dumper($nibble_index)); + if ( !$args{tbl_struct}->{keys}->{$nibble_index}->{is_unique} ) { + PTDEBUG && _d('Best nibble index is not unique'); + return; + } + if ( $args{chunk_index} && $args{chunk_index} ne $nibble_index ) { + PTDEBUG && _d('Best nibble index is not requested index', + $args{chunk_index}); + return; + } + } + else { + PTDEBUG && _d('No best nibble index returned'); + return; + } + + my $small_table = 0; + if ( $args{src} && $args{src}->{dbh} ) { + my $dbh = $args{src}->{dbh}; + my $db = $args{src}->{db}; + my $tbl = $args{src}->{tbl}; + my $table_status; + eval { + my $sql = "SHOW TABLE STATUS FROM `$db` LIKE " + . $self->{Quoter}->literal_like($tbl); + PTDEBUG && _d($sql); + $table_status = $dbh->selectrow_hashref($sql); + }; + PTDEBUG && $EVAL_ERROR && _d($EVAL_ERROR); + if ( $table_status ) { + my $n_rows = defined $table_status->{Rows} ? $table_status->{Rows} + : defined $table_status->{rows} ? 
$table_status->{rows} + : undef; + $small_table = 1 if defined $n_rows && $n_rows <= 100; + } + } + PTDEBUG && _d('Small table:', $small_table); + + PTDEBUG && _d('Can nibble using index', $nibble_index); + return ( + 1, + chunk_index => $nibble_index, + key_cols => $args{tbl_struct}->{keys}->{$nibble_index}->{cols}, + small_table => $small_table, + ); +} + +sub prepare_to_sync { + my ( $self, %args ) = @_; + my @required_args = qw(dbh db tbl tbl_struct chunk_index key_cols chunk_size + crc_col ChangeHandler); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + + $self->{dbh} = $args{dbh}; + $self->{tbl_struct} = $args{tbl_struct}; + $self->{crc_col} = $args{crc_col}; + $self->{index_hint} = $args{index_hint}; + $self->{key_cols} = $args{key_cols}; + ($self->{chunk_size}) = $self->{TableChunker}->size_to_rows(%args); + $self->{buffer_in_mysql} = $args{buffer_in_mysql}; + $self->{small_table} = $args{small_table}; + $self->{ChangeHandler} = $args{ChangeHandler}; + + $self->{ChangeHandler}->fetch_back($args{dbh}); + + my %seen; + my @ucols = grep { !$seen{$_}++ } @{$args{cols}}, @{$args{key_cols}}; + $args{cols} = \@ucols; + + $self->{sel_stmt} = $self->{TableNibbler}->generate_asc_stmt( + %args, + index => $args{chunk_index}, # expects an index arg, not chunk_index + asc_only => 1, + ); + + $self->{nibble} = 0; + $self->{cached_row} = undef; + $self->{cached_nibble} = undef; + $self->{cached_boundaries} = undef; + $self->{state} = 0; + + return; +} + +sub uses_checksum { + return 1; +} + +sub set_checksum_queries { + my ( $self, $nibble_sql, $row_sql ) = @_; + die "I need a nibble_sql argument" unless $nibble_sql; + die "I need a row_sql argument" unless $row_sql; + $self->{nibble_sql} = $nibble_sql; + $self->{row_sql} = $row_sql; + return; +} + +sub prepare_sync_cycle { + my ( $self, $host ) = @_; + my $sql = q{SET @crc := '', @cnt := 0}; + PTDEBUG && _d($sql); + $host->{dbh}->do($sql); + return; +} + +sub get_sql { + my ( $self, %args ) = @_; + if ( $self->{state} ) { + my $q = $self->{Quoter}; + return 'SELECT /*rows in nibble*/ ' + . ($self->{buffer_in_mysql} ? 'SQL_BUFFER_RESULT ' : '') + . $self->{row_sql} . " AS $self->{crc_col}" + . ' FROM ' . $q->quote(@args{qw(database table)}) + . ' ' . ($self->{index_hint} ? $self->{index_hint} : '') + . ' WHERE (' . $self->__get_boundaries(%args) . ')' + . ($args{where} ? " AND ($args{where})" : '') + . ' ORDER BY ' . join(', ', map {$q->quote($_) } @{$self->key_cols()}); + } + else { + my $where = $self->__get_boundaries(%args); + return $self->{TableChunker}->inject_chunks( + database => $args{database}, + table => $args{table}, + chunks => [ $where ], + chunk_num => 0, + query => $self->{nibble_sql}, + index_hint => $self->{index_hint}, + where => [ $args{where} ], + ); + } +} + +sub __get_boundaries { + my ( $self, %args ) = @_; + my $q = $self->{Quoter}; + my $s = $self->{sel_stmt}; + + my $lb; # Lower boundary part of WHERE + my $ub; # Upper boundary part of WHERE + my $row; # Next upper boundary row or cached_row + + if ( $self->{cached_boundaries} ) { + PTDEBUG && _d('Using cached boundaries'); + return $self->{cached_boundaries}; + } + + if ( $self->{cached_row} && $self->{cached_nibble} == $self->{nibble} ) { + PTDEBUG && _d('Using cached row for boundaries'); + $row = $self->{cached_row}; + } + else { + PTDEBUG && _d('Getting next upper boundary row'); + my $sql; + ($sql, $lb) = $self->__make_boundary_sql(%args); # $lb from outer scope! 
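+
+      # On the first nibble of a table that is not known to be small, the
+      # EXPLAIN check below confirms that MySQL actually chose the index
+      # this plugin intends to ascend; nibbling along a different index
+      # could scan far more rows per boundary query than chunk_size.
+      # A sketch of what __get_explain_index() inspects, for a hypothetical
+      # table `db`.`t` with a unique index on (id) and chunk_size 500:
+      #
+      #   EXPLAIN SELECT id FROM `db`.`t` ORDER BY id LIMIT 499, 1;
+      #   -- the EXPLAIN row's `key` must be 'id', otherwise the die()
+      #   -- below refuses to nibble rather than risk a slow full scan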
+ + if ( $self->{nibble} == 0 && !$self->{small_table} ) { + my $explain_index = $self->__get_explain_index($sql); + if ( lc($explain_index || '') ne lc($s->{index}) ) { + die 'Cannot nibble table '.$q->quote($args{database}, $args{table}) + . " because MySQL chose " + . ($explain_index ? "the `$explain_index`" : 'no') . ' index' + . " instead of the `$s->{index}` index"; + } + } + + $row = $self->{dbh}->selectrow_hashref($sql); + PTDEBUG && _d($row ? 'Got a row' : "Didn't get a row"); + } + + if ( $row ) { + my $i = 0; + $ub = $s->{boundaries}->{'<='}; + $ub =~ s/\?/$q->quote_val($row->{$s->{scols}->[$i++]})/eg; + } + else { + PTDEBUG && _d('No upper boundary'); + $ub = '1=1'; + } + + my $where = $lb ? "($lb AND $ub)" : $ub; + + $self->{cached_row} = $row; + $self->{cached_nibble} = $self->{nibble}; + $self->{cached_boundaries} = $where; + + PTDEBUG && _d('WHERE clause:', $where); + return $where; +} + +sub __make_boundary_sql { + my ( $self, %args ) = @_; + my $lb; + my $q = $self->{Quoter}; + my $s = $self->{sel_stmt}; + my $sql = "SELECT /*nibble boundary $self->{nibble}*/ " + . join(',', map { $q->quote($_) } @{$s->{cols}}) + . " FROM " . $q->quote($args{database}, $args{table}) + . ' ' . ($self->{index_hint} || '') + . ($args{where} ? " WHERE ($args{where})" : ""); + + if ( $self->{nibble} ) { + my $tmp = $self->{cached_row}; + my $i = 0; + $lb = $s->{boundaries}->{'>'}; + $lb =~ s/\?/$q->quote_val($tmp->{$s->{scols}->[$i++]})/eg; + $sql .= $args{where} ? " AND $lb" : " WHERE $lb"; + } + $sql .= " ORDER BY " . join(',', map { $q->quote($_) } @{$self->{key_cols}}) + . ' LIMIT ' . ($self->{chunk_size} - 1) . ', 1'; + PTDEBUG && _d('Lower boundary:', $lb); + PTDEBUG && _d('Next boundary sql:', $sql); + return $sql, $lb; +} + +sub __get_explain_index { + my ( $self, $sql ) = @_; + return unless $sql; + my $explain; + eval { + $explain = $self->{dbh}->selectall_arrayref("EXPLAIN $sql",{Slice => {}}); + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + return; + } + PTDEBUG && _d('EXPLAIN key:', $explain->[0]->{key}); + return $explain->[0]->{key}; +} + +sub same_row { + my ( $self, %args ) = @_; + my ($lr, $rr) = @args{qw(lr rr)}; + if ( $self->{state} ) { + if ( $lr->{$self->{crc_col}} ne $rr->{$self->{crc_col}} ) { + $self->{ChangeHandler}->change('UPDATE', $lr, $self->key_cols()); + } + } + elsif ( $lr->{cnt} != $rr->{cnt} || $lr->{crc} ne $rr->{crc} ) { + PTDEBUG && _d('Rows:', Dumper($lr, $rr)); + PTDEBUG && _d('Will examine this nibble before moving to next'); + $self->{state} = 1; # Must examine this nibble row-by-row + } +} + +sub not_in_right { + my ( $self, %args ) = @_; + die "Called not_in_right in state 0" unless $self->{state}; + $self->{ChangeHandler}->change('INSERT', $args{lr}, $self->key_cols()); +} + +sub not_in_left { + my ( $self, %args ) = @_; + die "Called not_in_left in state 0" unless $self->{state}; + $self->{ChangeHandler}->change('DELETE', $args{rr}, $self->key_cols()); +} + +sub done_with_rows { + my ( $self ) = @_; + if ( $self->{state} == 1 ) { + $self->{state} = 2; + PTDEBUG && _d('Setting state =', $self->{state}); + } + else { + $self->{state} = 0; + $self->{nibble}++; + delete $self->{cached_boundaries}; + PTDEBUG && _d('Setting state =', $self->{state}, + ', nibble =', $self->{nibble}); + } +} + +sub done { + my ( $self ) = @_; + PTDEBUG && _d('Done with nibble', $self->{nibble}); + PTDEBUG && $self->{state} && _d('Nibble differs; must examine rows'); + return $self->{state} == 0 && $self->{nibble} && !$self->{cached_row}; +} + +sub 
pending_changes { + my ( $self ) = @_; + if ( $self->{state} ) { + PTDEBUG && _d('There are pending changes'); + return 1; + } + else { + PTDEBUG && _d('No pending changes'); + return 0; + } +} + +sub key_cols { + my ( $self ) = @_; + my @cols; + if ( $self->{state} == 0 ) { + @cols = qw(chunk_num); + } + else { + @cols = @{$self->{key_cols}}; + } + PTDEBUG && _d('State', $self->{state},',', 'key cols', join(', ', @cols)); + return \@cols; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End TableSyncNibble package +# ########################################################################### + +# ########################################################################### +# TableSyncGroupBy package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/TableSyncGroupBy.pm +# t/lib/TableSyncGroupBy.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package TableSyncGroupBy; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +sub new { + my ( $class, %args ) = @_; + foreach my $arg ( qw(Quoter) ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $self = { %args }; + return bless $self, $class; +} + +sub name { + return 'GroupBy'; +} + +sub can_sync { + return 1; # We can sync anything. +} + +sub prepare_to_sync { + my ( $self, %args ) = @_; + my @required_args = qw(tbl_struct cols ChangeHandler); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + + $self->{cols} = $args{cols}; + $self->{buffer_in_mysql} = $args{buffer_in_mysql}; + $self->{ChangeHandler} = $args{ChangeHandler}; + + $self->{count_col} = '__maatkit_count'; + while ( $args{tbl_struct}->{is_col}->{$self->{count_col}} ) { + $self->{count_col} = "_$self->{count_col}"; + } + PTDEBUG && _d('COUNT column will be named', $self->{count_col}); + + $self->{done} = 0; + + return; +} + +sub uses_checksum { + return 0; # We don't need checksum queries. +} + +sub set_checksum_queries { + return; # This shouldn't be called, but just in case. +} + +sub prepare_sync_cycle { + my ( $self, $host ) = @_; + return; +} + +sub get_sql { + my ( $self, %args ) = @_; + my $cols = join(', ', map { $self->{Quoter}->quote($_) } @{$self->{cols}}); + return "SELECT" + . ($self->{buffer_in_mysql} ? ' SQL_BUFFER_RESULT' : '') + . " $cols, COUNT(*) AS $self->{count_col}" + . ' FROM ' . $self->{Quoter}->quote(@args{qw(database table)}) + . ' WHERE ' . ( $args{where} || '1=1' ) + . " GROUP BY $cols ORDER BY $cols"; +} + +sub same_row { + my ( $self, %args ) = @_; + my ($lr, $rr) = @args{qw(lr rr)}; + my $cc = $self->{count_col}; + my $lc = $lr->{$cc}; + my $rc = $rr->{$cc}; + my $diff = abs($lc - $rc); + return unless $diff; + $lr = { %$lr }; + delete $lr->{$cc}; + $rr = { %$rr }; + delete $rr->{$cc}; + foreach my $i ( 1 .. 
$diff ) { + if ( $lc > $rc ) { + $self->{ChangeHandler}->change('INSERT', $lr, $self->key_cols()); + } + else { + $self->{ChangeHandler}->change('DELETE', $rr, $self->key_cols()); + } + } +} + +sub not_in_right { + my ( $self, %args ) = @_; + my $lr = $args{lr}; + $lr = { %$lr }; + my $cnt = delete $lr->{$self->{count_col}}; + foreach my $i ( 1 .. $cnt ) { + $self->{ChangeHandler}->change('INSERT', $lr, $self->key_cols()); + } +} + +sub not_in_left { + my ( $self, %args ) = @_; + my $rr = $args{rr}; + $rr = { %$rr }; + my $cnt = delete $rr->{$self->{count_col}}; + foreach my $i ( 1 .. $cnt ) { + $self->{ChangeHandler}->change('DELETE', $rr, $self->key_cols()); + } +} + +sub done_with_rows { + my ( $self ) = @_; + $self->{done} = 1; +} + +sub done { + my ( $self ) = @_; + return $self->{done}; +} + +sub key_cols { + my ( $self ) = @_; + return $self->{cols}; +} + +sub pending_changes { + my ( $self ) = @_; + return; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End TableSyncGroupBy package +# ########################################################################### + +# ########################################################################### +# TableSyncer package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/TableSyncer.pm +# t/lib/TableSyncer.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package TableSyncer; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(MasterSlave Quoter TableChecksum Retry); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my $self = { %args }; + return bless $self, $class; +} + +sub get_best_plugin { + my ( $self, %args ) = @_; + foreach my $arg ( qw(plugins tbl_struct) ) { + die "I need a $arg argument" unless $args{$arg}; + } + PTDEBUG && _d('Getting best plugin'); + foreach my $plugin ( @{$args{plugins}} ) { + PTDEBUG && _d('Trying plugin', $plugin->name); + my ($can_sync, %plugin_args) = $plugin->can_sync(%args); + if ( $can_sync ) { + PTDEBUG && _d('Can sync with', $plugin->name, Dumper(\%plugin_args)); + return $plugin, %plugin_args; + } + } + PTDEBUG && _d('No plugin can sync the table'); + return; +} + +sub sync_table { + my ( $self, %args ) = @_; + my @required_args = qw(plugins src dst tbl_struct cols chunk_size + RowDiff ChangeHandler); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + PTDEBUG && _d('Syncing table with args:', + map { "$_: " . 
Dumper($args{$_}) } + qw(plugins src dst tbl_struct cols chunk_size)); + + my ($plugins, $src, $dst, $tbl_struct, $cols, $chunk_size, $rd, $ch) + = @args{@required_args}; + my $dp = $self->{DSNParser}; + $args{trace} = 1 unless defined $args{trace}; + + if ( $args{bidirectional} && $args{ChangeHandler}->{queue} ) { + die "Queueing does not work with bidirectional syncing"; + } + + $args{index_hint} = 1 unless defined $args{index_hint}; + $args{lock} ||= 0; + $args{wait} ||= 0; + $args{transaction} ||= 0; + $args{timeout_ok} ||= 0; + + my $q = $self->{Quoter}; + + my ($plugin, %plugin_args) = $self->get_best_plugin(%args); + die "No plugin can sync $src->{db}.$src->{tbl}" unless $plugin; + + my $crc_col = '__crc'; + while ( $tbl_struct->{is_col}->{$crc_col} ) { + $crc_col = "_$crc_col"; # Prepend more _ until not a column. + } + PTDEBUG && _d('CRC column:', $crc_col); + + my $index_hint; + if ( $args{chunk_index} ) { + PTDEBUG && _d('Using given chunk index for index hint'); + $index_hint = "FORCE INDEX (" . $q->quote($args{chunk_index}) . ")"; + } + elsif ( $plugin_args{chunk_index} && $args{index_hint} ) { + PTDEBUG && _d('Using chunk index chosen by plugin for index hint'); + $index_hint = "FORCE INDEX (" . $q->quote($plugin_args{chunk_index}) . ")"; + } + PTDEBUG && _d('Index hint:', $index_hint); + + eval { + $plugin->prepare_to_sync( + %args, + %plugin_args, + dbh => $src->{dbh}, + db => $src->{db}, + tbl => $src->{tbl}, + crc_col => $crc_col, + index_hint => $index_hint, + ); + }; + if ( $EVAL_ERROR ) { + die 'Failed to prepare TableSync', $plugin->name, ' plugin: ', + $EVAL_ERROR; + } + + if ( $plugin->uses_checksum() ) { + eval { + my ($chunk_sql, $row_sql) = $self->make_checksum_queries(%args); + $plugin->set_checksum_queries($chunk_sql, $row_sql); + }; + if ( $EVAL_ERROR ) { + die "Failed to make checksum queries: $EVAL_ERROR"; + } + } + + if ( $args{dry_run} ) { + return $ch->get_changes(), ALGORITHM => $plugin->name; + } + + + eval { + $src->{dbh}->do("USE `$src->{db}`"); + $dst->{dbh}->do("USE `$dst->{db}`"); + }; + if ( $EVAL_ERROR ) { + die "Failed to USE database on source or destination: $EVAL_ERROR"; + } + + PTDEBUG && _d('left dbh', $src->{dbh}); + PTDEBUG && _d('right dbh', $dst->{dbh}); + + chomp(my $hostname = `hostname`); + my $trace_msg + = $args{trace} ? "src_db:$src->{db} src_tbl:$src->{tbl} " + . ($dp && $src->{dsn} ? "src_dsn:".$dp->as_string($src->{dsn}) : "") + . " dst_db:$dst->{db} dst_tbl:$dst->{tbl} " + . ($dp && $dst->{dsn} ? "dst_dsn:".$dp->as_string($dst->{dsn}) : "") + . " " . join(" ", map { "$_:" . ($args{$_} || 0) } + qw(lock transaction changing_src replicate bidirectional)) + . " pid:$PID " + . ($ENV{USER} ? "user:$ENV{USER} " : "") + . ($hostname ? 
"host:$hostname" : "") + : ""; + PTDEBUG && _d("Binlog trace message:", $trace_msg); + + $self->lock_and_wait(%args, lock_level => 2); # per-table lock + + my $callback = $args{callback}; + my $cycle = 0; + while ( !$plugin->done() ) { + + PTDEBUG && _d('Beginning sync cycle', $cycle); + my $src_sql = $plugin->get_sql( + database => $src->{db}, + table => $src->{tbl}, + where => $args{where}, + ); + my $dst_sql = $plugin->get_sql( + database => $dst->{db}, + table => $dst->{tbl}, + where => $args{where}, + ); + + if ( $args{transaction} ) { + if ( $args{bidirectional} ) { + $src_sql .= ' FOR UPDATE'; + $dst_sql .= ' FOR UPDATE'; + } + elsif ( $args{changing_src} ) { + $src_sql .= ' FOR UPDATE'; + $dst_sql .= ' LOCK IN SHARE MODE'; + } + else { + $src_sql .= ' LOCK IN SHARE MODE'; + $dst_sql .= ' FOR UPDATE'; + } + } + PTDEBUG && _d('src:', $src_sql); + PTDEBUG && _d('dst:', $dst_sql); + + $callback->($src_sql, $dst_sql) if $callback; + + $plugin->prepare_sync_cycle($src); + $plugin->prepare_sync_cycle($dst); + + my $src_sth = $src->{dbh}->prepare($src_sql); + my $dst_sth = $dst->{dbh}->prepare($dst_sql); + if ( $args{buffer_to_client} ) { + $src_sth->{mysql_use_result} = 1; + $dst_sth->{mysql_use_result} = 1; + } + + my $executed_src = 0; + if ( !$cycle || !$plugin->pending_changes() ) { + $executed_src + = $self->lock_and_wait(%args, src_sth => $src_sth, lock_level => 1); + } + + $src_sth->execute() unless $executed_src; + $dst_sth->execute(); + + $rd->compare_sets( + left_sth => $src_sth, + right_sth => $dst_sth, + left_dbh => $src->{dbh}, + right_dbh => $dst->{dbh}, + syncer => $plugin, + tbl_struct => $tbl_struct, + ); + $ch->process_rows(1, $trace_msg); + + PTDEBUG && _d('Finished sync cycle', $cycle); + $cycle++; + } + + $ch->process_rows(0, $trace_msg); + + $self->unlock(%args, lock_level => 2); + + return $ch->get_changes(), ALGORITHM => $plugin->name; +} + +sub make_checksum_queries { + my ( $self, %args ) = @_; + my @required_args = qw(src dst tbl_struct); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($src, $dst, $tbl_struct) = @args{@required_args}; + my $checksum = $self->{TableChecksum}; + + my $src_algo = $checksum->best_algorithm( + algorithm => 'BIT_XOR', + dbh => $src->{dbh}, + where => 1, + chunk => 1, + count => 1, + ); + my $dst_algo = $checksum->best_algorithm( + algorithm => 'BIT_XOR', + dbh => $dst->{dbh}, + where => 1, + chunk => 1, + count => 1, + ); + if ( $src_algo ne $dst_algo ) { + die "Source and destination checksum algorithms are different: ", + "$src_algo on source, $dst_algo on destination" + } + PTDEBUG && _d('Chosen algo:', $src_algo); + + my $src_func = $checksum->choose_hash_func(dbh => $src->{dbh}, %args); + my $dst_func = $checksum->choose_hash_func(dbh => $dst->{dbh}, %args); + if ( $src_func ne $dst_func ) { + die "Source and destination hash functions are different: ", + "$src_func on source, $dst_func on destination"; + } + PTDEBUG && _d('Chosen hash func:', $src_func); + + + my $crc_wid = $checksum->get_crc_wid($src->{dbh}, $src_func); + my ($crc_type) = $checksum->get_crc_type($src->{dbh}, $src_func); + my $opt_slice; + if ( $src_algo eq 'BIT_XOR' && $crc_type !~ m/int$/ ) { + $opt_slice = $checksum->optimize_xor( + dbh => $src->{dbh}, + function => $src_func + ); + } + + my $chunk_sql = $checksum->make_checksum_query( + %args, + db => $src->{db}, + tbl => $src->{tbl}, + algorithm => $src_algo, + function => $src_func, + crc_wid => $crc_wid, + crc_type => $crc_type, + opt_slice => 
$opt_slice,
+      replicate => undef, # replicate means something different to this sub
+   );                     # than what we use it for; do not pass it!
+   PTDEBUG && _d('Chunk sql:', $chunk_sql);
+   my $row_sql = $checksum->make_row_checksum(
+      %args,
+      function => $src_func,
+   );
+   PTDEBUG && _d('Row sql:', $row_sql);
+   return $chunk_sql, $row_sql;
+}
+
+sub lock_table {
+   my ( $self, $dbh, $where, $db_tbl, $mode ) = @_;
+   my $query = "LOCK TABLES $db_tbl $mode";
+   PTDEBUG && _d($query);
+   $dbh->do($query);
+   PTDEBUG && _d('Acquired table lock on', $where, 'in', $mode, 'mode');
+}
+
+sub unlock {
+   my ( $self, %args ) = @_;
+
+   foreach my $arg ( qw(src dst lock transaction lock_level) ) {
+      die "I need a $arg argument" unless defined $args{$arg};
+   }
+   my $src = $args{src};
+   my $dst = $args{dst};
+
+   return unless $args{lock} && $args{lock} <= $args{lock_level};
+
+   foreach my $dbh ( $src->{dbh}, $dst->{dbh} ) {
+      if ( $args{transaction} ) {
+         PTDEBUG && _d('Committing', $dbh);
+         $dbh->commit();
+      }
+      else {
+         my $sql = 'UNLOCK TABLES';
+         PTDEBUG && _d($dbh, $sql);
+         $dbh->do($sql);
+      }
+   }
+
+   return;
+}
+
+sub lock_and_wait {
+   my ( $self, %args ) = @_;
+   my $result = 0;
+
+   foreach my $arg ( qw(src dst lock lock_level) ) {
+      die "I need a $arg argument" unless defined $args{$arg};
+   }
+   my $src = $args{src};
+   my $dst = $args{dst};
+
+   return unless $args{lock} && $args{lock} == $args{lock_level};
+   PTDEBUG && _d('lock and wait, lock level', $args{lock});
+
+   foreach my $dbh ( $src->{dbh}, $dst->{dbh} ) {
+      if ( $args{transaction} ) {
+         PTDEBUG && _d('Committing', $dbh);
+         $dbh->commit();
+      }
+      else {
+         my $sql = 'UNLOCK TABLES';
+         PTDEBUG && _d($dbh, $sql);
+         $dbh->do($sql);
+      }
+   }
+
+   if ( $args{lock} == 3 ) {
+      my $sql = 'FLUSH TABLES WITH READ LOCK';
+      PTDEBUG && _d($src->{dbh}, $sql);
+      $src->{dbh}->do($sql);
+   }
+   else {
+      if ( $args{transaction} ) {
+         if ( $args{src_sth} ) {
+            PTDEBUG && _d('Executing statement on source to lock rows');
+
+            my $sql = "START TRANSACTION /*!40108 WITH CONSISTENT SNAPSHOT */";
+            PTDEBUG && _d($src->{dbh}, $sql);
+            $src->{dbh}->do($sql);
+
+            $args{src_sth}->execute();
+            $result = 1;
+         }
+      }
+      else {
+         $self->lock_table($src->{dbh}, 'source',
+            $self->{Quoter}->quote($src->{db}, $src->{tbl}),
+            $args{changing_src} ? 'WRITE' : 'READ');
+      }
+   }
+
+   eval {
+      if ( my $timeout = $args{wait} ) {
+         my $ms    = $self->{MasterSlave};
+         my $tries = $args{wait_retry_args}->{tries} || 3;
+         my $wait;
+         my $sleep = $args{wait_retry_args}->{wait} || 10;
+         $self->{Retry}->retry(
+            tries => $tries,
+            wait  => sub { sleep($sleep) },
+            try   => sub {
+               my ( %args ) = @_;
+
+               if ( $args{tryno} > 1 ) {
+                  warn "Retrying MASTER_POS_WAIT() for --wait $timeout...";
+               }
+
+               $wait = $ms->wait_for_master(
+                  master_status => $ms->get_master_status($src->{misc_dbh}),
+                  slave_dbh     => $dst->{dbh},
+                  timeout       => $timeout,
+               );
+               if ( $wait->{error} ) {
+                  die $wait->{error};
+               }
+               if ( defined $wait->{result} && $wait->{result} != -1 ) {
+                  return; # slave caught up
+               }
+               die; # call fail
+            },
+            fail => sub {
+               my (%args) = @_;
+               if ( !defined $wait->{result} ) {
+                  my $msg;
+                  if ( $wait->{waited} ) {
+                     $msg = "The slave was stopped while waiting with "
+                          . "MASTER_POS_WAIT().";
+                  }
+                  else {
+                     $msg = "MASTER_POS_WAIT() returned NULL. Verify that "
+                          . "the slave is running.";
+                  }
+                  if ( $tries - $args{tryno} ) {
+                     $msg .= " Sleeping $sleep seconds then retrying "
+                          . ($tries - $args{tryno}) .
" more times."; + } + warn "$msg\n"; + return 1; # call wait, call try + } + elsif ( $wait->{result} == -1 ) { + return 0; # call final_fail + } + }, + final_fail => sub { + die "Slave did not catch up to its master after $tries attempts " + . "of waiting $timeout seconds with MASTER_POS_WAIT. " + . "Check that the slave is running, increase the --wait " + . "time, or disable this feature by specifying --wait 0."; + }, + ); # retry MasterSlave::wait_for_master() + } + + if ( $args{changing_src} ) { + PTDEBUG && _d('Not locking destination because changing source ', + '(syncing via replication or sync-to-master)'); + } + else { + if ( $args{lock} == 3 ) { + my $sql = 'FLUSH TABLES WITH READ LOCK'; + PTDEBUG && _d($dst->{dbh}, ',', $sql); + $dst->{dbh}->do($sql); + } + elsif ( !$args{transaction} ) { + $self->lock_table($dst->{dbh}, 'dest', + $self->{Quoter}->quote($dst->{db}, $dst->{tbl}), + $args{execute} ? 'WRITE' : 'READ'); + } + } + }; + if ( $EVAL_ERROR ) { + if ( $args{src_sth}->{Active} ) { + $args{src_sth}->finish(); + } + foreach my $dbh ( $src->{dbh}, $dst->{dbh}, $src->{misc_dbh} ) { + next unless $dbh; + PTDEBUG && _d('Caught error, unlocking/committing on', $dbh); + $dbh->do('UNLOCK TABLES'); + $dbh->commit() unless $dbh->{AutoCommit}; + } + die $EVAL_ERROR; + } + + return $result; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End TableSyncer package +# ########################################################################### + +# ########################################################################### +# TableNibbler package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/TableNibbler.pm +# t/lib/TableNibbler.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package TableNibbler; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(TableParser Quoter); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $self = { %args }; + return bless $self, $class; +} + +sub generate_asc_stmt { + my ( $self, %args ) = @_; + my @required_args = qw(tbl_struct index); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless defined $args{$arg}; + } + my ($tbl_struct, $index) = @args{@required_args}; + my @cols = $args{cols} ? 
@{$args{cols}} : @{$tbl_struct->{cols}};
+   my $q = $self->{Quoter};
+
+   die "Index '$index' does not exist in table"
+      unless exists $tbl_struct->{keys}->{$index};
+   PTDEBUG && _d('Will ascend index', $index);
+
+   my @asc_cols = @{$tbl_struct->{keys}->{$index}->{cols}};
+   if ( $args{asc_first} ) {
+      PTDEBUG && _d('Ascending only first column');
+      @asc_cols = $asc_cols[0];
+   }
+   elsif ( my $n = $args{n_index_cols} ) {
+      $n = scalar @asc_cols if $n > @asc_cols;
+      PTDEBUG && _d('Ascending only first', $n, 'columns');
+      @asc_cols = @asc_cols[0..($n-1)];
+   }
+   PTDEBUG && _d('Will ascend columns', join(', ', @asc_cols));
+
+   my @asc_slice;
+   my %col_posn = do { my $i = 0; map { $_ => $i++ } @cols };
+   foreach my $col ( @asc_cols ) {
+      if ( !exists $col_posn{$col} ) {
+         push @cols, $col;
+         $col_posn{$col} = $#cols;
+      }
+      push @asc_slice, $col_posn{$col};
+   }
+   PTDEBUG && _d('Will ascend, in ordinal position:', join(', ', @asc_slice));
+
+   my $asc_stmt = {
+      cols  => \@cols,
+      index => $index,
+      where => '',
+      slice => [],
+      scols => [],
+   };
+
+   if ( @asc_slice ) {
+      my $cmp_where;
+      foreach my $cmp ( qw(< <= >= >) ) {
+         $cmp_where = $self->generate_cmp_where(
+            type        => $cmp,
+            slice       => \@asc_slice,
+            cols        => \@cols,
+            quoter      => $q,
+            is_nullable => $tbl_struct->{is_nullable},
+            type_for    => $tbl_struct->{type_for},
+         );
+         $asc_stmt->{boundaries}->{$cmp} = $cmp_where->{where};
+      }
+      my $cmp = $args{asc_only} ? '>' : '>=';
+      $asc_stmt->{where} = $asc_stmt->{boundaries}->{$cmp};
+      $asc_stmt->{slice} = $cmp_where->{slice};
+      $asc_stmt->{scols} = $cmp_where->{scols};
+   }
+
+   return $asc_stmt;
+}
+
+sub generate_cmp_where {
+   my ( $self, %args ) = @_;
+   foreach my $arg ( qw(type slice cols is_nullable) ) {
+      die "I need a $arg arg" unless defined $args{$arg};
+   }
+   my @slice       = @{$args{slice}};
+   my @cols        = @{$args{cols}};
+   my $is_nullable = $args{is_nullable};
+   my $type_for    = $args{type_for};
+   my $type        = $args{type};
+   my $q           = $self->{Quoter};
+
+   (my $cmp = $type) =~ s/=//;
+
+   my @r_slice; # Resulting slice columns, by ordinal
+   my @r_scols; # Ditto, by name
+
+   my @clauses;
+   foreach my $i ( 0 .. $#slice ) {
+      my @clause;
+
+      foreach my $j ( 0 .. $i - 1 ) {
+         my $ord = $slice[$j];
+         my $col = $cols[$ord];
+         my $quo = $q->quote($col);
+         my $val = ($col && ($type_for->{$col} || '')) eq 'enum' ? "CAST(? AS UNSIGNED)" : "?";
+         if ( $is_nullable->{$col} ) {
+            push @clause, "(($val IS NULL AND $quo IS NULL) OR ($quo = $val))";
+            push @r_slice, $ord, $ord;
+            push @r_scols, $col, $col;
+         }
+         else {
+            push @clause, "$quo = $val";
+            push @r_slice, $ord;
+            push @r_scols, $col;
+         }
+      }
+
+      my $ord = $slice[$i];
+      my $col = $cols[$ord];
+      my $quo = $q->quote($col);
+      my $end = $i == $#slice; # Last clause of the whole group.
+      my $val = ($col && ($type_for->{$col} || '')) eq 'enum' ? "CAST(? AS UNSIGNED)" : "?";
+      if ( $is_nullable->{$col} ) {
+         if ( $type =~ m/=/ && $end ) {
+            push @clause, "($val IS NULL OR $quo $type $val)";
+         }
+         elsif ( $type =~ m/>/ ) {
+            push @clause, "($val IS NULL AND $quo IS NOT NULL) OR ($quo $cmp $val)";
+         }
+         else { # If $type =~ m/</
+            push @clause, "(($val IS NULL AND $quo IS NULL) OR ($quo $cmp $val))";
+         }
+         push @r_slice, $ord, $ord;
+         push @r_scols, $col, $col;
+      }
+      else {
+         push @clause, ($type =~ m/=/ && $end ? "$quo $type $val" : "$quo $cmp $val");
+         push @r_slice, $ord;
+         push @r_scols, $col;
+      }
+
+      push @clauses, '(' . join(' AND ', @clause) . ')';
+   }
+   my $result = '(' . join(' OR ', @clauses) . ')';
+   my $where = {
+      slice => \@r_slice,
+      scols => \@r_scols,
+      where => $result,
+   };
+   return $where;
+}
+
+sub generate_del_stmt {
+   my ( $self, %args ) = @_;
+
+   my $tbl  = $args{tbl_struct};
+   my @cols = $args{cols} ?
@{$args{cols}} : (); + my $tp = $self->{TableParser}; + my $q = $self->{Quoter}; + + my @del_cols; + my @del_slice; + + my $index = $tp->find_best_index($tbl, $args{index}); + die "Cannot find an ascendable index in table" unless $index; + + if ( $index && $tbl->{keys}->{$index}->{is_unique}) { + @del_cols = @{$tbl->{keys}->{$index}->{cols}}; + } + else { + @del_cols = @{$tbl->{cols}}; + } + PTDEBUG && _d('Columns needed for DELETE:', join(', ', @del_cols)); + + my %col_posn = do { my $i = 0; map { $_ => $i++ } @cols }; + foreach my $col ( @del_cols ) { + if ( !exists $col_posn{$col} ) { + push @cols, $col; + $col_posn{$col} = $#cols; + } + push @del_slice, $col_posn{$col}; + } + PTDEBUG && _d('Ordinals needed for DELETE:', join(', ', @del_slice)); + + my $del_stmt = { + cols => \@cols, + index => $index, + where => '', + slice => [], + scols => [], + }; + + my @clauses; + foreach my $i ( 0 .. $#del_slice ) { + my $ord = $del_slice[$i]; + my $col = $cols[$ord]; + my $quo = $q->quote($col); + if ( $tbl->{is_nullable}->{$col} ) { + push @clauses, "((? IS NULL AND $quo IS NULL) OR ($quo = ?))"; + push @{$del_stmt->{slice}}, $ord, $ord; + push @{$del_stmt->{scols}}, $col, $col; + } + else { + push @clauses, "$quo = ?"; + push @{$del_stmt->{slice}}, $ord; + push @{$del_stmt->{scols}}, $col; + } + } + + $del_stmt->{where} = '(' . join(' AND ', @clauses) . ')'; + + return $del_stmt; +} + +sub generate_ins_stmt { + my ( $self, %args ) = @_; + foreach my $arg ( qw(ins_tbl sel_cols) ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $ins_tbl = $args{ins_tbl}; + my @sel_cols = @{$args{sel_cols}}; + + die "You didn't specify any SELECT columns" unless @sel_cols; + + my @ins_cols; + my @ins_slice; + for my $i ( 0..$#sel_cols ) { + next unless $ins_tbl->{is_col}->{$sel_cols[$i]}; + push @ins_cols, $sel_cols[$i]; + push @ins_slice, $i; + } + + return { + cols => \@ins_cols, + slice => \@ins_slice, + }; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End TableNibbler package +# ########################################################################### + +# ########################################################################### +# MasterSlave package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/MasterSlave.pm +# t/lib/MasterSlave.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package MasterSlave; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +sub check_recursion_method { + my ($methods) = @_; + if ( @$methods != 1 ) { + if ( grep({ !m/processlist|hosts/i } @$methods) + && $methods->[0] !~ /^dsn=/i ) + { + die "Invalid combination of recursion methods: " + . join(", ", map { defined($_) ? $_ : 'undef' } @$methods) . ". " + . "Only hosts and processlist may be combined.\n" + } + } + else { + my ($method) = @$methods; + die "Invalid recursion method: " . 
( $method || 'undef' ) + unless $method && $method =~ m/^(?:processlist$|hosts$|none$|cluster$|dsn=)/i; + } +} + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(OptionParser DSNParser Quoter); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $self = { + %args, + replication_thread => {}, + }; + return bless $self, $class; +} + +sub get_slaves { + my ($self, %args) = @_; + my @required_args = qw(make_cxn); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($make_cxn) = @args{@required_args}; + + my $slaves = []; + my $dp = $self->{DSNParser}; + my $methods = $self->_resolve_recursion_methods($args{dsn}); + + return $slaves unless @$methods; + + if ( grep { m/processlist|hosts/i } @$methods ) { + my @required_args = qw(dbh dsn); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh, $dsn) = @args{@required_args}; + my $o = $self->{OptionParser}; + + $self->recurse_to_slaves( + { dbh => $dbh, + dsn => $dsn, + slave_user => $o->got('slave-user') ? $o->get('slave-user') : '', + slave_password => $o->got('slave-password') ? $o->get('slave-password') : '', + callback => sub { + my ( $dsn, $dbh, $level, $parent ) = @_; + return unless $level; + PTDEBUG && _d('Found slave:', $dp->as_string($dsn)); + my $slave_dsn = $dsn; + if ($o->got('slave-user')) { + $slave_dsn->{u} = $o->get('slave-user'); + PTDEBUG && _d("Using slave user ".$o->get('slave-user')." on ".$slave_dsn->{h}.":".$slave_dsn->{P}); + } + if ($o->got('slave-password')) { + $slave_dsn->{p} = $o->get('slave-password'); + PTDEBUG && _d("Slave password set"); + } + push @$slaves, $make_cxn->(dsn => $slave_dsn, dbh => $dbh); + return; + }, + } + ); + } elsif ( $methods->[0] =~ m/^dsn=/i ) { + (my $dsn_table_dsn = join ",", @$methods) =~ s/^dsn=//i; + $slaves = $self->get_cxn_from_dsn_table( + %args, + dsn_table_dsn => $dsn_table_dsn, + ); + } + elsif ( $methods->[0] =~ m/none/i ) { + PTDEBUG && _d('Not getting to slaves'); + } + else { + die "Unexpected recursion methods: @$methods"; + } + + return $slaves; +} + +sub _resolve_recursion_methods { + my ($self, $dsn) = @_; + my $o = $self->{OptionParser}; + if ( $o->got('recursion-method') ) { + return $o->get('recursion-method'); + } + elsif ( $dsn && ($dsn->{P} || 3306) != 3306 ) { + PTDEBUG && _d('Port number is non-standard; using only hosts method'); + return [qw(hosts)]; + } + else { + return $o->get('recursion-method'); + } +} + +sub recurse_to_slaves { + my ( $self, $args, $level ) = @_; + $level ||= 0; + my $dp = $self->{DSNParser}; + my $recurse = $args->{recurse} || $self->{OptionParser}->get('recurse'); + my $dsn = $args->{dsn}; + my $slave_user = $args->{slave_user} || ''; + my $slave_password = $args->{slave_password} || ''; + + my $methods = $self->_resolve_recursion_methods($dsn); + PTDEBUG && _d('Recursion methods:', @$methods); + if ( lc($methods->[0]) eq 'none' ) { + PTDEBUG && _d('Not recursing to slaves'); + return; + } + + my $slave_dsn = $dsn; + if ($slave_user) { + $slave_dsn->{u} = $slave_user; + PTDEBUG && _d("Using slave user $slave_user on ".$slave_dsn->{h}.":".$slave_dsn->{P}); + } + if ($slave_password) { + $slave_dsn->{p} = $slave_password; + PTDEBUG && _d("Slave password set"); + } + + my $dbh; + eval { + $dbh = $args->{dbh} || $dp->get_dbh( + $dp->get_cxn_params($slave_dsn), { AutoCommit => 1 }); + PTDEBUG && _d('Connected to', $dp->as_string($slave_dsn)); + }; + if ( $EVAL_ERROR ) { + print STDERR 
"Cannot connect to ", $dp->as_string($slave_dsn), "\n" + or die "Cannot print: $OS_ERROR"; + return; + } + + my $sql = 'SELECT @@SERVER_ID'; + PTDEBUG && _d($sql); + my ($id) = $dbh->selectrow_array($sql); + PTDEBUG && _d('Working on server ID', $id); + my $master_thinks_i_am = $dsn->{server_id}; + if ( !defined $id + || ( defined $master_thinks_i_am && $master_thinks_i_am != $id ) + || $args->{server_ids_seen}->{$id}++ + ) { + PTDEBUG && _d('Server ID seen, or not what master said'); + if ( $args->{skip_callback} ) { + $args->{skip_callback}->($dsn, $dbh, $level, $args->{parent}); + } + return; + } + + $args->{callback}->($dsn, $dbh, $level, $args->{parent}); + + if ( !defined $recurse || $level < $recurse ) { + + my @slaves = + grep { !$_->{master_id} || $_->{master_id} == $id } # Only my slaves. + $self->find_slave_hosts($dp, $dbh, $dsn, $methods); + + foreach my $slave ( @slaves ) { + PTDEBUG && _d('Recursing from', + $dp->as_string($dsn), 'to', $dp->as_string($slave)); + $self->recurse_to_slaves( + { %$args, dsn => $slave, dbh => undef, parent => $dsn, slave_user => $slave_user, $slave_password => $slave_password }, $level + 1 ); + } + } +} + +sub find_slave_hosts { + my ( $self, $dsn_parser, $dbh, $dsn, $methods ) = @_; + + PTDEBUG && _d('Looking for slaves on', $dsn_parser->as_string($dsn), + 'using methods', @$methods); + + my @slaves; + METHOD: + foreach my $method ( @$methods ) { + my $find_slaves = "_find_slaves_by_$method"; + PTDEBUG && _d('Finding slaves with', $find_slaves); + @slaves = $self->$find_slaves($dsn_parser, $dbh, $dsn); + last METHOD if @slaves; + } + + PTDEBUG && _d('Found', scalar(@slaves), 'slaves'); + return @slaves; +} + +sub _find_slaves_by_processlist { + my ( $self, $dsn_parser, $dbh, $dsn ) = @_; + my @connected_slaves = $self->get_connected_slaves($dbh); + my @slaves = $self->_process_slaves_list($dsn_parser, $dsn, \@connected_slaves); + return @slaves; +} + +sub _process_slaves_list { + my ($self, $dsn_parser, $dsn, $connected_slaves) = @_; + my @slaves = map { + my $slave = $dsn_parser->parse("h=$_", $dsn); + $slave->{source} = 'processlist'; + $slave; + } + grep { $_ } + map { + my ( $host ) = $_->{host} =~ m/^(.*):\d+$/; + if ( $host eq 'localhost' ) { + $host = '127.0.0.1'; # Replication never uses sockets. + } + if ($host =~ m/::/) { + $host = '['.$host.']'; + } + $host; + } @$connected_slaves; + + return @slaves; +} + +sub _find_slaves_by_hosts { + my ( $self, $dsn_parser, $dbh, $dsn ) = @_; + + my @slaves; + my $sql = 'SHOW SLAVE HOSTS'; + PTDEBUG && _d($dbh, $sql); + @slaves = @{$dbh->selectall_arrayref($sql, { Slice => {} })}; + + if ( @slaves ) { + PTDEBUG && _d('Found some SHOW SLAVE HOSTS info'); + @slaves = map { + my %hash; + @hash{ map { lc $_ } keys %$_ } = values %$_; + my $spec = "h=$hash{host},P=$hash{port}" + . ( $hash{user} ? ",u=$hash{user}" : '') + . ( $hash{password} ? ",p=$hash{password}" : ''); + my $dsn = $dsn_parser->parse($spec, $dsn); + $dsn->{server_id} = $hash{server_id}; + $dsn->{master_id} = $hash{master_id}; + $dsn->{source} = 'hosts'; + $dsn; + } @slaves; + } + + return @slaves; +} + +sub get_connected_slaves { + my ( $self, $dbh ) = @_; + + my $show = "SHOW GRANTS FOR "; + my $user = 'CURRENT_USER()'; + my $sql = $show . 
$user; + PTDEBUG && _d($dbh, $sql); + + my $proc; + eval { + $proc = grep { + m/ALL PRIVILEGES.*?\*\.\*|PROCESS/ + } @{$dbh->selectcol_arrayref($sql)}; + }; + if ( $EVAL_ERROR ) { + + if ( $EVAL_ERROR =~ m/no such grant defined for user/ ) { + PTDEBUG && _d('Retrying SHOW GRANTS without host; error:', + $EVAL_ERROR); + ($user) = split('@', $user); + $sql = $show . $user; + PTDEBUG && _d($sql); + eval { + $proc = grep { + m/ALL PRIVILEGES.*?\*\.\*|PROCESS/ + } @{$dbh->selectcol_arrayref($sql)}; + }; + } + + die "Failed to $sql: $EVAL_ERROR" if $EVAL_ERROR; + } + if ( !$proc ) { + die "You do not have the PROCESS privilege"; + } + + $sql = 'SHOW FULL PROCESSLIST'; + PTDEBUG && _d($dbh, $sql); + grep { $_->{command} =~ m/Binlog Dump/i } + map { # Lowercase the column names + my %hash; + @hash{ map { lc $_ } keys %$_ } = values %$_; + \%hash; + } + @{$dbh->selectall_arrayref($sql, { Slice => {} })}; +} + +sub is_master_of { + my ( $self, $master, $slave ) = @_; + my $master_status = $self->get_master_status($master) + or die "The server specified as a master is not a master"; + my $slave_status = $self->get_slave_status($slave) + or die "The server specified as a slave is not a slave"; + my @connected = $self->get_connected_slaves($master) + or die "The server specified as a master has no connected slaves"; + my (undef, $port) = $master->selectrow_array("SHOW VARIABLES LIKE 'port'"); + + if ( $port != $slave_status->{master_port} ) { + die "The slave is connected to $slave_status->{master_port} " + . "but the master's port is $port"; + } + + if ( !grep { $slave_status->{master_user} eq $_->{user} } @connected ) { + die "I don't see any slave I/O thread connected with user " + . $slave_status->{master_user}; + } + + if ( ($slave_status->{slave_io_state} || '') + eq 'Waiting for master to send event' ) + { + my ( $master_log_name, $master_log_num ) + = $master_status->{file} =~ m/^(.*?)\.0*([1-9][0-9]*)$/; + my ( $slave_log_name, $slave_log_num ) + = $slave_status->{master_log_file} =~ m/^(.*?)\.0*([1-9][0-9]*)$/; + if ( $master_log_name ne $slave_log_name + || abs($master_log_num - $slave_log_num) > 1 ) + { + die "The slave thinks it is reading from " + . "$slave_status->{master_log_file}, but the " + . 
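+         # (The log names must match and the sequence numbers may differ
+         # by at most 1, so an in-flight binlog rotation is tolerated.)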
"master is writing to $master_status->{file}"; + } + } + return 1; +} + +sub get_master_dsn { + my ( $self, $dbh, $dsn, $dsn_parser ) = @_; + my $master = $self->get_slave_status($dbh) or return undef; + my $spec = "h=$master->{master_host},P=$master->{master_port}"; + return $dsn_parser->parse($spec, $dsn); +} + +sub get_slave_status { + my ( $self, $dbh ) = @_; + + if ( !$self->{not_a_slave}->{$dbh} ) { + my $sth = $self->{sths}->{$dbh}->{SLAVE_STATUS} + ||= $dbh->prepare('SHOW SLAVE STATUS'); + PTDEBUG && _d($dbh, 'SHOW SLAVE STATUS'); + $sth->execute(); + my ($sss_rows) = $sth->fetchall_arrayref({}); # Show Slave Status rows + + my $ss; + if ( $sss_rows && @$sss_rows ) { + if (scalar @$sss_rows > 1) { + if (!$self->{channel}) { + die 'This server returned more than one row for SHOW SLAVE STATUS but "channel" was not specified on the command line'; + } + my $slave_use_channels; + for my $row (@$sss_rows) { + $row = { map { lc($_) => $row->{$_} } keys %$row }; # lowercase the keys + if ($row->{channel_name}) { + $slave_use_channels = 1; + } + if ($row->{channel_name} eq $self->{channel}) { + $ss = $row; + last; + } + } + if (!$ss && $slave_use_channels) { + die 'This server is using replication channels but "channel" was not specified on the command line'; + } + } else { + if ($sss_rows->[0]->{channel_name} && $sss_rows->[0]->{channel_name} ne $self->{channel}) { + die 'This server is using replication channels but "channel" was not specified on the command line'; + } else { + $ss = $sss_rows->[0]; + } + } + + if ( $ss && %$ss ) { + $ss = { map { lc($_) => $ss->{$_} } keys %$ss }; # lowercase the keys + return $ss; + } + if (!$ss && $self->{channel}) { + die "Specified channel name is invalid"; + } + } + + PTDEBUG && _d('This server returns nothing for SHOW SLAVE STATUS'); + $self->{not_a_slave}->{$dbh}++; + } +} + +sub get_master_status { + my ( $self, $dbh ) = @_; + + if ( $self->{not_a_master}->{$dbh} ) { + PTDEBUG && _d('Server on dbh', $dbh, 'is not a master'); + return; + } + + my $sth = $self->{sths}->{$dbh}->{MASTER_STATUS} + ||= $dbh->prepare('SHOW MASTER STATUS'); + PTDEBUG && _d($dbh, 'SHOW MASTER STATUS'); + $sth->execute(); + my ($ms) = @{$sth->fetchall_arrayref({})}; + PTDEBUG && _d( + $ms ? map { "$_=" . (defined $ms->{$_} ? $ms->{$_} : '') } keys %$ms + : ''); + + if ( !$ms || scalar keys %$ms < 2 ) { + PTDEBUG && _d('Server on dbh', $dbh, 'does not seem to be a master'); + $self->{not_a_master}->{$dbh}++; + } + + return { map { lc($_) => $ms->{$_} } keys %$ms }; # lowercase the keys +} + +sub wait_for_master { + my ( $self, %args ) = @_; + my @required_args = qw(master_status slave_dbh); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($master_status, $slave_dbh) = @args{@required_args}; + my $timeout = $args{timeout} || 60; + + my $result; + my $waited; + if ( $master_status ) { + my $slave_status; + eval { + $slave_status = $self->get_slave_status($slave_dbh); + }; + if ($EVAL_ERROR) { + return { + result => undef, + waited => 0, + error =>'Wait for master: this is a multi-master slave but "channel" was not specified on the command line', + }; + } + my $server_version = VersionParser->new($slave_dbh); + my $channel_sql = $server_version > '5.6' && $self->{channel} ? 
", '$self->{channel}'" : ''; + my $sql = "SELECT MASTER_POS_WAIT('$master_status->{file}', $master_status->{position}, $timeout $channel_sql)"; + PTDEBUG && _d($slave_dbh, $sql); + my $start = time; + ($result) = $slave_dbh->selectrow_array($sql); + + $waited = time - $start; + + PTDEBUG && _d('Result of waiting:', $result); + PTDEBUG && _d("Waited", $waited, "seconds"); + } + else { + PTDEBUG && _d('Not waiting: this server is not a master'); + } + + return { + result => $result, + waited => $waited, + }; +} + +sub stop_slave { + my ( $self, $dbh ) = @_; + my $sth = $self->{sths}->{$dbh}->{STOP_SLAVE} + ||= $dbh->prepare('STOP SLAVE'); + PTDEBUG && _d($dbh, $sth->{Statement}); + $sth->execute(); +} + +sub start_slave { + my ( $self, $dbh, $pos ) = @_; + if ( $pos ) { + my $sql = "START SLAVE UNTIL MASTER_LOG_FILE='$pos->{file}', " + . "MASTER_LOG_POS=$pos->{position}"; + PTDEBUG && _d($dbh, $sql); + $dbh->do($sql); + } + else { + my $sth = $self->{sths}->{$dbh}->{START_SLAVE} + ||= $dbh->prepare('START SLAVE'); + PTDEBUG && _d($dbh, $sth->{Statement}); + $sth->execute(); + } +} + +sub catchup_to_master { + my ( $self, $slave, $master, $timeout ) = @_; + $self->stop_slave($master); + $self->stop_slave($slave); + my $slave_status = $self->get_slave_status($slave); + my $slave_pos = $self->repl_posn($slave_status); + my $master_status = $self->get_master_status($master); + my $master_pos = $self->repl_posn($master_status); + PTDEBUG && _d('Master position:', $self->pos_to_string($master_pos), + 'Slave position:', $self->pos_to_string($slave_pos)); + + my $result; + if ( $self->pos_cmp($slave_pos, $master_pos) < 0 ) { + PTDEBUG && _d('Waiting for slave to catch up to master'); + $self->start_slave($slave, $master_pos); + + $result = $self->wait_for_master( + master_status => $master_status, + slave_dbh => $slave, + timeout => $timeout, + master_status => $master_status + ); + if ($result->{error}) { + die $result->{error}; + } + if ( !defined $result->{result} ) { + $slave_status = $self->get_slave_status($slave); + if ( !$self->slave_is_running($slave_status) ) { + PTDEBUG && _d('Master position:', + $self->pos_to_string($master_pos), + 'Slave position:', $self->pos_to_string($slave_pos)); + $slave_pos = $self->repl_posn($slave_status); + if ( $self->pos_cmp($slave_pos, $master_pos) != 0 ) { + die "MASTER_POS_WAIT() returned NULL but slave has not " + . 
"caught up to master"; + } + PTDEBUG && _d('Slave is caught up to master and stopped'); + } + else { + die "Slave has not caught up to master and it is still running"; + } + } + } + else { + PTDEBUG && _d("Slave is already caught up to master"); + } + + return $result; +} + +sub catchup_to_same_pos { + my ( $self, $s1_dbh, $s2_dbh ) = @_; + $self->stop_slave($s1_dbh); + $self->stop_slave($s2_dbh); + my $s1_status = $self->get_slave_status($s1_dbh); + my $s2_status = $self->get_slave_status($s2_dbh); + my $s1_pos = $self->repl_posn($s1_status); + my $s2_pos = $self->repl_posn($s2_status); + if ( $self->pos_cmp($s1_pos, $s2_pos) < 0 ) { + $self->start_slave($s1_dbh, $s2_pos); + } + elsif ( $self->pos_cmp($s2_pos, $s1_pos) < 0 ) { + $self->start_slave($s2_dbh, $s1_pos); + } + + $s1_status = $self->get_slave_status($s1_dbh); + $s2_status = $self->get_slave_status($s2_dbh); + $s1_pos = $self->repl_posn($s1_status); + $s2_pos = $self->repl_posn($s2_status); + + if ( $self->slave_is_running($s1_status) + || $self->slave_is_running($s2_status) + || $self->pos_cmp($s1_pos, $s2_pos) != 0) + { + die "The servers aren't both stopped at the same position"; + } + +} + +sub slave_is_running { + my ( $self, $slave_status ) = @_; + return ($slave_status->{slave_sql_running} || 'No') eq 'Yes'; +} + +sub has_slave_updates { + my ( $self, $dbh ) = @_; + my $sql = q{SHOW VARIABLES LIKE 'log_slave_updates'}; + PTDEBUG && _d($dbh, $sql); + my ($name, $value) = $dbh->selectrow_array($sql); + return $value && $value =~ m/^(1|ON)$/; +} + +sub repl_posn { + my ( $self, $status ) = @_; + if ( exists $status->{file} && exists $status->{position} ) { + return { + file => $status->{file}, + position => $status->{position}, + }; + } + else { + return { + file => $status->{relay_master_log_file}, + position => $status->{exec_master_log_pos}, + }; + } +} + +sub get_slave_lag { + my ( $self, $dbh ) = @_; + my $stat = $self->get_slave_status($dbh); + return unless $stat; # server is not a slave + return $stat->{seconds_behind_master}; +} + +sub pos_cmp { + my ( $self, $a, $b ) = @_; + return $self->pos_to_string($a) cmp $self->pos_to_string($b); +} + +sub short_host { + my ( $self, $dsn ) = @_; + my ($host, $port); + if ( $dsn->{master_host} ) { + $host = $dsn->{master_host}; + $port = $dsn->{master_port}; + } + else { + $host = $dsn->{h}; + $port = $dsn->{P}; + } + return ($host || '[default]') . ( ($port || 3306) == 3306 ? '' : ":$port" ); +} + +sub is_replication_thread { + my ( $self, $query, %args ) = @_; + return unless $query; + + my $type = lc($args{type} || 'all'); + die "Invalid type: $type" + unless $type =~ m/^binlog_dump|slave_io|slave_sql|all$/i; + + my $match = 0; + if ( $type =~ m/binlog_dump|all/i ) { + $match = 1 + if ($query->{Command} || $query->{command} || '') eq "Binlog Dump"; + } + if ( !$match ) { + if ( ($query->{User} || $query->{user} || '') eq "system user" ) { + PTDEBUG && _d("Slave replication thread"); + if ( $type ne 'all' ) { + my $state = $query->{State} || $query->{state} || ''; + + if ( $state =~ m/^init|end$/ ) { + PTDEBUG && _d("Special state:", $state); + $match = 1; + } + else { + my ($slave_sql) = $state =~ m/ + ^(Waiting\sfor\sthe\snext\sevent + |Reading\sevent\sfrom\sthe\srelay\slog + |Has\sread\sall\srelay\slog;\swaiting + |Making\stemp\sfile + |Waiting\sfor\sslave\smutex\son\sexit)/xi; + + $match = $type eq 'slave_sql' && $slave_sql ? 1 + : $type eq 'slave_io' && !$slave_sql ? 
1 + : 0; + } + } + else { + $match = 1; + } + } + else { + PTDEBUG && _d('Not system user'); + } + + if ( !defined $args{check_known_ids} || $args{check_known_ids} ) { + my $id = $query->{Id} || $query->{id}; + if ( $match ) { + $self->{replication_thread}->{$id} = 1; + } + else { + if ( $self->{replication_thread}->{$id} ) { + PTDEBUG && _d("Thread ID is a known replication thread ID"); + $match = 1; + } + } + } + } + + PTDEBUG && _d('Matches', $type, 'replication thread:', + ($match ? 'yes' : 'no'), '; match:', $match); + + return $match; +} + + +sub get_replication_filters { + my ( $self, %args ) = @_; + my @required_args = qw(dbh); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dbh) = @args{@required_args}; + + my %filters = (); + + my $status = $self->get_master_status($dbh); + if ( $status ) { + map { $filters{$_} = $status->{$_} } + grep { defined $status->{$_} && $status->{$_} ne '' } + qw( + binlog_do_db + binlog_ignore_db + ); + } + + $status = $self->get_slave_status($dbh); + if ( $status ) { + map { $filters{$_} = $status->{$_} } + grep { defined $status->{$_} && $status->{$_} ne '' } + qw( + replicate_do_db + replicate_ignore_db + replicate_do_table + replicate_ignore_table + replicate_wild_do_table + replicate_wild_ignore_table + ); + + my $sql = "SHOW VARIABLES LIKE 'slave_skip_errors'"; + PTDEBUG && _d($dbh, $sql); + my $row = $dbh->selectrow_arrayref($sql); + $filters{slave_skip_errors} = $row->[1] if $row->[1] && $row->[1] ne 'OFF'; + } + + return \%filters; +} + + +sub pos_to_string { + my ( $self, $pos ) = @_; + my $fmt = '%s/%020d'; + return sprintf($fmt, @{$pos}{qw(file position)}); +} + +sub reset_known_replication_threads { + my ( $self ) = @_; + $self->{replication_thread} = {}; + return; +} + +sub get_cxn_from_dsn_table { + my ($self, %args) = @_; + my @required_args = qw(dsn_table_dsn make_cxn); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($dsn_table_dsn, $make_cxn) = @args{@required_args}; + PTDEBUG && _d('DSN table DSN:', $dsn_table_dsn); + + my $dp = $self->{DSNParser}; + my $q = $self->{Quoter}; + + my $dsn = $dp->parse($dsn_table_dsn); + my $dsn_table; + if ( $dsn->{D} && $dsn->{t} ) { + $dsn_table = $q->quote($dsn->{D}, $dsn->{t}); + } + elsif ( $dsn->{t} && $dsn->{t} =~ m/\./ ) { + $dsn_table = $q->quote($q->split_unquote($dsn->{t})); + } + else { + die "DSN table DSN does not specify a database (D) " + . "or a database-qualified table (t)"; + } + + my $dsn_tbl_cxn = $make_cxn->(dsn => $dsn); + my $dbh = $dsn_tbl_cxn->connect(); + my $sql = "SELECT dsn FROM $dsn_table ORDER BY id"; + PTDEBUG && _d($sql); + my $dsn_strings = $dbh->selectcol_arrayref($sql); + my @cxn; + if ( $dsn_strings ) { + foreach my $dsn_string ( @$dsn_strings ) { + PTDEBUG && _d('DSN from DSN table:', $dsn_string); + push @cxn, $make_cxn->(dsn_string => $dsn_string); + } + } + return \@cxn; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End MasterSlave package +# ########################################################################### + +# ########################################################################### +# Daemon package +# This package is a copy without comments from the original. 
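+# Typical call sequence, as a sketch (assumes the tool defines --daemonize,
+# --log and --pid options on its OptionParser $o):
+#   my $daemon = Daemon->new(o => $o);
+#   $daemon->daemonize() if $o->get('daemonize');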
The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Daemon.pm +# t/lib/Daemon.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Daemon; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use POSIX qw(setsid); + +sub new { + my ( $class, %args ) = @_; + foreach my $arg ( qw(o) ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $o = $args{o}; + my $self = { + o => $o, + log_file => $o->has('log') ? $o->get('log') : undef, + PID_file => $o->has('pid') ? $o->get('pid') : undef, + }; + + check_PID_file(undef, $self->{PID_file}); + + PTDEBUG && _d('Daemonized child will log to', $self->{log_file}); + return bless $self, $class; +} + +sub daemonize { + my ( $self ) = @_; + + PTDEBUG && _d('About to fork and daemonize'); + defined (my $pid = fork()) or die "Cannot fork: $OS_ERROR"; + if ( $pid ) { + PTDEBUG && _d('Parent PID', $PID, 'exiting after forking child PID',$pid); + exit; + } + + PTDEBUG && _d('Daemonizing child PID', $PID); + $self->{PID_owner} = $PID; + $self->{child} = 1; + + POSIX::setsid() or die "Cannot start a new session: $OS_ERROR"; + chdir '/' or die "Cannot chdir to /: $OS_ERROR"; + + $self->_make_PID_file(); + + $OUTPUT_AUTOFLUSH = 1; + + PTDEBUG && _d('Redirecting STDIN to /dev/null'); + close STDIN; + open STDIN, '/dev/null' + or die "Cannot reopen STDIN to /dev/null: $OS_ERROR"; + + if ( $self->{log_file} ) { + PTDEBUG && _d('Redirecting STDOUT and STDERR to', $self->{log_file}); + close STDOUT; + open STDOUT, '>>', $self->{log_file} + or die "Cannot open log file $self->{log_file}: $OS_ERROR"; + + close STDERR; + open STDERR, ">&STDOUT" + or die "Cannot dupe STDERR to STDOUT: $OS_ERROR"; + } + else { + if ( -t STDOUT ) { + PTDEBUG && _d('No log file and STDOUT is a terminal;', + 'redirecting to /dev/null'); + close STDOUT; + open STDOUT, '>', '/dev/null' + or die "Cannot reopen STDOUT to /dev/null: $OS_ERROR"; + } + if ( -t STDERR ) { + PTDEBUG && _d('No log file and STDERR is a terminal;', + 'redirecting to /dev/null'); + close STDERR; + open STDERR, '>', '/dev/null' + or die "Cannot reopen STDERR to /dev/null: $OS_ERROR"; + } + } + + return; +} + +sub check_PID_file { + my ( $self, $file ) = @_; + my $PID_file = $self ? $self->{PID_file} : $file; + PTDEBUG && _d('Checking PID file', $PID_file); + if ( $PID_file && -f $PID_file ) { + my $pid; + eval { + chomp($pid = (slurp_file($PID_file) || '')); + }; + if ( $EVAL_ERROR ) { + die "The PID file $PID_file already exists but it cannot be read: " + . $EVAL_ERROR; + } + PTDEBUG && _d('PID file exists; it contains PID', $pid); + if ( $pid ) { + my $pid_is_alive = kill 0, $pid; + if ( $pid_is_alive ) { + die "The PID file $PID_file already exists " + . " and the PID that it contains, $pid, is running"; + } + else { + warn "Overwriting PID file $PID_file because the PID that it " + . "contains, $pid, is not running"; + } + } + else { + die "The PID file $PID_file already exists but it does not " + . 
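+         # (kill 0 above sends no signal; it merely tests whether the
+         # process exists and can be signalled.)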
"contain a PID"; + } + } + else { + PTDEBUG && _d('No PID file'); + } + return; +} + +sub make_PID_file { + my ( $self ) = @_; + if ( exists $self->{child} ) { + die "Do not call Daemon::make_PID_file() for daemonized scripts"; + } + $self->_make_PID_file(); + $self->{PID_owner} = $PID; + return; +} + +sub _make_PID_file { + my ( $self ) = @_; + + my $PID_file = $self->{PID_file}; + if ( !$PID_file ) { + PTDEBUG && _d('No PID file to create'); + return; + } + + $self->check_PID_file(); + + open my $PID_FH, '>', $PID_file + or die "Cannot open PID file $PID_file: $OS_ERROR"; + print $PID_FH $PID + or die "Cannot print to PID file $PID_file: $OS_ERROR"; + close $PID_FH + or die "Cannot close PID file $PID_file: $OS_ERROR"; + + PTDEBUG && _d('Created PID file:', $self->{PID_file}); + return; +} + +sub _remove_PID_file { + my ( $self ) = @_; + if ( $self->{PID_file} && -f $self->{PID_file} ) { + unlink $self->{PID_file} + or warn "Cannot remove PID file $self->{PID_file}: $OS_ERROR"; + PTDEBUG && _d('Removed PID file'); + } + else { + PTDEBUG && _d('No PID to remove'); + } + return; +} + +sub DESTROY { + my ( $self ) = @_; + + $self->_remove_PID_file() if ($self->{PID_owner} || 0) == $PID; + + return; +} + +sub slurp_file { + my ($file) = @_; + return unless $file; + open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; + return do { local $/; <$fh> }; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Daemon package +# ########################################################################### + +# ########################################################################### +# SchemaIterator package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/SchemaIterator.pm +# t/lib/SchemaIterator.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package SchemaIterator; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +$Data::Dumper::Indent = 1; +$Data::Dumper::Sortkeys = 1; +$Data::Dumper::Quotekeys = 0; + +my $open_comment = qr{/\*!\d{5} }; +my $tbl_name = qr{ + CREATE\s+ + (?:TEMPORARY\s+)? + TABLE\s+ + (?:IF NOT EXISTS\s+)? 
+ ([^\(]+) +}x; + + +sub new { + my ( $class, %args ) = @_; + my @required_args = qw(OptionParser TableParser Quoter); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + + my ($file_itr, $dbh) = @args{qw(file_itr dbh)}; + die "I need either a dbh or file_itr argument" + if (!$dbh && !$file_itr) || ($dbh && $file_itr); + + my %resume; + if ( my $table = $args{resume} ) { + PTDEBUG && _d('Will resume from or after', $table); + my ($db, $tbl) = $args{Quoter}->split_unquote($table); + die "Resume table must be database-qualified: $table" + unless $db && $tbl; + $resume{db} = $db; + $resume{tbl} = $tbl; + } + + my $self = { + %args, + resume => \%resume, + filters => _make_filters(%args), + }; + + return bless $self, $class; +} + +sub _make_filters { + my ( %args ) = @_; + my @required_args = qw(OptionParser Quoter); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($o, $q) = @args{@required_args}; + + my %filters; + + + my @simple_filters = qw( + databases tables engines + ignore-databases ignore-tables ignore-engines); + FILTER: + foreach my $filter ( @simple_filters ) { + if ( $o->has($filter) ) { + my $objs = $o->get($filter); + next FILTER unless $objs && scalar keys %$objs; + my $is_table = $filter =~ m/table/ ? 1 : 0; + foreach my $obj ( keys %$objs ) { + die "Undefined value for --$filter" unless $obj; + $obj = lc $obj; + if ( $is_table ) { + my ($db, $tbl) = $q->split_unquote($obj); + $db ||= '*'; + PTDEBUG && _d('Filter', $filter, 'value:', $db, $tbl); + $filters{$filter}->{$db}->{$tbl} = 1; + } + else { # database + PTDEBUG && _d('Filter', $filter, 'value:', $obj); + $filters{$filter}->{$obj} = 1; + } + } + } + } + + my @regex_filters = qw( + databases-regex tables-regex + ignore-databases-regex ignore-tables-regex); + REGEX_FILTER: + foreach my $filter ( @regex_filters ) { + if ( $o->has($filter) ) { + my $pat = $o->get($filter); + next REGEX_FILTER unless $pat; + $filters{$filter} = qr/$pat/; + PTDEBUG && _d('Filter', $filter, 'value:', $filters{$filter}); + } + } + + PTDEBUG && _d('Schema object filters:', Dumper(\%filters)); + return \%filters; +} + +sub next { + my ( $self ) = @_; + + if ( !$self->{initialized} ) { + $self->{initialized} = 1; + if ( $self->{resume}->{tbl} ) { + if ( !$self->table_is_allowed(@{$self->{resume}}{qw(db tbl)}) ) { + PTDEBUG && _d('Will resume after', + join('.', @{$self->{resume}}{qw(db tbl)})); + $self->{resume}->{after}->{tbl} = 1; + } + if ( !$self->database_is_allowed($self->{resume}->{db}) ) { + PTDEBUG && _d('Will resume after', $self->{resume}->{db}); + $self->{resume}->{after}->{db} = 1; + } + } + } + + my $schema_obj; + if ( $self->{file_itr} ) { + $schema_obj= $self->_iterate_files(); + } + else { # dbh + $schema_obj= $self->_iterate_dbh(); + } + + if ( $schema_obj ) { + if ( my $schema = $self->{Schema} ) { + $schema->add_schema_object($schema_obj); + } + PTDEBUG && _d('Next schema object:', + $schema_obj->{db}, $schema_obj->{tbl}); + } + + return $schema_obj; +} + +sub _iterate_files { + my ( $self ) = @_; + + if ( !$self->{fh} ) { + my ($fh, $file) = $self->{file_itr}->(); + if ( !$fh ) { + PTDEBUG && _d('No more files to iterate'); + return; + } + $self->{fh} = $fh; + $self->{file} = $file; + } + my $fh = $self->{fh}; + PTDEBUG && _d('Getting next schema object from', $self->{file}); + + local $INPUT_RECORD_SEPARATOR = ''; + CHUNK: + while (defined(my $chunk = <$fh>)) { + if ($chunk =~ m/Database: (\S+)/) { + my $db = $1; # XXX + $db =~ 
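+            # mysqldump writes "-- Current Database: `db`" headers; strip
+            # the backticks before applying the database filters.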
s/^`//; # strip leading ` + $db =~ s/`$//; # and trailing ` + if ( $self->database_is_allowed($db) + && $self->_resume_from_database($db) ) { + $self->{db} = $db; + } + } + elsif ($self->{db} && $chunk =~ m/CREATE TABLE/) { + if ($chunk =~ m/DROP VIEW IF EXISTS/) { + PTDEBUG && _d('Table is a VIEW, skipping'); + next CHUNK; + } + + my ($tbl) = $chunk =~ m/$tbl_name/; + $tbl =~ s/^\s*`//; + $tbl =~ s/`\s*$//; + if ( $self->_resume_from_table($tbl) + && $self->table_is_allowed($self->{db}, $tbl) ) { + my ($ddl) = $chunk =~ m/^(?:$open_comment)?(CREATE TABLE.+?;)$/ms; + if ( !$ddl ) { + warn "Failed to parse CREATE TABLE from\n" . $chunk; + next CHUNK; + } + $ddl =~ s/ \*\/;\Z/;/; # remove end of version comment + my $tbl_struct = $self->{TableParser}->parse($ddl); + if ( $self->engine_is_allowed($tbl_struct->{engine}) ) { + return { + db => $self->{db}, + tbl => $tbl, + name => $self->{Quoter}->quote($self->{db}, $tbl), + ddl => $ddl, + tbl_struct => $tbl_struct, + }; + } + } + } + } # CHUNK + + PTDEBUG && _d('No more schema objects in', $self->{file}); + close $self->{fh}; + $self->{fh} = undef; + + return $self->_iterate_files(); +} + +sub _iterate_dbh { + my ( $self ) = @_; + my $q = $self->{Quoter}; + my $tp = $self->{TableParser}; + my $dbh = $self->{dbh}; + PTDEBUG && _d('Getting next schema object from dbh', $dbh); + + if ( !defined $self->{dbs} ) { + my $sql = 'SHOW DATABASES'; + PTDEBUG && _d($sql); + my @dbs = grep { + $self->_resume_from_database($_) + && + $self->database_is_allowed($_) + } @{$dbh->selectcol_arrayref($sql)}; + PTDEBUG && _d('Found', scalar @dbs, 'databases'); + $self->{dbs} = \@dbs; + } + + DATABASE: + while ( $self->{db} || defined(my $db = shift @{$self->{dbs}}) ) { + if ( !$self->{db} ) { + PTDEBUG && _d('Next database:', $db); + $self->{db} = $db; + } + + if ( !$self->{tbls} ) { + my $sql = 'SHOW /*!50002 FULL*/ TABLES FROM ' . 
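+         # The /*!50002 FULL*/ version comment makes 5.0.2+ servers run
+         # SHOW FULL TABLES, whose Table_type column lets the grep below
+         # skip VIEWs; older servers simply ignore the comment.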
$q->quote($self->{db}); + PTDEBUG && _d($sql); + my @tbls = map { + $_->[0]; # (tbl, type) + } + grep { + my ($tbl, $type) = @$_; + (!$type || ($type ne 'VIEW')) + && $self->_resume_from_table($tbl) + && $self->table_is_allowed($self->{db}, $tbl); + } + + eval { @{$dbh->selectall_arrayref($sql)}; }; + if ($EVAL_ERROR) { + warn "Skipping $self->{db}..."; + $self->{db} = undef; + next; + } + + PTDEBUG && _d('Found', scalar @tbls, 'tables in database',$self->{db}); + $self->{tbls} = \@tbls; + } + + TABLE: + while ( my $tbl = shift @{$self->{tbls}} ) { + my $ddl = eval { $tp->get_create_table($dbh, $self->{db}, $tbl) }; + if ( my $e = $EVAL_ERROR ) { + my $table_name = "$self->{db}.$tbl"; + if ( $e =~ /\QTable '$table_name' doesn't exist/ ) { + PTDEBUG && _d("$table_name no longer exists"); + } + else { + warn "Skipping $table_name because SHOW CREATE TABLE failed: $e"; + } + next TABLE; + } + + my $tbl_struct = $tp->parse($ddl); + if ( $self->engine_is_allowed($tbl_struct->{engine}) ) { + return { + db => $self->{db}, + tbl => $tbl, + name => $q->quote($self->{db}, $tbl), + ddl => $ddl, + tbl_struct => $tbl_struct, + }; + } + } + + PTDEBUG && _d('No more tables in database', $self->{db}); + $self->{db} = undef; + $self->{tbls} = undef; + } # DATABASE + + PTDEBUG && _d('No more databases'); + return; +} + +sub database_is_allowed { + my ( $self, $db ) = @_; + die "I need a db argument" unless $db; + + $db = lc $db; + + my $filter = $self->{filters}; + + if ( $db =~ m/^(information_schema|performance_schema|lost\+found|percona_schema)$/ ) { + PTDEBUG && _d('Database', $db, 'is a system database, ignoring'); + return 0; + } + + if ( $self->{filters}->{'ignore-databases'}->{$db} ) { + PTDEBUG && _d('Database', $db, 'is in --ignore-databases list'); + return 0; + } + + if ( $filter->{'ignore-databases-regex'} + && $db =~ $filter->{'ignore-databases-regex'} ) { + PTDEBUG && _d('Database', $db, 'matches --ignore-databases-regex'); + return 0; + } + + if ( $filter->{'databases'} + && !$filter->{'databases'}->{$db} ) { + PTDEBUG && _d('Database', $db, 'is not in --databases list, ignoring'); + return 0; + } + + if ( $filter->{'databases-regex'} + && $db !~ $filter->{'databases-regex'} ) { + PTDEBUG && _d('Database', $db, 'does not match --databases-regex, ignoring'); + return 0; + } + + return 1; +} + +sub table_is_allowed { + my ( $self, $db, $tbl ) = @_; + die "I need a db argument" unless $db; + die "I need a tbl argument" unless $tbl; + + $db = lc $db; + $tbl = lc $tbl; + + my $filter = $self->{filters}; + + return 0 if $db eq 'mysql' && $tbl =~ m/^(?: + general_log + |gtid_executed + |innodb_index_stats + |innodb_table_stats + |slave_master_info + |slave_relay_log_info + |slave_worker_info + |slow_log + )$/x; + + if ( $filter->{'ignore-tables'}->{'*'}->{$tbl} + || $filter->{'ignore-tables'}->{$db}->{$tbl}) { + PTDEBUG && _d('Table', $tbl, 'is in --ignore-tables list'); + return 0; + } + + if ( $filter->{'ignore-tables-regex'} + && $tbl =~ $filter->{'ignore-tables-regex'} ) { + PTDEBUG && _d('Table', $tbl, 'matches --ignore-tables-regex'); + return 0; + } + + if ( $filter->{'tables'} + && (!$filter->{'tables'}->{'*'}->{$tbl} && !$filter->{'tables'}->{$db}->{$tbl}) ) { + PTDEBUG && _d('Table', $tbl, 'is not in --tables list, ignoring'); + return 0; + } + + if ( $filter->{'tables-regex'} + && $tbl !~ $filter->{'tables-regex'} ) { + PTDEBUG && _d('Table', $tbl, 'does not match --tables-regex, ignoring'); + return 0; + } + + if ( $filter->{'tables'} + && $filter->{'tables'}->{$tbl} + && 
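+        # This flat tbl => db lookup is not what _make_filters() above
+        # produces (it stores {db}{tbl}), so this final check appears to
+        # be kept only for backward compatibility.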
+        $filter->{'tables'}->{$tbl} ne '*'
+        && $filter->{'tables'}->{$tbl} ne $db ) {
+      PTDEBUG && _d('Table', $tbl, 'is only allowed in database',
+         $filter->{'tables'}->{$tbl});
+      return 0;
+   }
+
+   return 1;
+}
+
+sub engine_is_allowed {
+   my ( $self, $engine ) = @_;
+
+   if ( !$engine ) {
+      PTDEBUG && _d('No engine specified; allowing the table');
+      return 1;
+   }
+
+   $engine = lc $engine;
+
+   my $filter = $self->{filters};
+
+   if ( $filter->{'ignore-engines'}->{$engine} ) {
+      PTDEBUG && _d('Engine', $engine, 'is in --ignore-engines list');
+      return 0;
+   }
+
+   if ( $filter->{'engines'}
+        && !$filter->{'engines'}->{$engine} ) {
+      PTDEBUG && _d('Engine', $engine, 'is not in --engines list, ignoring');
+      return 0;
+   }
+
+   return 1;
+}
+
+sub _resume_from_database {
+   my ($self, $db) = @_;
+
+   return 1 unless $self->{resume}->{db};
+   if ( $db eq $self->{resume}->{db} ) {
+      if ( !$self->{resume}->{after}->{db} ) {
+         PTDEBUG && _d('Resuming from db', $db);
+         delete $self->{resume}->{db};
+         return 1;
+      }
+      else {
+         PTDEBUG && _d('Resuming after db', $db);
+         delete $self->{resume}->{db};
+         delete $self->{resume}->{tbl};
+      }
+   }
+
+   return 0;
+}
+
+sub _resume_from_table {
+   my ($self, $tbl) = @_;
+
+   return 1 unless $self->{resume}->{tbl};
+
+   if ( $tbl eq $self->{resume}->{tbl} ) {
+      if ( !$self->{resume}->{after}->{tbl} ) {
+         PTDEBUG && _d('Resuming from table', $tbl);
+         delete $self->{resume}->{tbl};
+         return 1;
+      }
+      else {
+         PTDEBUG && _d('Resuming after table', $tbl);
+         delete $self->{resume}->{tbl};
+      }
+   }
+
+   return 0;
+}
+
+sub _d {
+   my ($package, undef, $line) = caller 0;
+   @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; }
+        map { defined $_ ? $_ : 'undef' }
+        @_;
+   print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
+}
+
+1;
+}
+# ###########################################################################
+# End SchemaIterator package
+# ###########################################################################
+
+# ###########################################################################
+# Transformers package
+# This package is a copy without comments from the original. The original
+# with comments and its test file can be found in the Bazaar repository at,
+# lib/Transformers.pm
+# t/lib/Transformers.t
+# See https://launchpad.net/percona-toolkit for more information.
+# ###########################################################################
+{
+package Transformers;
+
+use strict;
+use warnings FATAL => 'all';
+use English qw(-no_match_vars);
+use constant PTDEBUG => $ENV{PTDEBUG} || 0;
+
+use Time::Local qw(timegm timelocal);
+use Digest::MD5 qw(md5_hex);
+use B qw();
+
+BEGIN {
+   require Exporter;
+   our @ISA         = qw(Exporter);
+   our %EXPORT_TAGS = ();
+   our @EXPORT      = ();
+   our @EXPORT_OK   = qw(
+      micro_t
+      percentage_of
+      secs_to_time
+      time_to_secs
+      shorten
+      ts
+      parse_timestamp
+      unix_timestamp
+      any_unix_timestamp
+      make_checksum
+      crc32
+      encode_json
+   );
+}
+
+our $mysql_ts  = qr/(\d\d)(\d\d)(\d\d) +(\d+):(\d+):(\d+)(\.\d+)?/;
+our $proper_ts = qr/(\d\d\d\d)-(\d\d)-(\d\d)[T ](\d\d):(\d\d):(\d\d)(\.\d+)?/;
+our $n_ts      = qr/(\d{1,5})([shmd]?)/; # Limit \d{1,5} because \d{6} looks like a date
+
+sub micro_t {
+   my ( $t, %args ) = @_;
+   my $p_ms = defined $args{p_ms} ? $args{p_ms} : 0; # precision for ms vals
+   my $p_s  = defined $args{p_s}  ? $args{p_s}  : 0; # precision for s vals
+   my $f;
+
+   $t = 0 if $t < 0;
+
+   $t = sprintf('%.17f', $t) if $t =~ /e/;
+
+   $t =~ s/\.(\d{1,6})\d*/\.$1/;
+
+   if ($t > 0 && $t <= 0.000999) {
+      $f = ($t * 1000000) . 
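+   # The unit is chosen by magnitude, e.g. micro_t(0.0005) eq '500us',
+   # micro_t(0.25) eq '250ms', micro_t(2) eq '2s'.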
'us'; + } + elsif ($t >= 0.001000 && $t <= 0.999999) { + $f = sprintf("%.${p_ms}f", $t * 1000); + $f = ($f * 1) . 'ms'; # * 1 to remove insignificant zeros + } + elsif ($t >= 1) { + $f = sprintf("%.${p_s}f", $t); + $f = ($f * 1) . 's'; # * 1 to remove insignificant zeros + } + else { + $f = 0; # $t should = 0 at this point + } + + return $f; +} + +sub percentage_of { + my ( $is, $of, %args ) = @_; + my $p = $args{p} || 0; # float precision + my $fmt = $p ? "%.${p}f" : "%d"; + return sprintf $fmt, ($is * 100) / ($of ||= 1); +} + +sub secs_to_time { + my ( $secs, $fmt ) = @_; + $secs ||= 0; + return '00:00' unless $secs; + + $fmt ||= $secs >= 86_400 ? 'd' + : $secs >= 3_600 ? 'h' + : 'm'; + + return + $fmt eq 'd' ? sprintf( + "%d+%02d:%02d:%02d", + int($secs / 86_400), + int(($secs % 86_400) / 3_600), + int(($secs % 3_600) / 60), + $secs % 60) + : $fmt eq 'h' ? sprintf( + "%02d:%02d:%02d", + int(($secs % 86_400) / 3_600), + int(($secs % 3_600) / 60), + $secs % 60) + : sprintf( + "%02d:%02d", + int(($secs % 3_600) / 60), + $secs % 60); +} + +sub time_to_secs { + my ( $val, $default_suffix ) = @_; + die "I need a val argument" unless defined $val; + my $t = 0; + my ( $prefix, $num, $suffix ) = $val =~ m/([+-]?)(\d+)([a-z])?$/; + $suffix = $suffix || $default_suffix || 's'; + if ( $suffix =~ m/[smhd]/ ) { + $t = $suffix eq 's' ? $num * 1 # Seconds + : $suffix eq 'm' ? $num * 60 # Minutes + : $suffix eq 'h' ? $num * 3600 # Hours + : $num * 86400; # Days + + $t *= -1 if $prefix && $prefix eq '-'; + } + else { + die "Invalid suffix for $val: $suffix"; + } + return $t; +} + +sub shorten { + my ( $num, %args ) = @_; + my $p = defined $args{p} ? $args{p} : 2; # float precision + my $d = defined $args{d} ? $args{d} : 1_024; # divisor + my $n = 0; + my @units = ('', qw(k M G T P E Z Y)); + while ( $num >= $d && $n < @units - 1 ) { + $num /= $d; + ++$n; + } + return sprintf( + $num =~ m/\./ || $n + ? '%1$.'.$p.'f%2$s' + : '%1$d', + $num, $units[$n]); +} + +sub ts { + my ( $time, $gmt ) = @_; + my ( $sec, $min, $hour, $mday, $mon, $year ) + = $gmt ? gmtime($time) : localtime($time); + $mon += 1; + $year += 1900; + my $val = sprintf("%d-%02d-%02dT%02d:%02d:%02d", + $year, $mon, $mday, $hour, $min, $sec); + if ( my ($us) = $time =~ m/(\.\d+)$/ ) { + $us = sprintf("%.6f", $us); + $us =~ s/^0\././; + $val .= $us; + } + return $val; +} + +sub parse_timestamp { + my ( $val ) = @_; + if ( my($y, $m, $d, $h, $i, $s, $f) + = $val =~ m/^$mysql_ts$/ ) + { + return sprintf "%d-%02d-%02d %02d:%02d:" + . (defined $f ? '%09.6f' : '%02d'), + $y + 2000, $m, $d, $h, $i, (defined $f ? $s + $f : $s); + } + elsif ( $val =~ m/^$proper_ts$/ ) { + return $val; + } + return $val; +} + +sub unix_timestamp { + my ( $val, $gmt ) = @_; + if ( my($y, $m, $d, $h, $i, $s, $us) = $val =~ m/^$proper_ts$/ ) { + $val = $gmt + ? timegm($s, $i, $h, $d, $m - 1, $y) + : timelocal($s, $i, $h, $d, $m - 1, $y); + if ( defined $us ) { + $us = sprintf('%.6f', $us); + $us =~ s/^0\././; + $val .= $us; + } + } + return $val; +} + +sub any_unix_timestamp { + my ( $val, $callback ) = @_; + + if ( my ($n, $suffix) = $val =~ m/^$n_ts$/ ) { + $n = $suffix eq 's' ? $n # Seconds + : $suffix eq 'm' ? $n * 60 # Minutes + : $suffix eq 'h' ? $n * 3600 # Hours + : $suffix eq 'd' ? 
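+                              # Relative N[shmd] values count back from
+                              # now, e.g. "90m" yields time() - 5400.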
$n * 86400 # Days + : $n; # default: Seconds + PTDEBUG && _d('ts is now - N[shmd]:', $n); + return time - $n; + } + elsif ( $val =~ m/^\d{9,}/ ) { + PTDEBUG && _d('ts is already a unix timestamp'); + return $val; + } + elsif ( my ($ymd, $hms) = $val =~ m/^(\d{6})(?:\s+(\d+:\d+:\d+))?/ ) { + PTDEBUG && _d('ts is MySQL slow log timestamp'); + $val .= ' 00:00:00' unless $hms; + return unix_timestamp(parse_timestamp($val)); + } + elsif ( ($ymd, $hms) = $val =~ m/^(\d{4}-\d\d-\d\d)(?:[T ](\d+:\d+:\d+))?/) { + PTDEBUG && _d('ts is properly formatted timestamp'); + $val .= ' 00:00:00' unless $hms; + return unix_timestamp($val); + } + else { + PTDEBUG && _d('ts is MySQL expression'); + return $callback->($val) if $callback && ref $callback eq 'CODE'; + } + + PTDEBUG && _d('Unknown ts type:', $val); + return; +} + +sub make_checksum { + my ( $val ) = @_; + my $checksum = uc substr(md5_hex($val), -16); + PTDEBUG && _d($checksum, 'checksum for', $val); + return $checksum; +} + +sub crc32 { + my ( $string ) = @_; + return unless $string; + my $poly = 0xEDB88320; + my $crc = 0xFFFFFFFF; + foreach my $char ( split(//, $string) ) { + my $comp = ($crc ^ ord($char)) & 0xFF; + for ( 1 .. 8 ) { + $comp = $comp & 1 ? $poly ^ ($comp >> 1) : $comp >> 1; + } + $crc = (($crc >> 8) & 0x00FFFFFF) ^ $comp; + } + return $crc ^ 0xFFFFFFFF; +} + +my $got_json = eval { require JSON }; +sub encode_json { + return JSON::encode_json(@_) if $got_json; + my ( $data ) = @_; + return (object_to_json($data) || ''); +} + + +sub object_to_json { + my ($obj) = @_; + my $type = ref($obj); + + if($type eq 'HASH'){ + return hash_to_json($obj); + } + elsif($type eq 'ARRAY'){ + return array_to_json($obj); + } + else { + return value_to_json($obj); + } +} + +sub hash_to_json { + my ($obj) = @_; + my @res; + for my $k ( sort { $a cmp $b } keys %$obj ) { + push @res, string_to_json( $k ) + . ":" + . ( object_to_json( $obj->{$k} ) || value_to_json( $obj->{$k} ) ); + } + return '{' . ( @res ? join( ",", @res ) : '' ) . '}'; +} + +sub array_to_json { + my ($obj) = @_; + my @res; + + for my $v (@$obj) { + push @res, object_to_json($v) || value_to_json($v); + } + + return '[' . ( @res ? join( ",", @res ) : '' ) . ']'; +} + +sub value_to_json { + my ($value) = @_; + + return 'null' if(!defined $value); + + my $b_obj = B::svref_2object(\$value); # for round trip problem + my $flags = $b_obj->FLAGS; + return $value # as is + if $flags & ( B::SVp_IOK | B::SVp_NOK ) and !( $flags & B::SVp_POK ); # SvTYPE is IV or NV? + + my $type = ref($value); + + if( !$type ) { + return string_to_json($value); + } + else { + return 'null'; + } + +} + +my %esc = ( + "\n" => '\n', + "\r" => '\r', + "\t" => '\t', + "\f" => '\f', + "\b" => '\b', + "\"" => '\"', + "\\" => '\\\\', + "\'" => '\\\'', +); + +sub string_to_json { + my ($arg) = @_; + + $arg =~ s/([\x22\x5c\n\r\t\f\b])/$esc{$1}/g; + $arg =~ s/\//\\\//g; + $arg =~ s/([\x00-\x08\x0b\x0e-\x1f])/'\\u00' . unpack('H2', $1)/eg; + + utf8::upgrade($arg); + utf8::encode($arg); + + return '"' . $arg . '"'; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Transformers package +# ########################################################################### + +# ########################################################################### +# Retry package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/Retry.pm +# t/lib/Retry.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package Retry; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Time::HiRes qw(sleep); + +sub new { + my ( $class, %args ) = @_; + my $self = { + %args, + }; + return bless $self, $class; +} + +sub retry { + my ( $self, %args ) = @_; + my @required_args = qw(try fail final_fail); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + }; + my ($try, $fail, $final_fail) = @args{@required_args}; + my $wait = $args{wait} || sub { sleep 1; }; + my $tries = $args{tries} || 3; + + my $last_error; + my $tryno = 0; + TRY: + while ( ++$tryno <= $tries ) { + PTDEBUG && _d("Try", $tryno, "of", $tries); + my $result; + eval { + $result = $try->(tryno=>$tryno); + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d("Try code failed:", $EVAL_ERROR); + $last_error = $EVAL_ERROR; + + if ( $tryno < $tries ) { # more retries + my $retry = $fail->(tryno=>$tryno, error=>$last_error); + last TRY unless $retry; + PTDEBUG && _d("Calling wait code"); + $wait->(tryno=>$tryno); + } + } + else { + PTDEBUG && _d("Try code succeeded"); + return $result; + } + } + + PTDEBUG && _d('Try code did not succeed'); + return $final_fail->(error=>$last_error); +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End Retry package +# ########################################################################### + +# ########################################################################### +# HTTP::Micro package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/HTTP/Micro.pm +# t/lib/HTTP/Micro.t +# See https://launchpad.net/percona-toolkit for more information. +# ########################################################################### +{ +package HTTP::Micro; + +our $VERSION = '0.01'; + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); +use Carp (); + +my @attributes; +BEGIN { + @attributes = qw(agent timeout); + no strict 'refs'; + for my $accessor ( @attributes ) { + *{$accessor} = sub { + @_ > 1 ? $_[0]->{$accessor} = $_[1] : $_[0]->{$accessor}; + }; + } +} + +sub new { + my($class, %args) = @_; + (my $agent = $class) =~ s{::}{-}g; + my $self = { + agent => $agent . "/" . 
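+      # e.g. "HTTP-Micro/0.01" for this class.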
($class->VERSION || 0), + timeout => 60, + }; + for my $key ( @attributes ) { + $self->{$key} = $args{$key} if exists $args{$key} + } + return bless $self, $class; +} + +my %DefaultPort = ( + http => 80, + https => 443, +); + +sub request { + my ($self, $method, $url, $args) = @_; + @_ == 3 || (@_ == 4 && ref $args eq 'HASH') + or Carp::croak(q/Usage: $http->request(METHOD, URL, [HASHREF])/); + $args ||= {}; # we keep some state in this during _request + + my $response; + for ( 0 .. 1 ) { + $response = eval { $self->_request($method, $url, $args) }; + last unless $@ && $method eq 'GET' + && $@ =~ m{^(?:Socket closed|Unexpected end)}; + } + + if (my $e = "$@") { + $response = { + success => q{}, + status => 599, + reason => 'Internal Exception', + content => $e, + headers => { + 'content-type' => 'text/plain', + 'content-length' => length $e, + } + }; + } + return $response; +} + +sub _request { + my ($self, $method, $url, $args) = @_; + + my ($scheme, $host, $port, $path_query) = $self->_split_url($url); + + my $request = { + method => $method, + scheme => $scheme, + host_port => ($port == $DefaultPort{$scheme} ? $host : "$host:$port"), + uri => $path_query, + headers => {}, + }; + + my $handle = HTTP::Micro::Handle->new(timeout => $self->{timeout}); + + $handle->connect($scheme, $host, $port); + + $self->_prepare_headers_and_cb($request, $args); + $handle->write_request_header(@{$request}{qw/method uri headers/}); + $handle->write_content_body($request) if $request->{content}; + + my $response; + do { $response = $handle->read_response_header } + until (substr($response->{status},0,1) ne '1'); + + if (!($method eq 'HEAD' || $response->{status} =~ /^[23]04/)) { + $response->{content} = ''; + $handle->read_content_body(sub { $_[1]->{content} .= $_[0] }, $response); + } + + $handle->close; + $response->{success} = substr($response->{status},0,1) eq '2'; + return $response; +} + +sub _prepare_headers_and_cb { + my ($self, $request, $args) = @_; + + for ($args->{headers}) { + next unless defined; + while (my ($k, $v) = each %$_) { + $request->{headers}{lc $k} = $v; + } + } + $request->{headers}{'host'} = $request->{host_port}; + $request->{headers}{'connection'} = "close"; + $request->{headers}{'user-agent'} ||= $self->{agent}; + + if (defined $args->{content}) { + $request->{headers}{'content-type'} ||= "application/octet-stream"; + utf8::downgrade($args->{content}, 1) + or Carp::croak(q/Wide character in request message body/); + $request->{headers}{'content-length'} = length $args->{content}; + $request->{content} = $args->{content}; + } + return; +} + +sub _split_url { + my $url = pop; + + my ($scheme, $authority, $path_query) = $url =~ m<\A([^:/?#]+)://([^/?#]*)([^#]*)> + or Carp::croak(qq/Cannot parse URL: '$url'/); + + $scheme = lc $scheme; + $path_query = "/$path_query" unless $path_query =~ m<\A/>; + + my $host = (length($authority)) ? lc $authority : 'localhost'; + $host =~ s/\A[^@]*@//; # userinfo + my $port = do { + $host =~ s/:([0-9]*)\z// && length $1 + ? 
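+      # Default to the scheme's standard port when none is given, e.g.
+      # _split_url('https://v.percona.com') returns
+      # ('https', 'v.percona.com', 443, '/').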
$1 + : $DefaultPort{$scheme} + }; + + return ($scheme, $host, $port, $path_query); +} + +} # HTTP::Micro + +{ + package HTTP::Micro::Handle; + + use strict; + use warnings FATAL => 'all'; + use English qw(-no_match_vars); + + use Carp qw(croak); + use Errno qw(EINTR EPIPE); + use IO::Socket qw(SOCK_STREAM); + + sub BUFSIZE () { 32768 } + + my $Printable = sub { + local $_ = shift; + s/\r/\\r/g; + s/\n/\\n/g; + s/\t/\\t/g; + s/([^\x20-\x7E])/sprintf('\\x%.2X', ord($1))/ge; + $_; + }; + + sub new { + my ($class, %args) = @_; + return bless { + rbuf => '', + timeout => 60, + max_line_size => 16384, + %args + }, $class; + } + + my $ssl_verify_args = { + check_cn => "when_only", + wildcards_in_alt => "anywhere", + wildcards_in_cn => "anywhere" + }; + + sub connect { + @_ == 4 || croak(q/Usage: $handle->connect(scheme, host, port)/); + my ($self, $scheme, $host, $port) = @_; + + if ( $scheme eq 'https' ) { + eval "require IO::Socket::SSL" + unless exists $INC{'IO/Socket/SSL.pm'}; + croak(qq/IO::Socket::SSL must be installed for https support\n/) + unless $INC{'IO/Socket/SSL.pm'}; + } + elsif ( $scheme ne 'http' ) { + croak(qq/Unsupported URL scheme '$scheme'\n/); + } + + $self->{fh} = IO::Socket::INET->new( + PeerHost => $host, + PeerPort => $port, + Proto => 'tcp', + Type => SOCK_STREAM, + Timeout => $self->{timeout} + ) or croak(qq/Could not connect to '$host:$port': $@/); + + binmode($self->{fh}) + or croak(qq/Could not binmode() socket: '$!'/); + + if ( $scheme eq 'https') { + IO::Socket::SSL->start_SSL($self->{fh}); + ref($self->{fh}) eq 'IO::Socket::SSL' + or die(qq/SSL connection failed for $host\n/); + if ( $self->{fh}->can("verify_hostname") ) { + $self->{fh}->verify_hostname( $host, $ssl_verify_args ) + or die(qq/SSL certificate not valid for $host\n/); + } + else { + my $fh = $self->{fh}; + _verify_hostname_of_cert($host, _peer_certificate($fh), $ssl_verify_args) + or die(qq/SSL certificate not valid for $host\n/); + } + } + + $self->{host} = $host; + $self->{port} = $port; + + return $self; + } + + sub close { + @_ == 1 || croak(q/Usage: $handle->close()/); + my ($self) = @_; + CORE::close($self->{fh}) + or croak(qq/Could not close socket: '$!'/); + } + + sub write { + @_ == 2 || croak(q/Usage: $handle->write(buf)/); + my ($self, $buf) = @_; + + my $len = length $buf; + my $off = 0; + + local $SIG{PIPE} = 'IGNORE'; + + while () { + $self->can_write + or croak(q/Timed out while waiting for socket to become ready for writing/); + my $r = syswrite($self->{fh}, $buf, $len, $off); + if (defined $r) { + $len -= $r; + $off += $r; + last unless $len > 0; + } + elsif ($! == EPIPE) { + croak(qq/Socket closed by remote server: $!/); + } + elsif ($! != EINTR) { + croak(qq/Could not write to socket: '$!'/); + } + } + return $off; + } + + sub read { + @_ == 2 || @_ == 3 || croak(q/Usage: $handle->read(len)/); + my ($self, $len) = @_; + + my $buf = ''; + my $got = length $self->{rbuf}; + + if ($got) { + my $take = ($got < $len) ? $got : $len; + $buf = substr($self->{rbuf}, 0, $take, ''); + $len -= $take; + } + + while ($len > 0) { + $self->can_read + or croak(q/Timed out while waiting for socket to become ready for reading/); + my $r = sysread($self->{fh}, $buf, $len, length $buf); + if (defined $r) { + last unless $r; + $len -= $r; + } + elsif ($! 
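+         # A sysread() interrupted by a signal (EINTR) is retried; any
+         # other error is fatal.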
+         != EINTR) {
+            croak(qq/Could not read from socket: '$!'/);
+         }
+      }
+      if ($len) {
+         croak(q/Unexpected end of stream/);
+      }
+      return $buf;
+   }
+
+   sub readline {
+      @_ == 1 || croak(q/Usage: $handle->readline()/);
+      my ($self) = @_;
+
+      while () {
+         if ($self->{rbuf} =~ s/\A ([^\x0D\x0A]* \x0D?\x0A)//x) {
+            return $1;
+         }
+         $self->can_read
+           or croak(q/Timed out while waiting for socket to become ready for reading/);
+         my $r = sysread($self->{fh}, $self->{rbuf}, BUFSIZE, length $self->{rbuf});
+         if (defined $r) {
+            last unless $r;
+         }
+         elsif ($! != EINTR) {
+            croak(qq/Could not read from socket: '$!'/);
+         }
+      }
+      croak(q/Unexpected end of stream while looking for line/);
+   }
+
+   sub read_header_lines {
+      @_ == 1 || @_ == 2 || croak(q/Usage: $handle->read_header_lines([headers])/);
+      my ($self, $headers) = @_;
+      $headers ||= {};
+      my $lines = 0;
+      my $val;
+
+      while () {
+         my $line = $self->readline;
+
+         if ($line =~ /\A ([^\x00-\x1F\x7F:]+) : [\x09\x20]* ([^\x0D\x0A]*)/x) {
+            my ($field_name) = lc $1;
+            $val = \($headers->{$field_name} = $2);
+         }
+         elsif ($line =~ /\A [\x09\x20]+ ([^\x0D\x0A]*)/x) {
+            $val
+              or croak(q/Unexpected header continuation line/);
+            next unless length $1;
+            $$val .= ' ' if length $$val;
+            $$val .= $1;
+         }
+         elsif ($line =~ /\A \x0D?\x0A \z/x) {
+            last;
+         }
+         else {
+            croak(q/Malformed header line: / . $Printable->($line));
+         }
+      }
+      return $headers;
+   }
+
+   sub write_header_lines {
+      (@_ == 2 && ref $_[1] eq 'HASH') || croak(q/Usage: $handle->write_header_lines(headers)/);
+      my($self, $headers) = @_;
+
+      my $buf = '';
+      while (my ($k, $v) = each %$headers) {
+         my $field_name = lc $k;
+         $field_name =~ /\A [\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5A\x5E-\x7A\x7C\x7E]+ \z/x
+            or croak(q/Invalid HTTP header field name: / . $Printable->($field_name));
+         $field_name =~ s/\b(\w)/\u$1/g;
+         $buf .= "$field_name: $v\x0D\x0A";
+      }
+      $buf .= "\x0D\x0A";
+      return $self->write($buf);
+   }
+
+   sub read_content_body {
+      @_ == 3 || @_ == 4 || croak(q/Usage: $handle->read_content_body(callback, response, [read_length])/);
+      my ($self, $cb, $response, $len) = @_;
+      $len ||= $response->{headers}{'content-length'};
+
+      croak("No content-length in the returned response, and this "
+         . "UA doesn't implement chunking") unless defined $len;
+
+      while ($len > 0) {
+         my $read = ($len > BUFSIZE) ? BUFSIZE : $len;
+         $cb->($self->read($read), $response);
+         $len -= $read;
+      }
+
+      return;
+   }
+
+   sub write_content_body {
+      @_ == 2 || croak(q/Usage: $handle->write_content_body(request)/);
+      my ($self, $request) = @_;
+      my ($len, $content_length) = (0, $request->{headers}{'content-length'});
+
+      $len += $self->write($request->{content});
+
+      $len == $content_length
+        or croak(qq/Content-Length mismatch (got: $len expected: $content_length)/);
+
+      return $len;
+   }
+
+   sub read_response_header {
+      @_ == 1 || croak(q/Usage: $handle->read_response_header()/);
+      my ($self) = @_;
+
+      my $line = $self->readline;
+
+      $line =~ /\A (HTTP\/(0*\d+\.0*\d+)) [\x09\x20]+ ([0-9]{3}) [\x09\x20]+ ([^\x0D\x0A]*) \x0D?\x0A/x
+         or croak(q/Malformed Status-Line: / . 
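+      # e.g. "HTTP/1.1 200 OK" splits into protocol, version, status and
+      # reason below.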
$Printable->($line)); + + my ($protocol, $version, $status, $reason) = ($1, $2, $3, $4); + + return { + status => $status, + reason => $reason, + headers => $self->read_header_lines, + protocol => $protocol, + }; + } + + sub write_request_header { + @_ == 4 || croak(q/Usage: $handle->write_request_header(method, request_uri, headers)/); + my ($self, $method, $request_uri, $headers) = @_; + + return $self->write("$method $request_uri HTTP/1.1\x0D\x0A") + + $self->write_header_lines($headers); + } + + sub _do_timeout { + my ($self, $type, $timeout) = @_; + $timeout = $self->{timeout} + unless defined $timeout && $timeout >= 0; + + my $fd = fileno $self->{fh}; + defined $fd && $fd >= 0 + or croak(q/select(2): 'Bad file descriptor'/); + + my $initial = time; + my $pending = $timeout; + my $nfound; + + vec(my $fdset = '', $fd, 1) = 1; + + while () { + $nfound = ($type eq 'read') + ? select($fdset, undef, undef, $pending) + : select(undef, $fdset, undef, $pending) ; + if ($nfound == -1) { + $! == EINTR + or croak(qq/select(2): '$!'/); + redo if !$timeout || ($pending = $timeout - (time - $initial)) > 0; + $nfound = 0; + } + last; + } + $! = 0; + return $nfound; + } + + sub can_read { + @_ == 1 || @_ == 2 || croak(q/Usage: $handle->can_read([timeout])/); + my $self = shift; + return $self->_do_timeout('read', @_) + } + + sub can_write { + @_ == 1 || @_ == 2 || croak(q/Usage: $handle->can_write([timeout])/); + my $self = shift; + return $self->_do_timeout('write', @_) + } +} # HTTP::Micro::Handle + +my $prog = <<'EOP'; +BEGIN { + if ( defined &IO::Socket::SSL::CAN_IPV6 ) { + *CAN_IPV6 = \*IO::Socket::SSL::CAN_IPV6; + } + else { + constant->import( CAN_IPV6 => '' ); + } + my %const = ( + NID_CommonName => 13, + GEN_DNS => 2, + GEN_IPADD => 7, + ); + while ( my ($name,$value) = each %const ) { + no strict 'refs'; + *{$name} = UNIVERSAL::can( 'Net::SSLeay', $name ) || sub { $value }; + } +} +{ + use Carp qw(croak); + my %dispatcher = ( + issuer => sub { Net::SSLeay::X509_NAME_oneline( Net::SSLeay::X509_get_issuer_name( shift )) }, + subject => sub { Net::SSLeay::X509_NAME_oneline( Net::SSLeay::X509_get_subject_name( shift )) }, + ); + if ( $Net::SSLeay::VERSION >= 1.30 ) { + $dispatcher{commonName} = sub { + my $cn = Net::SSLeay::X509_NAME_get_text_by_NID( + Net::SSLeay::X509_get_subject_name( shift ), NID_CommonName); + $cn =~s{\0$}{}; # work around Bug in Net::SSLeay <1.33 + $cn; + } + } else { + $dispatcher{commonName} = sub { + croak "you need at least Net::SSLeay version 1.30 for getting commonName" + } + } + + if ( $Net::SSLeay::VERSION >= 1.33 ) { + $dispatcher{subjectAltNames} = sub { Net::SSLeay::X509_get_subjectAltNames( shift ) }; + } else { + $dispatcher{subjectAltNames} = sub { + return; + }; + } + + $dispatcher{authority} = $dispatcher{issuer}; + $dispatcher{owner} = $dispatcher{subject}; + $dispatcher{cn} = $dispatcher{commonName}; + + sub _peer_certificate { + my ($self, $field) = @_; + my $ssl = $self->_get_ssl_object or return; + + my $cert = ${*$self}{_SSL_certificate} + ||= Net::SSLeay::get_peer_certificate($ssl) + or return $self->error("Could not retrieve peer certificate"); + + if ($field) { + my $sub = $dispatcher{$field} or croak + "invalid argument for peer_certificate, valid are: ".join( " ",keys %dispatcher ). 
+ "\nMaybe you need to upgrade your Net::SSLeay"; + return $sub->($cert); + } else { + return $cert + } + } + + + my %scheme = ( + ldap => { + wildcards_in_cn => 0, + wildcards_in_alt => 'leftmost', + check_cn => 'always', + }, + http => { + wildcards_in_cn => 'anywhere', + wildcards_in_alt => 'anywhere', + check_cn => 'when_only', + }, + smtp => { + wildcards_in_cn => 0, + wildcards_in_alt => 0, + check_cn => 'always' + }, + none => {}, # do not check + ); + + $scheme{www} = $scheme{http}; # alias + $scheme{xmpp} = $scheme{http}; # rfc 3920 + $scheme{pop3} = $scheme{ldap}; # rfc 2595 + $scheme{imap} = $scheme{ldap}; # rfc 2595 + $scheme{acap} = $scheme{ldap}; # rfc 2595 + $scheme{nntp} = $scheme{ldap}; # rfc 4642 + $scheme{ftp} = $scheme{http}; # rfc 4217 + + + sub _verify_hostname_of_cert { + my $identity = shift; + my $cert = shift; + my $scheme = shift || 'none'; + if ( ! ref($scheme) ) { + $scheme = $scheme{$scheme} or croak "scheme $scheme not defined"; + } + + return 1 if ! %$scheme; # 'none' + + my $commonName = $dispatcher{cn}->($cert); + my @altNames = $dispatcher{subjectAltNames}->($cert); + + if ( my $sub = $scheme->{callback} ) { + return $sub->($identity,$commonName,@altNames); + } + + + my $ipn; + if ( CAN_IPV6 and $identity =~m{:} ) { + $ipn = IO::Socket::SSL::inet_pton(IO::Socket::SSL::AF_INET6,$identity) + or croak "'$identity' is not IPv6, but neither IPv4 nor hostname"; + } elsif ( $identity =~m{^\d+\.\d+\.\d+\.\d+$} ) { + $ipn = IO::Socket::SSL::inet_aton( $identity ) or croak "'$identity' is not IPv4, but neither IPv6 nor hostname"; + } else { + if ( $identity =~m{[^a-zA-Z0-9_.\-]} ) { + $identity =~m{\0} and croak("name '$identity' has \\0 byte"); + $identity = IO::Socket::SSL::idn_to_ascii($identity) or + croak "Warning: Given name '$identity' could not be converted to IDNA!"; + } + } + + my $check_name = sub { + my ($name,$identity,$wtyp) = @_; + $wtyp ||= ''; + my $pattern; + if ( $wtyp eq 'anywhere' and $name =~m{^([a-zA-Z0-9_\-]*)\*(.+)} ) { + $pattern = qr{^\Q$1\E[a-zA-Z0-9_\-]*\Q$2\E$}i; + } elsif ( $wtyp eq 'leftmost' and $name =~m{^\*(\..+)$} ) { + $pattern = qr{^[a-zA-Z0-9_\-]*\Q$1\E$}i; + } else { + $pattern = qr{^\Q$name\E$}i; + } + return $identity =~ $pattern; + }; + + my $alt_dnsNames = 0; + while (@altNames) { + my ($type, $name) = splice (@altNames, 0, 2); + if ( $ipn and $type == GEN_IPADD ) { + return 1 if $ipn eq $name; + + } elsif ( ! $ipn and $type == GEN_DNS ) { + $name =~s/\s+$//; $name =~s/^\s+//; + $alt_dnsNames++; + $check_name->($name,$identity,$scheme->{wildcards_in_alt}) + and return 1; + } + } + + if ( ! $ipn and ( + $scheme->{check_cn} eq 'always' or + $scheme->{check_cn} eq 'when_only' and !$alt_dnsNames)) { + $check_name->($commonName,$identity,$scheme->{wildcards_in_cn}) + and return 1; + } + + return 0; # no match + } +} +EOP + +eval { require IO::Socket::SSL }; +if ( $INC{"IO/Socket/SSL.pm"} ) { + eval $prog; + die $@ if $@; +} + +1; +# ########################################################################### +# End HTTP::Micro package +# ########################################################################### + +# ########################################################################### +# VersionCheck package +# This package is a copy without comments from the original. The original +# with comments and its test file can be found in the Bazaar repository at, +# lib/VersionCheck.pm +# t/lib/VersionCheck.t +# See https://launchpad.net/percona-toolkit for more information. 
+# ########################################################################### +{ +package VersionCheck; + + +use strict; +use warnings FATAL => 'all'; +use English qw(-no_match_vars); + +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +use Data::Dumper; +local $Data::Dumper::Indent = 1; +local $Data::Dumper::Sortkeys = 1; +local $Data::Dumper::Quotekeys = 0; + +use Digest::MD5 qw(md5_hex); +use Sys::Hostname qw(hostname); +use File::Basename qw(); +use File::Spec; +use FindBin qw(); + +eval { + require Percona::Toolkit; + require HTTP::Micro; +}; + +my $home = $ENV{HOME} || $ENV{HOMEPATH} || $ENV{USERPROFILE} || '.'; +my @vc_dirs = ( + '/etc/percona', + '/etc/percona-toolkit', + '/tmp', + "$home", +); + +{ + my $file = 'percona-version-check'; + + sub version_check_file { + foreach my $dir ( @vc_dirs ) { + if ( -d $dir && -w $dir ) { + PTDEBUG && _d('Version check file', $file, 'in', $dir); + return $dir . '/' . $file; + } + } + PTDEBUG && _d('Version check file', $file, 'in', $ENV{PWD}); + return $file; # in the CWD + } +} + +sub version_check_time_limit { + return 60 * 60 * 24; # one day +} + + +sub version_check { + my (%args) = @_; + + my $instances = $args{instances} || []; + my $instances_to_check; + + PTDEBUG && _d('FindBin::Bin:', $FindBin::Bin); + if ( !$args{force} ) { + if ( $FindBin::Bin + && (-d "$FindBin::Bin/../.bzr" || + -d "$FindBin::Bin/../../.bzr" || + -d "$FindBin::Bin/../.git" || + -d "$FindBin::Bin/../../.git" + ) + ) { + PTDEBUG && _d("$FindBin::Bin/../.bzr disables --version-check"); + return; + } + } + + eval { + foreach my $instance ( @$instances ) { + my ($name, $id) = get_instance_id($instance); + $instance->{name} = $name; + $instance->{id} = $id; + } + + push @$instances, { name => 'system', id => 0 }; + + $instances_to_check = get_instances_to_check( + instances => $instances, + vc_file => $args{vc_file}, # testing + now => $args{now}, # testing + ); + PTDEBUG && _d(scalar @$instances_to_check, 'instances to check'); + return unless @$instances_to_check; + + my $protocol = 'https'; + eval { require IO::Socket::SSL; }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + PTDEBUG && _d("SSL not available, won't run version_check"); + return; + } + PTDEBUG && _d('Using', $protocol); + + my $advice = pingback( + instances => $instances_to_check, + protocol => $protocol, + url => $args{url} # testing + || $ENV{PERCONA_VERSION_CHECK_URL} # testing + || "$protocol://v.percona.com", + ); + if ( $advice ) { + PTDEBUG && _d('Advice:', Dumper($advice)); + if ( scalar @$advice > 1) { + print "\n# " . scalar @$advice . " software updates are " + . "available:\n"; + } + else { + print "\n# A software update is available:\n"; + } + print join("\n", map { "# * $_" } @$advice), "\n\n"; + } + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Version check failed:', $EVAL_ERROR); + } + + if ( @$instances_to_check ) { + eval { + update_check_times( + instances => $instances_to_check, + vc_file => $args{vc_file}, # testing + now => $args{now}, # testing + ); + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Error updating version check file:', $EVAL_ERROR); + } + } + + if ( $ENV{PTDEBUG_VERSION_CHECK} ) { + warn "Exiting because the PTDEBUG_VERSION_CHECK " + . 
"environment variable is defined.\n"; + exit 255; + } + + return; +} + +sub get_instances_to_check { + my (%args) = @_; + + my $instances = $args{instances}; + my $now = $args{now} || int(time); + my $vc_file = $args{vc_file} || version_check_file(); + + if ( !-f $vc_file ) { + PTDEBUG && _d('Version check file', $vc_file, 'does not exist;', + 'version checking all instances'); + return $instances; + } + + open my $fh, '<', $vc_file or die "Cannot open $vc_file: $OS_ERROR"; + chomp(my $file_contents = do { local $/ = undef; <$fh> }); + PTDEBUG && _d('Version check file', $vc_file, 'contents:', $file_contents); + close $fh; + my %last_check_time_for = $file_contents =~ /^([^,]+),(.+)$/mg; + + my $check_time_limit = version_check_time_limit(); + my @instances_to_check; + foreach my $instance ( @$instances ) { + my $last_check_time = $last_check_time_for{ $instance->{id} }; + PTDEBUG && _d('Intsance', $instance->{id}, 'last checked', + $last_check_time, 'now', $now, 'diff', $now - ($last_check_time || 0), + 'hours until next check', + sprintf '%.2f', + ($check_time_limit - ($now - ($last_check_time || 0))) / 3600); + if ( !defined $last_check_time + || ($now - $last_check_time) >= $check_time_limit ) { + PTDEBUG && _d('Time to check', Dumper($instance)); + push @instances_to_check, $instance; + } + } + + return \@instances_to_check; +} + +sub update_check_times { + my (%args) = @_; + + my $instances = $args{instances}; + my $now = $args{now} || int(time); + my $vc_file = $args{vc_file} || version_check_file(); + PTDEBUG && _d('Updating last check time:', $now); + + my %all_instances = map { + $_->{id} => { name => $_->{name}, ts => $now } + } @$instances; + + if ( -f $vc_file ) { + open my $fh, '<', $vc_file or die "Cannot read $vc_file: $OS_ERROR"; + my $contents = do { local $/ = undef; <$fh> }; + close $fh; + + foreach my $line ( split("\n", ($contents || '')) ) { + my ($id, $ts) = split(',', $line); + if ( !exists $all_instances{$id} ) { + $all_instances{$id} = { ts => $ts }; # original ts, not updated + } + } + } + + open my $fh, '>', $vc_file or die "Cannot write to $vc_file: $OS_ERROR"; + foreach my $id ( sort keys %all_instances ) { + PTDEBUG && _d('Updated:', $id, Dumper($all_instances{$id})); + print { $fh } $id . ',' . $all_instances{$id}->{ts} . "\n"; + } + close $fh; + + return; +} + +sub get_instance_id { + my ($instance) = @_; + + my $dbh = $instance->{dbh}; + my $dsn = $instance->{dsn}; + + my $sql = q{SELECT CONCAT(@@hostname, @@port)}; + PTDEBUG && _d($sql); + my ($name) = eval { $dbh->selectrow_array($sql) }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + $sql = q{SELECT @@hostname}; + PTDEBUG && _d($sql); + ($name) = eval { $dbh->selectrow_array($sql) }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d($EVAL_ERROR); + $name = ($dsn->{h} || 'localhost') . ($dsn->{P} || 3306); + } + else { + $sql = q{SHOW VARIABLES LIKE 'port'}; + PTDEBUG && _d($sql); + my (undef, $port) = eval { $dbh->selectrow_array($sql) }; + PTDEBUG && _d('port:', $port); + $name .= $port || ''; + } + } + my $id = md5_hex($name); + + PTDEBUG && _d('MySQL instance:', $id, $name, Dumper($dsn)); + + return $name, $id; +} + + +sub get_uuid { + my $uuid_file = '/.percona-toolkit.uuid'; + foreach my $dir (@vc_dirs) { + my $filename = $dir.$uuid_file; + my $uuid=_read_uuid($filename); + return $uuid if $uuid; + } + + my $filename = $ENV{"HOME"} . 
$uuid_file; + my $uuid = _generate_uuid(); + + open(my $fh, '>', $filename) or die "Could not open file '$filename' $!"; + print $fh $uuid; + close $fh; + + return $uuid; +} + +sub _generate_uuid { + return sprintf+($}="%04x")."$}-$}-$}-$}-".$}x3,map rand 65537,0..7; +} + +sub _read_uuid { + my $filename = shift; + my $fh; + + eval { + open($fh, '<:encoding(UTF-8)', $filename); + }; + return if ($EVAL_ERROR); + + my $uuid; + eval { $uuid = <$fh>; }; + return if ($EVAL_ERROR); + + chomp $uuid; + return $uuid; +} + + +sub pingback { + my (%args) = @_; + my @required_args = qw(url instances); + foreach my $arg ( @required_args ) { + die "I need a $arg arugment" unless $args{$arg}; + } + my $url = $args{url}; + my $instances = $args{instances}; + + my $ua = $args{ua} || HTTP::Micro->new( timeout => 3 ); + + my $response = $ua->request('GET', $url); + PTDEBUG && _d('Server response:', Dumper($response)); + die "No response from GET $url" + if !$response; + die("GET on $url returned HTTP status $response->{status}; expected 200\n", + ($response->{content} || '')) if $response->{status} != 200; + die("GET on $url did not return any programs to check") + if !$response->{content}; + + my $items = parse_server_response( + response => $response->{content} + ); + die "Failed to parse server requested programs: $response->{content}" + if !scalar keys %$items; + + my $versions = get_versions( + items => $items, + instances => $instances, + ); + die "Failed to get any program versions; should have at least gotten Perl" + if !scalar keys %$versions; + + my $client_content = encode_client_response( + items => $items, + versions => $versions, + general_id => get_uuid(), + ); + + my $client_response = { + headers => { "X-Percona-Toolkit-Tool" => File::Basename::basename($0) }, + content => $client_content, + }; + PTDEBUG && _d('Client response:', Dumper($client_response)); + + $response = $ua->request('POST', $url, $client_response); + PTDEBUG && _d('Server suggestions:', Dumper($response)); + die "No response from POST $url $client_response" + if !$response; + die "POST $url returned HTTP status $response->{status}; expected 200" + if $response->{status} != 200; + + return unless $response->{content}; + + $items = parse_server_response( + response => $response->{content}, + split_vars => 0, + ); + die "Failed to parse server suggestions: $response->{content}" + if !scalar keys %$items; + my @suggestions = map { $_->{vars} } + sort { $a->{item} cmp $b->{item} } + values %$items; + + return \@suggestions; +} + +sub encode_client_response { + my (%args) = @_; + my @required_args = qw(items versions general_id); + foreach my $arg ( @required_args ) { + die "I need a $arg arugment" unless $args{$arg}; + } + my ($items, $versions, $general_id) = @args{@required_args}; + + my @lines; + foreach my $item ( sort keys %$items ) { + next unless exists $versions->{$item}; + if ( ref($versions->{$item}) eq 'HASH' ) { + my $mysql_versions = $versions->{$item}; + for my $id ( sort keys %$mysql_versions ) { + push @lines, join(';', $id, $item, $mysql_versions->{$id}); + } + } + else { + push @lines, join(';', $general_id, $item, $versions->{$item}); + } + } + + my $client_response = join("\n", @lines) . 
"\n"; + return $client_response; +} + +sub parse_server_response { + my (%args) = @_; + my @required_args = qw(response); + foreach my $arg ( @required_args ) { + die "I need a $arg arugment" unless $args{$arg}; + } + my ($response) = @args{@required_args}; + + my %items = map { + my ($item, $type, $vars) = split(";", $_); + if ( !defined $args{split_vars} || $args{split_vars} ) { + $vars = [ split(",", ($vars || '')) ]; + } + $item => { + item => $item, + type => $type, + vars => $vars, + }; + } split("\n", $response); + + PTDEBUG && _d('Items:', Dumper(\%items)); + + return \%items; +} + +my %sub_for_type = ( + os_version => \&get_os_version, + perl_version => \&get_perl_version, + perl_module_version => \&get_perl_module_version, + mysql_variable => \&get_mysql_variable, +); + +sub valid_item { + my ($item) = @_; + return unless $item; + if ( !exists $sub_for_type{ $item->{type} } ) { + PTDEBUG && _d('Invalid type:', $item->{type}); + return 0; + } + return 1; +} + +sub get_versions { + my (%args) = @_; + my @required_args = qw(items); + foreach my $arg ( @required_args ) { + die "I need a $arg arugment" unless $args{$arg}; + } + my ($items) = @args{@required_args}; + + my %versions; + foreach my $item ( values %$items ) { + next unless valid_item($item); + eval { + my $version = $sub_for_type{ $item->{type} }->( + item => $item, + instances => $args{instances}, + ); + if ( $version ) { + chomp $version unless ref($version); + $versions{$item->{item}} = $version; + } + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d('Error getting version for', Dumper($item), $EVAL_ERROR); + } + } + + return \%versions; +} + + +sub get_os_version { + if ( $OSNAME eq 'MSWin32' ) { + require Win32; + return Win32::GetOSDisplayName(); + } + + chomp(my $platform = `uname -s`); + PTDEBUG && _d('platform:', $platform); + return $OSNAME unless $platform; + + chomp(my $lsb_release + = `which lsb_release 2>/dev/null | awk '{print \$1}'` || ''); + PTDEBUG && _d('lsb_release:', $lsb_release); + + my $release = ""; + + if ( $platform eq 'Linux' ) { + if ( -f "/etc/fedora-release" ) { + $release = `cat /etc/fedora-release`; + } + elsif ( -f "/etc/redhat-release" ) { + $release = `cat /etc/redhat-release`; + } + elsif ( -f "/etc/system-release" ) { + $release = `cat /etc/system-release`; + } + elsif ( $lsb_release ) { + $release = `$lsb_release -ds`; + } + elsif ( -f "/etc/lsb-release" ) { + $release = `grep DISTRIB_DESCRIPTION /etc/lsb-release`; + $release =~ s/^\w+="([^"]+)".+/$1/; + } + elsif ( -f "/etc/debian_version" ) { + chomp(my $rel = `cat /etc/debian_version`); + $release = "Debian $rel"; + if ( -f "/etc/apt/sources.list" ) { + chomp(my $code_name = `awk '/^deb/ {print \$3}' /etc/apt/sources.list | awk -F/ '{print \$1}'| awk 'BEGIN {FS="|"} {print \$1}' | sort | uniq -c | sort -rn | head -n1 | awk '{print \$2}'`); + $release .= " ($code_name)" if $code_name; + } + } + elsif ( -f "/etc/os-release" ) { # openSUSE + chomp($release = `grep PRETTY_NAME /etc/os-release`); + $release =~ s/^PRETTY_NAME="(.+)"$/$1/; + } + elsif ( `ls /etc/*release 2>/dev/null` ) { + if ( `grep DISTRIB_DESCRIPTION /etc/*release 2>/dev/null` ) { + $release = `grep DISTRIB_DESCRIPTION /etc/*release | head -n1`; + } + else { + $release = `cat /etc/*release | head -n1`; + } + } + } + elsif ( $platform =~ m/(?:BSD|^Darwin)$/ ) { + my $rel = `uname -r`; + $release = "$platform $rel"; + } + elsif ( $platform eq "SunOS" ) { + my $rel = `head -n1 /etc/release` || `uname -r`; + $release = "$platform $rel"; + } + + if ( !$release ) { + PTDEBUG && 
_d('Failed to get the release, using platform'); + $release = $platform; + } + chomp($release); + + $release =~ s/^"|"$//g; + + PTDEBUG && _d('OS version =', $release); + return $release; +} + +sub get_perl_version { + my (%args) = @_; + my $item = $args{item}; + return unless $item; + + my $version = sprintf '%vd', $PERL_VERSION; + PTDEBUG && _d('Perl version', $version); + return $version; +} + +sub get_perl_module_version { + my (%args) = @_; + my $item = $args{item}; + return unless $item; + + my $var = '$' . $item->{item} . '::VERSION'; + my $version = eval "use $item->{item}; $var;"; + PTDEBUG && _d('Perl version for', $var, '=', $version); + return $version; +} + +sub get_mysql_variable { + return get_from_mysql( + show => 'VARIABLES', + @_, + ); +} + +sub get_from_mysql { + my (%args) = @_; + my $show = $args{show}; + my $item = $args{item}; + my $instances = $args{instances}; + return unless $show && $item; + + if ( !$instances || !@$instances ) { + PTDEBUG && _d('Cannot check', $item, + 'because there are no MySQL instances'); + return; + } + + if ($item->{item} eq 'MySQL' && $item->{type} eq 'mysql_variable') { + @{$item->{vars}} = grep { $_ eq 'version' || $_ eq 'version_comment' } @{$item->{vars}}; + } + + + my @versions; + my %version_for; + foreach my $instance ( @$instances ) { + next unless $instance->{id}; # special system instance has id=0 + my $dbh = $instance->{dbh}; + local $dbh->{FetchHashKeyName} = 'NAME_lc'; + my $sql = qq/SHOW $show/; + PTDEBUG && _d($sql); + my $rows = $dbh->selectall_hashref($sql, 'variable_name'); + + my @versions; + foreach my $var ( @{$item->{vars}} ) { + $var = lc($var); + my $version = $rows->{$var}->{value}; + PTDEBUG && _d('MySQL version for', $item->{item}, '=', $version, + 'on', $instance->{name}); + push @versions, $version; + } + $version_for{ $instance->{id} } = join(' ', @versions); + } + + return \%version_for; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? $_ : 'undef' } + @_; + print STDERR "# $package:$line $PID ", join(' ', @_), "\n"; +} + +1; +} +# ########################################################################### +# End VersionCheck package +# ########################################################################### + +# ########################################################################### +# This is a combination of modules and programs in one -- a runnable module. +# http://www.perl.com/pub/a/2006/07/13/lightning-articles.html?page=last +# Or, look it up in the Camel book on pages 642 and 643 in the 3rd edition. +# +# Check at the end of this package for the call to main() which actually runs +# the program. +# ########################################################################### +package pt_table_sync; + +use English qw(-no_match_vars); +use List::Util qw(sum max min); +use POSIX qw(ceil); +use Data::Dumper; + +Transformers->import(qw(time_to_secs any_unix_timestamp)); + +use Percona::Toolkit; +use constant PTDEBUG => $ENV{PTDEBUG} || 0; + +$OUTPUT_AUTOFLUSH = 1; + +my %dsn_for; +my $q = new Quoter(); + +sub main { + local @ARGV = @_; # set global ARGV for this package + + # Reset global vars else tests will have weird results. + %dsn_for = (); + + # ######################################################################## + # Get configuration information. 
+ # ######################################################################## + my $o = new OptionParser(); + $o->get_specs(); + $o->get_opts(); + + my $dp = $o->DSNParser(); + $dp->prop('set-vars', $o->set_vars()); + + if ( $o->get('replicate') || $o->get('sync-to-master') ) { + $o->set('wait', 60) unless $o->got('wait'); + } + if ( $o->get('wait') ) { + $o->set('lock', 1) unless $o->got('lock'); + } + if ( $o->get('dry-run') ) { + $o->set('verbose', 1); + } + + # There's a conflict of interests: we added 't' and 'D' parts to dp, + # and there are -t and -D options (--tables, --databases), so parse_options() + # is going to return a DSN with the default values from -t and -D, + # but these are not actually default DSN values, they're filters. + # So we have to remove them from $dsn_defaults. + my $dsn_defaults = $dp->parse_options($o); + $dsn_defaults->{D} = undef; + $dsn_defaults->{t} = undef; + + my @dsns; + while ( my $arg = shift(@ARGV) ) { + my $dsn = $dp->parse($arg, $dsns[0], $dsn_defaults); + die "You specified a t part, but not a D part in $arg" + if ($dsn->{t} && !$dsn->{D}); + if ( $dsn->{D} && !$dsn->{t} ) { + die "You specified a database but not a table in $arg. Are you " + . "trying to sync only tables in the '$dsn->{D}' database? " + . "If so, use '--databases $dsn->{D}' instead.\n"; + } + push @dsns, $dsn; + } + + if ( !@dsns + || (@dsns ==1 && !$o->get('replicate') && !$o->get('sync-to-master'))) { + $o->save_error('At least one DSN is required, and at least two are ' + . 'required unless --sync-to-master or --replicate is specified'); + } + + if ( @dsns > 1 && $o->get('sync-to-master') && $o->get('replicate') ) { + $o->save_error('--sync-to-master and --replicate require only one DSN ', + ' but ', scalar @dsns, ' were given'); + } + + if ( $o->get('lock-and-rename') ) { + if ( @dsns != 2 || !$dsns[0]->{t} || !$dsns[1]->{t} ) { + $o->save_error("--lock-and-rename requires exactly two DSNs and they " + . "must each specify a table."); + } + } + + if ( $o->get('bidirectional') ) { + if ( $o->get('replicate') || $o->get('sync-to-master') ) { + $o->save_error('--bidirectional does not work with ' + . '--replicate or --sync-to-master'); + } + if ( @dsns < 2 ) { + $o->save_error('--bidirectional requires at least two DSNs'); + } + if ( !$o->get('conflict-column') || !$o->get('conflict-comparison') ) { + $o->save_error('--bidirectional requires --conflict-column ' + . 'and --conflict-comparison'); + } + my $cc = $o->get('conflict-comparison'); + my $cmp = $o->read_para_after(__FILE__, qr/MAGIC_comparisons/); + $cmp =~ s/ //g; + if ( $cc && $cc !~ m/$cmp/ ) { + $o->save_error("--conflict-comparison must be one of $cmp"); + } + if ( $cc && $cc =~ m/equals|matches/ && !$o->get('conflict-value') ) { + $o->save_error("--conflict-comparison $cc requires --conflict-value") + } + + # Override --algorithms because only TableSyncChunk works with + # bidirectional syncing. + $o->set('algorithms', 'Chunk'); + $o->set('buffer-to-client', 0); + } + + if ( $o->get('explain-hosts') ) { + foreach my $host ( @dsns ) { + print "# DSN: ", $dp->as_string($host), "\n"; + } + return 0; + } + + eval { + MasterSlave::check_recursion_method($o->get('recursion-method')); + }; + if ( $EVAL_ERROR ) { + $o->save_error("Invalid --recursion-method: $EVAL_ERROR") + } + + $o->usage_or_errors(); + + # ######################################################################## + # If --pid, check it first since we'll die if it already exists. 
+ # ######################################################################## + my $daemon; + if ( $o->get('pid') ) { + # We're not daemonizing, it just handles PID stuff. Keep $daemon + # in the scope of main() because when it's destroyed it automatically + # removes the PID file. + $daemon = new Daemon(o=>$o); + $daemon->make_PID_file(); + } + + # ######################################################################## + # Do the work. + # ######################################################################## + my $tp = new TableParser( Quoter => $q ); + my $ms = new MasterSlave(OptionParser=>$o,DSNParser=>$dp,Quoter=>$q, channel=>$o->get('channel')); + my $rt = new Retry(); + my $chunker = new TableChunker( Quoter => $q, TableParser => $tp ); + my $nibbler = new TableNibbler( Quoter => $q, TableParser => $tp ); + my $checksum = new TableChecksum( Quoter => $q ); + my $syncer = new TableSyncer( + Quoter => $q, + MasterSlave => $ms, + TableChecksum => $checksum, + DSNParser => $dp, + Retry => $rt, + ); + my %modules = ( + OptionParser => $o, + DSNParser => $dp, + TableParser => $tp, + Quoter => $q, + TableChunker => $chunker, + TableNibbler => $nibbler, + TableChecksum => $checksum, + MasterSlave => $ms, + TableSyncer => $syncer, + ); + + # Create the sync plugins. + my $plugins = []; + my %have_plugin = get_plugins(); + foreach my $algo ( split(',', $o->get('algorithms')) ) { + my $plugin_name = $have_plugin{lc $algo}; + if ( !$plugin_name ) { + die "The $algo algorithm is not available. Available algorithms: " + . join(", ", sort keys %have_plugin); + } + PTDEBUG && _d('Loading', $plugin_name); + my $plugin; + eval { + $plugin = $plugin_name->new(%modules); + }; + die "Error loading $plugin_name for $algo algorithm: $EVAL_ERROR" + if $EVAL_ERROR; + push @$plugins, $plugin; + } + + # Create callbacks for bidirectional syncing. Currently, this only + # works with TableSyncChunk, so that should be the only plugin because + # --algorithms was overridden earlier. + if ( $o->get('bidirectional') ) { + set_bidirectional_callbacks( + plugin => $plugins->[0], + %modules, + ); + } + + my $exit_status = 0; # 1: internal error, 2: tables differed, 3: both + + # dsn[0] is expected to be the master (i.e. the source). So if + # --sync-to-master, then dsn[0] is a slave. Find its master and + # make the master dsn[0] and the slave dsn[1]. + if ( $o->get('sync-to-master') ) { + PTDEBUG && _d('Getting master of', $dp->as_string($dsns[0])); + $dsns[0]->{dbh} = get_cxn($dsns[0], %modules); + my $master = $ms->get_master_dsn($dsns[0]->{dbh}, $dsns[0], $dp) + or die "Can't determine master of " . $dp->as_string($dsns[0]); + unshift @dsns, $master; # dsn[0]=master, dsn[1]=slave + $dsns[0]->{dbh} = get_cxn($dsns[0], %modules); + if ( $o->get('check-master') ) { + $ms->is_master_of($dsns[0]->{dbh}, $dsns[1]->{dbh}); + } + } + + # ######################################################################## + # Do the version-check + # ######################################################################## + # This tool has way too many dbhs and doesn't use Cxn, so we have to + # manually disconnect them else they'll throw a warning. Also, it + # creates some dbh late, so here we need to create a dbh and then + # disconnect it only if we created it, i.e. don't disconnect the few + # dbh created early by the tool. 
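+ # The block below opens any connection that does not exist yet just for + # the check and tracks it in @vc_dbhs, so only those extra connections + # are closed afterwards; connections the tool opened earlier stay open.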
+ if ( $o->get('version-check') && (!$o->has('quiet') || !$o->get('quiet')) ) { + my @vc_dbhs; + my @instances = map { + my $dsn = $_; + my $dbh = $dsn->{dbh}; + if ( !$dbh ) { + $dbh = get_cxn($dsn, %modules); + push @vc_dbhs, $dbh; # disconnect this dbh after version check + } + +{ dbh => $dbh, dsn => $dsn } + } @dsns; + + VersionCheck::version_check( + force => $o->got('version-check'), + instances => \@instances, + ); + + map { $_->disconnect } @vc_dbhs; + } + + # ######################################################################## + # Sync! + # ######################################################################## + my %args = ( + dsns => \@dsns, + plugins => $plugins, + %modules, + ); + + if ( $o->get('dry-run') ) { + print "# NOTE: --dry-run does not show if data needs to be synced because it\n" + . "# does not access, compare or sync data. --dry-run only shows\n" + . "# the work that would be done.\n"; + + } + + if ( $o->get('lock-and-rename') ) { + $exit_status = lock_and_rename(%args); + } + elsif ( $dsns[0]->{t} ) { + $exit_status = sync_one_table(%args); + } + elsif ( $o->get('replicate') ) { + $exit_status = sync_via_replication(%args); + } + else { + $exit_status = sync_all(%args); + } + + return $exit_status; +} + +# ############################################################################ +# Subroutines +# ############################################################################ + +# Sub: lock_and_rename +# Lock and rename a table. +# +# Parameters: +# %args - Arguments +# +# Required Arguments: +# dsns - Arrayref of DSNs +# plugins - Arrayref of TableSync* objects +# OptionParser - object +# DSNParser - object +# Quoter - object +# +# Returns: +# Exit status +sub lock_and_rename { + my ( %args ) = @_; + my @required_args = qw(dsns plugins OptionParser DSNParser Quoter ); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $dsns = $args{dsns}; + my $o = $args{OptionParser}; + my $dp = $args{DSNParser}; + my $q = $args{Quoter}; + + PTDEBUG && _d('Locking and syncing ONE TABLE with rename'); + my $src = { + dsn => $dsns->[0], + dbh => $dsns->[0]->{dbh} || get_cxn($dsns->[0], %args), + misc_dbh => get_cxn($dsns->[0], %args), + db => $dsns->[0]->{D}, + tbl => $dsns->[0]->{t}, + }; + my $dst = { + dsn => $dsns->[1], + dbh => $dsns->[1]->{dbh} || get_cxn($dsns->[1], %args), + misc_dbh => get_cxn($dsns->[1], %args), + db => $dsns->[1]->{D}, + tbl => $dsns->[1]->{t}, + }; + + my %options = ( DSNParser => $dp, OptionParser => $o ); + if ( grep { VersionParser->new($_->{dbh}) < '5.5' } $src, $dst ) { + disconnect($src, $dst); + die "--lock-and-rename requires MySQL 5.5 or later"; + } + + if ( $o->get('verbose') ) { + print_header("# Lock and rename " . $dp->as_string($src->{dsn})); + } + + # We don't use lock_server() here because it does the usual stuff wrt + # waiting for slaves to catch up to master, etc, etc. + my $src_db_tbl = $q->quote($src->{db}, $src->{tbl}); + my $dst_db_tbl = $q->quote($dst->{db}, $dst->{tbl}); + my $tmp_db_tbl = $q->quote($src->{db}, $src->{tbl} . "_tmp_$PID"); + my $sql = "LOCK TABLES $src_db_tbl WRITE"; + PTDEBUG && _d($sql); + $src->{dbh}->do($sql); + $sql = "LOCK TABLES $dst_db_tbl WRITE"; + PTDEBUG && _d($sql); + $dst->{dbh}->do($sql); + + my $exit_status = sync_a_table( + src => $src, + dst => $dst, + %args, + ); + + # Now rename the tables to swap them. 
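+ # The swap below is a three-step rename: src is renamed to a temporary + # name, dst takes over src's name, and after the write locks are + # released the temporary table takes dst's name.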
+ $sql = "ALTER TABLE $src_db_tbl RENAME $tmp_db_tbl"; + PTDEBUG && _d($sql); + $src->{dbh}->do($sql); + $sql = "ALTER TABLE $dst_db_tbl RENAME $src_db_tbl"; + PTDEBUG && _d($sql); + $dst->{dbh}->do($sql); + $sql = "UNLOCK TABLES"; + PTDEBUG && _d($sql); + $src->{dbh}->do($sql); + $sql = "ALTER TABLE $tmp_db_tbl RENAME $dst_db_tbl"; + PTDEBUG && _d($sql); + $src->{dbh}->do($sql); + + unlock_server(src => $src, dst => $dst, %args); + + disconnect($src, $dst); + return $exit_status; +} + +# Sub: sync_one_table +# Sync one table between one source host and multiple destination hosts. +# The first DSN in $args{dsns} specifies the source host, database (D), +# and table (t). The other DSNs are the destination hosts. If a destination +# DSN does not specify a database or table, the source database or table +# are used as defaults. Else, the destination-specific database or table +# are used. This allows you to sync tables with different names. +# +# Parameters: +# %args - Arguments +# +# Required Arguments: +# dsns - Arrayref of DSNs +# plugins - Arrayref of TableSync* objects +# OptionParser - object +# DSNParser - object +# Quoter - object +# +# Returns: +# Exit status +sub sync_one_table { + my ( %args ) = @_; + my @required_args = qw(dsns plugins OptionParser DSNParser Quoter); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my @dsns = @{$args{dsns}}; + my $o = $args{OptionParser}; + my $dp = $args{DSNParser}; + + PTDEBUG && _d('DSN has t part; syncing ONE TABLE between servers'); + my $src = { + dsn => $dsns[0], + dbh => $dsns[0]->{dbh} || get_cxn($dsns[0], %args), + misc_dbh => get_cxn($dsns[0], %args), + db => $dsns[0]->{D}, + tbl => $dsns[0]->{t}, + }; + + my $exit_status = 0; + foreach my $dsn ( @dsns[1 .. $#dsns] ) { + my $dst = { + dsn => $dsn, + dbh => $dsn->{dbh} || get_cxn($dsn, %args), + misc_dbh => get_cxn($dsn, %args), + db => $dsn->{D} || $src->{db}, + tbl => $dsn->{t} || $src->{tbl}, + }; + + if ( $o->get('verbose') ) { + print_header("# Syncing " . $dp->as_string($dsn) + . ($o->get('dry-run') + ? ' in dry-run mode, without accessing or comparing data' + : '')); + } + + lock_server(src => $src, dst => $dst, %args); + + $exit_status |= sync_a_table( + src => $src, + dst => $dst, + %args, + ); + + unlock_server(src => $src, dst => $dst, %args); + disconnect($dst); + } + + disconnect($src); + return $exit_status; +} + +# Sub: sync_via_replication +# Sync multiple destination hosts to one source host via replication. +# The first DSN in $args{dsns} specifies the source host. +# If --sync-to-master is specified, then the source host is a master +# and there is only one destination host which is its slave. +# Else, destination hosts are auto-discovered with +# . 
+# MasterSlave::recurse_to_slaves(). +# +# Parameters: +# %args - Arguments +# +# Required Arguments: +# dsns - Arrayref of DSNs +# plugins - Arrayref of TableSync* objects +# OptionParser - object +# DSNParser - object +# Quoter - object +# TableChecksum - object +# MasterSlave - object +# +# Returns: +# Exit status +# +# See Also: +# +sub sync_via_replication { + my ( %args ) = @_; + my @required_args = qw(dsns plugins OptionParser DSNParser Quoter + TableChecksum MasterSlave); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $dsns = $args{dsns}; + my $o = $args{OptionParser}; + my $dp = $args{DSNParser}; + my $q = $args{Quoter}; + my $checksum = $args{TableChecksum}; + my $ms = $args{MasterSlave}; + + PTDEBUG && _d('Syncing via replication'); + my $src = { + dsn => $dsns->[0], + dbh => $dsns->[0]->{dbh} || get_cxn($dsns->[0], %args), + misc_dbh => get_cxn($dsns->[0], %args), + db => undef, # set later + tbl => undef, # set later + }; + + # Used to filter which tables are synced. + # https://bugs.launchpad.net/percona-toolkit/+bug/1002365 + my $schema_iter = new SchemaIterator( + dbh => $src->{dbh}, + OptionParser => $o, + TableParser => $args{TableParser}, + Quoter => $args{Quoter}, + ); + + my %skip_table; + my $exit_status = 0; + + # Connect to the master and treat it as the source, then find + # differences on the slave and sync them. + if ( $o->get('sync-to-master') ) { + my $dst = { + dsn => $dsns->[1], + dbh => $dsns->[1]->{dbh} || get_cxn($dsns->[1], %args), + misc_dbh => get_cxn($dsns->[1], %args), + db => undef, # set later + tbl => undef, # set later + }; + + # First, check that the master (source) has no discrepancies itself, + # and ignore tables that do. + my $src_diffs = $checksum->find_replication_differences( + $src->{dbh}, $o->get('replicate')); + map { $skip_table{lc $_->{db}}->{lc $_->{tbl}}++ } @$src_diffs; + + # Now check the slave for differences and sync them if necessary. + my $dst_diffs = $checksum->find_replication_differences( + $dst->{dbh}, $o->get('replicate')); + my $diffs = filter_diffs( + diffs => $dst_diffs, + SchemaIterator => $schema_iter, + skip_table => \%skip_table, + ); + + if ( $o->get('verbose') ) { + print_header("# Syncing via replication " .$dp->as_string($dst->{dsn}) + . ($o->get('dry-run') ? + ' in dry-run mode, without accessing or comparing data' : '')); + } + + if ( $diffs && scalar @$diffs ) { + lock_server(src => $src, dst => $dst, %args); + + foreach my $diff ( @$diffs ) { + # Clear the tbl_struct if this is a new table. The tbl_struct + # is fetched and parsed in ok_to_sync() if not set. We only + # need to set it once per table to avoid doing this for every + # diff in the same table. + # https://bugs.launchpad.net/percona-toolkit/+bug/1003014 + if ( ($src->{db} || '') ne $diff->{db} + || ($src->{tbl} || '') ne $diff->{tbl} ) { + PTDEBUG && _d('New table:', $diff->{db}, $diff->{tbl}); + $src->{tbl_struct} = undef; + } + $src->{db} = $dst->{db} = $diff->{db}; + $src->{tbl} = $dst->{tbl} = $diff->{tbl}; + + $exit_status |= sync_a_table( + src => $src, + dst => $dst, + where => 1, # prevents --where from being used + diff => $diff, + %args, + ); + } + + unlock_server(src => $src, dst => $dst, %args); + } + else { + PTDEBUG && _d('No checksum differences'); + } + + disconnect($dst); + } # sync-to-master + + # The DSN is the master. Connect to each slave, find differences, + # then sync them. 
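+ # recurse_to_slaves() invokes the callback below once per discovered + # server: at level 0 (the master itself) it only records tables to + # skip; at deeper levels (slaves) it filters the diffs and syncs them.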
+ else { + $ms->recurse_to_slaves( + { dbh => $src->{dbh}, + dsn => $src->{dsn}, + recurse => 1, + callback => sub { + my ( $dsn, $dbh, $level, $parent ) = @_; + my $all_diffs = $checksum->find_replication_differences( + $dbh, $o->get('replicate')); + if ( !$level ) { + # This is the master; don't sync any tables that are wrong + # here, for obvious reasons. + map { $skip_table{lc $_->{db}}->{lc $_->{tbl}}++ } + @$all_diffs; + } + else { + # This is a slave. + my $diffs = filter_diffs( + diffs => $all_diffs, + SchemaIterator => $schema_iter, + skip_table => \%skip_table, + ); + + if ( $o->get('verbose') ) { + print_header("# Syncing via replication " + . $dp->as_string($dsn) + . ($o->get('dry-run') + ? ' in dry-run mode, without ' + . 'accessing or comparing data' + : '')); + } + + if ( $diffs && scalar @$diffs ) { + my $dst = { + dsn => $dsn, + dbh => $dbh, + misc_dbh => get_cxn($dsn, %args), + db => undef, # set later + tbl => undef, # set later + }; + + lock_server(src => $src, dst => $dst, %args); + + foreach my $diff ( @$diffs ) { + # Clear the tbl_struct if this is a new table. + # See the same code block above. + if ( ($src->{db} || '') ne $diff->{db} + || ($src->{tbl} || '') ne $diff->{tbl} ) { + PTDEBUG && _d('New table:', + $diff->{db}, $diff->{tbl}); + $src->{tbl_struct} = undef; + } + $src->{db} = $dst->{db} = $diff->{db}; + $src->{tbl} = $dst->{tbl} = $diff->{tbl}; + + $exit_status |= sync_a_table( + src => $src, + dst => $dst, + where => 1, # prevents --where from being used + diff => $diff, + %args, + ); + } + + unlock_server(src => $src, dst => $dst, %args); + disconnect($dst); + } + else { + PTDEBUG && _d('No checksum differences'); + } + } # this is a slave + + return; + }, # recurse_to_slaves() callback + }, + ); + } # DSN is master + + disconnect($src); + return $exit_status; +} + +# Sub: sync_all +# Sync every table between one source host and multiple destination hosts. +# The first DSN in $args{dsns} specifies the source host. The other DSNs +# are the destination hosts. Unlike sync_one_table(), the database and +# table names must be the same on the source and destination hosts. +# +# Parameters: +# %args - Arguments +# +# Required Arguments: +# dsns - Arrayref of DSNs +# plugins - Arrayref of TableSync* objects +# OptionParser - object +# DSNParser - object +# Quoter - object +# TableParser - object +# +# Returns: +# Exit status +sub sync_all { + my ( %args ) = @_; + my @required_args = qw(dsns plugins OptionParser DSNParser Quoter + TableParser); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my @dsns = @{$args{dsns}}; + my $o = $args{OptionParser}; + my $dp = $args{DSNParser}; + + PTDEBUG && _d('Syncing all dbs and tbls'); + my $src = { + dsn => $dsns[0], + dbh => $dsns[0]->{dbh} || get_cxn($dsns[0], %args), + misc_dbh => get_cxn($dsns[0], %args), + db => undef, # set later + tbl => undef, # set later + }; + + my $schema_iter = new SchemaIterator( + dbh => $src->{dbh}, + OptionParser => $o, + TableParser => $args{TableParser}, + Quoter => $args{Quoter}, + ); + + # Make a list of all dbs.tbls on the source. It's more efficient this + # way because it avoids open/closing a dbh for each tbl and dsn, unless + # we pre-opened the dsn. It would also cause confusing verbose output. + my @dbs_tbls; + while ( my $tbl = $schema_iter->next() ) { + PTDEBUG && _d('Got table', $tbl->{db}, $tbl->{tbl}); + push @dbs_tbls, $tbl; + } + + my $exit_status = 0; + foreach my $dsn ( @dsns[1 .. 
$#dsns] ) { + if ( $o->get('verbose') ) { + print_header("# Syncing " . $dp->as_string($dsn) + . ($o->get('dry-run') + ? ' in dry-run mode, without accessing or comparing data' : '')); + } + + my $dst = { + dsn => $dsn, + dbh => $dsn->{dbh} || get_cxn($dsn, %args), + misc_dbh => get_cxn($dsn, %args), + db => undef, # set later + tbl => undef, # set later + }; + + lock_server(src => $src, dst => $dst, %args); + + foreach my $db_tbl ( @dbs_tbls ) { + $src->{tbl_struct} = $db_tbl->{tbl_struct}; + $src->{db} = $dst->{db} = $db_tbl->{db}; + $src->{tbl} = $dst->{tbl} = $db_tbl->{tbl}; + + $exit_status |= sync_a_table( + src => $src, + dst => $dst, + %args, + ); + } + + unlock_server(src => $src, dst => $dst, %args); + disconnect($dst); + } + + disconnect($src); + return $exit_status; +} + +# Sub: lock_server +# Lock a host with FLUSH TABLES WITH READ LOCK. This implements +# --lock 3 by calling TableSyncer::lock_and_wait(). +# +# Parameters: +# %args - Arguments +# +# Required Arguments: +# src - Hashref with source host information +# dst - Hashref with destination host information +# OptionParser - object +# DSNParser - object +# TableSyncer - object +sub lock_server { + my ( %args ) = @_; + foreach my $arg ( qw(src dst OptionParser DSNParser TableSyncer) ) { + die "I need a $arg argument" unless $args{$arg}; + } + my $o = $args{OptionParser}; + + return unless $o->get('lock') && $o->get('lock') == 3; + + eval { + $args{TableSyncer}->lock_and_wait( + %args, + lock => 3, + lock_level => 3, + replicate => $o->get('replicate'), + timeout_ok => $o->get('timeout-ok'), + transaction => $o->get('transaction'), + wait => $o->get('wait'), + ); + }; + if ( $EVAL_ERROR ) { + die "Failed to lock server: $EVAL_ERROR"; + } + return; +} + +# Sub: unlock_server +# Unlock a host with UNLOCK TABLES. This implements +# --lock 3 by calling TableSyncer::unlock(). +# +# Parameters: +# %args - Arguments +# +# Required Arguments: +# src - Hashref with source host information +# dst - Hashref with destination host information +# OptionParser - object +# DSNParser - object +# TableSyncer - object +sub unlock_server { + my ( %args ) = @_; + my @required_args = qw(src dst OptionParser DSNParser TableSyncer); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($src, $dst, $o) = @args{@required_args}; + + return unless $o->get('lock') && $o->get('lock') == 3; + + eval { + # Open connections as needed. + $src->{dbh} ||= get_cxn($src->{dsn}, %args); + $dst->{dbh} ||= get_cxn($dst->{dsn}, %args); + $src->{misc_dbh} ||= get_cxn($src->{dsn}, %args); + $args{TableSyncer}->unlock( + src_dbh => $src->{dbh}, + src_db => '', + src_tbl => '', + dst_dbh => $dst->{dbh}, + dst_db => '', + dst_tbl => '', + misc_dbh => $src->{misc_dbh}, + replicate => $o->get('replicate') || 0, + timeout_ok => $o->get('timeout-ok') || 0, + transaction => $o->get('transaction') || 0, + wait => $o->get('wait') || 0, + lock => 3, + lock_level => 3, + ); + }; + if ( $EVAL_ERROR ) { + die "Failed to unlock server: $EVAL_ERROR"; + } + return; +} + +# Sub: sync_a_table +# Sync the destination host table to the source host table. This sub +# is not called directly but indirectly via the other sync_* subs. +# In turn, this sub calls TableSyncer::sync_table(), which actually +# does the sync work. Calling sync_table() requires a fair amount of +# prep work that this sub does/simplifies. New RowDiff and ChangeHandler +# objects are created, so those packages need to be available. 
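+# A typical call, as sync_one_table() and sync_all() above make it, +# looks like: +# +#   $exit_status |= sync_a_table( +#      src => $src, +#      dst => $dst, +#      %args, +#   );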
+# +# Parameters: +# %args - Arguments +# +# Required Arguments: +# src - Hashref with source host information +# dst - Hashref with destination host information +# plugins - Arrayref of TableSync* objects +# OptionParser - object +# Quoter - object +# TableParser - object +# TableSyncer - object +# +# Returns: +# Exit status +sub sync_a_table { + my ( %args ) = @_; + my @required_args = qw(src dst plugins OptionParser Quoter TableParser + TableSyncer); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($src, $dst, undef, $o, $q, $tp, $syncer) = @args{@required_args}; + + my ($start_ts, $end_ts); + my $exit_status = 0; + my %status; + eval { + $start_ts = get_server_time($src->{dbh}) if $o->get('verbose'); + + # This will either die if there's a problem or return the tbl struct. + ok_to_sync($src, $dst, %args); + my $tbl_struct = $src->{tbl_struct}; + + if ( my $diff = $args{diff} ) { + PTDEBUG && _d('Converting checksum diff to WHERE:', Dumper($diff)); + $args{where} = diff_where( + %args, + tbl_struct => $tbl_struct, + ); + } + + # If the table is InnoDB, prefer to sync it with transactions, unless + # the user explicitly said not to. + my $use_txn = $o->got('transaction') ? $o->get('transaction') + : $tbl_struct->{engine} eq 'InnoDB' ? 1 + : 0; + + if ($tbl_struct->{charset}) { + PTDEBUG && _d("Detected table's character set: $tbl_struct->{charset}"); + PTDEBUG && _d("Executing: SET NAMES '$tbl_struct->{charset}'"); + $src->{dbh}->do("SET NAMES '$tbl_struct->{charset}'"); + $src->{misc_dbh}->do("SET NAMES '$tbl_struct->{charset}'"); + $dst->{dbh}->do("SET NAMES '$tbl_struct->{charset}'"); + $dst->{misc_dbh}->do("SET NAMES '$tbl_struct->{charset}'"); + } else { + PTDEBUG && _d("Cannot get the default character set for the table"); + } + # Turn off AutoCommit if we're using transactions. + $src->{dbh}->{AutoCommit} = !$use_txn; + $src->{misc_dbh}->{AutoCommit} = !$use_txn; + $dst->{dbh}->{AutoCommit} = !$use_txn; + $dst->{misc_dbh}->{AutoCommit} = !$use_txn; + + # Determine which columns to compare. + my $ignore_columns = $o->get('ignore-columns'); + my @compare_columns = grep { + !$ignore_columns->{lc $_}; + } @{$o->get('columns') || $tbl_struct->{cols}}; + + # Make sure conflict col is in compare cols else conflicting + # rows won't have the col for --conflict-comparison. + if ( my $conflict_col = $o->get('conflict-column') ) { + push @compare_columns, $conflict_col + unless grep { $_ eq $conflict_col } @compare_columns; + } + + # --print --verbose --verbose is the magic formula for having + # all src/dst sql printed so we can see the chunk/row sql. + my $callback; + if ( $o->get('print') && $o->get('verbose') >= 2 ) { + $callback = \&print_sql; + } + + # get_change_dbh() may die if, for example, the destination is + # not a slave. Perhaps its work should be part of can_sync()? + my $change_dbh = get_change_dbh(tbl_struct => $tbl_struct, %args); + my $actions = make_action_subs(change_dbh => $change_dbh, %args); + + my $rd = new RowDiff(dbh => $src->{misc_dbh}); + my $ch = new ChangeHandler( + left_db => $src->{db}, + left_tbl => $src->{tbl}, + right_db => $dst->{db}, + right_tbl => $dst->{tbl}, + tbl_struct => $tbl_struct, + hex_blob => $o->get('hex-blob'), + queue => $o->get('buffer-to-client') ? 
1 : 0, + replace => $o->get('replace') + || $o->get('replicate') + || $o->get('sync-to-master') + || 0, + actions => $actions, + Quoter => $args{Quoter}, + ); + + %status = $syncer->sync_table( + %args, + tbl_struct => $tbl_struct, + cols => \@compare_columns, + chunk_size => $o->get('chunk-size'), + RowDiff => $rd, + ChangeHandler => $ch, + transaction => $use_txn, + callback => $callback, + where => $args{where} || $o->get('where'), + bidirectional => $o->get('bidirectional'), + buffer_in_mysql => $o->get('buffer-in-mysql'), + buffer_to_client => $o->get('buffer-to-client'), + changing_src => $o->get('replicate') + || $o->get('sync-to-master') + || $o->get('bidirectional') + || 0, + float_precision => $o->get('float-precision'), + index_hint => $o->get('index-hint'), + chunk_index => $o->get('chunk-index'), + chunk_col => $o->get('chunk-column'), + zero_chunk => $o->get('zero-chunk'), + lock => $o->get('lock'), + replace => $o->get('replace'), + replicate => $o->get('replicate'), + dry_run => $o->get('dry-run'), + timeout_ok => $o->get('timeout-ok'), + trim => $o->get('trim'), + wait => $o->get('wait'), + function => $o->get('function'), + trace => !$ENV{PT_TEST_NO_TRACE}, + ); + + if ( sum(@status{@ChangeHandler::ACTIONS}) ) { + $exit_status |= 2; + } + }; + + if ( $EVAL_ERROR ) { + print_err($EVAL_ERROR, $dst->{db}, $dst->{tbl}, $dst->{dsn}->{h}); + $exit_status |= 1; + } + + # Print this last so that the exit status is its final result. + if ( $o->get('verbose') ) { + $end_ts = get_server_time($src->{dbh}) || ""; + print_results( + map { $_ || '0' } @status{@ChangeHandler::ACTIONS, 'ALGORITHM'}, + $start_ts, $end_ts, + $exit_status, $src->{db}, $src->{tbl}); + } + + return $exit_status; +} + +# Sub: get_change_dbh +# Return the dbh to write to for syncing changes. Write statements +# are executed on the "change dbh". If --sync-to-master or --replicate +# is specified, the source (master) dbh is the "change dbh". This means +# changes replicate to all slaves. Else, the destination dbh is the +# change dbh. This is the case when two independent servers (or perhaps +# one table on the same server) are synced. This sub implements +# --[no]check-slave because writing to a slave is generally a bad thing. +# +# Parameters: +# %args - Arguments +# +# Required Arguments: +# src - Hashref with source host information +# dst - Hashref with destination host information +# tbl_struct - Hashref returned by TableParser::parse() +# OptionParser - object +# DSNParser - object +# MasterSlave - object +# +# Returns: +# Either $args{src}->{dbh} or $args{dst}->{dbh} if no checks fail. +# +# See Also: +# +sub get_change_dbh { + my ( %args ) = @_; + my @required_args = qw(src dst tbl_struct OptionParser DSNParser + MasterSlave); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($src, $dst, $tbl_struct, $o, $dp, $ms) = @args{@required_args}; + + my $change_dbh = $dst->{dbh}; # The default case: making changes on dst. + + if ( $o->get('sync-to-master') || $o->get('replicate') ) { + # Is it possible to make changes on the master (i.e. the source)? + # Only if REPLACE will work. + my $can_replace = grep { $_->{is_unique} } values %{$tbl_struct->{keys}}; + PTDEBUG && _d("This table's replace-ability:", $can_replace); + die "Can't make changes on the master because no unique index exists" + unless $can_replace; + $change_dbh = $src->{dbh}; # The alternate case. 
+ PTDEBUG && _d('Will make changes on source', $change_dbh); + } + elsif ( $o->get('check-slave') ) { + # Is it safe to change data on the destination? Only if it's *not* + # a slave. We don't change tables on slaves directly. If we are + # forced to change data on a slave, we require either that 1) binary + # logging is disabled, or 2) the check is bypassed. By the way, just + # because the server is a slave doesn't mean it's not also the master + # of the master (master-master replication). + my $slave_status = $ms->get_slave_status($dst->{dbh}); + my (undef, $log_bin) = $dst->{dbh}->selectrow_array( + q{SHOW VARIABLES LIKE 'log_bin'}); + my ($sql_log_bin) = $dst->{dbh}->selectrow_array( + 'SELECT @@SQL_LOG_BIN'); + PTDEBUG && _d('Variables on destination:', + 'log_bin=', (defined $log_bin ? $log_bin : 'NULL'), + ' @@SQL_LOG_BIN=', (defined $sql_log_bin ? $sql_log_bin : 'NULL')); + if ( $slave_status && $sql_log_bin && ($log_bin || 'OFF') eq 'ON' ) { + die "Can't make changes on ", $dp->as_string($dst->{dsn}), + " because it's a slave. See the documentation section", + " 'REPLICATION SAFETY' for solutions to this problem."; + } + PTDEBUG && _d('Will make changes on destination', $change_dbh); + } + + return $change_dbh; +} + +# Sub: make_action_subs +# Make callbacks for the ChangeHandler::new() actions argument. This +# sub implements --print and --execute. +# +# Parameters: +# %args - Arguments +# +# Required Arguments: +# change_dbh - dbh returned by get_change_dbh() +# OptionParser - object +# +# Returns: +# Arrayref of callbacks (coderefs) +sub make_action_subs { + my ( %args ) = @_; + my @required_args = qw(change_dbh OptionParser); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($change_dbh, $o) = @args{@required_args}; + + my @actions; + if ( $o->get('execute') ) { + push @actions, sub { + my ( $sql, $dbh ) = @_; + # Use $dbh if given. It's from a bidirectional callback. + $dbh ||= $change_dbh; + PTDEBUG && _d('Execute on dbh', $dbh, $sql); + $dbh->do($sql); + }; + } + if ( $o->get('print') ) { + # Print AFTER executing, so the print isn't misleading in case of an + # index violation etc that doesn't actually get executed. + push @actions, sub { + my ( $sql, $dbh ) = @_; + # Append /*host:port*/ to the sql, if possible, so the user + # can see on which host it was/would be run. + my $dsn = $dsn_for{$dbh} if $dbh; + if ( $dsn ) { + my $h = $dsn->{h} || $dsn->{S} || ''; + my $p = $dsn->{P} || ''; + $sql = "/*$h" . ($p ? ":$p" : '') . "*/ $sql"; + } + print($sql, ";\n") or die "Cannot print: $OS_ERROR"; + }; + } + + return \@actions; +} + + +# Sub: print_err +# Try to extract the MySQL error message and print it. +# +# Parameters: +# $msg - Error message +# $database - Database name being synced when error occurred +# $table - Table name being synced when error occurred +# $host - Host name error occurred on +sub print_err { + my ( $msg, $database, $table, $host ) = @_; + return if !defined $msg; + $msg =~ s/^.*?failed: (.*?) at \S+ line (\d+).*$/$1 at line $2/s; + $msg =~ s/\s+/ /g; + if ( $database && $table ) { + $msg .= " while doing $database.$table"; + } + if ( $host ) { + $msg .= " on $host"; + } + print STDERR $msg, "\n"; +} + +# Sub: get_cxn +# Connect to host specified by DSN. 
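+# Besides connecting, this sub prepares the session for syncing: it can +# disable binary logging, unique checks, and foreign key checks, appends +# NO_AUTO_VALUE_ON_ZERO to the SQL mode, enforces statement-based +# binlog_format when syncing through replication, and sets the +# transaction isolation level to REPEATABLE READ.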
+# +# Parameters: +# $dsn - Host DSN +# %args - Arguments +# +# Required Arguments: +# OptionaParser - object +# DSNParser - object +# +# Returns: +# dbh +sub get_cxn { + my ( $dsn, %args ) = @_; + my @required_args = qw(OptionParser DSNParser); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($o, $dp) = @args{@required_args}; + + if ( !$dsn->{p} && $o->get('ask-pass') ) { + # Just "F=file" is a valid DSN but fill_in_dsn() can't help us + # because we haven't connected yet. If h is not specified, + # then user is relying on F or .my.cnf/system defaults. + # http://code.google.com/p/maatkit/issues/detail?id=947 + my $host = $dsn->{h} ? $dsn->{h} + : "DSN ". $dp->as_string($dsn); + $dsn->{p} = OptionParser::prompt_noecho("Enter password for $host: "); + } + my $dbh = $dp->get_dbh( + $dp->get_cxn_params($dsn, {}) # get_cxn_params needs the 2nd arg + ); + + my $sql; + if ( !$o->get('bin-log') ) { + $sql = "/*!32316 SET SQL_LOG_BIN=0 */"; + PTDEBUG && _d($dbh, $sql); + $dbh->do($sql); + } + if ( !$o->get('unique-checks') ) { + $sql = "/*!40014 SET UNIQUE_CHECKS=0 */"; + PTDEBUG && _d($dbh, $sql); + $dbh->do($sql); + } + if ( !$o->get('foreign-key-checks') ) { + $sql = "/*!40014 SET FOREIGN_KEY_CHECKS=0 */"; + PTDEBUG && _d($dbh, $sql); + $dbh->do($sql); + } + + # Disable auto-increment on zero (bug #1919897). + $sql = '/*!40101 SET @@SQL_MODE := CONCAT(@@SQL_MODE, ' + . "',NO_AUTO_VALUE_ON_ZERO')*/"; + PTDEBUG && _d($dbh, $sql); + $dbh->do($sql); + + # Ensure statement-based replication. + # http://code.google.com/p/maatkit/issues/detail?id=95 + # https://bugs.launchpad.net/percona-toolkit/+bug/919352 + # The tool shouldn't blindly attempt to change binlog_format; + # instead, it should check if it's already set to STATEMENT. + # This is becase starting with MySQL 5.1.29, changing the format + # requires a SUPER user. + if ( VersionParser->new($dbh) >= '5.1.29' + && ($o->get('replicate') || $o->get('sync-to-master'))) { + $sql = 'SELECT @@binlog_format'; + PTDEBUG && _d($dbh, $sql); + my ($original_binlog_format) = $dbh->selectrow_array($sql); + PTDEBUG && _d('Original binlog_format:', $original_binlog_format); + if ( $original_binlog_format !~ /STATEMENT/i ) { + $sql = q{/*!50108 SET @@binlog_format := 'STATEMENT'*/}; + eval { + PTDEBUG && _d($dbh, $sql); + $dbh->do($sql); + }; + if ( $EVAL_ERROR ) { + die "Failed to $sql: $EVAL_ERROR\n" + . "This tool requires binlog_format=STATEMENT, " + . "but the current binlog_format is set to " + ."$original_binlog_format and an error occurred while " + . "attempting to change it. If running MySQL 5.1.29 or newer, " + . "setting binlog_format requires the SUPER privilege. " + . "You will need to manually set binlog_format to 'STATEMENT' " + . "before running this tool.\n"; + } + } + } + + # Set repeatable read for both explicit and auto_commit transactions + # as lower isolation levels will not play nice with binlog_format=STATEMENT + # https://bugs.launchpad.net/percona-toolkit/+bug/869005 + $sql = "SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ"; + eval { + PTDEBUG && _d($dbh, $sql); + $dbh->do($sql); + }; + die "Failed to $sql: $EVAL_ERROR" if $EVAL_ERROR; + + $dsn_for{$dbh} = $dsn; + + PTDEBUG && _d('Opened dbh', $dbh); + return $dbh; +} + + +# Sub: ok_to_sync +# Check that the destination host table can be synced to the source host +# table. All sorts of sanity checks are performed to help ensure that +# syncing the table won't cause problems in or +# . 
+#
+# Parameters:
+#   %args - Arguments
+#
+# Required Arguments:
+#   src          - Hashref with source host information
+#   dst          - Hashref with destination host information
+#   DSNParser    - object
+#   Quoter       - object
+#   TableParser  - object
+#   TableSyncer  - object
+#   OptionParser - object
+#
+# Returns:
+#   Table structure if ok to sync, else it dies.
+sub ok_to_sync {
+   my ( %args ) = @_;
+   my @required_args = qw(src dst DSNParser Quoter TableParser
+                          TableSyncer OptionParser);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($src, $dst, $dp, $q, $tp, $syncer, $o) = @args{@required_args};
+
+   if ( !$src->{tbl_struct} ) {
+      eval {
+         $src->{ddl} = $tp->get_create_table(
+            $src->{dbh}, $src->{db}, $src->{tbl});
+         $src->{tbl_struct} = $tp->parse($src->{ddl});
+      };
+      if ( $EVAL_ERROR ) {
+         die "Error getting table structure for $src->{db}.$src->{tbl} on "
+            . $dp->as_string($src->{dsn}) . ": $EVAL_ERROR\nEnsure that "
+            . "the table exists and is accessible.\n";
+      }
+   }
+
+   # Check that the dst has the table.
+   my $dst_has_table = $tp->check_table(
+      dbh => $dst->{dbh},
+      db  => $dst->{db},
+      tbl => $dst->{tbl},
+   );
+
+   if ( lc($src->{tbl_struct}->{engine}) eq 'rocksdb' && ($o->get('sync-to-master')) ) {
+      print STDERR "Cannot sync using --sync-to-master with $dst->{db}.$dst->{tbl} ".
+          "due to the limitations of the RocksDB engine.\n\n".
+          "More information: https://www.percona.com/doc/percona-server/LATEST/myrocks/limitations.html\n\n";
+      die "Process aborted.\n";
+   }
+
+   if ( !$dst_has_table ) {
+      die "Table $dst->{db}.$dst->{tbl} does not exist on "
+         . $dp->as_string($dst->{dsn}) . "\n";
+   }
+
+   # Check that no triggers are defined on the dst tbl.
+   if ( $o->get('check-triggers') ) {
+      PTDEBUG && _d('Checking for triggers');
+      if ( !defined $dst->{supports_triggers} ) {
+         $dst->{supports_triggers} = VersionParser->new($dst->{dbh}) >= '5.0.2';
+      }
+      if ( $dst->{supports_triggers}
+           && get_triggers($dst->{dbh}, $q, $dst->{db}, $dst->{tbl}) ) {
+         die "Triggers are defined on the table";
+      }
+      else {
+         PTDEBUG && _d('Destination does not support triggers',
+            $dp->as_string($dst->{dsn}));
+      }
+   }
+
+   my $replace = $o->get('replace')
+              || $o->get('replicate')
+              || $o->get('sync-to-master');
+   if ( $replace && $o->get('execute') && $o->get('check-child-tables') ) {
+      my $child_tables = find_child_tables(
+         tbl    => $src,
+         dbh    => $src->{dbh},
+         Quoter => $q,
+      );
+      if ( $child_tables ) {
+         foreach my $tbl ( @$child_tables ) {
+            my $ddl = $tp->get_create_table(
+               $src->{dbh}, $tbl->{db}, $tbl->{tbl});
+            if ( $ddl && $ddl =~ m/(ON (?:DELETE|UPDATE) (?:SET|CASCADE))/ ) {
+               my $fk = $1;
+               die "REPLACE statements on $src->{db}.$src->{tbl} can adversely affect child table $tbl->{name} because it has an $fk foreign key constraint.  See --[no]check-child-tables in the documentation for more information.  --check-child-tables error\n"
+            }
+         }
+      }
+   }
+
+   return;
+}
+
+# Sub: get_triggers
+#
+#   Originally from MySQLDump.  This should perhaps belong in TableParser,
+#   but right now it would only be bloat.
+#
+# Returns:
+#   List of triggers
+
+sub get_triggers {
+   my ( $dbh, $quoter, $db, $tbl ) = @_;
+   my $triggers = {};
+   my $sql = '/*!40101 SET @OLD_SQL_MODE := @@SQL_MODE, '
+      . q{@@SQL_MODE := REPLACE(REPLACE(@@SQL_MODE, 'ANSI_QUOTES', ''), ',,', ','), }
+      . '@OLD_QUOTE := @@SQL_QUOTE_SHOW_CREATE, '
+      . '@@SQL_QUOTE_SHOW_CREATE := 1 */';
+   PTDEBUG && _d($sql);
+   eval { $dbh->do($sql); };
+   PTDEBUG && $EVAL_ERROR && _d($EVAL_ERROR);
+   $sql = "SHOW TRIGGERS FROM " . 
$quoter->quote($db);
+   PTDEBUG && _d($sql);
+   my $sth = $dbh->prepare($sql);
+   $sth->execute();
+   if ( $sth->rows ) {
+      my $trgs = $sth->fetchall_arrayref({});
+      foreach my $trg (@$trgs) {
+         my %trg;
+         @trg{ map { lc $_ } keys %$trg } = values %$trg;
+         push @{ $triggers->{$db}->{ $trg{table} } }, \%trg;
+      }
+   }
+   $sql = '/*!40101 SET @@SQL_MODE := @OLD_SQL_MODE, '
+      . '@@SQL_QUOTE_SHOW_CREATE := @OLD_QUOTE */';
+   PTDEBUG && _d($sql);
+   $dbh->do($sql);
+   if ( $tbl ) {
+      return $triggers->{$db}->{$tbl};
+   }
+   return values %{$triggers->{$db}};
+}
+
+# Sub: filter_diffs
+#   Filter different slave tables according to the various schema object
+#   filters.  This sub is called in main() to implement
+#   schema object filters like --databases and --tables.
+#
+# Returns:
+#   Arrayref of different slave tables that pass the filters
+sub filter_diffs {
+   my ( %args ) = @_;
+   my @required_args = qw(diffs SchemaIterator skip_table);
+   foreach my $arg ( @required_args ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my ($diffs, $si, $skip_table) = @args{@required_args};
+
+   my @filtered_diffs;
+   foreach my $diff ( @$diffs ) {
+      my $db  = lc $diff->{db};
+      my $tbl = lc $diff->{tbl};
+      if ( !$skip_table->{$db}->{$tbl}
+           && $si->database_is_allowed($db)
+           && $si->table_is_allowed($db, $tbl) ) {
+         push @filtered_diffs, $diff;
+      }
+   }
+
+   return \@filtered_diffs;
+}
+
+# Sub: disconnect
+#   Disconnect host dbhs created by get_cxn().  To make sure all dbh
+#   are closed, pt-table-sync keeps track of the dbh it opens and this
+#   sub helps keep track of the dbh that are closed.
+#
+# Parameters:
+#   @hosts - Array of hashrefs with host information, one for each host
+sub disconnect {
+   my ( @hosts ) = @_;
+   foreach my $host ( @hosts ) {
+      foreach my $thing ( qw(dbh misc_dbh) ) {
+         my $dbh = $host->{$thing};
+         next unless $dbh;
+         delete $dsn_for{$dbh};
+         # The following is for when misc_dbh loses
+         # connection due to timeout.  Since it has nothing
+         # to commit we avoid reporting an error.
+         if ( $thing eq 'misc_dbh' && !$dbh->ping() ) {
+            next;
+         }
+         $dbh->commit() unless $dbh->{AutoCommit};
+         $dbh->disconnect();
+         PTDEBUG && _d('Disconnected dbh', $dbh);
+      }
+   }
+   return;
+}
+
+# Sub: print_sql
+#   Callback for sync_table() if --print --verbose --verbose
+#   is specified.  The callback simply prints the SQL statements passed to
+#   it by sync_table().  They're usually (always?) identical statements.
+#
+# Parameters:
+#   $src_sql - SQL statement to be executed on the source host
+#   $dst_sql - SQL statement to be executed on the destination host
+sub print_sql {
+   my ( $src_sql, $dst_sql ) = @_;
+   print "# $src_sql\n" if $src_sql;
+   print "# $dst_sql\n" if $dst_sql;
+   return;
+}
+
+use constant UPDATE_LEFT      => -1;
+use constant UPDATE_RIGHT     =>  1;
+use constant UPDATE_NEITHER   =>  0;  # neither value equals/matches
+use constant FAILED_THRESHOLD =>  2;  # failed to exceed threshold
+
+# Sub: cmp_conflict_col
+#   Compare --conflict-column values for --bidirectional.  This sub is
+#   used as a callback in set_bidirectional_callbacks().
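+#
+#   For example (hypothetical values): with the 'newest' comparison and an
+#   assumed 30-second --conflict-threshold, a left value of
+#   '2010-01-01 12:00:30' and a right value of '2010-01-01 12:00:00' differ
+#   by exactly 30 seconds, so the threshold check passes and UPDATE_RIGHT
+#   is returned: the newer left row wins and the right side is updated.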
+#
+# Parameters:
+#   $left_val  - Column value from left (usually the source host)
+#   $right_val - Column value from right (usually the destination host)
+#   $cmp       - Type of conflict comparison, --conflict-comparison
+#   $val       - Value for certain types of comparisons, --conflict-value
+#   $thr       - Threshold for certain types of comparisons,
+#                --conflict-threshold
+#
+# Returns:
+#   One of the constants above, UPDATE_* or FAILED_THRESHOLD
+sub cmp_conflict_col {
+   my ( $left_val, $right_val, $cmp, $val, $thr ) = @_;
+   PTDEBUG && _d('Compare', @_);
+   my $res;
+   if ( $cmp eq 'newest' || $cmp eq 'oldest' ) {
+      $res = $cmp eq 'newest' ? ($left_val  || '') cmp ($right_val || '')
+           :                    ($right_val || '') cmp ($left_val  || '');
+
+      if ( $thr ) {
+         $thr = time_to_secs($thr);
+         my $lts  = any_unix_timestamp($left_val);
+         my $rts  = any_unix_timestamp($right_val);
+         my $diff = abs($lts - $rts);
+         PTDEBUG && _d('Check threshold, lts rts thr abs-diff:',
+            $lts, $rts, $thr, $diff);
+         if ( $diff < $thr ) {
+            PTDEBUG && _d("Failed threshold");
+            return FAILED_THRESHOLD;
+         }
+      }
+   }
+   elsif ( $cmp eq 'greatest' || $cmp eq 'least' ) {
+      $res = $cmp eq 'greatest' ? (($left_val || 0) > ($right_val || 0) ? 1 : -1)
+           :                      (($left_val || 0) < ($right_val || 0) ? 1 : -1);
+      $res = 0 if ($left_val || 0) == ($right_val || 0);
+      if ( $thr ) {
+         my $diff = abs($left_val - $right_val);
+         PTDEBUG && _d('Check threshold, abs-diff:', $diff);
+         if ( $diff < $thr ) {
+            PTDEBUG && _d("Failed threshold");
+            return FAILED_THRESHOLD;
+         }
+      }
+   }
+   elsif ( $cmp eq 'equals' ) {
+      $res = ($left_val  || '') eq $val ?  1
+           : ($right_val || '') eq $val ? -1
+           :                               0;
+   }
+   elsif ( $cmp eq 'matches' ) {
+      $res = ($left_val  || '') =~ m/$val/ ?  1
+           : ($right_val || '') =~ m/$val/ ? -1
+           :                                  0;
+   }
+   else {
+      # Shouldn't happen; caller should have verified this.
+      die "Invalid comparison: $cmp";
+   }
+
+   return $res;
+}
+
+# Sub: set_bidirectional_callbacks
+#   Set syncer plugin callbacks for --bidirectional.
+#
+# Parameters:
+#   %args - Arguments
+#
+# Required Arguments:
+#   plugin       - TableSync* object
+#   OptionParser - object
+sub set_bidirectional_callbacks {
+   my ( %args ) = @_;
+   foreach my $arg ( qw(plugin OptionParser) ) {
+      die "I need a $arg argument" unless $args{$arg};
+   }
+   my $o      = $args{OptionParser};
+   my $plugin = $args{plugin};
+
+   my $col = $o->get('conflict-column');
+   my $cmp = $o->get('conflict-comparison');
+   my $val = $o->get('conflict-value');
+   my $thr = $o->get('conflict-threshold');
+
+   # plugin and syncer are actually the same module.  For clarity we
+   # name them differently.
+
+   $plugin->set_callback('same_row', sub {
+      my ( %args ) = @_;
+      my ($lr, $rr, $syncer) = @args{qw(lr rr syncer)};
+      my $ch = $syncer->{ChangeHandler};
+      my $action = 'UPDATE';
+      my $change_dbh;
+      my $auth_row;
+      my $err;
+
+      my $left_val  = $lr->{$col} || '';
+      my $right_val = $rr->{$col} || '';
+      PTDEBUG && _d('left',  $col, 'value:', $left_val);
+      PTDEBUG && _d('right', $col, 'value:', $right_val);
+
+      my $res = cmp_conflict_col($left_val, $right_val, $cmp, $val, $thr);
+      if ( $res == UPDATE_LEFT ) {
+         PTDEBUG && _d("right dbh $args{right_dbh} $cmp; "
+            . "update left dbh $args{left_dbh}");
+         $ch->set_src('right', $args{right_dbh});
+         $auth_row   = $args{rr};
+         $change_dbh = $args{left_dbh};
+      }
+      elsif ( $res == UPDATE_RIGHT ) {
+         PTDEBUG && _d("left dbh $args{left_dbh} $cmp; "
+            . 
"update right dbh $args{right_dbh}"); + $ch->set_src('left', $args{left_dbh}); + $auth_row = $args{lr}; + $change_dbh = $args{right_dbh}; + } + elsif ( $res == UPDATE_NEITHER ) { + if ( $cmp eq 'equals' || $cmp eq 'matches' ) { + $err = "neither `$col` value $cmp $val"; + } + else { + $err = "`$col` values are the same" + } + } + elsif ( $res == FAILED_THRESHOLD ) { + $err = "`$col` values do not differ by the threhold, $thr." + } + else { + # Shouldn't happen. + die "cmp_conflict_col() returned an invalid result: $res." + } + + if ( $err ) { + $action = undef; # skip change in case we just warn + my $where = $ch->make_where_clause($lr, $syncer->key_cols()); + $err = "# Cannot resolve conflict WHERE $where: $err\n"; + + # die here is caught in sync_a_table(). We're deeply nested: + # sync_a_table > sync_table > compare_sets > syncer > here + $o->get('conflict-error') eq 'warn' ? warn $err : die $err; + } + + return $action, $auth_row, $change_dbh; + }); + + $plugin->set_callback('not_in_right', sub { + my ( %args ) = @_; + $args{syncer}->{ChangeHandler}->set_src('left', $args{left_dbh}); + return 'INSERT', $args{lr}, $args{right_dbh}; + }); + + $plugin->set_callback('not_in_left', sub { + my ( %args ) = @_; + $args{syncer}->{ChangeHandler}->set_src('right', $args{right_dbh}); + return 'INSERT', $args{rr}, $args{left_dbh}; + }); + + return; +} + +# Sub: get_plugins +# Get internal TableSync* plugins. +# +# Returns: +# Hash of available algoritms and the plugin/module names that +# implement them, like "chunk => TableSyncChunk". +sub get_plugins { + my ( %args ) = @_; + + my $file = __FILE__; + open my $fh, "<", $file or die "Cannot open $file: $OS_ERROR"; + my $contents = do { local $/ = undef; <$fh> }; + close $fh; + + my %local_plugins = map { + my $package = $_; + my ($module, $algo) = $package =~ m/(TableSync(\w+))/; + lc $algo => $module; + } $contents =~ m/^package TableSync\w{3,};/gm; + + return %local_plugins; +} + +{ +# DELETE REPLACE INSERT UPDATE ALGORITHM START END EXIT DATABASE.TABLE +my $hdr = "# %6s %7s %6s %6s %-9s %-8s %-8s %-4s %s.%s\n"; + +sub print_header { + my ( $title ) = @_; + print "$title\n" if $title; + printf $hdr, @ChangeHandler::ACTIONS, + qw(ALGORITHM START END EXIT DATABASE TABLE); + return; +} + +sub print_results { + my ( @values ) = @_; + printf $hdr, @values; + return; +} +} + +# Sub: get_server_time +# Return HH:MM:SS of SELECT NOW() from the server. 
+# +# Parameters: +# $dbh - dbh +sub get_server_time { + my ( $dbh ) = @_; + return unless $dbh; + my $now; + eval { + my $sql = "SELECT NOW()"; + PTDEBUG && _d($dbh, $sql); + ($now) = $dbh->selectrow_array($sql); + PTDEBUG && _d("Server time:", $now); + $now =~ s/^\S+\s+//; + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d("Failed to get server time:", $EVAL_ERROR); + } + return $now +} + +sub get_current_user { + my ( $dbh ) = @_; + return unless $dbh; + + my $user; + eval { + my $sql = "SELECT CURRENT_USER()"; + PTDEBUG && _d($dbh, $sql); + ($user) = $dbh->selectrow_array($sql); + }; + if ( $EVAL_ERROR ) { + PTDEBUG && _d("Error getting current user:", $EVAL_ERROR); + } + + return $user; +} + +{ +my %asc_for_table; + +sub diff_where { + my (%args) = @_; + my @required_args = qw(diff tbl_struct TableNibbler); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($diff, $tbl_struct, $tn) = @args{@required_args}; + + my $key = $diff->{chunk_index}; + if ( !$key ) { + PTDEBUG && _d('One nibble checksum'); + return; + } + my $cols = $tbl_struct->{keys}->{$key}->{cols}; + my $asc = $asc_for_table{$diff->{table}}; + if ( !$asc ) { + die "Index $key does not exist in table" unless $cols && @$cols; + + # NibbleIterator does this to make the boundary statements. + $asc = $args{TableNibbler}->generate_asc_stmt( + %args, + tbl_struct => $tbl_struct, + index => $key, + cols => $cols, + asc_only => 1, + ); + + $asc_for_table{$diff->{table}} = $asc; + PTDEBUG && _d('Ascend params:', Dumper($asc)); + } + + my ($lb_sql, $ub_sql); + + if ( defined $diff->{lower_boundary} ) { + $lb_sql = $asc->{boundaries}->{'>='}; + foreach my $val ( $q->deserialize_list($diff->{lower_boundary}) ) { + my $quoted_val = $q->quote_val($val); + $lb_sql =~ s/\?/$quoted_val/; + } + } + + if ( defined $diff->{upper_boundary} ) { + $ub_sql = $asc->{boundaries}->{'<='}; + foreach my $val ( $q->deserialize_list($diff->{upper_boundary}) ) { + my $quoted_val = $q->quote_val($val); + $ub_sql =~ s/\?/$quoted_val/; + } + } + + die "Invalid checksum diff: " . Dumper($diff) + unless $lb_sql || $ub_sql; + + return $lb_sql && $ub_sql ? "$lb_sql AND $ub_sql" + : $lb_sql ? $lb_sql + : $ub_sql; +} +} + +sub find_child_tables { + my ( %args ) = @_; + my @required_args = qw(tbl dbh Quoter); + foreach my $arg ( @required_args ) { + die "I need a $arg argument" unless $args{$arg}; + } + my ($tbl, $dbh, $q) = @args{@required_args}; + + if ( lc($tbl->{tbl_struct}->{engine} || '') eq 'myisam' ) { + PTDEBUG && _d(q{MyISAM table, not looking for child tables}); + return; + } + + PTDEBUG && _d('Finding child tables'); + + my $sql = "SELECT table_schema, table_name " + . "FROM information_schema.key_column_usage " + . "WHERE constraint_schema='$tbl->{db}' " + . "AND referenced_table_name='$tbl->{tbl}'"; + PTDEBUG && _d($sql); + my $rows = $dbh->selectall_arrayref($sql); + if ( !$rows || !@$rows ) { + PTDEBUG && _d('No child tables found'); + return; + } + + my @child_tables; + foreach my $row ( @$rows ) { + my $tbl = { + db => $row->[0], + tbl => $row->[1], + name => $q->quote(@$row), + }; + push @child_tables, $tbl; + } + + PTDEBUG && _d('Child tables:', Dumper(\@child_tables)); + return \@child_tables; +} + +sub _d { + my ($package, undef, $line) = caller 0; + @_ = map { (my $temp = $_) =~ s/\n/\n# /g; $temp; } + map { defined $_ ? 
$_ : 'undef' }
+        @_;
+   print STDERR "# $package:$line $PID ", join(' ', @_), "\n";
+}
+
+# ############################################################################
+# Run the program.
+# ############################################################################
+if ( !caller ) { exit main(@ARGV); }
+
+1; # Because this is a module as well as a script.
+
+# ############################################################################
+# Documentation
+# ############################################################################
+=pod
+
+=head1 NAME
+
+pt-table-sync - Synchronize MySQL table data efficiently.
+
+=head1 SYNOPSIS
+
+Usage: pt-table-sync [OPTIONS] DSN [DSN]
+
+pt-table-sync synchronizes data efficiently between MySQL tables.
+
+This tool changes data, so for maximum safety, you should back up your data
+before using it.  When synchronizing a server that is a replication slave
+with the L<"--replicate"> or L<"--sync-to-master"> methods, it B<always>
+makes the changes on the replication master, B<never> the replication slave
+directly.  This is in general the only safe way to bring a replica back in
+sync with its master; changes to the replica are usually the source of the
+problems in the first place.  However, the changes it makes on the master
+should be no-op changes that set the data to their current values, and
+actually affect only the replica.
+
+Sync db.tbl on host1 to host2:
+
+  pt-table-sync --execute h=host1,D=db,t=tbl h=host2
+
+Sync all tables on host1 to host2 and host3:
+
+  pt-table-sync --execute host1 host2 host3
+
+Make slave1 have the same data as its replication master:
+
+  pt-table-sync --execute --sync-to-master slave1
+
+Resolve differences that L<pt-table-checksum> found on all slaves of master1:
+
+  pt-table-sync --execute --replicate test.checksum master1
+
+Same as above but only resolve differences on slave1:
+
+  pt-table-sync --execute --replicate test.checksum \
+    --sync-to-master slave1
+
+Sync master2 in a master-master replication configuration, where master2's copy
+of db.tbl is known or suspected to be incorrect:
+
+  pt-table-sync --execute --sync-to-master h=master2,D=db,t=tbl
+
+Note that in the master-master configuration, the following will NOT do what you
+want, because it will make changes directly on master2, which will then flow
+through replication and change master1's data:
+
+  # Don't do this in a master-master setup!
+  pt-table-sync --execute h=master1,D=db,t=tbl master2
+
+=head1 RISKS
+
+B<WARNING>: pt-table-sync changes data!  Before using this tool, please:
+
+=over
+
+=item * Read the tool's documentation
+
+=item * Review the tool's known L<"BUGS">
+
+=item * Test the tool on a non-production server
+
+=item * Backup your production server and verify the backups
+
+pt-table-sync is mature, proven in the real world, and well tested,
+but if used improperly it can have adverse consequences.  Always test
+syncing first with L<"--dry-run"> and L<"--print">.
+
+=back
+
+=head1 DESCRIPTION
+
+pt-table-sync does one-way and bidirectional synchronization of table data.
+It does B<not> synchronize table structures, indexes, or any other schema
+objects.  The following describes one-way synchronization.
+L<"BIDIRECTIONAL SYNCING"> is described later.
+
+This tool is complex and functions in several different ways.  To use it
+safely and effectively, you should understand three things: the purpose
+of L<"--replicate">, finding differences, and specifying hosts.  These
+three concepts are closely related and determine how the tool will run.
+The following is the abbreviated logic:
+
+   if DSN has a t part, sync only that table:
+      if 1 DSN:
+         if --sync-to-master:
+            The DSN is a slave.  Connect to its master and sync.
+      if more than 1 DSN:
+         The first DSN is the source.  Sync each DSN in turn.
+   else if --replicate:
+      if --sync-to-master:
+         The DSN is a slave.  Connect to its master, find records
+         of differences, and fix.
+      else:
+         The DSN is the master.  Find slaves and connect to each,
+         find records of differences, and fix.
+   else:
+      if only 1 DSN and --sync-to-master:
+         The DSN is a slave.  Connect to its master, find tables and
+         filter with --databases etc, and sync each table to the master.
+      else:
+         find tables, filtering with --databases etc, and sync each
+         DSN to the first.
+
+pt-table-sync can run in one of two ways: with L<"--replicate"> or without.
+The default is to run without L<"--replicate"> which causes pt-table-sync
+to automatically find differences efficiently with one of several
+algorithms (see L<"ALGORITHMS">).  Alternatively, the value of
+L<"--replicate">, if specified, causes pt-table-sync to use the differences
+already found by having previously run L<pt-table-checksum> with its own
+C<--replicate> option.  Strictly speaking, you don't need to use
+L<"--replicate"> because pt-table-sync can find differences, but many
+people use L<"--replicate"> if, for example, they checksum regularly
+using L<pt-table-checksum> then fix differences as needed with pt-table-sync.
+If you're unsure, read each tool's documentation carefully and decide for
+yourself, or consult with an expert.
+
+Regardless of whether L<"--replicate"> is used or not, you need to specify
+which hosts to sync.  There are two ways: with L<"--sync-to-master"> or
+without.  Specifying L<"--sync-to-master"> makes pt-table-sync expect
+one and only one slave DSN on the command line.  The tool will automatically
+discover the slave's master and sync it so that its data is the same as
+its master.  This is accomplished by making changes on the master which
+then flow through replication and update the slave to resolve its differences.
+B<Be careful though>: although this option specifies and syncs a single
+slave, if there are other slaves on the same master, they will receive
+via replication the changes intended for the slave that you're trying to
+sync.
+
+Alternatively, if you do not specify L<"--sync-to-master">, the first
+DSN given on the command line is the source host.  There is only ever
+one source host.  If you do not also specify L<"--replicate">, then you
+must specify at least one other DSN as the destination host.  There
+can be one or more destination hosts.  Source and destination hosts
+must be independent; they cannot be in the same replication topology.
+pt-table-sync will die with an error if it detects that a destination
+host is a slave because changes are written directly to destination hosts
+(and it's not safe to write directly to slaves).  Or, if you specify
+L<"--replicate"> (but not L<"--sync-to-master">) then pt-table-sync expects
+one and only one master DSN on the command line.  The tool will automatically
+discover all the master's slaves and sync them to the master.  This is
+the only way to sync several (all) slaves at once (because
+L<"--sync-to-master"> only specifies one slave).
+
+Each host on the command line is specified as a DSN.  The first DSN
+(or only DSN for cases like L<"--sync-to-master">) provides default values
+for other DSNs, whether those other DSNs are specified on the command line
+or auto-discovered by the tool.  
So in this example,
+
+  pt-table-sync --execute h=host1,u=msandbox,p=msandbox h=host2
+
+the host2 DSN inherits the C<u> and C<p>

DSN parts from the host1 DSN.
+Use the L<"--explain-hosts"> option to see how pt-table-sync will interpret
+the DSNs given on the command line.
+
+=head1 LIMITATIONS
+
+=over
+
+=item Replicas using row-based replication
+
+pt-table-sync requires statement-based replication when used with
+the L<"--sync-to-master"> or L<"--replicate"> option.
+Therefore it will set C<binlog_format=STATEMENT> on the master
+for its session if required.
+To do this, the user must have the C<SUPER> privilege.
+
+=back
+
+=head1 OUTPUT
+
+If you specify the L<"--verbose"> option, you'll see information about the
+differences between the tables.  There is one row per table.  Each server is
+printed separately.  For example,
+
+  # Syncing h=host1,D=test,t=test1
+  # DELETE REPLACE INSERT UPDATE ALGORITHM START    END      EXIT DATABASE.TABLE
+  #      0       0      3      0 Chunk     13:00:00 13:00:17 2    test.test1
+
+Table test.test1 on host1 required 3 C<INSERT> statements to synchronize
+and it used the Chunk algorithm (see L<"ALGORITHMS">).  The sync operation
+for this table started at 13:00:00 and ended 17 seconds later (times taken
+from C<NOW()> on the source host).  Because differences were found, its
+L<"EXIT STATUS"> was 2.
+
+If you specify the L<"--print"> option, you'll see the actual SQL statements
+that the script uses to synchronize the table if L<"--execute"> is also
+specified.
+
+If you want to see the SQL statements that pt-table-sync is using to select
+chunks, nibbles, rows, etc., then specify L<"--print"> once and L<"--verbose">
+twice.  Be careful though: this can print a lot of SQL statements.
+
+There are cases where no combination of C<INSERT>, C<UPDATE> or C<DELETE>
+statements can resolve differences without violating some unique key.  For
+example, suppose there's a primary key on column a and a unique key on column b.
+Then there is no way to sync these two tables with straightforward UPDATE
+statements:
+
+  +---+---+  +---+---+
+  | a | b |  | a | b |
+  +---+---+  +---+---+
+  | 1 | 2 |  | 1 | 1 |
+  | 2 | 1 |  | 2 | 2 |
+  +---+---+  +---+---+
+
+The tool rewrites queries to C<DELETE> and C<REPLACE> in this case.  This is
+automatically handled after the first index violation, so you don't have to
+worry about it.
+
+Be careful when using pt-table-sync in any master-master setup.  Master-master
+replication is inherently tricky, and it's easy to make mistakes.  You need to
+be sure you're using the tool correctly for master-master replication.  See the
+L<"SYNOPSIS"> for the overview of the correct usage.
+
+Also be careful with tables that have foreign key constraints with C<ON DELETE>
+or C<ON UPDATE> definitions because these might cause unintended changes on the
+child tables.  See L<"--[no]check-child-tables">.
+
+In general, this tool is best suited when your tables have a primary key or
+unique index.  Although it can synchronize data in tables lacking a primary key
+or unique index, it might be best to synchronize that data by another means.
+
+=head1 REPLICATION SAFETY
+
+Synchronizing a replication master and slave safely is a non-trivial problem, in
+general.  There are all sorts of issues to think about, such as other processes
+changing data, trying to change data on the slave, whether the destination and
+source are a master-master pair, and much more.
+
+In general, the safe way to do it is to change the data on the master, and let
+the changes flow through replication to the slave like any other changes.
+However, this works only if it's possible to REPLACE into the table on the
+master.  REPLACE works only if there's a unique index on the table (otherwise it
+just acts like an ordinary INSERT).
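+
+For illustration, consider a hypothetical table C<db.tbl> with primary key
+C<id>, where the master's row is C<(42, 'alice')> but the slave's copy is
+C<(42, 'bob')>.  A sketch of the kind of statement the tool generates and
+executes on the master:
+
+  REPLACE INTO db.tbl (id, name) VALUES (42, 'alice');
+
+On the master this deletes and re-inserts the row with its current values,
+so it is effectively a no-op there; the statement then flows through
+replication and overwrites the incorrect row on the slave.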
+
+If your table has unique keys, you should use the L<"--sync-to-master"> and/or
+L<"--replicate"> options to sync a slave to its master.  This will generally do
+the right thing.  When there is no unique key on the table, there is no choice
+but to change the data on the slave, and pt-table-sync will detect that you're
+trying to do so.  It will complain and die unless you specify
+C<--no-check-slave> (see L<"--[no]check-slave">).
+
+If you're syncing a table without a primary or unique key on a master-master
+pair, you must change the data on the destination server.  Therefore, you need
+to specify C<--no-bin-log> for safety (see L<"--[no]bin-log">).  If you don't,
+the changes you make on the destination server will replicate back to the
+source server and change the data there!
+
+The generally safe thing to do on a master-master pair is to use the
+L<"--sync-to-master"> option so you don't change the data on the destination
+server.  You will also need to specify C<--no-check-slave> to keep
+pt-table-sync from complaining that it is changing data on a slave.
+
+=head1 ALGORITHMS
+
+pt-table-sync has a generic data-syncing framework which uses different
+algorithms to find differences.  The tool automatically chooses the best
+algorithm for each table based on indexes, column types, and the algorithm
+preferences specified by L<"--algorithms">.  The following algorithms are
+available, listed in their default order of preference:
+
+=over
+
+=item Chunk
+
+Finds an index whose first column is numeric (including date and time types),
+and divides the column's range of values into chunks of approximately
+L<"--chunk-size"> rows.  Syncs a chunk at a time by checksumming the entire
+chunk.  If the chunk differs on the source and destination, checksums each
+chunk's rows individually to find the rows that differ.
+
+It is efficient when the column has sufficient cardinality to make the chunks
+end up about the right size.
+
+The initial per-chunk checksum is quite small and results in minimal network
+traffic and memory consumption.  If a chunk's rows must be examined, only the
+primary key columns and a checksum are sent over the network, not the entire
+row.  If a row is found to be different, the entire row will be fetched, but not
+before.
+
+Note that this algorithm will not work if chunking a char column where all
+the values start with the same character.  In that case, the tool will exit
+and suggest picking a different algorithm.
+
+=item Nibble
+
+Finds an index and ascends the index in fixed-size nibbles of L<"--chunk-size">
+rows, using a non-backtracking algorithm (see L<pt-archiver> for more on this
+algorithm).  It is very similar to L<"Chunk">, but instead of pre-calculating
+the boundaries of each piece of the table based on index cardinality, it uses
+C<LIMIT> to define each nibble's upper limit, and the previous nibble's upper
+limit to define the lower limit.
+
+It works in steps: one query finds the row that will define the next nibble's
+upper boundary, and the next query checksums the entire nibble.  If the nibble
+differs between the source and destination, it examines the nibble row-by-row,
+just as L<"Chunk"> does.
+
+=item GroupBy
+
+Selects the entire table grouped by all columns, with a COUNT(*) column added.
+Compares all columns, and if they're the same, compares the COUNT(*) column's
+value to determine how many rows to insert or delete into the destination.
+Works on tables with no primary key or unique index.
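+
+For illustration, here is a sketch of the kind of grouping query this
+algorithm runs (assuming a hypothetical table C<db.t> with columns C<a>
+and C<b>):
+
+  SELECT a, b, COUNT(*) AS cnt FROM db.t GROUP BY a, b;
+
+The tool runs this on the source and the destination; when a group's count
+differs between the two, that many rows are inserted into or deleted from
+the destination to make the counts match.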
+
+=item Stream
+
+Selects the entire table in one big stream and compares all columns.  Selects
+all columns.  Much less efficient than the other algorithms, but works when
+there is no suitable index for them to use.
+
+=item Future Plans
+
+Possibilities for future algorithms are TempTable (what I originally called
+bottom-up in earlier versions of this tool), DrillDown (what I originally
+called top-down), and GroupByPrefix (similar to how SqlYOG Job Agent works).
+Each algorithm has strengths and weaknesses.  If you'd like to implement your
+favorite technique for finding differences between two sources of data on
+possibly different servers, I'm willing to help.  The algorithms adhere to a
+simple interface that makes it pretty easy to write your own.
+
+=back
+
+=head1 BIDIRECTIONAL SYNCING
+
+Bidirectional syncing is a new, experimental feature.  To make it work
+reliably there are a number of strict limitations:
+
+  * only works when syncing one server to other independent servers
+  * does not work in any way with replication
+  * requires that the table(s) are chunkable with the Chunk algorithm
+  * is not N-way, only bidirectional between two servers at a time
+  * does not handle DELETE changes
+
+For example, suppose we have three servers: c1, r1, r2.  c1 is the central
+server, a pseudo-master to the other servers (viz. r1 and r2 are not slaves
+to c1).  r1 and r2 are remote servers.  Rows in table foo are updated and
+inserted on all three servers and we want to synchronize all the changes
+between all the servers.  Table foo has columns:
+
+  id   int PRIMARY KEY
+  ts   timestamp auto updated
+  name varchar
+
+Auto-increment offsets are used so that new rows from any server do not
+create conflicting primary key (id) values.  In general, newer rows, as
+determined by the ts column, take precedence when a same but differing row
+is found during the bidirectional sync.  "Same but differing" means that
+two rows have the same primary key (id) value but different values for some
+other column, like the name column in this example.  Same but differing
+conflicts are resolved by a "conflict".  A conflict compares some column of
+the competing rows to determine a "winner".  The winning row becomes the
+source and its values are used to update the other row.
+
+There are subtle differences between three columns used to achieve
+bidirectional syncing that you should be familiar with: chunk column
+(L<"--chunk-column">), comparison column(s) (L<"--columns">), and conflict
+column (L<"--conflict-column">).  The chunk column is only used to chunk the
+table; e.g. "WHERE id >= 5 AND id < 10".  Chunks are checksummed and when
+chunk checksums reveal a difference, the tool selects the rows in that
+chunk and checksums the L<"--columns"> for each row.  If a column checksum
+differs, the rows have one or more conflicting column values.  In a
+traditional unidirectional sync, the conflict is a moot point because it can
+be resolved simply by updating the entire destination row with the source
+row's values.  In a bidirectional sync, however, the L<"--conflict-column">
+(in accordance with other C<--conflict-*> options listed below) is compared
+to determine which row is "correct" or "authoritative"; this row becomes
+the "source".
+
+To sync all three servers completely, two runs of pt-table-sync are required.
+The first run syncs c1 and r1, then syncs c1 and r2 including any changes
+from r1.  
At this point c1 and r2 are completely in sync, but r1 is missing
+any changes from r2 because c1 didn't have these changes when it and r1
+were synced.  So a second run is needed which syncs the servers in the same
+order, but this time when c1 and r1 are synced r1 gets r2's changes.
+
+The tool does not sync N-ways, only bidirectionally between the first DSN
+given on the command line and each subsequent DSN in turn.  So the tool in
+this example would be run twice like:
+
+  pt-table-sync --bidirectional h=c1 h=r1 h=r2
+
+The L<"--bidirectional"> option enables this feature and causes various
+sanity checks to be performed.  You must specify other options that tell
+pt-table-sync how to resolve conflicts for same but differing rows.
+These options are:
+
+  * --conflict-column
+  * --conflict-comparison
+  * --conflict-value
+  * --conflict-threshold
+  * --conflict-error (optional)
+
+Use L<"--print"> to test this option before L<"--execute">.  The printed
+SQL statements will have comments saying on which host the statement
+would be executed if you used L<"--execute">.
+
+Technical side note: the first DSN is always the "left" server and the other
+DSNs are always the "right" server.  Since either server can become the source
+or destination it's confusing to think of them as "src" and "dst".  Therefore,
+they're generically referred to as left and right.  It's easy to remember
+this because the first DSN is always to the left of the other server DSNs on
+the command line.
+
+=head1 EXIT STATUS
+
+The following are the exit statuses (also called return values, or return codes)
+when pt-table-sync finishes and exits.
+
+  STATUS  MEANING
+  ======  =======================================================
+  0       Success.
+  1       Internal error.
+  2       At least one table differed on the destination.
+  3       Combination of 1 and 2.
+
+=head1 OPTIONS
+
+Specify at least one of L<"--print">, L<"--execute">, or L<"--dry-run">.
+
+L<"--where"> and L<"--replicate"> are mutually exclusive.
+
+This tool accepts additional command-line arguments.  Refer to the
+L<"SYNOPSIS"> and usage information for details.
+
+=over
+
+=item --algorithms
+
+type: string; default: Chunk,Nibble,GroupBy,Stream
+
+Algorithm to use when comparing the tables, in order of preference.
+
+For each table, pt-table-sync will check if the table can be synced with
+the given algorithms in the order that they're given.  The first algorithm
+that can sync the table is used.  See L<"ALGORITHMS">.
+
+=item --ask-pass
+
+Prompt for a password when connecting to MySQL.
+
+=item --bidirectional
+
+Enable bidirectional sync between first and subsequent hosts.
+
+See L<"BIDIRECTIONAL SYNCING"> for more information.
+
+=item --[no]bin-log
+
+default: yes
+
+Log to the binary log (C<SET SQL_LOG_BIN=1>).
+
+Specifying C<--no-bin-log> will C<SET SQL_LOG_BIN=0>.
+
+=item --buffer-in-mysql
+
+Instruct MySQL to buffer queries in its memory.
+
+This option adds the C<SQL_BUFFER_RESULT> option to the comparison queries.
+This causes MySQL to execute the queries and place them in a temporary table
+internally before sending the results back to pt-table-sync.  The advantage of
+this strategy is that pt-table-sync can fetch rows as desired without using a
+lot of memory inside the Perl process, while releasing locks on the MySQL table
+(to reduce contention with other queries).  The disadvantage is that it uses
+more memory on the MySQL server instead.
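+
+For illustration, a sketch of the rewrite (hypothetical table and columns):
+
+  SELECT SQL_BUFFER_RESULT col1, col2 FROM db.tbl;
+
+C<SQL_BUFFER_RESULT> tells MySQL to materialize the result in a temporary
+table, so the table's locks can be released before all rows are fetched.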
+
+You probably want to leave L<"--[no]buffer-to-client"> enabled too, because
+buffering into a temp table and then fetching it all into Perl's memory is
+probably a silly thing to do.  This option is most useful for the GroupBy and
+Stream algorithms, which may fetch a lot of data from the server.
+
+=item --[no]buffer-to-client
+
+default: yes
+
+Fetch rows one-by-one from MySQL while comparing.
+
+This option enables C<mysql_use_result> which causes MySQL to hold the selected
+rows on the server until the tool fetches them.  This allows the tool to use
+less memory but may keep the rows locked on the server longer.
+
+If this option is disabled by specifying C<--no-buffer-to-client> then
+C<mysql_store_result> is used which causes MySQL to send all selected rows to
+the tool at once.  This may result in the results "cursor" being held open for
+a shorter time on the server, but if the tables are large, it could take a long
+time anyway, and use all your memory.
+
+For most non-trivial data sizes, you want to leave this option enabled.
+
+This option is disabled when L<"--bidirectional"> is used.
+
+=item --channel
+
+type: string
+
+Channel name used when connected to a server using replication channels.
+Suppose you have two masters, master_a at port 12345, master_b at port 1236 and
+a slave connected to both masters using channels chan_master_a and chan_master_b.
+If you want to run pt-table-sync to synchronize the slave against master_a, pt-table-sync
+won't be able to determine what's the correct master since SHOW SLAVE STATUS
+will return 2 rows.  In this case, you can use --channel=chan_master_a to specify
+the channel name to use in the SHOW SLAVE STATUS command.
+
+=item --charset
+
+short form: -A; type: string
+
+Default character set.  If the value is utf8, sets Perl's binmode on
+STDOUT to utf8, passes the mysql_enable_utf8 option to DBD::mysql, and
+runs SET NAMES UTF8 after connecting to MySQL.  Any other value sets
+binmode on STDOUT without the utf8 layer, and runs SET NAMES after
+connecting to MySQL.
+
+=item --[no]check-child-tables
+
+default: yes
+
+Check if L<"--execute"> will adversely affect child tables.  When
+L<"--replace">, L<"--replicate">, or L<"--sync-to-master"> is specified,
+the tool may sync tables using C<REPLACE> statements.  If a table being
+synced has child tables with C<ON DELETE CASCADE>, C<ON UPDATE CASCADE>,
+or C<ON DELETE SET NULL>, the tool prints an error and skips the table because
+C<REPLACE> becomes C<DELETE> then C<INSERT>, so the C<DELETE> will cascade
+to the child table and delete its rows.  In the worst case, this can delete
+all rows in child tables!
+
+Specify C<--no-check-child-tables> to disable this check.  To completely
+avoid affecting child tables, also specify C<--no-foreign-key-checks>
+so MySQL will not cascade any operations from the parent to child tables.
+
+This check is only performed if L<"--execute"> and one of L<"--replace">,
+L<"--replicate">, or L<"--sync-to-master"> is specified.  L<"--print">
+does not check child tables.
+
+The error message only prints the first child table found with an
+C<ON DELETE CASCADE>, C<ON UPDATE CASCADE>, or C<ON DELETE SET NULL>
+foreign key constraint.  There could be other affected child tables.
+
+=item --[no]check-master
+
+default: yes
+
+With L<"--sync-to-master">, try to verify that the detected
+master is the real master.
+
+=item --[no]check-slave
+
+default: yes
+
+Check whether the destination server is a slave.
+
+If the destination server is a slave, it's generally unsafe to make changes on
+it.  However, sometimes you have to; L<"--replace"> won't work unless there's a
+unique index, for example, so you can't make changes on the master in that
+scenario.  
By default pt-table-sync will complain if you try to change data on
+a slave.  Specify C<--no-check-slave> to disable this check.  Use it at your own
+risk.
+
+=item --[no]check-triggers
+
+default: yes
+
+Check that no triggers are defined on the destination table.
+
+Triggers were introduced in MySQL v5.0.2, so for older versions this option
+has no effect because triggers will not be checked.
+
+=item --chunk-column
+
+type: string
+
+Chunk the table on this column.
+
+=item --chunk-index
+
+type: string
+
+Chunk the table using this index.
+
+=item --chunk-size
+
+type: string; default: 1000
+
+Number of rows or data size per chunk.
+
+The size of each chunk of rows for the L<"Chunk"> and L<"Nibble"> algorithms.
+The size can be either a number of rows, or a data size.  Data sizes are
+specified with a suffix of k=kibibytes, M=mebibytes, G=gibibytes.  Data sizes
+are converted to a number of rows by dividing by the average row length.
+
+=item --columns
+
+short form: -c; type: array
+
+Compare this comma-separated list of columns.
+
+=item --config
+
+type: Array
+
+Read this comma-separated list of config files; if specified, this must be the
+first option on the command line.
+
+=item --conflict-column
+
+type: string
+
+Compare this column when rows conflict during a L<"--bidirectional"> sync.
+
+When a same but differing row is found the value of this column from each
+row is compared according to L<"--conflict-comparison">, L<"--conflict-value">
+and L<"--conflict-threshold"> to determine which row has the correct data and
+becomes the source.  The column can be any type for which there is an
+appropriate L<"--conflict-comparison"> (this is almost all types except, for
+example, blobs).
+
+This option only works with L<"--bidirectional">.
+See L<"BIDIRECTIONAL SYNCING"> for more information.
+
+=item --conflict-comparison
+
+type: string
+
+Choose the L<"--conflict-column"> with this property as the source.
+
+The option affects how the L<"--conflict-column"> values from the conflicting
+rows are compared.  Possible comparisons are one of these MAGIC_comparisons:
+
+  newest|oldest|greatest|least|equals|matches
+
+  COMPARISON  CHOOSES ROW WITH
+  ==========  =========================================================
+  newest      Newest temporal --conflict-column value
+  oldest      Oldest temporal --conflict-column value
+  greatest    Greatest numerical --conflict-column value
+  least       Least numerical --conflict-column value
+  equals      --conflict-column value equal to --conflict-value
+  matches     --conflict-column value matching Perl regex pattern
+              --conflict-value
+
+This option only works with L<"--bidirectional">.
+See L<"BIDIRECTIONAL SYNCING"> for more information.
+
+=item --conflict-error
+
+type: string; default: warn
+
+How to report unresolvable conflicts and conflict errors.
+
+This option changes how the user is notified when a conflict cannot be
+resolved or causes some kind of error.  Possible values are:
+
+  * warn: Print a warning to STDERR about the unresolvable conflict
+  * die:  Die, stop syncing, and print a warning to STDERR
+
+This option only works with L<"--bidirectional">.
+See L<"BIDIRECTIONAL SYNCING"> for more information.
+
+=item --conflict-threshold
+
+type: string
+
+Amount by which one L<"--conflict-column"> must exceed the other.
+
+The L<"--conflict-threshold"> prevents a conflict from being resolved if
+the absolute difference between the two L<"--conflict-column"> values is
+less than this amount.  
For example, if two L<"--conflict-column"> have
+timestamp values "2009-12-01 12:00:00" and "2009-12-01 12:05:00" the difference
+is 5 minutes.  If L<"--conflict-threshold"> is set to "5m" the conflict will
+be resolved, but if L<"--conflict-threshold"> is set to "6m" the conflict
+will fail to resolve because the difference is not greater than or equal
+to 6 minutes.  In this latter case, L<"--conflict-error"> will report
+the failure.
+
+This option only works with L<"--bidirectional">.
+See L<"BIDIRECTIONAL SYNCING"> for more information.
+
+=item --conflict-value
+
+type: string
+
+Use this value for certain L<"--conflict-comparison">.
+
+This option gives the value for C<equals> and C<matches>
+L<"--conflict-comparison">.
+
+This option only works with L<"--bidirectional">.
+See L<"BIDIRECTIONAL SYNCING"> for more information.
+
+=item --databases
+
+short form: -d; type: hash
+
+Sync only this comma-separated list of databases.
+
+A common request is to sync tables from one database with tables from another
+database on the same or different server.  This is not yet possible.
+L<"--databases"> will not do it, and you can't do it with the D part of the DSN
+either because in the absence of a table name it assumes the whole server
+should be synced and the D part controls only the connection's default database.
+
+=item --defaults-file
+
+short form: -F; type: string
+
+Only read mysql options from the given file.  You must give an absolute pathname.
+
+=item --dry-run
+
+Analyze, decide the sync algorithm to use, print and exit.
+
+Implies L<"--verbose"> so you can see the results.  The results are in the same
+output format that you'll see from actually running the tool, but there will be
+zeros for rows affected.  This is because the tool actually executes, but stops
+before it compares any data and just returns zeros.  The zeros do not mean there
+are no changes to be made.
+
+=item --engines
+
+short form: -e; type: hash
+
+Sync only this comma-separated list of storage engines.
+
+=item --execute
+
+Execute queries to make the tables have identical data.
+
+This option makes pt-table-sync actually sync table data by executing all
+the queries that it created to resolve table differences.  Therefore, B<the
+tables will be changed!>  And unless you also specify L<"--verbose">, the
+changes will be made silently.  If this is not what you want, see
+L<"--print"> or L<"--dry-run">.
+
+=item --explain-hosts
+
+Print connection information and exit.
+
+Print out a list of hosts to which pt-table-sync will connect, with all
+the various connection options, and exit.
+
+=item --float-precision
+
+type: int
+
+Precision for C<FLOAT> and C<DOUBLE> number-to-string conversion.  Causes FLOAT
+and DOUBLE values to be rounded to the specified number of digits after the
+decimal point, with the ROUND() function in MySQL.  This can help avoid
+checksum mismatches due to different floating-point representations of the same
+values on different MySQL versions and hardware.  The default is no rounding;
+the values are converted to strings by the CONCAT() function, and MySQL chooses
+the string representation.  If you specify a value of 2, for example, then the
+values 1.008 and 1.009 will be rounded to 1.01, and will checksum as equal.
+
+=item --[no]foreign-key-checks
+
+default: yes
+
+Enable foreign key checks (C<SET FOREIGN_KEY_CHECKS=1>).
+
+Specifying C<--no-foreign-key-checks> will C<SET FOREIGN_KEY_CHECKS=0>.
+
+=item --function
+
+type: string
+
+Which hash function you'd like to use for checksums.
+
+The default is C<CRC32>.  Other good choices include C<MD5> and C<SHA1>.  
If you
+have installed the C<FNV_64> user-defined function, C<pt-table-sync> will detect
+it and prefer to use it, because it is much faster than the built-ins.  You can
+also use MURMUR_HASH if you've installed that user-defined function.  Both of
+these are distributed with Percona Server.  See L<pt-table-checksum> for more
+information and benchmarks.
+
+=item --help
+
+Show help and exit.
+
+=item --[no]hex-blob
+
+default: yes
+
+C<HEX()> C<BLOB>, C<TEXT> and C<BINARY> columns.
+
+When row data from the source is fetched to create queries to sync the
+data (i.e. the queries seen with L<"--print"> and executed by L<"--execute">),
+binary columns are wrapped in HEX() so the binary data does not produce
+an invalid SQL statement.  You can disable this option but you probably
+shouldn't.
+
+=item --host
+
+short form: -h; type: string
+
+Connect to host.
+
+=item --ignore-columns
+
+type: Hash
+
+Ignore this comma-separated list of column names in comparisons.
+
+This option causes columns not to be compared.  However, if a row is determined
+to differ between tables, all columns in that row will be synced, regardless.
+(It is not currently possible to exclude columns from the sync process itself,
+only from the comparison.)
+
+=item --ignore-databases
+
+type: Hash
+
+Ignore this comma-separated list of databases.
+
+(system databases are ignored by default)
+
+=item --ignore-engines
+
+type: Hash; default: FEDERATED,MRG_MyISAM
+
+Ignore this comma-separated list of storage engines.
+
+=item --ignore-tables
+
+type: Hash
+
+Ignore this comma-separated list of tables.
+
+Table names may be qualified with the database name.
+
+=item --ignore-tables-regex
+
+type: string; group: Filter
+
+Ignore tables whose names match the Perl regex.
+
+=item --[no]index-hint
+
+default: yes
+
+Add FORCE/USE INDEX hints to the chunk and row queries.
+
+By default C<pt-table-sync> adds a FORCE/USE INDEX hint to each SQL statement
+to coerce MySQL into using the index chosen by the sync algorithm or specified
+by L<"--chunk-index">.  This is usually a good thing, but in rare cases the
+index may not be the best for the query so you can suppress the index hint
+by specifying C<--no-index-hint> and let MySQL choose the index.
+
+This does not affect the queries printed by L<"--print">; it only affects the
+chunk and row queries that C<pt-table-sync> uses to select and compare rows.
+
+=item --lock
+
+type: int
+
+Lock tables: 0=none, 1=per sync cycle, 2=per table, or 3=globally.
+
+This uses C<LOCK TABLES>.  This can help prevent tables being changed while
+you're examining them.  The possible values are as follows:
+
+  VALUE  MEANING
+  =====  =======================================================
+  0      Never lock tables.
+  1      Lock and unlock one time per sync cycle (as implemented
+         by the syncing algorithm).  This is the most granular
+         level of locking available.  For example, the Chunk
+         algorithm will lock each chunk of C<--chunk-size> rows,
+         and then unlock them if they are the same on the source
+         and the destination, before moving on to the next chunk.
+  2      Lock and unlock before and after each table.
+  3      Lock and unlock once for every server (DSN) synced, with
+         C<FLUSH TABLES WITH READ LOCK>.
+
+A replication slave is never locked if L<"--replicate"> or L<"--sync-to-master">
+is specified, since in theory locking the table on the master should prevent any
+changes from taking place.  (You are not changing data on your slave, right?)
+If L<"--wait"> is given, the master (source) is locked and then the tool waits
+for the slave to catch up to the master before continuing.
+
+If C<--transaction> is specified, C<LOCK TABLES> is not used.  
Instead, lock
+and unlock are implemented by beginning and committing transactions.
+The exception is if L<"--lock"> is 3.
+
+If C<--no-transaction> is specified, then C<LOCK TABLES> is used for any
+value of L<"--lock">.  See L<"--[no]transaction">.
+
+=item --lock-and-rename
+
+Lock the source and destination table, sync, then swap names.  This is useful as
+a less-blocking ALTER TABLE, once the tables are reasonably in sync with each
+other (which you may choose to accomplish via any number of means, including
+dump and reload or even something like L<pt-archiver>).  It requires exactly two
+DSNs and assumes they are on the same server, so it does no waiting for
+replication or the like.  Tables are locked with LOCK TABLES.
+
+=item --password
+
+short form: -p; type: string
+
+Password to use when connecting.
+If password contains commas they must be escaped with a backslash: "exam\,ple"
+
+=item --pid
+
+type: string
+
+Create the given PID file.  The tool won't start if the PID file already
+exists and the PID it contains is different than the current PID.  However,
+if the PID file exists and the PID it contains is no longer running, the
+tool will overwrite the PID file with the current PID.  The PID file is
+removed automatically when the tool exits.
+
+=item --port
+
+short form: -P; type: int
+
+Port number to use for connection.
+
+=item --print
+
+Print queries that will resolve differences.
+
+If you don't trust C<pt-table-sync>, or just want to see what it will do, this
+is a good way to be safe.  These queries are valid SQL and you can run them
+yourself if you want to sync the tables manually.
+
+=item --recursion-method
+
+type: array; default: processlist,hosts
+
+Preferred recursion method used to find slaves.
+
+Possible methods are:
+
+  METHOD       USES
+  ===========  ==================
+  processlist  SHOW PROCESSLIST
+  hosts        SHOW SLAVE HOSTS
+  none         Do not find slaves
+
+The processlist method is preferred because SHOW SLAVE HOSTS is not reliable.
+However, the hosts method is required if the server uses a non-standard
+port (not 3306).  Usually pt-table-sync does the right thing and finds
+the slaves, but you may give a preferred method and it will be used first.
+If it doesn't find any slaves, the other methods will be tried.
+
+=item --replace
+
+Write all C<INSERT> and C<UPDATE> statements as C<REPLACE>.
+
+This is automatically switched on as needed when there are unique index
+violations.
+
+=item --replicate
+
+type: string
+
+Sync tables listed as different in this table.
+
+Specifies that C<pt-table-sync> should examine the specified table to find data
+that differs.  The table is exactly the same as the argument of the same name to
+L<pt-table-checksum>.  That is, it contains records of which tables (and ranges
+of values) differ between the master and slave.
+
+For each table and range of values that shows differences between the master and
+slave, C<pt-table-sync> will sync that table, with the appropriate C<WHERE>
+clause, to its master.
+
+This automatically sets L<"--wait"> to 60 and causes changes to be made on the
+master instead of the slave.
+
+If L<"--sync-to-master"> is specified, the tool will assume the server you
+specified is the slave, and connect to the master as usual to sync.
+
+Otherwise, it will try to use C<SHOW PROCESSLIST> to find slaves of the server
+you specified.  If it is unable to find any slaves via C<SHOW PROCESSLIST>, it
+will inspect C<SHOW SLAVE HOSTS> instead.  You must configure each slave's
+C<report-host>, C<report-port> and other options for this to work right.  After
+finding slaves, it will inspect the specified table on each slave to find data
+that needs to be synced, and sync it.
+
+The tool examines the master's copy of the table first, assuming that the master
+is potentially a slave as well.  Any table that shows differences there will
+B<not> be synced on the slave(s).  For example, suppose your replication is set
+up as A->B, B->C, B->D.  Suppose you use this argument and specify server B.
+The tool will examine server B's copy of the table.  If it looks like server B's
+data in the table is different from server A's copy, the tool will not
+sync that table on servers C and D.
+
+=item --slave-user
+
+type: string
+
+Sets the user to be used to connect to the slaves.
+This parameter allows you to have a different user with fewer privileges on the
+slaves but that user must exist on all slaves.
+
+=item --slave-password
+
+type: string
+
+Sets the password to be used to connect to the slaves.
+It can be used with --slave-user and the password for the user must be the same
+on all slaves.
+
+=item --set-vars
+
+type: Array
+
+Set the MySQL variables in this comma-separated list of C<variable=value> pairs.
+
+By default, the tool sets:
+
+=for comment ignore-pt-internal-value
+MAGIC_set_vars
+
+  wait_timeout=10000
+
+Variables specified on the command line override these defaults.  For
+example, specifying C<--set-vars wait_timeout=500> overrides the default
+value of C<10000>.
+
+The tool prints a warning and continues if a variable cannot be set.
+
+=item --socket
+
+short form: -S; type: string
+
+Socket file to use for connection.
+
+=item --sync-to-master
+
+Treat the DSN as a slave and sync it to its master.
+
+Treat the server you specified as a slave.  Inspect C<SHOW SLAVE STATUS>,
+connect to the server's master, and treat the master as the source and the slave
+as the destination.  Causes changes to be made on the master.  Sets L<"--wait">
+to 60 by default, sets L<"--lock"> to 1 by default, and disables
+L<"--[no]transaction"> by default.  See also L<"--replicate">, which changes
+this option's behavior.
+
+=item --tables
+
+short form: -t; type: hash
+
+Sync only this comma-separated list of tables.
+
+Table names may be qualified with the database name.
+
+=item --timeout-ok
+
+Keep going if L<"--wait"> fails.
+
+If you specify L<"--wait"> and the slave doesn't catch up to the master's
+position before the wait times out, the default behavior is to abort.  This
+option makes the tool keep going anyway.  B<Warning>: if you are trying to get a
+consistent comparison between the two servers, you probably don't want to keep
+going after a timeout.
+
+=item --[no]transaction
+
+Use transactions instead of C<LOCK TABLES>.
+
+The granularity of beginning and committing transactions is controlled by
+L<"--lock">.  This is enabled by default, but since L<"--lock"> is disabled by
+default, it has no effect.
+
+Most options that enable locking also disable transactions by default, so if
+you want to use transactional locking (via C<LOCK IN SHARE MODE> and
+C<FOR UPDATE>), you must specify C<--transaction> explicitly.
+
+If you don't specify C<--transaction> explicitly C<pt-table-sync> will decide on
+a per-table basis whether to use transactions or table locks.  It currently
+uses transactions on InnoDB tables, and table locks on all others.
+
+If C<--no-transaction> is specified, then C<pt-table-sync> will not use
+transactions at all (not even for InnoDB tables) and locking is controlled
+by L<"--lock">.
+
+When enabled, either explicitly or implicitly, the transaction isolation level
+is set to C<REPEATABLE READ> and transactions are started C<WITH CONSISTENT
+SNAPSHOT>.
+
+=item --trim
+
+C<TRIM()> C<VARCHAR> columns when comparing them.  Helps when
+comparing MySQL 4.1 to >= 5.0.
+
+This is useful when you don't care about the trailing space differences between
+MySQL versions, which vary in their handling of trailing spaces. MySQL 5.0 and
+later all retain trailing spaces in C, while previous versions would
+remove them.
+
+=item --[no]unique-checks
+
+default: yes
+
+Enable unique key checks (C).
+
+Specifying C<--no-unique-checks> will C.
+
+=item --user
+
+short form: -u; type: string
+
+User for login if not current user.
+
+=item --verbose
+
+short form: -v; cumulative: yes
+
+Print results of sync operations.
+
+See L<"OUTPUT"> for more details about the output.
+
+=item --version
+
+Show version and exit.
+
+=item --[no]version-check
+
+default: yes
+
+Check for the latest version of Percona Toolkit, MySQL, and other programs.
+
+This is a standard "check for updates automatically" feature, with two
+additional features. First, the tool checks its own version and also the
+versions of the following software: operating system, Percona Monitoring and
+Management (PMM), MySQL, Perl, MySQL driver for Perl (DBD::mysql), and
+Percona Toolkit. Second, it checks for and warns about versions with known
+problems. For example, MySQL 5.5.25 had a critical bug and was re-released
+as 5.5.25a.
+
+A secure connection to Percona’s Version Check database server is done to
+perform these checks. Each request is logged by the server, including software
+version numbers and unique ID of the checked system. The ID is generated by the
+Percona Toolkit installation script or when the Version Check database call is
+done for the first time.
+
+Any updates or known problems are printed to STDOUT before the tool's normal
+output. This feature should never interfere with the normal operation of the
+tool.
+
+For more information, visit L.
+
+=item --wait
+
+short form: -w; type: time
+
+How long to wait for slaves to catch up to their master.
+
+Make the master wait for the slave to catch up in replication before comparing
+the tables. The value is the number of seconds to wait before timing out (see
+also L<"--timeout-ok">). Sets L<"--lock"> to 1 and L<"--[no]transaction"> to 0
+by default. If you see an error such as the following,
+
+  MASTER_POS_WAIT returned -1
+
+it means the timeout was exceeded and you need to increase it.
+
+The default value of this option is influenced by other options. To see what
+value is in effect, run with L<"--help">.
+
+To disable waiting entirely (except for locks), specify L<"--wait"> 0. This
+helps when the slave is lagging on tables that are not being synced.
+
+=item --where
+
+type: string
+
+C clause to restrict syncing to part of the table.
+
+=item --[no]zero-chunk
+
+default: yes
+
+Add a chunk for rows with zero or zero-equivalent values. This only has an
+effect when L<"--chunk-size"> is specified. The purpose of the zero chunk
+is to capture a potentially large number of zero values that would imbalance
+the size of the first chunk. For example, if a lot of negative numbers were
+inserted into an unsigned integer column causing them to be stored as zeros,
+then these zero values are captured by the zero chunk instead of the first
+chunk and all its non-zero values.
+
+=back
+
+=head1 DSN OPTIONS
+
+These DSN options are used to create a DSN. Each option is given like
+C. The options are case-sensitive, so P and p are not the
+same option. There cannot be whitespace before or after the C<=> and
+if the value contains whitespace it must be quoted. DSN options are
+comma-separated. See the L manpage for full details.
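+
+For example, the following DSN connects as user C<bob> to the C<test>
+database on host C<host1> and selects the C<city> table (all values here
+are illustrative):
+
+  h=host1,D=test,t=city,u=bob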
+
+=over
+
+=item * A
+
+dsn: charset; copy: yes
+
+Default character set.
+
+=item * D
+
+dsn: database; copy: yes
+
+Database containing the table to be synced.
+
+=item * F
+
+dsn: mysql_read_default_file; copy: yes
+
+Only read default options from the given file.
+
+=item * h
+
+dsn: host; copy: yes
+
+Connect to host.
+
+=item * p
+
+dsn: password; copy: yes
+
+Password to use when connecting.
+If password contains commas they must be escaped with a backslash: "exam\,ple"
+
+=item * P
+
+dsn: port; copy: yes
+
+Port number to use for connection.
+
+=item * S
+
+dsn: mysql_socket; copy: yes
+
+Socket file to use for connection.
+
+=item * t
+
+copy: yes
+
+Table to be synced.
+
+=item * u
+
+dsn: user; copy: yes
+
+User for login if not current user.
+
+=back
+
+=head1 ENVIRONMENT
+
+The environment variable C<PTDEBUG> enables verbose debugging output to STDERR.
+To enable debugging and capture all output to a file, run the tool like:
+
+   PTDEBUG=1 pt-table-sync ... > FILE 2>&1
+
+Be careful: debugging output is voluminous and can generate several megabytes
+of output.
+
+=head1 ATTENTION
+
+Using C<PTDEBUG> might expose passwords. When debug is enabled, all command line
+parameters are shown in the output.
+
+=head1 SYSTEM REQUIREMENTS
+
+You need Perl, DBI, DBD::mysql, and some core packages that ought to be
+installed in any reasonably new version of Perl.
+
+=head1 BUGS
+
+For a list of known bugs, see L.
+
+Please report bugs at L.
+Include the following information in your bug report:
+
+=over
+
+=item * Complete command-line used to run the tool
+
+=item * Tool L<"--version">
+
+=item * MySQL version of all servers involved
+
+=item * Output from the tool including STDERR
+
+=item * Input files (log/dump/config files, etc.)
+
+=back
+
+If possible, include debugging output by running the tool with C<PTDEBUG>;
+see L<"ENVIRONMENT">.
+
+=head1 DOWNLOADING
+
+Visit L to download the
+latest release of Percona Toolkit. Or, get the latest release from the
+command line:
+
+   wget percona.com/get/percona-toolkit.tar.gz
+
+   wget percona.com/get/percona-toolkit.rpm
+
+   wget percona.com/get/percona-toolkit.deb
+
+You can also get individual tools from the latest release:
+
+   wget percona.com/get/TOOL
+
+Replace C with the name of any tool.
+
+=head1 AUTHORS
+
+Baron Schwartz
+
+=head1 ACKNOWLEDGMENTS
+
+My work is based in part on Giuseppe Maxia's work on distributed databases,
+L and code derived from that
+article. There is more explanation, and a link to the code, at
+L.
+
+Another programmer extended Maxia's work even further. Fabien Coelho changed
+and generalized Maxia's technique, introducing symmetry and avoiding some
+problems that might have caused too-frequent checksum collisions. This work
+grew into pg_comparator, L. Coelho also
+explained the technique further in a paper titled "Remote Comparison of Database
+Tables" (L).
+
+This existing literature mostly addressed how to find the differences between
+the tables, not how to resolve them once found. I needed a tool that would not
+only find them efficiently, but would then resolve them. I first began thinking
+about how to improve the technique further with my article
+L,
+where I discussed a number of problems with the Maxia/Coelho "bottom-up"
+algorithm. After writing that article, I began to write this tool. I wanted to
+actually implement their algorithm with some improvements so I was sure I
+understood it completely. I discovered it is not what I thought it was, and is
+considerably more complex than it appeared to me at first.
Fabien Coelho was +kind enough to address some questions over email. + +The first versions of this tool implemented a version of the Coelho/Maxia +algorithm, which I called "bottom-up", and my own, which I called "top-down." +Those algorithms are considerably more complex than the current algorithms and +I have removed them from this tool, and may add them back later. The +improvements to the bottom-up algorithm are my original work, as is the +top-down algorithm. The techniques to actually resolve the differences are +also my own work. + +Another tool that can synchronize tables is the SQLyog Job Agent from webyog. +Thanks to Rohit Nadhani, SJA's author, for the conversations about the general +techniques. There is a comparison of pt-table-sync and SJA at +L + +Thanks to the following people and organizations for helping in many ways: + +The Rimm-Kaufman Group L, +MySQL AB L, +Blue Ridge InternetWorks L, +Percona L, +Fabien Coelho, +Giuseppe Maxia and others at MySQL AB, +Kristian Koehntopp (MySQL AB), +Rohit Nadhani (WebYog), +The helpful monks at Perlmonks, +And others too numerous to mention. + +=head1 ABOUT PERCONA TOOLKIT + +This tool is part of Percona Toolkit, a collection of advanced command-line +tools for MySQL developed by Percona. Percona Toolkit was forked from two +projects in June, 2011: Maatkit and Aspersa. Those projects were created by +Baron Schwartz and primarily developed by him and Daniel Nichter. Visit +L to learn about other free, open-source +software from Percona. + +=head1 COPYRIGHT, LICENSE, AND WARRANTY + +This program is copyright 2011-2021 Percona LLC and/or its affiliates, +2007-2011 Baron Schwartz. + +THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + +This program is free software; you can redistribute it and/or modify it under +the terms of the GNU General Public License as published by the Free Software +Foundation, version 2; OR the Perl Artistic License. On UNIX and similar +systems, you can issue `man perlgpl' or `man perlartistic' to read these +licenses. + +You should have received a copy of the GNU General Public License along with +this program; if not, write to the Free Software Foundation, Inc., 59 Temple +Place, Suite 330, Boston, MA 02111-1307 USA. 
+
+=head1 VERSION
+
+pt-table-sync 3.4.0
+
+=cut
diff --git a/dbm-services/mysql/slow-query-parser-service/.gitignore b/dbm-services/mysql/slow-query-parser-service/.gitignore
new file mode 100644
index 0000000000..d8b1adce79
--- /dev/null
+++ b/dbm-services/mysql/slow-query-parser-service/.gitignore
@@ -0,0 +1,3 @@
+build/
+tmysqlparse
+*.log
\ No newline at end of file
diff --git a/dbm-services/mysql/slow-query-parser-service/Dockerfile b/dbm-services/mysql/slow-query-parser-service/Dockerfile
new file mode 100644
index 0000000000..d83313873f
--- /dev/null
+++ b/dbm-services/mysql/slow-query-parser-service/Dockerfile
@@ -0,0 +1,6 @@
+FROM mirrors.tencent.com/sccmsp/tmysqlparse:3.0.7
+
+ADD build/slow-query-parser-service /
+
+WORKDIR /
+ENTRYPOINT ["/slow-query-parser-service", "run"]
\ No newline at end of file
diff --git a/dbm-services/mysql/slow-query-parser-service/Makefile b/dbm-services/mysql/slow-query-parser-service/Makefile
new file mode 100644
index 0000000000..7305f64bf0
--- /dev/null
+++ b/dbm-services/mysql/slow-query-parser-service/Makefile
@@ -0,0 +1,26 @@
+PROJ="slow-query-parser-service"
+VERSION = $(error please set VERSION flag)
+PKG = ${PROJ}.tar.gz
+OUTPUT_DIR = build
+RELEASE_BUILD_FLAG = "-X main.version=${VERSION} -X main.buildStamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X main.gitHash=`git rev-parse HEAD` "
+DEV_BUILD_FLAG = "-X main.version="develop" -X main.buildStamp=`date -u '+%Y-%m-%d_%I:%M:%S%p'` -X main.gitHash="" "
+BK_NAMESPACE = blueking
+BK_DH_URL = mirrors.tencent.com/build
+
+.PHONY: release-bin
+release-bin:
+	@CGO_ENABLED=0 GOARCH=amd64 GOOS=linux go build -ldflags ${RELEASE_BUILD_FLAG} -o ${OUTPUT_DIR}/${PROJ}
+	@tar -C ${OUTPUT_DIR} -zcf ${OUTPUT_DIR}/${PROJ}.tar.gz ${PROJ}
+
+.PHONY: dev-bin
+dev-bin:
+	@go build -ldflags ${DEV_BUILD_FLAG} -o ${OUTPUT_DIR}/${PROJ}
+	@tar -C ${OUTPUT_DIR} -zcf ${OUTPUT_DIR}/${PROJ}.tar.gz ${PROJ}
+
+.PHONY: bk-image
+bk-image: release-bin
+	docker build --build-arg SRV_NAME=${PROJ} --rm -t ${BK_DH_URL}/${BK_NAMESPACE}/${PROJ}:${VERSION} .
+
+.PHONY: clean
+clean:
+	@rm -rf $(OUTPUT_DIR)
\ No newline at end of file
diff --git a/dbm-services/mysql/slow-query-parser-service/README.md b/dbm-services/mysql/slow-query-parser-service/README.md
new file mode 100644
index 0000000000..19d3abd120
--- /dev/null
+++ b/dbm-services/mysql/slow-query-parser-service/README.md
@@ -0,0 +1,2 @@
+1. For command-line startup, see the help output
+2. Container startup: docker run -d --name test-parser -p 22222:22222 -e SQ_ADDRESS=0.0.0.0:22222 -e SQ_TMYSQLPARSER_BIN=/tmysqlparse ${THIS_IMAGE}
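+3. A minimal Go client sketch. The host and port below assume the docker example above; the request and response fields follow `pkg/mysql/request.go` and `pkg/mysql/response.go`:
+```go
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// The parser routes are registered under the /mysql group (see pkg/mysql/router.go).
+	body, _ := json.Marshal(map[string]string{"content": "select * from db1.tb1 where id = 1"})
+	resp, err := http.Post("http://127.0.0.1:22222/mysql/", "application/json", bytes.NewReader(body))
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// Expected keys include command, query_digest_text, query_digest_md5,
+	// db_name, table_name and query_length.
+	var res map[string]any
+	if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", res)
+}
+```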
\ No newline at end of file
diff --git a/dbm-services/mysql/slow-query-parser-service/go.mod b/dbm-services/mysql/slow-query-parser-service/go.mod
new file mode 100644
index 0000000000..d0046c4237
--- /dev/null
+++ b/dbm-services/mysql/slow-query-parser-service/go.mod
@@ -0,0 +1,41 @@
+module dbm-services/mysql/slow-query-parser-service
+
+go 1.19
+
+require (
+	github.com/alecthomas/kingpin/v2 v2.3.2
+	github.com/gin-gonic/gin v1.9.0
+	golang.org/x/exp v0.0.0-20230418202329-0354be287a23
+)
+
+require (
+	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect
+	github.com/bytedance/sonic v1.8.8 // indirect
+	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
+	github.com/gin-contrib/sse v0.1.0 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/go-playground/validator/v10 v10.12.0 // indirect
+	github.com/goccy/go-json v0.10.2 // indirect
+	github.com/google/go-cmp v0.5.9 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
+	github.com/klauspost/cpuid/v2 v2.2.4 // indirect
+	github.com/kr/pretty v0.3.0 // indirect
+	github.com/leodido/go-urn v1.2.3 // indirect
+	github.com/mattn/go-isatty v0.0.18 // indirect
+	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.7 // indirect
+	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
+	github.com/ugorji/go/codec v1.2.11 // indirect
+	github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
+	golang.org/x/arch v0.3.0 // indirect
+	golang.org/x/crypto v0.8.0 // indirect
+	golang.org/x/net v0.9.0 // indirect
+	golang.org/x/sys v0.7.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
+	google.golang.org/protobuf v1.30.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/dbm-services/mysql/slow-query-parser-service/go.sum b/dbm-services/mysql/slow-query-parser-service/go.sum
new file mode 100644
index 0000000000..3bf9f29f80
--- /dev/null
+++ b/dbm-services/mysql/slow-query-parser-service/go.sum
@@ -0,0 +1,108 @@
+github.com/alecthomas/kingpin/v2 v2.3.2 h1:H0aULhgmSzN8xQ3nX1uxtdlTHYoPLu5AhHxWrKI6ocU=
+github.com/alecthomas/kingpin/v2 v2.3.2/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
+github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
+github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
+github.com/bytedance/sonic v1.8.8 h1:Kj4AYbZSeENfyXicsYppYKO0K2YWab+i2UTSY7Ukz9Q=
+github.com/bytedance/sonic v1.8.8/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
+github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
+github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8= +github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI= +github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= +github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA= +github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd 
h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= +github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= +golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/exp v0.0.0-20230418202329-0354be287a23 h1:4NKENAGIctmZYLK9W+X1kDK8ObBFqOSCJM6WE7CvkJY= +golang.org/x/exp v0.0.0-20230418202329-0354be287a23/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/dbm-services/mysql/slow-query-parser-service/main.go b/dbm-services/mysql/slow-query-parser-service/main.go new file mode 100644 index 0000000000..d4f6cfd8b4 --- /dev/null +++ b/dbm-services/mysql/slow-query-parser-service/main.go @@ -0,0 +1,61 @@ +package main + +import ( + "dbm-services/mysql/slow-query-parser-service/pkg/mysql" + "dbm-services/mysql/slow-query-parser-service/pkg/service" + "fmt" + "os" + "path/filepath" + + "github.com/alecthomas/kingpin/v2" + "golang.org/x/exp/slog" +) + +var ( + version = "" + buildStamp = "" + gitHash = "" +) + +var ( + root = kingpin.New("slow-query-parser-service", "slow query parser service") + + runCmd = root.Command("run", "start service") + runCmdAddress = runCmd.Flag("address", "service listen address").Required().Envar("SQ_ADDRESS").TCP() + tmysqlParsePath = runCmd.Flag("tmysqlparse-bin", "tmysqlparse bin path").Required().Envar("SQ_TMYSQLPARSER_BIN"). 
+ ExistingFile() + + versionCmd = root.Command("version", "print version") +) + +func init() { + slog.SetDefault( + slog.New( + slog.HandlerOptions{ + AddSource: true, + Level: slog.LevelDebug, + }.NewTextHandler(os.Stdout), + ), + ) +} + +func main() { + switch kingpin.MustParse(root.Parse(os.Args[1:])) { + case runCmd.FullCommand(): + slog.Info("init run", + slog.String("address", (*runCmdAddress).String()), + slog.String("tmysqlparse-bin", *tmysqlParsePath), + ) + + if !filepath.IsAbs(*tmysqlParsePath) { + cwd, _ := os.Getwd() + *tmysqlParsePath = filepath.Join(cwd, *tmysqlParsePath) + slog.Info("init run concat cwd to tmysqlparse-bin", slog.String("cwd", cwd)) + } + + mysql.ParserPath = tmysqlParsePath + service.Start((*runCmdAddress).String()) + case versionCmd.FullCommand(): + fmt.Printf("Version: %s, GitHash: %s, BuildAt: %s\n", version, gitHash, buildStamp) + } +} diff --git a/dbm-services/mysql/slow-query-parser-service/pkg/mysql/mysql.go b/dbm-services/mysql/slow-query-parser-service/pkg/mysql/mysql.go new file mode 100644 index 0000000000..b65d989eef --- /dev/null +++ b/dbm-services/mysql/slow-query-parser-service/pkg/mysql/mysql.go @@ -0,0 +1,2 @@ +// Package mysql TODO +package mysql diff --git a/dbm-services/mysql/slow-query-parser-service/pkg/mysql/parse.go b/dbm-services/mysql/slow-query-parser-service/pkg/mysql/parse.go new file mode 100644 index 0000000000..bfbc2db213 --- /dev/null +++ b/dbm-services/mysql/slow-query-parser-service/pkg/mysql/parse.go @@ -0,0 +1,85 @@ +package mysql + +import ( + "bytes" + "encoding/json" + "io" + "os" + "os/exec" + + "golang.org/x/exp/slog" +) + +// ParserPath TODO +var ParserPath *string + +func parse(query string) (*Response, error) { + slog.Info("mysql parse receive query", slog.String("query", query)) + + inputFile, err := os.CreateTemp("/tmp", "mysql-slow-input") + if err != nil { + slog.Error("mysql parse create input file", err) + return nil, err + } + defer os.Remove(inputFile.Name()) + slog.Info("mysql parse create input file success", slog.String("input file", inputFile.Name())) + + outputFile, err := os.CreateTemp("/tmp", "mysql-slow-output") + if err != nil { + slog.Error("mysql parse create output file", err) + return nil, err + } + defer os.Remove(outputFile.Name()) + slog.Info("mysql parse create output file success", slog.String("output file", outputFile.Name())) + + _, err = inputFile.WriteString(query) + if err != nil { + slog.Error("mysql parse write query", err) + return nil, err + } + slog.Info("mysql parse write query success") + + cmd := exec.Command(*ParserPath, + "--sql-file", inputFile.Name(), + "--output-path", outputFile.Name(), + "--print-query-mode", "2", + ) + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err = cmd.Run() + if err != nil { + slog.Error("mysql parse execute tmysqlparse", err, + slog.String("command", cmd.String()), + slog.String("stderr", stderr.String())) + return nil, err + } + slog.Info("mysql parse execute tmysqlparse", + slog.String("command", cmd.String()), + slog.String("stdout", stdout.String()), + ) + + outputFile.Seek(0, 0) + content, err := io.ReadAll(outputFile) + if err != nil { + slog.Error("mysql parse read output file", err, slog.String("output file", outputFile.Name())) + return nil, err + } + slog.Info("mysql parse read output file success", slog.String("output file", outputFile.Name())) + + var cmdRet struct { + Result []Response `json:"result"` + } + err = json.Unmarshal(content, &cmdRet) + if err != nil { + slog.Error("mysql parse 
unmarshal result", err, slog.String("result", string(content))) + return nil, err + } + cmdRet.Result[0].QueryLength = len(query) + + slog.Info("mysql parse unmarshal result", slog.Any("struct result", cmdRet)) + + return &cmdRet.Result[0], nil +} diff --git a/dbm-services/mysql/slow-query-parser-service/pkg/mysql/request.go b/dbm-services/mysql/slow-query-parser-service/pkg/mysql/request.go new file mode 100644 index 0000000000..f8dcd4bed4 --- /dev/null +++ b/dbm-services/mysql/slow-query-parser-service/pkg/mysql/request.go @@ -0,0 +1,6 @@ +package mysql + +// Request TODO +type Request struct { + Content string `json:"content" binding:"required"` +} diff --git a/dbm-services/mysql/slow-query-parser-service/pkg/mysql/response.go b/dbm-services/mysql/slow-query-parser-service/pkg/mysql/response.go new file mode 100644 index 0000000000..9f6113d5a0 --- /dev/null +++ b/dbm-services/mysql/slow-query-parser-service/pkg/mysql/response.go @@ -0,0 +1,12 @@ +package mysql + +// Response TODO +type Response struct { + Command string `json:"command"` + QueryString string `json:"query_string"` + QueryDigestText string `json:"query_digest_text"` + QueryDigestMd5 string `json:"query_digest_md5"` + DbName string `json:"db_name"` + TableName string `json:"table_name"` + QueryLength int `json:"query_length"` +} diff --git a/dbm-services/mysql/slow-query-parser-service/pkg/mysql/router.go b/dbm-services/mysql/slow-query-parser-service/pkg/mysql/router.go new file mode 100644 index 0000000000..15e43bfe9b --- /dev/null +++ b/dbm-services/mysql/slow-query-parser-service/pkg/mysql/router.go @@ -0,0 +1,34 @@ +package mysql + +import ( + "net/http" + + "github.com/gin-gonic/gin" + "golang.org/x/exp/slog" +) + +// AddRouter TODO +func AddRouter(r *gin.Engine) { + g := r.Group("/mysql") + + g.POST("/", func(ctx *gin.Context) { + body := Request{} + err := ctx.BindJSON(&body) + if err != nil { + slog.Error("mysql", err) + ctx.JSON(http.StatusBadRequest, err.Error()) + return + } + + slog.Info("mysql", slog.Any("body", body), slog.String("path", g.BasePath())) + + res, err := parse(body.Content) + if err != nil { + slog.Error("mysql", err) + ctx.JSON(http.StatusInternalServerError, err.Error()) + return + } + + ctx.JSON(http.StatusOK, res) + }) +} diff --git a/dbm-services/mysql/slow-query-parser-service/pkg/service/service.go b/dbm-services/mysql/slow-query-parser-service/pkg/service/service.go new file mode 100644 index 0000000000..7ba484687c --- /dev/null +++ b/dbm-services/mysql/slow-query-parser-service/pkg/service/service.go @@ -0,0 +1,16 @@ +// Package service TODO +package service + +import ( + "dbm-services/mysql/slow-query-parser-service/pkg/mysql" + + "github.com/gin-gonic/gin" +) + +// Start TODO +func Start(address string) error { + r := gin.New() + r.Use(gin.Logger()) + mysql.AddRouter(r) + return r.Run(address) +} diff --git a/dbm-services/redis/db-tools/dbactuator/.ci/codecc.yml b/dbm-services/redis/db-tools/dbactuator/.ci/codecc.yml new file mode 100644 index 0000000000..078caeb7dc --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/.ci/codecc.yml @@ -0,0 +1,32 @@ +version: v2.0 +resources: + repositories: + - repository: ci_templates/public/codecc + name: codecc +on: + mr: + target-branches: ["*"] +stages: + - name: "代码检查" + check-out: + gates: + - template: commonGate.yml@codecc + timeout-hours: 10 + jobs: + codecc: + name: "CodeCC代码检查" + runs-on: + pool-name: docker #docker-on-devcloud、docker、local、agentless + container: + image: mirrors.tencent.com/ci/tlinux3_ci:2.0.0 + steps: + - 
checkout: self + - uses: CodeccCheckAtomDebug@4.* + name: 腾讯代码分析 + with: + # beAutoLang: true # 自动检测项目语言 + languages: + - "GOLANG" + - "OTHERS" + checkerSetType: "openScan" # 规则集类型,normal对应自主配置规则集,openScan对应按开源治理要求配置 + toolScanType: "2" # 扫描方式。快速全量扫描[1] | 全量扫描[0] | 差异扫描[6] | MR/PR扫描[2],默认为1 diff --git a/dbm-services/redis/db-tools/dbactuator/.ci/open_source_check.yml b/dbm-services/redis/db-tools/dbactuator/.ci/open_source_check.yml new file mode 100644 index 0000000000..f421f315f3 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/.ci/open_source_check.yml @@ -0,0 +1,84 @@ +version: "v2.0" +name: "开源检查" +label: [] +variables: {} +stages: +- name: "开源检查" + label: + - "Build" + jobs: + job_AfK: + name: "构建环境-LINUX" + runs-on: + pool-name: "docker" + container: + image: "mirrors.tencent.com/ci/tlinux3_ci:2.3.0" + needs: {} + steps: + - checkout: self + - name: "敏感信息检查-部门RTX" + uses: "SensitiveRtxChecker@3.*" + - name: "腾讯代码分析(官方-代码分析工作组)" + uses: "CodeccCheckAtomDebug@4.*" + with: + beAutoLang: true + languages: + - "GOLANG" + checkerSetType: "communityOpenScan" + tools: + - "WOODPECKER_COMMITSCAN" + - "SCC" + - "PECKER_SECURITY" + - "SENSITIVE" + - "DUPC" + - "IP_CHECK" + - "WOODPECKER_SENSITIVE" + - "HORUSPY" + - "XCHECK" + - "CCN" + asyncTask: false + asyncTaskId: "" + scriptType: "SHELL" + script: |- + # Coverity/Klocwork将通过调用编译脚本来编译您的代码,以追踪深层次的缺陷 + # 请使用依赖的构建工具如maven/cmake等写一个编译脚本build.sh + # 确保build.sh能够编译代码 + # cd path/to/build.sh + # sh build.sh + languageRuleSetMap: {} + checkerSetEnvType: "prod" + multiPipelineMark: "" + rtxReceiverType: "1" + botWebhookUrl: "" + botRemindRange: "2" + botRemindSeverity: "7" + botRemaindTools: [] + emailReceiverType: "1" + emailCCReceiverList: [] + instantReportStatus: "2" + reportDate: [] + reportTime: "" + reportTools: [] + toolScanType: "1" + diffBranch: "" + byFile: false + mrCommentEnable: true + prohibitIgnore: false + newDefectJudgeFromDate: "" + transferAuthorList: [] + path: [] + customPath: [] + scanTestSource: false + openScanPrj: false + openScanFilterEnable: false + issueSystem: "TAPD" + issueSubSystem: "" + issueResolvers: [] + issueReceivers: [] + issueFindByVersion: "" + maxIssue: 1000 + issueAutoCommit: false + check-out: + gates: + - template: open_source_gate.yml + timeout-hours: 10 \ No newline at end of file diff --git a/dbm-services/redis/db-tools/dbactuator/.ci/templates/open_source_gate.yml b/dbm-services/redis/db-tools/dbactuator/.ci/templates/open_source_gate.yml new file mode 100644 index 0000000000..34ff9b0cb8 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/.ci/templates/open_source_gate.yml @@ -0,0 +1,26 @@ +parameters: +- name: receivers + type: array + default: [ "${{ ci.actor }}" ] + +gates: +- name: open-source-gate + rule: + - "CodeccCheckAtomDebug.all_risk <= 0" + - "CodeccCheckAtomDebug.high_med_new_issue <= 0" + - "CodeccCheckAtomDebug.ccn_new_max_value <= 40" + - "CodeccCheckAtomDebug.sensitive_defect <= 0" + - "CodeccCheckAtomDebug.dupc_average <= 15" + - "CodeccCheckAtomDebug.ccn_average <= 3" + - "CodeccCheckAtomDebug.ccn_new_defect <= 0" + - "CodeccCheckAtomDebug.ccn_funcmax <= 20" + - "CodeccCheckAtomDebug.woodpecker_all_defect <= 0" + - "CodeccCheckAtomDebug.horuspy_all_defect <= 0" + - "CodeccCheckAtomDebug.go_serious_defect <= 0" + - "CodeccCheckAtomDebug.go_all_defect <= 100" + notify-on-fail: + - type: wework-message + receivers: ${{ parameters.receivers }} + continue-on-fail: + gatekeepers: + - "${{ ci.actor }}" \ No newline at end of file diff --git 
a/dbm-services/redis/db-tools/dbactuator/.gitignore b/dbm-services/redis/db-tools/dbactuator/.gitignore
new file mode 100644
index 0000000000..fc015b4217
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/.gitignore
@@ -0,0 +1,31 @@
+.idea
+.vscode
+logs/*
+bin/dbactuator_redis
+scripts/upload_media.sh
+.codecc
+gonote
+goimports
+.agent.properties
+agent.zip
+codecc/
+devopsAgent
+devopsDaemon
+install.sh
+jre.zip
+jre/
+latest_version.txt
+preci
+preci.log
+preci.pid
+preci_server.jar
+runtime/
+start.sh
+stop.sh
+telegraf.conf
+tmp/
+uninstall.sh
+worker-agent.jar
+preci.port
+build.yml
+tests/dbactuator-test
diff --git a/dbm-services/redis/db-tools/dbactuator/LICENSE b/dbm-services/redis/db-tools/dbactuator/LICENSE
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/redis/db-tools/dbactuator/Makefile b/dbm-services/redis/db-tools/dbactuator/Makefile
new file mode 100644
index 0000000000..cc8041e156
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/Makefile
@@ -0,0 +1,9 @@
+SRV_NAME=dbactuator_redis
+
+clean:
+	-rm ./bin/${SRV_NAME}
+
+build:clean
+	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./bin/$(SRV_NAME) -v main.go
+
+.PHONY: clean build
diff --git a/dbm-services/redis/db-tools/dbactuator/README.md b/dbm-services/redis/db-tools/dbactuator/README.md
new file mode 100644
index 0000000000..bec96e7b80
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/README.md
@@ -0,0 +1,90 @@
+## bk-dbactuator-redis
+A collection of Redis atom jobs, covering Redis and RedisProxy installation, cluster creation, backup, rollback and more.
+
+Usage (help output quoted verbatim from the binary):
+```sh
+./bin/dbactuator_redis -h
+redis原子任务合集,包含Redis 以及 RedisProxy 安装、集群创建、备份、回档等等原子任务
+
+Usage:
+  dbactuator_redis [flags]
+
+
+Flags:
+  -A, --atom-job-list string 多个原子任务名用','分割,如 redis_install,redis_replicaof
+  -B, --backup_dir string 备份保存路径,亦可通过环境变量REDIS_BACKUP_DIR指定
+  -D, --data_dir string 数据保存路径,亦可通过环境变量 REDIS_DATA_DIR 指定
+  -h, --help help for dbactuator_redis
+  -N, --node_id string 节点id
+  -p, --payload string 原子任务参数信息,base64包裹
+  -f, --payload_file string 原子任务参数信息,json/yaml文件
+  -R, --root_id string 流程id
+  -t, --toggle Help message for toggle
+  -U, --uid string 单据id
+  -V, --version_id string 运行版本id
+  -u, --user string db进程运行的os用户
+  -g, --group string db进程运行的os用户的属主
+
+//执行示例
+./bin/dbactuator_redis --uid=1111 --root_id=2222 --node_id=3333 --version_id=v1 --payload='eyJkaXIiOiIvZGF0YS9yZWRpcy8zMDAwMCIsInBvcnQiOjMwMDAwLCJwYXNzd29yZCI6InBhc3MwMSIsInZlcnNpb24iOiJyZWRpcy00LjExLjEyIiwiZGF0YWJhc2VzIjoyfQ==' --atom-job-list="redis_install"
+```
+
+### Architecture
+![architecture](./imgs/bk-dbactuator-redis_structur.png)
+
+### Coding conventions
+Go coding conventions: [https://google.github.io/styleguide/go/decisions](https://google.github.io/styleguide/go/decisions)
+
+### Development workflow
+- **step1 (required): add a job type under the `pkg/atomJobs` directory, e.g. `pkg/atomJobs/redis/redis_install_execute.go`**;
+taking `type RedisInstallExecute` as the example, it must implement the interfaces of `JobRunner`:
+```go
+//JobRunner defines a behavior of a job
+type JobRunner interface {
+	// Init doing some operation before run a job
+	// such as reading parametes
+	Init(*JobGenericRuntime) error
+
+	// Name return the name of the job
+	Name() string
+
+	// Run run a job
+	Run() error
+
+	Retry() uint
+
+	// Rollback you can define some rollback logic here when job fails
+	Rollback() error
+}
+```
+Then implement a New function that simply returns a `*RedisInstallExecute{}`, e.g. `func NewRedisInstallExecute() jobruntime.JobRunner` (a complete skeleton is sketched after this list);
+- **step2 (required): modify the `GetAtomJobInstance()` function in `pkg/jobmanager/jobmanager.go`** by adding a single line:
+```go
+//the key must be identical to the job name passed via ./dbactuator_redis --atom-job-list;
+//the value is the New function from step1;
+m.atomJobMapper["redis_install_execute"] = atomredis.NewRedisInstallExecute
+```
+- **step3 (optional): update the "Currently supported atom jobs" section of this README.**
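+
+Putting step1 and step2 together, a minimal skeleton looks like the sketch below; the package path, type name and job name are illustrative only:
+```go
+package atomredis
+
+import "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime"
+
+// DemoJob is a hypothetical atom job used only to illustrate the interface.
+type DemoJob struct {
+	runtime *jobruntime.JobGenericRuntime
+}
+
+// NewDemoJob is the New function registered in jobmanager.go (step2).
+func NewDemoJob() jobruntime.JobRunner {
+	return &DemoJob{}
+}
+
+// Init reads parameters from the runtime before the job runs.
+func (j *DemoJob) Init(rt *jobruntime.JobGenericRuntime) error {
+	j.runtime = rt
+	return nil
+}
+
+// Name must match the key passed to --atom-job-list.
+func (j *DemoJob) Name() string { return "redis_demo" }
+
+// Run does the real work; keep it re-entrant (see the notes below).
+func (j *DemoJob) Run() error { return nil }
+
+// Retry returns how many times a failed job may be retried.
+func (j *DemoJob) Retry() uint { return 2 }
+
+// Rollback can stay a no-op when Run is re-entrant.
+func (j *DemoJob) Rollback() error { return nil }
+```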
+
+### Notes
+- First: **every atom job in `bk-dbactuator-redis` is strongly recommended to be re-entrant, i.e. safe to execute repeatedly.**
+Although the `JobRunner` interface includes a `Rollback() error` requirement, it is in practice the less important one.
+Compared with rollback, re-entrancy has the following advantages:
+  - **re-entrancy is easier to implement: before each action, simply check whether it has already been done. Rollback is hard: if replication is set up for 100 Redis instances and 1 fails while 99 succeed, re-running is simple but rolling everything back is very painful;**
+  - **re-entrancy is lower risk: the rollback of a create is a delete and the rollback of a delete is a create, so even a subtle bug in rollback code has a large impact;**
+  - **re-entrancy is more useful for DBAs and users: when an operation fails, the basic demand is to re-run it; fully undoing the operation to restore the environment is rarely requested;**
+
+### Currently supported atom jobs
+```
+sysinit
+redis_install //install redis
+redis_replicaof //set up redis master-slave replication
+redis_replicaof_batch //set up redis master-slave replication (consecutive ports)
+clustermeet_slotsassign //cluster meet and slot assignment
+redis_backup //redis backup
+tendis_keyspattern //redis key extraction
+tendis_keysdelete_regex //delete redis keys by regex
+tendis_keysdelete_files //delete redis keys listed in result files
+```
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/cmd/root.go b/dbm-services/redis/db-tools/dbactuator/cmd/root.go
new file mode 100644
index 0000000000..8934f2f656
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/cmd/root.go
@@ -0,0 +1,147 @@
+// Package cmd 根目录
+/*
+Copyright © 2022 NAME HERE
+
+*/
+package cmd
+
+import (
+	"dbm-services/redis/db-tools/dbactuator/pkg/consts"
+	"dbm-services/redis/db-tools/dbactuator/pkg/jobmanager"
+	"dbm-services/redis/db-tools/dbactuator/pkg/util"
+	"encoding/base64"
+	"fmt"
+	"log"
+	"os"
+	"strings"
+
+	"github.com/spf13/cobra"
+)
+
+var uid string
+var rootID string
+var nodeID string
+var versionID string
+var dataDir string
+var backupDir string
+var payLoad string
+var payLoadFormat string
+var payLoadFile string
+var atomJobList string
+var user string
+var group string
+
+var showSupportedAtomJobs bool
+
+// RootCmd represents the base command when called without any subcommands
+var RootCmd = &cobra.Command{
+	Use:   "dbactuator_redis",
+	Short: "redis原子任务合集",
+	Long:  `redis原子任务合集,包含Redis 以及 RedisProxy 安装、集群创建、备份、回档等等原子任务`,
+	// Uncomment the following line if your bare application
+	// has an action associated with it:
+	Run: func(cmd *cobra.Command, args []string) {
+		var err error
+		dir, _ := util.GetCurrentDirectory()
+
+		manager, err := jobmanager.NewJobGenericManager(uid, rootID, nodeID, versionID,
+			payLoad, payLoadFormat, atomJobList, dir)
+		if err != nil {
+			return
+		}
+		if showSupportedAtomJobs {
+			names := manager.SupportAtomJobs()
+			fmt.Printf("Support atom jobs:%s\n", strings.Join(names, "\n"))
+			return
+		}
+
+		// 优先使用payLoad。 payLoadFile 个人测试的时候使用的.
+ if payLoad == "" && payLoadFile != "" { + if o, err := os.ReadFile(payLoadFile); err == nil { + payLoad = base64.StdEncoding.EncodeToString(o) + log.Printf("using payload file %s", payLoadFile) + } else { + log.Printf("using payload file %s err %v", payLoadFile, err) + } + } + err = consts.SetRedisDataDir(dataDir) + if err != nil { + log.Println(err.Error()) + os.Exit(-1) + } + err = consts.SetRedisBakcupDir(backupDir) + if err != nil { + log.Println(err.Error()) + os.Exit(-1) + } + + // 设置mongo环境变量 + err = consts.SetMongoDataDir(dataDir) + if err != nil { + log.Println(err.Error()) + os.Exit(-1) + } + err = consts.SetMongoBackupDir(backupDir) + if err != nil { + log.Println(err.Error()) + os.Exit(-1) + } + + err = consts.SetProcessUser(user) + if err != nil { + log.Println(err.Error()) + os.Exit(-1) + } + err = consts.SetProcessUserGroup(group) + if err != nil { + log.Println(err.Error()) + os.Exit(-1) + } + + err = manager.LoadAtomJobs() + if err != nil { + os.Exit(-1) + } + err = manager.RunAtomJobs() + if err != nil { + os.Exit(-1) + } + }, +} + +// Execute adds all child commands to the root command and sets flags appropriately. +// This is called by main.main(). It only needs to happen once to the rootCmd. +func Execute() { + err := RootCmd.Execute() + if err != nil { + os.Exit(1) + } +} + +func init() { + // Here you will define your flags and configuration settings. + // Cobra supports persistent flags, which, if defined here, + // will be global for your application. + + // Cobra also supports local flags, which will only run + // when this action is called directly. + RootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") + RootCmd.PersistentFlags().BoolVarP(&showSupportedAtomJobs, "show-supported-atomjobs", "s", false, + "show supported atom jobs") + RootCmd.PersistentFlags().StringVarP(&uid, "uid", "U", "", "单据id") + RootCmd.PersistentFlags().StringVarP(&rootID, "root_id", "R", "", "流程id") + RootCmd.PersistentFlags().StringVarP(&nodeID, "node_id", "N", "", "节点id") + RootCmd.PersistentFlags().StringVarP(&versionID, "version_id", "V", "", "运行版本id") + RootCmd.PersistentFlags().StringVarP(&dataDir, "data_dir", "D", "", + "数据保存路径,亦可通过环境变量 REDIS_DATA_DIR 指定") + RootCmd.PersistentFlags().StringVarP(&backupDir, "backup_dir", "B", "", + "备份保存路径,亦可通过环境变量REDIS_BACKUP_DIR指定") + RootCmd.PersistentFlags().StringVarP(&payLoad, "payload", "p", "", "原子任务参数信息,base64包裹") + RootCmd.PersistentFlags().StringVarP(&payLoadFormat, "payload-format", "m", "", + "command payload format, default base64, value_allowed: base64|raw") + RootCmd.PersistentFlags().StringVarP(&atomJobList, "atom-job-list", "A", "", + "多个原子任务名用','分割,如 redis_install,redis_replicaof") + RootCmd.PersistentFlags().StringVarP(&payLoadFile, "payload_file", "f", "", "原子任务参数信息,json文件") + RootCmd.PersistentFlags().StringVarP(&user, "user", "u", "", "开启进程的os用户") + RootCmd.PersistentFlags().StringVarP(&group, "group", "g", "", "开启进程的os用户属主") +} diff --git a/dbm-services/redis/db-tools/dbactuator/doc/twemproxyredisinstance.txt b/dbm-services/redis/db-tools/dbactuator/doc/twemproxyredisinstance.txt new file mode 100644 index 0000000000..aec158c823 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/doc/twemproxyredisinstance.txt @@ -0,0 +1,11 @@ +TwemproxyRedisInstance twemproxyconf twemproxy slowms 1000000 INT RANGE [0,10000000] 0 0 +TwemproxyRedisInstance twemproxyconf twemproxy backlog 512 INT nullNULL nullNULL 0 1 +TwemproxyRedisInstance twemproxyconf twemproxy redis true STRING ENUM true 0 1 
+TwemproxyRedisInstance twemproxyconf twemproxy distribution modhash STRING ENUM modhash 0 1
+TwemproxyRedisInstance twemproxyconf twemproxy hash fnv1a_64 STRING ENUM fnv1a_64 0 1
+TwemproxyRedisInstance twemproxyconf twemproxy auto_eject_hosts false STRING ENUM true|false 0 1
+TwemproxyRedisInstance twemproxyconf twemproxy preconnect false STRING ENUM true|false 0 1
+TwemproxyRedisInstance twemproxyconf twemproxy server_retry_timeout 2000 INT RANGE [1000,10000] 0 1
+TwemproxyRedisInstance twemproxyconf twemproxy server_connections 1 INT RANGE [1,10] 0 1
+TwemproxyRedisInstance twemproxyconf twemproxy server_failure_limit 3 INT RANGE [1,10] 0 1
+TwemproxyRedisInstance twemproxyconf twemproxy mbuf-size 1024 INT RANGE [128,102400] 0 0
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/example/add_shard_to_cluster.example.md b/dbm-services/redis/db-tools/dbactuator/example/add_shard_to_cluster.example.md
new file mode 100644
index 0000000000..79e26d22cc
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/add_shard_to_cluster.example.md
@@ -0,0 +1,24 @@
+### add_shard_to_cluster
+初始化新机器:
+
+```sh
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="add_shard_to_cluster" --payload='{{payload_base64}}'
+```
+
+
+原始payload
+
+```json
+{
+    "ip":"127.0.0.1",
+    "port":27021,
+    "adminUsername":"xxx",
+    "adminPassword":"xxxxxxx",
+    "shard":{
+        "test-test1-s1":"127.0.0.2:27001,127.0.0.3:27002",
+        "test-test1-s2":"127.0.0.2:27004,127.0.0.3:27005",
+        "test-test1-s3":"127.0.0.3:27001,127.0.0.4:27002",
+        "test-test1-s4":"127.0.0.3:27004,127.0.0.4:27005"
+    }
+}
+```
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/example/bkdbmon_install.example.md b/dbm-services/redis/db-tools/dbactuator/example/bkdbmon_install.example.md
new file mode 100644
index 0000000000..4d18a8bf7d
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/bkdbmon_install.example.md
@@ -0,0 +1,93 @@
+### bkdbmon_install
+bk-dbmon安装:
+```sh
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="bkdbmon_install" --payload='{{payload_base64}}'
+```
+
+原始payload:
+```json
+{
+    "bkdbmonpkg":{
+        "pkg":"bk-dbmon-v0.2.tar.gz",
+        "pkg_md5":"99081e28443d0615b151ae82e74b69e4"
+    },
+    "dbtoolspkg":{
+        "pkg":"dbtools.tar.gz",
+        "pkg_md5":"334cf6e3b84d371325052d961584d5aa"
+    },
+    "gsepath":"/usr/local/gse_bkte",
+    "redis_fullbackup":{
+        "to_backup_system":"yes",
+        "old_file_left_day":2,
+        "cron":"0 5,13,21 * * *"
+    },
+    "redis_binlogbackup":{
+        "to_backup_system":"yes",
+        "old_file_left_day":2,
+        "cron":"@every 10m"
+    },
+    "redis_heartbeat":{
+        "cron":"@every 10s"
+    },
+    "redis_monitor":{
+        "bkmonitor_event_data_id": 542898,
+        "bkmonitor_event_token": "xxxxxx",
+        "bkmonitor_metric_data_id": 11111,
+        "bkmonitor_metirc_token": "xxxx",
+        "cron":"@every 1m"
+    },
+    "redis_keylife":{
+        "stat_dir":"/data/dbbak/keylifecycle",
+        "cron":"",
+        "hotkey_conf":{
+            "top_count":10,
+            "duration_seconds":30
+        },
+        "bigkey_conf":{
+            "top_count":10,
+            "duration_seconds":18000,
+            "on_master":false,
+            "use_rdb":true,
+            "disk_max_usage":65,
+            "keymod_spec":["axxxy","bxxr"],
+            "keymod_engine":"default"
+        }
+    },
+    "servers":[
+        {
+            "bk_biz_id":"200500194",
+            "bk_cloud_id":"246",
+            "app":"testapp",
+            "app_name":"测试app",
+            "cluster_domain":"tendisx.aaaa.testapp.db",
+            "cluster_name":"aaaa",
+            "cluster_type":"PredixyTendisplusCluster",
"meta_role":"redis_master", + "server_ip":"127.0.0.1", + "server_ports":[ + 30000, + 30001, + 30002, + 30003 + ] + }, + { + "bk_biz_id":"200500194", + "bk_cloud_id":"246", + "app":"testapp", + "app_name":"测试app", + "cluster_domain":"tendisx.aaaa.testapp.db", + "cluster_name":"aaaa", + "cluster_type":"PredixyTendisplusCluster", + "meta_role":"redis_slave", + "server_ip":"127.0.0.1", + "server_ports":[ + 31000, + 31001, + 31002, + 31003 + ] + } + ] +} +``` \ No newline at end of file diff --git a/dbm-services/redis/db-tools/dbactuator/example/cluster_balancer.example.md b/dbm-services/redis/db-tools/dbactuator/example/cluster_balancer.example.md new file mode 100644 index 0000000000..1aa885ff31 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/example/cluster_balancer.example.md @@ -0,0 +1,20 @@ +### mongod_replace +初始化新机器: + +```json +./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="cluster_balancer" --payload='{{payload_base64}}' +``` + + +原始payload + +```json +{ + "ip":"127.0.0.1", + "port":27021, + "open": false, + "adminUsername":"xxx", + "adminPassword":"xxxxxxxxx" +} +``` +"open"字段 true:表示打开balancer false:表示关闭balancer diff --git a/dbm-services/redis/db-tools/dbactuator/example/clustermeet_slotsassign.example.md b/dbm-services/redis/db-tools/dbactuator/example/clustermeet_slotsassign.example.md new file mode 100644 index 0000000000..3491af6cac --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/example/clustermeet_slotsassign.example.md @@ -0,0 +1,80 @@ +### redis replicaof +建立主从关系: +``` +./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="clustermeet_slotsassign" --payload='{{payload_base64}}' +``` + +前置工作: +- 先运行 `./dbactuator_redis --atom-job-list="sysinit"` +- 先运行 `./dbactuator_redis --atom-job-list="redis_install"`,确保redis 已经安装ok; + +原始payload +示例1: +```json +{ + "password":"xx", + "use_for_expansion":false,//是否用于扩容,false:不是用于扩容 + "slots_auto_assign":true, //slots自动分配 + "replica_pairs":[ + { + "master_ip":"127.0.0.1", + "master_port":30000, + "slave_ip":"127.0.0.1", + "slave_port":31000, + "slots":"" + }, + { + "master_ip":"127.0.0.1", + "master_port":30001, + "slave_ip":"127.0.0.1", + "slave_port":31001, + "slots":"" + }, + { + "master_ip":"127.0.0.1", + "master_port":30002, + "slave_ip":"127.0.0.1", + "slave_port":31002, + "slots":"" + } + ] +} +``` +示例2: +```json +{ + "password":"xxx", + "use_for_expansion":true,//是否用于扩容,true :是用于扩容 + "slots_auto_assign":false, //不自动分配slot,根据用户指定 + "replica_pairs":[ + { + "master_ip":"127.0.0.1", + "master_port":30000, + "slave_ip":"127.0.0.1", + "slave_port":31000, + "slots":"0-4096" + }, + { + "master_ip":"127.0.0.1", + "master_port":30001, + "slave_ip":"127.0.0.1", + "slave_port":30001, + "slots":"4097-8193" + }, + { + "master_ip":"127.0.0.1", + "master_port":30002, + "slave_ip":"127.0.0.1", + "slave_port":31002, + "slots":"8194-12290" + }, + { + "master_ip":"127.0.0.1", + "master_port":30003, + "slave_ip":"127.0.0.1", + "slave_port":31003, + "slots":"12291-16383" + } + ] +} +``` \ No newline at end of file diff --git a/dbm-services/redis/db-tools/dbactuator/example/initiate_replicaset.example.md b/dbm-services/redis/db-tools/dbactuator/example/initiate_replicaset.example.md new file mode 100644 index 0000000000..1886cf889c --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/example/initiate_replicaset.example.md @@ -0,0 +1,35 @@ +### init_replicaset +初始化新机器: + +```json 
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="init_replicaset" --payload='{{payload_base64}}' +``` +--data_dir、--backup_dir 可以留空. --user启动进程用户名,--group启动进程用户名的属组,如果为空默认都为mysql。 + +原始payload + +```json +{ + "ip":"127.0.0.1", + "port":27001, + "app":"test", + "areaId":"test1", + "setId":"s1", + "configSvr":false, + "ips":[ + "127.0.0.1:27001", + "127.0.0.2:27002", + "127.0.0.3:27003" + ], + "priority":{ + "127.0.0.1:27001":1, + "127.0.0.2:27002":1, + "127.0.0.3:27003":0 + }, + "hidden":{ + "127.0.0.1:27001":false, + "127.0.0.2:27002":false, + "127.0.0.3:27003":true + } +} +``` \ No newline at end of file diff --git a/dbm-services/redis/db-tools/dbactuator/example/mongo_add_user.example.md b/dbm-services/redis/db-tools/dbactuator/example/mongo_add_user.example.md new file mode 100644 index 0000000000..f5b9e6586c --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/example/mongo_add_user.example.md @@ -0,0 +1,52 @@ +### add_user +初始化新机器: + +```json +./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="add_user" --payload='{{payload_base64}}' +``` + + +原始payload + +创建管理员用户 +```json +{ + "ip":"127.0.0.1", + "port":27001, + "instanceType":"mongod", + "username":"xxx", + "password":"xxxxxxx", + "adminUsername":"", + "adminPassword":"", + "authDb":"admin", + "dbs":[ + + ], + "privileges":[ + "root" + ] +} +``` + +创建业务用户 +```json +{ + "ip":"127.0.0.1", + "port":27001, + "instanceType":"mongod", + "username":"xxx", + "password":"xxxxxxx", + "adminUsername":"xxx", + "adminPassword":"xxxxxxxx", + "authDb":"admin", + "dbs":[ + + ], + "privileges":[ + "xxx" + ] +} +``` + + +"instanceType"字段 "mongod":表示在复制集或者复制集单点进行创建用户 "mongos":表示cluster进行创建用户 \ No newline at end of file diff --git a/dbm-services/redis/db-tools/dbactuator/example/mongo_deinstall.example.md b/dbm-services/redis/db-tools/dbactuator/example/mongo_deinstall.example.md new file mode 100644 index 0000000000..1685c30515 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/example/mongo_deinstall.example.md @@ -0,0 +1,22 @@ +### mongo_deinstall +初始化新机器: + +```json +./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="mongo_deinstall" --payload='{{payload_base64}}' +``` + +原始payload +```json +{ + "ip":"127.0.0.1", + "port":27002, + "app":"test", + "areaId":"test1", + "nodeInfo":[ + "127.0.0.1", + "127.0.0.2" + ], + "instanceType":"mongod" +} +``` + diff --git a/dbm-services/redis/db-tools/dbactuator/example/mongo_del_user.example.md b/dbm-services/redis/db-tools/dbactuator/example/mongo_del_user.example.md new file mode 100644 index 0000000000..45c6aea30e --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/example/mongo_del_user.example.md @@ -0,0 +1,34 @@ +### delete_user +初始化新机器: + +```json +./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="delete_user" --payload='{{payload_base64}}' +``` + + +原始payload +mongos删除业务用户 +```json +{ + "ip":"127.0.0.1", + "port":27023, + "instanceType":"mongos", + "adminUsername":"xxx", + "adminPassword":"xxxxx", + "username":"xx", + "authDb":"admin" +} +``` + +mongod删除业务用户 +```json +{ + "ip":"127.0.0.1", + "port":27001, + "instanceType":"mongod", + "adminUsername":"xxx", + "adminPassword":"xxxx", + "username":"xx", + "authDb":"admin" +} +``` \ No newline at end of file diff --git 
a/dbm-services/redis/db-tools/dbactuator/example/mongo_execute_script.example.md b/dbm-services/redis/db-tools/dbactuator/example/mongo_execute_script.example.md new file mode 100644 index 0000000000..6b8027e2fa --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/example/mongo_execute_script.example.md @@ -0,0 +1,30 @@ +### mongo_execute_script +初始化新机器: + +```json +./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="mongo_execute_script" --payload='{{payload_base64}}' +``` + + +原始payload + +# 原始payload +```json +{ + "ip":"127.0.0.1", + "port":27021, + "script":"xxx", + "type":"cluster", + "secondary": false, + "adminUsername":"xxx", + "adminPassword":"xxxxxx", + "repoUrl":"url", + "repoUsername":"username", + "repoToken":"token", + "repoProject":"project", + "repoRepo":"project-package", + "repoPath":"path" +} +``` + +以repo为前缀的字段为制品库信息 \ No newline at end of file diff --git a/dbm-services/redis/db-tools/dbactuator/example/mongo_process_restart.example.md b/dbm-services/redis/db-tools/dbactuator/example/mongo_process_restart.example.md new file mode 100644 index 0000000000..7d60196380 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/example/mongo_process_restart.example.md @@ -0,0 +1,42 @@ +### mongo_restart +初始化新机器: + +```json +./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="mongo_restart" --payload='{{payload_base64}}' +``` + + +原始payload + +## mongod +```json +{ + "ip":"127.0.0.1", + "port":27001, + "instanceType":"mongod", + "singleNodeInstallRestart":false, + "auth":true, + "cacheSizeGB": null, + "mongoSConfDbOld":"", + "MongoSConfDbNew":"", + "adminUsername":"", + "adminPassword":"" +} +``` +"singleNodeInstallRestart"字段表示安装替换节点时mongod单节点重启 true:替换节点单节点重启 false:复制集节点重启 +"adminUsername"和"adminPassword"字段为空时表示安装时最后一步重启进程,不为空时表示提供服务期间重启 +## mongos +```json +{ + "ip":"127.0.0.1", + "port":27021, + "instanceType":"mongos", + "singleNodeInstallRestart":false, + "auth":true, + "cacheSizeGB": null, + "mongoSConfDbOld":"127.0.0.2:27001", + "MongoSConfDbNew":"127.0.0.2:27004", + "adminUsername":"", + "adminPassword":"" +} +``` \ No newline at end of file diff --git a/dbm-services/redis/db-tools/dbactuator/example/mongod_install.example.md b/dbm-services/redis/db-tools/dbactuator/example/mongod_install.example.md new file mode 100644 index 0000000000..510b1ebb6e --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/example/mongod_install.example.md @@ -0,0 +1,60 @@ +### mongod_install +初始化新机器: + +```json +./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="mongod_install" --data_dir=/path/to/data --backup_dir=/path/to/backup --user="xxx" --group="xxx" --payload='{{payload_base64}}' +``` +--data_dir、--backup_dir 可以留空. 
diff --git a/dbm-services/redis/db-tools/dbactuator/example/mongod_install.example.md b/dbm-services/redis/db-tools/dbactuator/example/mongod_install.example.md
new file mode 100644
index 0000000000..510b1ebb6e
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/mongod_install.example.md
@@ -0,0 +1,60 @@
+### mongod_install
+Install mongod:
+
+```sh
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="mongod_install" --data_dir=/path/to/data --backup_dir=/path/to/backup --user="xxx" --group="xxx" --payload='{{payload_base64}}'
+```
+--data_dir and --backup_dir may be left empty. --user is the username the process runs as and --group is that user's group; if both are empty they default to mysql.
+
+Original payload
+
+## shardsvr
+```json
+{
+    "mediapkg":{
+        "pkg":"mongodb-linux-x86_64-3.4.20.tar.gz",
+        "pkg_md5":"e68d998d75df81b219e99795dec43ffb"
+    },
+    "ip":"127.0.0.1",
+    "port":27001,
+    "dbVersion":"3.4.20",
+    "instanceType":"mongod",
+    "app":"test",
+    "areaId":"test1",
+    "setId":"s1",
+    "auth": true,
+    "clusterRole":"shardsvr",
+    "dbConfig":{
+        "slowOpThresholdMs":200,
+        "cacheSizeGB":1,
+        "oplogSizeMB":500,
+        "destination":"file"
+    }
+}
+```
+When deploying a plain replica set, leave the "clusterRole" field empty.
+
+## configsvr
+```json
+{
+    "mediapkg":{
+        "pkg":"mongodb-linux-x86_64-3.4.20.tar.gz",
+        "pkg_md5":"e68d998d75df81b219e99795dec43ffb"
+    },
+    "ip":"127.0.0.1",
+    "port":27002,
+    "dbVersion":"3.4.20",
+    "instanceType":"mongod",
+    "app":"test",
+    "areaId":"test1",
+    "setId":"conf",
+    "auth": true,
+    "clusterRole":"configsvr",
+    "dbConfig":{
+        "slowOpThresholdMs":200,
+        "cacheSizeGB":1,
+        "oplogSizeMB":500,
+        "destination":"file"
+    }
+}
+```
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/example/mongod_replace.example.md b/dbm-services/redis/db-tools/dbactuator/example/mongod_replace.example.md
new file mode 100644
index 0000000000..b7e927322f
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/mongod_replace.example.md
@@ -0,0 +1,29 @@
+### mongod_replace
+Replace a mongod node:
+
+```sh
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="mongod_replace" --payload='{{payload_base64}}'
+```
+
+
+Original payload
+
+## mongod
+```json
+{
+    "ip":"127.0.0.1",
+    "port":27002,
+    "sourceIP":"127.0.0.3",
+    "sourcePort":27007,
+    "sourceDown":true,
+    "adminUsername":"xxx",
+    "adminPassword":"xxxxxxxx",
+    "targetIP":"127.0.0.1",
+    "targetPort":27004,
+    "targetPriority":"",
+    "targetHidden":""
+}
+```
+"sourceDown": whether the source node is already down.
+"targetPriority" optionally sets the priority of the replacement node.
+"targetHidden" optionally marks the replacement node as hidden.
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/example/mongod_step_down.example.md b/dbm-services/redis/db-tools/dbactuator/example/mongod_step_down.example.md
new file mode 100644
index 0000000000..6227726baa
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/mongod_step_down.example.md
@@ -0,0 +1,18 @@
+### replicaset_stepdown
+Step down the primary:
+
+```sh
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="replicaset_stepdown" --payload='{{payload_base64}}'
+```
+
+
+Original payload
+
+```json
+{
+    "ip":"127.0.0.1",
+    "port":27001,
+    "adminUsername":"xxx",
+    "adminPassword":"xxx"
+}
+```
\ No newline at end of file
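For orientation, the effect of a step-down can be observed with the standard replica set helpers; a sketch under the assumption that the node above is the current primary (illustrative only, not necessarily how the atom job is implemented internally):

```sh
#!/bin/sh
# Ask the primary to relinquish its role; the shell connection may drop when it does.
mongo --host 127.0.0.1 --port 27001 -u xxx -p xxx --authenticationDatabase admin \
    --quiet --eval 'rs.stepDown()'
# Confirm the node is no longer primary (prints false after a successful step-down).
mongo --host 127.0.0.1 --port 27001 -u xxx -p xxx --authenticationDatabase admin \
    --quiet --eval 'db.isMaster().ismaster'
```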
diff --git a/dbm-services/redis/db-tools/dbactuator/example/mongos_install.example.md b/dbm-services/redis/db-tools/dbactuator/example/mongos_install.example.md
new file mode 100644
index 0000000000..b9dbdc617b
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/mongos_install.example.md
@@ -0,0 +1,30 @@
+### mongos_install
+Install mongos:
+
+```sh
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="mongos_install" --data_dir=/path/to/data --backup_dir=/path/to/backup --user="xxx" --group="xxx" --payload='{{payload_base64}}'
+```
+--data_dir and --backup_dir may be left empty. --user is the username the process runs as and --group is that user's group; if both are empty they default to mysql.
+
+Original payload
+
+```json
+{
+    "mediapkg":{
+        "pkg":"mongodb-linux-x86_64-3.4.20.tar.gz",
+        "pkg_md5":"e68d998d75df81b219e99795dec43ffb"
+    },
+    "ip":"127.0.0.1",
+    "port":27021,
+    "dbVersion":"3.4.20",
+    "instanceType":"mongos",
+    "app":"test",
+    "areaId":"test1",
+    "auth": true,
+    "configDB":["127.0.0.2:27001","127.0.0.3:27002","127.0.0.4:27003"],
+    "dbConfig":{
+        "slowOpThresholdMs":200,
+        "destination":"file"
+    }
+}
+```
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/example/os_mongo_init.example.md b/dbm-services/redis/db-tools/dbactuator/example/os_mongo_init.example.md
new file mode 100644
index 0000000000..3aa463c70e
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/os_mongo_init.example.md
@@ -0,0 +1,16 @@
+### os_mongo_init
+Initialize a new machine:
+
+```sh
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="os_mongo_init" --data_dir=/path/to/data --backup_dir=/path/to/backup --user="xxx" --group="xxx" --payload='{{payload_base64}}'
+```
+--data_dir and --backup_dir may be left empty. --user is the username the process runs as and --group is that user's group; if both are empty they default to mysql.
+
+Original payload
+
+```json
+{
+"user":"xxx",
+"password":"xxxxxxx"
+}
+```
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/example/predixy_install.example.md b/dbm-services/redis/db-tools/dbactuator/example/predixy_install.example.md
new file mode 100644
index 0000000000..81aaeade0c
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/predixy_install.example.md
@@ -0,0 +1,41 @@
+# predixy_install
+## Install predixy
+`./dbactuator_redis
+--uid={{uid}}
+--root_id={{root_id}}
+--node_id={{node_id}}
+--version_id={{version_id}}
+--atom-job-list="predixy_install"
+--payload='{{payload_base64}}'`
+
+## Prerequisites:
+- First run `./dbactuator_redis --atom-job-list="sysinit"`
+- Download `predixy-1.0.5.tar.gz` to the `/data/install` directory;
+
+
+## Original payload
+```json
+{
+    "ip":"127.0.0.1",
+    "port":50000,
+    "predixypasswd":"xxxxx",
+    "redispasswd":"xxxxx",
+    "servers":[
+        "127.0.0.1:11",
+        "2.2.2.2:11"
+    ],
+    "dbconfig":{
+        "workerthreads":"8",
+        "clienttimeout":"0",
+        "RefreshInterval":"1",
+        "serverfailurelimit":"10",
+        "serverretrytimeout":"1",
+        "servertimeout":"0",
+        "keepalive":"0"
+    },
+    "mediapkg":{
+        "pkg":"predixy-1.4.0.tar.gz",
+        "pkg_md5":"9a863ce100bfe6138523d046c068f49c"
+    }
+}
+```
\ No newline at end of file
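Because predixy speaks the Redis protocol, the freshly installed proxy can be smoke-tested with plain redis-cli (port and password taken from the example payload above):

```sh
#!/bin/sh
# A healthy predixy answers PONG and routes commands to the backend servers.
redis-cli -h 127.0.0.1 -p 50000 -a xxxxx ping
redis-cli -h 127.0.0.1 -p 50000 -a xxxxx get some_test_key
```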
diff --git a/dbm-services/redis/db-tools/dbactuator/example/redis_backup.example.md b/dbm-services/redis/db-tools/dbactuator/example/redis_backup.example.md
new file mode 100644
index 0000000000..f781470afb
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/redis_backup.example.md
@@ -0,0 +1,25 @@
+### redis_backup
+Back up:
+```
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="redis_backup" --data_dir=/path/to/data --backup_dir=/path/to/backup --payload='{{payload_base64}}'
+```
+
+`--data_dir` and `--backup_dir` may be left empty.
+
+Original payload
+```json
+{
+    "bk_biz_id":"1111",
+    "domain": "cache.hello.testapp.db",
+    "ip":"xx.xx.xx.xx",
+    "ports":[],
+    "start_port":30000,
+    "inst_num":10,
+    "backup_type":"normal_backup",
+    "without_to_backup_sys":true, // whether to upload to the backup system; default false
+    "ssd_log_count":{ // tendisssd needs these when backing up to rebuild a slave; for a normal backup omit them or pass 0
+        "log-count":8000000,
+        "slave-log-keep-count":5000000
+    }
+}
+```
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/example/redis_dts_datacheck.example.md b/dbm-services/redis/db-tools/dbactuator/example/redis_dts_datacheck.example.md
new file mode 100644
index 0000000000..2f29b0433f
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/redis_dts_datacheck.example.md
@@ -0,0 +1,36 @@
+### redis_dts_datacheck
+Run a DTS data check:
+```sh
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="redis_dts_datacheck" --payload='{{payload_base64}}'
+```
+
+dts_type is the migration type; it has four values:
+- one_app_diff_cluster: migration between different clusters under the same business
+- diff_app_diff_cluster: migration between clusters under different businesses
+- sync_to_other_system: sync to another system, e.g. migration to Tencent Cloud
+- user_built_to_dbm: migrate a user-built redis into the DBM system
+
+Original payload:
+```json
+{
+    "pkg":"dbtools.tar.gz",
+    "pkg_md5":"ced0fa280c63cb31536fefc1845f3ff0",
+    "bk_biz_id":"testapp",
+    "dts_type":"one_app_diff_cluster",
+    "src_redis_ip":"127.0.0.1", // source redis info
+    "src_redis_port_segmentlist":[
+        {
+            "port":30000,
+            "seg_start":-1,
+            "seg_end":-1
+        }
+    ],
+    "src_hash_tag":false, // whether hash_tag is enabled
+    "src_redis_password":"xxxxx", // the redis password, not the srcCluster proxy password
+    "src_cluster_addr":"tendisx.aaaa.testapp.db:50000",
+    "dst_cluster_addr":"tendisx.bbbb.testapp.db:50000", // destination cluster addr
+    "dst_cluster_password":"yyyy", // destination cluster proxy password
+    "key_white_regex":"*",
+    "key_black_regex":""
+}
+```
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/example/redis_dts_datarepaire.example.md b/dbm-services/redis/db-tools/dbactuator/example/redis_dts_datarepaire.example.md
new file mode 100644
index 0000000000..df0e19a973
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/redis_dts_datarepaire.example.md
@@ -0,0 +1,36 @@
+### redis_dts_datarepaire
+Run a DTS data repair:
+```sh
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="redis_dts_datarepaire" --payload='{{payload_base64}}'
+```
+
+dts_type is the migration type; it has four values:
+- one_app_diff_cluster: migration between different clusters under the same business
+- diff_app_diff_cluster: migration between clusters under different businesses
+- sync_to_other_system: sync to another system, e.g. migration to Tencent Cloud
+- user_built_to_dbm: migrate a user-built redis into the DBM system
+
+Original payload:
+```json
+{
+    "pkg":"dbtools.tar.gz",
+    "pkg_md5":"ced0fa280c63cb31536fefc1845f3ff0",
+    "bk_biz_id":"testapp",
+    "dts_type":"one_app_diff_cluster",
+    "src_redis_ip":"127.0.0.1", // source redis info
+    "src_redis_port_segmentlist":[
+        {
+            "port":30000,
+            "seg_start":-1,
+            "seg_end":-1
+        }
+    ],
+    "src_hash_tag":false, // whether hash_tag is enabled
+    "src_redis_password":"xxxxx", // the redis password, not the srcCluster proxy password
+    "src_cluster_addr":"tendisx.aaaa.testapp.db:50000",
+    "dst_cluster_addr":"tendisx.bbbb.testapp.db:50000", // destination cluster addr
+    "dst_cluster_password":"yyyy", // destination cluster proxy password
+    "key_white_regex":"*",
+    "key_black_regex":""
+}
+```
\ No newline at end of file
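Most payloads in these examples carry a `pkg`/`pkg_md5` pair. A small sketch for computing the digest that goes into `pkg_md5` (assuming the package sits in `/data/install`, as the prerequisites elsewhere describe):

```sh
#!/bin/sh
# Print only the md5 hex digest of the package.
md5sum /data/install/dbtools.tar.gz | awk '{print $1}'
```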
diff --git a/dbm-services/redis/db-tools/dbactuator/example/redis_install.example.md b/dbm-services/redis/db-tools/dbactuator/example/redis_install.example.md
new file mode 100644
index 0000000000..548d717c3b
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/redis_install.example.md
@@ -0,0 +1,89 @@
+### redis install
+Install redis
+`./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="redis_install" --data_dir=/path/to/data --backup_dir=/path/to/backup --payload='{{payload_base64}}'`
+
+`--data_dir` and `--backup_dir` may be left empty.
+
+Prerequisites:
+- First run `./dbactuator_redis --atom-job-list="sysinit"`
+- Download `redis-6.2.7.tar.gz` to the `/data/install` directory;
+
+Original payload
+```json
+{
+    "pkg":"redis-6.2.7.tar.gz",
+    "pkg_md5":"1fc9e5c3a044ce523844a6f2717e5ac3",
+    "dbtoolspkg":{
+        "pkg":"dbtools.tar.gz",
+        "pkg_md5":"334cf6e3b84d371325052d961584d5aa"
+    },
+    "data_dirs":[], // /data1/ and /data/ are tried first as data directories; the directories in data_dirs are then tested and used if they qualify
+    "ip":"127.0.0.1",
+    "ports":[], // non-consecutive ports
+    "start_port":30000, // consecutive ports: the starting port
+    "inst_num":3, // number of instances
+    "password":"xxx",
+    "databases":2,
+    "db_type":"TwemproxyRedisInstance",
+    "maxmemory":536870912,
+    "redis_conf_configs":{
+        "activerehashing":"yes",
+        "always-show-logo":"yes",
+        "aof-load-truncated":"yes",
+        "aof-rewrite-incremental-fsync":"yes",
+        "aof-use-rdb-preamble":"no",
+        "appendfilename":"appendonly.aof",
+        "appendfsync":"everysec",
+        "appendonly":"no",
+        "auto-aof-rewrite-min-size":"64mb",
+        "auto-aof-rewrite-percentage":"100",
+        "bind":"{{address}} 127.0.0.1",
+        "client-output-buffer-limit":"normal 0 0 0 \n client-output-buffer-limit slave 2048mb 2048mb 300 \n client-output-buffer-limit pubsub 32mb 8mb 60",
+        "cluster-config-file":"nodes.conf",
+        "cluster-enabled":"{{cluster_enabled}}",
+        "cluster-node-timeout":"15000",
+        "daemonize":"yes",
+        "databases":"{{databases}}",
+        "dbfilename":"dump.rdb",
+        "dir":"{{redis_data_dir}}/data",
+        "hash-max-ziplist-entries":"512",
+        "hash-max-ziplist-value":"64",
+        "hll-sparse-max-bytes":"3000",
+        "hz":"10",
+        "lazyfree-lazy-eviction":"yes",
+        "lazyfree-lazy-expire":"yes",
+        "lazyfree-lazy-server-del":"yes",
+        "list-compress-depth":"0",
+        "list-max-ziplist-size":"-2",
+        "logfile":"{{redis_data_dir}}/redis.log",
+        "loglevel":"notice",
+        "lua-time-limit":"5000",
+        "maxclients":"180000",
+        "maxmemory":"{{maxmemory}}",
+        "maxmemory-policy":"noeviction",
+        "no-appendfsync-on-rewrite":"yes",
+        "pidfile":"{{redis_data_dir}}/redis.pid",
+        "port":"{{port}}",
+        "protected-mode":"yes",
+        "rdbchecksum":"yes",
+        "rdbcompression":"yes",
+        "rename-command":"flushall cleanall \n rename-command config confxx \n rename-command flushdb cleandb \n rename-command debug nobug \n rename-command keys mykeys",
+        "repl-diskless-sync":"no",
+        "requirepass":"{{password}}",
+        "save":"",
+        "slave-lazy-flush":"yes",
+        "slave-priority":"100",
+        "slave-read-only":"yes",
+        "slave-serve-stale-data":"yes",
+        "slowlog-log-slower-than":"10000",
+        "slowlog-max-len":"256",
+        "stop-writes-on-bgsave-error":"yes",
+        "supervised":"no",
+        "tcp-backlog":"511",
+        "tcp-keepalive":"300",
+        "timeout":"0",
+        "zset-max-ziplist-entries":"128",
+        "zset-max-ziplist-value":"64"
+    }
+}
+```
\ No newline at end of file
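When `ports` is empty, the instances occupy consecutive ports starting at `start_port` (`inst_num` of them). A sketch for checking each instance of such a layout (values and password from the example payload above):

```sh
#!/bin/sh
# start_port=30000 and inst_num=3 means instances on ports 30000..30002.
start_port=30000
inst_num=3
i=0
while [ "$i" -lt "$inst_num" ]; do
    port=$((start_port + i))
    printf 'port %s: ' "$port"
    redis-cli -p "$port" -a xxx ping
    i=$((i + 1))
done
```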
diff --git a/dbm-services/redis/db-tools/dbactuator/example/redis_keysdelete_files.example.md b/dbm-services/redis/db-tools/dbactuator/example/redis_keysdelete_files.example.md
new file mode 100644
index 0000000000..a0141ab390
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/redis_keysdelete_files.example.md
@@ -0,0 +1,35 @@
+### redis keysdelete files
+Delete redis key result files:
+```
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="tendis_keyspattern" --payload='{{payload_base64}}'
+```
+
+Leave `--data_dir` and `--backup_dir` empty.
+Prerequisites: the machine is initialized and redis is installed
+- Run `./dbactuator_redis --atom-job-list="sysinit"`
+- Run `./dbactuator_redis --atom-job-list="redis_install"`
+
+Original payload
+```json
+
+{
+    "pkg":"keytools.tag.gz",
+    "pkg_md5":"e0598229d65e1232b33f60a56e16cd0a",
+    "fileserver": {
+        "url": "https address of the artifact repository",
+        "bucket": "bk-dbm-redistest",
+        "password": "xxxx",
+        "username": "xxx",
+        "project": "bk-dbm"
+    },
+    "bk_biz_id":"1111",
+    "path": "/redis/keyfiles/20220919.cache2006.moyecachetest.redistest.db",
+    "domain": "cache2006.moyecachetest.redistest.db",
+    "proxy_port":52006,
+    "proxy_password":"xxxx",
+    "tendis_type":"TwemproxyRedisInstance",
+    "delete_rate": 20000,
+    "tendisplus_delete_rate": 3000
+
+}
+```
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/example/redis_keyspattern.example.md b/dbm-services/redis/db-tools/dbactuator/example/redis_keyspattern.example.md
new file mode 100644
index 0000000000..c81e77ed64
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/redis_keyspattern.example.md
@@ -0,0 +1,35 @@
+### redis keyspattern
+Extract redis keys:
+```
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="tendis_keyspattern" --payload='{{payload_base64}}'
+```
+
+Leave `--data_dir` and `--backup_dir` empty.
+Prerequisites: the machine is initialized and redis is installed
+- Run `./dbactuator_redis --atom-job-list="sysinit"`
+- Run `./dbactuator_redis --atom-job-list="redis_install"`
+
+Original payload
+```json
+{
+    "pkg":"keytools.tag.gz",
+    "pkg_md5":"e0598229d65e1232b33f60a56e16cd0a",
+    "fileserver": {
+        "url": "https address of the artifact repository",
+        "bucket": "bk-dbm-redistest",
+        "password": "xxxx",
+        "username": "xxxx",
+        "project": "bk-dbm"
+    },
+    "bk_biz_id":"1111",
+    "path": "/redis/keyfiles/20220913.cache.moyelocaltest.redistest.db/",
+    "domain": "cache.moyelocaltest.redistest.db",
+    "ip":"127.0.0.1",
+    "ports":[46000,46001,46002,46003,46004,46005,46006,46007,46008,46009,46010,46011,46012,46013,46014],
+    "start_port":0,
+    "inst_num":0,
+    "key_white_regex": "test*",
+    "key_black_regex": ""
+
+}
+```
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/example/redis_keyspattern.json b/dbm-services/redis/db-tools/dbactuator/example/redis_keyspattern.json
new file mode 100644
index 0000000000..6729a7cd18
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/redis_keyspattern.json
@@ -0,0 +1,21 @@
+{
+    "pkg":"dbtools.tar.gz",
+    "pkg_md5":"334cf6e3b84d371325052d961584d5aa",
+    "fileserver": {
+        "url": "https address of the artifact repository",
+        "bucket": "bk-dbm-redistest",
+        "password": "xxxx",
+        "username": "xxxx",
+        "project": "bk-dbm"
+    },
+    "bk_biz_id":"1111",
+    "path": "/redis/keyfiles/20220913.cache.moyelocaltest.redistest.db/",
+    "domain": "cache.moyelocaltest.redistest.db",
+    "ip":"127.0.0.1",
+    "ports":[11000,11001,11002,11003],
+    "start_port":0,
+    "inst_num":0,
+    "key_white_regex": "*",
+    "key_black_regex": "*"
+
+}
\ No newline at end of file
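`key_white_regex` and `key_black_regex` take glob-style patterns such as `test*`. To preview what a white pattern would match on a single instance, redis-cli's non-blocking scan can be used (port and password from the examples):

```sh
#!/bin/sh
# SCAN the keyspace cursor-by-cursor and list keys matching the white pattern.
redis-cli -p 46000 -a xxxx --scan --pattern 'test*' | head -20
```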
diff --git a/dbm-services/redis/db-tools/dbactuator/example/redis_keyspattern_delete.example.md b/dbm-services/redis/db-tools/dbactuator/example/redis_keyspattern_delete.example.md
new file mode 100644
index 0000000000..5c547d3bdf
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/redis_keyspattern_delete.example.md
@@ -0,0 +1,39 @@
+### redis keyspattern
+Delete redis keys by pattern:
+```
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="tendis_keyspattern" --payload='{{payload_base64}}'
+```
+
+Leave `--data_dir` and `--backup_dir` empty.
+Prerequisites: the machine is initialized and redis is installed
+- Run `./dbactuator_redis --atom-job-list="sysinit"`
+- Run `./dbactuator_redis --atom-job-list="redis_install"`
+
+Original payload
+```json
+
+{
+    "pkg":"keytools.tag.gz",
+    "pkg_md5":"e0598229d65e1232b33f60a56e16cd0a",
+    "fileserver": {
+        "url": "https address of the artifact repository",
+        "bucket": "bk-dbm-redistest",
+        "password": "xxxx",
+        "username": "xxxx",
+        "project": "bk-dbm"
+    },
+    "bk_biz_id":"1111",
+    "path": "/redis/keyfiles/20220916.cache.moyelocaltest.redistest.db/",
+    "domain": "cache.moyelocaltest.redistest.db",
+    "ip":"127.0.0.1",
+    "ports":[46000,46001,46002,46003,46004,46005,46006,46007,46008,46009,46010,46011,46012,46013,46014],
+    "start_port":0,
+    "inst_num":0,
+    "key_white_regex": "test*",
+    "key_black_regex": "",
+    "is_keys_to_be_del": true,
+    "delete_rate": 20000,
+    "tendisplus_delete_rate": 3000
+
+}
+```
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/example/redis_migrate_slots.example.json b/dbm-services/redis/db-tools/dbactuator/example/redis_migrate_slots.example.json
new file mode 100644
index 0000000000..9cb4892b0e
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/redis_migrate_slots.example.json
@@ -0,0 +1,16 @@
+{
+    "src_node": {
+        "ip": "127.0.0.1",
+        "port":40000,
+        "password": "redisPassTest"
+    },
+    "dst_node": {
+        "ip": "127.0.0.1",
+        "port":47001,
+        "password": "redisPassTest"
+    },
+    "is_delete_node":true,
+    "migrate_specified_slot":false,
+    "slots":"0-100"
+
+}
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/example/redis_migrate_slots.example.md b/dbm-services/redis/db-tools/dbactuator/example/redis_migrate_slots.example.md
new file mode 100644
index 0000000000..fe7d487c37
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/redis_migrate_slots.example.md
@@ -0,0 +1,43 @@
+### tendisplus migrate slot
+Migrate slots:
+
+Original payload, example 1:
+```json
+{
+    "src_node": {
+        "ip": "127.0.0.1",
+        "port":40000,
+        "password": "redisPassTest"
+    },
+    "dst_node": {
+        "ip": "127.0.0.1",
+        "port":45100,
+        "password": "redisPassTest"
+    },
+    "migrate_specified_slot":true,
+    "slots":"0-100"
+
+}
+```
+
+Original payload, example 2:
+```json
+{
+    "src_node": {
+        "ip": "127.0.0.1",
+        "port":40000,
+        "password": "redisPassTest"
+    },
+    "dst_node": {
+        "ip": "127.0.0.1",
+        "port":45100,
+        "password": "redisPassTest"
+    },
+    "migrate_specified_slot":false,
+    "slots":"0-100"
+
+}
+```
diff --git a/dbm-services/redis/db-tools/dbactuator/example/redis_replicaof.example.md b/dbm-services/redis/db-tools/dbactuator/example/redis_replicaof.example.md
new file mode 100644
index 0000000000..0524aec996
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/redis_replicaof.example.md
@@ -0,0 +1,34 @@
+### redis replicaof
+Set up master/slave replication:
+```
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="redis_replicaof" --payload='{{payload_base64}}'
+```
+
+Prerequisites:
+- First run `./dbactuator_redis --atom-job-list="sysinit"`
+- First run `./dbactuator_redis --atom-job-list="redis_install"` to make sure redis is installed OK;
+
+Original payload
+```json
+{
+    "replica_pairs":[
+        {
+            "master_ip":"127.0.0.1",
+            "master_port":30000,
+            "master_auth":"xxx",
+            "slave_ip":"127.0.0.1",
+            "slave_port":31000,
+            "slave_password":"xxxx"
+        },
+        {
+            "master_ip":"127.0.0.1",
+            "master_port":30001,
+            "master_auth":"xxx",
+            "slave_ip":"127.0.0.1",
+            "slave_port":31001,
+            "slave_password":"xxx"
+        }
+    ]
+}
+```
+
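Once the pairs are wired up, replication can be verified from the slave side with redis-cli (port and password from the example payload above):

```sh
#!/bin/sh
# On the slave, role should be "slave" and master_link_status should be "up".
redis-cli -p 31000 -a xxxx info replication | grep -E 'role|master_link_status'
```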
diff --git a/dbm-services/redis/db-tools/dbactuator/example/redis_replicaof_batch.example.md b/dbm-services/redis/db-tools/dbactuator/example/redis_replicaof_batch.example.md
new file mode 100644
index 0000000000..475c167dd0
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/redis_replicaof_batch.example.md
@@ -0,0 +1,27 @@
+### redis replicaof batch
+Set up master/slave replication in batch:
+```
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="redis_replica_batch" --payload='{{payload_base64}}'
+```
+
+Prerequisites:
+- First run `./dbactuator_redis --atom-job-list="sysinit"`
+- First run `./dbactuator_redis --atom-job-list="redis_install"` to make sure redis is installed OK;
+
+Original payload
+```json
+{
+    "bacth_pairs":[
+        {
+            "master_ip":"127.0.0.1",
+            "master_start_port":30000,
+            "master_inst_num":3,
+            "master_auth":"xxx",
+            "slave_ip":"127.0.0.1",
+            "slave_start_port":30000,
+            "slave_inst_num":3,
+            "slave_password":"xxx"
+        }
+    ]
+}
+```
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/example/sysinit.example.md b/dbm-services/redis/db-tools/dbactuator/example/sysinit.example.md
new file mode 100644
index 0000000000..8091e482cd
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/sysinit.example.md
@@ -0,0 +1,15 @@
+### sysinit
+Initialize a new machine:
+```
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="sysinit" --data_dir=/path/to/data --backup_dir=/path/to/backup --payload='{{payload_base64}}'
+```
+
+`--data_dir` and `--backup_dir` may be left empty.
+
+Original payload
+```json
+{
+    "user":"mysql",
+    "password":"xxxx"
+}
+```
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbactuator/example/tendisssd_dr_restore.examle.md b/dbm-services/redis/db-tools/dbactuator/example/tendisssd_dr_restore.examle.md
new file mode 100644
index 0000000000..ce6b2fee3d
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/example/tendisssd_dr_restore.examle.md
@@ -0,0 +1,62 @@
+### tendisssd dr restore
+Rebuild the tendisSSD DR (slave):
+```
+./dbactuator_redis --uid={{uid}} --root_id={{root_id}} --node_id={{node_id}} --version_id={{version_id}} --atom-job-list="tendisssd_dr_restore" --payload='{{payload_base64}}'
+```
+
+`--data_dir` and `--backup_dir` may be left empty.
+Prerequisites: the machine is initialized and tendisSSD is installed
+- Initialize the machine: [sysinit](./sysinit.example.md)
+- Install tendisSSD: [redis_install](./redis_install.example.md)
+- First take a backup on the master and transfer the backup files to the slave; then run this atom job on the slave machine;
+![tendisssd_dr_restore](../imgs/tendissd_redo_dr.png)
+
+Original payload
+```json
+{
+    // the contents of backup_tasks are returned by the redis_backup atom job
+    "backup_tasks":[
+        {
+            "bk_biz_id":"myapp",
+            "ip":"127.0.0.1",
+            "port":30000,
+            "tendis_type":"TendisSSDInstance",
+            "backup_type":"normal_backup",
+            "role":"slave",
+            "data_size":13974781892,
+            "data_dir":"/data1/redis/30000/data",
+            "backup_dir":"/data/dbbak",
+            "backup_files":[
+                "/data/dbbak/myapp-TENDISSSD-FULL-slave-127.0.0.1-30000-20220926-113124.tar"
+            ],
+            "backup_taskids":null
+        },
+        {
+            "bk_biz_id":"myapp",
+            "ip":"127.0.0.1",
+            "port":30001,
+            "tendis_type":"TendisSSDInstance",
+            "backup_type":"normal_backup",
+            "role":"slave",
+            "data_size":11471121387,
+            "data_dir":"/data1/redis/30001/data",
+            "backup_dir":"/data/dbbak",
+            "backup_files":[
+                "/data/dbbak/myapp-TENDISSSD-FULL-slave-127.0.0.1-30001-20220926-113208.tar"
+            ],
+            "backup_taskids":null
+        }
+    ],
+    "master_ip":"127.0.0.1",
+    "master_start_port":30000,
+    "master_inst_num":2,
+    "master_ports":null,
+    "master_auth":"xxxxx",
+    "slave_ip":"2.2.2.2",
+    "slave_start_port":30000,
+    "slave_inst_num":2,
+    "slave_ports":null,
+    "slave_password":"xxxxx",
"task_dir":"/data/dbbak" //本机备份文件保存的目录 +} +``` \ No newline at end of file diff --git a/dbm-services/redis/db-tools/dbactuator/example/twemproxy_install.example.md b/dbm-services/redis/db-tools/dbactuator/example/twemproxy_install.example.md new file mode 100644 index 0000000000..99662c8be6 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/example/twemproxy_install.example.md @@ -0,0 +1,42 @@ +# twemproxy install +## 安装redis +`./dbactuator_redis +--uid={{uid}} +--root_id={{root_id}} +--node_id={{node_id}} +--version_id={{version_id}} +--atom-job-list="twemproxy_install" +--payload='{{payload_base64}}'` + +## 前置工作: +- 先运行 `./dbactuator_redis --atom-job-list="twemproxy_install"` +- 将`twemproxy-0.4.1-v22.tar.gz`下载到`/data/install`目录下; + +## 原始payload +[twemproxy_install.json] (./twemproxy_install.json) + +## 目录与文件 +- binDir : /usr/local/twemproxy -> /usr/local/twemproxy-0.4.1-v22 +- dataDir : /data/twemproxy-0.2.4/$port +- configFile : /data/twemproxy-0.2.4/52006/nutcracker.52006.yml +``` + cat /data/twemproxy-0.2.4/52006/nutcracker.52006.yml +# twemproxy instance conf of 1.1.1.2 52006 +nosqlproxy: + backlog : 512 + redis_password : xxxxx + redis : true + distribution : modhash + hash : fnv1a_64 + slowms : 1000000 + password : xxxxx + server_failure_limit : 3 + listen : 1.1.1.2:52006 + auto_eject_hosts : false + preconnect : false + server_retry_timeout : 2000 + server_connections : 1 + servers: + - 127.0.0.1:30000:1 redistest 0-69999 1 + +``` \ No newline at end of file diff --git a/dbm-services/redis/db-tools/dbactuator/example/twemproxy_install.json b/dbm-services/redis/db-tools/dbactuator/example/twemproxy_install.json new file mode 100644 index 0000000000..feeddf9ffd --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/example/twemproxy_install.json @@ -0,0 +1,14 @@ +{ + "pkg": "twemproxy-0.4.1-v22.tar.gz", + "pkg_md5": "897caf79cb5adb8d8318299987b84483", + "redis_password":"x", + "password": "xxx", + "ip": "127.0.0.1", + "db_type": "TwemproxyRedisInstance", + "port": 50010, + "servers": ["127.0.0.1:30001:1 xxx 0-419999 1"], + "conf_configs": { + "hash_tag":"{}", + "kkk2":3333 + } +} diff --git a/dbm-services/redis/db-tools/dbactuator/example/twemproxy_operate.example.md b/dbm-services/redis/db-tools/dbactuator/example/twemproxy_operate.example.md new file mode 100644 index 0000000000..993ad1a6b4 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/example/twemproxy_operate.example.md @@ -0,0 +1,21 @@ +# twemproxy 启停、下架 +`./dbactuator_redis +--uid={{uid}} +--root_id={{root_id}} +--node_id={{node_id}} +--version_id={{version_id}} +--atom-job-list="twemproxy_operate" +--payload='{{payload_base64}}'` + +## 前置工作: +- 先运行 `./dbactuator_redis --atom-job-list="sysinit"` +- 先运行 `./dbactuator_redis --atom-job-list="twemproxy_install"` + +原始payload +```json +{ + "ip":"127.0.0.1", + "ports": 50000, + "operate": "proxy_open/proxy_close/proxy_shutdown" +} +`` \ No newline at end of file diff --git a/dbm-services/redis/db-tools/dbactuator/go.mod b/dbm-services/redis/db-tools/dbactuator/go.mod new file mode 100644 index 0000000000..8dfe8b3bae --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/go.mod @@ -0,0 +1,43 @@ +module dbm-services/redis/db-tools/dbactuator + +go 1.18 + +require ( + github.com/dustin/go-humanize v1.0.1 + github.com/flosch/pongo2/v6 v6.0.0 + github.com/go-playground/validator/v10 v10.12.0 + github.com/go-redis/redis/v8 v8.11.5 + github.com/gofrs/flock v0.8.1 + github.com/google/go-cmp v0.5.9 + github.com/panjf2000/ants/v2 v2.7.2 + github.com/pkg/errors 
diff --git a/dbm-services/redis/db-tools/dbactuator/go.mod b/dbm-services/redis/db-tools/dbactuator/go.mod
new file mode 100644
index 0000000000..8dfe8b3bae
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/go.mod
@@ -0,0 +1,43 @@
+module dbm-services/redis/db-tools/dbactuator
+
+go 1.18
+
+require (
+	github.com/dustin/go-humanize v1.0.1
+	github.com/flosch/pongo2/v6 v6.0.0
+	github.com/go-playground/validator/v10 v10.12.0
+	github.com/go-redis/redis/v8 v8.11.5
+	github.com/gofrs/flock v0.8.1
+	github.com/google/go-cmp v0.5.9
+	github.com/panjf2000/ants/v2 v2.7.2
+	github.com/pkg/errors v0.9.1
+	github.com/shirou/gopsutil/v3 v3.23.2
+	github.com/smartystreets/goconvey v1.7.2
+	github.com/spf13/cobra v1.7.0
+	golang.org/x/sys v0.7.0
+	gopkg.in/yaml.v2 v2.4.0
+)
+
+require (
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/go-ole/go-ole v1.2.6 // indirect
+	github.com/go-playground/locales v0.14.1 // indirect
+	github.com/go-playground/universal-translator v0.18.1 // indirect
+	github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect
+	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/jtolds/gls v4.20.0+incompatible // indirect
+	github.com/kr/pretty v0.3.0 // indirect
+	github.com/leodido/go-urn v1.2.3 // indirect
+	github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de // indirect
+	github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
+	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	github.com/smartystreets/assertions v1.2.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/tklauser/go-sysconf v0.3.11 // indirect
+	github.com/tklauser/numcpus v0.6.0 // indirect
+	github.com/yusufpapurcu/wmi v1.2.2 // indirect
+	golang.org/x/crypto v0.8.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
+)
diff --git a/dbm-services/redis/db-tools/dbactuator/go.sum b/dbm-services/redis/db-tools/dbactuator/go.sum
new file mode 100644
index 0000000000..c756d1aa31
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/go.sum
@@ -0,0 +1,120 @@
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/flosch/pongo2/v6 v6.0.0 h1:lsGru8IAzHgIAw6H2m4PCyleO58I40ow6apih0WprMU=
+github.com/flosch/pongo2/v6 v6.0.0/go.mod h1:CuDpFm47R0uGGE7z13/tTlt1Y6zdxvr2RLT5LJhsHEU=
+github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
+github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
+github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
+github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
+github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI= +github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA= +github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de h1:V53FWzU6KAZVi1tPp5UIsMoUWJ2/PNwYIDXnu7QuBCE= +github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/panjf2000/ants/v2 v2.7.2 h1:2NUt9BaZFO5kQzrieOmK/wdb/tQ/K+QHaxN8sOgD63U= +github.com/panjf2000/ants/v2 v2.7.2/go.mod h1:KIBmYG9QQX5U2qzFP/yQJaq/nSb6rahS9iEHkrCMgM8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat 
v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig= +github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shirou/gopsutil/v3 v3.23.2 h1:PAWSuiAszn7IhPMBtXsbSCafej7PqUOvY6YywlQUExU= +github.com/shirou/gopsutil/v3 v3.23.2/go.mod h1:gv0aQw33GLo3pG8SiWKiQrbDzbRY1K80RyZJ7V4Th1M= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= +github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= +github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/dbm-services/redis/db-tools/dbactuator/imgs/bk-dbactuator-redis_structur.png b/dbm-services/redis/db-tools/dbactuator/imgs/bk-dbactuator-redis_structur.png
new file mode 100644
index 0000000000000000000000000000000000000000..4cc723ed152435c88754ba5082a00d1c262b5cb2
GIT binary patch
literal 162444
[base85-encoded binary image data omitted: imgs/bk-dbactuator-redis_structur.png]
zI6AoC_i_BQEUafG`5=qAM#A6TQOacp#lN?on(`C6pCUhr0@GAf$CPcJmLUCUz zYMWUtVrW0*bibdXaDw$GdM4O+93%*0{5ZTlHHl|Y`)`lq@^LNORxX_(=c0a7_EIrk zojW~o$=1<~B)?spqn#Ja`?ZL{92ve1glllCV{e7C%pf)o>OW}x5xtFW@>jIAlP?WB_tL3) zbU!#;XWDeiCv-u4dBZnifxG(>6Sf7p`C45sIkT%|eYLsUwk0o`0k*o-+;itvRYErIwiBu+WR_x(?4ONfwZ$6oUZK}U|V@w_BtekG8{d6URBK^>vtj>ihu z7&G+z=O5PuM5Kr#oE~nY9XrFX2u_8n3C_m>U^Hs&jx?6lEdp4StSJ_iakFvhRycL^ zHY8gi+BRSNl%*PbaSp3&j8rMzItIFZCHdWl)it&$${jAD$|1W1@$CNL1YGPH;pL~{ zJ{JIi>j$xA0yVgO^PiML`bh-6I-Qjf^Lph+pm!EYxCJL4fIk5MWnbhEtI4sl3SCXn zat4Ve+i~3fgue%?z`2k!qJQ#7z>bR^-mg-N6Z7b3%5RDR-5`1jIxN_W*d-?NmBi(Y z?&6q5SGw}iA$D>6S0+Nb%CvI$JDd2DD)ZNy31c+xyKYfGdo$p79tUd(2@(&}cb_U; zm6Sr4uwM3l;K|#p)KR|D8ltJ^R8vo8VLqP=u^(~=n6JLuH}HOu1UodSTvT#s<%30; zR=Rc!?M=&8xNXdRmC|@*01Bm{ZZlc1P9VI)E4Q#hkpSS4)aq>g6B-5ow8d`UWPx4H z;6};jRPO3j&vfGhLdh`N><7EUoO!d?v>8X9khk=n<MR*y_bHb-GS8O7W)a)WJ-fv-CvSie|&%B zA?zG*1}Y8`*v ztvQ{s)o&!GEBFMjxor{T@?`67r#8A{g`0A$D~vnO0n{8M6O9wE=WL8#r*m7qDj<*} zgLpktm*{s%37@w^>KHa7y+bAJJpV07FLd$ycZF82Qlm?;vU#)r)1JF2U8a(>I#o#a z1@Wq&{|~`ItNa4c4kj6NAL9gVT)EdUi?|Adij0Y`VE0U3pi2u+1?cx-4)%Zsol*gk zXD5xEjgjj>Ne3#X#JJn*03>97&QVl+Zq7auR2?{Rv~|Zathum0a!IetFaiC+{}Bx~ zER5A^LgllHKRl-RaY9}ZjTo1_GiSLoV52sztDpKPIzXd?2u1O|Rer;&r}YC7f7f0a zb`XJHv0-Nb3cLzBVkY%ZzvF~@?tMg(x-IX%E3mZY#~-I^d~6&89lGu9js-~JYYh~6 z=kY^x9+kI?lrNhULFtw@g5GoU!6(JVi2?yI21siol%E)(-54vOsS_>LkD8JG$1{p& zC+k{SNzl*neVXD{VLyE&N%P-Bg({9Ui6g?oa{ZeWi-+v}JDi{xGRRdzqB%*%2A^8d zo0u9E9ezx6;fRR11wM}%6p={A)5kHaJ%Iw7Z>5_LRIB_u*ujTcUcxxfWo_i@Ze!lc zD_-ZN&$sdLoY&mRFZe;*$49Pg%<5RjG8DQQH~E9=Hx^~3)*v^O*#DA)0uGqe>s#?Y zX8?n^7<&R06VKlPiPUU~A^(H|=uq?oJv5_vB|$UiwJWGhffCP6ZK+^;rGd9-Ai2W) zKln~p|8(n`T@Wc6b{^!i6H~+|_dq2XQOq-7eIhAOY_{|)?ELrdfvFb3j_s#u{w>_aiG`Cj_s!u@emO+}rN{`da?)d%Nsz?-`VR z=k5RZ#e%Sx>2?Rpg-Ze}8a*`494) zvKAik{pUs>`r`Mej^PCMkh=lg6bPmxBmL$N;;2f3?xBFkrGGk$N|N zoAh;*j`8mJ^t}Je=tpnr|IduBTulg?6?@=Qc(*ZSST?~%@UjcW3eW-u?ay6lwsesj zwQKJNigI+WbapgIGkEX^&_RlluF@efpryJDc>91lzT9r0VidH)dq2GM@i^dmb*V7` zSZ3Bf$T#v_ZI|v}nF0w&&7O5I5!(-NRUHxd!?K>(7k1g8@2zZvp1q9QRG8L{sk6p) zwS2s>9{A2r{XA2eU!FW11aFG_ICK_dsTW8APKJE9`dupdKN2D}v%{XaLB?1z zXy|ec3flFiZQj`eEpbDpqb#!>yHunv6j{%I2EJ~b=KTjR_(8#6GQKZ-R&=4|5O`Zi zT)4g)x&Tx!DK2}Cm)C(xCPiU%Vgcxlp2C_E!|VP^tR9j_04_xOAK$?{5mFQc`~N7T zm?n@$h=yeqj|jV04!KMO^uM3Db-pz^zePBsSG>GQd-J1-SfnHJ{&dmo4NK%*47UYnvhjPBVy= zwrJ>YcD76#DDLM7e(i8(uxP<_nZP6*>d*%aH`OSIQ`wZNOgl4{0k^YUu zU5EXx85$q%!Y(l-B+#vn7D8<) zP#3tbb!(-7wpk{v^~n3t1|806$TW+LP|;y6lnyME(1YQLb{Yt zx=V7SpwdW5BaQT?LqbJxQvyntARwL6an|e1=zKGy=R4QA&UOCz{q>EX!hZMrJkNUW zb+3Efi%I7qS!rVgah)8eQEIq~RJWF`Y#(^`_&o~K5&;L_LeF%tBuG1S+KD!&d?GsF zt$1)7Or8q<8V5G!-g@bcJmGEW9{YO%CB}0N;Po7j*fqb(hnb&c`BVfUx2M5JNfJ7g zV^U@n2w3yQ{IKK$jwy~d$h$9xo_rZliI@i?$hru_oRtCtH_Df!YT)VN=s!4k6oaYQ zg-V_L{#JKQ2TZDay0;uzlL85lY*#it<3*2SVSSf7M|u_Y1<)Mv(g|*B>V?`C`E>`? zYn8Igz9jjfM@EbuPzY?T&BYilGh~gGf5uTV zwGIEvXdvxNn_4x#9Sd4wbS|EluV;3Z_>Gr*rB;l`Cbpn-8F$+GbI6BE$A*^^zGf=6 z&72axwtmavgd@ZUJ7sybMW}}XYh(zb8jo#&Z=3;2ly}fh$svK|m{?pak*|$i{rYFd za)HboKGROH+720PqZfmkTiU&q+@z{$aOv+J-EiuvPgnLkqGp}Y)(?(pVH`%Os`UD| zigwgV$dlt;m*2g5rlw{TW!M4rtpr0n|K`W-&?{r6ugL)FY=A4zi;qFSSsR z*Z{=#OOcBW@T1>uv&lechVv!3FfNg3%Z7eS8;@Y%t(M zX`%Kd$QzGcV9cKzUA=K0!%<)U)|LJ$Sl`C#;s}AW-}wcT?sae`i~$eEIET5{V5o8; z;}*)cO_&!h6do1{vZ(C{;Qxuw^)r-;kC?eemz3#BY<4u_kX)8wE8f{WNq*qCz?Vc; z#x-z1Hz8i_>sTwuO>U|e=gFuV1iSssJD&vO1(%pCma6Tm`Z&(??) 
zy$pB?0hq?jV~jM@XF{9hMwp59Y*qcU9v%$?V_z=O_M{^UdtQ8)NpTi?}-Z-jyr@ zgQHS~iFCYEe*C#`ZMI0ou!9+Q%UbG#1E^M{0p7VQ@ZXg5zuzrT5IQ$CI&Npc=Gz9x zFU~EgS<}D~ql+UkkqVbCy(|o{0q3EvCo*J&TEraY^o4K1_ck6r!S8y5uh?^;vdnhh z{e5~JX0jT!z-2RMwyZm$^n0wn;*Khlg z4I=>_or`cg<|VyG-pv&4hmQ{{UGg*#T62t@WVqh zThvE!NQGnj7C=w=?c1SFQB>qf2ai&O8J>AUf((=*rTPj%+34KD5X{W7`-VI&w#cF3 zsxiP)+dKw0_wsDL(@tP*c2zwe@{R{(^)gcJbF!qtaQE^HKQ{T2aw zcXyEyWzVm*0q%``c_NUkt#YnJ(^rPF2A)dEKZukAwL*%LsGkGpuV(j&)J2e9xo><@ z_xuQ>TjJ?V-{w}%N5lW|Xtv%S$C4Uup58^FQ%q>LYvtutRSGyf;%|c+)ks!M`bywE zU)TO-AS7Sp=F)Lust|k z-w3&pS4@g^IpiqQe1bJiAZgo!Mtl-f){>S<&Rk)e2$Kk?SZ*O6`0D%>c<#*I^VQq) z#HjpsuyKw>U@s2^MVr^gH9yZ(#u^GXFN+nuZoH=fo;i3E0WN#+Z({X%xfY@Z$3_L;x z&WAD(Doi4rr`{0B%N(Mz67wZ{>+F|J?wqK&Dn1c#Z_{sVR-#>0!JPxP8tzvZB^C2B zEO4xWU}A%s4Rc7GAsDQuMX2pRD5ytdHmZN5zH)ex4! zNgyq)68Zurz6o?0#RD(xeaMX%acg_T)l)$PArG^FwQD16uL4e3u=Z`4gQ!RlvX(o<3EBi@wcd&MspCyy)8Jt~ETKdw=z~?2-*QVz#wnMJhAFY>zmx$?SEo^?u5wYVA6Bs*T zs!?e5-qwtn8;*4S!aZk|K(%?8{;(#6#?NIMc+hqmzVSyIkDjq!+A9xsSP(+D|NAX)S=UAPIBTU=|Z-8 zolj2Ny}ztp@(Qoomo^8Pl0za|%3ihj$_}6x&mikIHOc@6b7~086?JyNRIF~ZX279Q zz6p<74t@b1_zrfeABpO$mBSBl#h+pQ`^J8+tT)?Jpzh=AbQ|f#xtm45$P_`Z)QL=? zF?91Kd#Nzb*7sDJJqq_c8@iEB{e36gJP8O)v4gsHD#G;Y`|k4tCM>K?W%P?YB487MB9D6a_y>c+Y;&zLQ@(FX}V?E<0$ zVJi$x=_Fa)8_u^WhFdO|*6gxr`e##F&)09W>$EoT?zT3a6u+K+50J9s^CQ|+3z_Nh zQjqMim?32xL-Ol%O_F=>RNpSGRBu=`Ev=Zl0g;^8f$bdC33JiXwI3mqObh46h6XO` zD^-s*#*&SXF4umnsu^%JKrx38GQAZAU}_8_Gh~B=*VM{eq}Sl>_NgDPLFfgRx@gg4 zgw`iMx4MkJFUuRK_Ynpsvd#qFm8;ekVfTu-)32D|DkDTGofCBT!F5kDd!+2$FsSe| z3K3<&4z|gnDR}cwXj9=@;YQ+XlqAjx@eAz;?w~utjOH6R98WqcZ}91{XIj-mc^slS zaE9qEA@oN`7Nc#y1L=)YA*HDi8q6;@Ct{2tr^ZxvOlRfAzr+a~N#m32B(;6qe5PQ( z(U*`-heE<9#px3jo7ZCv(}JMJzOV)Qw+#z$0vTDt4%vwtW#L`WaNis!xOzvlJOAd$hl|>0}V_9 zeaLtwh&??NPR>JgWGt>w7>2_g6JFIMNS~Nt>{pvyRwGf8u4w-1;bC>>PNADjZ$D$9 zzh#xQc@PUZV>$tPcblx$?CoyfP1T@hEK_F4Xv{y=_C75kuX>9)M#n(Stb*NxCo&oe zJUGl8v;+CeZv($?>3{skJ%$tH(Me_+Lv+t3!9=`OsvBYTjFB-UAqw=CA@);{%)C=bk1O>d+i!BPzQNT(wsVO)8Nk|S!Y-m)m|vUL4+VBdjWoycj}Ficp&0Tqjct-~koB>)A!w%Hr~ zJ0ky=!VNf~g~*OM)@*C5bWK8dZ^672F9~ei2#YNCqt45Jg>MYA#F)4)x&en}3^Xe{ z&Xs!UF}zb-a8aTCr4`c7V?NZcU+UHhbzg}YHsA=>7Y&J{wcuJw4CAA2MNdVZjAqfL zSraBTUaIAYLktZ(A?nP1khgWQo?@X{i21KlQM@-zBS0FFN)d`jmF1N5F}qArJ|G3R zf4k2i5^0Qeq*4Be=@g8K*R8&uuUthH=r1ogn}3z`EQxv?-BraZNf^r5Ss3|l6D!$3 zVb5j*n6n$eM_ZMc0JxE>R}ZB7!o0@$`Uv;u7}9F`LS=&}Og}=P-Ct+luV9i|C@!w| zTfb*L1YcMKX@gzchEW=A)nf^GR=#APWi<^pdteYIdLHkrU*x_lw1pUg1zl5{8_Xu| zwM5+0z#@^ZQ>O^em#zAdJjOX2AVV+rSuY)Wr%Urogp8DzB^5}Ejg8XMwgdGi$3M{} z>z$q@M#(L?f^Q0>)zxV+Ms;ZgvIYZ?i}ZI;m}V=SB5EswxwZlX6$o8R8P3yksGgfS zQ1`|7%HzZ?Af-xsgB4fa9clrbz=URZpB3g`svF6=;+GCo^3N=NWJZm{P^}J zT-eO4WE|OWtA49TSgYRVpE%i%%zRK^%XozkERP)%B>URaHUi{7d#8!`G+e@TSECvG~KI?VFN58-L6_; zGfn*nK|@!@WC_DrtRgO-Z9Rh-5x*Yngf*<_H>*N;ip0F0MAghV!ye(PZlD@3gq5ucI*{8DT>#14A05e)d*8#U}BpApKT<8;G#yKyqn&H%nNr8b)gK?5>s8 z0Ou*lb?4f9xv-sgO>5cE{Wb+a&a$9N%biq#fx{9)ii7VFnkBDoz$Ee@Ycwul=CTW8 zHnf9zrYS%ia}Nt4AYI)kjS)-v#Wiue21EP1DVbz@o>28S)BZb^}#_fOPA2Pr!Mwl$=q4DX5 z{iD}afZt5KELIyTid5eIUS0hC{}DTiR|VY36fkLK7eg5dieO%^0oIKIIvu*>koF)K zzPB|YFs(vL^1}Ad^HRMV9}<&a8ufOly4a>0JJ}zLQ1SdpJC1wQSW*bVxW{*tdam1`NR1HIV0Mn1hw@W zLUmn(#OdK0j}oo<1fl+6$ij6kBwJRY!n)rY-yTBE*+*sD9OBjh{UO!bwM`aA2lAyWOQ}*%C;}aX-0PXLb@2|0e7wbb2gp7S<3uNY-i= zUlBycsTkx;`>%ae)lF@*ATdYiM#O{iE>#yNoRIgQu;JFwjF`*&5J(1WZ1(CGh*(dL zL3N?%%}MEt_}2r>hXD550n>LkCJ!KGOBe5;rGL#y#?vzeXA zUj?F@?X-p(VA`OT9(hVR%N=zS$Zz>UBFJXv5$titBYl z$)v}uKSF@6U(ee3Bizrqsh^mnKAJ%BTn=93xY^uy$=pA)S0fY@tBusTult`0K`6S5 zt{aO+{m8vA1=D)v;D}fj+UMA7*|m$SCn_y9_4P1FRf_whA?cR3ZOf(JrfEFaidl3M z+A|fYpLs(Pt4UlZ?@S4oaKseT1X7SUib|xTWtOTR1S+{VpRqjh(B<*|dg*yS6$5Of 
z3WAt#|L07LV^W+*!2eFzsb7!yPw6n_HT|2eI~ZOtD# zY8myThecPc3kG3oV?>vle!!Jl;#GT4!s6a0`3W-#R5fGiMGq-UL zPO`}a{stXbZPMQfokH-*uYt%D`f3c|=eENl7&F41Y6zDX{5T5fN6}PXt5U6P5>_^sPosFmC%OwsKdCN1~ZxLb_ zNzq;Jtx;c=T6)Usl}0DVj2Rf`2u3ajBYodOm2t`jf_cEhO#4xEmC6R^m_GV8eJ0_E zr07@F4a4&i$~PjAcKjTq>?OPH;?-eZ_z)V z2AWkiq@B@A?a(o*XVC}o5LUu_IXNVU$tya#1kVeh@X)mBjB(MWvb=*AUDiI=fasv^ zYCT(EjOcI4MN`>@tr}JLpGMw&cH8HA{~LaHsN84XZHo`s^7EL5wlZLdflmN`Lbz(f zHNpWJDWi4IgJ1o3Z{&aEp?@Sr0@w6_FjA*&MN|z4^?_8RXFE$RhmZAogvG@`DFdG) zEBzTX#BRp{0_cFT$>DBy#KMF%90%={s;+T-1Z`bhcrAh_&@wcgGTgE}voQYd9cHw? z#M$paCc^JAVK~KaBq0(OMAvE#1vBp>XEu9w1a+h&-lag+S3Ud8)ojVJdUz+*iHe>} zK|JkgmxclB6SS5m(Z&(S^5?et7K zRoKkcl)`v-9Y(aLG_I1F!^|rgAkbY(V)u!#UMNs-Xyoaa0o^CZ|-hdS+DOU%D}gMBnctO0-Z-s8ZX8lgn$N=p|+?fH8+ zeSaHzLmqTOZ^#8j!5`fD)u z`78haF&&D)aTHZ$#}faS@9O~e6cb!w_jOcT@&El-{_~^%XIcJdS^np={Qo^IlANe3 z-uUH&3%!Ns>#w(3!Gs{xD#_yfyCGo#^0p6@F}z&{RP_2t5L}M&bd+D4Z{M8Tu^n?g z?1VXL7>N35JQZWW9%l}-74FK>jJzSQLXU*M&k?`>hI0FXYk^|{EtIBo{T*b;3!uG= zQ{{qo3~G%^{=D$=~d~|$wSwp_4Ln{llBHq zpQAtnJ_)ly=y~}PWM=uhj8g|8yas#V+HrIVtFIdkCWjU;?S^El`dFWV8Neix;R`{^ zH6n0DY>^o3{50MhpSfAle5Px~=k&Z5Fcf!P)@E<`t&E;UUVt+3dttTpJC_Dy?1SXG z-mXiab;#esEx4&b8(uA{xVr_n+_Y-&(zo^bw>Og#AeJC#QBv_W2$)SWO&EE9_`XyO zOVbltPci1hUv3xA*Hw_s#AmzMqIs)DpYGywiojH>V9>R`@!d3Q>T-OOlcfM3x|Ez56>XCv8zY`1ZD-b(# zXE)?U_XGIlFKzrOfBMVWa09dLAHH1O@^ep1he@Qp<&*KI`VE1_#ird)zcR7q2_S&L z`COtIRYmFa=`5F=`Enhj^Mii&@^azuu3l&{y@fI3Q?$Wy!!()qh4FX3I!b@6xj0XO)30!@@bTK`at|`88_0P{-E(DB{Y5ld- za=SOJO8DbMOVnN6Y&Ano9?LTam@lzYCto{&B0ll{w`m&^OfP4{Y4a9Mp3=3r)`AYh zV>eJCcSWvMi*oTM9fAXuUxPihvs+NZ(EC!`2ut3(aNBeC_>WD3vOBo}%&pnlY%l+P zsQ^me5+J}XO>-VRa!Ri$g3Yf&L{J48mZ*}g7Vj+5c^Q zKbH``@>5x{<3p9&_E9e9`g=|tM=+MyGXCOjNL9B_$T#$8pbY>ln-YEK`%j^WNpD{0 z1d+!sciHrdEM&s{-peh%TOc3);T{y2K4>F4@if&U2N(kD zoelN;1VjUWrfKbNnq&ywnV4PRzAg4ZqI)5{zy-LY!5s*E`XD#Y69#3@ncYK2DGa#p zw%RqVY6@^vEqWmer892NF8{e^|AOeMh!~$aKALF!ORV~wQyWMvm~Gmfo2wjyDV`#M zM;{7|NN!*`aT;om2_9Q9B9L*bJimhL;oca0h8%tOlI76Az6>L6E*yjs%V7}@N``-a z*7pUawZv^)3@hEE`VnPlkK=~*8pBcA!-MI`3{c^Dy|ZZ$%czb{NpNEnY=Gg31rHrn z6U9&uOP@IYhI^_din}39oVlI!h*d`kvS-(aA-RyC%{D@!F!Mm~nuvINFv zw_+mLOmj3#OMs0WbX0lYi8M{XQ1jxNLbB0j~ z-WetzUzEU@@8CJxJ4UdH9e_CAs*LBePTGa$gS{%L*O?!))jJ`Xo=f{G0smmPKnI3W z^O*Hq@Q9PDKT0j|Apxd8SBmC5NqmhqY?7Ibi;*>)h0&s8$N2T%9x?4NF>m}t!gniE zF*Vq*`TIqY$-cM|w!f#H>o#Vpw5xnd6xqr@u5B@j!!i(N1Y_Li5`3Lh44ix$UtRhB z-BEUAHyzUTW;#-(ee}-fsGbPaSt?RYmGZU|{2YAs%7l`!5nMNGQ1g+XMwiaRnJpP{ zX8tX_`-N)WX>=~kTpwaqsu^+E^F14N(%HlOm@`t2n_uG zsKJM@VKQi`ac7!0RXN1AGb_z+I1)$F--Vv9JQ~*f0&We9Em-G#xi|su*+$KD?;!!U z8JO%WSyL6SaJ>LH>%j{9nTg}#y_u{0@Rof4614y2z%;r+8fP;cOHcRxb$u?V1wA-1 zmlqUu2BLY)DbTDnZDCAGZ~ICuEh`-t9EhQwxPBunE>peG$U?_(lDKCz$YbZVZo-#q zPfnhLnVT^D_4V;2DLaqpJiSys2V{PfCd`-DpML>Ok7s5`f=P}zbzqjNvm;YLmX{ipWU!^Jz9n9` zABZ4IC8O-cwAv(SweaC+y)c|EPZJ~PXyG&7TTELuI{+(o@|aVH8muGP$;FhBJaIo# z^-u;`v}wldq3_@OpUZa=;S(-jsQ7`JQc7w;o)!KHM~;6y?Dt1mJdg&A*GB<|*?S4D z%TGu+bap(b_v1BSaxF6C{Y(>G%g&7MzHbprr_0f(O9vO@r6pxG^W&>)!;3eT8UZI6vkLCZDk6k$T*#?e>Eb)b`3gb5MQMJ1)F> z(`TIZm#agFtWGRH{*To;iFXbEekRPiIqA>3cEVw{=aVC&Mp1@n93>G)^~RkKul*@` zTPb)gsO;TTR5QYcR;N0Nh63IapUx<9nCpAEo_y4ogk=JIH0&eBlzFn$W!<7_&h!&? zg5%HM$8x1u-OjPv%^wT%nFa+{yb)ioR5-JtT7($^)WFYsNE$Xpd`-8#e`j-f9D-o} zvqw{%=|P!qV!B}rykX7$o=aLDO!sz)`P}G|qfvZitr7xk@a8q6SB2un{jxDU=g}w;4izX&pH$c z)qy^wJDp|TUjm_AK(pK?RkP;)SCg9I+1|n&*UjZU*P?u>ttTZ%adB%1u;(nly~|1J|(f|X_cdK+Z3oT1;W#d5BiY&*d_n?uV9$X zAv7M#=JGA;`L@!MTqx-8JM;SSQ{fRNwN|+YZ8@Il@SKChRiBHIAf=3vl}#OpcN#sc zG@|5|%V)bZ; zNMv$1*4jsuX5m^2t6qrL9+sy964NP9~@Q|nBm)c(hgcFUNSWimCv}zMV#eo$(ucjSc9Wc$8fU zrS_{6ukQjD(0AKjJE~hXyn_E0zoguDvM*P=GTUisw5~o%&axMM?rH+uzjxK%{_^=? 
z6id8KLsasQGjkRt2FoH@$9m&}vIich?`w9AWk2L-@*0Sw8|79lOOY*p$kVOKc%)Y4 zR3MumB0RmdNb-q1;Z-*cgw0QWYc4wKFO+@durwibFRybuFB>3A{hduHybeQ!kG2v5 z-e6@7-GwD+1U)#hCm0o{-;!@j`r{G)@n2#x@EYqs-#+)_{0bbthQScA4n~+wRzNI@ z;nepL&4zsbnNE2-^nRXIx}HYLrt$ghC$y_S+>xz1(9$~dF)NbVWwwCb^M>FYY|F;# z1SpmTZ)Pf`_d)&f4!$0R@O`^R0ZSu54?`&~-Io{@QzRR?jDEG{)^Pyux^z(;{(c?M z2H_&%&3^hr0fdt+di64^=W~OV-SBjuIiw?=1c_`ly6=8f_27eGwhVDENeqR(2Ibub zA%CuOxAJs^uhJOWXw;84E#RZ2ioVVd_D68&T6{v8>M%g1NPxv4 z5_WMR+?2hC&&TR{>?}Q++RA#TJ~j{MP;&n$CC@{x-*U|0`$*FK@@qH(U5?SV4D|VN zavHGV#|jZFMdOXFO?;s<@$zpTQ4Ej+SU|dSR6U}%8Ok~Hj+;ADO!fOh=TC(>cpkw z1DT5spl|o#xbsuC2Oxuu0$j~nZzgX32(``}iL^0#wV~HnA@jP6=LGuTutDgc*%sD= z`Y`4WvFRgd&is}m+bh(51dY$hj$N3acqB?bEeb9j_5HnFZh!U58X#;agum?oE!&t6 zYYcpsO0g6WboAq1^G@)?G%+!nctoR};-JfGFOBQyK_Mg)T!Wd5z0)7Fc15BeD_6^d z*t(zmZY*K-W9OgWrzaZ4QC{UvtjGRRI1!5{V8lbP+;Q>-AUsZSB^=>KTEZonib2&; zD;35Va^&htL;l^i)2avM^s^u^1H8vQ!DN+b_30IRZ3d)*dnM5~Pbe0fe4K@cDhBlN zC|hvp4bjyJC`x7AtTU{p;4*lOV7R@Ng#8|X0kcp$U*v7)1b%C>%`^$-hpj;wZlejD zVysl>-|H^!6LA|g6)8r0iO!tQnc@q223(;FrV)1NS+;AX1y=o6BT- z&Io$Vv?LQ^Yc1 zvoF`|ZDo0MU>HB7N|SkrkTZY?RS7u{E3SnS(=QuO4@D(A{I3rO^a1Z5L3fk>bp*qQ`|6JqJP8vUdm9hE1ocCoI+X z`_@VcMe2ffm-xxPKNa%qv~B=tq!LWuMSb6Kn;Nv0zr{b?1R0Ju4j)rfC!9z;lXu+- z7Yl(0(I~eWzd{_rs(z<|+EME-XAfm}0+IsiT)N5s+{7sfV!+Gcb}oeR)Bok8M~V3M zcUI;g-(MF5DBK4EpoKQ>5K&xE8dKPeLK1SKPVNldFS6F)QtS`+}n4we{PWPoB8{eZ_Go`Yo2QS%FjRQ`#=8u8EA2l zPp%R* zS@mB(nB-;DD<(c8AOG+7{eSKX;6SShd#dN|#w@QX$MD{bcPqFPZr)o}k1*SotqDZp z@N#Sv6Cv=Z75*`KdKpb^+4t1QQ_=1YTmm;{7+nfw|ztQ=sA>ZH1JcJ)+M;KW0?pNY+p}x z?`1xIP4`;q8Y$_|bH!jsY~7r_zOD6&!3Yk`PY2tdUp)krm>)udLNqe+CRiEXc6PMq z zl`MrSQWdd*lT>}y+h>0+7s{1U%qQK|sh!{O`w>J@k%4tvAkjiM)@uG1#xEBwOUbJa z{J<>WmC^_{&H3)?9=vZcf)fQ5EVUZ%4Gr1Pud1bKqf74{?a~Uby7_ zUS^7A4uCIHu86E=2b8dCL>0f@2cGJMYM}K`w|h!lEzyGnJHuJvgu&N8ELXacrTH){ z7c*XlSqAcpvx+k2Tiy+A>`AV%yxD-=BZ`+Yx-P-2ONJeXXcUG{v`VUHZpw$Qfg>p|Ug^QX}e(sQwlz`RRYJB)3wX^H>P?Oy6IfswF{m*kOJkYW%nP-ol0U zoV&^A5yyV_mv8E9x~G>5BJpKCnv2{zFO)M`=dM!bsTI6J68_QzU=h_{-v5d&YC~wT z3WvGrwF1{o8$l?u>Can2x2Xe2Cd);Dx+#R@zhO3^5d+3sgV4ZQdX2~04ZxWgurX?{ z`U|kWFS6Jg^0Ic*W`Drpzb^s2T}ibOXk9NvdTc(;z2a@0Kk$5U_q;&ftlW@uIIDUG z+;Dm zxGO`wiX+&yrw?7r(XA0N+6U=w`|-f(!OBvoAaG1%0!P(Z0&wh^n;O*J|JbC#JMe+$ z_AlLe_~)+u{#QSCO>htnWcK>_C#6!5S{_vW(w@b@MfnjCNI!F{3I{5_nAiBK&yj-V z@uuyS0)~UfsO}*Jis5mj+(v0pR+o@>^SPJ#P!+$yPz;ko(nEwn;V?`eFW54DS+b-N z&U|YYat+5qL5JDJpk!7Bpg`=K!f#7mRy6?_UknB!4YD&qa6v~)P1#R%)ayX=lE)@= zSeq9bwJc9Y{fL*1E(m(sqqy)ZrcA7sj1Mx)Y^dGrxe5{Wms++J^+WaV&ecc9T8ea? 
z9&zfweaPV}Fr23=DmrrGvIICF<yA)S z=mQ2|uu7^ea{(mk#%^1y)=fp)z?<#t)ja{h=td#F!E{eSa?C!!@EqXw_5?V-NT;rV zQB^Ybg-19l;F;&klmf?PkeKf#m8me@R}cioKSFpXo#%VS(O^JygS1^<5ixf&4vuX!@+WkeLQu>h;u60=^enW2}JZjd>r>7DyaZDg*&-4SW{m4KAa7Yj-lRJPR z&;7w1U^db?5w{=6ek0XU*tk3P>QTT~T1&oVi7V`bJ$E(YP}8>IxRcqBkQ3|%@69mb zoXam@5uEi64;19Lec%qJ2i*tRM=35cz7cPNiu{$x@hK=7l|l}-HACXG3d!--%FvtJ zkEo4+qJz0%N>eqE1j#e`!jmZOyIZ?|sW1MwM-ZaF=qxajUx8}uDYMoW z8q`XFL)d|IGnQ=+L!U&{+AXo0F+Ao;YcuW1k1hc@gex}(aypZmW)ClGf&1q4pujYvl6v)Gnh3CIIhs4*^|pZ}FYK~bG+OWX$kA~{Cs=6%bMJ3Qi5EFa z0fG`pp86k8_SZks;WJVy0DnfG>sNAt<=i5a`b>Z`x2sffNQUx3575!`hXkm>1jPY- z^bGlmyrRJThfQ$vbLbo>AX@W9y1ald1xV3<$9}0B_Llsfor8>)+t-g>1^Nv~DxvE* z2#O?waZSNzu5x9o8ZrGPDB9owwxH@pDf}|KFQ-dXv_OPs1ZMZar_3DAeu?|;eMhtK zASFSFaXBCvEpc8kq7<;p2q5Qv%XWMZX~Zh;uw>@w&eaxVQOnn}vTru#AW~|U<1a#k zQX@^(35ejQnsy+5>xUjSqUz`$v@_}~0x6%cAH4btZ4u=D8qnw@Vug~ioouyF9fE># z$fsZk--VYS9PEW|d3_LR*mr(XPoJ+^kt16yg4(IliCc8)@nPaGpe{)s7}M+Z^qjO& zm-kn=r^EsYGG@mC9p&6>PfBBTn(9Kz445~Ojc&p^zlEO_hKP@loKY4@v|5H|l(Wq zdQ~X_)>(s5|8U0J*9X!bx%3fYV&BcELUCB#=Jd;eQIM6fdIIB@#p zbj&!AfWSsT3uf{{OAiT1nV|p_K*ir7JW>M=e&!>!DE5&X$DPU8Gz`wiT3u;&yU$_? zA~20HpuePfO7OI%BX{xWRs~UE8zRZXEL+(zd>N0ngWzvd{vvRnCFlfw94hlWr1Lar^ zvUd)f6Xg_QYJju7)Hhug!Lp%p&Va}qbCM*UNeZR46|Z<9uiAy=)P!5

_! z*@f9qm948+NQVx|v#QHHCWlmLgAE|6oa@EC&G$=eC3hH6!tCYVQeiJldo+me)Ugdf zRLjwjU-5k?X561fbO@9dW&~vHc9#_pCmo|oyBQMN;+i=KAVDxz+J0u>>ae`X=9@IR zxasvJugD^85EYpXlv$H9sojPyu6a}emnzeHma2LscP3ZL351(b!^T&M0;NedySh#~ ztMU^ZAt*n#Lc1Et%!kZSg8PcjN`^%k6Q%s1(gZ-+eV9m&gpb-2eQV6OB0r|hdSS}K z?p1;%A0o2bGlEvXbS7GUMWsXyxK3xGQP(=EfP%;DOZCEP9JPFZnKeC@{E}Gvd6=o= zBo!@PC-&1Q3x9!%C2?Boy*r47ZJ~XcCNl;lobQ#GJJCm1rHUz=*1KUvIUck^W z1%IhKL{kJ3&x-Maz4x#Cq0H1U{q(kWLm;)R`enLHhOIZr1q{y~E&)+U<)bx>TGFEp z6O6;jOa#W_3Ni7C^=92ef{GjgBe*w=gfHr|bjE7{@R$W>0RmlW`#tdX2&`c5!ymNd zfJze?t-zf(cr*Q_c~jf8EV8KFAz>k*v0yI~59r=IQh6LaCD2WD8ks^bs$G&U;jXtn zqX2!t^!rAL=6I$^{q^8mSy+y{5^D(3|Rp?jKpmZJlE&;`nfaks-G2XFMQO?Tx`qIFVtxlHk;0zS~W zS>kRp3F@XKqJKNaSGxcicEMve5a&*>Ke_n4!={eQzE0*QMdVsm+EY7BL=U*RNmg#t)cnO)fLrdirm`y2|PB;Q;ap!eAe=Aq3H({ND zi-19eM+}!y+@#Kv(~vS&$k`^v!XiqQONE;`i^7;yf!7EZdsB_Jy2gG&pH}WyaP~x_ zELM0%`6nOBlLDV>_P%oL-?k|_3f?^UswIeS{|uHa#-<(axApf99Vs``RE=_=_R^Jd zgQ%u|&~e!Ow?oH{yp#Hr=YQc-xRGzZNevkQ(@PU2%=PKbbWxYWwL4feIv*Gt9s!R2 z0CpxrqKxw=@&`z|vVZYa_a@6LhCAj;1$XN6hm3wZc*Lqnqts#!y<~h{{m?BQ`8lx8 zC_vOPvWNC>UcVeJeG96C+cLvJPk6#jD!+DV>RUt*4d38A-Y5l)52SGOqrF4oeu=Mo z=@YY0;Gh0RRh;kiOox(QA4m@jYj)Q(eYDr-`n%nCmLzIEb021Z@TV7mw$CvXF1`bp zc4Xr=VV&DLp>fU$4mo*`=|reN8d?_|CnwGa2(xVW4C@zCBW$QPwsZt5XSkf7avEEq zeg>Dd*dN~;;lOC|;skJc`McH~(5ou*Eoae}AF3ZGzUZuyJ6SOLv){H9X19aIec>HUp^L@h^yCEI*TW;&`Az9c) zpt`%#+T(vAX};4nXIZV9IUAmx<^yNarNfq4*;JU36w)KtYzVMDGKgciTpyOg`{)H< zO*7!jUYZY9TyiZI&0BVa%si;lF0SU&FcF0+MAw0&+pm3k2_>iqwc@5$Avnv92ha(J zwc3r<2xeS+4=4Dfyy_OR7}kd~KC4$@y{M3Ah`G$vRj2Fp2^Vge=%c5FdX(cY4k;$J zuk`|xLbjNqXa{FB5mCFhS5%A>kg{6U$wj|orF<^+1aU*Q5DZUIVyN~eAVb`VCJs$8 z<1uGVjvPBNejHaB_qUuXiv*9oa+*auVEJNH(>`kEIfg$cFe=2)?OcV_oD|KdcE1>; z00>43{tDGhu{A_$`!as^=Vfv@%{?3_pzWXOYBr16oN-ZVme~$FHQaw#KjBP zAw_LS2qF%q$O01}TZ>|^(pGTmRMVaVvlc5Xx#^U-D)W~Cec*_{wgi%W4af*y*6&rm z{d@^m&il|TbJIug#CXZBFbtbF^7bH*ZQ~rBb}1qtB3Ro#JN6Je41^LV?C(L+R{~td zb?r8M97UwXLAl1upXRUm(Z65mwDLHwq~C~BK2#lI?b~vSWKrq>#$ggIl%rMN0Y;@% zrtL}I!Yj?(Do~PgavWP%a!3X9!Dn2;#x$VnXA2zyh43tNs95(8K|i)i#?3Pxq9F`n zcq{6WYB(1Og|@yO&&=^SQ9W&H_cMbL-cgf z5iC|y(A~}%P9vLQzc4h2D`Xsl^bdgyiPCySkD(5>jrc5)R)!uW?q2U*uO2}l@E4uD^ z>mqqXx*wurusO$n{O4)+pNoNVrN0TP93alBR$2=o*&a%hPq085J3!puqk{wme%mw> zR<#T4$8iM^;~6CLMUq}~D0H%4xIvk9w*OqD*63O?mkoFnsA_E=ao7UUtufGI)_yuL z%oLFWd1V#4X z9OX0_a@F`eh>`5~q%O-dho>h#2hXHhq=kv90l0b0K$zq

t8|jZQ3(N{Ud7W)qm?n90h?Q&E zuPu%4#!xPpQRaeM?K1Sq%lG6jW#IEgExAts8alfQl5M_g0(WzBQ*ywxSz&bKgACdU z`OLA~@8!mq(eDnMPNRx7D%urlyC;aR1Gb2o)E0p%Q1oMhSq5bKe8(-&_l=nW4L82! z<=r1$e%}6dqTvis;NkxpyHWmZ;O23=eG4VA`? z>qIwALL9?Pwq)vRE$-R%cw3oGnsS}I>ZR)*YjqDE1phwm{<1T$QT_aHnTa7vOzx*o zGCVKc!@tPppG&YgC|QfEJV1h<5yInPXWXj}>PFoJncd!UHUhqF(U4tcdG2uOq*;^qjeoP!=Y``X0{UaRavBVV+`rMZ7PZ&W1?Py1d zZn8O|J+Z|2Awcp96Ir{@C&?6dzNTXO6hYeBQwT2bOACFLd8chQ(Hi-!!pE_sZZ4)C zZx5?A!>Z%k*XCe4GD5r1Pd3ws+t{UGv8UXlEQD3G(|rM?SJI{HZ5kDs8G7gD2B zdf7#CBq=R?koG2;@7hq@_&JIlLoFBR=zx^J==U>KmdnbzPJHVyHegcO5@aQSs-3lZ z4b>RHCFfPuppjwjY+%A7rj&u}ri%js75Xdj%7o;n9gCqdmak#T&>63P{1TASj{Pp zSA!swcGN7|i&)S+L@q_mlIhC)#A|0XFL%$tu)K$+wdwMiP<@odqkz{jxBx20<5q%I zheibg%0k&Omfb*tYlDs($Lz!DNkyA4%Hth zCe}|uswUe^7pb%0vXpz`T4f}&o6XZ%qbV8!qv-JF4!&VFDSSj(Dxsd3>kbnn?qdu1 z9T2?7aPypVAm#%;8o6rCf?t)E5H0O$e0c!>ouAmIlmBO5c$PNF<}rzpoSDJ0Z1{kR^I6iZ^|v{(2_|A)NL2TWNlR)0h@+QFi`j1pIj` zE6<7co2*2O;{^@cQTcdI;}r#CtxD^=38~Zh^bCGzJo!BA@}_M9%Nd)3>}Ye7K6yB0 z3u|pH&1*Va3Re?qtDCd?@{@YA90nKY)l+|pEX;Jx z`y`-n)fw&DguaE0!4rM&=+b(JZE0upL-aR6q?~8_M%iG7?mX~2Ystt$EvN-UNQ7WF z%K??`8u^N#*(Qf(%`Lj7$@r&8?KX|vtgLpeTf@-rI6JfTDNg&-V&{7kaJ(HsQF23a ze*u-J5YK@{SB9m=*AVvB4#onhU%@S1W3Bbm&zBB`-LKN#Ih=AP^)Hm&7&dwXjD0q* zOFQ?YHuMrJjU@G%P+H%ql}K*bb~L`CJZNw|7!VEoNBi?)~!1*o*e5Lk9-ObYq0G z+j!#qu7k}~4NZ&(Y7}p=w4Rqyc4VubvKBSin?qJX$Uwa0305hAx~@;Y*Kwa`<)YW~ zV%^3scJ4R#H;39uqb!Q_#T5}BPY2oWrk@IUAZrO5lSgZ>tvdixz`D<-chdJOdi7;V zhrvt<Qi>>N)FrpxtFAJ&v4OsvxafA9w6%bR^)@HQvYm+w|s-Z4<2!>uczm)X#g+xg%g7%yhwjoBEu%wsp~cBBA!HiAk`l?@W0f{yKAWLAlcw zhIjvN>3U+Y2-pj(1{W(}{cf3hw@B<*DK{<4Pt5Z&)oll(boSV3J@CC)pt{|a&h?lS zB}l`4f-(a@hS(g|r9Cld(v5f3!ff^AI@p)qWy|K;sGLk-j7O3KD%O?+rst76fuaH*2}!J(qS zYbZU52)J$7DV5r2${;s7^#)|>{y}Yka@BNcSEA$>4%@Nml1yaUD8U(A%u)*T!9pbI$m@K(E%I`Fp88zEdsAX=EfpWBj~R-++&?NT)lI zx*s=aymp|-3OE@sqJ`fq9p|ZxH&WUzOi>*C;08^5!Pv%XYanf{v13Qi1WGU8c~|V_ z4&?uRySWF(*={0}6dBo%=G&!J&J!3>%dErL=%4nzY0r!!Gb5!nz}^FIb$SXqinfT_ z;O1VvReN*Kf==cj4&g3=jXc1%98K=y{-BdzwC20xsE$VqYe3j9vMXft^jcm)ZE4dY zZqD^Q&ttuxg!GQCpaZP+5gf$Bqaq3f~e z+frY07AZ=hFX>)aci-_4gIcfYfvD%}C>*6XTA=-$An z@&YlVnC`Lrjx-6<8$(^oTrppvrx?UR(EOY{9%so_BRDVQpywxRwT{irq~?Ns6pxHr zi#gIQIzJmS`B!9MG2{9RGmXHG3a6rRrE#v(8)gkKH9i$GLDIb!kqF)z4BtbS0Cu;4 z#v~1}DSZ;r#i9xT=2@Qzl4dTHI&&>);GiY{?&x>?<^Em6V!Z>@17w7&yAw6QUUtK) zyIc+5NH$A9&IyF)1Z+uUbA?K=8co49PR8hM^oRKH5qC}R{G?u`IHVp zLLKT8vbPHjnHU`Os5q~`%>H0uFqt{osNQG0O7Vpq3K`?_c!^uvY64zHFY3EGbDhU4 zO=J7?hJe%g6LJVwBF#2O3=V*<;FCciitlt-TQhZu0DOy8 zp{M!7Zu*tUMt{Ny{NY|*&*#qB9jHe;MD0%Msy4Q^^cdypWP*OTL;o7oM#|KGfiK(LG zq!?XqVRz)4u6YhOU#hI*Pli&AXNGr4)YemuR7&J}tH{>6ur+z6h-d=#brnnJI z*~xJTgAe245>ytb-*w6`GcS7bH^+oO6<@s_Q{^=)<@};!IFx_sCoEr}cq`h6{8=*b z>hD(H!;rf+Od0&4z1un%aJ~-;c@7_F)nDiiR8Nl=y5A%c(X^{@1t`E+(~q2~H3S#! 
zI4S<^)CP;SDQ?);~|;{0vzH|QAx@imXkZ*$zw^j>n}a> zQxkN)zPs8ezj7;jLo&b#$*29dX5aFCPKlmX9sdJi-rxTkOy&U%ey$+gk1`iJpEZ)d z3@9$YA`x3{55fW^<75h2jOTnya=PY8>eUSX3t{fUumLQlU6&PI6+VvJxT}5r!PeTW zm!mlqQ0+%rQ1mThMVkkp;}Q}*X{KI`&MQv(R664&iZJrxep!E@1jywLXk~$I5&B14JZla9%TW!>+zLOOZSM z!aN9kiFxlf!gp`Od67)I8(6{DI%}m*J z+5m82v9G?aA}Z(b+3lwa)lRmvqvxOEM$$?~U}&LXlc;JVi9M~?x+B?~cPD>6wQ7}S zvtDm==={LY7iyZNozJpFYa;tr_iuA)TuT}WsyQlb$t4`g)6%BjrNbYliA8jN9x?vh zJh9L=#$B?r^!RWJQTrcXK#TzfnWI~UDruP-oD~NTZ)%Ef<0y74 z^f>c|+}v$%(Zx!_}Z0nr6B-~rU{Wc z(XoDWAYZzk$N3&{thL?7u4VJY@+r{Onl+iPw(w@V1Ma8r`M>}iQX@=fX=Le=r^HZ( zG};tYT&XC9tq^>?R(wq;WU56Yh^orE))Br~m~?EEN5_Wl$Hu-=WwKRjK#Dpx;6HHU zfG?V9$TW~h@?n-b%JsW7G#pAJ?WyzAOl-Dlcp$!bkF3gAw(2c)YctY#{+#8s1t)Yk zFn|%Cv0W;Ulx8VryP`WEoSp;*O*(25XyQH^%5p1Z)NSRWemx2D`U{s!Q1PPd61LDw zRx}uea#ETpL61V$iZ7=+qrP8z-2-pB^vo&<6IPK}F2RLfVq(0Xh5ESi>@Gk4(DA{h z-|S@j4myxe*`ksymwt6b!fqNt<}z!LsQSmD3=pSqp19+GDboW1ma=j0RPuTOUlhi} z=8JUIpR)#;4%;t;;=+Ay%-TLFNUSf7Z1qD=6K^uN7x?s}$V)mKC}W<_VN5|YCE-y~ zVsN%~wx3??qDXARyE`lud!$n{r&v|SKAYll4O%B;;7G=r(L6jaiK0YVYMtAEaDeJ=HD}vzzZ!#)ig5X z>JZsyExb&{*YsIHLK&Ts{#FwBiqu$*yi^T`^K4hFMAazQ8XrTC1ewgG4{Pb4;~ImD zS62hotrrYI)mBygQK}5n2b$eKP_mX@9NgD1kdM!`{$RV`e{1ZNa~jt&Aom=}YHH}a zz{PHoGmp2K_>nu91o${wDQDtkM2P#_tZCDBfBP%F*Lx?;wdY?*QGC|?-4mNGHmveK z%ve2n_PbFymIUB2-F_IC7SUC*MV?>cB_6wy1A2)tO z4{7mqio=h7&8#_O=&FXClfvH!%BQGcakT1qiD(AQtTe2eZ@u6>lmW44HFZ-$+at-odVQyy=VG`&^)DHfS8=eh1SrJJv=L84pC-K+ z#KzN}7iFYG7b{44fyy_ILKC{Rf=HXhG3Xpzp~fTsn(!g*3nZMfDp*+*seIF>Dp&;E zDdJUv-L6m8dH0WB?A4cq0ne81S7GqIiRzH*dLJqI<$L)NSt z(?52kfXo+ebPBXDsmz_E{>@AK2ixx@L$%XPONpmr-%OmB6q#%}YIWf8p8RUDR7jN< zOu@GMTFi}-R+exm399Gw8RmoFOy)zRkeE=9WWU(X3<5l#)T4N9i^(q5CUVpdw`~z( z1KiVhLks=uL(!)F{WS-)^2WWsgd|1+9bjMzjjcje&KanKwc<0EBC^B)HcfK308|QI zGC`^DEn{Bw%(ixUZj{IucZo@+2#zr;P~Di0{IKjh=V8z9E8Gz3WnBBzGFbc4cbB!;h=h{k#Tm$Ee)Ne6BZ#7c`l-HcA1w8tj+Y4-bZBN^Qo9Mb(4H6#p zS!@{a=et>&@P6-7%we`ktz_|SK^|*M)}Uv2TEe{vG&szPNBH5eLZKWDx08b5-mSnn z`=Iy__QHN8eI3kXX=<&qWw=YO8mT=;%6$>Dl_<<@%!@E*ff?DbvRukem!9JxDiWWs zt2hYw2HE~6*hw7nXFEVkM#LNN-S7ytAr0I=#it*r2*HjMXhat*&*JoohCw#s;JQ{x z8$!&91^V7J?0G(Pr_aY)%-dWmhdM6{<+X^3w3{yuSnn=~8O4Ihsc3QdWF zPi9v)5E~_mrm_~Ae@yNlcRbthH2|f&p}^kHQpp8l?>CTIdJ?h1slYyLvY%ApB--&ha>{(S<1SLr?lk)uFsdgvnHxiRlO9MxW#T zGJU9I{}7dFeZ?C?dHz{9x|zBh}rM;Kq9?$g!}aDzGQr?@|f zG~y&0kAUQCtsnU(^U_RJRsH<2VLG=Gu6rPTvFw}xU*%7w^SHDMOAnL1rn)%ED{-Mz zM^4K_UaKQJ!$zo=n1?20HPwG&R>$m`kWL5b$nC+Xb=%HrC(|Tm{?DER@LSA#6uhb+ z(9Z$%44WEf;Wu|?s~p5NKKjHwzttC!0O>W=VV?EY1ZoX?Lpog+yKjodO6Z1BYO761 z%x${QCu5J`x^HTDLE-m~J*gi+r_sszPhx0@Ct0~zpNKql&WGVi*R)J61aX|;D<+$q z*;MXoi?t2`Wz0L!4qOoT49wcfE-#sKZzB#MMukV_fNxv*b25Hr%LIaMPP2M@B;bjI z;X2(Idcla#J^lg!Ip}e6s3)+VClB*76&;ieXX{CWh*ja1kMkdsD?_YabISsIY%yQ8 zMj6SbCY=YRm>M`+UC^*P@`fXWTk+$aGt&Ju z$kyt{>)TszLo{EnJAXl+#~_&yZ&b?}&_B?m{|nz=Aj>(OZzJkjn^kcE8d{CsMP$xL zkNO7WZDGvo^)I*Ee)Ke`f7YZs^cGa#1ZnNFDr>};tQB4CB_b3jB7*~j8;~U{RI%5zt8>9O&5nbqPE_5%6i+FXYl7sQ80#T{J* zJ!(GoufY-9%FNfXz?HY~i_857*sIXKXv)8{d{5Dk4=n!53*V2t`#H5I_YB%5bBq}a z_855yh}zyCPi|xnDfT;vCMT<@%gyIJ+#kZ-D@{DVwj2LN-zPIA#{1=hl`QHoE|L2x z*OVhK za|^*TZ8LA573rD6Y^=>a3f4y2MwB2C@Q^q}sRXFeDSnSIC|Ie-!6SRG7-oPga%rpI z+|Dv2-%EwqZ8nKrCm_06EpB|~M2?TLtXXf9qFMV>V^=%3p|VMmXSIF{+HK{l8Kyn+ zQ9;zEVI5K$#}3Byw3JuD;kR_jWalK=@s1FU<)@?Fujv%Rk?oCyR zJ*kfio*8^9R2(0j@5M?A{eDTRACp0sjcZBgc}RRmJE$+sP4XP25R7y!A|!#_m=}&g zhyMOXw0k}X;Ho8#(hpqD)5@CFKu9>jF^j$-e@4c(wu?3xDM*X`k;%hrN9j2mU& zTP+T5y~et<7OUIRXeVE7R%%+Y7MLULqCc1``XtNj*V2KvqZTX;H#m3wB6U%WlbJr# z?}qZ0F)_70@x}35-PUO`M}r?JUE;+m0Q~WSdg{r4E|4khqu7NSl~S+UDXuH;ijfTh z)XAbl2<^?;4r9mxG^v_VwUS%S)!s^`-s>DGn%w-=L@8l0qyyKQu2WOpq6q`YKlYoK 
zOwaZwq_16k$Do9TU}sOM-E(W;XE+oHkkvE`GZjeD0w0joL*2M1s|xg{_tR9Gi?y3l z`sez&ZGVfDdX|y5C*`Wm9B|RQb#2Nw_MQ7mG;#|CI&{^7T#ob1wn*aySQRTPtA^s% zv`JsWTldEYkL6<1==bWtb1yUwkE~PH)CdTa)ecUlUt2Yn47eJlrK_tNmpmA~Q#YXh z_=YAqz?7zBhV4K9eSE1${yTNFVL_Lp847bO1O$RiE*Q)1^b|KD9UcRIVi(q{Z|Qul z^t~H3T7I-h7rI^_<#snUG4nVX%g*JOrDUexx>pAeN>XXSYl$JJLv}zBvk$>40}n;C z)F?w_L6gL~+A%G}>c<2LZPza4J8+!Qd(e*OYdv_ik1WY7+7~}yi5A~aK^xlYD^ewp zm9_^tyCzG^i?2qM8w^Tn-m?Oo%H0jKtqs}QJ}H>4>EPp+OZEIc`#gjTv8X7#TrV4v zokM{`aS`tzE`e@H7%(p@mH4GylGN0Avy#b~cPPg?f2g3PpN(=riJ0OHJ-o)wWB#W2?kpN|P<3!OFn+ zGTG;uX-rRT7P=NCF%EB6xoDUvjvFPoE4o1|wu?Xl|NCtHTW!6p%gy5cO~d8d-~Jb+wxfz>fSSgBFzbEMZ0CrAxLty-MSW)Os zb(rsv|LO+*knUH45BOZ3)S74i&l6din)H&xfHhYH{VbrAYaFvLTME-0?rJ0Wv zd1_BVQPYEnCE z%A=*t>vSTOo+;?ZWLCO$_80N{Q&ZC=o`Wnffl_!0le^Qh zu&!(n_0#}Yand-_%3ncuw6^(c7eyIIQDNc96~nAm#aYZ`e(do;j~{RaT8aYz8G^Vb4y0Wx?(FX0LS3ze)$=~i18zBUz(OT&dvjKx+4-&*!_2}EY%Qzqnsaaq2<5PN3h zR`DyKAsZu*YWd9O=NPnK`r5Rey_$It>)o7_3zQU+VxZ#=mNS>fiq+qPCb(@UaMFy!u6*1 zggUe7%=2ue;vZ}67C|bO&CJ#M>MtdH78;{IDlyjD#sWp{!0_%@sl9qoc+M|*?LKbV z)5W|Sn{9L^dD5u_0#oG<$u*v@*{Zg15+}mqt?-76YKo^{6z;}-ig(8*ZWl61VOy-s z^$LC((oKMPh#R!dyQS_={%V{yN+;i?i$~zK3e!3-duf&o^Rc+$)$t~O!LU@?yr2?B zqK19m_f?_@a?db*c{wtcPo*I}k}7}IBAghiUk>7Ss$@4(=IE3SYQ=JW4u{$7%2>u5 zrT%njn5TEwwgRbWa+O`ep!Rx-{6TX|)1qnRpz_05s`1tZN*g%HDX|D2R15OS4l941`rr4p*Iy}=}sesy~OIiP;L)ta988n(#>vt&X($_D$yS}MN5SMQp9H_ zc(p}5&@9ZG_B#Go*b0*5?iaaR88%Y_i@7KL`IctQVZUf?%0Ymi|7RhP;J*|9Rp^m4 z_(O%BeJi0q(S}oqEli70`aytCM!(9*Hp7j-{1q2Zu3Cv+tJ)^39_{I1rL(7IWJ9<% zCwD1a`?c%j)3_xa86?Rg3sD^)bLsj8zKd+ZR)vHk+QtVBUi|q$yP{+|%!IYjN|172 zqOWoYSB19=^8I`*1g!9UYuCf1d8iZZFONAVz=sr={Kkz)ZW$EQ^op)I&^998@*o>5>`A^KQ8&o~uWS9E z%jnd4#D8rrx0~agtE+!wUF6|3lmNLVl!n8A+}kRPCWaYBUItD`3eo;`ow!Gvy%8_G z%s0_GRFLaqWp)j1mU{?>CLREnlwUQW8mhP|97WNuMG>uhwB#BHWHw(PmC=E2$x_D) zs#zjdJssLq+v2tUip@2h0urAn;CU8?jo`06YVKEwL-aV;I7vE2@;awLwNM=7tRRx< z16*A4^52L?astV7P2aH_;12z~)1{~IdehfG%$fc;3_BC1aH1_mG4Ux-XXKLa2yoHT z@W127hYL&?CP_6Itc+&>R%Fy@o_Ts9t7HeIoGsNd@$`M$W7(wepdEsLLtDb47{YI= zis2e`r-a|<+Q#C>|Ab(QpBX16%jQtAN_n{0FbOnP*NCHbis$vCO>AxiD?J!YNoqEX zdzF6)e9!<{$I{&mA};`Ys{IAi+x*XY?IrtRFZ}X;frVZ}m;j{cICRu{N8A_gk|=#g zdWR}2b^wIn_!X+f(lOPq>%7DGV}913P+L5uX4U566b2B)tWE9ag%p_qXqI}OG-Ww4 z^=Z<^#->*2kY0szl{58W^OtXH@!839~XgG!ur5r8O$V&Jl1 z2Gn$c_IK38$!<~Eb-W=tG?J5&pl7^AV>i6VVaZ2?nv_f7GEqL!Iz+W}h>j+|bj=|a^dQ@Eqi%y61%yRX@Y_$P~kgx7qom){(ve%;J8PT)7| z(G#x|)zsn>Iy_tBMwRJwXl{!^EQ-;2uX)AM8ka@tsHf8UCJH;y$03}Q2hTi!W{>q? 
z+~$Wt!%u2kD1jADqQ~pBWwp4k{OP`=zWXOF(1WbzuX_yLWWEE+BBfRg;akv$f$@z5 zdO`Q0%e~B!06&0w&>(9HV!U_yT8^x ze+TcJ!2^!hdax{jG@%vjd;vY`^vkG9wzYfyWuUJ><0WSgt~Z9Jm~e6% zd$NnQRN>IW$Tbi2Z!picjU@YOnHDs1%}6gUc^Y*$@TCO!gsw)tftEFP|3zj4i{klM zcuCVL8L0v`ClQNU6z}d^_lLU+lgpzOY$RLt(!YyIC3Fm7aXvPZnT7#%HNcYHRI^H~ zf8fc%nyv~}gJxpM{Cb@KG*9r}z{o)2Vyd<5tqC_bd*RT!&<7vGh;`SWWn+cAX`2D{ z^BP0y#I6Twl^xbSOi!;g$7@ugoDiu@OO5M=0~cRS8} z8>BCe&S65Hd(M;}pg*%YZ7W?Z9eS&03>79hnr#uiKB*JVk^gi0A?F+i-)ga5)@(s5 z=+GtKc5o@)mg!&@k~vruSISiSZ16G$-Vdv*??FQT+(EEp&wAAKN!YpHI*^lnzm#8_ zST4}AKldHO;HIAKi6A!8?K}kfvyn&7kZ~kTC`tO>^^2Z|&F@;9oTgGvf-QsoS<*>b zl+HJ1bFLYLwFIPGYJESQ`Hc6288x8>A;sJ`e3tgKiqzj6C^aeOY&@Hiei$t_LBa7L z^7#$vp7Xi!0!UOf`we55u$bg8k!@|hY%nEWCmdx zy6(RecceDG!7twIwp3TOZ#31RM$QSE9BTSllb|Ia9JCA1c?cb=`p3Pgj4nDAv zjcr-E%Cv*F)Bxm(;_Kwl- z0J!+-qgunfIw@zHfLNvBpR&PaC>@G-(30ewhw|NR{XK2s)_1G9`-eF`+@rUJEYCAu zS-z!?!Si_gOq$?@h}O2Ucnpoaumxjt05%@hTeVxY7+B6*Wi`vUG!nuEH00t`)q#nq z((OW9!Ftd-QNf`q2J zl^HHDrD1SZHOck-dhp1Rs$h3|>wwsOxS_FZf&fu%8F$|iFa+kiIgU*k!ES+6{Q(4aDh;Y{uPi;YMZdz zfcb40r1r_Cta?OAP=z0%@+bJ>4YI%8&ZUQKk|En{|Az6Y^-qj^}IeS+oe*kyOxj~B-P@D7tL15iB9IOtk-%AwF zg<2bn)6F55=Hvb=W9FVJ0=~wy5_hG8d(=G$kuiBsi=Z`07A7;QeNctYg!r2w4Kk9O zIvU)S83I*M52~3gz3C^iB`JP1Z6ZF)r0+N5gLea`!^!L+Ft;^qg=aI2%;h0LL7KOp zKlLFA%05UGqYEi#{K+JknmWv_YV|SgS*W(a=QJPvgA#n~{edri&gJhQ_&7S*abTY6 zZ=m!eJhj3~h4`5FNdB`7z)Ji-A7n4y3$k+7a^L7o&h5LAo2F-w%oGi&7i+}kJmtgM z8`p@5Zmb~~JP5c+(acCb9*^5-0F~}!F2b5~?kGN<*Y7>9*0?lryai~z?td(8SnYwK zuvz1}7mtT)%hDL8o%ZL|F!mhwy}V@rUu$E+4;VR&esfi3+62=o4xfP`!#Th>7eTuG zP8}ew|342B6y|cpslQ^BCpD3dsnJ;ddQ8xuynwX#uxl zZyw%OsPUm-oX2qpUKLyfB63{P>e06wQg&IC}~jM!ky9D|6|R7 z-M>3kl%%DS^3(N?@8eE1e;`ir_veI}-n-Urd>O3>MAGCt&BgE2dzREMsg?~|L-0`NDozlWNyuL>p^uicMeB#8ng_Em4en&ErAN00xnhhMdbZ%z1oja5;K z0eQfPvD72K?{8xAm}lrdT4?H)XmMJ?{ug6k85ULBwJjweD$>#j(xo8MA&7KJx3r|x z(52E6lG4)M-H3Dy9Rm#AGjub=xAA?SJD%73KEC6a9~+q2!@go&Yn|&{=W48DIjOC6 z*~FMRmCo0x5+wAQ_>^(&_OgA#0(*DVKsa(s~KU=cSh}G|u+M+URDf~&Trr3GjK6SsG3)Xd5%TCah+58O#w+J$o)W%IP6rxB(dPIW(khRi*g zU7b5D#WJ}IEK)9BR`^puVTMAMbWB?vZHo7Ms0})B;C<@AQUt0)NnUgpP4EFlTu(|5 zAuzIHy(Xr)k2*Cr$u-lAmc3j${^G&FR)D0q;0otq1g zP|jrLHmxPUwfmmPoru0EH((P3gf+o~!$VHB&4c~@{iSBdBf=O6sQyQhv4%nTmoK+q zY><{v&F3ytb?AhsvX||*^D9H^htXLANat5hC&#P%+49NKfM~>|buhF9Aj2!Eo3heO z(!!FOgm{Zznp$3b|4s#!p_5B|nFY}CfF9~iRdTE&QktpaX85-gAVNa_%yjSZbeU_w zlu5BpojK5~^o1FiY~T4KRu{v1NkzyRAN#n}^3zhm-;97H1=8}N@-l;Issah3&OHU% zS`qt04Mqku*J

v0m+R79M#r8wuF(peoR&;f!EG##Qe?f;IgNjXXJWC(O8iMk^)* z1TbYErB}iS(~p5(g+1^q14|F!b`->kz#IF%wIF`{_;GXrD;Wifyk9@RF=C~r{s^#l zQ6T-MNwcoj4+HPrKG5C=4pYMTC#W27MB814% zR4G>G#h!m2`#ZY~rUy>w3Xk)2%qai3FY^p|*Up)Z~h=)Om9T8BMXzyg&vst+jy7-D;o z8)|B5z;1EOkq7QS`*(?)*CJCU=vrF9jm=i^VA1;ft9@}fK!H+_L%&IU#?&PDvHXko zW4Soi)0I*hQ@8eq#R({sa9r&rxByyf6tM|1UKT#|5xsqM$xo<+gF3PmIRk3Wx4K}9 z&2oeuK%6ORfQ^NPXB&a-;qZv{(iI799XnR;(Wk%e*8f~HpsqORXg}*0O0689hxRpq zwwVeiAlgg;K>O#nKu74!0q%LnTE_|gq#xL^u!lqwL^|F(6&~-a@IX^ zLAPidorYJQd?#sHiLrc_3MKIH$G`JLs5~DTu)BDSv1%7P7>NDw@wsEz2SUB|ZUbRp zy5M~gs(93D8)!m*O7XU-oI%OkoUlhyPfB4BVVARrow*P~KS+g1*8M#5cSZV|TWdf| zZ8mVzqQ+fN5!m*Tn+3-G`j7-KhR& zngWg1&Ssr9gW?{biHV9DFa+-Uk7By;d%nJp0i57#7?Qq%X5b`4+snI;vEY{hOkAQm zIs)VZAZZUaKYfKz+Myj3iJpQFPWwP<=$nTSV z_%Y!%JF{8A>`pkT`|~Wa1V{VfxM;AcsakqH6s)f5ws9CvrOWEw{GnVd>$y4wYts^J z?&-SSM-LE3-eaCqW)`W@vZYuQLK&9;`o2{O9R8U=X^PH3j$eDGRFtE8z3U+zKXa@0PHjz`T9yf4wAaQP0j7eY*<{XWpqB2BorRueY|j0sF(x(q*mH|0BT0 zrk&t=B+?2&T_Bu;ynarBk@*f8em4a=8Q|}K?}v(3>F!Jowyh82TkbBJq({5pL791mj$*ybS*d#Zy8EtY%k%8~aAO&fat~v}7BP+#xkARY*a9 z;7X}7zTwjB!oFX`Q2m&Zpc6QZ0rNa0R|DvGExyrJ|MrH%DwL9pOcvl3t2vJ~d%D*< zZsG!cH5FKavdH~(%Hzu`ikK4{Y36Ip?5|nHQx?JYzeYzAnbm?W#jaDroJGG`Knh_# z*T_5Y?YWrKlbg{~X2xec5pxs=pvcZwM*lrLw-ycy^$o)yb{9Feyh}>1fcmApYxB1h z6fBsQLIZG2`|_V1UHX04pM&<=Hvt&(SdFbdkuCAhPn^vt3ZlTtu0^yD^b%B_$8e7< z8#uc1ym{9_g0QUG%qBZHUKo{3Ju^A@3+q$SH&4dv%s~m`M;9o-Dxpd8>QInPF*yoN2z4Gc=h` zqV~0!%zpLB=#gS^hpT=1~gI7(?7np({YDTTZeu6fRC%sUBT1bqS~B_X3*Q(EqT z*5;OrNpE!8_Ds!Nz|CPt&y9KiOsU1ovuxfh#q|voy3!MsXVevDF{kxWm)rAd5QXW= zoSe1gn~c+@aS?0pZSdS0h0!>g#+}UP1#}?j>Iyi4L&S(VO=V9A=KysH=na7o@jj#V zOerg5IgREwM$&~ENl5De{nvyfojBjORs|-c>zUh;`Qo+iPI798MN>$xeLK~HEV2!4 z@nPqz(%Gs*Xhudxa*yD67xKDF3o1p+9m1(9v#jz~y&QS+I4ro6Ug2v_VamJ%OBDo~ z*7bH*G&=*g=L?fC>Lh(!r%;$mR!QeWYET^bT_5=BsKu4}wH9I-3}qmJAe=}p)X=NG zu}(T)+i9$rvFkDy&hX+z(-W1^@(#X#=^^NiwogWu9Zvd&-}^#MBhw+J<7O3B{bA&t zOTPJ&ss~snoqMbB)fLs+`m-~_B1S!qg?(O+mzS))-y3UJdOrXzklj_4jn}y!7U4vd zXa)1+0-I4E(LI+N4i??|Xp6~WIRMTnmn1q>*5URgikt}wRZ0~0pp@-+WKK%`{K~;qJ`1H(N$}5E- zJV_Sj{&FXLP=ykxoza@jk|8oI$;Y5tut09+ww)Urz@emMKbV_~t#62niVu} z?y>er`5EQ0mv_2k1pyy=HBa0-=q#p6okyGI7F;=nb)MX>fE%|3#hZfn)u4cCbZ59n zz?J~vmCR>+#}}V-2cw^Vn%*g1x)5S~!gL|@9Fb`sH?P0!v`iHCWzqWvL0J}uZM`t> zt3|XqU0<1Es(#jZ19)5o+N(qlzC~4eFKj=gT_u4&xqee7lVhSfn0poR>v&ao`pkM( zz0ysyZQexB+uWRCUx~O{R72yNI6;Hs=ruiL%BO4PC0YH8U1``!VFuMI8Q>QF*TH;y z^1x764u5(_hpKfu2AnX2cds7mEYA%H%yXUCoeEguTwbdB1F;UCb`dAavX=>|Wf}2L zt!0;qlyGfVV;gI}=sI4c>uZgM=0>aS)7D7_k5YqvYQ6^RN}}3iW-$wl7I=1K$374p z;)M7Kx}RhMuGLnx>)GDBcSZb0o%aD~i3o`G{VgU6rGOgT44{NfH3=o2%Ti6N%n;24 zoD_eiV?ptAem<3uRqN^JSm_w1Akw5hWqzLu%aS|Z5+n9N9GF$BzCy9yA1?+#Ws#a8 zQ**mGg*N&arvej5se)RM*XaVYj72oo48FsQtoB z<|>68_oUxu(X|B6l!b6&|EJIurQo3>23HXxU0m25?Et~7IfZC=1}MGX!^BM6fi@I~ z9XR{4PHzV>S}eba{r2n~@&iDZr-s=pDsn!hl0fImd3>Ug0sYp2GQ-n2f>I|4T36d> zFn)yHqcqcaT0;Oo9z9u{Pa{us%zS{D47uquheh1|KmOJo-%42#p{}Yb3C=<1P|vE) z>P|lOHEYboX69N{i*0E2ZpZnhTDH*os?s(V6^3B~&Wy$5OF$cbtWcAaNjWp{jEv7Qkc3OR^b5dM zPo%0f?!kSMPR8@W=lX0J+wghH;L@#FyuARV6-V;6Yoz_mnP&r@6q6 z&8sGMeN(R^;@IyQNlDnbx}LV!@jZ#RyoYUNJL@LqY++GMwbZQUP^)kE3$f?8*9q6z8Zz0mrh1Udeyj+dj}dx20MTYd zYdz)U@aJJnJp*Mo#N^KsX5@zLxJzX>!Ee~Vi^f{(P^oEX{5tr$$j%@zU~#LygVt5N zd-14LzBP%@xXCs+Iy=*ZOFN~4r+s~;oTq*1NrTU%zZIlBhy(Qe3EET8gTaX{78U^d zNsx|ac-0j~PE)-V!{!TExqNbeKo}*vF9&ot=`G@(%fzLJZ7D(VeZZZUby@_-qI22r zb+t|gH0m0SyNEfBAFUTw&|U&(|HyG-4TFM+{PVeLsCPVxGY|$H>C`D#zTZ~~RxMog zo^h8ORhvw@efYD4x9n ze9$c5djol=f)iP%h*&fhB6*?jhX4qD1b{fkY4^`z^WHa}_599yXg)xQBvMaZYCGxG zn<)IP8L`OUg;3Mm+!@Z!Sq6n`s}!rnyFJ)3$rmiiu&-K|trDmCjh(uKG)UF@kx#p@ 
zVwDe_Z~}5CLNK4&M?@O;gS%bwyUgf4_x&Yt5d=-`vgdlW_BXH%sRRzoyga)EXHq(yii*|C9o2cId2X$@ zaW6v|*Et`Ed%IRQfJ(0du5osjA!y*Q-ryl@;eb5CBLyPn*X-lP`POVB<=AFZ1B1e5 zY(tg2!nxZdV}8rzyQi7%DpI{?omEeMhZ|8Dq_&E6L=`}cG6pJ_A!qk3 z-HWU1LN$qwfSz$Geoi}GcYT0t*veBNb-;ksguQ~F%=bR3Y!7BZv=8SKt*^2f&nJjs z(Tp@SJX+_hDAlX8H+d3P(RT{S5vpQnHj-AmgolM?Fa8X`(`L6{I2`9K?Vu}3^@{^A zRlV9&A_AKelq4wpV=ctK5EG__brP**^Rk7dZGeBU#sf;&t5`^we07TJc_o!jdK4@8 zMtKc4Ex#0O-Su$uhf-wQhOce@w)ce-sjAHdofG9rJQJuO<|VG~%nKYY4+@WXH*!YX zEK}eE1V(phQ1v1Q2<(ox0Xt{@%rofZF>w(~I~qMtD^#9G(kC=qGKe1K z*yd(lC9P89)~mI*-zR=KVF1j)7}tS}j7KqBi$Qd38}LYZpzZ0U(}r218&*i^JCs=* zDXPHVpyPkw9f)yteI=kazoYzcrAN#vugBNg`8-hNg<2YL_YORJG=d(wu-@_3%{tm z-GHdiq|Q=biaLorc@w7YC4V}BJF*`{4mg&1Xon4Q%-6%2-OdNz@xAh#Afm~e8TxEQ z)7XU9ITS{_erEmw`=Bq?Cjs(@qs2;%8Y3w_93~@=y)6Dw#sLQ?5 zRvzsm$~fK0#a_y-+g3ypFygag&G@$SzC{ZFV#pkMoF8C@Qwi(>tw29NYI59E&e!IC zw`)4V2}ogb4kV43j^!$4cR_0HXq+~Ne5dSn{Q;fUIHB_uyxU>?P1fg+^jU-~a-o=P zGN0=9!1U)A(W+Qq_jr(-gNt!VZzOAW41_7`NeCqT4RsS&Ov}xVvNxtA2p#Cm1lT*a zOKn!m%F42#LVIziGqx>q;6{b99KZ_>Cf(3TcxjygwY!JUAmYCbu^x;$-JQ?jGy`LB z69A~M+KfXYphz6#K&COfbU3ORak^htNf(1M+u$4yP7#!8q?2HKW=u(ZNCxO(eudzN z{5bDXY&o9LZZ9s~Ml0g!RcmG>o*J33S`gN=s$%=3q+!OrLI#JT=-BR__NC?Nsa%gI z&-Ob828X33ey!5wftGW>>#>G0JC3rV#}}ubfeIe|X2+lv5YfbeDw9wY{5oastDti!8Uj|* zQf%YL+FYVjC+eXVmg5JozeWRbXdL_W$_)K>DBbF%jeisG{b6_RpvvN%0XK1c;|;fV z8_AT)Mr6Dj!9X=WNrMwNz0NtTF^Zbt4Jzsy@1=1&GucZIO2ga?2!3F8x5LYR>aZ?X zc}|v)Mqbz@BuvMfsyC6~G$UozL922R*=u+)M3|bPQjze5O_!N!K=KW-L~At{@Fd4$I*{) z;p<$jrQW9aD^G)y(-1NuyDi*^N!89UwdIYAgZ;6`$J*Ctk666M_p8 z=6Tr1nIl#2PuccyUF^5(YC$OTMV=?Xp(LOHv80u$?FX z@C_11ZEjYx6_@x)U*`oR2eT2S4FpBZE`$>RE+Bh-eVyG^Ixz%ffh?~S*MKCmuKqQ6 z7g7;OB3rUrI09ZxXH@3i$A@N|{g-_+T!nhzv! zsQ`sCRrh~~DgP14?!86^hW{WXbLWYX+3qMqArJ^gO!%X1tV_Fk|>gboG zkwatfh?g_x=7gQ;4$^*vwB^GHRg`u|kK&7RZ`|;9FFy4*@5*dg);r-sUeI{=Ee^=i z)jFq=3KLbvqshT}FG*ibkHC)OrE6kyw@+00>9wENTM>v%Q=6*YiwU&cb^n5+$#H~5 zJOtG{=WN*$pXN;NVezwAQuLRHodIRx%mp>rKYpIQbN{OXIr0Gl28Z=^S&#|1L;zkci{8`% z6hH|SFyJ0xu8N8g&rtjD^NHl)lRxxclNSxv>b<-$3KhNk617kzFY=j+ zZ0K=3q=Xf4r~M{VnsYXaF&i*X4zt!N8U(y^bGu2w;yuglF^|HpvX8gT2qbJFvW9lpi zlEjP>iIa(mNW0&Pyb4TAIOFwADl5S zsqd5=?k)b@ikx#@C%*vbjXCu->dX;;th3)3I>{sDW6W4 z`3Pv?B%Oab$Z;_FE-Bu9a|{mR7j=*s9-ebFyTE2zl z_K@?6p~D}TOBc~sdXDsqE)H%PRu4(`l7!EhccyuD6v0L}SIS>F6j00fL4@7tOvhw9 z%$63H%e3{U&r;lk6QlTqqr{^1>xpDo)%CI+YVfffI%qv!u6}T;R@5&p(_zoRK$sjF zd0C@V8yZFu0kI%x?ejRS>91y?mW(OkMISpX~6gOPc6JuA9<_c^l37z zgAVbrYzQ+U(usr5cXjB{%Vr#iMG2mJ7mdW}W*vUhByirUx@Ve3(yO`JsVMY}W>vkg z{yd7t=%a_gfuGKiUFdAIgk2pNwun90XBHn&#maCXoZQI#+FB29kh4gS%0&I5Vq?(c z@*T|cVW?N_Yio6VocAUSZgP-{%RM2UJ+gAyPF;K)UD63Awxv^@Fr|=c-7neQ-%x*e z1Vmn+%CiuF7ny|wC<3f{=jvEQ?8pb(uA@G`peg1=#b{ADP<=K}lr&JmF&e2WRV!9X zS6IR9QmrF#2T2|GF%TKc7n%~mSxRL>KLql|n%y=qi7$`SKn`&T zb=^m}|K=qx(4j^mE-L}A^#y>x$L&(r!5mK7G{|mL9N5l+0t4C%lXbfs3}A=y;YF=g z4i$~x>~j*g(?fv z#|Sz+i*-n(ydMj1@r`RoL)tcIv4I-v2k7>fn8%}B{E)T9w(1G1O-Em zF2DLlCB-c@vna?XQBBh4XlqrJEXcrFc~%#^?exY0-M^_42;ny0vn-yB)aAqKZr37K2R?tgEu1 zhulU>pSgG&;+*j{a$~nimx6%EUb`mAvP#ulcJJ;fP71f0gyDsTMo)lnwTM`RvJI;L}v0Xz+od=Z&~kt zcdlk2U5MuJ#|izL3lTS}RthKeVjU^K_u72dP%n7bSibaGoSzo)+k4=B%;{Py>ZLvs z=e_!Sv~NXt`E~CSj#WLOQwz&yt&(kN+GiTDivCdCP-w!Ngysw?)AK&}&TqXIDI;vj zsZBYRc_um)4*$gV#Z`7vq75r5lUa*?NLWTB z66X3(hs1~^Vsn=m9g9OmFW=OK@T5^TFTv?C*_5EisG%3NC&v;}8SHAs|8;o}`HxV9 zCs3LS@m#%RAYBi1C9*_H~d3qQGot^7fTRk(Z#kd3X-@0EdY z(rFDjK(Nj^La=kf=RHJ@W3Re;jM3(&rck}l%AIv|+Gen<7OmO4K_}DSAEJ)%BQ0N< zgi16X)PKzYd9;*up;eq_Wq=wV3K{8ImVxy!0GUzave;;yc_=V7VxP-ZsHs}wz}MG3 zEtFx+IR$D(=>VTkBdG*n+LiIRdkWa33VE6ztn?6`eoA`VSzIf;GWD-$R4dLcZ_dcw zOjwu;;So3aZta}0#>h4ufDx+@0u%5?)0uePRAR4(PgPTTZHBZgxS4k(EaefyJAKbz 
z0LK{-3-Ny>GcBke;1 z>5ydf{W1G-lgOPZVMVaW`lB-qS$4{AL40viN7rtr?;1-*2h(fKQ)hVCtMn78S}XWn zSqmC_I9FoJ&M{i@aTo~=3i4*O4+PP!xxK=}Q5^tfD0JQ& z=`eNRT(&cl*fwtZQiXT1TJmZ!CoYhhv0lqEXZz=7?G(?!{&>p+6RTeYeFw-z_X#Cw z%Hi;GlvE>GB_1p!gqD(0hMvyr+(JV~L1I#DqVHQ*4Iyy1qN=YZaN=d4xIX=K&Ru~} zvrtBjheQG3@}F>a0{C49AgeH))HO9X>{8y80Rn6rDU&mH5nKaF1NYgY$3we-zzX1+EV42>-4Sbp zyMS%F`XsSnGQIO?oRb4^m;Ue;@7-nuo8RZpia>&N?=!8h6k$$)2vI??O)AbyI`{=-qA!^bx_ibO3KCuhk7N#dzBv%t$Fz|WB? zdLW3ucY4TWLyx zAn(W$)DGLlZc!Lq9cX_$^uk2HC8;S#qYYh|b9L5%@iS+5$@Q1aeRrA-gSfWzZR;%C zaq*jxk+?%8F7eOaMJk~Ex?+v8@L8`jW!QzVp zzdIK?Wh!QqJJXMj_i#Fx^=Rd-CA)heeRIWZjtHr^u?By76YwhMsIA?Sjx;pRr$Isq%jcr>B= zioodKkTPH!h#(^oZbDR>suTzO#f55C4=0L(5bO*)%O(q)YnsN~cR|*>@P%zS5u*A>xz?*jebi9{_|u1 zb8C@=r+4fG<@D67dZRF0TwSxK?4uky-K2}zd8ta9R}^L7C|*G`W;& zSp_^s9pOZ(IF*vFi8}0=8s?JT1lbD_8+p1Rht+nw5yRZI*Ocp%lN35*nMdL>9r<}v zzZ@5o_476)uij@s`p*;n4ncvTCS7@q0Ee1h=Y)EUM#9MUiU)_hPSA05#f)YuT2_ik zYrnJa%-O}+*~ZDw*v~reSSGoYIZRm|hW36+!P2DDf!Wq3uK=K%sW!5R3!LbE2k)k>X$a2 zn4bNxhejsS$4)iGRnDX6E6kEj{8t_+q8c-r@$DPvLbEPn?8R@Q=M?epM8^k(Xhv1H zJK7+?rzV+C<-cdeXA~2kNlTLa5Kt?(&`P`|UJnf^TH}oTqZNqAM>VKR$v=C}`!~y& zDU9rTxSG}4=_I!ggJD2U8fS_S(%0J#d&_`Rdn6pMXOL{o(Z4MCZ zYEpbwY8eCakjke!(A)ZfOrSmw#teOm4z{s>QhNU{-<5+JEUf#5?|9?&D7_pJD)3pc zPj?@r19Z2nx71TF|2BPp+7_P%ybsKMPX1ih(=UQULh#xu8-7;QGy~15A_2bL%nnM@ zwyxmVU)ND4s&CER({#h&ZnmGdyiCALuXI?K4-O2B==jaD|KDk9WyLr*4&s*CwAARSCt$2eb_j4^Qx0O0Qng z?e+n*z4sP)QJtV5uBUn@5w7mr#-xCWzo?$&GY&5=fLppg&TF*^@cY+bV4s<8$F?5*V-BHxVLV;9?6xyyR?>*aWm$-G`E4dwQ#WN z-KE6#t}Y36oBj{{>i(6~m*u$+eW5Y)(VxqpF%Cdsk*xKc6936s>yLiV_b1m}qw;JN zu&AfFh7~RJChU7rPg@lfL{lA(=T)z)ewkJucbu~|LU$l$8mZ3fs-BJy3kxx z{q4X1wbfkMkYL${DGij)+Po>kuo6iN!4~f_7n_SK$2QJTM2)Rdtk3L)-;;F^>KR{; zV#aGK$gSOBmN|6f4JK_LapDabO(D}bTjeI7b)sB=_sp{ulK;8iM(+9oMNnxSEo|vv zTc>P`s(j@gkxTj));gMSDvOpf3-sLRFCk$;y++?KS&@SORr{?S%wE? 
zL0h8Cakj%OI}&g)Had>m<7mkC?Ev}?_npPbVG~DrK_319nm2zvFFrim>heO|HiA=Nk)2)2bd^yUqQw=o^$ITyyx&Cyx%OSzkbv0HI*k016hPB z&Qt+j>hi!7p_3=NHwX;q#i@W`aJYj0vsvSs@TFz^>-CL`eVX3mK!SqaWL7*@?}EDo zy)j%p1xKq6{Z&u&>Po}$J5q#CK9;36RTt+PE!No1H@8*srIu{x`LY%?IPdOMBkruz zBTPGlnx(cw+fI`*5IsswWWsr;B^ijMu8xbR&~^^&LdHb#$|BLQQ1FMZ=Vwf@WB zoL7H71QjDHnV`3$3j2E7p{|rDC4GQC!~^nPYS3cLy!Yk%r*fxl>~H%AV9`w?PkIyR zC518V*=Qa)T`@tgclCq4&lHn?&h&-EGEL@KUBe$9^FpNDy)~+IPL=O+9ezd0R=eM~ z75xMo+_RoUa{4u%H)@3LyrE`}lhGmZy`^XDN?bxbC!=*_3ocABilZjJdcsV~EAM~a z#$5KFw-HAFo9pefj`QqL_64JLk{s3TZDGB~kj#M#EUWt{e}KFmJ6ufkY=6(pu+b z8$nnaRXcyr3@*n?yZJQ$8z5SDP1^prs#GI{& ztqQwzJm-dBh$Y&R_WIyGEO;$C_6SoL{WOuoJ1nQQ_K)>dM0nq9bC}RJOO!& zBTUW|^TYmuWb9U>6ik*1o!{q7u`>G5pB}E`8$N1-413OJ25i&c*L*D zPK{VDTKDwvkJ4X9?2qdRJq&>;VS3Nm^2W2z39V##0N#tl!?5tsED1Y1{1T59 zy)hm>e+t3i$?Zukx{Pq{aSy&g3qkT+kBh)#ZiS>fw|YmmnH4-V$vo&PDSR-2`)f-@Mm&Cpkvov87=ohZ(%f?gJS zS?m%g)(Cb+dV5!ADJFFT{*0%`cw-G6>t`3;xwy|Fy9F3W(yks!)L**jFqXrkUtp~$ zGfb{qPeQa(n-0D-&#A!jsKgbqGq*Y%u#^PFn3|v6mdCr|0CF3&Q52*N6}4<>A1j{v z{wtXO$BIb#;!_hBSK1pJ3)v5+zzKl|+=OH<$JvhVGuLd|(l+D2-(-UFaE2_{^>#tU zkZxF1nDoOwD|~p!q}{3?xY_?vtkCd$=a?s1%YA=U-24+NLb6WeF7Hw$o*&P>CSI?(;zXEM454z-K$%jOKp*?IoDz z5cs)#Rt;VVGVLm>dKmpLNDB~?nx-=ISj@DAwyg+Tpw5Y#&`jg9m|;y)BU7KokHE8MBHc=HiQf*=H}+I zcesJ<3+K!`&xw|@zZ`0px>eVnul~Ntp84{uT6M_~u`8XBL3)H+=QGlhct9WT;WLSN zL9UYO!_l!R6g$5V%Z2s`ct%%nzU7~7c1kp)?2Png^Ij!Wux;qGi{qNneaiH|J{=VS z#-HaRnk@6>jlz(t;UF?yc4?lOjx%R}hlQkqGCy;wCowCedvpZCj_!XZx%_Eve^nA7v^vOmq z{Ck7*&PYAflF_U<=_OIcPFL#YmDg^_1<1DX%}-$pn~I|9kA5>(4JV_;)(w77db$-P z%nO`0g-@@=@w#O3O}6K)6y&tIhR6G4ORAgNFAjhL&7?xdq@}gA!x%MCr^hs+a@rQaeYw9VA zC+8xmRw+n`z%xI$>7A|~x82cGttP9gZ)R85`iu-b-CIcw)!XT5f&ofV>WM=I^$qd8E0h)lw@DE77b z#OVBT^$6ChU!+P~S7@G`WU+*c(4h^ciNn~y|YouF4vV%%uxc&)V z01`9Q05+N>X+-gtU;bq+NgAOtspU!X*T5ZX3#dDS6@A;z8QsBT(K2?hl(%*$DKusv z9Lo*|-l0I}HzUpSr#*#iHo7&TY&YOi=Z%%Js7kx-HvqJG*kE{NC8J{ob0_97?xu>W z=MiGl8!!V$YRV-_U3#AJ9Pf-tOh$KjFmAw@Uc|r3xXX(_c_VnrZmsm$@u3&wawCh7 zJ?JPsK194t)K^phgkx%N&sveZd}gy8bm@J)v!l{{Rvt~dX+R6gD=6~&Hk z|FBg2F=vBo#cQZ-l@Q$1!+k}GO4Q*6>w~edkJIriwlkSM&i)9W_1)Esx z(?Hne!H>O~W429A)EdGsdUyKwG+x@q^Qo-Y=n-9pQ}W6OV3QH9BDo^xzJZcZCy%o; zJHdy)n##(5kZ4iaM;+RQdRcf|x3O!hEj4q@HhR6K^9vlDoPAtrUFlh2GH7bW6owUn zVAX1?cE8xo2CE>N1&^%l$R|s*6r#H1(>`=8w^kczCm%E^(ZBjT!~lO(dfkG?Pp*je z5AIS#GbT%Ng9-t7AmR)TwR@OY7IOj<i0Z11zwTl$^SR>TdRVG9urbjnhQ9Vd)& zfzsRvw(zjvEQPj0wLR%6SZ7&Zq>SiBx3z1=FYvnLhXPRE1PO$k?1s9lXF}{#O1J|I z6^JtK%E3a8&NC)i#G4snj(EDr)5YJMs|8C8J;p4#Z zy{P8Fe|NT}+>{UQGUWhVHE?!e&vEXJ@*xKwiVl?i-4>B{qypC$PtP zcQz{kn{uSmPG1SI%BnxU4NC0Jl_Y}P3N}lGZ?ss~D>fX^*RpB6$(wVYL*m3gU{?#2r#1ScfKA^l5}zE{{Sq#B{94r+mJ z+~XQOaVboy-7AtyQ`UQ3=lhA&_AJDn{^0}8`rkbS%4mYpUSh3fC}2R%<0NJ&WxU?e%0If3(^U5uB7-;_VEaS;C? 
zu^6ZMT0)}1up#dy9S}6T0jw132|z%=evFOr|F!qsaZPR8x?3!$sHlLrMPVx{D$Poy z#)jAs5h+p6<#JN@a zJz*Q*E}wGViF+%YQ@?9w&U3pumW%R)JKybT2#H+YxZyr4H7ure|M=y8hi8AIAd_DHacJpp4v;KHO8wFrH@Z=#@-S)9gVnYxE*?9v-vWz zh=LS<;a$`F((ZRdr1SGv$7@!4i^h_LUH6+u%61bo>OMT$mr|T#E-X7IvMJsUS$;t| z>Ya5pHlPdyQQZB>@mtw$VE*}ExW5Zm0tvXJkWc*_?Dl7pId z+B)`0;3_IAG6uE*9pfi2y}o`LND;3M;7`_=Rip8Gya%EM5KHSm13D>t)~eJT#Cxr z`kpq*`vN()!#4hteEmZTkCbBK+IFfrF7YXRQIQT1wF@KcaZZ;``JDe*iIXaBx3u>y-OBrZ|ICeLUUnk- zpkt-{s&;Cih6);IZEZq(8$JELk{Cf^M$3ggo~yV9x}hk#J@RaNt?gU>?lL)S+Mx(}*t`=?|QjdORO*r-O$*aQ$r%#{JJEQ`< zY^>ur(2Fh^D0y$3?X_lyPdc`Wki!GGWu)GFCXYi(+M=GHGO&M{WLj|sCh!cUY_52ekE$f6G25Wm;Q1pGjJ~J`7P^uTRyNFr_!{#aD-BS(67_UxD9q4 zdKE<5pR3Jjs0UtBudjkvijnc(c{prjb9zvAEMA}jbR{U1)~nN(cAOk#d&Zh3Ah#nm12?krB~7T zl|f@(ls}!24Ug4{+wMKRd;$}gFlo$#BH_t z(IOIB8GT{8{Se|p#gW?4nCjmzKfbDeFT3-yKV|S2v7p6KcCgstAmIw0^-RyU(PW<$ zeH4|s)Fj~JKT7%`#sZ1+RboH<$$AMZX~Rc6kN33C4O7CZoPzo=*H>#6|U{zZ1(nE-Z!?!e39d~W&7a(zjFeJs66G0?~^%PI?1(L9+bRWr< zfAvV+8r2t9m?tk4Uj6k--y2Vx-cgl?YZUkPq8wrxwT@EA3{frTWbxkUOVsu6f>o5i z+y%`;tbWP7xGUT8^_hffwQApjSK;sa8iQWY@9C8usK+PjVUiZZvgNEot^BSW>1wl3 z*dmi{#*`4>ABnkiK0!vE>EC_CF4!Q1wq;kDTcIxyL@>5z(z)w=mqQ(CHo!CqB^9kwxn7rRKYp{xG7bdmvNpTlUIVH+oARLKk!oo^+&Ls~ zQ>LdHbZ@{SnO)X*AknL1$i zF)Mq6y|Y~5LBd_JU}om!!pN9sSEKAk9=VarRxMpAyM05uk-YZlnl%E|Fz5FLn_AM+ z&qO`74F@fb4>z9gPZ*u59Iy85g`Qb~bGonR(tS8i2fiQZ_jjMm8aWQ6rc;4jdd8Cu zXa2_srQj}~LQ7be#w6I%KSwK!sq#E8SAOl<-@4zp+v$1RgiflIyF($mr#Gq z5Psbr_^o^G+fMXc<@k>&Y}jt`6xUd$9A%;s3DAp*LRjY}#CFx???b1m@f_+jvA!J+ zHtSG+%G)lQM@N1F0u9wTlpieN_EAVC!qC{;a$P1~$ID7hj#(#WAk<0jipjc`>8R(3 z{bL*VZgjR{XoqQOw(dtn+kk3_hLlx+ooN~>NdIdufQH+Y%C-)oDtfDRv5}V6M@v~_ zybKj#Z9rG1&_9SXoLK+cCa|zE`Q}+uTahaj6*`R0 zm3NK@A_@s(bhRi-8_{Go(u8z?MB8rFJvZ3eSMoCIL>GVrWKdpHwX8m}HhvCD%9wZ% zwMlv&5P8Rag$4JG8*S#iTzD|l>tip_I4nKfe)BEB1ij%V)OFnO1A0g9-nsK6;X&{e z(62(mHNmDr1T>?RPEU|}A81oyU|`S;n6B9tPstDP=9FBZVCt1HH+Qcuz#<9e!HxTG zf-5`f)s{yuULeYoKh{^Is(qT*XqiOsDu9}^t=iA0(?UmW`K8 z(5DeGYS40(ml`=e+v6{!$|jjMcUH_gb>?Kesoz64a&5+jygutZh~ZR9!5G=SwZPNq zM$T%T^FWzF%Qm=1sim!=>uuuJ0&*-PwUzZ&{FOszO=zZn0mF5`Y)IAA!Y3 zBbtH1!QfLly3H9tTi=5KYozwEqBzjP=A^|-)E4dN*>0G^h_}(y%|Ii=HE_nzItqpv zI|g(zpKTP`2)t$d4A4$%-4tyGXlXF{($;Vxz0$kHOvUnOC#r3VeWe;`8t+14E04R} zoOp4R#Z2F@3fUTaE~h>BrTLtiT-)*Kl0W~0 zYpacJ7!lflercJ^EE({3;Q@VjPnGp9qDwvnfVK^Tn9CK>7CstMkX6|`-%~Va4qSS* zJ?0rZx3^Ras4IOQh<0=)7Tat|n`y3Y^>7+HekfaJy!F`efhXh#tcy3!ZgPl-Cb`oo zSqZAvUG7HHzf@*vw!E-9{;pHO-==LG;y4*w=J2ra*ZC}_N86U@yiqNWULt`58nH-i z7z4Qd&1Sl;TCm=7kI|OhTJGdkvrf|QK|$3|4mq?u1>)W?kGFSM-^ZWD0qyvL|M=sc zdx0KM(!2G8`(j9htIe2NlKvE+>n{e1Eq&L1buUP`sKLeGHTU4W=;MpBxGg32Ec{r; zt@~>;Gy4zJk}i+5Qq9ueYz@vT%)C(YyS^rxQg7A|4HuygyIU)3Fk^=q{_NbhRzUok ze0JuTem4HfzN$enFR=Ps< zoMbS%?hwZkT71>y;8BF;vAZkr=8+$-*93=c*twymtb%d(wxYWDp7h*;4Gh^cIvATa zKin*(!?*MNI4wjzmaw_&E?NLoCv2gC4AT8UrMzn8u2;7P?N6qGZV2?r$y@EvLd=!{A(Z8I!!$!x!f`) z;a!RW>XxvG*_vYrF`UTVB{5@h96)~>wnvyBTx%S z3MWIUqpo~B-*kYs_06wJ%?@5B{0C1<@SRodGzAs(4)`4^EQqyuDcu;WVTvYbuio0Ge z9T|n$mvPZ?^Oav{w~Nw)Mr#crPO2-j%(RVH*q#&(DQNTNlj|yRo{%DO zyo<$=1pk{}J9{V}c+XMgbVd7#W+EqINPNqFu48rGV8Bp-T^j$;xU0@M9Jx8Y=ZT2T z5EcouX(wR^`Beb_i)2(#GueJkA)>)w6x$O{G4f|cjGPzI&+n#O+9Iytd%?H&G=BUL zrJmkyjhvB^Phyx;Y9>2nu`XdC0*+Zve;xzoJs*C_+s(J}rXs_RxXhSQsID#;VI(L>us^jf|QXL5yrUpC6AU(fkKygHF<uIr+CdT| z8@6sjAoTNyr6~W$zbq14vfK$H=E+-23RsMxPn*ASD6i0VAXW?COc&tx;#>#c3-^pB zsH&)V;Cf%u92?wwEe|{K&r=|Mtd&IHj% z=T8qea_Jo+n%%DkPdB+JiR6}^+nR^YQ=|n?W8y{C# zkiaz&bDK!=Et6)Plu;|~OJ3c`E?uB)cqov!jed{|&+XM$*%EHMhu(`Bx}j~=oS<_XO|qWKI^8V-&R z@pC2$+PwWJg9-e)7||i!sdYx_&ygj$+XpJKU2BQ4c-6$D5d4_AHS((0&c0 zJCHAn zr}SDAUcQnMWRpa#U7-<0|XtmTwpgBYNZ`WwAYNf!IZ0xoGkGX`T+TmD#JS$ht 
z_<-vdlFgHv%2-3qy`KdyIebqhCzF9JWA1!VzG3rs5o=4X^mkstBUYT-jtk{>;`cdE$F z138eZ6U#3fo0J*XrsZFIrYSPx69f9`D{;>tr-r2wqD8ov^I&FvB#39OwPt2~vDuh1 za-fVUHP}`^yk}=36DDfm1k6A^k?Y-X@jch8>lyygA<*g}qgWzsx+u)WVUPzFR?yxH z&$u@v>TrHVwg}$w2OkDF-9Q`YvI0jQY4>a$?<@&OA>SscW5!&XC~Ig<6NVB|LH2)X zwb`9{!M3c^8easzJy`TCFkf|TU-{BoYFoc(y{`h;b^nNWke`IPA4^59VymR}=7c zGXC1Jfo4k!yXvR1JQyF9ib7NK8pRR}%3kdF_EQTS*z(prfm&OubjPKprbdE$5=CZO zwS?@*%+5A)?1kHJRxsJAJ2z>5tp zx>8YbVw(qQ$=?wp!$34VCWuM7NOLvD#)r@rd^P5$-l_h%Vi(QZvQ zKD;&8b4eT5{AfJ!I`wx2+{Y!>h70{fptDIObugj_#&1hQ*H3+<$(l?3<2`@;q*7~b zZb`)&+I7$u=j8h@|9I7`EbWv6MjVUITxcGCxYl;d@^)*excBS$pG`u3d z*4AUuKwXOU3w7CT7;DVAJ4c{T`7@(MEPQly^hFy8qP+gor{`&?xfFiF$x+4tw~&#HzSoGEBS=>V=#xp0KJ9gMjAbi{X(*xE>4SO{BZoz%f{J$vMp_E=)>>6R>lKowUa zuY6>|x&Bex$cSkWlx;7~NnW7+ip>*6U?Y+L$W`xRCZ-tT`_7z=Zu|TmP}7xhQXlYv z6cKT%l*vdaCe!4lwjHsp6-$yzU*&RGY)M=G9m=~y0qiH67m1l{*yw+r-)$s&5fRXM z^iHM65_0;12YEN@tdeKNS*-O-0Z>~XXT+`FVC180Va0bvdFi+2Su-0=6irV_{BulwzSa-Zg_=u={g#Hgn#-3PQv^Lj3cEpb>np!tgD zdzIo@-K>pK*IK^z$-_JKQzM7vg6AIh(#mc>I>&?W>~$_3A^PA!u&Idis$e*dyiqnt z_eKsX)j#Kps1K`ys}F*}MOQ7Gg`Ev`5&LodWoa4A!v3PqmyyaMlf04fdN}6;j#tps zn2+nO8F*ufhUV44DpZ|I4theSt+1T&?qB=V%B)*EEO2N0HC?kmsS}09A)N4wr!z(d z|5U)Oy~U*mCE&2SqTnjqF+C-k4nN`WlLJ1zEwHpIoKg*(uQ8&IZ{{zfXOoy|fT9|A zTaEi@U3duh5~*}^DPH}p;}xqiWC-ji?!BO^=-vS~?3DC1lbAGk3A>4jMe%&5e8iz3E{`)w9uZV40BK-SK!#x3@f|l%VX326i%#RpN78;XnFOyW9=vP z?fClaa{DAqPrkiLC-5Gr7cRb@hCD}YJm1&*X#ew!12{1&OeMy^_^|zG(?NCbia5MF zM{_c=p^U~1TMXl&gzf{DvCu`{Pi+y4H!e;$w232=a?ITYtIuI?J0O-mggnbQ+g#rL zidR(ArF)<9Zl~z{ka>RCpHs5ytJ_;r?IE?i8f9NNY>^l^n*@yZ&`8Yk%8&zjD}| zRx>;~s34lz*l}^i+FcSY0iDSS;PKZS!<`K%s8nFOm&RiuqoIzz7b|$br4+hOgs$m+ z`Y~0RY=Kl}v}iCgcUbNVB!=n~gi|}z!AfLIULCy@ufApoUB8j7>8i_XRm)kng1_mv z>_=~4OB0qE@gtd$)a)V9zQ=-8u6nFC)*HrUxT2)cofotSZ!6&0dxWP?7R_U zVOuX=R!F0dPq zRpO+I!pa)Me>?JYN1)I7@`05YrNR?0<;235k_W5##aDqiw`?RH>3%g;k)k^hDCxv1 z$Onr!_~z9vxeTR=tKFsb{jLlKKi=OthqId4+!0H>6~j-vx08AhYJ>1!&3z1#K1Jc3-Unrl>@{WJa_6-%AWJu z>hHrW4q;xPXHV@T8_T38$`0yh#hJGl&7RuGd=c8*5H`8ZtX_F^?$iVLrD}Uhwa>at zvdq-nsrNcB2j+g$lo`sMJ9SO|{}1P%oE?Xu`WSFWA~7REd=H^!`!PrOX+>N`b>HPVy$YkkF8FW~0K=Lyr}5bKf*ZjDRhYjZ z1~*(A^ndpH5=$zKq;MFosQuk*S=c$O)RF~U$OhRv@qn5APyN{`z%ihPUtjn(-@f|) z(*2k~SI9lrRaI3z>+%G60HNKT1hXTxs~96?kAF@p-{J8+dny} z(rh`Lur^m1t*WD^qsK*s3S($Q zx}U!f@W)b+BppCW0)Hq*N-}gB_{zl0Fj&6UPF`X5k7S^aE4M_UIN@@jX@ikjxbb=U z3J?fn4!GcPyW2?oUFhSx%Jg%j?91UmxI*a zDj1>eOS_G49ld-F8Ffq_%<@ zPcq;neZUOwvv@6Bb|@Nu4eP_-;5@g+($pQbrjLoYenq6%xMfo)SK=+ylh9!rNzkw1 z{0CV6DqN#lNgI=yYnUaA$Lq`C)q(Y{_Py_BIs~%33m9c2X*cs`;0+ZRM)+T`wzSera(#Nn(@%f*UR_gCd+F(__f;xqd4Id(+0Bn zhF>wCU^*B#4czMwJ~t49@Tfdal>U2VrW`&qp&p~M9}-LSfsmXKHy%&c^4~M}*fEvE z9+;E=!&~;6@yQf2d8Ip^zP&a=ogn+CR&W}eQ`*td(Z+Sua%t89I1F#a0jI&bdCu=_ z9#mA8mi)Q}Vt56@V?_#_Db;28xYC35bpck$pf`0Cz%O+(oU;r@RQ3PS>oILyW;`{B zo!~x)e*Y(ey7yQmnY&!t^k*Bx_JZx6cq&Jc>&V%Q`$*e@;O~@rGUh-<5wD9y*f?6ZnP@Ks3|mnSuJLEsh}_yIPz#G) z=iFyCYi~K+{aJ6yIX>(pg z`c#Zes5lPTL36SLsMBX%C_Z;XklkF^Qjv{2+D~%-AHLqP3Z@6UB`I(G@Nm0^kNo&L z@vf7JMNzVX6Ppj2<%CV&Gld%yXQ7gPT=g0N`*2Z@nZq=qYwo+{8$95?Y@fqXfaZkd z#T9cf2n+hKQT|moL~ZUaDkv$^oZtLv?qrXTC*1Xa7h(gzIi7>9r0RbMjd{m7Z2WNg zo-tHr?y=g!&0}NGzUb1AT|)||387iQvJIV7%|B2?zUzT!#NRyCC{v( zr$(g#Sb++_WmZ(oNULL+b`dJsc|qK!I^v#15zS+IJg zI+`h`@$t~ye$>mx{f}OcwWQ)~fJC~ z6w=v{C(Hs{d&RhWaHl^8U{ni zfqLOHCtq$oV%rKhR+vrM59gk=(nrkBf3m?;?_ z{W7bnE4rzbA%!gwE(^Jw?MX}ZSR{}$9)x#R1aduQLRv?3n$7_e(MW0Z-Ogl}Pq46d zU)f2ju~Xv;oa$Ao%{z?2D}5z?;k|6{MoTu;u;s6W_1{c(no4dP6nqHxal<>`srXtl z_quoZKVu~4Ow}h_SJ!s>=7#-+z=}392a9p zo1*mdP~V{gAXEpoxt)#p&jJC9qkzM<@^IWj;`igmKLRADT(0(2=;G z>6Sb7o{kHBf8JPI&hl(`?CYA+6W8RqYA znwl;a>1b=0)AbGYcIBGp@KvDrOZG 
z*70WW?O=-N#V{J~pM|Y45cRsAT<94Cqn73r4}{7{)$$ysQCw??0<Zn}nMJn`t;Dn89f%vZh*yrwQqLOA$){s_achv!R- zACLFycCGOeZ3$iTDWmu~-gGS*(WbdFiWDoSV^KIznb?V9g<33Sp!1gjCt zhb6MJ-xJtsqY2$3H19Fz91-xJQ+Q4a<+*|lKcy;?#*q{<#dnJYZ;68ATKuCS>E^g` zQ~QuDUK?voUfmD_3nq=4&l>q$)}nVa?`j>Ub=0IPZ4tf8ubcAv)xqTGJoy`xn6aFh zthb*3>5{_Jf&2`zxeh&Qgarm;3pk?Dm(Wxxl&Siu+Aj9lqz z(~zz0hJFl-Tmwja52Ef#vHsewtW{4e616|>7+ACYq7{iIw^kmX-< z3K($^QA8fuP*e`QNtoiis zkhfhiE`%PhasL9|BCMO9W?w1806funjD|?Wgn}RFj#ZGr*vZMGV1b+4+y3ZgqO$x%uQk8r6 zJ?wR~{K8x)c7+gE-=ED%Z&b-92#OvDSyl@KopK%#Foh{2 z2!;&6r~PPHyV6!?Sd>>MJpEZqYIQ0rp2!0CY=NdEfhPsyFn?_lNM#Hm=jN z{4O?K@zz$s3Is4}VSpeLYE+{)Rt)KirGpRXQmgS<0om!^fl^=hUZCghzdW4i9XPKq z@Qf}=wC;)P7oLYbq(V(3SzGwe!!t!`P~?V&uc~f*Ey>E+x;Dva4o{*fiE3Hl)TkZ|m=lJ%!u#daG{k|{O;k6<1cISHyT#w6g91rx_n!%>nYgIh;1debx)csIvd zm{->(2|Sh)lFHMa zK#Uwv5S;@l;dxCBaw^j_|FeNXU-h#sZ+))FtQ=fKrj|{chq3k54q?GSM*AA3O&CJ7 z#+R=7x*%V~I5$zE+xG41rhtc~M*U`FqoYQ?W5`M&Rt@0+u+{3&0T)aA{A5{rRf%%2 zazc1>2PLVM=hsk&ZZ5l-8lf(1mTk^Un#p=BN9C0AKaO8h@N%Lwj|ecixX~E%RD&(T zVc?W&{oq4?|Nc1sv4+0MH|f~D)$e_C@aRp*%H#5TpX_I8Q=q&7!pd<2`Md<;d;i|4 zwgaWm@gz{&V7muL#{w%6f&gH?T^|U4~K@b6l_gv=S?4 zH+mH!0FfUBFqAzSVv;lDa5EiOIeG?axW`ut$nGu2QRj#=lN>CLZ^d@CcCSA)l3v*&=iBGdWXPac_4&9Pgi3|IwxT$Ed0tyT>Eg!9 zG>?h^z--z{5kH0K(w>d<9ha0JRYKNs$5080kjZlMv}-iMXAjX^`A0&AA$^R0ldB%$ zW?(u*JgvZgKK;06b1YBng^ z+QEhbe84Dkx5&&-F9&$(I%^~k9MBWByN8NkkC6*C(fqr=4;uMf)&gnxry<~6kQwK? zz5VV)My<(8*xnDgnhGkdEZQXMyVsHRveFVMXyNZH;}J56{Oe9Rf5+?#WdezOE+{yI zmd^y<>liIP>xZzS*?hP!)sfgHi1am8;4}}={h8X$2EFXZtY=2ZT%PHqX-SDN&M`;z zAce{>j~C&HIKNT>y;l#*9%DauM7SG*ln{3Nky`|PlP|#jmr6g@M}uEsc~jm-D8JFc zi^7+j!@^0rXx|o(Q;3h+Lb)An^;&i_zb1HGf^0QpwyHj#U%0?*iA5Rjb2;; z!ZkwpB4kTzy}hFz$Am}MrD7975?TBarZN(K!(2s`lpGY+EgKP=$&vRUdy&--hJgF~N8W?5u0^yz z!_-FJ&wyb%BjJg}JxBxTkb!DhA^U<&&J0$OrknpNi7F`ydg~WYE#bGS`xlgzAZQeP zeRc0T^dlJXkP%EzIVwCw^lmT-%;K>yf-o?(;__3FrqaCVA-7u0gkw3Qnv^K}nxYGfeCRW=ZluDX2c$cIK%E0`oEh@h0}nk6IWhXd zXV_+qAySlz0;XXAt%XDzLiH$hicBGEMm816=YrGZdVEmFcyHxJx#xSrS9%5o>$2qq z590Nto?OCcHL^rz#O)QctZ@-v@_Z#jCpLl*AIHzeSe$!{Vc6@upSkHv7L$ehep zB~35jRduXqIBq;dmebs1An$&?6YyIAx9YkJB8R6v5)}WL1Nv;PC8UCd$^y1zG_Ej! z-Q#^0rX>jPA*`z4)zj0|6Pc+Eup)Ga;Dj$t5xHORJ6&L2TCEC$4DUx`c|8caOK_

mIXymVLS(Az2E8x$9EUfFt1-a|2JrGM`}?4tRSo1NrzP}BE< zoKqU+(CUYqlnDSS@n!jo|6`D7j6Ik%`Asw52vmUFHr4`4dp$EToT)=2k3nm4d>rh8iI$vVZ6?C-G^hfQf6t zTU}i1tfXs6Z(_927m-vnw!mC)7uXrRC??L2Ks4k4s>`u|@I(AZkU9{t0k`{eO-b|C z5#KThCVsk~jW~>**2jdMZE8+p*Nha~DG-!FnjP+R-DVpaJcdTkrtq1#>G0kgl8Ba| zGT8Qep{+@HWg~ynRjuJh2y%oryJkx>8^=QjbFDwf3CgTtF+f+NZzA4Rc%+#iLuDlE zUX41FZ;(|R_A|=qf0n?g;gpmNl;tzEH_XyRbz*1JmVEH-uF)@8H^v{Y7Q-{ndr>FU4(MC8p{r1BO6Z;e2@KS~=D*;G8f0?D6|1`>S9C^Y9j zjR;hlv#ruYJhNdovOi$~`5_~T6D z@i$^1W;1aD9h5hA%BBZbrCH7S)h?;m&*WIk{qM6_aYs-z5HLJN`Ib1#AqXS^GHd3U zzmZv~M}C8?!u7SP-x`l56WMygcj>7nMhJsv__Ihi`)8Yj`5E+dJxYdg)`O6qsa%g) z-wQo6tmLN|1U*X3981ulKc`ITMG3lNfKH?&CiL_^gz%t1Q#4Uh#iDV}5-0x57cJsR zwt_N>3H|I%XX*laxL!r_FQM5Seis3!doX&wr)goc2wYiETH&iSACSr1v-f}%zof`G zh-7p@LHP7-a|Ti%J7Nlho3f=yDU9TtZo3g#(za|_lx}}Ji-z;_E^Y3iw`VG&hlq;c zk$R=nBAm#%0BwnKgfh4>&%y92^mD!yEd`(jG{8TZ$oDw*W;1zX3WDy~5lPX|X>4g# zgwtdN3a7q>*)Aju;2Wf&^065lEJNr0VtK}$Di;%>#vS7thk6Q8G)^>FZGPF@JARLP z%lv`s2F5nj@Fa-G%W`u+4?Fh9lSz+(HmZ>tF5T^N9xbP5NwNoU#_W@43=_Zb@6UF@ z+t5=**lb)3zbB$8g(mE7^v?Q*5@>9@cl{yvmtUm+IDO*irQ005nh}@-ASHmIjc9=7 ziuR1VM_`*PKs`lL?9)tbrw-X+5<W0IAlDrMJ;2nOI$1a}FIUIEbo^?J0(UR>AFhL)!T8OH zn{Cb*Cy?T&W0YXQdZQQQcBGY)gp?M+3tVxSpGTrWuiWIG>I6et#K5KtX z#=B?Scf?DaSjYo41eeh&9JPiY1(hNPt&1#69G&$tJ+-IK{($%dj+;K6!P<3Evpve@ zoBR2W+pKR>xnwp5Kxgn=3s<-tk&B5VUJvEZ%eMPwV{ji`5*N-AE(T8n zS^^QIy)Mz~uo?U-V)$W9^KA9n-*P>dl9`e?NqGP$>ha#>1mwSSeTQ`@q!fUb39wT< zk@xh_=353I#1mH-gK_`{hYDiOL1?;be2^7w-3`?^S1poShre8vF~319Lo2SB}fG`^|E*@Fw}pW6_SW`aJj6=VGb@EPIYJedX9AeRY$!x8^c#LQ;vNW>oQ z{JJInw)o%H=n)W4TZzvbz_)YX5B^yQ-`2fn(?aV%PYMqK>akKpG zlNal9OCXD%$Nuf(e;C6jSZ995)7QwT3Y9{iz<`1ymw{}QdQEU69JeRE+7o#B$UW@`GhdWiWSg6u01RdKC8 z{;QBW0k=<3ezvZ6j=U~$S-0Sp{b9)h|L@N%6&G7v0I2k5;z2N)P-fGa;9@mO+m;gi z2TX0YBZ>-CM1%q?$!odS1*;uY&^5l3Gpe0Fh~2&?cDJ34%-n`++&?%IPCL4Aeb`Bf z7Oc>MAdmG80R45JdRkf*-! 
diff --git a/dbm-services/redis/redis-dts/main.go b/dbm-services/redis/redis-dts/main.go
new file mode 100644
index 0000000000..f9e55e2807
--- /dev/null
+++ b/dbm-services/redis/redis-dts/main.go
@@ -0,0 +1,60 @@
+package main
+
+import (
+	"dbm-services/redis/redis-dts/config"
+	"dbm-services/redis/redis-dts/pkg/constvar"
+	"dbm-services/redis/redis-dts/pkg/dtsJob"
+	"dbm-services/redis/redis-dts/pkg/osPerf"
+	"dbm-services/redis/redis-dts/tclog"
+	"dbm-services/redis/redis-dts/util"
+	"fmt"
+	"log"
+	"os"
+	"runtime/debug"
+	"sync"
+
+	"github.com/spf13/viper"
+)
+
+func main() {
+	defer func() {
+		if r := recover(); r != nil {
+			fmt.Fprintf(os.Stderr, "%s", string(debug.Stack()))
+		}
+	}()
+
+	tendisDebug := viper.GetBool("TENDIS_DEBUG")
+	tclog.Logger.Info(fmt.Sprintf("TENDIS_DEBUG:%v", tendisDebug))
+
+	env := viper.GetString("ENV")
+	tclog.Logger.Info(fmt.Sprintf("ENV:%s", env))
+
+	wg := &sync.WaitGroup{}
+	localIP, err := util.GetLocalIP()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	go func() {
+		osPerf.WatchDtsSvrPerf()
+	}()
+
+	jobers := make([]dtsJob.DtsJober, 0, 3)
+	jobers = append(jobers, dtsJob.NewTendisSSDDtsJob(constvar.GetBkCloudID(), localIP,
+		constvar.GetZoneName(), tclog.Logger, wg))
+	jobers = append(jobers, dtsJob.NewTendisplusDtsJob(constvar.GetBkCloudID(), localIP,
+		constvar.GetZoneName(), tclog.Logger, wg))
+	jobers = append(jobers, dtsJob.NewRedisCacheDtsJob(constvar.GetBkCloudID(), localIP,
+		constvar.GetZoneName(), tclog.Logger, wg))
+
+	for _, jober := range jobers {
+		jober.StartBgWorkers()
+	}
+	wg.Wait()
+}
+
+func init() {
+	config.InitConfig()
+	tclog.InitMainlog()
+	// mysql.DB.Init() // do not connect to MySQL
+}
diff --git a/dbm-services/redis/redis-dts/models/myredis/myredis.go b/dbm-services/redis/redis-dts/models/myredis/myredis.go
new file mode 100644
index 0000000000..d3db6401ea
--- /dev/null
+++ b/dbm-services/redis/redis-dts/models/myredis/myredis.go
@@ -0,0 +1,403 @@
+// Package myredis wraps the redis operations used by redis-dts
+package myredis
+
+import (
+	"context"
+	"dbm-services/redis/redis-dts/pkg/constvar"
+	"dbm-services/redis/redis-dts/tclog"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-redis/redis/v8"
+	"go.uber.org/zap"
+)
+
+// RedisWorker holds a redis connection and its parameters
+type RedisWorker struct {
+	Addr     string        `json:"addr"`
+	Password string        `json:"password"`
+	DB       int           `json:"db"`
+	Client   *redis.Client `json:"-"`
+	logger   *zap.Logger   `json:"-"`
+}
+
+// ParamsString returns the connection parameters as a JSON string
+func (db *RedisWorker) ParamsString() string {
+	ret, _ := json.Marshal(db)
+	return string(ret)
+}
+
+// NewRedisClient creates a new redis connection
+func NewRedisClient(addr, passwd string, db int, logger *zap.Logger) (conn *RedisWorker, err error) {
+	retry := 0 // on failure, retry up to 12 times, 10 seconds apart
+	if logger == nil {
+		logger = tclog.NewFileLogger("log/main.log")
+	}
+	conn = &RedisWorker{
+		Addr:     addr,
+		Password: passwd,
+		DB:       db,
+		logger:   logger,
+	}
+RECONNECT:
+	if conn.Password == "" {
+		conn.Client = redis.NewClient(&redis.Options{
+			Addr:        conn.Addr,
+			DB:          conn.DB,
+			DialTimeout: 10 * time.Second,
+			MaxConnAge:  24 * time.Hour,
+		})
+	} else {
+		conn.Client = redis.NewClient(&redis.Options{
+			Addr:        conn.Addr,
+			Password:    conn.Password,
+			DB:          conn.DB,
+			DialTimeout: 10 * time.Second,
+			MaxConnAge:  24 * time.Hour,
+		})
+	}
+	_, err = conn.Client.Ping(context.TODO()).Result()
+	if err != nil {
+		conn.logger.Error("redis new client fail, sleep 10s then retry",
+			zap.Error(err), zap.String("params", conn.ParamsString()))
+		if retry < 12 {
+			retry++
+			time.Sleep(10 * time.Second)
+			goto RECONNECT
+		} else {
+			return nil, fmt.Errorf("redis new client fail,err:%v addr:%s", err, conn.Addr)
+		}
+	}
+	return
+}
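+
+// Usage sketch for NewRedisClient (illustrative only; the address, password
+// and key below are placeholder values, not taken from this repo):
+//
+//	conn, err := NewRedisClient("127.0.0.1:30000", "xxxx", 0, nil)
+//	if err != nil {
+//		// nil is returned only after all 12 reconnect attempts fail
+//		log.Fatal(err)
+//	}
+//	val, err := conn.Get("dts_job_id")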
*zap.Logger) (conn *RedisWorker, err error) {
+    retry := 0 // on failure, retry up to 12 times, once every 10 seconds
+    if logger == nil {
+        logger = tclog.NewFileLogger("log/main.log")
+    }
+    conn = &RedisWorker{
+        Addr:     addr,
+        Password: passwd,
+        DB:       db,
+        logger:   logger,
+    }
+RECONNECT:
+    if conn.Password == "" {
+        conn.Client = redis.NewClient(&redis.Options{
+            Addr:        conn.Addr,
+            DB:          conn.DB,
+            DialTimeout: 10 * time.Second,
+            MaxConnAge:  24 * time.Hour,
+        })
+    } else {
+        conn.Client = redis.NewClient(&redis.Options{
+            Addr:        conn.Addr,
+            Password:    conn.Password,
+            DB:          conn.DB,
+            DialTimeout: 10 * time.Second,
+            MaxConnAge:  24 * time.Hour,
+        })
+    }
+    _, err = conn.Client.Ping(context.TODO()).Result()
+    if err != nil {
+        conn.logger.Error("redis new client fail, sleep 10s then retry",
+            zap.Error(err), zap.String("params", conn.ParamsString()))
+        if retry < 12 {
+            retry++
+            time.Sleep(10 * time.Second)
+            goto RECONNECT
+        }
+        return nil, fmt.Errorf("redis new client fail,err:%v addr:%s", err, conn.Addr)
+    }
+    return
+}
+
+// LPush ..
+func (db *RedisWorker) LPush(key01, data string) error {
+    _, err := db.Client.LPush(context.TODO(), key01, data).Result()
+    if err != nil {
+        db.logger.Error("redis lpush fail", zap.Error(err),
+            zap.String("key", key01), zap.String("data", data),
+            zap.String("params", db.ParamsString()))
+        err = fmt.Errorf("redis lpush fail,err:%v,keyname:%s", err, key01)
+        return err
+    }
+    return err
+}
+
+// SetNx ..
+func (db *RedisWorker) SetNx(key01 string, data interface{}, expire time.Duration) (bool, error) {
+    ret, err := db.Client.SetNX(context.TODO(), key01, data, expire).Result()
+    if err != nil {
+        db.logger.Error("redis setNx fail", zap.Error(err),
+            zap.String("key", key01), zap.Any("data", data),
+            zap.String("params", db.ParamsString()))
+        err = fmt.Errorf("redis setNx fail,err:%v,keyname:%s", err, key01)
+        return false, err
+    }
+    return ret, nil
+}
+
+// SetEx ..
+func (db *RedisWorker) SetEx(key01 string, data interface{}, expire time.Duration) (string, error) {
+    ret, err := db.Client.SetEX(context.TODO(), key01, data, expire).Result()
+    if err != nil {
+        db.logger.Error("redis setEx fail", zap.Error(err),
+            zap.String("key", key01), zap.Any("data", data),
+            zap.String("params", db.ParamsString()))
+        err = fmt.Errorf("redis setEX fail,err:%v,keyname:%s", err, key01)
+        return "", err
+    }
+    return ret, nil
+}
+
+// Get ..
+func (db *RedisWorker) Get(key01 string) (val01 string, err error) {
+    val01, err = db.Client.Get(context.TODO(), key01).Result()
+    if err != nil && err != redis.Nil {
+        db.logger.Error("redis get fail", zap.Error(err),
+            zap.String("key", key01), zap.String("params", db.ParamsString()))
+        err = fmt.Errorf("redis get fail,err:%v,keyName:%s", err, key01)
+        return
+    }
+    return
+}
+
+// Expire ..
+func (db *RedisWorker) Expire(key01 string, expire time.Duration) (bool, error) {
+    _, err := db.Client.Expire(context.TODO(), key01, expire).Result()
+    if err != nil && err != redis.Nil {
+        db.logger.Error("redis expire fail", zap.Error(err),
+            zap.String("key", key01), zap.String("params", db.ParamsString()))
+        err = fmt.Errorf("redis expire fail,err:%v,keyname:%s", err, key01)
+        return false, err
+    }
+    if err == redis.Nil {
+        return false, nil
+    }
+    return true, nil
+}
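+
+// NOTE (editor's illustrative sketch, not part of the original patch): SetNx plus a
+// TTL is how redis-dts serializes work on one task (cf. TbTendisDTSTask.TaskLockKey
+// later in this patch). Minimal usage, assuming a reachable instance with no password:
+//
+//	worker, err := NewRedisClient("127.0.0.1:6379", "", 0, nil)
+//	if err != nil {
+//		return err
+//	}
+//	defer worker.Close()
+//	locked, err := worker.SetNx("TendisDTS_task_lock_demo", "holder-ip", time.Minute)
+//	if err == nil && locked {
+//		// lock held; the key expires automatically after one minute
+//	}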
+// SISMember ..
+func (db *RedisWorker) SISMember(key01, member01 string) (bool, error) {
+    ret, err := db.Client.SIsMember(context.TODO(), key01, member01).Result()
+    if err != nil && err != redis.Nil {
+        db.logger.Error("redis SIsMember fail", zap.Error(err),
+            zap.String("key", key01), zap.String("member01", member01),
+            zap.String("params", db.ParamsString()))
+        err = fmt.Errorf("redis SIsMember fail,err:%v,keyname:%s,member:%s", err, key01, member01)
+        return false, err
+    }
+    return ret, nil
+}
+
+// TendisSSDBakcup runs the 'backup' command on a tendisSSD instance
+func (db *RedisWorker) TendisSSDBakcup(dstDir string) error {
+    cmd := []interface{}{"backup", dstDir}
+    _, err := db.Client.Do(context.TODO(), cmd...).Result()
+    if err != nil {
+        db.logger.Error("tendisssd instance backup fail", zap.Error(err),
+            zap.Any("backupCmd", cmd), zap.String("Addr:", db.Addr))
+        return fmt.Errorf("tendisssd:%s backup fail,err:%v,cmd:%s %s", db.Addr, err, "backup", dstDir)
+    }
+    db.logger.Info(fmt.Sprintf("tendisssd:%s start backup dstdir:%s", db.Addr, dstDir))
+    return nil
+}
+
+// InfoBackups parsed result of running 'info Backups' on tendisSSD
+type InfoBackups struct {
+    BackupCount          int    `json:"backup-count"`
+    LastBackupTime       int    `json:"last-backup-time"`
+    CurrentBackupRunning string `json:"current-backup-running"`
+}
+
+// TendisSSDInfoBackups runs 'info Backups' on tendisSSD and parses the result
+func (db *RedisWorker) TendisSSDInfoBackups() (*InfoBackups, error) {
+    str01, err := db.Client.Info(context.TODO(), "Backups").Result()
+    if err != nil {
+        db.logger.Error("tendisssd info backups fail", zap.Error(err),
+            zap.Any("Cmd", "info Backups"), zap.String("Addr:", db.Addr))
+        return nil, fmt.Errorf("tendisssd:%s info backups fail,err:%v,cmd:%s", db.Addr, err, "info Backups")
+    }
+    infoList := strings.Split(str01, "\n")
+    ret := &InfoBackups{}
+    for _, infoItem := range infoList {
+        infoItem = strings.TrimSpace(infoItem)
+        if strings.HasPrefix(infoItem, "#") {
+            continue
+        }
+        if len(infoItem) == 0 {
+            continue
+        }
+        list01 := strings.SplitN(infoItem, ":", 2)
+        if len(list01) < 2 {
+            continue
+        }
+        list01[0] = strings.TrimSpace(list01[0])
+        list01[1] = strings.TrimSpace(list01[1])
+        if list01[0] == "backup-count" {
+            ret.BackupCount, _ = strconv.Atoi(list01[1])
+        } else if list01[0] == "last-backup-time" {
+            ret.LastBackupTime, _ = strconv.Atoi(list01[1])
+        } else if list01[0] == "current-backup-running" {
+            ret.CurrentBackupRunning = list01[1]
+        }
+    }
+    return ret, nil
+}
+
+// TendisSSDIsBackupRunning reports whether a tendisSSD backup is in progress
+func (db *RedisWorker) TendisSSDIsBackupRunning() (bool, error) {
+    info01, err := db.TendisSSDInfoBackups()
+    if err != nil {
+        return false, err
+    }
+    return info01.CurrentBackupRunning == "yes", nil
+}
+
+// ConfigSet runs 'config set' on tendis
+func (db *RedisWorker) ConfigSet(confName string, val interface{}) (string, error) {
+    var err error
+    var ok bool
+    valStr := fmt.Sprintf("%v", val)
+    // try 'config set' first; on 'ERR unknown command' fall back to 'confxx set'
+    data, err := db.Client.ConfigSet(context.TODO(), confName, valStr).Result()
+    if err != nil && strings.Contains(err.Error(), "ERR unknown command") {
+        cmd := []interface{}{"confxx", "set", confName, val}
+        confRet, err := db.Client.Do(context.TODO(), cmd...).Result()
+        if err != nil {
+            err = fmt.Errorf("%+v fail,err:%v,addr:%s", cmd, err, db.Addr)
+            db.logger.Error(err.Error())
+            return "", err
+        }
+        data, ok = confRet.(string)
+        if !ok {
+            err = fmt.Errorf(`confxx set result not string,cmd:%v,cmdRet:%v,nodeAddr:%s`,
+                cmd, confRet, db.Addr)
+            db.logger.Error(err.Error())
+            return "", err
+        }
+ } else if err != nil { + err = fmt.Errorf("redis config set %s %s fail,err:%v,addr:%s", confName, val, err, db.Addr) + db.logger.Error(err.Error()) + return data, err + } + return data, nil +} + +// ConfigGet tendis执行config get +func (db *RedisWorker) ConfigGet(confName string) (ret map[string]string, err error) { + var confInfos []interface{} + var ok bool + ret = map[string]string{} + + // 先执行config get,如果报错则执行 confxx get + confInfos, err = db.Client.ConfigGet(context.TODO(), confName).Result() + if err != nil && strings.Contains(err.Error(), "ERR unknown command") { + cmd := []interface{}{"confxx", "get", confName} + var confRet interface{} + confRet, err = db.Client.Do(context.TODO(), cmd...).Result() + if err != nil { + err = fmt.Errorf("cmd:%+v fail,err:%v,addr:%s", cmd, err, db.Addr) + db.logger.Error(err.Error()) + return ret, err + } + confInfos, ok = confRet.([]interface{}) + if ok == false { + err = fmt.Errorf("cmd:%v result not []interface{},cmdRet:%v,nodeAddr:%s", cmd, confRet, db.Addr) + db.logger.Error(err.Error()) + return ret, err + } + } else if err != nil { + err = fmt.Errorf(" cmd:config get %q failed,err:%v", confName, err) + db.logger.Error(err.Error()) + return ret, err + } + + var k01, v01 string + for idx, confItem := range confInfos { + conf01 := confItem.(string) + if idx%2 == 0 { + k01 = conf01 + continue + } + v01 = conf01 + ret[k01] = v01 + } + return ret, nil +} + +// TendisSSDBinlogSize tendis ssd binlog size +type TendisSSDBinlogSize struct { + FirstSeq uint64 `json:"firstSeq"` + EndSeq uint64 `json:"endSeq"` +} + +// TendisSSDBinlogSize command: binlogsize +func (db *RedisWorker) TendisSSDBinlogSize() (ret TendisSSDBinlogSize, err error) { + cmd := []interface{}{"binlogsize"} + ret = TendisSSDBinlogSize{} + sizeRet, err := db.Client.Do(context.TODO(), cmd...).Result() + if err != nil { + db.logger.Error("TendisSSDBinlogSize fail", zap.Error(err), zap.Any("cmd", cmd)) + return ret, fmt.Errorf("cmd:%v fail,err:%v,addr:%s", cmd, err, db.Addr) + } + sizeInfos, ok := sizeRet.([]interface{}) + if ok == false { + err = fmt.Errorf("TendisSSDBinlogSize cmd:%v result not []interface{},cmdRet:%v,nodeAddr:%s", cmd, sizeRet, db.Addr) + db.logger.Error(err.Error()) + return ret, err + } + if len(sizeInfos) != 4 { + err = fmt.Errorf("'binlogsize' result not correct,length:%d != 4,data:%+v,addr:%s", + len(sizeInfos), sizeInfos, db.Addr) + db.logger.Error(err.Error()) + return ret, err + } + firstSeqStr := sizeInfos[1].(string) + endSeqStr := sizeInfos[3].(string) + + ret.FirstSeq, err = strconv.ParseUint(firstSeqStr, 10, 64) + if err != nil { + err = fmt.Errorf("'binlogsize' firstSeq:%s to uint64 fail,err:%v,data:%+v,addr:%s", + firstSeqStr, err, sizeInfos, db.Addr) + db.logger.Error(err.Error()) + return ret, err + } + ret.EndSeq, err = strconv.ParseUint(endSeqStr, 10, 64) + if err != nil { + err = fmt.Errorf("'binlogsize' endSeq:%s to uint64 fail,err:%v,data:%+v,addr:%s", + endSeqStr, err, sizeInfos, db.Addr) + db.logger.Error(err.Error()) + return ret, err + } + return ret, nil +} + +// Info 执行info $section命令并将返回结果保存在map中 +func (db *RedisWorker) Info(section string) (infoRet map[string]string, err error) { + infoRet = make(map[string]string) + str01, err := db.Client.Info(context.TODO(), section).Result() + if err != nil { + err = fmt.Errorf("redis:%s 'info %s' fail,err:%v", db.Addr, section, err) + db.logger.Error(err.Error()) + return + } + infoList := strings.Split(str01, "\n") + for _, infoItem := range infoList { + infoItem = strings.TrimSpace(infoItem) + if 
strings.HasPrefix(infoItem, "#") {
+            continue
+        }
+        if len(infoItem) == 0 {
+            continue
+        }
+        list01 := strings.SplitN(infoItem, ":", 2)
+        if len(list01) < 2 {
+            continue
+        }
+        list01[0] = strings.TrimSpace(list01[0])
+        list01[1] = strings.TrimSpace(list01[1])
+        infoRet[list01[0]] = list01[1]
+    }
+    return
+}
+
+// GetMasterAddrAndPasswd returns the master addr and password.
+// If 'self' is a slave, the master info is taken from 'info replication';
+// if 'self' is a master, its own addr and password are returned.
+func (db *RedisWorker) GetMasterAddrAndPasswd() (masterAddr, masterAuth string, err error) {
+    var infoRet map[string]string
+    var confRet map[string]string
+    infoRet, err = db.Info("replication")
+    if err != nil {
+        return
+    }
+    if infoRet["role"] == constvar.RedisSlaveRole {
+        masterAddr = infoRet["master_host"] + ":" + infoRet["master_port"]
+        confRet, err = db.ConfigGet("masterauth")
+        if err != nil {
+            return
+        }
+        masterAuth = confRet["masterauth"]
+    } else {
+        masterAddr = db.Addr
+        masterAuth = db.Password
+    }
+    return
+}
+
+// Close closes the redis client
+func (db *RedisWorker) Close() {
+    if db.Client == nil {
+        return
+    }
+    db.Client.Close()
+    db.Client = nil
+}
diff --git a/dbm-services/redis/redis-dts/models/myredis/tendisplus_infoRepl.go b/dbm-services/redis/redis-dts/models/myredis/tendisplus_infoRepl.go
new file mode 100644
index 0000000000..efd00cacff
--- /dev/null
+++ b/dbm-services/redis/redis-dts/models/myredis/tendisplus_infoRepl.go
@@ -0,0 +1,302 @@
+package myredis
+
+import (
+    "context"
+    "encoding/json"
+    "fmt"
+    "regexp"
+    "strconv"
+    "strings"
+)
+
+// InfoReplSlave one slave line in the 'info replication' output of a Tendisplus master,
+// e.g. slave0:ip=luketest03-redis-rdsplus4-1.luketest03-svc.dmc,port=30000,state=online,offset=930327677,lag=0
+type InfoReplSlave struct {
+    Name   string `json:"name"`
+    IP     string `json:"ip"`
+    Port   int    `json:"port"`
+    State  string `json:"state"`
+    Offset int64  `json:"offset"`
+    Lag    int64  `json:"lag"`
+}
+
+func (slave *InfoReplSlave) decode(line string) error {
+    line = strings.TrimSpace(line)
+    list01 := strings.Split(line, ":")
+    if len(list01) < 2 {
+        return fmt.Errorf(`%s format not correct,
+        the correct format is as follows:slave0:ip=xx,port=48000,state=online,offset=2510,lag=0`, line)
+    }
+    slave.Name = list01[0]
+    list02 := strings.Split(list01[1], ",")
+    for _, item01 := range list02 {
+        kv := strings.Split(item01, "=")
+        if kv[0] == "ip" {
+            slave.IP = kv[1]
+        } else if kv[0] == "port" {
+            slave.Port, _ = strconv.Atoi(kv[1])
+        } else if kv[0] == "state" {
+            slave.State = kv[1]
+        } else if kv[0] == "offset" {
+            slave.Offset, _ = strconv.ParseInt(kv[1], 10, 64)
+        } else if kv[0] == "lag" {
+            slave.Lag, _ = strconv.ParseInt(kv[1], 10, 64)
+        }
+    }
+    return nil
+}
+
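+// NOTE (editor's illustrative sketch): decode above splits one replication line on
+// ':' and then on ',' / '='. For a sample line it yields:
+//
+//	var s InfoReplSlave
+//	_ = s.decode("slave0:ip=1.1.1.1,port=30000,state=online,offset=930327677,lag=0")
+//	// s.Name=="slave0", s.IP=="1.1.1.1", s.Port==30000,
+//	// s.State=="online", s.Offset==930327677, s.Lag==0
+
+// InfoReplRocksdb ..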
+type InfoReplRocksdb struct { + Name string `json:"name"` + IP string `json:"ip"` + Port int `json:"port"` + State string `json:"state"` + BinlogPos int64 `json:"binlog_pos"` + Lag int64 `json:"lag"` +} + +// InfoReplRocksdbSlave 在tendisplus master上执行info replication结果中rocksdb_slave0解析 +// 如: rocksdb0_slave0:ip=127.0.0.1,port=48000,dest_store_id=0,state=online,binlog_pos=249,lag=0,binlog_lag=0 +type InfoReplRocksdbSlave struct { + InfoReplRocksdb + DestStoreID int `json:"dest_store_id"` + BinlogLag int64 `json:"binlog_lag"` +} + +func (slave *InfoReplRocksdbSlave) decode(line string) error { + line = strings.TrimSpace(line) + var err error + list01 := strings.Split(line, ":") + if len(list01) < 2 { + err = fmt.Errorf(`%s format not correct, + the correct format is as follows: + rocksdb0_slave0:ip=xx,port=xx,dest_store_id=0,state=online,binlog_pos=249,lag=0,binlog_lag=0`, line) + return err + } + slave.Name = list01[0] + + list02 := strings.Split(list01[1], ",") + for _, item01 := range list02 { + list02 := strings.Split(item01, "=") + if list02[0] == "ip" { + slave.IP = list02[1] + } else if list02[0] == "port" { + slave.Port, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "dest_store_id" { + slave.DestStoreID, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "state" { + slave.State = list02[1] + } else if list02[0] == "binlog_pos" { + slave.BinlogPos, _ = strconv.ParseInt(list02[1], 10, 64) + } else if list02[0] == "lag" { + slave.Lag, _ = strconv.ParseInt(list02[1], 10, 64) + } else if list02[0] == "binlog_lag" { + slave.BinlogLag, _ = strconv.ParseInt(list02[1], 10, 64) + } + } + return nil +} + +// InfoReplRocksdbMaster 在tendisplus slave上执行info replication结果中rocksdb_master解析 +// 如: rocksdb0_master:ip=127.0.0.1,port=47000,src_store_id=0,state=online,binlog_pos=249,lag=0 +type InfoReplRocksdbMaster struct { + InfoReplRocksdb + SrcStoreID int `json:"src_store_id"` +} + +func (master *InfoReplRocksdbMaster) decode(line string) error { + line = strings.TrimSpace(line) + list01 := strings.Split(line, ":") + var err error + if len(list01) < 2 { + err = fmt.Errorf(`%s format not correct, + the correct format is as follows: + rocksdb0_master:ip=xxxx,port=47000,src_store_id=0,state=online,binlog_pos=249,lag=0`, line) + return err + } + master.Name = list01[0] + + list02 := strings.Split(list01[1], ",") + for _, item01 := range list02 { + list02 := strings.Split(item01, "=") + if list02[0] == "ip" { + master.IP = list02[1] + } else if list02[0] == "port" { + master.Port, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "src_store_id" { + master.SrcStoreID, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "state" { + master.State = list02[1] + } else if list02[0] == "binlog_pos" { + master.BinlogPos, _ = strconv.ParseInt(list02[1], 10, 64) + } else if list02[0] == "lag" { + master.Lag, _ = strconv.ParseInt(list02[1], 10, 64) + } + } + return nil +} + +// TendisplusInfoReplData tendisplus info replication命令结果解析 +type TendisplusInfoReplData struct { + Addr string `json:"addr"` + Role string `json:"role"` + MasterHost string `json:"master_host"` + MasterPort int `json:"master_port"` + MasterLinkStatus string `json:"master_link_status"` + MasterLastIoSecondsAgo int64 `json:"master_last_io_seconds_ago"` + MasterSyncInPogress int64 `json:"master_sync_in_progress"` + SlaveReplOffset int64 `json:"slave_repl_offset"` + SlavePriority int64 `json:"slave_priority"` + SlaveReadOnly int `json:"slave_read_only"` + ConnectedSlaves int `json:"connected_slaves"` + MasterReplOffset int64 
`json:"master_repl_offset"` + SlaveList []InfoReplSlave `json:"slave_list"` + RocksdbMasterList []InfoReplRocksdbMaster `json:"rocksdb_master_list"` + RocksdbSlaveList []InfoReplRocksdbSlave `json:"rocksdb_slave_list"` +} + +// String 用于打印 +func (rpl *TendisplusInfoReplData) String() string { + tmp, _ := json.Marshal(rpl) + return string(tmp) +} + +// GetRole master/slave +func (rpl *TendisplusInfoReplData) GetRole() string { + return rpl.Role +} + +// GetMasterLinkStatus up/down +func (rpl *TendisplusInfoReplData) GetMasterLinkStatus() string { + return rpl.MasterLinkStatus +} + +// SlaveMaxLag .. +// - 如果我的角色是slave,则从 RocksdbMasterList 中获取maxLag; +// - 如果我的角色是master,则先根据slaveAddr找到slave,然后从 SlaveList 中获取获取maxLag; +// - 如果slaveAddr为空,则获取master第一个slave的lag作为 maxLag; +func (rpl *TendisplusInfoReplData) SlaveMaxLag(slaveAddr string) (int64, error) { + var maxLag int64 = 0 + var err error = nil + slaveAddr = strings.TrimSpace(slaveAddr) + if rpl.GetRole() == "slave" { + if rpl.GetMasterLinkStatus() == "down" { + err = fmt.Errorf("slave:%s master_link_status is %s", rpl.Addr, rpl.GetMasterLinkStatus()) + return maxLag, err + } + for _, rdbMaster01 := range rpl.RocksdbMasterList { + if rdbMaster01.Lag > 18000000000000000 { + // 以前tendisplus的一个bug, 新版本已修复 + continue + } + if rdbMaster01.Lag > maxLag { + maxLag = rdbMaster01.Lag + } + } + return maxLag, nil + } + // role==master + if len(rpl.SlaveList) == 0 { + err = fmt.Errorf("master:%s have no slave", rpl.Addr) + return maxLag, err + } + if slaveAddr == "" { + // default first slave lag + maxLag = rpl.SlaveList[0].Lag + return maxLag, nil + } + var destSlave *InfoReplSlave = nil + for _, slave01 := range rpl.SlaveList { + slaveItem := slave01 + addr01 := fmt.Sprintf("%s:%d", slaveItem.IP, slaveItem.Port) + if slaveAddr == addr01 { + destSlave = &slaveItem + break + } + } + if destSlave == nil { + err = fmt.Errorf("master:%s not find slave:%s", rpl.Addr, slaveAddr) + return maxLag, err + } + maxLag = destSlave.Lag + return maxLag, nil +} + +// TendisplusInfoRepl tendisplus info replication结果解析 +// 参考内容: http://tendis.cn/#/Tendisplus/%E5%91%BD%E4%BB%A4/info?id=replication +func (db *RedisWorker) TendisplusInfoRepl() (replData TendisplusInfoReplData, err error) { + var replRet string + replRet, err = db.Client.Info(context.TODO(), "replication").Result() + if err != nil { + err = fmt.Errorf("info replication fail,err:%v,aadr:%s", err, db.Addr) + return + } + infoList := strings.Split(replRet, "\n") + replData = TendisplusInfoReplData{} + replData.Addr = db.Addr + + slaveReg := regexp.MustCompile(`^slave\d+$`) + rocksdbSlaveReg := regexp.MustCompile(`^rocksdb\d+_slave\d+$`) + rocksdbMasterReg := regexp.MustCompile(`^rocksdb\d+_master$`) + for _, infoItem := range infoList { + infoItem = strings.TrimSpace(infoItem) + if strings.HasPrefix(infoItem, "#") { + continue + } + if len(infoItem) == 0 { + continue + } + list01 := strings.SplitN(infoItem, ":", 2) + if len(list01) < 2 { + continue + } + list01[0] = strings.TrimSpace(list01[0]) + list01[1] = strings.TrimSpace(list01[1]) + if list01[0] == "role" { + replData.Role = list01[1] + } else if list01[0] == "master_host" { + replData.MasterHost = list01[1] + } else if list01[0] == "master_port" { + replData.MasterPort, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "master_link_status" { + replData.MasterLinkStatus = list01[1] + } else if list01[0] == "master_last_io_seconds_ago" { + replData.MasterLastIoSecondsAgo, _ = strconv.ParseInt(list01[1], 10, 64) + } else if list01[0] == 
"master_sync_in_progress" { + replData.MasterSyncInPogress, _ = strconv.ParseInt(list01[1], 10, 64) + } else if list01[0] == "slave_repl_offset" { + replData.SlaveReplOffset, _ = strconv.ParseInt(list01[1], 10, 64) + } else if list01[0] == "slave_priority" { + replData.SlavePriority, _ = strconv.ParseInt(list01[1], 10, 64) + } else if list01[0] == "slave_read_only" { + replData.SlaveReadOnly, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "connected_slaves" { + replData.ConnectedSlaves, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "master_repl_offset" { + replData.MasterReplOffset, _ = strconv.ParseInt(list01[1], 10, 64) + } else if slaveReg.MatchString(list01[0]) == true { + slave01 := InfoReplSlave{} + err = slave01.decode(infoItem) + if err != nil { + return + } + replData.SlaveList = append(replData.SlaveList, slave01) + } else if rocksdbSlaveReg.MatchString(list01[0]) == true { + rdbSlave01 := InfoReplRocksdbSlave{} + err = rdbSlave01.decode(infoItem) + if err != nil { + return + } + replData.RocksdbSlaveList = append(replData.RocksdbSlaveList, rdbSlave01) + } else if rocksdbMasterReg.MatchString(list01[0]) == true { + rdbMaster01 := InfoReplRocksdbMaster{} + err = rdbMaster01.decode(infoItem) + if err != nil { + return + } + replData.RocksdbMasterList = append(replData.RocksdbMasterList, rdbMaster01) + } + } + return +} diff --git a/dbm-services/redis/redis-dts/models/mysql/init.go b/dbm-services/redis/redis-dts/models/mysql/init.go new file mode 100644 index 0000000000..ee2dbebea3 --- /dev/null +++ b/dbm-services/redis/redis-dts/models/mysql/init.go @@ -0,0 +1,96 @@ +package mysql + +import ( + "fmt" + "log" + "time" + + "github.com/jinzhu/gorm" + _ "github.com/jinzhu/gorm/dialects/mysql" // mysql TODO + "github.com/spf13/viper" +) + +// Database 连接MySQL +type Database struct { + Gcs *gorm.DB + Tendis *gorm.DB +} + +// DB TODO +var DB *Database + +func openDB(username, password, addr, name string) *gorm.DB { + config := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=%t&loc=%s", + username, + password, + addr, + name, + true, + // "Asia/Shanghai"), + "Local") + + db, err := gorm.Open("mysql", config) + if err != nil { + log.Fatalf("Database connection failed. Database name: %s, error: %v", name, err) + } + + // set for db connection + setupDB(db) + + return db +} + +func setupDB(db *gorm.DB) { + db.LogMode(viper.GetBool("gormlog")) + // 用于设置最大打开的连接数,默认值为0表示不限制.设置最大的连接数,可以避免并发太高导致连接mysql出现too many connections的错误。 + // db.DB().SetMaxOpenConns(20000) + maxIdleConns := viper.GetInt("max_idle_conns") + maxOpenConn := viper.GetInt("max_open_conns") + maxLifeTime := viper.GetInt("max_life_time") + + db.DB().SetMaxOpenConns(maxOpenConn) + // 用于设置闲置的连接数.设置闲置的连接数则当开启的一个连接使用完成后可以放在池里等候下一次使用 + db.DB().SetMaxIdleConns(maxIdleConns) + // fix gorm invalid connect bug,参考问题:https://studygolang.com/topics/5576 + db.DB().SetConnMaxLifetime(time.Duration(maxLifeTime) * time.Hour) +} + +// InitGcsDB 初始化 GCS DB +func InitGcsDB() *gorm.DB { + return openDB(viper.GetString("gcs_db.username"), + viper.GetString("gcs_db.password"), + viper.GetString("gcs_db.addr"), + viper.GetString("gcs_db.name")) +} + +// GetGcsDB .. +func GetGcsDB() *gorm.DB { + return InitGcsDB() +} + +// InitTendisDB 初始化TendisDB +func InitTendisDB() *gorm.DB { + return openDB(viper.GetString("tendis_db.username"), + viper.GetString("tendis_db.password"), + viper.GetString("tendis_db.addr"), + viper.GetString("tendis_db.name")) +} + +// GetTendisDB .. 
+func GetTendisDB() *gorm.DB { + return InitTendisDB() +} + +// Init 初始化MySQL连接 +func (db *Database) Init() { + DB = &Database{ + // Gcs: GetGcsDB(), + Tendis: GetTendisDB(), + } +} + +// Close 关闭MySQL连接 +func (db *Database) Close() { + // DB.Gcs.Close() + DB.Tendis.Close() +} diff --git a/dbm-services/redis/redis-dts/models/mysql/mysql.go b/dbm-services/redis/redis-dts/models/mysql/mysql.go new file mode 100644 index 0000000000..b65d989eef --- /dev/null +++ b/dbm-services/redis/redis-dts/models/mysql/mysql.go @@ -0,0 +1,2 @@ +// Package mysql TODO +package mysql diff --git a/dbm-services/redis/redis-dts/models/mysql/tendisdb/job.go b/dbm-services/redis/redis-dts/models/mysql/tendisdb/job.go new file mode 100644 index 0000000000..37a5af5297 --- /dev/null +++ b/dbm-services/redis/redis-dts/models/mysql/tendisdb/job.go @@ -0,0 +1,88 @@ +package tendisdb + +import ( + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/pkg/customtime" + "dbm-services/redis/redis-dts/pkg/scrdbclient" + "encoding/json" + "fmt" + "net/http" + + "github.com/spf13/viper" + "go.uber.org/zap" +) + +// TbTendisDTSJob tendisSSD迁移数据到Tendisx任务行 +type TbTendisDTSJob struct { + ID int64 `json:"id" gorm:"column:id;primary_key"` + BillID int64 `json:"bill_id" gorm:"column:bill_id"` + App string `json:"app" gorm:"column:app"` + BkCloudID int64 `json:"bk_cloud_id" gorm:"column:bk_cloud_id"` + User string `json:"user" gorm:"column:user"` + DtsBillType string `json:"dts_bill_type" gorm:"column:dts_bill_type"` + DtsCopyType string `json:"dts_copy_type" gorm:"column:dts_copy_type"` + OnlineSwitchType string `json:"online_switch_type" gorm:"column:online_switch_type"` + DataCheck int `json:"data_check" gorm:"column:data_check"` + DataRepair int `json:"data_repair" gorm:"column:data_repair"` + DataRapairMode string `json:"data_repair_mode" gorm:"column:data_repair_mode"` + SrcCluster string `json:"src_cluster" gorm:"column:src_cluster"` + SrcClusterType string `json:"src_cluster_type" gorm:"column:src_cluster_type"` + SrcRollbackBillID int64 `json:"src_rollback_bill_id" gorm:"column:src_rollback_bill_id"` + SrcRollbackInstances string `json:"src_rollback_instances" gorm:"column:src_rollback_instances"` + DstBkBizID string `json:"dst_bk_biz_id" gorm:"column:dst_bk_biz_id"` + DstCluster string `json:"dst_cluster" gorm:"column:dst_cluster"` + DstClusterType string `json:"dst_cluster_type" gorm:"column:dst_cluster_type"` + KeyWhiteRegex string `json:"key_white_regex" gorm:"column:key_white_regex"` + KeyBlackRegex string `json:"key_black_regex" gorm:"column:key_black_regex"` + Status int `json:"status" gorm:"column:status"` + Reason string `json:"reason" gorm:"column:reason"` + CreateTime customtime.CustomTime `json:"createTime" gorm:"column:create_time"` + UpdateTime customtime.CustomTime `json:"updateTime" gorm:"column:update_time"` +} + +// TableName sets the insert table name for this struct type +func (t *TbTendisDTSJob) TableName() string { + return "tb_tendis_dts_job" +} + +// GetTendisDTSJob 获取job对应row +func GetTendisDTSJob( + billID int64, srcCluster, dstCluster string, + logger *zap.Logger, +) (jobRows []*TbTendisDTSJob, err error) { + var cli01 *scrdbclient.Client + var subURL string + var data *scrdbclient.APIServerResponse + cli01, err = scrdbclient.NewClient(viper.GetString("serviceName"), logger) + if err != nil { + return + } + jobRows = []*TbTendisDTSJob{} + if cli01.GetServiceName() == constvar.DtsRemoteTendisxk8s { + subURL = constvar.K8sGetDtsJobURL + } else if cli01.GetServiceName() == 
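+// NOTE (editor's illustrative sketch): every tendisdb accessor in this package routes
+// through scrdbclient and picks its URL by service name -- the dtsRemoteTendisxk8s
+// endpoints or the bkDbm proxypass endpoints (both listed in pkg/constvar). A hedged
+// sketch of the shared pattern, variable names invented for illustration:
+//
+//	cli, _ := scrdbclient.NewClient(viper.GetString("serviceName"), logger)
+//	subURL := constvar.K8sGetDtsJobURL
+//	if cli.GetServiceName() == constvar.BkDbm {
+//		subURL = constvar.DbmGetDtsJobURL
+//	}
+//	resp, _ := cli.Do(http.MethodPost, subURL, param) // resp.Data is raw JSON
+//	_ = json.Unmarshal(resp.Data, &rows)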
constvar.BkDbm { + subURL = constvar.DbmGetDtsJobURL + } + type dtsJobReq struct { + BillID int64 `json:"bill_id"` + SrcCluster string `json:"src_cluster"` + DstCluster string `json:"dst_cluster"` + } + param := dtsJobReq{ + BillID: billID, + SrcCluster: srcCluster, + DstCluster: dstCluster, + } + data, err = cli01.Do(http.MethodPost, subURL, param) + if err != nil { + return + } + err = json.Unmarshal(data.Data, &jobRows) + if err != nil { + err = fmt.Errorf("GetTendisDTSJob unmarshal data fail,err:%v,resp.Data:%s,subURL:%s,param:%+v", + err.Error(), string(data.Data), subURL, param) + logger.Error(err.Error()) + return + } + return +} diff --git a/dbm-services/redis/redis-dts/models/mysql/tendisdb/task.go b/dbm-services/redis/redis-dts/models/mysql/tendisdb/task.go new file mode 100644 index 0000000000..b56ee70253 --- /dev/null +++ b/dbm-services/redis/redis-dts/models/mysql/tendisdb/task.go @@ -0,0 +1,591 @@ +package tendisdb + +import ( + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/pkg/customtime" + "dbm-services/redis/redis-dts/pkg/scrdbclient" + "encoding/json" + "fmt" + "net/http" + "reflect" + "regexp" + "strings" + "sync" + + "github.com/spf13/viper" + "go.uber.org/zap" +) + +var ( + tbTaskFiledToColunm map[string]string // struct filedName map to colunmName + once01 sync.Once +) + +// TbTendisDTSTask 迁移task +type TbTendisDTSTask struct { + ID int64 `json:"id" gorm:"column:id;primary_key"` + BillID int64 `json:"bill_id" gorm:"column:bill_id"` // 单据号 + App string `json:"app" gorm:"column:app"` // 业务英文名 + BkCloudID int64 `json:"bk_cloud_id" gorm:"column:bk_cloud_id"` // 云区域id + DtsServer string `json:"dts_server" gorm:"column:dts_server"` // 执行迁移任务的server ip + User string `json:"user" gorm:"column:user"` // 申请人 + SrcCluster string `json:"src_cluster" gorm:"column:src_cluster"` // 源集群域名 + SrcClusterPriority int `json:"src_cluster_priority" gorm:"column:src_cluster_priority"` // 源集群优先级,值越大,优先级越高 + SrcIP string `json:"src_ip" gorm:"column:src_ip"` // 源slave ip + SrcPort int `json:"src_port" gorm:"column:src_port"` // 源slave port + SrcPassword string `json:"src_password" gorm:"column:src_password"` // 源实例密码base64值 + SrcDbType string `json:"src_dbtype" gorm:"column:src_dbtype"` // 源实例db类型,TendisSSDInstance/RedisInstance/TendisplusInstance + SrcDbSize int64 `json:"src_dbsize" gorm:"column:src_dbsize"` // 源实例数据量大小,单位byte,ssd=>rocksdbSize,cache=>used_memory + SrcSegStart int `json:"src_seg_start" gorm:"column:src_seg_start"` // 源实例所属segment start + SrcSegEnd int `json:"src_seg_end" gorm:"column:src_seg_end"` // 源实例所属segment end + SrcWeight int `json:"src_weight" gorm:"column:src_weight"` // 源实例权重,单个集群中根据实例的weight从小到大执行迁移 + SrcIPConcurrencyLimit int `json:"src_ip_concurrency_limit" gorm:"column:src_ip_concurrency_limit"` // 源slave ip上task并发数控制 + SrcIPZonename string `json:"src_ip_zonename" gorm:"column:src_ip_zonename"` // 源实例所在城市 + SrcOldLogCount int64 `json:"src_old_logcount" gorm:"column:src_old_logcount"` // 源实例slave-keep-log-count的旧值 + SrcNewLogCount int64 `json:"src_new_logcount" gorm:"column:src_new_logcount"` // 源实例slave-keep-log-count的新值 + IsSrcLogCountRestored int `json:"is_src_logcount_restored" gorm:"column:is_src_logcount_restored"` // 源实例slave-keep-log-count是否恢复 + SrcHaveListKeys int `json:"src_have_list_keys" gorm:"column:src_have_list_keys"` // srcRedis是否包含list类型key,list类型key重试存在风险 + KeyWhiteRegex string `json:"key_white_regex" gorm:"column:key_white_regex"` // key正则(白名单) + KeyBlackRegex string `json:"key_black_regex" 
gorm:"column:key_black_regex"` // key正则(黑名单) + SrcKvStoreID int `json:"src_kvstore_id" gorm:"column:src_kvstore_id"` // tendisplus kvstore id + DstCluster string `json:"dst_cluster" gorm:"column:dst_cluster"` // 目的集群 + DstPassword string `json:"dst_password" gorm:"column:dst_password"` // 目的密码base64值 + TaskType string `json:"task_type" gorm:"column:task_type"` // task类型,包含tendis_backup, backupfile_fetch,tendisdump,cmdsImporter,make_sync + TendisbackupFile string `json:"tendisbackup_file" gorm:"column:tendisbackup_file"` // tendis slave上bakup文件位置 + FetchFile string `json:"fetch_file" gorm:"column:fetch_file"` // backup文件拉取到dtsserver本地位置 + SqlfileDir string `json:"sqlfile_dir" gorm:"column:sqlfile_dir"` // tendisdumper得到的sql文件夹 + SyncerPort int `json:"syncer_port" gorm:"column:syncer_port"` // redis-sync端口 + SyncerPid int `json:"syncer_pid" gorm:"column:syncer_pid"` // sync的进程id + TendisBinlogLag int64 `json:"tendis_binlog_lag" gorm:"column:tendis_binlog_lag"` // redis-sync tendis_binlog_lag信息 + RetryTimes int `json:"retry_times" gorm:"column:retry_times"` // task重试次数 + SyncOperate string `json:"sync_operate" gorm:"column:sync_operate"` // sync操作,包括pause,resume,upgrade,stop等,对应值有PauseTodo PauseFail PauseSucc + KillSyncer int `json:"kill_syncer" gorm:"column:kill_syncer"` // 杀死syncer,0代表否,1代表是 + Message string `json:"message" gorm:"column:message"` // 信息 + Status int `json:"status" gorm:"column:status"` // 0:未开始 1:执行中 2:完成 -1:发生错误 + IgnoreErrlist string `json:"ignore_errlist" gorm:"column:ignore_errlist"` // 迁移过程中被忽略的错误,如key同名不同类型WRONGTYPE Operation + ResyncFromTime customtime.CustomTime `json:"resync_from_time" gorm:"column:resync_from_time"` // sync从该时间点重新同步增量数据 + CreateTime customtime.CustomTime `json:"create_time" gorm:"column:create_time"` // 创建时间 + UpdateTime customtime.CustomTime `json:"update_time" gorm:"column:update_time"` // 更新时间 +} + +// TableName 表名 +func (t *TbTendisDTSTask) TableName() string { + return "tb_tendis_dts_task" +} + +// ToString 行数据返回为json +func (t *TbTendisDTSTask) ToString() string { + ret, _ := json.Marshal(t) + return string(ret) +} + +// TaskLockKey keyname +func (t *TbTendisDTSTask) TaskLockKey() string { + return fmt.Sprintf("TendisDTS_task_lock_%d_%s_%s_%s_%d", + t.BillID, t.SrcCluster, t.DstCluster, + t.SrcIP, t.SrcPort) +} + +// IsAllDtsTasksToForceKill 是否全部tasks都等待被force kill +func IsAllDtsTasksToForceKill(tasks []*TbTendisDTSTask) (allForceKill bool) { + if len(tasks) == 0 { + return false + } + for _, t01 := range tasks { + t02 := t01 + if t02.SyncOperate != constvar.RedisForceKillTaskTodo { + return false + } + } + return true +} + +func genColInWhere(colName string, valList []string) string { + if len(valList) == 0 || colName == "" { + return "" + } + + var builder strings.Builder + builder.WriteString(" and " + colName + " in (") + + if len(valList) == 1 { + builder.WriteString("'" + valList[0] + "'") + } else { + builder.WriteString("'" + valList[0] + "'") + for _, s := range valList[1:] { + builder.WriteString(",'" + s + "'") + } + } + builder.WriteString(")") + return builder.String() +} + +// GetDtsSvrMigratingTasks TODO +/*GetDtsSvrMigratingTasks 获取dtsserver正在迁移的task,与task对应多少dataSize +对tendiSSD来说,'迁移中' 指处于 'tendisBackup'、'backupfileFetch'、'tendisdump'、'cmdsImporter'中的task, +不包含处于 status=-1 或 处于 makeSync 状态的task +对tendisCache来说,'迁移中'指处于 'makeCacheSync'中的task,不包含处于 status=-1 或 处于 watchCacheSync 状态的task +@params: + dbType: TendisSSDInstance/RedisInstance/TendisplusInstance +*/ +func GetDtsSvrMigratingTasks(bkCloudID int64, dtsSvr, dbType string, 
taskTypes []string, + logger *zap.Logger) (tasks []*TbTendisDTSTask, dataSize uint64, err error) { + var cli01 *scrdbclient.Client + var subURL string + var data *scrdbclient.APIServerResponse + cli01, err = scrdbclient.NewClient(viper.GetString("serviceName"), logger) + if err != nil { + return + } + tasks = []*TbTendisDTSTask{} + if cli01.GetServiceName() == constvar.DtsRemoteTendisxk8s { + subURL = constvar.K8sDtsServerMigratingTasksURL + } else if cli01.GetServiceName() == constvar.BkDbm { + subURL = constvar.DbmDtsServerMigratingTasksURL + } + type dtsSvrMigratingTasksReq struct { + BkCloudID int64 `json:"bk_cloud_id"` + DtsServer string `json:"dts_server"` + DbType string `json:"db_type"` + TaskTypes []string `json:"task_types"` + } + param := dtsSvrMigratingTasksReq{ + BkCloudID: bkCloudID, + DtsServer: dtsSvr, + DbType: dbType, + TaskTypes: taskTypes, + } + data, err = cli01.Do(http.MethodPost, subURL, param) + if err != nil { + return + } + err = json.Unmarshal(data.Data, &tasks) + if err != nil { + err = fmt.Errorf("GetDtsSvrMigratingTasks unmarshal data fail,err:%v,resp.Data:%s,subURL:%s,param:%+v", + err.Error(), string(data.Data), subURL, param) + logger.Error(err.Error()) + return + } + dataSize = 0 + for _, tmp := range tasks { + task := tmp + dataSize = dataSize + uint64(task.SrcDbSize) + } + return +} + +// GetDtsSvrMaxSyncPort 获取DtsServer上syncPort最大的task +func GetDtsSvrMaxSyncPort(bkCloudID int64, dtsSvr, dbType string, taskTypes []string, logger *zap.Logger) ( + ret *TbTendisDTSTask, err error) { + var cli01 *scrdbclient.Client + var subURL string + var data *scrdbclient.APIServerResponse + cli01, err = scrdbclient.NewClient(viper.GetString("serviceName"), logger) + if err != nil { + return + } + type dtsSvrMaxSyncPortReq struct { + BkCloudID int64 `json:"bk_cloud_id"` + DtsServer string `json:"dts_server"` + DbType string `json:"db_type"` + TaskTypes []string `json:"task_types"` + } + + ret = &TbTendisDTSTask{} + if cli01.GetServiceName() == constvar.DtsRemoteTendisxk8s { + subURL = constvar.K8sDtsServerMaxSyncPortURL + } else if cli01.GetServiceName() == constvar.BkDbm { + subURL = constvar.DbmDtsServerMaxSyncPortURL + } + param := dtsSvrMaxSyncPortReq{ + BkCloudID: bkCloudID, + DtsServer: dtsSvr, + DbType: dbType, + TaskTypes: taskTypes, + } + data, err = cli01.Do(http.MethodPost, subURL, param) + if err != nil { + return + } + if len(data.Data) == 4 && string(data.Data) == "null" { + return nil, nil + } + err = json.Unmarshal(data.Data, ret) + if err != nil { + err = fmt.Errorf("GetDtsSvrMaxSyncPortV2 unmarshal data fail,err:%v,resp.Data:%s,subURL:%s,param:%+v", + err.Error(), string(data.Data), subURL, param) + logger.Error(err.Error()) + return + } + return +} + +// GetLast30DaysToExecuteTasks 用于获取最近一个月本地等待执行的tasks +// 可用于获取 tendis_backup,backupfile_fetch等待执行的task +func GetLast30DaysToExecuteTasks( + bkCloudID int64, + dtsServer, taskType, dbType string, + status, limit int, + logger *zap.Logger) (tasks []*TbTendisDTSTask, err error) { + var cli01 *scrdbclient.Client + var subURL string + var data *scrdbclient.APIServerResponse + cli01, err = scrdbclient.NewClient(viper.GetString("serviceName"), logger) + if err != nil { + return + } + type dtsLast30DaysToExecuteTasksReq struct { + BkCloudID int64 `json:"bk_cloud_id"` + DtsServer string `json:"dts_server"` + DbType string `json:"db_type"` + TaskType string `json:"task_type"` + Status int `json:"status"` + Limit int `json:"limit"` + } + tasks = []*TbTendisDTSTask{} + if cli01.GetServiceName() == 
constvar.DtsRemoteTendisxk8s {
+        subURL = constvar.K8sDtsLast30DaysToExecuteTasksURL
+    } else if cli01.GetServiceName() == constvar.BkDbm {
+        subURL = constvar.DbmDtsLast30DaysToExecuteTasksURL
+    }
+    param := dtsLast30DaysToExecuteTasksReq{
+        BkCloudID: bkCloudID,
+        DtsServer: dtsServer,
+        DbType:    dbType,
+        TaskType:  taskType,
+        Status:    status,
+        Limit:     limit,
+    }
+    data, err = cli01.Do(http.MethodPost, subURL, param)
+    if err != nil {
+        return
+    }
+    err = json.Unmarshal(data.Data, &tasks)
+    if err != nil {
+        err = fmt.Errorf("GetLast30DaysToExecuteTasks unmarshal data fail,err:%v,resp.Data:%s,subURL:%s,param:%+v",
+            err.Error(), string(data.Data), subURL, param)
+        logger.Error(err.Error())
+        return
+    }
+    return
+}
+
+// GetLast30DaysToScheduleJobs returns the jobs of the last 30 days that still wait
+// to be scheduled. A job qualifies when it has at least one unscheduled task with
+// task.dataSize < maxDataSize; a task itself is schedulable only if both
+// a. its data size is <= maxDataSize, and
+// b. on its srcIP, currently migrating tasks + 1 <= the per-srcIP concurrency cap
+//    (task.src_ip_concurrency_limit).
+func GetLast30DaysToScheduleJobs(bkCloudID int64, maxDataSize int64, zoneName, dbType string,
+    logger *zap.Logger) (jobs []*TbTendisDTSTask, err error) {
+    var cli01 *scrdbclient.Client
+    var subURL string
+    var data *scrdbclient.APIServerResponse
+    cli01, err = scrdbclient.NewClient(viper.GetString("serviceName"), logger)
+    if err != nil {
+        return
+    }
+    type dtsLast30DaysToScheduleJobsReq struct {
+        BkCloudID   int64  `json:"bk_cloud_id"`
+        MaxDataSize int64  `json:"max_data_size"`
+        ZoneName    string `json:"zone_name"`
+        DbType      string `json:"db_type"`
+    }
+    jobs = []*TbTendisDTSTask{}
+    if cli01.GetServiceName() == constvar.DtsRemoteTendisxk8s {
+        subURL = constvar.K8sDtsLast30DaysToScheduleJobsURL
+    } else if cli01.GetServiceName() == constvar.BkDbm {
+        subURL = constvar.DbmDtsLast30DaysToScheduleJobsURL
+    }
+    param := dtsLast30DaysToScheduleJobsReq{
+        BkCloudID:   bkCloudID,
+        MaxDataSize: maxDataSize,
+        ZoneName:    zoneName,
+        DbType:      dbType,
+    }
+    data, err = cli01.Do(http.MethodPost, subURL, param)
+    if err != nil {
+        return
+    }
+    err = json.Unmarshal(data.Data, &jobs)
+    if err != nil {
+        err = fmt.Errorf("GetLast30DaysToScheduleJobs unmarshal data fail,err:%v,resp.Data:%s,subURL:%s,param:%+v",
+            err.Error(), string(data.Data), subURL, param)
+        logger.Error(err.Error())
+        return
+    }
+    return
+}
+
+// GetJobToScheduleTasks returns all tasks of a job that still wait to be scheduled
+// (billID, srcCluster and dstCluster identify one job).
+// dtsserver='1.1.1.1' and status=0 and task_type='' marks a task as 'not started';
+// a job can be partially executed, and tasks run ordered by src_weight, smallest first.
+func GetJobToScheduleTasks(billID int64, srcCluster, dstCluster string,
+    logger *zap.Logger) (tasks []*TbTendisDTSTask, err error) {
+    if billID == 0 || srcCluster == "" || dstCluster == "" {
+        err = fmt.Errorf("billId:%d or srcCluster:%s or dstCluster:%s can't be empty",
+            billID, srcCluster, dstCluster)
+        logger.Error(err.Error())
+        return tasks, err
+    }
+    var cli01 *scrdbclient.Client
+    var subURL string
+    var data *scrdbclient.APIServerResponse
+    cli01, err = scrdbclient.NewClient(viper.GetString("serviceName"), logger)
+    if err != nil {
+        return
+    }
+    type dtsJobToScheduleTasks struct {
+        BillID     int64  `json:"bill_id"`
+        SrcCluster string `json:"src_cluster"`
+        DstCluster string `json:"dst_cluster"`
+    }
+    tasks = []*TbTendisDTSTask{}
+    if cli01.GetServiceName() == constvar.DtsRemoteTendisxk8s {
+        subURL = constvar.K8sDtsJobToScheduleTasksURL
+    } else if cli01.GetServiceName() == constvar.BkDbm {
+        subURL = constvar.DbmDtsJobToScheduleTasksURL
+    }
+    param
:= dtsJobToScheduleTasks{
+        BillID:     billID,
+        SrcCluster: srcCluster,
+        DstCluster: dstCluster,
+    }
+    data, err = cli01.Do(http.MethodPost, subURL, param)
+    if err != nil {
+        return
+    }
+    err = json.Unmarshal(data.Data, &tasks)
+    if err != nil {
+        err = fmt.Errorf("GetJobToScheduleTasks unmarshal data fail,err:%v,resp.Data:%s,subURL:%s,param:%+v",
+            err.Error(), string(data.Data), subURL, param)
+        logger.Error(err.Error())
+        return
+    }
+    return
+}
+
+// GetJobSrcIPRunningTasks returns the migrating tasks on one srcIP of a job
+// (billID, srcCluster and dstCluster identify one job).
+// The number of concurrently migrating tasks per srcIP must be capped, otherwise the
+// source instance and the live traffic on it would suffer; the cap per srcIP is
+// task.src_ip_concurrency_limit.
+func GetJobSrcIPRunningTasks(billID int64, srcCluster, dstCluster, srcIP string, taskTypes []string,
+    logger *zap.Logger) (tasks []*TbTendisDTSTask, err error) {
+    if billID == 0 || srcCluster == "" || dstCluster == "" {
+        err = fmt.Errorf("billId:%d or srcCluster:%s or dstCluster:%s can't be empty",
+            billID, srcCluster, dstCluster)
+        logger.Error(err.Error())
+        return tasks, err
+    }
+    var cli01 *scrdbclient.Client
+    var subURL string
+    var data *scrdbclient.APIServerResponse
+    cli01, err = scrdbclient.NewClient(viper.GetString("serviceName"), logger)
+    if err != nil {
+        return
+    }
+    type dtsJobSrcIPRuningTasksReq struct {
+        BillID     int64    `json:"bill_id"`
+        SrcCluster string   `json:"src_cluster"`
+        DstCluster string   `json:"dst_cluster"`
+        SrcIP      string   `json:"src_ip"`
+        TaskTypes  []string `json:"task_types"`
+    }
+
+    tasks = []*TbTendisDTSTask{}
+    if cli01.GetServiceName() == constvar.DtsRemoteTendisxk8s {
+        subURL = constvar.K8sDtsJobSrcIPRunningTasksURL
+    } else if cli01.GetServiceName() == constvar.BkDbm {
+        subURL = constvar.DbmDtsJobSrcIPRunningTasksURL
+    }
+    param := dtsJobSrcIPRuningTasksReq{
+        BillID:     billID,
+        SrcCluster: srcCluster,
+        DstCluster: dstCluster,
+        SrcIP:      srcIP,
+        TaskTypes:  taskTypes,
+    }
+    data, err = cli01.Do(http.MethodPost, subURL, param)
+    if err != nil {
+        return
+    }
+    err = json.Unmarshal(data.Data, &tasks)
+    if err != nil {
+        err = fmt.Errorf("GetJobSrcIPRunningTasks unmarshal data fail,err:%v,resp.Data:%s,subURL:%s,param:%+v",
+            err.Error(), string(data.Data), subURL, param)
+        logger.Error(err.Error())
+        return
+    }
+    return
+}
+
+// DtsTaskStructFieldsToColumns maps TbTendisDTSTask struct field names to their table
+// column names, e.g. fieldNames=["BillID","App","User","SrcIP"] yields
+// columnNames=["bill_id","app","user","src_ip"]
+func DtsTaskStructFieldsToColumns(fieldNames []string, logger *zap.Logger) (columnNames []string, err error) {
+    once01.Do(func() {
+        t01 := TbTendisDTSTask{}
+        reg01 := regexp.MustCompile(`column:(\w+)`)
+        getType := reflect.TypeOf(t01)
+        tbTaskFiledToColunm = make(map[string]string, getType.NumField())
+        for i := 0; i < getType.NumField(); i++ {
+            field := getType.Field(i)
+            gormTag := string(field.Tag.Get("gorm"))
+            l01 := reg01.FindStringSubmatch(gormTag)
+            if len(l01) < 2 {
+                continue
+            }
+            tbTaskFiledToColunm[field.Name] = l01[1]
+        }
+    })
+    columnNames = make([]string, 0, len(fieldNames))
+    for _, field01 := range fieldNames {
+        colName, ok := tbTaskFiledToColunm[field01]
+        if !ok {
+            err = fmt.Errorf("struct TbTendisDTSTask has no field:%s", field01)
+            logger.Error(err.Error())
+            return
+        }
+        columnNames = append(columnNames, colName)
+    }
+    return
+}
+
+// GetFieldsValue returns the values of the given fields of a task, e.g.
+// fieldNames=["BillID","App","User","SrcIP"] yields ret=[1111,"test_app","zhangsan","1.1.1.1"]
+func (t *TbTendisDTSTask) GetFieldsValue(fieldNames []string, logger *zap.Logger) (ret []interface{}, err
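+// NOTE (editor's illustrative sketch): DtsTaskStructFieldsToColumns above scans the
+// gorm tags of TbTendisDTSTask exactly once (sync.Once) and caches the field->column
+// map, so partial updates can be built from field names:
+//
+//	cols, _ := DtsTaskStructFieldsToColumns([]string{"BillID", "SrcIP", "Status"}, logger)
+//	// cols == []string{"bill_id", "src_ip", "status"}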
error) { + _, err = DtsTaskStructFieldsToColumns(fieldNames, logger) + if err != nil { + return + } + ret = make([]interface{}, 0, len(fieldNames)) + getValue := reflect.ValueOf(t) + for _, field01 := range fieldNames { + val01 := reflect.Indirect(getValue).FieldByName(field01) + ret = append(ret, val01.Interface()) + } + return +} + +// GetColToValByFields 根据struct fieldName 生成 表列名=>值 之间的对应关系 +func (t *TbTendisDTSTask) GetColToValByFields(fieldNames []string, logger *zap.Logger) ( + colToVal map[string]interface{}, err error) { + var columnNames []string + var values []interface{} + columnNames, err = DtsTaskStructFieldsToColumns(fieldNames, logger) + if err != nil { + return + } + values, err = t.GetFieldsValue(fieldNames, logger) + if err != nil { + return + } + colToVal = make(map[string]interface{}, len(fieldNames)) + for idx, col := range columnNames { + colToVal[col] = values[idx] + } + return +} + +// UpdateFieldsValues 根据字段名 自动生成update 语句并进行更新 +// 如 filedNames=["BillID","App","User","SrcIP"] +// 生成的update语句: update tb_tendis_dts_task set bill_id=?,app=?,user=?,src_ip=?,update_time=now() where id=xxxx; +// 该函数主要目的只更新 值变化的字段,而不是row全部值 +func (t *TbTendisDTSTask) UpdateFieldsValues(fieldNames []string, logger *zap.Logger) (err error) { + var colToVal map[string]interface{} + colToVal, err = t.GetColToValByFields(fieldNames, logger) + if err != nil { + return err + } + _, err = UpdateDtsTaskRows([]int64{t.ID}, colToVal, logger) + return +} + +// UpdateDtsTaskRows 更新tasks多行 +func UpdateDtsTaskRows(ids []int64, colToValue map[string]interface{}, logger *zap.Logger) (rowsAffected int64, + err error) { + var cli01 *scrdbclient.Client + var subURL string + var data *scrdbclient.APIServerResponse + cli01, err = scrdbclient.NewClient(viper.GetString("serviceName"), logger) + if err != nil { + return + } + type dtsTaskRowsUpdateReq struct { + TaskIDs []int64 `json:"task_ids"` + ColumnToValue map[string]interface{} `json:"col_to_val"` + } + + type dtsTaskRowsUpdateRsp struct { + RowsAffected int64 `json:"rows_affected"` + } + + ret := &dtsTaskRowsUpdateRsp{} + if cli01.GetServiceName() == constvar.DtsRemoteTendisxk8s { + subURL = constvar.K8sDtsUpdateTaskRowsURL + } else if cli01.GetServiceName() == constvar.BkDbm { + subURL = constvar.DbmDtsUpdateTaskRowsURL + } + param := dtsTaskRowsUpdateReq{ + TaskIDs: ids, + ColumnToValue: colToValue, + } + data, err = cli01.Do(http.MethodPost, subURL, param) + if err != nil { + return + } + err = json.Unmarshal(data.Data, ret) + if err != nil { + err = fmt.Errorf("UpdateDtsTaskRows unmarshal data fail,err:%v,resp.Data:%s,subURL:%s,param:%+v", + err.Error(), string(data.Data), subURL, param) + logger.Error(err.Error()) + return + } + return ret.RowsAffected, nil +} + +// GetTaskByID 根据id获得task详细信息 +func GetTaskByID(id int64, logger *zap.Logger) (task *TbTendisDTSTask, err error) { + if logger == nil { + err = fmt.Errorf("GetTaskById logger cannot be nil") + return + } + var cli01 *scrdbclient.Client + var subURL string + var data *scrdbclient.APIServerResponse + cli01, err = scrdbclient.NewClient(viper.GetString("serviceName"), logger) + if err != nil { + return + } + type dtsTaskRowByIDReq struct { + TaskID int64 `json:"task_id"` + } + + task = &TbTendisDTSTask{} + if cli01.GetServiceName() == constvar.DtsRemoteTendisxk8s { + subURL = constvar.K8sDtsTaskRowByIDURL + } else if cli01.GetServiceName() == constvar.BkDbm { + subURL = constvar.DbmDtsTaskRowByIDURL + } + param := dtsTaskRowByIDReq{ + TaskID: id, + } + data, err = cli01.Do(http.MethodPost, 
subURL, param) + if err != nil { + return + } + if len(data.Data) == 4 && string(data.Data) == "null" { + return nil, nil + } + err = json.Unmarshal(data.Data, task) + if err != nil { + err = fmt.Errorf("GetTaskByIDV2 unmarshal data fail,err:%v,resp.Data:%s,subURL:%s,param:%+v", + err.Error(), string(data.Data), subURL, param) + logger.Error(err.Error()) + return + } + return +} diff --git a/dbm-services/redis/redis-dts/models/mysql/tendisdb/tendisdb.go b/dbm-services/redis/redis-dts/models/mysql/tendisdb/tendisdb.go new file mode 100644 index 0000000000..101a4beba5 --- /dev/null +++ b/dbm-services/redis/redis-dts/models/mysql/tendisdb/tendisdb.go @@ -0,0 +1,2 @@ +// Package tendisdb TODO +package tendisdb diff --git a/dbm-services/redis/redis-dts/pkg/constvar/constvar.go b/dbm-services/redis/redis-dts/pkg/constvar/constvar.go new file mode 100644 index 0000000000..d3168e8b0e --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/constvar/constvar.go @@ -0,0 +1,234 @@ +// Package constvar TODO +package constvar + +import ( + "regexp" + + "github.com/spf13/viper" +) + +// version +const ( + TendisDTSVersion = "v0.5" +) + +// kibis of bits +const ( + Byte = 1 << (iota * 10) + KiByte + MiByte + GiByte + TiByte + EiByte +) + +const ( + // RedisMasterRole redis role master + RedisMasterRole = "master" + // RedisSlaveRole redis role slave + RedisSlaveRole = "slave" + + // RedisNoneRole none role + RedisNoneRole = "none" + + // MasterLinkStatusUP up status + MasterLinkStatusUP = "up" + // MasterLinkStatusDown down status + MasterLinkStatusDown = "down" +) + +// 环境类型 +const ( + ProdENV = "prod" // 正式环境 + TestENV = "test" // 测试环境 + GlobalENV = "global" // 海外环境 +) + +// db类型 +const ( + TendisTypeTendisSSDInsance = "TendisSSDInstance" + TendisTypeRedisInstance = "RedisInstance" + TendisTypeTendisplusInsance = "TendisplusInstance" + UserTwemproxyType = "user_twemproxy" + UserRedisInstance = "user_redis_instance" + UnknownType = "unknown" +) + +// redis-sync state +const ( + SyncOnlineState = "ONLINE" +) + +// tendisplus replicate state +const ( + TendisplusReplSendbulk = "send_bulk" + TendisplusReplOnline = "online" +) + +// IsProdEnv 是否是正式环境 +func IsProdEnv() bool { + return viper.GetString("ENV") == ProdENV +} + +// IsTestEnv 是否是测试环境 +func IsTestEnv() bool { + return viper.GetString("ENV") == TestENV +} + +// IsGlobalEnv 是否是海外环境 +func IsGlobalEnv() bool { + return viper.GetString("ENV") == GlobalENV +} + +// tendisssd task type +const ( + TendisBackupTaskType = "tendisBackup" + BackupfileFetchTaskType = "backupfileFetch" + TredisdumpTaskType = "tendisdump" + CmdsImporterTaskType = "cmdsImporter" + MakeSyncTaskType = "makeSync" + WatchOldSyncTaskType = "WatchOldSync" +) + +// redis cache task type +const ( + MakeCacheSyncTaskType = "makeCacheSync" + WatchCacheSyncTaskType = "watchCacheSync" +) + +// tendisplus task type +const ( + TendisplusMakeSyncTaskType = "tendisplusMakeSync" + // 将存量数据同步 与 增量数据同步分开,原因是 存量数据同步讲占用较多内存,增量不占用内存 + TendisplusSendBulkTaskType = "tendisplusSendBulk" + TendisplusSendIncrTaskType = "tendisplusSendIncr" +) + +/* +migrating tasks type +'迁移中' 是指那些正在占用资源 或者 即将占用资源 阶段, 资源主要指磁盘 or 内存 +对tendiSSD来说,'迁移中'指处于[tendisBackup,backupfileFetch,tendisdump,cmdsImporter]中的task,不包含处于status=-1或 处于 makeSync 状态的task +不包含处于 status=-1 或 处于 makeSync 状态的task; +对tendisCache来说,'迁移中'指处于 'makeCacheSync'中的task,不包含处于 status=-1 或 处于 watchCacheSync 状态的task; +对tendisplus来说,'迁移中'指处于 'tendisplusMakeSync'、`tendisplusSendBulk`阶段的task,不包含 status=-1 或 处于 tendisplusSendIncr的task +*/ +var ( + SSDMigratingTasksType = 
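+// NOTE (editor's illustrative sketch): a task counts as "migrating" only while its
+// task_type is in the set for its engine; a membership check could look like:
+//
+//	func isMigrating(dbType, taskType string) bool {
+//		set := CacheMigratingTasksType
+//		switch dbType {
+//		case TendisTypeTendisSSDInsance:
+//			set = SSDMigratingTasksType
+//		case TendisTypeTendisplusInsance:
+//			set = TendisplusMigratingTasksType
+//		}
+//		for _, t := range set {
+//			if t == taskType {
+//				return true
+//			}
+//		}
+//		return false
+//	}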
[]string{ + TendisBackupTaskType, + BackupfileFetchTaskType, + TredisdumpTaskType, + CmdsImporterTaskType, + } + CacheMigratingTasksType = []string{MakeCacheSyncTaskType} + TendisplusMigratingTasksType = []string{ + TendisplusMakeSyncTaskType, + TendisplusSendBulkTaskType, + } +) + +// Tredisdump 结果文件格式 +const ( + TredisdumpRespFormat = "resp" + TredisdumpCmdFormat = "aof" +) + +// Tredisdump 结果文件匹配模式 +const ( + TredisdumpListGlobMatch = "*_list_*" + TredisdumpListRegMatch = `^[0-9]+_list_[0-9]+$` + TredisdumpOutputGlobMatch = "*_output_*" + TredisdumpExpireGlobMatch = "*_expire_*" + TredisdumpDelGlobMatch = "*_del_*" +) + +// ListKeyFileReg TODO +var ListKeyFileReg = regexp.MustCompile(TredisdumpListRegMatch) + +// redis-sync 操作状态 +const ( + // pause,'SYNCADMIN stop' + RedisSyncPauseTodo = "SyncPauseTodo" + RedisSyncPauseFail = "SyncPauseFail" + RedisSyncPauseSucc = "SyncPauseSucc" + + // resume,'SYNCADMIN start' + RedisSyncResumeTodo = "SyncResumeTodo" + RedisSyncResumeFail = "SyncResumeFail" + RedisSyncResumeSucc = "SyncResumeSucc" + + // upgrade,upgrade redis-sync binary + RedisSyncUpgradeTodo = "SyncUpgradeTodo" + RedisSyncUpgradeFail = "SyncUpgradeFail" + RedisSyncUpgradeSucc = "SyncUpgradeSucc" + + // Stop,kill redis-sync proccess + RedisSyncStopTodo = "SyncStopTodo" + RedisSyncStopFail = "SyncStopFail" + RedisSyncStopSucc = "SyncStopSucc" + + // force kill migrate task + RedisForceKillTaskTodo = "ForceKillTaskTodo" + RedisForceKillTaskFail = "ForceKillTaskFail" + RedisForceKillTaskSuccess = "ForceKillTaskSucc" + + // resyunc from specific time + ReSyncFromSpecTimeTodo = "ReSyncFromSpecTimeTodo" + ReSyncFromSpecTimeFail = "ReSyncFromSpecTimeFail" + ReSyncFromSpecTimeSucc = "ReSyncFromSpecTimeSucc" +) + +// 可以忽略错误类型 +const ( + WrongTypeOperationErr = "WRONGTYPE Operation" +) + +// remote services' name +const ( + DtsRemoteTendisxk8s = "dtsRemoteTendisxk8s" + // tendisk8s mico service + K8sIsDtsSvrInBlacklistURL = "/tendisxk8s/cluster/tendis-dts/is-dts-server-in-blacklist" + K8sDtsLockKeyURL = "/tendisxk8s/cluster/tendis-dts/dts-lock-key" + K8sDtsUnlockKeyURL = "/tendisxk8s/cluster/tendis-dts/dts-unlock-key" + K8sGetDtsJobURL = "/tendisxk8s/cluster/tendis-dts/get-dts-job" + K8sDtsServerMigratingTasksURL = "/tendisxk8s/cluster/tendis-dts/get-dts-server-migrating-tasks" + K8sDtsServerMaxSyncPortURL = "/tendisxk8s/cluster/tendis-dts/get-dts-server-max-sync-port" + K8sDtsLast30DaysToExecuteTasksURL = "/tendisxk8s/cluster/tendis-dts/get-dts-last-30days-to-execute-tasks" + K8sDtsLast30DaysToScheduleJobsURL = "/tendisxk8s/cluster/tendis-dts/get-dts-last-30days-to-schedule-jobs" + K8sDtsJobToScheduleTasksURL = "/tendisxk8s/cluster/tendis-dts/get-dts-job-to-schedule-tasks" + K8sDtsJobSrcIPRunningTasksURL = "/tendisxk8s/cluster/tendis-dts/get-dts-job-srcip-running-tasks" + K8sDtsTaskRowByIDURL = "/tendisxk8s/cluster/tendis-dts/get-dts-task-row-by-id" + K8sDtsUpdateTaskRowsURL = "/tendisxk8s/cluster/tendis-dts/update-dts-task-rows" + + BkDbm = "bkDbm" + DbmIsDtsSvrInBlacklistURL = "/apis/proxypass/redis_dts/is_dtsserver_in_blacklist/" + DbmDtsLockKeyURL = "/apis/proxypass/redis_dts/distribute_trylock/" + DbmDtsUnlockKeyURL = "/apis/proxypass/redis_dts/distribute_unlock/" + DbmGetDtsJobURL = "/apis/proxypass/redis_dts/job_detail/" + DbmDtsServerMigratingTasksURL = "/apis/proxypass/redis_dts/dts_server_migrating_tasks/" + DbmDtsServerMaxSyncPortURL = "/apis/proxypass/redis_dts/dts_server_max_sync_port/" + DbmDtsLast30DaysToExecuteTasksURL = "/apis/proxypass/redis_dts/last_30_days_to_exec_tasks/" + 
DbmDtsLast30DaysToScheduleJobsURL    = "/apis/proxypass/redis_dts/last_30_days_to_schedule_jobs/"
+    DbmDtsJobToScheduleTasksURL          = "/apis/proxypass/redis_dts/job_to_schedule_tasks/"
+    DbmDtsJobSrcIPRunningTasksURL        = "/apis/proxypass/redis_dts/job_src_ip_running_tasks/"
+    DbmDtsTaskRowByIDURL                 = "/apis/proxypass/redis_dts/task_by_task_id/"
+    DbmDtsUpdateTaskRowsURL              = "/apis/proxypass/redis_dts/tasks_update/"
+
+    DbmJobApiFastExecuteScriptURL        = "/apis/proxypass/jobapi/fast_execute_script/"
+    DbmJobApiGetJobInstanceStatusURL     = "/apis/proxypass/jobapi/get_job_instance_status/"
+    DbmJobApiBatchGetJobInstanceIPLogURL = "/apis/proxypass/jobapi/batch_get_job_instance_ip_log/"
+    DbmJobApiTransferFileURL             = "/apis/proxypass/jobapi/fast_transfer_file/"
+)
+
+// ZonenameTransform maps a satellite city to its parent region
+func ZonenameTransform(zoneName string) string {
+    switch zoneName {
+    case "苏州":
+        return "上海"
+    case "扬州":
+        return "南京"
+    case "清远":
+        return "广州"
+    default:
+        return zoneName
+    }
+}
diff --git a/dbm-services/redis/redis-dts/pkg/constvar/methods.go b/dbm-services/redis/redis-dts/pkg/constvar/methods.go
new file mode 100644
index 0000000000..b26b73c82b
--- /dev/null
+++ b/dbm-services/redis/redis-dts/pkg/constvar/methods.go
@@ -0,0 +1,67 @@
+package constvar
+
+import (
+    "fmt"
+    "time"
+
+    "github.com/spf13/viper"
+)
+
+// GetABSUser env ABSUSER
+func GetABSUser() (absUser string) {
+    absUser = viper.GetString("ABSUSER")
+    if absUser == "" {
+        absUser = "mysql" // default 'mysql'
+    }
+    return absUser
+}
+
+// GetABSPassword env ABSPASSWORD
+func GetABSPassword() (absPasswd string, err error) {
+    absPasswd = viper.GetString("ABSPASSWORD")
+    if absPasswd == "" {
+        err = fmt.Errorf("ABSPASSWORD is empty")
+        return
+    }
+    return
+}
+
+// GetABSPort env ABSPORT
+func GetABSPort() (absPort int) {
+    absPort = viper.GetInt("ABSPORT")
+    if absPort == 0 {
+        absPort = 36000 // default 36000
+    }
+    return
+}
+
+// GetABSPullBwLimit env RsyncPullBwLimit
+func GetABSPullBwLimit() (pullBwLimit int64) {
+    pullBwLimit = viper.GetInt64("RsyncPullBwLimit")
+    if pullBwLimit == 0 {
+        pullBwLimit = 400 * 1024 // default 400 kbit/s
+    }
+    return
+}
+
+// GetABSPullTimeout env RsyncPullTimeout
+func GetABSPullTimeout() (pullTimeout time.Duration) {
+    timeout := viper.GetInt("RsyncPullTimeout")
+    if timeout == 0 {
+        timeout = 36000 // default 36000 seconds
+    }
+    return time.Duration(timeout) * time.Second
+}
+
+// GetBkCloudID returns this machine's bk_cloud_id
+func GetBkCloudID() (bkCloudID int64) {
+    bkCloudID = viper.GetInt64("bkCloudID")
+    return
+}
+
+// GetZoneName returns this machine's zoneName
+func GetZoneName() (zoneName string) {
+    zoneName = viper.GetString("zoneName")
+    return
+}
diff --git a/dbm-services/redis/redis-dts/pkg/customtime/customtime.go b/dbm-services/redis/redis-dts/pkg/customtime/customtime.go
new file mode 100644
index 0000000000..da7f9e83a4
--- /dev/null
+++ b/dbm-services/redis/redis-dts/pkg/customtime/customtime.go
@@ -0,0 +1,76 @@
+// Package customtime provides a custom time type for JSON and SQL
+package customtime
+
+import (
+    "database/sql/driver"
+    "fmt"
+    "strings"
+    "time"
+)
+
+// CustomTime custom time type using the "2006-01-02 15:04:05" layout
+type CustomTime struct {
+    time.Time
+}
+
+const ctLayout = "2006-01-02 15:04:05"
+
+var nilTime = (time.Time{}).UnixNano()
+
+// UnmarshalJSON ..
+func (ct *CustomTime) UnmarshalJSON(b []byte) (err error) {
+    s := strings.Trim(string(b), "\"")
+    if s == "null" || s == "" {
+        ct.Time = time.Time{}
+        return
+    }
+    ct.Time, err = time.ParseInLocation(ctLayout, s, time.Local)
+    return
+}
+
+// MarshalJSON ..
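+// NOTE (editor's illustrative sketch): CustomTime uses the "2006-01-02 15:04:05"
+// layout (ctLayout) instead of RFC3339, matching the DATETIME strings the dts
+// tables return. Round trip, assuming "encoding/json" is imported at the call site:
+//
+//	var ct CustomTime
+//	_ = ct.UnmarshalJSON([]byte(`"2023-05-29 15:14:30"`))
+//	out, _ := json.Marshal(ct) // `"2023-05-29 15:14:30"` again, via MarshalJSON below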
+func (ct CustomTime) MarshalJSON() ([]byte, error) { + if ct.Time.UnixNano() == nilTime { + return []byte("null"), nil + } + return []byte(fmt.Sprintf("\"%s\"", ct.Time.Format(ctLayout))), nil +} + +// Scan .. +func (ct *CustomTime) Scan(value interface{}) error { + switch v := value.(type) { + case []byte: + return ct.UnmarshalText(string(v)) + case string: + return ct.UnmarshalText(v) + case time.Time: + ct.Time = v + case nil: + ct.Time = time.Time{} + default: + return fmt.Errorf("cannot sql.Scan() CustomTime from: %#v", v) + } + return nil +} + +// UnmarshalText .. +func (ct *CustomTime) UnmarshalText(value string) error { + dd, err := time.ParseInLocation(ctLayout, value, time.Local) + if err != nil { + return err + } + ct.Time = dd + return nil +} + +// Value .. +// 注意这里ct不能是指针 +// 参考文章:https://www.codenong.com/44638610/ +func (ct CustomTime) Value() (driver.Value, error) { + return driver.Value(ct.Local().Format(ctLayout)), nil +} + +// IsSet .. +func (ct *CustomTime) IsSet() bool { + return ct.UnixNano() != nilTime +} diff --git a/dbm-services/redis/redis-dts/pkg/dtsJob/base.go b/dbm-services/redis/redis-dts/pkg/dtsJob/base.go new file mode 100644 index 0000000000..639290b242 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsJob/base.go @@ -0,0 +1,312 @@ +package dtsJob + +import ( + "dbm-services/redis/redis-dts/models/mysql/tendisdb" + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/pkg/dtsTask/factory" + "dbm-services/redis/redis-dts/pkg/dtsTask/rediscache" + "dbm-services/redis/redis-dts/pkg/dtsTask/tendisplus" + "dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd" + "dbm-services/redis/redis-dts/pkg/scrdbclient" + "fmt" + "log" + "runtime/debug" + "sync" + "time" + + "github.com/jinzhu/gorm" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +// DtsJober dts-job 接口 +type DtsJober interface { + StartBgWorkers() + ClaimDtsJobs() (err error) +} + +// DtsJobBase base class +type DtsJobBase struct { + BkCloudID int64 `json:"bk_cloud_id"` + ServerIP string `json:"serverIP"` + ZoneName string `json:"zoneName"` + logger *zap.Logger + wg *sync.WaitGroup +} + +// NewDtsJobBase new +func NewDtsJobBase(bkCloudID int64, serverIP, zoneName string, logger *zap.Logger, wg *sync.WaitGroup) *DtsJobBase { + return &DtsJobBase{ + BkCloudID: bkCloudID, + ServerIP: serverIP, + ZoneName: zoneName, + logger: logger, + wg: wg, + } +} + +// GetTaskParallelLimit concurrency for task +func (job *DtsJobBase) GetTaskParallelLimit(taskType string) int { + limit01 := viper.GetInt(taskType + "ParallelLimit") + if limit01 == 0 { + limit01 = 5 // 默认值5 + } + return limit01 +} + +// BgDtsTaskRunnerWithConcurrency 执行子task,限制并发度,如backup、tredisdump等task任务 +// 如拉起5个goroutine执行 backup tasks, 拉起 5个goroutine执行 tredisdump tasks +func (job *DtsJobBase) BgDtsTaskRunnerWithConcurrency(taskType, dbType string) { + var err error + wg := sync.WaitGroup{} + genChan := make(chan *tendisdb.TbTendisDTSTask) + limit := job.GetTaskParallelLimit(taskType) + status := 0 + perTaskNum := 5 + + for worker := 0; worker < limit; worker++ { + wg.Add(1) + go func() { + defer wg.Done() + defer func() { + if r := recover(); r != nil { + job.logger.Error(string(debug.Stack())) + } + }() + for oldRow := range genChan { + // 可能在等待调度过程中row01数据已经改变,所以重新获取数据 + latestRow, err := tendisdb.GetTaskByID(oldRow.ID, job.logger) + if err != nil { + latestRow = oldRow + } + if latestRow == nil { + job.logger.Warn(fmt.Sprintf("根据task_id:%d获取task row失败,taskRow:%v", oldRow.ID, latestRow)) + continue + } + if latestRow.Status 
!= 0 || latestRow.TaskType != taskType {
+ job.logger.Info(fmt.Sprintf("task_id:%d src_slave:%s:%d status=%d taskType=%s,与期待的taskType:%s(status=0)不符,不做任何处理",
+ latestRow.ID, latestRow.SrcIP, latestRow.SrcPort, latestRow.Status, latestRow.TaskType, taskType))
+ continue
+ }
+ task01 := factory.MyTaskFactory(latestRow)
+ task01.Init() // 执行Init,成功则status=1,失败则status=-1
+ task01.Execute()
+ }
+ }()
+ }
+ go func() {
+ defer close(genChan)
+ var toExecuteTasks []*tendisdb.TbTendisDTSTask
+ for {
+ if !tendisdb.IsAllDtsTasksToForceKill(toExecuteTasks) {
+ // 如果所有dts tasks都是 ForceKillTaskTodo 状态,则大概率该dts job用户已强制终止, 无需sleep
+ // 否则 sleep 10s
+ time.Sleep(10 * time.Second)
+ }
+ toExecuteTasks, err = tendisdb.GetLast30DaysToExecuteTasks(job.BkCloudID, job.ServerIP, taskType, dbType,
+ status, perTaskNum, job.logger)
+ if err != nil {
+ continue
+ }
+ if len(toExecuteTasks) == 0 {
+ job.logger.Info(fmt.Sprintf("not found to be executed %q task,sleep 10s", taskType),
+ zap.String("serverIP", job.ServerIP))
+ continue
+ }
+ for _, task01 := range toExecuteTasks {
+ task02 := task01
+ genChan <- task02
+ }
+ }
+ }()
+ wg.Wait()
+}
+
+// BgDtsTaskRunnerWithoutLimit 执行子task,不限制并发度,执行makeSync、watchCacheSync 等增量同步不能限制并发度
+func (job *DtsJobBase) BgDtsTaskRunnerWithoutLimit(taskType, dbType string) {
+ wg := sync.WaitGroup{}
+ genChan := make(chan *tendisdb.TbTendisDTSTask)
+ status := 0
+ perTaskNum := 5
+ var err error
+
+ wg.Add(1)
+ go func() {
+ // 消费者:处理task
+ defer wg.Done()
+ defer func() {
+ if r := recover(); r != nil {
+ job.logger.Error(string(debug.Stack()))
+ }
+ }()
+ for row01 := range genChan {
+ rowItem := row01
+ wg.Add(1)
+ go func(rowData *tendisdb.TbTendisDTSTask) {
+ defer wg.Done()
+ defer func() {
+ if r := recover(); r != nil {
+ job.logger.Error(string(debug.Stack()))
+ }
+ }()
+ task01 := factory.MyTaskFactory(rowData)
+ task01.Init()
+ task01.Execute()
+ }(rowItem)
+ }
+ }()
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ defer close(genChan)
+ var toExecuteTasks []*tendisdb.TbTendisDTSTask
+ for {
+ // 生产者: 获取task
+ // 如果所有dts tasks都是 ForceKillTaskTodo 状态,则大概率该dts job用户已强制终止, 无需sleep
+ // 否则 sleep 10s
+ if !tendisdb.IsAllDtsTasksToForceKill(toExecuteTasks) {
+ time.Sleep(10 * time.Second)
+ }
+ toExecuteTasks, err = tendisdb.GetLast30DaysToExecuteTasks(job.BkCloudID, job.ServerIP, taskType, dbType,
+ status, perTaskNum, job.logger)
+ if err != nil {
+ continue
+ }
+ if len(toExecuteTasks) == 0 {
+ job.logger.Info(fmt.Sprintf("not found to be executed %q task,sleep 10s", taskType),
+ zap.String("serverIP", job.ServerIP))
+ continue
+ }
+ for _, task01 := range toExecuteTasks {
+ task02 := task01
+ genChan <- task02
+ }
+ }
+ }()
+ wg.Wait()
+}
+
+// BgOldRunningSyncTaskWatcher 目的:
+// 很多时候 redis-sync 已经拉起,状态为running(taskrow.status==1 taskrow.taskType="makeSync")
+// 而此时我们需要暂停 dbm-services/redis/redis-dts 以升级 dbm-services/redis/redis-dts 的介质
+// 再次拉起后, 以前(taskrow.status==1 taskrow.taskType="makeSync")的task其相关状态依然需要我们不断watch
+// 注意: 该函数只在 dbm-services/redis/redis-dts 被拉起时执行,启动goroutine监听属于当前dts_server的处于running状态的tasks
+// 对于后续新增的 (taskrow.status==1 taskrow.taskType="makeSync")的task,不归该函数处理
+func (job *DtsJobBase) BgOldRunningSyncTaskWatcher(taskType, dbType string, status int) {
+ limit := 100000
+ oldSyncTasks, err := tendisdb.GetLast30DaysToExecuteTasks(job.BkCloudID, job.ServerIP, taskType, dbType, status, limit,
+ job.logger)
+ if err != nil {
+ return
+ }
+ if len(oldSyncTasks) == 0 {
+ job.logger.Info(fmt.Sprintf("DTSserver:%s not found oldRunningSyncTasks", job.ServerIP))
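+ // 无遗留的running状态task,直接返回;后续新增的running task由各Runner自行处理
+ return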
+ } + job.logger.Info(fmt.Sprintf("DTSserver:%s found %d oldRunningSyncTasks", job.ServerIP, len(oldSyncTasks))) + for _, taskRow01 := range oldSyncTasks { + taskRowItem := taskRow01 + go func(taskRow *tendisdb.TbTendisDTSTask) { + defer func() { + if r := recover(); r != nil { + job.logger.Error(string(debug.Stack())) + } + }() + if taskRow.TaskType == constvar.MakeSyncTaskType { + watcherTask := tendisssd.NewWatchOldSync(taskRow) + watcherTask.Init() + watcherTask.Execute() + } else if taskRow.TaskType == constvar.WatchCacheSyncTaskType { + watcherTask := rediscache.NewWatchCacheSyncTask(taskRow) + watcherTask.Init() + watcherTask.Execute() + } else if taskRow.TaskType == constvar.TendisplusSendBulkTaskType || + taskRow.TaskType == constvar.TendisplusSendIncrTaskType { + watcherTask := tendisplus.NewWatchSyncTask(taskRow) + watcherTask.Init() + watcherTask.Execute() + } + }(taskRowItem) + } +} + +// IsMyselfInBlacklist 当前dts_server是否在黑名单中 +func (job *DtsJobBase) IsMyselfInBlacklist() bool { + scrCli, err := scrdbclient.NewClient(viper.GetString("serviceName"), job.logger) + if err != nil { + log.Fatal(err) + } + return scrCli.IsDtsServerInBlachList(job.ServerIP) +} + +// CheckSrcSlaveServerConcurrency 检查源slave机器是否还能新增迁移task +// 如源slave机器上有20个redis,同时启动迁移是危险的,需做并发控制 +func (job *DtsJobBase) CheckSrcSlaveServerConcurrency(taskRow *tendisdb.TbTendisDTSTask, taskTypes []string) (ok bool, + err error) { + + var msg string + srcSlaveRunningTasks, err := tendisdb.GetJobSrcIPRunningTasks(taskRow.BillID, taskRow.SrcCluster, taskRow.DstCluster, + taskRow.SrcIP, taskTypes, job.logger) + if (err != nil && gorm.IsRecordNotFoundError(err)) || len(srcSlaveRunningTasks) == 0 { + // 该 srcSlave 上没有任何处于迁移中的task,满足对 srcSlave的并发度保护 + return true, nil + } else if err != nil { + return false, err + } + // 该srcSlave上有部分处于迁移中的task + if len(srcSlaveRunningTasks)+1 > taskRow.SrcIPConcurrencyLimit { + // srcSlave 当前迁移中的tasks数 + 1 > srcSlave可支持的最大并发数 + // 遍历下一个task + msg = fmt.Sprintf("srcSlave:%s上正在运行的迁移任务数:%d,srcSlave允许的并发数:%d,so stop accept task", + taskRow.SrcIP, len(srcSlaveRunningTasks), taskRow.SrcIPConcurrencyLimit) + job.logger.Info(msg) + return false, nil + } + // srcSlave 当前迁移中的tasks数 + 1 <= srcSlave可支持的最大并发数,满足对 srcSlave的并发度保护 + return true, nil +} + +func (job *DtsJobBase) getFirstTaskType(taskRow *tendisdb.TbTendisDTSTask) (taskType string) { + switch taskRow.SrcDbType { + case constvar.TendisTypeTendisSSDInsance: + return constvar.TendisBackupTaskType + case constvar.TendisTypeRedisInstance: + return constvar.MakeCacheSyncTaskType + case constvar.TendisTypeTendisplusInsance: + return constvar.TendisplusMakeSyncTaskType + } + return "" +} + +// TryAcceptTask 上锁,尝试认领任务 +func (job *DtsJobBase) TryAcceptTask(taskRow *tendisdb.TbTendisDTSTask) (succ bool, err error) { + var lockOK bool + scrCli, err := scrdbclient.NewClient(viper.GetString("serviceName"), job.logger) + if err != nil { + return false, err + } + // 获取锁,尝试认领该task + lockOK, err = scrCli.DtsLockKey(taskRow.TaskLockKey(), job.ServerIP, 120) + if err != nil { + return false, err + } + if !lockOK { + // 已经有其他dtsserver在尝试认领该task,遍历下一个task + job.logger.Info(fmt.Sprintf( + `taskId:%d srcCluster:%s dstCluster:%s srcRedis:%s#%d 已经有其他dts_server在调度,放弃调度`, + taskRow.ID, taskRow.SrcCluster, + taskRow.DstCluster, taskRow.SrcIP, taskRow.SrcPort)) + return false, nil + } + job.logger.Info(fmt.Sprintf("myself:%s get task dts lock ok,key:%s", job.ServerIP, taskRow.TaskLockKey())) + // 尝试认领task成功 + job.logger.Info(fmt.Sprintf( + `myself:%s 
认领task,下一步开始迁移,taskId:%d srcCluster:%s dstCluster:%s srcRedis:%s#%d`,
+ job.ServerIP, taskRow.ID, taskRow.SrcCluster,
+ taskRow.DstCluster, taskRow.SrcIP, taskRow.SrcPort))
+ taskRow.DtsServer = job.ServerIP
+ taskRow.TaskType = job.getFirstTaskType(taskRow)
+ taskRow.Status = 0
+ taskRow.UpdateFieldsValues([]string{"DtsServer", "TaskType", "Status"}, job.logger)
+ return true, nil
+}
diff --git a/dbm-services/redis/redis-dts/pkg/dtsJob/dtsJob.go b/dbm-services/redis/redis-dts/pkg/dtsJob/dtsJob.go
new file mode 100644
index 0000000000..6d9e2bc681
--- /dev/null
+++ b/dbm-services/redis/redis-dts/pkg/dtsJob/dtsJob.go
@@ -0,0 +1,2 @@
+// Package dtsJob TODO
+package dtsJob
diff --git a/dbm-services/redis/redis-dts/pkg/dtsJob/redisCacheDtsJob.go b/dbm-services/redis/redis-dts/pkg/dtsJob/redisCacheDtsJob.go
new file mode 100644
index 0000000000..c113a2f0c9
--- /dev/null
+++ b/dbm-services/redis/redis-dts/pkg/dtsJob/redisCacheDtsJob.go
@@ -0,0 +1,220 @@
+package dtsJob
+
+import (
+ "dbm-services/redis/redis-dts/models/mysql/tendisdb"
+ "dbm-services/redis/redis-dts/pkg/constvar"
+ "dbm-services/redis/redis-dts/pkg/scrdbclient"
+ "fmt"
+ "runtime/debug"
+ "sync"
+ "time"
+
+ "github.com/dustin/go-humanize"
+ "github.com/jinzhu/gorm"
+ "github.com/spf13/viper"
+ "go.uber.org/zap"
+)
+
+// RedisCacheDtsJob redis-cache dts job
+type RedisCacheDtsJob struct {
+ DtsJobBase
+}
+
+// NewRedisCacheDtsJob new
+func NewRedisCacheDtsJob(bkCloudID int64, serverIP, zoneName string,
+ logger *zap.Logger, wg *sync.WaitGroup) (job *RedisCacheDtsJob) {
+ job = &RedisCacheDtsJob{}
+ job.DtsJobBase = *NewDtsJobBase(bkCloudID, serverIP, zoneName, logger, wg)
+ return
+}
+
+// GetMaxMigrationCacheDataSizePerDtsServer 单台dts_server允许的迁移中的最大Cache数据量,单位byte
+func (job *RedisCacheDtsJob) GetMaxMigrationCacheDataSizePerDtsServer() uint64 {
+ dataSizeStr := viper.GetString("maxCacheDataSizePerDtsServer")
+ if dataSizeStr == "" {
+ dataSizeStr = "256GiB"
+ }
+ maxCacheDataSizePerDtsServer, err := humanize.ParseBytes(dataSizeStr)
+ if err != nil {
+ job.logger.Error(fmt.Sprintf("'maxCacheDataSizePerDtsServer' in config is %s,ParseBytes fail,err:%v",
+ dataSizeStr, err))
+ maxCacheDataSizePerDtsServer = 256 * constvar.GiByte
+ }
+ if maxCacheDataSizePerDtsServer > 256*constvar.GiByte {
+ maxCacheDataSizePerDtsServer = 256 * constvar.GiByte
+ }
+ return maxCacheDataSizePerDtsServer
+}
+
+// IsDataMigrationExceedingMemLimit 检查迁移中redis_cache数据量是否超过 单台dts_server限制
+func (job *RedisCacheDtsJob) IsDataMigrationExceedingMemLimit() (ok bool, allowedMigrationDataSize int64, err error) {
+ maxMigrationCacheSize := job.GetMaxMigrationCacheDataSizePerDtsServer()
+ var cacheMigratingTasks []*tendisdb.TbTendisDTSTask
+ var cacheMigratingDataSize uint64
+ var msg string
+ cacheMigratingTasks, cacheMigratingDataSize, err = tendisdb.GetDtsSvrMigratingTasks(
+ job.BkCloudID, job.ServerIP, constvar.TendisTypeRedisInstance,
+ constvar.CacheMigratingTasksType, job.logger)
+ if err != nil && gorm.IsRecordNotFoundError(err) == false {
+ return
+ }
+ // '我'正在迁移中的数据量大于 maxMigrationCacheSize, 则不继续给自己分配迁移任务
+ if cacheMigratingDataSize > maxMigrationCacheSize {
+ msg = fmt.Sprintf("正在迁移中的tendis_cache task 数据量:%s > 单机限制:%s,stop accept redis_cache dts task",
+ humanize.Bytes(cacheMigratingDataSize),
+ humanize.Bytes(maxMigrationCacheSize))
+ job.logger.Info(msg)
+ return false, 0, nil
+ }
+ allowedMigrationDataSize = int64(maxMigrationCacheSize - cacheMigratingDataSize)
+ // 如果'我'上面还有2个以上task等待做 makeCacheSync,则不继续认领
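+ // 下面统计status==0且等待MakeCacheSync的task数,>=2时暂停认领,避免task积压在单台dts_server
+ todoTasks :=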
[]*tendisdb.TbTendisDTSTask{} + for _, task01 := range cacheMigratingTasks { + task02 := task01 + if task02.TaskType == constvar.MakeCacheSyncTaskType && task02.Status == 0 { + todoTasks = append(todoTasks, task02) + } + } + if len(todoTasks) >= 2 { + job.logger.Info(fmt.Sprintf("redis_cache正在等待MakeCacheSync的task数量:%d>=2,stop accept redis_cache dts task", + len(todoTasks))) + return false, allowedMigrationDataSize, nil + } + return true, allowedMigrationDataSize, nil +} + +// ClaimDtsJobs 认领redis-cache任务 +func (job *RedisCacheDtsJob) ClaimDtsJobs() (err error) { + var memOk bool + var allowedMigrationDataSize int64 + var toScheduleTasks []*tendisdb.TbTendisDTSTask + var srcSlaveConcurrOK bool + var acceptOk bool + succClaimTaskCnt := 0 + defer func() { + if r := recover(); r != nil { + job.logger.Error(string(debug.Stack())) + } + }() + for { + time.Sleep(1 * time.Minute) + job.logger.Info(fmt.Sprintf("dts_server:%s start claim redisCache dts jobs", job.ServerIP)) + // 如果dts_server在黑名单中,则不认领task + if scrdbclient.IsMyselfInBlacklist(job.logger) { + job.logger.Info(fmt.Sprintf( + "dts_server:%s in dts_server blacklist,stop accept redis_cache dts task", + job.ServerIP), + ) + continue + } + memOk, allowedMigrationDataSize, err = job.IsDataMigrationExceedingMemLimit() + if err != nil { + continue + } + if !memOk { + continue + } + /* + 下面模块执行逻辑: + 1. GetLast30DaysToBeScheduledJobs 获取最近一个月待调度的tendis_cache Jobs(相同城市) + 2. 遍历Jobs + 3. GetJobToBeScheduledTasks 获取job中所有待调度的task + 如果task 同时满足三个条件,则本节点 可调度该task: + a. 数据量满足 <= myAvailScheduleSize, + allowedMigrationDataSize = maxCacheDataSizePerDtsServer - 本机迁移中的(tendis_cache tasks)的dataSize + b. task所在的srcIP, 其当前迁移中的tasksCnt + 1 <= srcIP可支持的最大并发数(task.src_ip_concurrency_limit决定) + c. 其他dtsserver没有在 尝试认领该task + */ + toScheduleJobs, err := tendisdb.GetLast30DaysToScheduleJobs(job.BkCloudID, + allowedMigrationDataSize, job.ZoneName, + constvar.TendisTypeRedisInstance, job.logger) + if err != nil { + continue + } + if len(toScheduleJobs) == 0 { + job.logger.Info(fmt.Sprintf( + "redis_cache GetLast30DaysToScheduleJobs empty record,剩余可迁移的数据量:%s,ZoneName:%s,dbType:%s", + humanize.Bytes(uint64(allowedMigrationDataSize)), + job.ZoneName, constvar.TendisTypeRedisInstance)) + continue + } + succClaimTaskCnt = 0 + for _, tmpJob := range toScheduleJobs { + jobItem := tmpJob + toScheduleTasks, err = tendisdb.GetJobToScheduleTasks( + jobItem.BillID, jobItem.SrcCluster, jobItem.DstCluster, job.logger) + if err != nil { + // 执行下一个Job的遍历 + continue + } + if len(toScheduleTasks) == 0 { + continue + } + for _, tmpTask := range toScheduleTasks { + taskItem := tmpTask + if allowedMigrationDataSize < 1*constvar.GiByte { + // 如果可用空间小于1GB,则不再继续 + break + } + if taskItem.SrcDbSize > allowedMigrationDataSize { + // 数据量过大,遍历job的下一个task + continue + } + // 检查源slave机器是否还能新增迁移task + srcSlaveConcurrOK, err = job.CheckSrcSlaveServerConcurrency(taskItem, constvar.CacheMigratingTasksType) + if err != nil { + break + } + if !srcSlaveConcurrOK { + continue + } + // 尝试认领task成功 + acceptOk, err = job.TryAcceptTask(taskItem) + if err != nil { + continue + } + if !acceptOk { + continue + } + allowedMigrationDataSize = allowedMigrationDataSize - taskItem.SrcDbSize + succClaimTaskCnt++ + // 如果认领的task个数 超过 backup limit,则等待下一次调度 + if succClaimTaskCnt > job.GetTaskParallelLimit(constvar.MakeCacheSyncTaskType) { + break + } + } + if err != nil { + // 执行下一个job的遍历 + continue + } + // 如果认领的task个数 超过 backup limit,则等待下一次调度 + if succClaimTaskCnt > job.GetTaskParallelLimit(constvar.MakeCacheSyncTaskType) 
{ + break + } + } + } +} + +// StartBgWorkers 拉起多个后台goroutine +func (job *RedisCacheDtsJob) StartBgWorkers() { + // redis_cache + // 监听以前的迁移中的task + job.BgOldRunningSyncTaskWatcher(constvar.WatchCacheSyncTaskType, constvar.TendisTypeRedisInstance, 1) + // 在tasks被认领后,后台负责执行task的worker + go func() { + job.wg.Add(1) + defer job.wg.Done() + job.BgDtsTaskRunnerWithConcurrency(constvar.MakeCacheSyncTaskType, constvar.TendisTypeRedisInstance) + }() + go func() { + job.wg.Add(1) + defer job.wg.Done() + job.BgDtsTaskRunnerWithoutLimit(constvar.WatchCacheSyncTaskType, constvar.TendisTypeRedisInstance) + }() + // 根据dts_server自身情况尝试认领 task + go func() { + job.wg.Add(1) + defer job.wg.Done() + job.ClaimDtsJobs() + }() +} diff --git a/dbm-services/redis/redis-dts/pkg/dtsJob/tendisSSDDtsJob.go b/dbm-services/redis/redis-dts/pkg/dtsJob/tendisSSDDtsJob.go new file mode 100644 index 0000000000..1038a71c20 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsJob/tendisSSDDtsJob.go @@ -0,0 +1,239 @@ +package dtsJob + +import ( + "dbm-services/redis/redis-dts/models/mysql/tendisdb" + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/pkg/osPerf" + "dbm-services/redis/redis-dts/pkg/scrdbclient" + "fmt" + "runtime/debug" + "sync" + "time" + + "github.com/dustin/go-humanize" + "github.com/jinzhu/gorm" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +// TendisSSDDtsJob tendis-ssd dts job +type TendisSSDDtsJob struct { + DtsJobBase +} + +// NewTendisSSDDtsJob new +func NewTendisSSDDtsJob(bkCloudID int64, serverIP, zoneName string, + logger *zap.Logger, wg *sync.WaitGroup) (job *TendisSSDDtsJob) { + job = &TendisSSDDtsJob{} + job.DtsJobBase = *NewDtsJobBase(bkCloudID, serverIP, zoneName, logger, wg) + return +} + +// GetRatioN_LocalDisk 最大使用是本地磁盘的几分之一 +func (job *TendisSSDDtsJob) GetRatioN_LocalDisk() (ratioNOfLocalDisk uint64) { + ratioNOfLocalDisk = viper.GetUint64("maxLocalDiskDataSizeRatioNTendisSSD") + if ratioNOfLocalDisk == 0 { + ratioNOfLocalDisk = 12 + } + return +} + +// IsDataMigrationExceedingDiskLimit 检查迁移中数据量是否超过本地磁盘限制 +func (job *TendisSSDDtsJob) IsDataMigrationExceedingDiskLimit() (ok bool, + allowedMigrationDataSize int64, err error) { + var myDisk01 osPerf.HostDiskUsage + var msg string + ratioNOfLocalDisk := job.GetRatioN_LocalDisk() + myDisk01, err = osPerf.GetMyHostDisk() + if err != nil { + return + } + if myDisk01.UsageRatio > 50 { + // 如果当前已使用磁盘大于50%,则不继续给自己分配迁移任务 + msg = fmt.Sprintf("%s 磁盘使用率大于50%%,磁盘路径:%s,使用率:%d%%,stop accept tendis_ssd dts task", + job.ServerIP, myDisk01.DirName, myDisk01.UsageRatio) + job.logger.Info(msg) + return + } + ssdMigratingTasks, ssdMigratingDataSize, err := tendisdb.GetDtsSvrMigratingTasks( + job.BkCloudID, job.ServerIP, constvar.TendisTypeTendisSSDInsance, constvar.SSDMigratingTasksType, job.logger) + if err != nil && gorm.IsRecordNotFoundError(err) == false { + return false, 0, err + } + // '我'正在迁移中的数据量大于 本地磁盘的 1/ratioNOfLocalDisk, 则不继续给自己分配迁移任务 + if ssdMigratingDataSize > myDisk01.TotalSize/ratioNOfLocalDisk { + msg = fmt.Sprintf("正在迁移中的tendis_ssd task 数据量:%s > 本地磁盘的1/%d:%s,本地磁盘大小:%s,stop accept tendis_ssd dts task", + humanize.Bytes(ssdMigratingDataSize), + ratioNOfLocalDisk, + humanize.Bytes(myDisk01.TotalSize/ratioNOfLocalDisk), + humanize.Bytes(myDisk01.TotalSize)) + job.logger.Info(msg) + return false, 0, nil + } + allowedMigrationDataSize = int64(myDisk01.TotalSize/ratioNOfLocalDisk - ssdMigratingDataSize) + if allowedMigrationDataSize < 1*constvar.GiByte { // less than 1GB + msg = 
fmt.Sprintf("本地磁盘可用于迁移的空间:%s,tendisssd迁移中的数据量:%s,剩余可迁移数据量:%s < 1GB,stop accept tendis_ssd dts task", + humanize.Bytes(myDisk01.TotalSize/ratioNOfLocalDisk), + humanize.Bytes(ssdMigratingDataSize), + humanize.Bytes(uint64(allowedMigrationDataSize))) + job.logger.Info(msg) + return false, allowedMigrationDataSize, nil + } + // 如果'我'上面还有2个及以上task等待做 tendisBackup,则不继续认领 + todoBackupTasks := []*tendisdb.TbTendisDTSTask{} + for _, task01 := range ssdMigratingTasks { + task02 := task01 + if task02.TaskType == constvar.TendisBackupTaskType && task02.Status == 0 { + todoBackupTasks = append(todoBackupTasks, task02) + } + } + if len(todoBackupTasks) >= 2 { + job.logger.Info(fmt.Sprintf("tendis_ssd正在等待tendisBackup的task数量:%d>=2,stop accept tendis_ssd dts task", + len(todoBackupTasks))) + return false, allowedMigrationDataSize, nil + } + return true, allowedMigrationDataSize, nil +} + +// ClaimDtsJobs 认领tendis-ssd dts任务 +func (job *TendisSSDDtsJob) ClaimDtsJobs() (err error) { + var diskOk bool + var allowedMigrationDataSize int64 + var toScheduleTasks []*tendisdb.TbTendisDTSTask + var srcSlaveConcurrOK bool + var acceptOk bool + succClaimTaskCnt := 0 + defer func() { + if r := recover(); r != nil { + job.logger.Error(string(debug.Stack())) + } + }() + for { + time.Sleep(1 * time.Minute) + job.logger.Info(fmt.Sprintf("dts_server:%s start claim tendis_ssd dts jobs", job.ServerIP)) + // 如果dts_server在黑名单中,则不认领task + if scrdbclient.IsMyselfInBlacklist(job.logger) { + job.logger.Info(fmt.Sprintf("dts_server:%s in dts_server blacklist,stop accept tendis_ssd dts task", job.ServerIP)) + continue + } + diskOk, allowedMigrationDataSize, err = job.IsDataMigrationExceedingDiskLimit() + if err != nil { + continue + } + if !diskOk { + continue + } + // 下面模块执行逻辑: + // - GetLast30DaysToBeScheduledJobs 获取最近一个月待调度的Jobs(相同城市) + // - 遍历Jobs + // - GetJobToBeScheduledTasks 获取每个job中所有待调度的task + // - 如果task 同时满足三个条件,则本节点 可调度该task: + // 1. 数据量满足 <= availDiskSize, availDiskSize = 本机磁盘1/fractionalOfLocalDisk - 本机迁移中的(tasks)的dataSize + // 2. task所在的srcIP, 其当前迁移中的tasksCnt + 1 <= srcIP可支持的最大并发数(task.src_ip_concurrency_limit决定) + // 3. 
其他dts_server没有在 尝试认领该task + toScheduleJobs, err := tendisdb.GetLast30DaysToScheduleJobs(job.BkCloudID, allowedMigrationDataSize, job.ZoneName, + constvar.TendisTypeTendisSSDInsance, job.logger) + if err != nil { + continue + } + if len(toScheduleJobs) == 0 { + job.logger.Info(fmt.Sprintf( + "tendis_ssd GetLast30DaysToScheduleJobs empty record,剩余可迁移的数据量:%s,ZoneName:%s,dbType:%s", + humanize.Bytes(uint64(allowedMigrationDataSize)), job.ZoneName, constvar.TendisTypeTendisSSDInsance)) + continue + } + succClaimTaskCnt = 0 + for _, tmpJob := range toScheduleJobs { + jobItem := tmpJob + toScheduleTasks, err = tendisdb.GetJobToScheduleTasks( + jobItem.BillID, jobItem.SrcCluster, jobItem.DstCluster, job.logger) + if err != nil { + // 执行下一个Job的遍历 + continue + } + if len(toScheduleTasks) == 0 { + continue + } + for _, tmpTask := range toScheduleTasks { + taskItem := tmpTask + if allowedMigrationDataSize < 1*constvar.GiByte { + // 如果可用空间小于1GB,则不再继续 + break + } + if taskItem.SrcDbSize > allowedMigrationDataSize { + // 数据量过大,遍历job的下一个task + continue + } + // 检查源slave机器是否还能新增迁移task + srcSlaveConcurrOK, err = job.CheckSrcSlaveServerConcurrency(taskItem, constvar.SSDMigratingTasksType) + if err != nil { + break + } + if !srcSlaveConcurrOK { + continue + } + // 尝试认领task + acceptOk, err = job.TryAcceptTask(taskItem) + if err != nil { + continue + } + if !acceptOk { + continue + } + allowedMigrationDataSize = allowedMigrationDataSize - taskItem.SrcDbSize + succClaimTaskCnt++ + // 如果认领的task个数 超过 backup limit,则等待下一次调度 + if succClaimTaskCnt > job.GetTaskParallelLimit(constvar.TendisBackupTaskType) { + break + } + } + if err != nil { + // 执行下一个job的遍历 + continue + } + // 如果认领的task个数 超过 backup limit,则等待下一次调度 + if succClaimTaskCnt > job.GetTaskParallelLimit(constvar.TendisBackupTaskType) { + break + } + } + } +} + +// StartBgWorkers 拉起多个后台goroutine +func (job *TendisSSDDtsJob) StartBgWorkers() { + // tendis_ssd + // 监听以前的迁移中的task + job.BgOldRunningSyncTaskWatcher(constvar.MakeSyncTaskType, constvar.TendisTypeTendisSSDInsance, 1) + // 在tasks被认领后,后台负责执行task的worker + go func() { + job.wg.Add(1) + defer job.wg.Done() + job.BgDtsTaskRunnerWithConcurrency(constvar.TendisBackupTaskType, constvar.TendisTypeTendisSSDInsance) + }() + go func() { + job.wg.Add(1) + defer job.wg.Done() + job.BgDtsTaskRunnerWithConcurrency(constvar.BackupfileFetchTaskType, constvar.TendisTypeTendisSSDInsance) + }() + go func() { + job.wg.Add(1) + defer job.wg.Done() + job.BgDtsTaskRunnerWithConcurrency(constvar.TredisdumpTaskType, constvar.TendisTypeTendisSSDInsance) + }() + go func() { + job.wg.Add(1) + defer job.wg.Done() + job.BgDtsTaskRunnerWithConcurrency(constvar.CmdsImporterTaskType, constvar.TendisTypeTendisSSDInsance) + }() + go func() { + job.wg.Add(1) + defer job.wg.Done() + job.BgDtsTaskRunnerWithoutLimit(constvar.MakeSyncTaskType, constvar.TendisTypeTendisSSDInsance) + }() + // 根据dts_server自身情况尝试认领 task + go func() { + job.wg.Add(1) + defer job.wg.Done() + job.ClaimDtsJobs() + }() +} diff --git a/dbm-services/redis/redis-dts/pkg/dtsJob/tendisplusDtsJob.go b/dbm-services/redis/redis-dts/pkg/dtsJob/tendisplusDtsJob.go new file mode 100644 index 0000000000..9d20b77a5a --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsJob/tendisplusDtsJob.go @@ -0,0 +1,282 @@ +package dtsJob + +import ( + "dbm-services/redis/redis-dts/models/mysql/tendisdb" + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/pkg/osPerf" + "dbm-services/redis/redis-dts/pkg/scrdbclient" + "fmt" + "math" + "runtime/debug" + "sync" + "time" + 
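+ // 注:gopsutil/mem 用于读取本机内存信息,go-humanize 用于解析如"500MiB"的容量配置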
+ "github.com/dustin/go-humanize" + "github.com/jinzhu/gorm" + "github.com/shirou/gopsutil/v3/mem" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +// TendisplusDtsJob tendisplus dts job +type TendisplusDtsJob struct { + DtsJobBase +} + +// NewTendisplusDtsJob new +func NewTendisplusDtsJob(bkCloudID int64, serverIP, zoneName string, + logger *zap.Logger, wg *sync.WaitGroup) (job *TendisplusDtsJob) { + job = &TendisplusDtsJob{} + job.DtsJobBase = *NewDtsJobBase(bkCloudID, serverIP, zoneName, logger, wg) + return +} + +// GetMemSizePerKvStoreSync 每个tendisplus kvstore的redis-sync占用的内存 +func (job *TendisplusDtsJob) GetMemSizePerKvStoreSync() uint64 { + memSizeStr := viper.GetString("memSizePerTendisplusKvStoreSync") + if memSizeStr == "" { + memSizeStr = "500MiB" + } + memSize, _ := humanize.ParseBytes(memSizeStr) + if memSize <= 0 { + memSize = 500 * constvar.MiByte + } + return memSize +} + +// IsDataMigrationExceedingMemLimit 内存是否满足继续认领dts task +// 1. 内存已使用 50%+ 不认领; +// 2. 可用内存小于3GB不认领; +// 3. 每个tendisplus kvstore迁移时预计使用500MB内存,如果评估kvstore迁移使用内存数超过 50%,不认领 +// 4. 如果等待迁移的kvstore>=2,不认领 +func (job *TendisplusDtsJob) IsDataMigrationExceedingMemLimit() (ok bool, availMemSizeMigration int64, err error) { + var memInfo *mem.VirtualMemoryStat + var tendisplusMigratingTasks []*tendisdb.TbTendisDTSTask + var kvstoreMigratingMemUsed uint64 + var msg string + memInfo, err = osPerf.GetHostsMemInfo(job.logger) + if memInfo.UsedPercent > 50 { + msg = fmt.Sprintf("本机已使用内存%.2f%% > 50%%,stop accept tendisplus dts task", memInfo.UsedPercent) + job.logger.Info(msg) + return false, 0, nil + } + if memInfo.Available < 3*constvar.GiByte { + msg = fmt.Sprintf("本机可用内存%s<3GB,stop accept tendisplus dts task", humanize.Bytes(memInfo.Available)) + job.logger.Info(msg) + return false, 0, nil + } + tendisplusMigratingTasks, _, err = tendisdb.GetDtsSvrMigratingTasks( + job.BkCloudID, job.ServerIP, constvar.TendisTypeTendisplusInsance, constvar.TendisplusMigratingTasksType, job.logger, + ) + if err != nil && gorm.IsRecordNotFoundError(err) == false { + return + } + // 如果迁移中的tasks数,评估其内存已超过系统内存 50%,返回false + kvstoreMigratingMemUsed = uint64(len(tendisplusMigratingTasks)) * job.GetMemSizePerKvStoreSync() + if kvstoreMigratingMemUsed > memInfo.Total*50/100 { + msg = fmt.Sprintf("本机迁移中kvstore数:%d,评估使用内存:%s,stop accept tendisplus dts task", + len(tendisplusMigratingTasks), + humanize.Bytes(kvstoreMigratingMemUsed)) + job.logger.Info(msg) + return false, 0, nil + } + availMemSizeMigration = int64(memInfo.Total*50/100 - kvstoreMigratingMemUsed) + // 如果'我'上面还有2个以上task等待做 tendisplusStartSync,则不继续认领 + todoTaskCnt := 0 + for _, task01 := range tendisplusMigratingTasks { + task02 := task01 + if task02.TaskType == constvar.TendisplusMakeSyncTaskType && task02.Status == 0 { + todoTaskCnt++ + } + } + if todoTaskCnt >= 2 { + job.logger.Info(fmt.Sprintf("tendisplus正在等待tendisplusMakeSync的task数量:%d>=2,stop accept tendisplus dts task", + todoTaskCnt)) + return false, availMemSizeMigration, nil + } + return true, availMemSizeMigration, nil +} + +// TryAcceptTasks 上锁,尝试认领任务 +func (job *TendisplusDtsJob) TryAcceptTasks(taskRows []*tendisdb.TbTendisDTSTask) (succ bool, err error) { + var lockOK bool + scrCli, err := scrdbclient.NewClient(viper.GetString("serviceName"), job.logger) + if err != nil { + return false, err + } + // 获取锁,尝试认领该task + lockOK, err = scrCli.DtsLockKey(taskRows[0].TaskLockKey(), job.ServerIP, 120) + if err != nil { + return false, err + } + if !lockOK { + // 已经有其他dtsserver在尝试认领该task,遍历下一个task + 
+ job.logger.Info(fmt.Sprintf(
+ `billId:%d srcCluster:%s dstCluster:%s srcRedis:%s#%d 已经有其他dts_server在调度,放弃调度`,
+ taskRows[0].BillID, taskRows[0].SrcCluster,
+ taskRows[0].DstCluster, taskRows[0].SrcIP, taskRows[0].SrcPort))
+ return false, nil
+ }
+ job.logger.Info(fmt.Sprintf("myself:%s get task dts lock ok,key:%s", job.ServerIP, taskRows[0].TaskLockKey()))
+ // 尝试认领task成功
+ job.logger.Info(fmt.Sprintf(
+ `myself:%s 认领task,下一步开始迁移,billID:%d srcCluster:%s dstCluster:%s srcRedis:%s#%d`,
+ job.ServerIP, taskRows[0].BillID, taskRows[0].SrcCluster,
+ taskRows[0].DstCluster, taskRows[0].SrcIP, taskRows[0].SrcPort))
+ taskIDs := make([]int64, 0, len(taskRows))
+ for _, tmpTask := range taskRows {
+ task := tmpTask
+ taskIDs = append(taskIDs, task.ID)
+ }
+ taskRows[0].DtsServer = job.ServerIP
+ taskRows[0].TaskType = job.getFirstTaskType(taskRows[0])
+ taskRows[0].Status = 0
+ colToVal, err := taskRows[0].GetColToValByFields([]string{"DtsServer", "TaskType", "Status"}, job.logger)
+ if err != nil {
+ return false, err
+ }
+ _, err = tendisdb.UpdateDtsTaskRows(taskIDs, colToVal, job.logger)
+ if err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+// TasksGroupBySlaveAddr 按照slaveAddr分组
+func (job *TendisplusDtsJob) TasksGroupBySlaveAddr(taskRows []*tendisdb.TbTendisDTSTask) (
+ slaveAddrToTasks map[string][]*tendisdb.TbTendisDTSTask, err error) {
+ slaveAddrToTasks = make(map[string][]*tendisdb.TbTendisDTSTask)
+ var slaveAddr string
+ var ok bool
+ for _, tmpRow := range taskRows {
+ row := tmpRow
+ slaveAddr = fmt.Sprintf("%d|%s|%s|%s|%d", row.BillID, row.SrcCluster, row.DstCluster, row.SrcIP, row.SrcPort)
+ if _, ok = slaveAddrToTasks[slaveAddr]; !ok {
+ slaveAddrToTasks[slaveAddr] = []*tendisdb.TbTendisDTSTask{}
+ }
+ slaveAddrToTasks[slaveAddr] = append(slaveAddrToTasks[slaveAddr], row)
+ }
+ return
+}
+
+// ClaimDtsJobs 认领tendisplus dts任务
+func (job *TendisplusDtsJob) ClaimDtsJobs() (err error) {
+ var memOK bool
+ var availMemSizeMigration int64
+ var toScheduleTasks []*tendisdb.TbTendisDTSTask
+ var slaveAddrToTasks map[string][]*tendisdb.TbTendisDTSTask
+ var srcSlaveConcurrOK bool
+ var acceptOk bool
+ // 迁移tendisplus时不受本地数据量限制,所以这里用math.MaxInt64
+ var maxInt64 int64 = math.MaxInt64
+ succClaimTaskCnt := 0
+ defer func() {
+ if r := recover(); r != nil {
+ job.logger.Error(string(debug.Stack()))
+ }
+ }()
+ for {
+ time.Sleep(1 * time.Minute)
+ job.logger.Info(fmt.Sprintf("dts_server:%s start claim tendisplus dts jobs", job.ServerIP))
+ // 如果dts_server在黑名单中,则不认领task
+ if scrdbclient.IsMyselfInBlacklist(job.logger) {
+ job.logger.Info(fmt.Sprintf("dts_server:%s in dts_server blacklist,stop accept tendisplus dts task", job.ServerIP))
+ continue
+ }
+ memOK, availMemSizeMigration, err = job.IsDataMigrationExceedingMemLimit()
+ if err != nil {
+ continue
+ }
+ if !memOK {
+ continue
+ }
+ toScheduleJobs, err := tendisdb.GetLast30DaysToScheduleJobs(job.BkCloudID, maxInt64, job.ZoneName,
+ constvar.TendisTypeTendisplusInsance, job.logger)
+ if err != nil {
+ continue
+ }
+ if len(toScheduleJobs) == 0 {
+ job.logger.Info(fmt.Sprintf(
+ "tendisplus GetLast30DaysToScheduleJobs empty record,ZoneName:%s,dbType:%s",
+ job.ZoneName, constvar.TendisTypeTendisplusInsance))
+ continue
+ }
+ succClaimTaskCnt = 0
+ for _, tmpJob := range toScheduleJobs {
+ jobItem := tmpJob
+ toScheduleTasks, err = tendisdb.GetJobToScheduleTasks(
+ jobItem.BillID, jobItem.SrcCluster, jobItem.DstCluster, job.logger)
+ if err != nil {
+ // 执行下一个Job的遍历
+ continue
+ }
+ if
len(toScheduleTasks) == 0 { + continue + } + slaveAddrToTasks, err = job.TasksGroupBySlaveAddr(toScheduleTasks) + if err != nil { + continue + } + for _, taskRows := range slaveAddrToTasks { + if availMemSizeMigration < 1*constvar.GiByte { + // 如果可用内存小于1GB,则不再继续 + break + } + // 检查源slave机器是否还能新增迁移task + srcSlaveConcurrOK, err = job.CheckSrcSlaveServerConcurrency(taskRows[0], constvar.TendisplusMigratingTasksType) + if err != nil { + break + } + if !srcSlaveConcurrOK { + continue + } + // 尝试认领task + acceptOk, err = job.TryAcceptTasks(taskRows) + if err != nil { + continue + } + if !acceptOk { + continue + } + // 减去预估将使用掉的内存 + availMemSizeMigration = availMemSizeMigration - int64(len(taskRows))*int64(job.GetMemSizePerKvStoreSync()) + succClaimTaskCnt++ + // 如果认领的task个数 超过 tendisplus_start_sync limit,则等待下一次调度 + if succClaimTaskCnt > job.GetTaskParallelLimit(constvar.TendisplusMakeSyncTaskType) { + break + } + } + if err != nil { + // 执行下一个job的遍历 + continue + } + // 如果认领的task个数 超过 tendisplus_start_sync limit,则等待下一次调度 + if succClaimTaskCnt > job.GetTaskParallelLimit(constvar.TendisplusMakeSyncTaskType) { + break + } + } + } +} + +// StartBgWorkers 拉起多个后台goroutine +func (job *TendisplusDtsJob) StartBgWorkers() { + // tendisplus + // 监听以前的迁移中的task + job.BgOldRunningSyncTaskWatcher(constvar.TendisplusSendBulkTaskType, constvar.TendisTypeTendisplusInsance, 1) + job.BgOldRunningSyncTaskWatcher(constvar.TendisplusSendIncrTaskType, constvar.TendisTypeTendisplusInsance, 1) + // 在tasks被认领后,后台负责执行task的worker + go func() { + job.wg.Add(1) + defer job.wg.Done() + job.BgDtsTaskRunnerWithoutLimit(constvar.TendisplusMakeSyncTaskType, constvar.TendisTypeTendisplusInsance) + }() + // 根据dts_server自身情况尝试认领 task + go func() { + job.wg.Add(1) + defer job.wg.Done() + job.ClaimDtsJobs() + }() +} diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/dtsTask.go b/dbm-services/redis/redis-dts/pkg/dtsTask/dtsTask.go new file mode 100644 index 0000000000..7d65e8862b --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/dtsTask.go @@ -0,0 +1,2 @@ +// Package dtsTask TODO +package dtsTask diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/factory/factory.go b/dbm-services/redis/redis-dts/pkg/dtsTask/factory/factory.go new file mode 100644 index 0000000000..a983953def --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/factory/factory.go @@ -0,0 +1,46 @@ +// Package factory TODO +package factory + +import ( + "dbm-services/redis/redis-dts/models/mysql/tendisdb" + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/pkg/dtsTask/rediscache" + "dbm-services/redis/redis-dts/pkg/dtsTask/tendisplus" + "dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd" +) + +// MyTasker task接口 +type MyTasker interface { + TaskType() string + NextTask() string + Init() + Execute() +} + +// MyTaskFactory task工厂 +func MyTaskFactory(taskRow *tendisdb.TbTendisDTSTask) MyTasker { + if taskRow.TaskType == (&tendisssd.TendisBackupTask{}).TaskType() { + // tendis-ssd + return tendisssd.NewTendisBackupTask(taskRow) + } else if taskRow.TaskType == (&tendisssd.BakcupFileFetchTask{}).TaskType() { + return tendisssd.NewBakcupFileFetchTask(taskRow) + } else if taskRow.TaskType == (&tendisssd.TredisdumpTask{}).TaskType() { + return tendisssd.NewTredisdumpTask(taskRow) + } else if taskRow.TaskType == (&tendisssd.CmdsImporterTask{}).TaskType() { + return tendisssd.NewCmdsImporterTask(taskRow) + } else if taskRow.TaskType == (&tendisssd.MakeSyncTask{}).TaskType() { + return tendisssd.NewMakeSyncTask(taskRow) + } else if 
taskRow.TaskType == (&rediscache.MakeCacheSyncTask{}).TaskType() { + // redis-cache + return rediscache.NewMakeCacheSyncTask(taskRow) + } else if taskRow.TaskType == (&rediscache.WatchCacheSyncTask{}).TaskType() { + return rediscache.NewWatchCacheSyncTask(taskRow) + } else if taskRow.TaskType == (&tendisplus.MakeSyncTask{}).TaskType() { + // tendisplus + return tendisplus.NewMakeSyncTask(taskRow) + } else if taskRow.TaskType == constvar.TendisplusSendBulkTaskType || + taskRow.TaskType == constvar.TendisplusSendIncrTaskType { + return tendisplus.NewWatchSyncTask(taskRow) + } + return nil +} diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/init.go b/dbm-services/redis/redis-dts/pkg/dtsTask/init.go new file mode 100644 index 0000000000..c10de34961 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/init.go @@ -0,0 +1,670 @@ +package dtsTask + +import ( + "dbm-services/redis/redis-dts/models/myredis" + "dbm-services/redis/redis-dts/models/mysql/tendisdb" + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/pkg/scrdbclient" + "dbm-services/redis/redis-dts/tclog" + "dbm-services/redis/redis-dts/util" + "encoding/base64" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/dustin/go-humanize" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +// FatherTask 迁移父task +type FatherTask struct { + RowData *tendisdb.TbTendisDTSTask `json:"rowData"` + valueChangedFields []string // 值已变化的字段名 + TaskDir string `json:"taskDir"` + Logger *zap.Logger `json:"-"` + Err error `json:"-"` +} + +// NewFatherTask 新建tredisdump task +func NewFatherTask(row *tendisdb.TbTendisDTSTask) FatherTask { + ret := FatherTask{} + ret.RowData = row + return ret +} + +// SetStatus 设置status的值 +func (t *FatherTask) SetStatus(status int) { + t.RowData.Status = status + t.valueChangedFields = append(t.valueChangedFields, "Status") +} + +// SetTaskType 设置task_type的值 +func (t *FatherTask) SetTaskType(taskType string) { + t.RowData.TaskType = taskType + t.valueChangedFields = append(t.valueChangedFields, "TaskType") +} + +// SetMessage 设置message的值 +func (t *FatherTask) SetMessage(format string, args ...interface{}) { + if len(args) == 0 { + t.RowData.Message = format + } else { + t.RowData.Message = fmt.Sprintf(format, args...) 
+ } + t.valueChangedFields = append(t.valueChangedFields, "Message") +} + +// SetFetchFile set function +func (t *FatherTask) SetFetchFile(file string) { + t.RowData.FetchFile = file + t.valueChangedFields = append(t.valueChangedFields, "FetchFile") +} + +// SetSqlfileDir set function +func (t *FatherTask) SetSqlfileDir(dir string) { + t.RowData.SqlfileDir = dir + t.valueChangedFields = append(t.valueChangedFields, "SqlfileDir") +} + +// SetSyncOperate set function +func (t *FatherTask) SetSyncOperate(op string) { + t.RowData.SyncOperate = op + t.valueChangedFields = append(t.valueChangedFields, "SyncOperate") +} + +// SetTendisBinlogLag set function +func (t *FatherTask) SetTendisBinlogLag(lag int64) { + t.RowData.TendisBinlogLag = lag + t.valueChangedFields = append(t.valueChangedFields, "TendisBinlogLag") +} + +// SetSrcNewLogCount set function +func (t *FatherTask) SetSrcNewLogCount(logcnt int64) { + t.RowData.SrcNewLogCount = logcnt + t.valueChangedFields = append(t.valueChangedFields, "SrcNewLogCount") +} + +// SetSrcOldLogCount set function +func (t *FatherTask) SetSrcOldLogCount(logcnt int64) { + t.RowData.SrcOldLogCount = logcnt + t.valueChangedFields = append(t.valueChangedFields, "SrcOldLogCount") +} + +// SetIsSrcLogCountRestored set function +func (t *FatherTask) SetIsSrcLogCountRestored(isRestored int) { + t.RowData.IsSrcLogCountRestored = isRestored + t.valueChangedFields = append(t.valueChangedFields, "IsSrcLogCountRestored") +} + +// SetIgnoreErrlist set function +func (t *FatherTask) SetIgnoreErrlist(errlist string) { + t.RowData.IgnoreErrlist = errlist + t.valueChangedFields = append(t.valueChangedFields, "IgnoreErrlist") +} + +// SetSyncerPort set function +func (t *FatherTask) SetSyncerPort(syncport int) { + t.RowData.SyncerPort = syncport + t.valueChangedFields = append(t.valueChangedFields, "SyncerPort") +} + +// SetSyncerPid set function +func (t *FatherTask) SetSyncerPid(syncpid int) { + t.RowData.SyncerPid = syncpid + t.valueChangedFields = append(t.valueChangedFields, "SyncerPid") +} + +// SetSrcHaveListKeys set function +func (t *FatherTask) SetSrcHaveListKeys(havelist int) { + t.RowData.SrcHaveListKeys = havelist + t.valueChangedFields = append(t.valueChangedFields, "SrcHaveListKeys") +} + +// SetTendisbackupFile set function +func (t *FatherTask) SetTendisbackupFile(file string) { + t.RowData.TendisbackupFile = file + t.valueChangedFields = append(t.valueChangedFields, "TendisbackupFile") +} + +// SetDtsServer set function +func (t *FatherTask) SetDtsServer(svrip string) { + t.RowData.DtsServer = svrip + t.valueChangedFields = append(t.valueChangedFields, "DtsServer") +} + +// UpdateDbAndLogLocal update db相关字段 并记录本地日志 +func (t *FatherTask) UpdateDbAndLogLocal(format string, args ...interface{}) { + t.SetMessage(format, args...) 
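+ // 先更新内存中的message,再把已变化字段刷入DB,最后记录本地日志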
+ t.UpdateRow()
+ t.Logger.Info(t.RowData.Message)
+}
+
+// UpdateRow update tendisdb相关字段(值变化了的字段)
+func (t *FatherTask) UpdateRow() {
+ if len(t.valueChangedFields) == 0 {
+ return
+ }
+ t.RowData.UpdateFieldsValues(t.valueChangedFields, t.Logger)
+ t.valueChangedFields = []string{}
+}
+
+// Init 初始化
+func (t *FatherTask) Init() {
+ defer func() {
+ if t.Err != nil {
+ t.SetStatus(-1)
+ t.SetMessage(t.Err.Error())
+ } else {
+ t.SetStatus(1) // 更新为running状态
+ }
+ t.UpdateRow()
+ }()
+ t.Err = t.InitLogger()
+ if t.Err != nil {
+ return
+ }
+ if t.RowData.SyncOperate == constvar.RedisForceKillTaskTodo {
+ t.RowData.SyncOperate = constvar.RedisForceKillTaskSuccess
+ t.Err = fmt.Errorf(constvar.RedisForceKillTaskSuccess + "...")
+ return
+ }
+}
+
+// InitTaskDir 初始化本地任务目录
+func (t *FatherTask) InitTaskDir() error {
+ currExecPath, err := util.CurrentExecutePath()
+ if err != nil {
+ return err
+ }
+ domainPort := strings.Split(t.RowData.SrcCluster, ":")
+ subDir := fmt.Sprintf("tasks/%d_%s_%s/%s_%d", t.RowData.BillID,
+ domainPort[0], domainPort[1], t.RowData.SrcIP, t.RowData.SrcPort)
+ t.TaskDir = filepath.Join(currExecPath, subDir)
+ err = util.MkDirIfNotExists(t.TaskDir)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// InitLogger 初始化日志文件logger
+func (t *FatherTask) InitLogger() error {
+ err := t.InitTaskDir()
+ if err != nil {
+ return err
+ }
+ var logFile string
+ if t.RowData.SrcDbType == constvar.TendisTypeTendisplusInsance {
+ logFile = fmt.Sprintf("task_%s_%d_kvstore_%d.log",
+ t.RowData.SrcIP, t.RowData.SrcPort, t.RowData.SrcKvStoreID)
+ } else {
+ logFile = fmt.Sprintf("task_%s_%d.log", t.RowData.SrcIP, t.RowData.SrcPort)
+ }
+ fullPath := filepath.Join(t.TaskDir, logFile)
+ t.Logger = tclog.NewFileLogger(fullPath)
+ return nil
+}
+
+// IsSupportPipeImport 是否支持 redis-cli --pipe < $file 导入
+func (t *FatherTask) IsSupportPipeImport() bool {
+ // if strings.HasPrefix(t.RowData.DstCluster, "tendisx") || constvar.IsGlobalEnv() == true {
+ // return true
+ // }
+ return true
+}
+
+// TredisdumpOuputFormat tredisdump结果文件内容格式,resp格式 或 普通命令格式
+func (t *FatherTask) TredisdumpOuputFormat() string {
+ if t.IsSupportPipeImport() {
+ return constvar.TredisdumpRespFormat
+ }
+ return constvar.TredisdumpCmdFormat
+}
+
+// TredisdumpOuputFileSize tredisdump结果文件大小
+func (t *FatherTask) TredisdumpOuputFileSize() uint64 {
+ var fileSize uint64 = 0
+ var sizeStr string
+ if t.IsSupportPipeImport() {
+ sizeStr = viper.GetString("tredisdumpOutputRespFileSize")
+ fileSize, _ = humanize.ParseBytes(sizeStr)
+ if fileSize <= 0 {
+ fileSize = constvar.MiByte // resp格式,单文件默认1MB
+ } else if fileSize > 100*constvar.MiByte {
+ fileSize = 100 * constvar.MiByte // redis-cli --pipe < $file 导入单个文件不宜过大,否则可能导致proxy oom,最大100MB
+ }
+ } else {
+ sizeStr = viper.GetString("tredisdumpOutputCmdFileSize")
+ fileSize, _ = humanize.ParseBytes(sizeStr)
+ if fileSize <= 0 {
+ fileSize = 1 * constvar.GiByte // 普通命令格式,单个文件默认1GB
+ } else if fileSize > 50*constvar.GiByte {
+ fileSize = 50 * constvar.GiByte // redis-cli < $file 导入单个文件不宜过大,否则耗时很长,最大50GB
+ }
+ }
+ return fileSize
+}
+
+// ImportParallelLimit 导入并发度
+func (t *FatherTask) ImportParallelLimit() int {
+ limit := 0
+ if t.IsSupportPipeImport() {
+ limit = viper.GetInt("respFileImportParallelLimit")
+ if limit <= 0 {
+ limit = 1
+ } else if limit > 10 {
+ limit = 20 // redis-cli --pipe < $file 导入数据,并发度不宜过大
+ }
+ } else {
+ limit = viper.GetInt("cmdFileImportParallelLimit")
+ if limit <= 0 {
+ limit = 40
+ } else if limit > 100 {
+ limit = 100 // redis-cli < $file 导入并发度控制100以内
+ }
+ }
+ return limit
+}
+
+// ImportTimeout 导入超时时间(秒)
+func (t *FatherTask) ImportTimeout() int {
+ timeout := 0
+ if t.IsSupportPipeImport() {
+ timeout = viper.GetInt("respFileImportTimeout")
+ if timeout <= 0 {
+ timeout = 120 // redis-cli --pipe --pipe-timeout 默认120s超时
+ } else if timeout > 600 {
+ timeout = 600 // redis-cli --pipe --pipe-timeout < $file,最大超时10分钟
+ }
+ } else {
+ timeout = viper.GetInt("cmdFileImportTimeout")
+ if timeout <= 0 {
+ timeout = 604800 // redis-cli < $file,默认7天超时
+ } else if timeout > 604800 {
+ timeout = 604800 // redis-cli < $file 导入最大7天超时
+ }
+ }
+ return timeout
+}
+
+func (t *FatherTask) newSrcRedisClient() *myredis.RedisWorker {
+ srcAddr := fmt.Sprintf("%s:%d", t.RowData.SrcIP, t.RowData.SrcPort)
+ srcPasswd, err := base64.StdEncoding.DecodeString(t.RowData.SrcPassword)
+ if err != nil {
+ t.Logger.Error("newSrcRedisClient base64.decode srcPasswd fail",
+ zap.Error(err), zap.String("rowData", t.RowData.ToString()))
+ t.Err = fmt.Errorf("newSrcRedisClient get src password fail,err:%v", err)
+ return nil
+ }
+ srcClient, err := myredis.NewRedisClient(srcAddr, string(srcPasswd), 0, t.Logger)
+ if err != nil {
+ t.Err = err
+ return nil
+ }
+ return srcClient
+}
+
+// SaveSrcSSDKeepCount 保存source ssd的 slave-log-keep-count值
+func (t *FatherTask) SaveSrcSSDKeepCount() {
+ var logcnt int64
+ srcClient := t.newSrcRedisClient()
+ if t.Err != nil {
+ return
+ }
+ defer srcClient.Close()
+
+ keepCountMap, err := srcClient.ConfigGet("slave-log-keep-count")
+ if err != nil {
+ t.Err = err
+ return
+ }
+ if len(keepCountMap) > 0 {
+ val01, ok := keepCountMap["slave-log-keep-count"]
+ if ok == true {
+ t.Logger.Info("SaveSrcSSDKeepCount slave-log-keep-count old value...",
+ zap.String("slave-log-keep-count", val01))
+ logcnt, _ = strconv.ParseInt(val01, 10, 64)
+ t.SetSrcOldLogCount(logcnt)
+ t.SetIsSrcLogCountRestored(1)
+ t.UpdateRow()
+ return
+ }
+ }
+ return
+}
+
+// RestoreSrcSSDKeepCount 恢复source ssd的 slave-log-keep-count值
+func (t *FatherTask) RestoreSrcSSDKeepCount() {
+ srcClient := t.newSrcRedisClient()
+ if t.Err != nil {
+ return
+ }
+ defer srcClient.Close()
+
+ _, t.Err = srcClient.ConfigSet("slave-log-keep-count", t.RowData.SrcOldLogCount)
+ if t.Err != nil {
+ return
+ }
+ t.SetIsSrcLogCountRestored(2)
+ t.UpdateDbAndLogLocal("slave-log-keep-count restore ok")
+
+ return
+}
+
+// ChangeSrcSSDKeepCount 修改source ssd的 slave-log-keep-count值
+func (t *FatherTask) ChangeSrcSSDKeepCount(dstKeepCount int64) {
+ srcClient := t.newSrcRedisClient()
+ if t.Err != nil {
+ return
+ }
+ defer srcClient.Close()
+
+ _, t.Err = srcClient.ConfigSet("slave-log-keep-count", dstKeepCount)
+ if t.Err != nil {
+ return
+ }
+ return
+}
+
+// GetSyncSeqFromFullBackup get sync pos from full backup
+func (t *FatherTask) GetSyncSeqFromFullBackup() (ret *SyncSeqItem) {
+ var err error
+ ret = &SyncSeqItem{}
+ syncPosFile := filepath.Join(t.RowData.SqlfileDir, "sync-pos.txt")
+ _, err = os.Stat(syncPosFile)
+ if err != nil && os.IsNotExist(err) == true {
+ t.Err = fmt.Errorf("%s not exists,err:%v", syncPosFile, err)
+ t.Logger.Error(t.Err.Error())
+ return nil
+ }
+ posData, err := ioutil.ReadFile(syncPosFile)
+ if err != nil {
+ t.Logger.Error("Read sync-pos.txt fail", zap.Error(err),
+ zap.String("syncPosFile", syncPosFile))
+ t.Err = fmt.Errorf("Read sync-pos.txt fail.err:%v", err)
+ return nil
+ }
+ posStr := string(posData)
+ posStr = strings.TrimSpace(posStr)
+ posList := strings.Fields(posStr)
+ if len(posList) < 2 {
+ t.Err = fmt.Errorf("sync-pos.txt content not correct,syncPosFile:%s", syncPosFile)
+ t.Logger.Error(t.Err.Error())
+ return nil
+ }
+ t.Logger.Info("sync-pos.txt content ...", zap.String("syncPosData", posStr))
+ ret.RunID = posList[0]
+ ret.Seq, err = strconv.ParseUint(posList[1], 10, 64)
+ if err != nil {
+ t.Err = fmt.Errorf("sync-pos.txt seq:%s to uint64 fail,err:%v", posList[1], err)
+ t.Logger.Error(t.Err.Error())
+ return nil
+ }
+ return ret
+}
+
+// ConfirmSrcRedisBinlogOK confirm binlog seq is OK in src redis
+func (t *FatherTask) ConfirmSrcRedisBinlogOK(seq uint64) {
+ srcAddr := fmt.Sprintf("%s:%d", t.RowData.SrcIP, t.RowData.SrcPort)
+ srcPasswd, err := base64.StdEncoding.DecodeString(t.RowData.SrcPassword)
+ if err != nil {
+ t.Logger.Error(constvar.TendisBackupTaskType+" init base64.decode srcPasswd fail",
+ zap.Error(err), zap.String("rowData", t.RowData.ToString()))
+ t.Err = fmt.Errorf("[fatherTask] get src password fail,err:%v", err)
+ return
+ }
+ srcClient, err := myredis.NewRedisClient(srcAddr, string(srcPasswd), 0, t.Logger)
+ if err != nil {
+ t.Err = err
+ return
+ }
+ defer srcClient.Close()
+ srcBinlogSeqRange, err := srcClient.TendisSSDBinlogSize()
+ if err != nil {
+ t.Err = err
+ return
+ }
+ if seq < srcBinlogSeqRange.FirstSeq {
+ t.Err = fmt.Errorf("srcRedis:%s current binlog seq range:[%d,%d] > seq:%d",
+ srcAddr, srcBinlogSeqRange.FirstSeq, srcBinlogSeqRange.EndSeq, seq)
+ t.Logger.Error(t.Err.Error())
+ return
+ }
+ if seq > srcBinlogSeqRange.EndSeq {
+ t.Err = fmt.Errorf("srcRedis:%s current binlog seq range:[%d,%d] < seq:%d",
+ srcAddr, srcBinlogSeqRange.FirstSeq, srcBinlogSeqRange.EndSeq, seq)
+ t.Logger.Error(t.Err.Error())
+ return
+ }
+ t.Logger.Info(fmt.Sprintf("srcRedis:%s current binlog seq range:[%d,%d],seq:%d is ok",
+ srcAddr, srcBinlogSeqRange.FirstSeq, srcBinlogSeqRange.EndSeq, seq))
+
+ return
+}
+
+// ClearSrcHostBackup clear src redis remote backup
+func (t *FatherTask) ClearSrcHostBackup() {
+ if strings.Contains(t.RowData.TendisbackupFile, "REDIS_FULL_rocksdb_") == false {
+ return
+ }
+ // 清理srcIP上的backupFile文件,避免占用过多空间
+ t.Logger.Info("ClearSrcHostBackup srcIP clear backupfile",
+ zap.String("cmd", fmt.Sprintf(`cd /data/dbbak/ && rm -rf %s >/dev/null 2>&1`, t.RowData.TendisbackupFile)),
+ zap.String("srcIP", t.RowData.SrcIP))
+
+ rmCmd := fmt.Sprintf(`cd /data/dbbak/ && rm -rf %s >/dev/null 2>&1`, t.RowData.TendisbackupFile)
+ cli, err := scrdbclient.NewClient(constvar.BkDbm, t.Logger)
+ if err != nil {
+ t.Err = err
+ return
+ }
+ _, err = cli.ExecNew(scrdbclient.FastExecScriptReq{
+ Account: "mysql",
+ Timeout: 3600,
+ ScriptLanguage: 1,
+ ScriptContent: rmCmd,
+ IPList: []scrdbclient.IPItem{
+ {
+ BkCloudID: int(t.RowData.BkCloudID),
+ IP: t.RowData.SrcIP,
+ },
+ },
+ }, 5)
+ if err != nil {
+ t.Err = err
+ return
+ }
+}
+
+// ClearLocalFetchBackup clear src redis local backup
+func (t *FatherTask) ClearLocalFetchBackup() {
+ srcAddr := fmt.Sprintf("%s_%d", t.RowData.SrcIP, t.RowData.SrcPort)
+ if strings.Contains(t.RowData.FetchFile, srcAddr) == false {
+ // fetchFile 必须包含 srcAddr,否则不确定传入的是什么参数,对未知目录 rm -rf 很危险
+ t.Logger.Warn("ClearLocalFetchBackup fetchFile not include srcAddr",
+ zap.String("fetchFile", t.RowData.FetchFile), zap.String("srcAddr", srcAddr))
+ return
+ }
+ _, err := os.Stat(t.RowData.FetchFile)
+ if err == nil {
+ // 文件存在,则清理
+ rmCmd := fmt.Sprintf("rm -rf %s > /dev/null 2>&1", t.RowData.FetchFile)
+ t.Logger.Info(fmt.Sprintf("ClearLocalFetchBackup execute localCmd:%s", rmCmd))
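+ // 以 bash -c 方式本地执行删除,下方120s为RunLocalCmd的执行超时时间
+ util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil,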
120*time.Second, t.Logger) + } +} + +// ClearLocalSQLDir clear local sql dir(backup to commands) +func (t *FatherTask) ClearLocalSQLDir() { + srcAddr := fmt.Sprintf("%s_%d", t.RowData.SrcIP, t.RowData.SrcPort) + if strings.Contains(t.RowData.SqlfileDir, srcAddr) == false { + // fetchFile 必须包含 srcAddr,否则不确定传入的是什么参数,对未知目录 rm -rf 很危险 + t.Logger.Warn("ClearLocalSqlDir sqlDir not include srcAddr", + zap.String("sqlDir", t.RowData.SqlfileDir), zap.String("srcAddr", srcAddr)) + return + } + _, err := os.Stat(t.RowData.SqlfileDir) + if err == nil { + // 文件存在,则清理 + rmCmd := fmt.Sprintf("rm -rf %s > /dev/null 2>&1", t.RowData.SqlfileDir) + t.Logger.Info(fmt.Sprintf("ClearLocalSqlDir execute localCmd:%s", rmCmd)) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 120*time.Second, t.Logger) + } +} + +// DealProcessPid 处理进程id; +// 如用户发送 ForceKillTaskTodo '强制终止' 指令,则tredisdump、redis-cli等命令均执行kill操作 +func (t *FatherTask) DealProcessPid(pid int) error { + go func(pid01 int) { + bakTaskType := t.RowData.TaskType + bakStatus := t.RowData.Status + if bakStatus != 1 { + return + } + for { + time.Sleep(10 * time.Second) + row01, err := tendisdb.GetTaskByID(t.RowData.ID, t.Logger) + if err != nil { + continue + } + if row01 == nil { + t.UpdateDbAndLogLocal("根据task_id:%d获取task row失败,row01:%v", t.RowData.ID, row01) + continue + } + if row01.SyncOperate == constvar.RedisForceKillTaskTodo { + // 命令执行中途,用户要求强制终止 + retryTimes := 0 + for retryTimes < 5 { + retryTimes++ + isAlive, err := util.CheckProcessAlive(pid01) + if err != nil { + tclog.Logger.Error(err.Error() + ",retry ...") + time.Sleep(1 * time.Second) + continue + } + if !isAlive { + t.Logger.Error(fmt.Sprintf("kill pid:%d success", pid01)) + break + } + err = util.KillProcess(pid01) + if err != nil { + t.Logger.Error(fmt.Sprintf("kill pid:%d fail,err:%v", pid01, err)) + continue + } + break + } + t.RowData.SyncOperate = constvar.RedisForceKillTaskSuccess + t.Err = fmt.Errorf("%s...", constvar.RedisForceKillTaskSuccess) + return + } + if row01.TaskType != bakTaskType || row01.Status != bakStatus { + // task本阶段已执行完 + return + } + } + }(pid) + return nil +} + +// TredisdumpThreadCnt get tredisdump threadcnt +func (t *FatherTask) TredisdumpThreadCnt() int { + threadCnt := viper.GetInt("tredisdumpTheadCnt") + if threadCnt <= 0 { + threadCnt = 10 // default 10 + } else if threadCnt > 50 { + threadCnt = 50 // max threadcnt 50,并发度不宜过大 + } + return threadCnt +} + +// SaveIgnoreErrs 记录忽略的错误类型 +func (t *FatherTask) SaveIgnoreErrs(igErrs []string) { + isUpdated := false + for _, igErr := range igErrs { + if strings.Contains(t.RowData.IgnoreErrlist, igErr) == false { + if t.RowData.IgnoreErrlist == "" { + t.SetIgnoreErrlist(igErr) + } else { + t.SetIgnoreErrlist(t.RowData.IgnoreErrlist + "," + igErr) + } + isUpdated = true + } + } + if isUpdated == true { + t.UpdateRow() + } +} + +// IsMatchAny is match all +func (t *FatherTask) IsMatchAny(reg01 string) bool { + return reg01 == "*" || reg01 == ".*" || reg01 == "^.*$" +} + +// RefreshRowData refresh task row data +func (task *FatherTask) RefreshRowData() { + row01, err := tendisdb.GetTaskByID(task.RowData.ID, task.Logger) + if err != nil { + task.Err = err + return + } + if row01 == nil { + task.Err = fmt.Errorf("get task row data empty record,task_id:%d", task.RowData.ID) + task.UpdateDbAndLogLocal("根据task_id:%d获取task row失败,row01:%v", task.RowData.ID, row01) + return + } + task.RowData = row01 +} + +// GetSrcRedisAddr 源redis_addr +func (task *FatherTask) GetSrcRedisAddr() string { + return task.RowData.SrcIP + 
":" + strconv.Itoa(task.RowData.SrcPort) +} + +// GetSrcRedisPasswd 源redis_password +func (task *FatherTask) GetSrcRedisPasswd() string { + srcPasswd, err := base64.StdEncoding.DecodeString(task.RowData.SrcPassword) + if err != nil { + task.Err = fmt.Errorf("decode srcPassword fail,err:%v,taskid:%d", err, task.RowData.ID) + task.UpdateDbAndLogLocal("decode srcPassword fail,err:%v,encodedPassword:%s,taskID:%d", + err, task.RowData.SrcPassword, task.RowData.ID) + } + return string(srcPasswd) +} + +// GetDstRedisAddr 目的redis_addr +func (task *FatherTask) GetDstRedisAddr() string { + return task.RowData.DstCluster +} + +// GetDstRedisPasswd 目的redis_password +func (task *FatherTask) GetDstRedisPasswd() string { + dstPasswd, err := base64.StdEncoding.DecodeString(task.RowData.DstPassword) + if err != nil { + task.Err = fmt.Errorf("decode DstPassword fail,err:%v,taskid:%d", err, task.RowData.ID) + task.UpdateDbAndLogLocal("decode DstPassword fail,err:%v,encodedPassword:%s,taskID:%d", + err, task.RowData.DstPassword, task.RowData.ID) + } + return string(dstPasswd) +} + +// GetTaskParallelLimit 从配置文件中获取每一类task的并发度 +func GetTaskParallelLimit(taskType string) int { + limit01 := viper.GetInt(taskType + "ParallelLimit") + if limit01 == 0 { + limit01 = 5 // 默认值5 + } + return limit01 +} + +// PortSyncerMut 保证makeSync串行执行,因为redis-syncer启动前需要获取port信息,避免彼此抢占 +var PortSyncerMut *sync.Mutex + +func init() { + PortSyncerMut = &sync.Mutex{} +} diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/rediscache/makeCacheSync.go b/dbm-services/redis/redis-dts/pkg/dtsTask/rediscache/makeCacheSync.go new file mode 100644 index 0000000000..5a04e8d079 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/rediscache/makeCacheSync.go @@ -0,0 +1,746 @@ +package rediscache + +import ( + "context" + "dbm-services/redis/redis-dts/models/myredis" + "dbm-services/redis/redis-dts/models/mysql/tendisdb" + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/pkg/dtsTask" + "dbm-services/redis/redis-dts/util" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/jinzhu/gorm" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +const ( + // ShakeWaitFullStatus waitfull + ShakeWaitFullStatus = "waitfull" + // ShakeFullStatus full + ShakeFullStatus = "full" + // ShakeIncrStatus incr + ShakeIncrStatus = "incr" +) + +// MakeCacheSyncTask cache_task +type MakeCacheSyncTask struct { + dtsTask.FatherTask + RedisShakeBin string `json:"redisSahkeBin"` + ShakeLogFile string `json:"shakeLogFile"` + ShakeConfFile string `json:"shakeConfFile"` + SystemProfile int `json:"systemProfile"` + HTTPProfile int `json:"httpProfile"` + SrcADDR string `json:"srcAddr"` + SrcPassword string `json:"srcPassword"` + DstADDR string `json:"dstAddr"` + DstPassword string `json:"dstPassword"` + DstVersion string `json:"dstVersion"` +} + +// TaskType task类型 +func (task *MakeCacheSyncTask) TaskType() string { + return constvar.MakeCacheSyncTaskType +} + +// NextTask 下一个task类型 +func (task *MakeCacheSyncTask) NextTask() string { + return constvar.WatchCacheSyncTaskType +} + +// NewMakeCacheSyncTask 新建一个 RedisShake启动task +func NewMakeCacheSyncTask(row *tendisdb.TbTendisDTSTask) *MakeCacheSyncTask { + return &MakeCacheSyncTask{ + FatherTask: dtsTask.NewFatherTask(row), + } +} + +// PreClear 关闭以前生成的redis-shake +func (task *MakeCacheSyncTask) PreClear() { + if task.Err != nil { + return + } + if task.RowData.SyncerPort == 0 { + return + } + defer func() 
{ + // clear old sync config file and log file + syncDir := task.MkSyncDirIfNotExists() + if task.Err != nil { + return + } + rmCmd := fmt.Sprintf("cd %s && rm -rf *-taskid%d-*.conf log/", syncDir, task.RowData.ID) + task.Logger.Info(fmt.Sprintf("makeCacheSync preClear execute:%s", rmCmd)) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 10*time.Second, task.Logger) + }() + + task.RedisShakeStop() + return +} + +// Execute 执行启动redis-shake +func (task *MakeCacheSyncTask) Execute() { + if task.Err != nil { + return + } + + defer func() { + if task.Err != nil { + task.SetStatus(-1) + task.SetMessage(task.Err.Error()) + task.UpdateRow() + } + }() + + task.SetStatus(1) + task.UpdateDbAndLogLocal("开始启动redis-shake") + + srcPasswd, _ := base64.StdEncoding.DecodeString(task.RowData.SrcPassword) + dstPasswd, _ := base64.StdEncoding.DecodeString(task.RowData.DstPassword) + + task.SrcADDR = fmt.Sprintf("%s:%d", task.RowData.SrcIP, task.RowData.SrcPort) + task.SrcPassword = string(srcPasswd) + task.DstADDR = task.RowData.DstCluster + task.DstPassword = string(dstPasswd) + + task.HTTPProfile = task.RowData.SyncerPort + task.SystemProfile = task.HTTPProfile + 1 + + isSyncOk := task.IsSyncStateOK() + if isSyncOk { + // 同步状态本来就是ok的,直接watcht redis-shake即可 + task.Logger.Info(fmt.Sprintf("redis:%s 同步状态ok,开始watch...", task.SrcADDR)) + task.SetTaskType(task.NextTask()) + task.SetStatus(0) + task.UpdateRow() + return + } + + task.GetMyRedisShakeTool(true) + if task.Err != nil { + return + } + + _, task.Err = util.IsFileExistsInCurrDir("redis-shake-template.conf") + if task.Err != nil { + task.Logger.Error(task.Err.Error()) + return + } + + task.PreClear() + if task.Err != nil { + return + } + task.GetDestRedisVersion() + if task.Err != nil { + return + } + + task.RedisShakeStart(true) + if task.Err != nil { + return + } + task.WatchShake() + if task.Err != nil { + return + } + + task.SetTaskType(task.NextTask()) + task.SetStatus(0) + task.UpdateDbAndLogLocal("redis-shake 启动成功,pid:%d,开始修改taskType:%s taskStatus:%d", + task.RowData.SyncerPid, task.RowData.TaskType, task.RowData.Status) + + return +} + +// GetDestRedisVersion TODO +// 通过info server命令获取目的redis版本; +// 如果目的redis不支持info server命令,则用源redis版本当做目的redis版本; +// 如果源redis、目的redis均不支持info server命令,则报错; +func (task *MakeCacheSyncTask) GetDestRedisVersion() { + if task.DstVersion != "" { + return + } + defer task.Logger.Info("get targetVersion:" + task.DstVersion) + srcConn, err := myredis.NewRedisClient(task.SrcADDR, task.SrcPassword, 0, task.Logger) + if err != nil { + task.Err = err + return + } + defer srcConn.Close() + destConn, err := myredis.NewRedisClient(task.DstADDR, task.DstPassword, 0, task.Logger) + if err != nil { + task.Err = err + return + } + defer destConn.Close() + + infoData, err := destConn.Info("server") + _, ok := infoData["redis_version"] + if err == nil && ok { + task.DstVersion = infoData["redis_version"] + return + } + infoData, err = srcConn.Info("server") + if err != nil { + task.Err = fmt.Errorf("srcRedis:%s dstRedis:%s both not support 'info server'", + task.SrcADDR, task.DstADDR) + task.Logger.Error(task.Err.Error()) + return + } + task.DstVersion = infoData["redis_version"] +} + +// MkSyncDirIfNotExists create sync directory if not exists +func (task *MakeCacheSyncTask) MkSyncDirIfNotExists() (syncDir string) { + err := task.InitTaskDir() + if err != nil { + task.Err = err + return + } + return task.TaskDir +} + +// IsRedisShakeAlive redis-shake是否存活 +func (task *MakeCacheSyncTask) IsRedisShakeAlive() (isAlive bool, err 
error) {
+    isShakeAliveCmd := fmt.Sprintf("ps -ef|grep %s_%d|grep 'taskid%d-'|grep -v grep|grep 'redis-shake'|grep conf || true",
+        task.RowData.SrcIP, task.RowData.SrcPort, task.RowData.ID)
+    task.Logger.Info("IsRedisShakeAlive", zap.String("isShakeAliveCmd", isShakeAliveCmd))
+    ret, err := util.RunLocalCmd("bash", []string{"-c", isShakeAliveCmd}, "", nil, 1*time.Minute, task.Logger)
+    if err != nil {
+        task.Logger.Error("IsRedisShakeAlive fail", zap.Error(err))
+        return false, err
+    }
+    ret = strings.TrimSpace(ret)
+    if ret != "" {
+        return true, nil
+    }
+    return false, nil
+}
+
+// IsSyncStateOK whether the sync state is already ok
+func (task *MakeCacheSyncTask) IsSyncStateOK() (ok bool) {
+    // is the redis-shake process alive
+    ok, task.Err = task.IsRedisShakeAlive()
+    if task.Err != nil {
+        return false
+    }
+    if !ok {
+        return false
+    }
+    // can redis-shake metrics be fetched successfully
+    metrics := task.GetShakeMerics()
+    if task.Err != nil {
+        return false
+    }
+    if metrics == nil {
+        return false
+    }
+    return true
+}
+
+// RedisShakeStop stop redis-shake
+func (task *MakeCacheSyncTask) RedisShakeStop() {
+    var err error
+    var isAlive bool
+    isAlive, err = task.IsRedisShakeAlive()
+    if !isAlive {
+        task.Logger.Info(fmt.Sprintf("RedisShakeStop srcRedis:%s#%d sync is not alive", task.RowData.SrcIP,
+            task.RowData.SrcPort))
+        return
+    }
+    task.Logger.Info(fmt.Sprintf("RedisShakeStop srcRedis:%s#%d sync is alive", task.RowData.SrcIP, task.RowData.SrcPort))
+
+    // kill redis-shake
+    killCmd := fmt.Sprintf(`
+    ps -ef|grep %s_%d|grep 'taskid%d-'|grep -v grep|grep 'redis-shake'|grep conf|awk '{print $2}'|while read pid
+    do
+    kill -9 $pid
+    done
+    `, task.RowData.SrcIP, task.RowData.SrcPort, task.RowData.ID)
+    task.Logger.Info("RedisShakeStop...", zap.String("killCmd", killCmd))
+    retryTimes := 0
+    for isAlive && retryTimes < 5 {
+        msg := fmt.Sprintf("Killing redis-shake times:%d ...", retryTimes+1)
+        task.Logger.Info(msg)
+        // redis-shake is alive, now kill it
+        _, err = util.RunLocalCmd("bash", []string{"-c", killCmd}, "", nil, 1*time.Minute, task.Logger)
+        if err != nil {
+            task.Logger.Error("Kill redis-shake process fail", zap.Error(err))
+        }
+        time.Sleep(10 * time.Second)
+        retryTimes++
+        isAlive, _ = task.IsRedisShakeAlive()
+        if isAlive {
+            task.Logger.Error(fmt.Sprintf("srcRedis:%s#%d,Kill redis-shake fail,process still alive",
+                task.RowData.SrcIP, task.RowData.SrcPort))
+        }
+    }
+    if isAlive && retryTimes == 5 {
+        task.Logger.Error(fmt.Sprintf("srcRedis:%s#%d,Kill redis-shake process failed", task.RowData.SrcIP,
+            task.RowData.SrcPort))
+        task.Err = fmt.Errorf("kill redis-shake process failed")
+        return
+    }
+    task.Logger.Info(fmt.Sprintf("srcRedis:%s#%d,kill redis-shake success", task.RowData.SrcIP, task.RowData.SrcPort))
+    return
+}
+
+// GetMyRedisShakeTool get the redis-shake tool (currently always taken from the
+// local directory, regardless of fetchLatest)
+func (task *MakeCacheSyncTask) GetMyRedisShakeTool(fetchLatest bool) {
+    task.GetRedisShakeToolFromLocal()
+    return
+}
+
+// GetRedisShakeToolFromLocal get redis-shake from the local directory
+func (task *MakeCacheSyncTask) GetRedisShakeToolFromLocal() {
+    currentPath, err := util.CurrentExecutePath()
+    if err != nil {
+        task.Err = err
+        task.Logger.Error(err.Error())
+        return
+    }
+    shakeBin := filepath.Join(currentPath, "redis-shake")
+    _, err = os.Stat(shakeBin)
+    if err != nil && os.IsNotExist(err) {
+        task.Err = fmt.Errorf("%s not exists,err:%v", shakeBin, err)
+        task.Logger.Error(task.Err.Error())
+        return
+    } else if err != nil && os.IsPermission(err) {
+        err = os.Chmod(shakeBin, 0774)
+        if err != nil {
+            task.Err = fmt.Errorf("%s 
os.Chmod 0774 fail,err:%v", shakeBin, err) + task.Logger.Error(task.Err.Error()) + return + } + } + task.Logger.Info(fmt.Sprintf("%s is ok", shakeBin)) + task.RedisShakeBin = shakeBin +} + +// getMySyncPort 获取redis-shake port, 20000<=port<30000 +func (task *MakeCacheSyncTask) getMySyncPort(initSyncPort int) { + taskTypes := []string{} + var syncerport int + taskTypes = append(taskTypes, constvar.MakeCacheSyncTaskType) + taskTypes = append(taskTypes, constvar.WatchCacheSyncTaskType) + if initSyncPort <= 0 { + initSyncPort = 20000 + localIP, _ := util.GetLocalIP() + dtsSvrMaxSyncPortTask, err := tendisdb.GetDtsSvrMaxSyncPort(task.RowData.BkCloudID, localIP, + constvar.TendisTypeRedisInstance, taskTypes, task.Logger) + if (err != nil && gorm.IsRecordNotFoundError(err)) || dtsSvrMaxSyncPortTask == nil { + initSyncPort = 20000 + } else if err != nil { + task.Err = err + return + } else { + if dtsSvrMaxSyncPortTask.SyncerPort >= 20000 { + initSyncPort = dtsSvrMaxSyncPortTask.SyncerPort + 2 // 必须加2 + } + } + } + if initSyncPort > 30000 { + initSyncPort = 20000 + } + syncerport, task.Err = util.GetANotUsePort("127.0.0.1", initSyncPort, 2) + if task.Err != nil { + task.Logger.Error(task.Err.Error()) + return + } + task.SetSyncerPort(syncerport) + task.UpdateRow() + task.HTTPProfile = task.RowData.SyncerPort + task.SystemProfile = task.HTTPProfile + 1 + + return +} +func (task *MakeCacheSyncTask) clearOldShakeConfigFile() { + task.ShakeConfFile = strings.TrimSpace(task.ShakeConfFile) + if task.ShakeConfFile == "" { + return + } + _, err := os.Stat(task.ShakeConfFile) + if err == nil { + // rm old sync log file + rmCmd := fmt.Sprintf("cd %s && rm -rf %s", + filepath.Dir(task.ShakeConfFile), filepath.Base(task.ShakeLogFile)) + task.Logger.Info(rmCmd) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 20*time.Second, task.Logger) + } +} +func (task *MakeCacheSyncTask) clearOldShakeLogFile() { + task.ShakeLogFile = strings.TrimSpace(task.ShakeLogFile) + if task.ShakeLogFile == "" { + return + } + _, err := os.Stat(task.ShakeLogFile) + if err == nil { + // rm old sync log file + rmCmd := fmt.Sprintf("cd %s && rm -rf *.log", + filepath.Dir(task.ShakeLogFile)) + task.Logger.Info(rmCmd) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 20*time.Second, task.Logger) + } +} + +// createShakeConfigFile create redis-shake config file if not exists +func (task *MakeCacheSyncTask) createShakeConfigFile() { + syncDir := task.MkSyncDirIfNotExists() + if task.Err != nil { + return + } + task.ShakeConfFile = filepath.Join(syncDir, + fmt.Sprintf("shake-taskid%d-%d.conf", task.RowData.ID, task.RowData.SyncerPort)) + + _, err := os.Stat(task.ShakeConfFile) + if err == nil { + // if config file exists,return + task.Logger.Info(fmt.Sprintf("redis-shake config file:%s already exists", task.ShakeConfFile)) + return + } + + currentPath, _ := util.CurrentExecutePath() + tempFile := filepath.Join(currentPath, "redis-shake-template.conf") + tempContent, err := ioutil.ReadFile(tempFile) + if err != nil { + task.Logger.Error("Read redis-shake template conf fail", + zap.Error(err), zap.String("templateConfig", tempFile)) + task.Err = fmt.Errorf("Read redis-shake template conf fail.err:%v", err) + return + } + loglevel := "info" + debug := viper.GetBool("TENDIS_DEBUG") + if debug == true { + loglevel = "debug" + } + startSeg := -1 + endSeg := -1 + if task.RowData.SrcSegStart >= 0 && + task.RowData.SrcSegEnd <= 419999 && + task.RowData.SrcSegStart < task.RowData.SrcSegEnd { + if task.RowData.SrcSegStart < 0 || 
task.RowData.SrcSegEnd < 0 { + task.Err = fmt.Errorf("srcTendis:%s#%d segStart:%d<0 or segEnd:%d<0", + task.RowData.SrcIP, task.RowData.SrcPort, task.RowData.SrcSegStart, task.RowData.SrcSegEnd) + task.Logger.Error(err.Error()) + return + } + if task.RowData.SrcSegStart >= task.RowData.SrcSegEnd { + task.Err = fmt.Errorf("srcTendis:%s#%d segStart:%d >= segEnd:%d", + task.RowData.SrcIP, task.RowData.SrcPort, task.RowData.SrcSegStart, task.RowData.SrcSegEnd) + task.Logger.Error(err.Error()) + return + } + startSeg = task.RowData.SrcSegStart + endSeg = task.RowData.SrcSegEnd + } + var keyWhiteRegex string = "" + var keyBlackRegex string = "" + if task.RowData.KeyWhiteRegex != "" && !task.IsMatchAny(task.RowData.KeyWhiteRegex) { + keyWhiteRegex = task.RowData.KeyWhiteRegex + } + if task.RowData.KeyBlackRegex != "" && !task.IsMatchAny(task.RowData.KeyBlackRegex) { + keyBlackRegex = ";" + task.RowData.KeyBlackRegex // 注意最前面有个分号 + } + + tempData := string(tempContent) + tempData = strings.ReplaceAll(tempData, "{{LOG_FILE}}", task.ShakeLogFile) + tempData = strings.ReplaceAll(tempData, "{{LOG_LEVEL}}", loglevel) + tempData = strings.ReplaceAll(tempData, "{{PID_PATH}}", filepath.Dir(task.ShakeConfFile)) + tempData = strings.ReplaceAll(tempData, "{{SYSTEM_PROFILE}}", strconv.Itoa(task.SystemProfile)) + tempData = strings.ReplaceAll(tempData, "{{HTTP_PROFILE}}", strconv.Itoa(task.HTTPProfile)) + tempData = strings.ReplaceAll(tempData, "{{SRC_ADDR}}", task.SrcADDR) + tempData = strings.ReplaceAll(tempData, "{{SRC_PASSWORD}}", task.SrcPassword) + tempData = strings.ReplaceAll(tempData, "{{START_SEGMENT}}", strconv.Itoa(startSeg)) + tempData = strings.ReplaceAll(tempData, "{{END_SEGMENT}}", strconv.Itoa(endSeg)) + tempData = strings.ReplaceAll(tempData, "{{TARGET_ADDR}}", task.DstADDR) + tempData = strings.ReplaceAll(tempData, "{{TARGET_PASSWORD}}", task.DstPassword) + tempData = strings.ReplaceAll(tempData, "{{TARGET_VERSION}}", task.DstVersion) + tempData = strings.ReplaceAll(tempData, "{{KEY_WHITE_REGEX}}", keyWhiteRegex) + tempData = strings.ReplaceAll(tempData, "{{KEY_BLACK_REGEX}}", keyBlackRegex) + + err = ioutil.WriteFile(task.ShakeConfFile, []byte(tempData), 0755) + if err != nil { + task.Logger.Error("Save redis-shake conf fail", zap.Error(err), zap.String("syncConfig", task.ShakeConfFile)) + task.Err = fmt.Errorf("Save redis-shake conf fail.err:%v", err) + return + } + task.Logger.Info(fmt.Sprintf("create redis-shake config file:%s success", task.ShakeConfFile)) + return +} +func (task *MakeCacheSyncTask) createShakeLogFile() { + syncDir := task.MkSyncDirIfNotExists() + if task.Err != nil { + return + } + logDir := filepath.Join(syncDir, "log") + util.MkDirIfNotExists(logDir) + task.ShakeLogFile = filepath.Join(logDir, + fmt.Sprintf("%s-%d-%d.log", task.RowData.SrcIP, task.RowData.SrcPort, task.RowData.SyncerPort)) + return +} + +// RedisShakeStart 启动redis-shake +func (task *MakeCacheSyncTask) RedisShakeStart(reacquirePort bool) { + task.Logger.Info(fmt.Sprintf("redis-shake start 源%s 目的%s ...", task.SrcADDR, task.DstADDR)) + defer task.Logger.Info("end redis-shake start") + + dtsTask.PortSyncerMut.Lock() // 串行获取redis-shake端口 和 启动 + defer dtsTask.PortSyncerMut.Unlock() + + if reacquirePort == true { + task.getMySyncPort(0) + if task.Err != nil { + return + } + } + maxRetryTimes := 5 + for maxRetryTimes >= 0 { + maxRetryTimes-- + task.Err = nil + task.createShakeLogFile() + if task.Err != nil { + return + } + task.createShakeConfigFile() + if task.Err != nil { + return + } + logFile, err := 
os.OpenFile(task.ShakeLogFile, os.O_RDWR|os.O_CREATE, 0755) + if err != nil { + task.Logger.Error("open logfile fail", zap.Error(err), zap.String("syncLogFile", task.ShakeLogFile)) + task.Err = fmt.Errorf("open logfile fail,err:%v syncLogFile:%s", err, task.ShakeLogFile) + return + } + logCmd := fmt.Sprintf("%s -type=sync -conf=%s", task.RedisShakeBin, task.ShakeConfFile) + task.Logger.Info(logCmd) + ctx, cancel := context.WithCancel(context.Background()) + cmd := exec.CommandContext(ctx, task.RedisShakeBin, "-type", "sync", "-conf", task.ShakeConfFile) + cmd.Stdout = logFile + cmd.Stderr = logFile + err = cmd.Start() + if err != nil { + defer cancel() + logFile.Close() + task.Logger.Error("cmd.Start fail", zap.Error(err), zap.String("cmd", logCmd)) + task.Err = fmt.Errorf("cmd.Start fail,err:%v command:%s", err, logCmd) + return + } + go func() { + err = cmd.Wait() + if err != nil { + task.Logger.Error("redis-shake cmd.wait error", zap.Error(err)) + } + }() + time.Sleep(5 * time.Second) + isAlive, err := task.IsRedisShakeAlive() + if err != nil { + defer cancel() + logFile.Close() + task.Err = err + task.Logger.Error(task.Err.Error()) + return + } + if isAlive == false { + defer cancel() + logFile.Close() + logContent, _ := ioutil.ReadFile(task.ShakeLogFile) + task.Logger.Error("redis-shake start fail", zap.String("failDetail", string(logContent))) + task.Err = fmt.Errorf("redis-shake start fail,detail:%s", string(logContent)) + if strings.Contains(string(logContent), "address already in use") { + // port address already used + // clear and get sync port again and retry + task.clearOldShakeLogFile() + task.clearOldShakeConfigFile() + task.getMySyncPort(task.RowData.SyncerPort + 2) + if task.Err != nil { + return + } + continue + } + } + task.SetSyncerPid(cmd.Process.Pid) + break + } + if task.Err != nil { + task.Err = fmt.Errorf("redis-shake start fail") + return + } + task.UpdateDbAndLogLocal("redis-shake %d start success", task.RowData.SyncerPort) + + return +} + +// WatchShake 监听redis-shake,binlog-lag与last-key等信息 +func (task *MakeCacheSyncTask) WatchShake() { + + for { + time.Sleep(10 * time.Second) + row01, err := tendisdb.GetTaskByID(task.RowData.ID, task.Logger) + if err != nil { + task.Err = err + return + } + if row01 == nil { + task.UpdateDbAndLogLocal("根据task_id:%d获取task row失败,row01:%v", task.RowData.ID, row01) + continue + } + task.RowData = row01 + if task.RowData.KillSyncer == 1 || + task.RowData.SyncOperate == constvar.RedisSyncStopTodo || + task.RowData.SyncOperate == constvar.RedisForceKillTaskTodo { // stop redis-shake + + succ := constvar.RedisSyncStopSucc + fail := constvar.RedisSyncStopFail + if task.RowData.SyncOperate == constvar.RedisForceKillTaskTodo { + succ = constvar.RedisForceKillTaskSuccess + fail = constvar.RedisForceKillTaskFail + } + task.Logger.Info(fmt.Sprintf("start execute %q ...", task.RowData.SyncOperate)) + task.RedisShakeStop() + if task.Err == nil { + task.SetSyncOperate(succ) + task.SetStatus(2) + task.UpdateDbAndLogLocal("redis-shake:%d终止成功", task.RowData.SyncerPid) + task.Err = nil + } else { + task.SetSyncOperate(fail) + } + task.Logger.Info(fmt.Sprintf("end %q ...", task.RowData.SyncOperate)) + return + } + // upgrade redis-shake + if task.RowData.SyncOperate == constvar.RedisSyncUpgradeTodo { + task.Logger.Info(fmt.Sprintf("start execute %q ...", task.RowData.SyncOperate)) + task.UpgradeShakeMedia() + if task.Err != nil { + return + } + task.SetSyncOperate(constvar.RedisSyncUpgradeSucc) + task.UpdateDbAndLogLocal(constvar.RedisSyncUpgradeSucc 
+ "...") + task.Logger.Info(fmt.Sprintf("end %q ...", task.RowData.SyncOperate)) + continue + } + metric := task.GetShakeMerics() + if task.Err != nil { + return + } + if metric == nil { + task.SetStatus(1) + task.UpdateDbAndLogLocal("获取metic失败,retry...") + continue + } + if metric.Status == ShakeWaitFullStatus { + task.SetStatus(1) + task.UpdateDbAndLogLocal("等待源实例执行bgsave...") + continue + } + if metric.Status == ShakeFullStatus { + task.SetStatus(1) + task.UpdateDbAndLogLocal("rdb导入中,进度:%d%%", metric.FullSyncProgress) + continue + } + if metric.Status == ShakeIncrStatus { + task.SetMessage("增量同步中,延迟:%s", metric.Delay) + task.SetStatus(1) + task.UpdateRow() + if task.RowData.TaskType == constvar.MakeCacheSyncTaskType { + // makeCacheSync 在确保rdb导入完成后,增量数据同步状态由 watchCacheSync 来完成 + return + } + } + continue + } +} + +// UpgradeShakeMedia 更新redis-shake介质 +func (task *MakeCacheSyncTask) UpgradeShakeMedia() { + defer func() { + if task.Err != nil { + task.SetSyncOperate(constvar.RedisSyncUpgradeFail) + } + }() + // stop redis-shake + task.RedisShakeStop() + if task.Err != nil { + return + } + task.GetMyRedisShakeTool(true) + if task.Err != nil { + return + } + task.RedisShakeStart(false) + if task.Err != nil { + return + } +} + +// RedisShakeMetric shake meric +type RedisShakeMetric struct { + StartTime time.Time `json:"StartTime"` + PullCmdCount int `json:"PullCmdCount"` + PullCmdCountTotal int `json:"PullCmdCountTotal"` + BypassCmdCount int `json:"BypassCmdCount"` + BypassCmdCountTotal int `json:"BypassCmdCountTotal"` + PushCmdCount int `json:"PushCmdCount"` + PushCmdCountTotal int `json:"PushCmdCountTotal"` + SuccessCmdCount int `json:"SuccessCmdCount"` + SuccessCmdCountTotal int `json:"SuccessCmdCountTotal"` + FailCmdCount int `json:"FailCmdCount"` + FailCmdCountTotal int `json:"FailCmdCountTotal"` + Delay string `json:"Delay"` + AvgDelay string `json:"AvgDelay"` + NetworkSpeed int `json:"NetworkSpeed"` + NetworkFlowTotal int `json:"NetworkFlowTotal"` + FullSyncProgress int `json:"FullSyncProgress"` + Status string `json:"Status"` + SenderBufCount int `json:"SenderBufCount"` + ProcessingCmdCount int `json:"ProcessingCmdCount"` + TargetDBOffset int `json:"TargetDBOffset"` + SourceDBOffset int `json:"SourceDBOffset"` + SourceAddress string `json:"SourceAddress"` + TargetAddress []string `json:"TargetAddress"` + Details interface{} `json:"Details"` +} + +// GetShakeMerics get shake metric +func (task *MakeCacheSyncTask) GetShakeMerics() *RedisShakeMetric { + var url string + var resp []byte + maxRetryTimes := 6 + for maxRetryTimes >= 0 { + maxRetryTimes-- + task.Err = nil + + url = fmt.Sprintf("http://127.0.0.1:%d/metric", task.HTTPProfile) + resp, task.Err = util.HTTPGetURLParams(url, nil, task.Logger) + if task.Err != nil { + time.Sleep(5 * time.Second) + continue + } + break + } + if task.Err != nil { + return nil + } + shakeMeric := []RedisShakeMetric{} + task.Err = json.Unmarshal(resp, &shakeMeric) + if task.Err != nil { + task.Err = fmt.Errorf("json.Unmarshal fail,err:%v,url:%s", task.Err, url) + task.Logger.Error(task.Err.Error(), zap.String("resp", string(resp))) + return nil + } + if len(shakeMeric) > 0 { + return &shakeMeric[0] + } + return nil +} diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/rediscache/rediscache.go b/dbm-services/redis/redis-dts/pkg/dtsTask/rediscache/rediscache.go new file mode 100644 index 0000000000..9b96e9d685 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/rediscache/rediscache.go @@ -0,0 +1,2 @@ +// Package rediscache TODO +package 
rediscache diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/rediscache/watchCacheSync.go b/dbm-services/redis/redis-dts/pkg/dtsTask/rediscache/watchCacheSync.go new file mode 100644 index 0000000000..91bb404cf5 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/rediscache/watchCacheSync.go @@ -0,0 +1,72 @@ +package rediscache + +import ( + "dbm-services/redis/redis-dts/models/mysql/tendisdb" + "dbm-services/redis/redis-dts/pkg/constvar" + "encoding/base64" + "fmt" +) + +// WatchCacheSyncTask .. +type WatchCacheSyncTask struct { + MakeCacheSyncTask +} + +// TaskType task类型 +func (task *WatchCacheSyncTask) TaskType() string { + return constvar.WatchCacheSyncTaskType +} + +// NextTask 下一个task类型 +func (task *WatchCacheSyncTask) NextTask() string { + return "" +} + +// NewWatchCacheSyncTask 新建任务 +func NewWatchCacheSyncTask(row *tendisdb.TbTendisDTSTask) *WatchCacheSyncTask { + ret := &WatchCacheSyncTask{ + MakeCacheSyncTask: *NewMakeCacheSyncTask(row), + } + return ret +} + +// Execute 程序重新拉起后监听以往处于taskType='makeShake',status=1状态的redis-shake +func (task *WatchCacheSyncTask) Execute() { + if task.Err != nil { + return + } + + defer func() { + if task.Err != nil { + task.SetStatus(-1) + task.SetMessage(task.Err.Error()) + task.UpdateRow() + } + }() + defer task.Logger.Info(fmt.Sprintf("end WatchCacheSyncTask")) + + task.SetStatus(1) + task.UpdateDbAndLogLocal("开始watch redis-shake port:%d", task.RowData.SyncerPort) + + task.GetMyRedisShakeTool(false) + if task.Err != nil { + return + } + + srcPasswd, _ := base64.StdEncoding.DecodeString(task.RowData.SrcPassword) + dstPasswd, _ := base64.StdEncoding.DecodeString(task.RowData.DstPassword) + + task.SrcADDR = fmt.Sprintf("%s:%d", task.RowData.SrcIP, task.RowData.SrcPort) + task.SrcPassword = string(srcPasswd) + task.DstADDR = task.RowData.DstCluster + task.DstPassword = string(dstPasswd) + + task.HTTPProfile = task.RowData.SyncerPort + task.SystemProfile = task.HTTPProfile + 1 + + task.Logger.Info(fmt.Sprintf("WatchCacheSyncTask 开始处理,srcTendis:%s srcAddr:%s", + task.RowData.SrcCluster, task.SrcADDR)) + + task.WatchShake() + return +} diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/saveSyncSeq.go b/dbm-services/redis/redis-dts/pkg/dtsTask/saveSyncSeq.go new file mode 100644 index 0000000000..5b7a0c646a --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/saveSyncSeq.go @@ -0,0 +1,196 @@ +package dtsTask + +import ( + "bufio" + "dbm-services/redis/redis-dts/pkg/customtime" + "dbm-services/redis/redis-dts/util" + "encoding/json" + "fmt" + "os" + "strings" + "time" + + "go.uber.org/zap" +) + +// SyncSeqItem redis-sync binlog seq & time +type SyncSeqItem struct { + Time customtime.CustomTime `json:"time"` + RunID string `json:"runID"` + Seq uint64 `json:"seq"` +} + +// String .. 
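+// String renders the seq item as a single JSON line; SyncSeqItemDecode below is
+// its inverse. An illustrative line (field values here are made up, and the time
+// encoding depends on customtime.CustomTime):
+//   {"time":"2023-05-29 15:14:30","runID":"0123abcdef","seq":100345}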
+func (pos *SyncSeqItem) String() string {
+    ret, _ := json.Marshal(pos)
+    return string(ret)
+}
+
+// SyncSeqItemDecode decode a string into a SyncSeqItem
+func SyncSeqItemDecode(str01 string) (item SyncSeqItem, err error) {
+    str01 = strings.TrimSpace(str01)
+    item = SyncSeqItem{}
+    err = json.Unmarshal([]byte(str01), &item)
+    if err != nil {
+        err = fmt.Errorf("SyncSeqItemDecode fail,err:%v,data:%s", err, str01)
+        return item, err
+    }
+    return
+}
+
+// ISaveSyncSeq save sync seq interface
+type ISaveSyncSeq interface {
+    HaveOldSyncSeq() bool
+    SyncSeqWriter(posItem *SyncSeqItem, flushDisk bool) error
+    GetLastSyncSeq() (latestPos SyncSeqItem, err error)
+    GetSpecificTimeSyncSeq(time01 time.Time) (lastSeq SyncSeqItem, err error)
+    Close() error
+}
+
+// SaveSyncSeqToFile save redis-sync seq records to a local file
+type SaveSyncSeqToFile struct {
+    saveFile  string
+    fileP     *os.File
+    bufWriter *bufio.Writer
+    logger    *zap.Logger
+}
+
+// NewSaveSyncSeqToFile create a SaveSyncSeqToFile and open its target file
+func NewSaveSyncSeqToFile(saveFile string, logger *zap.Logger) (ret *SaveSyncSeqToFile, err error) {
+    ret = &SaveSyncSeqToFile{}
+    ret.logger = logger
+    err = ret.SetSaveFile(saveFile)
+    if err != nil {
+        return nil, err
+    }
+    return
+}
+
+// SaveFile return the current save file path
+func (f *SaveSyncSeqToFile) SaveFile() string {
+    return f.saveFile
+}
+
+// SetSaveFile close the old file (if any) and open dstFile for appending
+func (f *SaveSyncSeqToFile) SetSaveFile(dstFile string) error {
+    var err error
+    err = f.Close()
+    if err != nil {
+        return err
+    }
+    if dstFile == "" {
+        err = fmt.Errorf("saveFile(%s) cannot be empty", dstFile)
+        f.logger.Error(err.Error())
+        return err
+    }
+    f.saveFile = dstFile
+    f.fileP, err = os.OpenFile(dstFile, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644)
+    if err != nil {
+        err = fmt.Errorf("open file:%s fail,err:%v", dstFile, err)
+        f.logger.Error(err.Error())
+        return err
+    }
+    f.bufWriter = bufio.NewWriter(f.fileP)
+    return nil
+}
+
+// HaveOldSyncSeq confirm that an old sync seq record exists
+func (f *SaveSyncSeqToFile) HaveOldSyncSeq() bool {
+    if f.saveFile == "" {
+        return false
+    }
+    file01, err := os.Stat(f.saveFile)
+    if err != nil {
+        // treat a missing (or otherwise unreadable) file as "no old seq"
+        return false
+    }
+    if file01.Size() == 0 {
+        return false
+    }
+    return true
+}
+
+// SyncSeqWriter append one syncSeq record to the file
+func (f *SaveSyncSeqToFile) SyncSeqWriter(seqItem *SyncSeqItem, flushDisk bool) error {
+    line01 := seqItem.String()
+    _, err := f.bufWriter.WriteString(line01 + "\n")
+    if err != nil {
+        f.logger.Error("write file fail", zap.Error(err),
+            zap.String("line01", line01), zap.String("saveFile", f.saveFile))
+        return err
+    }
+    if flushDisk {
+        err = f.bufWriter.Flush()
+        if err != nil {
+            err = fmt.Errorf("bufio flush fail.err:%v,file:%s", err, f.saveFile)
+            f.logger.Error(err.Error())
+            return err
+        }
+    }
+    return nil
+}
+
+// GetLastSyncSeq get the latest syncSeq
+func (f *SaveSyncSeqToFile) GetLastSyncSeq() (lastSeq SyncSeqItem, err error) {
+    f.bufWriter.Flush()
+    tailCmd := fmt.Sprintf("tail -1 %s", f.saveFile)
+    lastLine, err := util.RunLocalCmd("bash", []string{"-c", tailCmd}, "", nil, 30*time.Second, f.logger)
+    if err != nil {
+        return lastSeq, err
+    }
+    lastSeq, err = SyncSeqItemDecode(lastLine)
+    if err != nil {
+        f.logger.Error(err.Error())
+        return lastSeq, err
+    }
+    return
+}
+
+// GetSpecificTimeSyncSeq get the sync seq of a specific time.
+// The 'seconds' part of time01 is ignored; the first seq recorded within the
+// same 'minute' as time01 is returned.
+func (f *SaveSyncSeqToFile) GetSpecificTimeSyncSeq(time01 time.Time) (lastSeq SyncSeqItem, err error) {
+    f.bufWriter.Flush()
+    layoutMin := "2006-01-02 15:04"
+    timeStr := time01.Local().Format(layoutMin)
+    grepCmd := 
fmt.Sprintf("grep -i %q %s| head -1", timeStr, f.saveFile) + f.logger.Info("GetSpecificTimeSyncSeq " + grepCmd) + + firstLine, err := util.RunLocalCmd("bash", []string{"-c", grepCmd}, "", nil, 1*time.Minute, f.logger) + if err != nil { + return lastSeq, err + } + firstLine = strings.TrimSpace(firstLine) + if firstLine == "" { + f.logger.Warn(fmt.Sprintf("GetSpecificTimeSyncSeq not found %q seq record,file:%s", timeStr, f.saveFile)) + return lastSeq, util.NewNotFound() + } + lastSeq, err = SyncSeqItemDecode(firstLine) + if err != nil { + f.logger.Error(err.Error()) + return lastSeq, err + } + return +} + +// Close file +func (f *SaveSyncSeqToFile) Close() error { + var err error + if f.saveFile == "" { + return nil + } + f.saveFile = "" + + err = f.bufWriter.Flush() + if err != nil { + err = fmt.Errorf("bufio flush fail.err:%v,file:%s", err, f.saveFile) + f.logger.Error(err.Error()) + return nil + } + err = f.fileP.Close() + if err != nil { + err = fmt.Errorf("file close fail.err:%v,file:%s", err, f.saveFile) + f.logger.Error(err.Error()) + return nil + } + return nil +} diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/tendisplus/makeSync.go b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisplus/makeSync.go new file mode 100644 index 0000000000..6d369c2d90 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisplus/makeSync.go @@ -0,0 +1,759 @@ +package tendisplus + +import ( + "dbm-services/redis/redis-dts/models/myredis" + "dbm-services/redis/redis-dts/models/mysql/tendisdb" + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/pkg/dtsTask" + "dbm-services/redis/redis-dts/tclog" + "dbm-services/redis/redis-dts/util" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/jinzhu/gorm" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +// MakeSyncTask 启动redis-sync +type MakeSyncTask struct { + dtsTask.FatherTask + RedisCliTool string `json:"redisCliTool"` + RedisSyncTool string `json:"redisSyncTool"` + SyncLogFile string `json:"syncLogFile"` + SyncConfigFile string `json:"syncConfigFile"` + SyncDir string `json:"syncDir"` +} + +// TaskType task 类型 +func (task *MakeSyncTask) TaskType() string { + return constvar.TendisplusMakeSyncTaskType +} + +// NextTask 下一个task类型 +func (task *MakeSyncTask) NextTask() string { + return constvar.TendisplusSendBulkTaskType +} + +// NewMakeSyncTask 新建一个 redis-sync启动task +func NewMakeSyncTask(row *tendisdb.TbTendisDTSTask) *MakeSyncTask { + return &MakeSyncTask{ + FatherTask: dtsTask.NewFatherTask(row), + } +} + +// PreClear 关闭以前生成的redis-sync +func (task *MakeSyncTask) PreClear() { + if task.Err != nil { + return + } + if task.RowData.SyncerPort == 0 { + return + } + defer func() { + // clear old sync config file and log file + task.MkSyncDirIfNotExists() + if task.Err != nil { + return + } + rmCmd := fmt.Sprintf("cd %s && rm -rf *-taskid%d-*.log *-taskid%d-*.conf", task.SyncDir, task.RowData.ID, + task.RowData.ID) + task.Logger.Info(fmt.Sprintf("tendisplus makeSync preClear execute:%s", rmCmd)) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 10*time.Second, task.Logger) + + }() + + task.RedisSyncStop() + return +} + +// Execute 执行启动redis-sync +func (task *MakeSyncTask) Execute() { + if task.Err != nil { + return + } + + defer func() { + if task.Err != nil { + task.SetStatus(-1) + task.SetMessage(task.Err.Error()) + task.UpdateRow() + } + }() + task.SetStatus(1) + task.UpdateDbAndLogLocal("开始创建sync关系") + + task.GetMyRedisCliTool() + if task.Err != nil { + return + } + 
task.GetMyRedisSyncTool() + if task.Err != nil { + return + } + + isSyncOk := task.IsSyncStateOK() + if isSyncOk { + // 同步状态本来就是ok的,直接watcht redis-sync即可 + task.Logger.Info(fmt.Sprintf("redis:%s 同步状态ok,开始watch...", task.GetSrcRedisAddr())) + task.SetTaskType(task.NextTask()) + task.SetStatus(1) + task.UpdateRow() + task.WatchSync() + return + } + task.PreClear() + if task.Err != nil { + return + } + task.isRedisConnectOK() + if task.Err != nil { + return + } + + task.TendisplusMasterSlaveConfigSet() + if task.Err != nil { + return + } + task.RedisSyncStart(true) + if task.Err != nil { + return + } + task.UpdateDbAndLogLocal("tendis redis-sync 拉起ok,srcRedisAddr:%s,taskid:%d", task.GetSrcRedisAddr(), task.RowData.ID) + + task.WatchSync() + return +} + +// MkSyncDirIfNotExists create sync directory if not exists +func (task *MakeSyncTask) MkSyncDirIfNotExists() { + task.Err = task.InitTaskDir() + if task.Err != nil { + return + } + task.SyncDir = task.TaskDir + return +} + +// GetMyRedisSyncTool 本地获取redis-sync-tendisplus +func (task *MakeSyncTask) GetMyRedisSyncTool() { + task.RedisSyncTool, task.Err = util.IsToolExecutableInCurrDir("redis-sync-tendisplus") + return +} + +// GetMyRedisCliTool 本地获取redis-cli +func (task *MakeSyncTask) GetMyRedisCliTool() { + task.RedisCliTool, task.Err = util.IsToolExecutableInCurrDir("redis-cli") + return +} + +func (task *MakeSyncTask) getSlaveConn() (slaveConn *myredis.RedisWorker) { + slaveConn, task.Err = myredis.NewRedisClient(task.GetSrcRedisAddr(), task.GetSrcRedisPasswd(), 0, task.Logger) + if task.Err != nil { + return + } + return +} + +func (task *MakeSyncTask) getMasterConn() (masterConn *myredis.RedisWorker) { + var masterAddr, masterAuth string + slaveConn := task.getSlaveConn() + if task.Err != nil { + return + } + defer slaveConn.Close() + masterAddr, masterAuth, task.Err = slaveConn.GetMasterAddrAndPasswd() + masterConn, task.Err = myredis.NewRedisClient(masterAddr, masterAuth, 0, task.Logger) + return +} + +func (task *MakeSyncTask) isRedisConnectOK() { + task.GetSrcRedisPasswd() + if task.Err != nil { + return + } + slaveConn := task.getSlaveConn() + if task.Err != nil { + return + } + defer slaveConn.Close() + + masterConn := task.getMasterConn() + if task.Err != nil { + return + } + defer masterConn.Close() +} + +// TendisplusMasterSlaveConfigSet 'config set'修改必要配置 +// redis-sync will connect to tendisplus slave or master when migrating data +// 1. tendisplus master/slave aof-enabled=yes +// 2. tendisplus slave fullpushthreadnum=10 +// 3. tendisplus master/slave incrpushthreadnum >=10 +// 4. 
tendisplus slave supply-fullpsync-key-batch-num=50 +func (task *MakeSyncTask) TendisplusMasterSlaveConfigSet() { + var ok bool + // slaveConn 和 masterConn 可能指向同一个tendisplus实例 + slaveConn := task.getSlaveConn() + if task.Err != nil { + return + } + defer slaveConn.Close() + masterConn := task.getMasterConn() + if task.Err != nil { + return + } + defer masterConn.Close() + + _, task.Err = slaveConn.ConfigSet("aof-enabled", "yes") + if task.Err != nil { + return + } + _, task.Err = masterConn.ConfigSet("aof-enabled", "yes") + if task.Err != nil { + return + } + task.Logger.Info(fmt.Sprintf("tendisplus master:%s config set 'aof-enabled' 'yes' success", masterConn.Addr)) + task.Logger.Info(fmt.Sprintf("tendisplus slave:%s config set 'aof-enabled' 'yes' success", masterConn.Addr)) + + var slaveFullThreadNum int + var masterIncrThreadNum int + var slaveIncrThreadNum int + var tmpMap map[string]string + var tmpVal string + // slave fullpushthreadnum + tmpMap, task.Err = slaveConn.ConfigGet("fullpushthreadnum") + if task.Err != nil { + return + } + tmpVal, ok = tmpMap["fullpushthreadnum"] + if ok == false { + task.Err = fmt.Errorf("tendisplus slave:%s config get 'fullpushthreadnum' fail,empty val", slaveConn.Addr) + return + } + slaveFullThreadNum, _ = strconv.Atoi(tmpVal) + if slaveFullThreadNum < 10 { + slaveConn.ConfigSet("fullpushthreadnum", "10") + } + // slave incrpushthreadnum + tmpMap, _ = slaveConn.ConfigGet("incrpushthreadnum") + tmpVal, ok = tmpMap["incrpushthreadnum"] + if ok == false { + task.Err = fmt.Errorf("tendisplus slave:%s config get 'incrpushthreadnum' fail,empty val", slaveConn.Addr) + return + } + slaveIncrThreadNum, _ = strconv.Atoi(tmpVal) + if slaveIncrThreadNum < 10 { + slaveConn.ConfigSet("incrpushthreadnum", "10") + } + fullsyncBatchNum := viper.GetInt("tendisplus-full-sync-batchNum") + if fullsyncBatchNum == 0 { + fullsyncBatchNum = 50 + } else if fullsyncBatchNum < 10 { + fullsyncBatchNum = 10 + } else if fullsyncBatchNum > 500 { + fullsyncBatchNum = 500 + } + slaveConn.ConfigSet("supply-fullpsync-key-batch-num", strconv.Itoa(fullsyncBatchNum)) + + // master incrpushthreadnum + tmpMap, _ = masterConn.ConfigGet("incrpushthreadnum") + tmpVal, ok = tmpMap["incrpushthreadnum"] + if ok == false { + task.Err = fmt.Errorf("tendisplus master:%s config get 'incrpushthreadnum' fail,empty val", masterConn.Addr) + return + } + masterIncrThreadNum, _ = strconv.Atoi(tmpVal) + if masterIncrThreadNum < 10 { + masterConn.ConfigSet("incrpushthreadnum", "10") + } +} + +// getMySyncPort 获取redis-sync port, 40000<=port<50000 +func (task *MakeSyncTask) getMySyncPort(initSyncPort int) { + taskTypes := []string{} + var syncerPort int + taskTypes = append(taskTypes, constvar.MakeSyncTaskType) + if initSyncPort <= 0 { + initSyncPort = 40000 + dtsSvrMaxSyncPortTask, err := tendisdb.GetDtsSvrMaxSyncPort(task.RowData.BkCloudID, task.RowData.DtsServer, + constvar.TendisTypeTendisplusInsance, taskTypes, task.Logger) + if (err != nil && gorm.IsRecordNotFoundError(err)) || dtsSvrMaxSyncPortTask == nil { + initSyncPort = 40000 + } else if err != nil { + task.Err = err + return + } else { + if dtsSvrMaxSyncPortTask.SyncerPort >= 40000 { + initSyncPort = dtsSvrMaxSyncPortTask.SyncerPort + 1 + } + } + } + if initSyncPort > 50000 { + initSyncPort = 40000 + } + syncerPort, task.Err = util.GetANotUsePort("127.0.0.1", initSyncPort, 1) + if task.Err != nil { + task.Logger.Error(task.Err.Error()) + return + } + task.SetSyncerPort(syncerPort) + + return +} + +func (task *MakeSyncTask) createSyncLogFile() { + 
task.MkSyncDirIfNotExists() + if task.Err != nil { + return + } + task.SyncLogFile = filepath.Join(task.SyncDir, + fmt.Sprintf("log-%s-%d-kvstore-%d-%d.log", task.RowData.SrcIP, task.RowData.SrcPort, + task.RowData.SrcKvStoreID, task.RowData.SyncerPort)) + return +} + +func (task *MakeSyncTask) clearOldSyncLogFile() { + task.SyncLogFile = strings.TrimSpace(task.SyncLogFile) + if task.SyncLogFile == "" { + return + } + _, err := os.Stat(task.SyncLogFile) + if err == nil { + // rm old sync log file + rmCmd := fmt.Sprintf("cd %s && rm -rf %s", + filepath.Dir(task.SyncLogFile), filepath.Base(task.SyncLogFile)) + task.Logger.Info(rmCmd) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 20*time.Second, task.Logger) + } +} + +func (task *MakeSyncTask) createSyncConfigFile() { + task.MkSyncDirIfNotExists() + if task.Err != nil { + return + } + // 必须是 kvstore-%d-,最后的-很重要,因为可能出现 kvstore-1-、kvstore-10- + task.SyncConfigFile = filepath.Join(task.SyncDir, + fmt.Sprintf("sync-taskid%d-%d-kvstore-%d-.conf", + task.RowData.ID, task.RowData.SyncerPort, task.RowData.SrcKvStoreID)) + + _, err := os.Stat(task.SyncConfigFile) + if err == nil { + // if config file exists,return + task.Logger.Info(fmt.Sprintf("redis-sync config file:%s already exists", task.SyncConfigFile)) + return + } + sampleFile, err := util.IsFileExistsInCurrDir("tendisplus-sync-template.conf") + if err != nil { + task.Err = err + task.Logger.Error(task.Err.Error()) + return + } + sampleBytes, err := ioutil.ReadFile(sampleFile) + if err != nil { + task.Err = fmt.Errorf("read tendisplus redis-sync template file(%s) fail,err:%v", sampleFile, err) + task.Logger.Error(task.Err.Error()) + return + } + sampleData := string(sampleBytes) + sampleData = strings.ReplaceAll(sampleData, "{{SYNC_PORT}}", strconv.Itoa(task.RowData.SyncerPort)) + // sampleData = strings.ReplaceAll(sampleData, "{{SYNC_LOG_FILE}}", task.SyncLogFile) + sampleData = strings.ReplaceAll(sampleData, "{{SYNC_LOG_FILE}}", "./"+filepath.Base(task.SyncLogFile)) + sampleData = strings.ReplaceAll(sampleData, "{{KV_STORE_ID}}", strconv.Itoa(task.RowData.SrcKvStoreID)) + sampleData = strings.ReplaceAll(sampleData, "{{SRC_ADDR}}", task.GetSrcRedisAddr()) + sampleData = strings.ReplaceAll(sampleData, "{{SRC_PASSWORD}}", task.GetSrcRedisPasswd()) + sampleData = strings.ReplaceAll(sampleData, "{{DST_ADDR}}", task.GetDstRedisAddr()) + sampleData = strings.ReplaceAll(sampleData, "{{DST_PASSWORD}}", task.GetDstRedisPasswd()) + // 如果目标集群是域名,则redis-sync需要先解析域名中的 proxy ips,而后连接;该行为通过 proxy-enable 参数控制 + proxyEnable := "no" + if util.IsDbDNS(task.GetDstRedisAddr()) { + proxyEnable = "yes" + } + sampleData = strings.ReplaceAll(sampleData, "{{PROXY_ENABLE}}", proxyEnable) + err = ioutil.WriteFile(task.SyncConfigFile, []byte(sampleData), 0755) + if err != nil { + task.Err = fmt.Errorf("save redis-sync config file(%s) fail,err:%v", task.SyncConfigFile, err) + task.Logger.Error(task.Err.Error()) + return + } + task.Logger.Info(fmt.Sprintf("save redis-sync config file(%s) succeess", task.SyncConfigFile)) + return +} + +func (task *MakeSyncTask) clearOldSyncConfigFile() { + task.SyncConfigFile = strings.TrimSpace(task.SyncConfigFile) + if task.SyncConfigFile == "" { + return + } + _, err := os.Stat(task.SyncConfigFile) + if err == nil { + // rm old sync config file + rmCmd := fmt.Sprintf("cd %s && rm -rf %s", + filepath.Dir(task.SyncConfigFile), filepath.Base(task.SyncConfigFile)) + task.Logger.Info(rmCmd) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 20*time.Second, task.Logger) + 
} +} + +func (task *MakeSyncTask) redisSyncRunCmd(cmds []string, recordLog bool) (cmdRet string) { + localIP := "127.0.0.1" + opts := []string{"--no-auth-warning", "-h", localIP, "-p", strconv.Itoa(task.RowData.SyncerPort)} + opts = append(opts, cmds...) + + logCmd := task.RedisCliTool + " " + strings.Join(opts, " ") + if recordLog { + task.Logger.Info("redis-sync cmd ...", zap.String("cmd", logCmd)) + } + + cmdRet, err := util.RunLocalCmd(task.RedisCliTool, opts, "", nil, 5*time.Second, task.Logger) + if err != nil { + task.Err = err + task.Logger.Error("redis-sync cmd fail", zap.Error(task.Err), zap.String("cmd", logCmd)) + return + } + if strings.HasPrefix(cmdRet, "ERR ") == true { + task.Logger.Error("redis-sync cmd fail", zap.String("cmdRet", cmdRet)) + task.Err = fmt.Errorf("redis-sync cmd fail,err:%v", cmdRet) + return + } + if recordLog { + task.Logger.Info("redis-sync cmd success", zap.String("cmdRet", cmdRet)) + } + return cmdRet +} + +// RedisSyncInfo redis-sync执行info [tendis-plus]等 +func (task *MakeSyncTask) RedisSyncInfo(section string) (infoRets map[string]string) { + opts := []string{"info"} + if section != "" { + opts = append(opts, section) + } + var str01 string + maxRetryTimes := 5 + for maxRetryTimes >= 0 { + maxRetryTimes-- + task.Err = nil + str01 = task.redisSyncRunCmd(opts, false) + if task.Err != nil { + time.Sleep(5 * time.Second) + continue + } + break + } + if task.Err != nil { + return + } + infoList := strings.Split(str01, "\n") + infoRets = make(map[string]string) + for _, infoItem := range infoList { + infoItem = strings.TrimSpace(infoItem) + if strings.HasPrefix(infoItem, "#") { + continue + } + if len(infoItem) == 0 { + continue + } + list01 := strings.SplitN(infoItem, ":", 2) + if len(list01) < 2 { + continue + } + list01[0] = strings.TrimSpace(list01[0]) + list01[1] = strings.TrimSpace(list01[1]) + infoRets[list01[0]] = list01[1] + } + return infoRets +} + +// IsSyncAlive sync是否存活 +func (task *MakeSyncTask) IsSyncAlive() (isAlive bool, err error) { + isSyncAliaveCmd := fmt.Sprintf("ps -ef|grep 'taskid%d-'|grep 'kvstore-%d-'|grep -v grep|grep sync|grep conf || true", + task.RowData.ID, task.RowData.SrcKvStoreID) + tclog.Logger.Info("", zap.String("isSyncAliaveCmd", isSyncAliaveCmd)) + ret, err := util.RunLocalCmd("bash", []string{"-c", isSyncAliaveCmd}, "", nil, 1*time.Minute, task.Logger) + if err != nil { + return false, err + } + ret = strings.TrimSpace(ret) + if ret != "" { + return true, nil + } + return false, nil +} + +// IsSyncStateOK 同步状态是否本来就ok +func (task *MakeSyncTask) IsSyncStateOK() (ok bool) { + // redis-sync是否存活 + ok, task.Err = task.IsSyncAlive() + if task.Err != nil { + return false + } + if !ok { + return false + } + // 同步状态是否本来就是ok的 + syncInfoMap := task.RedisSyncInfo("") + if task.Err != nil { + return + } + syncState := syncInfoMap["sync_redis_state"] + if syncState == constvar.SyncOnlineState { + return true + } + return false +} + +// RedisSyncStop 关闭redis-sync +func (task *MakeSyncTask) RedisSyncStop() { + isAlive, err := task.IsSyncAlive() + if !isAlive { + tclog.Logger.Info(fmt.Sprintf("RedisSyncStop srcRedis:%s kvStore:%d sync is not alive", + task.GetSrcRedisAddr(), task.RowData.SrcKvStoreID)) + return + } + tclog.Logger.Info(fmt.Sprintf("RedisSyncStop srcRedis:%s kvStore:%d sync is alive", + task.GetSrcRedisAddr(), task.RowData.SrcKvStoreID)) + + opts := []string{"SYNCADMIN", "stop"} + task.redisSyncRunCmd(opts, true) + if task.Err != nil { + task.Err = nil // 这里已经需要关闭sync,所以 SYNCADMIN stop 执行错误可忽略 + } + + // kill redis-sync 
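+    // The grep chain below mirrors the config file name generated in
+    // createSyncConfigFile ("sync-taskid<ID>-<syncerPort>-kvstore-<storeID>-.conf"),
+    // so 'taskid%d-' together with 'kvstore-%d-' pins down exactly this task's
+    // redis-sync process before kill -9 runs. Roughly, the generated shell looks
+    // like this (hypothetical values taskid=101, kvstore=3):
+    //   ps -ef|grep 'taskid101-'|grep 'kvstore-3-'|grep sync|grep conf|awk '{print $2}'|while read pid; do kill -9 $pid; done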
+ killCmd := fmt.Sprintf(` + ps -ef|grep 'taskid%d-'|grep 'kvstore-%d-'|grep -v grep|grep sync|grep conf|awk '{print $2}'|while read pid + do + kill -9 $pid + done + `, task.RowData.ID, task.RowData.SrcKvStoreID) + task.Logger.Info("RedisSyncStop...", zap.String("killCmd", killCmd)) + retryTimes := 0 + for isAlive == true && retryTimes < 5 { + msg := fmt.Sprintf("Killing redis-sync times:%d ...", retryTimes+1) + task.Logger.Info(msg) + // redis-sync is alive, now kill it + _, err = util.RunLocalCmd("bash", []string{"-c", killCmd}, "", nil, 1*time.Minute, task.Logger) + if err != nil { + task.Logger.Error("Kill redis-sync process fail", zap.Error(err)) + } + time.Sleep(10 * time.Second) + retryTimes++ + isAlive, _ = task.IsSyncAlive() + if isAlive { + task.Logger.Error(fmt.Sprintf("srcRedis:%s kvStoreId:%d,Kill redis-sync fail,process still alive", + task.GetSrcRedisAddr(), task.RowData.SrcKvStoreID)) + } + } + if isAlive && retryTimes == 5 { + task.Logger.Error(fmt.Sprintf("srcRedis:%s kvStoreId:%d,Kill redis-sync process failed", + task.GetSrcRedisAddr(), task.RowData.SrcKvStoreID)) + task.Err = fmt.Errorf("Kill redis-sync process failed") + return + } + task.Logger.Info(fmt.Sprintf("srcRedis:%s kvStoreId:%d,kill redis-sync success", + task.GetSrcRedisAddr(), task.RowData.SrcKvStoreID)) + return +} + +// RedisSyncStart 启动redis-sync +func (task *MakeSyncTask) RedisSyncStart(reacquirePort bool) { + tclog.Logger.Info(fmt.Sprintf("redis-sync start srcRedisAddr:%s kvStoreId:%d dstCluster:%s ...", + task.GetSrcRedisAddr(), task.RowData.SrcKvStoreID, task.GetDstRedisAddr())) + defer tclog.Logger.Info("end redis-sync start") + + if reacquirePort { + task.getMySyncPort(0) + if task.Err != nil { + return + } + } + maxRetryTimes := 5 + for maxRetryTimes >= 0 { + maxRetryTimes-- + task.Err = nil + + task.createSyncLogFile() + if task.Err != nil { + return + } + task.createSyncConfigFile() + if task.Err != nil { + return + } + + startCmds := fmt.Sprintf(`cd %s && nohup %s -f %s >>%s 2>&1 &`, + task.TaskDir, + task.RedisSyncTool, task.SyncConfigFile, + filepath.Base(task.SyncLogFile)) + task.Logger.Info(startCmds) + + go func(bgcmd string) { + util.RunLocalCmd("bash", []string{"-c", bgcmd}, "", nil, 10*time.Second, task.Logger) + }(startCmds) + + time.Sleep(5 * time.Second) + isAlive, err := task.IsSyncAlive() + if err != nil { + task.Err = err + task.Logger.Error(task.Err.Error()) + return + } + if !isAlive { + logContent, _ := ioutil.ReadFile(task.SyncLogFile) + task.Logger.Error("redis-sync start fail", zap.String("failDetail", string(logContent))) + task.Err = fmt.Errorf("redis-sync start fail,detail:%s", string(logContent)) + if strings.Contains(string(logContent), "Address already in use") { + // port address already used + // clear and get sync port again and retry + task.clearOldSyncLogFile() + task.clearOldSyncConfigFile() + task.getMySyncPort(task.RowData.SyncerPort + 1) + if task.Err != nil { + return + } + continue + } + } + break + } + if task.Err != nil { + task.Err = fmt.Errorf("make sync start fail") + return + } + + // 命令: redis-cli -h $redis_sync_ip -p $redis_sync_port SYNCADMIN start + opts := []string{"SYNCADMIN", "start"} + ret02 := task.redisSyncRunCmd(opts, true) + if task.Err != nil { + return + } + tclog.Logger.Info("redis-sync 'syncadmin start' success", zap.String("cmdRet", ret02)) + + task.UpdateDbAndLogLocal("redis-sync %d start success", task.RowData.SyncerPort) + return +} + +// WatchSync 监听redis-sync +// 获取binlog-lag 与 last-key等信息 +// 执行stop 等操作 +func (task 
*MakeSyncTask) WatchSync() { + tenSlaveCli := task.getSlaveConn() + if task.Err != nil { + task.SetStatus(-1) + task.UpdateDbAndLogLocal(task.Err.Error()) + return + } + + task.SetTaskType(constvar.TendisplusSendBulkTaskType) + task.UpdateRow() + + for { + time.Sleep(10 * time.Second) + row01, err := tendisdb.GetTaskByID(task.RowData.ID, task.Logger) + if err != nil { + task.Err = err + return + } + task.RowData = row01 + if task.RowData.KillSyncer == 1 || + task.RowData.SyncOperate == constvar.RedisSyncStopTodo || + task.RowData.SyncOperate == constvar.RedisForceKillTaskTodo { // stop redis-sync + + succ := constvar.RedisSyncStopSucc + fail := constvar.RedisSyncStopFail + if task.RowData.SyncOperate == constvar.RedisForceKillTaskTodo { + succ = constvar.RedisForceKillTaskSuccess + fail = constvar.RedisForceKillTaskFail + } + task.Logger.Info(fmt.Sprintf("start execute %q ...", task.RowData.SyncOperate)) + task.RedisSyncStop() + if task.Err == nil { + task.SetSyncOperate(succ) + task.SetStatus(2) + task.UpdateDbAndLogLocal("tendisplus redis-sync:%d终止成功", task.RowData.SyncerPid) + task.Err = nil + } else { + task.RowData.SyncOperate = fail + task.SetSyncOperate(fail) + task.SetStatus(-1) + task.UpdateDbAndLogLocal("tendisplus redis-sync:%d终止失败,err:%v", task.RowData.SyncerPid, task.Err) + } + return + } + syncInfoMap := task.RedisSyncInfo("") + if task.Err != nil { + return + } + redisIP := syncInfoMap["redis_ip"] + redisPort := syncInfoMap["redis_port"] + if redisIP != task.RowData.SrcIP || redisPort != strconv.Itoa(task.RowData.SrcPort) { + task.Err = fmt.Errorf("redis-sync(%s:%d) 同步redis(%s:%s) 的数据 不等于 %s,同步源redis不对", + "127.0.0.1", task.RowData.SyncerPort, redisIP, redisPort, task.GetSrcRedisAddr()) + task.SetMessage(task.Err.Error()) + task.SetStatus(-1) + task.UpdateDbAndLogLocal(task.Err.Error()) + return + } + syncState := syncInfoMap["sync_redis_state"] + if syncState != constvar.SyncOnlineState { + task.Err = fmt.Errorf("redis-sync(%s:%d) sync-redis-state:%s != %s", + "127.0.0.1", task.RowData.SyncerPort, syncState, constvar.SyncOnlineState) + task.SetStatus(-1) + task.UpdateDbAndLogLocal(task.Err.Error()) + // return + continue + } + infoRepl, err := tenSlaveCli.TendisplusInfoRepl() + if err != nil { + task.Err = err + task.SetStatus(-1) + task.UpdateDbAndLogLocal(task.Err.Error()) + return + } + if len(infoRepl.RocksdbSlaveList) == 0 { + task.Err = fmt.Errorf("tendisplus slave(%s) 'info replication' not found rocksdb slaves", + task.GetSrcRedisAddr()) + task.SetStatus(-1) + task.UpdateDbAndLogLocal(task.Err.Error()) + return + } + var myRockSlave *myredis.InfoReplRocksdbSlave = nil + for _, slave01 := range infoRepl.RocksdbSlaveList { + if slave01.DestStoreID == task.RowData.SrcKvStoreID { + myRockSlave = &slave01 + break + } + } + if myRockSlave == nil { + task.Err = fmt.Errorf("tendisplus slave(%s) 'info replication' not found dst_store_id:%d rocksdb slave", + task.GetSrcRedisAddr(), task.RowData.SrcKvStoreID) + task.SetStatus(-1) + task.UpdateDbAndLogLocal(task.Err.Error()) + task.Logger.Info(infoRepl.String()) + return + } + if myRockSlave.State != constvar.TendisplusReplSendbulk && + myRockSlave.State != constvar.TendisplusReplOnline { + task.Err = fmt.Errorf("tendisplus slave(%s) 'info replication' dst_store_id:%d rocksdbSlave state=%s not %s/%s", + task.GetSrcRedisAddr(), task.RowData.SrcKvStoreID, myRockSlave.State, + constvar.TendisplusReplSendbulk, constvar.TendisplusReplOnline) + task.SetStatus(-1) + task.UpdateDbAndLogLocal(task.Err.Error()) + return + } + 
task.SetStatus(1) + if myRockSlave.State == constvar.TendisplusReplSendbulk { + task.SetTaskType(constvar.TendisplusSendBulkTaskType) + task.UpdateDbAndLogLocal("全量迁移中,binlog_pos:%d,lag:%d", myRockSlave.BinlogPos, myRockSlave.Lag) + } else { + task.SetTaskType(constvar.TendisplusSendIncrTaskType) + task.UpdateDbAndLogLocal("增量同步中,binlog_pos:%d,lag:%d", myRockSlave.BinlogPos, myRockSlave.Lag) + } + } +} diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/tendisplus/tendisplus.go b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisplus/tendisplus.go new file mode 100644 index 0000000000..1470481346 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisplus/tendisplus.go @@ -0,0 +1,2 @@ +// Package tendisplus TODO +package tendisplus diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/tendisplus/watchSync.go b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisplus/watchSync.go new file mode 100644 index 0000000000..c49aa87059 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisplus/watchSync.go @@ -0,0 +1,60 @@ +package tendisplus + +import ( + "dbm-services/redis/redis-dts/models/mysql/tendisdb" + "dbm-services/redis/redis-dts/pkg/constvar" +) + +// WatchSyncTask 很多时候tendisplus redis-sync 已经拉起,状态为runnig(taskrow.status==1 taskrow.taskType="tendisplusSendBulk/tendisplusSendIncr") +// 而此时我们需要暂停 dbm-services/redis/redis-dts 重新替换 dbm-services/redis/redis-dts的介质 +// 再次拉起后, 以前(taskrow.status==1 taskrow.taskType="makeSync")的task其相关状态依然需要我们不断更新 +// 注意: 该任务只在 dbm-services/redis/redis-dts 被拉起一瞬间创建,只监听 以往 (taskrow.status==1 taskrow.taskType="makeSync")的task +// 对于新增的 (taskrow.status==1 taskrow.taskType="makeSync")的task 不做任何处理 +type WatchSyncTask struct { + MakeSyncTask +} + +// TaskType task类型 +func (task *WatchSyncTask) TaskType() string { + return constvar.WatchOldSyncTaskType +} + +// NextTask 下一个task类型 +func (task *WatchSyncTask) NextTask() string { + return "" +} + +// NewWatchSyncTask 新建任务 +func NewWatchSyncTask(row *tendisdb.TbTendisDTSTask) *WatchSyncTask { + ret := &WatchSyncTask{ + MakeSyncTask: *NewMakeSyncTask(row), + } + return ret +} + +// Execute 程序重新拉起后监听以往处于taskType='makeSync',status=1状态的redis-sync +func (task *WatchSyncTask) Execute() { + if task.Err != nil { + return + } + + defer func() { + if task.Err != nil { + task.SetStatus(-1) + task.SetMessage(task.Err.Error()) + task.UpdateRow() + } + }() + + task.GetMyRedisCliTool() + if task.Err != nil { + return + } + task.GetMyRedisSyncTool() + if task.Err != nil { + return + } + + task.WatchSync() + return +} diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/backupFileFetch.go b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/backupFileFetch.go new file mode 100644 index 0000000000..854e2f037f --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/backupFileFetch.go @@ -0,0 +1,118 @@ +package tendisssd + +import ( + "dbm-services/redis/redis-dts/models/mysql/tendisdb" + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/pkg/dtsTask" + "dbm-services/redis/redis-dts/pkg/remoteOperation" + "path/filepath" +) + +// BakcupFileFetchTask 备份拉取task +type BakcupFileFetchTask struct { + dtsTask.FatherTask +} + +// TaskType task类型 +func (task *BakcupFileFetchTask) TaskType() string { + return constvar.BackupfileFetchTaskType +} + +// NextTask 下一个task类型 +func (task *BakcupFileFetchTask) NextTask() string { + return constvar.TredisdumpTaskType +} + +// NewBakcupFileFetchTask 新建一个备份拉取task +func NewBakcupFileFetchTask(row *tendisdb.TbTendisDTSTask) *BakcupFileFetchTask { 
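+    // the constructor only wires in the shared FatherTask; the real work happens
+    // in Execute(), which downloads the backup from SrcIP and then hands over to
+    // the next stage, constvar.TredisdumpTaskType (see NextTask above)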
+// NewBakcupFileFetchTask creates a new backup-fetching task
+func NewBakcupFileFetchTask(row *tendisdb.TbTendisDTSTask) *BakcupFileFetchTask {
+ return &BakcupFileFetchTask{
+ FatherTask: dtsTask.NewFatherTask(row),
+ }
+}
+
+// PreClear cleans up leftover data from previous runs
+func (task *BakcupFileFetchTask) PreClear() {
+ if task.Err != nil {
+ return
+ }
+ task.ClearLocalFetchBackup()
+}
+
+// Execute performs the backup file fetch
+func (task *BakcupFileFetchTask) Execute() {
+ if task.Err != nil {
+ return
+ }
+ defer func() {
+ if task.Err != nil {
+ task.SetStatus(-1)
+ task.SetMessage(task.Err.Error())
+ task.UpdateRow()
+ }
+ }()
+ task.SetStatus(1)
+ task.UpdateDbAndLogLocal("pulling backup from %s, file:%s to local...", task.RowData.SrcIP, task.RowData.TendisbackupFile)
+
+ task.Err = task.InitTaskDir()
+ if task.Err != nil {
+ return
+ }
+
+ task.PreClear()
+ if task.Err != nil {
+ return
+ }
+
+ // pull the backup file from srcIP
+ var absCli remoteOperation.RemoteOperation
+ absCli, task.Err = remoteOperation.NewIAbsClientByEnvVars(task.RowData.SrcIP, task.Logger)
+ if task.Err != nil {
+ return
+ }
+ task.Err = absCli.RemoteDownload(
+ filepath.Dir(task.RowData.TendisbackupFile),
+ task.TaskDir,
+ filepath.Base(task.RowData.TendisbackupFile),
+ constvar.GetABSPullBwLimit(),
+ )
+ if task.Err != nil {
+ return
+ }
+
+ backupFile := filepath.Base(task.RowData.TendisbackupFile)
+ task.SetFetchFile(filepath.Join(task.TaskDir, backupFile))
+ task.UpdateDbAndLogLocal("backup fetched from %s successfully", task.RowData.SrcIP)
+
+ task.RefreshRowData()
+ if task.Err != nil {
+ return
+ }
+ if task.RowData.SyncOperate == constvar.RedisForceKillTaskSuccess {
+ // task had been terminated by force
+ // clear src host backup
+ task.ClearSrcHostBackup()
+ // clear local backup dir
+ task.ClearLocalFetchBackup()
+ // restore slave-log-keep-count
+ task.RestoreSrcSSDKeepCount()
+ return
+ }
+
+ task.EndClear()
+ if task.Err != nil {
+ return
+ }
+
+ task.SetStatus(0)
+ task.SetTaskType(task.NextTask())
+ task.UpdateDbAndLogLocal("backup file:%s fetched to local successfully", task.RowData.FetchFile)
+}
+
+// EndClear removes leftover backup data on srcIP after the fetch completes
+func (task *BakcupFileFetchTask) EndClear() {
+ if task.RowData.TendisbackupFile == "" {
+ return
+ }
+ // after the backup file has been fetched, delete the backupFile on srcIP to avoid taking up too much space
+ task.ClearSrcHostBackup()
+}
diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/cmdsImporter.go b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/cmdsImporter.go
new file mode 100644
index 0000000000..6cfa5de3ee
--- /dev/null
+++ b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/cmdsImporter.go
@@ -0,0 +1,810 @@
+package tendisssd
+
+import (
+ "context"
+ "dbm-services/redis/redis-dts/models/mysql/tendisdb"
+ "dbm-services/redis/redis-dts/pkg/constvar"
+ "dbm-services/redis/redis-dts/pkg/dtsTask"
+ "dbm-services/redis/redis-dts/util"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/spf13/viper"
+ "go.uber.org/zap"
+)
+
+// CmdsImporterTask command-import task
+type CmdsImporterTask struct {
+ dtsTask.FatherTask
+ DelFiles []string `json:"delFiles"`
+ OutputFiles []string `json:"outputFiles"`
+ ListFiles []string `json:"listFiles"`
+ ExpireFiles []string `json:"expireFiles"`
+ ImportLogDir string `json:"importLogDir"`
+ DstProxyAddrs []string `json:"dstProxyAddrs"`
+ DstProxyIterIdx int32 `json:"dstProxyIterIdx"`
+ DtsProxyMut sync.Mutex `json:"-"` // lock held while updating DstProxyAddrs
+ DtsProxyLastUpdatetime time.Time `json:"-"` // time DstProxyAddrs was last updated
+}
+
+// TaskType returns the task type
+func (task *CmdsImporterTask) TaskType() string {
+ return constvar.CmdsImporterTaskType
+}
+
+// NextTask returns the next task type
+func (task *CmdsImporterTask) NextTask() string {
+ return constvar.MakeSyncTaskType
+}
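+
+// DstProxyIterIdx in the struct above is advanced atomically so that the
+// concurrent ImporterItem goroutines spread their redis-cli sessions across
+// all resolved destination proxies (see NextDstProxyAddr below). The core
+// pattern as a standalone sketch (names are illustrative, not from this
+// package):
+//
+//	func nextAddr(addrs []string, idx *int32) string {
+//		i := int(atomic.LoadInt32(idx)) % len(addrs)
+//		atomic.AddInt32(idx, 1)
+//		return addrs[i]
+//	}
+//
+// Load-then-add is two atomic operations rather than one, so two goroutines
+// may occasionally pick the same proxy; for load spreading that is harmless.
+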
+// NewCmdsImporterTask creates a new command-import task
+func NewCmdsImporterTask(row *tendisdb.TbTendisDTSTask) *CmdsImporterTask {
+ return &CmdsImporterTask{
+ FatherTask: dtsTask.NewFatherTask(row),
+ }
+}
+
+// ImporterItem a single import unit (imports run concurrently)
+type ImporterItem struct {
+ RedisClient string `json:"redisClient"`
+ SQLFile string `json:"sqlFile"`
+ DstPassword string `json:"dstPassword"`
+ LogFile string `json:"logFile"`
+ IgnoreErrlist []string `json:"ignoreErrList"`
+ ErrFile string `json:"errFile"`
+ Err error `json:"err"`
+ task *CmdsImporterTask `json:"-"`
+ Logger *zap.Logger `json:"-"`
+}
+
+// ToString ..
+func (item *ImporterItem) ToString() string {
+ ret, _ := json.Marshal(item)
+ return string(ret)
+}
+
+// MaxRetryTimes returns the maximum retry count
+func (item *ImporterItem) MaxRetryTimes() int {
+ ret := viper.GetInt("importMaxRetryTimes")
+ if ret <= 0 {
+ ret = 5
+ } else if ret >= 10 {
+ ret = 10
+ }
+ return ret
+}
+
+// RetryAble reports whether the file can safely be imported again
+func (item *ImporterItem) RetryAble() bool {
+ if constvar.ListKeyFileReg.MatchString(item.SQLFile) {
+ return false
+ }
+ return true
+}
+
+// IsWrongTypeErr ..
+func (item *ImporterItem) IsWrongTypeErr(errData string) bool {
+ errData = strings.TrimSpace(errData)
+ lines := strings.Split(errData, "\n")
+ for _, line01 := range lines {
+ if strings.Contains(line01, constvar.WrongTypeOperationErr) == false {
+ return false
+ }
+ }
+ item.IgnoreErrlist = append(item.IgnoreErrlist, constvar.WrongTypeOperationErr)
+ return true
+}
+
+// ErrorAbleToBeIgnored reports whether the error can be ignored
+func (item *ImporterItem) ErrorAbleToBeIgnored(errData string) bool {
+ return item.IsWrongTypeErr(errData)
+}
+
+// RunTask runs the import
+func (item *ImporterItem) RunTask(task *CmdsImporterTask) {
+ supPipeImport := task.IsSupportPipeImport()
+ importTimeout := task.ImportTimeout()
+ cmdTimeout := importTimeout + 60
+
+ item.Logger.Info("starting import...", zap.String("params", item.ToString()))
+ maxRetryTimes := item.MaxRetryTimes()
+ times := 0
+ retryAble := item.RetryAble()
+ dtsAddr := item.task.NextDstProxyAddr(false)
+ for {
+ times++
+ importCmd := []string{"-c"}
+ var grepStdoutCmd string
+ list01 := strings.Split(dtsAddr, ":")
+ if len(list01) != 2 {
+ item.Logger.Error("DstAddr format not correct", zap.String("dstAddr", dtsAddr))
+ item.Err = fmt.Errorf("DstAddr:%s format not correct", dtsAddr)
+ return
+ }
+ dstIP := list01[0]
+ dstPort := list01[1]
+ if supPipeImport == true {
+ importCmd = append(importCmd, fmt.Sprintf(
+ "%s --no-auth-warning -h %s -p %s -a %s --pipe --pipe-timeout %d < %s 1>%s 2>%s",
+ item.RedisClient, dstIP, dstPort, item.DstPassword, importTimeout, item.SQLFile, item.LogFile, item.ErrFile))
+ grepStdoutCmd = fmt.Sprintf("grep -i 'errors' %s | { grep -v 'errors: 0' || true; } ", item.LogFile)
+ } else {
+ importCmd = append(importCmd, fmt.Sprintf("%s --no-auth-warning -h %s -p %s -a %s < %s 1>%s 2>%s",
+ item.RedisClient, dstIP, dstPort, item.DstPassword, item.SQLFile, item.LogFile, item.ErrFile))
+ grepStdoutCmd = fmt.Sprintf("grep -i 'Err' %s | { grep -v 'invalid DB index' || true; }", item.LogFile)
+ }
+ item.Logger.Info(fmt.Sprintf("import attempt %d, import command:%s", times, importCmd))
+ _, item.Err = util.RunLocalCmd("bash", importCmd, "", nil, time.Duration(cmdTimeout)*time.Second, item.Logger)
+ if item.Err != nil {
+ errBytes, _ := ioutil.ReadFile(item.ErrFile)
+ errStr := strings.TrimSpace(string(errBytes))
+ if item.ErrorAbleToBeIgnored(errStr) == true {
+ // ignorable error
+ item.Err = nil
+ return
+ }
+ if retryAble && times <= maxRetryTimes {
+ dtsAddr = item.task.NextDstProxyAddr(true)
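+ // The attempt failed with a retryable, non-ignorable error, and dtsAddr
+ // has just been rotated to the next proxy (with a DNS refresh if the
+ // address list is stale), so one unhealthy proxy cannot fail the whole
+ // file. List files never reach this branch with retryAble=true:
+ // RetryAble() excludes them, likely because re-applying a partially
+ // imported list file would append duplicate elements.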
item.Logger.Error("导入出错,retry...", zap.Error(item.Err), zap.String("params", item.ToString())) + continue + } + item.Logger.Error("导入出错", zap.Error(item.Err), zap.String("params", item.ToString())) + return + } + grepRet, _ := util.RunLocalCmd("bash", + []string{"-c", grepStdoutCmd}, "", nil, 30*time.Second, item.Logger) + if grepRet != "" && retryAble && times <= maxRetryTimes { + item.Err = fmt.Errorf("import file:%s some error occur,pls check logfile:%s", item.SQLFile, item.LogFile) + item.Logger.Error(item.Err.Error()) + dtsAddr = item.task.NextDstProxyAddr(true) + continue + } else if grepRet != "" { + item.Err = fmt.Errorf("import file:%s some error occur,pls check logfile:%s", item.SQLFile, item.LogFile) + item.Logger.Error(item.Err.Error()) + return + } + // 是否发生错误 + errBytes, _ := ioutil.ReadFile(item.ErrFile) + errStr := strings.TrimSpace(string(errBytes)) + if errStr != "" { + if item.ErrorAbleToBeIgnored(errStr) == true { + return + } + if retryAble == true && times <= maxRetryTimes { + item.Err = fmt.Errorf("import file:%s some error occur,pls check errfile:%s", item.SQLFile, item.ErrFile) + item.Logger.Error(item.Err.Error()) + dtsAddr = item.task.NextDstProxyAddr(true) + continue + } + item.Err = fmt.Errorf("import file:%s some error occur,pls check errfile:%s", item.SQLFile, item.ErrFile) + item.Logger.Error(item.Err.Error()) + return + } + break + } +} + +// LookupDstRedisProxyAddrs .. +// 如果 task.RowData.DstCluster 是由 domain:port组成,则通过 net.lookup 得到其 task.DstProxyAddrs; +// 否则 task.DstProxyAddrs = []string{task.RowData.DstCluster} +func (task *CmdsImporterTask) LookupDstRedisProxyAddrs() { + task.DtsProxyMut.Lock() + defer task.DtsProxyMut.Unlock() + + task.DstProxyAddrs, task.Err = util.LookupDbDNSIPs(task.RowData.DstCluster) + if task.Err != nil { + task.Logger.Error(task.Err.Error()) + return + } + task.DtsProxyLastUpdatetime = time.Now().Local() +} + +// NextDstProxyAddr 依次轮训 DstProxyAddrs +func (task *CmdsImporterTask) NextDstProxyAddr(refreshDns bool) string { + if len(task.DstProxyAddrs) == 0 { + return task.RowData.DstCluster + } + if refreshDns && time.Now().Local().Sub(task.DtsProxyLastUpdatetime).Seconds() > 10 { + // 如果最近10秒内更新过 task.DstProxyAddrs,则不再次更新 + task.LookupDstRedisProxyAddrs() + if task.Err != nil { + // 如果发生错误,则直接返回 DtsServer addr + task.Err = nil + return task.RowData.DtsServer + } + } + // 轮训 task.DstProxyAddrs + if len(task.DstProxyAddrs) == 1 { + return task.DstProxyAddrs[0] + } + idx := int(atomic.LoadInt32(&task.DstProxyIterIdx)) % len(task.DstProxyAddrs) + targetAddr := task.DstProxyAddrs[idx] + atomic.AddInt32(&task.DstProxyIterIdx, 1) + return targetAddr +} + +// NewImporterItem 单个文件导入 +func (task *CmdsImporterTask) NewImporterItem(redisCli, keysFile, dstPasswd string) (item ImporterItem, err error) { + baseName := filepath.Base(keysFile) + importLogDir := task.getimportLogDir() + item = ImporterItem{ + RedisClient: redisCli, + SQLFile: keysFile, + task: task, + DstPassword: dstPasswd, + LogFile: filepath.Join(importLogDir, baseName+".log"), + ErrFile: filepath.Join(importLogDir, baseName+".err"), + Logger: task.Logger, + } + return +} + +func (task *CmdsImporterTask) parallelImportV2(importTasks []*ImporterItem, concurrency int) ([]*ImporterItem, error) { + wg := sync.WaitGroup{} + + if len(importTasks) == 0 { + return importTasks, nil + } + + if concurrency <= 0 { + importTasks[0].Logger.Warn(fmt.Sprintf("parallelImportV2 concurrency:%d <= 0,now concurrency=1", concurrency)) + concurrency = 1 + } + genChan := make(chan *ImporterItem) + 
retChan := make(chan *ImporterItem) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + currentIndex := 0 + totalCnt := len(importTasks) + ticker := time.NewTicker(600 * time.Second) // 每10分钟打印一次进度 + + for worker := 0; worker < concurrency; worker++ { + wg.Add(1) + go func() { + defer wg.Done() + + for importGo := range genChan { + importGo.RunTask(task) + select { + case retChan <- importGo: + case <-ctx.Done(): + return + } + } + }() + } + go func() { + defer close(genChan) + + for _, task01 := range importTasks { + taskItem := task01 + select { + case genChan <- taskItem: + case <-ctx.Done(): + return + } + } + }() + + go func() { + wg.Wait() + close(retChan) + }() + + go func() { + tick01 := time.NewTicker(10 * time.Second) + ok01 := false + for { + select { + case <-tick01.C: + ok01 = true + row01, err := tendisdb.GetTaskByID(task.RowData.ID, task.Logger) + if err != nil { + break + } + if row01 == nil { + task.UpdateDbAndLogLocal("根据task_id:%d获取task row失败,row01:%v", task.RowData.ID, row01) + break + } + if row01.SyncOperate == constvar.RedisForceKillTaskTodo { + // 用户选择强制终止,则终止所有导入 + task.SetSyncOperate(constvar.RedisForceKillTaskSuccess) + task.Err = fmt.Errorf("%s...", constvar.RedisForceKillTaskSuccess) + cancel() + ok01 = false + break + } + case <-ctx.Done(): + ok01 = false + break + } + if !ok01 { + break + } + } + }() + + var retItem *ImporterItem + errList := []string{} + ignoreErrMap := make(map[string]bool) // 忽略的错误类型去重 + ignoreErrList := []string{} + ok := false + for { + currentIndex++ + select { + case retItem, ok = <-retChan: + if !ok { + break + } + if retItem.Err != nil { + errList = append(errList, retItem.Err.Error()) + cancel() // 发生错误,及时退出 + ok = false + break + } + for _, igErr := range retItem.IgnoreErrlist { + ignoreErrMap[igErr] = true + } + case <-ticker.C: + task.UpdateDbAndLogLocal("[%d/%d] import progress...", currentIndex, totalCnt) + ok = true + break + case <-ctx.Done(): + ok = false + break + } + if !ok { + break + } + } + for igErr := range ignoreErrMap { + ignoreErrList = append(ignoreErrList, igErr) + } + task.SaveIgnoreErrs(ignoreErrList) + + if len(errList) > 0 { + return importTasks, fmt.Errorf("import output fail fail") + } + return importTasks, nil +} + +// SyncImport 同步导入 +func (task *CmdsImporterTask) SyncImport(importTasks []*ImporterItem) ([]*ImporterItem, error) { + currentIndex := 0 + totalCnt := len(importTasks) + ignoreErrMap := make(map[string]bool) // 忽略的错误类型去重 + ignoreErrList := []string{} + for _, import01 := range importTasks { + importItem := import01 + task.Logger.Info(fmt.Sprintf("SyncImport====>%s", importItem.SQLFile)) + } + for _, import01 := range importTasks { + importItem := import01 + currentIndex++ + importItem.RunTask(task) + if importItem.Err != nil { + return importTasks, importItem.Err + } + for _, igErr := range importItem.IgnoreErrlist { + ignoreErrMap[igErr] = true + } + if currentIndex%200 == 0 { + task.UpdateDbAndLogLocal("[%d/%d] import progress...", currentIndex, totalCnt) + row01, err := tendisdb.GetTaskByID(task.RowData.ID, task.Logger) + if err != nil { + continue + } + if row01 == nil { + task.UpdateDbAndLogLocal("根据task_id:%d获取task row失败,row01:%v", task.RowData.ID, row01) + continue + } + if row01.SyncOperate == constvar.RedisForceKillTaskTodo { + // 用户选择强制终止,则终止所有导入 + task.SetSyncOperate(constvar.RedisForceKillTaskSuccess) + task.Err = fmt.Errorf("%s...", constvar.RedisForceKillTaskSuccess) + return importTasks, task.Err + } + } + } + for igErr := range ignoreErrMap { + 
ignoreErrList = append(ignoreErrList, igErr) + } + task.SaveIgnoreErrs(ignoreErrList) + + return importTasks, nil +} + +// PreClear 清理以往task生成的垃圾数据(tredis-binlog-0-output* list-output-0 tendis-binlog-expires*) +func (task *CmdsImporterTask) PreClear(delOutputFile, delOutputExpire, delImportLog bool) { + // 删除output 文件 + if len(task.OutputFiles) > 0 && delOutputFile == true { + // 文件存在,则清理 + // rmCmd := fmt.Sprintf("rm -rf %s/%s > /dev/null 2>&1", task.RowData.SqlfileDir, constvar.TredisdumpOutputGlobMatch) + rmCmd := fmt.Sprintf("cd %s && find . -maxdepth 1 -name '%s' -print|xargs rm > /dev/null 2>&1", + task.RowData.SqlfileDir, constvar.TredisdumpOutputGlobMatch) + task.Logger.Info(fmt.Sprintf("CmdsImporterTask PreClear execute localCmd:%s", rmCmd)) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 1*time.Hour, task.Logger) + } + + // 删除del 文件 + if len(task.DelFiles) > 0 && delOutputFile == true { + // 文件存在,则清理 + // rmCmd := fmt.Sprintf("rm -rf %s/%s > /dev/null 2>&1", task.RowData.SqlfileDir, constvar.TredisdumpDelGlobMatch) + rmCmd := fmt.Sprintf("cd %s && find . -maxdepth 1 -name '%s' -print|xargs rm > /dev/null 2>&1", + task.RowData.SqlfileDir, constvar.TredisdumpDelGlobMatch) + task.Logger.Info(fmt.Sprintf("CmdsImporterTask PreClear execute localCmd:%s", rmCmd)) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 1*time.Hour, task.Logger) + } + + // 删除list 文件 + if len(task.ListFiles) > 0 && delOutputFile == true { + // 文件存在,则清理 + // rmCmd := fmt.Sprintf("rm -rf %s/%s > /dev/null 2>&1", task.RowData.SqlfileDir, constvar.TredisdumpListGlobMatch) + rmCmd := fmt.Sprintf("cd %s && find . -maxdepth 1 -name '%s' -print|xargs rm > /dev/null 2>&1", + task.RowData.SqlfileDir, constvar.TredisdumpListGlobMatch) + task.Logger.Info(fmt.Sprintf("CmdsImporterTask PreClear execute localCmd:%s", rmCmd)) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 1*time.Hour, task.Logger) + } + + // 找到expire文件 + if len(task.ExpireFiles) > 0 && delOutputExpire == true { + // 文件存在,则清理 + // rmCmd := fmt.Sprintf("rm -rf %s/%s > /dev/null 2>&1", task.RowData.SqlfileDir, constvar.TredisdumpExpireGlobMatch) + rmCmd := fmt.Sprintf("cd %s && find . 
-maxdepth 1 -name '%s' -print|xargs rm > /dev/null 2>&1", + task.RowData.SqlfileDir, constvar.TredisdumpExpireGlobMatch) + task.Logger.Info(fmt.Sprintf("CmdsImporterTask PreClear execute localCmd:%s", rmCmd)) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 1*time.Hour, task.Logger) + } + importLogDir := filepath.Join(task.RowData.SqlfileDir, "importlogs") + _, err := os.Stat(importLogDir) + if err == nil && delImportLog == true { + // 文件存在,则清理 + rmCmd := fmt.Sprintf("rm -rf %s > /dev/null 2>&1", importLogDir) + task.Logger.Info(fmt.Sprintf("CmdsImporterTask PreClear execute localCmd:%s", rmCmd)) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 1*time.Hour, task.Logger) + } +} + +// GetOutputFiles 获取output文件列表 +func (task *CmdsImporterTask) GetOutputFiles() { + outputFiles, err := filepath.Glob(task.RowData.SqlfileDir + "/" + constvar.TredisdumpOutputGlobMatch) + if err != nil { + task.Err = fmt.Errorf("GetOutputFiles match %s/%s fail,err:%v", + task.RowData.SqlfileDir, constvar.TredisdumpOutputGlobMatch, err) + task.Logger.Error(task.Err.Error()) + return + } + task.OutputFiles = outputFiles + return +} + +// GetListFiles 获取list文件列表 +func (task *CmdsImporterTask) GetListFiles() { + listFiles, err := filepath.Glob(task.RowData.SqlfileDir + "/" + constvar.TredisdumpListGlobMatch) + if err != nil { + task.Err = fmt.Errorf("GetListFiles match %s/%s fail,err:%v", + task.RowData.SqlfileDir, constvar.TredisdumpListGlobMatch, err) + task.Logger.Error(task.Err.Error()) + return + } + twoNumReg := regexp.MustCompile(`^(\d+)_list_(\d+)$`) + sort.Slice(listFiles, func(i, j int) bool { + f01 := filepath.Base(listFiles[i]) + f02 := filepath.Base(listFiles[j]) + + list01 := twoNumReg.FindStringSubmatch(f01) + list02 := twoNumReg.FindStringSubmatch(f02) + if len(list01) != 3 || len(list02) != 3 { + return false + } + + f01ThreadID, _ := strconv.ParseUint(list01[1], 10, 64) + f01Idx, _ := strconv.ParseUint(list01[2], 10, 64) + + f02ThreadID, _ := strconv.ParseUint(list02[1], 10, 64) + f02Idx, _ := strconv.ParseUint(list02[2], 10, 64) + + // 按照线程id、文件编号 正序 + if f01ThreadID < f02ThreadID { + return true + } else if f01ThreadID == f02ThreadID { + if f01Idx < f02Idx { + return true + } + } + return false + }) + + task.ListFiles = listFiles + return +} + +// GetExpireFiles 获取expire文件列表 +func (task *CmdsImporterTask) GetExpireFiles() { + expireFiles, err := filepath.Glob(task.RowData.SqlfileDir + "/" + constvar.TredisdumpExpireGlobMatch) + if err != nil { + task.Err = fmt.Errorf("GetExpireFiles match %s/%s fail,err:%v", + task.RowData.SqlfileDir, constvar.TredisdumpExpireGlobMatch, err) + task.Logger.Error(task.Err.Error()) + return + } + task.ExpireFiles = expireFiles + return +} + +// GetDelFiles 获取del文件列表(包含符合类型key del命令) +func (task *CmdsImporterTask) GetDelFiles() { + delFiles, err := filepath.Glob(task.RowData.SqlfileDir + "/" + constvar.TredisdumpDelGlobMatch) + if err != nil { + task.Err = fmt.Errorf("GetDelFiles match %s/%s fail,err:%v", + task.RowData.SqlfileDir, constvar.TredisdumpDelGlobMatch, err) + task.Logger.Error(task.Err.Error()) + return + } + task.DelFiles = delFiles + return +} + +// createImportLogDirNotExists create importLogDir if not exists +func (task *CmdsImporterTask) createImportLogDirNotExists() { + _, err := os.Stat(task.RowData.SqlfileDir) + if err != nil && os.IsNotExist(err) == true { + task.Err = fmt.Errorf("sql文件夹:%s not exists", task.RowData.SqlfileDir) + task.Logger.Error(task.Err.Error()) + return + } + task.Logger.Info("SqlfileDir is ok", 
zap.String("SqlfileDir", task.RowData.SqlfileDir)) + + // 创建日志目录 + task.ImportLogDir = filepath.Join(task.RowData.SqlfileDir, "importlogs") + task.Err = util.MkDirIfNotExists(task.ImportLogDir) + if task.Err != nil { + return + } +} + +// getimportLogDir .. +func (task *CmdsImporterTask) getimportLogDir() string { + return task.ImportLogDir +} + +func (task *CmdsImporterTask) confirmHaveListKeys() { + if len(task.ListFiles) == 0 { + return + } + for _, listFile := range task.ListFiles { + file01, err := os.Stat(listFile) + if err != nil { + task.Err = fmt.Errorf("os.stat fail,err:%v,file:%s", err, listFile) + task.Logger.Error(task.Err.Error()) + return + } + if file01.Size() > 0 { + task.SetSrcHaveListKeys(1) + task.UpdateRow() + task.Logger.Info(fmt.Sprintf("srcRedis:%s#%d have list keys", task.RowData.SrcIP, task.RowData.SrcPort)) + return + } + } + task.Logger.Info(fmt.Sprintf("srcRedis:%s#%d no list keys", task.RowData.SrcIP, task.RowData.SrcPort)) +} + +// Execute 执行命令导入 +func (task *CmdsImporterTask) Execute() { + if task.Err != nil { + return + } + defer func() { + if task.Err != nil { + task.SetStatus(-1) + task.SetMessage(task.Err.Error()) + task.UpdateRow() + } + }() + + task.SetStatus(1) + task.UpdateDbAndLogLocal("开始对执行cmdsImporter") + + redisClient, err := util.IsToolExecutableInCurrDir("redis-cli") + if err != nil { + task.Err = err + return + } + task.createImportLogDirNotExists() + if task.Err != nil { + return + } + parallelLimit := task.ImportParallelLimit() + dstPasswd, _ := base64.StdEncoding.DecodeString(task.RowData.DstPassword) + + // 找到 del 文件 + task.GetDelFiles() + if task.Err != nil { + return + } + // 找到所有 output 文件 + task.GetOutputFiles() + if task.Err != nil { + return + } + // 找到所有 list 文件 + task.GetListFiles() + if task.Err != nil { + return + } + // 找到 expire 文件 + task.GetExpireFiles() + if task.Err != nil { + return + } + // 确定是否有list文件 + task.confirmHaveListKeys() + if task.Err != nil { + return + } + + task.LookupDstRedisProxyAddrs() + if task.Err != nil { + return + } + + task.UpdateDbAndLogLocal("found %d del files", len(task.DelFiles)) + + if len(task.DelFiles) > 0 && task.RowData.RetryTimes > 0 { + // 如果是重试迁移,则优先导入 del 命令 + task.UpdateDbAndLogLocal("found %d del files", len(task.DelFiles)) + + delsTasks := []*ImporterItem{} + for _, del1 := range task.DelFiles { + taskItem, err := task.NewImporterItem(redisClient, del1, string(dstPasswd)) + if err != nil { + task.Err = err + return + } + delsTasks = append(delsTasks, &taskItem) + } + task.UpdateDbAndLogLocal("开始执行dels文件导入...") + + _, err = task.parallelImportV2(delsTasks, parallelLimit) + if task.RowData.SyncOperate == constvar.RedisForceKillTaskSuccess { + // task had been terminated + // restore src redis 'slave-log-keep-count' + task.RestoreSrcSSDKeepCount() + // clear output、expires,keep importlogs + task.PreClear(true, true, false) + return + } + if err != nil { + task.Err = err + return + } + } else { + task.Logger.Info(fmt.Sprintf("found %d del files,task %d retry times,no need import del", + len(task.DelFiles), task.RowData.RetryTimes)) + } + + task.UpdateDbAndLogLocal("found %d output files", len(task.OutputFiles)) + + outputFileTasks := []*ImporterItem{} + for _, output01 := range task.OutputFiles { + taskItem, err := task.NewImporterItem(redisClient, output01, string(dstPasswd)) + if err != nil { + task.Err = err + return + } + outputFileTasks = append(outputFileTasks, &taskItem) + } + if len(outputFileTasks) > 0 { + task.UpdateDbAndLogLocal("共有%d个output文件需导入,开始执行output文件导入...", 
len(outputFileTasks)) + + _, err = task.parallelImportV2(outputFileTasks, parallelLimit) + if task.RowData.SyncOperate == constvar.RedisForceKillTaskSuccess { + // task had been terminated + // restore src redis 'slave-log-keep-count' + task.RestoreSrcSSDKeepCount() + // clear output、expires,keep importlogs + task.PreClear(true, true, false) + return + } + if err != nil { + task.Err = err + return + } + } else { + task.UpdateDbAndLogLocal("该实例无任何hash/string/set/zset需导入...") + } + + listFileTasks := []*ImporterItem{} + task.UpdateDbAndLogLocal("found %d list files", len(task.ListFiles)) + + for _, list01 := range task.ListFiles { + taskItem, err := task.NewImporterItem(redisClient, list01, string(dstPasswd)) + if err != nil { + task.Err = err + return + } + listFileTasks = append(listFileTasks, &taskItem) + } + + if len(listFileTasks) > 0 { + task.UpdateDbAndLogLocal("共有%d个list文件需导入,开始执行list文件导入...", len(listFileTasks)) + + _, err = task.SyncImport(listFileTasks) + if task.RowData.SyncOperate == constvar.RedisForceKillTaskSuccess { + // task had been terminated + // restore src redis 'slave-log-keep-count' + task.RestoreSrcSSDKeepCount() + // clear output、expires,keep importlogs + task.PreClear(true, true, false) + return + } + if err != nil { + task.Err = err + return + } + } else { + task.UpdateDbAndLogLocal("该实例无任何list key需导入....") + } + + if len(task.ExpireFiles) > 0 { + task.UpdateDbAndLogLocal("found %d expire files", len(task.ExpireFiles)) + + expiresTasks := []*ImporterItem{} + for _, expires1 := range task.ExpireFiles { + taskItem, err := task.NewImporterItem(redisClient, expires1, string(dstPasswd)) + if err != nil { + task.Err = err + return + } + expiresTasks = append(expiresTasks, &taskItem) + } + task.UpdateDbAndLogLocal("开始执行expires文件导入...") + + _, err = task.parallelImportV2(expiresTasks, parallelLimit) + if task.RowData.SyncOperate == constvar.RedisForceKillTaskSuccess { + // task had been terminated + // restore src redis 'slave-log-keep-count' + task.RestoreSrcSSDKeepCount() + // clear output、expires,keep importlogs + task.PreClear(true, true, false) + return + } + if err != nil { + task.Err = err + return + } + } else { + task.Logger.Info("没有找到expire文件,无需import") + } + + task.EndClear() + if task.Err != nil { + return + } + + task.SetTaskType(task.NextTask()) + task.SetStatus(0) + task.UpdateDbAndLogLocal("等待启动redis_sync") + return +} + +// EndClear 命令导入完成后清理output、expires文件 +func (task *CmdsImporterTask) EndClear() { + debug := viper.GetBool("TENDIS_DEBUG") + if debug == true { + return + } + task.PreClear(true, true, false) // output、expires文件,importlogs保留 +} diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/makeSync.go b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/makeSync.go new file mode 100644 index 0000000000..700eb2ed81 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/makeSync.go @@ -0,0 +1,1015 @@ +package tendisssd + +import ( + "context" + "dbm-services/redis/redis-dts/models/mysql/tendisdb" + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/pkg/dtsTask" + "dbm-services/redis/redis-dts/util" + "encoding/base64" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/jinzhu/gorm" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +const ( + // SyncSubOffset TODO + SyncSubOffset = 100000 +) + +// MakeSyncTask 启动redis-sync +type MakeSyncTask struct { + dtsTask.FatherTask + RedisCliTool string `json:"redisCliTool"` + RedisSyncTool string 
`json:"redisSyncTool"` + SyncLogFile string `json:"syncLogFile"` + SyncConfFile string `json:"syncConfFile"` + SrcADDR string `json:"srcAddr"` + SrcPassword string `json:"srcPassword"` + DstADDR string `json:"dstAddr"` + DstPassword string `json:"dstPassword"` + LastSeq uint64 `json:"lastSeq"` + Runid string `json:"runnid"` + syncSeqSave dtsTask.ISaveSyncSeq +} + +// TaskType task类型 +func (task *MakeSyncTask) TaskType() string { + return constvar.MakeSyncTaskType +} + +// NextTask 下一个task类型 +func (task *MakeSyncTask) NextTask() string { + return "" +} + +// NewMakeSyncTask 新建一个 redis-sync启动task +func NewMakeSyncTask(row *tendisdb.TbTendisDTSTask) *MakeSyncTask { + return &MakeSyncTask{ + FatherTask: dtsTask.NewFatherTask(row), + } +} + +// PreClear 关闭以前生成的redis-sync +func (task *MakeSyncTask) PreClear() { + if task.Err != nil { + return + } + if task.RowData.SyncerPort == 0 { + return + } + defer func() { + // clear old sync config file and log file + syncDir := task.MkSyncDirIfNotExists() + if task.Err != nil { + return + } + rmCmd := fmt.Sprintf("cd %s && rm -rf *-taskid%d-*.log *-taskid%d-*.conf", syncDir, task.RowData.ID, task.RowData.ID) + task.Logger.Info(fmt.Sprintf("tendisplus makeSync preClear execute:%s", rmCmd)) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 10*time.Second, task.Logger) + + }() + + task.RedisSyncStop() + return +} + +// Execute 执行启动redis-sync +func (task *MakeSyncTask) Execute() { + var err error + if task.Err != nil { + return + } + + defer func() { + if task.Err != nil { + task.SetStatus(-1) + task.SetMessage(task.Err.Error()) + task.UpdateRow() + } + }() + task.SetStatus(1) + task.UpdateDbAndLogLocal("开始创建sync关系") + + _, task.Err = util.IsFileExistsInCurrDir("tendisssd-sync-template.conf") + if task.Err != nil { + task.Logger.Error(task.Err.Error()) + return + } + + redisClient, err := util.IsToolExecutableInCurrDir("redis-cli") + if err != nil { + task.Err = err + return + } + task.RedisCliTool = redisClient + + task.SetSyncSeqSaveInterface() + if task.Err != nil { + return + } + + srcPasswd, _ := base64.StdEncoding.DecodeString(task.RowData.SrcPassword) + dstPasswd, _ := base64.StdEncoding.DecodeString(task.RowData.DstPassword) + + task.SrcADDR = fmt.Sprintf("%s:%d", task.RowData.SrcIP, task.RowData.SrcPort) + task.SrcPassword = string(srcPasswd) + task.DstADDR = task.RowData.DstCluster + task.DstPassword = string(dstPasswd) + + isSyncOK := task.IsSyncStateOK() + if isSyncOK { + // 同步状态本来就是ok的,直接watcht redis-sync即可 + task.Logger.Info(fmt.Sprintf("redis:%s 同步状态ok,开始watch...", task.SrcADDR)) + task.WatchSync() + return + } + + task.GetMyRedisSyncTool(true) + if task.Err != nil { + return + } + + lastSeq := task.GetLastSyncSeq(SyncSubOffset) + if task.Err != nil { + return + } + task.Logger.Info(fmt.Sprintf("lastSeq=>%s", lastSeq.String())) + + task.PreClear() + if task.Err != nil { + return + } + task.LastSeq = lastSeq.Seq + task.Runid = lastSeq.RunID + + // before start redis-sync, we must confirm binlog is ok + task.ConfirmSrcRedisBinlogOK(task.LastSeq) + if task.Err != nil { + return + } + + task.RedisSyncStart(true) + if task.Err != nil { + return + } + + task.UpdateDbAndLogLocal("redis-sync 启动成功,pid:%d", task.RowData.SyncerPid) + + task.WatchSync() + return +} + +// MkSyncDirIfNotExists TODO +// create sync directory if not exists +func (task *MakeSyncTask) MkSyncDirIfNotExists() (syncDir string) { + task.Err = task.InitTaskDir() + if task.Err != nil { + return + } + syncDir = filepath.Join(task.TaskDir, "syncs") + task.Err = 
+// SetSyncSeqSaveInterface TODO
+// set syncSeqSave interface
+func (task *MakeSyncTask) SetSyncSeqSaveInterface() {
+ syncDir := task.MkSyncDirIfNotExists()
+ if task.Err != nil {
+ return
+ }
+ syncRuntimeSeqFile := filepath.Join(syncDir, "sync-runtime-pos.txt")
+ task.syncSeqSave, task.Err = dtsTask.NewSaveSyncSeqToFile(syncRuntimeSeqFile, task.Logger)
+ return
+}
+
+// GetLastSyncSeq returns the last sync seq:
+// - first read savedLastSeq from task.syncSeqSave;
+// (the seq kept in task.syncSeqSave comes from redis-sync 'info Tendis-SSD' and only reflects the binlog
+//
+// received by redis-sync, not the binlog actually applied on dstRedis; redis-sync keeps at most 100k entries,
+// so the savedLastSeq read from task.syncSeqSave must be decreased by an offset)
+//
+// - then read bakSeq from the full backup;
+// - compare savedLastSeq with bakSeq; the larger one is the lastSeq we need
+func (task *MakeSyncTask) GetLastSyncSeq(subOffset uint64) (lastSeq dtsTask.SyncSeqItem) {
+ var err error
+ var savedLastSeq dtsTask.SyncSeqItem
+ if task.syncSeqSave.HaveOldSyncSeq() == true {
+ savedLastSeq, err = task.syncSeqSave.GetLastSyncSeq()
+ if err != nil {
+ task.Err = err
+ return
+ }
+ task.Logger.Info("GetLastSyncSeq before sub offset", zap.Any("savedLastSeq", savedLastSeq))
+ savedLastSeq.Seq = savedLastSeq.Seq - subOffset
+ task.Logger.Info("GetLastSyncSeq after sub offset", zap.Any("savedLastSeq", savedLastSeq))
+ }
+ bakSeq := task.GetSyncSeqFromFullBackup()
+ if task.Err != nil {
+ return
+ }
+ // the tendis slave sends binlog from lastSeq - 1 + 1 = lastSeq onwards to redis-sync,
+ // so we must use lastSeq-1 here, otherwise the commands at lastSeq would be missed
+ bakSeq.Seq = bakSeq.Seq - 1
+
+ // take the larger of the two
+ if savedLastSeq.RunID != "" && savedLastSeq.Seq > bakSeq.Seq {
+ lastSeq = savedLastSeq
+ } else {
+ lastSeq = *bakSeq
+ }
+
+ task.Logger.Info("GetLastSyncSeq final result", zap.Any("lastSeq", lastSeq))
+ return
+}
+
+// GetSpecificTimeSyncSeq returns the first sync seq at a given time (minute precision):
+// - first read savedSpecTimeSeq from task.syncSeqSave;
+// (the seq kept in task.syncSeqSave comes from redis-sync 'info Tendis-SSD' and only reflects the binlog
+//
+// received by redis-sync, not the binlog actually applied on dstRedis; redis-sync keeps at most 100k entries,
+// so the savedSpecTimeSeq read from task.syncSeqSave must be decreased by an offset)
+//
+// - then read bakSeq from the full backup;
+// - compare savedSpecTimeSeq with bakSeq; the larger one is the lastSeq we need
+func (task *MakeSyncTask) GetSpecificTimeSyncSeq(time01 time.Time, subOffset uint64) (lastSeq dtsTask.SyncSeqItem) {
+ var err error
+ var savedSpecTimeSeq dtsTask.SyncSeqItem
+ if task.syncSeqSave.HaveOldSyncSeq() == true {
+ savedSpecTimeSeq, err = task.syncSeqSave.GetSpecificTimeSyncSeq(time01)
+ if err != nil && util.IsNotFoundErr(err) == false {
+ task.Err = err
+ return
+ }
+ if err == nil {
+ task.Logger.Info("GetSpecificTimeSyncSeq before sub offset", zap.Any("savedSpecTimeSeq", savedSpecTimeSeq))
+ savedSpecTimeSeq.Seq = savedSpecTimeSeq.Seq - subOffset
+ task.Logger.Info("GetSpecificTimeSyncSeq after sub offset", zap.Any("savedSpecTimeSeq", savedSpecTimeSeq))
+ } else if err != nil && util.IsNotFoundErr(err) == true {
+ // reset err
+ err = nil
+ }
+ }
+ bakSeq := task.GetSyncSeqFromFullBackup()
+ if task.Err != nil {
+ return
+ }
+ // the tendis slave sends binlog from lastSeq - 1 + 1 = lastSeq onwards to redis-sync,
+ // so we must use lastSeq-1 here, otherwise the commands at lastSeq would be missed
+ bakSeq.Seq = bakSeq.Seq - 1
+
+ // take the larger of the two
+ if savedSpecTimeSeq.RunID != "" && savedSpecTimeSeq.Seq > bakSeq.Seq {
+ lastSeq = savedSpecTimeSeq
+ } else {
+ lastSeq = *bakSeq
+ }
+
+ task.Logger.Info("GetSpecificTimeSyncSeq final result", zap.Any("lastSeq", lastSeq))
+ return
+}
+
+// GetMyRedisSyncTool Get [latest] redis-sync binary
+func (task *MakeSyncTask) GetMyRedisSyncTool(fetchLatest bool) {
+
task.GetRedisSyncClientFromLocal() + return +} + +// GetRedisSyncClientFromLocal 本地获取redis-sync +func (task *MakeSyncTask) GetRedisSyncClientFromLocal() { + currentPath, err := util.CurrentExecutePath() + if err != nil { + task.Err = err + task.Logger.Error(err.Error()) + return + } + syncClient := filepath.Join(currentPath, "redis-sync") + _, err = os.Stat(syncClient) + if err != nil && os.IsNotExist(err) == true { + task.Err = fmt.Errorf("%s not exists,err:%v", syncClient, err) + task.Logger.Error(task.Err.Error()) + return + } else if err != nil && os.IsPermission(err) == true { + err = os.Chmod(syncClient, 0774) + if err != nil { + task.Err = fmt.Errorf("%s os.Chmod 0774 fail,err:%v", syncClient, err) + task.Logger.Error(task.Err.Error()) + return + } + } + task.Logger.Info(fmt.Sprintf("%s is ok", syncClient)) + task.RedisSyncTool = syncClient +} + +// getMySyncPort 获取redis-sync port, 10000<=port<20000 +func (task *MakeSyncTask) getMySyncPort(initSyncPort int) { + taskTypes := []string{} + var syncerPort int + taskTypes = append(taskTypes, constvar.MakeSyncTaskType) + if initSyncPort <= 0 { + initSyncPort = 10000 + localIP, _ := util.GetLocalIP() + dtsSvrMaxSyncPortTask, err := tendisdb.GetDtsSvrMaxSyncPort(task.RowData.BkCloudID, localIP, + constvar.TendisTypeTendisSSDInsance, taskTypes, task.Logger) + if (err != nil && gorm.IsRecordNotFoundError(err)) || dtsSvrMaxSyncPortTask == nil { + initSyncPort = 10000 + } else if err != nil { + task.Err = err + return + } else { + if dtsSvrMaxSyncPortTask.SyncerPort >= 10000 { + initSyncPort = dtsSvrMaxSyncPortTask.SyncerPort + 1 + } + } + } + if initSyncPort > 20000 { + initSyncPort = 10000 + } + syncerPort, task.Err = util.GetANotUsePort("127.0.0.1", initSyncPort, 1) + if task.Err != nil { + task.Logger.Error(task.Err.Error()) + return + } + task.SetSyncerPort(syncerPort) + + return +} + +// WatchSync 监听redis-sync,binlog-lag与last-key等信息 +func (task *MakeSyncTask) WatchSync() { + // ssd slave中slave-log-keep-count是否减少到1800w + // (redis-sync同步落后10分钟以内,则将slave-log-keep-count修改为1200w) + slaveLogCountDecr := false + + jobRows, err := tendisdb.GetTendisDTSJob(task.RowData.BillID, task.RowData.SrcCluster, + task.RowData.DstCluster, task.Logger) + if err != nil { + task.Err = err + return + } + + lastSeqAndTime := dtsTask.SyncSeqItem{} + for { + time.Sleep(10 * time.Second) + + task.RefreshRowData() + if task.Err != nil { + return + } + if task.RowData.KillSyncer == 1 || + task.RowData.SyncOperate == constvar.RedisSyncStopTodo || + task.RowData.SyncOperate == constvar.RedisForceKillTaskTodo { // stop redis-sync + + succ := constvar.RedisSyncStopSucc + fail := constvar.RedisSyncStopFail + if task.RowData.SyncOperate == constvar.RedisForceKillTaskTodo { + succ = constvar.RedisForceKillTaskSuccess + fail = constvar.RedisForceKillTaskFail + } + task.Logger.Info(fmt.Sprintf("start execute %q ...", task.RowData.SyncOperate)) + task.RedisSyncStop() + if task.Err == nil { + task.RestoreSrcSSDKeepCount() // 恢复 src ssd slave-log-keep-count值 + task.SetSyncOperate(succ) + task.SetStatus(2) + task.UpdateDbAndLogLocal("redis-sync:%d终止成功", task.RowData.SyncerPid) + task.Err = nil + } else { + task.SetSyncOperate(fail) + } + task.Logger.Info(fmt.Sprintf("end %q ...", task.RowData.SyncOperate)) + return + } + // pause and resume redis-sync + if task.RowData.SyncOperate == constvar.RedisSyncPauseTodo { + task.Logger.Info(fmt.Sprintf("start execute %q ...", task.RowData.SyncOperate)) + task.PauseAndResumeSync() + task.Logger.Info(fmt.Sprintf("end %q ...", 
task.RowData.SyncOperate)) + if task.Err != nil { + return + } + continue + } + // upgrade redis-sync + if task.RowData.SyncOperate == constvar.RedisSyncUpgradeTodo { + task.Logger.Info(fmt.Sprintf("start execute %q ...", task.RowData.SyncOperate)) + task.UpgradeSyncMedia() + if task.Err != nil { + return + } + task.SetSyncOperate(constvar.RedisSyncUpgradeSucc) + task.UpdateDbAndLogLocal(constvar.RedisSyncUpgradeSucc + "...") + + task.Logger.Info(fmt.Sprintf("end %q ...", task.RowData.SyncOperate)) + continue + } + // resync from specific time + if task.RowData.SyncOperate == constvar.ReSyncFromSpecTimeTodo { + task.Logger.Info(fmt.Sprintf("start execute %q ...", task.RowData.SyncOperate)) + task.ReSyncFromSpecTime(task.RowData.ResyncFromTime.Time) + if task.Err != nil { + return + } + task.SetSyncOperate(constvar.ReSyncFromSpecTimeSucc) + task.UpdateDbAndLogLocal(constvar.ReSyncFromSpecTimeSucc + "...") + + task.Logger.Info(fmt.Sprintf("end %q ...", task.RowData.SyncOperate)) + continue + } + syncInfoMap := task.RedisSyncInfo("tendis-ssd") + if task.Err != nil { + return + } + binlogLag, _ := strconv.ParseInt(syncInfoMap["tendis_binlog_lag"], 10, 64) + if binlogLag < 0 { // 说明 redis-sync没有正常运行 + task.SetTendisBinlogLag(binlogLag) + task.SetStatus(-1) + task.UpdateDbAndLogLocal("redis-sync 同步异常,binlog延迟:%d", binlogLag) + continue + } + tendisIP := syncInfoMap["tendis-ssd_ip"] + tendisPort := syncInfoMap["tendis_port"] + if tendisIP != task.RowData.SrcIP || tendisPort != strconv.Itoa(task.RowData.SrcPort) { + task.Err = fmt.Errorf("redis-sync:%s#%d 同步redis:%s#%s的binlog 不等于 %s#%d,同步源redis不对?", + "127.0.0.1", task.RowData.SyncerPort, + tendisIP, tendisPort, + task.RowData.SrcIP, task.RowData.SrcPort) + task.SetStatus(-1) + task.UpdateDbAndLogLocal(task.Err.Error()) + return + } + if binlogLag < 600 && slaveLogCountDecr == false { + // 如果redis-sync同步延迟在600s以内,则修改srcSlave slave-log-keep-count=1200w,避免告警 + task.ChangeSrcSSDKeepCount(1200 * 10000) + if task.Err != nil { + return + } + slaveLogCountDecr = true + } + lastKey, _ := syncInfoMap["tendis_last_key"] + task.SetTendisBinlogLag(binlogLag) + task.SetMessage("binlog延迟:%d秒,lastKey:%s", binlogLag, lastKey) + task.SetStatus(1) + task.UpdateRow() + + nowSeq := dtsTask.SyncSeqItem{} + nowSeq.Time.Time = time.Now().Local() + nowSeq.RunID = syncInfoMap["tendis_run_id"] + nowSeq.Seq, _ = strconv.ParseUint(syncInfoMap["tendis_last_seq"], 10, 64) + task.LastSeq = nowSeq.Seq + task.Runid = nowSeq.RunID + task.Err = task.syncSeqSave.SyncSeqWriter(&nowSeq, true) + if task.Err != nil { + task.SetMessage(task.Err.Error()) + task.SetStatus(-1) + task.UpdateRow() + // not return + } + // 如果redis-sync seq 60分钟没有任何变化,则代表同步hang住了 + // 例外情况: + // 1. 
回档临时环境,不会有心跳写入,tendis_last_seq不会变; + if nowSeq.Seq == lastSeqAndTime.Seq && jobRows[0].SrcClusterType != constvar.UserTwemproxyType { + if time.Now().Local().Sub(lastSeqAndTime.Time.Time).Minutes() > 60 { + task.Err = fmt.Errorf("binlog seq 已经60分钟未更新,redis-sync是否hang住?") + task.SetStatus(-1) + task.UpdateDbAndLogLocal(task.Err.Error()) + return + } + } else { + lastSeqAndTime = nowSeq + } + } +} + +// IsSyncAlive sync是否存活 +func (task *MakeSyncTask) IsSyncAlive() (isAlive bool, err error) { + isSyncAliaveCmd := fmt.Sprintf("ps -ef|grep %s_%d|grep 'taskid%d-'|grep -v grep|grep sync|grep conf || true", + task.RowData.SrcIP, task.RowData.SrcPort, task.RowData.ID) + task.Logger.Info("", zap.String("isSyncAliaveCmd", isSyncAliaveCmd)) + ret, err := util.RunLocalCmd("bash", []string{"-c", isSyncAliaveCmd}, "", nil, 1*time.Minute, task.Logger) + if err != nil { + task.Logger.Error("RedisSyncStop IsSyncAlive fail", zap.Error(err)) + return false, err + } + ret = strings.TrimSpace(ret) + if ret != "" { + return true, nil + } + return false, nil +} + +// IsSyncStateOK 同步状态是否本来就ok +func (task *MakeSyncTask) IsSyncStateOK() (ok bool) { + ok, task.Err = task.IsSyncAlive() + if task.Err != nil { + return false + } + if !ok { + return false + } + + jobRows, err := tendisdb.GetTendisDTSJob(task.RowData.BillID, task.RowData.SrcCluster, + task.RowData.DstCluster, task.Logger) + if err != nil { + return false + } + syncInfoMap := task.RedisSyncInfo("tendis-ssd") + if task.Err != nil { + return false + } + if jobRows[0].SrcClusterType != constvar.UserTwemproxyType { + // 回档临时环境不会时时有心跳写入,tendis_last_seq不会变 + // 所以直接返回成功 + return true + } + firstSeq, _ := strconv.ParseUint(syncInfoMap["tendis_last_seq"], 10, 64) + + time.Sleep(10 * time.Second) + + syncInfoMap = task.RedisSyncInfo("tendis-ssd") + if task.Err != nil { + return false + } + secondSeq, _ := strconv.ParseUint(syncInfoMap["tendis_last_seq"], 10, 64) + + // 第二次获取的seq比第一次大,则认为sync同步正常 + if secondSeq > firstSeq { + return true + } + return false +} + +// RedisSyncStop 关闭redis-sync +func (task *MakeSyncTask) RedisSyncStop() { + var isAlive bool + var err error + isAlive, _ = task.IsSyncAlive() + if isAlive == false { + task.Logger.Info(fmt.Sprintf("RedisSyncStop srcRedis:%s#%d sync is not alive", + task.RowData.SrcIP, task.RowData.SrcPort)) + return + } + task.Logger.Info(fmt.Sprintf("RedisSyncStop srcRedis:%s#%d sync is alive", task.RowData.SrcIP, task.RowData.SrcPort)) + + // record last sync seq + opts := []string{"SYNCADMIN", "stop"} + stopRet := task.redisSyncRunCmd(opts, true) + if task.Err != nil { + task.Err = nil // 这里已经需要关闭sync,所以 SYNCADMIN stop 执行错误可忽略 + } else { + lastSeq, err := dtsTask.SyncSeqItemDecode(stopRet) + if err != nil { + task.Err = err + return + } + lastSeq.Time.Time = time.Now().Local() + task.Err = task.syncSeqSave.SyncSeqWriter(&lastSeq, true) + if task.Err != nil { + return + } + task.LastSeq = lastSeq.Seq + } + + // kill redis-sync + killCmd := fmt.Sprintf(` + ps -ef|grep %s_%d|grep 'taskid%d-'|grep -v grep|grep sync|grep conf|awk '{print $2}'|while read pid + do + kill -9 $pid + done + `, task.RowData.SrcIP, task.RowData.SrcPort, task.RowData.ID) + task.Logger.Info("RedisSyncStop...", zap.String("killCmd", killCmd)) + retryTimes := 0 + for isAlive == true && retryTimes < 5 { + msg := fmt.Sprintf("Killing redis-sync times:%d ...", retryTimes+1) + task.Logger.Info(msg) + // redis-sync is alive, now kill it + _, err = util.RunLocalCmd("bash", []string{"-c", killCmd}, "", nil, 1*time.Minute, task.Logger) + if err != nil { + 
task.Logger.Error("Kill redis-sync process fail", zap.Error(err)) + } + time.Sleep(10 * time.Second) + retryTimes++ + isAlive, _ = task.IsSyncAlive() + if isAlive == true { + task.Logger.Error(fmt.Sprintf("srcRedis:%s#%d,Kill redis-sync fail,process still alive", + task.RowData.SrcIP, task.RowData.SrcPort)) + } + } + if isAlive == true && retryTimes == 5 { + task.Logger.Error(fmt.Sprintf("srcRedis:%s#%d,Kill redis-sync process failed", + task.RowData.SrcIP, task.RowData.SrcPort)) + task.Err = fmt.Errorf("Kill redis-sync process failed") + return + } + task.Logger.Info(fmt.Sprintf("srcRedis:%s#%d,kill redis-sync success", task.RowData.SrcIP, task.RowData.SrcPort)) + return +} + +func (task *MakeSyncTask) clearOldSyncConfigFile() { + task.SyncConfFile = strings.TrimSpace(task.SyncConfFile) + if task.SyncConfFile == "" { + return + } + _, err := os.Stat(task.SyncConfFile) + if err == nil { + // rm old sync config file + rmCmd := fmt.Sprintf("cd %s && rm -rf %s", + filepath.Dir(task.SyncConfFile), filepath.Base(task.SyncConfFile)) + task.Logger.Info(rmCmd) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 20*time.Second, task.Logger) + } +} +func (task *MakeSyncTask) clearOldSyncLogFile() { + task.SyncLogFile = strings.TrimSpace(task.SyncLogFile) + if task.SyncLogFile == "" { + return + } + _, err := os.Stat(task.SyncLogFile) + if err == nil { + // rm old sync log file + rmCmd := fmt.Sprintf("cd %s && rm -rf %s", + filepath.Dir(task.SyncLogFile), filepath.Base(task.SyncLogFile)) + task.Logger.Info(rmCmd) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 20*time.Second, task.Logger) + } +} + +// createSyncConfigFile create redis-sync config file if not exists +func (task *MakeSyncTask) createSyncConfigFile() { + syncDir := task.MkSyncDirIfNotExists() + if task.Err != nil { + return + } + task.SyncConfFile = filepath.Join(syncDir, + fmt.Sprintf("sync-taskid%d-%d.conf", task.RowData.ID, task.RowData.SyncerPort)) + + _, err := os.Stat(task.SyncConfFile) + if err == nil { + // if config file exists,return + task.Logger.Info(fmt.Sprintf("redis-sync config file:%s already exists", task.SyncConfFile)) + return + } + + currentPath, _ := util.CurrentExecutePath() + tempFile := filepath.Join(currentPath, "tendisssd-sync-template.conf") + tempContent, err := ioutil.ReadFile(tempFile) + if err != nil { + task.Logger.Error("Read redis-sync template conf fail", + zap.Error(err), zap.String("templateConfig", tempFile)) + task.Err = fmt.Errorf("Read redis-sync template conf fail.err:%v", err) + return + } + loglevel := "warning" + debug := viper.GetBool("TENDIS_DEBUG") + if debug == true { + loglevel = "debug" + } + var keyWhiteRegex string = "" + var keyBlackRegex string = "" + if task.RowData.KeyWhiteRegex != "" && !task.IsMatchAny(task.RowData.KeyWhiteRegex) { + // 部分key迁移时,额外迁移 master_ip 这个 key目的是让binglog seq始终更新 + keyWhiteRegex = task.RowData.KeyWhiteRegex + "|^master_ip" + } + if task.RowData.KeyBlackRegex != "" && !task.IsMatchAny(task.RowData.KeyBlackRegex) { + keyBlackRegex = task.RowData.KeyBlackRegex + } + tempData := string(tempContent) + tempData = strings.ReplaceAll(tempData, "{{SYNC_PORT}}", strconv.Itoa(task.RowData.SyncerPort)) + tempData = strings.ReplaceAll(tempData, "{{SYNC_LOG_FILE}}", task.SyncLogFile) + tempData = strings.ReplaceAll(tempData, "{{SRC_ADDR}}", task.SrcADDR) + tempData = strings.ReplaceAll(tempData, "{{SRC_PASSWORD}}", task.SrcPassword) + tempData = strings.ReplaceAll(tempData, "{{KEY_WHITE_REGEX}}", keyWhiteRegex) + tempData = strings.ReplaceAll(tempData, 
"{{KEY_BLACK_REGEX}}", keyBlackRegex) + tempData = strings.ReplaceAll(tempData, "{{DST_ADDR}}", task.DstADDR) + tempData = strings.ReplaceAll(tempData, "{{DST_PASSWORD}}", task.DstPassword) + tempData = strings.ReplaceAll(tempData, "{{LOG_LEVEL}}", loglevel) + + err = ioutil.WriteFile(task.SyncConfFile, []byte(tempData), 0755) + if err != nil { + task.Logger.Error("Save redis-sync conf fail", zap.Error(err), zap.String("syncConfig", task.SyncConfFile)) + task.Err = fmt.Errorf("Save redis-sync conf fail.err:%v", err) + return + } + task.Logger.Info(fmt.Sprintf("create redis-sync config file:%s success", task.SyncConfFile)) + return +} +func (task *MakeSyncTask) createSyncLogFile() { + syncDir := task.MkSyncDirIfNotExists() + if task.Err != nil { + return + } + task.SyncLogFile = filepath.Join(syncDir, + fmt.Sprintf("log-taskid%d-%d.log", task.RowData.ID, task.RowData.SyncerPort)) + // _, err := os.Stat(syncLogFile) + // if err == nil { + // // rm old sync log file + // rmCmd := fmt.Sprintf("cd %s && rm -rf %s", syncDir, filepath.Base(syncLogFile)) + // task.logger.Info(rmCmd) + // util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 20*time.Second, task.logger) + // } + return +} + +func (task *MakeSyncTask) redisSyncRunCmd(cmds []string, recordLog bool) (cmdRet string) { + localIP, err := util.GetLocalIP() + if err != nil { + task.Err = err + task.Logger.Error(err.Error()) + return + } + localIP = "127.0.0.1" // redis-sync 绑定的是 127.0.0.1 + + opts := []string{"--no-auth-warning", "-h", localIP, "-p", strconv.Itoa(task.RowData.SyncerPort)} + opts = append(opts, cmds...) + + logCmd := task.RedisCliTool + " " + strings.Join(opts, " ") + if recordLog == true { + task.Logger.Info("redis-sync cmd ...", zap.String("cmd", logCmd)) + } + + cmdRet, err = util.RunLocalCmd(task.RedisCliTool, opts, "", nil, 5*time.Second, task.Logger) + if err != nil { + task.Err = err + task.Logger.Error("redis-sync cmd fail", zap.Error(task.Err), zap.String("cmd", logCmd)) + return + } + if strings.HasPrefix(cmdRet, "ERR ") == true { + task.Logger.Info("redis-sync cmd fail", zap.String("cmdRet", cmdRet)) + task.Err = fmt.Errorf("redis-sync cmd fail,err:%v", cmdRet) + return + } + if recordLog == true { + task.Logger.Info("redis-sync cmd success", zap.String("cmdRet", cmdRet)) + } + return cmdRet +} + +// RedisSyncInfo redis-sync执行info [tendis-ssd]等 +func (task *MakeSyncTask) RedisSyncInfo(section string) (infoRets map[string]string) { + opts := []string{"info", section} + var str01 string + maxRetryTimes := 5 + for maxRetryTimes >= 0 { + maxRetryTimes-- + task.Err = nil + str01 = task.redisSyncRunCmd(opts, false) + if task.Err != nil { + time.Sleep(5 * time.Second) + continue + } + break + } + if task.Err != nil { + return + } + infoList := strings.Split(str01, "\n") + infoRets = make(map[string]string) + for _, infoItem := range infoList { + infoItem = strings.TrimSpace(infoItem) + if strings.HasPrefix(infoItem, "#") { + continue + } + if len(infoItem) == 0 { + continue + } + list01 := strings.SplitN(infoItem, ":", 2) + if len(list01) < 2 { + continue + } + list01[0] = strings.TrimSpace(list01[0]) + list01[1] = strings.TrimSpace(list01[1]) + infoRets[list01[0]] = list01[1] + } + return infoRets +} + +// RedisSyncStart 启动redis-sync +func (task *MakeSyncTask) RedisSyncStart(reacquirePort bool) { + task.Logger.Info(fmt.Sprintf("redis-sync start 源%s 目的%s ...", task.SrcADDR, task.DstADDR)) + defer task.Logger.Info("end redis-sync start") + + dtsTask.PortSyncerMut.Lock() // 串行获取redis-sync端口 和 启动 + defer 
dtsTask.PortSyncerMut.Unlock()
+
+ if reacquirePort == true {
+ task.getMySyncPort(0)
+ if task.Err != nil {
+ return
+ }
+ }
+ maxRetryTimes := 5
+ for maxRetryTimes >= 0 {
+ maxRetryTimes--
+ task.Err = nil
+
+ task.createSyncLogFile()
+ if task.Err != nil {
+ return
+ }
+ task.createSyncConfigFile()
+ if task.Err != nil {
+ return
+ }
+
+ logFile, err := os.OpenFile(task.SyncLogFile, os.O_RDWR|os.O_CREATE, 0755)
+ if err != nil {
+ task.Logger.Error("open logfile fail", zap.Error(err), zap.String("syncLogFile", task.SyncLogFile))
+ task.Err = fmt.Errorf("open logfile fail,err:%v syncLogFile:%s", err, task.SyncLogFile)
+ return
+ }
+
+ logCmd := fmt.Sprintf("%s -f %s", task.RedisSyncTool, task.SyncConfFile)
+ task.Logger.Info(logCmd)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ cmd := exec.CommandContext(ctx, task.RedisSyncTool, "-f", task.SyncConfFile)
+ cmd.Stdout = logFile
+ cmd.Stderr = logFile
+
+ err = cmd.Start()
+ if err != nil {
+ defer cancel()
+ logFile.Close()
+ task.Logger.Error("cmd.Start fail", zap.Error(err), zap.String("cmd", logCmd))
+ task.Err = fmt.Errorf("cmd.Start fail,err:%v command:%s", err, logCmd)
+ return
+ }
+ go func() {
+ err = cmd.Wait()
+ if err != nil {
+ task.Logger.Error("redis-sync cmd.wait error", zap.Error(err))
+ }
+ }()
+
+ time.Sleep(5 * time.Second)
+ isAlive, err := task.IsSyncAlive()
+ if err != nil {
+ defer cancel()
+ logFile.Close()
+ task.Err = err
+ task.Logger.Error(task.Err.Error())
+ return
+ }
+ if isAlive == false {
+ defer cancel()
+ logFile.Close()
+ logContent, _ := ioutil.ReadFile(task.SyncLogFile)
+ task.Logger.Error("redis-sync start fail", zap.String("failDetail", string(logContent)))
+ task.Err = fmt.Errorf("redis-sync start fail,detail:%s", string(logContent))
+ if strings.Contains(string(logContent), "Address already in use") {
+ // port address already used
+ // clear and get sync port again and retry
+ task.clearOldSyncLogFile()
+ task.clearOldSyncConfigFile()
+ task.getMySyncPort(task.RowData.SyncerPort + 1)
+ if task.Err != nil {
+ return
+ }
+ continue
+ }
+ }
+ task.SetSyncerPid(cmd.Process.Pid)
+ break
+ }
+ if task.Err != nil {
+ task.Err = fmt.Errorf("make sync start fail")
+ return
+ }
+
+ // command: redis-cli -h $redis_sync_ip -p $redis_sync_port CNYS 2 $last_seq x $runid
+ opts := []string{"CNYS", "2", strconv.FormatUint(task.LastSeq, 10), "x", task.Runid}
+ ret01 := task.redisSyncRunCmd(opts, true)
+ if task.Err != nil {
+ return
+ }
+ task.Logger.Info("redis-sync CNYS cmd success", zap.String("cmdRet", ret01))
+
+ // command: redis-cli -h $redis_sync_ip -p $redis_sync_port SYNCADMIN start
+ opts = []string{"SYNCADMIN", "start"}
+ ret02 := task.redisSyncRunCmd(opts, true)
+ if task.Err != nil {
+ return
+ }
+ task.Logger.Info("redis-sync SYNCADMIN cmd success", zap.String("cmdRet", ret02))
+ task.UpdateDbAndLogLocal("redis-sync %d start success", task.RowData.SyncerPort)
+
+ return
+}
+
+// PauseAndResumeSync pause and resume redis-sync
+func (task *MakeSyncTask) PauseAndResumeSync() {
+ // record last sync seq
+ opts := []string{"SYNCADMIN", "stop"}
+ stopRet := task.redisSyncRunCmd(opts, true)
+ if task.Err != nil {
+ task.SetSyncOperate(constvar.RedisSyncPauseFail)
+ task.UpdateDbAndLogLocal("redis-sync pause fail")
+ return
+ }
+ lastSeq, err := dtsTask.SyncSeqItemDecode(stopRet)
+ if err != nil {
+ task.Err = err
+ task.SetSyncOperate(constvar.RedisSyncPauseFail)
+ task.UpdateDbAndLogLocal("redis-sync pause fail,err:%v", err)
+ return
+ }
+ lastSeq.Time.Time = time.Now().Local()
+ task.Err =
task.syncSeqSave.SyncSeqWriter(&lastSeq, true) + if task.Err != nil { + return + } + task.SetSyncOperate(constvar.RedisSyncPauseSucc) + task.UpdateDbAndLogLocal("Redis-sync 暂停同步成功,seq:%d", lastSeq.Seq) + + for { + time.Sleep(10 * time.Second) + + row01, err := tendisdb.GetTaskByID(task.RowData.ID, task.Logger) + if err != nil { + task.Err = err + return + } + if row01 == nil { + task.UpdateDbAndLogLocal("根据task_id:%d获取task row失败,row01:%v", task.RowData.ID, row01) + return + } + task.RowData = row01 + if task.RowData.SyncOperate == constvar.RedisSyncResumeTodo { + opts = []string{"SYNCADMIN", "start"} + task.redisSyncRunCmd(opts, true) + if task.Err != nil { + task.SetSyncOperate(constvar.RedisSyncResumeFail) + task.Logger.Error("redis-sync resume fail", zap.Error(task.Err)) + return + } + task.SetSyncOperate(constvar.RedisSyncResumeSucc) + task.UpdateDbAndLogLocal("Redis-sync 恢复同步成功") + return + } + } +} + +// UpgradeSyncMedia 更新redis-sync介质 +func (task *MakeSyncTask) UpgradeSyncMedia() { + defer func() { + if task.Err != nil { + task.SetSyncOperate(constvar.RedisSyncUpgradeFail) + } + }() + // stop redis-sync and save lastSeq + task.RedisSyncStop() + if task.Err != nil { + return + } + task.GetMyRedisSyncTool(true) + if task.Err != nil { + return + } + task.RedisSyncStart(false) + if task.Err != nil { + return + } +} + +// ReSyncFromSpecTime 从某个时间点重新开始同步 +func (task *MakeSyncTask) ReSyncFromSpecTime(time01 time.Time) { + defer func() { + if task.Err != nil { + task.SetSyncOperate(constvar.ReSyncFromSpecTimeFail) + } + }() + specTimeSyncSeq := task.GetSpecificTimeSyncSeq(time01, SyncSubOffset) + if task.Err != nil { + return + } + // tendisSSD slave确保binlog存在 + task.ConfirmSrcRedisBinlogOK(specTimeSyncSeq.Seq) + if task.Err != nil { + return + } + // shutdown sync + task.RedisSyncStop() + if task.Err != nil { + return + } + // 更新LastSeq + task.LastSeq = specTimeSyncSeq.Seq + + // start sync + task.RedisSyncStart(false) + if task.Err != nil { + return + } +} diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/tendisBackup.go b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/tendisBackup.go new file mode 100644 index 0000000000..8da4cb8823 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/tendisBackup.go @@ -0,0 +1,303 @@ +package tendisssd + +import ( + "dbm-services/redis/redis-dts/models/myredis" + "dbm-services/redis/redis-dts/models/mysql/tendisdb" + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/pkg/dtsTask" + "dbm-services/redis/redis-dts/pkg/scrdbclient" + "dbm-services/redis/redis-dts/util" + "encoding/base64" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/spf13/viper" + "go.uber.org/zap" +) + +// TendisBackupTask src tendisssd备份task +type TendisBackupTask struct { + dtsTask.FatherTask + srcClient *myredis.RedisWorker `json:"-"` + dstClient *myredis.RedisWorker `json:"-"` +} + +// TaskType task类型 +func (task *TendisBackupTask) TaskType() string { + return constvar.TendisBackupTaskType +} + +// NextTask 下一个task类型 +func (task *TendisBackupTask) NextTask() string { + return constvar.BackupfileFetchTaskType +} + +// NewTendisBackupTask 新建一个src tendisssd备份拉取task +func NewTendisBackupTask(row *tendisdb.TbTendisDTSTask) *TendisBackupTask { + return &TendisBackupTask{ + FatherTask: dtsTask.NewFatherTask(row), + } +} + +// Init 初始化 +func (task *TendisBackupTask) Init() { + if task.Err != nil { + return + } + defer func() { + if task.Err != nil { + task.SetStatus(-1) + 
task.UpdateDbAndLogLocal(task.Err.Error()) + } + }() + task.FatherTask.Init() + if task.Err != nil { + return + } + + srcAddr := fmt.Sprintf("%s:%d", task.RowData.SrcIP, task.RowData.SrcPort) + srcPasswd, err := base64.StdEncoding.DecodeString(task.RowData.SrcPassword) + if err != nil { + task.Logger.Error(constvar.TendisBackupTaskType+" init base64.decode srcPasswd fail", + zap.Error(err), zap.String("rowData", task.RowData.ToString())) + task.Err = fmt.Errorf("[%s] get src password fail,err:%v", task.TaskType(), err) + return + } + task.srcClient, err = myredis.NewRedisClient(srcAddr, string(srcPasswd), 0, task.Logger) + if err != nil { + task.Err = err + return + } + dstAddr := strings.TrimSpace(task.RowData.DstCluster) + dstPasswd, err := base64.StdEncoding.DecodeString(task.RowData.DstPassword) + if err != nil { + task.Logger.Error(constvar.TendisBackupTaskType+" init base64.decode dstPasswd fail", + zap.Error(err), zap.String("rowData", task.RowData.ToString())) + task.Err = fmt.Errorf("[%s] get dst password fail,err:%v", task.TaskType(), err) + return + } + task.dstClient, err = myredis.NewRedisClient(dstAddr, string(dstPasswd), 0, task.Logger) + if err != nil { + task.Err = err + return + } + defer task.dstClient.Close() // dstClient主要测试连接性, 未来用不到 + + task.SetStatus(1) + task.UpdateDbAndLogLocal("[%s] 源:%s 目的:%s 连接测试成功", task.TaskType(), srcAddr, dstAddr) + + return +} + +// PreClear 清理以往产生的垃圾数据 +func (task *TendisBackupTask) PreClear() { + var err error + task.Err = task.InitTaskDir() + if task.Err != nil { + return + } + if strings.Contains(task.TaskDir, fmt.Sprintf("%s_%d", task.RowData.SrcIP, task.RowData.SrcPort)) == false { + return + } + _, err = os.Stat(task.TaskDir) + if err == nil { + // 目录存在,清理 + rmCmd := fmt.Sprintf("rm -rf %s >/dev/null 2>&1", filepath.Join(task.TaskDir, "*")) + task.Logger.Info(fmt.Sprintf("TendisBackupTask PreClear execute localCommand:%s", rmCmd)) + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 120*time.Second, task.Logger) + } +} + +// Execute src tendisSSD执行backup命令 +func (task *TendisBackupTask) Execute() { + if task.Err != nil { + return + } + layout := "20060102150405" + defer func() { + if task.Err != nil { + task.SetStatus(-1) + task.SetMessage(task.Err.Error()) + task.UpdateRow() + } + }() + + task.PreClear() + if task.Err != nil { + return + } + + waitTimeout := 3600 // 等待执行中的备份完成,等待1小时 + limit := waitTimeout / 60 + for { + if limit == 0 { + task.Err = fmt.Errorf("timeout") + task.Logger.Error("timeout") + return + } + isRunning, err := task.srcClient.TendisSSDIsBackupRunning() + if err != nil { + task.Err = err + return + } + if isRunning == false { + break + } + task.UpdateDbAndLogLocal("当前有一个backup任务执行中,等待其完成") + time.Sleep(60 * time.Second) + limit = limit - 1 + } + // backupDir:=fmt.Sprintf("tasks/%d_%s_%s/backup") + backupFile := fmt.Sprintf("REDIS_FULL_rocksdb_%s_%d_%s", + task.RowData.SrcIP, task.RowData.SrcPort, time.Now().Format(layout)) + backupDir := "/data/dbbak/" + backupFile + task.UpdateDbAndLogLocal("tendis:%s 开始创建备份目录:%s", task.srcClient.Addr, backupDir) + + cli, err := scrdbclient.NewClient(constvar.BkDbm, task.Logger) + if err != nil { + task.Err = err + return + } + _, err = cli.ExecNew(scrdbclient.FastExecScriptReq{ + Account: "mysql", + Timeout: 3600, + ScriptLanguage: 1, + ScriptContent: fmt.Sprintf(`mkdir -p %s`, backupDir), + IPList: []scrdbclient.IPItem{ + { + BkCloudID: int(task.RowData.BkCloudID), + IP: task.RowData.SrcIP, + }, + }, + }, 5) + if err != nil { + task.Err = err + return + } + + 
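+	// 说明: 下面先保存源实例原有的 slave-log-keep-count(SaveSrcSSDKeepCount), 再临时调大该值,
+	// 目的是保证备份期间 binlog 不被提前清理, 后续增量同步才能从备份点的 seq 续传;
+	// 原值会在任务结束或被强制终止时通过 RestoreSrcSSDKeepCount 恢复(见 EndClear)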
ssdSlaveLogKeepCount := viper.GetInt64("ssdSlaveLogKeepCount")
+	if ssdSlaveLogKeepCount == 0 {
+		ssdSlaveLogKeepCount = 200000000 // 默认值设置为两亿
+	}
+	task.UpdateDbAndLogLocal("开始保存slave-log-keep-count原值,并将%s中slave-log-keep-count设置为:%d",
+		task.srcClient.Addr, ssdSlaveLogKeepCount)
+	task.SaveSrcSSDKeepCount()
+	if task.Err != nil {
+		return
+	}
+	task.SetSrcNewLogCount(ssdSlaveLogKeepCount)
+	_, err = task.srcClient.ConfigSet("slave-log-keep-count", ssdSlaveLogKeepCount)
+	if err != nil {
+		task.Err = err
+		return
+	}
+
+	task.UpdateDbAndLogLocal("开始执行backup任务...")
+
+	task.Err = task.srcClient.TendisSSDBakcup(backupDir)
+	if task.Err != nil {
+		return
+	}
+
+	task.UpdateDbAndLogLocal("%s backup %s 执行中...", task.srcClient.Addr, backupDir)
+
+	waitTimeout = 7200 // 等待执行中的备份完成,最多等待2小时
+	limit = waitTimeout / 60
+	msg := ""
+	for {
+		time.Sleep(60 * time.Second)
+		if limit == 0 {
+			task.Err = fmt.Errorf("timeout")
+			task.Logger.Error("timeout")
+			return
+		}
+		isRunning, err := task.srcClient.TendisSSDIsBackupRunning()
+		if err != nil {
+			task.Err = err
+			return
+		}
+		if !isRunning {
+			time.Sleep(1 * time.Minute) // 确认备份成功后,再sleep 60s
+			break
+		}
+		row01, _ := tendisdb.GetTaskByID(task.RowData.ID, task.Logger)
+		if row01 == nil {
+			task.UpdateDbAndLogLocal("根据task_id:%d获取task row失败,row01:%v", task.RowData.ID, row01)
+			return
+		}
+		if task.RowData.SyncOperate != row01.SyncOperate {
+			task.SetSyncOperate(row01.SyncOperate)
+			msg = row01.SyncOperate + "等待备份完成,"
+		}
+		task.UpdateDbAndLogLocal("%s%s backup %s 执行中...", msg, task.srcClient.Addr, backupDir)
+		limit = limit - 1
+	}
+
+	if !constvar.IsGlobalEnv() {
+		lsCmd := fmt.Sprintf(`ls %s`, backupDir)
+		lsRets, err := cli.ExecNew(scrdbclient.FastExecScriptReq{
+			Account:        "mysql",
+			Timeout:        3600,
+			ScriptLanguage: 1,
+			ScriptContent:  lsCmd,
+			IPList: []scrdbclient.IPItem{
+				{
+					BkCloudID: int(task.RowData.BkCloudID),
+					IP:        task.RowData.SrcIP,
+				},
+			},
+		}, 5)
+		if err != nil {
+			task.Err = err
+			return
+		}
+
+		ret01 := strings.TrimSpace(lsRets[0].LogContent)
+		if ret01 == "" {
+			task.Err = fmt.Errorf("备份文件:%s 不存在?,shellCmd:%s,ret:%s", backupDir, lsCmd, ret01)
+			task.Logger.Error(task.Err.Error())
+			return
+		}
+	}
+	task.SetTendisbackupFile(backupDir)
+	task.EndClear()
+	if task.Err != nil {
+		return
+	}
+
+	task.SetTaskType(task.NextTask())
+	task.SetStatus(0)
+	task.SetMessage("等待拉取%s", backupDir)
+	task.UpdateRow()
+
+	return
+}
+
+// EndClear src tendisSSD备份且打包完成后,清理原始备份目录
+func (task *TendisBackupTask) EndClear() {
+	row01, err := tendisdb.GetTaskByID(task.RowData.ID, task.Logger)
+	if err != nil {
+		task.Err = err
+		return
+	}
+	if row01 == nil {
+		task.UpdateDbAndLogLocal("根据task_id:%d获取task row失败,row01:%v", task.RowData.ID, row01)
+		return
+	}
+	if row01.SyncOperate == constvar.RedisForceKillTaskTodo {
+		// 任务提前终止,清理src redis备份
+		task.ClearSrcHostBackup()
+		if task.Err != nil {
+			return
+		}
+		task.RestoreSrcSSDKeepCount()
+		task.SetSyncOperate(constvar.RedisForceKillTaskSuccess)
+		task.Err = fmt.Errorf("%s...", constvar.RedisForceKillTaskSuccess)
+		return
+	}
+
+	return
+}
diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/tendisdump.go b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/tendisdump.go
new file mode 100644
index 0000000000..1d13bcb984
--- /dev/null
+++ b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/tendisdump.go
@@ -0,0 +1,239 @@
+package tendisssd
+
+import (
+	"dbm-services/redis/redis-dts/models/mysql/tendisdb"
+	"dbm-services/redis/redis-dts/pkg/constvar"
+
"dbm-services/redis/redis-dts/pkg/dtsTask" + "dbm-services/redis/redis-dts/util" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/spf13/viper" + "go.uber.org/zap" +) + +// TredisdumpTask 对备份文件执行tredisdump +type TredisdumpTask struct { + dtsTask.FatherTask +} + +// TaskType task类型 +func (task *TredisdumpTask) TaskType() string { + return constvar.TredisdumpTaskType +} + +// NextTask 下一个task类型 +func (task *TredisdumpTask) NextTask() string { + return constvar.CmdsImporterTaskType +} + +// NewTredisdumpTask 新建tredisdump task +func NewTredisdumpTask(row *tendisdb.TbTendisDTSTask) *TredisdumpTask { + return &TredisdumpTask{ + FatherTask: dtsTask.NewFatherTask(row), + } +} + +// PreClear 清理以往生成的垃圾数据,如拉取到本地的备份.tar文件 +func (task *TredisdumpTask) PreClear() { + if task.Err != nil { + return + } + task.ClearLocalSQLDir() +} + +// Execute 执行tredisdump +func (task *TredisdumpTask) Execute() { + if task.Err != nil { + return + } + layout := "20060102150405" + defer func() { + if task.Err != nil { + task.SetStatus(-1) + task.SetMessage(task.Err.Error()) + task.UpdateRow() + } + }() + task.SetStatus(1) + task.UpdateDbAndLogLocal("开始对执行tredisdump task") + + var dumperClient string + dumperClient, task.Err = util.IsToolExecutableInCurrDir("tredisdump") + if task.Err != nil { + return + } + task.Logger.Info("tredisdump client is ok", zap.String("dumperClient", dumperClient)) + + // 本地目录: deps 是否存在,保存着 tredisdumper依赖的包 + currExecPath, err := util.CurrentExecutePath() + if err != nil { + return + } + + depsDir := filepath.Join(currExecPath, "deps") + _, err = os.Stat(depsDir) + if err != nil && os.IsNotExist(err) == true { + task.Err = fmt.Errorf("%s not exists,err:%v", depsDir, err) + task.Logger.Error(task.Err.Error()) + return + } + task.Logger.Info("library deps is ok", zap.String("depsDir", depsDir)) + + _, err = os.Stat(task.RowData.FetchFile) + if err != nil && os.IsNotExist(err) == true { + task.Err = fmt.Errorf("备份文件:%s not exists", task.RowData.FetchFile) + task.Logger.Error(task.Err.Error()) + return + } + task.Logger.Info("backupFile is ok", zap.String("backupFile", task.RowData.FetchFile)) + + task.Err = task.InitTaskDir() + if task.Err != nil { + return + } + task.PreClear() + if task.Err != nil { + return + } + + cmdFilesBase := fmt.Sprintf("TREDISDUMP_SQL_%s_%d_%s", + task.RowData.SrcIP, task.RowData.SrcPort, time.Now().Format(layout)) + cmdFilesDir := filepath.Join(task.TaskDir, cmdFilesBase) + task.Err = util.MkDirIfNotExists(cmdFilesDir) + if task.Err != nil { + return + } + task.SetSqlfileDir(cmdFilesDir) + task.Logger.Info("create dump sqldir ok", zap.String("sqlDir", cmdFilesDir)) + + outputFormat := task.TredisdumpOuputFormat() + outputFileSize := task.TredisdumpOuputFileSize() + threadCnt := task.TredisdumpThreadCnt() + + task.SetMessage("开始执行") + dumperLogFile := filepath.Join(task.TaskDir, fmt.Sprintf("tredisdump_%s_%d.log", task.RowData.SrcIP, + task.RowData.SrcPort)) + + var keyWhiteRegex string = "" + var keyBlackRegex string = "" + if task.RowData.KeyWhiteRegex != "" && !task.IsMatchAny(task.RowData.KeyWhiteRegex) { + keyWhiteRegex = fmt.Sprintf(" --key_white_regex %q ", task.RowData.KeyWhiteRegex) + } + if task.RowData.KeyBlackRegex != "" && !task.IsMatchAny(task.RowData.KeyBlackRegex) { + keyBlackRegex = fmt.Sprintf(" --key_black_regex %q ", task.RowData.KeyBlackRegex) + } + + dumperCmd := fmt.Sprintf( + `export LD_LIBRARY_PATH=LD_LIBRARY_PATH:%s && cd %s && %s --db_path %s/private/1 --sst_path %s/shared --file_size %d --output_format %s --threads %d %s 
%s`, + depsDir, cmdFilesDir, dumperClient, task.RowData.FetchFile, + task.RowData.FetchFile, outputFileSize, outputFormat, threadCnt, + keyWhiteRegex, keyBlackRegex) + + if task.RowData.SrcSegStart >= 0 && + task.RowData.SrcSegEnd <= 419999 && + task.RowData.SrcSegStart < task.RowData.SrcSegEnd { + if task.RowData.SrcSegStart < 0 || task.RowData.SrcSegEnd < 0 { + task.Err = fmt.Errorf("srcTendis:%s#%d segStart:%d<0 or segEnd:%d<0", + task.RowData.SrcIP, task.RowData.SrcPort, task.RowData.SrcSegStart, task.RowData.SrcSegEnd) + task.Logger.Error(err.Error()) + return + } + if task.RowData.SrcSegStart >= task.RowData.SrcSegEnd { + task.Err = fmt.Errorf("srcTendis:%s#%d segStart:%d >= segEnd:%d", + task.RowData.SrcIP, task.RowData.SrcPort, task.RowData.SrcSegStart, task.RowData.SrcSegEnd) + task.Logger.Error(err.Error()) + return + } + dumperCmd = fmt.Sprintf("%s --start_segment %d --end_segment %d", + dumperCmd, task.RowData.SrcSegStart, task.RowData.SrcSegEnd) + } + + dumperCmd = fmt.Sprintf("%s >%s 2>&1", dumperCmd, dumperLogFile) + + task.UpdateDbAndLogLocal("开始解析全备,Command:%s", dumperCmd) + + timeout := viper.GetInt("tredisdumperTimeout") + if timeout == 0 { + timeout = 604800 + } + _, err = util.RunLocalCmd("bash", []string{"-c", dumperCmd}, "", task, time.Duration(timeout)*time.Second, task.Logger) + if task.RowData.SyncOperate == constvar.RedisForceKillTaskSuccess { + task.ClearLocalFetchBackup() + task.ClearLocalSQLDir() + task.RestoreSrcSSDKeepCount() + return + } + if err != nil && strings.Contains(err.Error(), "exit status 255") { + // 如果tredisdump 出现 exit status 255 错误,则该任务从头再来一次 + task.SetTaskType(constvar.TendisBackupTaskType) + task.SetTendisbackupFile("") + task.SetFetchFile("") + task.SetStatus(0) + task.UpdateDbAndLogLocal("备份文件不对,tredisdump解析失败,重新发起任务...") + return + } else if err != nil { + task.Err = err + return + } + // grep 语句的错误结果直接忽略,因为grep 如果结果为空,则exit 1 + grepRet, _ := util.RunLocalCmd("bash", + []string{"-c", "grep -i -w -E 'err|error' " + dumperLogFile + "||true"}, "", + nil, 60*time.Second, task.Logger) + if grepRet != "" { + task.Err = fmt.Errorf("tredisdump some error occur,pls check logfile:%s", dumperLogFile) + task.Logger.Error(task.Err.Error()) + return + } + grepRet, _ = util.RunLocalCmd("bash", + []string{"-c", "grep -i -w 'fail' " + dumperLogFile + "||true"}, "", + nil, 1*time.Hour, task.Logger) + if grepRet != "" { + task.Err = fmt.Errorf("tredisdump some error occur,pls check logfile:%s", dumperLogFile) + task.Logger.Error(task.Err.Error()) + return + } + task.Logger.Info("tredisdump 执行成功") + + // confirm binlog seq is OK in src redis + fullBackupSyncSeq := task.GetSyncSeqFromFullBackup() + if task.Err != nil { + return + } + task.ConfirmSrcRedisBinlogOK(fullBackupSyncSeq.Seq - 1) + if task.Err != nil { + return + } + + task.EndClear() + if task.Err != nil { + return + } + + task.SetTaskType(task.NextTask()) + task.SetStatus(0) + task.UpdateDbAndLogLocal("等待执行数据导入") + + return +} + +// EndClear tredisdump完成后清理本地tendisSSD备份文件 +func (task *TredisdumpTask) EndClear() { + if task.RowData.FetchFile == "" { + return + } + if strings.Contains(task.RowData.FetchFile, "REDIS_FULL_rocksdb_") == false { + return + } + + debug := viper.GetBool("TENDIS_DEBUG") + if debug == true { + return + } + + // 备份文件tredisdump解析完后, 备份:REDIS_FULL_rocksdb_ 目录可删除 + task.ClearLocalFetchBackup() +} diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/tendisssd.go b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/tendisssd.go new file mode 100644 index 
0000000000..0176418132 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/tendisssd.go @@ -0,0 +1,2 @@ +// Package tendisssd TODO +package tendisssd diff --git a/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/watchOldSync.go b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/watchOldSync.go new file mode 100644 index 0000000000..cecab0d950 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/dtsTask/tendisssd/watchOldSync.go @@ -0,0 +1,76 @@ +package tendisssd + +import ( + "dbm-services/redis/redis-dts/models/mysql/tendisdb" + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/util" + "encoding/base64" + "fmt" +) + +// WatchOldSyncTask 很多时候 redis-sync 已经拉起,状态为runnig(taskrow.status==1 taskrow.taskType="makeSync") +// 而此时我们需要暂停 dbm-services/redis/redis-dts 重新替换 dbm-services/redis/redis-dts的介质 +// 再次拉起后, 以前(taskrow.status==1 taskrow.taskType="makeSync")的task其相关状态依然需要我们不断更新 +// 注意: 该任务只在 dbm-services/redis/redis-dts 被拉起一瞬间创建,只监听 以往 (taskrow.status==1 taskrow.taskType="makeSync")的task +// 对于新增的 (taskrow.status==1 taskrow.taskType="makeSync")的task 不做任何处理 +type WatchOldSyncTask struct { + MakeSyncTask +} + +// TaskType task类型 +func (task *WatchOldSyncTask) TaskType() string { + return constvar.WatchOldSyncTaskType +} + +// NextTask 下一个task类型 +func (task *WatchOldSyncTask) NextTask() string { + return "" +} + +// NewWatchOldSync 新建任务 +func NewWatchOldSync(row *tendisdb.TbTendisDTSTask) *WatchOldSyncTask { + ret := &WatchOldSyncTask{ + MakeSyncTask: *NewMakeSyncTask(row), + } + return ret +} + +// Execute 程序重新拉起后监听以往处于taskType='makeSync',status=1状态的redis-sync +func (task *WatchOldSyncTask) Execute() { + if task.Err != nil { + return + } + + defer func() { + if task.Err != nil { + task.SetStatus(-1) + task.SetMessage(task.Err.Error()) + task.UpdateRow() + } + }() + + if (task.RowData.TaskType != constvar.MakeSyncTaskType) || (task.RowData.Status != 1) { + return + } + redisClient, err := util.IsToolExecutableInCurrDir("redis-cli") + if err != nil { + task.Err = err + return + } + task.SetSyncSeqSaveInterface() + if task.Err != nil { + return + } + + srcPasswd, _ := base64.StdEncoding.DecodeString(task.RowData.SrcPassword) + dstPasswd, _ := base64.StdEncoding.DecodeString(task.RowData.DstPassword) + + task.RedisCliTool = redisClient + task.SrcADDR = fmt.Sprintf("%s:%d", task.RowData.SrcIP, task.RowData.SrcPort) + task.SrcPassword = string(srcPasswd) + task.DstADDR = task.RowData.DstCluster + task.DstPassword = string(dstPasswd) + + task.WatchSync() + return +} diff --git a/dbm-services/redis/redis-dts/pkg/osPerf/osPerf.go b/dbm-services/redis/redis-dts/pkg/osPerf/osPerf.go new file mode 100644 index 0000000000..b9bdf310ad --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/osPerf/osPerf.go @@ -0,0 +1,130 @@ +// Package osPerf 系统性能 +package osPerf + +import ( + "dbm-services/redis/redis-dts/tclog" + "dbm-services/redis/redis-dts/util" + "fmt" + "log" + "os" + "time" + + "github.com/shirou/gopsutil/v3/mem" + "github.com/spf13/viper" + "go.uber.org/zap" + "golang.org/x/sys/unix" +) + +// GetMyHostDisk 获取'我'所在目录的磁盘情况 +func GetMyHostDisk() (myHostDisk HostDiskUsage, err error) { + mydir, err := util.CurrentExecutePath() + if err != nil { + tclog.Logger.Error(err.Error()) + return myHostDisk, err + } + myHostDisk, err = GetLocalDirDiskUsg(mydir, tclog.Logger) + if err != nil { + log.Fatal(err) + } + return +} + +// HostDiskUsage 主机磁盘使用情况(byte) +type HostDiskUsage struct { + DirName string `json:"MountedOn"` + TotalSize uint64 `json:"ToTalSize"` + 
UsedSize   uint64 `json:"UsedSize"`
+	AvailSize  uint64 `json:"AvailSize"`
+	UsageRatio int    `json:"UsageRatio"`
+}
+
+// GetLocalDirDiskUsg 获取本地路径所在磁盘使用情况
+// 参考:
+// https://stackoverflow.com/questions/20108520/get-amount-of-free-disk-space-using-go
+// http://evertrain.blogspot.com/2018/05/golang-disk-free.html
+func GetLocalDirDiskUsg(localDir string, logger *zap.Logger) (diskUsg HostDiskUsage, err error) {
+	var stat unix.Statfs_t
+	if err = unix.Statfs(localDir, &stat); err != nil {
+		err = fmt.Errorf("unix.Statfs fail,err:%v,localDir:%s", err, localDir)
+		return
+	}
+	diskUsg.TotalSize = stat.Blocks * uint64(stat.Bsize)
+	diskUsg.AvailSize = stat.Bavail * uint64(stat.Bsize)
+	diskUsg.UsedSize = (stat.Blocks - stat.Bfree) * uint64(stat.Bsize)
+	diskUsg.UsageRatio = int(diskUsg.UsedSize * 100 / diskUsg.TotalSize)
+	diskUsg.DirName = localDir
+	return
+}
+
+// GetHostsMemInfo 获取当前机器内存概况(byte)
+func GetHostsMemInfo(logger *zap.Logger) (vMem *mem.VirtualMemoryStat, err error) {
+	vMem, err = mem.VirtualMemory()
+	if err != nil {
+		err = fmt.Errorf("mem.VirtualMemory fail,err:%v", err)
+		return
+	}
+	return
+}
+
+// WatchDtsSvrPerf 监听DTS server性能并发送告警
+func WatchDtsSvrPerf() {
+	localIP, err := util.GetLocalIP()
+	if err != nil {
+		tclog.Logger.Error("GetLocalIP fail", zap.Error(err))
+		os.Exit(-1)
+	}
+	warnMsgNotifier := viper.GetString("WarnMessageNotifier")
+	if warnMsgNotifier == "" {
+		warnMsgNotifier = "{default_recipients}"
+	}
+	diskMaxUsgRatio := viper.GetInt("DtsServerDiskMaxUsgRatio")
+	if diskMaxUsgRatio == 0 {
+		diskMaxUsgRatio = 90
+	}
+	memMaxUsgRatio := viper.GetInt("DtsServerMemMaxUsgRatio")
+	if memMaxUsgRatio == 0 {
+		memMaxUsgRatio = 80
+	}
+	app := "mocpub"
+	dbType := "NOSQL"
+	dbCat := "DTSserver"
+	warnLevel := 1
+	warnType := "BACKUP"
+	sendCnt := 0
+	warnDetail := ""
+
+	for {
+		time.Sleep(1 * time.Minute)
+		myDisk, err := GetMyHostDisk()
+		if err != nil {
+			continue
+		}
+		myMem, err := GetHostsMemInfo(tclog.Logger)
+		if err != nil {
+			continue
+		}
+		warnDetail = ""
+		if myDisk.UsageRatio > diskMaxUsgRatio {
+			warnDetail = fmt.Sprintf("DTS server %s disk usage:%d%% > %d%%",
+				myDisk.DirName, myDisk.UsageRatio, diskMaxUsgRatio)
+		} else if int(myMem.UsedPercent) > memMaxUsgRatio {
+			warnDetail = fmt.Sprintf("DTS server memory usage:%.2f%% > %d%%",
+				myMem.UsedPercent, memMaxUsgRatio)
+		}
+		if warnDetail == "" {
+			continue
+		}
+		warnCmd := fmt.Sprintf(
+			`./warn_client/warn_send.pl --app=%q --db_type=%q \
+ --db_cat=%q --ip=%q --warn_level=%d --warn_type=%s \
+ --warn_detail=%q --notifier=%q`,
+			app, dbType, dbCat, localIP, warnLevel, warnType, warnDetail, warnMsgNotifier)
+		util.RunLocalCmd("bash", []string{"-c", warnCmd}, "", nil, 1*time.Minute, tclog.Logger)
+		sendCnt++
+		if sendCnt == 3 {
+			// 连续发送3次后, sleep 30分钟, 避免告警刷屏
+			time.Sleep(30 * time.Minute)
+			sendCnt = 0
+		}
+	}
+}
diff --git a/dbm-services/redis/redis-dts/pkg/remoteOperation/abs.go b/dbm-services/redis/redis-dts/pkg/remoteOperation/abs.go
new file mode 100644
index 0000000000..1f43699789
--- /dev/null
+++ b/dbm-services/redis/redis-dts/pkg/remoteOperation/abs.go
@@ -0,0 +1,116 @@
+package remoteOperation
+
+import (
+	"dbm-services/redis/redis-dts/pkg/constvar"
+	"dbm-services/redis/redis-dts/util"
+	"fmt"
+	"path/filepath"
+	"time"
+
+	"go.uber.org/zap"
+)
+
+// 无实际作用,仅确保实现了 RemoteOperation 接口
+var _ RemoteOperation = (*IAbsClient)(nil)
+
+// IAbsClient do remote operations by ssh.exp/scp.exp.2
+type IAbsClient struct {
+	RemoteUser     string // 如 test_user
+	RemotePassword string // 如 test_password
+
RemoteIP string // 如 1.1.1.1 + RemotePort int // 如 22 + scpTool string + sshTool string + logger *zap.Logger +} + +// NewIAbsClient new +func NewIAbsClient(remoteUser, remotePasswd, remoteIP string, remotePort int, logger *zap.Logger) (cli *IAbsClient, + err error) { + cli = &IAbsClient{ + RemoteUser: remoteUser, + RemotePassword: remotePasswd, + RemoteIP: remoteIP, + RemotePort: remotePort, + logger: logger, + } + cli.scpTool, err = util.IsToolExecutableInCurrDir("scp.exp.2") + if err != nil { + logger.Error(err.Error()) + return + } + cli.sshTool, err = util.IsToolExecutableInCurrDir("ssh.exp") + if err != nil { + logger.Error(err.Error()) + return + } + return +} + +// NewIAbsClientByEnvVars new client by env variables +func NewIAbsClientByEnvVars(remoteIP string, logger *zap.Logger) (cli *IAbsClient, err error) { + var absPasswd, absUser string + var absPort int + absPasswd, err = constvar.GetABSPassword() + if err != nil { + logger.Error(err.Error()) + return + } + absUser = constvar.GetABSUser() + absPort = constvar.GetABSPort() + return NewIAbsClient( + absUser, absPasswd, + remoteIP, absPort, logger, + ) +} + +// RemoteDownload download file from remote server +func (c *IAbsClient) RemoteDownload(srcDir, dstDir string, fileName string, bwlimitMB int64) (err error) { + srcFile := filepath.Join(srcDir, fileName) + if bwlimitMB == 0 { + bwlimitMB = 400 * 1024 + } + if fileName == "" { + srcFile = srcDir + } + pullTimeout := constvar.GetABSPullTimeout() + /*example: + ./scp.exp.2 $remoteIP $remoteUser "$remoteSPASSWD" $ABSPORT /data/dbbak/30000_backup /data/dbbak/ pull 400000 3600 + */ + pullCmd := fmt.Sprintf(`%s %s %s %s %d %s %s pull %d %d`, + c.scpTool, c.RemoteIP, + c.RemoteUser, c.RemotePassword, c.RemotePort, + srcFile, dstDir, bwlimitMB, int64(pullTimeout.Seconds())) + logPullCmd := fmt.Sprintf(`%s %s %s 'xxxxx' %d %s %s pull %d %d`, + c.scpTool, c.RemoteIP, c.RemoteUser, c.RemotePort, + srcFile, dstDir, bwlimitMB, int64(pullTimeout.Seconds())) + c.logger.Info(logPullCmd) + + // _, err = util.RunLocalCmd("bash", []string{"-c", pullCmd}, "", nil, pullTimeout, c.logger) + _, err = util.RunLocalCmd("bash", []string{"-c", pullCmd}, "", nil, 3600*time.Second, c.logger) + return +} + +// RemoteBash TODO +func (c *IAbsClient) RemoteBash(cmd string) (ret string, err error) { + bashCmd := fmt.Sprintf(` +export ABSIP=%s +export ABSUSER=%s +export ABSPASSWD=%s +export ABSPORT=%d +export ABSSSHTIMEOUT=3600 +%s %q`, c.RemoteIP, c.RemoteUser, c.RemotePassword, c.RemotePort, c.sshTool, cmd) + + logCmd := fmt.Sprintf(` +export ABSIP=%s +export ABSUSER=%s +export ABSPASSWD=xxxxx +export ABSPORT=%d +export ABSSSHTIMEOUT=3600 +%s %q`, c.RemoteIP, c.RemoteUser, c.RemotePort, c.sshTool, cmd) + c.logger.Info(logCmd) + + ret, err = util.RunLocalCmd("bash", []string{"-c", bashCmd}, "", nil, + 3600*time.Second, c.logger) + return +} diff --git a/dbm-services/redis/redis-dts/pkg/remoteOperation/init.go b/dbm-services/redis/redis-dts/pkg/remoteOperation/init.go new file mode 100644 index 0000000000..573a759ef9 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/remoteOperation/init.go @@ -0,0 +1,7 @@ +package remoteOperation + +// RemoteOperation remote server operations interface +type RemoteOperation interface { + RemoteDownload(srcDir, dstDir string, fileName string, bwlimitMB int64) (err error) + RemoteBash(cmd string) (ret string, err error) +} diff --git a/dbm-services/redis/redis-dts/pkg/remoteOperation/ssh.go b/dbm-services/redis/redis-dts/pkg/remoteOperation/ssh.go new file mode 100644 index 
0000000000..7854c2fe30 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/remoteOperation/ssh.go @@ -0,0 +1,358 @@ +// Package remoteOperation TODO +package remoteOperation + +import ( + "bytes" + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/util" + "fmt" + "io" + "math" + "os" + "path/filepath" + "regexp" + "strconv" + "time" + + "github.com/dustin/go-humanize" + "github.com/juju/ratelimit" + "github.com/pkg/sftp" + "go.uber.org/zap" + "golang.org/x/crypto/ssh" +) + +// ISSHConfig represents SSH connection parameters. +type ISSHConfig struct { + RemoteUser string // 如 test_user + RemotePassword string // 如 test_password + PrivateKey string // 可为空 + RemoteServer string // 如 1.1.1.1:22 + KeyExchanges []string // 可为空 + Timeout time.Duration +} + +// 无实际作用,仅确保实现了 RemoteOperation 接口 +var _ RemoteOperation = (*ISSHClient)(nil) + +// ISSHClient provides basic functionality to interact with a SFTP server. +type ISSHClient struct { + config ISSHConfig + sshClient *ssh.Client + sftpClient *sftp.Client + logger *zap.Logger +} + +// NewISshClient initialises SSH and SFTP clients and returns Client type to use. +func NewISshClient(config ISSHConfig, logger *zap.Logger) (*ISSHClient, error) { + c := &ISSHClient{ + config: config, + logger: logger, + } + + if err := c.connect(); err != nil { + return nil, err + } + + return c, nil +} + +// NewISshClientByEnvAbsVars new +func NewISshClientByEnvAbsVars(remoteIP string, logger *zap.Logger) (cli *ISSHClient, err error) { + var absPasswd string + var absPort int + absPasswd, err = constvar.GetABSPassword() + if err != nil { + logger.Error(err.Error()) + return + } + absPort = constvar.GetABSPort() + conf := ISSHConfig{ + RemoteUser: constvar.GetABSUser(), + RemotePassword: absPasswd, + RemoteServer: remoteIP + ":" + strconv.Itoa(absPort), + Timeout: constvar.GetABSPullTimeout(), + } + return NewISshClient(conf, logger) +} + +// Create creates a remote/destination file for I/O. +func (c *ISSHClient) Create(filePath string) (io.ReadWriteCloser, error) { + if err := c.connect(); err != nil { + return nil, fmt.Errorf("connect: %w", err) + } + + return c.sftpClient.Create(filePath) +} + +// Upload writes local/source file data streams to remote/destination file. 
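+// It copies in fixed-size chunks: each Read fills a buffer of `size` bytes,
+// each Write is checked so that the written byte count matches the read byte
+// count, and the final partial chunk is flushed when io.EOF is reached.
+// Usage sketch (illustrative, assuming r is a local file and w comes from c.Create):
+//   err := cli.Upload(r, w, 4*1024*1024) // copy in 4MB chunks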
+func (c *ISSHClient) Upload(source io.Reader, destination io.Writer, size int) error { + if err := c.connect(); err != nil { + return fmt.Errorf("connect: %w", err) + } + + chunk := make([]byte, size) + + for { + num, err := source.Read(chunk) + if err == io.EOF { + tot, err := destination.Write(chunk[:num]) + if err != nil { + return err + } + + if tot != len(chunk[:num]) { + err = fmt.Errorf("write_size:%d != read_size:%d", tot, num) + c.logger.Error(err.Error()) + return err + } + + return nil + } + + if err != nil { + return err + } + + tot, err := destination.Write(chunk[:num]) + if err != nil { + return err + } + + if tot != len(chunk[:num]) { + err = fmt.Errorf("write_size:%d != read_size:%d", tot, num) + c.logger.Error(err.Error()) + return err + } + } +} + +// CalcFileSizeIncr 计算文件大小增长速度 +func CalcFileSizeIncr(f string, secs uint64) string { + var err error + var t1Size, t2Size int64 + if t1Size, err = util.GetFileSize(f); err != nil { + return "0" + } + time.Sleep(time.Duration(secs) * time.Second) + if t2Size, err = util.GetFileSize(f); err != nil { + return "0" + } + + bytesIncr := uint64(math.Abs(float64(t2Size-t1Size))) / secs + return humanize.Bytes(bytesIncr) +} + +// IOLimitRate io.Copy 限速 +func IOLimitRate(dst io.Writer, src io.Reader, bwlimitMB int64) (written int64, err error) { + bwlimit := bwlimitMB * 1024 * 1024 + srcBucket := ratelimit.NewBucketWithRate(float64(bwlimit), bwlimit) + return io.Copy(dst, ratelimit.Reader(src, srcBucket)) +} + +// RemoteDownload download file from remote server +func (c *ISSHClient) RemoteDownload(srcDir, dstDir string, fileName string, bwlimitMB int64) (err error) { + + srcFile := filepath.Join(srcDir, fileName) + dstFile := filepath.Join(dstDir, fileName) + if fileName == "" { + srcFile = srcDir + dstFile = dstDir + } + c.logger.Info(fmt.Sprintf("start download to %s", dstFile)) + // Get remote file stats. + info, err := c.Info(srcFile) + if err != nil { + return err + } + c.logger.Info(fmt.Sprintf("download source file info:%+v", info)) + + // Download remote file. + + r, err := c.sftpClient.Open(srcFile) + if err != nil { + err = fmt.Errorf("sftp.Open fail,err:%v,srcFile:%s", err, srcFile) + c.logger.Error(err.Error()) + return err + } + defer r.Close() + + // create local file + f, err := os.Create(dstFile) + if err != nil { + err = fmt.Errorf("os.Create %s fail,err:%v", dstFile, err) + c.logger.Error(err.Error()) + return err + } + defer f.Close() + + done := make(chan int, 1) + defer close(done) + go func(chan int) { + for true { + speed := CalcFileSizeIncr(dstFile, 1) + if speed != "0" { + c.logger.Info(fmt.Sprintf("file %s change speed %s", dstFile, speed)) + } else { + break + } + select { + case _, beforeClosed := <-done: + if !beforeClosed { + return + } + case <-time.After(2 * time.Hour): + return + default: + time.Sleep(time.Duration(10) * time.Second) + } + } + }(done) + + // Read downloaded file. 
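+	// Copy with bandwidth limit: IOLimitRate wraps the sftp reader with a
+	// juju/ratelimit token bucket refilled at bwlimitMB MB/s, so the io.Copy
+	// inside it can never read (and thus download) faster than that rate.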
+	_, err = IOLimitRate(f, r, bwlimitMB)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// RemoteBash do remote bash -c "$cmd"
+func (c *ISSHClient) RemoteBash(cmd string) (ret string, err error) {
+	bashCmd := fmt.Sprintf("bash -c %q", cmd)
+	err = c.connect()
+	if err != nil {
+		return "", fmt.Errorf("connect,err:%v", err)
+	}
+	var session *ssh.Session
+	var outBuf bytes.Buffer
+	var errBuf bytes.Buffer
+	session, err = c.sshClient.NewSession()
+	if err != nil {
+		err = fmt.Errorf("client.NewSession fail,err:%v,server:%s", err, c.config.RemoteServer)
+		c.logger.Error(err.Error())
+		return
+	}
+	defer session.Close()
+	session.Stdout = &outBuf
+	session.Stderr = &errBuf
+	err = session.Run(bashCmd)
+	if err != nil {
+		err = fmt.Errorf("session.Run fail,err:%v,server:%s,cmd:%q", err, c.config.RemoteServer, cmd)
+		c.logger.Error(err.Error())
+		return
+	}
+	if errBuf.String() != "" {
+		err = fmt.Errorf("session.Run fail,err:%s,server:%s,cmd:%q", errBuf.String(), c.config.RemoteServer, cmd)
+		c.logger.Error(err.Error())
+		return
+	}
+	return outBuf.String(), nil
+}
+
+// Info gets the details of a file. If the file was not found, an error is returned.
+func (c *ISSHClient) Info(filePath string) (os.FileInfo, error) {
+	if err := c.connect(); err != nil {
+		return nil, fmt.Errorf("connect: %w", err)
+	}
+
+	info, err := c.sftpClient.Lstat(filePath)
+	if err != nil {
+		err = fmt.Errorf("file lstat fail,err:%v,server:%s,filePath:%s", err, c.config.RemoteServer, filePath)
+		c.logger.Error(err.Error())
+		return nil, err
+	}
+
+	return info, nil
+}
+
+// Close closes open connections.
+func (c *ISSHClient) Close() {
+	if c.sftpClient != nil {
+		c.sftpClient.Close()
+	}
+	if c.sshClient != nil {
+		c.sshClient.Close()
+	}
+}
+
+// GetAuthMethods 返回ssh认证方式: keyboard-interactive 优先, 密码认证兜底
+func (c *ISSHConfig) GetAuthMethods(password string) []ssh.AuthMethod {
+	auth := ssh.Password(password)
+	/*
+		if c.config.PrivateKey != "" {
+			signer, err := ssh.ParsePrivateKey([]byte(c.config.PrivateKey))
+			if err != nil {
+				return fmt.Errorf("ssh parse private key: %w", err)
+			}
+			auth = ssh.PublicKeys(signer)
+		}
+	*/
+	keyboardInteractiveChallenge := func(
+		user,
+		instruction string,
+		questions []string,
+		echos []bool,
+	) (answers []string, err error) {
+		if len(questions) == 0 {
+			return []string{}, nil
+		}
+		/*
			for i, question := range questions {
				log.Debug("SSH Question %d: %s", i+1, question)
			}
+		*/
+		answers = make([]string, len(questions))
+		for i := range questions {
+			// 忽略大小写匹配包含"yes"的提示(如 host key 确认), 其余提示按密码回答
+			yes, _ := regexp.MatchString("(?i)yes", questions[i])
+			if yes {
+				answers[i] = "yes"
+			} else {
+				answers[i] = password
+			}
+		}
+		return answers, nil
+	}
+	auth2 := ssh.KeyboardInteractive(keyboardInteractiveChallenge)
+
+	methods := []ssh.AuthMethod{auth2, auth}
+	return methods
+}
+
+// connect initialises a new SSH and SFTP client only if they were not
+// initialised before, or they were initialised but the SSH
+// connection was lost for any reason.
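+// Liveness of an existing connection is probed with a global "keepalive"
+// request; if the probe fails, the client re-dials and rebuilds the sftp
+// client on top of the new ssh connection.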
+func (c *ISSHClient) connect() error { + if c.sshClient != nil { + _, _, err := c.sshClient.SendRequest("keepalive", false, nil) + if err == nil { + return nil + } + } + + cfg := &ssh.ClientConfig{ + User: c.config.RemoteUser, + Auth: c.config.GetAuthMethods(c.config.RemotePassword), + // HostKeyCallback: func(string, net.Addr, ssh.PublicKey) error { return nil }, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), + // HostKeyCallback: ssh.FixedHostKey(hostKey), + Timeout: c.config.Timeout, + } + + sshClient, err := ssh.Dial("tcp", c.config.RemoteServer, cfg) + if err != nil { + err = fmt.Errorf("ssh dial %s fail,err:%w", c.config.RemoteServer, err) + c.logger.Error(err.Error()) + return err + } + c.sshClient = sshClient + + sftpClient, err := sftp.NewClient(sshClient) + if err != nil { + err = fmt.Errorf("sftp new client fail,sshClient:%+v,err:%v", c.config, err) + c.logger.Error(err.Error()) + return err + } + c.sftpClient = sftpClient + + return nil +} diff --git a/dbm-services/redis/redis-dts/pkg/scrdbclient/dtsRemote.go b/dbm-services/redis/redis-dts/pkg/scrdbclient/dtsRemote.go new file mode 100644 index 0000000000..a0ed37196a --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/scrdbclient/dtsRemote.go @@ -0,0 +1,117 @@ +package scrdbclient + +import ( + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/util" + "encoding/json" + "fmt" + "log" + "net/http" + + "github.com/spf13/viper" + "go.uber.org/zap" +) + +// IsDtsServerInBlachList dts_server是否在黑名单中 +func (c *Client) IsDtsServerInBlachList(dtsSvr string) bool { + type inBlacklistReq struct { + IP string `json:"ip"` + } + type inBlacklistResp struct { + In bool `json:"in"` + } + var subURL string + param := inBlacklistReq{ + IP: dtsSvr, + } + ret := inBlacklistResp{} + if c.servicename == constvar.DtsRemoteTendisxk8s { + subURL = constvar.K8sIsDtsSvrInBlacklistURL + } else if c.servicename == constvar.BkDbm { + subURL = constvar.DbmIsDtsSvrInBlacklistURL + } + data, err := c.Do(http.MethodPost, subURL, param) + if err != nil { + return false + } + err = json.Unmarshal(data.Data, &ret) + if err != nil { + err = fmt.Errorf("IsDtsServerInBlachList unmarshal data fail,err:%v,resp.Data:%s", err.Error(), string(data.Data)) + c.logger.Error(err.Error()) + return false + } + if ret.In { + return true + } + return false +} + +// IsMyselfInBlacklist 本机器是否在黑名单中 +func IsMyselfInBlacklist(logger *zap.Logger) bool { + myLocalIP, err := util.GetLocalIP() + if err != nil { + logger.Error(err.Error()) + log.Fatal(err) + } + cli01, err := NewClient(viper.GetString("serviceName"), logger) + if err != nil { + log.Fatal(err.Error()) + } + return cli01.IsDtsServerInBlachList(myLocalIP) +} + +// DtsLockKey dts key上锁 +func (c *Client) DtsLockKey(lockkey, holder string, ttlSec int) (lockOK bool, err error) { + type dtsLockKeyReq struct { + LockKey string `json:"lockkey"` + Holder string `json:"holder"` + TTLSecs int `json:"ttl_sec"` + } + + var subURL string + param := dtsLockKeyReq{ + LockKey: lockkey, + Holder: holder, + TTLSecs: ttlSec, + } + var ret bool + if c.servicename == constvar.DtsRemoteTendisxk8s { + subURL = constvar.K8sDtsLockKeyURL + } else if c.servicename == constvar.BkDbm { + subURL = constvar.DbmDtsLockKeyURL + } + data, err := c.Do(http.MethodPost, subURL, param) + if err != nil { + return false, err + } + err = json.Unmarshal(data.Data, &ret) + if err != nil { + err = fmt.Errorf("DtsKeyLock unmarshal data fail,err:%v,resp.Data:%s", err.Error(), string(data.Data)) + c.logger.Error(err.Error()) + return false, 
err + } + return ret, nil +} + +// DtsUnLockKey dts key解锁 +func (c *Client) DtsUnLockKey(lockkey, holder string, ttlSec int, logger *zap.Logger) (err error) { + type dtsUnlockKeyReq struct { + LockKey string `json:"lockkey"` + Holder string `json:"holder"` + } + var subURL string + param := dtsUnlockKeyReq{ + LockKey: lockkey, + Holder: holder, + } + if c.servicename == constvar.DtsRemoteTendisxk8s { + subURL = constvar.K8sDtsUnlockKeyURL + } else if c.servicename == constvar.BkDbm { + subURL = constvar.DbmDtsUnlockKeyURL + } + _, err = c.Do(http.MethodPost, subURL, param) + if err != nil { + return err + } + return nil +} diff --git a/dbm-services/redis/redis-dts/pkg/scrdbclient/fileService.go b/dbm-services/redis/redis-dts/pkg/scrdbclient/fileService.go new file mode 100644 index 0000000000..a35754a45b --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/scrdbclient/fileService.go @@ -0,0 +1,21 @@ +package scrdbclient + +import ( + "fmt" + "net/http" +) + +// GetFile from fileserver +func (c *Client) GetFile(fileStr string) (*http.Response, error) { + var resp *http.Response + var err error + + fullUrl := c.apiserver + "/fileserver/" + fileStr + resp, err = http.Get(fullUrl) + if err != nil { + err = fmt.Errorf("http.Get fail,err:%v,fullURL:%s", err, fullUrl) + c.logger.Error(err.Error()) + return nil, err + } + return resp, err +} diff --git a/dbm-services/redis/redis-dts/pkg/scrdbclient/jobapiRequest.go b/dbm-services/redis/redis-dts/pkg/scrdbclient/jobapiRequest.go new file mode 100644 index 0000000000..39a0d626a8 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/scrdbclient/jobapiRequest.go @@ -0,0 +1,336 @@ +// Package scrdbclient 封装jobapi相关请求,包括执行脚本,传输文件,查看状态等 +package scrdbclient + +import ( + "dbm-services/redis/db-tools/dbmon/util" + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/pkg/remoteOperation" + "encoding/json" + "fmt" + "net/http" + "path/filepath" + "time" +) + +// FastExecuteScript 快速执行脚本 +func (c *Client) FastExecuteScript(param FastExecScriptReq) (ret FastExecScriptResp, err error) { + result, err := c.Do(http.MethodPost, constvar.DbmJobApiFastExecuteScriptURL, param) + if err != nil { + return + } + err = json.Unmarshal(result.Data, &ret) + if err != nil { + err = fmt.Errorf("FastExecuteScript unmarshal data fail,err:%v,result.Data:%s", err, string(result.Data)) + c.logger.Error(err.Error()) + return + } + return +} + +// GetJobInstanceStatus 获取job实例状态 +func (c *Client) GetJobInstanceStatus(param GetJobInstanceStatusReq) (ret GetJobInstanceStatusResp, err error) { + result, err := c.Do(http.MethodPost, constvar.DbmJobApiGetJobInstanceStatusURL, param) + if err != nil { + return + } + err = json.Unmarshal(result.Data, &ret) + if err != nil { + err = fmt.Errorf("GetJobInstanceStatus unmarshal data fail,err:%v,result.Data:%s", err, string(result.Data)) + c.logger.Error(err.Error()) + return + } + return +} + +// BatchGetJobInstanceIpLog 获取job实例ip日志 +func (c *Client) BatchGetJobInstanceIpLog(param BatchGetJobInstanceIpLogReq) ( + ret BatchGetJobInstanceIpLogResp, err error, +) { + result, err := c.Do(http.MethodPost, constvar.DbmJobApiBatchGetJobInstanceIPLogURL, param) + if err != nil { + return + } + err = json.Unmarshal(result.Data, &ret) + if err != nil { + err = fmt.Errorf("BatchGetJobInstanceIpLog unmarshal data fail,err:%v,result.Data:%s", err, string(result.Data)) + c.logger.Error(err.Error()) + return + } + return +} + +// FastTransferFile 快速传输文件 +func (c *Client) FastTransferFile(param TransferFileReq) (ret FastExecScriptResp, 
err error) { + result, err := c.Do(http.MethodPost, constvar.DbmJobApiTransferFileURL, param) + if err != nil { + return + } + err = json.Unmarshal(result.Data, &ret) + if err != nil { + err = fmt.Errorf("FastTransferFile unmarshal data fail,err:%v,result.Data:%s", err, string(result.Data)) + c.logger.Error(err.Error()) + return + } + return +} + +// ExecNew 执行脚本并等待结果 +func (c *Client) ExecNew(param FastExecScriptReq, maxRetryTimes int) (retList []BatchScriptLogItem, err error) { + var isLocalCmd bool + isLocalCmd, err = param.IsLocalScript() + if err != nil { + return + } + if isLocalCmd && param.ScriptLanguage == 1 { + cmdRet, err := util.RunBashCmd(param.ScriptContent, "", nil, time.Duration(param.Timeout)*time.Second) + if err != nil { + return retList, err + } + retList = append(retList, BatchScriptLogItem{ + IPItem: param.IPList[0], + LogContent: cmdRet, + }) + return retList, nil + } + ret := BatchGetJobInstanceIpLogResp{} + if constvar.IsGlobalEnv() { + return c.ExecBySshNew(param, maxRetryTimes) + } + msg := fmt.Sprintf("starting exec command,params:%s", util.ToString(param)) + c.logger.Info(msg) + + if maxRetryTimes <= 0 { + maxRetryTimes = 1 + } + var execRet FastExecScriptResp + var i int + for i = 0; i < maxRetryTimes; i++ { + execRet, err = c.FastExecuteScript(param) + if err != nil { + time.Sleep(2 * time.Second) + continue + } + break + } + if i >= maxRetryTimes && err != nil { + // 如果调用Job平台api失败,通过ssh方式继续执行 + c.logger.Info("FastExecuteScript api fail,try to use ssh to exec command") + return c.ExecBySshNew(param, maxRetryTimes) + } + msg = fmt.Sprintf("GetJobInstanceStatus job_instance_id:%d", execRet.JobInstanceID) + c.logger.Info(msg) + + statusReq := GetJobInstanceStatusReq{} + statusReq.JobInstanceID = execRet.JobInstanceID + statusReq.StepInstanceID = execRet.StepInstanceID + + var statusResp GetJobInstanceStatusResp + var times int = 0 + i = 0 + for { + statusResp, err = c.GetJobInstanceStatus(statusReq) + if err != nil { + times++ + err = fmt.Errorf("GetJobInstanceStatus fail,err:%v,job_instance_id:%d,step_instance_id:%d", + err, execRet.JobInstanceID, execRet.StepInstanceID) + if times >= maxRetryTimes { + c.logger.Error("Finally ..." + err.Error()) + return retList, err + } else { + c.logger.Warn("Retry... 
" + err.Error()) + time.Sleep(2 * time.Second) + continue + } + } + if statusResp.JobInstance.Status >= 3 { + break + } + time.Sleep(2 * time.Second) + i++ + if i%30 == 0 { + // 每分钟打印一次进度日志 + c.logger.Info(fmt.Sprintf("ExecNew job_instance_id:%d,step_instance_id:%d still running,status:%d", + execRet.JobInstanceID, execRet.StepInstanceID, statusResp.JobInstance.Status)) + } + } + if statusResp.JobInstance.Status != 3 { + err = fmt.Errorf("GetJobInstanceStatus fail,job_instance_id:%d,step_instance_id:%d,status:%d", + execRet.JobInstanceID, execRet.StepInstanceID, statusResp.JobInstance.Status) + c.logger.Error(err.Error()) + return retList, err + } + c.logger.Info(fmt.Sprintf("ExecNew job_instance_id:%d,step_instance_id:%d success,status:%d", + statusReq.JobInstanceID, statusReq.StepInstanceID, statusResp.JobInstance.Status)) + + logReq := BatchGetJobInstanceIpLogReq{} + logReq.JobInstanceID = execRet.JobInstanceID + logReq.StepInstanceID = execRet.StepInstanceID + logReq.IPList = param.IPList + for i := 0; i < maxRetryTimes; i++ { + if i >= maxRetryTimes { + return retList, err + } + ret, err = c.BatchGetJobInstanceIpLog(logReq) + if err != nil { + time.Sleep(2 * time.Second) + continue + } + break + } + return ret.ToBatchScriptLogList(), nil +} + +// SendNew 文件传输并等待结果 +func (c *Client) SendNew(param TransferFileReq, maxRetryTimes int) (err error) { + msg := fmt.Sprintf("starting send file,params:%s", util.ToString(param)) + c.logger.Info(msg) + + if maxRetryTimes <= 0 { + maxRetryTimes = 1 + } + var execRet FastExecScriptResp + var i int + for i = 0; i < maxRetryTimes; i++ { + execRet, err = c.FastTransferFile(param) + if err != nil { + time.Sleep(2 * time.Second) + continue + } + break + } + if i >= maxRetryTimes && err != nil { + // 如果调用Job平台api失败,通过ssh方式继续执行 + c.logger.Info("FastTransferFile api fail,try to use ssh to download file") + return c.DownloadFileToLocalBySSH(param, maxRetryTimes) + } + c.logger.Info(fmt.Sprintf("FastTransferFile api success,ret:%s", util.ToString(execRet))) + + msg = fmt.Sprintf("GetJobInstanceStatus job_instance_id:%d", execRet.JobInstanceID) + c.logger.Info(msg) + + statusReq := GetJobInstanceStatusReq{} + statusReq.JobInstanceID = execRet.JobInstanceID + statusReq.StepInstanceID = execRet.StepInstanceID + + var statusResp GetJobInstanceStatusResp + var times int = 0 + i = 0 + for { + statusResp, err = c.GetJobInstanceStatus(statusReq) + if err != nil { + times++ + err = fmt.Errorf("GetJobInstanceStatus fail,err:%v,job_instance_id:%d,step_instance_id:%d", + err, execRet.JobInstanceID, execRet.StepInstanceID) + if times >= maxRetryTimes { + c.logger.Error("Finally ..." + err.Error()) + return err + } else { + c.logger.Warn("Retry... 
" + err.Error()) + time.Sleep(2 * time.Second) + continue + } + } + if statusResp.JobInstance.Status >= 3 { + break + } + time.Sleep(2 * time.Second) + i++ + if i%30 == 0 { + // 每分钟打印一次进度日志 + c.logger.Info(fmt.Sprintf("GetJobInstanceStatus job_instance_id:%d,step_instance_id:%d still running,status:%d", + execRet.JobInstanceID, execRet.StepInstanceID, statusResp.JobInstance.Status)) + } + } + if statusResp.JobInstance.Status != 3 { + err = fmt.Errorf("GetJobInstanceStatus fail,job_instance_id:%d,step_instance_id:%d,status:%d", + statusReq.JobInstanceID, statusReq.StepInstanceID, statusResp.JobInstance.Status) + c.logger.Error(err.Error()) + return err + } + c.logger.Info(fmt.Sprintf("SendNew job_instance_id:%d,step_instance_id:%d success,status:%d", + statusReq.JobInstanceID, statusReq.StepInstanceID, statusResp.JobInstance.Status)) + return nil +} + +// ExecBySshNew 通过ssh执行脚本并等待结果 +func (c *Client) ExecBySshNew(param FastExecScriptReq, maxRetryTimes int) (retList []BatchScriptLogItem, err error) { + var isLocalCmd bool + isLocalCmd, err = param.IsLocalScript() + if err != nil { + return + } + if isLocalCmd && param.ScriptLanguage == 1 { + cmdRet, err := util.RunBashCmd(param.ScriptContent, "", nil, time.Duration(param.Timeout)*time.Second) + if err != nil { + return retList, err + } + retList = append(retList, BatchScriptLogItem{ + IPItem: param.IPList[0], + LogContent: cmdRet, + }) + return retList, nil + } + for _, ip := range param.IPList { + sshCli, err := remoteOperation.NewISshClientByEnvAbsVars(ip.IP, c.logger) + if err != nil { + c.logger.Error(err.Error()) + return retList, err + } + bashRet, err := sshCli.RemoteBash(param.ScriptContent) + if err != nil { + c.logger.Error(err.Error()) + return retList, err + } + retList = append(retList, BatchScriptLogItem{ + IPItem: ip, + LogContent: bashRet, + }) + } + return retList, nil +} + +// DownloadFileToLocalBySSH 如果目标机器是本机,则通过ssh下载文件 +func (c *Client) DownloadFileToLocalBySSH(param TransferFileReq, maxRetryTimes int) (err error) { + isLocalCopy, err := param.IsLocalCopy() + if err != nil { + return + } + if isLocalCopy { + for _, sitem := range param.SourceList { + for _, file := range sitem.FileList { + cpCmd := fmt.Sprintf("cp -r %s %s", file, param.TargetDir) + c.logger.Info(cpCmd) + _, err = util.RunBashCmd(cpCmd, "", nil, 30*time.Minute) + if err != nil { + return nil + } + } + } + } + + isLocalTarget, err := param.IsLocalTarget() + if err != nil { + return + } + if !isLocalTarget { + err = fmt.Errorf("target not local,targetData:%s", util.ToString(param.TargetIPList)) + c.logger.Error(err.Error()) + return err + } + + for _, sitem := range param.SourceList { + sshCli, err := remoteOperation.NewISshClientByEnvAbsVars(sitem.IP, c.logger) + if err != nil { + c.logger.Error(err.Error()) + return err + } + for _, file := range sitem.FileList { + err = sshCli.RemoteDownload(filepath.Dir(file), param.TargetDir, filepath.Base(file), 400) + if err != nil { + c.logger.Error(err.Error()) + return err + } + } + } + return +} diff --git a/dbm-services/redis/redis-dts/pkg/scrdbclient/jobapiSchema.go b/dbm-services/redis/redis-dts/pkg/scrdbclient/jobapiSchema.go new file mode 100644 index 0000000000..d52b1bc1eb --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/scrdbclient/jobapiSchema.go @@ -0,0 +1,203 @@ +// Package scrdbclient .. 
+package scrdbclient + +import ( + "dbm-services/redis/redis-dts/util" + + "github.com/spf13/viper" +) + +// IPItem bk_cloud_id and ip +type IPItem struct { + BkCloudID int `json:"bk_cloud_id"` + IP string `json:"ip"` +} + +// FastExecScriptReq jobapi fast_execute_script request +type FastExecScriptReq struct { + IPList []IPItem `json:"ip_list"` + ScriptLanguage int `json:"script_language"` + ScriptContent string `json:"script_content"` + Account string `json:"account"` + Timeout int `json:"timeout"` +} + +// IsLocalScript 是否是本地命令 +func (req *FastExecScriptReq) IsLocalScript() (ret bool, err error) { + var localIP string + localIP, err = util.GetLocalIP() + if err != nil { + return + } + for _, ipitem := range req.IPList { + if ipitem.IP != localIP { + return false, nil + } + if ipitem.BkCloudID != viper.GetInt("bkDbm.bk_cloud_id") { + return false, nil + } + } + return true, nil +} + +// FastExecScriptResp jobapi get_job_instance_status response +type FastExecScriptResp struct { + JobInstanceItem + JobInstanceName string `json:"job_instance_name"` +} + +// JobInstanceItem .. +type JobInstanceItem struct { + JobInstanceID int64 `json:"job_instance_id"` + StepInstanceID int64 `json:"step_instance_id"` +} + +// GetJobInstanceStatusReq jobapi get_job_instance_status request +type GetJobInstanceStatusReq struct { + JobInstanceItem +} + +// GetJobInstanceStatusResp jobapi get_job_instance_status response +type GetJobInstanceStatusResp struct { + JobInstance struct { + BkBizID int `json:"bk_biz_id"` + JobInstanceID int64 `json:"job_instance_id"` + Name string `json:"name"` + BkScopeType string `json:"bk_scope_type"` + StartTime int64 `json:"start_time"` + BkScopeID string `json:"bk_scope_id"` + CreateTime int64 `json:"create_time"` + Status int `json:"status"` + EndTime int64 `json:"end_time"` + TotalTime int `json:"total_time"` + } `json:"job_instance"` + Finished bool `json:"finished"` + StepInstanceList []struct { + Status int `json:"status"` + TotalTime int `json:"total_time"` + Name string `json:"name"` + StartTime int64 `json:"start_time"` + StepInstanceID int64 `json:"step_instance_id"` + StepIPResultList []struct { + Status int `json:"status"` + TotalTime int `json:"total_time"` + IP string `json:"ip"` + StartTime int64 `json:"start_time"` + BkHostID int `json:"bk_host_id"` + ExitCode int `json:"exit_code"` + BkCloudID int `json:"bk_cloud_id"` + Tag string `json:"tag"` + EndTime int64 `json:"end_time"` + ErrorCode int `json:"error_code"` + } `json:"step_ip_result_list"` + CreateTime int64 `json:"create_time"` + EndTime int64 `json:"end_time"` + ExecuteCount int `json:"execute_count"` + Type int `json:"type"` + } `json:"step_instance_list"` +} + +// BatchGetJobInstanceIpLogReq jobapi batch_get_job_instance_ip_log request +type BatchGetJobInstanceIpLogReq struct { + JobInstanceItem + IPList []IPItem `json:"ip_list"` +} + +// ScriptTaskLogItem jobapi et_job_instance_ip_log response item +type ScriptTaskLogItem struct { + HostID int `json:"host_id"` + Ipv6 interface{} `json:"ipv6"` + LogContent string `json:"log_content"` + BkCloudID int `json:"bk_cloud_id"` + IP string `json:"ip"` +} + +// BatchGetJobInstanceIpLogResp jobapi batch_get_job_instance_ip_log response +type BatchGetJobInstanceIpLogResp struct { + JobInstanceID int64 `json:"job_instance_id"` + FileTaskLogs interface{} `json:"file_task_logs"` + ScriptTaskLogs []ScriptTaskLogItem `json:"script_task_logs"` + StepInstanceID int64 `json:"step_instance_id"` + LogType int `json:"log_type"` +} + +// ToBatchScriptLogList 
转换为BatchScriptLogList +func (rsp *BatchGetJobInstanceIpLogResp) ToBatchScriptLogList() (ret []BatchScriptLogItem) { + for _, item := range rsp.ScriptTaskLogs { + ret = append(ret, BatchScriptLogItem{ + IPItem: IPItem{ + BkCloudID: item.BkCloudID, + IP: item.IP, + }, + LogContent: item.LogContent, + }) + } + return +} + +// BatchScriptLogItem 脚本执行结果日志 +type BatchScriptLogItem struct { + IPItem + LogContent string `json:"log_content"` +} + +// TransferFileSourceItem jobapi transfer_file file_source +type TransferFileSourceItem struct { + BkCloudID int `json:"bk_cloud_id"` + IP string `json:"ip"` + Account string `json:"account"` + FileList []string `json:"file_list"` +} + +// TransferFileReq jobapi transfer_file request +type TransferFileReq struct { + SourceList []TransferFileSourceItem `json:"source_list"` + TargetAccount string `json:"target_account"` + TargetDir string `json:"target_dir"` + TargetIPList []IPItem `json:"target_ip_list"` + Timeout int `json:"timeout"` +} + +// IsLocalCopy 是否源和目标都是本机 +func (req *TransferFileReq) IsLocalCopy() (ret bool, err error) { + var localIP string + localIP, err = util.GetLocalIP() + if err != nil { + return + } + for _, ipitem := range req.TargetIPList { + if ipitem.IP != localIP { + return false, nil + } + if ipitem.BkCloudID != viper.GetInt("bkDbm.bk_cloud_id") { + return false, nil + } + } + for _, sitem := range req.SourceList { + if sitem.IP != localIP { + return false, nil + } + if sitem.BkCloudID != viper.GetInt("bkDbm.bk_cloud_id") { + return false, nil + } + } + return true, nil +} + +// IsLocalTarget 是否目标是本机 +func (req *TransferFileReq) IsLocalTarget() (ret bool, err error) { + var localIP string + localIP, err = util.GetLocalIP() + if err != nil { + return + } + for _, ipitem := range req.TargetIPList { + if ipitem.IP != localIP { + return false, nil + } + if ipitem.BkCloudID != viper.GetInt("bkDbm.bk_cloud_id") { + return false, nil + } + } + return true, nil +} diff --git a/dbm-services/redis/redis-dts/pkg/scrdbclient/scrdbclient.go b/dbm-services/redis/redis-dts/pkg/scrdbclient/scrdbclient.go new file mode 100644 index 0000000000..622c741ae2 --- /dev/null +++ b/dbm-services/redis/redis-dts/pkg/scrdbclient/scrdbclient.go @@ -0,0 +1,273 @@ +// Package scrdbclient 向scr/dbm发起http请求 +package scrdbclient + +import ( + "bytes" + "dbm-services/redis/redis-dts/pkg/constvar" + "dbm-services/redis/redis-dts/util" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/http/httputil" + "strings" + "time" + + "github.com/dgrijalva/jwt-go/v4" + "github.com/spf13/viper" + "go.uber.org/zap" +) + +const ( + // apiserver response code + statusSuccess int = 0 + + // job executer user + // jobExecuterUser = "pub" + jobExecuterUser = "scr-system" +) + +// APIServerResponse .. +type APIServerResponse struct { + Code int `json:"code"` + Message string `json:"message"` + Data json.RawMessage `json:"data"` +} + +// Client http request client +type Client struct { + servicename string + apiserver string + + // JWT token + token string + + // client for apiservers + client *http.Client + // logger + logger *zap.Logger +} + +// NewClient .. 
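+// 依据 serviceName(如 constvar.BkDbm / constvar.DtsRemoteTendisxk8s)从配置中解析对应的 rootUrl;
+// 用法示例: cli, err := scrdbclient.NewClient(constvar.BkDbm, logger)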
+func NewClient(serviceName string, logger *zap.Logger) (*Client, error) { + if logger == nil { + return nil, fmt.Errorf("logger cann't be nil") + } + var err error + cli := &Client{} + cli.servicename = serviceName + err = cli.getapiserver() + if err != nil { + return nil, err + } + tr := &http.Transport{} + cli.client = &http.Client{ + Transport: tr, + } + cli.logger = logger + return cli, nil +} + +// GetServiceName get servicename +func (c *Client) GetServiceName() string { + return c.servicename +} +func (c *Client) getapiserver() (err error) { + switch c.servicename { + case constvar.DtsRemoteTendisxk8s: + c.apiserver = viper.GetString("dtsRemoteTendisxk8s.rootUrl") + case constvar.BkDbm: + c.apiserver = viper.GetString("bkDbm.rootUrl") + default: + c.apiserver = "" + } + if c.apiserver == "" { + err := fmt.Errorf("%s rootUrl(%s) cann't be empty", c.servicename, c.apiserver) + c.logger.Error(err.Error()) + return err + } + return nil +} + +func (c *Client) getSecretKey() (secretKey string, err error) { + switch c.servicename { + case constvar.DtsRemoteTendisxk8s: + secretKey = viper.GetString("dtsRemoteTendisxk8s.secret_key") + default: + secretKey = "" + } + if secretKey == "" { + err = fmt.Errorf("%s secret_key(%s) cann't be empty", c.servicename, secretKey) + c.logger.Error(err.Error()) + return + } + return +} + +func (c *Client) getSecretID() (secretID string, err error) { + switch c.servicename { + case constvar.DtsRemoteTendisxk8s: + secretID = viper.GetString("dtsRemoteTendisxk8s.secret_id") + default: + secretID = "" + } + if secretID == "" { + err = fmt.Errorf("%s secret_id(%s) cann't be empty", c.servicename, secretID) + c.logger.Error(err.Error()) + return + } + return +} + +func (c *Client) getReqBody(method, url string, params interface{}) (body []byte, err error) { + if params == nil { + return + } + + // 将 params 转换为 JSON 字符串 + jsonParams, err := json.Marshal(params) + if err != nil { + err = fmt.Errorf("getReqBody json.Marshal %+v get an error: %v", params, err) + c.logger.Error(err.Error()) + return + } + if method != http.MethodPost || c.GetServiceName() != constvar.BkDbm { + return + } + + // 反序列化 JSON 字符串为 map[string]interface{} + var mapParams map[string]interface{} + err = json.Unmarshal(jsonParams, &mapParams) + if err != nil { + err = fmt.Errorf("getReqBody json.Unmarshal %+v get an error: %v", params, err) + c.logger.Error(err.Error()) + return + } + + mapParams["db_cloud_token"] = viper.GetString("bkDbm.db_cloud_token") + mapParams["bk_cloud_id"] = viper.GetInt("bkDbm.db_cloud_id") + body, err = json.Marshal(mapParams) + if err != nil { + err = fmt.Errorf("getReqBody json.Marshal %+v get an error: %v", mapParams, err) + c.logger.Error(err.Error()) + return + } + return +} + +// DoNew 发起请求 +func (c *Client) DoNew(method, url string, params interface{}, others map[string]string) (*APIServerResponse, error) { + var resp *http.Response + var maxRetryTimes int = 5 + var req *http.Request + body, err := c.getReqBody(method, url, params) + if err != nil { + return nil, err + } + for maxRetryTimes >= 0 { + maxRetryTimes-- + err = nil + + req, err = http.NewRequest(method, c.apiserver+url, bytes.NewReader(body)) + if err != nil { + err = fmt.Errorf("scrDbClient http.NewRequest(%s,%s,%s) get an error:%s", + method, c.apiserver+url, string(body), err.Error()) + c.logger.Error(err.Error()) + return nil, err + } + c.setHeader(req, others) + + resp, err = c.client.Do(req) + if err != nil { + err = fmt.Errorf( + "an error occur while invoking client.Do, 
diff --git a/dbm-services/redis/redis-dts/tclog/tclog.go b/dbm-services/redis/redis-dts/tclog/tclog.go
new file mode 100644
index 0000000000..1dd1db42d9
--- /dev/null
+++ b/dbm-services/redis/redis-dts/tclog/tclog.go
@@ -0,0 +1,117 @@
+// Package tclog 基于zap的日志组件,支持stdout与滚动文件输出
+package tclog
+
+import (
+	"time"
+
+	"github.com/spf13/viper"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"gopkg.in/natefinch/lumberjack.v2"
+)
+
+// Logger is a global log descriptor
+var Logger *zap.Logger
+
+// timeEncoder format log time
+func timeEncoder(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
+	enc.AppendString(t.Format("2006-01-02 15:04:05.000 -07:00"))
+}
+
+// InitStdoutLog 所有日志都将被输出到标准输出中
+func InitStdoutLog() {
+	debug := viper.GetBool("TENDIS_DEBUG")
+	var level zap.AtomicLevel
+	if debug {
+		level = zap.NewAtomicLevelAt(zapcore.DebugLevel)
+	} else {
+		level = zap.NewAtomicLevelAt(zapcore.InfoLevel)
+	}
+	cfg := zap.Config{
+		Encoding:         "json",
+		Level:            level,
+		OutputPaths:      []string{"stdout"},
+		ErrorOutputPaths: []string{"stdout"},
+		EncoderConfig: zapcore.EncoderConfig{
+			MessageKey: "message",
+
+			LevelKey: "level",
+
+			TimeKey:    "time",
+			EncodeTime: zapcore.ISO8601TimeEncoder,
+
+			CallerKey:    "caller",
+			EncodeCaller: zapcore.ShortCallerEncoder,
+
+			NameKey:       "logger",
+			StacktraceKey: "stacktrace",
+
+			LineEnding:     zapcore.DefaultLineEnding,
+			EncodeLevel:    zapcore.LowercaseLevelEncoder,
+			EncodeDuration: zapcore.SecondsDurationEncoder,
+		},
+	}
+	Logger, _ = cfg.Build()
+}
+
+func newEncoderConfig() zapcore.EncoderConfig {
+	return zapcore.EncoderConfig{
+		// Keys can be anything except the empty string.
+		TimeKey:        "timestamp",
+		LevelKey:       "level",
+		NameKey:        "name",
+		CallerKey:      "file",
+		MessageKey:     "msg",
+		StacktraceKey:  "stacktrace",
+		LineEnding:     zapcore.DefaultLineEnding,
+		EncodeLevel:    zapcore.CapitalLevelEncoder,
+		EncodeTime:     timeEncoder,
+		EncodeDuration: zapcore.StringDurationEncoder,
+		EncodeCaller:   zapcore.ShortCallerEncoder,
+	}
+}
+
+// InitMainlog 所有日志输出到log/main.log文件中
+func InitMainlog() {
+	debug := viper.GetBool("TENDIS_DEBUG")
+	var level zap.AtomicLevel
+	if debug {
+		level = zap.NewAtomicLevelAt(zapcore.DebugLevel)
+	} else {
+		level = zap.NewAtomicLevelAt(zapcore.InfoLevel)
+	}
+	w := zapcore.AddSync(&lumberjack.Logger{
+		Filename:   "log/main.log",
+		MaxSize:    500, // megabytes
+		MaxBackups: 3,
+		MaxAge:     28, // days
+	})
+	core := zapcore.NewTee(
+		// 输出到文件;如需同时输出到stdout,启用下面被注释的core
+		zapcore.NewCore(zapcore.NewJSONEncoder(newEncoderConfig()), zapcore.AddSync(w), level),
+		// zapcore.NewCore(zapcore.NewJSONEncoder(newEncoderConfig()), zapcore.AddSync(os.Stdout), level),
+	)
+	Logger = zap.New(core, zap.AddCaller())
+}
+
+// NewFileLogger 新建一个logger
+func NewFileLogger(logFile string) *zap.Logger {
+	debug := viper.GetBool("TENDIS_DEBUG")
+	var level zap.AtomicLevel
+	if debug {
+		level = zap.NewAtomicLevelAt(zapcore.DebugLevel)
+	} else {
+		level = zap.NewAtomicLevelAt(zapcore.InfoLevel)
+	}
+	w := zapcore.AddSync(&lumberjack.Logger{
+		Filename:   logFile,
+		MaxSize:    500, // megabytes
+		MaxBackups: 3,
+		MaxAge:     28, // days
+	})
+	core := zapcore.NewTee(
+		// 输出到文件;如需同时输出到stdout,启用下面被注释的core
+		zapcore.NewCore(zapcore.NewJSONEncoder(newEncoderConfig()), zapcore.AddSync(w), level),
+		// zapcore.NewCore(zapcore.NewJSONEncoder(newEncoderConfig()), zapcore.AddSync(os.Stdout), level),
+	)
+	return zap.New(core, zap.AddCaller())
+}
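InitMainlog binds the package-level Logger to a lumberjack-rotated log/main.log, and NewFileLogger hands out independent per-task loggers with the same encoder; TENDIS_DEBUG switches all of them between Debug and Info level. A minimal sketch, with an illustrative task log name:

```go
package main

import (
	"github.com/spf13/viper"
	"go.uber.org/zap"

	"dbm-services/redis/redis-dts/tclog"
)

func main() {
	// TENDIS_DEBUG flips both the global and per-task loggers to debug level
	viper.Set("TENDIS_DEBUG", true)

	tclog.InitMainlog() // global Logger writes to log/main.log
	tclog.Logger.Info("dts server starting", zap.Int("port", 9090))

	// a dedicated, rotated logger for one migration task (illustrative file name)
	taskLogger := tclog.NewFileLogger("log/task_100.log")
	taskLogger.Debug("only visible when TENDIS_DEBUG is true")
}
```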
diff --git a/dbm-services/redis/redis-dts/util/httpReqNew.go b/dbm-services/redis/redis-dts/util/httpReqNew.go
new file mode 100644
index 0000000000..1098949de7
--- /dev/null
+++ b/dbm-services/redis/redis-dts/util/httpReqNew.go
@@ -0,0 +1,165 @@
+package util
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+
+	"go.uber.org/zap"
+)
+
+// HTTPPostJSON http POST请求,发送JSON数据
+func HTTPPostJSON(url string, params interface{}, logger *zap.Logger) ([]byte, error) {
+	var ret []byte
+	var err error
+	jsonStr, err := json.Marshal(params)
+	if err != nil {
+		logger.Error("HttpPostJSON json.Marshal fail", zap.Error(err),
+			zap.String("url", url), zap.Any("params", params))
+		return ret, fmt.Errorf("HttpPostJSON json.Marshal fail,err:%v", err)
+	}
+	logger.Info("post start ...", zap.String("url", url), zap.Any("params", params))
+	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
+	if err != nil {
+		logger.Error("new a post request fail", zap.Error(err),
+			zap.String("url", url), zap.Any("params", params))
+		return ret, fmt.Errorf("new a post request fail,err:%v,url:%v", err, url)
+	}
+	req.Header.Set("Content-Type", "application/json")
+
+	client := &http.Client{}
+	resp, err := client.Do(req)
+	if err != nil {
+		logger.Error("do post request fail", zap.Error(err),
+			zap.String("url", url), zap.Any("params", params))
+		return ret, fmt.Errorf("do post request fail,err:%v,url:%v", err, url)
+	}
+	defer resp.Body.Close()
+
+	logger.Info("post response", zap.String("status", resp.Status), zap.String("url", url))
+	ret, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		logger.Error("post read response fail", zap.Error(err), zap.Any("respBody", resp.Body), zap.String("url", url),
+			zap.Any("params", params))
+		return ret, fmt.Errorf("post read response fail,err:%v", err)
+	}
+	if resp.StatusCode != 200 {
+		logger.Error("do post request fail,resp.StatusCode != 200",
+			zap.Int("statusCode", resp.StatusCode),
+			zap.String("respStatus", resp.Status),
+			zap.String("respBody", string(ret)),
+			zap.String("url", url), zap.Any("params", params))
+		err = fmt.Errorf("do post request fail,resp.status:%s resp.StatusCode:%d err:%v",
+			resp.Status, resp.StatusCode, err)
+		return ret, err
+	}
+	return ret, nil
+}
+
+// HTTPGetURLParams http Get请求将参数解析到url中,然后再发送请求
+func HTTPGetURLParams(url string, params interface{}, logger *zap.Logger) ([]byte, error) {
+	var ret []byte
+	var err error
+	var jsonStr []byte
+	var fullURL string
+	if params != nil {
+		jsonStr, err = json.Marshal(params)
+		if err != nil {
+			logger.Error("HttpGetUrlParams json.Marshal fail", zap.Error(err),
+				zap.String("url", url), zap.Any("params", params))
+			return ret, fmt.Errorf("HttpGetUrlParams json.Marshal fail,err:%v", err)
+		}
+		paramsMap := make(map[string]interface{})
+		if err = json.Unmarshal(jsonStr, &paramsMap); err != nil {
+			logger.Error("HttpGetUrlParams json.Unmarshal fail", zap.Error(err),
+				zap.String("url", url), zap.Any("params", params))
+			return ret, fmt.Errorf("HttpGetUrlParams json.Unmarshal fail,err:%v", err)
+		}
+
+		paramsStr := "?"
+		for k, v := range paramsMap {
+			if len(paramsStr) == 1 {
+				paramsStr = paramsStr + fmt.Sprintf("%v=%v", k, v)
+			} else {
+				paramsStr = paramsStr + fmt.Sprintf("&%v=%v", k, v)
+			}
+		}
+		fullURL = url + paramsStr
+	} else {
+		fullURL = url
+	}
+	resp, err := http.Get(fullURL)
+	if err != nil {
+		logger.Error("do get request fail", zap.Error(err), zap.String("fullURL", fullURL))
+		return ret, fmt.Errorf("do get request fail,err:%v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != 200 {
+		respStr, _ := json.Marshal(resp)
+		logger.Error("get read response fail,resp.StatusCode != 200", zap.Int("statusCode", resp.StatusCode),
+			zap.String("respStatus", resp.Status), zap.String("respBody", string(respStr)))
+		err = fmt.Errorf("get read response fail,resp.status:%s!=200 resp.StatusCode:%d err:%v", resp.Status, resp.StatusCode,
+			err)
+		return ret, err
+	}
+
+	ret, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		logger.Error("get read response fail", zap.Error(err),
+			zap.String("fullURL", fullURL), zap.Any("respBody", resp.Body))
+		return ret, fmt.Errorf("get read response fail,err:%v", err)
+	}
+	return ret, nil
+}
+
+// HTTPGetJSON http Get请求,发送json数据
+func HTTPGetJSON(url string, params interface{}, logger *zap.Logger) ([]byte, error) {
+	var ret []byte
+	var err error
+	jsonStr, err := json.Marshal(params)
+	if err != nil {
+		logger.Error("HttpGetJSON json.Marshal fail", zap.Error(err),
+			zap.String("url", url), zap.Any("params", params))
+		return ret, fmt.Errorf("HttpGetJSON json.Marshal fail,err:%v", err)
+	}
+	logger.Info("get start ...", zap.String("url", url), zap.Any("params", params))
+	req, err := http.NewRequest("GET", url, bytes.NewBuffer(jsonStr))
+	if err != nil {
+		logger.Error("new a get request fail", zap.Error(err),
+			zap.String("url", url), zap.Any("params", params))
+		return ret, fmt.Errorf("new a get request fail,err:%v,url:%v", err, url)
+	}
+	req.Header.Set("Content-Type", "application/json")
+
+	client := &http.Client{}
+	resp, err := client.Do(req)
+	if err != nil {
+		logger.Error("do get request fail", zap.Error(err),
+			zap.String("url", url), zap.Any("params", params))
+		return ret, fmt.Errorf("do get request fail,err:%v,url:%v", err, url)
+	}
+	defer resp.Body.Close()
+
+	logger.Info("get response", zap.String("status", resp.Status), zap.String("url", url))
+	ret, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		logger.Error("get read response fail", zap.Error(err), zap.Any("respBody", resp.Body),
+			zap.String("url", url), zap.Any("params", params))
+		return ret, fmt.Errorf("get read response fail,err:%v", err)
+	}
+	if resp.StatusCode != 200 {
+		logger.Error("do get json request fail,resp.StatusCode != 200", zap.Int("statusCode", resp.StatusCode),
+			zap.String("respStatus", resp.Status), zap.String("respBody", string(ret)),
+			zap.String("url", url), zap.Any("params", params))
+		err = fmt.Errorf("do get json request fail,resp.status:%s resp.StatusCode:%d err:%v",
+			resp.Status, resp.StatusCode, err)
+		return ret, err
+	}
+	return ret, nil
+}
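HTTPPostJSON marshals params into a JSON body, while HTTPGetURLParams flattens them into a ?k=v&k2=v2 query string; both return the raw response body and treat any non-200 status as an error. A minimal sketch against a hypothetical local endpoint:

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.uber.org/zap"

	"dbm-services/redis/redis-dts/util"
)

func main() {
	logger, _ := zap.NewDevelopment()
	defer logger.Sync()

	// hypothetical endpoint and payload, for illustration only
	body, err := util.HTTPPostJSON("http://127.0.0.1:8080/api/task/report",
		map[string]interface{}{"task_id": 100, "status": "running"}, logger)
	if err != nil {
		logger.Error("report failed", zap.Error(err))
		return
	}
	var rsp map[string]interface{}
	if err := json.Unmarshal(body, &rsp); err != nil {
		logger.Error("bad response", zap.Error(err))
		return
	}
	fmt.Println("post response:", rsp)

	// params become the query string: .../api/task?task_id=100
	if _, err := util.HTTPGetURLParams("http://127.0.0.1:8080/api/task",
		map[string]interface{}{"task_id": 100}, logger); err != nil {
		logger.Error("get failed", zap.Error(err))
	}
}
```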
diff --git a/dbm-services/redis/redis-dts/util/osCmd.go b/dbm-services/redis/redis-dts/util/osCmd.go
new file mode 100644
index 0000000000..24687f4534
--- /dev/null
+++ b/dbm-services/redis/redis-dts/util/osCmd.go
@@ -0,0 +1,71 @@
+package util
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"os"
+	"os/exec"
+	"strings"
+	"time"
+
+	"go.uber.org/zap"
+)
+
+// DealLocalCmdPid 处理本地命令得到pid
+type DealLocalCmdPid interface {
+	DealProcessPid(pid int) error
+}
+
+// 
RunLocalCmd 运行本地命令并得到命令结果 +func RunLocalCmd( + cmd string, opts []string, outFile string, + dealPidMethod DealLocalCmdPid, + timeout time.Duration, logger *zap.Logger) (retStr string, err error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + cmdCtx := exec.CommandContext(ctx, cmd, opts...) + var retBuffer bytes.Buffer + var errBuffer bytes.Buffer + var outFileHandler *os.File + if len(strings.TrimSpace(outFile)) == 0 { + cmdCtx.Stdout = &retBuffer + } else { + outFileHandler, err = os.Create(outFile) + if err != nil { + logger.Error("RunLocalCmd create outfile fail", zap.Error(err), zap.String("outFile", outFile)) + return "", fmt.Errorf("RunLocalCmd create outfile fail,err:%v,outFile:%s", err, outFile) + } + defer outFileHandler.Close() + logger.Info("RunLocalCmd create outfile success ...", zap.String("outFile", outFile)) + cmdCtx.Stdout = outFileHandler + } + cmdCtx.Stderr = &errBuffer + logger.Debug("Running a new local command", zap.String("cmd", cmd), zap.Strings("opts", opts)) + + if err = cmdCtx.Start(); err != nil { + logger.Error("RunLocalCmd cmd Start fail", zap.Error(err), zap.String("cmd", cmd), zap.Strings("opts", opts)) + return "", fmt.Errorf("RunLocalCmd cmd Start fail,err:%v", err) + } + if dealPidMethod != nil { + dealPidMethod.DealProcessPid(cmdCtx.Process.Pid) + } + if err = cmdCtx.Wait(); err != nil { + logger.Error("RunLocalCmd cmd wait fail", zap.Error(err), + zap.String("errBuffer", errBuffer.String()), + zap.String("retBuffer", retBuffer.String()), + zap.String("cmd", cmd), zap.Strings("opts", opts)) + return "", fmt.Errorf("RunLocalCmd cmd wait fail,err:%v", err) + } + retStr = retBuffer.String() + if len(errBuffer.String()) > 0 { + logger.Error("RunLocalCmd fail", zap.String("err", errBuffer.String()), + zap.String("cmd", cmd), zap.Strings("opts", opts)) + err = fmt.Errorf("RunLocalCmd fail,err:%s", retBuffer.String()+"\n"+errBuffer.String()) + } else { + err = nil + } + retStr = strings.TrimSpace(retStr) + return +} diff --git a/dbm-services/redis/redis-dts/util/redis_util.go b/dbm-services/redis/redis-dts/util/redis_util.go new file mode 100644 index 0000000000..a506e6f8c9 --- /dev/null +++ b/dbm-services/redis/redis-dts/util/redis_util.go @@ -0,0 +1,80 @@ +package util + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +func convertVersionToUint(version string) (total uint64, err error) { + version = strings.TrimSpace(version) + if version == "" { + return 0, nil + } + list01 := strings.Split(version, ".") + billion := "" + thousand := "" + single := "" + if len(list01) == 0 { + err = fmt.Errorf("version:%s format not correct", version) + return 0, err + } + billion = list01[0] + if len(list01) >= 2 { + thousand = list01[1] + } + if len(list01) >= 3 { + single = list01[2] + } + + if billion != "" { + b, err := strconv.ParseUint(billion, 10, 64) + if err != nil { + err = fmt.Errorf("convertVersionToUint strconv.ParseUint fail,err:%v,billion:%s,version:%s", err, billion, version) + return 0, err + } + total += b * 1000000 + } + if thousand != "" { + t, err := strconv.ParseUint(thousand, 10, 64) + if err != nil { + err = fmt.Errorf("convertVersionToUint strconv.ParseUint fail,err:%v,thousand:%s,version:%s", err, thousand, version) + return 0, err + } + total += t * 1000 + } + if single != "" { + s, err := strconv.ParseUint(single, 10, 64) + if err != nil { + err = fmt.Errorf("convertVersionToUint strconv.ParseUint fail,err:%v,single:%s,version:%s", err, single, version) + return 0, err + } + total += s + } + 
return total, nil +} + +// TendisVersionParse tendis版本解析 +func TendisVersionParse(version string) (baseVersion, subVersion uint64, err error) { + reg01 := regexp.MustCompile(`[\d+.]+`) + rets := reg01.FindAllString(version, -1) + if len(rets) == 0 { + err = fmt.Errorf("TendisVersionParse version:%s format not correct", version) + return 0, 0, err + } + if len(rets) >= 1 { + baseVersion, err = convertVersionToUint(rets[0]) + if err != nil { + return 0, 0, err + } + } + if len(rets) >= 2 { + subVersion, err = convertVersionToUint(rets[1]) + if err != nil { + return 0, 0, err + } + } + + return baseVersion, subVersion, nil +} diff --git a/dbm-services/redis/redis-dts/util/util.go b/dbm-services/redis/redis-dts/util/util.go new file mode 100644 index 0000000000..e62aa75f9e --- /dev/null +++ b/dbm-services/redis/redis-dts/util/util.go @@ -0,0 +1,289 @@ +// Package util TODO +package util + +import ( + "bufio" + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "syscall" + "time" + + "go.uber.org/zap" +) + +// NotFound error +const NotFound = "not found" + +// NewNotFound .. +func NewNotFound() error { + return errors.New(NotFound) +} + +// IsNotFoundErr .. +func IsNotFoundErr(err error) bool { + if err.Error() == NotFound { + return true + } + return false +} + +// MkDirIfNotExists 如果目录不存在则创建 +func MkDirIfNotExists(dir string) error { + _, err := os.Stat(dir) + if err == nil { + return nil + } + if os.IsNotExist(err) == true { + err = os.MkdirAll(dir, 0750) + if err != nil { + return fmt.Errorf("MkdirAll fail,err:%v,dir:%s", err, dir) + } + } + return nil +} + +// CurrentExecutePath 当前可执行文件所在目录 +func CurrentExecutePath() (string, error) { + path01, err := os.Executable() + if err != nil { + return "", fmt.Errorf("os.Executable fail,err:%v", err) + } + return filepath.Dir(path01), nil +} + +// GetLocalIP 获得本地ip +func GetLocalIP() (string, error) { + var localIP string + var err error + addrs, err := net.InterfaceAddrs() + if err != nil { + return localIP, fmt.Errorf("GetLocalIP net.InterfaceAddrs fail,err:%v", err) + } + for _, addr := range addrs { + if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { + if ipnet.IP.To4() != nil { + localIP = ipnet.IP.String() + return localIP, nil + } + } + } + return localIP, fmt.Errorf("can't find local ip") +} + +// FileLineCounter 计算文件行数 +// 参考: https://stackoverflow.com/questions/24562942/golang-how-do-i-determine-the-number-of-lines-in-a-file-efficiently +func FileLineCounter(filename string) (lineCnt uint64, err error) { + _, err = os.Stat(filename) + if err != nil && os.IsNotExist(err) == true { + return 0, fmt.Errorf("file:%s not exists", filename) + } + file, err := os.Open(filename) + if err != nil { + return 0, fmt.Errorf("file:%s open fail,err:%v", filename, err) + } + defer file.Close() + reader01 := bufio.NewReader(file) + buf := make([]byte, 32*1024) + lineCnt = 0 + lineSep := []byte{'\n'} + + for { + c, err := reader01.Read(buf) + lineCnt += uint64(bytes.Count(buf[:c], lineSep)) + + switch { + case err == io.EOF: + return lineCnt, nil + + case err != nil: + return lineCnt, fmt.Errorf("file:%s read fail,err:%v", filename, err) + } + } +} + +// CheckProcessAlive :通过kill 0检查进程是否存在 +// https://stackoverflow.com/questions/15204162/check-if-a-process-exists-in-go-way +func CheckProcessAlive(pid int) (isAlive bool, err error) { + process, err := os.FindProcess(pid) + if err != nil { + return false, fmt.Errorf("failed to find process,err:%v,pid:%d", err, pid) + } + 
err = process.Signal(syscall.Signal(0)) + if err != nil { + // 进程不存在 + return false, nil + } + return true, nil +} + +// KillProcess 杀死进程 +func KillProcess(pid int) error { + process, err := os.FindProcess(pid) + if err != nil { + return fmt.Errorf("Fail to find process,err:%v,pid:%d", err, pid) + } + err = process.Kill() + if err != nil { + return fmt.Errorf("Fail to kill process,err:%v pid:%d", err, pid) + } + return nil +} + +// CheckPortIsInUse 检查端口是否被占用 +func CheckPortIsInUse(ip, port string) (inUse bool, err error) { + timeout := 3 * time.Second + conn, err := net.DialTimeout("tcp", net.JoinHostPort(ip, port), timeout) + if err != nil && strings.Contains(err.Error(), "connection refused") { + return false, nil + } else if err != nil { + return false, fmt.Errorf("net.DialTimeout fail,err:%v", err) + } + if conn != nil { + defer conn.Close() + return true, nil + } + return false, nil +} + +// GetANotUsePort 获取一个进程未使用的端口 +func GetANotUsePort(ip string, startPort, step int) (dstPort int, err error) { + var inUse bool + dstPort = startPort + for { + inUse, err = CheckPortIsInUse(ip, strconv.Itoa(dstPort)) + if err != nil { + return 0, err + } + if inUse == false { + return + } + dstPort = dstPort + step + } +} + +// GetPidThatUsePort 获取使用指定端口的进程的pid, +// 可能获取不成功,pid返回为"" or - 的情况 +func GetPidThatUsePort(port int, logger *zap.Logger) (pid string, err error) { + ret, err := RunLocalCmd("bash", + []string{"-c", fmt.Sprintf("netstat -ntlp|grep %d|awk '{print $7}'|awk -F / '{print $1}'", port)}, + "", nil, 30*time.Second, logger) + if err != nil && strings.Contains(err.Error(), "Not all processes could be identified") == false { + return "", err + } + pids := strings.Fields(ret) + if len(pids) == 0 { + return "", nil + } + return pids[0], nil +} + +// IsDbDNS function to check is db domain +// for example: +// gamedb.test.spider.db +// gamedb.test.spider.db. 
+// gamedb.test.spider.db#20000 +// gamedb.test.spider.db.#20000 +func IsDbDNS(domainName string) bool { + domainName = strings.TrimSpace(domainName) + var pattern = `^((\w|-)+)\.((\w|-)+)\.((\w|-)+)\.db\.*#(\d+)|((\w|-)+)\.((\w|-)+)\.((\w|-)+)\.db\.*$` + reg01 := regexp.MustCompile(pattern) + idDNS := reg01.MatchString(domainName) + if idDNS { + return true + } + return false +} + +// LookupDbDNSIPs nsloopup domain +func LookupDbDNSIPs(addr01 string) (addrs []string, err error) { + addr01 = strings.TrimSpace(addr01) + list01 := strings.Split(addr01, ":") + if len(list01) != 2 { + err = fmt.Errorf("target addr[%s] format not corret", addr01) + return + } + ipPart := list01[0] + portPart := list01[1] + if IsDbDNS(ipPart) { + uniqMap := make(map[string]bool) + var idx int + for idx = 0; idx < 1000; idx++ { + ips, err := net.LookupIP(ipPart) + if err != nil { + err = fmt.Errorf("target addr[%s] could not get ips,err:%v\n", addr01, err) + return addrs, err + } + for _, ip01 := range ips { + ip02 := ip01 + if _, ok := uniqMap[ip02.String()]; ok == false { + addrs = append(addrs, fmt.Sprintf("%s:%s", ip02.String(), portPart)) + uniqMap[ip02.String()] = true + } + } + time.Sleep(1 * time.Microsecond) + } + return addrs, nil + } + addrs = []string{addr01} + return +} + +// ToString string +func ToString(param interface{}) string { + if param == nil { + return "" + } + ret, _ := json.Marshal(param) + return string(ret) +} + +// GetFileSize get file size +func GetFileSize(f string) (int64, error) { + fd, err := os.Stat(f) + if err != nil { + return 0, err + } + return fd.Size(), nil +} + +// IsFileExistsInCurrDir (TendisDTSServer可执行文件)同目录下,文件是否存在 +func IsFileExistsInCurrDir(file string) (fullPath string, err error) { + currentPath, err := CurrentExecutePath() + if err != nil { + return "", err + } + + fullPath = filepath.Join(currentPath, file) + _, err = os.Stat(fullPath) + if err != nil && os.IsNotExist(err) == true { + err = fmt.Errorf("%s not exists,err:%v", file, err) + return "", err + } + return fullPath, nil +} + +// IsToolExecutableInCurrDir (TendisDTSServer可执行文件)同目录下,工具是否存在,是否可执行 +func IsToolExecutableInCurrDir(tool string) (fullPath string, err error) { + fullPath, err = IsFileExistsInCurrDir(tool) + if err != nil { + return "", err + } + _, err = os.Stat(fullPath) + if err != nil && os.IsPermission(err) == true { + err = os.Chmod(fullPath, 0774) + if err != nil { + err = fmt.Errorf("%s os.Chmod 0774 fail,err:%v", fullPath, err) + return fullPath, err + } + } + return +} diff --git a/dbm-ui/.coveragerc b/dbm-ui/.coveragerc new file mode 100644 index 0000000000..0ab7c506f0 --- /dev/null +++ b/dbm-ui/.coveragerc @@ -0,0 +1,2 @@ +[run] +source = backend/ diff --git a/dbm-ui/.gitignore b/dbm-ui/.gitignore new file mode 100644 index 0000000000..241c96652d --- /dev/null +++ b/dbm-ui/.gitignore @@ -0,0 +1,136 @@ +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +# lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg +scripts/tests + +# PyInstaller +# Usually these storages are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*,cover +.hypothesis/ + +# Translations +#*.mo +#*.pot + +# Django stuff: +*.log +local_settings.py + +# Flask stuff: +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +../docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# pyenv +.python-version + +# celery beat schedule file +celerybeat-schedule + +# dotenv +.env + +# virtualenv +.venv/ +venv/ + +# Spyder project settings +.spyderproject + +# Rope project settings +.ropeproject +.DS_Store +.idea + +# Backups +*~ + +node_modules/ +npm-debug.log +yarn-error.log +package-lock.json +public/ +webpack-stats.json +*.sqlite3 + +.vscode/ +.envrc +.vimlocal + +# markeditor +.Archive +../docs/.Archive/ +../docs/.md_configs.data +../docs/.me_configs.data +celerybeat* +staticfiles +static + +backend/settings/dev_*.py +logs +#scripts +# +#load_db_meta_test.sh + +# test +backend/tests/test.db +pressure_test.py +test.py +*.env + +# 国际化 +*translate_info.json +*formatted_string_info.json + +# 忽略ssl +backend/components/conf +backend/dbm_init/tmp/ +scripts/ssls/ +.codecc +.vscode \ No newline at end of file diff --git a/dbm-ui/.pylintrc b/dbm-ui/.pylintrc new file mode 100644 index 0000000000..e001f7452d --- /dev/null +++ b/dbm-ui/.pylintrc @@ -0,0 +1,46 @@ +[MASTER] +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use. +jobs=1 +[MESSAGES CONTROL] +# Disable the message, report, category or checker with the given id(s). +disable=all +# Enable the message, report, category or checker with the given id(s). +enable=c-extension-no-member, + bad-indentation, + bad-whitespace, + bare-except, + broad-except, + dangerous-default-value, + function-redefined, + len-as-condition, + line-too-long, + misplaced-future, + missing-final-newline, + mixed-indentation, + mixed-line-endings, + multiple-imports, + multiple-statements, + singleton-comparison, + trailing-comma-tuple, + trailing-newlines, + trailing-whitespace, + unexpected-line-ending-format, + unused-import, + unused-variable, + wildcard-import, + wrong-import-order +[FORMAT] +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format=LF +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ +# Maximum number of characters on a single line. +max-line-length=120 +# Maximum number of lines in a module. +max-module-lines=2000 +[EXCEPTIONS] +# Exceptions that will emit a warning when being caught. Defaults to +# "BaseException, Exception". 
+overgeneral-exceptions=BaseException, + Exception \ No newline at end of file diff --git a/dbm-ui/DBM_README.md b/dbm-ui/DBM_README.md new file mode 100644 index 0000000000..bf1f8dfb7d --- /dev/null +++ b/dbm-ui/DBM_README.md @@ -0,0 +1,224 @@ +# 蓝鲸智云 DB 平台 + +## 代码规范 + +为规范代码风格,建议启用本地`pre-commit` + +引入包这里可以自动化排序和格式化,可以装下isort和black(可以保证整个项目代码格式规范一致)。 +在.pre-commit-config.yaml中这样的配置 +```yaml +- repo: https://github.com/timothycrosley/isort + rev: 5.7.0 + hooks: + - id: isort + exclude: > + (?x)^( + .*/migrations/.* + | backend/packages/.* + )$ + additional_dependencies: [toml] +``` + +### 使用方法 +```bash +# 直接使用pip安装即可 +pip install pre-commit + +# 直接执行此命令,设置git hooks钩子脚本 +pre-commit install + +# 直接执行此命令,进行全文件检查 +pre-commit run --all-storages +``` + +### pytest +```bash +# 执行命令 +pytest backend/tests +``` + +### 其他快速脚本见 /bin/xxx.sh + + +### 异常类封装 +1. 异常类统一继承 `backend.exceptions` 中的 `AppBaseException` +2. 各模块继承 `AppBaseException` 实现自己的异常类,可参考 `ticket.exceptions` +3. 可在异常类中定义 `MESSAGE_TPL`,以支持异常信息的格式化 +4. 错误码由 `PLAT_CODE`、`MODULE_CODE`、`ERROR_CODE` 组成 +5. 尽量在代码中抛出封装好的异常,会由 `backend.bk_web.middleware.AppBaseExceptionHandlerMiddleware`统一捕获处理, +其他异常会被统一返回为系统异常 + + +### API请求封装 +1. 第三方系统的接口请求统一使用 `backend.api`,使用方法为 + ``` + from backend.api import CCApi + CCApi.search_business({...}) + ``` +2. 此封装默认自动处理标准接口返回对 `code`,`result`,`message`,`data`进行处理, +若接口成功则直接返回`data`数据, 若接口失败则抛出`ApiError`异常由中间件捕获处理, +告知调用什么系统失败,失败原因及错误码等。 +3. 如接口返回非蓝鲸标准返回,可传入参数`raw=True`进行请求,则直接返回`response`结果,`CCApi.search_business({...}, raw=True)` +4. 封装的API请求会根据不同的环境自动追加 `app_code`,`app_secret`,`bk_ticket`/`bk_token` 等必要的认证参数 +(其他非认证参数如 `bk_biz_id` 不应该在此处添加) + + +### 环境变量 +1. 环境变量统一在 `backend.env` 进行配置和使用 +2. 避免环境变量散落在其他文件下,以便统一管理,明确本系统 所需/可配置 的环境变量 + + + +## BK-DBM 本地部署 + +>本机环境: +> +>MacBook Pro (13-inch, M1, 2020) + +### 1. 资源准备 + +#### 1.1 准备本地RabbitMQ资源 + +在本地安装 `rabbitmq`,并启动 `rabbitmq-server`,服务监听的端口保持默认**5672**。 + +#### 1.2 准备本地Redis资源 + +在本地安装 `redis`,并启动 `redis-server`,服务监听的端口保持默认**6379**。 + +#### 1.3 准备本地MySQL资源 + +在本地安装 `mysql`,并启动 `mysql-server`,服务监听的端口保持默认**3306**。 + +#### 1.4 安装Python和依赖库 + +本地准备python环境,python版本要求在**>=3.6.2, <3.7**。 + +>python版本过高会导致后续poetry安装依赖报错 + +bk-dbm的依赖安装采用的是`poetry`,使用步骤如下: + +首先安装`poetry` + +```shell +pip install poetry +``` + +然后进入到工作目录中利用`poetry`安装依赖 + +```shell +poetry install +``` + +安装成功后会成功生成`.venv`虚拟环境,如果用Pycharm进行开发则可以直接使用该虚拟环境 + +### 2. 环境配置 + +#### 2.1 环境变量配置 + +在执行django的`manage.py`命令前,需要保证以下存在以下环境变量 + +```python +BK_LOG_DIR=/tmp/bkdbm; +BK_COMPONENT_API_URL="{BK_COMPONENT_API_URL}"; +BKPAAS_APP_ID=bk-dbm; +APP_TOKEN="{你的蓝鲸应用 APP_TOKEN}"; +DBA_APP_BK_BIZ_ID="{DBA_APP_BK_BIZ_ID}"; +BK_BASE_URL="{BK_BASE_URL}"; +DBCONFIG_APIGW_DOMAIN="{DBCONFIG_APIGW_DOMAIN}"; +BKREPO_USERNAME="{你的制品库用户名}"; +BKREPO_PASSWORD="{你的制品库密码}"; +BKREPO_PROJECT=bk-dbm; +BKREPO_PUBLIC_BUCKET=bk-dbm-package; +BKREPO_ENDPOINT_URL="{BKREPO_ENDPOINT_URL}"; +BKLOG_APIGW_DOMAIN="{BKLOG_APIGW_DOMAIN}"; +BKPAAS_LOGIN_URL="{BKPAAS_LOGIN_URL}"; +BKPAAS_APIGW_OAUTH_API_URL="{BKPAAS_APIGW_OAUTH_API_URL}"; +DJANGO_SETTINGS_MODULE=config.dev; +BK_IAM_V3_INNER_HOST="{BK_IAM_V3_INNER_HOST}"; +BK_IAM_V3_SAAS_HOST="{BK_IAM_V3_SAAS_HOST}"; +BK_LOGIN_URL="{BK_LOGIN_URL}"; +``` + +#### 2.2 数据库准备 + +1. 
修改项目目录的`./backend/settings/dev.py`中的数据库配置 + +```python +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.mysql', + 'NAME': os.environ.get('DB_NAME', 'bk-dbm'), #'APP_CODE, + 'USER': os.environ.get('DB_USER', 'username'), # 本机数据库账号 + 'PASSWORD': os.environ.get('DB_PASSWORD', 'password'), # 本地数据库密码 + 'HOST': os.environ.get('DB_HOST', '127.0.0.1'), + 'PORT': os.environ.get('DB_PORT', '3306'), + 'OPTIONS': { + 'init_command': """SET default_storage_engine=INNODB, sql_mode='STRICT_ALL_TABLES'""", + }, + } +} +``` + +2. 在mysql中创建名为`bk-dbm`的数据库 + +```shell +CREATE DATABASE `bk-dbm` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci; +``` + +3. 在项目目录下执行以下命令初始化数据库 + +```shell +python manage.py migrate +python manage.py createcachetable django_cache +``` + +#### 2.3 前端环境准备 + +1. 在工程目录下打包前端资源,进入`./frontend`执行命令: + +```shell +npm install +npm run build +``` + +>如果install的时候提示某些依赖not found(比如 iconcool),可以尝试切换npm腾讯源: +> +>```shell +>npm config set registry https://mirrors.tencent.com/npm/ +>``` + +2. 打包成功后将`./frontend/dist`下的所有文件复制到`./backend/static`中 + +```shell +cp -rf ./frontend/dist/* ./backend/static/ +``` + +3. 收集静态资源 + +```shell +python manage.py collectstatic --settings=config.dev --noinput +``` + +#### 2.4 配置本地hosts + +* windows: 在 C:\Windows\System32\drivers\etc\host 文件中添加“127.0.0.1 dev.{BK_PAAS_HOST}” +* mac: 执行 “sudo vim /etc/hosts”,添加“127.0.0.1 dev.{BK_PAAS_HOST}”。 + +### 3. 启动进程 + +#### 3.1 启动celery + +```shell +celery worker -A config.prod -Q er_execute,er_schedule -l info +``` + +>如果用pycharm进行配置的话,可以在运行/调试配置中新建python,在配置选项中选择模块名称(注意不是脚本路径),然后选择.venv的celery文件夹(模块),并在形参中配置celery启动参数 + +#### 3.2 启动Django + +```shell +python manage.py runserver appdev.{BK_PAAS_HOST}:8000 +``` + +使用浏览器开发 [http://appdev.{BK_PAAS_HOST}:8000/](http://appdev.{bk_paas_host}:8000/) 访问应用。 + diff --git a/dbm-ui/Dockerfile b/dbm-ui/Dockerfile new file mode 100644 index 0000000000..675ce88e27 --- /dev/null +++ b/dbm-ui/Dockerfile @@ -0,0 +1,85 @@ +FROM node:14.19.3-stretch-slim AS static-builder + +ENV NPM_VERSION 6.14.4 +RUN npm config set registry https://mirrors.tencent.com/npm/ + +WORKDIR /frontend +COPY frontend ./ +RUN yarn install +RUN yarn build +RUN pwd +RUN ls -lth + +FROM python:3.6.12-slim-buster AS base + +ENV LC_ALL=C.UTF-8 \ + LANG=C.UTF-8 + +## PYTHON +# Seems to speed things up +ENV PYTHONUNBUFFERED=1 +# Turns off writing .pyc files. Superfluous on an ephemeral container. +ENV PYTHONDONTWRITEBYTECODE=1 + +# Ensures that the python and pip executables used +# in the image will be those from our virtualenv. +ENV PATH="/venv/bin:$PATH" + +RUN set -ex && \ + chmod 1777 /tmp && \ + rm /etc/apt/sources.list && \ + echo "deb https://mirrors.cloud.tencent.com/debian buster main contrib non-free" >> /etc/apt/sources.list && \ + echo "deb https://mirrors.cloud.tencent.com/debian buster-updates main contrib non-free" >> /etc/apt/sources.list && \ + echo "deb-src https://mirrors.cloud.tencent.com/debian buster main contrib non-free" >> /etc/apt/sources.list && \ + echo "deb-src https://mirrors.cloud.tencent.com/debian buster-updates main contrib non-free" >> /etc/apt/sources.list + +RUN set -ex && mkdir ~/.pip && printf '[global]\nindex-url = https://mirrors.tencent.com/pypi/simple/' > ~/.pip/pip.conf + +FROM base AS builder + +WORKDIR / + +# Install OS package dependencies. +# Do all of this in one RUN to limit final image size. 
+RUN set -ex && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + gcc gettext mariadb-client libmariadbclient-dev default-libmysqlclient-dev && \ + rm -rf /var/lib/apt/lists/* + +RUN ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime + +COPY ./pyproject.toml ./poetry.lock / + +# 创建 Python 虚拟环境并安装依赖 +RUN set -ex && python -m venv /venv && . /venv/bin/activate && pip install --upgrade pip && pip install poetry && poetry install + +FROM base AS base-app + +# 安装运行时依赖 +RUN set -ex && \ + apt-get update && \ + apt-get install -y --no-install-recommends \ + gettext curl vim default-libmysqlclient-dev && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /app +USER root + +ADD ./ ./ + +# 拷贝构件 + +COPY --from=builder /venv /venv +COPY --from=static-builder /frontend/dist /app/static/ + +ENV APP_ID=bk-dbm +ENV APP_TOKEN=xxxx + +# 创建 celery 的migrations +RUN python manage.py makemigrations django_celery_beat + +# 收集静态文件 +RUN python manage.py collectstatic --settings=config.prod --noinput + +ENTRYPOINT ["/app"] diff --git a/dbm-ui/backend/.flake8 b/dbm-ui/backend/.flake8 new file mode 100644 index 0000000000..ecf89a0b2b --- /dev/null +++ b/dbm-ui/backend/.flake8 @@ -0,0 +1,19 @@ +[flake8] +ignore = W292,E116,E302,F401,W605,C901,F405,F403,W504,E741,E125,W503,F841,E203,E231,F541,E265 +exclude = + *migrations*, + *.pyc, + .git, + __pycache__, + node_modules/*, + */templates_module*, + */bin/*, + */settings/*, + scripts/*, + backend/packages/*, +max-line-length=129 +max-complexity=12 +format=pylint +show_source = True +statistics = True +count = True \ No newline at end of file diff --git a/dbm-ui/backend/__init__.py b/dbm-ui/backend/__init__.py new file mode 100644 index 0000000000..dbc70164b7 --- /dev/null +++ b/dbm-ui/backend/__init__.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +# import os +# import logging.config +# from backend.flow.engine.logger.json_formatter import JSONFilter +# +# configFile = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'settings/logging.conf.bak') +# logging.config.fileConfig(configFile) +# # filter_ = JSONFilter() +# # logger.addFilter(filter_) diff --git a/dbm-ui/backend/admin.py b/dbm-ui/backend/admin.py new file mode 100644 index 0000000000..aa5085c628 --- /dev/null +++ b/dbm-ui/backend/admin.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" diff --git a/dbm-ui/backend/asgi.py b/dbm-ui/backend/asgi.py new file mode 100644 index 0000000000..e89d30b8ad --- /dev/null +++ b/dbm-ui/backend/asgi.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" + +import os + +from django.core.asgi import get_asgi_application + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config") + +application = get_asgi_application() diff --git a/dbm-ui/backend/bk_dataview/README.md b/dbm-ui/backend/bk_dataview/README.md new file mode 100644 index 0000000000..71768d8ce5 --- /dev/null +++ b/dbm-ui/backend/bk_dataview/README.md @@ -0,0 +1,256 @@ +# BK-DataView-Py +BK-Dataview 可提供标准数据源接入, 也是一个标准的grafana frontend程序, 提供grafana代理访问, 完整的权限控制, 动态注入数据源和视图等 + +## DBM 系统中初始化grafana的步骤 +- 向监控获取DBA业务对应的监控数据源token(暂不支持线上获取) +- 打开dbm的grafana代理根路径,且必须带上orgName=dbm参数:/grafana/?orgName=dbm + +目前包含 2 个模块 +- datasources: 标准数据源接入 +- grafana: 代理访问后端grafana服务,可内嵌到其他产品使用,提供完整的权限控制, 动态注入数据源和视图等 + +## Grafana 特性 +- 代理访问 +- 自定义用户鉴权&权限控制 +- 支持多租户, 无缝对接第三方系统的项目/业务等 +- 基于租户的,动态数据源和视图注入 +- 内置多种鉴权,注入工具,开箱即用 + +## 安装依赖 +- Python (3.6+) +- django (1.11+) +- requests +- PyYAML + +后端依赖Grafana +- 建议版本: Grafana(9.1.x) + +## 使用示例 + +先下载好grafana(测试版本为9.1.8), 修改配置后,启动服务,grafana 配置参考`bkdbm.ini` +- 解压到安装目录,比如/data +- 拷贝bkdbm.ini到/data/conf目录 +- 添加监控数据源插件(可选) +- 启动grafana:./bin/grafana-server -config ./conf/bkdbm.ini + +pip install bk-dataview + +添加URL配置 +```python +# urls.py +from django.conf.urls import include, url + +urlpatterns = [ + # grafana访问地址, 需要和grafana前缀保持一致 + url(r"^grafana/", include("bk_dataview.grafana.urls")), +] +``` + +修改 settings.GRAFANA 配置项 +```python +# settings.py +GRAFANA = { + "HOST": "http://127.0.0.1:3000", + "PREFIX": "/grafana/", + "ADMIN": ("admin", "admin"), + "CODE_INJECTIONS": { + "": """ + +""" + } +} +``` +配置说明: +- HOST: 访问后端 Grafana 的 IP:Port。 +- PREFIX: 访问前缀,需要和Grafana的配置 root_url 保持一致。 +- ADMIN: admin账号,默认是("admin", "admin"), 请务必修改 Grafana 配置或者通过 grafana-cli 修改管理员密码。 +- CODE_INJECTIONS: 代码注入配置,用于在Grafana的html页面中注入一些代码,实现在不修改Grafana源码的情况下调整Grafana页面。 + + 默认会注入一段css代码隐藏Grafana的导航栏。 + + 该配置为字典结构,key和value会作为replace函数的参数。 + ```python + content = content.replace(key, value) + ``` + +访问 Grafana,使用orgName指定访问的业务/项目 +``` +http://dev.open.examle.com/path/grafana/?orgName=xxx +``` + +grafana 通过3个步骤控制权限和注入流程 +- AUTHENTICATION_CLASSES : 用户认证,验证OK, 在grafana创建用户 +- PERMISSION_CLASSES: org_name权限校验, 验证OK,会创建org, 同时把用户加入到当前org +- PROVISIONING_CLASSES: 提供自定义注入dashboard, datasources到当前org + + +## 自定义用户认证 
+```python +from bk_dataview.grafana.authentication import BaseAuthentication + +class BKAuthentication(BaseAuthentication): + def authenticate(self, request): + """ + - return None 用户校验失败 + - return user 对象,用户校验OK + """ + pass +``` + +修改配置项目 +```python +GRAFANA = { + "AUTHENTICATION_CLASSES": ["BKAuthentication"], +} +``` + +已经默认提供的鉴权 +- bk_dataview.grafana.authentication.SessionAuthentication +大部分SaaS,在中间件已经做了鉴权,SessionAuthentication只校验是否request.user是否合法 + +## 自定义权限校验 +```python +from bk_dataview.grafana.permissions import BasePermission + +class BKPermission(BasePermission): + def has_permission(self, request, view, org_name: str) -> bool: + pass +``` + +修改配置项目 +```python +GRAFANA = { + "PERMISSION_CLASSES": ["BKPermission"], +} +``` + +已经提供的权限校验 +- bk_dataview.grafana.permissions.AllowAny 允许所有 +- bk_dataview.grafana.permissions.IsAuthenticated 只校验用户登入态 + +对线上业务,`请务必实现自己的逻辑` + +## 自动注入数据源和Dashboard + +### 添加自定义的注入 +```python +from bk_dataview.grafana.provisioning import BaseProvisioning, Datasource, Dashboard + +class BKProvisioning(BaseProvisioning): + def datasources(self, request, org_name: str, org_id: int) -> List[Datasource]: + for x in xxx: + yield Datasource(**x) + + def dashboards(self, request, org_name: str, org_id: int) -> List[Dashboard]: + for x in xxx: + yield Dashboard(**x) +``` + +Datasource & Dashboard 标准格式 +```python +@dataclass +class Datasource: + """数据源标准格式 + """ + + name: str + type: str + url: str + access: str = "direct" + isDefault: bool = False + withCredentials: bool = True + database: Union[None, str] = None + jsonData: Union[None, Dict] = None + version: int = 0 + + +@dataclass +class Dashboard: + """面板标准格式 + """ + + title: str + dashboard: Dict + folder: str = "" + folderUid: str = "" + overwrite: bool = True +``` + +修改配置项目 +```python +GRAFANA = { + "PROVISIONING_CLASSES": ["BKProvisioning"], +} +``` + +### SimpleProvisioning +bk_dataview已经默认提供了一个简单的注入,通过配置PROVISIONING_PATH, 系统会自动查找datasource, dashboards配置项 + +datasources +```yaml +apiVersion: 1 +datasources: + - name: BK-BCS-Prometheus + type: prometheus + access: direct + url: $SETTINGS_DEVOPS_MONITOR_API_URL/api/metric/$ORG_NAME/datasources/data_prometheus/query + isDefault: true + withCredentials: true + jsonData: + httpMethod: "POST" +``` + +dashboards +```yaml +apiVersion: 1 +providers: + - name: "default" # 兼容字段 暂无无实际用处 + folder: "" + folderUid: "" + type: file # 兼容字段 暂无无实际用处 + options: + path: $SETTINGS_BASE_DIR/backend/apps/grafana/dashboards +``` + +注入可以使用环境变量,语法格式如$ENV_NAME, ${ENV_NAME}, 其他SDK已经添加了系统变量 +- ORG_NAME: 组织名,一般是业务id/项目id +- ORG_ID: 组织ID,ORG_NAME对应的id +- SETTINGS_XX: Django的配置项目,只有值是字符串类型是,才能使用 + +配置具体参考 [Grafana Provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/) + + +## 处理 backend +BACKEND_CLASS: 选择通过API还是DB处理上面的流程,目前支持 +- bk_dataview.grafana.backends.api.APIHandler +- bk_dataview.grafana.backends.api.DBHandler + +目前差异是 datasources 批量注入, 使用 DBHandler 对大量数据源的注入性能较高 + +修改配置项目 +```python +GRAFANA = { + "BACKEND_CLASS": "bk_dataview.grafana.backends.api.APIHandler", +} +``` + +## 本地开发 +```bash +# 先fork 到自己的仓库, 再clone到本地 + +# 本地开发 +workon env # 切换到自己的 python环境 +python setup.py develop # done, 本地修改后,代码会立即生效 + +# 欢迎PR +``` \ No newline at end of file diff --git a/dbm-ui/backend/bk_dataview/__init__.py b/dbm-ui/backend/bk_dataview/__init__.py new file mode 100644 index 0000000000..5c8072a32b --- /dev/null +++ b/dbm-ui/backend/bk_dataview/__init__.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community 
by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available.
+Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved.
+Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
+You may obtain a copy of the License at https://opensource.org/licenses/MIT
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+"""
+__version__ = "1.0.1"
diff --git a/dbm-ui/backend/bk_dataview/bkdbm.ini b/dbm-ui/backend/bk_dataview/bkdbm.ini
new file mode 100644
index 0000000000..67ec6fe92a
--- /dev/null
+++ b/dbm-ui/backend/bk_dataview/bkdbm.ini
@@ -0,0 +1,69 @@
+[server]
+http_port = 80
+root_url = /grafana/
+serve_from_sub_path = true
+
+[database]
+# type = sqlite3
+type = mysql
+host = 127.0.0.1
+name = grafana
+user = root
+password =
+
+[analytics]
+reporting_enabled = false
+check_for_updates = false
+
+[security]
+# disable creation of admin user on first start of grafana
+disable_initial_admin_creation = false
+
+# default admin user, created on startup
+admin_user = admin
+
+# default admin password, can be changed before first start of grafana, or in profile settings
+admin_password = admin
+
+# used for signing
+secret_key = "{secret_key}"
+
+# set to true if you want to allow browsers to render Grafana in a <frame>, <iframe>, <embed> or <object>
+allow_embedding = true
zkcUX*IV*B~WNu~nHIpWem;kGoMus0*IO<59%M{O$uT9a}zQvjl5YFyaDW&^KUN-i? z_4xi^0EtOhPq4-qHD$CP?~vCJVHxrN(oLVoIFJB+i-^?qik7)Lk~CF+1p&f?FYbhlM#M?h5+Cs!3ICr>3p zQ3CFE!+M(9G zOi!-w+sHQ0f3t%u_A#VO87)rbk#2bhXgpG&L~|jMo@P1Bnb^1^Y=*lu04w&E&?DM= zZnMsdV3+4K1Ykh6*#rPL87L<95pH?o*g$wjf|YRIA$-rEAd_v$2i#u^WKX+O2SW{0 z)1mz$q`C3_bpfCfKwT8;B$il2;Z+3m+xPzz(Aj$W6qIoKmrR+1@Xeul@|pnU&CrY@ zq_!`G+x3Rj!}RO|sCQj3y9(SV<|8xuRgoi|2`cH&W?-@zbox+u%oG0@?z_>MC8rL( zj}J3CAt6)ax;s{gL!RRm-~cdy(^Z7QIei)_kq)I=+|fQ#ynbv zQcF6}%r!y1SW^J}z)VPHw*dP1Fy7Vd@`j@!>eI6LiC^}IviOrC7^sybOq#(T^h^cH zfO<&(Cqeq@1=NpLCg$|nIK zlsF||n4CZIGe(C2JZqUU42SK1=T=SBBsi_+?$WTpY;EU-5)TSS9q3KXG)z;5IYHC> zFaPTQ{-G-2)2EvtsFMf7alZzC=s+nnToDNxV2x?+kh!BV?PTuI+fBa9AF)c-)YKS( ziD8Z0L&NuK9u3LEdqkwD|8@EB=DqlFJkXi~4lWECRt!f&0&U`9*s>Zl)TMx3v~%no zrg4Uh{JM}rnxSovR48XUV5ozOIPl!3mq`Rg|>CF8M|&^p$zN4kRx=A7O_ zQ)PhM?<7UTJk%**+IW}bPSrzjJ+h&k(X^YVl@$L%h*1af<@-Mq+5TA7@clQ?(TDm5 zt)aA3@LU1LH!9j1bQG;<9p-W_Kzy^cTBguOJ_~-23gD}aKRx*q< z1W4h5du7ia-@J4p%S1q}+Px1L*}_G#tTG%yqDdf(bJ)O20ybJP?XnJpO7fK&?cCjql##M;~2o4M=_ zr3azO9>`^sZ1cVFQ3$vq;v-a{Mc0u^8)!1X<&M&Xp6i%5h{h7Jbf5`ls0kaO zxUJdE@=N)kiXYI4ZnfkvLi-p9u}N(sK7xhlH@k*g^Kd2wBi!Kc$8wy!`s^OkfIzcj z0PSeEPioZWn}C$fLjTSy+gi0WcqQPa3XuIEM*8CvHa@*+hg@0~kNxl_wCu;SXl0`b z!6+DuA6bORpv}srzxpkj4+Iy`@#Nv!924l4gsN&@ll z9^lh`Fo=W^_Dz86?EcLQ|HI;oi0^cSlpU~Kk~#o?P^(oPqU#ZY5s!Qn4S9Oy`%z$4 z0mEZZcQ~{(M(?KKs|CFWgI*{}EJ^6sn9=?8B zW%dLx!Z1s!Oje@^*#u9#$OZw>)QnKQKU7O;%aeT16<8g0b zxatVmL&&SKQMq8HBT)Y+G#~fKYE-r6S+KCMQ~<5%P-h-=#zL#;INp_A4%OlH~z z8nTR`Dw}~uZOI~#84R$>+RvhPoR6~-k4xO0w2m}(6g^vz^`XbY3@h0EDx!|?e8{>d7OWqN=hpi8PZZzH4*K?qa6KGrYQuqDCg8t>( z(29#(xdv!j48u7Qu_<(Uo3%s&{yB9}0r9l8 z2JOZp%5HQZW9Ra48Vp@aeap=zBUD zHS@C&4FUvd>p{Rto#R_W4G~&qP`dYfq)8FA!LODCK*(V4^?`mZ8;DSFG^m5j#$CqX zIwW^}Uq!KDcPvBdA29^P?{|P6U7Lba>`6!fA)gq2#CrtK1`_-UOLKiyq4C3DZt4%& zaY9H<-$H3Z4O2&a@j-O$!#6x6qPYZNL zbq5{6db{T@j_oJ+HxM{UQih|!Pr2%F2%2oRhSG%XD8+&j*^> ztQfhAiuG`oL=h`$^hSrn4NO%ivEG(PnU*LUz$kx0`^Iv`_y9FI{hG{l1zJ>w+ z`5eziYjRp5-oVfNBmyUc36NZ%hywg?8WEK3pz}qM8Z+pmc*zhQXvVz#JsjHiQg_u| zqSBCra>TU z=)BNFMTLX0!DvdO64*o850~*M7?DJDJWMuEhwvPaoWeC2?`A!ek}H~;3Rf-lvpf_F z=*r2?vRe%*080=|`l2d@fMmVT5g1}m;6Iz#WH=i}2gAJ6crJV%5sHu?)VGM$sX?zN6>x2uA`FD1pRw)l`zLm-@1tD}r(= z-G+m(QN9h7JNdKV?m3#BOuYpX6&M(}?f_2spHi+wTpDQT8s0352yGn^$mb0Iml-^P z(-pdxc2oLDbqWG60&6FfB3mGcN&#}Im{%wn)cnDnTyrTO0eMSeFJggJ5H)q(Bq$Tcey86Fjj{ zwr4#%gb<_n{c(!paIbgOwDqdtQLayR0M0bsN}+v>e{&{ z{;u5CT@zdZ?6?8MG4b~pydV;41V~siVExzH?yTW;t|Vda?tQh;*?QrkzwLAcHBw(- zW2z3wzk+$-xPaS$AoE0};G!Bqy$O`t9G6f{k5I4uB`ATgah~ z=W-Kwhk*z!h*r4&q}KkHSXVd3XoD+|-SpCMgwjlkX-AO~*q%=r7o`Cm@)24Vy&^=x zB|?E`Jh#JExF#gb#!(nu5IYqar;K*Nz8;GD^gFg&c&Ummk zVHR<<+16^wj0^!O_kTzTh|sFQsa&H9#CKxda^C>9nuVIB!2q=GV^U59h?5E6{56iS z7ZosWGlZxGxapv*3#$4Zw?Xa6p{7hMBtn>r7`lEQCbzi)Geie48hd!1mg1lOFjxeH zVg;>I1;f$y?1=9EQ_*s;tspmmS@n!kv$Vzr7r^(EK+u)}MA5GBb44v1Fg;T$X>>co_-*%S< zJ#KaVMM^u;+>n-V|HE?&${9Vc8&R%lM5k|zAlO_q9N-%lqDbdO49cF`QThS-vk`LV zVNxsu#YJeV0IeP01RW`w0*D9EP`d#p5aYK)zY&~8TiB;Rj3@r1uyut>PX`KLfwISZ zKNJ-#MutEaknajZ!!f|Dl3~(N#XhLN7IKz2&ULKlKhHsg)(+{@b_cO;JU6e11Ud82 zF>j1$j`~<3VwSM1$v|L`jxKk>?+CIN{>+~B#1#|sdl-HD#{ zE==?jo@db!qUjcG;D3(`$PfVh7D4hH(hw*@Q2_xXJGj3O zPZEXnQ6`A0+4i2>c%G8D^GlR)2a+X_REr>lD{2G_F#@>TT_tfsz-a&+m4JF=jf7$y zXeDT*`VTsrmQ)**cIQyp&jU|4G=l#=*|_8Md}xgPXeb~efo?Aq{voT;1&*N+BpJBD zEuoTuY?S6Rc?e99MB&$P5Sz1sid&XC`(uRAreFG`mLRb^HZ}%zMrwXU6BNiI&}l7= zwI{0 z`AgzBnl76JGbGU2j211KfEop~Z2oDWGNFTzz~_9B0q}_5Wr9V8|?%E+}3PxFIr&ojOJ+=w)FzM102=|viH2-x0$r$R6rJ2 zLlFzW*RLhdZFfLS@*flWa5TJd)yQsEh2SxA_lQqoZo%wcUkjU|`U>FoFoiSrLh%(C zAI)dDb?^NZfINV6<+}BM_TOnq$>1U9&5sdpRTEET)@=ib#0jZyh#1gNdk#Ro`N5EG 
zlmmXoESxk+36}s+9)R?qOnfBxqRjw$P6k!?pHAr=3L1Gwf4};(>Ri9tB2Raow7Ufs zlN(bLTP;=u?2$kfq$rsPb!G|6eCS5Iux|;_=*9r*okclOTU3iiNj<|nDv#fGZY{p7 z^TghtUFJAXx$o^y&m*Npvl?^#KQ{b!c>lV!IDYXPjC;2NT!uK%Qa``<;Yr<g%q!Z@|bG%>nO^hdg(EG8V2U7r~MW9RX>=WMr8yZ(JRx=d8R zhkmdi=jVhhh%|)pYNLcTR5kMo0orCF^hen`rXvEZS*m8l(+HHTLM(SgB>SHX`$tC| zl*W?~Js$QOW#rQl?|qwn@v_XuDgU~Rgh7*0_R zSXUEhOM`(Lp2&v*ms?jWL%yTPNi!KLTxG&1EAMS;=^yS1FasYKi2IV#eSSk9Zk`3L zFyU)!YoH*@=5K=_rOk!~^%hPZBed}bczMC#WdJT_KxIB(52Z<9lFLWPDEpe4=~_O6 znEe&Ey@56lj8s|z-iZ-J=LHFgMxZg0qKw3U8@fgB1X|}^y>WEYL3eee0(#;Nh35L3 zE8OQ+#Ki#cva|&As}Om%D#4VCmvBSWVFYZ2#G_q}f+n6Q16JrT=Y3eF?XmCSL zWib1aC%SQgU!V>gt0Y}89NuhXNWpze@9O#+ees;_~BoofbUvMoFSKjUF0U#Rs7W-Lwu?DIyaU&>$ z0o{7}ISzK$-?491o{#{EM1nUKX;Jc%r2M5Io=CVN?g_!Pyx!0k$6lngetUbjEd!8^ z7gGQN3uRKNlfMUnNK0G*prsilj$Y zIT5OunqB9fB8?KfqW4QWD%2u9CT`6KMgv$P^k#wn<>|2h zNWqr}TTTK5C~qIU#Xkx=v>l*9{KvY(*Q1L!3@@FQtWQC9RDIHAxQ!^7rmpF?Zx=4p z+PZcU?tJ>&+X&jXj3t_${1k?+bi!(}aV#vcuN+=>?s>a2<`pOJ@mCHLF2ykxA%*-l znG)P(ZG~fYk>wF*E%bZCZS=>w2OL)_LAPcy_UsA^V`0s%l^cL);WDrtU*_w4bS%`^ zu>3O+=|x&Ybr|_V`V~Q{`XS8EacO{F&lwhKZfUjai%=EC0s$x;e*-5@($8C9@>ACP zJ^YVnE%>)>BOoLur4_mJ+vW#X(kI``oPPev`mYYSi$u0zJXvyXaWyjTu^4~52DZ`-y_=EiSLjK98HCChD?@^bU{Mi?Z8n@pvb13ARw8;^Yl}uYf&^q%Ampv>;T2mr5otd!3vleIwm?mZi)24j%LF96WdOveqK6AqHwlr_ z<^hHag~8I^chII02LH6|uTG}+b!CcAQ@?(nvQ$4eJnYB8{Cz^vZ;5nm4aTY8jPLyA z;OdDc2ZrfLKjc1YW6A9Z1Us3JrO^%Q;Y63+Lk7|@Oji5x9L-}X*U=kO-IY=s zue9;DK)E7NfGedoZB4y~J%FN}8gRh_;B=yx%W(h`4Tp@p|85)Dk#d3vL>tNZ{T{iu zl^bA3c_V;eRrMtfjxJz^A-=(LU`QW^S*5oN(`>n|;LA}w`JV9NddS9MMVr7jC2rh~ z8Unasw*$QfV6MHgn!`wTi(vgH((HG;PQ}6vEY2{MB+mt;A{KIMO6W4cOoRX;z8+O9 zY+eX+H72hokUFf*++>TWZX@6#A{3~LO?TaVa3mr809RK{1%J%>;0@#wa5|nWf%>7C zcr<8eb@&`2m_TxT88m{9$7AVJDOojDLP}zzwm2ysJy+=lO&d+?#!r_i4@TrP8zgxP zc-`14prrk`x5+nK@h1FE_RDr?p8JModp$@8u%A!OZEk2?q6mr1fdWa0E_#0O0LcprnyhtNP1#A=MG}opo|A8VOuxvsgOPls zxR_t=muu>uw_!$w-dkzI)I9wlVr#yUEL_IG5M6=1Kfo+Peh zmL(vFJTz!d&HT6IZu~{{epg^u5;n}KQ&P{4vun%AgMPjY5p0-AKI2dqbLN+)M102S zCHt*fZqgjvxXeG^`ScmsdN0XO)|(rWPkK5|Q&VGm1pJZ6g>}p)1_-aD<)zQ+=?&&q za*v~Nfh3k5l8Scj2}66JUEi^e=a;ua;wycM_Pso|(30<0$$z}Z6#)=Pfw#^ zhBfTC%Xj#vpP`FXZAXN}orpi><-1)GCmIJp94ZMS0F{yRM-iY;5e0=f908`UkktOy z^@FC3rUaXm@~!dW#xb7OC)~dGNO;68sGi>Fy;r$@9|;YJ!SE0@1w!{?x~S)3f-}&@ z!-YXf(`0$L<)CR+#~%vU$}r5_+mPCgZxp%n<`!u`_aX0rCqJxmGd-!R^uFmV(55## zMw%B~y!P@BzcgBl%1m=wzpWy~e;gGOK*M1Y07ig;VFjt`{s0_|B{opQ#0AJ58K&5# z1~eS{O4_+J4eT46|I_x+9H;%3aF>?m!Nv<+t0kI@#?BH7U5ds*8pLl8@N!tfLcHx6 z4zKH)LrUKuarL>hsGPN+HtsFLH%kjld|<8~XuRRGE{$r?xQqiXzZ&T935^HR-TdKP zX?%+0maAFl>c@#^$8P?Bm~-dFQtbT2H}YDEzeh~kH$|B5Tz?!`I+T0EhwmyHW=Rn7 zo&!S<$?@#$3o+1XgAgIvn1tsH`O7w z8WQtEFccRR-!lEuzyzo&Vedn`{TJ)frXQQMu?XVY1Q>S6S9m)A(%(zHrQrH$oT3L@ zovI?S*{DyW3gMF`x+BI6>#MFKfc8=oG~FEsF;XzVB$Nv69;W~gT^v|j#F7({31tk` zXdKY{3zsf_yNKt^`0m0pD{lu(`)*v===eLs(p(rLw6(6>ySkeq&@eUbQFi;#5`X)B z!?ln`2DQ=QG)E)IV6K2i1|l@B4X3s7h;@HS?Bb0hJtyAC{mqfHA?ms|I8(wnIm{#xxn-d(l|?7`r|U_bTdc_a}S z@ANR3`HdS{QIYo~wG!6YSENbF8RFb2?a(I{h=l4vG52ryHlFN2-$R(s_uT%+YHrBW zo)JXzPvSHm4NHSK+#KKzSbvFV76NUUMiy#6n1BN`SU4melQ;oom#vrkwOJn$bO})9 z9e@~K23nHDVSw=WTY^z{H+N$5xJmmTgY~s4O^UDDT%2*}Dd{Rx@Lg+Xfmzqcj2l8ElTd{dT{ZyTe~uJs=t zpmenltxVo|%w^fCKtArV%mWaR1&Cfdt z=EH}ei+e@5ckkUF?nrbb5nYnJKTI5ASm-kp{Vt3ia(Wj5I_H}aR!V9>ii!)js0@h@ z*72x)F6MKUx3j+~8&FFFP=>z(><`!M0pE&)fuVf%(%mmM-|@!>zK}P(!khv2gHz-@ zBPud&Uy=8@zBpIDeY^{9F4#+bVL~itIxMYfjMwNMtS`fb7h)ybLfwo%>f6n&u+;=EX+*R;_GMa5j|IsTYh9mBZ=&ftA=^MO zm13=?IVaQi&t3S<)tj7jyw)txd(&3`eG_W)-i-4IU-1!~vOe^6nx_0%`!c7x^V)MZ z2?wFJT5=wedxJO!k6MTLGKm)N)X$!&&U`dBpH~uOnxTM2lRz$CVZD9$sB+0pf&*JWisz zQB++^0V+q@{Z$9Lm30%oly1?$!m>E;S0ygPYtzvbN)V$jvl8kSvp+b^RP9>35N2^8 
zp2@f+^0T4o>r*M26OounZOPChN6qa*ujePUWESKQ=4Xi<89)ABKB(x+_8$g)eXBxu z65AS-UcdkIwykeX{+sf&qR2>>iR|f4&-Xabv}FoH%7f2@)}Hm$+ID%ju#|7FOJk(2 zU6$G=>${stNM2G%b%-IcIe$ia2X>0Bh;`)U%;JE*%VD|fIwsdrN;v*k4eg))A-~aP z5G-+xX7iZ;y8hpZv>Y+E`-x}H%b%_#xz9MMWAHkyXf?hh#D4a$Fv-2p;*N0D-)fil z5p$V#K#VA;p5^7i5aleylHD0N;K{ZcHd;8-n$_Jo6_g`?CHgDGU|h|VJX-&J&J#Hs z;vX`zab=&go>m{zVidyktXAyV-xS?B_0BVGHTd9Q+&(E>_e$_VgZ|JH+6hITY-}!70Tc}&~Vtn4x ztaNVcsz`qKWLV7!>SQG&oz1ZKhd4kK0w?{b+~_Aqc74Snf;%6#q^J*ad}MCIHl~>t zt>kc$GM%}&`{;yH{z&DHQx!hEy%gJID0h==-77ETr~0Pmr2iUpTCr3=cbMZ;j%tT5 z{fdt6480aRN3EHQ;Hi<~s+^Mez<*qYzh2bR4mL$|m5iG9S4nLmDm|Je7J4(aDO1Lb zS$lOZX}Z2;42!{rWT+V9TW-*PxN&i8X>Wh^!!%W|xkYwi=TB~34dJ0H(dx%`*Q?-p zwGGR#aZBKT_qT$b{SeF?%QAYYV$9C{_wvq|9Lkywden~ zjcpc+Yo?q}ceZmj#gYe^P-_MZndaEocP8|yA6`v}QI5-7 z{L=hPi|7}L$0J11S}3k__j|2?MMeGRt7<0s<26ILFw?>Y zqxZaBu~{wNVeyfI9}SgEx0p9P-+t#!BPr=PCwHybL^##%{Ohq|uZ9!VLkS|S4`K>* z?mqYCXAzuxmsFByWQW_WEQ6EbQS`K@#CsZ>xApH2rCB1m>`v$VsJWoWWko>KvB~S0 z;HRd(wNp;u#~m}q;z{~%-454Eq-;4fQ{s!X9NvH9p0WKfWQyJKP^c+B=g_ZO4)1&Q zGb4pn9l%W0Nh4RBrPOJBoaSmfH{z8lMIL^0(UDyQYvv#2-%?UNFP7YW=T5#8ozV{Kj z@?Pw9on>Ss=D`3~nw;AyS4n8KJ9D~MeJuW{^JUCzdts@^Rl}9?+!BfhGJ;d*!QtJD zTRQ3(UV89m_?L+wslcfUj+=u`7O!7p9y)$pXt$k;7*@%gi)6GK6B<29GY17^iDT`y z{DqUBhZw(LL-aon9|^V|WW>#P*%)<*B|Nu2f$tLh!x-~z9UUYp3! z2XAC8Y}KwgZUDSF$coH3V!~d8>vp<>txgSeE^gq)}S|eQw~3MVe&Y z4NaY|3pMf7t4t%WMz&*KPuNrJa0FytwKlu?(K9 z1sMD8^qcC_z0oVymvdXKT@EdN!m;ESrfVz z1!;lRy{fVmFZ;Bw3>m$?>3Jt|@U9qX<&5$k$>Lz6r-M=>YCgkiEu)hzaZ8Sy`pO}N zQKBwheI)3};l+ysvuXkRI4ygH_saN5pSgDFk_H<0F~|KEH$?OV?Kh0>d8-V; zc;Zv1d`E7@EcA&RE*msuRH(Mi@U+2P-4`a09!nQ%xzuKum2g@FX*kOl~9uXNQOhrE=BV^6GoZyA@5?2+8ZIc*_9Xh<{e!jNS+Hyb(j?U=OWoY43! zPYcJXid;-r$8u3gtsBds`WKJxBq}zvpLJT9;o(wy@$brkc=;r#WAfIW`~I+Kztj*o zYnkb#R26@Bh`%oxQ`vNr)!5=KeM3;aSPwuV;@UI61UngUh1w@FG8xB*StsgOc&|YQgcRyFL|`*2mzX`v%c&G3w9ZiMAv$&G2pwftFcUwvzjA=0S&&j0h~9vwbKvyp%dktX@Cdtn|uM75~jc<_(uH;4S%&+I(Qg=Gcj6 z&D3i#YTEXuW18Zqq&en7S&{j$tcWI8uQ+bCLiszcmwJt_g0m|NQy+2@;07<{o3Dq> zNG#Q~C$x+2tttIX!kU+=FVe#3B7B_2xjGe7bH(}d6Rx(bV#%Ng7sqDjp&9Wi(rqSl zd+lCZs;lBhN?uGg%FD23-I2;Zm6Z@QBO!9sO5XgMQiA_t$&%4_A1ZJM4j z&2si!dKjith59Gcv~Az!m4{-__66r<2{9EzUNp3Y&-^_{RgsGQt3vCRXP0D^)4xyS zqylmpb-i>H^%jJy3guKyjIaAd@JNtLS7&CG9VeGjn3}ID95?W*lG%2Br8ly0`T*2A z&ML=q@w6V3V(+TKxNi%?uIxSgXPa%gpz0m9VddqEvn>CG16>^oM_q3@c&()*s7k{ZgM{y77oMTgNljv{%EQ)hA1?u9~o%W;{cltwhHGLhrwWR*-+Vr`|fnD9rZR z&Qs+j_uUy-)6ys#A@6CY|BP3k@QK&xB;YJ#V|)ZO?L+exE0xr=lf!KyCkM7gIVRd| zkM9&J>7l36j(*jwlUH>(GwVh9sdmSqHpg@f)9ZR4Kc2_wN7m*nYPPEnLpY!(qGp3D ztf5@XPf;CLXs^dpbN%&*qa~xQv&2*lt!^mLp5d6K;Aw7}7$t}0z#4bLsr)6IKB;!mY21xC@-=P-^i@hh;@?vgvi zkS%PRV4M6YYo>kas8R$=T!6C58RR>%47R9EEK-lv8XY}91*&C(>D`{JW7!Brst~6G8)w}qXQD`Mdg*L+K zvoCU{n_mW<_@`w4$P8_|yduX)h;0+QlC<|%``hruQ+N*ZjxeTIfg}X=M%h%(4l6a! 
z0-}Z~B`v@%A>XWO;v) z-UqIXZpeL6>!-kQquH#hi~Alh%DJ8Cn1!P1!ngN6<%wZ#UPWvvX{q6uQcO}3(~v1e z{Np4AC6?C{y)wAZlsxxuNYe0TOhV=-U8+w*i z*J~JeV3RE$E!9o z=|M!8%1lKS=``BYX`r{@CnKB&|D+rGN+|e|O(_lr!lg9X7n!%0H1vALbwC-`if`~ty;E(%X|Ja}CJoPO7oVLx0YEh2iqV8uX&zC;8fqpXmukSPcsg3t6F&Ww zdM+-Q;C9~#j{(;b4p;SB_G}K(f06&l21RG!%cq(~`Ke}Ko8efNH5B77c;ZB7pRYyHTUiBa5=;+=I|epEKkj_@~qNt8CJ%Slk6MLZvMQ2dsBQQ_{gg%H~kimQ>HoB znTmBB%Ku#t+<>A$;`<@N;RWTIYOzSxktU_Ew19S}nN(lrc4xcW70i58TINn3!y?8k zM;%p!j9c0ZOjYe45x-NlN{Ogx8GGGfw3OT79OUd$t&JlJ^1n%m0#>=SL-HTF_^S`7 zF-l_^Oh=;#96hi{AqW{*^FE}KEPgZGLN6a&&Ct4@Q-wE20@##_a$5}AKAX?HOEetQ zuxGni-E3WOM6FFgJ<99>rbew;s|a~AXHJ>c(Y+iDXlTEDYS~~ZqgS=fyTwGMN$J!D zMcoPe+x_GL)+P5L%@k@tq_<-Jc`{+rV@dtrJ^Vw;(GFdE3Z2EAn*wrchg(r-jWje@ zE)|ba#&?5(>CCi~N_yo{&a-y;Gu}ID4i&4VmgHo36wdOq8o#V$!o1t*5FZzs!B@fF zBB`2bRj=`9`J@8(C9T>S)oUqPC}^__jW=>w@v^C*vDtm4YPl~iLtfX)MPNFYE7!|w zcgY=WT#(x7inB{!2J)!1jK2m5uMSLqIZI%xAdNc+9vsX4Y*eUt1T2SxVyhI;38}U722{goWFc`$O-07%ShM#voigg9Ce&tpxVg_ zVbdszx6;{XluJf^n+%&)Nff;)ShK~~C|&&aoV|u+&XkuVnJ=MQlVaCv%jRjf;z3)Wd@M-10^dLHe%vCWvkgo_`@Ef&tnlG(>us}?O^Bu{uJAi&9`KymS-Fq`;wC8g9n=i44tpFTs75y0Ja zE!&stRGTv@*3CaoK0Lof6UsHv?$r9FH&&j_L@6o_Iqr-?#*F1`xhji2{>8T>Ay;m>vl@8I}w`m!JkLL&iKz7WO^yf!2B{n(Ufe-vq4(U@+d7E8iB2^)vY+Bn8KVr zzl_PH;Ymm|?l90L<{O^_pei=Kns517|8QTRtBc@iDbDYNaRoBm10Mz+yFLssF_h^6 zx7DAH*U&NmIaq7a4Y+JiSPF>=0?@ zu?L4;FK2(=ZkVQED34jWM?Nh{oti5?kn zcz^!b>{N{o$y`!hY?e?}y<&cP;+go@1>LQ}ah(b-c78kUy`#2AtOap){ekON99i^U z)wcVpjT$Tk%A`ghA;3N>bhn+?kk!p0a!t`gp3E|{i^Yf($mu&JOlFe!2xf1=;A zSb5gazJJs+G`_;H!;bEGwYK+yX8u%ZW(~w|xZ&VzB^?2VBWn*ib;mC;#@85{CIk!^ zygni*zLyKI4FUB&JX&^3HKkGkC|!=M{eIlGdV>y!z=OE#porjwS_Apyj$12=I13-` z27_p|bSLJqyrJHK`wVF^63l$sGx47n?~(i83~V-Q*$U+d4uujSQ~^9R+n*_WvAN?Ugx-rcd=MW-Z7Mx3r(%mP`_M`I$4@{2xVHz#cVjDVa8N&F?I^9o5 zW>(doVrihb)zJRtC4i$&2+n*V8Tk5f&F_d)XH&exH@{jaJPz}s2zn;eteKg*7KuHb zMc@{A#(W-uOdHSX7OSE<>7~4fFOUA>`B#@gQp75QB);HRVMl-XmzP5#@z38yUnP{p zG@Mj!=Ka_EioV1W!Rj@?(H;6P0!zSAiK5EO|GxYeW&FQw!RwR%KesKmD(BQUxB&i> z*8878;SFH^w=MtMmj6EthwG^3wLcz+T+7;}Ve$T(<$_{v{L({#z;$nDX~gFFjN=|? 
zC2*~zt1eu6mC*1Q7azA@o7VW{=x0ny-44sZR3Bewd*{dd^cnsXi6-A=>umYXGai4; z{DQ;P%=s46Of7c3rYVhTn)12RaodQF*bO&jjk$ir7L2Xf{O;ZU1e}3tXIH%-E#8%$ z{*Ji&`-78K_-$eY1%&gfOE>{^QTpgk-^T;jt;vv6fN{l+QU`mPf?qOJ)5XdA%iaCP z05EdrjF%Uq=}1-2a<}SqFo)qJIV${Nq*-njz=*LzPK&X?-lEWq`gb6$2emF93_ywp z9#ETHh6bE31Q&xeu6tTG5xHO*jbtK^dM#63A0{NuQhYP@JVmYkUpc_U~CKk7|#ix8#C z>LR19jw7U7(p=UTjlQ+~pwtxeoU+PCw4k$8;*N+IF`U=EXE%PWZms)h(Gv&-*v`e% zsYIy5^b8%12WFTq({J4|93g+yQ8~ZctD|OK%Gkei;p<;+)&Wfduf^p9nvvcT_)Ly# zDk~HLleDjQ83w)SQEm&(?l8`6)3r7T3P+Is`Kz>NtUj3D@?Yke`N*QVRJ@=fC=660 z+GCm%Bel_1q+J(_S9ZCp3Og6XZ* zu+RpZo-xPdg~ozO6)!Jv`=7&T_T3dO@?s5khlB9v8TpDKeO2kQ+bjBaA|w_3i29pv z{KXz<7jP{l`{Z(As-sr)fRj+Y}x9X$l_ z!a=@sD=&bqlvQ^3Gm@M>d-e>*beb&w9sF_$riGY zqxz>Cy|LG_J_w=zbH@b!MQyxP9Q?Vss=~hqzvyxQ`|^!#`^V=0Z(IJq-X@!BPQNzg2oj&j~w0qGcDeV=UmbHgYo{ATSiTRM*aMk4XaiM){)GvM3r%6j}Z7(*;{+cQ&tRWnRc+-ZA=&+=BYewcB_ zBDpm0_2eA_n$Rye0Bx~f7N>w~!~uQ+EGRaNT6aVTfCgf)y|Bw!KMMMGJXU~8UU{c& zqGFWQI~O%#M|e353^S-4Db$^3GLAW}_3k&;kUGuk(jL0QvE zk#BkYJx3O?N#*a60y51uN9`iBsw515G4>mpu-X^e+lMYoeWVkK{!Y|MZ?5~^qYAKa zU&oKE_ac4k+i4rQEWqP{-*$uA^!l^fEJ5th`|^`JIbXklmf+xez;AxJ4qa`BhmOD` z?Q3zu@{;T&|A)QzjEf?B!bTNQQBeWO=0QZVWXZ4+lqixjlA}n@;K&-75fCK_5)>IS z$bcXjlq3i!Lq>uiIp++w+7183b>I7bzxUJ5&uzL-pFXFmPSsP-qcx+f@GLxHz|mmj zN3Z+1YWc!zu`H~?f8Lq64m@jEN-~2)JgyuhMPoUDCMT-9X7NF!0cV`Cy+T~~8nUOr zNw~*4$Jkh1*ky43ae9?#r-2N|QdTy(>s~Z4s0wU>=rL;W_z;bB45f=hgPDHY$f#=mZ4*E0hMnz| zvN2FHP=KCHgD6m;QiUdV>ul4dki;=SC@6rsjodmk$`PPPDBMRT&oJ%aR$nz|=_<;J zp37K@9{Ab`Ky(m+^4EZl`fmJ&mem4~J>Z~)j-v$nt5cwD^?4OEXcC4?A2)glPQM-P zhF0?@9*rUkpR{+nJe6ya2*? zfIjPzy1wfoJSc*p?B*GbxFnwR8lG=3Wl97kVY_JGBcjSthOM*MzRA_C#~bXYx^=o7 zv#PUwr^^DXhZnw&EDKgocu*Yvqtolp9rQn&2n}}~!D5z_R)ZHl%b=khl3jYwM-U&3 zLtR0J`q4m$vuF~Jw4K_yhLSw#0FhJz!gApgu4NnmKX}acDuZ@`3AAA4N4@3D^ae@r zvAg?@)!qxiy>P6XOmtB=l;0>c@mlwGL1IU4=e(jokD6c~^j;EdyMg8|j^kbbJ>Czb zJf@s}n$S<1JD!?`gT8!5uQlJ#k_Qt>9*oyIR+7uAt%JE~Qak1YnlI)-w_xSCnRAW; zZ}S-7Qct|DqSj?&e^XAReW#8kE&XW;S;nN6);m~6gq!m0VS!1|ePM z7^=bYfVjOcjMZYK+cF_=J~bDjTyqx(MeGn5xNESMETSx51>grWisym;dFGVUrx5*# zl9gWJBY2i+9{zAdcwEDlp??CgDS2mH9rP2*o+T6kKQgxJ+vJ_udjy)X9io6V6TlPfp#x&ip#?REW)}Dn+;j3; zS?2`EBp*Ff;J%(`U*@~j%;`Ia&8;|a@-@ObR_Kwl1kJ&03y;e>XCM34^!XGoim#6* zUffANAF&Ryqv>vAuHHdTSQGyq!uz>pUWy*x*_K6j3=Q%6zSAR#jA*3P`g)W<%8X zxZg@qUm`Ho6l-sQ+Tt6>%~dZv@jWrZAZn|phR5l-S$N7O^>kkMZN4~%z-^HL8_E@H5x)<2BJ^Fa zf1}yC9pJ|*Rc`;aD8I*57c1Y{>*b=hZO5Az%aqVOvS&fW%Aq^?U(5T)*#Uh3u;49w zXgbM+BP2|P9#LpV@PkQbi+pWM98$q>&tZEpN&GEIM*!(r(EISzasDDrKnSIcS3tzz z!ja4?;2`mnhdun2#kY9%p;K6UwhD}Oh^D|sP()XV&p9z*dvnOKL&OHUpw(A*k;$G? 
z^3)uvWWCD0bZf+0HZP=DoP5ud2$In{dE#QJa~}~RE=~1Pd!PpS(Rtn*R)@RqUT{_t zk%KnMF2sAcs%x6<h!p~kb=h7pUCJcQsuvXC|hf=$B$7mT)m$C`*q^mm5af16orLS z!%sw)I~b(z=zT3}Wn2#KFDHq#kw3IT;tFIeSp0CvAr7Jw40q+n3W>-~Y0H)zj#n)O zjZ}a^{%Tx~^p%G5r3*cIP5YYGkBOXCJI3%toa7f;$rIZaE^v1b zG2UYp07e$`rp69xlU49J&k=qM>b>!?Ow31;v0g5S1ao`Ywu{hEsH$Ugo!eJK@;!Q| zzxK3w@g3u3+2NH<@FyeP!0DsvS&2JePDp zf4KWzL+_L_F!yFIew%}5bNJ`tfGCFB>)XltFL9&t#GfkXK5)8W3Sp$2QLWM+_njXw z^EK57P&Os6mp5%mrMe&>G&4j(it%ERBVPaAo;XihBa1z+12tnn9Z5-Nn<`&y863XD7ptuwK7h}y^DBNxrL}n2xKxaH2Cmw3_@;-UT)Cc9Jso5fuI3Ds8V0E??!dl;v`fIk)R+OFhbu&3Fm z$zeRM6rfGLI`o|9(r)z{;54wIg`ayfdP5l8qaUwos%@PLeK zG)eNKu`jQrE-MjtNgcq57_s1y}IcK_HDOF<_y zPoYXDO(ttFz_|btv`5_X;+0Vk)$C)y+@5lxRV6N-{-|upFWq~PxRkBqJT3X)89|+&y&JW8<%+aFLO6+??bhA$|TTG+V*U@8|i+$!I!r>f{vAlxRw>=}Y93 zlof_mrJ++vnKp-|o){39RL|&yM!F*OB=xw4$41GkQsxDly{Q_Hwiq5=;ualZoX)Q8 z507x?pxpNqA-l3oZCcI1_I-G_gSz1a5k#YFz=-PCzPav`jO!=LoaUnGDpWiI$_7O` zczopAwMkig=-Gw*DkzvAM({{ z%rO|^Bfms1X*%b1k};O`Ev}utNYf-vWerx1XY6A+STzHwtpBlSYKeO2J!Y$w;(Pa_ zEHB~SvK$bu;+3ePX$uLvc-*zF=EU9;9uhJw(#yY#Ovq%55piPx{?C6;Y(9dJ-P?Ba zg>QzZbRQv71FH{;(mVm8Th*VDs6_y^G)tl zlExv4$?1=Syb7+c_wQ`=?WhxxVn#R**hp!F;2q(lng31z4sUtpekZGJLM*tkCejb0 z@p}?TV(a%^L17zMvx=X_tk=& zG&tSrcQP<=fRK8m_zY#yGRAtUM7!J z(LiXYCK;U+0;yGxmVl?XdO&Ahi$m3Q&*1AhU_xeqK+#O{y2pcL^OH8|-?amborvQq z1sMn}NK|Z=9HuZ{|s4rEXm{FP^hvx>uCyms>Dz7sNH$N4>9gecm z=u+d9GRKK@Yfhf1xCZ1iX}s9}r^c5}O3yc*AgEUhGPq|KN?m+TBJz=^U+S*Js%(Ei z^E;xDFDdD4C5z2rK}z&Zk8=uNcRa|{TBJ3KxwJ8=X4FmHS%zhG z)?7Dg7GA%&r7%god;EX{C#Wb|D3?3OJ6<1>4lJwUa8r8^u5V^}> zue!-0?B?LLG!KuARjdjxyAhnyJwMis;<=_U_7>R0%p}jYZUjv);UyxjSeRNLe(1ek zSbUazmTIq1h$yQZ@w4xHMMK6eRa2+}4e}((s+Ij3c;_5aMy@Iyy$#FY5sfIEaZ<5!Ay6*GVm-Vly?ECp3HWBFWjf z9i#$mG&hSd%th8=JDVfoVTvA4jP)jpKYP>sLtl*6m;JhK54fW^k zcO#yrnrTN>fQo|QA>Y9D+G>GTHC3u;Q^|`BEZg}vNs}WXuC0$UbqToz4t0^1t`@Tu zFw*k!;h89eIH_2B*$4QlLX~7`%Ie9J=?Om40nC&0W0DKZED2(GVXCGRE(t|y-9sZ- zu@T0mWZH>Xrh+=3*`TZX$Xcy&Ya0cpqg$6%lDQ;Ll@G%e*5g6UZLsg!h+-lErn+BRkFA%{3Qt5T#E3UyY- zlQ+99ZAF!35}Uq->?qbizVFhny7wVP@!1jz0Q6IsllDn8k-!(oYVzpQK{yonn3cS} zPtwS&6ilU>{8j^Gu(sOxBW`2MU-@k>#Ek*rXd{liXfQ~)nLYn&^JJo4i?r+O+EG<% zVpY$6m2|dfep}Ork7-uRHhN~7?fo{oouP3zY1B=@o@T32-Zbt}x_YqavKIAbXnXs5 zP((DBs6lq6{HFWZS;9osk$RGH=4juncTF8y9Oi5(Vt#W5%)N1~Q>pD(yKXN|%|2oA z5qiy)B=(6aG1(3L*t@!*XUeP^#d%d_HFzJLG|)KkD_kAG<`@+Li zHXEI7eEU%gS~iKaQgoePx^w`b^-7LFVw_sK!+J8B8dGt%wR7L#3ImZSmT|hfYoxT{o?*VMEr*ONCAKyz zx=W>1c`3tTeq3P^DT7SdqXKxRMmDbDCJs7|)D*&dn_ z{P-g`0W^E&aUC;Zng-Vq_8TBy^mC1je9Fp=t_;1iWPrNFh?Bx9n|fhLWe^>hE=Vxp z6%<0uF5xwS-R?HS={h)7t5}^NZ0du^4m9_B4%)X>hR3Xl_MV9K&Vv zE~lUSQz`j%NV9i_g2 z@|?dXQ@;ftg`^cD(e!!Y>mRo5|a)M0b6GS&BU}Y zcSt36YgJA83$=DS!YXMlrYyRo7F|6Hc3HFQ^qum(iF<{BBHlhxQIuWkiB5(itTj!3 zcE}Ef4TZ2j9xo9=*jAOuB`wW<9NGLru+v^^es)b{8+^>hX;d1#QG0fQT zD@wzDgmo=(Lkwtcwc}+Mh4EL^BWgcCJLo3euP4^3ncEFXMFO}E)e`Iy2M1Y&3?KwO z$1VX0+;W}MD-df-w6j)k6x)~#QzVbqPSXQbwJ^n2bUH#<2cphiW?q{aG7`21d0!pt z*;e@tIeqORObu&AxO$IJG_dkRlE6DVvrJT3#~^kf=gm7l{QhCOV$_>l(G?pD??29J z7`I@vUw;uxDS-P0y0-=vs2-`LyvoI>7wZ1e2W0rdgP3sP*mGf#b(T<6Nj81o0qo7J zQ(#j%>aeqNW`y@~;2j67qr({%6ko97-KAub@bi#))<|U#RxSyT)ZnP5hyy0_(%k5( zY$;p@o;(1&^1U zLQ{CG5#qK(%{=Pb2Zir#9|J~1xmHMw-qb!5cK;}UM?Bt(cn&Fpge`fWzn2CiccGJK zFom`0C(Ixx+5`a+Q;hRXek%RM0ly<4W}=s94X1!)wsxIbp8m>Zl5mXG`_SqWkMy4bKoT2M1P}Gu~5!d`_9#IHq&jIJIP;Zw%Z`IO-Z_ z5LFqC%0Mjpfe#wTT5{jiqOfYXBPEbNy6OFj4bn(qWhv>ef_4Tyn&$^9owGxz(hDn{ z-+2UVyk7nEX~M?Gt@@A1`E^@Q{C~jK>7}7al%QudIC+zZ>BXJj%tU_p-zFdw4ILPo zhj1&>OqE5+pNf-a0S^TvHG^}q5^AQV80I{bJIiulN*9VOwS@8bd(4*c8pIlP>nA24 z)9Jmw(YF(;SU@$QLak%@@+{5xY#C6z-b~cwGV4Tgne%&+-~b>jjSI=h>q>3Dh%2g> 
z=UQdH_d9abq@+`NRaw7Lya`!61+Zs!;ABaBF=NxlEO_^|xCj!CRq$Rcodxsj*-NLj zwNcTkUv0Po^}jxxNY7D96y{3Q@iMqk6O`b{Df=M8n7 zmHw;~TKl_-ja+HEr116$!nNV&e77MFmBe&JBZD}IxaTP(HP?8JL+mnE z^CAByZ~nXu)xo~KZH&nhNoH8)JaB2aCT% zQEIQFBpmKjX(SV0s1~K`e9)ze`p^hWoG+#*njRq;c_1rQ?3}r&KttCQM55pUoT+&J zw9G3^}^T%#+{{isXvCZvhGh?9+Dl z!s{VM%$Ufa2gUP>0Rbo`#wj6nha97Z+QXf_KjOA#h`E=0&v*UDSxWQ7Xm9HQQxRf% z$#Xb5%$|oN#g8aBvDoM2m;*mxG*IH&AFl%-aFQiJM$G^14L=Ea33SrT1tM>q+&YI#M(aM7w97S@D$(-L${dcyh(n zbZ=VNAvRoe{XUs_r?cL(6$q1gEs@HrKJv@z8Ux$d6jb-kfm63altpRLfpaU}y?b)= zUdTtUQ(XbSdDFX2?yEBBH0{LsKz+e7ynxd;c)&MsqcmNL%*d3zUj8fAA!~#YFQf>c zY0VllHjawqjqBp#maI~8y-6|Wq zjjBdF5?wcsT%HwSE;`Zj>k6bq0z%}8;B|xpC7_b*TvS%n& z6+-qiRQ27_4%lEQ&IgW1d_$+GM#=eg8r4zAYL_7dyIJeg14JR}GmsT!-|7jlsDRn& zLAhtrl4|<+(G;LgsuFXCnd#JZOB)#yH_6P(7y$Qov^UqrR0jaZSSQwgbFeNVAq!-4 zXk`qd^RxE|C{JEJNq;(V^oC7Ao4rhS>3V+FnK-7ZZR!Y{z#@i{8AvQQ`}tKm%`Os@ z?dK-#CQ9giPK81GA_I)@V&}qZh8YDYWdBv${^t*LE|eem8wk z)|dHqY9(gGW28i6Wao>TRiwxL1?lM!P*b(EV3oD59*_NRB(}FvSLyQ)xp~Kl8os1D=m3n5VUo z3I+Lh%IA)$l5G3NyhTh_wcWfEeW3$tuh@f`xD=@~k&ASRY6W_fdT#BBO95?PI2 zyu8(Ry(%hOH>TGPoAuL4M55?0P~EkMv}XApB3(K%;md~)JQfAYSL9MHaU9YT*NQDQ zt&i}&&(kPZoyyUcH*!TQdEF9iX$scatDF=3+eB8tx%$X19y#I%G7@&Bd5$wEcLChZ zDSK@oYR5`cZJ+6zuHhT-xXId|($lk;wL`Qf(966xXDh<-0s@Cq1DYyzzMn|{(2ZoY zt#mytK=l}T{ju38oyo+GtbbdO)`iQUQLnVICH=g{NM&!H?hYg?ftIR~yC z2bBEzeJp}kHOg2m7xsxNcX{7+3I1nc=^wA3n+%x->eE*mE`B~nBzxLL4IvHQXbl53 zgCfHkmpVyXVYAmu&m=YuA)MG_;Cb0%j|;*%TnL09`qxe$9;C>8c_2_gp6BgkY1%StRL*ksiRcvVuIBSZ3^eqbi@6_t zsZYPkiUz#wvvLr>#7uwkbkGrYNxN+*B0k>S0p<)XcF4JFAwl`d50)u(ArZkoM$PtQ zQR|q?%Kx+h{^l9Tbho2LS5UsQtqU9*kk7oHuNVW27z1OxPQ&RHA~A&s zvYDGGK_*b4p1sJl%hKP$YUFgDLbqZ^x>;A|%by-1v49MIo2&V^U4yseMtk8zBaev< z{O(L(cN5UA=Y0s@q2WB)-uN=V#g79s#YeKP4GL&;#;gWjZ7!QsOJ<_)lrY>D(`TkhjAA%rJR~(F^F5mi%p2u7PfdD-$6l?@mR5Xs# z9grf;kLh@tLzz`UdGr}(&zGJ2s@r2sBe9pdRaJu`Wqz8MHj2o!w^3(Zbenw*HBd+wNw;asD0^B%9Br(`ayHfj3vs#n~FIFYWJDvD1yp7R0 z$YvdQHKMiaMMp28Bda+KI%=>RfU&;%L{&Dhv=PvHhG;k!c$pvb#CWCrYroPW`v^TM z7`|(x3CWzp;fuz?i&Pa%+{ROqTouY(5I)jCibq}+U#|`t(66Zdi~8fUAsk}2!kl)~ z`8$V}S4HTlF1ig=25<|zwA4<3$O7C)LD}JW;M19uz*Jq)-P3ee!UFot$J6K9ba%gB zQn{UK5|yk{qIB~2HwUg^_sxJLtXMD| z4{mTh^8-BAGE}1U;jou=!npeJzc2jb$7gMrWFed4rQK7r&U_hG_&{ys)9XKURJIR& z_p&TaTF#=D-Mt9*62%}Ga6;CetzS@#|8rn}KJPbpo{r@k-mw1-`KKZMpzg{j+Vt#EU;u&CPxqN>!;VgXOY$q#%(9EAGeM9Fm0DD zxWczm!Y-6}0_L3780~$$VWs7~SP9|hDf?q3NE|`fhhJ7Sw^Yz{faR#V4nn2et4vM@ zU_s=j{PD@6U328~9SxoOifk8utVK`8Co?bCV!P7(Cx2#>{&QWNW`+;K@K8T#;?QLA zhwv&KD1|19B+8WdWFVgi-MN3Dmu}_rrL|b04TGutiz}@Q30u_~!7yfew33f6X<%l~ zUrTJ)gv4(lG+BL-<6rg==S9EKrZ4;GIhI{>M*iTbJLP+<9WTL$6jD7m9Rq9L%j&DD za3b_0)Lolk+Dqim_&KF1xME(7^OqmHOCNF%MZs^jm$dsU?N2?G38a4xYHBZ*d)y44 z$F8%D?twc)H-S^7D9@j_PlCP&rIQX@j1uwWM9{)uveXi(`cS;tdszV3&mC&l%jTDdCz9ame~qq4eV|{_&VHc+9^x z{@)X2iYB)&)}D_rUoDT#mBOdP4Hyy_n~bebf-*Jg+> zZQ*fEBb>Y$C%5bSz_%Typ>5q%Z267o0|Oc7qvV<$eMNZS zY)Pek)VI%11ok_cqh+=@Z@q*t=nDM3|6pp{_7QjA#lFUnpUmLjTUo;C(5F_thA;HH z%;8V|{2zbZ0$5qKzG~|KyUU{#U>Q6+`e^v?5B7ijal;TU_c^Ki)BOJZ-n)NTa)VD$ zBr}3>b^k3hePn$+iMonTH zwmJ}t5V3e+=ez0^V9c7E73eMiY56ipzPx_CO4s)C(ChLPaK8mcD)!Z zEF-P`n@yw-!}>n%_FLusB%j20HUX38%EHoTd#eRU$UTRm5s1Kt)pcp@{ zuOD<`U9`BWae)2j2K&1b6UgbVC!6pSY5WaMBSeOR#?afLXNsR}^upe)yU8EerC`ckeIqV#UKLcOKQq>3R zrRParF$98DOAuUq%7tnR?n{E~N=ok)j?5$kLv9ChHdgTIrU-K-ookRBmV+#kgePR! 
zu}&>Q!%%tD^<4e-kN{}%XKh+0fxPW{POV)T$`kK5z6kg+K{-W12aZ3@2v`HrDwEtP z`F#jMjt;CF0+IsLXJuFe3|u>2g>Y6o5=`4Jc)BlnV$!W_L;n^6I8J1Y(8DYDF1h+b zQKETgduE4v+N36k9cPoCpS84Wx)4YDl7e=1XT>U6XIy0W|ASK*O6$A4;rdPTciHg5 zNp^XiIFi@2eAa4006A7A&UEG^*W&>ZX;gpmM&73A_+y^|P9j4`4*gUk z=t*ljyV-xx3f^D&2-_P89jN|bo8|^2~E79yj@@nq|GSPQB zq0EJ~F@i+;56w96;-z5-%s13!sk&64>>-!{{AvSR>FNM;HIfcj{1QhybM=_u1?hgx zm3ge;=BvfRZx2plBvNSGpc6>^$odVjX!zic~c z5|S!taoeq$H%f~pxwBp^P^x0eHr_jiKuTe)m`lq%AJ40eI>AUpKORPMvz8IRe~4o5 zdGb?~lGGvRlLTx+mF@rH0${xNJm|{6qg*iBNg(Q#Q-7Y^)(M)Hs3m3f`FUsMi7!gO zQ6&~_WwChSAVdD>slN5pv-T3GM0;={7RR1LicE(iq#;QMwaR$!1(I?bA9oZa3PNEU z{f~=jrKcU}4;9|BdD+!_SqyR+*gdhJLHKqfa}1n{GEy}Zntb!&rs=-4@Nq%t;D~zV z6XKB+l4wnZmUduoVIkklsX62I8kCfHwwCkDrttEnL+g+;xTK;skkI~vu2UQ`^?=0k}- z79!0ULGpBU+N~_Q>{$KCO4-g9C7rM18jLO$MKT7hA<9S%H(O|hVTO?0R7-neFK>Yq z|I6BD%1&QjsQKdDiU&_u;YczA_GDMEh1g9boZYhYz>vHNka!)5XD5>AT`66m>PE$X zhgM_n>{E14Kwkb8>Jr=8)sM=)cr75c@ogq%Yf_MD$%h3M?yqAN4RgrLP zGVsW!q{{-$ApE?S@)7d##QY{86vVMvT=(@iZyPIo zV2Zc6MkL}Ah@dgHSF!u6knR9aiKioXxh($Cqh9m+>zSs?4j!KJTe%3FsP zt2yPQfH4Y*EVWCPH}y$5Ff27IL+ti_K0DZlP5MSXP3i3Y;?$RMt>sThT-Q6Be8Y#M zCgOI4GXOr3MIsdNSY8EX^Z7bcOv~p5O;+r=wAvRZ81W-&{9 zfL{pE_GM_}bhSwXpvEX;F7V^fpjjtn^R`OX&RnFyVv39~154Vo`zR##tW*qCM zA^jOo&AI5n?8kFKd(zKtjmoC9Q9g1|Am){c?|>Snt{S;XArxA|&<=<&=i15fCKrv< z55HE^`qMr$3)cq5?{s6K5)!he$&3Hkv;Yw3&Xc&(v1lPX$_hROO>c7w`!fM}y;uQ5pP2^GY zH>#!!Yp^BT89SI8bsLDN>7JxkC}oSv5mzJFjw(mf)8h^9RlFV!jjP!5mpY91>>S6e zQfrEvUWk1VFkAua=@Ma-p5&BPejt`reXVy30H5Dti6@{gI%hN#^#WRBf$W+L?d+LgFn(}%<+Xb2POlj8?> z$Db0M#N%ea2V7kN5DWf6FY$v=VSzi}K=|&GO+6tujCK1h8Cr}>R3o7tJkd=Jx(ZuG@F=9hr8%V}MZ%G^iBUvMfP!)Nm^^{)Y5$(khlzxP+O2Q)G2Z zJCG2{y7QJyJJGQK&Jgp)5lxY|?|@yNz-N9jJO;%W>9*FvG4*{`+9Qo?O z%$lycT40e0fHTtFl)h{9b`wq_?10JC$)qY4=k5 zJWRxz{^qxyGno*(4&@;-Z$tApI7Gl14UHMEzwu1e;E(g%Sa$Vk^rYOVS#(*FBMwsY zmmQxH_`F1Zjgk(6nzBn=f!dqW0e90Qb=Yw%*t=mOQwG@%$96nGxw1|ab%zZnl%~E7 zej+>#>8EupD0~BMS+nd|-8@9!{JJGP7L9DU>-8s%50`o-cJ-B-(qj5?AYLcjCk~Ul={xV#Pj|1^B1XmWriBT$;TTJSGmkw zO~}ld?u?l)7z8#vL^?d~C5tF%BX(Jh#msnHtV7dEFXN3ND9k9#t%PNHGfI-$5Oct`fBTx zkA9{(jV?40PMkS|w&RL1X(XW{%(xsUK@JMzXAJE>NMz+erH{W=SlO_Bg>sY1rOKY+ zpvaP|?MByuji22Hu_px_o}LBS?Osapl0*o4>t+e*<&g7?5vArg*6R(opB_K`*@AC$ z967i2OkPE>mIs|E5)Vig%5kE(3y-l+TMjSy~_KFL5g@IT!!*w*M#df;uUu@K7Ud6FQAKHCOn` z6NMNxwXcg)8)I^z*pPH(9?2Ij;ri1K_$&_c@*NMbGt}BM|QnMYf1fsx`^7?94O0E`Grg|6Mce2>Ci6&br@7r3DVGFQs&$gPellq7DhtE`Ojm7q=-lOUV9Q1$5mqF;#tfPbj3Iige_))FDKc<+fP#?s|4U* z?{4!U`IZ8^9+!6Zh=cgcTJ4<@U7^8&UX4)ce_rOgRt%r8u0QMLi+gx^2JfGbKtcwP zQ>!r~+ASVxbcq;2(;4gJPLHg^)>S#+X$u1ARg3x_aR9tK#~H3|Igg2SWFfMrS4Yma z_*7^$W0RDQwY&u zKfC__=YPA5;;9=<7bOf5=jfGbG{p%N2%vs)fPu}oBkX=J5sEO8e?VP$9kMrCP zJ~Li@tTk-hdzaJ-Ee>HO;=KZFI5YiwL#LOQf}TU|t$;amBhnM^6<~D=+Tkl5U0k|K z9L>b3!R;>%wv-56AZjZNw$oeTI&H!+SQFfY72EB`VR>C;0mYDO1u;MBLw_GogtYr% zk=-G`;s8)ZL=QNgb zsoF2+1s`lI&Zf;8Ukv0D)nB=boA!x>&TEWB zUNM;79q&R2M4FLAma*qslVDMP=~+#!A&Pcc+onXOjbdd8{BGnzVu&U!VS2SeAHcEF zGH;WG@1^JUdQODydn2}5aU5rT&Uj^gectC>yS8B?q0fhb)(M*am@~hSG-BrNK=4@UcF=8%u&q4u@R8 zMd>?Art}!2#{gR@DJ;+mvXupd?}F{bMh*(UB(+CLVK2={8Wm+OG^gX>ZE@nQEA9%! 
z4O0ErP4{CQAra8_Rh;O7=d(^0B1_a|CT?bF!JW+oY?*Z;sOsLLD^}4z`#kTx5dP5p z4*>nvu?^ajx@y4wWjK((D_M`KGu81>%`9de)xjc{%T&!uWNGY0{G^2iwu++=oZ)Qj z=$Bo%V)~E!&ibRsE{ULyXp|^uk>`f(!wk2~zS~CfKF0a)s;%`cT5T>6eH)mWt2wJY zw-NQsr)0qolGp6`tg6ULdv>B1)?u~3E%(g$=C|Q|GfvjJv}K)G9*oav6k@G1%%CU> zR!P__qqgbP`gu}GOYf2@``b^F?()f>UlO&Z^OJw4*n8})oGFnIP)IZbTA|R^0J`k4 zq&yyl`Qz0jv+uwJ6q(Ye#%2}j9hG||5wDjzVWO{)P1Tu{O6VT)8C*b)&_oLJh2w(r zh1n2o&nN}F|an#Q-zsry81zdD6BSn(YszwV1yW;7lbrS zYLXlR`JO9b+8FmJ>}Cg)>KH&UnRD!2QZ@|@QFmCm?;}}SeSXb}3=X%>u?9|{gE7oz zxmT5%Sk|sReKj**$HE{pFR9ltlj-a;ag+5bh&PLdwJM!M1bUoR5!66$4;1uv&9#J4 z?S7f!Idi)CoaH<3H!{1jItl?b;?+@q5iWGjEJsROF>!#QYLx!$g#7Jj&fO8&xR0kK zjQG);!t|p%bf+axAh~Buc>=<0Qx&R1p3Id*p_)?V>`Lx@LXyTNJzHk{j0jhf&^lfe zf4m2{XR1OS2de{F@gr=-v+sRUUiED>z7XUQg&6(5s(Tg$?##I4Xu#ZTs9Pw!r5;9z z0uUfA)@lItE=^zvh)~#Y^O#9-AR4jOu}(>BmSfFOQxMIsq>yjP7=Uv+3-=Z24U-)m zNf8ePhC7zfyChxSxjHxaqvI^U*Swo-3v0uO%RQ1HO#^~w2+)n7D8Rnn61QBvXyq_f z73#vJ6cUiO;a(N`gsXJ`8j7DnpLEQ?N@!E-$&F(Bm8QtwxT+bp5$Wz%(@5|#s=4BG za;*fu1w%KP>MBCfAst3O>%2A;kwn|~CTMK(J;S{hsAn1_5-GiTOb6Zg$9FPgd?#KH zw3y^uH>O*bxMcr}$=3_&O(pqw zX!secqA{FoPx0i5F)U-5F86d-wlA%N&|4+u06Gl@r}Z zcKO)8awnu%z578D@<=^C2iz0VRb z&qpiOsQz{9J!lO|T2W8uLP*l;W{-6#{PibrR7Wm$jxX0c-C?2s|IW&f3^`sJLHdlA zTk-$ijj+^mP zE}RQ5YTgjfRW92X7qmXSf*I^SEMxIwf@JqL1)uS-1@gXK@Amg#x{zvc^wH#LsC%6u z5-5E>WQ7&*k6ou&r9>+M$}OUFdw3+o@N1dqxy2uO;q?psYhRc+g#U9=*dZQEkv^!D zbBN5+5S?|V)K6%>q3lwLhlBo6XN(EI7mvJ}>H3Wyqst){5&t*j=I_CgIOEHcm8OeJ z;bHOW#HiV^>gW~bd(R!C89p=P1zkAt!Y(v(G4N(FtT3tqldO^d_@MDI`%V7>%jsyu zMl#!K(14C>PRAlfUZ-!Pvq|2=sGRt7z-`<^6vJ)*9Lhxde zCuTQkd3C7%vK#70M-k$>y zSdv4_((^s&Jjy3A3G5Hm5uy6Ojn#j=@m&dj*c*JbY6x9~{$nTe`iSf;#5Oya*snkrC!u?ZZp1|9mPZJavhp?lxRN z%Yha&8++1Y^-iYR1D5mWuV7yrM8$O4qY$7V8~t{dP1alUgSk=2ozMM&<{?&WCjQAD z#qURUd4IeWA~cptgm+T=15_(v53k|LY9>x(e~2xA@@Z8B9glhO+vXpXqKnth^<7;M zeXD%1`(Luqvz)M3IS`4L+`AGbY^U+UZ_E3wP;mDLD&-3%v~w|q?ECqV(kbgabRcp&n(mATtg62NNWTIZme*#L zXdk#KnGozV9AZeRUTU(obW;EAI*jY{JW)t1bKN6y=F=Xl+w6U3Pa}&R{SwR`Exm=s z7%4h2Tu&sUs4;SpAfI&|xGvlf9vp0Z5}bjqu;Xh{R*MTJs-qL1>-hDl zZ%{R8dkqFOw6NKY8KLWOUn4n6zehQDw^)tMF~urD8gks|s6K$l%5{n6%03y~?qnp$ zoO~=VJo6@znYs5Bb2u>hqF2H7qx}x$)-MdkS{=vv2SYWA z20D}Y?h0xCBxoNLQ+#oSOEL2D`9M0Jiw#hsWba~a03R%2vfmhkfo=5~HUc1FFD60> zGyTOYnv818qfNn~MrT(FA<~y>8l0|KI$T>3MKfo%0f#ms`~|92JXj$XNHYyU~m?iMy=pe2=8?iHNXyQI>emqNfWTvUksT*EL0%-4H~w0{^#zAXEZrmyW^ zS6rvl5V*I|n?FFH2PZ_vf35L_ZbL@9ZjyZPDCG};ZCCYUIzqXQIzk-WNbv&%I`w<+ zNa=8?+i!78NN^ZP{RiUId29UV1OFdmMK2?Y7GfqTOh?Hg1)Vfhwx9vefS3~)G1ev_3!uH^&_4Rae#x#mM4BWH@mJy=7SF|Z#B(+ zx@*b)>#9HZ6WSbE%a0RY{j|Y;wPKLRx4`9U&#L5pa&7-{d6Wu#H=ZC2KYWn?xa6M$ zyP*n~D^j!mWZC}XwJl{}cQ#yM>G}`U>~4#3S3(gk=hIIA&k*vTBa!(3C;iu7`2S0j zPOI*e5eBO$(*J+JOs^@(o{ZVq!=cX`U`($Olta}#Fv!GFYnz4$w(!1-M%Y3Z0VI+H zP`IREpGgycFZ`{{EnqQKLpNTvK#x@XgK$Ad?HTHF8I>ge-O&fb#h3`NlaUF8`HZKj1x~HzZu)6ncEb=1ha2bQp0TtO_Z}fO}M01GxLo%T5 zb1~(T2T!XJKa`e9Cy}}-boJalV&hMP_TW=vzF3u9kTALd9bE^)@~v0}rWJ*G>1BRx zXRflH`#>P`7e0-4rEzaB>J5VmWzwr?D7ufnaa*~1cKwEqMT|=`d^Yh(yRqK80=@1D z8!Ua@YVksCtwa1%nYYOy8fbc!cd9LXI&|c^{PDQzUmujp_aLDO+2?O?6r*~SLYQB) z($6Pt%73OLWaX|jnT~wA&l*IrOm=)Pg#Hs=ocj)Gd^ju7z(lq)u`Ry&?f4krkbGc; zylocloeS!`VI0@7jgaahgtbUCnKMNoV1U0!Y-_E_PYG$rhnTlNKQO}3S5Oo%iqb_U zm@F=n>erBVDNedb+L?#s;gJsMYw{cgraER}>PR}(Q)K1Sa~=X@(=}EbqxjNB>w(TQ z0+5~kJP%}a-;Y6+nYr3FB{5vCo-Id#9#Sa5cJz&bP_2qPjD-xF(ESfLLZPpp9eRYW zGFKY|si8uGZ1D7w)d%;kT&pnzgV{_W&laKq`S@JOuP?2yR&+U>^Px}YPsv}}Z{WAJ zvhK_^>W)pT-SGf-(SD}+PU7n_IlTP)>_Z=toyzn&|)c= z2E2o4sYiirFa64wQ>!Q|^k<1pr}I-h^^mEJK{P*ScC!t67W;fztiIt;r(c9&b(`Wu za)qWJ04w)?5%4LDr`cv#03h~We?{u{{G8*;+!GG0vI++%XsQ`f6RA;ZTVH(FIG~rY 
zdiDPC2;3PnK8Uux#(t`+7;bws(F~Hh_1nZg3ktTKmAoa>4dY?ATeG3aQG6U9di~D8TbfziK0BJ9;!T$#C;sQqS?8i>ONuUPui$& z-Dl>VCH3GWS_DDZ41z4d*!y|}v6P)0f8_gb|@5Z1DtwFSt4d1bfA1;V1+$rXNgi z^T~&^buz%H2yQ9p$r6&`WxAh#q3bg*CfY_eoCQK)`SDA%r^74HvXfXVbpbAx&CwUZ zQs)AC*F*OnsB?�C>f)?YWtxX%{G{_%bp)vC9GUXq7Pm#M)SfJQlCYU~}OQF+(^0 z7`tbt<8B~}E-SmqGB%#NbOX?=EDn3H#pgIy##fe$(g+BO7eh6w?Eou*VzI=YVT_CK z${XgfZDyUWMHo!dc|wmkAu_SZwlgW)@y3TnyRZ^IftI~3y zF625+WdF<)`X>cw6^+X$tL_EUaa79x1CED;sd&!t33FlXYo|yQ(KISm5HtA}g7|Ut z9{YilEWD(*xR49d7PxYJ5BEIb4vVdw{=t!9hbA_p{8Uv}I%(N1k26h2vc18fBoz@PSC`BW=>Qh6LF?fKJ7DQWBM1SE zDeE5Vn;!vWVcr5Z;E|g!JT-Z=aKe)e&yVPXr^Xxk%)JiBa{EbV9QE?p!!#HVl#2_QP2D058geD z-0)7ec#8Ft;`bg7zip0O!DdEYy4=VB@j7=jcEZIs zN?)kTJS%XA+uKkcZ`iP~ttCcdH1%2f7RCpB4!#pd4c|XytcY8*=l&B>M`y$w;cZZn zo@pt7&GUWPz8fLka~%aGe3ya1!7?V@9xjl2Be50kXg&I+W~^>ivc&kQNSdm&87CYJ zJ7{BDAKQamiDL7yFQRfvreamyItTUuB*k0_Z7uMm-Y)D@r#V_zGGz#Eqxf3W4~*Tz zZqgWT9<-UPc{f;7q$xCL^S;&bR7sIA!nf$2t6M%wUlfJ;qYdFG9IVL47V%kp7oz{i zhvewmp@R=72e~RQv{$ca`nI`k9a)g^1I+~Oerk9w%rRAlkVg(aU`3tk-qMV$ZANJv?S<+wMdc&+R!XEhjyd{Olm`sq08&J4W&QWUJQv4Xo<|TBg9nh}l65YE(LxT0NZo1D)MS zYHu!nIxevUg%?7B@1d7^Fs)V+r-QC6TmEBZYmeI@K4=JfT^w z-twd{R(xzb0?P#f&pr41Wzp~YCd20P57WT~%e?VITff+OvARq8m#LR8Sh^>$NGfxEHcbqE~WeBst7KcisAL+mx%qJij==IF1)*KZH#e`{O z?%-HRUQqkIVBr;2k(P<^m(Ksg>)J`425G-Qcjh$eQl1I(BWZ0V^{0Fam?{ytI$E#9 zJfArafGD~7iJpl>6E6i(P7CL*1GB_@&RUR~E9Iu!oumQjd3MAz^o4@3BClxd4UWD0N z`C=Q^l>(2;k#Kn!lFD4hgwi)x2jqZ9q9&q+gQX%FW&(Wy`+d(yxs-4EKdm@R&LyXp z`;u9Zneka>ND^!GrZ;$59?=GGhv`U0UDjFe5l#ej3(_%OCuI&&%H>Ez-~_mtP&?vV z^#ohenuGDT6&V9L8E(a~HCr@hPInRJ4oxMg`tJDSN6f}BcimXNm_ztwFnI~l z(5dPj;=f{}k3K93DLQGi+)S3l9TkViIe;E%6uAbu zy}B`N{?JCu?FpJ;#;Wrc8fBlWk6#CE_Pvb9Shk5v97;2qO>R1Tm%;o>df@(enh~112vskdgInY|vwgO=>cdln3t~QUz&^plbI`ZkT)z zQv6wj)s{CxNn~k5lhfn?RJnjwy@k-RxXky z;fS+65$xJ2d0n&P2vzvz!8+ZHQ=y}WNvIncTh)?C)mO~p2vU=|U+%MrFdi;j-NRd9=R&HEyN-FCJ0(b3sBDMt!Fz7URyx6SX(!4UOIq z{dvYrsbw{jh}Xs_>oA*-om5B-?{P$KQOqD=tbuj3X2q;K>d~dS*-tOkURV;SSlhB4c zS>~^bDB^+taaPhL^#bDnGcT=5dU#1zg^X&#+rSfS0ho#eD^JzU2}r2b{or(MW?Um#Ib2wZWiIK;c>IV^M5#ud;!N~QaAJ|5DmpZZ_-c#C&9 zM`qXbp4YaCsxn(ufcV#!{bwcMRk2jLu6I?j@8nP%oC?*9si33t6!aq0Z{M;tqxErB0x#4*V6~fj_S$v zmYrE3K*;!)?Otn2OU3x(`x<(0zr{O9Z945(i36Sr}7)hSD#TUl;b=q3VZUbn#s8GZf<>yfFu$6qSjrBcwwuW&r0oZmsc^H3Rxs_ zM;?_n;Ee1iCn(3%#0$_F zp^vFFjC{&6dg`HXIeDcrH3sWptUKBo9kZ3opVx&S z#%>B(*+t1YkrzJ~oS3WrGl)?VYi+j8QH{)i7TE|VbUfE?K*bH^iiQIej7LjBhB^as zY5t1{QT9S1U{wJj{Stvhqi2*GXEJEH@_Ixj<{)uZQ1y&wxdwsjk{y>@6(8WxzAX;C zQLS0yXfDuAVOf|2Zh&Lf6(plH2;j=m_ccR)`ss9^lgWALG%OY#{S7~F5=c8Ea5C4?4M% zc9(18&%Xs1XEMmcnC+}UEn}Kt0aBKUZhu{;WJC!Rh|ylEMwHpbCP)PXG+sDhN3eB9 zGcn*yEenS03CV*{vXR#K0mV?%vk#(4hm$ZLs(JyBU+IxZ!`n}{GqT{l+Iz!y{YjvzjsLMv&b1k@Rr;j&xp+T zZ_!?sl?xTh98;lJ!u2@@%#Z=O3bk+Dl>yg+d!fvz(cR;auHyJTs7qCc0&c$ju4hs z4%zalsOLL_lB~(DK~LXOTlMo>E37iEZe0 zQG3OkBSy-`fAGwRoRN$!3^|i54zuMEr4)X+!SU3euYjCHOE2lxe=cc|z31pWunUrx z3KD_%JWuii4e#&sBq9n?z^FGhWXB8cWY4HQp#=%FS}C*{Y*BorWERLp55jSN4~OkJ zWr`Qa8Uo6og42!4W;;fOgS-wB0BFc8@zX-+z3#Z!LcjU`aB(CbS$cqH`5lx=^6Qqg zLyJjrSHb?QB~$PhA8lzNa=W4<$MbYib+huNd4%KVWWFEVsOe4!2UxTT{gaTR73PW# zBya>Y9gdDga%=9m2&#k&Vk`4CF_6IFD6M(}NmWrK;lP5jimwxsoQ)jg5}ukkIibGG z?+avu{kpk$(@Fx%GD#n)KcUY-^!IIeBvoULx-Ot!mRgYFb!?1YH3>c|0ES482xqi# z3zEVbzc>L2>g)X(VD@Ouz$K_#_*AKV$;LQXM@(yYL!Z&Br<&jJ+!%)9$~=2eo!!($&tr%!!d%7e)1)`>M+E}u@D zKT54GhKQG3YALVCqN)=syU7_AoKrHL)4uuH)RCWBNHYMasZF^Tfg{>9gdnJoEw=2F zr!7-GW)ZXF{)K_?x*UP#nkTeg6*OeW4=5sBgb&rW*imm8>SwQ`6ct+y5vaI<0 zYxzpOVbB|YXV)cQ??^E4sv|0`8U#(i)MK0|sY|(O%m@d#@|yTWxA3+_iP_*G zWwz3{7KFV@U8Vfup3I8!Z~c|&8jXbYDsyPbltc~CPe0B07&05sxiLh7q-6|dEN+ax 
zS$YzbnF*laa2oLi2tkbLCua2ZuDMcr`GhixAS?y*PK}*DL@c~~%2nWaY0D6i&mdUd z(rBIKq4SbbQD4k~OGHkDPNgLh7nQ}5FHT35klmWNYaw?K}`g8;eCR8lcBY1p?FgOJ|>NeL~m1E04alK$fRh z7ri2RN27q$q_UMMT}6i4PntG7ro3vHF}(;n@iuz&R?*x;@@3-#C@$Cr^2wS%J}D9X2+_h) z;|JxXumq)G$xV9b>z}$x5gK0UL@&{b#+P^A4u0`Ut)O@rKP*g)a0#t@SK5-Y%QtAAl=M$BL+fO4gKLx3ImpV_*$Nct8Z7Ba~ca zD-&U{1sLo};n(ErW=cGsSi8-DhhB8upl6E(+UeFFOZUx3K#iL0I8IO+H+S|DYYHa+ zbySPLZIzGUAKDT9hQ^br4K;z2?Ao6MSesaefHGkFY-qu@c~|@}ldu9O_7+9mm~ss2 zzDWZm;ayE&SsWxH7Vm@mfZ5^EJ9hAH1#&c#aaHF9R9q%~q2xn1kn(ExaGD@*ltf%s zcJ75yV~ZT=gUd^#L_4vc6>hW?V$Qi2=h2Rk#Rx*p4`6ef#$U0mtai0cC=^s+$SUXY ziHfFS_wlX@$6h1jvQGd?HiyH$i%p`%tn`$#*b;Ig=?2ms`Z^;MxKD3r=rzAA>S22` zZlOH)m>6omk*CQuI9R%^opJIgGkXHEYO1x6Y;SkRg%}d7J&q=D4)@fWM>jY5QYu$^=)A8{+;VT?L~xR-Uoe1{WR6?s( zAI@`=_@5Z%`wvvtfiv%7hYl%osMb znKnex_woz1i_YZl-PxW6UW$(m@l5cSl;NRr^qD!lpj{1H$Wcj`zbZn=d&u$V33U!0 z&Q*;lAltfMQ)dj|!XMyk0^46CPZBb>9mNpO$E@~l(J50ihN zg`>x2Z&4iR7evR3r+um(JSy;Ds@$f>!I$s2 z#-%ss%fj4W8y%nHCP=yGi5q?xBl?OWs215e$0Rg5|IX^b zsa13r2TYYw+*%^je_(47&Dv^nsq-e8cKIUr4L>hvG76v0Z&JEKsVki4Cg4dqkU{yO zdUh$6{B!*m#r*^tApI;{%6)zH%w>DTBfDZ;uLA`D$%x#SIk5Q+!3>pz51p0eP+@s; zK;TBJFmxrK>9IZpU{^T+=9DC$t+587M{|HX9{VuX3HB3D3Ts3U3dp&w<3@Uwg>Ze5 z-&vkLg}>Hnb8E&E*N>6d8VqoSxDitseJCmc%FBk3Sxn4~J3nmhlBhk}c2U?S>{ipA zVb1R@_8*YfRYvH9EI(Dy2?SsYOjBzifQxi$yep6DkZEo#XKlS=m1V-`Cv*ao7z-yK zCO@rUCsPX3f@s(I1p=n|0OhlT_K(}t*lnE_abqd=YXQJSrdHzmNIvMrR*-kA>)I#0 znFzH^;>t3JNR&Fz4n7dvo)YT&U=(n6={T82D=6P>W!Mdj<5*+Xy*_dTkeqV5&pJl0aD9t!A(em#O1}p_u}2X?f>#M@Pq6GG00C_LG$ID!GiV4B z(L)WXmg{SRua4T)Jzz5}dbBg&WNH+4B*LUiGAQIc@uc8{I$lSHJfE$1m-081Zu%6? zC58|y=(0|>S~}1AzkqWD2Y2RF^-lrv-~TAF2c&y1UbH&*ZH#{G zz<>X;uFmOyANVd@(p7`L?An><`-5=7caMFWzFK-PzGcOq?yLV;>DLdh7=hKG))b1+ z0@hDp{1>{2CqTe9s$<@}VD4;K|UXDHeeap!6Fr~#xj`g%Zg#~>#S;Z;P z7a9u7!#`aW-*qxV9P%&=k$qGfcunYx3seuOOGy!C*~qZKszZRU^aADI_3)=d%FT{o zot@jytd9WuHkIGTbHE$S3rs!QWD5w1s!tR#woZ3t?9f6#bb zb#rA|7AqnHo0^&63C)InrBAd_7kmKi^Y2FZ?{5F6x2xofY=nGaLT`aEyPY_`k}sP2 zja?RQf(mT#W7!ty4_qOtnp9oRER@i5FV#nMpKY9+1%ssP?ZbfXX#}T(1>Ub zg1rkLja`LoM}D$1s^h;-vhT0Op?MzHiugmgh@>q-GxP60IK|&wx5HEj=gY}8Vd%r& zrt2@S7%m+eM)2|MNXet5Mj{T#Faf!uq=0Yiu^E@Aoly)KD78`>E(7{51r06psD-VM zvBu>g^4N3O4@F`-LlC9wSVg8fz>Fa3l5BNK;alh+1#);_x}GOr3Z4GmnNf5m&!+ z*-i1>pv7shi%IS@f}P*SHXsDnXv>ALBK#xeh%Mt;Z5+Nbe01Ler{Lz1v{gOZY4%mE z|J{1oLIwSFCTrJji$Jov48IzKc2g&J@0(lAe*5Z511|Vp4;JDOC6v?|_PtTx zCFVN>wpSa3;<0Equi|`!SXv91MV;l|BgWNU{LdSJv9nbLu=OT~f$L;>GXCWHb8n7# znIUc4!REQS@jg2}*;oUC*Na#AboG1luHA~_70G6}00ETr&mKHq9Bs=!9 za+amCVpL9S_lxL?*!|p8pj9FyWLsmZ?O%;^lV0O(cH)$GZ95A332*xw!(|qH z199P9O2k5*MMC6+w1xhD$1j&3G}`{w6DH$g_^f=}gY5JjTq~L|T`vE!KS6wALEuG< zfs@?CBm0CHiBjJuzkRg`H&o~$&dMNU1Lnt5-OgQl<#RnSPD7JesI)F5tSDq`I^Wgt zH};D2MTdN@e|lYGCq!ge>sh}T14?oy`G$u(gazB<-cS4>$*{&L;#a|S#l?kYa-z^0 zEM2!^RI}Qj{L(BLYFWWXR5M#IZ6}HKE!Kz;Ny^+BBb;G|p*5Q)9=1*S=Bu&@!R#YuTB!H+pxi67BQU7H{H^+vFnas#&mDy4K9HH_)&=p4 zt=YcUejmRT7L7nkoIg}toV0w(vLP_dz&ZJZdU3v3WAD5h^KQ4GWo36Zgm6A4T zUHnKyO7}UcCs}Tn)roZMfKo&q^`6$O>pSe`WSn2!o4w~$x401U0^;&t#R~&E&^t#y z`3#A!4=-yj&Zcv{dYEujGwo6pkhpM;D4tEWbDb-LgpkEW@t1bv`P6du6Nc7$Lz#p_ z=0|`1<_jbw)2eCKR)fStd6wP-RK-5Clz5B(w=0sY7AhV6ki zzArHex5S#qcK(LT7XeF1SUwvSA_Jl&UUBn|DUNoacX;Z9*S?vFo1@XksBL7GG=gO zbd~&mM)>${72WxdC-(d1@hF&^9oO9d`o4{IM-N*2pK*V)>3_`df5!dKP<;FFKjZ#k zHT^%`xogkt*@x^>_y7*mxv_TB#o5s@*o^D{bw8e*@t>$S9g(BLd0;swg1x(_$=6`Gc%_?$lk%3p+euT6zSi(eS67y;n1N& zVP3l?=j-k*|4`U%ZOC7*raSU_NaftJxc!P*c8J!E8b4)!4Gl(a!jQ66l=Je9cck{< zSw@^e>FMdU_lgk4Px$)lbNr`MWXvhnCeJh*hUp!&bS-{3N6j(EKC;}47u?vfR>278 zt8>E!uLeGW!&>>+XpO4%+E@HGVQEMJaZzA{xqCr0I?Oya=y&nO*IB|t>Rw)5O?HER z!QW2h##9#=JmBo6P~G4~&jX0z{x{s;rhIjFzWMO~8}5HMmH!Xc4IxtuUq_O?jZIFL 
zvBELf(WE#OMjtnX2@Toi|1v|1zp@>Y3JKw=>BsRVnnx`R)HF0&x@7i4ZWL#?Wi`NP zWxM!&VW+i={_m~fZOe|4cD{xrdk2TY|BGHgbZ%u37#zH_;XJR2=~-_`V@0TZmEg*c z7tK`JM6m{0@crhEvjneqq(occFdkz3)u?=|J&)3hPFOYOeFJP7ct-SUwZi8CWugsh zgq6N;Y7_nu+aL9-AqRc?>xOgPO2iP?p-_z0#<#5fK`bPmH%tQ)=VuK?)!~ z56E59!kV+?+m2z(@wu$t_+#r0IniCBqfMzyA;-)Z**Q6Ph&SY>yB~&}Gm7UE*<9K4 zQ)LBdJ3t}~s+mHb?N^O|BB8QNv+WJEt`&4#AoPqy=5>To-`Y`wrE)J$at?nrVxZ#! zG4*bJF<)4*5re1+2NL zv=JwXKCXLHl$_4PM=4$|Z`<>#va%;AS!N#kO67%H8p;KuaxSooe);j_ z;;R>f_OBi_vH$WtNYqZ@x(w-P>9MY;3Cb@HZsSFWz}$Dc%#X1EjvAHhr|81^yXXG? zX00Zi{n&8 z;E>6uwYqsf@o{!wA>Mw9t^BQDwff@`*>L#XV6ytnQ2#v$3Xm#QI^e0W^@mqlca#bs zsI%Rdv%w)e0S>_q!#{a9Ha`Q9D5!6(pxf9Gl7ZW!e45E`gY$C(wv!I2u?-F(2|Rs* z<}vXG>;%Tm7Pgm}Bj&5uvcDg7XYi@W-(&QD|0FUHwVDxii~jH0Qmsv{^K;FDy4^+w z(;acJQ12WoQKH?jfa1L_#D)T0**W=nhF~=@MzA5s_|?92x=1q4Rsk zea_xz@BQ!l;W+cwtXcUy*L^R-HPqyBA5uI-K|#S)RFHjvf`T3oypIH70APN*35jRk9KUz;vbR z-q;rgKpM&jY;pN(vFmF;vfl>kQ(a*_kdlE3J-{U(s7qQHo>F2LaS+1i+-hl|`t4BM zob?rbTV*M17-La+;MxAZmnT^Qbx(n{usApzW)^%fYiKX{tK4I+t|cpW0Kc*nzHrV6 ziB$g>)P8L{^^Ic0iU<1nv8v#)F(pyV`wf>!H&|u}jIh4tQ#fX}K9!9_Q(Fxf7uyK? z+(ywwyOH%zc-`-C615_Mx~fO7Jmtk|ggBif=j2kYHXVqhOd@bwqUcNRn$pVQu%CG~ z(VZgL3F{jYG!beMjfZUHeBSP`#~-vb%8l;tOtc1)JMJH51{acu!VuzQEw$VTBBh`wQol=6HwQq@q`9xShRzeLrq#aR<8kn053QSDm1_u70QD_o zrmJYKs*1u2ya%D6hFYOu0Pj$Nf5@{aC}=ro|9b>IJ_r4O-=jn#PnvI~0` zJm@uG8*eo*aJ>8}n$=FcB7ctFUY#135UI=o@%Vwjdbr0|eBpE81%GCEe>GzP#j0i9 zx4&-Pu+%uHvewsW_i*8<50In*gD}wk^FKih>99W`8lUk0>l8PLt!9wNbeztF51h(R zF%B>O2O}TjqX|Z+wbpI5B%=EKEQp>FYgdp=Phr$ zaRmn}cV;cJ-xzr9)%D0r9LKL(se9?4xoGtji?rU~9vti>Xjb(4-668a%M~|eaM->` z9$oZ8+xjVb|CaUS3F_6KoH1D%*w3++gRvdpXOJ0VfT^k7mjhPe_jhgqDBOl!@bwYD zJEtSR`*XjZab?aoY|X#(>hx~T+wa-!T2Eh{tLr&*K8R#YG5G4fqm`{cAe#2wtqqac zlhSrlP^l+&*{9C&O5dtLfY!09^^7$}WT*I8-`gHfXxV?2H~TWG=yn+k`t%Mc7fJi= zp8bF;9jvnY2gL@zRx6^e%BQ07e90@aqG72ymEY#o(Vo5E{QPxtLrXzESa?JX5mhp` znPG4E$z@v4cDt;iUjL}ESG-b1^!8*TatnHYx$)u8KWl#?S~9gpO>kAy5NNJPd}ts> z(l^eUAFlTr)L)Cw=-X7F1-vcFbUo(57haEh+q9AFgz6yn@nXo{SF2@{zltU6_KyWA zr`PO2gig!q;~qj~HqG}fM?ZRo1mO$GQ-bZ6sw3HY>pHDF+C4jERfb}>+v=RIi!M5( zhCM2o#jj5r`dQ0}fu3|7F>m)xwY={2a8=E)NF~dH8{7wH?(A~Jug!f0$LRM>QI@Cm zhgq|f$1_HLK1Vg#x8*`d--#mOxBG4n&^Tb=n-Ia4&fs}u!*aaLAG$e;_4@szs&IBQhjp3OI`JkJ7sWwA?2BkbZzJ@op)o3+v&gfH<+51b6Y*%Z8%2;>uzG){V$ zltt$Ej~;{_kYU$cqRuE^ef!+_Rr@pW1{Sg>b18tzzwI|0K7nPh*Z+E}UmAi<&(LW_ z7o1n;y+#TDt>3hB3cnn7i+F9fPIWK!YnrG8du_>aIZIl;qIl< z@;`r}%&z?~QpezfTwkOI8#+JiW=rC{>F$~O54FG3f{*MnzTbrm0aJxf3#t#B2<)ds zAVltOHED#RbUj0R-oO|$ZsT{75BZ3;zT|cA=1&V^ct7Z3>AbB$Qlo1Du^P$A0j_Vm znyN}p$#NPNuH2T<3jYCKd1$)~OcVNSwWHFEkitTvyW^BwGN;)@HIb5w9!}qtbYYAU zpjoTdRoxD8ymw#2cM9vQM7pTcC&bSeYWQS6hD5U$*5&z}&Grt=+BPhT-R47Xi`72P zkNBR|Y}^&381B$+3H~w;;;GQTpHLGWdZ0b`Lgya1a+<&KokQ;vw?;*FvOxSmgn5ST ztIfq)#K$V{9{v{9t*Vw|CDO31Y(HOn_%ox+{)ERhhA@3tUq$^~Y54%P*B{N}@BgSB zptFo}A}31HV@*5cxFkjcJ%j+=bU-B6?0O>mZrwcf;wiQ3FOwdRtxTN-hi=@S0i!#| znHXA2$xrgUA=|57O9*Pg(Ayt^Z7WPAAW9JGMZeU=ut{6kEZbdZs?;m&Au?b4NcMVN z+M(a@yEDP|{crGFyUx2^!}DgeT+`uC!ddvIgtU#hb%A)$8wY%`Zi&0t(JJ55XYf20 znU0`Wx+cMBQjchZU2`ryk0Oo!Y+dA6@>H#p5oy6{fobx*=R@z4tzT0W%q@LP8N?u; zlDOW}JawNPtvU7vj3ucrfujn(`%c*iAHIsz-aPDetPkS5Nx8yI3iwQ=kgL(qn>(Ry zQ@^RJ_T4uJtgj3`HdcR7+OzFuAFqWLpE>#w{N~09X~aauM}5=#-fh;5bVh@2ROb4i z-LE3#Ox4~~D?pfw_X5;#(Jrk*^_fA8+pLF=oXD`Yc{wp3TNM#;`*(RNL<bLTV6oYHkEq#kzcJ+hH(gV_cCF$SyUU?$4@CC=KRVY$6=!d5P@kd7*L7y(Fddk z%hDof0(T+cJ*m`Q$SJnEdtec5PDC*T)n83`Bk9G(RMR9|@|Bai$W-=jF}_54Y5Pv6 za9&SoYpsm*v#kzd6B1i%CD0wqO1_?aY8}sE*E!+=r+YxgL|3R}xH{|i8pefDCF#_! 
zj(%aXk(fFRpIqQchKrlW9~Qr~wolbNewtr0L}?F?6G}VQHq=uPWB*Ffi3RgvoVo0LWR};YjfCT7Zq(IKPim{drxn~A$=pn|c2_ay zBxLMyi8qf3RfnX@r)lO1%XDkcHg6YxAt#Bm+Og(3rV}#C!d);vd@!>#CmEn+?G6U$)vQ<7`j-^ zi!6D=H6pk|4(S5$IuA?BDvu#)H0};DG(vx5ELj;*2(Vv{NfIy+?2N{Ym?d_jOb&gRLeqkJSckkpFXD$2^SwMwFIiO2zV+0AMqZ=mo~ZNaCw(kijL<4* z#`T!0P@=f2Ne6DVBlP}&)1~r7)rY>JSWgV!sc+m3(BEgDFnvv{=^3;BMuDf)?ecqN zJBaAXza!*1ChDN7mXkCFsV~T%4y|>;W#&y~wM-aHcR0X|TsAeRALH=~5nL2o@q$4b zBy=%F`!XjjX|!0Zbl&+^X3#90Q0YV)#&ayKA&ny^N$yQvLWzbA8;uwpI5-~QbVGe) zf`uB!haGoF$A#$jCblhx2@X%i*Cu@$(CYkuJRRrYGC2-NGlIQh;%nqwz>i^E_QE_O{Tr-82s$E|(RNRcFi)y~gXyk8 zzQj*}6G5^5e4&#`IegmEPDw9zAPEos4rxxLE_jrl1xxxH!$zlN%dv}3(FyRzx{lW7 z)cvj|3hb#V#4sdbc-{;I6yd-?`sgnTN(ZBX<5iCsTQ<{eK4DcnEo`vt;Gz$1C#o(F zQ7|tRshrQd>>eSaQ@{-*M5};7y`>bD!;K|Cv0VwELQ8=Hmn3jFiKc$90c~W5M4t8S zn19<@U6o5Tt#ow@2O=x;R>)7o5JS zg>R|Dn;4}jzJH~>pss8^n|ojb&NJBvg>Sw`*f1EuTGV90l*w8CTqEV_UkhiH9HfJE z8Pf_1w~t$CaEG2rfA)SrAX1WD+j_p>6l=q(XT#g7^VzMGXtQ=fUEwrEnBB^_&_$Fx z2u)|YK{ia@LC$9AGm7^lR_B4Oy%%LlJ;CO;1wjzP0=pgKN=i9gW27~HI%QeBmH?t& zn+!svGCSGhiyca4(@3T!X)M0d{YO0@)0PRCBq)9Y!|x+p zII8%|h6Px5@uiHA4^Wjl1GsLJ^q4vgz+p7C*(|xY37hf$T$8BLjIg*#1~Gcs*57iR zj_?saC|}`n7?bucGb_G|Slz+{^B0tMjkygeyV#XMvz8x;JGHk#KUuVQ{U$Z#o8S~; znLAZt<2+-sbh7>LFMzb*8&3R`i$rJSR?xMoPrVOYI;RJrvjq^Nygb{744}>_?(^#_ zGd+P}Zw@T0zS(GaZAc~hzcK9ORiV0e*xPGlP57WmMBoCQp;OksQ#6oxK=upqk6wv0 zZv41F%!d_P)}Bf^6v;p0I%X^6EOC##f*4R-zl9*mDx0mh*TP(JX*JQFq_gh?w=be% zQ7wC05?_|Eve%AUVmA5CdlXB@_g#Zag;-HuDLgxXdR281NppHlUL3@*?G2m-=O zVb9g28Mbs+(eez9qm9|5Un{&ml#z{OrHfNlQa!aGk5xO>p&qAmdNd6dqGKT9W8pXb z_$4fvw2O_T!nU;MM|p}OhKk9olJdako-(6bYa^bJ4mCRLKWk~VqjlBDA0}yB3I_T8 zERl9SMBvgo9v)~;asmCIkN}mWUS3Etr|Tw>hLx=$K;wCm$|Xl$D?KY~VR{KT?IF>W zHLq!8N6@Ac8l|TGn6YL^aXOZ-1Bl%6epJ?)oB%^1uYA?07hfu(LxK>jjTy*1W4$PI zEJLIyI))h$d#p!-F;s-nVEyK${IPMS0tQ!F*^WUoUpoE10#l(Y$VP>v_>RTCS$)=O zv;c_M$~e{?pN!zd3lSulzuljI72u3(kfPQ^D3iAug$359Uz-V;f$DdbS|_3`vF z`sE$cIpj@k7{1-4B>p=q0wL%LE-FHNTj7k`?r8UU>t?0Of3vQU;o%1GsX zr~Z!t@ZYNznxd}-9PHE1O8?xe)J*Zv=9(4e`*O%JpEnU^W8<Rl#Y~_5L?;za493%UTtuR|E1HmPOdT?1*oOvyGFYq^XzfwbbiokOu5q zHla8RGgP}9Pog}1ro(y_q8T34YMU&kg#zu8#hKe;@iYd3qoYrJbtB34M;45Ig|<2u zX($Gp)wAXqm#vKIh;Zj9+i);+q({fl!)&0onw@vicA%`p*H*1d9mG67%#&DQH#Ss8EUJjiOlVo0qJ;^c-EdHHm1y4JkY*-$9c~t!= zOhsnGd)y}Ds(mB*b>+LVX?13<>z8+*`Hw!txUv>2GfTK@S(h61%lK;Z4xhA~)lKK| z#eX)P#ui{(>~thc^-h4KLkH!t15LZWR+TA$GfOX7_4}FLYja#~Xb?e0uu)wO8Rd5S zY69Bnd=1202mxcv>2k|!|b@?S? zu-li*q4=mTVCr@ekt%*J68|DdP;jdD>4aseEcD_~aa+F0I8MuMvTEB3`zAfE&!ckn z(ngx|py}Za8WbFM=ixXawM=?}9bo9&@ROIh-Te-B_iHa_CA7*noLMA86( z!tNv?eekbm|Gg|f8f8o`=s6MaYQ(`-xr`r3m(&BegM}n65EZJr{4aB!k)Dd z-=}t#9($}`@rvNoMqUEw(^XFdp8&)!&nE*JSvU1w&&P1f2f%ezegrB1t-s$!u%ObeH-)#?sqR>kVX8PA8)0VI5?WT*M*;s7+KOaQA+(s5dhY~fnnEnx%uG; z3o$5VJnfAbN|{RcJsjd45tD=VKZyI}nD09jtG28YtBS{QbTeg|0+hn2r9H7*_z>p? zCdqkS=k6E0^UId=L(I7wTboi}r|=<>zFYvf3E&RN*a1e^a`S3<90qaSf`1D!Z};I} ztM~scjx(|z)qa0(`j2*^0T;{B5pB?to zr1`bx%zr%tgmD#xTp!y|HNF3%5xJs33(ga&-MV|lPWWID?N=B~*3gP_er z|DB~C-PM~8YAy?sBpxmQ57{OCZ?fA2;%q$`mw1JLv-vh*e*>A{E(IOm1T*opPE$0lCsCf-_&TGtd%kyc2;Zur{)U9dtGAg{ z!vRLi%ov`tRXx|wUiR8~g*&yhn;vN^;SC35k5YWY{o9%FfX?*yd%xZp%3J?OJ%T~b zl0ZASqF97vKcsaGfiGJkS>^w&AFjtBS?B2zNf)8xN3@PY`&yGc|9-*WPe4V&eg-t+ zYKH$uGa5I7br z3)bSE>A~!xnpKxiceC$rERNQhp-o!Va#aIEL!AHL?EZE~r=1mvaQuW|e7Arax; z{9lzoddJ7ODr%fBwS1e`_`pG=fZioI>(GV(Im$mWr|B|7bU4^%Y_QqoxRR<|qHZ z-AV&a9YgjBavv{I0S?&Dn8WXVlE&`fi>sM-!<*xj_V;CnNOVT^;X6SO5?9rqoS=x_ zjQH$3_3+OaBr>I31Jql;`u^^k6o3O3Y139)r0o}S)c{%`5nOUNWv=JmlDM87Apsa? 
zPwGj2tVDATo{Q`v1ds`Sz`OCG3$C-LGe$-lcrK3~{;MbS{hgdc;ao;fq}RFNt;Jq6 zhh%8yi*82MNKE#uqV~mynKy198~=PeX!|Q)yam`b=R~O}QU{0>Kl_z0v|C;?r*;bn zAI~ICa`7L}zpqiP9+9{v29B#80pPYSx@aFo>To#-pTiYUMpp0d4(@k!e|)BO@C6X@ znZOOepE%Cu?b}zQc!v*np@#wZ#MxJqML!H=x`ss$4A>f%12^n*gGpE~SfUYt(qJ!j zB9zw&fPN^#v9jg3f9{5k``>CiM-;85xduZXXpl!DXv~?;2|6EYh6J7gEOF*Bpt<*jNZ#C+V&v=b7g4KwuM$Pd;EUI~Z;8UU0~aEVZdTs{ z9P&Zeg6D39jyG_WC0XB^rF|19J0yR@d*hjr9^1D+Upcj}!qeMT2u zq~4I+Q>6L;WW3&aIqblM<v~~}#HPIZ`G=dM z-fwX?tp`3;9FYqDXZ&K*eHr8#P&w>xeE*98MC4^;%fFxnQ^VP?nMv+JTLhx2OrNz*h4jIgWP$`k&pXak;JTH&+$j{;z@an;jTb zbq(%#T6Q(H9Ew^61!I;XRVRjscut(x&u?Le`SS*!(A!-XocV~3WE8pwPqeE%wF9OP z0o^E8kTif07mg(UEdX~X!PrQ4+ntBsxpI4*{dv1xkiyXhAa)V}Tz$#-qT~RDYbD7<0O_4N^;{1q8LC(b37 zPK>x@HE#l&S!LUvaN;S?!&SVYa}>J5KW*)+E{mUw`F+eV2}RCdNZ|E1I7co|d442> zP7N*x@|JX4t3+NhF(`%6fNc8isEf4C;pgiFBjjkiUNWyl%BI?X>gVlxkn-P%q?GHS zs)2F9gE^i|V5VHaIGKKJe|oZ!Vzi{HeU#1cbGY)`zCL(#6Og{_zTpKRfQ8dFH%1J} ztMA8}9PlchZsuPwtY(B`66M+*^^kIA9Yt9*VkUl-gJ1j*d}zh9(sVMe+w$7OKI%<7^4Z4{LxMz;a1G zgWSCrw3|)#fCeW11yAfkV|=<+g)Cg*nKX?YQ3PZ?;#uYIF8;QUzS1 z&OSx9bqh>OhR3*`x1)qaaV0tjl&LXR6b!0V}y@n(V8o1iHf=-xfg5I7VF2+ds|h^uE7_8*6KQ;jUp!XB$ZOz8o+*^CWU-wzLM56`b2zgQE=o8St;j8e0zPk(cvn zHb+S^w-xRd(cgwp+BrB$^-I&JVGU1c-W4YWbbRf4_>34SOGJ_aT#2`N)^eI_MivpW z{2+IG?a9U5OxtEJ5LO;U0^Li!0CSjVPn|-=;cYZCOd3Oi&%^Te?yRBxk~g9do81i0 zB&Po=j;&W}nQBmF_-e@1D47;aS=HUC;YChFgA>(hBN$+G?*AWV}v+jOscZ=z#<#^!iS< z@C=<+mSw}ECHfBa(MdY4orC$w zJj$kj#%|~l{$cuMe8GvXUQgkvqlQ zv|kR{doI1#&T=1uRndGerwiANWuvN-@$3LOFI_ZaWlD0M6I{!8GFqbXR_M z(UR_z*H6;EEX_W7_8zcSSkAtaAK?3Ncb5ftBsQGh5%Du-XR(NzzE^dBp9bE%so(r; zKSVXvARxT@I^!n`Lc#65UhJ#WMW$Fv`xb7qXXjQ`*o|35*db=kuJ2j!py$By)@}oR zw^=?s@GUFh(!*0^qt-b30JH2=A!6 zs;_Io@AtrKMtT=%<5uC-+5Fe^v21$dgnZt29pC+ z-i3tXlfg)W!_qJqHJ30%q`rlny>teQ6&?)AfvLuYSh=(BJkV>Q;I?<#fuuy8eAeb4 znPVVwH2gJ`aw?;fM`HPJjC@X~-r$VzDjt&ORYf3!vh$*M9>igFBxblM?b9BzYgi`y zqq3GudPOe|1n%O+Ml7dQL2O8yixoDj%R%co$%Xtl6ukGCXtDifgR?EhTx*fu#}O2D zbuA!#vWhg_fG|QxQCC?pfP8a^zKi8Mx{2a<=@mdyC=AzQhG`U??tbqjK~<9`-7EqH zf&6$plsz)e^qT!U8Qb2m(W01O_8Jr2v+bazC66{xnZAgKC6#8AracV;$)Z{14AD?% zX?Y(l_gdggXscX;MCjg%LkA7(azv#x!)O$JK~rEe0&-+kN;AtY5GYQnY0xyjg2&+n5AP#_cK8- zI)F93tnsi5C$r%S3xAH1F9j3;ioKzMy~O+lie`~0%}2}2xr~?0*o>`8PeXiEe6%y+ z-A{xoYAw@kr9M1V1sKaDo}6l({G0(4sYoEzbbZG@##XkQs5%bD15V!*d!OCc znW0;xLyY=kssgj$;SZ9jE`fP^f51Mqq#6qM{4sImg?UzVo@_d$x%Deu&wBowV%(FR z7QpOs9qSutrg5mF7JjnremBlDTV*5jQNB0G5IV)XsCMEr061P^MQ}xYHQP`6Je;BX z2f8W>eaM4iYnSN3t`BeKjz%>F4Rme&tK|hHHhKjBGhDR%Ilz3|WG#HICbA9l+<*ux zDKe>0nl7FmnfoQgX7UMPQGj8rVP=mCBe=3ZqJ&>=ml>Xxt{!&dyRpaif2Ov?C14nH z&Uq5>i!klQX65QkvH+bs&qZ=tWipq4=rWC!vm$nDvqb$OL0sCC&CX4ux!XY6bx9go zhly(q+b)saaxDX=A^zaY%jQ0JZGV{GN-*gejW!{LAdm|Nx=?XvZu-0dwBU$PYR{k9 zy^nPa#^{?IpD>kLWO)=~Wl1~O&{aw$#!PjL$wT&R)RZU{?#lNRoPV))1_`sCmeJS7 z>e3%p9@1)Mu|X0DyO?Zr=qB^HH!^oW3h=R8Or96*_?YHJD+)5Pm%^qV5|Qhs9hM6c zP*?|kslHKRtDc07|1LBgkM&Y%-rxV7N5GDcaZC_)#|J4(`ypIry)-@9VR*cf&)8*; zgQy3ve^t~@7$*tE18-)zM-L~4Gs9vnwa0gtgR3elk2l`Nl^|TKwrPkZl9m0*Uebrr zoR~0ya-{+ws`Q_W%O2GO+47t72QnyT>@Ch311{^)0vNoawW6tTZVhuE=nZqpJdkV} zrIRE;XP!dp0#2Be=I6fWpM1Jm-eZKnoK)XaY)GA@U7%N3w#V71HD%pN(b{+44NX4$ z8=YEr*-Hwhg=J042NE!94$$LUs>Zgm#4K+d`W6@);3Si6qDDB~3 zkpdI#rd6{S%mqwsK&;DchPZ9MiZ=0N%hlg^J_Lf*bgtCj*Ux>s$lG5TJmTsxr&A@0 zGVWTZJ3#C;!1wiCR51@1moqxw%`dva_tKqDQyS_1H1Ad}`Pnyb7y4!$myY0gd`|p$ z`1$iyftAa(us0C#%2)WxCYrxl_*9U)dN(b8>I8>h)i`OPw>Rm2_7ZOF$nF-jKCnj; z+W?5dl-AYF(CZPNx)MvY!;HYgrne}OG=+@ZTz)b*FBmTl&0xCqD7cuYqN86`9`_BN z{HYmnZCAyB#>=rf=Eg{Ip_9WDFy5Ir*QsC)1V~;;M(F0pN{V=Yk*y8#2141gE;=g` z#0b={w==3T(v}(x#PP^(V8+W`RC9kSrmKi44z*mf8vh?8ixy{>P{_dkuZ89$~* 
[GIT binary patch data omitted: base85-encoded literal blob, not human-readable]
diff --git a/dbm-services/redis/db-tools/dbactuator/main.go b/dbm-services/redis/db-tools/dbactuator/main.go
new file mode 100644
index 0000000000..c5784e0c8c
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/main.go
@@ -0,0 +1,12 @@
+// Package main main func
+/*
+Copyright © 2022 NAME HERE
+
+*/
+package main
+
+import "dbm-services/redis/db-tools/dbactuator/cmd"
+
+func main() {
+	cmd.Execute()
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/models/myredis/client.go
b/dbm-services/redis/db-tools/dbactuator/models/myredis/client.go new file mode 100644 index 0000000000..a3835d746d --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/models/myredis/client.go @@ -0,0 +1,1681 @@ +package myredis + +import ( + "context" + "dbm-services/redis/db-tools/dbactuator/mylog" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "fmt" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-redis/redis/v8" +) + +// RedisClient redis连接信息 +type RedisClient struct { + Addr string `json:"addr"` + Password string `json:"password"` + DB int `json:"db"` + MaxRetryTime int `json:"maxRetryTimes"` + DbType string `json:"dbType"` // db类型 + InstanceClient *redis.Client `json:"-"` + ClusterClient *redis.ClusterClient `json:"-"` + addrMapToNodes map[string]*ClusterNodeData `json:"-"` // NOCC:vet/vet(设计如此) + nodeIDMapToNodes map[string]*ClusterNodeData `json:"-"` // NOCC:vet/vet(设计如此) + nodesMu *sync.Mutex // 写入/读取 AddrMapToNodes NodeIDMapToNodes 时加锁 +} + +// NewRedisClient 建redis客户端 +func NewRedisClient(addr, passwd string, db int, dbType string) (conn *RedisClient, err error) { + conn = &RedisClient{ + Addr: addr, + Password: passwd, + DB: db, + MaxRetryTime: 60, // 默认重试60次 + DbType: dbType, + nodesMu: &sync.Mutex{}, + } + err = conn.newConn() + if err != nil { + return nil, err + } + return +} + +// NewRedisClientWithTimeout 建redis客户端,可指定超时时间 +func NewRedisClientWithTimeout(addr, passwd string, db int, dbType string, timeout time.Duration) ( + conn *RedisClient, err error) { + conn = &RedisClient{ + Addr: addr, + Password: passwd, + DB: db, + MaxRetryTime: int(timeout.Seconds()), + DbType: dbType, + nodesMu: &sync.Mutex{}, + } + err = conn.newConn() + if err != nil { + return nil, err + } + return +} + +func (db *RedisClient) newConn() (err error) { + // 执行命令失败重连,确保重连后,databases正确 + var redisConnHook = func(ctx context.Context, cn *redis.Conn) error { + pipe01 := cn.Pipeline() + _, err := pipe01.Select(context.TODO(), db.DB).Result() + if err != nil { + err = fmt.Errorf("newConnct pipeline change db fail,err:%v", err) + mylog.Logger.Error(err.Error()) + return err + } + _, err = pipe01.Exec(context.TODO()) + if err != nil { + err = fmt.Errorf("newConnct pipeline.exec db fail,err:%v", err) + mylog.Logger.Error(err.Error()) + return err + } + return nil + } + redisOpt := &redis.Options{ + Addr: db.Addr, + DB: db.DB, + DialTimeout: 1 * time.Minute, + ReadTimeout: 1 * time.Minute, + MaxConnAge: 24 * time.Hour, + MaxRetries: db.MaxRetryTime, // 失败自动重试,重试次数 + MinRetryBackoff: 1 * time.Second, // 重试间隔 + MaxRetryBackoff: 1 * time.Second, + PoolSize: 10, + OnConnect: redisConnHook, + } + clusterOpt := &redis.ClusterOptions{ + Addrs: []string{db.Addr}, + DialTimeout: 1 * time.Minute, + ReadTimeout: 1 * time.Minute, + MaxConnAge: 24 * time.Hour, + MaxRetries: db.MaxRetryTime, // 失败自动重试,重试次数 + MinRetryBackoff: 1 * time.Second, // 重试间隔 + MaxRetryBackoff: 1 * time.Second, + PoolSize: 10, + OnConnect: redisConnHook, + } + if db.Password != "" { + redisOpt.Password = db.Password + clusterOpt.Password = db.Password + } + if db.DbType == consts.TendisTypeRedisCluster { + db.ClusterClient = redis.NewClusterClient(clusterOpt) + _, err = db.ClusterClient.Ping(context.TODO()).Result() + } else { + db.InstanceClient = redis.NewClient(redisOpt) + _, err = db.InstanceClient.Ping(context.TODO()).Result() + } + if err != nil { + errStr := fmt.Sprintf("redis new conn fail,sleep 10s then retry.err:%v,addr:%s", 
err, db.Addr) + mylog.Logger.Error(errStr) + return fmt.Errorf("redis new conn fail,err:%v addr:%s", err, db.Addr) + } + return +} + +// RedisClusterConfigSetOnlyMasters run 'config set ' on all redis cluster running masters +func (db *RedisClient) RedisClusterConfigSetOnlyMasters(confName string, val string) (rets []string, err error) { + nodes, err := db.GetClusterNodes() + if err != nil { + return + } + confSetFunc := func(node001 *ClusterNodeData, confName, val string) (ret string, err error) { + cli01, err := NewRedisClient(node001.Addr, db.Password, 0, consts.TendisTypeRedisInstance) + if err != nil { + return + } + defer cli01.Close() + return cli01.ConfigSet(confName, val) + } + for _, nodeItem := range nodes { + node01 := nodeItem + if IsRunningMaster(node01) { + ret, err := confSetFunc(node01, confName, val) + if err != nil { + return rets, err + } + rets = append(rets, ret) + } + } + return +} + +// DoCommand Do command(auto switch db) +func (db *RedisClient) DoCommand(cmdArgv []string, dbnum int) (interface{}, error) { + err := db.SelectDB(dbnum) + if err != nil { + return nil, err + } + var ret interface{} + dstCmds := []interface{}{} + for _, cmd01 := range cmdArgv { + dstCmds = append(dstCmds, cmd01) + } + if db.DbType == consts.TendisTypeRedisCluster { + ret, err = db.ClusterClient.Do(context.TODO(), dstCmds...).Result() + } else { + ret, err = db.InstanceClient.Do(context.TODO(), dstCmds...).Result() + } + if err != nil && err != redis.Nil { + mylog.Logger.Error("Redis DoCommand fail,err:%v,command:%+v,addr:%s", err, cmdArgv, db.Addr) + return nil, err + } else if err != nil && err == redis.Nil { + return nil, err + } + return ret, nil +} + +// SelectDB db +func (db *RedisClient) SelectDB(dbNum int) (err error) { + if db.DB == dbNum { + return nil + } + if db.DbType != consts.TendisTypeRedisInstance { + err = fmt.Errorf("redis:%s dbtype:%s cannot change db", db.Addr, db.DbType) + mylog.Logger.Error(err.Error()) + return + } + if db.InstanceClient == nil { + err = fmt.Errorf("redis:%s not connect", db.Addr) + mylog.Logger.Error(err.Error()) + return + } + pipe01 := db.InstanceClient.Pipeline() + _, err = pipe01.Select(context.TODO(), dbNum).Result() + if err != nil && err != redis.Nil { + err = fmt.Errorf("redis:%s selectdb fail,err:%v", db.Addr, err) + mylog.Logger.Error(err.Error()) + return + } + _, err = pipe01.Exec(context.TODO()) + if err != nil && err != redis.Nil { + err = fmt.Errorf("redis:%s selectdb fail,err:%v", db.Addr, err) + mylog.Logger.Error(err.Error()) + return + } + db.DB = dbNum + return nil +} + +// DelForce 删除key +func (db *RedisClient) DelForce(keyname string) (ret int64, err error) { + if db.DbType == consts.TendisTypeRedisCluster { + ret, err = db.ClusterClient.Del(context.TODO(), keyname).Result() + } else { + ret, err = db.InstanceClient.Del(context.TODO(), keyname).Result() + } + if err != nil && err != redis.Nil { + mylog.Logger.Error("Redis 'del %s' command fail,err:%v,addr:%s", keyname, err, db.Addr) + return 0, err + } + return +} + +// KeyType key类型 +func (db *RedisClient) KeyType(keyname string) (keyType string, err error) { + if db.DbType == consts.TendisTypeRedisCluster { + keyType, err = db.ClusterClient.Type(context.TODO(), keyname).Result() + } else { + keyType, err = db.InstanceClient.Type(context.TODO(), keyname).Result() + } + if err != nil && err != redis.Nil { + mylog.Logger.Error("Redis 'type %s' command fail,err:%v,addr:%s", keyname, err, db.Addr) + return + } + return +} + +// DbSize 'dbsize' +func (db *RedisClient) DbSize() 
(ret int64, err error) { + if db.DbType == consts.TendisTypeRedisCluster { + ret, err = db.ClusterClient.DBSize(context.TODO()).Result() + } else { + ret, err = db.InstanceClient.DBSize(context.TODO()).Result() + } + if err != nil && err != redis.Nil { + mylog.Logger.Error("Redis 'dbsize' command fail,err:%v,addr:%s", err, db.Addr) + return + } + return ret, nil +} + +// Info 执行info [section]命令并将返回结果保存在map中 +func (db *RedisClient) Info(section string) (infoRet map[string]string, err error) { + infoRet = make(map[string]string) + var str01 string + ctx := context.TODO() + if section == "" && db.DbType == consts.TendisTypeRedisCluster { + str01, err = db.ClusterClient.Info(ctx).Result() + } else if section != "" && db.DbType == consts.TendisTypeRedisCluster { + str01, err = db.ClusterClient.Info(ctx, section).Result() + } else if section == "" && db.DbType != consts.TendisTypeRedisCluster { + str01, err = db.InstanceClient.Info(ctx).Result() + } else if section != "" && db.DbType != consts.TendisTypeRedisCluster { + str01, err = db.InstanceClient.Info(ctx, section).Result() + } + if err != nil { + err = fmt.Errorf("redis:%s 'info %s' fail,err:%v", db.Addr, section, err) + mylog.Logger.Error(err.Error()) + return + } + infoList := strings.Split(str01, "\n") + for _, infoItem := range infoList { + infoItem = strings.TrimSpace(infoItem) + if strings.HasPrefix(infoItem, "#") { + continue + } + if len(infoItem) == 0 { + continue + } + list01 := strings.SplitN(infoItem, ":", 2) + if len(list01) < 2 { + continue + } + list01[0] = strings.TrimSpace(list01[0]) + list01[1] = strings.TrimSpace(list01[1]) + infoRet[list01[0]] = list01[1] + } + return +} + +// GetTendisType 获取redis类型,返回RedisInstance or TendisplusInstance or TendisSSDInsance +func (db *RedisClient) GetTendisType() (dbType string, err error) { + var infoRet map[string]string + infoRet, err = db.Info("server") + if err != nil { + return + } + version := infoRet["redis_version"] + if strings.Contains(version, "-rocksdb-") { + dbType = consts.TendisTypeTendisplusInsance + } else if strings.Contains(version, "-TRedis-") { + dbType = consts.TendisTypeTendisSSDInsance + } else { + dbType = consts.TendisTypeRedisInstance + } + return +} + +// GetTendisVersion 获取redis版本:redis_version +func (db *RedisClient) GetTendisVersion() (version string, err error) { + var infoRet map[string]string + infoRet, err = db.Info("server") + if err != nil { + return + } + version = infoRet["redis_version"] + return +} + +// GetDir config get dir 获取数据路径 +func (db *RedisClient) GetDir() (dir string, err error) { + var ok bool + confRet, err := db.ConfigGet("dir") + if err != nil { + return + } + dir, ok = confRet["dir"] + if !ok { + err = fmt.Errorf("config get dir result not include dir?,result:%+v,addr:%s", confRet, db.Addr) + mylog.Logger.Error(err.Error()) + return + } + dir = strings.TrimPrefix(dir, `"`) + dir = strings.TrimSuffix(dir, `"`) + return +} + +// GetKvstoreCount config get kvstorecount 获取kvstore 数量 +func (db *RedisClient) GetKvstoreCount() (kvstorecount string, err error) { + var ok bool + confRet, err := db.ConfigGet("kvstorecount") + if err != nil { + return + } + kvstorecount, ok = confRet["kvstorecount"] + if !ok { + err = fmt.Errorf("config get kvstorecount result not include dir?,result:%+v,addr:%s", confRet, db.Addr) + mylog.Logger.Error(err.Error()) + return + } + kvstorecount = strings.TrimPrefix(kvstorecount, `"`) + kvstorecount = strings.TrimSuffix(kvstorecount, `"`) + return +} + +// GetRole info replication中获取角色 +func (db *RedisClient) 
GetRole() (role string, err error) { + var infoRet map[string]string + infoRet, err = db.Info("replication") + if err != nil { + return + } + role = infoRet["role"] + return +} + +// GetMasterData info replication中master信息 +func (db *RedisClient) GetMasterData() (masterHost, masterPort, linkStatus, selfRole string, err error) { + var infoRet map[string]string + infoRet, err = db.Info("replication") + if err != nil { + return + } + selfRole = infoRet["role"] + if selfRole != consts.RedisSlaveRole { + return + } + masterHost = infoRet["master_host"] + masterPort = infoRet["master_port"] + linkStatus = infoRet["master_link_status"] + return +} + +// Bgsave 执行bgsave命令 +func (db *RedisClient) Bgsave() (ret string, err error) { + // 执行 bgsave 命令只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("bgsave command redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return "", err + } + str01, err := db.InstanceClient.BgSave(context.TODO()).Result() + if err != nil { + err = fmt.Errorf("redis:%s 'bgsave' fail,err:%v", db.Addr, err) + mylog.Logger.Error(err.Error()) + return str01, err + } + return str01, nil +} + +// IsBgsaveInProgress .. +func (db *RedisClient) IsBgsaveInProgress() (ret bool, err error) { + persisInfo, err := db.Info("Persistence") + if err != nil { + return false, err + } + inProgress := persisInfo["rdb_bgsave_in_progress"] + if inProgress == "1" { + return true, nil + } + return false, nil +} + +// BgRewriteAOF 执行bgrewriteaof命令 +func (db *RedisClient) BgRewriteAOF() (ret string, err error) { + // 执行 bgrewriteaof 命令只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("bgrewriteaof command redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return "", err + } + str01, err := db.InstanceClient.BgRewriteAOF(context.TODO()).Result() + if err != nil { + err = fmt.Errorf("redis:%s 'bgrewriteaof' fail,err:%v", db.Addr, err) + mylog.Logger.Error(err.Error()) + return str01, err + } + return str01, nil +} + +// IsAofRewriteInProgress .. 
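+// (a descriptive note: this reads 'aof_rewrite_in_progress' from 'info
+// Persistence'; "1" means an AOF rewrite is still running. It is polled every
+// 5s by BgRewriteAOFAndWaitForDone below.)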
+func (db *RedisClient) IsAofRewriteInProgress() (ret bool, err error) {
+	persisInfo, err := db.Info("Persistence")
+	if err != nil {
+		return false, err
+	}
+	inProgress := persisInfo["aof_rewrite_in_progress"]
+	if inProgress == "1" {
+		return true, nil
+	}
+	return false, nil
+}
+
+// BgRewriteAOFAndWaitForDone runs bgrewriteaof and waits for it to finish
+func (db *RedisClient) BgRewriteAOFAndWaitForDone() (err error) {
+	mylog.Logger.Info(fmt.Sprintf("redis:%s begin to bgrewriteaof", db.Addr))
+	_, err = db.BgRewriteAOF()
+	if err != nil {
+		return err
+	}
+	count := 0 // log progress once per minute
+	var msg string
+	var inProgress bool
+	for {
+		time.Sleep(5 * time.Second)
+		inProgress, err = db.IsAofRewriteInProgress()
+		if err != nil {
+			return err
+		}
+		if !inProgress {
+			msg = fmt.Sprintf("redis:%s bgrewriteaof success", db.Addr)
+			mylog.Logger.Info(msg)
+			return nil
+		}
+		count++
+		if (count % 12) == 0 {
+			msg = fmt.Sprintf("redis:%s bgrewriteaof is still running ...", db.Addr)
+			mylog.Logger.Info(msg)
+		}
+	}
+}
+
+// BgSaveAndWaitForFinish runs bgsave and waits for it to finish
+func (db *RedisClient) BgSaveAndWaitForFinish() (err error) {
+	mylog.Logger.Info(fmt.Sprintf("redis:%s begin to bgsave", db.Addr))
+	_, err = db.Bgsave()
+	if err != nil {
+		return err
+	}
+	count := 0 // log progress once per minute
+	var msg string
+	var inProgress bool
+	for {
+		time.Sleep(5 * time.Second)
+		inProgress, err = db.IsBgsaveInProgress()
+		if err != nil {
+			return err
+		}
+		if !inProgress {
+			msg = fmt.Sprintf("redis:%s bgsave success", db.Addr)
+			mylog.Logger.Info(msg)
+			return nil
+		}
+		count++
+		if (count % 12) == 0 {
+			msg = fmt.Sprintf("redis:%s bgsave is still running ...", db.Addr)
+			mylog.Logger.Info(msg)
+		}
+	}
+}
+
+// TendisplusBackup runs the tendisplus 'backup' command
+func (db *RedisClient) TendisplusBackup(targetDir string) (ret string, err error) {
+	// the backup command must run on a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("backup command redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return "", err
+	}
+	cmd := []interface{}{"backup", targetDir}
+	res, err := db.InstanceClient.Do(context.TODO(), cmd...).Result()
+	if err != nil {
+		err = fmt.Errorf("%+v fail,err:%v,addr:%s", cmd, err, db.Addr)
+		mylog.Logger.Error(err.Error())
+		// res is nil when the command failed, so do not type-assert it here
+		return "", err
+	}
+	ret = res.(string)
+	return ret, nil
+}
+
+// IsTendisplusBackupInProgress ..
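+// (a descriptive note: this reads 'current-backup-running' from 'info backup';
+// "yes" means a tendisplus backup is still in progress. Polled every 5s by
+// TendisplusBackupAndWaitForDone below.)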
+func (db *RedisClient) IsTendisplusBackupInProgress() (ret bool, err error) {
+	bakInfo, err := db.Info("backup")
+	if err != nil {
+		return false, err
+	}
+	inProgress := bakInfo["current-backup-running"]
+	if inProgress == "yes" {
+		return true, nil
+	}
+	return false, nil
+}
+
+// TendisplusBackupAndWaitForDone runs the backup command and waits for it to finish
+func (db *RedisClient) TendisplusBackupAndWaitForDone(targetDir string) (err error) {
+	mylog.Logger.Info(fmt.Sprintf("tendisplus:%s 'backup %s'",
+		db.Addr, targetDir))
+	_, err = db.TendisplusBackup(targetDir)
+	if err != nil {
+		return err
+	}
+	count := 0 // log progress once per minute
+	var msg string
+	var inProgress bool
+	for {
+		time.Sleep(5 * time.Second)
+		inProgress, err = db.IsTendisplusBackupInProgress()
+		if err != nil {
+			return err
+		}
+		if !inProgress {
+			msg = fmt.Sprintf("tendisplus:%s backup success", db.Addr)
+			mylog.Logger.Info(msg)
+			return nil
+		}
+		count++
+		if (count % 12) == 0 {
+			msg = fmt.Sprintf("tendisplus:%s backup is still running ...", db.Addr)
+			mylog.Logger.Info(msg)
+		}
+	}
+}
+
+// IsTendisSSDBackupInProgress reports whether a tendisSSD backup is running
+func (db *RedisClient) IsTendisSSDBackupInProgress() (ret bool, err error) {
+	bakInfo, err := db.Info("Backups")
+	if err != nil {
+		return false, err
+	}
+	inProgress := bakInfo["current-backup-running"]
+	if inProgress == "yes" {
+		return true, nil
+	}
+	return false, nil
+}
+
+// WaitForBackupFinish waits for the backup to finish, whether the target is redis, tendisplus or tendisSSD
+func (db *RedisClient) WaitForBackupFinish() (err error) {
+	count := 0 // log progress once per minute
+	var msg string
+	var aofRewriteRunning bool
+	var bgsaveRunning bool
+	var plusBakRunning bool
+	var ssdBakRunning bool
+	var tendisType string
+	tendisType, err = db.GetTendisType()
+	if err != nil {
+		return
+	}
+	for {
+		switch tendisType {
+		case consts.TendisTypeRedisInstance:
+			aofRewriteRunning, _ = db.IsAofRewriteInProgress()
+			bgsaveRunning, err = db.IsBgsaveInProgress()
+			msg = fmt.Sprintf("redis:%s bgrewriteaof or bgsave is still running ...", db.Addr)
+		case consts.TendisTypeTendisplusInsance:
+			plusBakRunning, err = db.IsTendisplusBackupInProgress()
+			msg = fmt.Sprintf("tendisplus:%s backup is still running ...", db.Addr)
+		case consts.TendisTypeTendisSSDInsance:
+			ssdBakRunning, err = db.IsTendisSSDBackupInProgress()
+			msg = fmt.Sprintf("tendisSSD:%s backup is still running ...", db.Addr)
+		}
+		if err != nil {
+			return
+		}
+		if aofRewriteRunning || bgsaveRunning || plusBakRunning || ssdBakRunning {
+			count++
+			if (count % 12) == 0 {
+				mylog.Logger.Info(msg)
+			}
+			time.Sleep(5 * time.Second)
+			continue
+		}
+		msg = fmt.Sprintf("redis:%s rdb_bgsave_in_progress=0,aof_rewrite_in_progress=0,current-backup-running=no", db.Addr)
+		mylog.Logger.Info(msg)
+		break
+	}
+	return nil
+}
+
+// TendisSSDBackup pipelines the 'binlogsize' and 'backup $dir' commands and returns both results
+func (db *RedisClient) TendisSSDBackup(targetDir string) (
+	binlogsizeRet TendisSSDBinlogSize, backupCmdRet string, err error,
+) {
+	// the backup command must run on a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("backup command redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	pipe01 := db.InstanceClient.Pipeline()
+	cmd := []interface{}{"binlogsize"}
+	binlogRetInter := pipe01.Do(context.TODO(), cmd...)
+	cmd = []interface{}{"backup", targetDir}
+	backupRetInter := pipe01.Do(context.TODO(), cmd...)
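+	// both Do calls above only queue the commands client-side; pipe01.Exec
+	// below sends them to the server in a single round trip and fills
+	// binlogRetInter and backupRetInter with their replies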
+ + _, err = pipe01.Exec(context.TODO()) + if err != nil && err != redis.Nil { + err = fmt.Errorf("redis(%s) pipeline.Exec fail,err:%v,cmds:['binlogsize','backup %s']", db.Addr, err, targetDir) + mylog.Logger.Error(err.Error()) + return + } + binlogsizeRet, err = db.parseBinlogSizeCmdRet(binlogRetInter.Val()) + if err != nil { + return + } + + backupCmdRet = backupRetInter.Val().(string) + return +} + +// TendisSSDBackupAndWaitForDone 执行backup命令并等待结束 +func (db *RedisClient) TendisSSDBackupAndWaitForDone(targetDir string) ( + binlogsizeRet TendisSSDBinlogSize, backupCmdRet string, err error, +) { + mylog.Logger.Info(fmt.Sprintf("tendisSSD:%s 'backup %s'", db.Addr, targetDir)) + binlogsizeRet, backupCmdRet, err = db.TendisSSDBackup(targetDir) + if err != nil { + return + } + count := 0 // 每分钟输出一次日志 + var msg string + var inProgress bool + for { + time.Sleep(5 * time.Second) + inProgress, err = db.IsTendisSSDBackupInProgress() + if err != nil { + return + } + if inProgress == false { + msg = fmt.Sprintf("tendisSSD:%s backup success", db.Addr) + mylog.Logger.Info(msg) + return + } + count++ + if (count % 12) == 0 { + msg = fmt.Sprintf("tendisSSD:%s backup is still running ...", db.Addr) + mylog.Logger.Info(msg) + } + } +} + +// Scan 命令 +func (db *RedisClient) Scan(match string, cursor uint64, count int64) (keys []string, retcursor uint64, err error) { + // 执行scan命令只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("Scan redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return + } + keys, retcursor, err = db.InstanceClient.Scan(context.TODO(), cursor, match, count).Result() + if err != nil && err != redis.Nil { + err = fmt.Errorf("redis scan fail,err:%v,match:%s,cursor:%d,count:%d,addr:%s", err, match, cursor, count, db.Addr) + mylog.Logger.Error(err.Error()) + return + } + return keys, retcursor, nil +} + +// Sscan 'sscan' +func (db *RedisClient) Sscan(keyname string, cursor uint64, match string, count int64) (fields []string, + retCursor uint64, err error) { + if db.DbType == consts.TendisTypeRedisCluster { + fields, retCursor, err = db.ClusterClient.SScan(context.TODO(), keyname, cursor, match, count).Result() + } else { + fields, retCursor, err = db.InstanceClient.SScan(context.TODO(), keyname, cursor, match, count).Result() + } + if err != nil && err != redis.Nil { + mylog.Logger.Error("Redis 'sscan %s %d match %s count %s' command fail,err:%v,addr:%s", keyname, cursor, match, count, + err, db.Addr) + return fields, 0, err + } + return fields, retCursor, nil +} + +// GetClusterNodes 获取cluster nodes命令结果并解析 +func (db *RedisClient) GetClusterNodes() (clusterNodes []*ClusterNodeData, err error) { + db.nodesMu.Lock() + defer db.nodesMu.Unlock() + var nodesStr01 string + if db.DbType == consts.TendisTypeRedisCluster { + nodesStr01, err = db.ClusterClient.ClusterNodes(context.TODO()).Result() + } else { + nodesStr01, err = db.InstanceClient.ClusterNodes(context.TODO()).Result() + } + if err != nil { + err = fmt.Errorf("cluster nodes fail,err:%v,addr:%s", err, db.Addr) + mylog.Logger.Error(err.Error()) + return + } + clusterNodes, err = DecodeClusterNodes(nodesStr01) + if err != nil { + return + } + db.addrMapToNodes = make(map[string]*ClusterNodeData) + db.nodeIDMapToNodes = make(map[string]*ClusterNodeData) + for _, tmpItem := range clusterNodes { + infoItem := tmpItem + db.addrMapToNodes[infoItem.Addr] = infoItem + db.nodeIDMapToNodes[infoItem.NodeID] = infoItem + } + return +} + +// GetAddrMapToNodes 返回addr=>clusterNode 映射 +func (db 
*RedisClient) GetAddrMapToNodes() (ret map[string]*ClusterNodeData, err error) { + _, err = db.GetClusterNodes() + if err != nil { + return + } + ret = db.addrMapToNodes + return +} + +// GetNodeIDMapToNodes 返回nodeId=>clusterNode 映射 +func (db *RedisClient) GetNodeIDMapToNodes() (ret map[string]*ClusterNodeData, err error) { + _, err = db.GetClusterNodes() + if err != nil { + return + } + ret = db.nodeIDMapToNodes + return +} + +// GetRunningMasters 获取running状态的master,无所谓是否负责slot +func (db *RedisClient) GetRunningMasters() (map[string]*ClusterNodeData, error) { + ret := make(map[string]*ClusterNodeData) + _, err := db.GetClusterNodes() + if err != nil { + return ret, err + } + + addrMap, err := db.GetAddrMapToNodes() + if err != nil { + return ret, err + } + for _, addr := range addrMap { + addrItem := addr + if len(addrItem.FailStatus) == 0 && addr.Role == "master" { + ret[addrItem.Addr] = addrItem + mylog.Logger.Info("master addr:%+v", addr) + } + mylog.Logger.Info("addr:%+v", addr) + } + return ret, nil +} + +// GetMasterNodeBySlaveAddr 根据slaveAddr获取master node信息 +func (db *RedisClient) GetMasterNodeBySlaveAddr(slaveAddr string) (*ClusterNodeData, error) { + _, err := db.GetClusterNodes() + if err != nil { + return nil, err + } + m01, err := db.GetAddrMapToNodes() + if err != nil { + return nil, err + } + slaveNode, ok := m01[slaveAddr] + if ok == false { + err = fmt.Errorf("not found slave node:%s", slaveAddr) + mylog.Logger.Error(err.Error()) + return nil, err + } + if slaveNode.Role != "slave" { + err = fmt.Errorf("node:%s not a slave(role:%s)", slaveAddr, slaveNode.Role) + mylog.Logger.Error(err.Error()) + return nil, err + } + masterNodeID := slaveNode.MasterID + s01, err := db.GetNodeIDMapToNodes() + if err != nil { + return nil, err + } + masterNode, ok := s01[masterNodeID] + if ok == false { + err = fmt.Errorf("slave:%s master id:%s not belong cluster:%s", + slaveAddr, masterNodeID, db.Addr) + mylog.Logger.Error(err.Error()) + return nil, err + } + return masterNode, nil +} + +// GetAllSlaveNodesByMasterAddr 根据masterAddr获取其全部slaveNode +func (db *RedisClient) GetAllSlaveNodesByMasterAddr(masterAddr string) (slaveNodes []*ClusterNodeData, err error) { + _, err = db.GetClusterNodes() + if err != nil { + return nil, err + } + m01, err := db.GetAddrMapToNodes() + if err != nil { + return nil, err + } + masterNode, ok := m01[masterAddr] + if ok == false { + err = fmt.Errorf("not found master node:%s", masterAddr) + mylog.Logger.Error(err.Error()) + return nil, err + } + if masterNode.Role != "master" { + err = fmt.Errorf("node:%s not a master(role:%s)", masterAddr, masterNode.Role) + mylog.Logger.Error(err.Error()) + return nil, err + } + for _, info01 := range m01 { + infoItem := info01 + if infoItem.Role == "slave" && len(infoItem.FailStatus) == 0 && infoItem.MasterID == masterNode.NodeID { + msg := fmt.Sprintf("master:%s 找到一个slave:%s ", masterAddr, infoItem.Addr) + mylog.Logger.Info(msg) + slaveNodes = append(slaveNodes, infoItem) + } + } + if len(slaveNodes) == 0 { + msg := fmt.Sprintf("master:%s 没有找到任何slave信息", masterAddr) + mylog.Logger.Info(msg) + return nil, util.NewNotFound() + } + return +} + +// FindNodeFunc find node function +type FindNodeFunc func(node *ClusterNodeData) bool + +// GetNodesByFunc returns first node found by the FindNodeFunc +func (db *RedisClient) GetNodesByFunc(f FindNodeFunc) (map[string]*ClusterNodeData, error) { + nodes := make(map[string]*ClusterNodeData) + addrMap, err := db.GetAddrMapToNodes() + if err != nil { + return nil, err + } + + for _, n01 
:= range addrMap {
+		n02 := n01
+		if f(n02) {
+			nodes[n02.Addr] = n02
+		}
+	}
+
+	if len(nodes) == 0 {
+		return nodes, util.NewNotFound()
+	}
+	return nodes, nil
+}
+
+// ClusterForget runs 'cluster forget' to remove the given node from the cluster.
+// forget docs: https://redis.io/commands/cluster-forget/
+func (db *RedisClient) ClusterForget(nodeID string) (err error) {
+	if db.DbType == consts.TendisTypeRedisCluster {
+		_, err = db.ClusterClient.ClusterForget(context.TODO(), nodeID).Result()
+	} else {
+		_, err = db.InstanceClient.ClusterForget(context.TODO(), nodeID).Result()
+	}
+	if err != nil {
+		err = fmt.Errorf("redis(%s) 'cluster forget %s' failed,err:%v", db.Addr, nodeID, err)
+		mylog.Logger.Error(err.Error())
+		return err
+	}
+	mylog.Logger.Info("Forget node done,nodeID:%v", nodeID)
+	return nil
+}
+
+// ClusterClear runs tendisplus 'cluster clear'.
+// details: http://tendis.cn/#/Tendisplus/%E5%91%BD%E4%BB%A4/cluster_setslot?id=cluster-setslot-taskinfo-state-taskid
+// http://tendis.cn/#/Tendisplus/%E7%9F%A5%E8%AF%86%E5%BA%93/%E9%9B%86%E7%BE%A4/gc_delete
+func (db *RedisClient) ClusterClear() error {
+	cmd := []interface{}{"cluster", "clear"}
+	var err error
+	if db.DbType == consts.TendisTypeRedisCluster {
+		_, err = db.ClusterClient.Do(context.TODO(), cmd...).Result()
+	} else {
+		_, err = db.InstanceClient.Do(context.TODO(), cmd...).Result()
+	}
+	if err != nil && !strings.Contains(err.Error(), "no dirty data to delete") {
+		err = fmt.Errorf("ClusterClear fail,err:%v,addr:%s", err, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return err
+	}
+	return nil
+}
+
+// ClusterStopTaskID runs 'cluster setslot stop taskid'
+// detail: http://tendis.cn/#/Tendisplus/%E5%91%BD%E4%BB%A4/cluster_setslot?id=cluster-setslot-stop-taskid
+func (db *RedisClient) ClusterStopTaskID(taskID string) error {
+	cmd := []interface{}{"cluster", "setslot", "stop", taskID}
+	var err error
+	if db.DbType == consts.TendisTypeRedisCluster {
+		_, err = db.ClusterClient.Do(context.TODO(), cmd...).Result()
+	} else {
+		_, err = db.InstanceClient.Do(context.TODO(), cmd...).Result()
+	}
+	if err != nil {
+		err = fmt.Errorf("ClusterStopTaskId fail,err:%v,addr:%s", err, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return err
+	}
+	mylog.Logger.Info(fmt.Sprintf("tendisplus:%s 'cluster setslot stop %s' success", db.Addr, taskID))
+	return nil
+}
+
+// IsSlotsBelongMaster checks whether the given slots all belong to the given master node
+func (db *RedisClient) IsSlotsBelongMaster(masterAddr string,
+	slots []int) (allBelong bool, notBelongList []int, err error) {
+	allBelong = false
+	clusterNodes, err := db.GetAddrMapToNodes()
+	if err != nil {
+		mylog.Logger.Error(err.Error())
+		return allBelong, nil, err
+	}
+	masterNode, ok := clusterNodes[masterAddr]
+	if !ok {
+		err = fmt.Errorf("IsSlotsBelongMaster cluster not include the target node,masterAddr:%s", masterAddr)
+		allBelong = false
+		mylog.Logger.Error(err.Error())
+		return allBelong, nil, err
+	}
+	if masterNode.Role != "master" {
+		err = fmt.Errorf("IsSlotsBelongMaster target node not a master node,"+
+			"masterAddr:%s,masterNode role:%s", masterAddr, masterNode.Role)
+		allBelong = false
+		mylog.Logger.Error(err.Error())
+		return allBelong, nil, err
+	}
+	for _, slot01 := range slots {
+		if _, ok := masterNode.SlotsMap[slot01]; !ok {
+			notBelongList = append(notBelongList, slot01)
+		}
+	}
+	if len(notBelongList) == 0 {
+		allBelong = true
+	}
+	return allBelong, notBelongList, nil
+}
+
+// ConfigSet runs 'config set' on tendis, falling back to 'confxx set'
+func (db *RedisClient) ConfigSet(confName string, val string) (string, error) {
+	var err error
+	var ok bool
+	// 执行
config set 命令只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("ConfigSet redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return "", err + } + // 先执行config set,如果报错则执行 confxx set + data, err := db.InstanceClient.ConfigSet(context.TODO(), confName, val).Result() + if err != nil && strings.Contains(err.Error(), "ERR unknown command") { + cmd := []interface{}{"confxx", "set", confName, val} + confRet, err := db.InstanceClient.Do(context.TODO(), cmd...).Result() + if err != nil { + err = fmt.Errorf("%+v fail,err:%v,addr:%s", cmd, err, db.Addr) + mylog.Logger.Error(err.Error()) + return "", err + } + data, ok = confRet.(string) + if ok == false { + err = fmt.Errorf(`confxx set result not interface{},cmd:%v,cmdRet:%v,nodeAddr:%s`, + cmd, confRet, db.Addr) + mylog.Logger.Error(err.Error()) + return "", err + } + } else if err != nil { + err = fmt.Errorf("redis config set %s %s fail,err:%v,addr:%s", confName, val, err, db.Addr) + mylog.Logger.Error(err.Error()) + return data, err + } + return data, nil +} + +// ConfigGet tendis执行config get or confxx get +func (db *RedisClient) ConfigGet(confName string) (ret map[string]string, err error) { + var confInfos []interface{} + var ok bool + ret = map[string]string{} + + // 先执行config get,如果报错则执行 confxx get + if db.DbType == consts.TendisTypeRedisCluster { + confInfos, err = db.ClusterClient.ConfigGet(context.TODO(), confName).Result() + } else { + confInfos, err = db.InstanceClient.ConfigGet(context.TODO(), confName).Result() + } + if err != nil && strings.Contains(err.Error(), "ERR unknown command") { + cmd := []interface{}{"confxx", "get", confName} + var confRet interface{} + if db.DbType == consts.TendisTypeRedisCluster { + confRet, err = db.ClusterClient.Do(context.TODO(), cmd...).Result() + } else { + confRet, err = db.InstanceClient.Do(context.TODO(), cmd...).Result() + } + if err != nil { + err = fmt.Errorf("cmd:%+v fail,err:%v,addr:%s", cmd, err, db.Addr) + mylog.Logger.Error(err.Error()) + return ret, err + } + confInfos, ok = confRet.([]interface{}) + if ok == false { + err = fmt.Errorf("cmd:%v result not []interface{},cmdRet:%v,nodeAddr:%s", cmd, confRet, db.Addr) + mylog.Logger.Error(err.Error()) + return ret, err + } + } else if err != nil { + err = fmt.Errorf(" cmd:config get %q failed,err:%v", confName, err) + mylog.Logger.Error(err.Error()) + return ret, err + } + + var k01, v01 string + for idx, confItem := range confInfos { + conf01 := confItem.(string) + if idx%2 == 0 { + k01 = conf01 + continue + } + v01 = conf01 + ret[k01] = v01 + } + return ret, nil +} + +// ConfigRewrite tendis执行confxx rewrite +func (db *RedisClient) ConfigRewrite() (string, error) { + var err error + var data string + var ok bool + // 执行 config rewrite 命令只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("ConfigRewrite redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return "", err + } + data, err = db.InstanceClient.ConfigRewrite(context.TODO()).Result() + if err != nil && strings.Contains(err.Error(), "ERR unknown command") { + cmd := []interface{}{"confxx", "rewrite"} + confRet, err := db.InstanceClient.Do(context.TODO(), cmd...).Result() + if err != nil { + err = fmt.Errorf("%+v fail,err:%v,addr:%s", cmd, err, db.Addr) + mylog.Logger.Error(err.Error()) + return "", err + } + data, ok = confRet.(string) + if ok == false { + err = fmt.Errorf( + `confxx rewrite result not string,cmd:%v,cmdRet:%v,addr:%s`, + cmd, confRet, db.Addr) + 
mylog.Logger.Error(err.Error()) + return "", err + } + } else if err != nil { + err = fmt.Errorf("redis config rewrite fail,err:%v,addr:%s", err, db.Addr) + mylog.Logger.Error(err.Error()) + return "", err + } + return data, nil +} + +// SlaveOf 'slaveof' command +func (db *RedisClient) SlaveOf(masterIP, masterPort string) (ret string, err error) { + // 执行slaveof 命令只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("SlaveOf redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return + } + ret, err = db.InstanceClient.SlaveOf(context.TODO(), masterIP, masterPort).Result() + if err != nil { + err = fmt.Errorf("'slaveof %s %s' failed,err:%v,addr:%s", masterIP, masterPort, err, db.Addr) + mylog.Logger.Error(err.Error()) + return + } + return +} + +// IsClusterEnabled 'cluster-enabled' 是否启动 +func (db *RedisClient) IsClusterEnabled() (clusterEnabled bool, err error) { + confData, err := db.ConfigGet("cluster-enabled") + if err != nil { + return + } + val, ok := confData["cluster-enabled"] + if ok && strings.ToLower(val) == "yes" { + clusterEnabled = true + } + return +} + +// ClusterMeet 'cluster meet' command +func (db *RedisClient) ClusterMeet(ip, port string) (ret string, err error) { + if db.DbType == consts.TendisTypeRedisCluster { + ret, err = db.ClusterClient.ClusterMeet(context.TODO(), ip, port).Result() + } else { + ret, err = db.InstanceClient.ClusterMeet(context.TODO(), ip, port).Result() + } + if err != nil { + err = fmt.Errorf("redis(%s) 'cluster meet %s %s' failed,err:%v", db.Addr, ip, port, err) + mylog.Logger.Error(err.Error()) + return + } + return +} + +// ClusterAddSlots 添加slots, 'cluster addslots 'command +func (db *RedisClient) ClusterAddSlots(slots []int) (ret string, err error) { + // 执行 cluster addslots 命令只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("ClusterAddSlots redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return + } + ret, err = db.InstanceClient.ClusterAddSlots(context.TODO(), slots...).Result() + if err != nil { + slotStr := ConvertSlotToStr(slots) + err = fmt.Errorf("redis(%s) 'cluster addslots %s' failed,err:%v", db.Addr, slotStr, err) + mylog.Logger.Error(err.Error()) + return + } + return +} + +// ClusterReplicate 'cluster replicate' +func (db *RedisClient) ClusterReplicate(masterID string) (ret string, err error) { + // 执行cluster replicate 命令只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("ClusterReplicate redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return + } + ret, err = db.InstanceClient.ClusterReplicate(context.TODO(), masterID).Result() + if err != nil { + err = fmt.Errorf("'cluster replicate %s' failed,err:%v,addr:%s", masterID, err, db.Addr) + mylog.Logger.Error(err.Error()) + return + } + return +} + +// GetMyself 获取myself的节点信息 +func (db *RedisClient) GetMyself() (ret *ClusterNodeData, err error) { + // cluster nodes中找到 myself 节点,只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("GetMyself redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return nil, err + } + addrMap, err := db.GetAddrMapToNodes() + if err != nil { + return ret, err + } + for _, info01 := range addrMap { + infoItem := info01 + if infoItem.IsMyself == true { + ret = infoItem + break + } + } + return ret, nil +} + +// TendisplusDataSize tendisplus数据量大小,'info Dataset' rocksdb.total-sst-files-size,单位byte +func (db *RedisClient) 
TendisplusDataSize() (dataSize uint64, err error) { + // 命令'info Dataset',只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("TendisplusDataSize redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return + } + var infoRet map[string]string + infoRet, err = db.Info("Dataset") + if err != nil { + return + } + sizeStr := infoRet["rocksdb.total-sst-files-size"] + dataSize, err = strconv.ParseUint(sizeStr, 10, 64) + if err != nil { + err = fmt.Errorf("strconv.ParseUint fail,err:%v,value:%s", err, sizeStr) + mylog.Logger.Error(err.Error()) + return + } + return +} + +// RedisInstanceDataSize redis数据量大小,'info memory' used_memory,单位byte +func (db *RedisClient) RedisInstanceDataSize() (dataSize uint64, err error) { + // 命令'info Dataset',只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("RedisInstanceDataSize redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return + } + var infoRet map[string]string + infoRet, err = db.Info("memory") + if err != nil { + return + } + sizeStr := infoRet["used_memory"] + dataSize, err = strconv.ParseUint(sizeStr, 10, 64) + if err != nil { + err = fmt.Errorf("strconv.ParseUint fail,err:%v,value:%s", err, sizeStr) + mylog.Logger.Error(err.Error()) + return + } + return +} + +// TendisSSDDataSize 获取tendisSSD数据量大小,单位 byte +func (db *RedisClient) TendisSSDDataSize() (rockdbSize uint64, err error) { + // 命令'info',只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("TendisSSDDataSize redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return + } + infoRet, err := db.Info("") + if err != nil { + return 0, err + } + + rockdbSize = 0 + levelHeadRegexp := regexp.MustCompile(`^level-\d+$`) + levelDataRegexp := regexp.MustCompile(`^bytes=(\d+),num_entries=(\d+),num_deletions=(\d+)`) + for k01, v01 := range infoRet { + if levelHeadRegexp.MatchString(k01) { + list01 := levelDataRegexp.FindStringSubmatch(v01) + if len(list01) != 4 { + err = fmt.Errorf("redis:%s info 'RocksDB Level stats' format not correct,%s:%s", db.Addr, k01, v01) + mylog.Logger.Error(err.Error()) + return + } + size01, _ := strconv.ParseUint(list01[1], 10, 64) + rockdbSize = rockdbSize + size01 + } + } + return +} + +// TendisSSDBinlogSize tendis ssd binlog size +type TendisSSDBinlogSize struct { + FirstSeq uint64 `json:"firstSeq"` + EndSeq uint64 `json:"endSeq"` +} + +// String 字符串 +func (t *TendisSSDBinlogSize) String() string { + return fmt.Sprintf("[%d,%d]", t.FirstSeq, t.EndSeq) +} + +// parseBinlogSizeCmdRet 解析tendisSSD binlogsize命令的结果 +func (db *RedisClient) parseBinlogSizeCmdRet(cmdRet interface{}) (ret TendisSSDBinlogSize, err error) { + sizeInfos, ok := cmdRet.([]interface{}) + if ok == false { + err = fmt.Errorf("parseBinlogSizeCmdRet 'binlogsize' result not []interface{},cmdRet:%v,nodeAddr:%s", + cmdRet, db.Addr) + mylog.Logger.Error(err.Error()) + return ret, err + } + if len(sizeInfos) != 4 { + err = fmt.Errorf("'binlogsize' result not correct,length:%d != 4,data:%+v,addr:%s", + len(sizeInfos), sizeInfos, db.Addr) + mylog.Logger.Error(err.Error()) + return ret, err + } + firstSeqStr := sizeInfos[1].(string) + endSeqStr := sizeInfos[3].(string) + + ret.FirstSeq, err = strconv.ParseUint(firstSeqStr, 10, 64) + if err != nil { + err = fmt.Errorf("'binlogsize' firstSeq:%s to uint64 fail,err:%v,data:%+v,addr:%s", + firstSeqStr, err, sizeInfos, db.Addr) + mylog.Logger.Error(err.Error()) + return ret, err + } + ret.EndSeq, err = 
strconv.ParseUint(endSeqStr, 10, 64) + if err != nil { + err = fmt.Errorf("'binlogsize' endSeq:%s to uint64 fail,err:%v,data:%+v,addr:%s", + endSeqStr, err, sizeInfos, db.Addr) + mylog.Logger.Error(err.Error()) + return ret, err + } + return ret, nil +} + +// TendisSSDBinlogSize binlogsize +func (db *RedisClient) TendisSSDBinlogSize() (ret TendisSSDBinlogSize, err error) { + // 命令'info',只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("TendisSSDDataSize redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return + } + cmd := []interface{}{"binlogsize"} + ret = TendisSSDBinlogSize{} + sizeRet, err := db.InstanceClient.Do(context.TODO(), cmd...).Result() + if err != nil { + err = fmt.Errorf("TendisSSDBinlogSize fail,cmd:%v fail,err:%v,addr:%s", cmd, err, db.Addr) + mylog.Logger.Error(err.Error()) + return ret, err + } + return db.parseBinlogSizeCmdRet(sizeRet) +} + +// Randomkey command +func (db *RedisClient) Randomkey() (key string, err error) { + // 命令'RANDOMKEY',只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("Randomkey redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return + } + key, err = db.InstanceClient.RandomKey(context.TODO()).Result() + if err != nil && err != redis.Nil { + err = fmt.Errorf("redis:%s 'randomkey' failed,err:%v", db.Addr, err) + mylog.Logger.Error(err.Error()) + return + } + return key, nil +} + +// Shutdown command +func (db *RedisClient) Shutdown() (err error) { + // 命令'shutdown',只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("Shutdown redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return + } + redisCliBin := filepath.Join(consts.UsrLocal, "redis/bin/redis-cli") + if util.FileExists(redisCliBin) { + // 如果redis-cli存在,则优先使用redis-cli 执行shutdown + // db.InstanceClient.Shutdown() 会返回一些其他错误 + var opt string + if util.IsCliSupportedNoAuthWarning(redisCliBin) { + opt = "--no-auth-warning" + } + l01 := strings.Split(db.Addr, ":") + cmd := fmt.Sprintf("%s -h %s -p %s -a %s %s shutdown", + redisCliBin, l01[0], l01[1], db.Password, opt) + logcmd := fmt.Sprintf("%s -h %s -p %s -a xxxx %s shutdown", + redisCliBin, l01[0], l01[1], opt) + mylog.Logger.Info(logcmd) + _, err = util.RunBashCmd(cmd, "", nil, 1*time.Minute) + if err != nil { + return + } + return + } + + db.InstanceClient.Shutdown(context.TODO()).Result() + return nil +} + +// IsReplicaStatusOk '我'是slave,判断我和master的复制状态是否ok +func (db *RedisClient) IsReplicaStatusOk(masterIP, masterPort string) (ok bool, err error) { + var infoRet map[string]string + ok = false + infoRet, err = db.Info("replication") + if err != nil { + return + } + replRole := infoRet["role"] + if replRole != consts.RedisSlaveRole { + return false, nil + } + replMasterHost := infoRet["master_host"] + replMasterPort := infoRet["master_port"] + replLinkStatus := infoRet["master_link_status"] + if replMasterHost != masterIP || replMasterPort != masterPort { + err = fmt.Errorf("slave(%s) 'info replication' master(%s:%s) != (%s:%s)", + db.Addr, replMasterHost, replMasterPort, masterIP, masterPort) + return + } + if replLinkStatus != consts.MasterLinkStatusUP { + err = fmt.Errorf("slave(%s) 'info replication' master(%s:%s) master_link_status:%s", + db.Addr, replMasterHost, replMasterPort, replLinkStatus) + return + } + return true, nil +} + +// IsTendisSSDReplicaStatusOk '我'是tendisssd slave,判断我和master的复制状态是否ok +func (db *RedisClient) IsTendisSSDReplicaStatusOk(masterIP, 
masterPort string) (ok bool, err error) { + ok, err = db.IsReplicaStatusOk(masterIP, masterPort) + if err != nil { + return + } + if !ok { + return + } + ok = false + // master上执行 info slaves,结果中 slave的状态必须是 IncrSync/REPL_FOLLOW + var confRet map[string]string + var masterCli *RedisClient + var slavesState TendisSSDInfoSlavesData + masterAddr := masterIP + ":" + masterPort + confRet, err = db.ConfigGet("masterauth") + if err != nil { + return + } + masterAuth := confRet["masterauth"] + masterCli, err = NewRedisClient(masterAddr, masterAuth, 0, consts.TendisTypeRedisInstance) + if err != nil { + return + } + defer masterCli.Close() + + slavesState, err = masterCli.TendisSSDInfoSlaves() + if err != nil { + return + } + if len(slavesState.SlaveList) == 0 { + err = fmt.Errorf("slave(%s) master_link_status:up but master(%s) 'info slaves' not found slaves", db.Addr, masterAddr) + return + } + for _, slave01 := range slavesState.SlaveList { + slaveItem := slave01 + if slaveItem.Addr() == db.Addr { + if slaveItem.State == consts.TendisSSDIncrSyncState || + slaveItem.State == consts.TendisSSDReplFollowtate { + return true, nil + } + mylog.Logger.Info("master(%s) 'info slaves' ret:%s", masterAddr, slavesState.String()) + err = fmt.Errorf( + "slave(%s) master_link_status:up but master(%s) 'info slaves' slave.state:%s != IncrSync|REPL_FOLLOW", + db.Addr, masterAddr, slaveItem.State) + return + } + } + mylog.Logger.Info("master(%s) 'info slaves' ret:%s", masterAddr, slavesState.String()) + err = fmt.Errorf("slave(%s) master_link_status:up but master(%s) 'info slaves' not found record", db.Addr, masterAddr) + return +} + +// Set set $k $v ex/px $expiration +func (db *RedisClient) Set(k string, val interface{}, expiration time.Duration) (ret string, err error) { + if db.DbType == consts.TendisTypeRedisCluster { + ret, err = db.ClusterClient.Set(context.TODO(), k, val, expiration).Result() + } else { + ret, err = db.InstanceClient.Set(context.TODO(), k, val, expiration).Result() + } + if err != nil { + err = fmt.Errorf("'set %s %v ex %d' fail,err:%v,addr:%s", k, val, int(expiration.Seconds()), err, db.Addr) + mylog.Logger.Info(err.Error()) + return + } + return +} + +// Mset mset ... +func (db *RedisClient) Mset(vals []interface{}) (ret string, err error) { + if db.DbType == consts.TendisTypeRedisCluster { + ret, err = db.ClusterClient.MSet(context.TODO(), vals...).Result() + } else { + ret, err = db.InstanceClient.MSet(context.TODO(), vals...).Result() + } + if err != nil { + err = fmt.Errorf("mset %+v fail,err:%v,addr:%s", vals, err, db.Addr) + mylog.Logger.Info(err.Error()) + return + } + return +} + +// Hmset hmset ... +func (db *RedisClient) Hmset(k string, vals []interface{}) (ret bool, err error) { + if db.DbType == consts.TendisTypeRedisCluster { + ret, err = db.ClusterClient.HMSet(context.TODO(), k, vals...).Result() + } else { + ret, err = db.InstanceClient.HMSet(context.TODO(), k, vals...).Result() + } + if err != nil { + err = fmt.Errorf("hmset %s %+v fail,err:%v,addr:%s", k, vals, err, db.Addr) + mylog.Logger.Info(err.Error()) + return + } + return +} + +// Rpush rpush ... 
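+// (dispatches to the cluster or instance client; a minimal usage sketch with
+// assumed values, not part of this patch:
+//   n, err := cli.Rpush("mylist", []interface{}{"a", "b"})
+// where n is the length of the list after the push)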
+func (db *RedisClient) Rpush(k string, vals []interface{}) (ret int64, err error) { + if db.DbType == consts.TendisTypeRedisCluster { + ret, err = db.ClusterClient.RPush(context.TODO(), k, vals...).Result() + } else { + ret, err = db.InstanceClient.RPush(context.TODO(), k, vals...).Result() + } + if err != nil { + err = fmt.Errorf("rpush %s %+v fail,err:%v,addr:%s", k, vals, err, db.Addr) + mylog.Logger.Info(err.Error()) + return + } + return +} + +// Sadd sadd ... +func (db *RedisClient) Sadd(k string, vals []interface{}) (ret int64, err error) { + if db.DbType == consts.TendisTypeRedisCluster { + ret, err = db.ClusterClient.SAdd(context.TODO(), k, vals...).Result() + } else { + ret, err = db.InstanceClient.SAdd(context.TODO(), k, vals...).Result() + } + if err != nil { + err = fmt.Errorf("Sadd %s %+v fail,err:%v,addr:%s", k, vals, err, db.Addr) + mylog.Logger.Info(err.Error()) + return + } + return +} + +// Zadd zadd ... +func (db *RedisClient) Zadd(k string, members []*redis.Z) (ret int64, err error) { + if db.DbType == consts.TendisTypeRedisCluster { + ret, err = db.ClusterClient.ZAdd(context.TODO(), k, members...).Result() + } else { + ret, err = db.InstanceClient.ZAdd(context.TODO(), k, members...).Result() + } + if err != nil { + err = fmt.Errorf("Zadd %s %+v fail,err:%v,addr:%s", k, members, err, db.Addr) + mylog.Logger.Info(err.Error()) + return + } + return +} + +// Zrem zrem ... +func (db *RedisClient) Zrem(k string, members ...interface{}) (ret int64, err error) { + if db.DbType == consts.TendisTypeRedisCluster { + ret, err = db.ClusterClient.ZRem(context.TODO(), k, members...).Result() + } else { + ret, err = db.InstanceClient.ZRem(context.TODO(), k, members...).Result() + } + if err != nil { + err = fmt.Errorf("Zrem %s %+v fail,err:%v,addr:%s", k, members, err, db.Addr) + mylog.Logger.Info(err.Error()) + return + } + return +} + +// AdminSet tendisplus 'adminset' 命令 +func (db *RedisClient) AdminSet(key, val string) (ret string, err error) { + var ok bool + // 命令'adminset ',只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("'adminset' redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return + } + cmd := []interface{}{"adminset", key, val} + adminsetRet, err := db.InstanceClient.Do(context.TODO(), cmd...).Result() + if err != nil { + err = fmt.Errorf("redis:%s 'adminset %s' fail,err:%v\n", db.Addr, key, err) + mylog.Logger.Error(err.Error()) + return + } + ret, ok = adminsetRet.(string) + if ok == false { + err = fmt.Errorf("'adminset %s %s' result not string,ret:%+v,nodeAddr:%s", key, val, adminsetRet, db.Addr) + mylog.Logger.Error(err.Error()) + return + } + return +} + +// GetTendisplusHeartbeat 获取tendisplus 心跳数据 +/* for example: +> adminget 1.1.1.1:heartbeat + 1) 1) "0" + 2) "2021-06-01 16:47:00" + 2) 1) "1" + 2) "2021-06-01 16:47:00" + 3) 1) "2" + 2) "2021-06-01 16:47:00" + 4) 1) "3" + 2) "2021-06-01 16:47:00" + 5) 1) "4" + 2) "2021-06-01 16:47:00" + 6) 1) "5" + 2) "2021-06-01 16:47:00" + 7) 1) "6" + 2) "2021-06-01 16:47:00" + 8) 1) "7" + 2) "2021-06-01 16:47:00" + 9) 1) "8" + 2) "2021-06-01 16:47:00" +10) 1) "9" + 2) "2021-06-01 16:47:00" +*/ +func (db *RedisClient) GetTendisplusHeartbeat(key string) (heartbeat map[int]time.Time, err error) { + // 命令'adminget ',只能用 普通redis client + if db.InstanceClient == nil { + err = fmt.Errorf("'adminget' redis:%s must create a standalone client", db.Addr) + mylog.Logger.Error(err.Error()) + return + } + heartbeat = make(map[int]time.Time) + cmd := []interface{}{"adminget", 
key} + adminGetRet, err := db.InstanceClient.Do(context.TODO(), cmd...).Result() + if err != nil { + err = fmt.Errorf("redis:%s 'adminget %s' fail,err:%v\n", db.Addr, key, err) + mylog.Logger.Error(err.Error()) + return heartbeat, err + } + adminGetRets, ok := adminGetRet.([]interface{}) + if ok == false { + err = fmt.Errorf("GetTendisplusHeartbeat 'adminget %s' result not []interface{},nodeAddr:%s", key, db.Addr) + mylog.Logger.Error(err.Error()) + return heartbeat, err + } + var storeID int + var value, storeIDStr string + for _, confItem := range adminGetRets { + conf01 := confItem.([]interface{}) + if conf01[1] == nil { + continue + } + storeIDStr = conf01[0].(string) + value = conf01[1].(string) + storeID, _ = strconv.Atoi(storeIDStr) + heartbeat[storeID], _ = time.ParseInLocation(consts.UnixtimeLayout, value, time.Local) + } + return heartbeat, nil +} + +// Close 关闭连接 +func (db *RedisClient) Close() { + if db.InstanceClient == nil && db.ClusterClient == nil { + return + } + + if db.DbType == consts.TendisTypeRedisCluster { + db.ClusterClient.Close() + db.ClusterClient = nil + return + } + db.InstanceClient.Close() + db.InstanceClient = nil + return +} diff --git a/dbm-services/redis/db-tools/dbactuator/models/myredis/cluster_info.go b/dbm-services/redis/db-tools/dbactuator/models/myredis/cluster_info.go new file mode 100644 index 0000000000..e9fe7bb9f0 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/models/myredis/cluster_info.go @@ -0,0 +1,103 @@ +package myredis + +import ( + "context" + "dbm-services/redis/db-tools/dbactuator/mylog" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "fmt" + "strconv" + "strings" +) + +// CmdClusterInfo 命令:cluster info的结果 +type CmdClusterInfo struct { + ClusterState string `json:"cluster_state"` + ClusterSlotsAssigned int `json:"cluster_slots_assigned"` + ClusterSlotsOk int `json:"cluster_slots_ok"` + ClusterSlotsPfail int `json:"cluster_slots_pfail"` + ClusterSlotsFail int `json:"cluster_slots_fail"` + ClusterKnownNodes int `json:"cluster_known_nodes"` + ClusterSize int `json:"cluster_size"` + ClusterCurrentEpoch int `json:"cluster_current_epoch"` + ClusterMyEpoch int `json:"cluster_my_epoch"` + ClusterStatsMessagesPingSent uint64 `json:"cluster_stats_messages_ping_sent"` + ClusterStatsMessagesPongSent uint64 `json:"cluster_stats_messages_pong_sent"` + ClusterStatsMessagesMeetSent uint64 `json:"cluster_stats_messages_meet_sent"` + ClusterStatsMessagesPublishSent uint64 `json:"cluster_stats_messages_publish_sent"` + ClusterStatsMessagesUpdateSent uint64 `json:"cluster_stats_messages_update_sent"` + ClusterStatsMessagesSent uint64 `json:"cluster_stats_messages_sent"` + ClusterStatsMessagesPingReceived uint64 `json:"cluster_stats_messages_ping_received"` + ClusterStatsMessagesPongReceived uint64 `json:"cluster_stats_messages_pong_received"` + ClusterStatsMessagesMeetReceived uint64 `json:"cluster_stats_messages_meet_received"` + ClusterStatsMessagesUpdateReceived uint64 `json:"cluster_stats_messages_update_received"` + ClusterStatsMessagesReceived uint64 `json:"cluster_stats_messages_received"` +} + +// DecodeClusterInfo 解析cluster info命令结果 +func DecodeClusterInfo(cmdRet string) (clusterInfo *CmdClusterInfo) { + clusterInfo = &CmdClusterInfo{} + list01 := strings.Split(cmdRet, "\n") + for _, item01 := range list01 { + item01 = strings.TrimSpace(item01) + if len(item01) == 0 { + continue + } + list02 := strings.SplitN(item01, ":", 2) + if len(list02) < 2 { + continue + } + if list02[0] == "cluster_state" { + clusterInfo.ClusterState = 
list02[1] + } else if list02[0] == "cluster_slots_assigned" { + clusterInfo.ClusterSlotsAssigned, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "cluster_slots_ok" { + clusterInfo.ClusterSlotsOk, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "cluster_slots_pfail" { + clusterInfo.ClusterSlotsPfail, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "cluster_slots_fail" { + clusterInfo.ClusterSlotsFail, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "cluster_known_nodes" { + clusterInfo.ClusterKnownNodes, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "cluster_size" { + clusterInfo.ClusterSize, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "cluster_current_epoch" { + clusterInfo.ClusterCurrentEpoch, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "cluster_my_epoch" { + clusterInfo.ClusterMyEpoch, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "cluster_stats_messages_ping_sent" { + clusterInfo.ClusterStatsMessagesPingSent, _ = strconv.ParseUint(list02[1], 10, 64) + } else if list02[0] == "cluster_stats_messages_pong_sent" { + clusterInfo.ClusterStatsMessagesPongSent, _ = strconv.ParseUint(list02[1], 10, 64) + } else if list02[0] == "cluster_stats_messages_meet_sent" { + clusterInfo.ClusterStatsMessagesMeetSent, _ = strconv.ParseUint(list02[1], 10, 64) + } else if list02[0] == "cluster_stats_messages_publish_sent" { + clusterInfo.ClusterStatsMessagesPublishSent, _ = strconv.ParseUint(list02[1], 10, 64) + } else if list02[0] == "cluster_stats_messages_update_sent" { + clusterInfo.ClusterStatsMessagesUpdateSent, _ = strconv.ParseUint(list02[1], 10, 64) + } else if list02[0] == "cluster_stats_messages_sent" { + clusterInfo.ClusterStatsMessagesSent, _ = strconv.ParseUint(list02[1], 10, 64) + } else if list02[0] == "cluster_stats_messages_ping_received" { + clusterInfo.ClusterStatsMessagesPingReceived, _ = strconv.ParseUint(list02[1], 10, 64) + } else if list02[0] == "cluster_stats_messages_pong_received" { + clusterInfo.ClusterStatsMessagesPongReceived, _ = strconv.ParseUint(list02[1], 10, 64) + } else if list02[0] == "cluster_stats_messages_meet_received" { + clusterInfo.ClusterStatsMessagesMeetReceived, _ = strconv.ParseUint(list02[1], 10, 64) + } else if list02[0] == "cluster_stats_messages_update_received" { + clusterInfo.ClusterStatsMessagesUpdateReceived, _ = strconv.ParseUint(list02[1], 10, 64) + } else if list02[0] == "cluster_stats_messages_received" { + clusterInfo.ClusterStatsMessagesReceived, _ = strconv.ParseUint(list02[1], 10, 64) + } + } + return +} + +// ClusterInfo 获取cluster info结果并解析 +func (db *RedisClient) ClusterInfo() (clusterInfo *CmdClusterInfo, err error) { + var ret01 string + if db.DbType == consts.TendisTypeRedisCluster { + ret01, err = db.ClusterClient.ClusterInfo(context.TODO()).Result() + } else { + ret01, err = db.InstanceClient.ClusterInfo(context.TODO()).Result() + } + if err != nil { + err = fmt.Errorf("ClusterInfo execute cluster info fail,err:%v,clusterAddr:%s", err, db.Addr) + mylog.Logger.Error(err.Error()) + return nil, err + } + clusterInfo = DecodeClusterInfo(ret01) + return clusterInfo, nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/models/myredis/cluster_nodes.go b/dbm-services/redis/db-tools/dbactuator/models/myredis/cluster_nodes.go new file mode 100644 index 0000000000..71d678ea86 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/models/myredis/cluster_nodes.go @@ -0,0 +1,441 @@ +package myredis + +import ( + "dbm-services/redis/db-tools/dbactuator/mylog" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "fmt" + "strconv" + "strings" +) + +const ( + slotSeparator = "-" + importingSeparator = "-<-" + migratingSeparator = "->-" +) + +// ClusterNodeData 获取并解析 cluster nodes命令结果 +type ClusterNodeData struct { + NodeID string `json:"ID"` + Addr string `json:"addr"` + IP string `json:"ip"` // 如果是k8s中的redis cluster,则ip代表节点的pod名,否则ip表示redis IP
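+ // 以下字段解析自'cluster nodes'输出的各列(端口、角色、链接状态、slot分布等),见下方DecodeClusterNodes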
+ Port int `json:"port"` + CPort int `json:"cport"` + Role string `json:"role"` // master or slave + IsMyself bool `json:"is_myself"` + LinkState string `json:"link_state"` // connected or disconnected + MasterID string `json:"master_id"` + FailStatus []string `json:"fail_status"` + PingSent int64 `json:"ping_sent"` + PongRecv int64 `json:"pong_recv"` + ConfigEpoch int64 `json:"config_epoch"` + SlotSrcStr string `json:"slot_src_str"` + Slots []int `json:"slots"` + SlotsMap map[int]bool `json:"slots_map"` // convenient to know whether certain slots belong to the node + MigratingSlots map[int]string `json:"migrating_slots"` // key:slot,value:dst redis ID + ImportingSlots map[int]string `json:"importing_slots"` // key:slot,value:src redis ID + + balance int `json:"-"` // NOCC:vet/vet(设计如此) (扩缩容)迁移slot时使用 + endSlotIdx int `json:"-"` // NOCC:vet/vet(设计如此) +} + +// NewDefaultNode builds and returns new defaultNode instance +func NewDefaultNode() *ClusterNodeData { + return &ClusterNodeData{ + Slots: []int{}, + SlotsMap: map[int]bool{}, + MigratingSlots: map[int]string{}, + ImportingSlots: map[int]string{}, + } +} + +// String 用于打印 +func (n *ClusterNodeData) String() string { + return fmt.Sprintf( + `{Redis ID:%s,Addr:%s,role:%s,master:%s,link:%s,status:%s,slots:%s,len(migratingSlots):%d,len(importingSlots):%d}`, + n.NodeID, n.Addr, n.GetRole(), n.MasterID, n.LinkState, n.FailStatus, + ConvertSlotToStr(n.Slots), len(n.MigratingSlots), len(n.ImportingSlots)) +} + +// Balance the expected number of slots for each tendis during scale up or scale down +func (n *ClusterNodeData) Balance() int { + return n.balance +} + +// SetBalance set the balance value +func (n *ClusterNodeData) SetBalance(balance int) { + n.balance = balance +} + +// EndSlotIdx end slot idx +func (n *ClusterNodeData) EndSlotIdx() int { + return n.endSlotIdx +} + +// SetEndSlotIdx set end slot idx +func (n *ClusterNodeData) SetEndSlotIdx(idx int) { + n.endSlotIdx = idx +} + +// SetRole sets the Node's role from a flags string list +func (n *ClusterNodeData) SetRole(flags string) error { + n.Role = "" // reset value before setting the new one + vals := strings.Split(flags, ",") + for _, val := range vals { + switch val { + case consts.RedisMasterRole: + n.Role = consts.RedisMasterRole + case consts.RedisSlaveRole: + n.Role = consts.RedisSlaveRole + } + } + + if n.Role == "" { + err := fmt.Errorf("node setRole failed,addr:%s,flags:%s", n.Addr, flags) + return err + } + + return nil +} + +// GetRole return the Redis Cluster Node role +func (n *ClusterNodeData) GetRole() string { + switch n.Role { + case consts.RedisMasterRole: + return consts.RedisMasterRole + case consts.RedisSlaveRole: + return consts.RedisSlaveRole + default: + if n.MasterID != "" { + return consts.RedisSlaveRole + } + if len(n.Slots) > 0 { + return consts.RedisMasterRole + } + } + + return consts.RedisNoneRole +} + +// SlotCnt slot count +func (n *ClusterNodeData) SlotCnt() int { + return len(n.Slots) +} + +// SetLinkStatus sets the Node link status +func (n *ClusterNodeData) SetLinkStatus(status string) error { + n.LinkState = "" // reset value before setting the new one + switch status { + case consts.RedisLinkStateConnected: + n.LinkState = consts.RedisLinkStateConnected + case consts.RedisLinkStateDisconnected: + n.LinkState = consts.RedisLinkStateDisconnected + } + + if n.LinkState == "" { + err := fmt.Errorf("Node SetLinkStatus failed,addr:%s,status:%s", n.Addr, status) + return err + } + + return nil +} + +// SetFailureStatus set from inputs
flags the possible failure status +func (n *ClusterNodeData) SetFailureStatus(flags string) { + n.FailStatus = []string{} // reset value before setting the new one + vals := strings.Split(flags, ",") + for _, val := range vals { + switch val { + case consts.NodeStatusFail: + n.FailStatus = append(n.FailStatus, consts.NodeStatusFail) + case consts.NodeStatusPFail: + n.FailStatus = append(n.FailStatus, consts.NodeStatusPFail) + case consts.NodeStatusHandshake: + n.FailStatus = append(n.FailStatus, consts.NodeStatusHandshake) + case consts.NodeStatusNoAddr: + n.FailStatus = append(n.FailStatus, consts.NodeStatusNoAddr) + case consts.NodeStatusNoFlags: + n.FailStatus = append(n.FailStatus, consts.NodeStatusNoFlags) + } + } +} + +// SetReferentMaster set the redis node parent referent +func (n *ClusterNodeData) SetReferentMaster(ref string) { + n.MasterID = "" + if ref == "-" { + return + } + n.MasterID = ref +} + +// DecodeClusterNodes decode from the cmd output the Redis nodes info. +// Second argument is the node on which we are connected to request info +func DecodeClusterNodes(input string) ([]*ClusterNodeData, error) { + infos := []*ClusterNodeData{} + lines := strings.Split(input, "\n") + for _, line := range lines { + values := strings.Fields(line) + if len(values) < 8 { + // last line is always empty + // not enough values in line split, skip line + mylog.Logger.Info(fmt.Sprintf("not enough values in line split, ignoring line: '%s'", line)) + continue + } else { + node := NewDefaultNode() + + node.NodeID = values[0] + // remove trailing port for cluster internal protocol + ipPort := strings.Split(values[1], "@") + node.Addr = ipPort[0] + if node.Addr != "" { + list02 := strings.Split(node.Addr, ":") + if util.IsValidIP(list02[0]) { + node.IP = list02[0] + } else { + l01 := strings.Split(node.Addr, ".") + if len(l01) > 0 { + node.IP = l01[0] + } + } + node.Port, _ = strconv.Atoi(list02[1]) + } + node.CPort, _ = strconv.Atoi(ipPort[1]) + node.SetRole(values[2]) + node.SetFailureStatus(values[2]) + node.SetReferentMaster(values[3]) + if i, err := strconv.ParseInt(values[4], 10, 64); err == nil { + node.PingSent = i + } + if i, err := strconv.ParseInt(values[5], 10, 64); err == nil { + node.PongRecv = i + } + if i, err := strconv.ParseInt(values[6], 10, 64); err == nil { + node.ConfigEpoch = i + } + node.SetLinkStatus(values[7]) + + for _, slot := range values[8:] { + if node.SlotSrcStr == "" { + node.SlotSrcStr = slot + } else { + node.SlotSrcStr = fmt.Sprintf("%s %s", node.SlotSrcStr, slot) + } + slots01, _, importingSlots, migratingSlots, err := DecodeSlotsFromStr(slot, " ") + if err != nil { + return infos, err + } + node.Slots = append(node.Slots, slots01...) 
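+ // 将本段解析出的slots同步登记到SlotsMap/ImportingSlots/MigratingSlots,便于后续O(1)判断slot归属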
+ for _, s01 := range slots01 { + node.SlotsMap[s01] = true + } + for s01, nodeid := range importingSlots { + node.ImportingSlots[s01] = nodeid + } + for s01, nodeid := range migratingSlots { + node.MigratingSlots[s01] = nodeid + } + } + + if strings.HasPrefix(values[2], "myself") { + node.IsMyself = true + } + infos = append(infos, node) + } + } + + return infos, nil +} + +// IsRunningMaster anonymous function for searching running Master Node +var IsRunningMaster = func(n *ClusterNodeData) bool { + if (n.GetRole() == consts.RedisMasterRole) && + (len(n.FailStatus) == 0) && (n.LinkState == consts.RedisLinkStateConnected) { + return true + } + return false +} + +// IsMasterWithSlot anonymous function for searching Master Node withslot +var IsMasterWithSlot = func(n *ClusterNodeData) bool { + if (n.GetRole() == consts.RedisMasterRole) && (len(n.FailStatus) == 0) && + (n.LinkState == consts.RedisLinkStateConnected) && (n.SlotCnt() > 0) { + return true + } + return false +} + +// IsRunningNode anonymous function for searching running Node +var IsRunningNode = func(n *ClusterNodeData) bool { + if (len(n.FailStatus) == 0) && (n.LinkState == consts.RedisLinkStateConnected) { + return true + } + return false +} + +// DecodeSlotsFromStr 解析 slot 字符串,如 0-10,12,100-200,seq为',' +// 同时可以解析: +// migrating slot: ex: [42->-67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1] +// importing slot: ex: [42-<-67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1] +func DecodeSlotsFromStr(slotStr string, seq string) (slots []int, slotMap map[int]bool, migratingSlots, + importingSlots map[int]string, err error) { + slotMap = make(map[int]bool) + migratingSlots = make(map[int]string) + importingSlots = make(map[int]string) + var items []string + var slot int + if seq == "" || seq == " " || seq == "\t" || seq == "\n" { + items = strings.Fields(slotStr) + } else { + items = strings.Split(slotStr, seq) + } + for _, slotItem := range items { + slotItem = strings.TrimSpace(slotItem) + list02 := strings.Split(slotItem, slotSeparator) + if len(list02) == 3 { + separator := slotSeparator + list02[1] + slotSeparator + slot, err = strconv.Atoi(strings.TrimPrefix(list02[0], "[")) + if err != nil { + err = fmt.Errorf("DecodeSlotsFromStr fail,strconv.Atoi err:%v,str:%s", err, slotItem) + mylog.Logger.Error(err.Error()) + return + } + nodeID := strings.TrimSuffix(list02[2], "]") + if separator == importingSeparator { + importingSlots[slot] = nodeID + } else if separator == migratingSeparator { + migratingSlots[slot] = nodeID + } else { + err = fmt.Errorf("impossible to decode slotStr:%s", slotItem) + mylog.Logger.Error(err.Error()) + return + } + } else if len(list02) == 1 { + num01, _ := strconv.Atoi(list02[0]) + if num01 < consts.DefaultMinSlots || num01 > consts.DefaultMaxSlots { + err = fmt.Errorf("slot:%d in param:%s not correct,valid range [%d,%d]", num01, slotStr, + consts.DefaultMinSlots, consts.DefaultMaxSlots) + mylog.Logger.Error(err.Error()) + return + } + slots = append(slots, num01) + slotMap[num01] = true + } else if len(list02) == 2 { + start, _ := strconv.Atoi(list02[0]) + end, _ := strconv.Atoi(list02[1]) + if start < consts.DefaultMinSlots || start > consts.DefaultMaxSlots { + err = fmt.Errorf("slot:%d in param:%s not correct,valid range [%d,%d]", start, slotStr, + consts.DefaultMinSlots, consts.DefaultMaxSlots) + mylog.Logger.Error(err.Error()) + return + } + if end < consts.DefaultMinSlots || end > consts.DefaultMaxSlots { + err = fmt.Errorf("slot:%d in param:%s not correct,valid range [%d,%d]", end, slotStr, + 
consts.DefaultMinSlots, consts.DefaultMaxSlots) + mylog.Logger.Error(err.Error()) + return + } + for num01 := start; num01 <= end; num01++ { + slots = append(slots, num01) + slotMap[num01] = true + } + } + } + return +} + +// ConvertSlotToStr 将slots:[0,1,2,3,4,10,11,12,13,17] 按照 0-4,10-13,17 打印 +func ConvertSlotToStr(slots []int) string { + if len(slots) == 0 { + return "" + } + str01 := "" + start := slots[0] + curr := slots[0] + for _, item := range slots { + next := item + if next == curr { + continue + } + if curr == next-1 { + // slot连续,继续 + curr = next + continue + } + // slot不连续了 + if start == curr { + str01 = fmt.Sprintf("%s,%d", str01, start) + } else { + str01 = fmt.Sprintf("%s,%d-%d", str01, start, curr) + } + start = next + curr = next + } + // 最后再处理一次start curr + if start == curr { + str01 = fmt.Sprintf("%s,%d", str01, start) + } else { + str01 = fmt.Sprintf("%s,%d-%d", str01, start, curr) + } + str01 = strings.Trim(str01, ",") + return str01 +} + +// ConvertSlotToShellFormat 将slots:[0,1,2,3,4,10,11,12,13,17] 按照 {0..4} {10..13} 17 打印 +func ConvertSlotToShellFormat(slots []int) string { + if len(slots) == 0 { + return "" + } + str01 := "" + start := slots[0] + curr := slots[0] + for _, item := range slots { + next := item + if next == curr { + continue + } + if curr == next-1 { + // slot连续,继续 + curr = next + continue + } + // slot不连续了 + if start == curr { + str01 = fmt.Sprintf("%s %d", str01, start) + } else { + str01 = fmt.Sprintf("%s {%d..%d}", str01, start, curr) + } + start = next + curr = next + } + // 最后再处理一次start curr + if start == curr { + str01 = fmt.Sprintf("%s %d", str01, start) + } else { + str01 = fmt.Sprintf("%s {%d..%d}", str01, start, curr) + } + str01 = strings.Trim(str01, " ") + return str01 +} + +// SlotSliceDiff 寻找在slotB中存在,但在 slotA中不存在的slots +func SlotSliceDiff(slotsA []int, slotsB []int) (diffSlots []int) { + if len(slotsA) == 0 { + return slotsB + } + aMap := make(map[int]struct{}) + for _, slot := range slotsA { + aMap[slot] = struct{}{} + } + for _, slot := range slotsB { + if _, ok := aMap[slot]; !ok { + diffSlots = append(diffSlots, slot) + } + } + return +} diff --git a/dbm-services/redis/db-tools/dbactuator/models/myredis/cluster_nodes_test.go b/dbm-services/redis/db-tools/dbactuator/models/myredis/cluster_nodes_test.go new file mode 100644 index 0000000000..e994836437 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/models/myredis/cluster_nodes_test.go @@ -0,0 +1,89 @@ +package myredis + +import ( + "dbm-services/redis/db-tools/dbactuator/mylog" + "fmt" + "testing" + + "github.com/smartystreets/goconvey/convey" +) + +// test unit +func TestDecodeClusterNodes(t *testing.T) { + mylog.UnitTestInitLog() + convey.Convey("cluster nodes decode", t, func() { + clusterNodesStr := ` + 17922e98b0b8f7a9d233422cf8ae55f2d22fdab7 4.4.4.4:30003@40003 master - 0 1655005291000 20 connected 7509-8191 + e81c4276dce41ae3ed4a5fe18e460ed5b9f77e8b 3.3.3.3:30003@40003 slave 17922e98b0b8f7a9d233422cf8ae55f2d22fdab7 0 1655005291000 20 connected + 56e53ca70ef13f3ca1817b0746d64319a4b66fed synctest-redis-rdsplus1-0.synctest-svc.vip:30000@40000 myself,slave 72ffcd1f8d39d1b6011ab38f5e1a42dd6f66f765 0 1655006313000 3 connected + 72ffcd1f8d39d1b6011ab38f5e1a42dd6f66f765 synctest-redis-rdsplus1-1.synctest-svc.vip:30000@40000 master - 0 1655006315419 7 connected 5461-10921 + ` + nodes, err := DecodeClusterNodes(clusterNodesStr) + if err != nil { + t.Fatalf(err.Error()) + } + convey.So(len(nodes), convey.ShouldEqual, 4) + convey.So(nodes[0].NodeID, convey.ShouldEqual, 
"17922e98b0b8f7a9d233422cf8ae55f2d22fdab7") + convey.So(nodes[0].IP, convey.ShouldEqual, "4.4.4.4") + convey.So(nodes[0].Port, convey.ShouldEqual, 30003) + convey.So(nodes[0].SlotsMap, convey.ShouldContainKey, 7560) + convey.So(nodes[1].MasterID, convey.ShouldEqual, "17922e98b0b8f7a9d233422cf8ae55f2d22fdab7") + convey.So(IsMasterWithSlot(nodes[0]), convey.ShouldBeTrue) + convey.So(nodes[2].IP, convey.ShouldEqual, "synctest-redis-rdsplus1-0") + convey.So(IsMasterWithSlot(nodes[3]), convey.ShouldBeTrue) + convey.So(nodes[3].SlotsMap, convey.ShouldContainKey, 5470) + }) + + convey.Convey("cluster nodes decode2", t, func() { + clusterNodesStr := `36b96240e16051711d2391472cfd5900d33dc8bd 5.5.5.5:46000@56000 master - 0 1660014754278 5 connected +a32f9cb266d85ea96a1a87ce56872f339e2a257f 5.5.5.5:45001@55001 master - 0 1660014755280 4 connected 5462-10923 +5d555b4ab569de196f71afd275c1edf8c046959a 5.5.5.5:45000@55000 myself,master - 0 1660014753000 1 connected 0-5461 +90ed7be9db5e4b78e959ad3b40253c2ffb3d5845 5.5.5.5:46002@56002 master - 0 1660014752269 3 connected +dcff36cc5e915024d12173b1c5a3235e9186f193 5.5.5.5:46001@56001 master - 0 1660014753273 2 connected +ff29e2e2782916a0451d5f4064cb55483f4b2a97 5.5.5.5:45002@55002 master - 0 1660014753000 0 connected 10924-16383 +` + nodes, err := DecodeClusterNodes(clusterNodesStr) + if err != nil { + t.Fatalf(err.Error()) + } + var selfNode *ClusterNodeData = nil + for _, node01 := range nodes { + nodeItem := node01 + if nodeItem.IsMyself { + selfNode = nodeItem + break + } + } + fmt.Printf("%s\n", selfNode.String()) + convey.So(IsMasterWithSlot(selfNode), convey.ShouldBeTrue) + }) + + convey.Convey("decode slots from string", t, func() { + slotStr := "0-10,12,100-200" + slots, slotMap, _, _, err := DecodeSlotsFromStr(slotStr, ",") + if err != nil { + t.Fatalf(err.Error()) + } + convey.So(len(slots), convey.ShouldEqual, 11+1+101) + convey.So(slotMap, convey.ShouldContainKey, 12) + convey.So(slotMap, convey.ShouldNotContainKey, 11) + // convey.So(len(migratingSlots), convey.ShouldEqual, 0) + // convey.So(len(importingSlots), convey.ShouldEqual, 0) + + slotStr = "[93-<-292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f] [77->-e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca]" + _, _, migratingSlots, importingSlots, err := DecodeSlotsFromStr(slotStr, " ") + if err != nil { + t.Fatalf(err.Error()) + } + // convey.So(len(slots), convey.ShouldEqual, 0) + // convey.So(len(slotMap), convey.ShouldEqual, 0) + convey.So(migratingSlots, convey.ShouldContainKey, 77) + convey.So(importingSlots, convey.ShouldContainKey, 93) + convey.So(importingSlots[93], convey.ShouldEqual, "292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f") + }) + convey.Convey("convert slot slice to string", t, func() { + slots := []int{0, 1, 2, 3, 4, 10, 11, 12, 13, 17} + str01 := ConvertSlotToStr(slots) + convey.So(str01, convey.ShouldEqual, "0-4,10-13,17") + }) +} diff --git a/dbm-services/redis/db-tools/dbactuator/models/myredis/myredis.go b/dbm-services/redis/db-tools/dbactuator/models/myredis/myredis.go new file mode 100644 index 0000000000..78669a6058 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/models/myredis/myredis.go @@ -0,0 +1,107 @@ +// Package myredis 该文件中保存一些公共函数 +package myredis + +import ( + "dbm-services/redis/db-tools/dbactuator/mylog" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "fmt" + "path/filepath" + "strconv" + "strings" + "sync" + "time" +) + +// GetRedisLoccalConfFile 本地获取redis实例配置文件 +func GetRedisLoccalConfFile(port int) 
(confFile string, err error) { + dataDir := consts.GetRedisDataDir() + instConf := filepath.Join(dataDir, "redis", strconv.Itoa(port), "instance.conf") + redisConf := filepath.Join(dataDir, "redis", strconv.Itoa(port), "redis.conf") + if util.FileExists(instConf) { + return instConf, nil + } + if util.FileExists(redisConf) { + return redisConf, nil + } + err = fmt.Errorf("[%s,%s] not exists", instConf, redisConf) + mylog.Logger.Error(err.Error()) + return +} + +// GetPasswordFromLocalConfFile (从配置文件中)获取本地redis实例密码 +func GetPasswordFromLocalConfFile(port int) (password string, err error) { + confFile, err := GetRedisLoccalConfFile(port) + if err != nil { + err = fmt.Errorf("get redis local config file failed,err:%v,port:%d", err, port) + mylog.Logger.Error(err.Error()) + return + } + cmd01 := fmt.Sprintf(`grep -E '^requirepass' %s|awk '{print $2}'|head -1`, confFile) + password, err = util.RunBashCmd(cmd01, "", nil, 10*time.Second) + if err != nil { + return + } + password = strings.TrimPrefix(password, "\"") + password = strings.TrimSuffix(password, "\"") + return +} + +type connTestItem struct { + IP string + Port int + Password string + Err error +} + +func (c *connTestItem) addr() string { + return c.IP + ":" + strconv.Itoa(c.Port) +} + +// LocalRedisConnectTest 本地Redis连接性测试 +// 从本地获取redis的password,并确认每个redis是否可链接 +func LocalRedisConnectTest(ip string, ports []int, password string) (err error) { + if len(ports) == 0 { + err = fmt.Errorf("LocalRedisConnectTest ports(%+v) cannot be empty", ports) + return + } + l01 := make([]*connTestItem, 0, len(ports)) + for _, port := range ports { + if password == "" { + password, err = GetPasswordFromLocalConfFile(port) + if err != nil { + return + } + } + l01 = append(l01, &connTestItem{ + IP: ip, + Port: port, + Password: password, + }) + } + // 并发测试 + wg := sync.WaitGroup{} + for _, item := range l01 { + test01 := item + wg.Add(1) + go func(test01 *connTestItem) { + defer wg.Done() + cli01, err := NewRedisClientWithTimeout(test01.addr(), test01.Password, 0, + consts.TendisTypeRedisInstance, 10*time.Second) + if err != nil { + test01.Err = err + return + } + cli01.Close() + }(test01) + } + wg.Wait() + + for _, item := range l01 { + test01 := item + if test01.Err != nil { + return test01.Err + } + } + return +} diff --git a/dbm-services/redis/db-tools/dbactuator/models/myredis/slot.go b/dbm-services/redis/db-tools/dbactuator/models/myredis/slot.go new file mode 100644 index 0000000000..6f33a66ab6 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/models/myredis/slot.go @@ -0,0 +1,238 @@ +package myredis + +import ( + "dbm-services/redis/db-tools/dbactuator/mylog" + "fmt" + "sort" + "strconv" + "strings" +) + +// Slot represent a Redis Cluster slot +type Slot int + +// String string representation of a slot +func (s Slot) String() string { + return strconv.FormatUint(uint64(s), 10) +} + +// SlotSlice attaches the methods of sort.Interface to []string, sorting in increasing order. 
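+// 用法: sort.Sort(SlotSlice(slots)) 即可按slot升序排列(下方SlotRangesFromSlots中即如此使用)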
+type SlotSlice []Slot + +// Len 用于排序 +func (s SlotSlice) Len() int { return len(s) } + +// Less 用于排序 +func (s SlotSlice) Less(i, j int) bool { return s[i] < s[j] } + +// Swap 用于排序 +func (s SlotSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// String 用于打印 +func (s SlotSlice) String() string { + return fmt.Sprintf("%s", SlotRangesFromSlots(s)) +} + +// DecodeSlot parses a string representation of a slot +func DecodeSlot(s string) (Slot, error) { + slot, err := strconv.ParseUint(s, 10, 64) + return Slot(slot), err +} + +// SlotRange represents a range of slots +type SlotRange struct { + Min Slot `json:"min"` + Max Slot `json:"max"` +} + +// String string representation of a SlotRange +func (s SlotRange) String() string { + return s.Min.String() + slotSeparator + s.Max.String() +} + +// Total returns the total number of slots in the range +func (s SlotRange) Total() int { + return int(s.Max - s.Min + 1) +} + +// ImportingSlot represents an importing slot (slot + importing from node id) +type ImportingSlot struct { + SlotID Slot `json:"slot"` + FromNodeID string `json:"fromNodeId"` +} + +// String string representation of an importing slot +func (s ImportingSlot) String() string { + return s.SlotID.String() + importingSeparator + s.FromNodeID +} + +// MigratingSlot represents a migrating slot (slot + migrating to node id) +type MigratingSlot struct { + SlotID Slot `json:"slot"` + ToNodeID string `json:"toNodeId"` +} + +// String string representation of a migrating slot +func (s MigratingSlot) String() string { + return s.SlotID.String() + migratingSeparator + s.ToNodeID +} + +// DecodeSlotRange decodes a SlotRange from a string +// +// each entry can have 4 representations: +// * single slot: ex: 42 +// * slot range: ex: 42-52 +// * migrating slot: ex: [42->-67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1] +// * importing slot: ex: [42-<-67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1] +func DecodeSlotRange(str string) ([]Slot, *ImportingSlot, *MigratingSlot, error) { + val := strings.Split(str, slotSeparator) + var min, max, slot Slot + slots := []Slot{} + var err error + if len(val) == 3 { + // migrating or importing slot + separator := slotSeparator + val[1] + slotSeparator + slot, err = DecodeSlot(strings.TrimPrefix(val[0], "[")) + if err != nil { + mylog.Logger.Error(fmt.Sprintf("DecodeSlotRange fail,err:%v,str:%s", err, str)) + return slots, nil, nil, err + } + if separator == importingSeparator { + return slots, &ImportingSlot{SlotID: slot, FromNodeID: strings.TrimSuffix(val[2], "]")}, nil, err + } else if separator == migratingSeparator { + return slots, nil, &MigratingSlot{SlotID: slot, ToNodeID: strings.TrimSuffix(val[2], "]")}, err + } else { + err = fmt.Errorf("impossible to decode slot %s", str) + mylog.Logger.Error(err.Error()) + return slots, nil, nil, err + } + } else if len(val) > 0 { + min, err = DecodeSlot(val[0]) + if err != nil { + mylog.Logger.Error(fmt.Sprintf("DecodeSlotRange fail,err:%v,str:%s", err, str)) + return slots, nil, nil, err + } + if len(val) > 1 { + max, err = DecodeSlot(val[1]) + if err != nil { + mylog.Logger.Error(fmt.Sprintf("DecodeSlotRange fail,err:%v,str:%s", err, str)) + return slots, nil, nil, err + } + } else { + max = min + } + } else { + err = fmt.Errorf("impossible to decode slot '%s'", str) + mylog.Logger.Error(err.Error()) + return slots, nil, nil, err + } + + slots = BuildSlotSlice(min, max) + + return slots, nil, nil, err +} + +// SlotRangesFromSlots returns a slice of slot ranges from a slice of slots +func SlotRangesFromSlots(slots []Slot)
[]SlotRange { + ranges := []SlotRange{} + min := Slot(0) + max := Slot(0) + first := true + sort.Sort(SlotSlice(slots)) + for _, slot := range slots { + if first { + min = slot + max = slot + first = false + continue + } + if slot > max+1 { + ranges = append(ranges, SlotRange{Min: min, Max: max}) + min = slot + } + max = slot + } + if !first { + ranges = append(ranges, SlotRange{Min: min, Max: max}) + } + + return ranges +} + +// SlotSliceInter TODO +func SlotSliceInter(slots01, slots02 []Slot) []Slot { + m01 := make(map[Slot]bool) + m02 := make(map[Slot]bool) + for _, item01 := range slots01 { + m01[item01] = true + } + for _, item02 := range slots02 { + m02[item02] = true + } + ret := []Slot{} + for item01 := range m01 { + if _, ok := m02[item01]; ok == true { + ret = append(ret, item01) + } + } + sort.Sort(SlotSlice(ret)) + return ret +} + +// RemoveSlots return a new list of slot where a list of slots have been removed, doesn't work if duplicates +func RemoveSlots(slots []Slot, removedSlots []Slot) []Slot { + for i := 0; i < len(slots); i++ { + s := slots[i] + for _, r := range removedSlots { + if s == r { + slots = append(slots[:i], slots[i+1:]...) + i-- + break + } + } + } + + return slots +} + +// RemoveSlot return a new list of slot where a specified slot have been removed. +func RemoveSlot(slots []Slot, removedSlot Slot) []Slot { + for i := 0; i < len(slots); i++ { + s := slots[i] + if s == removedSlot { + slots = append(slots[:i], slots[i+1:]...) + break + } + } + + return slots +} + +// AddSlots return a new list of slots after adding some slots in it, duplicates are removed +func AddSlots(slots []Slot, addedSlots []Slot) []Slot { + for _, s := range addedSlots { + if !Contains(slots, s) { + slots = append(slots, s) + } + } + return slots +} + +// Contains returns true if a node slice contains a node +func Contains(s []Slot, e Slot) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} + +// BuildSlotSlice return a slice of all slots between this range +func BuildSlotSlice(min, max Slot) []Slot { + slots := []Slot{} + for s := min; s <= max; s++ { + slots = append(slots, s) + } + return slots +} diff --git a/dbm-services/redis/db-tools/dbactuator/models/myredis/tendisplus_cluster_setslotinfo.go b/dbm-services/redis/db-tools/dbactuator/models/myredis/tendisplus_cluster_setslotinfo.go new file mode 100644 index 0000000000..8aba98d14e --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/models/myredis/tendisplus_cluster_setslotinfo.go @@ -0,0 +1,202 @@ +package myredis + +import ( + "context" + "dbm-services/redis/db-tools/dbactuator/mylog" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "encoding/json" + "fmt" + "regexp" + "strconv" + "strings" +) + +// ClusterSetSlotInfo 命令:cluster setslot info信息 +type ClusterSetSlotInfo struct { + // dst Node命令cluster setslot info结果 + ImportingTaskIDs []string + ImportingSlotList []int + ImportingSlotMap map[int]bool `json:"-"` + SuccessImportSlotList []int + SuccessImportSlotMap map[int]bool `json:"-"` + FailImportSlotList []int + FailImportSlotMap map[int]bool `json:"-"` + RunningRcvTaskNum int + SuccessRcvTaskNum int + FailRcvTaskNum int + + // src Node命令cluster setslot info结果 + MigratingTaskIDs []string + MigratingSlotList []int + MigratingSlotMap map[int]bool `json:"-"` + SuccessMigrateSlotList []int + SuccessMigrateSlotMap map[int]bool `json:"-"` + FailMigrateSlotList []int + FailMigrateSlotMap map[int]bool `json:"-"` + RunningSendTaskNum int + SuccessSendTaskNum int + FailSendTaskNum int +} + 
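+// DstImportSummary 汇总一批slots在目标节点上的导入状态,便于日志打印 +// 说明: 此方法为补充的用法示例,展示下方GetDstRedisSlotsStatus的使用方式 +func (info *ClusterSetSlotInfo) DstImportSummary(slotList []int) string { + importing, successImport, failImport, unknown := info.GetDstRedisSlotsStatus(slotList) + return fmt.Sprintf("importing:%d success:%d fail:%d unknown:%d", + len(importing), len(successImport), len(failImport), len(unknown)) +} +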
+// ToString 返回JSON字符串 +func (info *ClusterSetSlotInfo) ToString() string { + ret, _ := json.Marshal(info) + return string(ret) +} + +// IsImportingSlot 是否是import中的slots +func (info *ClusterSetSlotInfo) IsImportingSlot(slotid int) bool { + _, ok := info.ImportingSlotMap[slotid] + return ok +} + +// IsSuccessImportSlot 是否是成功import的slots +func (info *ClusterSetSlotInfo) IsSuccessImportSlot(slotid int) bool { + _, ok := info.SuccessImportSlotMap[slotid] + return ok +} + +// IsFailImportSlot 是否是import失败的slot +func (info *ClusterSetSlotInfo) IsFailImportSlot(slotid int) bool { + _, ok := info.FailImportSlotMap[slotid] + return ok +} + +// GetDstRedisSlotsStatus 获取目标slots的状态 +func (info *ClusterSetSlotInfo) GetDstRedisSlotsStatus(slotList []int) ( + importing, successImport, failImport, unknown []int, +) { + for _, slotItem := range slotList { + if info.IsImportingSlot(slotItem) { + importing = append(importing, slotItem) + } else if info.IsSuccessImportSlot(slotItem) { + successImport = append(successImport, slotItem) + } else if info.IsFailImportSlot(slotItem) { + failImport = append(failImport, slotItem) + } else { + unknown = append(unknown, slotItem) + } + } + return +} + +// IsMigratingSlot 是否是migrate中的slot +func (info *ClusterSetSlotInfo) IsMigratingSlot(slotid int) bool { + _, ok := info.MigratingSlotMap[slotid] + return ok +} + +// IsSuccessMigrateSlot 是否是成功migrate的slot +func (info *ClusterSetSlotInfo) IsSuccessMigrateSlot(slotid int) bool { + _, ok := info.SuccessMigrateSlotMap[slotid] + return ok +} + +// IsFailMigrateSlot 是否是migrate失败的slot +func (info *ClusterSetSlotInfo) IsFailMigrateSlot(slotid int) bool { + _, ok := info.FailMigrateSlotMap[slotid] + return ok +} + +// GetSrcSlotsStatus 获取迁移任务中src节点上的slots状态 +func (info *ClusterSetSlotInfo) GetSrcSlotsStatus(slotList []int) ( + migrating, successMigrate, failMigrate, unknown []int) { + for _, slotItem := range slotList { + if info.IsMigratingSlot(slotItem) { + migrating = append(migrating, slotItem) + } else if info.IsSuccessMigrateSlot(slotItem) { + successMigrate = append(successMigrate, slotItem) + } else if info.IsFailMigrateSlot(slotItem) { + failMigrate = append(failMigrate, slotItem) + } else { + unknown = append(unknown, slotItem) + } + } + return +} + +// GetClusterSetSlotInfo 获取'cluster setslot info'的结果并解析 +func GetClusterSetSlotInfo(nodeAddr, nodePassword string) ( + setSlotInfo *ClusterSetSlotInfo, err error) { + // 测试nodeAddr的连通性 + cli01, err := NewRedisClient(nodeAddr, nodePassword, 0, consts.TendisTypeRedisInstance) + if err != nil { + return nil, err + } + defer cli01.Close() + + cmd := []interface{}{"cluster", "setslot", "info"} + ret, err := cli01.InstanceClient.Do(context.TODO(), cmd...).Result() + if err != nil { + err = fmt.Errorf("'cluster setslot info' fail,err:%v,nodeAddr:%s", err, nodeAddr) + mylog.Logger.Error(err.Error()) + return nil, err + } + setSlotsInfo := &ClusterSetSlotInfo{} + setSlotsInfo.ImportingSlotMap = make(map[int]bool) + setSlotsInfo.SuccessImportSlotMap = make(map[int]bool) + setSlotsInfo.FailImportSlotMap = make(map[int]bool) + setSlotsInfo.MigratingSlotMap = make(map[int]bool) + setSlotsInfo.SuccessMigrateSlotMap = make(map[int]bool) + setSlotsInfo.FailMigrateSlotMap = make(map[int]bool) + + taskTimePattern := regexp.MustCompile(`\[.*?\]`) + + importInfos, ok := ret.([]interface{}) + if !ok { + err = fmt.Errorf( + `GetClusterSetSlotInfo cmd:'cluster setslot info' result not []interface{},nodeAddr:%s.cmd:%v`, + nodeAddr, cmd) + mylog.Logger.Error(err.Error()) + return nil, err + } + for _, info01 := range importInfos { + infoItem, _ := info01.(string) + infoItem = strings.TrimSpace(infoItem) + if infoItem == "" { + continue + } + list01 :=
strings.SplitN(infoItem, ":", 2) + if len(list01) < 2 { + continue + } + list01[1] = strings.TrimSpace(list01[1]) + if list01[0] == "importing taskid" { + list01[1] = strings.TrimSpace(list01[1]) + if list01[1] == "" { + continue + } + list01[1] = taskTimePattern.ReplaceAllString(list01[1], "") // 将task 时间替换掉 + setSlotsInfo.ImportingTaskIDs = strings.Fields(list01[1]) + } else if list01[0] == "importing slots" { + setSlotsInfo.ImportingSlotList, setSlotsInfo.ImportingSlotMap, _, _, _ = DecodeSlotsFromStr(list01[1], " ") + } else if list01[0] == "success import slots" { + setSlotsInfo.SuccessImportSlotList, setSlotsInfo.SuccessImportSlotMap, _, _, _ = DecodeSlotsFromStr(list01[1], " ") + } else if list01[0] == "fail import slots" { + setSlotsInfo.FailImportSlotList, setSlotsInfo.FailImportSlotMap, _, _, _ = DecodeSlotsFromStr(list01[1], " ") + } else if list01[0] == "running receiver task num" { + setSlotsInfo.RunningRcvTaskNum, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "success receiver task num" { + setSlotsInfo.SuccessRcvTaskNum, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "fail receiver task num" { + setSlotsInfo.FailRcvTaskNum, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "migrating taskid" { + if infoItem == "" { + continue + } + list01[1] = taskTimePattern.ReplaceAllString(list01[1], "") // 将task 时间替换掉 + setSlotsInfo.MigratingTaskIDs = strings.Fields(list01[1]) + } else if list01[0] == "migrating slots" { + setSlotsInfo.MigratingSlotList, setSlotsInfo.MigratingSlotMap, _, _, _ = DecodeSlotsFromStr(list01[1], " ") + } else if list01[0] == "success migrate slots" { + setSlotsInfo.SuccessMigrateSlotList, setSlotsInfo.SuccessMigrateSlotMap, _, _, _ = DecodeSlotsFromStr(list01[1], " ") + } else if list01[0] == "fail migrate slots" { + setSlotsInfo.FailMigrateSlotList, setSlotsInfo.FailMigrateSlotMap, _, _, _ = DecodeSlotsFromStr(list01[1], " ") + } else if list01[0] == "running sender task num" { + list01[1] = strings.TrimSpace(list01[1]) + setSlotsInfo.RunningSendTaskNum, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "success sender task num" { + list01[1] = strings.TrimSpace(list01[1]) + setSlotsInfo.SuccessSendTaskNum, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "fail sender task num" { + list01[1] = strings.TrimSpace(list01[1]) + setSlotsInfo.FailSendTaskNum, _ = strconv.Atoi(list01[1]) + } + } + return setSlotsInfo, nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/models/myredis/tendisplus_infoRepl.go b/dbm-services/redis/db-tools/dbactuator/models/myredis/tendisplus_infoRepl.go new file mode 100644 index 0000000000..74275fb8b4 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/models/myredis/tendisplus_infoRepl.go @@ -0,0 +1,412 @@ +package myredis + +import ( + "context" + "dbm-services/redis/db-tools/dbactuator/mylog" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "encoding/json" + "fmt" + "regexp" + "strconv" + "strings" +) + +// InfoReplSlave Tendisplus master中执行info replication结果中slave状态 +// 如: slave0:ip=luketest03-redis-rdsplus4-1.luketest03-svc.dmc,port=30000,state=online,offset=930327677,lag=0 +type InfoReplSlave struct { + Name string `json:"name"` + IP string `json:"ip"` + Port int `json:"port"` + State string `json:"state"` + Offset int64 `json:"offset"` + Lag int64 `json:"lag"` +} + +func (slave *InfoReplSlave) decode(line string) error { + line = strings.TrimSpace(line) + list01 := strings.Split(line, ":") + if len(list01) < 2 { + return fmt.Errorf(`%s format not correct, + the correct format is as 
follows:slave0:ip=xx,port=48000,state=online,offset=2510,lag=0`, line) + } + slave.Name = list01[0] + list02 := strings.Split(list01[1], ",") + for _, item01 := range list02 { + list02 := strings.Split(item01, "=") + if list02[0] == "ip" { + slave.IP = list02[1] + } else if list02[0] == "port" { + slave.Port, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "state" { + slave.State = list02[1] + } else if list02[0] == "offset" { + slave.Offset, _ = strconv.ParseInt(list02[1], 10, 64) + } else if list02[0] == "lag" { + slave.Lag, _ = strconv.ParseInt(list02[1], 10, 64) + } + } + return nil +} + +// InfoReplRocksdb .. +type InfoReplRocksdb struct { + Name string `json:"name"` + IP string `json:"ip"` + Port int `json:"port"` + State string `json:"state"` + BinlogPos int64 `json:"binlog_pos"` + Lag int64 `json:"lag"` +} + +// InfoReplRocksdbSlave 在tendisplus master上执行info replication结果中rocksdb_slave0解析 +// 如: rocksdb0_slave0:ip=127.0.0.1,port=48000,dest_store_id=0,state=online,binlog_pos=249,lag=0,binlog_lag=0 +type InfoReplRocksdbSlave struct { + InfoReplRocksdb + DestStoreID int `json:"dest_store_id"` + BinlogLag int64 `json:"binlog_lag"` +} + +func (slave *InfoReplRocksdbSlave) decode(line string) error { + line = strings.TrimSpace(line) + var err error + list01 := strings.Split(line, ":") + if len(list01) < 2 { + err = fmt.Errorf(`%s format not correct, + the correct format is as follows: + rocksdb0_slave0:ip=xx,port=xx,dest_store_id=0,state=online,binlog_pos=249,lag=0,binlog_lag=0`, line) + mylog.Logger.Error(err.Error()) + return err + } + slave.Name = list01[0] + + list02 := strings.Split(list01[1], ",") + for _, item01 := range list02 { + list02 := strings.Split(item01, "=") + if list02[0] == "ip" { + slave.IP = list02[1] + } else if list02[0] == "port" { + slave.Port, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "dest_store_id" { + slave.DestStoreID, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "state" { + slave.State = list02[1] + } else if list02[0] == "binlog_pos" { + slave.BinlogPos, _ = strconv.ParseInt(list02[1], 10, 64) + } else if list02[0] == "lag" { + slave.Lag, _ = strconv.ParseInt(list02[1], 10, 64) + } else if list02[0] == "binlog_lag" { + slave.BinlogLag, _ = strconv.ParseInt(list02[1], 10, 64) + } + } + return nil +} + +// InfoReplRocksdbMaster 在tendisplus slave上执行info replication结果中rocksdb_master解析 +// 如: rocksdb0_master:ip=127.0.0.1,port=47000,src_store_id=0,state=online,binlog_pos=249,lag=0 +type InfoReplRocksdbMaster struct { + InfoReplRocksdb + SrcStoreID int `json:"src_store_id"` +} + +func (master *InfoReplRocksdbMaster) decode(line string) error { + line = strings.TrimSpace(line) + list01 := strings.Split(line, ":") + var err error + if len(list01) < 2 { + err = fmt.Errorf(`%s format not correct, + the correct format is as follows: + rocksdb0_master:ip=xxxx,port=47000,src_store_id=0,state=online,binlog_pos=249,lag=0`, line) + mylog.Logger.Error(err.Error()) + return err + } + master.Name = list01[0] + + list02 := strings.Split(list01[1], ",") + for _, item01 := range list02 { + list02 := strings.Split(item01, "=") + if list02[0] == "ip" { + master.IP = list02[1] + } else if list02[0] == "port" { + master.Port, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "src_store_id" { + master.SrcStoreID, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "state" { + master.State = list02[1] + } else if list02[0] == "binlog_pos" { + master.BinlogPos, _ = strconv.ParseInt(list02[1], 10, 64) + } else if list02[0] == "lag" { + master.Lag, _ = 
strconv.ParseInt(list02[1], 10, 64) + } + } + return nil +} + +// TendisplusInfoReplData tendisplus info replication命令结果解析 +type TendisplusInfoReplData struct { + Addr string `json:"addr"` + Role string `json:"role"` + MasterHost string `json:"master_host"` + MasterPort int `json:"master_port"` + MasterLinkStatus string `json:"master_link_status"` + MasterLastIoSecondsAgo int64 `json:"master_last_io_seconds_ago"` + MasterSyncInPogress int64 `json:"master_sync_in_progress"` + SlaveReplOffset int64 `json:"slave_repl_offset"` + SlavePriority int64 `json:"slave_priority"` + SlaveReadOnly int `json:"slave_read_only"` + ConnectedSlaves int `json:"connected_slaves"` + MasterReplOffset int64 `json:"master_repl_offset"` + SlaveList []InfoReplSlave `json:"slave_list"` + RocksdbMasterList []InfoReplRocksdbMaster `json:"rocksdb_master_list"` + RocksdbSlaveList []InfoReplRocksdbSlave `json:"rocksdb_slave_list"` +} + +// String 用于打印 +func (rpl *TendisplusInfoReplData) String() string { + tmp, _ := json.Marshal(rpl) + return string(tmp) +} + +// GetRole master/slave +func (rpl *TendisplusInfoReplData) GetRole() string { + return rpl.Role +} + +// GetMasterLinkStatus up/down +func (rpl *TendisplusInfoReplData) GetMasterLinkStatus() string { + return rpl.MasterLinkStatus +} + +// SlaveMaxLag .. +// - 如果我的角色是slave,则从 RocksdbMasterList 中获取maxLag; +// - 如果我的角色是master,则先根据slaveAddr找到slave,然后从 SlaveList 中获取获取maxLag; +// - 如果slaveAddr为空,则获取master第一个slave的lag作为 maxLag; +func (rpl *TendisplusInfoReplData) SlaveMaxLag(slaveAddr string) (int64, error) { + var maxLag int64 = 0 + var err error = nil + slaveAddr = strings.TrimSpace(slaveAddr) + if rpl.GetRole() == "slave" { + if rpl.GetMasterLinkStatus() == "down" { + err = fmt.Errorf("slave:%s master_link_status is %s", rpl.Addr, rpl.GetMasterLinkStatus()) + mylog.Logger.Error(err.Error()) + return maxLag, err + } + for _, rdbMaster01 := range rpl.RocksdbMasterList { + if rdbMaster01.Lag > 18000000000000000 { + // 以前tendisplus的一个bug, 新版本已修复 + continue + } + if rdbMaster01.Lag > maxLag { + maxLag = rdbMaster01.Lag + } + } + return maxLag, nil + } + // role==master + if len(rpl.SlaveList) == 0 { + err = fmt.Errorf("master:%s have no slave", rpl.Addr) + mylog.Logger.Error(err.Error()) + return maxLag, err + } + if slaveAddr == "" { + // default first slave lag + maxLag = rpl.SlaveList[0].Lag + return maxLag, nil + } + var destSlave *InfoReplSlave = nil + for _, slave01 := range rpl.SlaveList { + slaveItem := slave01 + addr01 := fmt.Sprintf("%s:%d", slaveItem.IP, slaveItem.Port) + if slaveAddr == addr01 { + destSlave = &slaveItem + break + } + } + if destSlave == nil { + err = fmt.Errorf("master:%s not find slave:%s", rpl.Addr, slaveAddr) + mylog.Logger.Error(err.Error()) + return maxLag, err + } + maxLag = destSlave.Lag + return maxLag, nil +} + +// TendisplusInfoRepl tendisplus info replication结果解析 +// 参考内容: http://tendis.cn/#/Tendisplus/%E5%91%BD%E4%BB%A4/info?id=replication +func (db *RedisClient) TendisplusInfoRepl() (replData TendisplusInfoReplData, err error) { + var replRet string + if db.DbType == consts.TendisTypeRedisCluster { + replRet, err = db.ClusterClient.Info(context.TODO(), "replication").Result() + } else { + replRet, err = db.InstanceClient.Info(context.TODO(), "replication").Result() + } + if err != nil { + err = fmt.Errorf("info replication fail,err:%v,aadr:%s", err, db.Addr) + mylog.Logger.Error(err.Error()) + return + } + infoList := strings.Split(replRet, "\n") + replData = TendisplusInfoReplData{} + replData.Addr = db.Addr + + slaveReg := 
regexp.MustCompile(`^slave\d+$`) + rocksdbSlaveReg := regexp.MustCompile(`^rocksdb\d+_slave\d+$`) + rocksdbMasterReg := regexp.MustCompile(`^rocksdb\d+_master$`) + for _, infoItem := range infoList { + infoItem = strings.TrimSpace(infoItem) + if strings.HasPrefix(infoItem, "#") { + continue + } + if len(infoItem) == 0 { + continue + } + list01 := strings.SplitN(infoItem, ":", 2) + if len(list01) < 2 { + continue + } + list01[0] = strings.TrimSpace(list01[0]) + list01[1] = strings.TrimSpace(list01[1]) + if list01[0] == "role" { + replData.Role = list01[1] + } else if list01[0] == "master_host" { + replData.MasterHost = list01[1] + } else if list01[0] == "master_port" { + replData.MasterPort, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "master_link_status" { + replData.MasterLinkStatus = list01[1] + } else if list01[0] == "master_last_io_seconds_ago" { + replData.MasterLastIoSecondsAgo, _ = strconv.ParseInt(list01[1], 10, 64) + } else if list01[0] == "master_sync_in_progress" { + replData.MasterSyncInPogress, _ = strconv.ParseInt(list01[1], 10, 64) + } else if list01[0] == "slave_repl_offset" { + replData.SlaveReplOffset, _ = strconv.ParseInt(list01[1], 10, 64) + } else if list01[0] == "slave_priority" { + replData.SlavePriority, _ = strconv.ParseInt(list01[1], 10, 64) + } else if list01[0] == "slave_read_only" { + replData.SlaveReadOnly, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "connected_slaves" { + replData.ConnectedSlaves, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "master_repl_offset" { + replData.MasterReplOffset, _ = strconv.ParseInt(list01[1], 10, 64) + } else if slaveReg.MatchString(list01[0]) == true { + slave01 := InfoReplSlave{} + err = slave01.decode(infoItem) + if err != nil { + return + } + replData.SlaveList = append(replData.SlaveList, slave01) + } else if rocksdbSlaveReg.MatchString(list01[0]) == true { + rdbSlave01 := InfoReplRocksdbSlave{} + err = rdbSlave01.decode(infoItem) + if err != nil { + return + } + replData.RocksdbSlaveList = append(replData.RocksdbSlaveList, rdbSlave01) + } else if rocksdbMasterReg.MatchString(list01[0]) == true { + rdbMaster01 := InfoReplRocksdbMaster{} + err = rdbMaster01.decode(infoItem) + if err != nil { + return + } + replData.RocksdbMasterList = append(replData.RocksdbMasterList, rdbMaster01) + } + } + return +} + +// TendisSSDInfoSlaveItem TendisSSD master中执行info slaves结果中slave状态 +// 如: slave0: ip=127.0.0.1,port=30000,state=IncrSync,seq=1111,lag=1 +type TendisSSDInfoSlaveItem struct { + Name string `json:"name"` + IP string `json:"ip"` + Port int `json:"port"` + State string `json:"state"` + Seq int64 `json:"seq"` + Lag int64 `json:"lag"` +} + +// Addr addr字符串 +func (item *TendisSSDInfoSlaveItem) Addr() string { + return item.IP + ":" + strconv.Itoa(item.Port) +} + +func (item *TendisSSDInfoSlaveItem) decode(line string) error { + line = strings.TrimSpace(line) + list01 := strings.Split(line, ":") + if len(list01) < 2 { + return fmt.Errorf(`%s format not correct, + the correct format is as follows:slave0:ip=xx,port=48000,state=IncrSync,seq=1111,lag=1`, line) + } + item.Name = list01[0] + list02 := strings.Split(list01[1], ",") + for _, item01 := range list02 { + list02 := strings.Split(item01, "=") + list02[0] = strings.TrimSpace(list02[0]) + list02[1] = strings.TrimSpace(list02[1]) + if list02[0] == "ip" { + item.IP = list02[1] + } else if list02[0] == "port" { + item.Port, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "state" { + item.State = list02[1] + } else if list02[0] == "seq" { + item.Seq, _ 
= strconv.ParseInt(list02[1], 10, 64) + } else if list02[0] == "lag" { + item.Lag, _ = strconv.ParseInt(list02[1], 10, 64) + } + } + return nil +} + +// TendisSSDInfoSlavesData tendisSSD 'info slaves'结果 +type TendisSSDInfoSlavesData struct { + ConnectedSlaves int `json:"connected-slaves"` + DisConnectedSlaves int `json:"disconnected-slaves"` + SlaveList []TendisSSDInfoSlaveItem `json:"slave_list"` +} + +// String 用于打印 +func (data *TendisSSDInfoSlavesData) String() string { + tmp, _ := json.Marshal(data) + return string(tmp) +} + +// TendisSSDInfoSlaves tendisSSD 'info slaves'解析 +func (db *RedisClient) TendisSSDInfoSlaves() (ret TendisSSDInfoSlavesData, err error) { + var replRet string + replRet, err = db.InstanceClient.Info(context.TODO(), "slaves").Result() + if err != nil { + err = fmt.Errorf("info slaves fail,err:%v,aadr:%s", err, db.Addr) + mylog.Logger.Error(err.Error()) + return + } + infoList := strings.Split(replRet, "\n") + slaveReg := regexp.MustCompile(`^slave\d+$`) + for _, infoItem := range infoList { + infoItem = strings.TrimSpace(infoItem) + if strings.HasPrefix(infoItem, "#") { + continue + } + if len(infoItem) == 0 { + continue + } + list01 := strings.SplitN(infoItem, ":", 2) + if len(list01) < 2 { + continue + } + list01[0] = strings.TrimSpace(list01[0]) + list01[1] = strings.TrimSpace(list01[1]) + if list01[0] == "connected-slaves" { + ret.ConnectedSlaves, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "disconnected-slaves" { + ret.DisConnectedSlaves, _ = strconv.Atoi(list01[1]) + } else if slaveReg.MatchString(list01[0]) { + slave01 := TendisSSDInfoSlaveItem{} + err = slave01.decode(infoItem) + if err != nil { + return + } + ret.SlaveList = append(ret.SlaveList, slave01) + } + } + return +} diff --git a/dbm-services/redis/db-tools/dbactuator/mylog/mylog.go b/dbm-services/redis/db-tools/dbactuator/mylog/mylog.go new file mode 100644 index 0000000000..1bfb0bf01d --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/mylog/mylog.go @@ -0,0 +1,28 @@ +// Package mylog 便于全局日志操作 +package mylog + +import ( + "dbm-services/common/go-pubpkg/logger" + "os" +) + +// Logger 和 jobruntime.Logger 是同一个 logger +var Logger *logger.Logger + +// SetDefaultLogger 设置默认logger +func SetDefaultLogger(log *logger.Logger) { + Logger = log +} + +// UnitTestInitLog 单元测试初始化Logger +func UnitTestInitLog() { + extMap := map[string]string{ + "uid": "1111", + "node_id": "localhost", + "root_id": "2222", + "version_id": "3333", + } + log01 := logger.New(os.Stdout, true, logger.InfoLevel, extMap) + log01.Sync() + SetDefaultLogger(log01) +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/add_shard_to_cluster.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/add_shard_to_cluster.go new file mode 100644 index 0000000000..e616584a4a --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/add_shard_to_cluster.go @@ -0,0 +1,247 @@ +package atommongodb + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/go-playground/validator/v10" +) + +// AddConfParams 参数 +type AddConfParams struct { + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` + AdminUsername string `json:"adminUsername" validate:"required"` + AdminPassword string 
`json:"adminPassword" validate:"required"` + Shards map[string]string `json:"shard" validate:"required"` // key->clusterId,value->ip:port,ip:port 不包含隐藏节点 +} + +// AddShardToCluster 添加分片到集群 +type AddShardToCluster struct { + runtime *jobruntime.JobGenericRuntime + BinDir string + Mongo string + OsUser string + ConfFilePath string + ConfFileContent string + ConfParams *AddConfParams +} + +// NewAddShardToCluster 实例化结构体 +func NewAddShardToCluster() jobruntime.JobRunner { + return &AddShardToCluster{} +} + +// Name 获取原子任务的名字 +func (a *AddShardToCluster) Name() string { + return "add_shard_to_cluster" +} + +// Run 运行原子任务 +func (a *AddShardToCluster) Run() error { + // 获取配置内容 + if err := a.makeConfContent(); err != nil { + return err + } + + // 生成js脚本 + if err := a.createAddShardToClusterScript(); err != nil { + return err + } + + // 执行js脚本 + if err := a.execScript(); err != nil { + return err + } + + return nil +} + +// Retry 重试 +func (a *AddShardToCluster) Retry() uint { + return 2 +} + +// Rollback 回滚 +func (a *AddShardToCluster) Rollback() error { + return nil +} + +// Init 初始化 +func (a *AddShardToCluster) Init(runtime *jobruntime.JobGenericRuntime) error { + // 获取安装参数 + a.runtime = runtime + a.runtime.Logger.Info("start to init") + a.BinDir = consts.UsrLocal + a.Mongo = filepath.Join(a.BinDir, "mongodb", "bin", "mongo") + a.ConfFilePath = filepath.Join("/", "tmp", "addShardToCluster.js") + a.OsUser = consts.GetProcessUser() + + // 获取MongoDB配置文件参数 + if err := json.Unmarshal([]byte(a.runtime.PayloadDecoded), &a.ConfParams); err != nil { + a.runtime.Logger.Error(fmt.Sprintf( + "get parameters of initiateReplicaset fail by json.Unmarshal, error:%s", err)) + return fmt.Errorf("get parameters of initiateReplicaset fail by json.Unmarshal, error:%s", err) + } + a.runtime.Logger.Info("init successfully") + + // 进行校验 + if err := a.checkParams(); err != nil { + return err + } + + return nil +} + +// checkParams 校验参数 +func (a *AddShardToCluster) checkParams() error { + // 校验重启配置参数 + validate := validator.New() + a.runtime.Logger.Info("start to validate parameters of addShardToCluster") + if err := validate.Struct(a.ConfParams); err != nil { + a.runtime.Logger.Error(fmt.Sprintf("validate parameters of addShardToCluster fail, error:%s", err)) + return fmt.Errorf("validate parameters of addShardToCluster fail, error:%s", err) + } + a.runtime.Logger.Info("validate parameters of addShardToCluster successfully") + return nil +} + +// makeConfContent 生成配置内容 +func (a *AddShardToCluster) makeConfContent() error { + a.runtime.Logger.Info("start to make config content of addShardToCluster") + var shards []string + for key, value := range a.ConfParams.Shards { + shards = append(shards, strings.Join([]string{key, "/", value}, "")) + } + + for _, v := range shards { + a.ConfFileContent += strings.Join([]string{"sh.addShard(\"", v, "\")\n"}, "") + } + a.runtime.Logger.Info("make config content of addShardToCluster successfully") + return nil +} + +// createAddShardToClusterScript 生成js脚本 +func (a *AddShardToCluster) createAddShardToClusterScript() error { + a.runtime.Logger.Info("start to create addShardToCluster script") + confFile, err := os.OpenFile(a.ConfFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, DefaultPerm) + defer confFile.Close() + if err != nil { + a.runtime.Logger.Error( + fmt.Sprintf("create script file of addShardToCluster fail, error:%s", err)) + return fmt.Errorf("create script file of addShardToCluster fail, error:%s", err) + } + + if _, err = confFile.WriteString(a.ConfFileContent); err != nil { + 
a.runtime.Logger.Error( + fmt.Sprintf("create script file of addShardToCluster write content fail, error:%s", + err)) + return fmt.Errorf("create script file of addShardToCluster write content fail, error:%s", + err) + } + a.runtime.Logger.Info("create addShardToCluster script successfully") + return nil +} + +// checkShard checks whether every expected shard has already joined the cluster +func (a *AddShardToCluster) checkShard() (bool, error) { + a.runtime.Logger.Info("start to check shard") + cmd := fmt.Sprintf( + "su %s -c '%s -u %s -p %s --host %s --port %d --quiet --authenticationDatabase=admin --eval \"db.getMongo().getDB(\\\"config\\\").shards.find()\" admin'", + a.OsUser, a.Mongo, a.ConfParams.AdminUsername, a.ConfParams.AdminPassword, a.ConfParams.IP, a.ConfParams.Port) + result, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second) + if err != nil { + a.runtime.Logger.Error(fmt.Sprintf("get shard info fail, error:%s", err)) + return false, fmt.Errorf("get shard info fail, error:%s", err) + } + result = strings.Replace(result, "\n", "", -1) + if result == "" { + a.runtime.Logger.Info("check shard successfully") + return false, nil + } + + for k := range a.ConfParams.Shards { + // a missing shard is not an error here: the caller decides whether it still needs to be added + if !strings.Contains(result, k) { + a.runtime.Logger.Info(fmt.Sprintf("shard %s not found in cluster", k)) + return false, nil + } + } + a.runtime.Logger.Info("check shard successfully") + return true, nil +} + +// execScript executes the script +func (a *AddShardToCluster) execScript() error { + // check first + flag, err := a.checkShard() + if err != nil { + return err + } + if flag { + a.runtime.Logger.Info("shards have been added") + // remove the script + if err = a.removeScript(); err != nil { + return err + } + + return nil + } + + // execute the script + a.runtime.Logger.Info("start to execute addShardToCluster script") + cmd := fmt.Sprintf("su %s -c '%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet %s'", + a.OsUser, a.Mongo, a.ConfParams.AdminUsername, a.ConfParams.AdminPassword, a.ConfParams.IP, a.ConfParams.Port, + a.ConfFilePath) + if _, err = util.RunBashCmd( + cmd, + "", nil, + 10*time.Second); err != nil { + a.runtime.Logger.Error(fmt.Sprintf("execute addShardToCluster script fail, error:%s", err)) + return fmt.Errorf("execute addShardToCluster script fail, error:%s", err) + } + a.runtime.Logger.Info("execute addShardToCluster script successfully") + + // check again + flag, err = a.checkShard() + if err != nil { + return err + } + if !flag { + a.runtime.Logger.Error("add shard fail: not all shards were registered") + return fmt.Errorf("add shard fail: not all shards were registered") + } + + // remove the script + if err = a.removeScript(); err != nil { + return err + } + + return nil +} + +// removeScript removes the script +func (a *AddShardToCluster) removeScript() error { + // remove the script file + a.runtime.Logger.Info("start to remove addShardToCluster script") + if err := common.RemoveFile(a.ConfFilePath); err != nil { + a.runtime.Logger.Error(fmt.Sprintf("remove addShardToCluster script fail, error:%s", err)) + return fmt.Errorf("remove addShardToCluster script fail, error:%s", err) + } + a.runtime.Logger.Info("remove addShardToCluster script successfully") + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/add_user.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/add_user.go new file mode 100644 index 0000000000..96a351238c --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/add_user.go @@ -0,0 +1,262 @@ +package atommongodb + +import ( +
"dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/go-playground/validator/v10" +) + +// AddUserConfParams 参数 +type AddUserConfParams struct { + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` + InstanceType string `json:"instanceType" validate:"required"` + Username string `json:"username" validate:"required"` + Password string `json:"password" validate:"required"` + AdminUsername string `json:"adminUsername"` + AdminPassword string `json:"adminPassword"` + AuthDb string `json:"authDb"` // 为方便管理用户,验证库默认为admin库 + Dbs []string `json:"dbs"` // 业务库 + Privileges []string `json:"privileges"` // 权限 + +} + +// AddUser 添加分片到集群 +type AddUser struct { + runtime *jobruntime.JobGenericRuntime + BinDir string + Mongo string + PrimaryIP string + PrimaryPort int + OsUser string + ScriptContent string + ConfParams *AddUserConfParams +} + +// NewAddUser 实例化结构体 +func NewAddUser() jobruntime.JobRunner { + return &AddUser{} +} + +// Name 获取原子任务的名字 +func (u *AddUser) Name() string { + return "add_user" +} + +// Run 运行原子任务 +func (u *AddUser) Run() error { + // 生成脚本内容 + if err := u.makeScriptContent(); err != nil { + return err + } + + // 执行js脚本 + if err := u.execScript(); err != nil { + return err + } + + return nil +} + +// Retry 重试 +func (u *AddUser) Retry() uint { + return 2 +} + +// Rollback 回滚 +func (u *AddUser) Rollback() error { + return nil +} + +// Init 初始化 +func (u *AddUser) Init(runtime *jobruntime.JobGenericRuntime) error { + // 获取安装参数 + u.runtime = runtime + u.runtime.Logger.Info("start to init") + u.BinDir = consts.UsrLocal + u.Mongo = filepath.Join(u.BinDir, "mongodb", "bin", "mongo") + u.OsUser = consts.GetProcessUser() + + // 获取MongoDB配置文件参数 + if err := json.Unmarshal([]byte(u.runtime.PayloadDecoded), &u.ConfParams); err != nil { + u.runtime.Logger.Error(fmt.Sprintf( + "get parameters of addUser fail by json.Unmarshal, error:%s", err)) + return fmt.Errorf("get parameters of addUser fail by json.Unmarshal, error:%s", err) + } + + // 获取primary信息 + if u.ConfParams.InstanceType == "mongos" { + u.PrimaryIP = u.ConfParams.IP + u.PrimaryPort = u.ConfParams.Port + } else { + var info string + var err error + // 安装时无需密码验证。安装成功后需要密码验证 + if u.ConfParams.AdminUsername != "" && u.ConfParams.AdminPassword != "" { + info, err = common.AuthGetPrimaryInfo(u.OsUser, u.Mongo, u.ConfParams.AdminUsername, + u.ConfParams.AdminPassword, u.ConfParams.IP, u.ConfParams.Port) + if err != nil { + u.runtime.Logger.Error(fmt.Sprintf( + "get primary db info of addUser fail, error:%s", err)) + return fmt.Errorf("get primary db info of addUser fail, error:%s", err) + } + getInfo := strings.Split(info, ":") + u.PrimaryIP = getInfo[0] + u.PrimaryPort, _ = strconv.Atoi(getInfo[1]) + } + } + u.runtime.Logger.Info("init successfully") + + // 进行校验 + if err := u.checkParams(); err != nil { + return err + } + + return nil +} + +// checkParams 校验参数 +func (u *AddUser) checkParams() error { + // 校验重启配置参数 + validate := validator.New() + u.runtime.Logger.Info("start to validate parameters of addUser") + if err := validate.Struct(u.ConfParams); err != nil { + u.runtime.Logger.Error(fmt.Sprintf("validate parameters of addUser fail, error:%s", err)) + return fmt.Errorf("validate parameters of addUser fail, error:%s", err) + } + 
u.runtime.Logger.Info("validate parameters of addUser successfully") + return nil +} + +// makeScriptContent 生成user配置内容 +func (u *AddUser) makeScriptContent() error { + u.runtime.Logger.Info("start to make script content") + user := common.NewMongoUser() + user.User = u.ConfParams.Username + user.Pwd = u.ConfParams.Password + + // 判断验证db + if u.ConfParams.AuthDb == "" { + u.ConfParams.AuthDb = "admin" + } + + // 判断业务db是否存在 + if len(u.ConfParams.Dbs) == 0 { + u.ConfParams.Dbs = []string{"admin"} + } + + for _, db := range u.ConfParams.Dbs { + for _, privilege := range u.ConfParams.Privileges { + role := common.NewMongoRole() + role.Role = privilege + role.Db = db + user.Roles = append(user.Roles, role) + } + } + + content, err := user.GetContent() + if err != nil { + u.runtime.Logger.Error(fmt.Sprintf("make config content of addUser fail, error:%s", err)) + return fmt.Errorf("make config content of addUser fail, error:%s", err) + } + content = strings.Replace(content, "\"", "\\\"", -1) + + // 获取mongo版本 + mongoName := "mongod" + if u.ConfParams.InstanceType == "mongos" { + mongoName = "mongos" + } + version, err := common.CheckMongoVersion(u.BinDir, mongoName) + if err != nil { + u.runtime.Logger.Error(fmt.Sprintf("check mongo version fail, error:%s", err)) + return fmt.Errorf("check mongo version fail, error:%s", err) + } + mainVersion, _ := strconv.Atoi(strings.Split(version, ".")[0]) + if mainVersion >= 3 { + u.ScriptContent = strings.Join([]string{"db", + fmt.Sprintf("createUser(%s)", content)}, ".") + u.runtime.Logger.Info("make script content successfully") + return nil + } + u.ScriptContent = strings.Join([]string{"db", + fmt.Sprintf("addUser(%s)", content)}, ".") + u.runtime.Logger.Info("make script content successfully") + + return nil +} + +// checkUser 检查用户是否存在 +func (u *AddUser) checkUser() (bool, error) { + var flag bool + var err error + // 安装时检查管理用户是否存在无需密码验证。安装后检查业务用户是否存在需密码验证 + if u.ConfParams.AdminUsername != "" && u.ConfParams.AdminPassword != "" { + flag, err = common.AuthCheckUser(u.OsUser, u.Mongo, u.ConfParams.AdminUsername, u.ConfParams.AdminPassword, + u.PrimaryIP, u.PrimaryPort, u.ConfParams.AuthDb, u.ConfParams.Username) + } else { + flag, err = common.AuthCheckUser(u.OsUser, u.Mongo, u.ConfParams.Username, u.ConfParams.Password, + u.ConfParams.IP, u.ConfParams.Port, u.ConfParams.AuthDb, u.ConfParams.Username) + } + return flag, err +} + +// execScript 执行脚本 +func (u *AddUser) execScript() error { + var cmd string + if u.ConfParams.AdminUsername != "" && u.ConfParams.AdminPassword != "" { + // 检查用户是否存在 + flag, err := u.checkUser() + if err != nil { + return err + } + if flag == true { + u.runtime.Logger.Info("user:%s has been existed", u.ConfParams.Username) + return nil + } + cmd = fmt.Sprintf( + "su %s -c '%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \"%s\" %s'", + u.OsUser, u.Mongo, u.ConfParams.AdminUsername, u.ConfParams.AdminPassword, u.PrimaryIP, u.PrimaryPort, + u.ScriptContent, u.ConfParams.AuthDb) + } else if u.ConfParams.AdminUsername == "" && u.ConfParams.AdminPassword == "" { + // 复制集初始化后,马上创建db管理员用户,需要等3秒 + time.Sleep(time.Second * 3) + cmd = fmt.Sprintf( + "su %s -c '%s --host %s --port %d --quiet --eval \"%s\" %s'", + u.OsUser, u.Mongo, "127.0.0.1", u.ConfParams.Port, u.ScriptContent, u.ConfParams.AuthDb) + if u.ConfParams.AdminUsername != "" && u.ConfParams.AdminPassword != "" { + + } + } + + // 执行脚本 + u.runtime.Logger.Info("start to execute addUser script") + if _, err := util.RunBashCmd( + cmd, + "", nil, + 
10*time.Second); err != nil { + u.runtime.Logger.Error("execute addUser script fail, error:%s", err) + return fmt.Errorf("execute addUser script fail, error:%s", err) + } + u.runtime.Logger.Info("execute addUser script successfully") + + // verify the user now exists + flag, err := u.checkUser() + if err != nil { + return err + } + if !flag { + u.runtime.Logger.Error("add user:%s fail", u.ConfParams.Username) + return fmt.Errorf("add user:%s fail", u.ConfParams.Username) + } + + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/atommongodb.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/atommongodb.go new file mode 100644 index 0000000000..f022fb23e5 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/atommongodb.go @@ -0,0 +1,2 @@ +// Package atommongodb mongodb atomic jobs +package atommongodb diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/cluster_balancer.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/cluster_balancer.go new file mode 100644 index 0000000000..6d449c0978 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/cluster_balancer.go @@ -0,0 +1,158 @@ +package atommongodb + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "path/filepath" + "strconv" + "time" + + "github.com/go-playground/validator/v10" +) + +// BalancerConfParams parameters +type BalancerConfParams struct { + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` + Open bool `json:"open"` // true: start the balancer, false: stop it + AdminUsername string `json:"adminUsername" validate:"required"` + AdminPassword string `json:"adminPassword" validate:"required"` +} + +// Balancer toggles the cluster balancer +type Balancer struct { + runtime *jobruntime.JobGenericRuntime + BinDir string + Mongo string + OsUser string + ConfParams *BalancerConfParams +} + +// NewBalancer creates the job instance +func NewBalancer() jobruntime.JobRunner { + return &Balancer{} +} + +// Name returns the atomic job's name +func (b *Balancer) Name() string { + return "cluster_balancer" +} + +// Run runs the atomic job +func (b *Balancer) Run() error { + // execute the script + if err := b.execScript(); err != nil { + return err + } + + return nil +} + +// Retry retry times +func (b *Balancer) Retry() uint { + return 2 +} + +// Rollback rollback +func (b *Balancer) Rollback() error { + return nil +} + +// Init initializes the job +func (b *Balancer) Init(runtime *jobruntime.JobGenericRuntime) error { + // get install parameters + b.runtime = runtime + b.runtime.Logger.Info("start to init") + b.BinDir = consts.UsrLocal + b.Mongo = filepath.Join(b.BinDir, "mongodb", "bin", "mongo") + b.OsUser = consts.GetProcessUser() + + // get MongoDB config file parameters + if err := json.Unmarshal([]byte(b.runtime.PayloadDecoded), &b.ConfParams); err != nil { + b.runtime.Logger.Error( + "get parameters of clusterBalancer fail by json.Unmarshal, error:%s", err) + return fmt.Errorf("get parameters of clusterBalancer fail by json.Unmarshal, error:%s", err) + } + b.runtime.Logger.Info("init successfully") + + // validate + if err := b.checkParams(); err != nil { + return err + } + + return nil +} + +// checkParams validates the parameters +func (b *Balancer) checkParams() error { + // validate the config parameters + validate := validator.New() + b.runtime.Logger.Info("start to validate parameters of clusterBalancer") + if err := 
validate.Struct(b.ConfParams); err != nil { + b.runtime.Logger.Error(fmt.Sprintf("validate parameters of clusterBalancer fail, error:%s", err)) + return fmt.Errorf("validate parameters of clusterBalancer fail, error:%s", err) + } + b.runtime.Logger.Info("validate parameters of clusterBalancer successfully") + return nil +} + +// execScript executes the script +func (b *Balancer) execScript() error { + // check the current status + b.runtime.Logger.Info("start to get balancer status") + result, err := common.CheckBalancer(b.OsUser, b.Mongo, b.ConfParams.IP, b.ConfParams.Port, + b.ConfParams.AdminUsername, b.ConfParams.AdminPassword) + if err != nil { + b.runtime.Logger.Error("get cluster balancer status fail, error:%s", err) + return fmt.Errorf("get cluster balancer status fail, error:%s", err) + } + flag, _ := strconv.ParseBool(result) + b.runtime.Logger.Info("get balancer status successfully") + if flag == b.ConfParams.Open { + b.runtime.Logger.Info("balancer status is already %t", b.ConfParams.Open) + return nil + } + + // execute the script + var cmd string + if b.ConfParams.Open { + cmd = fmt.Sprintf( + "su %s -c '%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \"sh.startBalancer()\"'", + b.OsUser, b.Mongo, b.ConfParams.AdminUsername, b.ConfParams.AdminPassword, b.ConfParams.IP, b.ConfParams.Port) + } else { + cmd = fmt.Sprintf( + "su %s -c '%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \"sh.stopBalancer()\"'", + b.OsUser, b.Mongo, b.ConfParams.AdminUsername, b.ConfParams.AdminPassword, b.ConfParams.IP, b.ConfParams.Port) + } + b.runtime.Logger.Info("start to execute script") + _, err = util.RunBashCmd( + cmd, + "", nil, + 10*time.Second) + if err != nil { + b.runtime.Logger.Error("set cluster balancer status fail, error:%s", err) + return fmt.Errorf("set cluster balancer status fail, error:%s", err) + } + b.runtime.Logger.Info("execute script successfully") + + // verify the new status + b.runtime.Logger.Info("start to check balancer status") + result, err = common.CheckBalancer(b.OsUser, b.Mongo, b.ConfParams.IP, b.ConfParams.Port, + b.ConfParams.AdminUsername, b.ConfParams.AdminPassword) + if err != nil { + b.runtime.Logger.Error("get cluster balancer status fail, error:%s", err) + return fmt.Errorf("get cluster balancer status fail, error:%s", err) + } + flag, _ = strconv.ParseBool(result) + b.runtime.Logger.Info("check balancer status successfully") + if flag != b.ConfParams.Open { + b.runtime.Logger.Error("set balancer status:%t fail", b.ConfParams.Open) + return fmt.Errorf("set balancer status:%t fail", b.ConfParams.Open) + } + b.runtime.Logger.Info("set balancer status:%t successfully", b.ConfParams.Open) + + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/cluster_install_test.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/cluster_install_test.go new file mode 100644 index 0000000000..9e292ef0f6 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/cluster_install_test.go @@ -0,0 +1,888 @@ +package atommongodb + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "fmt" + "net" + "path" + "strings" + "testing" + "time" +) + +// TestShard1 shard1 install test +func TestShard1(t *testing.T) { + // set environment variables + err := consts.SetMongoDataDir("") + if err != nil { + fmt.Println(fmt.Sprintf("Shard1 install SetMongoData fail, error:%s", err)) + t.Errorf("Shard1 install SetMongoData fail, error:%s", err) + return + } + err = consts.SetMongoBackupDir("") +
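// "" selects the default directory; the assumption here is that consts falls back to its built-in data/backup paths when the argument is empty. +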
if err != nil { + fmt.Println(fmt.Sprintf("Shard1 install SetMongoBackup fail, error:%s", err)) + t.Errorf("Shard1 install SetMongoBackup fail, error:%s", err) + return + } + + err = consts.SetProcessUser("") + if err != nil { + fmt.Println(fmt.Sprintf("Shard1 install SetProcessUser fail, error:%s", err)) + t.Errorf("Shard1 install SetProcessUser fail, error:%s", err) + return + } + err = consts.SetProcessUserGroup("") + if err != nil { + fmt.Println(fmt.Sprintf("Shard1 install SetProcessUserGroup fail, error:%s", err)) + t.Errorf("Shard1 install SetProcessUserGroup fail, error:%s", err) + return + } + + // 初始化节点 + osSysInitParam := "{\n\"user\":\"mysql\",\n\"password\":\"Qwe123d\"\n}" + osSysInit := &atomsys.OsMongoInit{} + osSysInitRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: osSysInitParam, + } + osSysInitRuntime.SetLogger() + if err := osSysInit.Init(osSysInitRuntime); err != nil { + fmt.Println(fmt.Sprintf("shard1 replicate install osSysInit init fail, error:%s", err)) + t.Errorf("shard1 replicate install osSysInit init fail, error:%s", err) + return + } + if err := osSysInit.Run(); err != nil { + fmt.Println(fmt.Sprintf("shard1 replicate install osSysInit run fail, error:%s", err)) + t.Errorf("shard1 replicate install osSysInit run fail, error:%s", err) + return + } + + // 获取本机IP地址 + var ip string + addrs, _ := net.InterfaceAddrs() + for _, addr := range addrs { + if !strings.Contains(addr.String(), "127.0.0.1") { + ip = strings.Split(addr.String(), "/")[0] + break + } + } + + // node1 + node1 := "{\n \"mediapkg\":{\n \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n },\n \"ip\":\"{{ip}}\",\n \"port\":27001,\n \"dbVersion\":\"3.4.20\",\n \"instanceType\":\"mongod\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"s1\",\n \"auth\": true,\n \"clusterRole\":\"shardsvr\",\n \"dbConfig\":{\n \"slowOpThresholdMs\":200,\n \"cacheSizeGB\":1,\n \"oplogSizeMB\":500,\n \"destination\":\"file\"\n }\n}" + node1 = strings.Replace(node1, "{{ip}}", ip, -1) + + // node2 + node2 := "{\n \"mediapkg\":{\n \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n },\n \"ip\":\"{{ip}}\",\n \"port\":27002,\n \"dbVersion\":\"3.4.20\",\n \"instanceType\":\"mongod\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"s1\",\n \"auth\": true,\n \"clusterRole\":\"shardsvr\",\n \"dbConfig\":{\n \"slowOpThresholdMs\":200,\n \"cacheSizeGB\":1,\n \"oplogSizeMB\":500,\n \"destination\":\"file\"\n }\n}" + node2 = strings.Replace(node2, "{{ip}}", ip, -1) + + // node3 + node3 := "{\n \"mediapkg\":{\n \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n },\n \"ip\":\"{{ip}}\",\n \"port\":27003,\n \"dbVersion\":\"3.4.20\",\n \"instanceType\":\"mongod\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"s1\",\n \"auth\": true,\n \"clusterRole\":\"shardsvr\",\n \"dbConfig\":{\n \"slowOpThresholdMs\":200,\n \"cacheSizeGB\":1,\n \"oplogSizeMB\":500,\n \"destination\":\"file\"\n }\n}" + node3 = strings.Replace(node3, "{{ip}}", ip, -1) + + node1MongodInstall := NewMongoDBInstall() + node2MongodInstall := NewMongoDBInstall() + node3MongodInstall := NewMongoDBInstall() + + node1Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: node1, + } + node1Runtime.SetLogger() + node2Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: node2, + } + node2Runtime.SetLogger() + node3Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: node3, + 
} + node3Runtime.SetLogger() + + // 安装节点 + if err := node1MongodInstall.Init(node1Runtime); err != nil { + fmt.Println(fmt.Sprintf("shard1 replicate install node1 init fail, error:%s", err)) + t.Errorf("shard1 replicate install node1 init fail, error:%s", err) + return + } + if err := node1MongodInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("shard1 replicate install node1 run fail, error:%s", err)) + t.Errorf("shard1 replicate install node1 run fail, error:%s", err) + return + } + if err := node2MongodInstall.Init(node2Runtime); err != nil { + fmt.Println(fmt.Sprintf("shard1 replicate install node2 init fail, error:%s", err)) + t.Errorf("shard1 replicate install node2 init fail, error:%s", err) + return + } + if err := node2MongodInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("shard1 replicate install node2 run fail, error:%s", err)) + t.Errorf("shard1 replicate install node2 run fail, error:%s", err) + return + } + if err := node3MongodInstall.Init(node3Runtime); err != nil { + fmt.Println(fmt.Sprintf("shard1 replicate install node3 init fail, error:%s", err)) + t.Errorf("shard1 replicate install node3 init fail, error:%s", err) + return + } + if err := node3MongodInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("shard1 replicate install node3 run fail, error:%s", err)) + t.Errorf("shard1 replicate install node3 run fail, error:%s", err) + return + } + + // 复制集初始化 + initReplicasetParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27001,\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"s1\",\n \"configSvr\":false,\n \"ips\":[\n \"{{ip}}:27001\",\n \"{{ip}}:27002\",\n \"{{ip}}:27003\"\n ],\n \"priority\":{\n \"{{ip}}:27001\":1,\n \"{{ip}}:27002\":1,\n \"{{ip}}:27003\":0\n },\n \"hidden\":{\n \"{{ip}}:27001\":false,\n \"{{ip}}:27002\":false,\n \"{{ip}}:27003\":true\n }\n}" + initReplicasetParam = strings.Replace(initReplicasetParam, "{{ip}}", ip, -1) + initReplicasetRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: initReplicasetParam, + } + initReplicasetRuntime.SetLogger() + initReplicaset := NewInitiateReplicaset() + if err := initReplicaset.Init(initReplicasetRuntime); err != nil { + fmt.Println(fmt.Sprintf("shard1 replicate install initReplicaset init fail, error:%s", err)) + t.Errorf("shard1 replicate install initReplicaset init fail, error:%s", err) + return + } + if err := initReplicaset.Run(); err != nil { + fmt.Println(fmt.Sprintf("shard1 replicate install initReplicaset run fail, error:%s", err)) + t.Errorf("shard1 replicate install initReplicaset run fail, error:%s", err) + return + } + time.Sleep(time.Second * 3) + // 创建管理员用户 + addAdminUserParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27001,\n \"instanceType\":\"mongod\",\n \"username\":\"dba\",\n \"password\":\"dba\",\n \"adminUsername\":\"\",\n \"adminPassword\":\"\",\n \"authDb\":\"admin\",\n \"dbs\":[\n\n ],\n \"privileges\":[\n \"root\"\n ]\n}" + addAdminUserParam = strings.Replace(addAdminUserParam, "{{ip}}", ip, -1) + addAdminUserRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: addAdminUserParam, + } + addAdminUserRuntime.SetLogger() + addAdminUser := NewAddUser() + if err := addAdminUser.Init(addAdminUserRuntime); err != nil { + fmt.Println(fmt.Sprintf("shard1 replicate install addAdminUser init fail, error:%s", err)) + t.Errorf("shard1 replicate install addAdminUser init fail, error:%s", err) + return + } + if err := addAdminUser.Run(); err != nil { + fmt.Println(fmt.Sprintf("shard1 replicate install addAdminUser run fail, error:%s", err)) + t.Errorf("shard1 replicate install 
addAdminUser run fail, error:%s", err) + return + } +} + +// TestShard2 安装shard2测试 +func TestShard2(t *testing.T) { + // 设置环境变量 + err := consts.SetMongoDataDir("") + if err != nil { + fmt.Println(fmt.Sprintf("Shard2 install SetMongoData fail, error:%s", err)) + t.Errorf("Shard2 install SetMongoData fail, error:%s", err) + return + } + err = consts.SetMongoBackupDir("") + if err != nil { + fmt.Println(fmt.Sprintf("Shard2 install SetMongoBackup fail, error:%s", err)) + t.Errorf("Shard2 install SetMongoBackup fail, error:%s", err) + return + } + + err = consts.SetProcessUser("") + if err != nil { + fmt.Println(fmt.Sprintf("Shard2 install SetProcessUser fail, error:%s", err)) + t.Errorf("Shard2 install SetProcessUser fail, error:%s", err) + return + } + err = consts.SetProcessUserGroup("") + if err != nil { + fmt.Println(fmt.Sprintf("Shard2 install SetProcessUserGroup fail, error:%s", err)) + t.Errorf("Shard2 install SetProcessUserGroup fail, error:%s", err) + return + } + + // 初始化节点 + osSysInitParam := "{\n\"user\":\"mysql\",\n\"password\":\"Qwe123d\"\n}" + osSysInit := &atomsys.OsMongoInit{} + osSysInitRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: osSysInitParam, + } + osSysInitRuntime.SetLogger() + if err := osSysInit.Init(osSysInitRuntime); err != nil { + fmt.Println(fmt.Sprintf("shard2 replicate install osSysInit init fail, error:%s", err)) + t.Errorf("shard2 replicate install osSysInit init fail, error:%s", err) + return + } + if err := osSysInit.Run(); err != nil { + fmt.Println(fmt.Sprintf("shard2 replicate install osSysInit run fail, error:%s", err)) + t.Errorf("shard2 replicate install osSysInit run fail, error:%s", err) + return + } + + // 获取本机IP地址 + var ip string + addrs, _ := net.InterfaceAddrs() + for _, addr := range addrs { + if !strings.Contains(addr.String(), "127.0.0.1") { + ip = strings.Split(addr.String(), "/")[0] + break + } + } + + // node1 + node1 := "{\n \"mediapkg\":{\n \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n },\n \"ip\":\"{{ip}}\",\n \"port\":27005,\n \"dbVersion\":\"3.4.20\",\n \"instanceType\":\"mongod\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"s2\",\n \"auth\": true,\n \"clusterRole\":\"shardsvr\",\n \"dbConfig\":{\n \"slowOpThresholdMs\":200,\n \"cacheSizeGB\":1,\n \"oplogSizeMB\":500,\n \"destination\":\"file\"\n }\n}" + node1 = strings.Replace(node1, "{{ip}}", ip, -1) + + // node2 + node2 := "{\n \"mediapkg\":{\n \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n },\n \"ip\":\"{{ip}}\",\n \"port\":27006,\n \"dbVersion\":\"3.4.20\",\n \"instanceType\":\"mongod\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"s2\",\n \"auth\": true,\n \"clusterRole\":\"shardsvr\",\n \"dbConfig\":{\n \"slowOpThresholdMs\":200,\n \"cacheSizeGB\":1,\n \"oplogSizeMB\":500,\n \"destination\":\"file\"\n }\n}" + node2 = strings.Replace(node2, "{{ip}}", ip, -1) + + // node3 + node3 := "{\n \"mediapkg\":{\n \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n },\n \"ip\":\"{{ip}}\",\n \"port\":27007,\n \"dbVersion\":\"3.4.20\",\n \"instanceType\":\"mongod\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"s2\",\n \"auth\": true,\n \"clusterRole\":\"shardsvr\",\n \"dbConfig\":{\n \"slowOpThresholdMs\":200,\n \"cacheSizeGB\":1,\n \"oplogSizeMB\":500,\n \"destination\":\"file\"\n }\n}" + node3 = strings.Replace(node3, "{{ip}}", ip, -1) + + node1MongodInstall := NewMongoDBInstall() + 
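// one install job per mongod node; each JobGenericRuntime below feeds that node's rendered JSON to the job via PayloadDecoded. +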
node2MongodInstall := NewMongoDBInstall() + node3MongodInstall := NewMongoDBInstall() + + node1Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: node1, + } + node1Runtime.SetLogger() + node2Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: node2, + } + node2Runtime.SetLogger() + node3Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: node3, + } + node3Runtime.SetLogger() + + // 安装节点 + if err := node1MongodInstall.Init(node1Runtime); err != nil { + fmt.Println(fmt.Sprintf("shard2 replicate install node1 init fail, error:%s", err)) + t.Errorf("shard2 replicate install node1 init fail, error:%s", err) + return + } + if err := node1MongodInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("shard2 replicate install node1 run fail, error:%s", err)) + t.Errorf("shard2 replicate install node1 run fail, error:%s", err) + return + } + if err := node2MongodInstall.Init(node2Runtime); err != nil { + fmt.Println(fmt.Sprintf("shard2 replicate install node2 init fail, error:%s", err)) + t.Errorf("shard2 replicate install node2 init fail, error:%s", err) + return + } + if err := node2MongodInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("shard2 replicate install node2 run fail, error:%s", err)) + t.Errorf("shard2 replicate install node2 run fail, error:%s", err) + return + } + if err := node3MongodInstall.Init(node3Runtime); err != nil { + fmt.Println(fmt.Sprintf("shard2 replicate install node3 init fail, error:%s", err)) + t.Errorf("shard2 replicate install node3 init fail, error:%s", err) + return + } + if err := node3MongodInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("shard2 replicate install node3 run fail, error:%s", err)) + t.Errorf("shard2 replicate install node3 run fail, error:%s", err) + return + } + + // 复制集初始化 + initReplicasetParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27005,\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"s2\",\n \"configSvr\":false,\n \"ips\":[\n \"{{ip}}:27005\",\n \"{{ip}}:27006\",\n \"{{ip}}:27007\"\n ],\n \"priority\":{\n \"{{ip}}:27005\":1,\n \"{{ip}}:27006\":1,\n \"{{ip}}:27007\":0\n },\n \"hidden\":{\n \"{{ip}}:27005\":false,\n \"{{ip}}:27006\":false,\n \"{{ip}}:27007\":true\n }\n}" + initReplicasetParam = strings.Replace(initReplicasetParam, "{{ip}}", ip, -1) + initReplicasetRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: initReplicasetParam, + } + initReplicasetRuntime.SetLogger() + initReplicaset := NewInitiateReplicaset() + if err := initReplicaset.Init(initReplicasetRuntime); err != nil { + fmt.Println(fmt.Sprintf("shard2 replicate install initReplicaset init fail, error:%s", err)) + t.Errorf("shard2 replicate install initReplicaset init fail, error:%s", err) + return + } + if err := initReplicaset.Run(); err != nil { + fmt.Println(fmt.Sprintf("shard2 replicate install initReplicaset run fail, error:%s", err)) + t.Errorf("shard2 replicate install initReplicaset run fail, error:%s", err) + return + } + time.Sleep(time.Second * 3) + // 创建管理员用户 + addAdminUserParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27005,\n \"instanceType\":\"mongod\",\n \"username\":\"dba\",\n \"password\":\"dba\",\n \"adminUsername\":\"\",\n \"adminPassword\":\"\",\n \"authDb\":\"admin\",\n \"dbs\":[\n\n ],\n \"privileges\":[\n \"root\"\n ]\n}" + addAdminUserParam = strings.Replace(addAdminUserParam, "{{ip}}", ip, -1) + addAdminUserRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: addAdminUserParam, + } + addAdminUserRuntime.SetLogger() + addAdminUser := NewAddUser() + if err := addAdminUser.Init(addAdminUserRuntime); 
err != nil { + fmt.Println(fmt.Sprintf("shard2 replicate install addAdminUser init fail, error:%s", err)) + t.Errorf("shard2 replicate install addAdminUser init fail, error:%s", err) + return + } + if err := addAdminUser.Run(); err != nil { + fmt.Println(fmt.Sprintf("shard2 replicate install addAdminUser run fail, error:%s", err)) + t.Errorf("shard2 replicate install addAdminUser run fail, error:%s", err) + return + } +} + +// TestConfigDB 安装ConfigDB测试 +func TestConfigDB(t *testing.T) { + // 设置环境变量 + err := consts.SetMongoDataDir("") + if err != nil { + fmt.Println(fmt.Sprintf("configdb install SetMongoData fail, error:%s", err)) + t.Errorf("configdb install SetMongoData fail, error:%s", err) + return + } + err = consts.SetMongoBackupDir("") + if err != nil { + fmt.Println(fmt.Sprintf("configdb install SetMongoBackup fail, error:%s", err)) + t.Errorf("configdb install SetMongoBackup fail, error:%s", err) + return + } + + err = consts.SetProcessUser("") + if err != nil { + fmt.Println(fmt.Sprintf("configdb install SetProcessUser fail, error:%s", err)) + t.Errorf("configdb install SetProcessUser fail, error:%s", err) + return + } + err = consts.SetProcessUserGroup("") + if err != nil { + fmt.Println(fmt.Sprintf("configdb install SetProcessUserGroup fail, error:%s", err)) + t.Errorf("configdb install SetProcessUserGroup fail, error:%s", err) + return + } + + // 初始化节点 + osSysInitParam := "{\n\"user\":\"mysql\",\n\"password\":\"Qwe123d\"\n}" + osSysInit := &atomsys.OsMongoInit{} + osSysInitRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: osSysInitParam, + } + osSysInitRuntime.SetLogger() + if err := osSysInit.Init(osSysInitRuntime); err != nil { + fmt.Println(fmt.Sprintf("configdb replicate install osSysInit init fail, error:%s", err)) + t.Errorf("configdb replicate install osSysInit init fail, error:%s", err) + return + } + if err := osSysInit.Run(); err != nil { + fmt.Println(fmt.Sprintf("configdb replicate install osSysInit run fail, error:%s", err)) + t.Errorf("configdb replicate install osSysInit run fail, error:%s", err) + return + } + + // 获取本机IP地址 + var ip string + addrs, _ := net.InterfaceAddrs() + for _, addr := range addrs { + if !strings.Contains(addr.String(), "127.0.0.1") { + ip = strings.Split(addr.String(), "/")[0] + break + } + } + + // node1 + node1 := "{\n \"mediapkg\":{\n \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n },\n \"ip\":\"{{ip}}\",\n \"port\":27020,\n \"dbVersion\":\"3.4.20\",\n \"instanceType\":\"mongod\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"conf\",\n \"auth\": true,\n \"clusterRole\":\"configsvr\",\n \"dbConfig\":{\n \"slowOpThresholdMs\":200,\n \"cacheSizeGB\":1,\n \"oplogSizeMB\":500,\n \"destination\":\"file\"\n }\n}" + node1 = strings.Replace(node1, "{{ip}}", ip, -1) + + // node2 + node2 := "{\n \"mediapkg\":{\n \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n },\n \"ip\":\"{{ip}}\",\n \"port\":27021,\n \"dbVersion\":\"3.4.20\",\n \"instanceType\":\"mongod\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"conf\",\n \"auth\": true,\n \"clusterRole\":\"configsvr\",\n \"dbConfig\":{\n \"slowOpThresholdMs\":200,\n \"cacheSizeGB\":1,\n \"oplogSizeMB\":500,\n \"destination\":\"file\"\n }\n}" + node2 = strings.Replace(node2, "{{ip}}", ip, -1) + + // node3 + node3 := "{\n \"mediapkg\":{\n \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n },\n \"ip\":\"{{ip}}\",\n 
\"port\":27022,\n \"dbVersion\":\"3.4.20\",\n \"instanceType\":\"mongod\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"conf\",\n \"auth\": true,\n \"clusterRole\":\"configsvr\",\n \"dbConfig\":{\n \"slowOpThresholdMs\":200,\n \"cacheSizeGB\":1,\n \"oplogSizeMB\":500,\n \"destination\":\"file\"\n }\n}" + node3 = strings.Replace(node3, "{{ip}}", ip, -1) + // node4 + node4 := "{\n \"mediapkg\":{\n \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n },\n \"ip\":\"{{ip}}\",\n \"port\":27004,\n \"dbVersion\":\"3.4.20\",\n \"instanceType\":\"mongod\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"conf\",\n \"auth\": true,\n \"clusterRole\":\"configsvr\",\n \"dbConfig\":{\n \"slowOpThresholdMs\":200,\n \"cacheSizeGB\":1,\n \"oplogSizeMB\":500,\n \"destination\":\"file\"\n }\n}" + node4 = strings.Replace(node4, "{{ip}}", ip, -1) + + node1MongodInstall := NewMongoDBInstall() + node2MongodInstall := NewMongoDBInstall() + node3MongodInstall := NewMongoDBInstall() + node4MongodInstall := NewMongoDBInstall() + + node1Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: node1, + } + node1Runtime.SetLogger() + node2Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: node2, + } + node2Runtime.SetLogger() + node3Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: node3, + } + node3Runtime.SetLogger() + node4Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: node4, + } + node4Runtime.SetLogger() + + // 安装节点 + if err := node1MongodInstall.Init(node1Runtime); err != nil { + fmt.Println(fmt.Sprintf("configdb replicate install node1 init fail, error:%s", err)) + t.Errorf("configdb replicate install node1 init fail, error:%s", err) + return + } + if err := node1MongodInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("configdb replicate install node1 run fail, error:%s", err)) + t.Errorf("configdb replicate install node1 run fail, error:%s", err) + return + } + if err := node2MongodInstall.Init(node2Runtime); err != nil { + fmt.Println(fmt.Sprintf("configdb replicate install node2 init fail, error:%s", err)) + t.Errorf("configdb replicate install node2 init fail, error:%s", err) + return + } + if err := node2MongodInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("configdb replicate install node2 run fail, error:%s", err)) + t.Errorf("configdb replicate install node2 run fail, error:%s", err) + return + } + if err := node3MongodInstall.Init(node3Runtime); err != nil { + fmt.Println(fmt.Sprintf("configdb replicate install node3 init fail, error:%s", err)) + t.Errorf("configdb replicate install node3 init fail, error:%s", err) + return + } + if err := node3MongodInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("configdb replicate install node3 run fail, error:%s", err)) + t.Errorf("configdb replicate install node3 run fail, error:%s", err) + return + } + if err := node4MongodInstall.Init(node4Runtime); err != nil { + fmt.Println(fmt.Sprintf("configdb replicate install node4 init fail, error:%s", err)) + t.Errorf("configdb replicate install node4 init fail, error:%s", err) + return + } + if err := node4MongodInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("configdb replicate install node4 run fail, error:%s", err)) + t.Errorf("configdb replicate install node4 run fail, error:%s", err) + return + } + + // 复制集初始化 + initReplicasetParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27020,\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"conf\",\n \"configSvr\":true,\n \"ips\":[\n 
\"{{ip}}:27020\",\n \"{{ip}}:27021\",\n \"{{ip}}:27022\"\n ],\n \"priority\":{\n \"{{ip}}:27020\":1,\n \"{{ip}}:27021\":1,\n \"{{ip}}:27022\":0\n },\n \"hidden\":{\n \"{{ip}}:27020\":false,\n \"{{ip}}:27021\":false,\n \"{{ip}}:27022\":true\n }\n}" + initReplicasetParam = strings.Replace(initReplicasetParam, "{{ip}}", ip, -1) + initReplicasetRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: initReplicasetParam, + } + initReplicasetRuntime.SetLogger() + initReplicaset := NewInitiateReplicaset() + if err := initReplicaset.Init(initReplicasetRuntime); err != nil { + fmt.Println(fmt.Sprintf("configdb replicate install initReplicaset init fail, error:%s", err)) + t.Errorf("configdb replicate install initReplicaset init fail, error:%s", err) + return + } + if err := initReplicaset.Run(); err != nil { + fmt.Println(fmt.Sprintf("configdb replicate install initReplicaset run fail, error:%s", err)) + t.Errorf("configdb replicate install initReplicaset run fail, error:%s", err) + return + } + time.Sleep(time.Second * 3) + // 创建管理员用户 + addAdminUserParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27020,\n \"instanceType\":\"mongod\",\n \"username\":\"dba\",\n \"password\":\"dba\",\n \"adminUsername\":\"\",\n \"adminPassword\":\"\",\n \"authDb\":\"admin\",\n \"dbs\":[\n\n ],\n \"privileges\":[\n \"root\"\n ]\n}" + addAdminUserParam = strings.Replace(addAdminUserParam, "{{ip}}", ip, -1) + addAdminUserRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: addAdminUserParam, + } + addAdminUserRuntime.SetLogger() + addAdminUser := NewAddUser() + if err := addAdminUser.Init(addAdminUserRuntime); err != nil { + fmt.Println(fmt.Sprintf("configdb replicate install addAdminUser init fail, error:%s", err)) + t.Errorf("configdb replicate install addAdminUser init fail, error:%s", err) + return + } + if err := addAdminUser.Run(); err != nil { + fmt.Println(fmt.Sprintf("configdb replicate install addAdminUser run fail, error:%s", err)) + t.Errorf("configdb replicate install addAdminUser run fail, error:%s", err) + return + } +} + +// TestMongoS 安装mongos测试 +func TestMongoS(t *testing.T) { + // 设置环境变量 + err := consts.SetMongoDataDir("") + if err != nil { + fmt.Println(fmt.Sprintf("mongos install SetMongoData fail, error:%s", err)) + t.Errorf("mongos install SetMongoData fail, error:%s", err) + return + } + err = consts.SetMongoBackupDir("") + if err != nil { + fmt.Println(fmt.Sprintf("mongos install SetMongoBackup fail, error:%s", err)) + t.Errorf("mongos install SetMongoBackup fail, error:%s", err) + return + } + + err = consts.SetProcessUser("") + if err != nil { + fmt.Println(fmt.Sprintf("mongos install SetProcessUser fail, error:%s", err)) + t.Errorf("mongos install SetProcessUser fail, error:%s", err) + return + } + err = consts.SetProcessUserGroup("") + if err != nil { + fmt.Println(fmt.Sprintf("mongos install SetProcessUserGroup fail, error:%s", err)) + t.Errorf("mongos install SetProcessUserGroup fail, error:%s", err) + return + } + + // 初始化节点 + osSysInitParam := "{\n\"user\":\"mysql\",\n\"password\":\"Qwe123d\"\n}" + osSysInit := &atomsys.OsMongoInit{} + osSysInitRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: osSysInitParam, + } + osSysInitRuntime.SetLogger() + if err := osSysInit.Init(osSysInitRuntime); err != nil { + fmt.Println(fmt.Sprintf("mongos install osSysInit init fail, error:%s", err)) + t.Errorf("mongos install osSysInit init fail, error:%s", err) + return + } + if err := osSysInit.Run(); err != nil { + fmt.Println(fmt.Sprintf("mongos install osSysInit run fail, error:%s", 
err)) + t.Errorf("mongos install osSysInit run fail, error:%s", err) + return + } + + // get the local IP address + var ip string + addrs, _ := net.InterfaceAddrs() + for _, addr := range addrs { + if !strings.Contains(addr.String(), "127.0.0.1") { + ip = strings.Split(addr.String(), "/")[0] + break + } + } + mongos1Param := "{\n    \"mediapkg\":{\n        \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n        \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n    },\n    \"ip\":\"{{ip}}\",\n    \"port\":27030,\n    \"dbVersion\":\"3.4.20\",\n    \"instanceType\":\"mongos\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n    \"auth\": true,\n    \"configDB\":[\"{{ip}}:27020\",\"{{ip}}:27021\",\"{{ip}}:27022\"],\n    \"dbConfig\":{\n        \"slowOpThresholdMs\":200,\n        \"destination\":\"file\"\n    }\n}" + mongos2Param := "{\n    \"mediapkg\":{\n        \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n        \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n    },\n    \"ip\":\"{{ip}}\",\n    \"port\":27031,\n    \"dbVersion\":\"3.4.20\",\n    \"instanceType\":\"mongos\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n    \"auth\": true,\n    \"configDB\":[\"{{ip}}:27020\",\"{{ip}}:27021\",\"{{ip}}:27022\"],\n    \"dbConfig\":{\n        \"slowOpThresholdMs\":200,\n        \"destination\":\"file\"\n    }\n}" + mongos1Param = strings.Replace(mongos1Param, "{{ip}}", ip, -1) + mongos2Param = strings.Replace(mongos2Param, "{{ip}}", ip, -1) + + mongos1MongoSInstall := NewMongoSInstall() + mongos2MongoSInstall := NewMongoSInstall() + + mongos1Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: mongos1Param, + } + mongos1Runtime.SetLogger() + mongos2Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: mongos2Param, + } + mongos2Runtime.SetLogger() + + // install mongos + if err := mongos1MongoSInstall.Init(mongos1Runtime); err != nil { + fmt.Println(fmt.Sprintf("mongos1 install init fail, error:%s", err)) + t.Errorf("mongos1 install init fail, error:%s", err) + return + } + if err := mongos1MongoSInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("mongos1 install run fail, error:%s", err)) + t.Errorf("mongos1 install run fail, error:%s", err) + return + } + if err := mongos2MongoSInstall.Init(mongos2Runtime); err != nil { + fmt.Println(fmt.Sprintf("mongos2 install init fail, error:%s", err)) + t.Errorf("mongos2 install init fail, error:%s", err) + return + } + if err := mongos2MongoSInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("mongos2 install run fail, error:%s", err)) + t.Errorf("mongos2 install run fail, error:%s", err) + return + } + +} + +func TestCluster(t *testing.T) { + // set environment variables + err := consts.SetMongoDataDir("") + if err != nil { + fmt.Println(fmt.Sprintf("cluster SetMongoData fail, error:%s", err)) + t.Errorf("cluster SetMongoData fail, error:%s", err) + return + } + err = consts.SetMongoBackupDir("") + if err != nil { + fmt.Println(fmt.Sprintf("cluster SetMongoBackup fail, error:%s", err)) + t.Errorf("cluster SetMongoBackup fail, error:%s", err) + return + } + + err = consts.SetProcessUser("") + if err != nil { + fmt.Println(fmt.Sprintf("cluster SetProcessUser fail, error:%s", err)) + t.Errorf("cluster SetProcessUser fail, error:%s", err) + return + } + err = consts.SetProcessUserGroup("") + if err != nil { + fmt.Println(fmt.Sprintf("cluster SetProcessUserGroup fail, error:%s", err)) + t.Errorf("cluster SetProcessUserGroup fail, error:%s", err) + return + } + + // initialize the node + osSysInitParam := "{\n\"user\":\"mysql\",\n\"password\":\"Qwe123d\"\n}" + osSysInit := &atomsys.OsMongoInit{} + osSysInitRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: osSysInitParam,
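// payload: the os-init JSON built above ({"user":"mysql", ...})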
+ } + osSysInitRuntime.SetLogger() + if err := osSysInit.Init(osSysInitRuntime); err != nil { + fmt.Println(fmt.Sprintf("cluster osSysInit init fail, error:%s", err)) + t.Errorf("cluster osSysInit init fail, error:%s", err) + return + } + if err := osSysInit.Run(); err != nil { + fmt.Println(fmt.Sprintf("cluster osSysInit run fail, error:%s", err)) + t.Errorf("cluster osSysInit run fail, error:%s", err) + return + } + + // 获取本机IP地址 + var ip string + addrs, _ := net.InterfaceAddrs() + for _, addr := range addrs { + if !strings.Contains(addr.String(), "127.0.0.1") { + ip = strings.Split(addr.String(), "/")[0] + break + } + } + + // cluster添加shard + addShardParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27030,\n \"adminUsername\":\"dba\",\n \"adminPassword\":\"dba\",\n \"shard\":{\n \"test-test1-s1\":\"{{ip}}:27001,{{ip}}:27002\",\n \"test-test1-s2\":\"{{ip}}:27005,{{ip}}:27006\"\n }\n}" + addShardParam = strings.Replace(addShardParam, "{{ip}}", ip, -1) + addShard := NewAddShardToCluster() + + addShardRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: addShardParam, + } + addShardRuntime.SetLogger() + + if err := addShard.Init(addShardRuntime); err != nil { + fmt.Println(fmt.Sprintf("cluster addShard init fail, error:%s", err)) + t.Errorf("cluster addShard init fail, error:%s", err) + return + } + if err := addShard.Run(); err != nil { + fmt.Println(fmt.Sprintf("cluster addShard run fail, error:%s", err)) + t.Errorf("cluster addShard run fail, error:%s", err) + return + } + + // 创建业务用户 + addUserParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27030,\n \"instanceType\":\"mongos\",\n \"username\":\"test\",\n \"password\":\"test\",\n \"adminUsername\":\"dba\",\n \"adminPassword\":\"dba\",\n \"authDb\":\"admin\",\n \"dbs\":[\n\n ],\n \"privileges\":[\n \"readWriteAnyDatabase\"\n ]\n}" + addUserParam = strings.Replace(addUserParam, "{{ip}}", ip, -1) + addUserRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: addUserParam, + } + addUserRuntime.SetLogger() + addUser := NewAddUser() + if err := addUser.Init(addUserRuntime); err != nil { + fmt.Println(fmt.Sprintf("cluster addUser init fail, error:%s", err)) + t.Errorf("cluster addUser init fail, error:%s", err) + return + } + if err := addUser.Run(); err != nil { + fmt.Println(fmt.Sprintf("cluster addUser run fail, error:%s", err)) + t.Errorf("cluster addUser run fail, error:%s", err) + return + } + + // 删除业务用户 + delUserParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27030,\n \"instanceType\":\"mongos\",\n \"adminUsername\":\"dba\",\n \"adminPassword\":\"dba\",\n \"username\":\"test\",\n \"authDb\":\"admin\"\n}" + delUserParam = strings.Replace(delUserParam, "{{ip}}", ip, -1) + delUserRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: delUserParam, + } + delUserRuntime.SetLogger() + delUser := NewDelUser() + if err := delUser.Init(delUserRuntime); err != nil { + fmt.Println(fmt.Sprintf("cluster delUser init fail, error:%s", err)) + t.Errorf("cluster delUser init fail, error:%s", err) + return + } + if err := delUser.Run(); err != nil { + fmt.Println(fmt.Sprintf("cluster delUser run fail, error:%s", err)) + t.Errorf("cluster delUser run fail, error:%s", err) + return + } + + // 执行脚本 + execScriptParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27030,\n \"script\":\"var mongo = db;\\nmongo.getSisterDB('admin').runCommand({listDatabases:1}).databases.forEach (function (x) { print(x.name)});\\n\",\n \"type\":\"cluster\",\n \"secondary\": false,\n \"adminUsername\":\"dba\",\n \"adminPassword\":\"dba\",\n \"repoUrl\":\"\",\n 
\"repoUsername\":\"\",\n \"repoToken\":\"\",\n \"repoProject\":\"\",\n \"repoRepo\":\"\",\n \"repoPath\":\"\"\n}" + execScriptParam = strings.Replace(execScriptParam, "{{ip}}", ip, -1) + execScriptRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: execScriptParam, + } + execScriptRuntime.SetLogger() + execScript := NewExecScript() + if err := execScript.Init(execScriptRuntime); err != nil { + fmt.Println(fmt.Sprintf("cluster execScript init fail, error:%s", err)) + t.Errorf("cluster execScript init fail, error:%s", err) + return + } + if err := execScript.Run(); err != nil { + fmt.Println(fmt.Sprintf("cluster execScript run fail, error:%s", err)) + t.Errorf("cluster execScript run fail, error:%s", err) + return + } + + // 重启mongod + restartParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27005,\n \"instanceType\":\"mongod\",\n \"singleNodeInstallRestart\":false, \n \"auth\":true,\n \"cacheSizeGB\": 2,\n \"mongoSConfDbOld\":\"\",\n \"MongoSConfDbNew\":\"\",\n \"adminUsername\":\"dba\",\n \"adminPassword\":\"dba\"\n}" + restartParam = strings.Replace(restartParam, "{{ip}}", ip, -1) + restartRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: restartParam, + } + restartRuntime.SetLogger() + restart := NewMongoRestart() + if err := restart.Init(restartRuntime); err != nil { + fmt.Println(fmt.Sprintf("cluster shard mongod restart init fail, error:%s", err)) + t.Errorf("cluster shard mongod restart init fail, error:%s", err) + return + } + if err := restart.Run(); err != nil { + fmt.Println(fmt.Sprintf("cluster shard mongod restart run fail, error:%s", err)) + t.Errorf("cluster shard mongod restart run fail, error:%s", err) + return + } + + time.Sleep(time.Second * 3) + // 替换config节点 + replaceParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27020,\n \"sourceIP\":\"{{ip}}\",\n \"sourcePort\":27020,\n \"sourceDown\":false,\n \"adminUsername\":\"dba\",\n \"adminPassword\":\"dba\",\n \"targetIP\":\"{{ip}}\",\n \"targetPort\":27004,\n \"targetPriority\":\"\",\n \"targetHidden\":\"\"\n}" + replaceParam = strings.Replace(replaceParam, "{{ip}}", ip, -1) + replaceRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: replaceParam, + } + replaceRuntime.SetLogger() + replace := NewMongoDReplace() + if err := replace.Init(replaceRuntime); err != nil { + fmt.Println(fmt.Sprintf("cluster replace config mongod init fail, error:%s", err)) + t.Errorf("cluster replace config mongod init fail, error:%s", err) + return + } + if err := replace.Run(); err != nil { + fmt.Println(fmt.Sprintf("cluster replace config mongod run fail, error:%s", err)) + t.Errorf("cluster replace config mongod run fail, error:%s", err) + return + } + + // 重启mongos + restart1Param := "{\n \"ip\":\"{{ip}}\",\n \"port\":27030,\n \"instanceType\":\"mongos\",\n \"singleNodeInstallRestart\":false, \n \"auth\":true,\n \"cacheSizeGB\": 0,\n \"mongoSConfDbOld\":\"{{ip}}:27020\",\n \"MongoSConfDbNew\":\"{{ip}}:27004\",\n \"adminUsername\":\"\",\n \"adminPassword\":\"\"\n}" + restart1Param = strings.Replace(restart1Param, "{{ip}}", ip, -1) + restart1Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: restart1Param, + } + restart1Runtime.SetLogger() + restart1 := NewMongoRestart() + if err := restart1.Init(restart1Runtime); err != nil { + fmt.Println(fmt.Sprintf("cluster mongos port:%d restart init fail, error:%s", 27030, err)) + t.Errorf("cluster mongos port:%d restart init fail, error:%s", 27030, err) + return + } + if err := restart1.Run(); err != nil { + fmt.Println(fmt.Sprintf("cluster mongos port:%d restart run fail, 
error:%s", 27030, err)) + t.Errorf("cluster mongos restart port:%d run fail, error:%s", 27030, err) + return + } + + restart2Param := "{\n \"ip\":\"{{ip}}\",\n \"port\":27031,\n \"instanceType\":\"mongos\",\n \"singleNodeInstallRestart\":false, \n \"auth\":true,\n \"cacheSizeGB\": 0,\n \"mongoSConfDbOld\":\"{{ip}}:27020\",\n \"MongoSConfDbNew\":\"{{ip}}:27004\",\n \"adminUsername\":\"\",\n \"adminPassword\":\"\"\n}" + restart2Param = strings.Replace(restart2Param, "{{ip}}", ip, -1) + restart2Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: restart2Param, + } + restart2Runtime.SetLogger() + restart2 := NewMongoRestart() + if err := restart2.Init(restart2Runtime); err != nil { + fmt.Println(fmt.Sprintf("cluster mongos port:%d restart init fail, error:%s", 27031, err)) + t.Errorf("cluster mongos port:%d restart init fail, error:%s", 27031, err) + return + } + if err := restart2.Run(); err != nil { + fmt.Println(fmt.Sprintf("cluster mongos port:%d restart run fail, error:%s", 27031, err)) + t.Errorf("cluster mongos restart port:%d run fail, error:%s", 27031, err) + return + } + + time.Sleep(time.Second * 3) + // 下架mongos + for _, i := range []int{27030, 27031} { + deinstallParam := fmt.Sprintf("{\n \"ip\":\"{{ip}}\",\n \"port\":%d,\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"nodeInfo\":[\n \"{{ip}}\"\n ],\n \"instanceType\":\"mongos\"\n}", i) + deinstallParam = strings.Replace(deinstallParam, "{{ip}}", ip, -1) + deinstallRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: deinstallParam, + } + deinstallRuntime.SetLogger() + deinstal := NewDeInstall() + if err := deinstal.Init(deinstallRuntime); err != nil { + fmt.Println(fmt.Sprintf("cluster deinstal mongos port:%d init fail, error:%s", i, err)) + t.Errorf("cluster deinstal mongos port:%d init fail, error:%s", i, err) + return + } + if err := deinstal.Run(); err != nil { + fmt.Println(fmt.Sprintf("cluster deinstal mongos deinstal port:%d run fail, error:%s", i, err)) + t.Errorf("cluster deinstal mongos port:%d run fail, error:%s", i, err) + return + } + } + + time.Sleep(time.Second * 2) + // 下架shard + for _, i := range []int{27001, 27002, 27003, 27005, 27006, 27007} { + deinstallParam := fmt.Sprintf("{\n \"ip\":\"{{ip}}\",\n \"port\":%d,\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"nodeInfo\":[\n \"{{ip}}\"\n ],\n \"instanceType\":\"mongod\"\n}", i) + deinstallParam = strings.Replace(deinstallParam, "{{ip}}", ip, -1) + deinstallRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: deinstallParam, + } + deinstallRuntime.SetLogger() + deinstal := NewDeInstall() + if err := deinstal.Init(deinstallRuntime); err != nil { + fmt.Println(fmt.Sprintf("cluster deinstal shard mongod port:%d init fail, error:%s", i, err)) + t.Errorf("cluster deinstal shard mongod port:%d init fail, error:%s", i, err) + return + } + if err := deinstal.Run(); err != nil { + fmt.Println(fmt.Sprintf("cluster deinstal shard mongod deinstal port:%d run fail, error:%s", i, err)) + t.Errorf("cluster deinstal shard mongod port:%d run fail, error:%s", i, err) + return + } + } + + time.Sleep(time.Second * 2) + // 下架configdb + for _, i := range []int{27004, 27021, 27022} { + deinstallParam := fmt.Sprintf("{\n \"ip\":\"{{ip}}\",\n \"port\":%d,\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"nodeInfo\":[\n \"{{ip}}\"\n ],\n \"instanceType\":\"mongod\"\n}", i) + deinstallParam = strings.Replace(deinstallParam, "{{ip}}", ip, -1) + deinstallRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: deinstallParam, + } + deinstallRuntime.SetLogger() + 
deinstal := NewDeInstall() + if err := deinstal.Init(deinstallRuntime); err != nil { + fmt.Println(fmt.Sprintf("cluster deinstal configdb mongod port:%d init fail, error:%s", i, err)) + t.Errorf("cluster deinstal configdb mongod port:%d init fail, error:%s", i, err) + return + } + if err := deinstal.Run(); err != nil { + fmt.Println(fmt.Sprintf("cluster deinstal configdb mongod deinstal port:%d run fail, error:%s", i, err)) + t.Errorf("cluster deinstal configdb mongod port:%d run fail, error:%s", i, err) + return + } + } + + // 删除相关目录 + dbData := path.Join(consts.GetMongoDataDir(), "mongodata") + dbLog := path.Join(consts.GetMongoBackupDir(), "mongolog") + softInstall := path.Join(consts.UsrLocal, "mongodb") + cmd := fmt.Sprintf("rm -rf %s;rm -rf %s;rm -rf %s", dbData, dbLog, softInstall) + if _, err = util.RunBashCmd(cmd, "", nil, 10*time.Second); err != nil { + fmt.Println(fmt.Sprintf("delete directories fail, error:%s", err)) + t.Errorf("delete directories fail, error:%s", err) + } + +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/del_user.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/del_user.go new file mode 100644 index 0000000000..78223d8414 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/del_user.go @@ -0,0 +1,203 @@ +package atommongodb + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/go-playground/validator/v10" +) + +// DelUserConfParams 参数 +type DelUserConfParams struct { + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` + InstanceType string `json:"instanceType" validate:"required"` + AdminUsername string `json:"adminUsername" validate:"required"` + AdminPassword string `json:"adminPassword" validate:"required"` + Username string `json:"username" validate:"required"` + AuthDb string `json:"authDb"` // 为方便管理用户,验证库默认为admin库 +} + +// DelUser 添加分片到集群 +type DelUser struct { + runtime *jobruntime.JobGenericRuntime + BinDir string + Mongo string + OsUser string + PrimaryIP string + PrimaryPort int + ScriptContent string + ConfParams *DelUserConfParams +} + +// NewDelUser 实例化结构体 +func NewDelUser() jobruntime.JobRunner { + return &DelUser{} +} + +// Name 获取原子任务的名字 +func (d *DelUser) Name() string { + return "delete_user" +} + +// Run 运行原子任务 +func (d *DelUser) Run() error { + // 生成脚本内容 + if err := d.makeScriptContent(); err != nil { + return err + } + + // 执行js脚本 + if err := d.execScript(); err != nil { + return err + } + + return nil +} + +// Retry 重试 +func (d *DelUser) Retry() uint { + return 2 +} + +// Rollback 回滚 +func (d *DelUser) Rollback() error { + return nil +} + +// Init 初始化 +func (d *DelUser) Init(runtime *jobruntime.JobGenericRuntime) error { + // 获取安装参数 + d.runtime = runtime + d.runtime.Logger.Info("start to init") + d.BinDir = consts.UsrLocal + d.Mongo = filepath.Join(d.BinDir, "mongodb", "bin", "mongo") + d.OsUser = consts.GetProcessUser() + + // 获取MongoDB配置文件参数 + if err := json.Unmarshal([]byte(d.runtime.PayloadDecoded), &d.ConfParams); err != nil { + d.runtime.Logger.Error(fmt.Sprintf( + "get parameters of deleteUser fail by json.Unmarshal, error:%s", err)) + return fmt.Errorf("get parameters of deleteUser fail by json.Unmarshal, error:%s", err) + } + + // 获取primary信息 + if 
d.ConfParams.InstanceType == "mongos" {
+		d.PrimaryIP = d.ConfParams.IP
+		d.PrimaryPort = d.ConfParams.Port
+	} else {
+		info, err := common.AuthGetPrimaryInfo(d.OsUser, d.Mongo, d.ConfParams.AdminUsername, d.ConfParams.AdminPassword,
+			d.ConfParams.IP, d.ConfParams.Port)
+		if err != nil {
+			d.runtime.Logger.Error(fmt.Sprintf(
+				"get primary db info of deleteUser fail, error:%s", err))
+			return fmt.Errorf("get primary db info of deleteUser fail, error:%s", err)
+		}
+		getInfo := strings.Split(info, ":")
+		d.PrimaryIP = getInfo[0]
+		d.PrimaryPort, _ = strconv.Atoi(getInfo[1])
+	}
+	d.runtime.Logger.Info("init successfully")
+
+	// validate the parameters
+	if err := d.checkParams(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// checkParams validates the parameters
+func (d *DelUser) checkParams() error {
+	// validate the deleteUser parameters
+	validate := validator.New()
+	d.runtime.Logger.Info("start to validate parameters of deleteUser")
+	if err := validate.Struct(d.ConfParams); err != nil {
+		d.runtime.Logger.Error(fmt.Sprintf("validate parameters of deleteUser fail, error:%s", err))
+		return fmt.Errorf("validate parameters of deleteUser fail, error:%s", err)
+	}
+	d.runtime.Logger.Info("validate parameters of deleteUser successfully")
+	return nil
+}
+
+// makeScriptContent generates the js script that removes the user
+func (d *DelUser) makeScriptContent() error {
+	d.runtime.Logger.Info("start to make deleteUser script content")
+	// the auth db defaults to admin to keep users manageable in one place
+	if d.ConfParams.AuthDb == "" {
+		d.ConfParams.AuthDb = "admin"
+	}
+
+	// get the mongo version
+	mongoName := "mongod"
+	if d.ConfParams.InstanceType == "mongos" {
+		mongoName = "mongos"
+	}
+	version, err := common.CheckMongoVersion(d.BinDir, mongoName)
+	if err != nil {
+		d.runtime.Logger.Error(fmt.Sprintf("check mongo version fail, error:%s", err))
+		return fmt.Errorf("check mongo version fail, error:%s", err)
+	}
+	mainVersion, _ := strconv.Atoi(strings.Split(version, ".")[0])
+	// dropUser since 3.0; removeUser on older versions
+	if mainVersion >= 3 {
+		d.ScriptContent = strings.Join([]string{fmt.Sprintf("db.getMongo().getDB('%s')", d.ConfParams.AuthDb),
+			fmt.Sprintf("dropUser('%s')", d.ConfParams.Username)}, ".")
+		d.runtime.Logger.Info("make deleteUser script content successfully")
+		return nil
+	}
+	d.ScriptContent = strings.Join([]string{fmt.Sprintf("db.getMongo().getDB('%s')", d.ConfParams.AuthDb),
+		fmt.Sprintf("removeUser('%s')", d.ConfParams.Username)}, ".")
+	d.runtime.Logger.Info("make deleteUser script content successfully")
+	return nil
+}
+
+// execScript executes the js script
+func (d *DelUser) execScript() error {
+	// check whether the user still exists
+	flag, err := common.AuthCheckUser(d.OsUser, d.Mongo, d.ConfParams.AdminUsername, d.ConfParams.AdminPassword,
+		d.PrimaryIP, d.PrimaryPort, d.ConfParams.AuthDb, d.ConfParams.Username)
+	if err != nil {
+		return err
+	}
+	if !flag {
+		d.runtime.Logger.Info(fmt.Sprintf("user:%s does not exist", d.ConfParams.Username))
+		return nil
+	}
+
+	// execute the script
+	d.runtime.Logger.Info("start to execute deleteUser script")
+	cmd := fmt.Sprintf(
+		"su %s -c \"%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \\\"%s\\\"\"",
+		d.OsUser, d.Mongo, d.ConfParams.AdminUsername, d.ConfParams.AdminPassword, d.PrimaryIP,
+		d.PrimaryPort, d.ScriptContent)
+	if _, err := util.RunBashCmd(
+		cmd,
+		"", nil,
+		10*time.Second); err != nil {
+		d.runtime.Logger.Error(fmt.Sprintf("execute deleteUser script fail, error:%s", err))
+		return fmt.Errorf("execute deleteUser script fail, error:%s", err)
+	}
+
+	time.Sleep(2 * time.Second)
+
+	// verify the user is gone
+	flag, err = common.AuthCheckUser(d.OsUser, d.Mongo, d.ConfParams.AdminUsername, d.ConfParams.AdminPassword,
+		d.PrimaryIP, d.PrimaryPort,
d.ConfParams.AuthDb, d.ConfParams.Username) + if err != nil { + return err + } + if flag == true { + d.runtime.Logger.Error(fmt.Sprintf("delete user:%s fail, error:%s", d.ConfParams.Username, err)) + return fmt.Errorf("delete user:%s fail, error:%s", d.ConfParams.Username, err) + } + d.runtime.Logger.Info("execute deleteUser script successfully") + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/initiate_replicaset.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/initiate_replicaset.go new file mode 100644 index 0000000000..712ee29e89 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/initiate_replicaset.go @@ -0,0 +1,278 @@ +package atommongodb + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/go-playground/validator/v10" +) + +// InitConfParams 参数 +type InitConfParams struct { + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` + App string `json:"app" validate:"required"` + AreaId string `json:"areaId" validate:"required"` + SetId string `json:"setId" validate:"required"` + ConfigSvr bool `json:"configSvr"` // shardsvr configsvr + Ips []string `json:"ips" validate:"required"` // ip:port + Priority map[string]int `json:"priority" validate:"required"` // key->ip:port,value->priority + Hidden map[string]bool `json:"hidden" validate:"required"` // key->ip:port,value->hidden(true or false) +} + +// InitiateReplicaset 复制集初始化 +type InitiateReplicaset struct { + runtime *jobruntime.JobGenericRuntime + BinDir string + Mongo string + OsUser string + ConfFilePath string + ConfFileContent string + ConfParams *InitConfParams + ClusterId string + StatusChan chan int +} + +// NewInitiateReplicaset 实例化结构体 +func NewInitiateReplicaset() jobruntime.JobRunner { + return &InitiateReplicaset{} +} + +// Name 获取原子任务的名字 +func (i *InitiateReplicaset) Name() string { + return "init_replicaset" +} + +// Run 运行原子任务 +func (i *InitiateReplicaset) Run() error { + // 获取配置内容 + if err := i.makeConfContent(); err != nil { + return err + } + + // 生成js脚本 + if err := i.createInitiateReplicasetScript(); err != nil { + return err + } + + // 执行js脚本 + if err := i.execScript(); err != nil { + return err + } + + // 检查状态 + go i.checkStatus() + + // 获取状态 + if err := i.getStatus(); err != nil { + return err + } + + return nil +} + +// Retry 重试 +func (i *InitiateReplicaset) Retry() uint { + return 2 +} + +// Rollback 回滚 +func (i *InitiateReplicaset) Rollback() error { + return nil +} + +// Init 初始化 +func (i *InitiateReplicaset) Init(runtime *jobruntime.JobGenericRuntime) error { + // 获取安装参数 + i.runtime = runtime + i.runtime.Logger.Info("start to init") + i.BinDir = consts.UsrLocal + i.Mongo = filepath.Join(i.BinDir, "mongodb", "bin", "mongo") + i.OsUser = consts.GetProcessUser() + i.ConfFilePath = filepath.Join("/", "tmp", "initiateReplicaset.js") + i.StatusChan = make(chan int, 1) + + // 获取MongoDB配置文件参数 + if err := json.Unmarshal([]byte(i.runtime.PayloadDecoded), &i.ConfParams); err != nil { + i.runtime.Logger.Error(fmt.Sprintf( + "get parameters of initiateReplicaset fail by json.Unmarshal, error:%s", err)) + return fmt.Errorf("get parameters of initiateReplicaset fail by json.Unmarshal, error:%s", err) + } + i.ClusterId = 
strings.Join([]string{i.ConfParams.App, i.ConfParams.AreaId, i.ConfParams.SetId}, "-") + i.runtime.Logger.Info("init successfully") + + // 进行校验 + if err := i.checkParams(); err != nil { + return err + } + + return nil +} + +// checkParams 校验参数 +func (i *InitiateReplicaset) checkParams() error { + // 校验重启配置参数 + validate := validator.New() + i.runtime.Logger.Info("start to validate parameters of initiateReplicaset") + if err := validate.Struct(i.ConfParams); err != nil { + i.runtime.Logger.Error(fmt.Sprintf("validate parameters of initiateReplicaset fail, error:%s", err)) + return fmt.Errorf("validate parameters of initiateReplicaset fail, error:%s", err) + } + i.runtime.Logger.Info("validate parameters of initiateReplicaset successfully") + return nil +} + +// makeConfContent 获取配置内容 +func (i *InitiateReplicaset) makeConfContent() error { + i.runtime.Logger.Info("start to make config content of initiateReplicaset") + jsonConfReplicaset := common.NewJsonConfReplicaset() + jsonConfReplicaset.Id = i.ClusterId + for index, value := range i.ConfParams.Ips { + member := common.NewMember() + member.Id = index + member.Host = i.ConfParams.Ips[index] + member.Priority = i.ConfParams.Priority[value] + member.Hidden = i.ConfParams.Hidden[value] + jsonConfReplicaset.Members = append(jsonConfReplicaset.Members, member) + } + jsonConfReplicaset.ConfigSvr = i.ConfParams.ConfigSvr + + var err error + confJson, err := json.Marshal(jsonConfReplicaset) + if err != nil { + i.runtime.Logger.Error( + fmt.Sprintf("config content of initiateReplicaset json Marshal fial, error:%s", err)) + return fmt.Errorf("config content of initiateReplicaset json Marshal fial, error:%s", err) + } + i.ConfFileContent = strings.Join([]string{"var config=", + string(confJson), "\n", "rs.initiate(config)\n"}, "") + i.runtime.Logger.Info("make config content of initiateReplicaset successfully") + return nil +} + +// createInitiateReplicasetScript 生成js脚本 +func (i *InitiateReplicaset) createInitiateReplicasetScript() error { + i.runtime.Logger.Info("start to create initiateReplicaset script") + confFile, err := os.OpenFile(i.ConfFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, DefaultPerm) + defer confFile.Close() + if err != nil { + i.runtime.Logger.Error( + fmt.Sprintf("create script file of initiateReplicaset json Marshal fail, error:%s", err)) + return fmt.Errorf("create script file of initiateReplicaset json Marshal fail, error:%s", err) + } + + if _, err = confFile.WriteString(i.ConfFileContent); err != nil { + i.runtime.Logger.Error( + fmt.Sprintf("create script file of initiateReplicaset write content fail, error:%s", + err)) + return fmt.Errorf("create script file of initiateReplicaset write content fail, error:%s", + err) + } + i.runtime.Logger.Info("create initiateReplicaset script successfully") + return nil +} + +// getPrimaryInfo 检查状态 +func (i *InitiateReplicaset) getPrimaryInfo() (bool, error) { + i.runtime.Logger.Info("start to check replicaset status") + result, err := common.InitiateReplicasetGetPrimaryInfo(i.OsUser, i.Mongo, i.ConfParams.IP, i.ConfParams.Port) + if err != nil { + i.runtime.Logger.Error(fmt.Sprintf("get initiateReplicaset primary info fail, error:%s", err)) + return false, fmt.Errorf("get initiateReplicaset primary info fail, error:%s", err) + } + i.runtime.Logger.Info("check replicaset status successfully") + for _, v := range i.ConfParams.Ips { + if v == result { + return true, nil + } + } + + return false, nil +} + +// checkStatus 检查复制集状态 +func (i *InitiateReplicaset) checkStatus() { + for { + result, 
err := common.NoAuthGetPrimaryInfo(i.OsUser, i.Mongo, i.ConfParams.IP, i.ConfParams.Port)
+		if err != nil {
+			i.runtime.Logger.Error("check replicaset status fail, error:%s", err)
+			panic(fmt.Sprintf("check replicaset status fail, error:%s\n", err))
+		}
+		if result != "" {
+			// a primary is visible, the replicaset is up
+			i.StatusChan <- 1
+			return
+		}
+		time.Sleep(2 * time.Second)
+	}
+}
+
+// execScript executes the rs.initiate script
+func (i *InitiateReplicaset) execScript() error {
+	// skip if the replicaset has already been initiated
+	flag, err := i.getPrimaryInfo()
+	if err != nil {
+		return err
+	}
+	if flag {
+		i.runtime.Logger.Info("replicaset has been initiated")
+		if err = i.removeScript(); err != nil {
+			return err
+		}
+
+		return nil
+	}
+
+	// execute the script
+	i.runtime.Logger.Info("start to execute initiateReplicaset script")
+	cmd := fmt.Sprintf("su %s -c \"%s --host %s --port %d --quiet %s\"",
+		i.OsUser, i.Mongo, "127.0.0.1", i.ConfParams.Port, i.ConfFilePath)
+	if _, err = util.RunBashCmd(
+		cmd,
+		"", nil,
+		10*time.Second); err != nil {
+		i.runtime.Logger.Error("execute initiateReplicaset script fail, error:%s", err)
+		return fmt.Errorf("execute initiateReplicaset script fail, error:%s", err)
+	}
+	i.runtime.Logger.Info("execute initiateReplicaset script successfully")
+	return nil
+}
+
+// getStatus waits until the replicaset reports a primary, i.e. the initiate succeeded
+func (i *InitiateReplicaset) getStatus() error {
+	for {
+		select {
+		case status := <-i.StatusChan:
+			if status == 1 {
+				i.runtime.Logger.Info("initiate replicaset successfully")
+				// remove the script
+				if err := i.removeScript(); err != nil {
+					return err
+				}
+				return nil
+			}
+		default:
+			// avoid busy-waiting while checkStatus polls in the background
+			time.Sleep(100 * time.Millisecond)
+		}
+	}
+}
+
+// removeScript removes the js script
+func (i *InitiateReplicaset) removeScript() error {
+	i.runtime.Logger.Info("start to remove initiateReplicaset script")
+	if err := common.RemoveFile(i.ConfFilePath); err != nil {
+		i.runtime.Logger.Error(fmt.Sprintf("remove initiateReplicaset script fail, error:%s", err))
+		return fmt.Errorf("remove initiateReplicaset script fail, error:%s", err)
+	}
+	i.runtime.Logger.Info("remove initiateReplicaset script successfully")
+
+	return nil
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_deinstall.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_deinstall.go
new file mode 100644
index 0000000000..61ea7637c7
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_deinstall.go
@@ -0,0 +1,229 @@
+package atommongodb
+
+import (
+	"dbm-services/redis/db-tools/dbactuator/pkg/common"
+	"dbm-services/redis/db-tools/dbactuator/pkg/consts"
+	"dbm-services/redis/db-tools/dbactuator/pkg/jobruntime"
+	"dbm-services/redis/db-tools/dbactuator/pkg/util"
+	"encoding/json"
+	"fmt"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-playground/validator/v10"
+)
+
+// DeInstallConfParams parameters
+type DeInstallConfParams struct {
+	IP           string   `json:"ip" validate:"required"`
+	Port         int      `json:"port" validate:"required"`
+	App          string   `json:"app" validate:"required"`
+	AreaId       string   `json:"areaId" validate:"required"`
+	NodeInfo     []string `json:"nodeInfo" validate:"required"` // for a replicaset member: the ips of all members; for a mongos: the ips of the mongos
+	InstanceType string   `json:"instanceType" validate:"required"` // mongod mongos
+}
+
+// DeInstall removes a MongoDB instance from the host
+type DeInstall struct {
+	runtime          *jobruntime.JobGenericRuntime
+	BinDir           string
+	DataDir          string
+	BackupDir        string
+	DbpathDir        string
+	InstallPath      string
+	PortDir          string
+	LogPortDir       string
+	DbPathRenameDir  string
+	LogPathRenameDir string
+	Mongo            string
+	OsUser           string
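+	// ServiceStatus is filled in by checkMongoService and records whether the
+	// instance was still running when this job started. The rename targets
+	// above follow the pattern <instanceType>_<app>_<areaId>_<port>; as an
+	// illustrative example (actual roots come from consts), the data dir
+	// /data/mongodata/27001 would be moved to
+	// /data/mongodata/mongod_test_test1_27001 by DirRename.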
ServiceStatus bool + IPInfo string + ConfParams *DeInstallConfParams +} + +// NewDeInstall 实例化结构体 +func NewDeInstall() jobruntime.JobRunner { + return &DeInstall{} +} + +// Name 获取原子任务的名字 +func (d *DeInstall) Name() string { + return "mongo_deinstall" +} + +// Run 运行原子任务 +func (d *DeInstall) Run() error { + // 检查实例状态 + if err := d.checkMongoService(); err != nil { + return err + } + + // 关闭进程 + if err := d.shutdownProcess(); err != nil { + return err + } + + // rename目录 + if err := d.DirRename(); err != nil { + return err + } + + return nil +} + +// Retry 重试 +func (d *DeInstall) Retry() uint { + return 2 +} + +// Rollback 回滚 +func (d *DeInstall) Rollback() error { + return nil +} + +// Init 初始化 +func (d *DeInstall) Init(runtime *jobruntime.JobGenericRuntime) error { + // 获取安装参数 + d.runtime = runtime + d.runtime.Logger.Info("start to init") + d.BinDir = consts.UsrLocal + d.DataDir = consts.GetMongoDataDir() + d.BackupDir = consts.GetMongoBackupDir() + + d.OsUser = consts.GetProcessUser() + + // 获取MongoDB配置文件参数 + if err := json.Unmarshal([]byte(d.runtime.PayloadDecoded), &d.ConfParams); err != nil { + d.runtime.Logger.Error( + "get parameters of deInstall fail by json.Unmarshal, error:%s", err) + return fmt.Errorf("get parameters of deInstall fail by json.Unmarshal, error:%s", err) + } + + // 获取各种目录 + d.InstallPath = filepath.Join(d.BinDir, "mongodb") + d.Mongo = filepath.Join(d.BinDir, "mongodb", "bin", "mongo") + strPort := strconv.Itoa(d.ConfParams.Port) + d.PortDir = filepath.Join(d.DataDir, "mongodata", strPort) + d.DbpathDir = filepath.Join(d.DataDir, "mongodata", strPort, "db") + d.DbPathRenameDir = filepath.Join(d.DataDir, "mongodata", fmt.Sprintf("%s_%s_%s_%d", + d.ConfParams.InstanceType, d.ConfParams.App, d.ConfParams.AreaId, d.ConfParams.Port)) + d.IPInfo = strings.Join(d.ConfParams.NodeInfo, "|") + d.LogPortDir = filepath.Join(d.BackupDir, "mongolog", strPort) + d.LogPathRenameDir = filepath.Join(d.BackupDir, "mongolog", fmt.Sprintf("%s_%s_%s_%d", + d.ConfParams.InstanceType, d.ConfParams.App, d.ConfParams.AreaId, d.ConfParams.Port)) + + // 进行校验 + if err := d.checkParams(); err != nil { + return err + } + + return nil +} + +// checkParams 校验参数 +func (d *DeInstall) checkParams() error { + // 校验配置参数 + d.runtime.Logger.Info("start to validate parameters") + validate := validator.New() + d.runtime.Logger.Info("start to validate parameters of deInstall") + if err := validate.Struct(d.ConfParams); err != nil { + d.runtime.Logger.Error("validate parameters of deInstall fail, error:%s", err) + return fmt.Errorf("validate parameters of deInstall fail, error:%s", err) + } + return nil +} + +// checkMongoService 检查mongo服务 +func (d *DeInstall) checkMongoService() error { + d.runtime.Logger.Info("start to check process status") + flag, _, err := common.CheckMongoService(d.ConfParams.Port) + if err != nil { + d.runtime.Logger.Error("get mongo service status fail, error:%s", err) + return fmt.Errorf("get mongo service status fail, error:%s", err) + } + d.ServiceStatus = flag + return nil +} + +// checkConnection 检查连接 +func (d *DeInstall) checkConnection() error { + d.runtime.Logger.Info("start to check connection") + cmd := fmt.Sprintf( + "source /etc/profile;netstat -nat | grep %d |awk '{print $5}'|awk -F: '{print $1}'|sort|uniq -c|sort -nr |grep -Ewv '0.0.0.0|127.0.0.1|%s' || true", + d.ConfParams.Port, d.IPInfo) + + result, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second) + if err != nil { + d.runtime.Logger.Error("check connection fail, error:%s", err) + return fmt.Errorf("check 
connection fail, error:%s", err) + } + result = strings.Replace(result, "\n", "", -1) + if result != "" { + d.runtime.Logger.Error("check connection fail, there are some connections") + return fmt.Errorf("check connection fail, there are some connections") + } + return nil +} + +// shutdownProcess 关闭进程 +func (d *DeInstall) shutdownProcess() error { + if d.ServiceStatus == true { + d.runtime.Logger.Info("start to shutdown service") + // 检查连接 + if err := d.checkConnection(); err != nil { + return err + } + + // 关闭进程 + if err := common.ShutdownMongoProcess(d.OsUser, d.ConfParams.InstanceType, d.BinDir, d.DbpathDir, + d.ConfParams.Port); err != nil { + d.runtime.Logger.Error("shutdown mongo service fail, error:%s", err) + return fmt.Errorf("shutdown mongo service fail, error:%s", err) + } + } + + return nil +} + +// DirRename 打包数据目录 +func (d *DeInstall) DirRename() error { + // renameDb数据目录 + flag := util.FileExists(d.PortDir) + if flag == true { + d.runtime.Logger.Info("start to rename db directory") + cmd := fmt.Sprintf( + "mv %s %s", + d.PortDir, d.DbPathRenameDir) + if _, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second); err != nil { + d.runtime.Logger.Error("rename db directory fail, error:%s", err) + return fmt.Errorf("rename db directory fail, error:%s", err) + } + } + + // renameDb日志目录 + flag = util.FileExists(d.LogPortDir) + if flag == true { + d.runtime.Logger.Info("start to rename log directory") + cmd := fmt.Sprintf( + "mv %s %s", + d.LogPortDir, d.LogPathRenameDir) + if _, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second); err != nil { + d.runtime.Logger.Error("rename log directory fail, error:%s", err) + return fmt.Errorf("rename log directory fail, error:%s", err) + } + } + + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_execute_script.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_execute_script.go new file mode 100644 index 0000000000..202f8698e7 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_execute_script.go @@ -0,0 +1,330 @@ +package atommongodb + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/go-playground/validator/v10" +) + +// ExecScriptConfParams 参数 +type ExecScriptConfParams struct { + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` + Script string `json:"script" validate:"required"` + Type string `json:"type" validate:"required"` // cluster:执行脚本为传入的mongos replicaset:执行脚本为指定节点 + Secondary bool `json:"secondary"` // 复制集是否在secondary节点执行script + AdminUsername string `json:"adminUsername" validate:"required"` + AdminPassword string `json:"adminPassword" validate:"required"` + RepoUrl string `json:"repoUrl"` // 制品库url + RepoUsername string `json:"repoUsername"` // 制品库用户名 + RepoToken string `json:"repoToken"` // 制品库token + RepoProject string `json:"repoProject"` // 制品库project + RepoRepo string `json:"repoRepo"` // 制品库repo + RepoPath string `json:"repoPath"` // 制品库路径 +} + +// ExecScript 添加分片到集群 +type ExecScript struct { + runtime *jobruntime.JobGenericRuntime + BinDir string + Mongo string + OsUser string + OsGroup string + execIP string + execPort int + ScriptDir string + 
ScriptContent string + ScriptFilePath string + ResultFilePath string + ConfParams *ExecScriptConfParams +} + +// NewExecScript 实例化结构体 +func NewExecScript() jobruntime.JobRunner { + return &ExecScript{} +} + +// Name 获取原子任务的名字 +func (e *ExecScript) Name() string { + return "mongo_execute_script" +} + +// Run 运行原子任务 +func (e *ExecScript) Run() error { + // 生成script内容 + if err := e.makeScriptContent(); err != nil { + return err + } + + // 创建script文件 + if err := e.creatScriptFile(); err != nil { + return err + } + + // 执行脚本生成结果文件 + if err := e.execScript(); err != nil { + return err + } + + // 上传结果文件到制品库 + if err := e.uploadFile(); err != nil { + return err + } + + return nil +} + +// Retry 重试 +func (e *ExecScript) Retry() uint { + return 2 +} + +// Rollback 回滚 +func (e *ExecScript) Rollback() error { + return nil +} + +// Init 初始化 +func (e *ExecScript) Init(runtime *jobruntime.JobGenericRuntime) error { + // 获取安装参数 + e.runtime = runtime + e.runtime.Logger.Info("start to init") + e.BinDir = consts.UsrLocal + e.OsUser = consts.GetProcessUser() + e.OsGroup = consts.GetProcessUserGroup() + + // 获取MongoDB配置文件参数 + if err := json.Unmarshal([]byte(e.runtime.PayloadDecoded), &e.ConfParams); err != nil { + e.runtime.Logger.Error( + "get parameters of execScript fail by json.Unmarshal, error:%s", err) + return fmt.Errorf("get parameters of execScript fail by json.Unmarshal, error:%s", err) + } + + // 获取各种目录 + e.Mongo = filepath.Join(e.BinDir, "mongodb", "bin", "mongo") + e.ScriptDir = filepath.Join("/", "home", e.OsUser, e.runtime.UID) + e.ScriptFilePath = filepath.Join(e.ScriptDir, strings.Join([]string{"script", "js"}, ".")) + e.ResultFilePath = filepath.Join(e.ScriptDir, strings.Join([]string{"result", "txt"}, ".")) + e.runtime.Logger.Info("init successfully") + + // 复制集获取执行脚本的IP端口 默认为primary节点 可以指定secondary节点 + if e.ConfParams.Type == "cluster" { + e.execIP = e.ConfParams.IP + e.execPort = e.ConfParams.Port + } + if e.ConfParams.Type == "replicaset" { + primaryInfo, err := common.AuthGetPrimaryInfo(e.OsUser, e.Mongo, e.ConfParams.AdminUsername, + e.ConfParams.AdminPassword, + e.ConfParams.IP, e.ConfParams.Port) + if err != nil { + e.runtime.Logger.Error("init get primary info fail, error:%s", err) + return fmt.Errorf("init get primary info fail, error:%s", err) + } + e.execIP = strings.Split(primaryInfo, ":")[0] + e.execPort, _ = strconv.Atoi(strings.Split(primaryInfo, ":")[1]) + if e.ConfParams.Secondary == true { + _, _, _, _, _, memberInfo, err := common.GetNodeInfo(e.OsUser, e.Mongo, e.ConfParams.IP, e.ConfParams.Port, + e.ConfParams.AdminUsername, e.ConfParams.AdminPassword, e.ConfParams.IP, e.ConfParams.Port) + if err != nil { + e.runtime.Logger.Error("init get member info fail, error:%s", err) + return fmt.Errorf("init get member info fail, error:%s", err) + } + for _, v := range memberInfo { + if v["state"] == "2" && v["hidden"] == "false" { + e.execIP = strings.Split(v["name"], ":")[0] + e.execPort, _ = strconv.Atoi(strings.Split(v["name"], ":")[1]) + } + } + } + } + + // 进行校验 + if err := e.checkParams(); err != nil { + return err + } + + return nil +} + +// checkParams 校验参数 +func (e *ExecScript) checkParams() error { + // 校验配置参数 + e.runtime.Logger.Info("start to validate parameters") + validate := validator.New() + e.runtime.Logger.Info("start to validate parameters of deInstall") + if err := validate.Struct(e.ConfParams); err != nil { + e.runtime.Logger.Error("validate parameters of execScript fail, error:%s", err) + return fmt.Errorf("validate parameters of execScript fail, error:%s", 
err) + } + e.runtime.Logger.Info("validate parameters successfully") + return nil +} + +// makeScriptContent 生成script内容 +func (e *ExecScript) makeScriptContent() error { + // 复制集,判断在primary节点还是在secondary节点执行脚本 + e.runtime.Logger.Info("start to make script content") + if e.ConfParams.Type == "replicaset" && e.ConfParams.Secondary == true { + // 获取mongo版本呢 + mongoName := "mongod" + version, err := common.CheckMongoVersion(e.BinDir, mongoName) + if err != nil { + e.runtime.Logger.Error("get mongo service version fail, error:%s", err) + return fmt.Errorf("get mongo service version fail, error:%s", err) + } + splitVersion := strings.Split(version, ".") + mainVersion, _ := strconv.ParseFloat(strings.Join([]string{splitVersion[0], splitVersion[1]}, "."), 32) + + // secondary执行script + secondaryOk := "rs.slaveOk()\n" + if mainVersion >= 3.6 { + secondaryOk = "rs.secondaryOk()\n" + } + e.ScriptContent = strings.Join([]string{secondaryOk, e.ConfParams.Script}, "") + e.runtime.Logger.Info("make script content successfully") + return nil + } + e.ScriptContent = e.ConfParams.Script + e.runtime.Logger.Info("make script content successfully") + return nil +} + +// creatScriptFile 创建script文件 +func (e *ExecScript) creatScriptFile() error { + // 创建目录 + e.runtime.Logger.Info("start to make script directory") + if err := util.MkDirsIfNotExists([]string{e.ScriptDir}); err != nil { + e.runtime.Logger.Error("create script directory:%s fail, error:%s", e.ScriptDir, err) + return fmt.Errorf("create script directory:%s fail, error:%s", e.ScriptDir, err) + } + + // 创建文件 + e.runtime.Logger.Info("start to create script file") + script, err := os.OpenFile(e.ScriptFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, DefaultPerm) + defer script.Close() + if err != nil { + e.runtime.Logger.Error( + fmt.Sprintf("create script file fail, error:%s", err)) + return fmt.Errorf("create script file fail, error:%s", err) + } + if _, err = script.WriteString(e.ScriptContent); err != nil { + e.runtime.Logger.Error( + fmt.Sprintf("script file write content fail, error:%s", + err)) + return fmt.Errorf("script file write content fail, error:%s", + err) + } + e.runtime.Logger.Info("create script file successfully") + // 修改配置文件属主 + e.runtime.Logger.Info("start to execute chown command for script file") + if _, err = util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s", e.OsUser, e.OsGroup, e.ScriptDir), + "", nil, + 10*time.Second); err != nil { + e.runtime.Logger.Error(fmt.Sprintf("chown auth config file fail, error:%s", err)) + return fmt.Errorf("chown auth config file fail, error:%s", err) + } + e.runtime.Logger.Info("start to execute chown command for script file successfully") + return nil +} + +// execScript 执行脚本 +func (e *ExecScript) execScript() error { + e.runtime.Logger.Info("start to execute script") + cmd := fmt.Sprintf( + "su %s -c \"%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet %s > %s\"", + e.OsUser, e.Mongo, e.ConfParams.AdminUsername, e.ConfParams.AdminPassword, e.execIP, e.execPort, + e.ScriptFilePath, e.ResultFilePath) + cmdX := fmt.Sprintf( + "su %s -c \"%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet %s > %s\"", + e.OsUser, e.Mongo, e.ConfParams.AdminUsername, "xxx", e.execIP, e.execPort, + e.ScriptFilePath, e.ResultFilePath) + if _, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second); err != nil { + e.runtime.Logger.Error("execute script:%s fail, error:%s", cmdX, err) + return fmt.Errorf("execute script:%s fail, error:%s", cmdX, err) + } + 
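+	// cmdX is a copy of cmd with the admin password masked as "xxx"; only
+	// cmd is executed, and only cmdX appears in the logs.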
e.runtime.Logger.Info("execute script:%s successfully", cmdX) + return nil +} + +// Output 请求响应结构体 +type Output struct { + Code int `json:"code"` + Message string `json:"message"` +} + +// uploadFile 上传结果文件 +func (e *ExecScript) uploadFile() error { + if e.ConfParams.RepoUrl == "" { + return nil + } + e.runtime.Logger.Info("start to upload result file") + // url + url := strings.Join([]string{e.ConfParams.RepoUrl, e.ConfParams.RepoProject, e.ConfParams.RepoRepo, + e.ConfParams.RepoPath, e.runtime.UID, "result.txt"}, "/") + + // 生成请求body内容 + file, err := ioutil.ReadFile(e.ResultFilePath) + if err != nil { + e.runtime.Logger.Error("get result file content fail, error:%s", err) + return fmt.Errorf("get result file content fail, error:%s", err) + } + + // 生成请求 + request, err := http.NewRequest("PUT", url, strings.NewReader(string(file))) + if err != nil { + e.runtime.Logger.Error("create request for uploading result file fail, error:%s", err) + return fmt.Errorf("create request for uploading result file fail, error:%s", err) + } + + // 设置请求头 + auth := base64.StdEncoding.EncodeToString([]byte(strings.Join([]string{e.ConfParams.RepoUsername, + e.ConfParams.RepoToken}, ":"))) + request.Header.Set("Authorization", "Basic "+auth) + request.Header.Set("X-BKREPO-EXPIRES", "30") + request.Header.Set("X-BKREPO-OVERWRITE", "true") + request.Header.Set("Content-Type", "multipart/form-data") + if err != nil { + e.runtime.Logger.Error("set request head for uploading result file fail, error:%s", err) + return fmt.Errorf("set request head for uploading result file fail, error:%s", err) + } + + // 执行请求 + response, err := http.DefaultClient.Do(request) + defer response.Body.Close() + if err != nil { + e.runtime.Logger.Error("request server for uploading result file fail, error:%s", err) + return fmt.Errorf("request server for uploading result file fail, error:%s", err) + } + + // 解析响应 + resp, err := ioutil.ReadAll(response.Body) + if err != nil { + e.runtime.Logger.Error("read data from response fail, error:%s", err) + return fmt.Errorf("read data from response fail, error:%s", err) + } + output := Output{} + _ = json.Unmarshal(resp, &output) + if output.Code != 0 && output.Message == "" { + e.runtime.Logger.Error("upload file fail, error:%s", output.Message) + return fmt.Errorf("upload file fail, error:%s", output.Message) + } + e.runtime.Logger.Info("upload result file successfully") + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_process_restart.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_process_restart.go new file mode 100644 index 0000000000..c0e4e2b3be --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_process_restart.go @@ -0,0 +1,398 @@ +package atommongodb + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/go-playground/validator/v10" + "gopkg.in/yaml.v2" +) + +// RestartConfParams 重启进程参数 +type RestartConfParams struct { + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` + InstanceType string `json:"instanceType" validate:"required"` // mongos mongod + SingleNodeInstallRestart bool `json:"singleNodeInstallRestart"` // mongod替换节点安装后重启 + Auth bool `json:"auth"` // true->auth false->noauth + CacheSizeGB int 
`json:"cacheSizeGB"` // 可选,重启mongod的参数 + MongoSConfDbOld string `json:"mongoSConfDbOld"` // 可选,ip:port + MongoSConfDbNew string `json:"MongoSConfDbNew"` // 可选,ip:port + AdminUsername string `json:"adminUsername"` + AdminPassword string `json:"adminPassword"` +} + +// MongoRestart 重启mongo进程 +type MongoRestart struct { + runtime *jobruntime.JobGenericRuntime + BinDir string + DataDir string + DbpathDir string + Mongo string + OsUser string // MongoDB安装在哪个用户下 + OsGroup string + ConfParams *RestartConfParams + AuthConfFilePath string + NoAuthConfFilePath string +} + +// NewMongoRestart 实例化结构体 +func NewMongoRestart() jobruntime.JobRunner { + return &MongoRestart{} +} + +// Name 获取原子任务的名字 +func (r *MongoRestart) Name() string { + return "mongo_restart" +} + +// Run 运行原子任务 +func (r *MongoRestart) Run() error { + // 修改配置文件参数 + if err := r.changeParam(); err != nil { + return err + } + + // mongod的primary进行主备切换 + if err := r.RsStepDown(); err != nil { + return err + } + + // 关闭服务 + if err := r.shutdown(); err != nil { + return err + } + + // 启动服务 + if err := r.startup(); err != nil { + return err + } + + return nil +} + +// Retry 重试 +func (r *MongoRestart) Retry() uint { + return 2 +} + +// Rollback 回滚 +func (r *MongoRestart) Rollback() error { + return nil +} + +// Init 初始化 +func (r *MongoRestart) Init(runtime *jobruntime.JobGenericRuntime) error { + // 获取安装参数 + r.runtime = runtime + r.runtime.Logger.Info("start to init") + r.BinDir = consts.UsrLocal + r.DataDir = consts.GetRedisDataDir() + r.OsUser = consts.GetProcessUser() + r.OsGroup = consts.GetProcessUserGroup() + r.Mongo = filepath.Join(r.BinDir, "mongodb", "bin", "mongo") + + // 获取MongoDB配置文件参数 + if err := json.Unmarshal([]byte(r.runtime.PayloadDecoded), &r.ConfParams); err != nil { + r.runtime.Logger.Error(fmt.Sprintf( + "get parameters of mongo restart fail by json.Unmarshal, error:%s", err)) + return fmt.Errorf("get parameters of mongo restart fail by json.Unmarshal, error:%s", err) + } + + // 设置各种路径 + strPort := strconv.Itoa(r.ConfParams.Port) + r.DbpathDir = filepath.Join(r.DataDir, "mongodata", strPort, "db") + r.AuthConfFilePath = filepath.Join(r.DataDir, "mongodata", strPort, "mongo.conf") + r.NoAuthConfFilePath = filepath.Join(r.DataDir, "mongodata", strPort, "noauth.conf") + r.runtime.Logger.Info("init successfully") + + // 安装前进行校验 + if err := r.checkParams(); err != nil { + return err + } + + return nil +} + +// checkParams 校验参数 +func (r *MongoRestart) checkParams() error { + // 校验重启配置参数 + validate := validator.New() + r.runtime.Logger.Info("start to validate parameters of restart") + if err := validate.Struct(r.ConfParams); err != nil { + r.runtime.Logger.Error(fmt.Sprintf("validate parameters of restart fail, error:%s", err)) + return fmt.Errorf("validate parameters of restart fail, error:%s", err) + } + r.runtime.Logger.Info("validate parameters of restart successfully") + return nil +} + +// changeParam 修改参数 +func (r *MongoRestart) changeParam() error { + if r.ConfParams.InstanceType == "mongos" && + r.ConfParams.MongoSConfDbOld != "" && r.ConfParams.MongoSConfDbNew != "" { + if err := r.changeConfigDb(); err != nil { + return err + } + return nil + } + if err := r.changeCacheSizeGB(); err != nil { + return err + } + return nil +} + +// changeConfigDb 修改mongoS的ConfigDb参数 +func (r *MongoRestart) changeConfigDb() error { + r.runtime.Logger.Info("start to change configDB value of config file") + // 获取配置文件内容 + readAuthConfFileContent, _ := ioutil.ReadFile(r.AuthConfFilePath) + readNoAuthConfFileContent, _ := 
ioutil.ReadFile(r.NoAuthConfFilePath) + + // 修改configDB配置 + yamlAuthMongoSConf := common.NewYamlMongoSConf() + yamlNoAuthMongoSConf := common.NewYamlMongoSConf() + _ = yaml.Unmarshal(readAuthConfFileContent, yamlAuthMongoSConf) + _ = yaml.Unmarshal(readNoAuthConfFileContent, yamlNoAuthMongoSConf) + yamlAuthMongoSConf.Sharding.ConfigDB = strings.Replace(yamlAuthMongoSConf.Sharding.ConfigDB, + r.ConfParams.MongoSConfDbOld, r.ConfParams.MongoSConfDbNew, -1) + yamlNoAuthMongoSConf.Sharding.ConfigDB = strings.Replace(yamlNoAuthMongoSConf.Sharding.ConfigDB, + r.ConfParams.MongoSConfDbOld, r.ConfParams.MongoSConfDbNew, -1) + authConfFileContent, _ := yamlAuthMongoSConf.GetConfContent() + noAuthConfFileContent, _ := yamlNoAuthMongoSConf.GetConfContent() + + // 修改authConfFile + authConfFile, err := os.OpenFile(r.AuthConfFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, DefaultPerm) + defer authConfFile.Close() + if err != nil { + r.runtime.Logger.Error( + fmt.Sprintf("create auth config file fail, error:%s", err)) + return fmt.Errorf("create auth config file fail, error:%s", err) + } + if _, err = authConfFile.WriteString(string(authConfFileContent)); err != nil { + r.runtime.Logger.Error( + fmt.Sprintf("change configDB value of auth config file write content fail, error:%s", + err)) + return fmt.Errorf("change configDB value of auth config file write content fail, error:%s", + err) + } + + // 修改noAuthConfFile + noAuthConfFile, err := os.OpenFile(r.NoAuthConfFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, DefaultPerm) + defer noAuthConfFile.Close() + if err != nil { + r.runtime.Logger.Error(fmt.Sprintf("create no auth config file fail, error:%s", err)) + return fmt.Errorf("create no auth config file fail, error:%s", err) + } + if _, err = noAuthConfFile.WriteString(string(noAuthConfFileContent)); err != nil { + r.runtime.Logger.Error( + fmt.Sprintf("change configDB value of no auth config file write content fail, error:%s", + err)) + return fmt.Errorf("change configDB value of no auth config file write content fail, error:%s", + err) + } + r.runtime.Logger.Info("change configDB value of config file successfully") + + return nil +} + +// changeCacheSizeGB 修改CacheSizeGB +func (r *MongoRestart) changeCacheSizeGB() error { + if r.ConfParams.CacheSizeGB == 0 { + return nil + } + + // 检查mongo版本 + r.runtime.Logger.Info("start to check mongo version") + version, err := common.CheckMongoVersion(r.BinDir, "mongod") + if err != nil { + r.runtime.Logger.Error(fmt.Sprintf("check mongo version fail, error:%s", err)) + return fmt.Errorf("check mongo version fail, error:%s", err) + } + mainVersion, _ := strconv.Atoi(strings.Split(version, ".")[0]) + r.runtime.Logger.Info("check mongo version successfully") + + if mainVersion >= 3 { + r.runtime.Logger.Info("start to change CacheSizeGB value of config file") + // 获取配置文件内容 + readAuthConfFileContent, _ := ioutil.ReadFile(r.AuthConfFilePath) + readNoAuthConfFileContent, _ := ioutil.ReadFile(r.NoAuthConfFilePath) + + // 修改CacheSizeGB大小并写入文件 + yamlAuthConfFile := common.NewYamlMongoDBConf() + yamlNoAuthConfFile := common.NewYamlMongoDBConf() + _ = yaml.Unmarshal(readAuthConfFileContent, &yamlAuthConfFile) + _ = yaml.Unmarshal(readNoAuthConfFileContent, &yamlNoAuthConfFile) + if r.ConfParams.CacheSizeGB == 0 { + return nil + } + if r.ConfParams.CacheSizeGB != yamlAuthConfFile.Storage.WiredTiger.EngineConfig.CacheSizeGB { + yamlAuthConfFile.Storage.WiredTiger.EngineConfig.CacheSizeGB = r.ConfParams.CacheSizeGB + yamlNoAuthConfFile.Storage.WiredTiger.EngineConfig.CacheSizeGB = 
r.ConfParams.CacheSizeGB + authConfFileContent, _ := yamlAuthConfFile.GetConfContent() + noAuthConfFileContent, _ := yamlNoAuthConfFile.GetConfContent() + + // 修改authConfFile + authConfFile, err := os.OpenFile(r.AuthConfFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, DefaultPerm) + defer authConfFile.Close() + if err != nil { + r.runtime.Logger.Error( + fmt.Sprintf("create auth config file fail, error:%s", err)) + return fmt.Errorf("create auth config file fail, error:%s", err) + } + if _, err = authConfFile.WriteString(string(authConfFileContent)); err != nil { + r.runtime.Logger.Error( + fmt.Sprintf("change CacheSizeGB value of auth config file write content fail, error:%s", + err)) + return fmt.Errorf("change CacheSizeGB value of auth config file write content fail, error:%s", + err) + } + + // 修改noAuthConfFile + noAuthConfFile, err := os.OpenFile(r.NoAuthConfFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, DefaultPerm) + defer noAuthConfFile.Close() + if err != nil { + r.runtime.Logger.Error(fmt.Sprintf("create no auth config file fail, error:%s", err)) + return fmt.Errorf("create no auth config file fail, error:%s", err) + } + if _, err = noAuthConfFile.WriteString(string(noAuthConfFileContent)); err != nil { + r.runtime.Logger.Error( + fmt.Sprintf("change CacheSizeGB value of no auth config file write content fail, error:%s", + err)) + return fmt.Errorf("change CacheSizeGB value of no auth config file write content fail, error:%s", + err) + } + } + r.runtime.Logger.Info("change CacheSizeGB value of config file successfully") + } + return nil +} + +// checkPrimary 检查该节点是否是primary +func (r *MongoRestart) checkPrimary() (bool, error) { + r.runtime.Logger.Info("start to check if it is primary") + var info string + var err error + // 安装时无需密码验证。安装成功后需要密码验证 + if r.ConfParams.AdminUsername != "" && r.ConfParams.AdminPassword != "" { + info, err = common.AuthGetPrimaryInfo(r.OsUser, r.Mongo, r.ConfParams.AdminUsername, + r.ConfParams.AdminPassword, r.ConfParams.IP, r.ConfParams.Port) + } else { + info, err = common.NoAuthGetPrimaryInfo(r.OsUser, r.Mongo, + r.ConfParams.IP, r.ConfParams.Port) + } + if err != nil { + r.runtime.Logger.Error("get primary info fail, error:%s", err) + return false, fmt.Errorf("get primary info fail, error:%s", err) + } + if info == fmt.Sprintf("%s:%d", r.ConfParams.IP, r.ConfParams.Port) { + return true, nil + } + r.runtime.Logger.Info("check if it is primary successfully") + return false, nil +} + +// RsStepDown 主备切换 +func (r *MongoRestart) RsStepDown() error { + if r.ConfParams.InstanceType != "mongos" { + if r.ConfParams.SingleNodeInstallRestart == true { + return nil + } + r.runtime.Logger.Info("start to check mongod service before rsStepDown") + flag, _, err := common.CheckMongoService(r.ConfParams.Port) + if err != nil { + r.runtime.Logger.Error("check mongod service fail, error:%s", err) + return fmt.Errorf("check mongod service fail, error:%s", err) + } + r.runtime.Logger.Info("check mongod service before rsStepDown successfully") + if flag == false { + return nil + } + + // 检查是否是primary + flag1, err := r.checkPrimary() + if err != nil { + return err + } + if flag1 == true { + r.runtime.Logger.Info("start to convert primary secondary db") + // 安装时无需密码验证。安装成功后需要密码验证 + var flag2 bool + if r.ConfParams.AdminUsername != "" && r.ConfParams.AdminPassword != "" { + flag2, err = common.AuthRsStepDown(r.OsUser, r.Mongo, r.ConfParams.IP, r.ConfParams.Port, + r.ConfParams.AdminUsername, r.ConfParams.AdminPassword) + } else { + flag2, err = common.NoAuthRsStepDown(r.OsUser, 
r.Mongo, r.ConfParams.IP, r.ConfParams.Port) + } + if err != nil { + r.runtime.Logger.Error("convert primary secondary db fail, error:%s", err) + return fmt.Errorf("convert primary secondary db fail, error:%s", err) + } + if flag2 == true { + r.runtime.Logger.Info("convert primary secondary db successfully") + return nil + } + } + } + + return nil +} + +// shutdown 关闭服务 +func (r *MongoRestart) shutdown() error { + // 检查服务是否存在 + r.runtime.Logger.Info("start to check %s service", r.ConfParams.InstanceType) + result, _, err := common.CheckMongoService(r.ConfParams.Port) + if err != nil { + r.runtime.Logger.Error("check %s service fail, error:%s", r.ConfParams.InstanceType, err) + return fmt.Errorf("check %s service fail, error:%s", r.ConfParams.InstanceType, err) + } + if result != true { + r.runtime.Logger.Info("%s service has been close", r.ConfParams.InstanceType) + return nil + } + r.runtime.Logger.Info("check %s service successfully", r.ConfParams.InstanceType) + + // 关闭服务 + r.runtime.Logger.Info("start to shutdown %s", r.ConfParams.InstanceType) + if err = common.ShutdownMongoProcess(r.OsUser, r.ConfParams.InstanceType, r.BinDir, r.DbpathDir, + r.ConfParams.Port); err != nil { + r.runtime.Logger.Error(fmt.Sprintf("shutdown %s fail, error:%s", r.ConfParams.InstanceType, err)) + return fmt.Errorf("shutdown %s fail, error:%s", r.ConfParams.InstanceType, err) + } + r.runtime.Logger.Info("shutdown %s successfully", r.ConfParams.InstanceType) + return nil +} + +// startup 开启服务 +func (r *MongoRestart) startup() error { + // 检查服务是否存在 + r.runtime.Logger.Info("start to check %s service", r.ConfParams.InstanceType) + result, _, err := common.CheckMongoService(r.ConfParams.Port) + if err != nil { + r.runtime.Logger.Error("check %s service fail, error:%s", r.ConfParams.InstanceType, err) + return fmt.Errorf("check %s service fail, error:%s", r.ConfParams.InstanceType, err) + } + if result == true { + r.runtime.Logger.Info("%s service has been open", r.ConfParams.InstanceType) + return nil + } + r.runtime.Logger.Info("check %s service successfully", r.ConfParams.InstanceType) + + // 开启服务 + r.runtime.Logger.Info("start to startup %s", r.ConfParams.InstanceType) + if err = common.StartMongoProcess(r.BinDir, r.ConfParams.Port, r.OsUser, r.ConfParams.Auth); err != nil { + r.runtime.Logger.Error("startup %s fail, error:%s", r.ConfParams.InstanceType, err) + return fmt.Errorf("startup %s fail, error:%s", r.ConfParams.InstanceType, err) + } + r.runtime.Logger.Info("startup %s successfully", r.ConfParams.InstanceType) + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_set_profiler.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_set_profiler.go new file mode 100644 index 0000000000..9077f55821 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongo_set_profiler.go @@ -0,0 +1,185 @@ +package atommongodb + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/go-playground/validator/v10" +) + +// SetProfilerConfParams 参数 +type SetProfilerConfParams struct { + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` + DbName string `json:"dbName" validate:"required"` + Level int `json:"level" validate:"required"` 
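+	// Level uses MongoDB's profiler semantics: 0 = off, 1 = slow operations
+	// only, 2 = all operations. Because the field carries validate:"required",
+	// a zero Level cannot be submitted through this job; setProfileSize itself
+	// drops the level to 0 before recreating the capped system.profile
+	// collection.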
+	ProfileSize   int    `json:"profileSize"` // unit: GB
+	AdminUsername string `json:"adminUsername" validate:"required"`
+	AdminPassword string `json:"adminPassword" validate:"required"`
+}
+
+// SetProfiler sets the MongoDB profiler for a database
+type SetProfiler struct {
+	runtime     *jobruntime.JobGenericRuntime
+	BinDir      string
+	Mongo       string
+	OsUser      string
+	PrimaryIP   string
+	PrimaryPort int
+	ConfParams  *SetProfilerConfParams
+}
+
+// NewSetProfiler creates a SetProfiler instance
+func NewSetProfiler() jobruntime.JobRunner {
+	return &SetProfiler{}
+}
+
+// Name returns the name of the atomic job
+func (s *SetProfiler) Name() string {
+	return "mongo_set_profiler"
+}
+
+// Run executes the atomic job
+func (s *SetProfiler) Run() error {
+	// recreate system.profile with the requested size
+	if err := s.setProfileSize(); err != nil {
+		return err
+	}
+
+	// then set the profiling level
+	if err := s.setProfileLevel(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Retry returns the retry times
+func (s *SetProfiler) Retry() uint {
+	return 2
+}
+
+// Rollback rolls back the job
+func (s *SetProfiler) Rollback() error {
+	return nil
+}
+
+// Init initializes the job
+func (s *SetProfiler) Init(runtime *jobruntime.JobGenericRuntime) error {
+	// get the runtime parameters
+	s.runtime = runtime
+	s.runtime.Logger.Info("start to init")
+	s.BinDir = consts.UsrLocal
+	s.OsUser = consts.GetProcessUser()
+	// the mongo binary path must be set before it is used to query the primary
+	s.Mongo = filepath.Join(s.BinDir, "mongodb", "bin", "mongo")
+
+	// parse the MongoDB config parameters
+	if err := json.Unmarshal([]byte(s.runtime.PayloadDecoded), &s.ConfParams); err != nil {
+		s.runtime.Logger.Error(
+			"get parameters of setProfiler fail by json.Unmarshal, error:%s", err)
+		return fmt.Errorf("get parameters of setProfiler fail by json.Unmarshal, error:%s", err)
+	}
+
+	// get the primary info
+	info, err := common.AuthGetPrimaryInfo(s.OsUser, s.Mongo, s.ConfParams.AdminUsername, s.ConfParams.AdminPassword,
+		s.ConfParams.IP, s.ConfParams.Port)
+	if err != nil {
+		s.runtime.Logger.Error("get primary db info fail, error:%s", err)
+		return fmt.Errorf("get primary db info fail, error:%s", err)
+	}
+	sliceInfo := strings.Split(info, ":")
+	s.PrimaryIP = sliceInfo[0]
+	s.PrimaryPort, _ = strconv.Atoi(sliceInfo[1])
+	s.runtime.Logger.Info("init successfully")
+
+	// validate the parameters
+	if err = s.checkParams(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// checkParams validates the parameters
+func (s *SetProfiler) checkParams() error {
+	s.runtime.Logger.Info("start to validate parameters")
+	validate := validator.New()
+	s.runtime.Logger.Info("start to validate parameters of setProfiler")
+	if err := validate.Struct(s.ConfParams); err != nil {
+		s.runtime.Logger.Error("validate parameters of setProfiler fail, error:%s", err)
+		return fmt.Errorf("validate parameters of setProfiler fail, error:%s", err)
+	}
+	s.runtime.Logger.Info("validate parameters successfully")
+	return nil
+}
+
+// setProfileSize recreates system.profile with the requested size
+func (s *SetProfiler) setProfileSize() error {
+	// read the current profiling level; it must be 0 before system.profile can be dropped
+	status, err := common.GetProfilingLevel(s.OsUser, s.Mongo, s.ConfParams.IP, s.ConfParams.Port,
+		s.ConfParams.AdminUsername, s.ConfParams.AdminPassword, s.ConfParams.DbName)
+	if err != nil {
+		s.runtime.Logger.Error("get profile level fail, error:%s", err)
+		return fmt.Errorf("get profile level fail, error:%s", err)
+	}
+	if status != 0 {
+		if err = common.SetProfilingLevel(s.OsUser, s.Mongo, s.ConfParams.IP, s.ConfParams.Port, s.ConfParams.AdminUsername,
+			s.ConfParams.AdminPassword, s.ConfParams.DbName, 0); err != nil {
+			s.runtime.Logger.Error("set profile level 0 fail, error:%s", err)
+			return fmt.Errorf("set profile level 0 fail, error:%s", err)
+		}
+	}
+
+	// drop the old system.profile collection
+	cmd := fmt.Sprintf(
+		"su %s -c \"%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \\\"db.getMongo().getDB('%s').system.profile.drop()\\\"\"",
+		s.OsUser, s.Mongo, s.ConfParams.AdminUsername, s.ConfParams.AdminPassword, s.ConfParams.IP, s.ConfParams.Port,
+		s.ConfParams.DbName)
+	if _, err = util.RunBashCmd(
+		cmd,
+		"", nil,
+		10*time.Second); err != nil {
+		s.runtime.Logger.Error("delete system.profile fail, error:%s", err)
+		return fmt.Errorf("delete system.profile fail, error:%s", err)
+	}
+
+	// recreate system.profile as a capped collection of the requested size
+	s.runtime.Logger.Info("start to set system.profile size")
+	profileSizeBytes := s.ConfParams.ProfileSize * 1024 * 1024 * 1024
+	cmd = fmt.Sprintf(
+		"su %s -c \"%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \\\"db.getMongo().getDB('%s').createCollection('system.profile',{ capped: true, size:%d })\\\"\"",
+		s.OsUser, s.Mongo, s.ConfParams.AdminUsername, s.ConfParams.AdminPassword, s.ConfParams.IP, s.ConfParams.Port,
+		s.ConfParams.DbName, profileSizeBytes)
+	if _, err = util.RunBashCmd(
+		cmd,
+		"", nil,
+		10*time.Second); err != nil {
+		s.runtime.Logger.Error("set system.profile size fail, error:%s", err)
+		return fmt.Errorf("set system.profile size fail, error:%s", err)
+	}
+	s.runtime.Logger.Info("set system.profile size successfully")
+	return nil
+}
+
+// setProfileLevel sets the profiling level
+func (s *SetProfiler) setProfileLevel() error {
+	s.runtime.Logger.Info("start to set profile level")
+	if err := common.SetProfilingLevel(s.OsUser, s.Mongo, s.ConfParams.IP, s.ConfParams.Port, s.ConfParams.AdminUsername,
+		s.ConfParams.AdminPassword, s.ConfParams.DbName, s.ConfParams.Level); err != nil {
+		s.runtime.Logger.Error("set profile level fail, error:%s", err)
+		return fmt.Errorf("set profile level fail, error:%s", err)
+	}
+	s.runtime.Logger.Info("set profile level successfully")
+	return nil
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongod_install.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongod_install.go
new file mode 100644
index 0000000000..641e77693c
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongod_install.go
@@ -0,0 +1,491 @@
+package atommongodb
+
+import (
+	"dbm-services/redis/db-tools/dbactuator/pkg/common"
+	"dbm-services/redis/db-tools/dbactuator/pkg/consts"
+	"dbm-services/redis/db-tools/dbactuator/pkg/jobruntime"
+	"dbm-services/redis/db-tools/dbactuator/pkg/util"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-playground/validator/v10"
+)
+
+/*
+
+1. Pre-check
+   validate the input parameters, check that the port is in range, check the
+   install package, check whether the port is in use (and if so, whether the
+   process is already a mongodb service)
+
+2. Unpack the install package; check whether it was unpacked before and the version is correct
+   unpack the files, create the soft link, change the file owner
+
+3. Install; check whether the directories were already created
+   create the directory tree (skip directories that already exist), change
+   owners, create the config files (noauth, auth), create the dbtype file,
+   create the key file for replicasets
+
+4. Start the service
+   start without auth
+
+*/
+
+// MongoDBPortMin lowest port allowed for MongoDB
+const MongoDBPortMin = 27000
+
+// MongoDBPortMax highest port allowed for MongoDB
+const MongoDBPortMax = 27230
+
+// DefaultPerm default permission for created directories and files
+const DefaultPerm = 0755
+
+// MongoDBConfParams config file parameters
+type MongoDBConfParams struct {
+	common.MediaPkg `json:"mediapkg"`
+	IP              string `json:"ip" validate:"required"`
+	Port            int    `json:"port" validate:"required"`
+	DbVersion       string `json:"dbVersion" validate:"required"`
+	InstanceType    string `json:"instanceType" validate:"required"` // mongos mongod
+	App             string `json:"app" validate:"required"`
+	AreaId          string `json:"areaId" validate:"required"`
+	SetId           string `json:"setId" validate:"required"`
+	Auth            bool   `json:"auth"` // true: start mongod with auth; false: start without auth
+	ClusterRole     string
`json:"clusterRole"` // 部署cluster时填写,shardsvr configsvr;部署复制集时为空 + DbConfig struct { + SlowOpThresholdMs int `json:"slowOpThresholdMs"` + CacheSizeGB int `json:"cacheSizeGB"` + OplogSizeMB int `json:"oplogSizeMB" validate:"required"` + Destination string `json:"destination"` + } `json:"dbConfig" validate:"required"` +} + +// MongoDBInstall MongoDB安装 +type MongoDBInstall struct { + runtime *jobruntime.JobGenericRuntime + BinDir string + BackupDir string + DataDir string + OsUser string // MongoDB安装在哪个用户下 + OsGroup string + ConfParams *MongoDBConfParams + DbpathDir string + BackupPath string + AuthConfFilePath string + AuthConfFileContent []byte + NoAuthConfFilePath string + NoAuthConfFileContent []byte + DbTypeFilePath string + LogPath string + PidFilePath string + KeyFilePath string + InstallPackagePath string + LockFilePath string // 锁文件路径 +} + +// NewMongoDBInstall 实例化结构体 +func NewMongoDBInstall() jobruntime.JobRunner { + return &MongoDBInstall{} +} + +// Name 获取原子任务的名字 +func (m *MongoDBInstall) Name() string { + return "mongod_install" +} + +// Run 运行原子任务 +func (m *MongoDBInstall) Run() error { + // 进行校验 + status, err := m.checkParams() + if err != nil { + return err + } + if status { + return nil + } + + // 解压安装包并修改属主 + if err = m.unTarAndCreateSoftLink(); err != nil { + return err + } + + // 创建目录并修改属主 + if err = m.mkdir(); err != nil { + return err + } + + // 创建配置文件,key文件并修改属主 + if err = m.createConfFileAndKeyFileAndDbTypeFile(); err != nil { + return err + } + + // 启动服务 + if err = m.startup(); err != nil { + return err + } + + return nil +} + +// Retry 重试 +func (m *MongoDBInstall) Retry() uint { + return 2 +} + +// Rollback 回滚 +func (m *MongoDBInstall) Rollback() error { + return nil +} + +// Init 初始化 +func (m *MongoDBInstall) Init(runtime *jobruntime.JobGenericRuntime) error { + // 获取安装参数 + m.runtime = runtime + m.runtime.Logger.Info("start to init") + m.BinDir = consts.UsrLocal + m.BackupDir = consts.GetMongoBackupDir() + m.DataDir = consts.GetMongoDataDir() + m.OsUser = consts.GetProcessUser() + m.OsGroup = consts.GetProcessUserGroup() + + // 获取MongoDB配置文件参数 + if err := json.Unmarshal([]byte(m.runtime.PayloadDecoded), &m.ConfParams); err != nil { + m.runtime.Logger.Error(fmt.Sprintf( + "get parameters of mongodb config file fail by json.Unmarshal, error:%s", err)) + return fmt.Errorf("get parameters of mongodb config file fail by json.Unmarshal, error:%s", err) + } + + // 获取信息 + m.InstallPackagePath = m.ConfParams.MediaPkg.GetAbsolutePath() + + // 设置各种路径 + strPort := strconv.Itoa(m.ConfParams.Port) + m.DbpathDir = filepath.Join(m.DataDir, "mongodata", strPort, "db") + m.BackupPath = filepath.Join(m.BackupDir, "dbbak") + m.AuthConfFilePath = filepath.Join(m.DataDir, "mongodata", strPort, "mongo.conf") + m.NoAuthConfFilePath = filepath.Join(m.DataDir, "mongodata", strPort, "noauth.conf") + m.LogPath = filepath.Join(m.BackupDir, "mongolog", strPort, "mongo.log") + PidFileName := fmt.Sprintf("pid.%s", strPort) + m.PidFilePath = filepath.Join(m.DataDir, "mongodata", strPort, PidFileName) + m.KeyFilePath = filepath.Join(m.DataDir, "mongodata", strPort, "key_of_mongo") + m.DbTypeFilePath = filepath.Join(m.DataDir, "mongodata", strPort, "dbtype") + m.LockFilePath = filepath.Join(m.DataDir, "mongoinstall.lock") + + m.runtime.Logger.Info("init successfully") + + // 生成配置文件内容 + if err := m.makeConfContent(); err != nil { + return err + } + + return nil +} + +// makeConfContent 生成配置文件内容 +func (m *MongoDBInstall) makeConfContent() error { + mainVersion, err := 
strconv.Atoi(strings.Split(m.ConfParams.DbVersion, ".")[0]) + if err != nil { + return err + } + + // mongodb 3.0及以上得到配置文件内容 + if mainVersion >= 3 { + m.runtime.Logger.Info("start to make mongodb config file content") + conf := common.NewYamlMongoDBConf() + conf.Storage.DbPath = m.DbpathDir + conf.Storage.Engine = "wiredTiger" + conf.Storage.WiredTiger.EngineConfig.CacheSizeGB = m.ConfParams.DbConfig.CacheSizeGB + conf.Replication.OplogSizeMB = m.ConfParams.DbConfig.OplogSizeMB + conf.Replication.ReplSetName = strings.Join([]string{m.ConfParams.App, m.ConfParams.AreaId, m.ConfParams.SetId}, + "-") + conf.SystemLog.LogAppend = true + conf.SystemLog.Path = m.LogPath + conf.SystemLog.Destination = m.ConfParams.DbConfig.Destination + conf.ProcessManagement.Fork = true + conf.ProcessManagement.PidFilePath = m.PidFilePath + conf.Net.Port = m.ConfParams.Port + conf.Net.BindIp = strings.Join([]string{"127.0.0.1", m.ConfParams.IP}, ",") + conf.Net.WireObjectCheck = false + conf.OperationProfiling.SlowOpThresholdMs = m.ConfParams.DbConfig.SlowOpThresholdMs + conf.Sharding.ClusterRole = m.ConfParams.ClusterRole + // 获取非验证配置文件内容 + m.NoAuthConfFileContent, err = conf.GetConfContent() + if err != nil { + m.runtime.Logger.Error(fmt.Sprintf( + "version:%s make mongodb no auth config file content fail, error:%s", m.ConfParams.DbVersion, err)) + return fmt.Errorf("version:%s make mongodb no auth config file content fail, error:%s", + m.ConfParams.DbVersion, err) + } + conf.Security.KeyFile = m.KeyFilePath + // 获取验证配置文件内容 + m.AuthConfFileContent, err = conf.GetConfContent() + if err != nil { + m.runtime.Logger.Error(fmt.Sprintf( + "version:%s make mongodb auth config file content fail, error:%s", + m.ConfParams.DbVersion, err)) + return fmt.Errorf("version:%s make mongodb auth config file content fail, error:%s", + m.ConfParams.DbVersion, err) + } + m.runtime.Logger.Info("make mongodb config file content successfully") + return nil + } + + // mongodb 3.0以下获取配置文件内容 + // 获取非验证配置文件内容 + m.runtime.Logger.Info("start to make mongodb config file content") + NoAuthConf := common.IniNoAuthMongoDBConf + AuthConf := common.IniAuthMongoDBConf + replSet := strings.Join([]string{m.ConfParams.App, m.ConfParams.AreaId, m.ConfParams.SetId}, + "-") + NoAuthConf = strings.Replace(NoAuthConf, "{{replSet}}", replSet, -1) + AuthConf = strings.Replace(AuthConf, "{{replSet}}", replSet, -1) + NoAuthConf = strings.Replace(NoAuthConf, "{{dbpath}}", m.DbpathDir, -1) + AuthConf = strings.Replace(AuthConf, "{{dbpath}}", m.DbpathDir, -1) + NoAuthConf = strings.Replace(NoAuthConf, "{{logpath}}", m.LogPath, -1) + AuthConf = strings.Replace(AuthConf, "{{logpath}}", m.LogPath, -1) + NoAuthConf = strings.Replace(NoAuthConf, "{{pidfilepath}}", m.PidFilePath, -1) + AuthConf = strings.Replace(AuthConf, "{{pidfilepath}}", m.PidFilePath, -1) + strPort := strconv.Itoa(m.ConfParams.Port) + NoAuthConf = strings.Replace(NoAuthConf, "{{port}}", strPort, -1) + AuthConf = strings.Replace(AuthConf, "{{port}}", strPort, -1) + bindIP := strings.Join([]string{"127.0.0.1", m.ConfParams.IP}, ",") + NoAuthConf = strings.Replace(NoAuthConf, "{{bind_ip}}", bindIP, -1) + AuthConf = strings.Replace(AuthConf, "{{bind_ip}}", bindIP, -1) + strOplogSize := strconv.Itoa(m.ConfParams.DbConfig.OplogSizeMB) + NoAuthConf = strings.Replace(NoAuthConf, "{{oplogSize}}", strOplogSize, -1) + AuthConf = strings.Replace(AuthConf, "{{oplogSize}}", strOplogSize, -1) + NoAuthConf = strings.Replace(NoAuthConf, "{{instanceRole}}", m.ConfParams.ClusterRole, -1) + AuthConf = 
strings.Replace(AuthConf, "{{instanceRole}}", m.ConfParams.ClusterRole, -1) + AuthConf = strings.Replace(AuthConf, "{{keyFile}}", m.KeyFilePath, -1) + m.NoAuthConfFileContent = []byte(NoAuthConf) + m.AuthConfFileContent = []byte(AuthConf) + m.runtime.Logger.Info("make mongodb config file content successfully") + + return nil +} + +// checkParams 校验参数 检查输入的参数 检查端口是否合规 检查安装包 检查端口是否被使用(如果使用,则检查是否是mongodb服务) +func (m *MongoDBInstall) checkParams() (bool, error) { + // 校验MongoDB配置文件 + m.runtime.Logger.Info("start to validate parameters") + validate := validator.New() + m.runtime.Logger.Info("start to validate parameters of mongodb config file") + if err := validate.Struct(m.ConfParams); err != nil { + m.runtime.Logger.Error(fmt.Sprintf("validate parameters of mongodb config file fail, error:%s", err)) + return false, fmt.Errorf("validate parameters of mongodb config file fail, error:%s", err) + } + // 校验port是否合规 + m.runtime.Logger.Info("start to validate port if it is correct") + if m.ConfParams.Port < MongoDBPortMin || m.ConfParams.Port > MongoDBPortMax { + m.runtime.Logger.Error(fmt.Sprintf( + "validate port if it is correct, port is not within default range [%d,%d]", + MongoDBPortMin, MongoDBPortMax)) + return false, fmt.Errorf("validate port if it is correct, port is not within default range [%d,%d]", + MongoDBPortMin, MongoDBPortMax) + } + + // 校验安装包是否存在,md5值是否一致 + m.runtime.Logger.Info("start to validate install package") + if flag := util.FileExists(m.InstallPackagePath); !flag { + m.runtime.Logger.Error(fmt.Sprintf("validate install package, %s does not exist", + m.InstallPackagePath)) + return false, fmt.Errorf("validate install package, %s does not exist", + m.InstallPackagePath) + } + md5, _ := util.GetFileMd5(m.InstallPackagePath) + if m.ConfParams.MediaPkg.PkgMd5 != md5 { + m.runtime.Logger.Error(fmt.Sprintf("validate install package md5 fail, md5 is incorrect")) + return false, fmt.Errorf("validate install package md5 fail, md5 is incorrect") + } + + // 校验端口是否使用 + m.runtime.Logger.Info("start to validate port if it has been used") + flag, _ := util.CheckPortIsInUse(m.ConfParams.IP, strconv.Itoa(m.ConfParams.Port)) + if flag { + // 校验端口是否是mongod进程 + cmd := fmt.Sprintf("netstat -ntpl |grep %d | awk '{print $7}' |head -1", m.ConfParams.Port) + result, _ := util.RunBashCmd(cmd, "", nil, 10*time.Second) + if strings.Contains(result, "mongod") { + // 检查配置文件是否一致,读取已有配置文件与新生成的配置文件内容对比 + content, _ := ioutil.ReadFile(m.AuthConfFilePath) + if strings.Compare(string(content), string(m.AuthConfFileContent)) == 0 { + // 检查mongod版本 + version, err := common.CheckMongoVersion(m.BinDir, "mongod") + if err != nil { + m.runtime.Logger.Error( + fmt.Sprintf("mongod has been installed, port:%d, check mongod version fail. error:%s", + m.ConfParams.Port, version)) + return false, fmt.Errorf("mongod has been installed, port:%d, check mongod version fail. 
error:%s", + m.ConfParams.Port, version) + } + if version == m.ConfParams.DbVersion { + m.runtime.Logger.Info(fmt.Sprintf("mongod has been installed, port:%d, version:%s", + m.ConfParams.Port, version)) + return true, nil + } + m.runtime.Logger.Error(fmt.Sprintf("other mongod has been installed, port:%d, version:%s", + m.ConfParams.Port, version)) + return false, fmt.Errorf("other mongod has been installed, port:%d, version:%s", + m.ConfParams.Port, version) + } + + } + m.runtime.Logger.Error( + fmt.Sprintf("validate port if it has been used, port:%d is used by other process", + m.ConfParams.Port)) + return false, fmt.Errorf("validate port if it has been used, port:%d is used by other process", + m.ConfParams.Port) + } + m.runtime.Logger.Info("validate parameters successfully") + return false, nil +} + +// unTarAndCreateSoftLink 解压安装包,创建软链接并给目录授权 +func (m *MongoDBInstall) unTarAndCreateSoftLink() error { + // 解压目录 + unTarPath := filepath.Join(m.BinDir, m.ConfParams.MediaPkg.GePkgBaseName()) + + // soft link目录 + installPath := filepath.Join(m.BinDir, "mongodb") + + // 解压安装包并授权 + // 安装多实例并发执行添加文件锁 + m.runtime.Logger.Info("start to get install file lock") + fileLock := common.NewFileLock(m.LockFilePath) + // 获取锁 + err := fileLock.Lock() + if err != nil { + for { + err = fileLock.Lock() + if err != nil { + time.Sleep(1 * time.Second) + continue + } + m.runtime.Logger.Info("get install file lock successfully") + break + } + } else { + m.runtime.Logger.Info("get install file lock successfully") + } + + if err = common.UnTarAndCreateSoftLinkAndChown(m.runtime, m.BinDir, + m.InstallPackagePath, unTarPath, installPath, m.OsUser, m.OsGroup); err != nil { + return err + } + // 释放锁 + _ = fileLock.UnLock() + m.runtime.Logger.Info("release install file lock successfully") + + // 检查mongod版本 + m.runtime.Logger.Info("start to check mongod version") + version, err := common.CheckMongoVersion(m.BinDir, "mongod") + if err != nil { + m.runtime.Logger.Error(fmt.Sprintf("%s has been existed, check mongodb version, error:%s", + installPath, err)) + return fmt.Errorf("%s has been existed, check mongodb version, error:%s", + installPath, err) + } + if version != m.ConfParams.DbVersion { + m.runtime.Logger.Error( + fmt.Sprintf("%s has been existed, check mongodb version, version:%s is incorrect", + installPath, version)) + return fmt.Errorf("%s has been existed, check mongodb version, version:%s is incorrect", + installPath, version) + } + m.runtime.Logger.Info("check mongod version successfully") + return nil +} + +// mkdir 创建相关目录并给目录授权 +func (m *MongoDBInstall) mkdir() error { + // 创建日志文件目录 + logPathDir, _ := filepath.Split(m.LogPath) + m.runtime.Logger.Info("start to create log directory") + if err := util.MkDirsIfNotExistsWithPerm([]string{logPathDir}, DefaultPerm); err != nil { + m.runtime.Logger.Error(fmt.Sprintf("create log directory fail, error:%s", err)) + return fmt.Errorf("create log directory fail, error:%s", err) + } + m.runtime.Logger.Info("create log directory successfully") + + // 创建数据文件目录 + m.runtime.Logger.Info("start to create data directory") + if err := util.MkDirsIfNotExistsWithPerm([]string{m.DbpathDir}, DefaultPerm); err != nil { + m.runtime.Logger.Error(fmt.Sprintf("create data directory fail, error:%s", err)) + return fmt.Errorf("create data directory fail, error:%s", err) + } + m.runtime.Logger.Info("create data directory successfully") + + // 创建备份文件目录 + m.runtime.Logger.Info("start to create backup directory") + if err := util.MkDirsIfNotExistsWithPerm([]string{m.BackupDir}, DefaultPerm); 
err != nil { + m.runtime.Logger.Error(fmt.Sprintf("create backup directory fail, error:%s", err)) + return fmt.Errorf("create backup directory fail, error:%s", err) + } + m.runtime.Logger.Info("create backup directory successfully") + + // 修改目录属主 + m.runtime.Logger.Info("start to execute chown command for dbPath, logPath and backupPath") + if _, err := util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s", m.OsUser, m.OsGroup, filepath.Join(logPathDir, "../")), + "", nil, + 10*time.Second); err != nil { + m.runtime.Logger.Error(fmt.Sprintf("chown log directory fail, error:%s", err)) + return fmt.Errorf("chown log directory fail, error:%s", err) + } + if _, err := util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s", m.OsUser, m.OsGroup, filepath.Join(m.DbpathDir, "../../")), + "", nil, + 10*time.Second); err != nil { + m.runtime.Logger.Error(fmt.Sprintf("chown data directory fail, error:%s", err)) + return fmt.Errorf("chown data directory fail, error:%s", err) + } + if _, err := util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s", m.OsUser, m.OsGroup, m.BackupDir), + "", nil, + 10*time.Second); err != nil { + m.runtime.Logger.Error(fmt.Sprintf("chown backup directory fail, error:%s", err)) + return fmt.Errorf("chown backup directory fail, error:%s", err) + } + m.runtime.Logger.Info("execute chown command for dbPath, logPath and backupPath successfully") + return nil +} + +// createConfFileAndKeyFileAndDbTypeFile 创建配置文件以及key文件 +func (m *MongoDBInstall) createConfFileAndKeyFileAndDbTypeFile() error { + if err := common.CreateConfFileAndKeyFileAndDbTypeFileAndChown( + m.runtime, m.AuthConfFilePath, m.AuthConfFileContent, m.OsUser, m.OsGroup, m.NoAuthConfFilePath, + m.NoAuthConfFileContent, m.KeyFilePath, m.ConfParams.App, m.ConfParams.AreaId, m.DbTypeFilePath, + m.ConfParams.InstanceType, DefaultPerm); err != nil { + return err + } + return nil +} + +// startup 启动服务 +func (m *MongoDBInstall) startup() error { + // 声明mongod可执行文件路径,把路径写入/etc/profile + if err := common.AddPathToProfile(m.runtime, m.BinDir); err != nil { + return err + } + + // 启动服务 + m.runtime.Logger.Info("start to startup mongod") + if err := common.StartMongoProcess(m.BinDir, m.ConfParams.Port, + m.OsUser, m.ConfParams.Auth); err != nil { + m.runtime.Logger.Error("startup mongod fail, error:%s", err) + return fmt.Errorf("startup mongod fail, error:%s", err) + } + flag, service, err := common.CheckMongoService(m.ConfParams.Port) + if err != nil { + m.runtime.Logger.Error("check %s fail, error:%s", service, err) + return fmt.Errorf("check %s fail, error:%s", service, err) + } + if flag == false { + m.runtime.Logger.Error("startup %s fail", service) + return fmt.Errorf("startup %s fail", service) + } + m.runtime.Logger.Info("startup %s successfully", service) + + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongod_replace.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongod_replace.go new file mode 100644 index 0000000000..55dd3b5c2c --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongod_replace.go @@ -0,0 +1,355 @@ +package atommongodb + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/go-playground/validator/v10" +) + +// MongoDReplaceConfParams 参数 // cluster 
关闭stopBalancer +type MongoDReplaceConfParams struct { + IP string `json:"ip" validate:"required"` // 执行节点 + Port int `json:"port" validate:"required"` + SourceIP string `json:"sourceIP" validate:"required"` // 源节点 + SourcePort int `json:"sourcePort" validate:"required"` + SourceDown bool `json:"sourceDown"` // 源端已down机 true:已down false:未down + AdminUsername string `json:"adminUsername" validate:"required"` + AdminPassword string `json:"adminPassword" validate:"required"` + TargetIP string `json:"targetIP" validate:"required"` // 目标节点 + TargetPort int `json:"targetPort" validate:"required"` + TargetPriority string `json:"targetPriority"` // 可选,默认为null,如果为null,则使用source端的Priority,取值:0-正无穷 + TargetHidden string `json:"targetHidden"` // 可选,默认为null,如果为null,则使用source端的Hidden,取值:null,0,1,0:显现 1:隐藏 +} + +// MongoDReplace 替换复制集节点 +type MongoDReplace struct { + runtime *jobruntime.JobGenericRuntime + BinDir string + Mongo string + OsUser string + DataDir string + DbpathDir string + PrimaryIP string + PrimaryPort int + AddTargetScript string + ConfParams *MongoDReplaceConfParams + TargetIPStatus int + TargetPriority int + TargetHidden bool + StatusCh chan int +} + +// NewMongoDReplace 实例化结构体 +func NewMongoDReplace() jobruntime.JobRunner { + return &MongoDReplace{} +} + +// Name 获取原子任务的名字 +func (r *MongoDReplace) Name() string { + return "mongod_replace" +} + +// Run 运行原子任务 +func (r *MongoDReplace) Run() error { + // 主节点进行切换 + if err := r.primaryStepDown(); err != nil { + return err + } + + // 生成添加新节点脚本 + if err := r.makeAddTargetScript(); err != nil { + return err + } + + // 执行添加新节点脚本 + if err := r.execAddTargetScript(); err != nil { + return err + } + + // 查看新节点状态 + go r.checkTargetStatus() + + // 检查新节点状态并移除老节点 + if err := r.checkTargetStatusAndRemoveSource(); err != nil { + return err + } + + return nil +} + +// Retry 重试 +func (r *MongoDReplace) Retry() uint { + return 2 +} + +// Rollback 回滚 +func (r *MongoDReplace) Rollback() error { + return nil +} + +// Init 初始化 +func (r *MongoDReplace) Init(runtime *jobruntime.JobGenericRuntime) error { + // 获取安装参数 + r.runtime = runtime + r.runtime.Logger.Info("start to init") + r.BinDir = consts.UsrLocal + r.Mongo = filepath.Join(r.BinDir, "mongodb", "bin", "mongo") + r.OsUser = consts.GetProcessUser() + r.DataDir = consts.GetMongoDataDir() + + // 获取MongoDB配置文件参数 + if err := json.Unmarshal([]byte(r.runtime.PayloadDecoded), &r.ConfParams); err != nil { + r.runtime.Logger.Error(fmt.Sprintf( + "get parameters of mongodReplace fail by json.Unmarshal, error:%s", err)) + return fmt.Errorf("get parameters of mongodReplace fail by json.Unmarshal, error:%s", err) + } + + r.DbpathDir = filepath.Join(r.DataDir, "mongodata", strconv.Itoa(r.ConfParams.Port), "db") + + // 获取primary信息 + info, err := common.AuthGetPrimaryInfo(r.OsUser, r.Mongo, r.ConfParams.AdminUsername, r.ConfParams.AdminPassword, + r.ConfParams.IP, r.ConfParams.Port) + if err != nil { + r.runtime.Logger.Error(fmt.Sprintf( + "get primary db info of mongodReplace fail, error:%s", err)) + return fmt.Errorf("get primary db info of mongodReplace fail, error:%s", err) + } + // 判断info是否为null + if info == "" { + r.runtime.Logger.Error("get primary db info of mongodReplace fail, primary info is empty") + return fmt.Errorf("get primary db info of mongodReplace fail, primary info is empty") + } + getInfo := strings.Split(info, ":") + r.PrimaryIP = getInfo[0] + r.PrimaryPort, _ = strconv.Atoi(getInfo[1]) + r.StatusCh = make(chan int, 1) + + // 获取源端的配置信息 + _, _, _, hidden, priority, _, err := common.GetNodeInfo(r.OsUser,
r.Mongo, r.PrimaryIP, r.PrimaryPort, + r.ConfParams.AdminUsername, r.ConfParams.AdminPassword, r.ConfParams.SourceIP, r.ConfParams.SourcePort) + if err != nil { + return err + } + r.TargetHidden = hidden + if r.ConfParams.TargetHidden == "0" { + r.TargetHidden = false + } else if r.ConfParams.TargetHidden == "1" { + r.TargetHidden = true + } + + r.TargetPriority = priority + if r.ConfParams.TargetPriority != "" { + r.TargetPriority, _ = strconv.Atoi(r.ConfParams.TargetPriority) + } + + r.runtime.Logger.Info("init successfully") + + // 进行校验 + if err = r.checkParams(); err != nil { + return err + } + + return nil +} + +// checkParams 校验参数 +func (r *MongoDReplace) checkParams() error { + // 校验替换配置参数 + validate := validator.New() + r.runtime.Logger.Info("start to validate parameters of mongodReplace") + if err := validate.Struct(r.ConfParams); err != nil { + r.runtime.Logger.Error("validate parameters of mongodReplace fail, error:%s", err) + return fmt.Errorf("validate parameters of mongodReplace fail, error:%s", err) + } + r.runtime.Logger.Info("validate parameters of mongodReplace successfully") + return nil +} + +func (r *MongoDReplace) makeAddTargetScript() error { + // 生成脚本内容 + r.runtime.Logger.Info("start to make addTarget script content") + addMember := common.NewReplicasetMemberAdd() + addMember.Host = strings.Join([]string{r.ConfParams.TargetIP, strconv.Itoa(r.ConfParams.TargetPort)}, ":") + addMember.Priority = r.TargetPriority + addMember.Hidden = r.TargetHidden + addMemberJson, err := addMember.GetJson() + if err != nil { + r.runtime.Logger.Error("get addMemberJson info fail, error:%s", err) + return fmt.Errorf("get addMemberJson info fail, error:%s", err) + } + addMemberJson = strings.Replace(addMemberJson, "\"", "\\\"", -1) + addTargetConfScript := strings.Join([]string{"rs.add(", addMemberJson, ")"}, "") + r.AddTargetScript = addTargetConfScript + r.runtime.Logger.Info("make addTarget script content successfully") + return nil +} + +// execAddTargetScript 执行添加脚本 +func (r *MongoDReplace) execAddTargetScript() error { + // 检查target是否已经存在 + flag, _, _, _, _, _, _ := common.GetNodeInfo(r.OsUser, r.Mongo, r.PrimaryIP, r.PrimaryPort, + r.ConfParams.AdminUsername, r.ConfParams.AdminPassword, r.ConfParams.TargetIP, r.ConfParams.TargetPort) + if flag == true { + r.runtime.Logger.Info("target:%s has been existed", strings.Join( + []string{r.ConfParams.TargetIP, strconv.Itoa(r.ConfParams.TargetPort)}, ":")) + return nil + } + + r.runtime.Logger.Info("start to execute addTarget script") + cmd := fmt.Sprintf( + "su %s -c '%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \"%s\"'", + r.OsUser, r.Mongo, r.ConfParams.AdminUsername, r.ConfParams.AdminPassword, r.PrimaryIP, + r.PrimaryPort, r.AddTargetScript) + if _, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second); err != nil { + r.runtime.Logger.Error("execute addTarget script fail, error:%s", err) + return fmt.Errorf("execute addTarget script fail, error:%s", err) + } + r.runtime.Logger.Info("execute addTarget script successfully") + return nil +} + +// checkTargetStatus 检查target状态 +func (r *MongoDReplace) checkTargetStatus() { + // 超时时间 + timeout := time.After(40 * time.Second) + r.runtime.Logger.Info("start to check Target status") + for { + select { + case <-timeout: + r.runtime.Logger.Error("get target status timeout") + return + default: + _, _, status, _, _, _, err := common.GetNodeInfo(r.OsUser, r.Mongo, r.PrimaryIP, r.PrimaryPort, + r.ConfParams.AdminUsername, + r.ConfParams.AdminPassword,
r.ConfParams.TargetIP, r.ConfParams.TargetPort) + if err != nil { + r.runtime.Logger.Error("get target status fail, error:%s", err) + } + if status == 2 { + r.StatusCh <- 2 + r.runtime.Logger.Info("target status is %d", status) + return + } + time.Sleep(5 * time.Second) + } + } +} + +// primaryStepDown 主库切换 +func (r *MongoDReplace) primaryStepDown() error { + if r.ConfParams.SourceIP == r.PrimaryIP && r.ConfParams.SourcePort == r.PrimaryPort { + r.runtime.Logger.Info("start to convert primary secondary db") + flag, err := common.AuthRsStepDown(r.OsUser, r.Mongo, r.PrimaryIP, r.PrimaryPort, r.ConfParams.AdminUsername, + r.ConfParams.AdminPassword) + if err != nil { + r.runtime.Logger.Error(fmt.Sprintf("convert primary secondary db fail, error:%s", err)) + return fmt.Errorf("convert primary secondary db fail, error:%s", err) + } + if flag == true { + info, err := common.AuthGetPrimaryInfo(r.OsUser, r.Mongo, r.ConfParams.AdminUsername, r.ConfParams.AdminPassword, + r.ConfParams.IP, r.ConfParams.Port) + if err != nil { + r.runtime.Logger.Error(fmt.Sprintf("get new primary info fail, error:%s", err)) + return fmt.Errorf("get new primary info fail, error:%s", err) + } + if info != fmt.Sprintf("%s:%d", r.ConfParams.IP, r.ConfParams.Port) { + r.runtime.Logger.Info("convert primary secondary db successfully") + infoSlice := strings.Split(info, ":") + r.PrimaryIP = infoSlice[0] + r.PrimaryPort, _ = strconv.Atoi(infoSlice[1]) + return nil + } + } + } + return nil +} + +// shutdownSourceProcess 关闭源端mongod进程 +func (r *MongoDReplace) shutdownSourceProcess() error { + flag, _, _ := common.CheckMongoService(r.ConfParams.Port) + if flag == false { + r.runtime.Logger.Info("source mongod process has been shut") + return nil + } + r.runtime.Logger.Info("start to shutdown source mongod process") + if err := common.ShutdownMongoProcess(r.OsUser, "mongod", r.BinDir, r.DbpathDir, r.ConfParams.Port); err != nil { + source := fmt.Sprintf("%s:%d", r.ConfParams.IP, r.ConfParams.Port) + r.runtime.Logger.Error(fmt.Sprintf("shutdown source:%s fail, error:%s", source, err)) + return fmt.Errorf("shutdown source:%s fail, error:%s", source, err) + } + r.runtime.Logger.Info("shutdown source mongod process successfully") + return nil +} + +// removeSource 复制集中移除source +func (r *MongoDReplace) removeSource() error { + // 检查source是否存在 + flag, _, _, _, _, _, _ := common.GetNodeInfo(r.OsUser, r.Mongo, r.PrimaryIP, r.PrimaryPort, + r.ConfParams.AdminUsername, r.ConfParams.AdminPassword, r.ConfParams.SourceIP, r.ConfParams.SourcePort) + if flag == false { + r.runtime.Logger.Info("source:%s has been remove", strings.Join( + []string{r.ConfParams.SourceIP, strconv.Itoa(r.ConfParams.SourcePort)}, ":")) + return nil + } + r.runtime.Logger.Info("start to make remove source script content") + removeSourceConfScript := strings.Join([]string{ + "rs.remove(", + fmt.Sprintf("\\\"%s:%d\\\"", r.ConfParams.SourceIP, r.ConfParams.SourcePort), + ")"}, "") + r.runtime.Logger.Info("make remove source script content successfully") + r.runtime.Logger.Info("start to execute remove source script") + cmd := fmt.Sprintf( + "su %s -c '%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \"%s\"'", + r.OsUser, r.Mongo, r.ConfParams.AdminUsername, r.ConfParams.AdminPassword, r.PrimaryIP, + r.PrimaryPort, removeSourceConfScript) + if _, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second); err != nil { + r.runtime.Logger.Error(fmt.Sprintf("execute remove source script fail, error:%s", err)) + return fmt.Errorf("execute 
remove source script fail, error:%s", err) + } + r.runtime.Logger.Info("execute remove source script successfully") + return nil +} + +// checkTargetStatusAndRemoveSource 监控状态并移除 +func (r *MongoDReplace) checkTargetStatusAndRemoveSource() error { + // 超时时间 + timeout := time.After(50 * time.Second) + for { + select { + case <-timeout: + return fmt.Errorf("check target status timeout") + case status := <-r.StatusCh: + if status == 2 && r.ConfParams.SourceDown == false { + if err := r.shutdownSourceProcess(); err != nil { + return err + } + if err := r.removeSource(); err != nil { + return err + } + return nil + } else if status == 2 && r.ConfParams.SourceDown == true { + if err := r.removeSource(); err != nil { + return err + } + return nil + } + } + } +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongos_install.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongos_install.go new file mode 100644 index 0000000000..13dfa18c00 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/mongos_install.go @@ -0,0 +1,439 @@ +package atommongodb + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "io/ioutil" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/go-playground/validator/v10" +) + +// MongoSConfParams 配置文件参数 +type MongoSConfParams struct { + common.MediaPkg `json:"mediapkg"` + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` + InstanceType string `json:"instanceType" validate:"required"` // mongos mongod + App string `json:"app" validate:"required"` + AreaId string `json:"areaId" validate:"required"` + Auth bool `json:"auth"` // true:以验证方式启动mongos false:以非验证方式启动mongos + ConfigDB []string `json:"configDB" validate:"required"` // ip:port + DbConfig struct { + SlowOpThresholdMs int `json:"slowOpThresholdMs"` + Destination string `json:"destination"` + } `json:"dbConfig" validate:"required"` +} + +// MongoSInstall MongoS安装 +type MongoSInstall struct { + runtime *jobruntime.JobGenericRuntime + BinDir string + DataDir string + OsUser string // MongoDB安装在哪个用户下 + OsGroup string + ConfParams *MongoSConfParams + DbVersion string + AuthConfFilePath string + AuthConfFileContent []byte + NoAuthConfFilePath string + NoAuthConfFileContent []byte + DbTypeFilePath string + LogPath string + PidFilePath string + KeyFilePath string + InstallPackagePath string + LockFilePath string // 锁文件路径 +} + +// NewMongoSInstall 实例化结构体 +func NewMongoSInstall() jobruntime.JobRunner { + return &MongoSInstall{} +} + +// Name 获取原子任务的名字 +func (s *MongoSInstall) Name() string { + return "mongos_install" +} + +// Run 运行原子任务 +func (s *MongoSInstall) Run() error { + // 进行校验 + status, err := s.checkParams() + if err != nil { + return err + } + if status { + return nil + } + + // 解压安装包并修改属主 + if err = s.unTarAndCreateSoftLink(); err != nil { + return err + } + + // 创建目录并修改属主 + if err = s.mkdir(); err != nil { + return err + } + + // 创建配置文件,key文件并修改属主 + if err = s.createConfFileAndKeyFileAndDbTypeFile(); err != nil { + return err + } + + // 启动服务 + if err = s.startup(); err != nil { + return err + } + + return nil +} + +// Retry 重试 +func (s *MongoSInstall) Retry() uint { + return 2 +} + +// Rollback 回滚 +func (s *MongoSInstall) Rollback() error { + return nil +} + +// Init 初始化 +func (s
*MongoSInstall) Init(runtime *jobruntime.JobGenericRuntime) error { + // 获取安装参数 + s.runtime = runtime + s.runtime.Logger.Info("start to init") + s.BinDir = consts.UsrLocal + s.DataDir = consts.GetMongoDataDir() + s.OsUser = consts.GetProcessUser() + s.OsGroup = consts.GetProcessUserGroup() + + // 获取MongoDB配置文件参数 + if err := json.Unmarshal([]byte(s.runtime.PayloadDecoded), &s.ConfParams); err != nil { + s.runtime.Logger.Error(fmt.Sprintf( + "get parameters of mongodb config file fail by json.Unmarshal, error:%s", err)) + return fmt.Errorf("get parameters of mongodb config file fail by json.Unmarshal, error:%s", err) + } + + // 获取信息 + s.InstallPackagePath = s.ConfParams.MediaPkg.GetAbsolutePath() + s.DbVersion = strings.Split(s.ConfParams.MediaPkg.GePkgBaseName(), "-")[3] + + // 设置各种路径 + strPort := strconv.Itoa(s.ConfParams.Port) + s.AuthConfFilePath = filepath.Join(s.DataDir, "mongodata", strPort, "mongo.conf") + s.NoAuthConfFilePath = filepath.Join(s.DataDir, "mongodata", strPort, "noauth.conf") + s.LogPath = filepath.Join(s.DataDir, "mongolog", strPort, "mongo.log") + PidFileName := fmt.Sprintf("pid.%s", strPort) + s.PidFilePath = filepath.Join(s.DataDir, "mongodata", strPort, PidFileName) + s.KeyFilePath = filepath.Join(s.DataDir, "mongodata", strPort, "key_of_mongo") + s.DbTypeFilePath = filepath.Join(s.DataDir, "mongodata", strPort, "dbtype") + s.LockFilePath = filepath.Join(s.DataDir, "mongoinstall.lock") + + // 生成配置文件内容 + s.runtime.Logger.Info("make mongos config file content") + if err := s.makeConfContent(); err != nil { + return err + } + + return nil +} + +// makeConfContent 生成配置文件内容 +func (s *MongoSInstall) makeConfContent() error { + // 只支持mongos 3.0及以上得到配置文件内容 + // 判断mongos版本 + s.runtime.Logger.Info("start to make config file content") + mainVersion, err := strconv.Atoi(strings.Split(s.DbVersion, ".")[0]) + if err != nil { + s.runtime.Logger.Error( + "get %s version fail, error:%s", s.ConfParams.InstanceType, err) + return fmt.Errorf("get %s version fail, error:%s", s.ConfParams.InstanceType, err) + } + clusterId := strings.Join([]string{s.ConfParams.App, s.ConfParams.AreaId, "conf"}, "-") + IpConfigDB := strings.Join(s.ConfParams.ConfigDB, ",") + configDB := strings.Join([]string{clusterId, IpConfigDB}, "/") + + // 生成mongos配置文件 + conf := common.NewYamlMongoSConf() + conf.Sharding.ConfigDB = configDB + conf.SystemLog.LogAppend = true + conf.SystemLog.Path = s.LogPath + conf.SystemLog.Destination = s.ConfParams.DbConfig.Destination + conf.ProcessManagement.Fork = true + conf.ProcessManagement.PidFilePath = s.PidFilePath + conf.Net.Port = s.ConfParams.Port + conf.Net.BindIp = strings.Join([]string{"127.0.0.1", s.ConfParams.IP}, ",") + conf.Net.WireObjectCheck = false + // mongos版本小于4获取配置文件内容 + if mainVersion < 4 { + s.NoAuthConfFileContent, err = conf.GetConfContent() + if err != nil { + s.runtime.Logger.Error( + "version:%s make mongos no auth config file content fail, error:%s", s.DbVersion, err) + return fmt.Errorf("version:%s make mongos no auth config file content fail, error:%s", + s.DbVersion, err) + } + conf.Security.KeyFile = s.KeyFilePath + // 获取验证配置文件内容 + s.AuthConfFileContent, err = conf.GetConfContent() + if err != nil { + s.runtime.Logger.Error(fmt.Sprintf( + "version:%s make mongos auth config file content fail, error:%s", + s.DbVersion, err)) + return fmt.Errorf("version:%s make mongos auth config file content fail, error:%s", + s.DbVersion, err) + } + s.runtime.Logger.Info("make config file content successfully") + return nil + } + + // mongos版本4及以上获取配置文件内容 + 
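// Illustrative only: with the fields set above, GetConfContent would be expected + // to render YAML roughly of this shape (the exact layout is up to + // common.NewYamlMongoSConf, and paths/values depend on the runtime payload): + //   sharding: {configDB: "<app>-<areaId>-conf/<ip1>:<port1>,<ip2>:<port2>"} + //   systemLog: {logAppend: true, destination: file, path: <dataDir>/mongolog/<port>/mongo.log} + //   processManagement: {fork: true, pidFilePath: <dataDir>/mongodata/<port>/pid.<port>} + //   net: {port: <port>, bindIp: "127.0.0.1,<ip>", wireObjectCheck: false} + //   operationProfiling: {slowOpThresholdMs: <slowOpThresholdMs>} +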
conf.OperationProfiling.SlowOpThresholdMs = s.ConfParams.DbConfig.SlowOpThresholdMs + // 获取非验证配置文件内容 + s.NoAuthConfFileContent, err = conf.GetConfContent() + if err != nil { + s.runtime.Logger.Error( + "version:%s make mongos no auth config file content fail, error:%s", s.DbVersion, err) + return fmt.Errorf("version:%s make mongos no auth config file content fail, error:%s", + s.DbVersion, err) + } + conf.Security.KeyFile = s.KeyFilePath + // 获取验证配置文件内容 + s.AuthConfFileContent, err = conf.GetConfContent() + if err != nil { + s.runtime.Logger.Error(fmt.Sprintf( + "version:%s make mongos auth config file content fail, error:%s", + s.DbVersion, err)) + return fmt.Errorf("version:%s make mongos auth config file content fail, error:%s", + s.DbVersion, err) + } + s.runtime.Logger.Info("make config file content successfully") + return nil +} + +// checkParams 校验参数 检查输入的参数 检查端口是否合规 检查安装包 检查端口是否被使用(如果使用,则检查是否是mongodb服务) +func (s *MongoSInstall) checkParams() (bool, error) { + // 校验Mongo配置文件 + s.runtime.Logger.Info("start to validate parameters") + validate := validator.New() + s.runtime.Logger.Info("start to validate parameters of mongos config file") + if err := validate.Struct(s.ConfParams); err != nil { + s.runtime.Logger.Error(fmt.Sprintf("validate parameters of mongos config file fail, error:%s", err)) + return false, fmt.Errorf("validate parameters of mongos config file fail, error:%s", err) + } + s.runtime.Logger.Info("validate parameters of mongos config file successfully") + + // 校验port是否合规 + s.runtime.Logger.Info("start to validate port if it is correct") + if s.ConfParams.Port < MongoDBPortMin || s.ConfParams.Port > MongoDBPortMax { + s.runtime.Logger.Error(fmt.Sprintf( + "validate port if it is correct, port is not within default range [%d,%d]", + MongoDBPortMin, MongoDBPortMax)) + return false, fmt.Errorf("validate port if it is correct, port is not within default range [%d,%d]", + MongoDBPortMin, MongoDBPortMax) + } + s.runtime.Logger.Info("validate port if it is correct successfully") + + // 校验安装包是否存在,md5值是否一致 + s.runtime.Logger.Info("start to validate install package") + if flag := util.FileExists(s.InstallPackagePath); !flag { + s.runtime.Logger.Error(fmt.Sprintf("validate install package, %s does not exist", + s.InstallPackagePath)) + return false, fmt.Errorf("validate install package, %s does not exist", + s.InstallPackagePath) + } + md5, _ := util.GetFileMd5(s.InstallPackagePath) + if s.ConfParams.MediaPkg.PkgMd5 != md5 { + s.runtime.Logger.Error(fmt.Sprintf("validate install package md5 fail, md5 is incorrect")) + return false, fmt.Errorf("validate install package md5 fail, md5 is incorrect") + } + s.runtime.Logger.Info("validate install package md5 successfully") + + // 校验端口是否使用 + s.runtime.Logger.Info("start to validate port if it has been used") + flag, _ := util.CheckPortIsInUse(s.ConfParams.IP, strconv.Itoa(s.ConfParams.Port)) + if flag { + // 校验端口是否是mongos进程 + cmd := fmt.Sprintf("netstat -ntpl |grep %d | awk '{print $7}' |head -1", s.ConfParams.Port) + result, _ := util.RunBashCmd(cmd, "", nil, 10*time.Second) + if strings.Contains(result, "mongos") { + // 检查配置文件是否一致,读取已有配置文件与新生成的配置文件内容对比 + content, _ := ioutil.ReadFile(s.AuthConfFilePath) + if strings.Compare(string(content), string(s.AuthConfFileContent)) == 0 { + // 检查mongodb版本 + version, err := common.CheckMongoVersion(s.BinDir, "mongos") + if err != nil { + s.runtime.Logger.Error( + fmt.Sprintf("mongos has been installed, port:%d, 
check mongos version fail. error:%s", + s.ConfParams.Port, version)) + return false, fmt.Errorf("mongos has been installed, port:%d, check mongos version fail. error:%s", + s.ConfParams.Port, version) + } + if version == s.DbVersion { + s.runtime.Logger.Info(fmt.Sprintf("mongos has been installed, port:%d, version:%s", + s.ConfParams.Port, version)) + return true, nil + } + s.runtime.Logger.Error(fmt.Sprintf("other mongos has been installed, port:%d, version:%s", + s.ConfParams.Port, version)) + return false, fmt.Errorf("other mongos has been installed, port:%d, version:%s", + s.ConfParams.Port, version) + } + + } + s.runtime.Logger.Error( + fmt.Sprintf("validate port if it has been used, port:%d is used by other process", + s.ConfParams.Port)) + return false, fmt.Errorf("validate port if it has been used, port:%d is used by other process", + s.ConfParams.Port) + } + s.runtime.Logger.Info("validate port if it has been used successfully") + s.runtime.Logger.Info("validate parameters successfully") + return false, nil +} + +// unTarAndCreateSoftLink 解压安装包,创建软链接并给目录授权 +func (s *MongoSInstall) unTarAndCreateSoftLink() error { + // 判断解压目录是否存在 + unTarPath := filepath.Join(s.BinDir, s.ConfParams.MediaPkg.GePkgBaseName()) + + // soft link目录 + installPath := filepath.Join(s.BinDir, "mongodb") + + // 解压安装包并授权 + // 安装多实例并发执行添加文件锁 + s.runtime.Logger.Info("start to get install file lock") + fileLock := common.NewFileLock(s.LockFilePath) + // 获取锁 + err := fileLock.Lock() + if err != nil { + for { + err = fileLock.Lock() + if err != nil { + time.Sleep(1 * time.Second) + continue + } + s.runtime.Logger.Info("get install file lock successfully") + break + } + } else { + s.runtime.Logger.Info("get install file lock successfully") + } + if err = common.UnTarAndCreateSoftLinkAndChown(s.runtime, s.BinDir, + s.InstallPackagePath, unTarPath, installPath, s.OsUser, s.OsGroup); err != nil { + return err + } + // 释放锁 + s.runtime.Logger.Info("release install file lock successfully") + _ = fileLock.UnLock() + + // 检查mongos版本 + s.runtime.Logger.Info("start to check mongos version") + version, err := common.CheckMongoVersion(s.BinDir, "mongos") + if err != nil { + s.runtime.Logger.Error(fmt.Sprintf("%s has been existed, check mongodb version, error:%s", + installPath, err)) + return fmt.Errorf("%s has been existed, check mongodb version, error:%s", + installPath, err) + } + if version != s.DbVersion { + s.runtime.Logger.Error( + fmt.Sprintf("%s has been existed, check mongodb version, version:%s is incorrect", + installPath, version)) + return fmt.Errorf("%s has been existed, check mongodb version, version:%s is incorrect", + installPath, version) + } + s.runtime.Logger.Info("check mongos version successfully") + return nil +} + +// mkdir 创建相关目录并给目录授权 +func (s *MongoSInstall) mkdir() error { + // 创建日志文件目录 + logPathDir, _ := filepath.Split(s.LogPath) + s.runtime.Logger.Info("start to create log directory") + if err := util.MkDirsIfNotExistsWithPerm([]string{logPathDir}, DefaultPerm); err != nil { + s.runtime.Logger.Error(fmt.Sprintf("create log directory fail, error:%s", err)) + return fmt.Errorf("create log directory fail, error:%s", err) + } + s.runtime.Logger.Info("create log directory successfully") + + // 创建配置文件目录 + confFilePathDir, _ := filepath.Split(s.AuthConfFilePath) + s.runtime.Logger.Info("start to create data directory") + if err := util.MkDirsIfNotExistsWithPerm([]string{confFilePathDir}, DefaultPerm); err != nil { + s.runtime.Logger.Error(fmt.Sprintf("create data directory fail, error:%s", err)) + return 
fmt.Errorf("create data directory fail, error:%s", err) + } + s.runtime.Logger.Info("create data directory successfully") + + // 修改目录属主 + s.runtime.Logger.Info("start to execute chown command for logPath and confPath") + if _, err := util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s", s.OsUser, s.OsGroup, filepath.Join(logPathDir, "../")), + "", nil, + 10*time.Second); err != nil { + s.runtime.Logger.Error(fmt.Sprintf("chown log directory fail, error:%s", err)) + return fmt.Errorf("chown log directory fail, error:%s", err) + } + if _, err := util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s", s.OsUser, s.OsGroup, filepath.Join(confFilePathDir, "../")), + "", nil, + 10*time.Second); err != nil { + s.runtime.Logger.Error(fmt.Sprintf("chown data directory fail, error:%s", err)) + return fmt.Errorf("chown data directory fail, error:%s", err) + } + s.runtime.Logger.Info("execute chown command for logPath and confPath successfully") + return nil +} + +// createConfFileAndKeyFileAndDbTypeFile 创建配置文件以及key文件 +func (s *MongoSInstall) createConfFileAndKeyFileAndDbTypeFile() error { + // 创建配置文件,key文件,dbType文件并授权 + if err := common.CreateConfFileAndKeyFileAndDbTypeFileAndChown( + s.runtime, s.AuthConfFilePath, s.AuthConfFileContent, s.OsUser, s.OsGroup, s.NoAuthConfFilePath, + s.NoAuthConfFileContent, s.KeyFilePath, s.ConfParams.App, s.ConfParams.AreaId, s.DbTypeFilePath, + s.ConfParams.InstanceType, DefaultPerm); err != nil { + return err + } + return nil +} + +// startup 启动服务 +func (s *MongoSInstall) startup() error { + // 声明mongos可执行文件路径,把路径写入/etc/profile + if err := common.AddPathToProfile(s.runtime, s.BinDir); err != nil { + return err + } + + // 启动服务 + s.runtime.Logger.Info("start to startup mongos") + if err := common.StartMongoProcess(s.BinDir, s.ConfParams.Port, + s.OsUser, s.ConfParams.Auth); err != nil { + s.runtime.Logger.Error(fmt.Sprintf("startup mongos fail, error:%s", err)) + return fmt.Errorf("startup mongos fail, error:%s", err) + } + flag, service, err := common.CheckMongoService(s.ConfParams.Port) + if err != nil { + s.runtime.Logger.Error("check %s fail, error:%s", service, err) + return fmt.Errorf("check %s fail, error:%s", service, err) + } + if flag == false { + s.runtime.Logger.Error("startup %s fail", service) + return fmt.Errorf("startup %s fail", service) + } + s.runtime.Logger.Info("startup %s successfully", service) + + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/replicaset_install_test.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/replicaset_install_test.go new file mode 100644 index 0000000000..c97c9fab5c --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/replicaset_install_test.go @@ -0,0 +1,338 @@ +package atommongodb + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "fmt" + "net" + "path" + "strings" + "testing" + "time" +) + +// TestReplicaset 复制集安装及相关操作测试 +func TestReplicaset(t *testing.T) { + // 设置环境变量 + err := consts.SetMongoDataDir("") + if err != nil { + fmt.Println(fmt.Sprintf("replicate install SetMongoData fail, error:%s", err)) + t.Errorf("replicate install SetMongoData fail, error:%s", err) + return + } + err = consts.SetMongoBackupDir("") + if err != nil { + fmt.Println(fmt.Sprintf("replicate install SetMongoBackup fail, error:%s", err)) + 
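// SetMongoBackupDir("") presumably falls back to the default backup directory; + // every later step in this test depends on it, so abort the whole test on failure. +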
t.Errorf("replicate install SetMongoBackup fail, error:%s", err) + return + } + + err = consts.SetProcessUser("") + if err != nil { + fmt.Println(fmt.Sprintf("replicate install SetProcessUser fail, error:%s", err)) + t.Errorf("replicate install SetProcessUser fail, error:%s", err) + return + } + err = consts.SetProcessUserGroup("") + if err != nil { + fmt.Println(fmt.Sprintf("replicate install SetProcessUserGroup fail, error:%s", err)) + t.Errorf("replicate install SetProcessUserGroup fail, error:%s", err) + return + } + // 初始化节点 + osSysInitParam := "{\n\"user\":\"mysql\",\n\"password\":\"Qwe123d\"\n}" + osSysInit := &atomsys.OsMongoInit{} + osSysInitRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: osSysInitParam, + } + osSysInitRuntime.SetLogger() + if err := osSysInit.Init(osSysInitRuntime); err != nil { + fmt.Println(fmt.Sprintf("replicate install osSysInit init fail, error:%s", err)) + t.Errorf("replicate install osSysInit init fail, error:%s", err) + return + } + if err := osSysInit.Run(); err != nil { + fmt.Println(fmt.Sprintf("replicate install osSysInit run fail, error:%s", err)) + t.Errorf("replicate install osSysInit run fail, error:%s", err) + return + } + + // 获取本机IP地址 + var ip string + addrs, _ := net.InterfaceAddrs() + for _, addr := range addrs { + if !strings.Contains(addr.String(), "127.0.0.1") { + ip = strings.Split(addr.String(), "/")[0] + break + } + } + + // node1 + node1 := "{\n \"mediapkg\":{\n \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n },\n \"ip\":\"{{ip}}\",\n \"port\":27001,\n \"dbVersion\":\"3.4.20\",\n \"instanceType\":\"mongod\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"s1\",\n \"auth\": true,\n \"clusterRole\":\"\",\n \"dbConfig\":{\n \"slowOpThresholdMs\":200,\n \"cacheSizeGB\":1,\n \"oplogSizeMB\":500,\n \"destination\":\"file\"\n }\n}" + node1 = strings.Replace(node1, "{{ip}}", ip, -1) + + // node2 + node2 := "{\n \"mediapkg\":{\n \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n },\n \"ip\":\"{{ip}}\",\n \"port\":27002,\n \"dbVersion\":\"3.4.20\",\n \"instanceType\":\"mongod\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"s1\",\n \"auth\": true,\n \"clusterRole\":\"\",\n \"dbConfig\":{\n \"slowOpThresholdMs\":200,\n \"cacheSizeGB\":1,\n \"oplogSizeMB\":500,\n \"destination\":\"file\"\n }\n}" + node2 = strings.Replace(node2, "{{ip}}", ip, -1) + + // node3 + node3 := "{\n \"mediapkg\":{\n \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n },\n \"ip\":\"{{ip}}\",\n \"port\":27003,\n \"dbVersion\":\"3.4.20\",\n \"instanceType\":\"mongod\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"s1\",\n \"auth\": true,\n \"clusterRole\":\"\",\n \"dbConfig\":{\n \"slowOpThresholdMs\":200,\n \"cacheSizeGB\":1,\n \"oplogSizeMB\":500,\n \"destination\":\"file\"\n }\n}" + node3 = strings.Replace(node3, "{{ip}}", ip, -1) + + // node4 + node4 := "{\n \"mediapkg\":{\n \"pkg\":\"mongodb-linux-x86_64-3.4.20.tar.gz\",\n \"pkg_md5\":\"e68d998d75df81b219e99795dec43ffb\"\n },\n \"ip\":\"{{ip}}\",\n \"port\":27004,\n \"dbVersion\":\"3.4.20\",\n \"instanceType\":\"mongod\",\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"s1\",\n \"auth\": true,\n \"clusterRole\":\"\",\n \"dbConfig\":{\n \"slowOpThresholdMs\":200,\n \"cacheSizeGB\":1,\n \"oplogSizeMB\":500,\n \"destination\":\"file\"\n }\n}" + node4 = strings.Replace(node4, "{{ip}}", ip, -1) + + node1MongodInstall := 
NewMongoDBInstall() + node2MongodInstall := NewMongoDBInstall() + node3MongodInstall := NewMongoDBInstall() + node4MongodInstall := NewMongoDBInstall() + node1Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: node1, + } + node1Runtime.SetLogger() + node2Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: node2, + } + node2Runtime.SetLogger() + node3Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: node3, + } + node3Runtime.SetLogger() + node4Runtime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: node4, + } + node4Runtime.SetLogger() + + // 安装节点 + if err := node1MongodInstall.Init(node1Runtime); err != nil { + fmt.Println(fmt.Sprintf("replicate install node1 init fail, error:%s", err)) + t.Errorf("replicate install node1 init fail, error:%s", err) + return + } + if err := node1MongodInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("replicate install node1 run fail, error:%s", err)) + t.Errorf("replicate install node1 run fail, error:%s", err) + return + } + if err := node2MongodInstall.Init(node2Runtime); err != nil { + fmt.Println(fmt.Sprintf("replicate install node2 init fail, error:%s", err)) + t.Errorf("replicate install node2 init fail, error:%s", err) + return + } + if err := node2MongodInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("replicate install node2 run fail, error:%s", err)) + t.Errorf("replicate install node2 run fail, error:%s", err) + return + } + if err := node3MongodInstall.Init(node3Runtime); err != nil { + fmt.Println(fmt.Sprintf("replicate install node3 init fail, error:%s", err)) + t.Errorf("replicate install node3 init fail, error:%s", err) + return + } + if err := node3MongodInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("replicate install node3 run fail, error:%s", err)) + t.Errorf("replicate install node3 run fail, error:%s", err) + return + } + + if err := node4MongodInstall.Init(node4Runtime); err != nil { + fmt.Println(fmt.Sprintf("replicate install node3 init fail, error:%s", err)) + t.Errorf("replicate install node3 init fail, error:%s", err) + return + } + if err := node4MongodInstall.Run(); err != nil { + fmt.Println(fmt.Sprintf("replicate install node3 run fail, error:%s", err)) + t.Errorf("replicate install node3 run fail, error:%s", err) + return + } + + // 复制集初始化 + initReplicasetParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27001,\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"setId\":\"s1\",\n \"configSvr\":false,\n \"ips\":[\n \"{{ip}}:27001\",\n \"{{ip}}:27002\",\n \"{{ip}}:27003\"\n ],\n \"priority\":{\n \"{{ip}}:27001\":1,\n \"{{ip}}:27002\":1,\n \"{{ip}}:27003\":0\n },\n \"hidden\":{\n \"{{ip}}:27001\":false,\n \"{{ip}}:27002\":false,\n \"{{ip}}:27003\":true\n }\n}" + initReplicasetParam = strings.Replace(initReplicasetParam, "{{ip}}", ip, -1) + initReplicasetRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: initReplicasetParam, + } + initReplicasetRuntime.SetLogger() + initReplicaset := NewInitiateReplicaset() + if err := initReplicaset.Init(initReplicasetRuntime); err != nil { + fmt.Println(fmt.Sprintf("replicate install initReplicaset init fail, error:%s", err)) + t.Errorf("replicate install initReplicaset init fail, error:%s", err) + return + } + if err := initReplicaset.Run(); err != nil { + fmt.Println(fmt.Sprintf("replicate install initReplicaset run fail, error:%s", err)) + t.Errorf("replicate install initReplicaset run fail, error:%s", err) + return + } + time.Sleep(time.Second * 3) + // 创建管理员用户 + addAdminUserParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27001,\n 
\"instanceType\":\"mongod\",\n \"username\":\"dba\",\n \"password\":\"dba\",\n \"adminUsername\":\"\",\n \"adminPassword\":\"\",\n \"authDb\":\"admin\",\n \"dbs\":[\n\n ],\n \"privileges\":[\n \"root\"\n ]\n}" + addAdminUserParam = strings.Replace(addAdminUserParam, "{{ip}}", ip, -1) + addAdminUserRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: addAdminUserParam, + } + addAdminUserRuntime.SetLogger() + addAdminUser := NewAddUser() + if err := addAdminUser.Init(addAdminUserRuntime); err != nil { + fmt.Println(fmt.Sprintf("replicate install addAdminUser init fail, error:%s", err)) + t.Errorf("replicate install addAdminUser init fail, error:%s", err) + return + } + if err := addAdminUser.Run(); err != nil { + fmt.Println(fmt.Sprintf("replicate install addAdminUser run fail, error:%s", err)) + t.Errorf("replicate install addAdminUser run fail, error:%s", err) + return + } + + // 创建业务用户 + addUserParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27001,\n \"instanceType\":\"mongod\",\n \"username\":\"test\",\n \"password\":\"test\",\n \"adminUsername\":\"dba\",\n \"adminPassword\":\"dba\",\n \"authDb\":\"admin\",\n \"dbs\":[\n\n ],\n \"privileges\":[\n \"readWriteAnyDatabase\"\n ]\n}" + addUserParam = strings.Replace(addUserParam, "{{ip}}", ip, -1) + addUserRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: addUserParam, + } + addUserRuntime.SetLogger() + addUser := NewAddUser() + if err := addUser.Init(addUserRuntime); err != nil { + fmt.Println(fmt.Sprintf("replicate install addUser init fail, error:%s", err)) + t.Errorf("replicate install addUser init fail, error:%s", err) + return + } + if err := addUser.Run(); err != nil { + fmt.Println(fmt.Sprintf("replicate install addUser run fail, error:%s", err)) + t.Errorf("replicate install addUser run fail, error:%s", err) + return + } + + // 删除业务用户 + delUserParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27001,\n \"instanceType\":\"mongod\",\n \"adminUsername\":\"dba\",\n \"adminPassword\":\"dba\",\n \"username\":\"test\",\n \"authDb\":\"admin\"\n}" + delUserParam = strings.Replace(delUserParam, "{{ip}}", ip, -1) + delUserRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: delUserParam, + } + delUserRuntime.SetLogger() + delUser := NewDelUser() + if err := delUser.Init(delUserRuntime); err != nil { + fmt.Println(fmt.Sprintf("replicate install delUser init fail, error:%s", err)) + t.Errorf("replicate install delUser init fail, error:%s", err) + return + } + if err := delUser.Run(); err != nil { + fmt.Println(fmt.Sprintf("replicate install delUser run fail, error:%s", err)) + t.Errorf("replicate install delUser run fail, error:%s", err) + return + } + + // 执行脚本 + execScriptParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27001,\n \"script\":\"var mongo = db;\\nmongo.getSisterDB('admin').runCommand({listDatabases:1}).databases.forEach (function (x) { print(x.name)});\\n\",\n \"type\":\"replicaset\",\n \"secondary\": false,\n \"adminUsername\":\"dba\",\n \"adminPassword\":\"dba\",\n \"repoUrl\":\"\",\n \"repoUsername\":\"\",\n \"repoToken\":\"\",\n \"repoProject\":\"\",\n \"repoRepo\":\"\",\n \"repoPath\":\"\"\n}" + execScriptParam = strings.Replace(execScriptParam, "{{ip}}", ip, -1) + execScriptRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: execScriptParam, + } + execScriptRuntime.SetLogger() + execScript := NewExecScript() + if err := execScript.Init(execScriptRuntime); err != nil { + fmt.Println(fmt.Sprintf("replicate install execScript init fail, error:%s", err)) + t.Errorf("replicate install execScript init fail, 
error:%s", err) + return + } + if err := execScript.Run(); err != nil { + fmt.Println(fmt.Sprintf("replicate install execScript run fail, error:%s", err)) + t.Errorf("replicate install execScript run fail, error:%s", err) + return + } + + // 重启节点 + restartParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27002,\n \"instanceType\":\"mongod\",\n \"singleNodeInstallRestart\":false, \n \"auth\":true,\n \"cacheSizeGB\": 2,\n \"mongoSConfDbOld\":\"\",\n \"MongoSConfDbNew\":\"\",\n \"adminUsername\":\"dba\",\n \"adminPassword\":\"dba\"\n}" + restartParam = strings.Replace(restartParam, "{{ip}}", ip, -1) + restartRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: restartParam, + } + restartRuntime.SetLogger() + restart := NewMongoRestart() + if err := restart.Init(restartRuntime); err != nil { + fmt.Println(fmt.Sprintf("replicate install restart init fail, error:%s", err)) + t.Errorf("replicate install restart init fail, error:%s", err) + return + } + if err := restart.Run(); err != nil { + fmt.Println(fmt.Sprintf("replicate install restart run fail, error:%s", err)) + t.Errorf("replicate install restart run fail, error:%s", err) + return + } + + time.Sleep(time.Second * 3) + // 替换节点 + replaceParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27001,\n \"sourceIP\":\"{{ip}}\",\n \"sourcePort\":27001,\n \"sourceDown\":false,\n \"adminUsername\":\"dba\",\n \"adminPassword\":\"dba\",\n \"targetIP\":\"{{ip}}\",\n \"targetPort\":27004,\n \"targetPriority\":\"\",\n \"targetHidden\":\"\"\n}" + replaceParam = strings.Replace(replaceParam, "{{ip}}", ip, -1) + replaceRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: replaceParam, + } + replaceRuntime.SetLogger() + replace := NewMongoDReplace() + if err := replace.Init(replaceRuntime); err != nil { + fmt.Println(fmt.Sprintf("replicate install replace init fail, error:%s", err)) + t.Errorf("replicate install replace init fail, error:%s", err) + return + } + if err := replace.Run(); err != nil { + fmt.Println(fmt.Sprintf("replicate install replace run fail, error:%s", err)) + t.Errorf("replicate install replace run fail, error:%s", err) + return + } + + time.Sleep(time.Second * 3) + // 主从切换 + stepDownParam := "{\n \"ip\":\"{{ip}}\",\n \"port\":27002,\n \"adminUsername\":\"dba\",\n \"adminPassword\":\"dba\"\n}" + stepDownParam = strings.Replace(stepDownParam, "{{ip}}", ip, -1) + stepDownRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: stepDownParam, + } + stepDownRuntime.SetLogger() + stepDown := NewStepDown() + if err := stepDown.Init(stepDownRuntime); err != nil { + fmt.Println(fmt.Sprintf("replicate install stepDown init fail, error:%s", err)) + t.Errorf("replicate install stepDown init fail, error:%s", err) + return + } + if err := stepDown.Run(); err != nil { + fmt.Println(fmt.Sprintf("replicate install stepDown run fail, error:%s", err)) + t.Errorf("replicate install stepDown run fail, error:%s", err) + return + } + + time.Sleep(time.Second * 3) + // 下架 + for _, i := range []int{27003, 27002, 27004} { + deinstallParam := fmt.Sprintf("{\n \"ip\":\"{{ip}}\",\n \"port\":%d,\n \"app\":\"test\",\n \"areaId\":\"test1\",\n \"nodeInfo\":[\n \"{{ip}}\"\n ],\n \"instanceType\":\"mongod\"\n}", i) + deinstallParam = strings.Replace(deinstallParam, "{{ip}}", ip, -1) + deinstallRuntime := &jobruntime.JobGenericRuntime{ + PayloadDecoded: deinstallParam, + } + deinstallRuntime.SetLogger() + deinstal := NewDeInstall() + if err := deinstal.Init(deinstallRuntime); err != nil { + fmt.Println(fmt.Sprintf("replicate install deinstal port:%d init fail, 
error:%s", i, err)) + t.Errorf("replicate install deinstal port:%d init fail, error:%s", i, err) + return + } + if err := deinstal.Run(); err != nil { + fmt.Println(fmt.Sprintf("replicate install deinstal port:%d run fail, error:%s", i, err)) + t.Errorf("replicate install deinstal port:%d run fail, error:%s", i, err) + return + } + } + + // 删除相关目录 + dbData := path.Join(consts.GetMongoDataDir(), "mongodata") + dbLog := path.Join(consts.GetMongoBackupDir(), "mongolog") + softInstall := path.Join(consts.UsrLocal, "mongodb") + cmd := fmt.Sprintf("rm -rf %s;rm -rf %s;rm -rf %s", dbData, dbLog, softInstall) + if _, err = util.RunBashCmd(cmd, "", nil, 10*time.Second); err != nil { + fmt.Println(fmt.Sprintf("delete directories fail, error:%s", err)) + t.Errorf("delete directories fail, error:%s", err) + } + +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/replicaset_stepdown.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/replicaset_stepdown.go new file mode 100644 index 0000000000..465f085d21 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb/replicaset_stepdown.go @@ -0,0 +1,129 @@ +package atommongodb + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "encoding/json" + "fmt" + "path/filepath" + "strconv" + "strings" + + "github.com/go-playground/validator/v10" +) + +// StepDownConfParams 参数 +type StepDownConfParams struct { + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` + AdminUsername string `json:"adminUsername" validate:"required"` + AdminPassword string `json:"adminPassword" validate:"required"` +} + +// StepDown 复制集主备切换 +type StepDown struct { + runtime *jobruntime.JobGenericRuntime + BinDir string + Mongo string + OsUser string + PrimaryIP string + PrimaryPort int + ConfParams *StepDownConfParams +} + +// NewStepDown 实例化结构体 +func NewStepDown() jobruntime.JobRunner { + return &StepDown{} +} + +// Name 获取原子任务的名字 +func (s *StepDown) Name() string { + return "replicaset_stepdown" +} + +// Run 运行原子任务 +func (s *StepDown) Run() error { + // 执行主备切换 + if err := s.execStepDown(); err != nil { + return err + } + + return nil +} + +// Retry 重试 +func (s *StepDown) Retry() uint { + return 2 +} + +// Rollback 回滚 +func (s *StepDown) Rollback() error { + return nil +} + +// Init 初始化 +func (s *StepDown) Init(runtime *jobruntime.JobGenericRuntime) error { + // 获取安装参数 + s.runtime = runtime + s.runtime.Logger.Info("start to init") + s.BinDir = consts.UsrLocal + s.Mongo = filepath.Join(s.BinDir, "mongodb", "bin", "mongo") + s.OsUser = consts.GetProcessUser() + + // 获取MongoDB配置文件参数 + if err := json.Unmarshal([]byte(s.runtime.PayloadDecoded), &s.ConfParams); err != nil { + s.runtime.Logger.Error(fmt.Sprintf( + "get parameters of stepDown fail by json.Unmarshal, error:%s", err)) + return fmt.Errorf("get parameters of stepDown fail by json.Unmarshal, error:%s", err) + } + + // 获取primary信息 + info, err := common.AuthGetPrimaryInfo(s.OsUser, s.Mongo, s.ConfParams.AdminUsername, s.ConfParams.AdminPassword, + s.ConfParams.IP, s.ConfParams.Port) + if err != nil { + s.runtime.Logger.Error(fmt.Sprintf( + "get primary db info of stepDown fail, error:%s", err)) + return fmt.Errorf("get primary db info of stepDown fail, error:%s", err) + } + getInfo := strings.Split(info, ":") + s.PrimaryIP = getInfo[0] + s.PrimaryPort, _ = strconv.Atoi(getInfo[1]) + + // 进行校验 + 
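// Illustrative payload validated below (shape per StepDownConfParams; values are + // examples mirroring the replica-set test): + //   {"ip":"<ip>","port":27002,"adminUsername":"dba","adminPassword":"dba"} +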
 + // validate the parameters + s.runtime.Logger.Info("start to validate parameters") + if err = s.checkParams(); err != nil { + return err + } + + return nil +} + +// checkParams validates the parameters +func (s *StepDown) checkParams() error { + // validate the job parameters + validate := validator.New() + s.runtime.Logger.Info("start to validate parameters of stepDown") + if err := validate.Struct(s.ConfParams); err != nil { + s.runtime.Logger.Error(fmt.Sprintf("validate parameters of stepDown fail, error:%s", err)) + return fmt.Errorf("validate parameters of stepDown fail, error:%s", err) + } + return nil +} + +// execStepDown performs the switchover +func (s *StepDown) execStepDown() error { + s.runtime.Logger.Info("start to convert primary secondary db") + flag, err := common.AuthRsStepDown(s.OsUser, s.Mongo, s.PrimaryIP, s.PrimaryPort, s.ConfParams.AdminUsername, + s.ConfParams.AdminPassword) + if err != nil { + s.runtime.Logger.Error("convert primary secondary db fail, error:%s", err) + return fmt.Errorf("convert primary secondary db fail, error:%s", err) + } + if flag { + s.runtime.Logger.Info("convert primary secondary db successfully") + } + + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/atomproxy.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/atomproxy.go new file mode 100644 index 0000000000..e6978a31fb --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/atomproxy.go @@ -0,0 +1,2 @@ +// Package atomproxy proxy atom jobs +package atomproxy diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/predixy_install.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/predixy_install.go new file mode 100644 index 0000000000..07e6ef5ae0 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/predixy_install.go @@ -0,0 +1,394 @@ +package atomproxy + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" + "time" + + "github.com/go-playground/validator/v10" + "github.com/pkg/errors" +) + +/* +1. pre-install checks + collect hostname, memory and cpu info + check the IP address + gather: app, ip, port, predixy version, predixy password, backend redis password, backend server list (ip:port) + +2. create the directories + /usr/local/predixy/bin/predixy /data/predixy/50004/predixy.conf + /data/predixy/50004/logs + +3. fetch the install package + predixy-1.0.5.tar.gz is placed under /usr/local + +4. unpack the package + the install dir is /usr/local + create the symlink: ln -s /usr/local/predixy-1.0.5 predixy + +5. render the predixy config file and write the backend info into it + +6. start the service + /usr/local/predixy/bin/start_predixy.sh $port +*/ + +// PredixyPortMin minimum predixy port +const PredixyPortMin = 50000 + +// PredixyPortMax maximum predixy port +const PredixyPortMax = 59999 + +// DefaultPerm default permission of created directories and files +const DefaultPerm = 0755 + +// PredixyConfParams predixy config file parameters +type PredixyConfParams struct { + common.MediaPkg `json:"mediapkg"` + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` + PredixyPasswd string `json:"predixypasswd" validate:"required"` + RedisPasswd string `json:"redispasswd" validate:"required"` + Servers []string `json:"servers" validate:"required"` + DbConfig struct { + WorkerThreads string `json:"workerthreads" validate:"required"` + ClientTimeout string `json:"clienttimeout"` + RefreshInterval string `json:"refreshinterval" validate:"required"` + ServerFailureLimit string `json:"serverfailurelimit" validate:"required"` + ServerRetryTimeout string 
`json:"serverretrytimeout" validate:"required"` + KeepAlive string `json:"keepalive"` + ServerTimeout string `json:"servertimeout"` + } `json:"dbconfig" validate:"required"` +} + +// PredixyInstall installs predixy +type PredixyInstall struct { + runtime *jobruntime.JobGenericRuntime + BinDir string + DataDir string + OsUser string // os user predixy is installed under + OsGroup string + ConfParams *PredixyConfParams + ConfDir string + ConfFilePath string + ConfFileContent string + LogDir string + InstallFilePath string +} + +// NewPredixyInstall creates the job struct +func NewPredixyInstall() jobruntime.JobRunner { + return &PredixyInstall{} +} + +// Init initializes the job +func (p *PredixyInstall) Init(runtime *jobruntime.JobGenericRuntime) error { + // load the install parameters + p.BinDir = consts.UsrLocal + p.DataDir = consts.GetRedisDataDir() + p.OsUser = consts.MysqlAaccount + p.OsGroup = consts.MysqlGroup + p.runtime = runtime + p.runtime.Logger.Info("start to init") + + // parse the predixy config file parameters + if err := json.Unmarshal([]byte(p.runtime.PayloadDecoded), &p.ConfParams); err != nil { + p.runtime.Logger.Error(fmt.Sprintf( + "%s:get parameters of predixy config file fail by json.Unmarshal, error:%v", p.Name(), err)) + return fmt.Errorf( + "%s:get parameters of predixy config file fail by json.Unmarshal, error:%v", p.Name(), err) + } + // build the file paths + p.ConfDir = fmt.Sprintf("%s/predixy/%d", p.DataDir, p.ConfParams.Port) + p.ConfFilePath = fmt.Sprintf("%s/predixy.conf", p.ConfDir) + p.LogDir = fmt.Sprintf("%s/logs", p.ConfDir) + p.InstallFilePath = p.ConfParams.MediaPkg.GetAbsolutePath() + p.getConfFileContent() + p.runtime.Logger.Info("init successfully") + // validate the parameters + if err := p.checkParams(); err != nil { + return err + } + + return nil +} + +// getConfFileContent renders the config file content +func (p *PredixyInstall) getConfFileContent() { + p.runtime.Logger.Info("start to make config file content") + // config template + conf := common.PredixConf + // fill in the template placeholders + bind := fmt.Sprintf("%s:%s", p.ConfParams.IP, strconv.Itoa(p.ConfParams.Port)) + log := fmt.Sprintf("%s/log", p.LogDir) + var servers string + for _, v := range p.ConfParams.Servers { + servers += fmt.Sprintf(" + %s\n", v) + } + conf = strings.Replace(conf, "{{ip:port}}", bind, -1) + conf = strings.Replace(conf, "{{predixy_password}}", p.ConfParams.PredixyPasswd, -1) + conf = strings.Replace(conf, "{{log_path}}", log, -1) + conf = strings.Replace(conf, "{{redis_password}}", p.ConfParams.RedisPasswd, -1) + conf = strings.Replace(conf, "{{server:port}}", servers, -1) + conf = strings.Replace(conf, "{{worker_threads}}", p.ConfParams.DbConfig.WorkerThreads, -1) + conf = strings.Replace(conf, "{{server_timeout}}", p.ConfParams.DbConfig.ServerTimeout, -1) + conf = strings.Replace(conf, "{{keep_alive}}", p.ConfParams.DbConfig.KeepAlive, -1) + conf = strings.Replace(conf, "{{client_timeout}}", p.ConfParams.DbConfig.ClientTimeout, -1) + conf = strings.Replace(conf, "{{refresh_interval}}", + p.ConfParams.DbConfig.RefreshInterval, -1) + conf = strings.Replace(conf, "{{server_failure_limit}}", + p.ConfParams.DbConfig.ServerFailureLimit, -1) + conf = strings.Replace(conf, "{{server_retry_timeout}}", + p.ConfParams.DbConfig.ServerRetryTimeout, -1) + p.ConfFileContent = conf + p.runtime.Logger.Info("make config file content successfully") +} + +// checkParams validates the parameters +func (p *PredixyInstall) checkParams() error { + p.runtime.Logger.Info("start to validate parameters") + // validate the predixy config file parameters + validate := validator.New() + if err := validate.Struct(p.ConfParams); err != nil { + p.runtime.Logger.Error( + fmt.Sprintf("%s:validate parameters of predixy config file fail, error:%s", p.Name(), err)) + return fmt.Errorf("%s:validate parameters of predixy config file fail, error:%s", p.Name(), err) + } + // check that the port is within the allowed range + if p.ConfParams.Port < PredixyPortMin || p.ConfParams.Port > PredixyPortMax { + p.runtime.Logger.Error(fmt.Sprintf( + "%s:validate parameters of predixy config file fail, port is not within default range [%d,%d]", + p.Name(), PredixyPortMin, PredixyPortMax)) + return errors.New(fmt.Sprintf( + "%s:validate parameters of predixy config file fail, port is not within default range [%d,%d]", + p.Name(), PredixyPortMin, PredixyPortMax)) + } + + // check the servers + if len(p.ConfParams.Servers) < 2 { + p.runtime.Logger.Error(fmt.Sprintf( + "%s:validate parameters of predixy config file fail, the number of servers is incorrect", p.Name())) + return errors.New(fmt.Sprintf( + "%s:validate parameters of predixy config file fail, the number of servers is incorrect", p.Name())) + } + // check that the install package exists and that its md5 matches + if flag := util.FileExists(p.InstallFilePath); !flag { + p.runtime.Logger.Error(fmt.Sprintf("%s:validate install file, %s does not exist", p.Name(), p.InstallFilePath)) + return errors.New(fmt.Sprintf("%s:validate install file, %s does not exist", p.Name(), p.InstallFilePath)) + } + md5, _ := util.GetFileMd5(p.InstallFilePath) + if p.ConfParams.MediaPkg.PkgMd5 != md5 { + p.runtime.Logger.Error(fmt.Sprintf("%s:validate install file md5 fail, md5 is incorrect", p.Name())) + return errors.New(fmt.Sprintf("%s:validate install file md5 fail, md5 is incorrect", p.Name())) + } + // check whether the port is already in use + flag, _ := util.CheckPortIsInUse(p.ConfParams.IP, strconv.Itoa(p.ConfParams.Port)) + if flag { + // check whether the listener is a predixy process + cmd := fmt.Sprintf("netstat -ntpl |grep %d | awk '{print $7}'", p.ConfParams.Port) + result, _ := util.RunBashCmd(cmd, "", nil, 10*time.Second) + if strings.Contains(result, "predixy") { + // compare the existing config file with the newly rendered content + content, _ := ioutil.ReadFile(p.ConfFilePath) + if strings.Compare(string(content), p.ConfFileContent) == 0 { + p.runtime.Logger.Info("predixy port:%d has been installed", p.ConfParams.Port) + os.Exit(0) + } + + } + p.runtime.Logger.Error(fmt.Sprintf("%s:validate parameters of predixy config file fail, port:%s is used", + p.Name(), strconv.Itoa(p.ConfParams.Port))) + return fmt.Errorf("%s:validate parameters of predixy config file fail, port:%d is used", + p.Name(), p.ConfParams.Port) + } + p.runtime.Logger.Info("validate parameters successfully") + return nil +} + +// Name returns the name of the atom job +func (p *PredixyInstall) Name() string { + return "predixy_install" +} + +// Run runs the atom job +func (p *PredixyInstall) Run() error { + // create the directories and chown them + if err := p.mkdir(); err != nil { + return err + } + // unpack the package, create the symlink and chown the directories + if err := p.unTarAndCreateSoftLink(); err != nil { + return err + } + // render and write the config file + if err := p.createConfFile(); err != nil { + return err + } + // write the exporter config file + if err := p.mkExporterConfigFile(); err != nil { + return errors.Wrap(err, "mkExporterConfigFile") + } + // start the service + if err := p.startup(); err != nil { + return err + } + + return nil +} + +// mkdir creates the directories and chowns them +func (p *PredixyInstall) mkdir() error { + p.runtime.Logger.Info("start to create directories") + // create the log directory + if err := util.MkDirsIfNotExistsWithPerm([]string{p.LogDir}, DefaultPerm); err != nil { + p.runtime.Logger.Error(fmt.Sprintf("%s:create directory fail, error:%s", p.Name(), err)) + return errors.New(fmt.Sprintf("%s:create directory fail, error:%s", p.Name(), err)) + } + // change the owner + if _, err := util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s/predixy", p.OsUser, p.OsGroup, p.DataDir), + "", nil, + 10*time.Second); err != nil { + p.runtime.Logger.Error(fmt.Sprintf("%s:chown directory fail, error:%s", p.Name(), err)) + return errors.New(fmt.Sprintf("%s:chown directory fail, error:%s", p.Name(), err)) + } + p.runtime.Logger.Info("create directories successfully") + return nil +} + +// unTarAndCreateSoftLink unpacks the package, creates the symlink and chowns the directories +func (p *PredixyInstall) unTarAndCreateSoftLink() error { + if !util.FileExists(fmt.Sprintf("%s/%s", p.BinDir, p.ConfParams.MediaPkg.GePkgBaseName())) { + // unpack under /usr/local + p.runtime.Logger.Info("start to unTar install package") + tarCmd := fmt.Sprintf("tar -zxf %s -C %s", p.InstallFilePath, p.BinDir) + if _, err := util.RunBashCmd(tarCmd, "", nil, 10*time.Second); err != nil { + p.runtime.Logger.Error(fmt.Sprintf("%s:untar install file fail, error:%s", p.Name(), err)) + return errors.New(fmt.Sprintf("%s:untar install file fail, error:%s", p.Name(), err)) + } + p.runtime.Logger.Info("unTar install package successfully") + } + + if !util.FileExists(fmt.Sprintf("%s/predixy", p.BinDir)) { + // create the symlink + p.runtime.Logger.Info("start to create soft link of install package") + baseName := p.ConfParams.MediaPkg.GePkgBaseName() + softLink := fmt.Sprintf("ln -s %s/%s %s/predixy", p.BinDir, baseName, p.BinDir) + if _, err := util.RunBashCmd(softLink, "", nil, 10*time.Second); err != nil { + p.runtime.Logger.Error(fmt.Sprintf("%s:install file create softlink fail, error:%s", p.Name(), err)) + return errors.New(fmt.Sprintf("%s:install file create softlink fail, error:%s", p.Name(), err)) + } + // change the owner + if _, err := util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s/predixy", p.OsUser, p.OsGroup, p.BinDir), + "", nil, + 10*time.Second); err != nil { + p.runtime.Logger.Error(fmt.Sprintf("%s:chown softlink directory fail, error:%s", p.Name(), err)) + return errors.New(fmt.Sprintf("%s:chown softlink directory fail, error:%s", p.Name(), err)) + } + if _, err := util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s/%s", p.OsUser, p.OsGroup, p.BinDir, p.ConfParams.MediaPkg.GePkgBaseName()), + "", nil, + 10*time.Second); err != nil { + p.runtime.Logger.Error(fmt.Sprintf("%s:chown untar directory fail, error:%s", p.Name(), err)) + return errors.New(fmt.Sprintf("%s:chown untar directory fail, error:%s", p.Name(), err)) + } + // patch the data path inside the startup script + if _, err := util.RunBashCmd( + fmt.Sprintf("sed -i \"s/\\/data/\\%s/g\" %s/predixy/bin/start_predixy.sh", p.DataDir, p.BinDir), + "", nil, + 10*time.Second); err != nil { + p.runtime.Logger.Error(fmt.Sprintf("%s:modify start_predixy.sh fail, error:%s", p.Name(), err)) + return errors.New(fmt.Sprintf("%s:modify start_predixy.sh fail, error:%s", p.Name(), err)) + } + p.runtime.Logger.Info("create soft link of install package successfully") + } + return nil + +} + +// createConfFile writes the rendered config file +func (p *PredixyInstall) createConfFile() error { + // create the config file + p.runtime.Logger.Info("start to create config file") + file, err := os.OpenFile(p.ConfFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, DefaultPerm) + if err != nil { + p.runtime.Logger.Error(fmt.Sprintf("%s:create config file fail, error:%s", p.Name(), err)) + return errors.New(fmt.Sprintf("%s:create config file fail, error:%s", p.Name(), err)) + } + defer file.Close() + + if _, err = file.WriteString(p.ConfFileContent); err != nil { + p.runtime.Logger.Error(fmt.Sprintf("%s:config file write content fail, error:%s", p.Name(), err)) + return errors.New(fmt.Sprintf("%s:config file write content fail, error:%s", p.Name(), err)) + } + // change the owner of the config file + if _, err = util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s", p.OsUser, p.OsGroup, p.ConfFilePath), + "", nil, + 10*time.Second); err != nil { + p.runtime.Logger.Error(fmt.Sprintf("%s:chown config file fail, error:%s", p.Name(), err)) + return errors.New(fmt.Sprintf("%s:chown config file fail, error:%s", p.Name(), err)) + } + p.runtime.Logger.Info("create config file successfully") + return nil +} + +// mkExporterConfigFile writes the exporter config file +// format: { "ip:port" : "password" } +func (p *PredixyInstall) mkExporterConfigFile() error { + data := make(map[string]string) + key := fmt.Sprintf("%s:%d", p.ConfParams.IP, p.ConfParams.Port) + data[key] = p.ConfParams.PredixyPasswd + return common.WriteExporterConfigFile(p.ConfParams.Port, data) +} + +// startup starts the service +func (p *PredixyInstall) startup() error { + // start the process + p.runtime.Logger.Info("start to startup process") + startCmd := fmt.Sprintf("su %s -c \"%s/predixy/bin/start_predixy.sh %d\"", p.OsUser, p.BinDir, + p.ConfParams.Port) + p.runtime.Logger.Info("startup predixy, run %s", startCmd) + if _, err := util.RunBashCmd(startCmd, "", nil, 10*time.Second); err != nil { + p.runtime.Logger.Error(fmt.Sprintf("%s:startup predixy service, error:%s", p.Name(), err)) + return errors.New(fmt.Sprintf("%s:startup predixy service, error:%s", p.Name(), err)) + } + // publish the predixy binary path via /etc/profile + etcProfilePath := "/etc/profile" + addEtcProfile := fmt.Sprintf(` +if ! grep -i %s/predixy/bin: %s; +then +echo "export PATH=%s/predixy/bin:\$PATH" >> %s +fi`, p.BinDir, etcProfilePath, p.BinDir, etcProfilePath) + p.runtime.Logger.Info(addEtcProfile) + if _, err := util.RunBashCmd(addEtcProfile, "", nil, 10*time.Second); err != nil { + p.runtime.Logger.Error(fmt.Sprintf("%s:binary path add in /etc/profile, error:%s", p.Name(), err)) + return errors.New(fmt.Sprintf("%s:binary path add in /etc/profile, error:%s", p.Name(), err)) + } + p.runtime.Logger.Info("startup process successfully") + return nil +} + +// Retry retry times +func (p *PredixyInstall) Retry() uint { + return 2 +} + +// Rollback rollback +func (p *PredixyInstall) Rollback() error { + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/predixy_operate.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/predixy_operate.go new file mode 100644 index 0000000000..bf7bfd26ee --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/predixy_operate.go @@ -0,0 +1,176 @@ +package atomproxy + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "path/filepath" + "strconv" + "time" + + "github.com/go-playground/validator/v10" +) + +// PredixyDir predixy dir +const PredixyDir = "predixy" + +// PredixyOperateParams start/stop parameters +type PredixyOperateParams struct { + // common.MediaPkg + // DataDirs []string `json:"data_dirs"` // /data /data1 + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` // a single port only + Operate string `json:"operate" validate:"required"` + Debug bool `json:"debug"` +} + +// PredixyOperate predixy start/stop atom job +type PredixyOperate struct { + runtime *jobruntime.JobGenericRuntime + params *PredixyOperateParams +} + 
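+// For reference (hedged sketch, not part of the original change), a payload for this job as +// implied by PredixyOperateParams above looks roughly like: +// { +// "ip": "x.x.x.x", +// "port": 50004, +// "operate": "...", // one of the consts.ProxyStart / consts.ProxyStop / consts.ProxyShutdown literals +// "debug": false +// } +// The exact operate strings are defined in the consts package. + +// NewPredixyOperate new 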
+func NewPredixyOperate() jobruntime.JobRunner { + return &PredixyOperate{} +} + +// Init initializes the job +func (job *PredixyOperate) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + // validate the parameters + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("PredixyOperate Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("PredixyOperate Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + } + if job.params.Port < PredixyPortMin || job.params.Port > PredixyPortMax { + err = fmt.Errorf("checkParams. Port(%d) must in range [%d,%d]", job.params.Port, PredixyPortMin, PredixyPortMax) + job.runtime.Logger.Error(err.Error()) + return err + } + + return nil +} + +// Name returns the atom job name +func (job *PredixyOperate) Name() string { + return "predixy_operate" +} + +// Run runs the atom job +func (job *PredixyOperate) Run() (err error) { + port := job.params.Port + op := job.params.Operate + execUser := consts.MysqlAaccount + binPath := getPathWitChRoot("", consts.UsrLocal, PredixyDir, "bin") + stopScript := filepath.Join(binPath, "stop_predixy.sh") + startScript := filepath.Join(binPath, "start_predixy.sh") + cmd := []string{} + + running, err := job.IsPredixyRunning(port) + if err != nil { + return err + } + job.runtime.Logger.Info("check predixy %d before exec cmd. status is %v", port, running) + if op == consts.ProxyStart { + if running { + return nil + } + cmd = []string{"su", execUser, "-c", fmt.Sprintf("%s %s", startScript, strconv.Itoa(port))} + } else { + // stop or shutdown + if !running { + return nil + } + cmd = []string{"su", execUser, "-c", fmt.Sprintf("%s %s", stopScript, strconv.Itoa(port))} + } + _, err = util.RunLocalCmd(cmd[0], cmd[1:], "", + nil, 10*time.Second) + job.runtime.Logger.Info(fmt.Sprintf("%s Process %s", op, cmd)) + if err != nil { + return err + } + time.Sleep(5 * time.Second) + + running, err = job.IsPredixyRunning(port) + job.runtime.Logger.Info("check predixy %d after exec cmd. status is %v", port, running) + if running && op == consts.ProxyStart { + return nil + } else if !running && op == consts.ProxyStop { + return nil + } else if !running && op == consts.ProxyShutdown { + // remove the exporter config file; a failure is only warned about and does not abort + if err := common.DeleteExporterConfigFile(port); err != nil { + job.runtime.Logger.Warn("predixy %d DeleteExporterConfigFile return err:%v", port, err) + } else { + job.runtime.Logger.Info("predixy %d DeleteExporterConfigFile success", port) + } + return job.DirBackup(execUser, port) + } else { + return err + } +} + +// Retry times +func (job *PredixyOperate) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *PredixyOperate) Rollback() error { + return nil +} + +// IsPredixyRunning checks whether the process is running +func (job *PredixyOperate) IsPredixyRunning(port int) (installed bool, err error) { + portIsUse, err := util.CheckPortIsInUse(job.params.IP, strconv.Itoa(port)) + return portIsUse, err +} + +// DirBackup backs up the instance directory +func (job *PredixyOperate) DirBackup(execUser string, port int) error { + job.runtime.Logger.Info("mv %d dir begin.", port) + if job.params.Debug { + return nil + } + dataDir := getPathWitChRoot("", consts.DataPath, PredixyDir) + insDir := fmt.Sprintf("%s/%d", dataDir, port) + // check whether the directory exists + job.runtime.Logger.Info("check predixy ins dir[%s] exists.", insDir) + exist := util.FileExists(insDir) + if !exist { + job.runtime.Logger.Info("dir %s is not exists", insDir) + return nil + } + mvCmd := fmt.Sprintf("mv %s/%d %s/bak_%d_%s", dataDir, port, dataDir, port, time.Now().Format("20060102150405")) + job.runtime.Logger.Info(mvCmd) + cmd := []string{"su", execUser, "-c", mvCmd} + _, _ = util.RunLocalCmd(cmd[0], cmd[1:], "", + nil, 10*time.Second) + time.Sleep(10 * time.Second) + exist = util.FileExists(insDir) + if !exist { + job.runtime.Logger.Info("mv Predixy port[%d] dir succeeded", port) + return nil + } + job.runtime.Logger.Info("mv Predixy port[%d] dir failed", port) + return fmt.Errorf("Predixy port[%d] dir [%s] still exists, please check", port, insDir) +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_check_backends.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_check_backends.go new file mode 100644 index 0000000000..0a23086b5f --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_check_backends.go @@ -0,0 +1,150 @@ +package atomproxy + +import ( + "crypto/md5" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "sort" + "strings" + "time" + + "github.com/go-playground/validator/v10" +) + +/* + Verify that all proxies see the same backends; used to check steps such as switchover and scale-out/scale-in. + { + "instances":[{"ip":"","port":50000,"admin_port":51000}] + } +*/ + +// ProxyInstances one proxy instance address +type ProxyInstances struct { + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` + AdminPort int `json:"admin_port"` +} + +// ProxyCheckParam check parameters +type ProxyCheckParam struct { + Instances []ProxyInstances +} + +// TwemproxyCheckBackends atom job +type TwemproxyCheckBackends struct { + runtime *jobruntime.JobGenericRuntime + params ProxyCheckParam +} + +// NewTwemproxySceneCheckBackends new +func NewTwemproxySceneCheckBackends() jobruntime.JobRunner { + return &TwemproxyCheckBackends{} +} + +// Init initializes the job +func (job *TwemproxyCheckBackends) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + // validate the parameters + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error( + "TwemproxyCheckBackends Init params validate failed InvalidValidationError,err:%v,params:%+v", + err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("TwemproxyCheckBackends Init params validate failed ValidationErrors,err:%v,params:%+v", + err, job.params) + return err + } + } + return nil +} + +// Name returns the atom job name +func (job *TwemproxyCheckBackends) Name() string { + return "twemproxy_check_backends" +} + +// Run runs the atom job +func (job *TwemproxyCheckBackends) Run() (err error) { + md5s := map[string][]string{} + for idx, p := range job.params.Instances { + md5Val := job.getTwemproxyMd5(fmt.Sprintf("%s:%d", p.IP, p.Port+1000)) + if _, ok := md5s[md5Val]; !ok { + md5s[md5Val] = []string{} + } + md5s[md5Val] = append(md5s[md5Val], p.IP) + job.runtime.Logger.Info(fmt.Sprintf("get {%s} nosqlproxy servers md5 %d:%s", p.IP, idx, md5Val)) + } + + if len(md5s) > 1 { + x, _ := json.Marshal(md5s) + return fmt.Errorf("some proxy failed for servers:{%s}", string(x)) + } + + job.runtime.Logger.Info(fmt.Sprintf("all twemproxy %+v got same nosqlproxy servers md5 %+v", job.params, md5s)) + return nil +} + +// Retry times +func (job *TwemproxyCheckBackends) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *TwemproxyCheckBackends) Rollback() error { + return nil +} + +func (job *TwemproxyCheckBackends) getTwemproxyMd5(addr string) string { + // dial the proxy admin address + conn, err := net.DialTimeout("tcp", addr, time.Second*2) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("dial failed, {%s} err:%v\n", addr, err)) + return fmt.Sprintf("Dial{%s}Failed:%+v", addr, err) + } + + // send the query + _, err = io.WriteString(conn, "get nosqlproxy servers") + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("write string failed, err:%v\n", err)) + return fmt.Sprintf("Write{%s}Failed:%+v", addr, err) + } + + // read the response; one line per backend, e.g. 1.1.x.a:30000 tgalive 0-17499 1 + rsp, err := ioutil.ReadAll(conn) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("read string failed, err:%v\n", err)) + return fmt.Sprintf("Read{%s}Failed:%+v", addr, err) + } + segs := []string{} + for _, seg := range strings.Split(string(rsp), "\n") { + segInfo := strings.Split(seg, " ") + if len(segInfo) != 4 { + return fmt.Sprintf("GetServersFailed:%s|%+v", addr, seg) + } + segs = append(segs, fmt.Sprintf("%s|%s", segInfo[0], segInfo[2])) + } + sort.Strings(segs) + + data, _ := json.Marshal(segs) + // compute the md5 of the sorted server list and return it as hex + sum := md5.Sum(data) + md5Hex := fmt.Sprintf("%x", sum) + job.runtime.Logger.Info(fmt.Sprintf("proxy {%s} has backends servers md5:%s:%s", addr, md5Hex, data)) + return md5Hex +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_install.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_install.go new file mode 100644 index 0000000000..7bf28d11b8 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_install.go @@ -0,0 +1,632 @@ +package atomproxy + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + 
"io/ioutil" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/go-playground/validator/v10" + "github.com/google/go-cmp/cmp" + "github.com/pkg/errors" +) + +/* + TwemproxyInstall 原子任务 + 检查参数 + 检查环境 + 端口未被监听 + 解压介质 + 创建目录 + 生成配置 + - 如果配置已经存在且相同,可以通过. + 拉起进程 + - 如果已经拉起,可以通过. + + binDir 二进制安装后的目录 + datadir 数据目录 +*/ + +// + +// twemproxyPortMin twemproxy min port +const twemproxyPortMin = 50000 + +// twemproxyPortMax twemproxy max port +const twemproxyPortMax = 59999 + +// twemproxyPrefix prefix +const twemproxyPrefix = "twemproxy" + +// defaultFileMode file mode +const defaultFileMode = 0755 // 默认创建的文件Mode + +// twemproxyDir twemproxy ,在gcs上是twemproxy-0.2.4. 为了不改拉起脚本,继续用这个. +const twemproxyDir = "twemproxy-0.2.4" + +// const AdminPortIncr = 1000 // 管理端口,是端口+1000 + +// TwemproxyInstallParams 安装参数 +type TwemproxyInstallParams struct { + common.MediaPkg + // DataDirs []string `json:"data_dirs"` // /data /data1 + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` // 只支持1个端口 + Password string `json:"password"` + RedisPassword string `json:"redis_password" ` + DbType string `json:"db_type" validate:"required"` + Servers []string `json:"servers"` + ConfConfigs map[string]interface{} `json:"conf_configs"` +} + +// twemproxyInstall install twemproxy 原子任务 +type twemproxyInstall struct { + runtime *jobruntime.JobGenericRuntime + RealDataDir string // /data/redis + BinDir string // /usr/local/redis + // ConfTemplate string // 配置模版 twemproxy 配置比较简单,不需要模板,直接由TwemproxyConf生成配置文件 + ExecUser string // 这里把User Group搞成变量,其实没必要,直接用mysql.mysql就行了 + ExecGroup string + params *TwemproxyInstallParams + Ports int + Chroot string // 不再支持Chroot了 +} + +// NewTwemproxyInstall new +func NewTwemproxyInstall() jobruntime.JobRunner { + return &twemproxyInstall{ + ExecUser: consts.MysqlAaccount, + ExecGroup: consts.MysqlGroup, + } +} + +// GetUserGroup user group +func (ti *twemproxyInstall) GetUserGroup() string { + return fmt.Sprintf("%s.%s", ti.ExecUser, ti.ExecGroup) +} + +// Init 初始化 主要是检查参数 +func (ti *twemproxyInstall) Init(m *jobruntime.JobGenericRuntime) error { + ti.runtime = m + if err := validatorParams(ti); err != nil { + return errors.Wrap(err, "validatorParams") + } + + if err := ti.checkParams(); err != nil { + return errors.Wrap(err, "checkParams") + } + return nil +} + +// Name 原子任务名 +func (ti *twemproxyInstall) Name() string { + return "twemproxy_install" +} + +func validatorParams(ti *twemproxyInstall) error { + err := json.Unmarshal([]byte(ti.runtime.PayloadDecoded), &ti.params) + if err != nil { + ti.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + + // 参数有效性检查,为了兼容部署和拉起2个阶段 + validate := validator.New() + err = validate.Struct(ti.params) + if err == nil { + return err + } + + if _, ok := err.(*validator.InvalidValidationError); ok { + ti.runtime.Logger.Error("%s Init params validate failed,err:%v,params:%+v", ti.Name(), + err, ti.params) + return errors.Wrap(err, "validator") + } + + // 如果有多个error,只返回第1个. + for _, err := range err.(validator.ValidationErrors) { + ti.runtime.Logger.Error("%s Init params validate failed,err:%v,params:%+v", ti.Name(), + err, ti.params) + return errors.Wrap(err, "validator") + } + return nil +} + +// checkParams 检查参数 +func (ti *twemproxyInstall) checkParams() (err error) { + // 要求 ti.params.MediaPkg.Pkg 要以twemproxy开头 + if !strings.HasPrefix(ti.params.MediaPkg.Pkg, twemproxyPrefix) { + err = errors.Errorf("require pkg has prefix %s. 
pkg:%s", twemproxyPrefix, ti.params.MediaPkg.Pkg) + ti.runtime.Logger.Error(err.Error()) + return err + } + + // install_twemproxy only installs; it does not render the config file or start the process, so a single port parameter is enough + if ti.params.Port < twemproxyPortMin || ti.params.Port > twemproxyPortMax { + err = fmt.Errorf("checkParams. Port(%d) must in range [%d,%d]", ti.params.Port, twemproxyPortMin, twemproxyPortMax) + ti.runtime.Logger.Error(err.Error()) + return err + } + + // TODO check password complexity; length > 4 is enough here, stricter rules belong to the frontend + // TODO check whether the ip is correct? + + // check that the servers are valid + confObj := common.NewTwemproxyConf() + if err = confObj.CheckServersValid(ti.params.Servers); err != nil { + err = fmt.Errorf("checkParams. params [servers] error: %v", err) + ti.runtime.Logger.Error(err.Error()) + return err + } + + // Check HashTag + if v, e := ti.params.ConfConfigs["hash_tag"]; e { + hashTag, ok := v.(string) + if !ok || !(hashTag == "" || hashTag == "{}") { + err = fmt.Errorf("checkParams. params [hash_tag] is not valid") + ti.runtime.Logger.Error(err.Error()) + return err + } + } + + return nil +} + +// Run runs the atom job +func (ti *twemproxyInstall) Run() (err error) { + // locate the install directory + err = ti.getRealDataDir() + if err != nil { + return errors.Wrap(err, "getRealDataDir") + } + + // unpack the package + err = ti.untarMedia() + if err != nil { + return errors.Wrap(err, "untarMedia") + } + + // create the instance directory + err = initInstanceDir(ti.RealDataDir, strconv.Itoa(ti.params.Port), ti.GetUserGroup()) + if err != nil { + return errors.Wrap(err, "initInstanceDir") + } + ti.runtime.Logger.Info("initInstanceDir success. %s", path.Join(ti.RealDataDir, strconv.Itoa(ti.params.Port))) + + // create the config file + err = ti.mkConfigFile(ti.params.Port) + if err != nil { + return errors.Wrap(err, "mkConfigFile") + } + + // create the exporter config file + err = ti.mkExporterConfigFile() + if err != nil { + return errors.Wrap(err, "mkExporterConfigFile") + } + + if err = chownDir(ti.RealDataDir, ti.GetUserGroup()); err != nil { + return errors.Wrap(err, "chownInstanceDir") + } + + if installed, _ := ti.isTwemproxyRunning(ti.params.Port); installed { + ti.runtime.Logger.Info("startProcess success. port %d already running", ti.params.Port) + return + } + + err = ti.startProcess(ti.params.Port) + if err != nil { + return errors.Wrap(err, "startProcess") + } + ti.runtime.Logger.Info("startProcess success. port %d", ti.params.Port) + return nil +} + +// getPathWitChRoot Chroot makes local testing easier +func getPathWitChRoot(chroot string, elem ...string) string { + if chroot == "" || chroot == "/" { + return filepath.Join(elem...) + } + + return filepath.Join(chroot, filepath.Join(elem...)) +} + +// doUnTar unpacks pkgAbsPath into dstPathParent, producing dstPath, and sets the owner +func doUnTar(pkgAbsPath, dstPathParent, dstPath, owner string) (err error) { + tarCmd := fmt.Sprintf("tar -zxf %s -C %s", pkgAbsPath, dstPathParent) + _, err = util.RunBashCmd(tarCmd, "", nil, 10*time.Second) + if err != nil { + return errors.Wrap(err, tarCmd) + } + return chownDir(dstPath, owner) +} + +// addOsPath appends the path to /etc/profile; /etc/profile itself also honours the Chroot +func addOsPath(path, etcProfilePath string) error { + addEtcProfile := fmt.Sprintf(` +if ! grep -i %s: %s; +then +echo "export PATH=%s:\$PATH" >> %s +fi`, path, etcProfilePath, path, etcProfilePath) + _, err := util.RunBashCmd(addEtcProfile, "", nil, 10*time.Second) + return err +} + 
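+// A side note on the helper below (reviewer sketch, not part of the change): it maps a missing +// path to (false, nil) via os.IsNotExist. On Go 1.16+ the same check is usually written with +// errors.Is, e.g.: +// if _, err := os.Stat(filePath); errors.Is(err, fs.ErrNotExist) { /* absent */ } +// os.IsNotExist is kept since the surrounding code predates that idiom. + +// fileIsExists reports whether the file exists 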
+func fileIsExists(filePath string) (e bool, err error) { + _, err = os.Stat(filePath) + if err == nil { + return true, nil + } + + if os.IsNotExist(err) { + return false, nil + } + + return false, err +} + +// untarMedia 解压安装包 +func (ti *twemproxyInstall) untarMedia() (err error) { + err = ti.params.MediaPkg.Check() + if err != nil { + ti.runtime.Logger.Error("UntarMedia failed,err:%v", err) + return + } + pkgBaseName := ti.params.MediaPkg.GePkgBaseName() + + softLink := getPathWitChRoot(ti.Chroot, consts.UsrLocal, "twemproxy") + twemproxyFullPath := getPathWitChRoot(ti.Chroot, consts.UsrLocal, pkgBaseName) + + exists, _ := fileIsExists(twemproxyFullPath) + if exists { + ti.runtime.Logger.Info("untarMedia. %s is exists, skip untar", twemproxyFullPath) + } else { + _ = doUnTar(ti.params.MediaPkg.GetAbsolutePath(), + getPathWitChRoot(ti.Chroot, consts.UsrLocal), twemproxyFullPath, ti.GetUserGroup()) + } + // 再测试一次,不存在就有问题了. + if exists, _ := fileIsExists(twemproxyFullPath); !exists { + err = fmt.Errorf("untarMedia failed. %s->%s", ti.params.MediaPkg.GetAbsolutePath(), twemproxyFullPath) + ti.runtime.Logger.Error(err.Error()) + return + } + + _, err = os.Stat(softLink) + if err != nil && os.IsNotExist(err) { + // 创建软链接 相当于 ln -s ti.BinDir softLink + err = os.Symlink(twemproxyFullPath, softLink) + if err != nil { + err = fmt.Errorf("os.Symlink failed,err:%v,dir:%s,softLink:%s", err, twemproxyFullPath, softLink) + ti.runtime.Logger.Error(err.Error()) + return + } + ti.runtime.Logger.Info("get binDir success. binDir:%s,softLink:%s", twemproxyFullPath, softLink) + } + + realLink, err := filepath.EvalSymlinks(softLink) + if err != nil { + err = fmt.Errorf("filepath.EvalSymlinks failed,err:%v,softLink:%s", err, softLink) + ti.runtime.Logger.Error(err.Error()) + return + } + + baseName := filepath.Base(realLink) + if pkgBaseName != baseName { + err = fmt.Errorf("%s 指向 %s 而不是 %s", softLink, baseName, pkgBaseName) + ti.runtime.Logger.DPanic(err.Error()) + return + } + + ti.BinDir = filepath.Join(softLink, "bin") + _ = addOsPath(ti.BinDir, getPathWitChRoot(ti.Chroot, "/etc/profile")) + _ = chownDir(softLink, ti.GetUserGroup()) + ti.runtime.Logger.Info(fmt.Sprintf("binDir:%s", ti.BinDir)) + return nil +} + +// myFindFirstMountPoint find first mountpoint in prefer order +func myFindFirstMountPoint(paths ...string) (string, error) { + for _, filePath := range paths { + if _, err := os.Stat(filePath); err != nil { + if os.IsNotExist(err) { + continue + } + } + isMountPoint, err := util.IsMountPoint(filePath) + if err != nil { + return "", fmt.Errorf("check whether mountpoint failed, filePath: %s, err: %w", filePath, err) + } + if isMountPoint { + return filePath, nil + } + } + return "", nil +} + +// getDataDir 1,如果存在 /data +// - /data是一个文件或者指向文件的link 返回错误. +// +// 2,如果不存在/data 目录,但存在/data1 MountPoint +// +// mkdir -p /data1/data +// ln -s /data1/data / +// +// 3,如果不存在 /data 目录,也不存在/data1 MountPoint +// +// mkdir -p /data +func getDataDir(dataPath string, dirs []string, userGroup string) (dataDir, dataDirReal string, err error) { + dataFileInfo, err := os.Stat(dataPath) + // data 存在 + if err == nil { + if !dataFileInfo.IsDir() { + // data存在,又不是目录,返回错误. 
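// NOTE (reviewer sketch, not part of the original change): os.Stat follows symlinks, so a + // dataPath that is a symlink to a regular file also ends up in this branch, and the ModeSymlink + // test a few lines below can never be true for a Stat result. Detecting the link itself would + // need os.Lstat, e.g.: + // if fi, lerr := os.Lstat(dataPath); lerr == nil && fi.Mode()&os.ModeSymlink != 0 { ... } + // Left as a comment only; the EvalSymlinks branch below is effectively dead but harmless. + 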
+ return "", "", errors.Errorf("%s is not a dir or link to dir", dataPath) + } + dataDir = dataPath + dataDirReal = dataPath + err = nil + if dataFileInfo.Mode()&os.ModeSymlink == os.ModeSymlink { + dataDirReal, err = filepath.EvalSymlinks(dataDir) + } + return + } + // 存在未知错误 + if !os.IsNotExist(err) { + return "", "", err + } + + // 这里如果有错误,可能存在权限访问的异常,应该中止任务 + firstMp, err := myFindFirstMountPoint(dirs...) + if err != nil { + return "", "", err + } + + if firstMp == "" { + if err = mkDir(dataPath); err != nil { + return "", "", err + } + return dataPath, dataPath, nil + } + + firstMpData := filepath.Join(firstMp, "data") + + cmdLine := fmt.Sprintf(`mkdir -p %s +ln -s %s %s`, firstMpData, firstMpData, dataPath) + + if _, err = util.RunBashCmd(cmdLine, + "", nil, 10*time.Second); err != nil { + return "", "", err + } + _ = chownDir(firstMpData, userGroup) + return + +} + +// getRealDataDir 确认redis Data Dir +func (ti *twemproxyInstall) getRealDataDir() error { + // 搞2次,第1次会尝试创建/data, 第2次还不行,就真的不行了. + _, _, _ = getDataDir(consts.DataPath, []string{consts.Data1Path}, ti.GetUserGroup()) + data, realData, err := getDataDir(consts.DataPath, []string{consts.Data1Path}, ti.GetUserGroup()) + if err != nil { + return err + } + ti.runtime.Logger.Info("getDataDir success. dataDir:%s realDataDir:%s", data, realData) + ti.RealDataDir = getPathWitChRoot(ti.Chroot, data, twemproxyDir) + return nil +} + +// mkDir 类似os.Mkdir,它有10秒超时 +func mkDir(filePath string) error { + _, err := util.RunBashCmd(fmt.Sprintf("mkdir -p %s", filePath), "", nil, 10*time.Second) + return err +} + +// chownDir 类似os.Chown,有10秒超时 +func chownDir(filePath, userGroup string) error { + _, err := util.RunBashCmd( + fmt.Sprintf("chown -R %s %s", userGroup, filePath), + "", nil, + 10*time.Second) + + return err +} + +// initInstanceDir 初始化实例文件夹 +// mkdir -p %DataDir% +// chown -R user.group %DataDir% +func initInstanceDir(realDataDir, port, userGroup string) (err error) { + instDir := filepath.Join(realDataDir, port) + err = util.MkDirsIfNotExistsWithPerm([]string{instDir}, defaultFileMode) + if err != nil { + return err + } + + return chownDir(realDataDir, userGroup) +} + +// isTwemproxyRunning 检查已经安装 +func (ti *twemproxyInstall) isTwemproxyRunning(port int) (installed bool, err error) { + portIsUse, err := util.CheckPortIsInUse(ti.params.IP, strconv.Itoa(port)) + return portIsUse, err +} + +// mkConfigFile 生成配置文件 +func (ti *twemproxyInstall) mkConfigFile(port int) error { + // /data/twemproxy-0.2.4/50010/nutcracker.50010.yml + instConfigFileName := fmt.Sprintf("nutcracker.%d.yml", port) + portStr := strconv.Itoa(port) + instConfigFilePath := filepath.Join(ti.RealDataDir, portStr, instConfigFileName) + + // ti.params.ConfConfigs + instConfig := common.NewTwemproxyConf() + + instConfig.NosqlProxy.Password = ti.params.Password + instConfig.NosqlProxy.RedisPassword = ti.params.RedisPassword + // 在Init 已经检查过了. 
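// For reference (hedged sketch, not part of the original change): the settings assembled in + // this function produce roughly the following nutcracker YAML. The exact key names are defined + // by the yaml tags of common.TwemproxyConf, and password/redis_password/slow_ms are extensions + // of this twemproxy fork rather than stock nutcracker options: + // nosqlproxy: + // listen: <ip>:<port> + // password: <proxy password> + // redis_password: <backend password> + // redis: true + // hash: fnv1a_64 + // distribution: modhash + // auto_eject_hosts: false + // server_connections: 1 + // server_failure_limit: 3 + // server_retry_timeout: 2000 + // preconnect: false + // backlog: 512 + // slow_ms: 1000000 + // servers: [...] # one entry per backend, reformatted by ReFormatTwemproxyConfServer + 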
+ newServers, _ := common.ReFormatTwemproxyConfServer(ti.params.Servers) + instConfig.NosqlProxy.Servers = newServers + instConfig.NosqlProxy.Listen = fmt.Sprintf("%s:%d", ti.params.IP, ti.params.Port) + + instConfig.NosqlProxy.SlowMs = 1000000 // 建议,经验值 + instConfig.NosqlProxy.Backlog = 512 // 建议,经验值 + // 固定参数 + instConfig.NosqlProxy.Redis = true // 必须 + instConfig.NosqlProxy.Distribution = "modhash" // 必须 + instConfig.NosqlProxy.Hash = "fnv1a_64" // 必须 + instConfig.NosqlProxy.AutoEjectHosts = false // 必须 + instConfig.NosqlProxy.ServerConnections = 1 // 必须,避免出现"后发先致"的问题 + instConfig.NosqlProxy.ServerFailureLimit = 3 // 建议,经验值 + instConfig.NosqlProxy.PreConnect = false // 建议,经验值 + instConfig.NosqlProxy.ServerRetryTimeout = 2000 // 建议,经验值 + if v, e := ti.params.ConfConfigs["hash_tag"]; e { + instConfig.NosqlProxy.HashTag, _ = v.(string) + } + + exists, err := fileIsExists(instConfigFilePath) + // 存在未知的错误 + if err != nil { + return err + } + + if exists { + currInstConfig := common.NewTwemproxyConf() + if err = currInstConfig.Load(instConfigFilePath); err != nil { + return errors.Errorf("文件已存在,且读取失败, file:%s", instConfigFilePath) + } + if !cmp.Equal(currInstConfig, instConfig) { + return errors.Errorf("文件已存在,内容不同, file:%s", instConfigFilePath) + } + ti.runtime.Logger.Info("文件已存在,但内容相同. file:%s", instConfigFilePath) + return nil + } + + err = instConfig.Save(instConfigFilePath, defaultFileMode) + if err != nil { + return errors.Errorf("写入文件失败, file:%s, err:%v", instConfigFilePath, err) + } + return nil +} + +// mkExporterConfigFile TODO +// mkConfigFile 生成Exporter的配置文件 +// 格式为 { "$ip:$port" : "password", +// +// "$ip:$port:stat" : "ip:$statPort", +// } +func (ti *twemproxyInstall) mkExporterConfigFile() error { + data := make(map[string]string) + key := fmt.Sprintf("%s:%d", ti.params.IP, ti.params.Port) + data[key] = ti.params.Password + statKey := fmt.Sprintf("%s:%d:stat", ti.params.IP, ti.params.Port) + data[statKey] = fmt.Sprintf("%s:%d", ti.params.IP, ti.params.Port+1000) + return common.WriteExporterConfigFile(ti.params.Port, data) +} + +func findLastLog(instDir string, port int) string { + dir := path.Join(instDir, "log") + prefix := fmt.Sprintf("twemproxy.%d.log.", port) + files, err := ioutil.ReadDir(dir) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + var modTime time.Time + var name string + for _, fi := range files { + if !fi.Mode().IsRegular() { + continue + } + if !strings.HasPrefix(fi.Name(), prefix) { + continue + } + + if fi.ModTime().Before(modTime) { + continue + } + + if fi.ModTime().After(modTime) { + modTime = fi.ModTime() + name = fi.Name() + } + } + return path.Join(dir, name) +} + +// startProcess 拉起实例 +func (ti *twemproxyInstall) startProcess(port int) error { + var installed bool + var err error + + instDir := filepath.Join(ti.RealDataDir, strconv.Itoa(port)) + // log is twemproxy.$PORT.err + instLogFile := filepath.Join(instDir, "log", fmt.Sprintf("twemproxy.%d.err", port)) + startScript := filepath.Join(ti.BinDir, "start_nutcracker.sh") + + installed, err = ti.isTwemproxyRunning(port) + if err != nil { + return err + } else if installed { + return nil + } + + // https://unix.stackexchange.com/questions/327551/etc-profile-not-sourced-for-sudo-su + // su - username -c "script.sh args..." 会load /etc/profile + // su username -c "script.sh args..." 
不会load /etc/profile + // twemproxy不需要再次 source/etc/profile + + startCmd := []string{"su", ti.ExecUser, "-c", fmt.Sprintf("%s %s", startScript, strconv.Itoa(port))} + + _, err = util.RunLocalCmd(startCmd[0], startCmd[1:], "", + nil, 10*time.Second) + + if err != nil { + return err + } + ti.runtime.Logger.Info(fmt.Sprintf("startProcess %s", startCmd)) + time.Sleep(2 * time.Second) + + installed, err = ti.isTwemproxyRunning(port) + if installed { + // 启动成功 + return nil + } else if err != nil { + return err + } + + // /data/twemproxy-0.2.4/50144/log/twemproxy.50144.err + logData, err := util.RunBashCmd(fmt.Sprintf("tail -3 %s", instLogFile), "", nil, 10*time.Second) + if err != nil { + return err + } + // /data/twemproxy-0.2.4/50144/log/twemproxy.50144.log.$time + var logData2 string + lastLog := findLastLog(instDir, port) + if lastLog != "" { + logData2, _ = util.RunBashCmd(fmt.Sprintf("tail -4 %s", lastLog), "", nil, 10*time.Second) + } + + err = fmt.Errorf("twemproxy(%s:%d) startup failed,errLog:%s,logData:%s", ti.params.IP, port, logData, logData2) + ti.runtime.Logger.Error(err.Error()) + return err + +} + +// Retry times +func (ti *twemproxyInstall) Retry() uint { + return 2 +} + +// Rollback rollback +func (ti *twemproxyInstall) Rollback() error { + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_install_test.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_install_test.go new file mode 100644 index 0000000000..251593a386 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_install_test.go @@ -0,0 +1,13 @@ +package atomproxy + +import ( + "dbm-services/redis/db-tools/dbactuator/mylog" + "log" + "testing" +) + +func TestGetDataDir(t *testing.T) { + mylog.UnitTestInitLog() + o1, o2, err := getDataDir("./xxx", []string{""}, "mysql.mysql") + log.Printf("getDataDir return %s %s %v", o1, o2, err) +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_operate.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_operate.go new file mode 100644 index 0000000000..9189d4a819 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy/twemproxy_operate.go @@ -0,0 +1,182 @@ +package atomproxy + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "path/filepath" + "strconv" + "time" + + "github.com/go-playground/validator/v10" +) + +/* + TwemproxyInstall 原子任务 + twemproxy进程启停 +*/ + +// + +// TwemproxyOperateParams 启停参数 +type TwemproxyOperateParams struct { + // common.MediaPkg + // DataDirs []string `json:"data_dirs"` // /data /data1 + IP string `json:"ip" validate:"required"` + Port int `json:"port" validate:"required"` // 只支持1个端口 + Operate string `json:"operate" validate:"required"` + Debug bool `json:"debug"` +} + +// TwemproxyOperate install twemproxy 原子任务 +type TwemproxyOperate struct { + runtime *jobruntime.JobGenericRuntime + params *TwemproxyOperateParams +} + +// NewTwemproxyOperate new +func NewTwemproxyOperate() jobruntime.JobRunner { + return &TwemproxyOperate{} +} + +// Init 初始化 +func (job *TwemproxyOperate) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + 
job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + // validate the parameters + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("TwemproxyOperate Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("TwemproxyOperate Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + } + if job.params.Port < twemproxyPortMin || job.params.Port > twemproxyPortMax { + err = fmt.Errorf("checkParams. Port(%d) must in range [%d,%d]", job.params.Port, twemproxyPortMin, twemproxyPortMax) + job.runtime.Logger.Error(err.Error()) + return err + } + + return nil +} + +// Name returns the atom job name +func (job *TwemproxyOperate) Name() string { + return "twemproxy_operate" +} + +// Run runs the atom job +func (job *TwemproxyOperate) Run() (err error) { + port := job.params.Port + op := job.params.Operate + execUser := consts.MysqlAaccount + binPath := getPathWitChRoot("", consts.UsrLocal, "twemproxy", "bin") + stopScript := filepath.Join(binPath, "stop_nutcracker.sh") + startScript := filepath.Join(binPath, "start_nutcracker.sh") + var cmd []string + + running, err := job.IsTwemproxyRunning(port) + job.runtime.Logger.Info("check twemproxy %d before exec cmd. status is %v", port, running) + if err != nil { + return err + } + if op == consts.ProxyStart { + if running { + return nil + } + cmd = []string{"su", execUser, "-c", fmt.Sprintf("%s %s", startScript, strconv.Itoa(port))} + } else { + // stop or shutdown + if !running { + return nil + } + cmd = []string{"su", execUser, "-c", fmt.Sprintf("%s %s", stopScript, strconv.Itoa(port))} + } + _, err = util.RunLocalCmd(cmd[0], cmd[1:], "", + nil, 10*time.Second) + job.runtime.Logger.Info(fmt.Sprintf("%s Process %s", op, cmd)) + if err != nil { + return err + } + time.Sleep(5 * time.Second) + + running, err = job.IsTwemproxyRunning(port) + job.runtime.Logger.Info("check twemproxy %d after exec cmd. status is %v", port, running) + if running && op == consts.ProxyStart { + return nil + } else if !running && op == consts.ProxyStop { + return nil + } else if !running && op == consts.ProxyShutdown { + + // remove the exporter config file; a failure is only warned about and does not abort + if err := common.DeleteExporterConfigFile(port); err != nil { + job.runtime.Logger.Warn("twemproxy %d DeleteExporterConfigFile return err:%v", port, err) + } else { + job.runtime.Logger.Info("twemproxy %d DeleteExporterConfigFile success", port) + } + + return job.DirBackup(execUser, port) + } else { + return err + } +} + +// Retry times +func (job *TwemproxyOperate) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *TwemproxyOperate) Rollback() error { + return nil +} + +// IsTwemproxyRunning checks whether the process is running +func (job *TwemproxyOperate) IsTwemproxyRunning(port int) (installed bool, err error) { + portIsUse, err := util.CheckPortIsInUse(job.params.IP, strconv.Itoa(port)) + return portIsUse, err +} + +// DirBackup backs up the instance directory +func (job *TwemproxyOperate) DirBackup(execUser string, port int) error { + job.runtime.Logger.Info("mv %d dir begin.", port) + if job.params.Debug { + return nil + } + dataDir := getPathWitChRoot("", consts.DataPath, twemproxyDir) + insDir := fmt.Sprintf("%s/%d", dataDir, port) + // check whether the directory exists + job.runtime.Logger.Info("check twemproxy ins dir[%s] exists.", insDir) + exist := util.FileExists(insDir) + if !exist { + job.runtime.Logger.Info("dir %s is not exists", insDir) + return nil + } + mvCmd := fmt.Sprintf("mv %s/%d %s/bak_%d_%s", dataDir, port, dataDir, port, time.Now().Format("20060102150405")) + job.runtime.Logger.Info(mvCmd) + cmd := []string{"su", execUser, "-c", mvCmd} + _, _ = util.RunLocalCmd(cmd[0], cmd[1:], "", + nil, 10*time.Second) + time.Sleep(10 * time.Second) + exist = util.FileExists(insDir) + if !exist { + job.runtime.Logger.Info("mv twemproxy port[%d] dir succeeded", port) + return nil + } + job.runtime.Logger.Info("mv twemproxy port[%d] dir failed", port) + return fmt.Errorf("twemproxy port[%d] dir [%s] still exists, please check", port, insDir) +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/atomredis.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/atomredis.go new file mode 100644 index 0000000000..8842935526 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/atomredis.go @@ -0,0 +1,2 @@ +// Package atomredis redis atom jobs +package atomredis diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/bkdbmon_install.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/bkdbmon_install.go new file mode 100644 index 0000000000..18be010fcc --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/bkdbmon_install.go @@ -0,0 +1,366 @@ +package atomredis + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "io/fs" + "io/ioutil" + "os" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/go-playground/validator/v10" + "gopkg.in/yaml.v2" +) + +// ConfServerItem one entry of the servers config +type ConfServerItem struct { + BkBizID string `json:"bk_biz_id" yaml:"bk_biz_id" validate:"required"` + BkCloudID int64 `json:"bk_cloud_id" yaml:"bk_cloud_id" validate:"required"` + App string `json:"app" yaml:"app" validate:"required"` + AppName string `json:"app_name" yaml:"app_name" validate:"required"` + 
ClusterDomain string `json:"cluster_domain" yaml:"cluster_domain" validate:"required"` + ClusterName string `json:"cluster_name" yaml:"cluster_name" validate:"required"` + ClusterType string `json:"cluster_type" yaml:"cluster_type" validate:"required"` + MetaRole string `json:"meta_role" yaml:"meta_role" validate:"required"` + ServerIP string `json:"server_ip" yaml:"server_ip" validate:"required"` + ServerPorts []int `json:"server_ports" yaml:"server_ports" validate:"required"` + Shard string `json:"shard" yaml:"shard"` +} + +// BkDbmonInstallParams 安装参数 +type BkDbmonInstallParams struct { + BkDbmonPkg common.MediaPkg `json:"bkdbmonpkg" validate:"required"` + DbToolsPkg common.DbToolsMediaPkg `json:"dbtoolspkg" validate:"required"` + GsePath string `json:"gsepath" validate:"required"` + RedisFullBackup map[string]interface{} `json:"redis_fullbackup" validate:"required"` + RedisBinlogBackup map[string]interface{} `json:"redis_binlogbackup" validate:"required"` + RedisHeartbeat map[string]interface{} `json:"redis_heartbeat" validate:"required"` + RedisMonitor map[string]interface{} `json:"redis_monitor" validate:"required"` + RedisKeyLifecyckle map[string]interface{} `json:"redis_keylife" mapstructure:"redis_keylife"` + Servers []ConfServerItem `json:"servers" yaml:"servers" validate:"required"` +} + +// BkDbmonInstall bk-dbmon安装任务 +type BkDbmonInstall struct { + runtime *jobruntime.JobGenericRuntime + params BkDbmonInstallParams + bkDbmonBinUpdated bool // bk-dbmon介质是否被更新 + confFileUpdated bool // 配置文件是否被更新 +} + +// 无实际作用,仅确保实现了 jobruntime.JobRunner 接口 +var _ jobruntime.JobRunner = (*RedisInstall)(nil) + +// NewBkDbmonInstall new +func NewBkDbmonInstall() jobruntime.JobRunner { + return &BkDbmonInstall{} +} + +// Init 初始化 +func (job *BkDbmonInstall) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("BkDbmonInstall Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("BkDbmonInstall Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + } + return nil +} + +// Name 原子任务名 +func (job *BkDbmonInstall) Name() string { + return "bkdbmon_install" +} + +// Run 执行 +func (job *BkDbmonInstall) Run() (err error) { + var isStopped bool + err = job.UntarMedia() + if err != nil { + return + } + err = job.GenerateConfigFile() + if err != nil { + return + } + isStopped, err = job.stopDbmonWhenNoServers() + if err != nil { + return + } + if isStopped { + return + } + if !job.bkDbmonBinUpdated && !job.confFileUpdated { + job.runtime.Logger.Info("bk-dbmon media,configfile both not updated") + } else { + err = job.StopBkDbmon() + } + if err != nil { + return + } + err = job.StartBkDbmon() + if err != nil { + return + } + + return +} + +// stopDbmonWhenNoServers servers为空,没有任何需要监控的 redis-servers/proxy +// 直接stop bkdbmon即可 +func (job *BkDbmonInstall) stopDbmonWhenNoServers() (isStopped bool, err error) { + if len(job.params.Servers) == 0 { + err = job.StopBkDbmon() + isStopped = true + return + } + isStopped = true + for _, svrItem := range job.params.Servers { + if len(svrItem.ServerPorts) > 
0 { + // at least one instance remains + isStopped = false + break + } + } + if isStopped { + err = job.StopBkDbmon() + return + } + return +} + +// UntarMedia unpacks the media package +// if /home/mysql/bk-dbmon/bk-dbmon exists and the version is correct, skip unpacking; +// otherwise unpack the latest bk-dbmon and repoint /home/mysql/bk-dbmon at it +func (job *BkDbmonInstall) UntarMedia() (err error) { + var remoteVersion, localVersion string + err = job.params.BkDbmonPkg.Check() + if err != nil { + job.runtime.Logger.Error("UntarMedia failed,err:%v", err) + return + } + defer util.LocalDirChownMysql(consts.BkDbmonPath + "/") + verReg := regexp.MustCompile(`bk-dbmon-(v\d+\.\d+)\.tar\.gz`) + l01 := verReg.FindStringSubmatch(job.params.BkDbmonPkg.Pkg) + if len(l01) != 2 { + err = fmt.Errorf("%s format not correct? for example bk-dbmon-v0.1.tar.gz", + job.params.BkDbmonPkg.Pkg) + job.runtime.Logger.Error(err.Error()) + return + } + remoteVersion = l01[1] + if util.FileExists(consts.BkDbmonBin) { + cmd := fmt.Sprintf("%s -v |awk '{print $2}'", consts.BkDbmonBin) + localVersion, err = util.RunBashCmd(cmd, "", nil, 1*time.Minute) + if err != nil { + return + } + localVersion = strings.TrimSpace(localVersion) + } + if remoteVersion != "" && remoteVersion == localVersion { + // the local version already matches the target version, nothing to update + job.runtime.Logger.Info("local bk-dbmon version %s matches target bk-dbmon version %s, no update needed", localVersion, remoteVersion) + return + } + job.bkDbmonBinUpdated = true + err = job.StopBkDbmon() + if err != nil { + return + } + err = job.RemoveBkDbmon() + if err != nil { + return + } + + // unpack the new version + pkgAbsPath := job.params.BkDbmonPkg.GetAbsolutePath() + pkgBasename := job.params.BkDbmonPkg.GePkgBaseName() + bakDir := filepath.Join(consts.GetRedisBackupDir(), "dbbak") + tarCmd := fmt.Sprintf("tar -zxf %s -C %s", pkgAbsPath, bakDir) + job.runtime.Logger.Info(tarCmd) + _, err = util.RunBashCmd(tarCmd, "", nil, 1*time.Minute) + if err != nil { + return + } + bkDbmonRealDir := filepath.Join(bakDir, pkgBasename) + if !util.FileExists(bkDbmonRealDir) { + err = fmt.Errorf("untar %s success but %s not exists", pkgAbsPath, bkDbmonRealDir) + job.runtime.Logger.Error(err.Error()) + return + } + // create the symlink; os.Lstat is required here, since os.Stat follows the link and would never report ModeSymlink + var stat fs.FileInfo + var link string + stat, err = os.Lstat(consts.BkDbmonPath) + if err == nil && stat.Mode()&os.ModeSymlink != 0 { + link, err = os.Readlink(consts.BkDbmonPath) + if err == nil && link == bkDbmonRealDir { + // the symlink already exists and points to the right target, nothing to create + job.runtime.Logger.Info("symlink %s already exists and points to the right target, skip creating it", consts.BkDbmonPath) + return + } + } + err = os.Symlink(bkDbmonRealDir, consts.BkDbmonPath) + if err != nil { + err = fmt.Errorf("os.Symlink failed,err:%v,dir:%s,softLink:%s", err, bkDbmonRealDir, consts.BkDbmonPath) + job.runtime.Logger.Error(err.Error()) + return + } + util.LocalDirChownMysql(bkDbmonRealDir) + err = job.params.DbToolsPkg.Install() + if err != nil { + return err + } + return nil +} + +// StopBkDbmon stop local bk-dbmon +func (job *BkDbmonInstall) StopBkDbmon() (err error) { + err = util.StopBkDbmon() + return +} + +// StartBkDbmon start local bk-dbmon +func (job *BkDbmonInstall) StartBkDbmon() (err error) { + return util.StartBkDbmon() +} + +// RemoveBkDbmon remove local bk-dbmon +func (job *BkDbmonInstall) RemoveBkDbmon() (err error) { + if !util.FileExists(consts.BkDbmonPath) { + return + } + job.runtime.Logger.Info("RemoveBkDbmon %s exists,start remove it", consts.BkDbmonPath) + var realDir string + if util.FileExists(consts.BkDbmonPath) { + realDir, err = filepath.EvalSymlinks(consts.BkDbmonPath) + if err != nil { + err = fmt.Errorf("filepath.EvalSymlinks failed,err:%v,bkDbmon:%s", err, consts.BkDbmonPath) + 
job.runtime.Logger.Error(err.Error()) + return + } + rmCmd := fmt.Sprintf("rm -rf %s", consts.BkDbmonPath) + job.runtime.Logger.Info(rmCmd) + util.RunBashCmd(rmCmd, "", nil, 1*time.Minute) + } + if realDir != "" && util.FileExists(realDir) { + rmCmd := fmt.Sprintf("rm -rf %s", realDir) + job.runtime.Logger.Info(rmCmd) + util.RunBashCmd(rmCmd, "", nil, 1*time.Minute) + } + return +} + +// bkDbmonConf 生成bk-dbmon配置 +type bkDbmonConf struct { + ReportSaveDir string `json:"report_save_dir" yaml:"report_save_dir"` + ReportLeftDay int `json:"report_left_day" yaml:"report_left_day"` + HTTPAddress string `json:"http_address" yaml:"http_address"` + GsePath string `json:"gsepath" yaml:"gsepath"` + RedisFullBackup map[string]interface{} `json:"redis_fullbackup" yaml:"redis_fullbackup"` + RedisBinlogBackup map[string]interface{} `json:"redis_binlogbackup" yaml:"redis_binlogbackup"` + RedisHeartbeat map[string]interface{} `json:"redis_heartbeat" yaml:"redis_heartbeat"` + RedisMonitor map[string]interface{} `json:"redis_monitor" yaml:"redis_monitor"` + RedisKeyLifecyckle map[string]interface{} `json:"redis_keylife" yaml:"redis_keylife"` + Servers []ConfServerItem `json:"servers" yaml:"servers"` +} + +// ToString string +func (conf *bkDbmonConf) ToString() string { + tmp, _ := json.Marshal(conf) + return string(tmp) +} + +// GenerateConfigFile 生成bk-dbmon的配置 +func (job *BkDbmonInstall) GenerateConfigFile() (err error) { + var yamlData []byte + var confMd5, tempMd5 string + var notUpdateConf bool = false + confData := &bkDbmonConf{ + ReportSaveDir: consts.DbaReportSaveDir, + ReportLeftDay: consts.RedisReportLeftDay, + HTTPAddress: consts.BkDbmonHTTPAddress, + GsePath: job.params.GsePath, + RedisFullBackup: job.params.RedisFullBackup, + RedisBinlogBackup: job.params.RedisBinlogBackup, + RedisHeartbeat: job.params.RedisHeartbeat, + RedisMonitor: job.params.RedisMonitor, + RedisKeyLifecyckle: job.params.RedisKeyLifecyckle, + Servers: job.params.Servers, + } + + yamlData, err = yaml.Marshal(confData) + if err != nil { + err = fmt.Errorf("yaml.Marshal fail,err:%v", err) + job.runtime.Logger.Info(err.Error()) + return + } + tempFile := consts.BkDbmonConfFile + "_temp" + err = ioutil.WriteFile(tempFile, yamlData, 0755) + if err != nil { + err = fmt.Errorf("ioutil.WriteFile fail,err:%v", err) + job.runtime.Logger.Info(err.Error()) + return + } + if util.FileExists(consts.BkDbmonConfFile) { + confMd5, err = util.GetFileMd5(consts.BkDbmonConfFile) + if err != nil { + job.runtime.Logger.Error(err.Error()) + return + } + tempMd5, err = util.GetFileMd5(tempFile) + if err != nil { + job.runtime.Logger.Error(err.Error()) + return + } + if confMd5 == tempMd5 { + os.Remove(tempFile) + notUpdateConf = true + } + } + if notUpdateConf { + job.runtime.Logger.Info("config file(%s) no need update", consts.BkDbmonConfFile) + return + } + job.confFileUpdated = true + mvCmd := fmt.Sprintf("mv %s %s", tempFile, consts.BkDbmonConfFile) + job.runtime.Logger.Info(mvCmd) + _, err = util.RunBashCmd(mvCmd, "", nil, 1*time.Minute) + if err != nil { + return + } + util.LocalDirChownMysql(consts.BkDbmonConfFile) + return +} + +// Retry times +func (job *BkDbmonInstall) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *BkDbmonInstall) Rollback() error { + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/clustermeet_slotsassign.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/clustermeet_slotsassign.go new file mode 100644 index 0000000000..ce37697c59 --- /dev/null +++ 
b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/clustermeet_slotsassign.go @@ -0,0 +1,334 @@ +package atomredis + +import ( + "dbm-services/redis/db-tools/dbactuator/models/myredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/go-playground/validator/v10" +) + +// ClusterReplicaItem 集群主从关系对 +type ClusterReplicaItem struct { + MasterIP string `json:"master_ip" validate:"required"` + MasterPort int `json:"master_port" validate:"required"` + SlaveIP string `json:"slave_ip"` // 不是所有 master都需要slave + SlavePort int `json:"slave_port"` + // 如 0-4095 6000 6002-60010, 如果为空,则不进行cluster addslots + Slots string `json:"slots"` +} + +// MasterAddr masteraddr +func (item *ClusterReplicaItem) MasterAddr() string { + return item.MasterIP + ":" + strconv.Itoa(item.MasterPort) +} + +// SlaveAddr slaveaddr +func (item *ClusterReplicaItem) SlaveAddr() string { + return item.SlaveIP + ":" + strconv.Itoa(item.SlavePort) +} + +// ClusterMeetSlotsAssignParams 集群关系建立和slots分配 +type ClusterMeetSlotsAssignParams struct { + Password string `json:"password"` // 如果password为空,则自动从本地获取 + UseForExpansion bool `json:"use_for_expansion"` // 是否用于扩容,true :是用于扩容 + SlotsAutoAssgin bool `json:"slots_auto_assign"` // slots 自动分配 + ReplicaPairs []ClusterReplicaItem `json:"replica_pairs"` +} + +// ClusterMeetSlotsAssign 节点加入集群,建立主从关系,分配slots +type ClusterMeetSlotsAssign struct { + runtime *jobruntime.JobGenericRuntime + params ClusterMeetSlotsAssignParams + AddrMapCli map[string]*myredis.RedisClient `json:"addr_map_cli"` +} + +// 无实际作用,仅确保实现了 jobruntime.JobRunner 接口 +var _ jobruntime.JobRunner = (*ClusterMeetSlotsAssign)(nil) + +// NewClusterMeetSlotsAssign new +func NewClusterMeetSlotsAssign() jobruntime.JobRunner { + return &ClusterMeetSlotsAssign{} +} + +// Init 初始化与参数校验 +func (job *ClusterMeetSlotsAssign) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v\n", err)) + return err + } + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("RedisClusterMeetSlotsAssign Init params validate failed,err:%v,params:%+v", err, + job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("RedisClusterMeetSlotsAssign Init params validate failed,err:%v,params:%+v", err, + job.params) + return err + } + } + if job.params.SlotsAutoAssgin { + for _, pair := range job.params.ReplicaPairs { + if pair.Slots != "" { + err = fmt.Errorf("SlotsAutoAssgin=%v 和 redis(%s) slots:%s 不能同时指定", + job.params.SlotsAutoAssgin, pair.MasterAddr(), pair.Slots) + job.runtime.Logger.Error(err.Error()) + return err + } + } + } + job.AddrMapCli = make(map[string]*myredis.RedisClient) + job.runtime.Logger.Info("RedisClusterMeetSlotsAssign init success") + return nil +} + +// Name 原子任务名称 +func (job *ClusterMeetSlotsAssign) Name() string { + return "clustermeet_slotsassign" +} + +// Run 执行 +func (job *ClusterMeetSlotsAssign) Run() (err error) { + var ok bool + err = job.allInstsAbleToConnect() + if err != nil { + return + } + defer job.allInstDisconnect() + + err = job.AddInstsToCluster() + if err != nil { + return + } + err = job.SlotsBelongToWho() + if err != nil { + 
return
+	}
+	err = job.AddSlots()
+	if err != nil {
+		return
+	}
+	err = job.CreateReplicas()
+	if err != nil {
+		return
+	}
+	maxRetryTimes := 60 // wait up to two minutes for the cluster state to become ok
+	i := 0
+	for {
+		i++
+		if i > maxRetryTimes {
+			break
+		}
+		ok, err = job.IsClusterStateOK()
+		if err != nil {
+			return
+		}
+		if !ok {
+			job.runtime.Logger.Info("redisCluster(%s) cluster_state not ok,sleep 2s and retry...",
+				job.params.ReplicaPairs[0].MasterAddr())
+			time.Sleep(2 * time.Second)
+			continue
+		}
+		break
+	}
+	if !ok {
+		err = fmt.Errorf("wait 120s,redisCluster(%s) cluster_state still not ok", job.params.ReplicaPairs[0].MasterAddr())
+		job.runtime.Logger.Error(err.Error())
+		return
+	}
+	return nil
+}
+
+// allInstsAbleToConnect checks that all instances are connectable
+func (job *ClusterMeetSlotsAssign) allInstsAbleToConnect() (err error) {
+	instsAddrs := []string{}
+	for _, item := range job.params.ReplicaPairs {
+		instsAddrs = append(instsAddrs, item.MasterAddr())
+		if item.SlaveIP == "" {
+			continue
+		}
+		instsAddrs = append(instsAddrs, item.SlaveAddr())
+	}
+	for _, addr01 := range instsAddrs {
+		cli, err := myredis.NewRedisClient(addr01, job.params.Password, 0, consts.TendisTypeRedisInstance)
+		if err != nil {
+			return err
+		}
+		job.AddrMapCli[addr01] = cli
+	}
+	job.runtime.Logger.Info("all redis instances able to connect,(%+v)", instsAddrs)
+	return nil
+}
+
+// AddInstsToCluster adds the instances to the cluster
+func (job *ClusterMeetSlotsAssign) AddInstsToCluster() (err error) {
+	firstAddr := job.params.ReplicaPairs[0].MasterAddr()
+	firstIP := job.params.ReplicaPairs[0].MasterIP
+	firstPort := job.params.ReplicaPairs[0].MasterPort
+	firstCli := job.AddrMapCli[firstAddr]
+	addrMap, err := firstCli.GetAddrMapToNodes()
+	if err != nil {
+		return
+	}
+	for add01, cli := range job.AddrMapCli {
+		if add01 == firstAddr {
+			continue
+		}
+		node01, ok := addrMap[add01]
+		if ok && myredis.IsRunningNode(node01) {
+			continue
+		}
+		_, err = cli.ClusterMeet(firstIP, strconv.Itoa(firstPort))
+		if err != nil {
+			return
+		}
+	}
+	time.Sleep(10 * time.Second)
+	job.runtime.Logger.Info("all redis instances added to cluster")
+	return nil
+}
+
+// CreateReplicas establishes the master-slave relationships
+func (job *ClusterMeetSlotsAssign) CreateReplicas() (err error) {
+	tasks := []*ReplicaTask{}
+	for _, item := range job.params.ReplicaPairs {
+		if item.SlaveIP == "" {
+			continue
+		}
+		task01 := NewReplicaTask(item.MasterIP, item.MasterPort, job.params.Password,
+			item.SlaveIP, item.SlavePort, job.params.Password, job.runtime)
+		tasks = append(tasks, task01)
+	}
+	err = GroupRunReplicaTasksAndWait(tasks, job.runtime)
+	if err != nil {
+		return
+	}
+	job.runtime.Logger.Info("all replicas ok")
+	return nil
+}
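+
+// A worked example of the split performed below, assuming consts.DefaultMaxSlots is
+// 16383 (i.e. 16384 slots in total): with 3 masters, perMasterSlotsCnt=5461 and
+// leftSlotCnt=1, so stepCnt=5462 and the assigned ranges become 0-5461, 5462-10923
+// and 10924-16383 (the last range is capped at DefaultMaxSlots).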
+// SlotsBelongToWho decides which node each slot belongs to (only called right after a
+// new cluster is created, to spread slots evenly across masters).
+// Callers may also assign slots explicitly upstream instead of letting dbactuator
+// distribute them.
+func (job *ClusterMeetSlotsAssign) SlotsBelongToWho() (err error) {
+	if !job.params.SlotsAutoAssgin {
+		return nil
+	}
+	firstAddr := job.params.ReplicaPairs[0].MasterAddr()
+	firstCli := job.AddrMapCli[firstAddr]
+	clusterInfo, err := firstCli.ClusterInfo()
+	if err != nil {
+		return err
+	}
+	if clusterInfo.ClusterState == consts.ClusterStateOK {
+		// the cluster is already ok, skip the cluster addslots step
+		job.runtime.Logger.Info("redisCluster:%s cluster_state:%s, skip slotsAssign ...", firstAddr,
+			clusterInfo.ClusterState)
+		return
+	}
+	masterCnt := len(job.params.ReplicaPairs)
+	perMasterSlotsCnt := (consts.DefaultMaxSlots + 1) / masterCnt
+	leftSlotCnt := (consts.DefaultMaxSlots + 1) % masterCnt
+
+	stepCnt := perMasterSlotsCnt
+	if leftSlotCnt > 0 {
+		// when slots cannot be divided evenly, the leading nodes each get one extra slot
+		stepCnt++
+	}
+	var start, end int
+	for idx := 0; idx < masterCnt; idx++ {
+		start = idx * stepCnt
+		end = (idx+1)*stepCnt - 1
+		if start > consts.DefaultMaxSlots {
+			break
+		}
+		if end > consts.DefaultMaxSlots {
+			end = consts.DefaultMaxSlots
+		}
+		job.params.ReplicaPairs[idx].Slots = fmt.Sprintf("%d-%d", start, end)
+	}
+	return nil
+}
+
+// AddSlots adds slots to each node
+func (job *ClusterMeetSlotsAssign) AddSlots() (err error) {
+	var slots []int
+	var selfNode *myredis.ClusterNodeData = nil
+	for _, pair := range job.params.ReplicaPairs {
+		if strings.TrimSpace(pair.Slots) == "" {
+			continue
+		}
+		slots, _, _, _, err = myredis.DecodeSlotsFromStr(pair.Slots, " ")
+		if err != nil {
+			return
+		}
+		if len(slots) == 0 {
+			continue
+		}
+		masterCli := job.AddrMapCli[pair.MasterAddr()]
+		selfNode, err = masterCli.GetMyself()
+		if err != nil {
+			return err
+		}
+		// find the slots this redis node is still missing
+		diffSlots := myredis.SlotSliceDiff(selfNode.Slots, slots)
+		if len(diffSlots) == 0 {
+			job.runtime.Logger.Info("redis_master(%s) slots(%s) is ok,skip addslots ...",
+				pair.MasterAddr(), myredis.ConvertSlotToStr(selfNode.Slots))
+			continue
+		}
+		_, err = masterCli.ClusterAddSlots(diffSlots)
+		if err != nil {
+			return err
+		}
+		job.runtime.Logger.Info("redis_master(%s) addslots(%s) ok", pair.MasterAddr(), myredis.ConvertSlotToStr(diffSlots))
+	}
+	return nil
+}
+
+// IsClusterStateOK reports whether the cluster state is ok
+func (job *ClusterMeetSlotsAssign) IsClusterStateOK() (ok bool, err error) {
+	// when used for expansion, skip the cluster_state check and return true directly
+	if job.params.UseForExpansion {
+		return true, nil
+	}
+	firstAddr := job.params.ReplicaPairs[0].MasterAddr()
+	firstCli := job.AddrMapCli[firstAddr]
+	clusterInfo, err := firstCli.ClusterInfo()
+	if err != nil {
+		return false, err
+	}
+	if clusterInfo.ClusterState == consts.ClusterStateOK {
+		return true, nil
+	}
+	return false, nil
+}
+
+// allInstDisconnect closes all instance connections
+func (job *ClusterMeetSlotsAssign) allInstDisconnect() {
+	for _, cli := range job.AddrMapCli {
+		cli.Close()
+	}
+}
+
+// Retry returns the retry count
+func (job *ClusterMeetSlotsAssign) Retry() uint {
+	return 2
+}
+
+// Rollback rollback; usually not implemented
+func (job *ClusterMeetSlotsAssign) Rollback() error {
+	return nil
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_backup.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_backup.go
new file mode 100644
index 0000000000..c595f7e564
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_backup.go
@@ -0,0 +1,730 @@
+package atomredis
+
+import (
+	"context"
+	"dbm-services/redis/db-tools/dbactuator/models/myredis"
+	"dbm-services/redis/db-tools/dbactuator/mylog"
+	"dbm-services/redis/db-tools/dbactuator/pkg/backupsys"
+	"dbm-services/redis/db-tools/dbactuator/pkg/consts"
+	"dbm-services/redis/db-tools/dbactuator/pkg/customtime"
+	"dbm-services/redis/db-tools/dbactuator/pkg/jobruntime"
+	"dbm-services/redis/db-tools/dbactuator/pkg/report"
+	"dbm-services/redis/db-tools/dbactuator/pkg/util"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-playground/validator/v10"
+	"github.com/gofrs/flock"
+)
+
+// TendisSSDSetLogCount log parameters set on tendisSSD
+type TendisSSDSetLogCount struct {
+	LogCount          int64 `json:"log-count"`
+	SlaveLogKeepCount int64 `json:"slave-log-keep-count"`
+}
+
+// RedisBackupParams backup parameters
+type RedisBackupParams struct {
+	BkBizID    string `json:"bk_biz_id" validate:"required"`
+	Domain     string `json:"domain"`
+	IP         string `json:"ip" validate:"required"`
+	Ports      []int  `json:"ports"` // 如果端口不连续,可直接指定端口
StartPort int `json:"start_port"` // 如果端口连续,则可直接指定起始端口和实例个数 + InstNum int `json:"inst_num"` + BackupType string `json:"backup_type" validate:"required"` + WithoutToBackupSys bool `json:"without_to_backup_sys"` // 结果不传输到备份系统,默认需传到备份系统 + SSDLogCount TendisSSDSetLogCount `json:"ssd_log_count"` // 该参数在tendissd 重做dr时,备份master需设置 +} + +// RedisBackup backup atomjob +type RedisBackup struct { + runtime *jobruntime.JobGenericRuntime + params RedisBackupParams + Reporter report.Reporter `json:"-"` +} + +// 无实际作用,仅确保实现了 jobruntime.JobRunner 接口 +var _ jobruntime.JobRunner = (*RedisBackup)(nil) + +// NewRedisBackup new +func NewRedisBackup() jobruntime.JobRunner { + return &RedisBackup{} +} + +// Init 初始化 +func (job *RedisBackup) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("RedisBackup Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("RedisBackup Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + } + // ports 和 inst_num 不能同时为空 + if len(job.params.Ports) == 0 && job.params.InstNum == 0 { + err = fmt.Errorf("RedisBackup ports(%+v) and inst_num(%d) is invalid", job.params.Ports, job.params.InstNum) + job.runtime.Logger.Error(err.Error()) + return err + } + if job.params.InstNum > 0 { + ports := make([]int, 0, job.params.InstNum) + for idx := 0; idx < job.params.InstNum; idx++ { + ports = append(ports, job.params.StartPort+idx) + } + job.params.Ports = ports + } + if job.params.BackupType != consts.NormalBackupType && + job.params.BackupType != consts.ForeverBackupType { + err = fmt.Errorf("RedisBackup backup_type(%s) not [%s,%s]", + job.params.BackupType, + consts.NormalBackupType, + consts.ForeverBackupType) + job.runtime.Logger.Error(err.Error()) + return err + } + return nil +} + +// Name 原子任务名 +func (job *RedisBackup) Name() string { + return "redis_backup" +} + +// Run 执行 +func (job *RedisBackup) Run() (err error) { + err = myredis.LocalRedisConnectTest(job.params.IP, job.params.Ports, "") + if err != nil { + return + } + job.runtime.Logger.Info("all redis instances connect success,server:%s,ports:%s", + job.params.IP, myredis.ConvertSlotToStr(job.params.Ports)) + var password string + bakDir := filepath.Join(consts.GetRedisBackupDir(), "dbbak") + util.MkDirsIfNotExists([]string{bakDir}) + util.LocalDirChownMysql(bakDir) + err = job.GetReporter() + if err != nil { + return + } + toBackSys := "yes" + if job.params.WithoutToBackupSys { + toBackSys = "no" + } + bakTasks := make([]*BackupTask, 0, len(job.params.Ports)) + for _, port := range job.params.Ports { + password, err = myredis.GetPasswordFromLocalConfFile(port) + if err != nil { + return + } + task := NewFullBackupTask(job.params.BkBizID, job.params.Domain, job.params.IP, port, password, + toBackSys, job.params.BackupType, bakDir, + true, consts.BackupTarSplitSize, job.Reporter) + + bakTasks = append(bakTasks, task) + } + // 串行备份 + for _, task := range bakTasks { + bakTask := task + bakTask.GoFullBakcup() + if bakTask.Err != nil { + return bakTask.Err + } + } + + // 上下文输出内容 + job.runtime.PipeContextData = 
bakTasks + + // if job.params.WithoutToBackupSys { + // job.runtime.Logger.Info("not transfer to backup system") + // return + // } + // // 并行上传 + // wg := sync.WaitGroup{} + // genChan := make(chan *BackupTask) + // limit := 3 // 并发度3 + // for worker := 0; worker < limit; worker++ { + // wg.Add(1) + // go func() { + // defer wg.Done() + // for taskItem := range genChan { + // taskItem.TransferToBackupSystem() + // } + // }() + // } + // go func() { + // // 关闭genChan,以便让所有goroutine退出 + // defer close(genChan) + // for _, task := range bakTasks { + // bakTask := task + // genChan <- bakTask + // } + // }() + // wg.Wait() + // for _, task := range bakTasks { + // bakTask := task + // if bakTask.Err != nil { + // return bakTask.Err + // } + // } + + return nil +} + +// Retry times +func (job *RedisBackup) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *RedisBackup) Rollback() error { + return nil +} + +// GetReporter 上报者 +func (job *RedisBackup) GetReporter() (err error) { + err = report.CreateReportDir() + if err != nil { + return + } + util.MkDirsIfNotExists([]string{consts.RedisReportSaveDir}) + util.LocalDirChownMysql(consts.RedisReportSaveDir) + reportFile := fmt.Sprintf(consts.RedisFullbackupRepoter, time.Now().Local().Format(consts.FilenameDayLayout)) + job.Reporter, err = report.NewFileReport(filepath.Join(consts.RedisReportSaveDir, reportFile)) + return +} + +// BackupTask redis备份task +type BackupTask struct { + ReportType string `json:"report_type"` + BkBizID string `json:"bk_biz_id"` + ServerIP string `json:"server_ip"` + ServerPort int `json:"server_port"` + Domain string `json:"domain"` + Password string `json:"-"` + ToBackupSystem string `json:"-"` + DbType string `json:"db_type"` // RedisInstance or TendisplusInstance or TendisSSDInstance + BackupType string `json:"-"` // 常规备份、下线备份 + Role string `json:"role"` + DataSize uint64 `json:"-"` // redis实例数据大小 + DataDir string `json:"-"` + BackupDir string `json:"backup_dir"` + TarSplit bool `json:"-"` // 是否对tar文件做split + TarSplitPartSize string `json:"-"` + BackupFiles []string `json:"backup_files"` // 备份的目标文件,如果文件过大会切割成多个 + BackupFilesSize []int64 `json:"backup_files_size"` // 备份文件大小(已切割 or 已压缩 or 已打包) + BackupTaskIDs []uint64 `json:"backup_taskids"` + BackupMD5s []string `json:"backup_md5s"` // 目前为空 + BackupTag string `json:"backup_tag"` // REDIS_FULL or REDIS_BINLOG + // 全备尽管会切成多个文件,但其生成的起始时间、结束时间一样 + StartTime customtime.CustomTime `json:"start_time"` // 生成全备的起始时间 + EndTime customtime.CustomTime `json:"end_time"` // //生成全备的结束时间 + Status string `json:"status"` + Message string `json:"message"` + Cli *myredis.RedisClient `json:"-"` + SSDLogCount TendisSSDSetLogCount `json:"-"` + reporter report.Reporter + Err error `json:"-"` +} + +// NewFullBackupTask new backup task +func NewFullBackupTask(bkBizID, domain, ip string, port int, password, + toBackupSys, backupType, backupDir string, tarSplit bool, tarSplitSize string, + reporter report.Reporter) *BackupTask { + return &BackupTask{ + ReportType: consts.RedisFullBackupReportType, + BkBizID: bkBizID, + Domain: domain, + ServerIP: ip, + ServerPort: port, + Password: password, + ToBackupSystem: toBackupSys, + BackupType: backupType, + BackupDir: backupDir, + TarSplit: tarSplit, + TarSplitPartSize: tarSplitSize, + BackupTaskIDs: []uint64{}, + BackupMD5s: []string{}, + BackupTag: consts.RedisFullBackupTAG, + reporter: reporter, + } +} + +// Addr string +func (task *BackupTask) Addr() string { + return task.ServerIP + ":" + strconv.Itoa(task.ServerPort) +} + +// ToString .. 
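+// (serializes the task to JSON; presumably used for logging and report records)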
+func (task *BackupTask) ToString() string { + tmpBytes, _ := json.Marshal(task) + return string(tmpBytes) +} + +// GoFullBakcup 执行备份task,本地备份+上传备份系统 +func (task *BackupTask) GoFullBakcup() { + mylog.Logger.Info("redis(%s) begin to backup", task.Addr()) + defer func() { + if task.Err != nil { + mylog.Logger.Error("redis(%s) backup fail", task.Addr()) + } else { + mylog.Logger.Info("redis(%s) backup success", task.Addr()) + } + }() + + var locked bool + task.newConnect() + if task.Err != nil { + return + } + defer task.Cli.Close() + mylog.Logger.Info("redis(%s) connect success", task.Addr()) + + // 获取文件锁 + lockFile := fmt.Sprintf("lock.%s.%d", task.ServerIP, task.ServerPort) + lockFile = filepath.Join(task.BackupDir, "backup", lockFile) + util.MkDirsIfNotExists([]string{filepath.Dir(lockFile)}) + util.LocalDirChownMysql(filepath.Dir(lockFile)) + mylog.Logger.Info(fmt.Sprintf("redis(%s) try to get filelock:%s", task.Addr(), lockFile)) + + // 每10秒检测一次是否上锁成功,最多等待3小时 + flock := flock.New(lockFile) + lockctx, lockcancel := context.WithTimeout(context.Background(), 3*time.Hour) + defer lockcancel() + locked, task.Err = flock.TryLockContext(lockctx, 10*time.Second) + if task.Err != nil { + task.Err = fmt.Errorf("try to get filelock(%s) fail,err:%v,redis(%s)", lockFile, task.Err, task.Addr()) + mylog.Logger.Error(task.Err.Error()) + return + } + if !locked { + return + } + defer flock.Unlock() + + defer func() { + if task.Err != nil && task.Status == "" { + task.Message = task.Err.Error() + task.Status = consts.BackupStatusFailed + } + task.BackupRecordReport() + }() + + task.Status = consts.BackupStatusRunning + task.Message = "start backup..." + task.BackupRecordReport() + + mylog.Logger.Info(fmt.Sprintf("redis(%s) dbType:%s start backup...", task.Addr(), task.DbType)) + + task.PrecheckDisk() + if task.Err != nil { + return + } + + // 如果有备份正在执行,则先等待其完成 + task.Err = task.Cli.WaitForBackupFinish() + if task.Err != nil { + return + } + if task.DbType == consts.TendisTypeRedisInstance { + task.RedisInstanceBackup() + } else if task.DbType == consts.TendisTypeTendisplusInsance { + task.TendisplusInstanceBackup() + } else if task.DbType == consts.TendisTypeTendisSSDInsance { + task.TendisSSDInstanceBackup() + if task.Err != nil { + return + } + task.TendisSSDSetLougCount() + } + if task.Err != nil { + return + } + defer task.BackupRecordSaveToDoingFile() + // 备份上传备份系统 + if strings.ToLower(task.ToBackupSystem) != "yes" { + task.Status = consts.BackupStatusLocalSuccess + task.Message = "本地备份成功,无需上传备份系统" + return + } + task.TransferToBackupSystem() + if task.Err != nil { + task.Status = consts.BackupStatusToBakSystemFailed + task.Message = task.Err.Error() + return + } + task.Status = consts.BackupStatusToBakSystemStart + task.Message = "上传备份系统中" +} + +func (task *BackupTask) newConnect() { + task.Cli, task.Err = myredis.NewRedisClient(task.Addr(), task.Password, 0, consts.TendisTypeRedisInstance) + if task.Err != nil { + return + } + task.Role, task.Err = task.Cli.GetRole() + if task.Err != nil { + return + } + task.DataDir, task.Err = task.Cli.GetDir() + if task.Err != nil { + return + } + task.DbType, task.Err = task.Cli.GetTendisType() + if task.Err != nil { + return + } + // 获取数据量大小 + if task.DbType == consts.TendisTypeRedisInstance { + task.DataSize, task.Err = task.Cli.RedisInstanceDataSize() + } else if task.DbType == consts.TendisTypeTendisplusInsance { + task.DataSize, task.Err = task.Cli.TendisplusDataSize() + } else if task.DbType == consts.TendisTypeTendisSSDInsance { + task.DataSize, 
task.Err = task.Cli.TendisSSDDataSize() + } + if task.Err != nil { + return + } + return +} + +// PrecheckDisk 磁盘检查 +func (task *BackupTask) PrecheckDisk() { + // 检查磁盘空间是否足够 + bakDiskUsg, err := util.GetLocalDirDiskUsg(task.BackupDir) + task.Err = err + if task.Err != nil { + return + } + dataDiskUsg, err := util.GetLocalDirDiskUsg(task.DataDir) + task.Err = err + if task.Err != nil { + return + } + // 磁盘空间使用已有85%,则报错 + if bakDiskUsg.UsageRatio > 85 || dataDiskUsg.UsageRatio > 85 { + task.Err = fmt.Errorf("%s disk Used%d%% > 85%% or %s disk Used(%d%%) >85%%", + task.BackupDir, bakDiskUsg.UsageRatio, + task.DataDir, dataDiskUsg.UsageRatio) + mylog.Logger.Error(task.Err.Error()) + return + } + if task.DbType == consts.TendisTypeRedisInstance { + // redisInstance rdb or aof 都会使用data磁盘空间,如备份会导致磁盘空间超95%则报错 + if int((task.DataSize+dataDiskUsg.UsedSize)*100/dataDiskUsg.TotalSize) > 95 { + task.Err = fmt.Errorf("redis(%s) data_size(%dMB) bgsave/bgrewriteaof,disk(%s) space will occupy more than 95%%", + task.Addr(), task.DataSize/1024/1024, task.DataDir) + mylog.Logger.Error(task.Err.Error()) + return + } + } + if int((task.DataSize+bakDiskUsg.UsedSize)*100/bakDiskUsg.TotalSize) > 95 { + // 如果备份会导致磁盘空间超95% + task.Err = fmt.Errorf("redis(%s) data_size(%dMB) backup disk(%s) space will occupy more than 95%%", + task.Addr(), task.DataSize/1024/1024, task.BackupDir) + mylog.Logger.Error(task.Err.Error()) + return + } + mylog.Logger.Info(fmt.Sprintf( + "check disk space ok,redis(%s) data_size(%dMB),backupDir disk(%s) available space %dMB", + task.Addr(), task.DataSize/1024/1024, task.BackupDir, bakDiskUsg.AvailSize/1024/1024)) +} + +// RedisInstanceBackup redis(cache)实例备份 +func (task *BackupTask) RedisInstanceBackup() { + var srcFile string + var targetFile string + var confMap map[string]string + var fileSize int64 + nowtime := time.Now().Local().Format(consts.FilenameTimeLayout) + task.StartTime.Time = time.Now().Local() + if task.Role == consts.RedisMasterRole { + // redis master backup rdb + confMap, task.Err = task.Cli.ConfigGet("dbfilename") + if task.Err != nil { + return + } + rdbFile := confMap["dbfilename"] + srcFile = filepath.Join(task.DataDir, rdbFile) + targetFile = filepath.Join(task.BackupDir, + fmt.Sprintf("%s-redis-%s-%s-%d-%s.rdb", + task.BkBizID, task.Role, task.ServerIP, task.ServerPort, nowtime)) + task.Err = task.Cli.BgSaveAndWaitForFinish() + } else { + srcFile = filepath.Join(task.DataDir, "appendonly.aof") + targetFile = filepath.Join(task.BackupDir, + fmt.Sprintf("%s-redis-%s-%s-%d-%s.aof", + task.BkBizID, task.Role, task.ServerIP, task.ServerPort, nowtime)) + task.Err = task.Cli.BgRewriteAOFAndWaitForDone() + } + if task.Err != nil { + return + } + task.EndTime.Time = time.Now().Local() + cpCmd := fmt.Sprintf("cp %s %s", srcFile, targetFile) + mylog.Logger.Info(cpCmd) + _, task.Err = util.RunBashCmd(cpCmd, "", nil, 10*time.Minute) + if task.Err != nil { + return + } + // aof文件,压缩; redis-server默认会对rdb做压缩,所以rdb文件不做压缩 + if strings.HasSuffix(srcFile, ".aof") { + targetFile, task.Err = util.CompressFile(targetFile, filepath.Dir(targetFile), true) + if task.Err != nil { + return + } + } + // task.BackupFiles = append(task.BackupFiles, filepath.Base(targetFile)) + task.BackupFiles = append(task.BackupFiles, targetFile) + fileSize, task.Err = util.GetFileSize(targetFile) + if task.Err != nil { + mylog.Logger.Error(task.Err.Error()) + return + } + task.BackupFilesSize = append(task.BackupFilesSize, fileSize) + util.LocalDirChownMysql(task.BackupDir) + 
mylog.Logger.Info(fmt.Sprintf("redis(%s) local backup success", task.Addr())) + return +} + +// TendisplusInstanceBackup tendisplus实例备份 +func (task *BackupTask) TendisplusInstanceBackup() { + var tarFile string + nowtime := time.Now().Local().Format(consts.FilenameTimeLayout) + backName := fmt.Sprintf("%s-TENDISPLUS-FULL-%s-%s-%d-%s", + task.BkBizID, task.Role, task.ServerIP, task.ServerPort, nowtime) + backupFullDir := filepath.Join(task.BackupDir, backName) + mylog.Logger.Info(fmt.Sprintf("MkdirAll %s", backupFullDir)) + task.Err = util.MkDirsIfNotExists([]string{backupFullDir}) + if task.Err != nil { + return + } + util.LocalDirChownMysql(task.BackupDir) + task.StartTime.Time = time.Now().Local() + task.Err = task.Cli.TendisplusBackupAndWaitForDone(backupFullDir) + if task.Err != nil { + return + } + task.EndTime.Time = time.Now().Local() + if task.TarSplit && task.TarSplitPartSize != "" { + task.BackupFiles, task.Err = util.TarAndSplitADir(backupFullDir, task.BackupDir, task.TarSplitPartSize, true) + } else { + tarFile, task.Err = util.TarADir(backupFullDir, task.BackupDir, true) + task.BackupFiles = append(task.BackupFiles, tarFile) + } + if task.Err != nil { + mylog.Logger.Error(task.Err.Error()) + return + } + task.GetBakFilesSize() + if task.Err != nil { + return + } + util.LocalDirChownMysql(task.BackupDir) + mylog.Logger.Info(fmt.Sprintf("tendisplus(%s) local backup success", task.Addr())) + return +} + +// tendisSSDBackupVerify 确定tendissd备份是否是有效的 +func (task *BackupTask) tendisSSDBackupVerify(backupFullDir string) { + var err error + if !util.FileExists(consts.TredisverifyBin) { + task.Err = fmt.Errorf("%s not exists", consts.TredisverifyBin) + mylog.Logger.Error(task.Err.Error()) + return + } + cmd := fmt.Sprintf(` +export LD_PRELOAD=/usr/local/redis/bin/deps/libjemalloc.so; +export LD_LIBRARY_PATH=LD_LIBRARY_PATH:/usr/local/redis/bin/deps; +%s %s 1 2>/dev/null + `, consts.TredisverifyBin, backupFullDir) + mylog.Logger.Info(cmd) + _, err = util.RunBashCmd(cmd, "", nil, 1*time.Hour) + if err != nil { + task.Err = fmt.Errorf("backupData(%s) verify failed", backupFullDir) + mylog.Logger.Error(task.Err.Error()) + return + } +} + +// TendisSSDInstanceBackup tendisSSD实例备份 +func (task *BackupTask) TendisSSDInstanceBackup() { + var tarFile string + var binlogsizeRet myredis.TendisSSDBinlogSize + nowtime := time.Now().Local().Format(consts.FilenameTimeLayout) + backName := fmt.Sprintf("%s-TENDISSSD-FULL-%s-%s-%d-%s", + task.BkBizID, task.Role, task.ServerIP, task.ServerPort, nowtime) + backupFullDir := filepath.Join(task.BackupDir, backName) + mylog.Logger.Info(fmt.Sprintf("MkdirAll %s", backupFullDir)) + task.Err = util.MkDirsIfNotExists([]string{backupFullDir}) + if task.Err != nil { + return + } + util.LocalDirChownMysql(task.BackupDir) + task.StartTime.Time = time.Now().Local() + binlogsizeRet, _, task.Err = task.Cli.TendisSSDBackupAndWaitForDone(backupFullDir) + if task.Err != nil { + return + } + task.EndTime.Time = time.Now().Local() + + task.tendisSSDBackupVerify(backupFullDir) + if task.Err != nil { + return + } + + // 备份文件名带上 binlogPos + fileWithBinlogPos := fmt.Sprintf("%s-%d", backupFullDir, binlogsizeRet.EndSeq) + task.Err = os.Rename(backupFullDir, fileWithBinlogPos) + if task.Err != nil { + task.Err = fmt.Errorf("rename %s to %s fail,err:%v", backupFullDir, fileWithBinlogPos, task.Err) + mylog.Logger.Error(task.Err.Error()) + return + } + backupFullDir = fileWithBinlogPos + + // 只做打包,不做压缩,rocksdb中已经做了压缩 + if task.TarSplit && task.TarSplitPartSize != "" { + 
task.BackupFiles, task.Err = util.TarAndSplitADir(backupFullDir, task.BackupDir, task.TarSplitPartSize, true)
+	} else {
+		tarFile, task.Err = util.TarADir(backupFullDir, task.BackupDir, true)
+		task.BackupFiles = append(task.BackupFiles, filepath.Join(task.BackupDir, tarFile))
+	}
+	if task.Err != nil {
+		mylog.Logger.Error(task.Err.Error())
+		return
+	}
+	task.GetBakFilesSize()
+	if task.Err != nil {
+		return
+	}
+	util.LocalDirChownMysql(task.BackupDir)
+	mylog.Logger.Info(fmt.Sprintf("tendisSSD(%s) local backup success", task.Addr()))
+	return
+}
+
+// GetBakFilesSize collects the sizes of the backup files
+func (task *BackupTask) GetBakFilesSize() {
+	var fileSize int64
+	task.BackupFilesSize = make([]int64, 0, len(task.BackupFiles))
+	for _, bakFile := range task.BackupFiles {
+		fileSize, task.Err = util.GetFileSize(bakFile)
+		if task.Err != nil {
+			mylog.Logger.Error(task.Err.Error())
+			return
+		}
+		task.BackupFilesSize = append(task.BackupFilesSize, fileSize)
+	}
+}
+
+// TendisSSDSetLougCount sets the tendisSSD log-count parameters
+func (task *BackupTask) TendisSSDSetLougCount() {
+	if task.SSDLogCount.LogCount > 0 {
+		_, task.Err = task.Cli.ConfigSet("log-count", strconv.FormatInt(task.SSDLogCount.LogCount, 10))
+		if task.Err != nil {
+			return
+		}
+	}
+	if task.SSDLogCount.SlaveLogKeepCount > 0 {
+		_, task.Err = task.Cli.ConfigSet("slave-log-keep-count", strconv.FormatInt(task.SSDLogCount.SlaveLogKeepCount, 10))
+		if task.Err != nil {
+			return
+		}
+	}
+}
+
+// TransferToBackupSystem uploads the backup files to the backup system
+func (task *BackupTask) TransferToBackupSystem() {
+	var msg string
+	cliFileInfo, err := os.Stat(consts.BackupClient)
+	if err != nil {
+		task.Err = fmt.Errorf("os.stat(%s) failed,err:%v", consts.BackupClient, err)
+		mylog.Logger.Error(task.Err.Error())
+		return
+	}
+	if !util.IsExecOther(cliFileInfo.Mode().Perm()) {
+		task.Err = fmt.Errorf("%s is unable to execute by other", consts.BackupClient)
+		mylog.Logger.Error(task.Err.Error())
+		return
+	}
+	mylog.Logger.Info(fmt.Sprintf("redis(%s) backupFiles:%+v start upload backupSystem", task.Addr(), task.BackupFiles))
+	bkTag := consts.RedisFullBackupTAG
+	if task.BackupType == consts.ForeverBackupType {
+		bkTag = consts.RedisForeverBackupTAG
+	}
+	uploader := backupsys.UploadTask{
+		Files: task.BackupFiles,
+		Tag:   bkTag,
+	}
+	task.Err = uploader.UploadFiles()
+	if task.Err != nil {
+		return
+	}
+	task.BackupTaskIDs = uploader.TaskIDs
+	// task.Err = uploader.WaitForUploadFinish()
+	// if task.Err != nil {
+	// 	return
+	// }
+	msg = fmt.Sprintf("redis(%s) backupFiles%+v taskid(%+v) uploading to backupSystem",
+		task.Addr(), task.BackupFiles, task.BackupTaskIDs)
+	mylog.Logger.Info(msg)
+	return
+}
+
+// BackupRecordReport reports the backup record
+func (task *BackupTask) BackupRecordReport() {
+	if task.reporter == nil {
+		return
+	}
+	tmpBytes, _ := json.Marshal(task)
+	// task.Err=task.reporter.AddRecord(string(tmpBytes),true)
+	task.reporter.AddRecord(string(tmpBytes)+"\n", true)
+}
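+
+// Each record appended to the "doing" file below is a single JSON line serialized
+// from BackupTask via its json tags, along the lines of (illustrative, not an actual
+// record): {"report_type":"...","bk_biz_id":"...","server_ip":"...","server_port":30000,
+// "backup_files":[...],"backup_taskids":[...],"status":"..."}; presumably a follow-up
+// task consumes this file to track which uploads are still in flight.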
+// BackupRecordSaveToDoingFile saves the backup record into the local
+// redis_backup_file_list_${port}_doing file
+func (task *BackupTask) BackupRecordSaveToDoingFile() {
+	var backupDir string
+	if len(task.BackupFiles) == 0 {
+		mylog.Logger.Warn(fmt.Sprintf("redis(%s) backupFiles:%+v empty", task.Addr(), task.BackupFiles))
+		backupDir = task.BackupDir
+	} else {
+		backupDir = filepath.Dir(task.BackupFiles[0])
+	}
+	// e.g. /data/dbbak/backup/redis_backup_file_list_30000_doing
+	backupDir = filepath.Join(backupDir, "backup")
+	util.MkDirsIfNotExists([]string{backupDir})
+	util.LocalDirChownMysql(backupDir)
+	doingFile := filepath.Join(backupDir, fmt.Sprintf(consts.DoingRedisFullBackFileList, task.ServerPort))
+	f, err := os.OpenFile(doingFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0744)
+	if err != nil {
+		task.Err = fmt.Errorf("os.OpenFile %s failed,err:%v", doingFile, err)
+		mylog.Logger.Error(task.Err.Error())
+		return
+	}
+	defer f.Close()
+	tmpBytes, _ := json.Marshal(task)
+
+	if _, err = f.WriteString(string(tmpBytes) + "\n"); err != nil {
+		task.Err = fmt.Errorf("f.WriteString failed,err:%v,file:%s,line:%s", err, doingFile, string(tmpBytes))
+		mylog.Logger.Error(task.Err.Error())
+		return
+	}
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_dts_datacheck.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_dts_datacheck.go
new file mode 100644
index 0000000000..f83245ca74
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_dts_datacheck.go
@@ -0,0 +1,636 @@
+package atomredis
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"dbm-services/common/go-pubpkg/logger"
+	"dbm-services/redis/db-tools/dbactuator/models/myredis"
+	"dbm-services/redis/db-tools/dbactuator/pkg/common"
+	"dbm-services/redis/db-tools/dbactuator/pkg/consts"
+	"dbm-services/redis/db-tools/dbactuator/pkg/jobruntime"
+	"dbm-services/redis/db-tools/dbactuator/pkg/util"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-playground/validator/v10"
+	"github.com/gofrs/flock"
+	"github.com/panjf2000/ants/v2"
+)
+
+// PortAndSegment redis port & segment range
+type PortAndSegment struct {
+	Port         int `json:"port"`
+	SegmentStart int `json:"segment_start"`
+	SegmentEnd   int `json:"segment_end"`
+}
+
+// RedisDtsDataCheckAndRpaireParams parameters for dts data check and repair
+type RedisDtsDataCheckAndRpaireParams struct {
+	common.DbToolsMediaPkg
+	BkBizID     string `json:"bk_biz_id" validate:"required"`
+	DtsCopyType string `json:"dts_copy_type" validate:"required"`
+	// if this host is part of the source redis cluster (e.g. redis_master/redis_slave), ip is the local ip;
+	// otherwise it is not (when migrating user-built redis into dbm, the data check runs on a
+	// destination-cluster proxy: ip is the user-built redis_ip, the local ip is the proxy_ip)
+	SrcRedisIP              string           `json:"src_redis_ip" validate:"required"`
+	SrcRedisPortSegmentList []PortAndSegment `json:"src_redis_port_segmentlist" validate:"required"`
+	SrcHashTag              bool             `json:"src_hash_tag"` // whether the source redis has hash tag enabled
+	// redis password 而非 proxy password
+	// 需用户传递 redis password原因是: 存在迁移用户自建redis到dbm的场景.
+ // 此时数据校验任务 无法跑在自建的redis机器上,无法本地获取redis password + SrcRedisPassword string `json:"src_redis_password" validate:"required"` + // 源redis 域名 or proxy ip等,如果源redis是一个proxy+redis主从,这里就是集群域名 or proxy ip + SrcClusterAddr string `json:"src_cluster_addr" validate:"required"` + DstClusterAddr string `json:"dst_cluster_addr" validate:"required"` + DstClusterPassword string `json:"dst_cluster_password" validate:"required"` + KeyWhiteRegex string `json:"key_white_regex" validate:"required"` + KeyBlackRegex string `json:"key_black_regex"` +} + +// RedisDtsDataCheck dts 数据校验 +type RedisDtsDataCheck struct { + atomJobName string + saveDir string + dataCheckTool string + dataRepaireTool string + params RedisDtsDataCheckAndRpaireParams + runtime *jobruntime.JobGenericRuntime +} + +// 无实际作用,仅确保实现了 jobruntime.JobRunner 接口 +var _ jobruntime.JobRunner = (*RedisDtsDataCheck)(nil) + +// NewRedisDtsDataCheck new +func NewRedisDtsDataCheck() jobruntime.JobRunner { + return &RedisDtsDataCheck{} +} + +// Init 初始化 +func (job *RedisDtsDataCheck) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("%s Init params validate failed,err:%v,params:%+v", + job.Name(), err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("%s Init params validate failed,err:%v,params:%+v", job.Name(), err, job.params) + return err + } + } + // ports 和 inst_num 不能同时为空 + if len(job.params.SrcRedisPortSegmentList) == 0 { + err = fmt.Errorf("%s PortSegmentList(%+v) is invalid", job.Name(), job.params.SrcRedisPortSegmentList) + job.runtime.Logger.Error(err.Error()) + return err + } + return nil +} + +// 为何这里 要用这种方式 返回Name()? +// 因为 RedisDtsDataRepaire 继承自 RedisDtsDataCheck, 两者Init()是相同的 +// 只有这样 Init()中 job.Name() 方法才会返回正确的名字 + +// Name 原子任务名 +func (job *RedisDtsDataCheck) Name() string { + if job.atomJobName == "" { + job.atomJobName = "redis_dts_datacheck" + } + return job.atomJobName +} + +// Run 执行 +func (job *RedisDtsDataCheck) Run() (err error) { + // 1. 测试redis是否可连接 + err = job.TestConnectable() + if err != nil { + return + } + // 2. 获取工具 + err = job.GetTools() + if err != nil { + return + } + + // 3. 
concurrent key extraction and data check, concurrency = 5
+	var wg sync.WaitGroup
+	taskList := make([]*RedisInsDtsDataCheckAndRepairTask, 0, len(job.params.SrcRedisPortSegmentList))
+	pool, err := ants.NewPoolWithFunc(5, func(i interface{}) {
+		defer wg.Done()
+		task := i.(*RedisInsDtsDataCheckAndRepairTask)
+		task.KeyPatternAndDataCheck()
+	})
+	if err != nil {
+		job.runtime.Logger.Error("RedisDtsDataCheck Run NewPoolWithFunc failed,err:%v", err)
+		return err
+	}
+	defer pool.Release()
+
+	for _, portItem := range job.params.SrcRedisPortSegmentList {
+		task, err := NewRedisInsDtsDataCheckAndRepaireTask(job.params.SrcRedisIP, portItem, job)
+		if err != nil {
+			continue
+		}
+		// wg.Add must happen only after task creation succeeds; adding before the
+		// error check would leave the counter unbalanced and deadlock wg.Wait()
+		wg.Add(1)
+		taskList = append(taskList, task)
+		_ = pool.Invoke(task)
+	}
+	// wait for all tasks to finish
+	wg.Wait()
+
+	var totalDiffKeysCnt uint64 = 0
+	for _, tmp := range taskList {
+		task := tmp
+		if task.Err != nil {
+			return task.Err
+		}
+		totalDiffKeysCnt += task.DiffKeysCnt
+	}
+	if totalDiffKeysCnt > 0 {
+		err = fmt.Errorf("RedisDtsDataCheck totalDiffKeysCnt:%d", totalDiffKeysCnt)
+		job.runtime.Logger.Error(err.Error())
+		return
+	}
+	job.runtime.Logger.Info("RedisDtsDataCheck success totalDiffKeysCnt:%d", totalDiffKeysCnt)
+	return
+}
+
+func (job *RedisDtsDataCheck) getSaveDir() {
+	job.saveDir = filepath.Join(consts.GetRedisBackupDir(), "dbbak/get_keys_pattern")
+}
+
+// TestConnectable checks that the source and destination redis are reachable
+func (job *RedisDtsDataCheck) TestConnectable() (err error) {
+	// the source redis must be connectable
+	ports := make([]int, 0, len(job.params.SrcRedisPortSegmentList))
+	for _, v := range job.params.SrcRedisPortSegmentList {
+		ports = append(ports, v.Port)
+	}
+	err = myredis.LocalRedisConnectTest(job.params.SrcRedisIP, ports, job.params.SrcRedisPassword)
+	if err != nil {
+		job.runtime.Logger.Error("redis_dts_datacheck TestConnectable failed,err:%v", err)
+		return
+	}
+	job.runtime.Logger.Info("redis_dts_datacheck TestConnectable success,ip:%s,ports:%+v", job.params.SrcRedisIP, ports)
+
+	// the destination redis must be connectable
+	cli01, err := myredis.NewRedisClientWithTimeout(job.params.DstClusterAddr, job.params.DstClusterPassword, 0,
+		consts.TendisTypeRedisInstance, 10*time.Second)
+	if err != nil {
+		return err
+	}
+	cli01.Close()
+	return
+}
+
+// CheckDtsType validates the dts copy type
+func (job *RedisDtsDataCheck) CheckDtsType() (err error) {
+	if job.params.DtsCopyType == consts.DtsTypeOneAppDiffCluster ||
+		job.params.DtsCopyType == consts.DtsTypeDiffAppDiffCluster ||
+		job.params.DtsCopyType == consts.DtsTypeSyncToOtherSystem ||
+		job.params.DtsCopyType == consts.DtsTypeUserBuiltToDbm {
+		return
+	}
+	err = fmt.Errorf("redis_dts_datacheck CheckDtsType failed, DtsType(%s) is invalid,must be [%s,%s,%s,%s]",
+		job.params.DtsCopyType,
+		consts.DtsTypeOneAppDiffCluster, consts.DtsTypeDiffAppDiffCluster,
+		consts.DtsTypeSyncToOtherSystem, consts.DtsTypeUserBuiltToDbm)
+	job.runtime.Logger.Error(err.Error())
+	return
+}
+
+// GetTools fetches the tools needed for the data check
+func (job *RedisDtsDataCheck) GetTools() (err error) {
+	job.getSaveDir()
+	err = job.params.Check()
+	if err != nil {
+		job.runtime.Logger.Error("GetTools DbToolsMediaPkg check fail,err:%v", err)
+		return
+	}
+	err = job.params.DbToolsMediaPkg.Install()
+	if err != nil {
+		job.runtime.Logger.Error("GetTools DbToolsMediaPkg install fail,err:%v", err)
+		return
+	}
+	// keep this part consistent with key extraction:
+	// copy dbtools/ldb_tendisplus, ldb_with_len.3.8, ldb_with_len.5.13,
+	// redis-shake and redisSafeDeleteTool into get_keys_pattern
+	cpCmd := fmt.Sprintf("cp %s/ldb* %s/redis-shake %s/redisSafeDeleteTool %s", consts.DbToolsPath,
+		consts.DbToolsPath, consts.DbToolsPath, job.saveDir)
job.runtime.Logger.Info(cpCmd) + _, err = util.RunBashCmd(cpCmd, "", nil, 100*time.Second) + if err != nil { + return + } + if !util.FileExists(consts.TendisDataCheckBin) { + err = fmt.Errorf("%s not exists", consts.TendisDataCheckBin) + job.runtime.Logger.Error(err.Error()) + return + } + job.dataCheckTool = consts.TendisDataCheckBin + job.dataRepaireTool = consts.RedisDiffKeysRepairerBin + return nil +} + +// Retry times +func (job *RedisDtsDataCheck) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *RedisDtsDataCheck) Rollback() error { + return nil +} + +// RedisInsDtsDataCheckAndRepairTask redis实例数据校验与数据修复task +type RedisInsDtsDataCheckAndRepairTask struct { + keyPatternTask *RedisInsKeyPatternTask + datacheckJob *RedisDtsDataCheck + DiffKeysCnt uint64 `json:"diffKeysCnt"` + HotKeysCnt uint64 `json:"hotKeysCnt"` + Err error `json:"err"` +} + +// NewRedisInsDtsDataCheckAndRepaireTask new +func NewRedisInsDtsDataCheckAndRepaireTask(ip string, portAndSeg PortAndSegment, job *RedisDtsDataCheck) ( + task *RedisInsDtsDataCheckAndRepairTask, err error) { + task = &RedisInsDtsDataCheckAndRepairTask{} + task.datacheckJob = job + task.keyPatternTask, err = NewRedisInsKeyPatternTask( + job.params.BkBizID, + job.params.SrcClusterAddr, + ip, + portAndSeg.Port, + job.params.SrcRedisPassword, + job.saveDir, + job.runtime, + "", "", "", "", "", "", // FileServer相关参数不需要 + job.params.KeyWhiteRegex, job.params.KeyBlackRegex, + false, 0, 0, 0, // key删除相关参数不需要 + portAndSeg.SegmentStart, portAndSeg.SegmentEnd, + ) + return +} + +func (task *RedisInsDtsDataCheckAndRepairTask) getSaveDir() string { + return task.datacheckJob.saveDir +} + +func (task *RedisInsDtsDataCheckAndRepairTask) getSrcRedisAddr() string { + return task.keyPatternTask.IP + ":" + strconv.Itoa(task.keyPatternTask.Port) +} + +func (task *RedisInsDtsDataCheckAndRepairTask) getSrcRedisPassword() string { + return task.datacheckJob.params.SrcRedisPassword +} + +func (task *RedisInsDtsDataCheckAndRepairTask) getDstRedisAddr() string { + return task.datacheckJob.params.DstClusterAddr +} + +func (task *RedisInsDtsDataCheckAndRepairTask) getDstRedisPassword() string { + return task.datacheckJob.params.DstClusterPassword +} + +func (task *RedisInsDtsDataCheckAndRepairTask) getLogger() *logger.Logger { + return task.datacheckJob.runtime.Logger +} + +func (task *RedisInsDtsDataCheckAndRepairTask) getDataCheckDiffKeysFile() string { + basename := fmt.Sprintf("dts_datacheck_diff_keys_%s_%d", task.keyPatternTask.IP, task.keyPatternTask.Port) + return filepath.Join(task.getSaveDir(), basename) +} + +func (task *RedisInsDtsDataCheckAndRepairTask) getRepaireHotKeysFile() string { + basename := fmt.Sprintf("dts_repaire_hot_keys_%s_%d", task.keyPatternTask.IP, task.keyPatternTask.Port) + return filepath.Join(task.getSaveDir(), basename) +} + +func (task *RedisInsDtsDataCheckAndRepairTask) isClusterEnabled() (enabled bool) { + var cli01 *myredis.RedisClient + cli01, task.Err = myredis.NewRedisClientWithTimeout(task.getSrcRedisAddr(), task.keyPatternTask.Password, 0, + consts.TendisTypeRedisInstance, 10*time.Second) + if task.Err != nil { + return false + } + defer cli01.Close() + enabled, task.Err = cli01.IsClusterEnabled() + return +} + +// tryFileLock 尝试获取文件锁,确保单个redis同一时间只有一个进程在进行数据校验 +func (task *RedisInsDtsDataCheckAndRepairTask) tryFileLock(lockFile string, timeout time.Duration) (locked bool, + flockP *flock.Flock) { + msg := fmt.Sprintf("try to get filelock:%s,addr:%s", lockFile, task.getSrcRedisAddr()) + task.getLogger().Info(msg) + + 
flockP = flock.New(lockFile)
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+	locked, task.Err = flockP.TryLockContext(ctx, 10*time.Second)
+	if task.Err != nil {
+		task.Err = fmt.Errorf("try to get filelock fail,err:%v,addr:%s", task.Err, task.getSrcRedisAddr())
+		task.getLogger().Error(task.Err.Error())
+		return false, flockP
+	}
+	if !locked {
+		return false, flockP
+	}
+	return true, flockP
+}
+
+// getDataCheckRet collects the data check result (the diff keys file)
+func (task *RedisInsDtsDataCheckAndRepairTask) getDataCheckRet() {
+	var msg string
+	var diffFileStat os.FileInfo
+	var headText string
+	diffFileStat, task.Err = os.Stat(task.getDataCheckDiffKeysFile())
+	if task.Err != nil && os.IsNotExist(task.Err) {
+		// no diff keys; clear task.Err so a missing diff file is not reported as a failure
+		task.Err = nil
+		msg = fmt.Sprintf("srcAddr:%s dstAddr:%s dts data check success no diff keys",
+			task.getSrcRedisAddr(), task.getDstRedisAddr())
+		task.getLogger().Info(msg)
+		return
+	} else if task.Err != nil {
+		task.Err = fmt.Errorf("diffKeysFile:%s os.stat fail,err:%v", task.getDataCheckDiffKeysFile(), task.Err)
+		task.getLogger().Error(task.Err.Error())
+		return
+	}
+	if diffFileStat.Size() == 0 {
+		msg := fmt.Sprintf("srcAddr:%s dstAddr:%s dts data check success no diff keys", task.getSrcRedisAddr(),
+			task.getDstRedisAddr())
+		task.getLogger().Info(msg)
+		return
+	}
+	task.DiffKeysCnt, task.Err = util.FileLineCounter(task.getDataCheckDiffKeysFile())
+	if task.Err != nil {
+		return
+	}
+	if task.DiffKeysCnt == 0 {
+		msg := fmt.Sprintf("srcAddr:%s dstAddr:%s dts data check success,%d diff keys",
+			task.getSrcRedisAddr(), task.getDstRedisAddr(), task.DiffKeysCnt)
+		task.getLogger().Info(msg)
+		return
+	}
+
+	predix := fmt.Sprintf("srcRedisAddr:%s,dstRedisAddr:%s,dts data check fail,diff keys for example:",
+		task.getSrcRedisAddr(), task.getDstRedisAddr())
+	if task.DiffKeysCnt <= 20 {
+		predix = fmt.Sprintf("srcRedisAddr:%s,dstRedisAddr:%s,dts data check fail,all diffKeys:",
+			task.getSrcRedisAddr(), task.getDstRedisAddr())
+	}
+
+	// print the first 20 inconsistent keys
+	headCmd := fmt.Sprintf("head -20 %s", task.getDataCheckDiffKeysFile())
+	task.getLogger().Info(headCmd)
+	headText, task.Err = util.RunBashCmd(headCmd, "", nil, 10*time.Second)
+	if task.Err != nil {
+		return
+	}
+	headLines := strings.ReplaceAll(headText, "\n", ",")
+	headLines = strings.ReplaceAll(headLines, "\r", ",")
+	task.getLogger().Info(predix + headLines)
+}
+
+func (task *RedisInsDtsDataCheckAndRepairTask) getDataRepaireRet() {
+	var msg string
+	hotFileStat, err := os.Stat(task.getRepaireHotKeysFile())
+	if err != nil && os.IsNotExist(err) {
+		// no hot keys
+		msg = fmt.Sprintf("all keys repaired successfully,srcAddr:%s,dstAddr:%s", task.getSrcRedisAddr(),
+			task.getDstRedisAddr())
+		task.getLogger().Info(msg)
+		return
+	} else if err != nil {
+		task.Err = fmt.Errorf("hotKeysFile:%s os.stat fail,err:%v", task.getRepaireHotKeysFile(), err)
+		task.getLogger().Error(task.Err.Error())
+		return
+	}
+	if hotFileStat.Size() == 0 {
+		msg = fmt.Sprintf("all keys repaired successfully,srcAddr:%s,dstAddr:%s", task.getSrcRedisAddr(),
+			task.getDstRedisAddr())
+		task.getLogger().Info(msg)
+		return
+	}
+	task.HotKeysCnt, err = util.FileLineCounter(task.getRepaireHotKeysFile())
+	if err != nil {
+		task.Err = err
+		task.getLogger().Error(task.Err.Error())
+		return
+	}
+	msg = fmt.Sprintf("%d hot keys cannot be repaired,srcRedisAddr:%s", task.HotKeysCnt, task.getSrcRedisAddr())
+	task.getLogger().Info(msg)
+	return
+}
+
+// RunCmdAndWatchLog runs the command and keeps streaming its log output
+func (task
*RedisInsDtsDataCheckAndRepairTask) RunCmdAndWatchLog(myCmd, logCmd string) { + var errBuffer bytes.Buffer + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cmd := exec.CommandContext(ctx, "bash", "-c", myCmd) + stdout, _ := cmd.StdoutPipe() + cmd.Stderr = &errBuffer + + if task.Err = cmd.Start(); task.Err != nil { + task.Err = fmt.Errorf("RedisInsDtsDataCheckAndRepairTask cmd.Start fail,err:%v,cmd:%s", task.Err, logCmd) + task.getLogger().Error(task.Err.Error()) + return + } + + scanner := bufio.NewScanner(stdout) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + // 不断打印进度 + line := scanner.Text() + if strings.Contains(line, `"level":"error"`) == true { + task.Err = errors.New(line) + task.getLogger().Error(task.Err.Error()) + continue + } + line = line + ";" + task.getSrcRedisAddr() + task.getLogger().Info(line) + } + task.Err = scanner.Err() + if task.Err != nil { + task.getLogger().Error(task.Err.Error()) + return + } + + if task.Err = cmd.Wait(); task.Err != nil { + task.Err = fmt.Errorf("RedisInsDtsDataCheckAndRepairTask cmd.Wait fail,err:%v", task.Err) + task.getLogger().Error(task.Err.Error()) + return + } + errStr := errBuffer.String() + errStr = strings.TrimSpace(errStr) + if len(errStr) > 0 { + task.Err = fmt.Errorf("RedisInsDtsDataCheckAndRepairTask fail,err:%s", errStr) + task.getLogger().Error(task.Err.Error()) + return + } +} + +// KeyPatternAndDataCheck key提取和数据校验 +func (task *RedisInsDtsDataCheckAndRepairTask) KeyPatternAndDataCheck() { + var extraOptsBuilder strings.Builder + var checkMode string + var locked bool + var flockP *flock.Flock + clusterEnabled := task.isClusterEnabled() + if task.Err != nil { + return + } + if clusterEnabled { + extraOptsBuilder.WriteString(" --is-src-cluster-replicate ") + } + + // 尝试获取文件锁,确保单个redis同一时间只有一个进程在进行数据校验 + lockFile := filepath.Join(task.getSaveDir(), fmt.Sprintf("lock_dtsdatacheck.%s.%d", + task.keyPatternTask.IP, task.keyPatternTask.Port)) + locked, flockP = task.tryFileLock(lockFile, 24*time.Hour) + if task.Err != nil { + return + } + if !locked { + return + } + defer flockP.Unlock() + + // 源redis 属于dbm管理,则执行key提取,而后执行数据校验 + if consts.IsDtsTypeSrcClusterBelongDbm(task.datacheckJob.params.DtsCopyType) { + checkMode = consts.DtsDataCheckByKeysFileMode + // 获取key + task.keyPatternTask.newConnect() + if task.keyPatternTask.Err != nil { + task.Err = task.keyPatternTask.Err + return + } + task.keyPatternTask.GetTendisKeys() + if task.keyPatternTask.Err != nil { + task.Err = task.keyPatternTask.Err + return + } + if task.keyPatternTask.TendisType == consts.TendisTypeTendisplusInsance { + // 合并 kvstore keys临时文件 + task.keyPatternTask.mergeTendisplusDbFile() + if task.keyPatternTask.Err != nil { + task.Err = task.keyPatternTask.Err + return + } + } + extraOptsBuilder.WriteString(" --keys-file=" + task.keyPatternTask.ResultFile + " --thread-cnt=200 ") + } else { + // 源redis 属于用户自建,则通过scan方式获取key,而后执行数据校验 + checkMode = consts.DtsDataCheckByScanMode + extraOptsBuilder.WriteString(fmt.Sprintf(" --match-pattern=%q --scan-count=20000 --thread-cnt=200 ", + task.datacheckJob.params.KeyWhiteRegex)) + } + // 数据校验 + dataCheckCmd := fmt.Sprintf( + `cd %s && %s %s --src-addr=%s --src-password=%s --dst-addr=%s --dst-password=%s --result-file=%s --ticker=120 %s`, + task.datacheckJob.saveDir, task.datacheckJob.dataCheckTool, checkMode, + task.getSrcRedisAddr(), task.getSrcRedisPassword(), + task.getDstRedisAddr(), task.getDstRedisPassword(), + task.getDataCheckDiffKeysFile(), extraOptsBuilder.String()) + logCmd := 
fmt.Sprintf( + `cd %s && %s %s --src-addr=%s --src-password=xxxx --dst-addr=%s --dst-password=xxxx --result-file=%s --ticker=120 %s`, + task.datacheckJob.saveDir, task.datacheckJob.dataCheckTool, checkMode, + task.getSrcRedisAddr(), task.getDstRedisAddr(), + task.getDataCheckDiffKeysFile(), extraOptsBuilder.String()) + task.getLogger().Info(logCmd) + + task.RunCmdAndWatchLog(dataCheckCmd, logCmd) + if task.Err != nil { + return + } + // 数据校验结果 + task.getDataCheckRet() + return +} + +// RunDataRepaire 执行数据修复 +func (task *RedisInsDtsDataCheckAndRepairTask) RunDataRepaire() { + var diffKeysCnt uint64 + var locked bool + var flockP *flock.Flock + if !util.FileExists(task.getDataCheckDiffKeysFile()) { + task.getLogger().Info("diff keys file not exists,skip data repair,file:%s", task.getDataCheckDiffKeysFile()) + return + } + diffKeysCnt, task.Err = util.FileLineCounter(task.getDataCheckDiffKeysFile()) + if task.Err != nil { + task.getLogger().Error("FileLineCounter fail,err:%v,file:%s", task.Err, task.getDataCheckDiffKeysFile()) + return + } + if diffKeysCnt == 0 { + task.getLogger().Info("diff keys file is empty,skip data repair,file:%s", task.getDataCheckDiffKeysFile()) + return + } + + // 尝试获取文件锁,确保单个redis同一时间只有一个进程在进行数据修复 + lockFile := filepath.Join(task.getSaveDir(), fmt.Sprintf("lock_dtsdatarepaire.%s.%d", + task.keyPatternTask.IP, task.keyPatternTask.Port)) + locked, flockP = task.tryFileLock(lockFile, 5*time.Hour) + if task.Err != nil { + return + } + if !locked { + return + } + defer flockP.Unlock() + + var extraOptsBuilder strings.Builder + clusterEnabled := task.isClusterEnabled() + if task.Err != nil { + return + } + if clusterEnabled { + extraOptsBuilder.WriteString(" --is-src-cluster-replicate ") + } + + repairCmd := fmt.Sprintf( + `cd %s && %s --src-addr=%s --src-password=%s \ + --dest-addr=%s --dest-password=%s --diff-keys-file=%s --hot-keys-file=%s %s`, + task.getSaveDir(), task.datacheckJob.dataRepaireTool, + task.getSrcRedisAddr(), task.getSrcRedisPassword(), + task.getDstRedisAddr(), task.getDstRedisPassword(), + task.getDataCheckDiffKeysFile(), task.getRepaireHotKeysFile(), + extraOptsBuilder.String()) + logCmd := fmt.Sprintf( + `cd %s && %s --src-addr=%s --src-password=xxxx \ + --dest-addr=%s --dest-password=xxxx --diff-keys-file=%s --hot-keys-file=%s %s`, + task.getSaveDir(), task.datacheckJob.dataRepaireTool, + task.getSrcRedisAddr(), task.getDstRedisAddr(), + task.getDataCheckDiffKeysFile(), task.getRepaireHotKeysFile(), + extraOptsBuilder.String()) + + task.getLogger().Info(logCmd) + task.RunCmdAndWatchLog(repairCmd, logCmd) + if task.Err != nil { + return + } + task.getDataRepaireRet() + return +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_dts_datarepaire.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_dts_datarepaire.go new file mode 100644 index 0000000000..9aa9abafce --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_dts_datarepaire.go @@ -0,0 +1,92 @@ +package atomredis + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "fmt" + "sync" + + "github.com/panjf2000/ants/v2" +) + +// RedisDtsDataRepaire dts数据修复 +type RedisDtsDataRepaire struct { + *RedisDtsDataCheck +} + +// 无实际作用,仅确保实现了 jobruntime.JobRunner 接口 +var _ jobruntime.JobRunner = (*RedisDtsDataRepaire)(nil) + +// NewRedisDtsDataRepaire 创建dts数据修复实例 +func NewRedisDtsDataRepaire() jobruntime.JobRunner { + dataCheck := &RedisDtsDataCheck{} + return &RedisDtsDataRepaire{dataCheck} +} + +// Init 
initializes the job
+func (job *RedisDtsDataRepaire) Init(m *jobruntime.JobGenericRuntime) error {
+	job.Name() // this call is required: it sets atomJobName before the embedded Init runs
+	return job.RedisDtsDataCheck.Init(m)
+}
+
+// Name atom job name
+func (job *RedisDtsDataRepaire) Name() string {
+	if job.atomJobName == "" {
+		job.atomJobName = "redis_dts_datarepaire"
+	}
+	return job.atomJobName
+}
+
+// Run executes the job
+func (job *RedisDtsDataRepaire) Run() (err error) {
+	// 1. test that the redis instances are connectable
+	err = job.TestConnectable()
+	if err != nil {
+		return
+	}
+	// 2. fetch the tools
+	err = job.GetTools()
+	if err != nil {
+		return
+	}
+	// 3. run data repair concurrently, concurrency = 5
+	var wg sync.WaitGroup
+	taskList := make([]*RedisInsDtsDataCheckAndRepairTask, 0, len(job.params.SrcRedisPortSegmentList))
+	pool, err := ants.NewPoolWithFunc(5, func(i interface{}) {
+		defer wg.Done()
+		task := i.(*RedisInsDtsDataCheckAndRepairTask)
+		task.RunDataRepaire()
+	})
+	if err != nil {
+		job.runtime.Logger.Error("RedisDtsDataRepaire Run NewPoolWithFunc failed,err:%v", err)
+		return err
+	}
+	defer pool.Release()
+
+	for _, portItem := range job.params.SrcRedisPortSegmentList {
+		task, err := NewRedisInsDtsDataCheckAndRepaireTask(job.params.SrcRedisIP, portItem, job.RedisDtsDataCheck)
+		if err != nil {
+			continue
+		}
+		// register with the WaitGroup only after the task is created;
+		// a continue after wg.Add(1) would leave wg.Wait() hanging forever
+		wg.Add(1)
+		taskList = append(taskList, task)
+		_ = pool.Invoke(task)
+	}
+	// wait for all tasks to finish
+	wg.Wait()
+
+	var totalHotKeysCnt uint64 = 0
+	for _, tmp := range taskList {
+		task := tmp
+		if task.Err != nil {
+			return task.Err
+		}
+		totalHotKeysCnt += task.HotKeysCnt
+	}
+	if totalHotKeysCnt > 0 {
+		err = fmt.Errorf("RedisDtsDataRepaire totalHotKeysCnt:%d", totalHotKeysCnt)
+		job.runtime.Logger.Error(err.Error())
+		return
+	}
+	job.runtime.Logger.Info("RedisDtsDataRepaire success totalHotKeysCnt:%d", totalHotKeysCnt)
+	return
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_flush_data.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_flush_data.go
new file mode 100644
index 0000000000..981e0d44ce
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_flush_data.go
@@ -0,0 +1,387 @@
+package atomredis
+
+import (
+	"dbm-services/redis/db-tools/dbactuator/models/myredis"
+	"dbm-services/redis/db-tools/dbactuator/pkg/consts"
+	"dbm-services/redis/db-tools/dbactuator/pkg/jobruntime"
+	"encoding/json"
+	"fmt"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/go-playground/validator/v10"
+)
+
+// RedisFlushDataParams flush-data (purge) parameters
+type RedisFlushDataParams struct {
+	IP         string `json:"ip" validate:"required"`
+	DbType     string `json:"db_type" validate:"required"`
+	Ports      []int  `json:"ports" validate:"required"`
+	Password   string `json:"password" validate:"required"`
+	IsForce    bool   `json:"is_force"` // should be required, but the validator rejects an explicit false
+	DBList     []int  `json:"db_list"`
+	IsFlushAll bool   `json:"is_flush_all"`
+	Debug      bool   `json:"debug"`
+}
+
+// RedisFlushData atomjob
+type RedisFlushData struct {
+	runtime     *jobruntime.JobGenericRuntime
+	params      RedisFlushDataParams
+	RedisBinDir string // /usr/local/redis
+	DataBases   int
+
+	errChan chan error
+}
+
+// no practical effect; only asserts that the jobruntime.JobRunner interface is implemented
+var _ jobruntime.JobRunner = (*RedisFlushData)(nil)
+
+// NewRedisFlushData new
+func NewRedisFlushData() jobruntime.JobRunner {
+	return &RedisFlushData{}
+}
+
+// Init initializes the job
+func (job *RedisFlushData) Init(m *jobruntime.JobGenericRuntime) error {
+	job.runtime = m
+	err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params)
+	if err != nil {
+		job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err))
+		return err
+	}
+	// validate parameters
+	validate := validator.New()
+	err = validate.Struct(job.params)
+	if err != nil {
+		if _, ok := err.(*validator.InvalidValidationError); ok {
+			job.runtime.Logger.Error("RedisFlushData Init params validate failed,err:%v,params:%+v",
+				err, job.params)
+			return err
+		}
+		for _, err := range err.(validator.ValidationErrors) {
+			job.runtime.Logger.Error("RedisFlushData Init params validate failed,err:%v,params:%+v",
+				err, job.params)
+			return err
+		}
+	}
+	return nil
+}
+
+// InitRealDataDir initializes runtime fields
+func (job *RedisFlushData) InitRealDataDir() {
+	redisSoftLink := filepath.Join(consts.UsrLocal, "redis")
+	job.RedisBinDir = filepath.Join(redisSoftLink, "bin")
+	job.runtime.Logger.Info("GetRedisBinDir success,binDir:%s", job.RedisBinDir)
+	job.errChan = make(chan error, len(job.params.Ports))
+
+	// DataBases is populated later, in CheckParams, via GetInsDatabase
+
+}
+
+// Name atom job name
+func (job *RedisFlushData) Name() string {
+	return "redis_flush_data"
+}
+
+// Run executes the flush
+func (job *RedisFlushData) Run() (err error) {
+	job.InitRealDataDir()
+	err = job.CheckParams()
+	if err != nil {
+		return
+	}
+	ports := job.params.Ports
+
+	wg := sync.WaitGroup{}
+	for _, port := range ports {
+		wg.Add(1)
+		go func(port int) {
+			defer wg.Done()
+			job.FlushData(port)
+		}(port)
+	}
+	wg.Wait()
+	close(job.errChan)
+
+	errMsg := ""
+	for err := range job.errChan {
+		errMsg = fmt.Sprintf("%s\n%s", errMsg, err.Error())
+	}
+	if errMsg != "" {
+		// use "%s" so any % in the collected errors is not re-interpreted
+		return fmt.Errorf("%s", errMsg)
+	}
+
+	return nil
+}
+
+// FlushData performs the flush for one instance
+func (job *RedisFlushData) FlushData(port int) {
+	/*
+		check the request according to is_force
+		run the flush command appropriate for db_type
+		verify the result, using randomkey to confirm the flush completed
+	*/
+	var err error
+
+	if job.params.IsFlushAll {
+		err = job.FlushAll(port)
+	} else {
+		err = job.FlushDB(port)
+	}
+	if err != nil {
+		job.errChan <- err
+		return
+	}
+
+	return
+}
+
+// FlushDB flushes the specified DBs
+func (job *RedisFlushData) FlushDB(port int) error {
+	job.runtime.Logger.Info("flush db port[%d] doing.......", port)
+	params := job.params
+
+	insAddr := fmt.Sprintf("%s:%d", job.params.IP, port)
+	redisClient, err := myredis.NewRedisClient(insAddr, job.params.Password, 0, consts.TendisTypeRedisInstance)
+	if err != nil {
+		return err
+	}
+	defer redisClient.Close()
+
+	for _, db := range params.DBList {
+		result, err := redisClient.DoCommand([]string{consts.FlushDBRename}, db)
+		if err != nil {
+			return err
+		}
+
+		if !strings.Contains(result.(string), "OK") {
+			err = fmt.Errorf("flush db port[%d] db[%d] error [%+v]", port, db, result)
+			return err
+		}
+
+		if err = job.CheckFlushResult(port, db); err != nil {
+			return err
+		}
+
+		job.runtime.Logger.Info("flush db port[%d] db[%d] success", port, db)
+	}
+	// TODO: should instances where the command was never renamed be handled here?
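+	// Illustrative sketch only, not part of this change: consts.FlushDBRename
+	// holds the site-specific renamed alias of FLUSHDB (dangerous commands are
+	// typically renamed via rename-command in redis.conf). On an instance where
+	// the rename was never applied, a fallback could look like:
+	//   if err != nil && strings.Contains(err.Error(), "unknown command") {
+	//       result, err = redisClient.DoCommand([]string{"FLUSHDB"}, db)
+	//   }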
+	return nil
+}
+
+// FlushAll flushes all data from the instance
+func (job *RedisFlushData) FlushAll(port int) error {
+	job.runtime.Logger.Info("flush all port[%d] doing.......", port)
+	var cmd []string
+	var err error
+	insAddr := fmt.Sprintf("%s:%d", job.params.IP, port)
+	redisClient, err := myredis.NewRedisClient(insAddr, job.params.Password, 0, consts.TendisTypeRedisInstance)
+	if err != nil {
+		return err
+	}
+	defer redisClient.Close()
+
+	params := job.params
+	if consts.IsTendisSSDInstanceDbType(params.DbType) {
+		cmd = []string{consts.SSDFlushAllRename}
+	} else if consts.IsTendisplusInstanceDbType(params.DbType) || consts.IsRedisInstanceDbType(params.DbType) {
+		cmd = []string{consts.CacheFlushAllRename}
+	} else {
+		err = fmt.Errorf("unknown dbType(%s)", params.DbType)
+		return err
+	}
+
+	result, err := redisClient.DoCommand(cmd, 0)
+	if err != nil {
+		return err
+	}
+
+	if !strings.Contains(result.(string), "OK") {
+		err = fmt.Errorf("flush all port[%d] error [%+v]", port, result)
+		return err
+	}
+
+	if err = job.CheckFlushResult(port, 0); err != nil {
+		return err
+	}
+
+	job.runtime.Logger.Info("flush all port[%d] success", port)
+	return nil
+}
+
+// CheckFlushResult verifies that the flush completed
+func (job *RedisFlushData) CheckFlushResult(port, db int) error {
+	job.runtime.Logger.Info("check flush result port[%d] db[%d] doing.......", port, db)
+	params := job.params
+	var err error
+
+	if !params.IsForce {
+		// a forced flush skips this check, because new writes may still be arriving
+		if consts.IsAllowRandomkey(params.DbType) {
+			if err = job.RandomKey(port, db); err != nil {
+				return err
+			}
+		} else {
+			if err = job.Keys(port, db); err != nil {
+				return err
+			}
+		}
+	}
+	job.runtime.Logger.Info("check flush result port[%d] db[%d] done.......", port, db)
+	return nil
+}
+
+// Keys checks whether a tendisplus instance was fully flushed
+func (job *RedisFlushData) Keys(port, db int) error {
+	job.runtime.Logger.Info("exec keys port[%d] db[%d] doing.......", port, db)
+
+	insAddr := fmt.Sprintf("%s:%d", job.params.IP, port)
+	redisClient, err := myredis.NewRedisClient(insAddr, job.params.Password, db, consts.TendisTypeRedisInstance)
+	if err != nil {
+		return err
+	}
+	defer redisClient.Close()
+
+	cmd := []string{consts.KeysRename, "*"}
+	result, err := redisClient.DoCommand(cmd, db)
+	if err != nil {
+		return err
+	}
+
+	flushSucc := false
+	switch ret := result.(type) {
+	case string:
+		if ret == "" {
+			flushSucc = true
+		}
+	case []interface{}:
+		if len(ret) == 0 {
+			flushSucc = true
+		}
+	}
+	if !flushSucc {
+		return fmt.Errorf("flush port[%d] db[%d] failed, please check key[%s]", port, db, result)
+	}
+
+	job.runtime.Logger.Info("exec keys port[%d] db[%d] done.......", port, db)
+	return nil
+}
+
+// RandomKey samples a random key to verify the flush completed; not supported by tendisplus
+func (job *RedisFlushData) RandomKey(port, db int) error {
+	job.runtime.Logger.Info("exec randomkey port[%d] db[%d] doing.......", port, db)
+
+	insAddr := fmt.Sprintf("%s:%d", job.params.IP, port)
+	redisClient, err := myredis.NewRedisClient(insAddr, job.params.Password, db, consts.TendisTypeRedisInstance)
+	if err != nil {
+		return err
+	}
+	defer redisClient.Close()
+
+	key, err := redisClient.Randomkey()
+	if err != nil {
+		return err
+	}
+
+	if key != "" {
+		return fmt.Errorf("flush port[%d] db[%d] failed, please check key[%s]", port, db, key)
+	}
+	job.runtime.Logger.Info("exec randomkey port[%d] db[%d] done.......", port, db)
+
+	return nil
+}
+
+// CheckParams validates the job parameters
+func (job *RedisFlushData) CheckParams() error {
+	job.runtime.Logger.Info("check params doing.......")
+	var err error
+	if !job.params.IsFlushAll && len(job.params.DBList) == 0 {
+		err = fmt.Errorf("flush type not flushall and db list is empty")
+		job.runtime.Logger.Error(err.Error())
+		return err
+	}
+
+	/* Cluster types that do not support flushdb (or rather, only support flushing db 0):
+	   native redis cluster
+	   Tendisplus cluster
+	   TendisSSD, which only supports flushalldisk
+	   ...
+	*/
+
+	if !job.params.IsFlushAll { // flushing specific DBs
+		if !consts.IsAllowFlushMoreDB(job.params.DbType) { // cluster types that cannot flush multiple DBs
+			if !(len(job.params.DBList) == 1 && job.params.DBList[0] == 0) { // more than just db 0 was requested
+				err = fmt.Errorf("cluster type only allow flush db 0")
+				job.runtime.Logger.Error(err.Error())
+				return err
+			}
+		}
+	}
+
+	// check instance role and initialize databases
+	job.DataBases, err = job.GetInsDatabase(job.params.Ports[0])
+	if err != nil {
+		return err
+	}
+	if !job.params.IsFlushAll {
+		for _, db := range job.params.DBList {
+			if db >= job.DataBases {
+				return fmt.Errorf("db num[%d] >= ins databases[%d], please check", db, job.DataBases)
+			}
+		}
+	}
+
+	for _, port := range job.params.Ports {
+		if err = job.CheckInsRole(port); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CheckInsRole checks that the instance is a master
+func (job *RedisFlushData) CheckInsRole(port int) error {
+	insAddr := fmt.Sprintf("%s:%d", job.params.IP, port)
+	redisClient, err := myredis.NewRedisClient(insAddr, job.params.Password, 0, consts.TendisTypeRedisInstance)
+	if err != nil {
+		return err
+	}
+	defer redisClient.Close()
+
+	role, err := redisClient.GetRole()
+	if err != nil {
+		return err
+	}
+	job.runtime.Logger.Info("redis port[%d] role is %s", port, role)
+
+	if role != "master" {
+		return fmt.Errorf("redis port[%d] role is not master, please check", port)
+	}
+	return nil
+}
+
+// GetInsDatabase fetches the configured number of databases
+func (job *RedisFlushData) GetInsDatabase(port int) (int, error) {
+	insAddr := fmt.Sprintf("%s:%d", job.params.IP, port)
+	redisClient, err := myredis.NewRedisClient(insAddr, job.params.Password, 0, consts.TendisTypeRedisInstance)
+	if err != nil {
+		return 0, err
+	}
+	defer redisClient.Close()
+
+	result, err := redisClient.ConfigGet("databases")
+	if err != nil {
+		return 0, err
+	}
+	return strconv.Atoi(result["databases"])
+}
+
+// Retry times
+func (job *RedisFlushData) Retry() uint {
+	return 2
+}
+
+// Rollback rollback
+func (job *RedisFlushData) Rollback() error {
+	return nil
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_install.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_install.go
new file mode 100644
index 0000000000..706036b8b3
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_install.go
@@ -0,0 +1,545 @@
+package atomredis
+
+import (
+	"dbm-services/redis/db-tools/dbactuator/models/myredis"
+	"dbm-services/redis/db-tools/dbactuator/pkg/common"
+	"dbm-services/redis/db-tools/dbactuator/pkg/consts"
+	"dbm-services/redis/db-tools/dbactuator/pkg/jobruntime"
+	"dbm-services/redis/db-tools/dbactuator/pkg/report"
+	"dbm-services/redis/db-tools/dbactuator/pkg/util"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/flosch/pongo2/v6"
+	"github.com/go-playground/validator/v10"
+)
+
+// RedisInstallParams installation parameters
+type RedisInstallParams struct {
+	common.MediaPkg
+	DbToolsPkg common.DbToolsMediaPkg `json:"dbtoolspkg"`
+	DataDirs   []string               `json:"data_dirs"`
+	IP         string                 `json:"ip" validate:"required"`
+	Ports      []int                  `json:"ports"`      // explicit ports, for non-contiguous port sets
+	StartPort  int                    `json:"start_port"` // for contiguous ports: start port plus instance count
+	InstNum    int                    `json:"inst_num"`
+	Password   string                 `json:"password" validate:"required"`
+	Databases  int                    `json:"databases" validate:"required"`
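+	// Example payload fragment (values invented for illustration): either pass
+	// explicit ports, e.g. {"ports":[30000,30001]}, or a contiguous range,
+	// e.g. {"start_port":30000,"inst_num":4}, which Init expands to
+	// ports 30000-30003.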
+	RedisConfConfigs map[string]string `json:"redis_conf_configs" validate:"required"`
+	DbType           string            `json:"db_type" validate:"required"`
+	MaxMemory        uint64            `json:"maxmemory" validate:"required"`
+}
+
+// RedisInstall redis install atomjob
+type RedisInstall struct {
+	runtime           *jobruntime.JobGenericRuntime
+	params            RedisInstallParams
+	RealDataDir       string // /data/redis
+	RedisBinDir       string // /usr/local/redis
+	RedisConfTemplate string // config template
+}
+
+// no practical effect; only asserts that the jobruntime.JobRunner interface is implemented
+var _ jobruntime.JobRunner = (*RedisInstall)(nil)
+
+// NewRedisInstall new
+func NewRedisInstall() jobruntime.JobRunner {
+	return &RedisInstall{}
+}
+
+// Init initializes the job
+func (job *RedisInstall) Init(m *jobruntime.JobGenericRuntime) error {
+	job.runtime = m
+	err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params)
+	if err != nil {
+		job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err))
+		return err
+	}
+	// validate parameters
+	validate := validator.New()
+	err = validate.Struct(job.params)
+	if err != nil {
+		if _, ok := err.(*validator.InvalidValidationError); ok {
+			job.runtime.Logger.Error("RedisInstall Init params validate failed,err:%v,params:%+v",
+				err, job.params)
+			return err
+		}
+		for _, err := range err.(validator.ValidationErrors) {
+			job.runtime.Logger.Error("RedisInstall Init params validate failed,err:%v,params:%+v",
+				err, job.params)
+			return err
+		}
+	}
+	// ports and inst_num must not both be empty
+	if len(job.params.Ports) == 0 && job.params.InstNum == 0 {
+		err = fmt.Errorf("RedisInstall ports(%+v) and inst_num(%d) is invalid", job.params.Ports, job.params.InstNum)
+		job.runtime.Logger.Error(err.Error())
+		return err
+	}
+	// 6379 <= start_port <= 55535
+	if job.params.InstNum > 0 && (job.params.StartPort > 55535 || job.params.StartPort < 6379) {
+		err = fmt.Errorf("RedisInstall start_port(%d) must range [6379,55535]", job.params.StartPort)
+		job.runtime.Logger.Error(err.Error())
+		return err
+	}
+	if job.params.InstNum > 0 {
+		ports := make([]int, 0, job.params.InstNum)
+		for idx := 0; idx < job.params.InstNum; idx++ {
+			ports = append(ports, job.params.StartPort+idx)
+		}
+		job.params.Ports = ports
+	}
+	job.runtime.Logger.Info(fmt.Sprintf("init end, server:%s,ports:%s",
+		job.params.IP, myredis.ConvertSlotToStr(job.params.Ports)))
+
+	return nil
+}
+
+// Name atom job name
+func (job *RedisInstall) Name() string {
+	return "redis_install"
+}
+
+// Run executes the job
+func (job *RedisInstall) Run() (err error) {
+	err = job.UntarMedia()
+	if err != nil {
+		return
+	}
+	err = job.GetRealDataDir()
+	if err != nil {
+		return
+	}
+	err = job.InitInstanceDirs()
+	if err != nil {
+		return
+	}
+	err = job.StartAll()
+	if err != nil {
+		return
+	}
+	return job.newExporterConfig()
+}
+
+// UntarMedia unpacks the redis package
+func (job *RedisInstall) UntarMedia() (err error) {
+	job.runtime.Logger.Info("begin to untar redis media")
+	defer func() {
+		if err != nil {
+			job.runtime.Logger.Error("untar redis media fail")
+		} else {
+			job.runtime.Logger.Info("untar redis media success")
+		}
+	}()
+	err = job.params.Check()
+	if err != nil {
+		job.runtime.Logger.Error("UntarMedia failed,err:%v", err)
+		return
+	}
+	pkgBaseName := job.params.GePkgBaseName()
+	job.RedisBinDir = filepath.Join(consts.UsrLocal, pkgBaseName)
+	_, err = os.Stat(job.RedisBinDir)
+	if err != nil && os.IsNotExist(err) {
+		// if the target dir does not exist, untar the package into /usr/local
+		pkgAbsPath := job.params.GetAbsolutePath()
+		tarCmd := fmt.Sprintf("tar -zxf %s -C %s", pkgAbsPath, consts.UsrLocal)
+		job.runtime.Logger.Info(tarCmd)
+		_, err = util.RunBashCmd(tarCmd, "", nil,
10*time.Second) + if err != nil { + return + } + util.LocalDirChownMysql(job.RedisBinDir) + } + redisSoftLink := filepath.Join(consts.UsrLocal, "redis") + _, err = os.Stat(redisSoftLink) + if err != nil && os.IsNotExist(err) { + // 如果 /usr/local/redis 不存在,则创建软链接 + err = os.Symlink(job.RedisBinDir, redisSoftLink) + if err != nil { + err = fmt.Errorf("os.Symlink failed,err:%v,dir:%s,softLink:%s", err, job.RedisBinDir, redisSoftLink) + job.runtime.Logger.Error(err.Error()) + return + } + job.runtime.Logger.Info("create soft link success,redisBinDir:%s,redisSoftLink:%s", job.RedisBinDir, redisSoftLink) + } + // 再次确认 /usr/local/redis 是指向 目标redis目录 + // 参数: /usr/loca/redis => 结果: /usr/local/redis-6.2.7 + realLink, err := filepath.EvalSymlinks(redisSoftLink) + if err != nil { + err = fmt.Errorf("filepath.EvalSymlinks failed,err:%v,redisSoftLink:%s", err, redisSoftLink) + job.runtime.Logger.Error(err.Error()) + return + } + redisBaseName := filepath.Base(realLink) + if pkgBaseName != redisBaseName { + err = fmt.Errorf("%s 指向 %s 而不是 %s", redisSoftLink, redisBaseName, pkgBaseName) + job.runtime.Logger.DPanic(err.Error()) + return + } + job.RedisBinDir = filepath.Join(redisSoftLink, "bin") + util.LocalDirChownMysql(redisSoftLink) + + addEtcProfile := fmt.Sprintf(` +ret=$(grep -i %q /etc/profile) +if [[ -z $ret ]] +then +echo "export PATH=%s:\$PATH" >> /etc/profile +fi +`, job.RedisBinDir, job.RedisBinDir) + _, err = util.RunBashCmd(addEtcProfile, "", nil, 10*time.Second) + if err != nil { + return err + } + job.runtime.Logger.Info(fmt.Sprintf("UntarMedia success,redisBinDir:%s,redisSoftLink:%s", job.RedisBinDir, + redisSoftLink)) + + err = job.params.DbToolsPkg.Install() + if err != nil { + return err + } + return nil +} + +// GetRealDataDir 确认redis Data Dir,依次检查 /data1、/data、用户输入的dirs, 如果是挂载点则返回 +func (job *RedisInstall) GetRealDataDir() (err error) { + // dirs := make([]string, 0, len(job.params.DataDirs)+2) + // dirs = append(dirs, consts.Data1Path) + // dirs = append(dirs, consts.DataPath) + // dirs = append(dirs, job.params.DataDirs...) + // job.RealDataDir, err = util.FindFirstMountPoint(dirs...) 
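+	// (The commented-out probe above shows the original intent: pick the first
+	// mounted dir among /data1, /data and any user-supplied dirs. The active
+	// code below instead derives the dir from consts.GetRedisDataDir().)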
+ // if err != nil { + // job.runtime.Logger.Error("GetInstallDir failed,err:%v,dirs:%+v", err, dirs) + // return + // } + job.RealDataDir = filepath.Join(consts.GetRedisDataDir(), "/redis") + job.runtime.Logger.Info("GetRealDataDir success,dataDir:%s", job.RealDataDir) + return nil +} + +// InitInstanceDirs 初始化实例文件夹 +func (job *RedisInstall) InitInstanceDirs() (err error) { + job.runtime.Logger.Info("begin to init redis instances' dirs") + defer func() { + if err != nil { + job.runtime.Logger.Error("init redis instances' dirs fail") + } else { + job.runtime.Logger.Info("init redis instances' dirs success") + } + }() + var instDir string + for _, port := range job.params.Ports { + instDir = filepath.Join(job.RealDataDir, strconv.Itoa(port)) + var dirs []string + if consts.IsRedisInstanceDbType(job.params.DbType) { + dirs = append(dirs, filepath.Join(instDir, "data")) + } else if consts.IsTendisplusInstanceDbType(job.params.DbType) { + dirs = append(dirs, filepath.Join(instDir, "data", "log")) + dirs = append(dirs, filepath.Join(instDir, "data", "db")) + dirs = append(dirs, filepath.Join(instDir, "data", "dump")) + dirs = append(dirs, filepath.Join(instDir, "data", "slowlog")) + } else if consts.IsTendisSSDInstanceDbType(job.params.DbType) { + dirs = append(dirs, filepath.Join(instDir, "data")) + dirs = append(dirs, filepath.Join(instDir, "rbinlog")) + } else { + err = fmt.Errorf("unknown dbType(%s)", job.params.DbType) + job.runtime.Logger.Error(err.Error()) + return + } + job.runtime.Logger.Info("MkDirAll %+v", dirs) + err = util.MkDirsIfNotExists(dirs) + if err != nil { + job.runtime.Logger.Error(err.Error()) + return err + } + } + err = util.LocalDirChownMysql(job.RealDataDir) + if err != nil { + return err + } + err = report.CreateReportDir() + if err != nil { + return err + } + return nil +} + +// IsRedisInstalled 检查redis实例已经安装 +func (job *RedisInstall) IsRedisInstalled(port int) (installed bool, err error) { + job.runtime.Logger.Info("begin to check whether redis %s:%d was installed", + job.params.IP, port) + + instDir := filepath.Join(job.RealDataDir, strconv.Itoa(port)) + instConfFile := filepath.Join(instDir, "redis.conf") + if util.FileExists(instConfFile) { + grepCmd := fmt.Sprintf("grep -i requirepass %s |grep -vP '^#'||{ true; }", instConfFile) + job.runtime.Logger.Info(grepCmd) + grepRet, err := util.RunBashCmd(grepCmd, "", nil, 10*time.Second) + if err != nil { + return false, err + } + if grepRet == "" || !strings.Contains(grepRet, job.params.Password) { + err = fmt.Errorf("redis %s:%d configFile:%s exists,but 'requirepass' not match", job.params.IP, port, instConfFile) + return false, err + } + } + + portIsUse, err := util.CheckPortIsInUse(job.params.IP, strconv.Itoa(port)) + if err != nil { + job.runtime.Logger.Error(err.Error()) + return + } + if !portIsUse { + // 端口没被占用 + return false, nil + } + // 端口已被使用 + redisAddr := job.params.IP + ":" + strconv.Itoa(port) + redisCli, err := myredis.NewRedisClientWithTimeout(redisAddr, job.params.Password, 0, + consts.TendisTypeRedisInstance, 5*time.Second) + if err != nil { + err = fmt.Errorf("%d is in used by other process", port) + job.runtime.Logger.Error(err.Error()) + return false, err + } + defer redisCli.Close() + // 检查是否为一个空实例 + dbSize, err := redisCli.DbSize() + if err != nil { + return false, err + } + if dbSize > 20 { + // key个数大于20 + err = fmt.Errorf("redis:%s is in use,dbsize=%d", redisAddr, dbSize) + job.runtime.Logger.Error(err.Error()) + return + } + if !consts.IsTendisplusInstanceDbType(job.params.DbType) { + // 
随机检查20个key + randomRets := []string{} + for i := 0; i < 20; i++ { + k, err := redisCli.Randomkey() + if err != nil { + return false, err + } + if k != "" && !util.IsDbmSysKeys(k) { + randomRets = append(randomRets, k) + } + } + if len(randomRets) > 0 { + err = fmt.Errorf("redis:%s is in use,exists keys:%+v", redisAddr, randomRets) + job.runtime.Logger.Error(err.Error()) + return + } + } + + // 检查版本是否正确 + infoMap, err := redisCli.Info("server") + if err != nil { + return false, err + } + serverVer := infoMap["redis_version"] + // 版本格式兼容tendisSSD的情况 + // tendisSSD 包名 redis-2.8.17-rocksdb-v1.2.20.tar.gz, 版本名(redis_version) 2.8.17-TRedis-v1.2.20 + serverVer = strings.ReplaceAll(serverVer, "TRedis", "rocksdb") + pkgBaseName := job.params.GePkgBaseName() + if !strings.Contains(pkgBaseName, serverVer) { + err = fmt.Errorf("redis:%s installed but version(%s) not %s", redisAddr, serverVer, pkgBaseName) + job.runtime.Logger.Error(err.Error()) + return + } + // redis实例已成功安装,且是一个空实例,且版本正确 + job.runtime.Logger.Info(fmt.Sprintf("redis(%s) install success,dbsize:%d,version:%s", + redisAddr, dbSize, serverVer)) + return true, nil +} + +func (job *RedisInstall) getRedisConfTemplate() error { + if job.RedisConfTemplate != "" { + return nil + } + sb := strings.Builder{} + for key, value := range job.params.RedisConfConfigs { + if value == "" { + value = "\"\"" // 针对 save ""的情况 + } + sb.WriteString(key + " " + value + "\n") + } + job.RedisConfTemplate = sb.String() + return nil +} + +// GenerateConfigFile 生成配置文件 +func (job *RedisInstall) GenerateConfigFile(port int) error { + job.runtime.Logger.Info("begin to GenerateConfigFile,port:%d", port) + err := job.getRedisConfTemplate() + if err != nil { + return err + } + + tpl, err := pongo2.FromString(job.RedisConfTemplate) + if err != nil { + err = fmt.Errorf("pongo2.FromString fail,err:%v,RedisConfTemplate:%s", err, job.RedisConfTemplate) + job.runtime.Logger.Error(err.Error()) + return err + } + clusterEnabled := "no" + if consts.IsClusterDbType(job.params.DbType) { + clusterEnabled = "yes" + } + instDir := filepath.Join(job.RealDataDir, strconv.Itoa(port)) + instConfFile := filepath.Join(instDir, "redis.conf") + instBlockcache, err := util.GetTendisplusBlockcache(uint64(len(job.params.Ports))) + if err != nil { + job.runtime.Logger.Error(err.Error()) + return err + } + writeBufferSize, err := util.GetTendisplusWriteBufferSize(uint64(len(job.params.Ports))) + if err != nil { + job.runtime.Logger.Error(err.Error()) + return err + } + pctx01 := pongo2.Context{ + "address": job.params.IP, + "port": strconv.Itoa(port), + "password": job.params.Password, + "redis_data_dir": instDir, + "databases": strconv.Itoa(job.params.Databases), + "cluster_enabled": clusterEnabled, + "maxmemory": strconv.FormatUint(job.params.MaxMemory, 10), + "rocks_blockcachemb": strconv.FormatUint(instBlockcache, 10), + "rocks_write_buffer_size": strconv.FormatUint(writeBufferSize, 10), + } + confData, err := tpl.Execute(pctx01) + if err != nil { + job.runtime.Logger.Error( + "tpl.Execute failed,err:%v,address:%s,port:%d,password:%s,redis_data_dir:%s,databases:%d,cluster_enabled:%s", + err, job.params.IP, port, job.params.Password, instDir, job.params.Databases, clusterEnabled) + return err + } + err = ioutil.WriteFile(instConfFile, []byte(confData), os.ModePerm) + if err != nil { + job.runtime.Logger.Error("ioutil.WriteFile failed,err:%v,config_file:%s,confData:%s", err, instConfFile, confData) + return err + } + util.LocalDirChownMysql(job.RealDataDir) + 
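+	// For reference, a sketch of the rendering above (the template keys come
+	// from the RedisConfConfigs payload, so they are deployment-specific):
+	// template lines such as
+	//   requirepass {{ password }}
+	//   cluster-enabled {{ cluster_enabled }}
+	// render with the pongo2 context pctx01 to
+	//   requirepass <Password>
+	//   cluster-enabled yes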
job.runtime.Logger.Info("GenerateConfigFile success,port:%d,configFile:%s", port, instConfFile) + return nil +} +func (job *RedisInstall) newExporterConfig() (err error) { + job.runtime.Logger.Info("begin to new exporter config file") + var addr map[string]string + var key, val string + var fileData []byte + var confFile string + err = util.MkDirsIfNotExists([]string{consts.ExporterConfDir}) + if err != nil { + job.runtime.Logger.Error("newExporterConfig mkdirIfNotExists %s failed,err:%v", consts.ExporterConfDir, err) + return err + } + for _, port := range job.params.Ports { + addr = map[string]string{} + confFile = filepath.Join(consts.ExporterConfDir, fmt.Sprintf("%d.conf", port)) + key = fmt.Sprintf("redis://%s:%d", job.params.IP, port) + val = job.params.Password + addr[key] = val + fileData, _ = json.Marshal(addr) + err = ioutil.WriteFile(confFile, fileData, 0755) + if err != nil { + job.runtime.Logger.Error("newExporterConfig writeFile %s fail,err:%v", confFile, err) + return err + } + } + util.LocalDirChownMysql(consts.ExporterConfDir) + return nil +} + +func (job *RedisInstall) getRedisLogFile(port int) (logFile string) { + instDir := filepath.Join(job.RealDataDir, strconv.Itoa(port)) + if consts.IsTendisplusInstanceDbType(job.params.DbType) { + logFile = filepath.Join(instDir, "data/log/tendisplus.ERROR") + } else { + logFile = filepath.Join(instDir, "redis.log") + } + return +} + +// StartAll 拉起所有redis实例 +func (job *RedisInstall) StartAll() error { + var installed bool + var err error + var addr string + for _, port := range job.params.Ports { + instLogFile := job.getRedisLogFile(port) + startScript := filepath.Join(job.RedisBinDir, "start-redis.sh") + addr = job.params.IP + ":" + strconv.Itoa(port) + + installed, err = job.IsRedisInstalled(port) + if err != nil { + return err + } + if installed == true { + continue + } + err = job.GenerateConfigFile(port) + if err != nil { + return err + } + job.runtime.Logger.Info(fmt.Sprintf("su %s -c \"%s\"", consts.MysqlAaccount, startScript+" "+strconv.Itoa(port))) + _, err = util.RunLocalCmd("su", []string{consts.MysqlAaccount, "-c", startScript + " " + strconv.Itoa(port)}, "", + nil, 10*time.Second) + if err != nil { + return err + } + + maxRetryTimes := 20 + i := 0 + for { + i++ + for i >= maxRetryTimes { + break + } + installed, err = job.IsRedisInstalled(port) + if err != nil { + return err + } + if !installed { + job.runtime.Logger.Info("%s not ready,sleep 2 seconds and retry...", addr) + time.Sleep(2 * time.Second) + continue + } + break + } + if err != nil { + return err + } + if installed { + // redis启动成功 + continue + } + // redis启动失败 + logData, err := util.RunBashCmd(fmt.Sprintf("tail -3 %s", instLogFile), "", nil, 10*time.Second) + if err != nil { + return err + } + err = fmt.Errorf("redis(%s:%d) startup failed,logData:%s", job.params.IP, port, logData) + job.runtime.Logger.Error(err.Error()) + return err + } + return nil +} + +// Retry times +func (job *RedisInstall) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *RedisInstall) Rollback() error { + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_keysdelete_files.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_keysdelete_files.go new file mode 100644 index 0000000000..43eadef6ee --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_keysdelete_files.go @@ -0,0 +1,441 @@ +package atomredis + +import ( + "bufio" + "bytes" + "context" + 
"dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "errors" + "fmt" + "io/fs" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/go-playground/validator/v10" +) + +// TendisKeysFilesDeleteParams 按文件删除job参数 +type TendisKeysFilesDeleteParams struct { + common.DbToolsMediaPkg + FileServer util.FileServerInfo `json:"fileserver" validate:"required"` + BkBizID string `json:"bk_biz_id" validate:"required"` + Domain string `json:"domain" validate:"required"` + ProxyPort int `json:"proxy_port" validate:"required"` + ProxyPassword string `json:"proxy_password" validate:"required"` + Path string `json:"path" validate:"required"` + TendisType string `json:"tendis_type" validate:"required"` + DeleteRate int `json:"delete_rate" validate:"required"` // cache Redis删除速率,避免del 命令执行过快 + TendisplusDeleteRate int `json:"tendisplus_delete_rate" validate:"required"` // tendisplus删除速率,避免del 命令执行过快 +} + +// TendisKeysFilesDelete 按文件形式的提取结果删除key: +type TendisKeysFilesDelete struct { + deleteDir string + Err error `json:"_"` + SafeDelTool string `json:"safeDelTool"` // 执行安全删除的工具 + params TendisKeysFilesDeleteParams + runtime *jobruntime.JobGenericRuntime +} + +// 无实际作用,仅确保实现了 jobruntime.JobRunner 接口 +var _ jobruntime.JobRunner = (*TendisKeysFilesDelete)(nil) + +// NewTendisKeysFilesDelete new +func NewTendisKeysFilesDelete() jobruntime.JobRunner { + return &TendisKeysFilesDelete{} +} + +// Init 初始化 +func (job *TendisKeysFilesDelete) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("TendisKeysFilesDelete Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("TendisKeysFilesDelete Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + } + + return nil + +} + +// Name 原子任务名 +func (job *TendisKeysFilesDelete) Name() string { + return "tendis_keysdelete_files" +} + +// Run 执行 +func (job *TendisKeysFilesDelete) Run() error { + deleteDir := filepath.Join(consts.GetRedisBackupDir(), "dbbak/delete_keys_dir") + job.deleteDir = deleteDir + // 解压key工具包介质 + job.UntarMedia() + if job.Err != nil { + return job.Err + } + // 清理目录下15天以前的文件 + job.ClearFilesNDaysAgo(job.deleteDir, 15) + + // time.Sleep(10 * time.Second) + + job.DownloadFileFromoBkrepo() + if job.Err != nil { + return job.Err + } + + folderName := filepath.Base(job.params.Path) + filespath := filepath.Join(job.deleteDir, folderName) + files, err := ioutil.ReadDir(filespath) + if err != nil { + log.Fatal(err) + } + + chanFileDelTask := make(chan fs.FileInfo) + DelWorkLimit := job.SetDelWorkLimit(files) + job.runtime.Logger.Info("DelWorkLimit is %d", DelWorkLimit) + // 生产者 + go job.keysFileDelTaskQueue(chanFileDelTask, files) + wg := sync.WaitGroup{} + wg.Add(DelWorkLimit) + for worker := 0; worker < DelWorkLimit; worker++ { + // 消费者 + go job.keysFileDelTask(chanFileDelTask, &wg) + } + // 等待所有线程退出 + wg.Wait() + + if 
job.Err != nil { + return job.Err + } + return nil +} + +// Retry times +func (job *TendisKeysFilesDelete) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *TendisKeysFilesDelete) Rollback() error { + return nil + +} + +// keysFileDelTaskQueue 删除keys文件任务队列 +func (job *TendisKeysFilesDelete) keysFileDelTaskQueue(chanFileDelTask chan fs.FileInfo, files []fs.FileInfo) { + for _, file := range files { + fileTask := file + chanFileDelTask <- fileTask + } + close(chanFileDelTask) + job.runtime.Logger.Info("....add keysFileDelTask to Queue finish...") + return + +} + +// keysFileDelTask delete job +func (job *TendisKeysFilesDelete) keysFileDelTask(chanFileDelTask chan fs.FileInfo, wg *sync.WaitGroup) (err error) { + defer wg.Done() + for fileDelTask := range chanFileDelTask { + job.runtime.Logger.Info("....file keys Delete job...") + folderName := filepath.Base(job.params.Path) + filepath := filepath.Join(job.deleteDir, folderName, fileDelTask.Name()) + job.DelKeysRateLimitFromFiles(filepath) + if job.Err != nil { + return job.Err + } + } + job.runtime.Logger.Info("....keysFileDelTask goroutine finish...") + return nil + +} + +// CheckDeleteDir key分析本地数据目录 +func (job *TendisKeysFilesDelete) CheckDeleteDir() (err error) { + _, err = os.Stat(job.deleteDir) + if err != nil && os.IsNotExist(err) { + mkCmd := fmt.Sprintf("mkdir -p %s ", job.deleteDir) + _, err = util.RunLocalCmd("bash", []string{"-c", mkCmd}, "", nil, 100*time.Second) + if err != nil { + err = fmt.Errorf("创建目录:%s失败,err:%v", job.deleteDir, err) + job.runtime.Logger.Error(err.Error()) + return err + } + util.LocalDirChownMysql(job.deleteDir) + } else if err != nil { + err = fmt.Errorf("访问目录:%s 失败,err:%v", job.deleteDir, err) + job.runtime.Logger.Error(err.Error()) + return err + + } + return nil +} + +// UntarMedia 解压key工具包介质 +func (job *TendisKeysFilesDelete) UntarMedia() { + err := job.params.Check() + if err != nil { + job.runtime.Logger.Error("UntarMedis failed err:%v", err) + job.Err = err + return + } + err = job.CheckDeleteDir() + if err != nil { + job.runtime.Logger.Error("检查key保存目录失败: err:%v", err) + job.Err = err + return + } + + // Install: 确保dbtools符合预期 + err = job.params.DbToolsMediaPkg.Install() + if err != nil { + job.runtime.Logger.Error("DbToolsPkg dbtools不符合预期: err:%v,请检查", err) + job.Err = err + return + } + + cpCmd := fmt.Sprintf("cp %s/* %s", consts.DbToolsPath, job.deleteDir) + _, err = util.RunBashCmd(cpCmd, "", nil, 10*time.Second) + if err != nil { + job.Err = err + return + } + job.runtime.Logger.Info(cpCmd) + + return + +} + +// SetDelWorkLimit 设置并发度 +func (job *TendisKeysFilesDelete) SetDelWorkLimit(files []fs.FileInfo) (delWorkLimit int) { + // 根据节点数确认并发度 + delWorkLimit = 0 + fileNumber := len(files) + if fileNumber <= 8 { + delWorkLimit = 1 + } else if fileNumber <= 16 { + delWorkLimit = 2 + } else if fileNumber <= 32 { + delWorkLimit = 3 + } else if fileNumber <= 64 { + delWorkLimit = 4 + } else if fileNumber <= 128 { + delWorkLimit = 5 + } else { + delWorkLimit = 6 + } + msg := fmt.Sprintf("goroutine delWorkLimit is: %d", delWorkLimit) + job.runtime.Logger.Info(msg) + return delWorkLimit +} + +// ClearFilesNDaysAgo 清理目录下 N天前更新的文件 +func (job *TendisKeysFilesDelete) ClearFilesNDaysAgo(dir string, nDays int) { + if dir == "" || dir == "/" || dir == "/data/" || dir == "/data1/" { + return + } + if strings.Contains(dir, "dbbak") { + clearCmd := fmt.Sprintf(`cd %s && find ./ -mtime +%d -exec rm -rf {} \;`, dir, nDays) + job.runtime.Logger.Info("clear %d day cmd:%s", nDays, clearCmd) + 
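+		// find ./ -mtime +N matches entries last modified more than N days
+		// ago; the "dbbak" guard above restricts the rm -rf to the delete-keys
+		// working directory rather than arbitrary paths.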
util.RunLocalCmd("bash", []string{"-c", clearCmd}, "", nil, 10*time.Minute) + } + +} + +// DownloadFileFromoBkrepo 从蓝盾制品库下载keys文件 +func (job *TendisKeysFilesDelete) DownloadFileFromoBkrepo() { + + folderName := filepath.Base(job.params.Path) + downloadFileArchive := fmt.Sprintf(job.deleteDir + "/" + folderName + ".zip") + folderDir := filepath.Join(job.deleteDir, folderName) + _, err := os.Stat(folderDir) + if err == nil { + job.runtime.Logger.Info("文件夹已存在,不用重复下载解压") + return + } + ArchiveInfo, err := os.Stat(downloadFileArchive) + if err == nil && ArchiveInfo.Size() > 0 { + job.runtime.Logger.Info("文件压缩包已存在且文件大小为:%d,不用重复下载", ArchiveInfo.Size()) + unzipCmd := fmt.Sprintf("unzip -o %s -d %s/%s", downloadFileArchive, job.deleteDir, folderName) + job.runtime.Logger.Info("unzip files cmd: %s", unzipCmd) + _, err = util.RunLocalCmd("bash", []string{"-c", unzipCmd}, "", nil, 10*time.Minute) + if err != nil { + job.Err = err + return + } + } else { + targetURL := fmt.Sprintf(job.params.FileServer.URL + "/generic/" + job.params.FileServer.Project + "/" + + job.params.FileServer.Bucket + job.params.Path + "?download=true") + err = util.DownloadFile(downloadFileArchive, targetURL, job.params.FileServer.Username, + job.params.FileServer.Password) + if err != nil { + err = fmt.Errorf("下载文件 %s 到 %s 失败:%v", targetURL, downloadFileArchive, err) + job.runtime.Logger.Error(err.Error()) + job.Err = err + return + } + + unzipCmd := fmt.Sprintf("unzip -o %s -d %s/%s", downloadFileArchive, job.deleteDir, folderName) + job.runtime.Logger.Info("unzip files cmd: %s", unzipCmd) + _, err = util.RunLocalCmd("bash", []string{"-c", unzipCmd}, "", nil, 10*time.Minute) + if err != nil { + job.Err = err + return + } + } + +} + +// GetRedisSafeDelTool 获取安全删除key的工具 +func (job *TendisKeysFilesDelete) GetRedisSafeDelTool() (bool, error) { + + remoteSafeDelTool := "redisSafeDeleteTool" + job.SafeDelTool = filepath.Join(job.deleteDir, remoteSafeDelTool) + _, err := os.Stat(job.SafeDelTool) + if err != nil && os.IsNotExist(err) { + job.Err = fmt.Errorf("获取redisSafeDeleteTool失败,请检查是否下发成功:err:%v", err) + job.runtime.Logger.Error(job.Err.Error()) + return false, job.Err + } + util.LocalDirChownMysql(job.SafeDelTool) + err = os.Chmod(job.SafeDelTool, 0755) + if err != nil { + job.Err = fmt.Errorf(" redisSafeDeleteTool 加可执行权限失败:err:%v", err) + return false, job.Err + } + return true, nil +} + +// DelKeysRateLimitFromFiles 对redis key执行安全删除 +func (job *TendisKeysFilesDelete) DelKeysRateLimitFromFiles(resultFile string) { + + msg := fmt.Sprintf("redis:%s#%d start delete keys ...", job.params.Domain, job.params.ProxyPort) + job.runtime.Logger.Info(msg) + + job.GetRedisSafeDelTool() + if job.Err != nil { + return + } + + fileData, err := os.Stat(resultFile) + if err != nil { + job.Err = fmt.Errorf("redis:%s#%d keys resultFile:%s os.stat fail,err:%v", job.params.Domain, job.params.ProxyPort, + resultFile, err) + job.runtime.Logger.Error(job.Err.Error()) + return + } + if fileData.Size() == 0 { + msg = fmt.Sprintf("redis:%s#%d keys resultFile:%s size==%d,skip delKeys", job.params.Domain, job.params.ProxyPort, + resultFile, fileData.Size()) + job.runtime.Logger.Info(msg) + return + } + + keyFile, err := os.Open(resultFile) + if err != nil { + job.Err = fmt.Errorf("DelKeysRateLimit open %s fail,err:%v", resultFile, err) + job.runtime.Logger.Error(job.Err.Error()) + return + } + defer keyFile.Close() + + // tendisplus 与 cache 删除默认速率不同 + delRateLimit := 10000 + if consts.IsTendisplusInstanceDbType(job.params.TendisType) { + if 
job.params.TendisplusDeleteRate >= 10 { + delRateLimit = job.params.TendisplusDeleteRate + } else { + delRateLimit = 3000 + } + } else if job.params.DeleteRate >= 10 { + delRateLimit = job.params.DeleteRate + } else { + delRateLimit = 10000 + } + + var errBuffer bytes.Buffer + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bigKeyThread := 1000 // 如果hash,hlen>1000,则算big key + threadCnt := 30 + subScanCount := 100 // hscan 中count 个数 + + addr := fmt.Sprintf("%s:%d", job.params.Domain, job.params.ProxyPort) + delCmd := fmt.Sprintf( + `%s bykeysfile --dbtype=standalone --redis-addr=%s --redis-password=%s --keys-file=%s --big-key-threashold=%d --del-rate-limit=%d --thread-cnt=%d --sub-scan-count=%d --without-config-cmd`, + job.SafeDelTool, addr, job.params.ProxyPassword, resultFile, bigKeyThread, delRateLimit, threadCnt, subScanCount) + logCmd := fmt.Sprintf( + `%s bykeysfile --dbtype=standalone --redis-addr=%s --redis-password=xxxxx --keys-file=%s --big-key-threashold=%d --del-rate-limit=%d --thread-cnt=%d --sub-scan-count=%d --without-config-cmd`, + job.SafeDelTool, addr, resultFile, bigKeyThread, delRateLimit, threadCnt, subScanCount) + job.runtime.Logger.Info(logCmd) + + cmd := exec.CommandContext(ctx, "bash", "-c", delCmd) + stdout, _ := cmd.StdoutPipe() + cmd.Stderr = &errBuffer + + if err = cmd.Start(); err != nil { + err = fmt.Errorf("DelKeysRateLimitV2 cmd.Start fail,err:%v", err) + job.runtime.Logger.Error(err.Error()) + job.Err = err + return + } + + scanner := bufio.NewScanner(stdout) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + // 不断打印进度 + m := scanner.Text() + if strings.Contains(m, `"level":"error"`) == true { + err = errors.New(m) + job.runtime.Logger.Info(m) + continue + } + m = m + ";" + addr + job.runtime.Logger.Info(m) + } + if err != nil { + job.Err = err + return + } + + if err = cmd.Wait(); err != nil { + err = fmt.Errorf("DelKeysRateLimitV2 cmd.Wait fail,err:%v", err) + job.runtime.Logger.Error(err.Error()) + job.Err = err + return + } + errStr := errBuffer.String() + errStr = strings.TrimSpace(errStr) + if len(errStr) > 0 { + err = fmt.Errorf("DelKeysRateLimitV2 fail,err:%s", errStr) + job.runtime.Logger.Error(err.Error()) + job.Err = err + return + } + +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_keyspattern.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_keyspattern.go new file mode 100644 index 0000000000..bff1448274 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_keyspattern.go @@ -0,0 +1,1454 @@ +package atomredis + +import ( + "bufio" + "bytes" + "context" + "dbm-services/redis/db-tools/dbactuator/models/myredis" + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-playground/validator/v10" + "github.com/gofrs/flock" +) + +// RedisInsKeyPatternJobParam 单台机器'key提取&删除'job参数 +type RedisInsKeyPatternJobParam struct { + common.DbToolsMediaPkg + FileServer util.FileServerInfo `json:"fileserver" validate:"required"` + BkBizID string `json:"bk_biz_id" validate:"required"` + Path string `json:"path"` + Domain string `json:"domain"` + IP string `json:"ip" validate:"required"` + Ports []int `json:"ports"` + StartPort int 
`json:"start_port"` // 如果端口连续,则可直接指定起始端口和实例个数 + InstNum int `json:"inst_num"` + KeyWhiteRegex string `json:"key_white_regex"` + KeyBlackRegex string `json:"key_black_regex"` + IsKeysToBeDel bool `json:"is_keys_to_be_del"` + DeleteRate int `json:"delete_rate"` // cache Redis删除速率,避免del 命令执行过快 + TendisplusDeleteRate int `json:"tendisplus_delete_rate"` // tendisplus删除速率,避免del 命令执行过快 + SsdDeleteRate int `json:"ssd_delete_rate"` // ssd删除速率,避免del 命令执行过快 +} + +// TendisKeysPattern key提取&删除 +type TendisKeysPattern struct { + saveDir string + Err error `json:"-"` + params RedisInsKeyPatternJobParam + runtime *jobruntime.JobGenericRuntime +} + +// 无实际作用,仅确保实现了 jobruntime.JobRunner 接口 +var _ jobruntime.JobRunner = (*TendisKeysPattern)(nil) + +// NewTendisKeysPattern new +func NewTendisKeysPattern() jobruntime.JobRunner { + return &TendisKeysPattern{} +} + +// Init 初始化 +func (job *TendisKeysPattern) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("TendisKeysPattern Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("TendisKeysPattern Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + } + // 白名单不能为空 + if job.params.KeyWhiteRegex == "" { + err = fmt.Errorf("%s为空,白名单不能为空", job.params.KeyWhiteRegex) + job.runtime.Logger.Error(err.Error()) + return err + } + + // ports 和 inst_num 不能同时为空 + if len(job.params.Ports) == 0 && job.params.InstNum == 0 { + err = fmt.Errorf("TendisKeysPattern ports(%+v) and inst_num(%d) is invalid", job.params.Ports, job.params.InstNum) + job.runtime.Logger.Error(err.Error()) + return err + } + if job.params.InstNum > 0 { + ports := make([]int, 0, job.params.InstNum) + for idx := 0; idx < job.params.InstNum; idx++ { + ports = append(ports, job.params.StartPort+idx) + } + job.params.Ports = ports + } + + return nil + +} + +// Name 原子任务名 +func (job *TendisKeysPattern) Name() string { + return "tendis_keyspattern" +} + +// Run 执行 +func (job *TendisKeysPattern) Run() (err error) { + err = myredis.LocalRedisConnectTest(job.params.IP, job.params.Ports, "") + if err != nil { + return + } + saveDir := filepath.Join(consts.GetRedisBackupDir(), "dbbak/get_keys_pattern") + job.saveDir = saveDir + // 解压key工具包介质 + err = job.UntarMedia() + if err != nil { + return + } + // 清理目录下15天以前的文件 + job.ClearFilesNDaysAgo(job.saveDir, 15) + + keyTasks := make([]*RedisInsKeyPatternTask, 0, len(job.params.Ports)) + for _, port := range job.params.Ports { + password, err := myredis.GetPasswordFromLocalConfFile(port) + if err != nil { + return err + } + task, err := NewRedisInsKeyPatternTask(job.params.BkBizID, job.params.Domain, job.params.IP, port, + password, saveDir, job.runtime, job.params.FileServer.URL, job.params.FileServer.Bucket, + job.params.FileServer.Password, job.params.FileServer.Username, job.params.FileServer.Project, + job.params.Path, job.params.KeyWhiteRegex, job.params.KeyBlackRegex, + job.params.IsKeysToBeDel, job.params.DeleteRate, job.params.TendisplusDeleteRate, job.params.SsdDeleteRate, 0, 0) + if err != nil { + return err + } + task.newConnect() + keyTasks 
= append(keyTasks, task) + } + + chanKeyTasks := make(chan *RedisInsKeyPatternTask) + workerLimit := job.SetWorkLimit(keyTasks) + // 生产者 + go job.taskQueue(chanKeyTasks, keyTasks) + wg := sync.WaitGroup{} + wg.Add(workerLimit) + for worker := 0; worker < workerLimit; worker++ { + // 消费者 + go job.keysPatternTask(chanKeyTasks, &wg) + } + // 等待所有线程退出 + wg.Wait() + + if job.Err != nil { + return job.Err + } + + return nil +} + +// Retry times +func (job *TendisKeysPattern) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *TendisKeysPattern) Rollback() error { + return nil +} + +// GetInsFullData 获取单实例全量数据 +func (task *RedisInsTask) GetInsFullData() { + + if task.Err != nil { + return + } + defer task.redisCli.Close() + + task.Err = task.redisCli.WaitForBackupFinish() + if task.Err != nil { + return + } + // 生成rdb文件 + task.RedisInsBgsave() +} + +// RedisInsKeyPatternTask key提取子任务 +type RedisInsKeyPatternTask struct { + RedisInsTask + FileServer util.FileServerInfo `json:"fileserver" validate:"required"` + Path string `json:"path"` + KeysFile string `json:"keysFile"` + KeyWhiteRegex string `json:"keyWhiteRegex"` // 白 + KeyBlackRegex string `json:"keyBlackRegex"` + ResultFile string `json:"resultFile"` + RedisShakeTool string `json:"redisShakeTool"` + LdbTendisTool string `json:"ldbTendisTool"` + WithExpiredKeys bool `json:"withExpiredKeys"` + SafeDelTool string `json:"safeDelTool"` // 执行安全删除的工具 + IsKeysToBeDel bool `json:"isKeysToBeDel"` + DeleteRate int `json:"deleteRate"` // cache 删除速率,避免del 命令执行过快 + TendisplusDeleteRate int `json:"tendisplusDeleteRate"` // tendisplus 删除速率,避免del 命令执行过快 + SsdDeleteRate int `json:"ssdDeleteRate"` // ssd删除速率,避免del 命令执行过快 + MasterAddr string `json:"masterAddr"` + MasterIP string `json:"masterIp"` + MasterPort string `json:"masterPort"` + MasterAuth string `json:"masterAuth"` + SegStart int `json:"segStart"` // 源实例所属segment start + SegEnd int `json:"segEnd"` // 源实例所属segment end + +} + +// NewRedisInsKeyPatternTask new redis instance keypattern task +func NewRedisInsKeyPatternTask( + bkBizID, domain, iP string, port int, insPassword, saveDir string, + runtime *jobruntime.JobGenericRuntime, url, bucket, password, username, project string, path string, + keyWhite, keyBlack string, iskeysToBeDel bool, deleteRate, tendisplusDeleteRate, ssdDeleteRate int, + segStart, segEnd int) (task *RedisInsKeyPatternTask, err error) { + task = &RedisInsKeyPatternTask{} + t1, err := NewRedisInsTask(bkBizID, domain, iP, port, insPassword, saveDir, runtime) + if err != nil { + return task, err + } + task.RedisInsTask = *t1 + task.KeyWhiteRegex = keyWhite + task.KeyBlackRegex = keyBlack + task.IsKeysToBeDel = iskeysToBeDel + task.DeleteRate = deleteRate + task.TendisplusDeleteRate = tendisplusDeleteRate + task.SsdDeleteRate = ssdDeleteRate + task.FileServer.URL = url + task.FileServer.Bucket = bucket + task.FileServer.Password = password + task.FileServer.Username = username + task.FileServer.Project = project + task.Path = path + task.WithExpiredKeys = false + task.SegStart = segStart + task.SegEnd = segEnd + return task, nil +} + +// UntarMedia 解压key工具包介质 +func (job *TendisKeysPattern) UntarMedia() (err error) { + err = job.params.Check() + if err != nil { + job.runtime.Logger.Error("UntarMedis failed err:%v", err) + return + } + err = job.CheckSaveDir() + if err != nil { + job.runtime.Logger.Error("检查key保存目录失败: err:%v", err) + return err + } + + // Install: 确保dbtools符合预期 + err = job.params.DbToolsMediaPkg.Install() + if err != nil { + 
job.runtime.Logger.Error("DbToolsPkg 初始化失败: err:%v", err) + job.Err = err + return err + } + + // 复制dbtools/ldb_tendisplus,ldb_with_len.3.8, ldb_with_len.5.13 + // redis-shake redisSafeDeleteTool到get_keys_pattern + cpCmd := fmt.Sprintf("cp %s/ldb* %s/redis-shake %s/redisSafeDeleteTool %s", consts.DbToolsPath, + consts.DbToolsPath, consts.DbToolsPath, job.saveDir) + + // // 这里复制所有的,是为了防止工具名变更,也可指定如上一行代码注释 + // cpCmd := fmt.Sprintf("cp %s/* %s", consts.DbToolsPath, job.saveDir) + _, err = util.RunBashCmd(cpCmd, "", nil, 100*time.Second) + if err != nil { + return + } + job.runtime.Logger.Info(cpCmd) + + return nil + +} + +// CheckSaveDir key分析本地数据目录 +func (job *TendisKeysPattern) CheckSaveDir() (err error) { + _, err = os.Stat(job.saveDir) + if err != nil && os.IsNotExist(err) { + mkCmd := fmt.Sprintf("mkdir -p %s ", job.saveDir) + _, err = util.RunLocalCmd("bash", []string{"-c", mkCmd}, "", nil, 100*time.Second) + if err != nil { + err = fmt.Errorf("创建目录:%s失败,err:%v", job.saveDir, err) + job.runtime.Logger.Error(err.Error()) + return err + } + util.LocalDirChownMysql(job.saveDir) + } else if err != nil { + err = fmt.Errorf("访问目录:%s 失败,err:%v", job.saveDir, err) + job.runtime.Logger.Error(err.Error()) + return err + + } + return nil +} + +// SetWorkLimit 设置并发度 +func (job *TendisKeysPattern) SetWorkLimit(keyTasks []*RedisInsKeyPatternTask) (workerLimit int) { + // tendisplus 并发度3 + // tendis_ssd 并发度3 + // tendis_cache 根据数据量确认并发度 + // - 0 < dataSize <= 10GB,并发度4 + // - 10GB < dataSize <= 20GB,并发度3 + // - 20GB < dataSize <= 40GB,并发度2 + // - dataSize > 40GB,并发度1 + task01 := keyTasks[0] + if task01.TendisType == consts.TendisTypeTendisplusInsance || task01.TendisType == consts.TendisTypeTendisSSDInsance { + workerLimit = 3 + } else { + var maxDataSize uint64 = 0 + for _, taskItem := range keyTasks { + task01 := taskItem + if task01.DataSize > maxDataSize { + maxDataSize = task01.DataSize + } + msg := fmt.Sprintf("redis(%s:%d) dataSize:%dMB", task01.IP, task01.Port, task01.DataSize/1024/1024) + job.runtime.Logger.Info(msg) + } + if maxDataSize <= 10*consts.GiByte { + workerLimit = 4 + } else if maxDataSize <= 20*consts.GiByte { + workerLimit = 3 + } else if maxDataSize <= 40*consts.GiByte { + workerLimit = 2 + } else { + workerLimit = 1 + } + } + msg := fmt.Sprintf("tendisType is:%s,goroutine workerLimit is: %d", task01.TendisType, workerLimit) + job.runtime.Logger.Info(msg) + return workerLimit +} + +// taskQueue 提取key 任务队列 +func (job *TendisKeysPattern) taskQueue(chanKeyTasks chan *RedisInsKeyPatternTask, + keyTasks []*RedisInsKeyPatternTask) (err error) { + for _, task := range keyTasks { + keyTask := task + chanKeyTasks <- keyTask + } + close(chanKeyTasks) + job.runtime.Logger.Info("....add taskQueue finish...") + return + +} + +// keysPatternTask 消费提取key 任务 +func (job *TendisKeysPattern) keysPatternTask(chanKeyTasks chan *RedisInsKeyPatternTask, + wg *sync.WaitGroup) (err error) { + + defer wg.Done() + for keyTask := range chanKeyTasks { + job.runtime.Logger.Info("....GetTendisKeys job...") + // 获取key + keyTask.GetTendisKeys() + if keyTask.Err != nil { + job.Err = fmt.Errorf("GetTendisKeys err:%v", keyTask.Err) + return job.Err + } + + if keyTask.TendisType == consts.TendisTypeTendisplusInsance { + // 合并 kvstore keys临时文件 + keyTask.mergeTendisplusDbFile() + if keyTask.Err != nil { + return keyTask.Err + } + } + + if job.params.IsKeysToBeDel { + keyTask.DelKeysRateLimitV2() + if keyTask.Err != nil { + job.Err = fmt.Errorf(" DelKeysRateLimitV2 err:%v", keyTask.Err) + return job.Err + } + + } + + 
// 上传keys文件 + keyTask.TransferToBkrepo() + if keyTask.Err != nil { + return keyTask.Err + } + + } + + job.runtime.Logger.Info("....keysPatternTask goroutine finish...") + return + +} + +// ClearFilesNDaysAgo 清理目录下 N天前更新的文件 +func (job *TendisKeysPattern) ClearFilesNDaysAgo(dir string, nDays int) { + if dir == "" || dir == "/" { + return + } + clearCmd := fmt.Sprintf(`cd %s && find ./ -mtime +%d -exec rm -f {} \;`, dir, nDays) + job.runtime.Logger.Info("clear %d day cmd:%s", nDays, clearCmd) + util.RunLocalCmd("bash", []string{"-c", clearCmd}, "", nil, 10*time.Minute) +} + +// getSafeRegexPattern 安全获取正则 +func (task *RedisInsKeyPatternTask) getSafeRegexPattern(keyRegex string) (shellGrepPattern string) { + if keyRegex == "" { + shellGrepPattern = "" + return + } + if keyRegex == "*" || keyRegex == ".*" || keyRegex == "^.*$" { + shellGrepPattern = ".*" + return + } + scanner := bufio.NewScanner(strings.NewReader(keyRegex)) + for scanner.Scan() { + tmpPattern := scanner.Text() + if tmpPattern == "" { + continue + } + regPartten := tmpPattern + regPartten = strings.ReplaceAll(regPartten, "|", "\\|") + regPartten = strings.ReplaceAll(regPartten, ".", "\\.") + regPartten = strings.ReplaceAll(regPartten, "*", ".*") + + if shellGrepPattern == "" { + if strings.HasPrefix(regPartten, "^") { + shellGrepPattern = regPartten + } else { + shellGrepPattern = fmt.Sprintf("^%s", regPartten) + } + } else { + if strings.HasPrefix(regPartten, "^") { + shellGrepPattern = fmt.Sprintf("%s|%s", shellGrepPattern, regPartten) + } else { + shellGrepPattern = fmt.Sprintf("%s|^%s", shellGrepPattern, regPartten) + } + } + } + msg := fmt.Sprintf("trans input partten:[%s] to regex partten:[%s]", keyRegex, shellGrepPattern) + task.runtime.Logger.Info(msg) + if err := scanner.Err(); err != nil { + task.Err = fmt.Errorf("getSafeRegexPattern scanner.Err:%v,inputPattern:%s", err, keyRegex) + task.runtime.Logger.Error(task.Err.Error()) + return + } + return shellGrepPattern +} + +// setResultFile 设置keys文件名 +func (task *RedisInsKeyPatternTask) setResultFile() { + task.ResultFile = filepath.Join(task.SaveDir, fmt.Sprintf("%s.%s_%d.keys", task.BkBizID, task.IP, task.Port)) +} + +// GetRedisShakeBin 获取redis-shake工具 +func (task *RedisInsKeyPatternTask) GetRedisShakeBin(fetchLatest bool) (bool, error) { + if task.TendisType == "" { + task.redisCli.GetTendisType() + if task.Err != nil { + return false, task.Err + } + } + if task.TendisType != consts.TendisTypeRedisInstance { + task.Err = fmt.Errorf("TendisType != consts.TendisTypeRedisInstance") + return false, task.Err + } + shakeTool := "redis-shake" + task.RedisShakeTool = filepath.Join(task.SaveDir, shakeTool) + // flow 里下发到指定目录 ,检查下发是否成功 + _, err := os.Stat(task.RedisShakeTool) + if err != nil && os.IsNotExist(err) { + task.Err = fmt.Errorf("获取redis-shake失败,请检查是否下发成功:err:%v", err) + task.runtime.Logger.Error(task.Err.Error()) + return false, task.Err + } + util.LocalDirChownMysql(task.RedisShakeTool) + err = os.Chmod(task.RedisShakeTool, 0755) + if err != nil { + task.Err = fmt.Errorf("RedisShakeTool加可执行权限失败:err:%v", err) + return false, task.Err + } + + return true, nil +} + +// GetLdbTendisTool 获取ldb_tendisplus工具 +func (task *RedisInsKeyPatternTask) GetLdbTendisTool(fetchLatest bool) (bool, error) { + + if task.TendisType == "" { + task.redisCli.GetTendisType() + if task.Err != nil { + return false, task.Err + } + } + + if task.TendisType != consts.TendisTypeTendisplusInsance { + task.Err = fmt.Errorf("TendisType != consts.TendisTypeTendisplusInsance") + return false, task.Err + 
+ ldbTendisTool := "ldb_tendisplus"
+ task.LdbTendisTool = filepath.Join(task.SaveDir, ldbTendisTool)
+ task.runtime.Logger.Info("Get ldb_tendisplus Tool")
+ _, err := os.Stat(task.LdbTendisTool)
+ if err != nil && os.IsNotExist(err) {
+ task.Err = fmt.Errorf("get ldb_tendisplus failed, please check whether it was distributed successfully:err:%v", err)
+ task.runtime.Logger.Error(task.Err.Error())
+ return false, task.Err
+ }
+ util.LocalDirChownMysql(task.LdbTendisTool)
+ err = os.Chmod(task.LdbTendisTool, 0755)
+ if err != nil {
+ task.Err = fmt.Errorf("chmod +x LdbTendisTool failed:err:%v", err)
+ return false, task.Err
+ }
+ task.runtime.Logger.Info("Get ldb_tendisplus Tool success")
+
+ return true, nil
+}
+
+// tendisplusAllKeys extracts all tendisplus keys
+func (task *RedisInsKeyPatternTask) tendisplusAllKeys() {
+ task.GetLdbTendisTool(false)
+ if task.Err != nil {
+ return
+ }
+ var kvstorecount string
+ kvstorecount, task.Err = task.redisCli.GetKvstoreCount()
+ if task.Err != nil {
+ task.Err = fmt.Errorf("tendisplusAllKeys GetKvstoreCount Err:%v", task.Err)
+ task.runtime.Logger.Error(task.Err.Error())
+ return
+ }
+ task.runtime.Logger.Info("kvstorecount:%s", kvstorecount)
+ kvstorecounts, err := strconv.Atoi(kvstorecount)
+ if err != nil {
+ task.Err = fmt.Errorf("%s:%d kvstorecount string to int failed err:%v", task.IP, task.Port, err)
+ task.runtime.Logger.Error(task.Err.Error())
+ return
+ }
+ for db := 0; db < kvstorecounts; db++ {
+ task.getTendisPlusDBKeys(db)
+ if task.Err != nil {
+ errMsg := fmt.Sprintf("get %s_%d_%d keys failed,err:%v", task.IP, task.Port, db, task.Err)
+ task.runtime.Logger.Error(errMsg)
+ return
+ }
+ Msg := fmt.Sprintf("get %s_%d_%d keys success", task.IP, task.Port, db)
+ task.runtime.Logger.Info(Msg)
+ task.getTargetKeysByPartten(db)
+ if task.Err != nil {
+ errMsg := fmt.Sprintf("grep pattern from %s_%d_%d.keys failed,err:%v", task.IP, task.Port, db, task.Err)
+ task.runtime.Logger.Error(errMsg)
+ return
+ }
+ Msg = fmt.Sprintf("grep pattern from %s_%d_%d keys success", task.IP, task.Port, db)
+ task.runtime.Logger.Info(Msg)
+
+ }
+}
+
+// getTendisPlusDBKeys extracts the keys of one tendisplus kvstore (db)
+func (task *RedisInsKeyPatternTask) getTendisPlusDBKeys(db int) {
+ task.KeysFile = filepath.Join(task.SaveDir, fmt.Sprintf("%s.%s_%d_%d.keys", task.BkBizID, task.IP, task.Port, db))
+ getKeysCmd := fmt.Sprintf("%s --db=%s/%d tscan > %s", task.LdbTendisTool, task.DataDir, db, task.KeysFile)
+ task.runtime.Logger.Info("tendisplus getkeys command:%s", getKeysCmd)
+
+ maxTimes := 5
+ var cmdRet string
+ var err error
+ for maxTimes > 0 {
+ maxTimes--
+ task.Err = nil
+ cmdRet, err = util.RunLocalCmd("bash", []string{"-c", getKeysCmd}, "", nil, 24*time.Hour)
+ if err != nil {
+ task.Err = err
+ task.runtime.Logger.Error(task.Err.Error())
+ continue
+ }
+ if cmdRet != "" {
+ task.Err = errors.New(cmdRet)
+ task.runtime.Logger.Error(task.Err.Error())
+ continue
+ }
+
+ msg := fmt.Sprintf("tendisplus db:%d AllKeys get keysFile:%s success", db, task.KeysFile)
+ task.runtime.Logger.Info(msg)
+ break
+ }
+ if task.Err != nil {
+ msg := fmt.Sprintf("tendisplus db:%d AllKeys get :%s,err:%v", db, getKeysCmd, err)
+ task.runtime.Logger.Error(msg)
+ return
+ }
+
+}
+
+// getTargetKeysByPartten filters the extracted keys by the regex patterns
+func (task *RedisInsKeyPatternTask) getTargetKeysByPartten(db int) {
+ task.ResultFile = filepath.Join(task.SaveDir, fmt.Sprintf("result.%s.%s_%d_%d.keys", task.BkBizID, task.IP, task.Port,
+ db))
+ var grepExtra = ""
+ grepPattern := task.getSafeRegexPattern(task.KeyWhiteRegex)
+ if task.Err != nil {
+ return
+ }
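+ // The grep fragments assembled below build a shell pipeline of this shape
+ // (illustrative, with assumed pattern values):
+ //   awk '{ print $3}' <keysFile> \
+ //     | { grep -E  "^white.*" || true; } \
+ //     | { grep -vE "^black.*|^<master>:heartbeat|^dbha:agent:<ip>" || true; } > <resultFile>
+ // "|| true" keeps the pipeline exit code 0 even when grep matches nothing.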
if grepPattern != "" && grepPattern != ".*" {
+ grepExtra = fmt.Sprintf(` | { grep -E %q || true; }`, grepPattern)
+ }
+ grepPattern = task.getSafeRegexPattern(task.KeyBlackRegex)
+ if task.Err != nil {
+ return
+ }
+
+ // filter out the node heartbeat data
+ Heartbeat := fmt.Sprintf("%s_%s:heartbeat", task.MasterIP, task.MasterPort)
+ DbhaAgent := fmt.Sprintf("dbha:agent:%s", task.MasterIP)
+ if grepPattern == "" {
+ grepPattern = fmt.Sprintf("^%s|^%s", Heartbeat, DbhaAgent)
+
+ } else {
+ grepPattern = fmt.Sprintf("%s|^%s|^%s", grepPattern, Heartbeat, DbhaAgent)
+
+ }
+
+ if grepPattern != "" && grepPattern != ".*" {
+ grepExtra = fmt.Sprintf(` %s | { grep -vE %q || true; }`, grepExtra, grepPattern)
+ }
+ grepCmd := fmt.Sprintf(`awk '{ print $3}' %s %s > %s`, task.KeysFile, grepExtra, task.ResultFile)
+ _, err := util.RunLocalCmd("bash", []string{"-c", grepCmd}, "", nil, 48*time.Hour)
+ if err != nil {
+ task.Err = fmt.Errorf("grepCmd:%s err:%v", grepCmd, err)
+ task.runtime.Logger.Error(task.Err.Error())
+ return
+ }
+ task.runtime.Logger.Info("tendisplusAllKeys grepCmd:%s success", grepCmd)
+
+}
+
+// mergeTendisplusDbFile merges the per-kvstore dbkeys and removes the temporary per-kvstore files
+func (task *RedisInsKeyPatternTask) mergeTendisplusDbFile() {
+ task.runtime.Logger.Info("start merging the per-db key extraction results")
+ mergeFile := filepath.Join(task.SaveDir, fmt.Sprintf("%s.%s_%d.keys", task.BkBizID, task.IP, task.Port))
+ // double-check that no keys file with the same name already exists; it must not exist,
+ // otherwise the results would accumulate
+ _, err := os.Stat(mergeFile)
+ if err == nil {
+ err = os.Remove(mergeFile)
+ if err != nil {
+ errMsg := fmt.Sprintf("a keys file with the same name exists and removing it failed:%s", mergeFile)
+ task.runtime.Logger.Error(errMsg)
+ }
+
+ }
+ mergeCmd := fmt.Sprintf(`cd %s
+ flock -x -w 600 ./lock -c 'cat result.%s.%s_%d_* >> %s '`,
+ task.SaveDir, task.BkBizID, task.IP, task.Port, mergeFile)
+ _, err = util.RunLocalCmd("bash", []string{"-c", mergeCmd}, "", nil, 1*time.Hour)
+
+ if err != nil {
+ task.Err = fmt.Errorf("mergeCmd:%s err:%v", mergeCmd, err)
+ task.runtime.Logger.Error(task.Err.Error())
+ return
+ }
+ msg := fmt.Sprintf("per-db key extraction merge command:%s", mergeCmd)
+ task.runtime.Logger.Info(msg)
+ task.ResultFile = mergeFile
+ task.ClearTmpKeysFile()
+ task.ClearTmpResultFile()
+ if task.Err != nil {
+ return
+ }
+
+}
+
+// getSSDLdbTool locates the tendisSSD ldb tool
+func (task *RedisInsKeyPatternTask) getSSDLdbTool(fetchLatest bool) (bool, error) {
+
+ if task.TendisType == "" {
+ task.TendisType, task.Err = task.redisCli.GetTendisType()
+ if task.Err != nil {
+ return false, task.Err
+ }
+ }
+
+ if task.TendisType != consts.TendisTypeTendisSSDInsance {
+ task.Err = fmt.Errorf("TendisType != consts.TendisTypeTendisSSDInsance")
+ return false, task.Err
+ }
+
+ ldbTool := "ldb_tendisssd"
+ ldbTool513 := "ldb_with_len.5.13"
+ if task.Version == "" {
+ // read redis_version to tell SSD versions apart and pick the matching ldb tool
+ task.Version, task.Err = task.redisCli.GetTendisVersion()
+ if task.Err != nil {
+ task.Err = fmt.Errorf("getSSDLdbTool GetTendisVersion Err:%v", task.Err)
+ task.runtime.Logger.Error(task.Err.Error())
+ return false, task.Err
+ }
+ }
+ task.runtime.Logger.Info("getSSDLdbTool GetTendisVersion:%s", task.Version)
+
+ baseVersion, subVersion, err := util.VersionParse(task.Version)
+ if err != nil {
+ task.Err = err
+ task.runtime.Logger.Error(task.Err.Error())
+ return false, task.Err
+ }
+ msg := fmt.Sprintf("getSSDLdbTool tendis:%s#%d baseVersion:%d subVersion:%d", task.IP, task.Port, baseVersion,
+ subVersion)
+ task.runtime.Logger.Info(msg)
+ if task.TendisType == consts.TendisTypeTendisSSDInsance && baseVersion == 2008017 && subVersion > 1002016 {
+ if task.WithExpiredKeys {
+ // the extracted keys include already-expired keys
+ ldbTool = filepath.Join(task.SaveDir, ldbTool513)
+ } else {
+ // the extracted keys exclude already-expired keys
+ ldbTool = filepath.Join(task.SaveDir, ldbTool)
+ }
+ } else {
+ // older tendis ssd versions:
+ // their ldb does not support the --without_expired flag
+ task.WithExpiredKeys = true
+ }
+
+ _, err = os.Stat(ldbTool)
+ if err != nil {
+ task.Err = fmt.Errorf("%s %v", ldbTool, err)
+ task.runtime.Logger.Error(task.Err.Error())
+ return false, task.Err
+ }
+
+ err = os.Chmod(ldbTool, 0755)
+ if err != nil {
+ task.Err = fmt.Errorf("getSSDLdbTool chmod +x ldbTool:%s failed:err:%v", ldbTool, err)
+ return false, task.Err
+ }
+ task.LdbTendisTool = ldbTool
+ task.runtime.Logger.Info("getSSDLdbTool success,ldbTool:%s", task.LdbTendisTool)
+
+ return true, nil
+}
+
+// tendisSSDAllKeys extracts all tendisSSD keys
+func (task *RedisInsKeyPatternTask) tendisSSDAllKeys() {
+ task.getSSDLdbTool(false)
+ if task.Err != nil {
+ return
+ }
+ task.KeysFile = filepath.Join(task.SaveDir, fmt.Sprintf("%s_%d.keys", task.IP, task.Port))
+ // set the final result file name
+ task.setResultFile()
+ if task.Err != nil {
+ return
+ }
+ var getKeysCmd string
+ rocksdbFile := filepath.Join(task.DataDir, "rocksdb")
+ _, err := os.Stat(rocksdbFile)
+ if os.IsNotExist(err) {
+ task.Err = fmt.Errorf("tendisSSDAllKeys %s not exists", rocksdbFile)
+ task.runtime.Logger.Error(task.Err.Error())
+ return
+ }
+
+ if !task.WithExpiredKeys {
+ segStart := ""
+ segEnd := ""
+ if task.SegStart >= 0 && task.SegEnd > 0 {
+ segStart = fmt.Sprintf(" --start_segment=%d ", task.SegStart)
+ segEnd = fmt.Sprintf(" --end_segment=%d ", task.SegEnd)
+ }
+ getKeysCmd = fmt.Sprintf(
+ "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/redis/bin/deps;%s --db=%s --without_expired %s %s scan > %s",
+ task.LdbTendisTool, rocksdbFile, segStart, segEnd, task.KeysFile)
+ } else {
+ getKeysCmd = fmt.Sprintf("export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/redis/bin/deps;%s --db=%s scan > %s",
+ task.LdbTendisTool, rocksdbFile, task.KeysFile)
+ }
+ task.runtime.Logger.Info("getKeysCmd command:%s", getKeysCmd)
+
+ maxTimes := 5
+ var cmdRet string
+ for maxTimes > 0 {
+ maxTimes--
+ task.Err = nil
+ cmdRet, err = util.RunLocalCmd("bash", []string{"-c", getKeysCmd}, "", nil, 24*time.Hour)
+ if err != nil {
+ task.Err = err
+ task.runtime.Logger.Warn("tendisSSDAllKeys ldb fail,err:%v,sleep 60s and retry ...", task.Err)
+ time.Sleep(1 * time.Minute)
+ continue
+ }
+ if cmdRet != "" {
+ task.Err = errors.New(cmdRet)
+ task.runtime.Logger.Warn("tendisSSDAllKeys ldb fail,err:%v,sleep 60s and retry ...", task.Err)
+ time.Sleep(1 * time.Minute)
+ continue
+ }
+ task.runtime.Logger.Info("tendisSSDAllKeys get keysFile:%s success", task.KeysFile)
+ break
+ }
+ if task.Err != nil {
+ task.runtime.Logger.Error(task.Err.Error())
+ return
+ }
+
+ var grepExtra string
+ grepPattern := task.getSafeRegexPattern(task.KeyWhiteRegex)
+ if task.Err != nil {
+ return
+ }
+ if grepPattern != "" && grepPattern != ".*" {
+ grepExtra = fmt.Sprintf(` | { grep -E %q || true; }`, grepPattern)
+ }
+ grepPattern = task.getSafeRegexPattern(task.KeyBlackRegex)
+ if task.Err != nil {
+ return
+ }
+ if grepPattern != "" && grepPattern != ".*" {
+ grepExtra = fmt.Sprintf(` %s | { grep -vE %q || true; }`, grepExtra, grepPattern)
+ }
+
+ grepCmd := fmt.Sprintf(`awk '{ print $3}' %s %s > %s`, task.KeysFile, grepExtra, task.ResultFile)
+ task.runtime.Logger.Info("grepCommand:%s", grepCmd)
+
+ cmdRet, err = util.RunLocalCmd("bash", []string{"-c", grepCmd}, "", nil, 48*time.Hour)
+ if err != nil {
+ task.Err = err
+ return
+ }
+ task.runtime.Logger.Info("grepCommand:%s success", grepCmd)
+ util.RunLocalCmd("bash", []string{"-c",
fmt.Sprintf("chown -R mysql.mysql %s", task.SaveDir)}, "", nil, 10*time.Second) +} + +// tendisCacheAllKeys 获取所有key +// NOCC:golint/fnsize(设计如此) +func (task *RedisInsKeyPatternTask) tendisCacheAllKeys() { + task.GetRedisShakeBin(false) + if task.Err != nil { + return + } + + value := `conf.version = 1 +id = {{SHAKE_ID}} +log.level = info +log.file ={{LOG_FILE}} +pid_path={{PID_PATH}} +http_profile = {{HTTP_PROFIILE}} +system_profile = {{SYSTEM_PROFILE}} +parallel = 8 +source.rdb.input = {{RDB_FULL_PATH}} +source.rdb.start_segment={{START_SEGMENT}} +source.rdb.end_segment={{END_SEGMENT}} +filter.key.whitelist = {{KEY_WHITELIST}} +filter.key.blacklist = {{KEY_BLACKLIST}} +filter.db.whitelist = 0 +filter.db.blacklist = +decode_only_print_keyname = true +decode_without_expired_keys = true +target.rdb.output = {{RESULT_FULL_PATH}}` + templateFile := filepath.Join(task.SaveDir, fmt.Sprintf("shake.%d.conf", task.Port)) + shakeID := fmt.Sprintf("redis-shake-%d", task.Port) + logFile := filepath.Join(task.SaveDir, fmt.Sprintf("shake.%d.log", task.Port)) + pidPath := filepath.Join(task.SaveDir, "pids") + httpProfile := task.Port + 500 + systemProfile := task.Port + 5000 + rdbFullPath := fmt.Sprintf("%s/dump.rdb", task.DataDir) + segStart := -1 + segEnd := -1 + if task.SegStart != 0 && task.SegEnd != 0 { + segStart = task.SegStart + segEnd = task.SegEnd + } + whitePattern := task.getSafeRegexPattern(task.KeyWhiteRegex) + if whitePattern == ".*" { + whitePattern = "" + } + blackPattern := task.getSafeRegexPattern(task.KeyBlackRegex) + if blackPattern == ".*" { + blackPattern = "" + } + task.setResultFile() + if task.Err != nil { + return + } + value = strings.ReplaceAll(value, "{{SHAKE_ID}}", shakeID) + value = strings.ReplaceAll(value, "{{LOG_FILE}}", logFile) + value = strings.ReplaceAll(value, "{{PID_PATH}}", pidPath) + value = strings.ReplaceAll(value, "{{HTTP_PROFIILE}}", strconv.Itoa(httpProfile)) + value = strings.ReplaceAll(value, "{{SYSTEM_PROFILE}}", strconv.Itoa(systemProfile)) + value = strings.ReplaceAll(value, "{{RDB_FULL_PATH}}", rdbFullPath) + value = strings.ReplaceAll(value, "{{START_SEGMENT}}", strconv.Itoa(segStart)) + value = strings.ReplaceAll(value, "{{END_SEGMENT}}", strconv.Itoa(segEnd)) + value = strings.ReplaceAll(value, "{{KEY_WHITELIST}}", whitePattern) + value = strings.ReplaceAll(value, "{{KEY_BLACKLIST}}", blackPattern) + value = strings.ReplaceAll(value, "{{RESULT_FULL_PATH}}", task.ResultFile) + pidFile := filepath.Join(pidPath, shakeID+".pid") + if _, err := os.Stat(pidFile); err == nil { + task.clearPidFile(pidFile) + task.runtime.Logger.Info("tendisCacheAllKeys: clearPidFile %s success", pidFile) + } + + err := ioutil.WriteFile(templateFile, []byte(value), 0755) + if err != nil { + task.Err = fmt.Errorf("ioutil.WriteFile fail,file:%s,err:%v", templateFile, err) + return + } + getKeyCmdNew := fmt.Sprintf("%s -conf=%s -type=decode", task.RedisShakeTool, templateFile) + task.runtime.Logger.Info("getKey command:%s", getKeyCmdNew) + + var cmdRet string + var msg string + maxRetryTimes := 5 + for maxRetryTimes > 0 { + maxRetryTimes-- + err = nil + cmdRet, err = util.RunLocalCmd("bash", []string{"-c", getKeyCmdNew}, "", nil, 24*time.Hour) + if err != nil && (strings.Contains(cmdRet, "address already in use") || strings.Contains(err.Error(), + "address already in use")) { + msg = fmt.Sprintf("command:%s port address already in use,retry...", getKeyCmdNew) + task.runtime.Logger.Error(msg) + value = strings.ReplaceAll(value, fmt.Sprintf("http_profile = %d", httpProfile), 
fmt.Sprintf("http_profile = %d", + httpProfile+500)) + value = strings.ReplaceAll(value, fmt.Sprintf("system_profile = %d", systemProfile), + fmt.Sprintf("system_profile = %d", systemProfile+500)) + httpProfile += 500 + systemProfile += 500 + ioutil.WriteFile(templateFile, []byte(value), 0755) + continue + } else if err != nil { + msg = fmt.Sprintf("command:%s err:%v,retry...", getKeyCmdNew, err) + task.runtime.Logger.Error(msg) + + time.Sleep(5 * time.Second) + continue + } + break + } + if err != nil { + task.Err = fmt.Errorf("command:%s failed,err:%v,cmdRet:%s", getKeyCmdNew, err, cmdRet) + task.runtime.Logger.Error(task.Err.Error()) + return + } + task.runtime.Logger.Info("tendisCacheAllKeys :run success command:%s", getKeyCmdNew) + task.ResultFile = task.ResultFile + ".0" + return + +} + +// clearPidFile 删除本地redis-shake pid文件 +func (task *RedisInsKeyPatternTask) clearPidFile(pidFile string) { + pidFile = strings.TrimSpace(pidFile) + if pidFile == "" { + return + } + if strings.Contains(pidFile, "shake") == false { + // 如果pidFile 为空,则下面命令非常危险 + return + } + rmCmd := fmt.Sprintf("cd %s && rm -rf %s 2>/dev/null", filepath.Dir(pidFile), filepath.Base(pidFile)) + task.runtime.Logger.Info(rmCmd) + + util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 10*time.Minute) +} + +// GetTendisKeys 获取tendis(cache/tendisplus/tendisSSD)的keys文件 +func (task *RedisInsKeyPatternTask) GetTendisKeys() { + // 先获取锁,然后获取key + lockFile := filepath.Join(task.SaveDir, fmt.Sprintf("lock.%s.%d", task.IP, task.Port)) + + msg := fmt.Sprintf("try to get filelock:%s,addr:%s", lockFile, task.Addr()) + task.runtime.Logger.Info(msg) + + flock := flock.New(lockFile) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Hour) + defer cancel() + locked, err := flock.TryLockContext(ctx, 10*time.Second) + if err != nil { + task.Err = fmt.Errorf("try to get filelock fail,err:%v,addr:%s", err, task.Addr()) + task.runtime.Logger.Error(task.Err.Error()) + return + } + if locked == false { + return + } + defer flock.Unlock() + + msg = fmt.Sprintf("try to get filelock:%s success,starting getTendisKeys,addr:%s", lockFile, task.Addr()) + task.runtime.Logger.Info(msg) + + task.GetMasterData() + if task.Err != nil { + return + } + + // 如果key模式(白名单)中所有key都要求精确匹配,则无需去提取 + if task.IsAllKeyNamesInWhiteRegex() { + task.getKeysFromRegex() + return + } + + if task.TendisType == consts.TendisTypeRedisInstance { + task.GetInsFullData() + if task.Err != nil { + return + } + task.tendisCacheAllKeys() + } else if task.TendisType == consts.TendisTypeTendisplusInsance { + task.tendisplusAllKeys() + } else if task.TendisType == consts.TendisTypeTendisSSDInsance { + task.tendisSSDAllKeys() + } else { + task.Err = fmt.Errorf("unknown db type:%s,ip:%s,port:%d", task.TendisType, task.IP, task.Port) + task.runtime.Logger.Error(task.Err.Error()) + return + } +} + +// getKeysFromRegex 根据正则获取key +func (task *RedisInsKeyPatternTask) getKeysFromRegex() { + task.setResultFile() + file, err := os.OpenFile(task.ResultFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0755) + if err != nil { + task.Err = fmt.Errorf("getKeysFromRegex os.OpenFile fail,err:%v,resultFile:%s", err, task.ResultFile) + task.runtime.Logger.Error(task.Err.Error()) + return + } + datawriter := bufio.NewWriter(file) + defer file.Close() + defer datawriter.Flush() + + scanner := bufio.NewScanner(strings.NewReader(task.KeyWhiteRegex)) + for scanner.Scan() { + tmpPattern := scanner.Text() + if tmpPattern == "" { + continue + } + tmpPattern = strings.TrimPrefix(tmpPattern, "^") + 
tmpPattern = strings.TrimSuffix(tmpPattern, "$") + tmpPattern = strings.TrimSpace(tmpPattern) + if tmpPattern == "" { + continue + } + datawriter.WriteString(tmpPattern + "\n") + } + if err := scanner.Err(); err != nil { + task.Err = fmt.Errorf("getKeysFromRegex scanner.Err:%v,KeyWhiteRegex:%s", err, task.KeyWhiteRegex) + task.runtime.Logger.Error(task.Err.Error()) + return + } +} + +// IsAllKeyNamesInWhiteRegex 是否所有key名都已在 keyWhiteRegex 中,也就是 keyWhiteRegex中每一行都是 ^$包裹的,如 ^hello$、^world$ +func (task *RedisInsKeyPatternTask) IsAllKeyNamesInWhiteRegex() bool { + lines := strings.Split(task.KeyWhiteRegex, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + if !strings.HasPrefix(line, "^") { + return false + } + if !strings.HasSuffix(line, "$") { + return false + } + } + return true +} + +// TransferToBkrepo 上传keys文件到蓝盾制品库 +func (task *RedisInsKeyPatternTask) TransferToBkrepo() { + + filepath := task.ResultFile + str1 := strings.Split(filepath, "/") + filename := str1[len(str1)-1] + targetURL := fmt.Sprintf(task.FileServer.URL + "/generic/" + task.FileServer.Project + "/" + task.FileServer.Bucket + + task.Path + "/" + filename) + response, err := util.UploadFile(filepath, targetURL, task.FileServer.Username, task.FileServer.Password) + if err != nil { + err = fmt.Errorf("上传文件 %s 到 %s 失败:%v", filepath, targetURL, err) + task.runtime.Logger.Error(err.Error()) + task.Err = err + } + bodyBytes, err := ioutil.ReadAll(response.Body) + if err != nil { + task.runtime.Logger.Error(err.Error()) + task.Err = err + } + resmsg := fmt.Sprintf("response %s", bodyBytes) + task.runtime.Logger.Info(resmsg) + +} + +// ClearTmpKeysFile 删除本地keys file +func (task *RedisInsKeyPatternTask) ClearTmpKeysFile() { + if strings.Contains(task.KeysFile, task.IP) == false { + return + } + rmCmd := fmt.Sprintf("cd %s && rm result.%s.%s_%d_*", task.SaveDir, + task.BkBizID, task.IP, task.Port) + task.runtime.Logger.Info("ClearKeysFile: %s", rmCmd) + _, err := util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 10*time.Minute) + if err != nil { + err = fmt.Errorf("删除本地keys file失败,err:%v", err) + task.runtime.Logger.Error(err.Error()) + task.Err = err + return + } + +} + +// ClearTmpResultFile 删除本地result file +func (task *RedisInsKeyPatternTask) ClearTmpResultFile() { + if strings.Contains(task.ResultFile, task.IP) == false { + return + } + rmCmd := fmt.Sprintf("cd %s && rm %s.%s_%d_*", task.SaveDir, + task.BkBizID, task.IP, task.Port) + task.runtime.Logger.Info("ClearResultFile: %s", rmCmd) + _, err := util.RunLocalCmd("bash", []string{"-c", rmCmd}, "", nil, 10*time.Minute) + if err != nil { + err = fmt.Errorf("删除本地result file失败,err:%v", err) + task.runtime.Logger.Error(err.Error()) + task.Err = err + return + } +} + +// RedisInsTask redis 原子任务 +type RedisInsTask struct { + BkBizID string `json:"bk_biz_id"` + Domain string `json:"domain"` + IP string `json:"ip"` + Port int `json:"port"` + Password string `json:"password"` + TendisType string `json:"tendis_type"` + Version string `json:"version"` // 区分ssd版本差异,需要使用对应的ldb工具 + Role string `json:"role"` + DataDir string `json:"data_dir"` + DataSize uint64 `json:"data_size"` // 设置并发度 + SaveDir string `json:"save_dir"` + redisCli *myredis.RedisClient `json:"-"` // NOCC:vet/vet(设计如此) + runtime *jobruntime.JobGenericRuntime + Err error `json:"-"` +} + +// NewRedisInsTask new +func NewRedisInsTask(bkBizID, domain, ip string, port int, password, saveDir string, + runtime *jobruntime.JobGenericRuntime) (task *RedisInsTask, err error) 
{ + return &RedisInsTask{ + BkBizID: bkBizID, + Domain: domain, + IP: ip, + Port: port, + Password: password, + SaveDir: saveDir, + runtime: runtime, + }, nil + +} + +// Addr string +func (task *RedisInsTask) Addr() string { + return task.IP + ":" + strconv.Itoa(task.Port) +} + +// newConnect 获取节点相关信息 +func (task *RedisInsTask) newConnect() error { + task.redisCli, task.Err = myredis.NewRedisClient(task.Addr(), task.Password, 0, consts.TendisTypeRedisInstance) + if task.Err != nil { + task.Err = fmt.Errorf("newConnect NewRedisClient Err:%v", task.Err) + task.runtime.Logger.Error(task.Err.Error()) + return task.Err + } + task.Role, task.Err = task.redisCli.GetRole() + if task.Err != nil { + task.Err = fmt.Errorf("newConnect GetRole Err:%v", task.Err) + task.runtime.Logger.Error(task.Err.Error()) + return task.Err + } + task.DataDir, task.Err = task.redisCli.GetDir() + if task.Err != nil { + task.Err = fmt.Errorf("newConnect GetDir Err:%v", task.Err) + task.runtime.Logger.Error(task.Err.Error()) + return task.Err + } + task.TendisType, task.Err = task.redisCli.GetTendisType() + if task.Err != nil { + task.Err = fmt.Errorf("newConnect GetTendisType Err:%v", task.Err) + task.runtime.Logger.Error(task.Err.Error()) + return task.Err + } + // 获取 redis_version 以区分ssd版本差异,决定使用不同的ldb工具 + task.Version, task.Err = task.redisCli.GetTendisVersion() + if task.Err != nil { + task.Err = fmt.Errorf("newConnect GetTendisVersion Err:%v", task.Err) + task.runtime.Logger.Error(task.Err.Error()) + return task.Err + } + + // 获取数据量大小 + if task.TendisType == consts.TendisTypeRedisInstance { + task.DataSize, task.Err = task.redisCli.RedisInstanceDataSize() + } else if task.TendisType == consts.TendisTypeTendisplusInsance { + task.DataSize, task.Err = task.redisCli.TendisplusDataSize() + } else if task.TendisType == consts.TendisTypeTendisSSDInsance { + task.DataSize, task.Err = task.redisCli.TendisSSDDataSize() + } + if task.Err != nil { + task.Err = fmt.Errorf("newConnect Err:%v", task.Err) + task.runtime.Logger.Error(task.Err.Error()) + return task.Err + } + return nil +} + +// RedisInsBgsave 执行bgsave,并等待完成 +func (task *RedisInsTask) RedisInsBgsave() { + nowtime := time.Now().Local().Format(consts.FilenameTimeLayout) + mes := fmt.Sprintf("%s-redis-%s-%s-%d-%s.rdb", + task.BkBizID, task.Role, task.IP, task.Port, nowtime) + task.runtime.Logger.Info(mes) + task.Err = task.redisCli.BgSaveAndWaitForFinish() + if task.Err != nil { + err := fmt.Sprintf("执行bgsave失败:err:%v", task.Err) + task.runtime.Logger.Error(err) + } +} + +// ConnRedis 连接redis +func (task *RedisInsTask) ConnRedis() { + redisAddr := fmt.Sprintf("%s:%d", task.IP, task.Port) + task.redisCli, task.Err = myredis.NewRedisClient(redisAddr, task.Password, 0, consts.TendisTypeRedisInstance) + if task.Err != nil { + return + } + msg := fmt.Sprintf("%s:%d 连接正常", task.IP, task.Port) + task.runtime.Logger.Info(msg) +} + +// DisconneRedis 与redis断开连接 +func (task *RedisInsTask) DisconneRedis() { + if task.redisCli != nil { + task.redisCli.Close() + } + msg := fmt.Sprintf("%s:%d 断开连接", task.IP, task.Port) + task.runtime.Logger.Info(msg) +} + +// GetMasterData 获取master登录信息 +func (task *RedisInsKeyPatternTask) GetMasterData() { + + msg := fmt.Sprintf("redis:%s#%d get master data ...", task.IP, task.Port) + task.runtime.Logger.Info(msg) + task.ConnRedis() + if task.Err != nil { + return + } + + replInfo, err := task.redisCli.Info("replication") + if err != nil { + task.Err = err + return + } + + role, _ := replInfo["role"] + if role == consts.RedisSlaveRole { + 
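+ // on a slave, derive the master's address from INFO replication (master_host/master_port)
+ // and its password from CONFIG GET masterauth, so deletions always run against the master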
task.MasterAddr = fmt.Sprintf("%s:%s", replInfo["master_host"], replInfo["master_port"])
+ confData, err1 := task.redisCli.ConfigGet("masterauth")
+ if err1 != nil {
+ task.Err = err1
+ return
+ }
+ task.MasterAuth = confData["masterauth"]
+ task.MasterIP = replInfo["master_host"]
+ task.MasterPort = replInfo["master_port"]
+ } else {
+ task.MasterAddr = fmt.Sprintf("%s:%d", task.IP, task.Port)
+ task.MasterAuth = task.Password
+ task.MasterIP = task.IP
+ task.MasterPort = strconv.Itoa(task.Port)
+ }
+
+ // msg = fmt.Sprintf("redisMaster:%s masterAuth:%s", task.MasterAddr, task.MasterAuth)
+ msg = fmt.Sprintf("redisMaster:%s masterAuth:xxxxxx", task.MasterAddr)
+ task.runtime.Logger.Info(msg)
+
+ // verify that the master is reachable with the collected credentials
+ masterCli, err := myredis.NewRedisClient(task.MasterAddr, task.MasterAuth, 0, consts.TendisTypeTendisplusInsance)
+ if err != nil {
+ task.Err = err
+ return
+ }
+ defer masterCli.Close()
+
+ return
+}
+
+// GetRedisSafeDelTool locates the safe key-deletion tool
+func (task *RedisInsKeyPatternTask) GetRedisSafeDelTool() (bool, error) {
+
+ remoteSafeDelTool := "redisSafeDeleteTool"
+ task.SafeDelTool = filepath.Join(task.SaveDir, remoteSafeDelTool)
+ task.runtime.Logger.Info("Get redisSafeDeleteTool")
+ _, err := os.Stat(task.SafeDelTool)
+ if err != nil && os.IsNotExist(err) {
+ task.Err = fmt.Errorf("get redisSafeDeleteTool failed, please check whether it was distributed successfully:err:%v", err)
+ task.runtime.Logger.Error(task.Err.Error())
+ return false, task.Err
+ }
+ util.LocalDirChownMysql(task.SafeDelTool)
+ err = os.Chmod(task.SafeDelTool, 0755)
+ if err != nil {
+ task.Err = fmt.Errorf("chmod +x redisSafeDeleteTool failed:err:%v", err)
+ return false, task.Err
+ }
+ task.runtime.Logger.Info("Get redisSafeDeleteTool success")
+
+ return true, nil
+}
+
+// DelKeysRateLimitV2 safely deletes redis keys with rate limiting
+// NOCC:golint/fnsize(设计如此)
+func (task *RedisInsKeyPatternTask) DelKeysRateLimitV2() {
+ if !task.IsKeysToBeDel {
+ return
+ }
+ msg := fmt.Sprintf("redis:%s#%d start delete keys ...", task.IP, task.Port)
+ task.runtime.Logger.Info(msg)
+
+ task.GetMasterData()
+ if task.Err != nil {
+ return
+ }
+ task.GetRedisSafeDelTool()
+ if task.Err != nil {
+ return
+ }
+
+ fileData, err := os.Stat(task.ResultFile)
+ if err != nil {
+ task.Err = fmt.Errorf("redis:%s#%d keysPattern resultFile:%s os.stat fail,err:%v", task.IP, task.Port,
+ task.ResultFile, err)
+ task.runtime.Logger.Error(task.Err.Error())
+ return
+ }
+ if fileData.Size() == 0 {
+ msg = fmt.Sprintf("redis:%s#%d keysPattern resultFile:%s size==%d,skip delKeys", task.IP, task.Port, task.ResultFile,
+ fileData.Size())
+ task.runtime.Logger.Info(msg)
+ return
+ }
+
+ keyFile, err := os.Open(task.ResultFile)
+ if err != nil {
+ task.Err = fmt.Errorf("DelKeysRateLimit open %s fail,err:%v", task.ResultFile, err)
+ task.runtime.Logger.Error(task.Err.Error())
+ return
+ }
+ defer keyFile.Close()
+
+ // tendisplus/tendisSSD and cache have different default deletion rates.
+ // Only the ssd rate logic needs adding here: deletion always goes through the tool
+ // over the keys file, so it is independent of the storage type.
+ delRateLimit := 10000
+ if task.TendisType == consts.TendisTypeTendisplusInsance {
+ if task.TendisplusDeleteRate >= 10 {
+ delRateLimit = task.TendisplusDeleteRate
+ } else {
+ delRateLimit = 3000
+ }
+ } else if task.TendisType == consts.TendisTypeRedisInstance {
+ if task.DeleteRate >= 10 {
+ delRateLimit = task.DeleteRate
+ } else {
+ delRateLimit = 10000
+ }
+ } else if task.TendisType == consts.TendisTypeTendisSSDInsance {
+ if task.SsdDeleteRate >= 10 {
+ delRateLimit = task.SsdDeleteRate
+ } else {
+ delRateLimit = 3000
+ }
+ }
+
+ var errBuffer bytes.Buffer
+ ctx, cancel := context.WithCancel(context.Background())
+ defer
cancel() + + bigKeyThread := 1000 // 如果hash,hlen>1000,则算big key + threadCnt := 30 + subScanCount := 100 // hscan 中count 个数 + + delCmd := fmt.Sprintf( + // NOCC:tosa/linelength(设计如此) + `%s bykeysfile --dbtype=standalone --redis-addr=%s --redis-password=%s --keys-file=%s --big-key-threashold=%d --del-rate-limit=%d --thread-cnt=%d --sub-scan-count=%d --without-config-cmd`, + task.SafeDelTool, task.MasterAddr, task.MasterAuth, task.ResultFile, bigKeyThread, delRateLimit, threadCnt, + subScanCount) + logCmd := fmt.Sprintf( + // NOCC:tosa/linelength(设计如此) + `%s bykeysfile --dbtype=standalone --redis-addr=%s --redis-password=xxxxx --keys-file=%s --big-key-threashold=%d --del-rate-limit=%d --thread-cnt=%d --sub-scan-count=%d --without-config-cmd`, + task.SafeDelTool, task.MasterAddr, task.ResultFile, bigKeyThread, delRateLimit, threadCnt, subScanCount) + task.runtime.Logger.Info(logCmd) + + cmd := exec.CommandContext(ctx, "bash", "-c", delCmd) + stdout, _ := cmd.StdoutPipe() + cmd.Stderr = &errBuffer + + if err = cmd.Start(); err != nil { + err = fmt.Errorf("DelKeysRateLimitV2 cmd.Start fail,err:%v", err) + task.runtime.Logger.Error(err.Error()) + task.Err = err + return + } + + scanner := bufio.NewScanner(stdout) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + // 不断打印进度 + m := scanner.Text() + if strings.Contains(m, `"level":"error"`) == true { + err = errors.New(m) + task.runtime.Logger.Info(m) + continue + } + m = m + ";" + task.redisCli.Addr + task.runtime.Logger.Info(m) + } + if err != nil { + task.Err = err + return + } + + if err = cmd.Wait(); err != nil { + err = fmt.Errorf("DelKeysRateLimitV2 cmd.Wait fail,err:%v", err) + task.runtime.Logger.Error(err.Error()) + task.Err = err + return + } + errStr := errBuffer.String() + errStr = strings.TrimSpace(errStr) + if len(errStr) > 0 { + err = fmt.Errorf("DelKeysRateLimitV2 fail,err:%s", errStr) + task.runtime.Logger.Error(err.Error()) + task.Err = err + return + } +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_keyspattern_delete.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_keyspattern_delete.go new file mode 100644 index 0000000000..23a453c3bb --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_keyspattern_delete.go @@ -0,0 +1,22 @@ +package atomredis + +import "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + +// TendisKeysPatternDelete 按正则删除key,这里会有很多代码和TendisKeysPattern一样 +// 所以在 TendisKeysPattern 里实现按正则删除key,为了flow区分,加了tendis_keysdelete_regex 原子任务 +type TendisKeysPatternDelete struct { + TendisKeysPattern +} + +// 无实际作用,仅确保实现了 jobruntime.JobRunner 接口 +var _ jobruntime.JobRunner = (*TendisKeysPatternDelete)(nil) + +// NewTendisKeysPatternDelete new +func NewTendisKeysPatternDelete() jobruntime.JobRunner { + return &TendisKeysPatternDelete{} +} + +// Name 原子任务名 +func (job *TendisKeysPatternDelete) Name() string { + return "tendis_keysdelete_regex" +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_migrate_slots.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_migrate_slots.go new file mode 100644 index 0000000000..a8e5d25bdd --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_migrate_slots.go @@ -0,0 +1,1210 @@ +package atomredis + +import ( + "context" + "dbm-services/redis/db-tools/dbactuator/models/myredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + 
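+ // An illustrative payload for this job (field names from TendisPlusMigrateSlotsParams
+ // below; values are made up):
+ //   {"src_node":{"ip":"1.1.1.1","port":30000,"password":"xxx"},
+ //    "dst_node":{"ip":"2.2.2.2","port":30000,"password":"xxx"},
+ //    "is_delete_node":false,"migrate_specified_slot":true,"slots":"0-4095"}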
"dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "errors" + "fmt" + "math" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-playground/validator/v10" +) + +// TendisPlusMigrateSlotsParams slots 迁移参数 +type TendisPlusMigrateSlotsParams struct { + SrcNode TendisPlusNodeItem `json:"src_node" validate:"required"` + DstNode TendisPlusNodeItem `json:"dst_node" validate:"required"` + // 用于缩容场景,迁移DstNode slot ,然后删除节点 + IsDeleteNode bool `json:"is_delete_node"` + // 迁移特定的slot,一般用于热点key情况,把该key所属slot迁移到单独节点 + MigrateSpecifiedSlot bool `json:"migrate_specified_slot" ` + // 如 0-4095 6000 6002-60010, + Slots string `json:"slots"` +} + +// TendisPlusMigrateSlots slots 迁移 +type TendisPlusMigrateSlots struct { + params TendisPlusMigrateSlotsParams + runtime *jobruntime.JobGenericRuntime + Err error `json:"_"` +} + +// TendisPlusNodeItem 节点信息 +type TendisPlusNodeItem struct { + IP string `json:"ip"` + Port int `json:"port"` + Password string `json:"password"` + Role string `json:"role"` + TendisType string `json:"tendis_type"` + redisCli *myredis.RedisClient `json:"-"` // NOCC:vet/vet(设计如此) +} + +// 无实际作用,仅确保实现了 jobruntime.JobRunner 接口 +var _ jobruntime.JobRunner = (*TendisPlusMigrateSlots)(nil) + +// NewTendisPlusMigrateSlots new +func NewTendisPlusMigrateSlots() jobruntime.JobRunner { + return &TendisPlusMigrateSlots{} +} + +// Init 初始化 +func (job *TendisPlusMigrateSlots) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed, err:%+v", err)) + return err + } + + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("TendisPlusMigrateSlots Init params validate failed,err:%v,params:%+v", err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("TendisPlusMigrateSlots Init params validate failed,err:%v,params:%+v", err, job.params) + return err + } + } + if job.params.MigrateSpecifiedSlot && job.params.Slots == "" { + + err = fmt.Errorf("MigrateSpecifiedSlot=%v 和 slots:%s 指定迁移的slot不能为空", + job.params.MigrateSpecifiedSlot, job.params.Slots) + job.runtime.Logger.Error(err.Error()) + return err + + } + + job.runtime.Logger.Info("tendisplus migrate slots init success") + + return nil + +} + +// Name 原子任务名 +func (job *TendisPlusMigrateSlots) Name() string { + return "tendisplus_migrate_slots" + +} + +// Retry 重试次数 +func (job *TendisPlusMigrateSlots) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *TendisPlusMigrateSlots) Rollback() error { + return nil + +} + +// Run 执行逻辑 +func (job *TendisPlusMigrateSlots) Run() error { + job.checkNodeInfo() + if job.Err != nil { + return job.Err + } + if job.params.IsDeleteNode { + var toBeDelNodesAddr []string + toBeDelNodesAddr = append(toBeDelNodesAddr, job.dstNodeAddr()) + err := job.MigrateSlotsFromToBeDelNode(toBeDelNodesAddr) + if err != nil { + return err + } + return nil + } + + job.dstClusterMeetSrc() + if job.Err != nil { + return job.Err + } + + if job.params.MigrateSpecifiedSlot { + slots, _, _, _, err := myredis.DecodeSlotsFromStr(job.params.Slots, " ") + if err != nil { + job.Err = err + return job.Err + } + if len(slots) == 0 { + job.Err = fmt.Errorf("MigrateSpecifiedSlot=%v 和 slots:%s 指定迁移的slot不能为空", + job.params.MigrateSpecifiedSlot, job.params.Slots) + 
+ if len(slots) == 0 {
+ job.Err = fmt.Errorf("MigrateSpecifiedSlot=%v but slots:%s, the slots to migrate must not be empty",
+ job.params.MigrateSpecifiedSlot, job.params.Slots)
+ job.runtime.Logger.Error(job.Err.Error())
+ return job.Err
+ }
+ job.MigrateSpecificSlots(job.srcNodeAddr(), job.dstNodeAddr(), slots, 20*time.Minute)
+ if job.Err != nil {
+ return job.Err
+ }
+ } else {
+ err := job.ReBalanceCluster()
+ if err != nil {
+ job.Err = err
+ return job.Err
+ }
+ }
+
+ return nil
+}
+
+// srcNodeAddr source node address
+func (job *TendisPlusMigrateSlots) srcNodeAddr() string {
+ return job.params.SrcNode.IP + ":" + strconv.Itoa(job.params.SrcNode.Port)
+}
+
+// dstNodeAddr destination node address
+func (job *TendisPlusMigrateSlots) dstNodeAddr() string {
+ return job.params.DstNode.IP + ":" + strconv.Itoa(job.params.DstNode.Port)
+}
+
+// dstClusterMeetSrc joins the new node into the source cluster
+func (job *TendisPlusMigrateSlots) dstClusterMeetSrc() {
+ var err error
+ nodePasswordOnMachine, err := myredis.GetPasswordFromLocalConfFile(job.params.SrcNode.Port)
+ if err != nil {
+ job.Err = fmt.Errorf("SrcNode GetPassword GetPasswordFromLocalConfFile failed: %+v", err)
+ job.runtime.Logger.Error(job.Err.Error())
+ return
+ }
+
+ // verify the passwords match
+ if job.params.SrcNode.Password != nodePasswordOnMachine {
+ job.Err = fmt.Errorf("SrcNode password != nodePasswordOnMachine: SrcNodePassword is %s nodePasswordOnMachine is %s",
+ job.params.SrcNode.Password, nodePasswordOnMachine)
+ job.runtime.Logger.Error(job.Err.Error())
+ return
+ }
+
+ // verify the passwords match
+ if job.params.SrcNode.Password != job.params.DstNode.Password {
+ job.Err = fmt.Errorf("SrcNode password != DstNode password: SrcNodePassword is %s DstNodePassword is %s",
+ job.params.SrcNode.Password, job.params.DstNode.Password)
+ job.runtime.Logger.Error(job.Err.Error())
+ return
+ }
+
+ job.runtime.Logger.Info("dstClusterMeetSrc : src password = dst password ")
+ // The cluster that SrcNode belongs to must be in state ok, while the cluster of the newly
+ // added DstNode must be in state fail with cluster_slots_assigned == 0.
+ // These two checks prevent a mixed-up ip/port from meeting two healthy clusters together,
+ // which would corrupt the cluster.
+ srcStatusIsOk, _, err := job.clusterState(job.params.SrcNode.redisCli)
+ if err != nil {
+ job.Err = err
+ job.runtime.Logger.Error(err.Error())
+ return
+ }
+ if !srcStatusIsOk {
+ job.Err = fmt.Errorf("redisCluster:%s cluster_state not ok, please check!", job.srcNodeAddr())
redisCluster", job.srcNodeAddr()) + job.runtime.Logger.Error(job.Err.Error()) + return + } + + // DstNode所属的新增节点集群状态是fail,且cluster_slots_assigend 是0 + dstStateIsfaile, slotsAssigend, err := job.clusterState(job.params.DstNode.redisCli) + if err != nil { + job.Err = err + job.runtime.Logger.Error(err.Error()) + } + if dstStateIsfaile || slotsAssigend != 0 { + job.Err = fmt.Errorf("redisCluster:%s cluster_state not fail or slotsAssigend !=0 please check !!!redisCluster ", + job.dstNodeAddr()) + job.runtime.Logger.Error(job.Err.Error()) + return + } + // cluster meet 新节点加入集群 + _, err = job.params.SrcNode.redisCli.ClusterMeet(job.params.DstNode.IP, strconv.Itoa(job.params.DstNode.Port)) + if err != nil { + job.Err = err + return + } + // 这里 cluster meet 需要点时间,防止后续获取GetClusterNodes信息不全 + time.Sleep(10 * time.Second) + job.runtime.Logger.Info("dstClusterMeetSrc success ") + +} + +// clusterState 集群状态信息 +func (job *TendisPlusMigrateSlots) clusterState(redisCli *myredis.RedisClient) (state bool, + slotsAssigend int, err error) { + clusterInfo, err := redisCli.ClusterInfo() + if err != nil { + err = fmt.Errorf("get cluster info fail:%v", err) + return false, 0, err + } + if clusterInfo.ClusterState == consts.ClusterStateOK && clusterInfo.ClusterSlotsAssigned == consts.TotalSlots { + return true, consts.TotalSlots, nil + } else if clusterInfo.ClusterState == consts.ClusterStateFail && clusterInfo.ClusterSlotsAssigned == 0 { + return false, 0, nil + } + err = fmt.Errorf("get cluster info fail") + return false, 0, err +} + +// checkNodeInfo 验证节点相关信息 +func (job *TendisPlusMigrateSlots) checkNodeInfo() { + // 获取源节点连接&信息 + job.params.SrcNode.redisCli, job.Err = myredis.NewRedisClient(job.srcNodeAddr(), + job.params.SrcNode.Password, 0, consts.TendisTypeRedisInstance) + if job.Err != nil { + job.Err = fmt.Errorf("checkNodeInfo src NewRedisClient Err:%v", job.Err) + job.runtime.Logger.Error(job.Err.Error()) + return + } + + job.params.SrcNode.TendisType, job.Err = job.params.SrcNode.redisCli.GetTendisType() + if job.Err != nil { + job.Err = fmt.Errorf("checkNodeInfo src GetTendisType Err:%v", job.Err) + job.runtime.Logger.Error(job.Err.Error()) + return + } + job.params.SrcNode.Role, job.Err = job.params.SrcNode.redisCli.GetRole() + + if job.Err != nil { + job.Err = fmt.Errorf("checkNodeInfo src GetRole Err:%v", job.Err) + job.runtime.Logger.Error(job.Err.Error()) + return + } + + job.runtime.Logger.Info("checkNodeInfo SrcNode GetTendisType:%s success ", job.params.SrcNode.TendisType) + + // 获取源节点连接&信息 + job.params.DstNode.redisCli, job.Err = myredis.NewRedisClient(job.dstNodeAddr(), + job.params.DstNode.Password, 0, consts.TendisTypeRedisInstance) + if job.Err != nil { + job.Err = fmt.Errorf("checkNodeInfo DstNode NewRedisClient Err:%v", job.Err) + job.runtime.Logger.Error(job.Err.Error()) + return + } + + job.params.DstNode.TendisType, job.Err = job.params.DstNode.redisCli.GetTendisType() + if job.Err != nil { + job.Err = fmt.Errorf("checkNodeInfo DstNode GetTendisType Err:%v", job.Err) + job.runtime.Logger.Error(job.Err.Error()) + return + } + job.runtime.Logger.Info("checkNodeInfo DstNode GetTendisType:%s success ", job.params.DstNode.TendisType) + + job.params.DstNode.Role, job.Err = job.params.DstNode.redisCli.GetRole() + if job.Err != nil { + job.Err = fmt.Errorf("checkNodeInfo dst GetRole Err:%v", job.Err) + job.runtime.Logger.Error(job.Err.Error()) + return + } + + // 源节点和目标节点必须是master,因为迁移指定slot时(解决热点key),需要在master上执行 + if job.params.SrcNode.Role != consts.RedisMasterRole || 
job.params.DstNode.Role != consts.RedisMasterRole { + job.Err = fmt.Errorf("node role != master ,please check ! srcNodeRole is %s,dstNodeRole is %s", + job.params.SrcNode.Role, job.params.DstNode.Role) + job.runtime.Logger.Error(job.Err.Error()) + return + } + + // 由于迁移slot命令和社区不一样,所以必须是tendisplus + if job.params.SrcNode.TendisType != consts.TendisTypeTendisplusInsance || job.params.DstNode.TendisType != + consts.TendisTypeTendisplusInsance { + job.Err = fmt.Errorf("node tendisType != TendisplusInstance ,please check ! srcNodeTendisType is %s"+ + " dsrNodeTendisType is %s", job.params.SrcNode.TendisType, job.params.DstNode.TendisType) + job.runtime.Logger.Error(job.Err.Error()) + } + job.runtime.Logger.Info("checkNodeInfo tendisType success: DstNode tendisType %s", + job.params.DstNode.TendisType) + + return +} + +// ParallelMigrateSpecificSlots 并发执行slot迁移任务 +func (job *TendisPlusMigrateSlots) ParallelMigrateSpecificSlots(migrateList []MigrateSomeSlots) error { + wg := sync.WaitGroup{} + genChan := make(chan MigrateSomeSlots) + retChan := make(chan MigrateSomeSlots) + + limit := len(migrateList) // no limit + for worker := 0; worker < limit; worker++ { + wg.Add(1) + go func() { + defer wg.Done() + for item01 := range genChan { + job.MigrateSpecificSlots(item01.SrcAddr, item01.DstAddr, item01.MigrateSlots, 48*time.Hour) + if job.Err != nil { + item01.Err = job.Err + } + retChan <- item01 + } + }() + } + go func() { + defer close(genChan) + + for _, item02 := range migrateList { + genChan <- item02 + } + }() + + go func() { + wg.Wait() + close(retChan) + }() + + errList := []string{} + for retItem := range retChan { + if retItem.Err != nil { + errList = append(errList, retItem.Err.Error()) + job.Err = fmt.Errorf("srcAddr:%s => dstAddr:%s slotsCount:%d fail", + retItem.SrcAddr, retItem.DstAddr, len(retItem.MigrateSlots)) + job.runtime.Logger.Error(job.Err.Error()) + continue + } + msg := fmt.Sprintf("srcAddr:%s => dstAddr:%s slotsCount:%d success", + retItem.SrcAddr, retItem.DstAddr, len(retItem.MigrateSlots)) + job.runtime.Logger.Info(msg) + } + if len(errList) > 0 { + return errors.New(strings.Join(errList, ";")) + } + return nil +} + +// MigrateSomeSlots ..(为并发迁移slot) +type MigrateSomeSlots struct { + SrcAddr string + DstAddr string + MigrateSlots []int + Err error +} + +// ReBalanceCluster 重新分配slots, +// 将slots尽量均匀的分配到新masterNode(没负责任何slot的master)上 +// NOCC:golint/fnsize(设计如此) +func (job *TendisPlusMigrateSlots) ReBalanceCluster() error { + job.runtime.Logger.Info("start ReBalanceCluster ...") + defer job.runtime.Logger.Info("end ReBalanceCluster ...") + + var msg string + _, err := job.params.SrcNode.redisCli.GetClusterNodes() + if err != nil { + return err + } + + var expected int + allRunningMasters, err := job.params.SrcNode.redisCli.GetRunningMasters() + if err != nil { + return err + } + allRunningCnt := len(allRunningMasters) + + expected = int(float64(consts.DefaultMaxSlots+1) / float64(allRunningCnt)) + + for _, node01 := range allRunningMasters { + nodeItem := node01 + nodeItem.SetBalance(len(nodeItem.Slots) - expected) + nodeItem.SetEndSlotIdx(len(nodeItem.Slots)) + } + totalBalance := 0 + runningMasterList := []*myredis.ClusterNodeData{} + for _, node01 := range allRunningMasters { + nodeItem := node01 + runningMasterList = append(runningMasterList, nodeItem) + totalBalance += nodeItem.Balance() + } + for totalBalance > 0 { + for _, node01 := range allRunningMasters { + nodeItem := node01 + if nodeItem.Balance() < 0 && totalBalance > 0 { + t01 := nodeItem.Balance() - 1 + 
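+ // Worked example for this balancing pass (illustrative): with 16384 slots and 3
+ // running masters, expected = 16384/3 = 5461, so the remainder 16384 - 3*5461 = 1
+ // leaves totalBalance = +1; a node with a negative balance absorbs the extra
+ // slot (balance -5461 -> -5462) until totalBalance reaches 0.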
nodeItem.SetBalance(t01) + totalBalance -= 1 + } + } + } + sort.Slice(runningMasterList, func(i, j int) bool { + a := runningMasterList[i] + b := runningMasterList[j] + return a.Balance() < b.Balance() + }) + + for _, node01 := range runningMasterList { + nodeItem := node01 + msg = fmt.Sprintf("node=>%s balance:%d", nodeItem.Addr, nodeItem.Balance()) + job.runtime.Logger.Info(msg) + } + + migrateTasks := []MigrateSomeSlots{} + dstIdx := 0 + srcidx := len(runningMasterList) - 1 + + for dstIdx < srcidx { + dst := runningMasterList[dstIdx] + src := runningMasterList[srcidx] + + var numSlots float64 + if math.Abs(float64(dst.Balance())) < math.Abs(float64(src.Balance())) { + numSlots = math.Abs(float64(dst.Balance())) + } else { + numSlots = math.Abs(float64(src.Balance())) + } + if numSlots > 0 { + msg = fmt.Sprintf("Moving %f slots from %s to %s,src.endSlotIdx:%d", + numSlots, src.Addr, dst.Addr, src.EndSlotIdx()) + job.runtime.Logger.Info(msg) + task01 := MigrateSomeSlots{ + SrcAddr: src.Addr, + DstAddr: dst.Addr, + MigrateSlots: []int{}, + } + for idx01 := src.EndSlotIdx() - int(numSlots); idx01 < src.EndSlotIdx(); idx01++ { + task01.MigrateSlots = append(task01.MigrateSlots, (src.Slots[idx01])) + } + src.SetEndSlotIdx(src.EndSlotIdx() - int(numSlots)) + migrateTasks = append(migrateTasks, task01) + } + dst.SetBalance(dst.Balance() + int(numSlots)) + src.SetBalance(src.Balance() - int(numSlots)) + msg = fmt.Sprintf("src:%s src.balance:%d,dst:%s dst.balance:%d", + src.Addr, src.Balance(), dst.Addr, dst.Balance()) + job.runtime.Logger.Info(msg) + if dst.Balance() == 0 { + dstIdx++ + } + if src.Balance() == 0 { + srcidx-- + } + } + for _, task01 := range migrateTasks { + msg := fmt.Sprintf("migrate plan=>srcNode:%s dstNode:%s slots:%v", + task01.SrcAddr, task01.DstAddr, myredis.ConvertSlotToShellFormat(task01.MigrateSlots)) + job.runtime.Logger.Info(msg) + } + job.runtime.Logger.Info("migrateTasks:%+v", migrateTasks) + + err = job.ParallelMigrateSpecificSlots(migrateTasks) + if err != nil { + return err + } + + return nil +} + +// MigrateSpecificSlots 迁移slots +// NOCC:golint/fnsize(设计如此) +func (job *TendisPlusMigrateSlots) MigrateSpecificSlots(srcAddr, + dstAddr string, slots []int, timeout time.Duration) { + job.runtime.Logger.Info("MigrateSpecificSlots start... 
srcAddr:%s desrAddr:%s"+ + " slots:%+v", srcAddr, dstAddr, myredis.ConvertSlotToShellFormat(slots)) + + if len(slots) == 0 { + job.Err = fmt.Errorf("MigrateSpecificSlots target slots count == %d ", len(slots)) + job.runtime.Logger.Error(job.Err.Error()) + return + } + + if srcAddr == dstAddr { + job.Err = fmt.Errorf("MigrateSpecificSlots slot srcAddr:%s = dstAddr:%s", srcAddr, dstAddr) + job.runtime.Logger.Error(job.Err.Error()) + return + } + + // 获取tendisplus cluster nodes信息 + clusterNodes, err := job.params.SrcNode.redisCli.GetAddrMapToNodes() + if err != nil { + job.Err = err + return + } + srcNodeInfo, ok := clusterNodes[srcAddr] + if ok == false { + job.Err = fmt.Errorf("MigrateSpecificSlots cluster not include the sre node,sreAddr:%s,clusterAddr:%s", + srcAddr, job.params.SrcNode.redisCli.Addr) + job.runtime.Logger.Error(job.Err.Error()) + return + } + + dstNodeInfo, ok := clusterNodes[dstAddr] + if ok == false { + job.Err = fmt.Errorf("MigrateSpecificSlots cluster not include the sre node,sreAddr:%s,clusterAddr:%s", + srcAddr, job.params.SrcNode.redisCli.Addr) + job.runtime.Logger.Error(job.Err.Error()) + return + } + + // 检查srcNode dstNode是否状态异常 + if len(srcNodeInfo.FailStatus) > 0 || srcNodeInfo.LinkState != consts.RedisLinkStateConnected { + job.Err = fmt.Errorf(` src node is unnormal? + srcAddr:%s,srcNodeFailStatus:%v,srcNodeLinkStatus:%s,`, + srcAddr, srcNodeInfo.FailStatus, srcNodeInfo.LinkState) + job.runtime.Logger.Error(job.Err.Error()) + return + } + if len(dstNodeInfo.FailStatus) > 0 || dstNodeInfo.LinkState != consts.RedisLinkStateConnected { + job.Err = fmt.Errorf(` dst node is unnormal? + srcAddr:%s,dstNodeFailStatus:%v,dstNodeLinkStatus:%s,`, + srcAddr, dstNodeInfo.FailStatus, dstNodeInfo.LinkState) + job.runtime.Logger.Error(job.Err.Error()) + return + } + // job.runtime.Logger.Info("clusterNodes:%+v", clusterNodes) + allBelong, notBelongList, err := job.params.SrcNode.redisCli.IsSlotsBelongMaster(srcAddr, slots) + if err != nil { + job.Err = err + job.runtime.Logger.Error(job.Err.Error()) + return + } + if allBelong == false { + err = fmt.Errorf("MigrateSpecificSlots slots:%s not belong to srcNode:%s", + myredis.ConvertSlotToShellFormat(notBelongList), srcAddr) + job.runtime.Logger.Error(err.Error()) + return + } + dstCli, err := myredis.NewRedisClient(dstAddr, job.params.SrcNode.Password, 0, consts.TendisTypeRedisInstance) + if err != nil { + job.Err = err + job.runtime.Logger.Error(job.Err.Error()) + return + } + defer dstCli.Close() + + srcCli, err := myredis.NewRedisClient(srcAddr, job.params.SrcNode.Password, 0, consts.TendisTypeRedisInstance) + if err != nil { + job.Err = err + job.runtime.Logger.Error(job.Err.Error()) + return + } + defer srcCli.Close() + + srcSlaves, err := srcCli.GetAllSlaveNodesByMasterAddr(srcAddr) + if err != nil { + job.Err = fmt.Errorf("srcAddr:%s get slave fail:%+v", srcAddr, err) + job.runtime.Logger.Error(job.Err.Error()) + return + } + dstSlaves, err := dstCli.GetAllSlaveNodesByMasterAddr(dstAddr) + if err != nil { + job.Err = fmt.Errorf("dstAddr:%s get slave fail:%+v", dstAddr, err) + job.runtime.Logger.Error(job.Err.Error()) + return + } + + cmd := []string{"cluster", "setslot", "importing", srcNodeInfo.NodeID} + for _, slotItem := range slots { + cmd = append(cmd, strconv.Itoa(slotItem)) + } + var importRet interface{} + deleteSlotErrRetryTimes := 1 // 发生slot in deleting错误,则重试,最多重试300次 + otherErrRetryTimes := 1 + for otherErrRetryTimes < 6 && deleteSlotErrRetryTimes < 301 { + msg := fmt.Sprintf("MigrateSpecificSlots %d 
+ msg := fmt.Sprintf("MigrateSpecificSlots %d otherErrRetryTimes %d deleteSlotErrRetryTimes,srcAddr:%s dstAddr:%s"+
+ " migrateCommand:cluster setslot importing %s %s",
+ otherErrRetryTimes, deleteSlotErrRetryTimes, srcAddr, dstAddr,
+ srcNodeInfo.NodeID, myredis.ConvertSlotToShellFormat(slots))
+ job.runtime.Logger.Info(msg)
+ importRet, err = dstCli.DoCommand(cmd, 0)
+ if err != nil && strings.Contains(err.Error(), "slot in deleting") {
+ msg = fmt.Sprintf(`slot in deleting : MigrateSpecificSlots execute cluster setslot importing fail,
+ err:%v,srcAddr:%s,dstAddr:%s,cmd: cluster setslot importing %s %s`, err, srcAddr, dstAddr, srcNodeInfo.NodeID, myredis.ConvertSlotToShellFormat(slots))
+ job.runtime.Logger.Warn(msg)
+ time.Sleep(1 * time.Minute)
+ deleteSlotErrRetryTimes++
+ continue
+ } else if err != nil && strings.Contains(err.Error(), "slot not empty") {
+ dstCli.ClusterClear()
+ srcCli.ClusterClear()
+ msg = fmt.Sprintf(`slot not empty : MigrateSpecificSlots execute cluster setslot importing fail,
+ err:%v,srcAddr:%s,dstAddr:%s,cmd: cluster setslot importing %s %s`, err, srcAddr, dstAddr, srcNodeInfo.NodeID, myredis.ConvertSlotToShellFormat(slots))
+ job.runtime.Logger.Warn(msg)
+ time.Sleep(1 * time.Minute)
+ deleteSlotErrRetryTimes++
+ continue
+ } else if err != nil {
+ err = fmt.Errorf(`MigrateSpecificSlots execute cluster setslot importing fail,
+ err:%v,srcAddr:%s,dstAddr:%s,cmd: cluster setslot importing %s %s`, err, srcAddr, dstAddr, srcNodeInfo.NodeID, myredis.ConvertSlotToShellFormat(slots))
+ job.runtime.Logger.Warn(err.Error())
+ time.Sleep(1 * time.Minute)
+ otherErrRetryTimes++
+ continue
+ }
+ break
+
+ }
+ if (otherErrRetryTimes == 6 || deleteSlotErrRetryTimes == 301) && err != nil {
+ job.Err = fmt.Errorf("retry limit reached (otherErrRetryTimes:%d deleteSlotErrRetryTimes:%d), still failing:%v",
+ otherErrRetryTimes, deleteSlotErrRetryTimes, err)
+ job.runtime.Logger.Error(job.Err.Error())
+ return
+ }
+
+ importingTaskID := importRet.(string)
+ job.runtime.Logger.Info("importingTaskID:%v", importingTaskID)
+ _, _, err = job.confirmMigrateSlotsStatus(srcNodeInfo, dstNodeInfo, importingTaskID, slots, timeout)
+ if err != nil && err.Error() == "migrate fail" {
+ // migrate fail, let's retry
+ time.Sleep(2 * time.Minute) // if the cluster topology changed, give it time to propagate
+ job.Err = err
+ err = job.retryMigrateSpecSlots(srcNodeInfo, dstNodeInfo, job.params.SrcNode.Password,
+ importingTaskID, srcSlaves, dstSlaves, slots, timeout)
+ if err != nil {
+ job.Err = fmt.Errorf("retryMigrateSpecSlots fail: %v", err)
+ job.runtime.Logger.Error(job.Err.Error())
+ }
+ }
+ return
+}
+
+// confirmMigrateSlotsStatus runs cluster setslot info on dstAddr to confirm whether the slots migrated ok
+func (job *TendisPlusMigrateSlots) confirmMigrateSlotsStatus(
+ srcNodeInfo, dstNodeInfo *myredis.ClusterNodeData,
+ taskID string, migrateSlots []int, timeout time.Duration) (mySuccImport, myFailImport []int, err error) {
+
+ mySuccImport = []int{}
+ myFailImport = []int{}
+ var importing, successImport, failImport, unknow []int
+ timeLimit := int64(timeout.Seconds()) / 30
+
+ for {
+ time.Sleep(30 * time.Second) // log progress every 30 seconds
+ if timeLimit == 0 {
+ break
+ }
+ dstSetSlotInfo, err := myredis.GetClusterSetSlotInfo(dstNodeInfo.Addr, job.params.SrcNode.Password)
+ if err != nil {
+
+ return mySuccImport, myFailImport, err
+ }
+ importing, successImport, failImport, unknow = dstSetSlotInfo.GetDstRedisSlotsStatus(migrateSlots)
+ // the destination node may run several migration tasks; we only care about the slots of the current task
+ mySuccImport = util.IntSliceInter(migrateSlots, successImport)
+ myFailImport = util.IntSliceInter(migrateSlots, failImport)
+
+ if len(importing) > 0 {
+ //
等待所有importing结束,尽管正在迁移的slots不是我当前任务的slot,依然等待 + job.runtime.Logger.Info("confirmMigrateSlotsStatus there are some slots still importing on the dstNode"+ + "importingCount:%d srcNodeAddr:%s srcNodeID:%s dstNodeAddr:%s dstNodeID:%s importingTaskID:%s ", + len(importing), srcNodeInfo.Addr, srcNodeInfo.NodeID, dstNodeInfo.Addr, dstNodeInfo.NodeID, taskID) + timeLimit-- + continue + } else if len(myFailImport) > 0 { + job.runtime.Logger.Error("confirmMigrateSlotsStatus there are some slots migrating fail on the dstNode"+ + "failImportCount:%d srcNodeAddr:%s srcNodeID:%s dstNodeAddr:%s dstNodeID:%s failImportSlot:%v importingTaskID:%s ", + len(myFailImport), srcNodeInfo.Addr, srcNodeInfo.NodeID, dstNodeInfo.Addr, dstNodeInfo.NodeID, + myredis.ConvertSlotToStr(myFailImport), taskID) + err = errors.New("migrate fail") + return mySuccImport, myFailImport, err + } + job.runtime.Logger.Info("confirmMigrateSlotsStatus success "+ + "slots numbers:%d,srcNodeAddr:%s,dstNodeAddr:%s,dstNodeID:%s,slots:%s,importingTaskID:%s ,unknow:%d", + len(successImport), srcNodeInfo.Addr, dstNodeInfo.Addr, dstNodeInfo.NodeID, + myredis.ConvertSlotToStr(migrateSlots), taskID, len(unknow)) + break + } + return mySuccImport, myFailImport, nil + +} + +// retryMigrateSpecSlots TODO +// 1. 检查src master是否failover了? 如果发生了failover,找到new src master +// 2. 检查dst master是否failover了? 如果发生了failover,找到new dst master +// NOCC:golint/fnsize(设计如此) +func (job *TendisPlusMigrateSlots) retryMigrateSpecSlots( + srcNodeInfo, dstNodeInfo *myredis.ClusterNodeData, passwd string, taskID string, + srcSlaves, dstSlaves []*myredis.ClusterNodeData, slots []int, timeout time.Duration, +) (err error) { + var msg string + newSrcNode, srcFailovered, err := job.findNewMasterWhenFailover(srcNodeInfo, passwd, srcSlaves, slots) + if err != nil { + return err + } + newDstNode, dstFailovered, err := job.findNewMasterWhenFailover(dstNodeInfo, passwd, dstSlaves, slots) + if err != nil { + return err + } + + newSrcCli, err := myredis.NewRedisClient(newSrcNode.Addr, passwd, 0, consts.TendisTypeRedisInstance) + if err != nil { + return err + } + defer newSrcCli.Close() + + newDstCli, err := myredis.NewRedisClient(newDstNode.Addr, passwd, 0, consts.TendisTypeRedisInstance) + if err != nil { + return err + } + defer newDstCli.Close() + if srcFailovered == true { + msg = fmt.Sprintf("migrate slots,srcNodeAddr:%s failovered,newSrcNode:%s", srcNodeInfo.Addr, newSrcNode.Addr) + job.runtime.Logger.Info(msg) + } + if dstFailovered == true { + msg = fmt.Sprintf("migrate slots,dstNodeAddr:%s failovered,newDstNode:%s", dstNodeInfo.Addr, newDstNode.Addr) + job.runtime.Logger.Info(msg) + } + if srcFailovered == true || dstFailovered == true { + // 如果发生了failover,重试迁移前,先做一些清理 + newSrcCli.ClusterClear() + newDstCli.ClusterClear() + } + cmd := []interface{}{"cluster", "setslot", "restart", newSrcNode.NodeID} + for _, slotItem := range slots { + cmd = append(cmd, slotItem) + } + var importRet interface{} + deleteSlotErrRetryTimes := 0 // 发生slot in deleting错误,则重试,最多重试300次(5小时) + otherErrRetryTimes := 0 + for otherErrRetryTimes < 5 && deleteSlotErrRetryTimes < 300 { + // 打印执行的迁移命令 + msg := fmt.Sprintf("retryMigrateSlots %d times,srcAddr:%s dstAddr:%s migrateCommand:cluster setslot restart %s %s", + otherErrRetryTimes, newSrcNode.Addr, newDstNode.Addr, newSrcNode.NodeID, myredis.ConvertSlotToShellFormat(slots)) + job.runtime.Logger.Info(msg) + + importRet, err = newDstCli.InstanceClient.Do(context.TODO(), cmd...).Result() + + if err != nil && strings.Contains(err.Error(), "slot in 
deleting") == true { + msg = fmt.Sprintf( + `retryMigrateSlots execute cluster setslot restart fail,err:%v,srcAddr:%s,dstAddr:%s, + cmd:cluster setslot restart %s %s,sleep 1min and retry`, + err, newSrcNode.Addr, newDstNode.Addr, newSrcNode.NodeID, myredis.ConvertSlotToShellFormat(slots)) + job.runtime.Logger.Warn(msg) + time.Sleep(1 * time.Minute) + deleteSlotErrRetryTimes++ + continue + } else if err != nil && strings.Contains(err.Error(), "slot not empty") == true { + newSrcCli.ClusterClear() + newDstCli.ClusterClear() + msg = fmt.Sprintf( + `retryMigrateSlots execute cluster setslot restart fail,err:%v,srcAddr:%s,dstAddr:%s,cmd:"+ + "cluster setslot restart %s %s,sleep 1min and retry`, + err, newSrcNode.Addr, newDstNode.Addr, newSrcNode.NodeID, myredis.ConvertSlotToShellFormat(slots)) + job.runtime.Logger.Warn(msg) + time.Sleep(1 * time.Minute) + deleteSlotErrRetryTimes++ + continue + } else if err != nil && strings.Contains(err.Error(), "json contain err") == true { + newSrcCli.ClusterStopTaskID(taskID) + newDstCli.ClusterStopTaskID(taskID) + err = fmt.Errorf( + `retryMigrateSlots execute cluster setslot restart fail,err:%v,srcAddr:%s,dstAddr:%s,cmd:"+ + "cluster setslot restart %s %s,sleep 1min and retry`, + err, newSrcNode.Addr, newDstNode.Addr, newSrcNode.NodeID, myredis.ConvertSlotToShellFormat(slots)) + job.runtime.Logger.Warn(msg) + time.Sleep(1 * time.Minute) + deleteSlotErrRetryTimes++ + continue + } else if err != nil { + // network timeout,retry + err = fmt.Errorf(`retryMigrateSlots execute cluster setslot restart fail,err:%v, + srcAddr:%s,dstAddr:%s,cmd:cluster setslot restart %s %s`, + err, newSrcNode.Addr, newDstNode.Addr, newSrcNode.NodeID, myredis.ConvertSlotToShellFormat(slots)) + job.runtime.Logger.Error(err.Error()) + time.Sleep(5 * time.Second) + otherErrRetryTimes++ + continue + } + break + } + if (otherErrRetryTimes == 5 || deleteSlotErrRetryTimes == 300) && err != nil { + job.runtime.Logger.Error(err.Error()) + return err + } + importingTaskID := importRet.(string) + _, _, err = job.confirmMigrateSlotsStatus(newSrcNode, newDstNode, importingTaskID, slots, timeout) + if err != nil { + return err + } + return nil + +} + +// findNewMasterWhenFailover .. +// 检查old master是否failover了,如果failover了,尝试找到new master +// a. 
检查,old master是否可连接 +// - 不能连接代表 发生了 failover;从slaves中找new master; +// - 可连接,再检查old master的角色是否变成了slave,如果变成了slave,则old master的master就是new master; +// - 上面两种情况都必须保证new master至少和old master至少具有一个相同的slot +// - 如果old master可连接,且角色依然是master.则new master=old master; +// NOCC:golint/fnsize(设计如此) +func (job *TendisPlusMigrateSlots) findNewMasterWhenFailover( + oldMaster *myredis.ClusterNodeData, passwd string, slaves []*myredis.ClusterNodeData, slots []int, +) (newMaster *myredis.ClusterNodeData, isFailovered bool, err error) { + var msg string + newMaserNode := oldMaster + list01 := []string{} + for _, srcSlave01 := range slaves { + srcSlaveItem := srcSlave01 + addr := strings.Split(srcSlaveItem.Addr, ".")[0] + list01 = append(list01, addr) + } + isFailovered = false + srcCli, err := myredis.NewRedisClient(oldMaster.Addr, passwd, 0, consts.TendisTypeRedisInstance) + if err != nil { + // src master disconnected + msg = fmt.Sprintf( + "oldMasterAddr:%s disconnected,maybe failover occured,now we find new master from it's slaves:%s", + oldMaster.Addr, strings.Join(list01, ",")) + job.runtime.Logger.Warn(msg) + isFailovered = true + // find new src master from slaves + var runningSlave01 *myredis.ClusterNodeData = nil + for _, slave01 := range slaves { + slaveItem := slave01 + if len(slaveItem.FailStatus) == 0 { + runningSlave01 = slaveItem + break + } + } + if runningSlave01 == nil { + err = fmt.Errorf("oldMasterAddr:%s disconnected and have no running slave,slaves:%s", + oldMaster.Addr, strings.Join(list01, ",")) + job.runtime.Logger.Error(err.Error()) + return nil, isFailovered, err + } + + runSlaveCli, err := myredis.NewRedisClient(runningSlave01.Addr, passwd, 0, consts.TendisTypeRedisInstance) + if err != nil { + return nil, isFailovered, err + } + _, err = runSlaveCli.GetClusterNodes() + if err != nil { + runSlaveCli.Close() + return nil, isFailovered, err + } + // current running masters + runningMasters, err := runSlaveCli.GetRunningMasters() + if err != nil { + runSlaveCli.Close() + return nil, isFailovered, err + } + runSlaveCli.Close() + for _, srcSlave01 := range slaves { + srcSlaveItem := srcSlave01 + if _, ok := runningMasters[srcSlaveItem.Addr]; ok == true { + newMaserNode = srcSlaveItem + break + } + } + // not find new src master + if newMaserNode == oldMaster { + err = fmt.Errorf("oldMasterAddr:%s disconnected and can't find new master from slaves:%s", + oldMaster.Addr, strings.Join(list01, ",")) + job.runtime.Logger.Error(err.Error()) + return nil, isFailovered, err + } + interSlots := util.IntSliceInter(newMaserNode.Slots, slots) + if len(interSlots) == 0 { + // have no same slots; + // There is reason to suspect that the new src master is not correct. 
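+			// Worked example of this sanity check: if the task is migrating
+			// slots 0-99 and the candidate promoted from the slaves owns
+			// slots 50-199, the intersection is non-empty and the candidate
+			// is plausible; an empty intersection means the candidate almost
+			// certainly belongs to a different shard, so abort here rather
+			// than import slots into the wrong master.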
+ err = fmt.Errorf(` +oldMasterAddr:%s disconnected and find a new master:%s, +but old master and new master do not have the same slots, +old master slots:%s, new master slots:%s`, + oldMaster.Addr, newMaserNode.Addr, myredis.ConvertSlotToStr(slots), + myredis.ConvertSlotToStr(newMaserNode.Slots)) + job.runtime.Logger.Error(err.Error()) + return nil, isFailovered, err + } + } else { + defer srcCli.Close() + selfNodeInfo, err := srcCli.GetMyself() + if err != nil { + return nil, isFailovered, err + } + if oldMaster.Role == "slave" { + isFailovered = true + msg = fmt.Sprintf( + "oldMasterAddr:%s now is a slave,maybe failover occured,now we treat it's master as new master", + oldMaster.Addr) + job.runtime.Logger.Warn(msg) + + newMaserNode, err = srcCli.GetMasterNodeBySlaveAddr(selfNodeInfo.Addr) + if err != nil { + return nil, isFailovered, err + } + interSlots := util.IntSliceInter(newMaserNode.Slots, slots) + if len(interSlots) == 0 { + // have no same slots; + // There is reason to suspect that the new src master is not correct. + err = fmt.Errorf(` +oldMasterAddr:%s now is a slave and find a new master:%s, +but old master and new master do not have the same slots, +old master slots:%s, new master slots:%s`, + oldMaster.Addr, newMaserNode.Addr, myredis.ConvertSlotToStr(slots), myredis.ConvertSlotToStr(newMaserNode.Slots)) + job.runtime.Logger.Error(err.Error()) + return nil, isFailovered, err + } + } else { + // old master is connected and role is still master + msg = fmt.Sprintf("oldMasterAddr:%s still a master and is connected,not failover", oldMaster.Addr) + job.runtime.Logger.Info(msg) + + } + } + return newMaserNode, isFailovered, nil +} + +// MigrateSlotsFromToBeDelNode 将待删除Node上的slots 迁移到 剩余Node上 +// NOCC:golint/fnsize(设计如此) +func (job *TendisPlusMigrateSlots) MigrateSlotsFromToBeDelNode(toBeDelNodesAddr []string) (err error) { + var msg string + msg = fmt.Sprintf("start migateSlotsFromToBeDeletedNodes toBeDelNodesAddr:%v", toBeDelNodesAddr) + job.runtime.Logger.Info(msg) + defer job.runtime.Logger.Info("end migateSlotsFromToBeDeletedNodes") + + _, err = job.params.SrcNode.redisCli.GetClusterNodes() + if err != nil { + job.runtime.Logger.Error(err.Error()) + return err + } + + mastersWithSlot, err := job.params.SrcNode.redisCli.GetNodesByFunc(myredis.IsMasterWithSlot) + if err != nil && util.IsNotFoundErr(err) { + msg = fmt.Sprintf("cluster have no master with slots,no run migateSlotsFromToBeDeletedNodes") + job.runtime.Logger.Warn(msg) + return nil + } else if err != nil { + return err + } + + // confirm cluster state ok + clusterOK, _, err := job.clusterState(job.params.SrcNode.redisCli) + if err != nil { + job.runtime.Logger.Error(err.Error()) + return err + } + if clusterOK == false { + err = fmt.Errorf("cluster_state is fail,addr:%s", job.srcNodeAddr()) + job.runtime.Logger.Error(err.Error()) + return err + } + + // get to be deleted masters with slots + toBeDelMastersWithSlots := []*myredis.ClusterNodeData{} + toBeDelMastersWithSlotAddrs := []string{} + toBeDelNodeMap := make(map[string]bool) + for _, addr01 := range toBeDelNodesAddr { + toBeDelNodeMap[addr01] = true + if node01, ok := mastersWithSlot[addr01]; ok == true { + node02 := *node01 // copy + toBeDelMastersWithSlots = append(toBeDelMastersWithSlots, &node02) + toBeDelMastersWithSlotAddrs = append(toBeDelMastersWithSlotAddrs, node02.Addr) + } + } + if len(toBeDelMastersWithSlots) == 0 { + msg = fmt.Sprintf("no need migate slots,no master with slots in the toBeDeletedNodes:%v", toBeDelNodesAddr) + 
job.runtime.Logger.Info(msg)
+		return nil
+	}
+
+	// get to be left masters (with or without slots)
+	masterNodes, _ := job.params.SrcNode.redisCli.GetNodesByFunc(myredis.IsRunningMaster)
+	leftMasters := []*myredis.ClusterNodeData{}
+	for addr01, node01 := range masterNodes {
+		node02 := *node01 // copy
+		if _, ok := toBeDelNodeMap[addr01]; !ok {
+			leftMasters = append(leftMasters, &node02)
+		}
+	}
+	if len(leftMasters) == 0 {
+		msg = fmt.Sprintf("no leftMasters,no need to migrate slots,toBeDeletedNodes:%v", toBeDelNodesAddr)
+		job.runtime.Logger.Info(msg)
+		return
+	}
+
+	leftMasterCnt := len(leftMasters)
+	expectedSlotNum := int(math.Ceil(float64(consts.DefaultMaxSlots+1) / float64(leftMasterCnt)))
+	type migrationInfo struct {
+		FromAddr string
+		ToAddr   string
+	}
+	migrateMap := make(map[migrationInfo][]int)
+
+	for _, delNode01 := range toBeDelMastersWithSlots {
+		delNodeItem := delNode01
+		for _, slot01 := range delNodeItem.Slots {
+			// loop all slots on toBeDeletedNodes
+			for _, leftNode01 := range leftMasters {
+				leftNodeItem := leftNode01
+				if len(leftNodeItem.Slots) >= expectedSlotNum {
+					continue
+				}
+				leftNodeItem.Slots = append(leftNodeItem.Slots, slot01)
+				migrate01 := migrationInfo{FromAddr: delNodeItem.Addr, ToAddr: leftNodeItem.Addr}
+				migrateMap[migrate01] = append(migrateMap[migrate01], slot01)
+				break // next slot
+			}
+		}
+	}
+	migrateTasks := []MigrateSomeSlots{}
+	for migrate01, slots := range migrateMap {
+		sort.Slice(slots, func(i, j int) bool {
+			return slots[i] < slots[j]
+		})
+
+		migrateTasks = append(migrateTasks, MigrateSomeSlots{
+			SrcAddr:      migrate01.FromAddr,
+			DstAddr:      migrate01.ToAddr,
+			MigrateSlots: slots,
+		})
+	}
+
+	for _, task01 := range migrateTasks {
+		msg = fmt.Sprintf("scale down migrate plan=>srcNode:%s dstNode:%s slots:%s",
+			task01.SrcAddr, task01.DstAddr, myredis.ConvertSlotToShellFormat(task01.MigrateSlots))
+		job.runtime.Logger.Info(msg)
+	}
+
+	// Collect the NodeIDs of every to-be-deleted master/slave pair for the later cluster forget.
+	// Do this BEFORE migrating: once all slots are migrated away, the slaves replicate to other nodes.
+	toBeDelAllNodeNodeID := []string{}
+	toBeDelAllNodeMap := make(map[string]bool)
+	for _, addr01 := range toBeDelNodesAddr {
+		toBeDelAllNodeMap[addr01] = true
+		if node01, ok := mastersWithSlot[addr01]; ok {
+			node02 := *node01
+			dstCli01, err := myredis.NewRedisClient(addr01, job.params.DstNode.Password, 0, consts.TendisTypeRedisInstance)
+			if err != nil {
+				job.runtime.Logger.Error(err.Error())
+				return err
+			}
+			defer dstCli01.Close()
+			dstSlaves, err := dstCli01.GetAllSlaveNodesByMasterAddr(addr01)
+			if err != nil {
+				job.Err = fmt.Errorf("dstAddr:%s get slave fail:%+v", addr01, err)
+				job.runtime.Logger.Error(job.Err.Error())
+				return job.Err
+			}
+			toBeDelAllNodeNodeID = append(toBeDelAllNodeNodeID, node02.NodeID)
+			for _, srcSlave01 := range dstSlaves {
+				srcSlaveItem := srcSlave01
+				toBeDelAllNodeNodeID = append(toBeDelAllNodeNodeID, srcSlaveItem.NodeID)
+				toBeDelAllNodeMap[srcSlave01.Addr] = true
+			}
+		}
+	}
+	job.runtime.Logger.Info("get toBeDelAllNodeNodeID success :%v", toBeDelAllNodeNodeID)
+	allNodes, err := job.params.SrcNode.redisCli.GetAddrMapToNodes()
+	if err != nil {
+		return err
+	}
+
+	err = job.ParallelMigrateSpecificSlots(migrateTasks)
+	if err != nil {
+		return err
+	}
+
+	// If any to-be-deleted master is still migrating slots, wait 1 minute and retry, at most 300 minutes.
+	timeLimit := 0
+	for {
+		isToBeDelMasterMigrating, migratingAddr, migratingSlots, err1 := job.areTenplusMigrating(toBeDelMastersWithSlotAddrs)
+		if err1 != nil {
+			return err1
+		}
+		if isToBeDelMasterMigrating {
+			// keep waiting until none of the to-be-deleted masters is migrating slots
+			timeLimit++
+			if timeLimit > 300 {
+				err = fmt.Errorf("MigrateSlotsFromToBeDelNode still migrating after 300 minutes,please check")
+				job.runtime.Logger.Error(err.Error())
+				return err
+			}
+			msg = fmt.Sprintf("MigrateSlotsFromToBeDelNode toBeDeletedMaster:%s migrating slots count:%d",
+				migratingAddr, len(migratingSlots))
+			job.runtime.Logger.Info(msg)
+			time.Sleep(1 * time.Minute)
+			continue
+		}
+		break
+	}
+
+	// make sure that toBeDeletedNodes have no slots
+	_, err = job.params.SrcNode.redisCli.GetClusterNodes()
+	if err != nil {
+		return err
+	}
+	var filterToBeDelNodeFunc = func(n *myredis.ClusterNodeData) bool {
+		_, ok := toBeDelNodeMap[n.Addr]
+		return ok
+	}
+	toBeDeletedNodes, err := job.params.SrcNode.redisCli.GetNodesByFunc(filterToBeDelNodeFunc)
+	if err != nil {
+		return err
+	}
+	var errList []string
+	for _, node01 := range toBeDeletedNodes {
+		if myredis.IsRunningMaster(node01) && len(node01.Slots) > 0 {
+			errList = append(errList, fmt.Sprintf("%s still has %d slots:%s",
+				node01.Addr, len(node01.Slots),
+				myredis.ConvertSlotToShellFormat(node01.Slots)))
+		}
+	}
+	if len(errList) > 0 {
+		err = fmt.Errorf("%s", strings.Join(errList, "\n"))
+		job.runtime.Logger.Error(err.Error())
+		return err
+	}
+
+	// nodes other than the master/slave pairs being decommissioned
+	leftNodesAddr := []string{}
+	for addr01 := range allNodes {
+		if _, ok := toBeDelAllNodeMap[addr01]; !ok {
+			leftNodesAddr = append(leftNodesAddr, addr01)
+		}
+	}
+	var errForgetList []string
+	for _, addr03 := range leftNodesAddr {
+		addrCli, err := myredis.NewRedisClient(addr03, job.params.SrcNode.Password, 0, consts.TendisTypeRedisInstance)
+		if err != nil {
+			job.runtime.Logger.Error(err.Error())
+			return err
+		}
+		defer addrCli.Close()
+		for _, nodeID := range toBeDelAllNodeNodeID {
+			err := addrCli.ClusterForget(nodeID)
+			if err != nil {
+				errForgetList = append(errForgetList, fmt.Sprintf("node:%s cluster forget %s failed", addr03, nodeID))
+				job.runtime.Logger.Error(err.Error())
+			}
+		}
+	}
+	job.runtime.Logger.Info("get leftNodesAddr success :%v", leftNodesAddr)
+	if len(errForgetList) > 0 {
+		err = fmt.Errorf("%s", strings.Join(errForgetList, "\n"))
+		job.runtime.Logger.Error(err.Error())
+		return err
+	}
+	job.runtime.Logger.Info("cluster forget success")
+	return nil
+}
+
+// areTenplusMigrating checks whether any of the given tendisplus nodes is migrating slots
+func (job *TendisPlusMigrateSlots) areTenplusMigrating(tenplusAddrs []string) (
+	migratingOrNot bool, migratingAddr string, migratingSlots []int, err error,
+) {
+	if len(tenplusAddrs) == 0 {
+		return
+	}
+
+	var srcSetSlotInfo *myredis.ClusterSetSlotInfo
+	for _, addr01 := range tenplusAddrs {
+		addr01 = strings.TrimSpace(addr01)
+		if addr01 == "" {
+			continue
+		}
+		srcSetSlotInfo, err = myredis.GetClusterSetSlotInfo(addr01, job.params.SrcNode.Password)
+		if err != nil {
+			job.runtime.Logger.Error(err.Error())
+			return
+		}
+		if len(srcSetSlotInfo.MigratingSlotList) > 0 {
+			return true, addr01, srcSetSlotInfo.MigratingSlotList, nil
+		}
+	}
+	return
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_replicaof.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_replicaof.go
new file mode 100644
index 0000000000..13a36e9271
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_replicaof.go
@@ -0,0 +1,425 @@
+package atomredis
+
+import (
+	"dbm-services/redis/db-tools/dbactuator/models/myredis"
+	"dbm-services/redis/db-tools/dbactuator/pkg/consts"
"dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-playground/validator/v10" +) + +// ReplicaItem 主从关系单项,每个项填写密码主要是因为 如果一次性操作多对 主从版, 主从版之间是密码并不相同 +type ReplicaItem struct { + MasterIP string `json:"master_ip" validate:"required"` + MasterPort int `json:"master_port" validate:"required"` + MasterAuth string `json:"master_auth" validate:"required"` + SlaveIP string `json:"slave_ip" validate:"required"` + SlavePort int `json:"slave_port" validate:"required"` + SlavePassword string `json:"slave_password" validate:"required"` +} + +// MasterAddr masteraddr +func (item *ReplicaItem) MasterAddr() string { + return item.MasterIP + ":" + strconv.Itoa(item.MasterPort) +} + +// SlaveAddr slaveaddr +func (item *ReplicaItem) SlaveAddr() string { + return item.SlaveIP + ":" + strconv.Itoa(item.SlavePort) +} + +// RedisReplicaOfParams 建立主从关系 参数 +type RedisReplicaOfParams struct { + ReplicaPairs []ReplicaItem `json:"replica_pairs" validate:"required"` +} + +// RedisReplicaOf redis主从关系 原子任务 +type RedisReplicaOf struct { + runtime *jobruntime.JobGenericRuntime + params RedisReplicaOfParams +} + +// 无实际作用,仅确保实现了 jobruntime.JobRunner 接口 +var _ jobruntime.JobRunner = (*RedisReplicaOf)(nil) + +// NewRedisReplicaOf new +func NewRedisReplicaOf() jobruntime.JobRunner { + return &RedisReplicaOf{} +} + +// Init 初始化,参数校验 +func (job *RedisReplicaOf) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v\n", err)) + return err + } + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("RedisReplicaOf Init params validate failed,err:%v,params:%+v", err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("RedisReplicaOf Init params validate failed,err:%v,params:%+v", err, job.params) + return err + } + } + return nil +} + +// Name 名字 +func (job *RedisReplicaOf) Name() string { + return "redis_replicaof" +} + +// Run 执行 +func (job *RedisReplicaOf) Run() error { + tasks := []*ReplicaTask{} + for _, item := range job.params.ReplicaPairs { + task := &ReplicaTask{ + ReplicaItem: item, + runtime: job.runtime, + } + tasks = append(tasks, task) + } + err := GroupRunReplicaTasksAndWait(tasks, job.runtime) + if err != nil { + return err + } + job.runtime.Logger.Info("all replicas ok") + return nil +} + +// Retry 返回可重试次数 +func (job *RedisReplicaOf) Retry() uint { + return 2 +} + +// Rollback 回滚函数,一般不用实现 +func (job *RedisReplicaOf) Rollback() error { + return nil +} + +// ReplicaTask 建立主从关系task +type ReplicaTask struct { + ReplicaItem + MasterCli *myredis.RedisClient `json:"-"` + SlaveCli *myredis.RedisClient `json:"-"` + ClusterEnabled string `json:"cluster_enabled"` + // version信息 + SlaveVersion string `json:"slave_version"` + SlaveBaseVer uint64 `json:"slave_base_ver"` + SlaveSubVer uint64 `json:"slave_sub_ver"` + InfoReplRole string `json:"info_repl_role"` + InfoReplMasterHost string `json:"info_repl_master_host"` + InfoReplMasterPort string `json:"info_repl_master_port"` + InfoReplLinkStatus string `json:"info_repl_link_status"` + DbType string `json:"db_type"` + runtime *jobruntime.JobGenericRuntime + Err error `json:"-"` +} + 
+// NewReplicaTask new replica task +func NewReplicaTask(masterIP string, masterPort int, masterAuth string, slaveIP string, slavePort int, + slavePassword string, runtime *jobruntime.JobGenericRuntime) *ReplicaTask { + return &ReplicaTask{ + ReplicaItem: ReplicaItem{ + MasterIP: masterIP, + MasterPort: masterPort, + MasterAuth: masterAuth, + SlaveIP: slaveIP, + SlavePort: slavePort, + SlavePassword: slavePassword, + }, + runtime: runtime, + } +} + +// InfoReplMasterAddr 'info replication' master addr +func (task *ReplicaTask) InfoReplMasterAddr() string { + return task.InfoReplMasterHost + ":" + task.InfoReplMasterPort +} + +func (task *ReplicaTask) newConnects() { + task.runtime.Logger.Info("begin to connect master:%s", task.MasterAddr()) + task.MasterCli, task.Err = myredis.NewRedisClientWithTimeout(task.MasterAddr(), task.MasterAuth, + 0, consts.TendisTypeRedisInstance, 5*time.Second) + if task.Err != nil { + return + } + task.runtime.Logger.Info("begin to connect slave:%s", task.SlaveAddr()) + task.SlaveCli, task.Err = myredis.NewRedisClientWithTimeout(task.SlaveAddr(), task.SlavePassword, + 0, consts.TendisTypeRedisInstance, 5*time.Second) + if task.Err != nil { + return + } + var infoRet map[string]string + infoRet, task.Err = task.SlaveCli.Info("server") + if task.Err != nil { + return + } + task.SlaveVersion = infoRet["redis_version"] + task.SlaveBaseVer, task.SlaveSubVer, task.Err = util.VersionParse(task.SlaveVersion) + if task.Err != nil { + return + } + task.DbType, task.Err = task.SlaveCli.GetTendisType() +} +func (task *ReplicaTask) confirmClusterEnabled() { + confData, err := task.SlaveCli.ConfigGet("cluster-enabled") + if err != nil { + task.Err = err + return + } + defer task.runtime.Logger.Info("slave:%s cluster-enabled=%s", + task.SlaveCli.Addr, task.ClusterEnabled) + val, ok := confData["cluster-enabled"] + if ok && strings.ToLower(val) == "yes" { + task.ClusterEnabled = "yes" + return + } + task.ClusterEnabled = "no" +} + +func (task *ReplicaTask) getReplicaStatusData() { + infoRet, err := task.SlaveCli.Info("replication") + if err != nil { + task.Err = err + return + } + task.InfoReplRole = infoRet["role"] + task.InfoReplMasterHost = infoRet["master_host"] + task.InfoReplMasterPort = infoRet["master_port"] + task.InfoReplLinkStatus = infoRet["master_link_status"] +} + +// IsReplicaStatusOk 复制关系是否已ok +func (task *ReplicaTask) IsReplicaStatusOk() (status bool) { + task.getReplicaStatusData() + if task.Err != nil { + return false + } + if task.InfoReplRole == consts.RedisSlaveRole { + // I am a slave + if task.InfoReplMasterHost == task.MasterIP && + task.InfoReplMasterPort == strconv.Itoa(task.MasterPort) && + task.InfoReplLinkStatus == consts.MasterLinkStatusUP { + // 同步关系已ok + return true + } + // slave角色,但其master信息不正确 + if task.InfoReplMasterHost != task.MasterIP || task.InfoReplMasterPort != strconv.Itoa(task.MasterPort) { + task.Err = fmt.Errorf("slave(%s) current master %s,not %s", + task.SlaveAddr(), task.InfoReplMasterAddr(), task.MasterAddr()) + task.runtime.Logger.Error(task.Err.Error()) + return + } + // slavejues,master信息正确,单link-status !=up + return false + } + // I am a master + dbsize, err := task.SlaveCli.DbSize() + if err != nil { + task.Err = err + return + } + if dbsize > 20 { + task.Err = fmt.Errorf("redis(%s) is a master,but has %d keys", task.SlaveAddr(), dbsize) + task.runtime.Logger.Error(task.Err.Error()) + return + } + return false +} + +// CreateReplicaREL create replica relationship +func (task *ReplicaTask) CreateReplicaREL() { + _, 
task.Err = task.MasterCli.ConfigSet("appendonly", "no") + if task.Err != nil { + return + } + task.runtime.Logger.Info("master(%s) 'confxx set appendonly no' ok ", task.MasterAddr()) + + _, task.Err = task.SlaveCli.ConfigSet("appendonly", "yes") + if task.Err != nil { + return + } + task.runtime.Logger.Info("slave(%s) 'confxx set appendonly yes' ok ", task.SlaveAddr()) + + _, task.Err = task.SlaveCli.ConfigSet("masterauth", task.MasterAuth) + if task.Err != nil { + return + } + task.runtime.Logger.Info("slave(%s) 'confxx set masterauth xxx' ok ", task.SlaveAddr()) + + if consts.IsRedisInstanceDbType(task.DbType) { + _, task.Err = task.SlaveCli.ConfigSet("slave-read-only", "yes") + if task.Err != nil { + return + } + task.runtime.Logger.Info("slave(%s) 'confxx set slave-read-only yes' ok ", task.SlaveAddr()) + } + + if task.InfoReplRole == consts.RedisMasterRole { + if task.ClusterEnabled == "no" { + _, task.Err = task.SlaveCli.SlaveOf(task.MasterIP, strconv.Itoa(task.MasterPort)) + if task.Err != nil { + return + } + task.runtime.Logger.Info("slave(%s) 'slaveof %s %d' ok ", + task.SlaveAddr(), task.MasterIP, task.MasterPort) + } else { + // cluster-enabled=yes + // 先获取masterID + addrMap, err := task.SlaveCli.GetAddrMapToNodes() + if err != nil { + task.Err = err + return + } + masterAddr := task.MasterAddr() + masterNode, ok := addrMap[masterAddr] + if !ok { + task.Err = fmt.Errorf("redis(%s) cluster nodes result not found masterNode(%s)", task.SlaveAddr(), masterAddr) + task.runtime.Logger.Error(task.Err.Error()) + return + } + if !myredis.IsRunningMaster(masterNode) { + task.Err = fmt.Errorf("cluster nodes redis(%s) is not a running master,addr:%s", masterAddr, task.SlaveCli.Addr) + task.runtime.Logger.Error(task.Err.Error()) + task.runtime.Logger.Info(masterNode.String()) + return + } + _, task.Err = task.SlaveCli.ClusterReplicate(masterNode.NodeID) + if task.Err != nil { + return + } + task.runtime.Logger.Info("slave(%s) 'cluster replicate %s' ok, master(%s)", + task.SlaveAddr(), masterNode.NodeID, masterAddr) + } + } + _, task.Err = task.MasterCli.ConfigRewrite() + if task.Err != nil { + return + } + task.runtime.Logger.Info("master(%s) 'confxx rewrite' ok ", task.MasterAddr()) + + _, task.Err = task.SlaveCli.ConfigRewrite() + if task.Err != nil { + return + } + task.runtime.Logger.Info("slave(%s) 'confxx rewrite' ok ", task.SlaveAddr()) +} + +// CreateReplicaAndWait slaveof and wait util status==up +func (task *ReplicaTask) CreateReplicaAndWait() { + var msg string + task.newConnects() + if task.Err != nil { + return + } + defer task.SlaveCli.Close() + defer task.MasterCli.Close() + + task.confirmClusterEnabled() + if task.Err != nil { + return + } + status := task.IsReplicaStatusOk() + if task.Err != nil { + return + } + if status { + msg = fmt.Sprintf("redis(%s) master(%s) master_link_status:%s,skip...", + task.SlaveAddr(), + task.InfoReplMasterAddr(), + task.InfoReplLinkStatus) + task.runtime.Logger.Info(msg) + return + } + task.CreateReplicaREL() + if task.Err != nil { + return + } + maxRetryTimes := 720 // 1 hour timeout + for maxRetryTimes >= 0 { + maxRetryTimes-- + task.getReplicaStatusData() + if task.Err != nil { + return + } + if task.InfoReplLinkStatus != consts.MasterLinkStatusUP { + if maxRetryTimes%3 == 0 { + // 半分钟输出一次日志 + msg = fmt.Sprintf("redis(%s) master_link_status:%s", task.SlaveAddr(), task.InfoReplLinkStatus) + task.runtime.Logger.Info(msg) + } + time.Sleep(5 * time.Second) + continue + } + break + } + if task.InfoReplLinkStatus != consts.MasterLinkStatusUP { + 
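+		// Reaching this branch means the wait loop above exhausted its budget:
+		// 720 probes with a 5s sleep between them, i.e. roughly 1 hour without
+		// master_link_status ever reporting "up".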
task.Err = fmt.Errorf("redis(%s) master_link_status:%s", task.SlaveAddr(), task.InfoReplLinkStatus) + task.runtime.Logger.Error(task.Err.Error()) + return + } + msg = fmt.Sprintf("redis(%s) master(%s) master_link_status:%s", + task.SlaveAddr(), + task.InfoReplMasterAddr(), + task.InfoReplLinkStatus) + task.runtime.Logger.Info(msg) + return +} + +// GroupRunReplicaTasksAndWait 根据masterIP,分批执行赋值任务,不同master机器间并行,同一master实例间串行 +func GroupRunReplicaTasksAndWait(tasks []*ReplicaTask, runtime *jobruntime.JobGenericRuntime) error { + util.StopBkDbmon() + defer util.StartBkDbmon() + // 根据masterIP做分组 + tasksMapSlice := make(map[string][]*ReplicaTask) + maxCount := 0 + for _, task01 := range tasks { + task := task01 + tasksMapSlice[task.MasterIP] = append(tasksMapSlice[task.MasterIP], task) + if len(tasksMapSlice[task.MasterIP]) > maxCount { + maxCount = len(tasksMapSlice[task.MasterIP]) + } + } + // 同masterIP实例间串行,多masterIP实例间并行 + for idx := 0; idx < maxCount; idx++ { + groupTasks := []*ReplicaTask{} + for masterIP := range tasksMapSlice { + if len(tasksMapSlice[masterIP]) > idx { + groupTasks = append(groupTasks, tasksMapSlice[masterIP][idx]) + } + } + wg := sync.WaitGroup{} + for _, taskItem := range groupTasks { + task01 := taskItem + wg.Add(1) + go func(task02 *ReplicaTask) { + defer wg.Done() + task02.CreateReplicaAndWait() + }(task01) + } + wg.Wait() + for _, taskItem := range groupTasks { + task01 := taskItem + if task01.Err != nil { + return task01.Err + } + } + } + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_replicaof_batch.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_replicaof_batch.go new file mode 100644 index 0000000000..47ff66b87e --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_replicaof_batch.go @@ -0,0 +1,123 @@ +package atomredis + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "encoding/json" + "fmt" + + "github.com/go-playground/validator/v10" +) + +// ReplicaBatchItem 批量主从关系项 +type ReplicaBatchItem struct { + MasterIP string `json:"master_ip" validate:"required"` + MasterStartPort int `json:"master_start_port" validate:"required"` + MasterInstNum int `json:"master_inst_num" validate:"required"` + MasterAuth string `json:"master_auth" validate:"required"` + SlaveIP string `json:"slave_ip" validate:"required"` + SlaveStartPort int `json:"slave_start_port" validate:"required"` + SlaveInstNum int `json:"slave_inst_num" validate:"required"` + SlavePassword string `json:"slave_password" validate:"required"` +} + +// ReplicaBatchParams 批量主从关系参数 +type ReplicaBatchParams struct { + BatchPairs []ReplicaBatchItem `json:"bacth_pairs" validate:"required"` +} + +// RedisReplicaBatch redis(批量)主从关系 原子任务 +type RedisReplicaBatch struct { + runtime *jobruntime.JobGenericRuntime + params ReplicaBatchParams +} + +// 无实际作用,仅确保实现了 jobruntime.JobRunner 接口 +var _ jobruntime.JobRunner = (*RedisReplicaBatch)(nil) + +// NewRedisReplicaBatch new +func NewRedisReplicaBatch() jobruntime.JobRunner { + return &RedisReplicaBatch{} +} + +// Init 初始化,参数校验 +func (job *RedisReplicaBatch) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v\n", err)) + return err + } + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := 
err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("RedisReplicaBatch Init params validate failed,err:%v,params:%+v", err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("RedisReplicaBatch Init params validate failed,err:%v,params:%+v", err, job.params) + return err + } + } + for _, item := range job.params.BatchPairs { + if item.MasterInstNum != item.SlaveInstNum { + err = fmt.Errorf("masterIP:%s slaveIP:%s master_inst_num(%d) <> slave_inst_num(%d)", + item.MasterIP, item.SlaveIP, item.MasterInstNum, item.SlaveInstNum) + job.runtime.Logger.Error(err.Error()) + return err + } + } + return nil +} + +// Name 名字 +func (job *RedisReplicaBatch) Name() string { + return "redis_replica_batch" +} + +// Run 执行 +func (job *RedisReplicaBatch) Run() error { + var tasks []*ReplicaTask + var cnt int = 0 + var masterPort, slavePort int + for _, item := range job.params.BatchPairs { + cnt += item.MasterInstNum + } + tasks = make([]*ReplicaTask, 0, cnt) + for _, item := range job.params.BatchPairs { + for i := 0; i < item.MasterInstNum; i++ { + masterPort = item.MasterStartPort + i + slavePort = item.SlaveStartPort + i + task := &ReplicaTask{ + ReplicaItem: ReplicaItem{ + MasterIP: item.MasterIP, + MasterPort: masterPort, + MasterAuth: item.MasterAuth, + SlaveIP: item.SlaveIP, + SlavePort: slavePort, + SlavePassword: item.SlavePassword, + }, + runtime: job.runtime, + } + tasks = append(tasks, task) + } + } + err := GroupRunReplicaTasksAndWait(tasks, job.runtime) + if err != nil { + return err + } + job.runtime.Logger.Info("all replicas ok") + return nil +} + +// Retry 返回可重试次数 +func (job *RedisReplicaBatch) Retry() uint { + return 2 +} + +// Rollback 回滚函数,一般不用实现 +func (job *RedisReplicaBatch) Rollback() error { + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_scene_kill_conn.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_scene_kill_conn.go new file mode 100644 index 0000000000..737c63daab --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_scene_kill_conn.go @@ -0,0 +1,137 @@ +package atomredis + +import ( + "dbm-services/redis/db-tools/dbactuator/models/myredis" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/go-playground/validator/v10" +) + +/* + kill 掉长时间不活跃的链接 (包含可能的Dead connection) + // 输入参数 + { + "instances":[{"ip":"","port":""},], + "idel_time":600, + "cluster_type":"", + } +*/ + +// KillDeadParam TODO +// SwitchParam cluster bind entry +type KillDeadParam struct { + Instances []InstanceParam `json:"instances"` + ConnIdleTime int `json:"idle_time"` + ClusterType string `json:"cluster_type"` +} + +// RedisKillDeadConn entry +type RedisKillDeadConn struct { + runtime *jobruntime.JobGenericRuntime + params *KillDeadParam +} + +// NewRedisSceneKillDeadConn TODO +func NewRedisSceneKillDeadConn() jobruntime.JobRunner { + return &RedisKillDeadConn{} +} + +// Init 初始化 +func (job *RedisKillDeadConn) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + 
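+			// A validator.InvalidValidationError means validate.Struct itself was
+			// misused (e.g. handed a non-struct), as opposed to the per-field
+			// validator.ValidationErrors handled in the loop below.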
job.runtime.Logger.Error("RedisKillDeadConn Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("RedisKillDeadConn Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + } + + return nil +} + +// Run 运行干掉旧链接逻辑 +func (job *RedisKillDeadConn) Run() (err error) { + job.runtime.Logger.Info("kill dead conn start; params:%+v", job.params) + + for _, storage := range job.params.Instances { + addr := fmt.Sprintf("%s:%d", storage.IP, storage.Port) + pwd, err := myredis.GetPasswordFromLocalConfFile(storage.Port) + if err != nil { + job.runtime.Logger.Error("get redis pass from local failed,err %s:%v", addr, err) + return err + } + rconn, err := myredis.NewRedisClientWithTimeout(addr, pwd, 0, job.params.ClusterType, time.Second) + if err != nil { + return fmt.Errorf("conn redis %s failed:%+v", addr, err) + } + defer rconn.Close() + + cs, err := rconn.DoCommand([]string{"Client", "List"}, 0) + if err != nil { + return fmt.Errorf("do cmd failed %s:%+v", addr, err) + } + var totalked int + if clis, ok := cs.([]byte); ok { + sx := []byte{} + for _, cli := range clis { + if cli != 10 { // byte(\n)==10 ASIC + sx = append(sx, cli) + } else { + // id=3 addr=10047:38028 fd=11 name= age=53461290 idle=0 flags=S db=0 sub=0 psub=0 multi=-1 qbuf=0 qbuf-free=0 obl=0 oll=0 omem=0 events=r cmd=replconf + ws := strings.Split(string(sx), " ") + if len(ws) > 1 && strings.HasPrefix(ws[1], "addr=") && !(strings.Contains(string(sx), "replconf")) { + idleTime, _ := strconv.Atoi(strings.Split(ws[5], "=")[1]) + if idleTime >= job.params.ConnIdleTime { + kcli := strings.Split(ws[1], "=")[1] + _, err = rconn.DoCommand([]string{"Clinet", "Kill", kcli}, 0) + job.runtime.Logger.Info("redis send %s:kill [%s] , rst:%+v", addr, sx, err) + totalked++ + } + } + sx = []byte{} + } + } + } else { + job.runtime.Logger.Warn(fmt.Sprintf("client list assert failed %+s ,result not []string", addr)) + } + job.runtime.Logger.Info("redis send %s:total killed [%d].", addr, totalked) + } + + job.runtime.Logger.Info("kill dead conn all success ^_^") + return nil +} + +// Name 原子任务名 +func (job *RedisKillDeadConn) Name() string { + return "redis_kill_conn" +} + +// Retry times +func (job *RedisKillDeadConn) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *RedisKillDeadConn) Rollback() error { + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_scene_param_sync.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_scene_param_sync.go new file mode 100644 index 0000000000..e5b5cc4b23 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_scene_param_sync.go @@ -0,0 +1,126 @@ +package atomredis + +import ( + "dbm-services/redis/db-tools/dbactuator/models/myredis" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "encoding/json" + "fmt" + "time" + + "github.com/go-playground/validator/v10" +) + +/* + // 输入参数 + { + "instances":[{"master":{"ip":"","port":},"slave":{"ip":"","port":}}], + "cluster_type":"", + "slave_master_diff_time":100, + "last_io_second_ago":60, + } +*/ + +// DoSyncParam TODO +// SyncParam +type DoSyncParam struct { + Instances []InstanceSwitchParam `json:"instances"` + ClusterType string `json:"cluster_type"` + // "disk-delete-count", "maxmemory", "log-count", "log-keep-count" + ParamList []string `json:"params"` +} + +// RedisPramsSync tendis ssd 参数同步 +type RedisPramsSync 
struct { + runtime *jobruntime.JobGenericRuntime + params *DoSyncParam +} + +// NewRedisSceneSyncPrams TODO +func NewRedisSceneSyncPrams() jobruntime.JobRunner { + return &RedisPramsSync{} +} + +// Init 初始化 +func (job *RedisPramsSync) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("redissynccheck Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("redissynccheck Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + } + return nil +} + +// Run TODO +func (job *RedisPramsSync) Run() (err error) { + job.runtime.Logger.Info("redisparamsync start; params:%+v", job.params) + for _, pair := range job.params.Instances { + addr1 := fmt.Sprintf("%s:%d", pair.MasterInfo.IP, pair.MasterInfo.Port) + pwd, err := myredis.GetPasswordFromLocalConfFile(pair.MasterInfo.Port) + if err != nil { + job.runtime.Logger.Error("get redis pass from local failed,err %s:%v", addr1, err) + return err + } + rc1, err := myredis.NewRedisClientWithTimeout(addr1, pwd, 0, job.params.ClusterType, time.Second) + if err != nil { + return fmt.Errorf("conn redis %s failed:%+v", addr1, err) + } + defer rc1.Close() + + addr2 := fmt.Sprintf("%s:%d", pair.SlaveInfo.IP, pair.SlaveInfo.Port) + rc2, err := myredis.NewRedisClientWithTimeout(addr2, pwd, 0, job.params.ClusterType, time.Second) + if err != nil { + return fmt.Errorf("conn redis %s failed:%+v", addr2, err) + } + defer rc2.Close() + + for _, cc := range job.params.ParamList { + v, err := rc1.DoCommand([]string{"Confxx", "GET", cc}, 0) + if err != nil { + job.runtime.Logger.Warn(fmt.Sprintf("get config value failed:%+v:%+v:%+v", addr1, cc, err)) + continue + } + if vv, ok := v.([]interface{}); ok && len(vv) == 2 { + if _, err = rc2.DoCommand([]string{"Confxx", "SET", cc, fmt.Sprintf("%s", vv[1])}, 0); err != nil { + job.runtime.Logger.Warn(fmt.Sprintf("set config value failed:%+v:%s:%+v", addr2, cc, err)) + continue + } + job.runtime.Logger.Info(fmt.Sprintf("sync config %s from %s to %s value:%s done", + cc, addr1, addr2, fmt.Sprint(vv[1]))) + } + } + } + job.runtime.Logger.Info("redisparamsync switch all success.") + return nil +} + +// Name 原子任务名 +func (job *RedisPramsSync) Name() string { + return "redis_param_sync" +} + +// Retry times +func (job *RedisPramsSync) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *RedisPramsSync) Rollback() error { + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_scene_sync_check.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_scene_sync_check.go new file mode 100644 index 0000000000..5e9049e4bc --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_scene_sync_check.go @@ -0,0 +1,136 @@ +package atomredis + +import ( + "dbm-services/redis/db-tools/dbactuator/models/myredis" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "encoding/json" + "fmt" + "strconv" + "time" + + "github.com/go-playground/validator/v10" +) + +/* + 针对 故障本地修复场景而生 + 1. 全部/部分 实例可能已经挂了 + 2. 
+ { + "instances":[{"ip":"","port":}], + "watch_seconds":600, + "cluster_type":"", + "last_io_second_ago":60, + } +*/ + +// CheckSyncParam TODO +type CheckSyncParam struct { + Instances []InstanceParam `json:"instances"` + ClusterType string `json:"cluster_type"` + MaxSlaveLastIOSecondsAgo int `json:"last_io_second_ago"` // slave io线程最大时间 + WatchSeconds int `json:"watch_seconds"` +} + +// RedisSyncCheck entry +type RedisSyncCheck struct { + runtime *jobruntime.JobGenericRuntime + params *CheckSyncParam +} + +// NewRedisSceneSyncCheck TODO +func NewRedisSceneSyncCheck() jobruntime.JobRunner { + return &RedisSyncCheck{} +} + +// Init 初始化 +func (job *RedisSyncCheck) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("redissynccheck Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("redissynccheck Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + } + return nil +} + +// Run TODO +func (job *RedisSyncCheck) Run() (err error) { + for i := 0; i < job.params.WatchSeconds; i++ { + var statusNk int + for _, ins := range job.params.Instances { + if err := job.checkReplication(ins); err != nil { + job.runtime.Logger.Warn("instance %d:[%+v] replication sames tobe not ok -_-", i, ins) + statusNk++ + } + } + + if statusNk == 0 { + job.runtime.Logger.Info("all instances replication sames tobe ok ^_^") + break + } + job.runtime.Logger.Info("%d:(%d) instances replication sames tobe not ok -_-, waiting", i, statusNk) + time.Sleep(time.Second) + } + return nil +} + +// Name 原子任务名 +func (job *RedisSyncCheck) Name() string { + return "redis_sync_check" +} + +// Retry times +func (job *RedisSyncCheck) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *RedisSyncCheck) Rollback() error { + return nil +} + +// checkReplication TODO +func (job *RedisSyncCheck) checkReplication(ins InstanceParam) error { + addr := fmt.Sprintf("%s:%d", ins.IP, ins.Port) + pwd, err := myredis.GetPasswordFromLocalConfFile(ins.Port) + if err != nil { + job.runtime.Logger.Error("get redis pass from local failed,err %s:%v", addr, err) + return err + } + rc1, err := myredis.NewRedisClientWithTimeout(addr, pwd, 0, job.params.ClusterType, time.Second) + if err != nil { + return fmt.Errorf("conn redis %s failed:%+v", addr, err) + } + defer rc1.Close() + + rep, err := rc1.Info("Replication") + if err != nil { + job.runtime.Logger.Error("get replication info faeild %s:%v", addr, err) + } + // port:30000 master_link_status:up + // port:30000 master_last_io_seconds_ago:1 + last_io_second_ago, _ := strconv.Atoi(rep["master_last_io_seconds_ago"]) + linkstatus := rep["master_link_status"] + if linkstatus == "up" && last_io_second_ago < job.params.MaxSlaveLastIOSecondsAgo { + job.runtime.Logger.Info("%s:link_status:%s,last_io_seconds:%d", addr, linkstatus, last_io_second_ago) + return nil + } + + return fmt.Errorf("%s:linkStatus:%s,lastIO:%d", addr, linkstatus, last_io_second_ago) +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_shutdown.go 
b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_shutdown.go
new file mode 100644
index 0000000000..a203dc528e
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_shutdown.go
@@ -0,0 +1,265 @@
+package atomredis
+
+import (
+	"dbm-services/redis/db-tools/dbactuator/models/myredis"
+	"dbm-services/redis/db-tools/dbactuator/pkg/consts"
+	"dbm-services/redis/db-tools/dbactuator/pkg/jobruntime"
+	"dbm-services/redis/db-tools/dbactuator/pkg/util"
+	"encoding/json"
+	"fmt"
+	"path/filepath"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/go-playground/validator/v10"
+)
+
+// RedisShutdownParams redis shutdown params
+type RedisShutdownParams struct {
+	IP    string `json:"ip" validate:"required"`
+	Ports []int  `json:"ports" validate:"required"`
+	Debug bool   `json:"debug"`
+}
+
+// RedisShutdown redis shutdown struct
+type RedisShutdown struct {
+	runtime        *jobruntime.JobGenericRuntime
+	params         *RedisShutdownParams
+	RealDataDir    string // /data/redis
+	RedisBinDir    string // /usr/local/redis
+	RedisBackupDir string
+
+	errChan chan error
+}
+
+// NewRedisShutdown create a redis shutdown job
+func NewRedisShutdown() jobruntime.JobRunner {
+	return &RedisShutdown{}
+}
+
+// Init init and validate params
+func (job *RedisShutdown) Init(m *jobruntime.JobGenericRuntime) error {
+	job.runtime = m
+	err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params)
+	if err != nil {
+		job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err))
+		return err
+	}
+	// params validation
+	validate := validator.New()
+	err = validate.Struct(job.params)
+	if err != nil {
+		if _, ok := err.(*validator.InvalidValidationError); ok {
+			job.runtime.Logger.Error("RedisShutdown Init params validate failed,err:%v,params:%+v",
+				err, job.params)
+			return err
+		}
+		for _, err := range err.(validator.ValidationErrors) {
+			job.runtime.Logger.Error("RedisShutdown Init params validate failed,err:%v,params:%+v",
+				err, job.params)
+			return err
+		}
+	}
+	// 6379 <= port <= 55535
+	ports := job.params.Ports
+	for _, p := range ports {
+		if p > 55535 || p < 6379 {
+			err = fmt.Errorf("RedisShutdown port[%d] must range [6379,55535]", p)
+			job.runtime.Logger.Error(err.Error())
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Run run the shutdown flow
+func (job *RedisShutdown) Run() (err error) {
+	job.InitRealDataDir()
+	ports := job.params.Ports
+
+	wg := sync.WaitGroup{}
+	for _, port := range ports {
+		wg.Add(1)
+		go func(port int) {
+			defer wg.Done()
+			job.Shutdown(port)
+			job.BackupDir(port)
+		}(port)
+	}
+	wg.Wait()
+	close(job.errChan)
+
+	errMsg := ""
+	for err := range job.errChan {
+		errMsg = fmt.Sprintf("%s\n%s", errMsg, err.Error())
+	}
+	if errMsg != "" {
+		return fmt.Errorf("%s", errMsg)
+	}
+
+	return nil
+}
+
+// InitRealDataDir init dirs and the error channel
+func (job *RedisShutdown) InitRealDataDir() {
+	redisSoftLink := filepath.Join(consts.UsrLocal, "redis")
+	job.RedisBinDir = filepath.Join(redisSoftLink, "bin")
+	job.runtime.Logger.Info("GetRedisBinDir success,binDir:%s", job.RedisBinDir)
+
+	job.RealDataDir = filepath.Join(consts.GetRedisDataDir(), "/redis")
+	job.runtime.Logger.Info("GetRealDataDir success,dataDir:%s", job.RealDataDir)
+
+	job.RedisBackupDir = filepath.Join(consts.GetRedisBackupDir(), "dbbak")
+	job.runtime.Logger.Info("GetRedisBackupDir success,backupDir:%s", job.RedisBackupDir)
+
+	job.errChan = make(chan error, len(job.params.Ports))
+}
+
+// Shutdown stop the process
+func (job *RedisShutdown) Shutdown(port int) {
+	shutDownSucc := false
+	status := true
+	var err error
+	stopScript := filepath.Join(job.RedisBinDir, 
"stop-redis.sh") + job.runtime.Logger.Info("get port[%d] pwd begin.", port) + pwd, err := myredis.GetPasswordFromLocalConfFile(port) + if err != nil { + job.errChan <- fmt.Errorf("get redis port[%d] password failed err[%s]", port, err.Error()) + return + } + job.runtime.Logger.Info("get port[%d] pwd success.", port) + + for i := 1; i <= 10; i++ { + job.runtime.Logger.Info("shuwdown redis port[%d] count[%d/10] begin....", port, i) + status, _ = job.IsRedisRunning(port) + if !status { + job.runtime.Logger.Info("redis port[%d] status is not running. shutdown succ....", port) + shutDownSucc = true + break + } + job.runtime.Logger.Info("check port[%d] conn status.", port) + if err = job.CheckSlaveConn(port, pwd); err != nil { + job.runtime.Logger.Warn(err.Error()) + continue + } + + // 先通过stop脚本去停止,如果停止失败再尝试用redis-client的方式去shutdown + _, err = util.RunLocalCmd("su", []string{ + consts.MysqlAaccount, "-c", stopScript + " " + strconv.Itoa(port) + " " + pwd}, "", + nil, 10*time.Second) + if err != nil { + job.runtime.Logger.Warn(err.Error()) + job.runtime.Logger.Info("shuwdown redis port[%d] count[%d/10] try use redis-client to shutdown", port, i) + job.ShutdownByClient(port, pwd) + } + status, _ = job.IsRedisRunning(port) + if !status { + job.runtime.Logger.Info("redis port[%d] status is not running. shutdown succ....", port) + shutDownSucc = true + break + } + job.runtime.Logger.Info("shuwdown redis port[%d] count[%d/10] end. redis is running. sleep 60s after continue...", + port, i) + time.Sleep(60 * time.Second) + } + if !shutDownSucc { + job.errChan <- fmt.Errorf("shutdown redis port[%d] failed err[%s]", port, err.Error()) + return + } + + job.runtime.Logger.Info("shuwdown redis port[%d] succ....", port) +} + +// ShutdownByClient 使用客户端shutdown的方式去停止实例 +func (job *RedisShutdown) ShutdownByClient(port int, pwd string) { + insAddr := fmt.Sprintf("%s:%d", job.params.IP, port) + redisClient, err := myredis.NewRedisClient(insAddr, pwd, 0, consts.TendisTypeRedisInstance) + if err != nil { + return + } + defer redisClient.Close() + + _ = redisClient.Shutdown() +} + +// CheckSlaveConn 检查是否有slave连接 +func (job *RedisShutdown) CheckSlaveConn(port int, pwd string) error { + insAddr := fmt.Sprintf("%s:%d", job.params.IP, port) + redisClient, err := myredis.NewRedisClient(insAddr, pwd, 0, consts.TendisTypeRedisInstance) + if err != nil { + return err + } + defer redisClient.Close() + + replInfo, err := redisClient.Info("replication") + if err != nil { + return err + } + if replInfo["role"] == consts.RedisMasterRole { + if replInfo["connected_slaves"] != "0" { + return fmt.Errorf("%s have %s slave conn, pleace waiting", + insAddr, replInfo["connected_slaves"]) + } + } + + return nil +} + +// BackupDir 备份目录 +func (job *RedisShutdown) BackupDir(port int) { + job.runtime.Logger.Info("redis port[%d] backup dir begin....", port) + if job.params.Debug { + return + } + // 判断目录是否存在 + insDir := fmt.Sprintf("%s/%d", job.RealDataDir, port) + exist := util.FileExists(insDir) + if !exist { + job.runtime.Logger.Info("dir %s is not exists. 
nothing to do", insDir) + return + } + job.runtime.Logger.Info("redis port[%d] backup dir to doing....", port) + mvCmd := fmt.Sprintf("mv %s/%d %s/shutdown_%d_%s", job.RealDataDir, port, + job.RedisBackupDir, port, time.Now().Format("20060102150405")) + job.runtime.Logger.Info(mvCmd) + cmd := []string{"su", consts.MysqlAaccount, "-c", mvCmd} + _, err := util.RunLocalCmd(cmd[0], cmd[1:], "", + nil, 10*time.Second) + if err != nil { + job.errChan <- fmt.Errorf("exec mv dir cmd error[%s]", err.Error()) + return + } + + exist = util.FileExists(insDir) + if !exist { + job.runtime.Logger.Info("mv redis port[%d] dir succ....", port) + return + } + job.runtime.Logger.Info("redis port[%d] backup dir end....", port) + job.errChan <- fmt.Errorf("redis port[%d] dir [%s] exists too..pleace check", port, insDir) +} + +// IsRedisRunning 检查实例是否在运行 +func (job *RedisShutdown) IsRedisRunning(port int) (installed bool, err error) { + time.Sleep(10 * time.Second) + portIsUse, err := util.CheckPortIsInUse(job.params.IP, strconv.Itoa(port)) + return portIsUse, err +} + +// Name 原子任务名 +func (job *RedisShutdown) Name() string { + return "redis_shutdown" +} + +// Retry times +func (job *RedisShutdown) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *RedisShutdown) Rollback() error { + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_switch.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_switch.go new file mode 100644 index 0000000000..948fe30e99 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/redis_switch.go @@ -0,0 +1,774 @@ +package atomredis + +import ( + "context" + "dbm-services/redis/db-tools/dbactuator/models/myredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-playground/validator/v10" +) + +/* + 支持集群切换 + 1. Twemproxy + Tendis 架构支持 + 2. Predixy + RedisCluster (TODO) + + 单实例切换(TODO) + 1. 单实例绑定了 DNS/CLB/北极星 + 2. 
这里需要 直接调用 接口,进行切换 + + 是否同城:: 这里在Actor 不能校验!!!,所以, 这个参数传过来也没用 >_< + + // 输入参数 + { + "cluster_meta":{ + "bk_biz_id":1, + "immute_domain":"xx.db", + "cluster_type":"xxx", + "major_version":"sss", + "twemproxy_set":[ + "1.1.a.1:50000" + ], + "redis_master_set":[ + "2.2.b.2:30000 1-10" + ], + "redis_slave_set":[ + "2.3.c.4:30000 1-10" + ], + "proxy_pass":"**", + "storage_pass":"**" + }, + "switch_info":[ + { + "master":{ + "ip":"1.1.a.1", + "port":30000, + }, + "slave":{ + "ip":"1.1.b.2", + "port":30000, + } + } + ], + "switch_condition":{ + "is_check_sync":true, + "slave_master_diff_time":61, + "last_io_second_ago":100, + "can_write_before_switch":true, + "sync_type":"msms" + } +} +*/ + +// InstanceSwitchParam switch param +type InstanceSwitchParam struct { + MasterInfo InstanceParam `json:"master"` + SlaveInfo InstanceParam `json:"slave"` +} + +// InstanceParam instance (tendis/predixy/twemproxy) info +type InstanceParam struct { + IP string `json:"ip"` + Port int `json:"port"` + // Passwrod string `json:"password"` +} + +// SwitchSyncCheckParam swtich sync check param +type SwitchSyncCheckParam struct { + IsCheckSync bool `json:"is_check_sync"` // 代表是否需要强制切换 + MaxSlaveMasterDiffTime int `json:"slave_master_diff_time"` // 最大心跳时间 + MaxSlaveLastIOSecondsAgo int `json:"last_io_second_ago"` // slave io线程最大时间 + // http://tendis.cn/#/Tendisplus/知识库/集群/manual_failover + SwitchOpt string `json:"switch_opt"` // CLUSTER FAILOVER [FORCE|TAKEOVER] + + // CanWriteBeforeSwitch ,控制 slave 是否可写 【切换前可写、切换后可写】 + CanWriteBeforeSwitch bool `json:"can_write_before_switch"` + InstanceSyncType string `json:"sync_type"` // mms msms +} + +// ClusterInfo 集群信息, +type ClusterInfo struct { + BkBizID int `json:"bk_biz_id"` + ImmuteDomain string `json:"immute_domain"` + ClusterType string `json:"cluster_type"` + MajorVersion string `json:"major_version"` + ProxySet []string `json:"twemproxy_set"` // addr ip:port + RedisMasterSet []string `json:"redis_master_set"` // addr ip:port [seg_start seg_end] + RedisSlaveSet []string `json:"redis_slave_set"` // addr ip:port + ProxyPassword string `json:"proxy_pass"` + StoragePassword string `json:"storage_pass"` +} + +// SwitchParam cluster bind entry +type SwitchParam struct { + ClusterMeta ClusterInfo `json:"cluster_meta"` + SwitchRelation []InstanceSwitchParam `json:"switch_info"` + SyncCondition SwitchSyncCheckParam `json:"switch_condition"` +} + +// RedisSwitch entry +type RedisSwitch struct { + runtime *jobruntime.JobGenericRuntime + params *SwitchParam + + errChan chan error +} + +// NewRedisSwitch 创建一个redis switch对象 +// TODO 1. cluster模式下, cluster forget +// TODO 2. cluster模式下, 切换逻辑验证 +// TODO 3. 
cluster模式下, 同步状态校验 +func NewRedisSwitch() jobruntime.JobRunner { + return &RedisSwitch{} +} + +var supportedClusterType map[string]struct{} + +func init() { + supportedClusterType = map[string]struct{}{} + supportedClusterType[consts.TendisTypeTwemproxyTendisSSDInstance] = struct{}{} + supportedClusterType[consts.TendisTypeTwemproxyRedisInstance] = struct{}{} + supportedClusterType[consts.TendisTypePredixyTendisplusCluster] = struct{}{} +} + +// Init 初始化 +func (job *RedisSwitch) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("RedisSwitch Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("RedisSwitch Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + } + + // 集群类型支持校验 + if _, ok := supportedClusterType[job.params.ClusterMeta.ClusterType]; !ok { + job.runtime.Logger.Error("unsupported cluster type :%s", job.params.ClusterMeta.ClusterType) + return fmt.Errorf("unsupported cluster type :%s", job.params.ClusterMeta.ClusterType) + } + return nil +} + +// Run 运行切换逻辑 +func (job *RedisSwitch) Run() (err error) { + job.runtime.Logger.Info("redisswitch start; params:%+v", job.params) + + // 前置检查 + if err := job.precheckForSwitch(); err != nil { + job.runtime.Logger.Error("redisswitch precheck err:%v, params:%+v", err, job.params) + return err + } + job.runtime.Logger.Info("redisswitch precheck all success !") + + job.runtime.Logger.Info("redisswitch begin do storages switch .") + // 执行切换, proxy并行,instance 窜行 + for idx, storagePair := range job.params.SwitchRelation { + if job.params.SyncCondition.CanWriteBeforeSwitch { + if err := job.enableWrite4Slave(storagePair.SlaveInfo.IP, + storagePair.SlaveInfo.Port, job.params.ClusterMeta.StoragePassword); err != nil { + job.runtime.Logger.Error("redisswitch slaveof no one failed when do %d:[%+v];with err:%+v", idx, storagePair, err) + } + } + // 这里需要区分集群类型, 不同架构切换方式不一致 + if consts.IsTwemproxyClusterType(job.params.ClusterMeta.ClusterType) { + if err := job.doTendisStorageSwitch4Twemproxy(storagePair); err != nil { + job.runtime.Logger.Error("redisswitch switch failed when do %d:[%+v];with err:%+v", idx, storagePair, err) + return err + } + if err := job.doSlaveOfNoOne4NewMaster(storagePair.SlaveInfo.IP, + storagePair.SlaveInfo.Port, job.params.ClusterMeta.StoragePassword); err != nil { + job.runtime.Logger.Error("redisswitch slaveof no one failed when do %d:[%+v];with err:%+v", idx, storagePair, err) + return err + } + if err := job.checkProxyConsistency(); err != nil { + job.runtime.Logger.Error("redisswitch after check all proxy backends consistency with err:%+v", err) + return err + } + } else if consts.IsClusterDbType(job.params.ClusterMeta.ClusterType) { + if err := job.doTendisStorageSwitch4Cluster(storagePair); err != nil { + job.runtime.Logger.Error("redisswitch switch failed when do %d:[%+v];with err:%+v", idx, storagePair, err) + return err + } + if err := job.clusterForgetNode(storagePair); err != nil { + job.runtime.Logger.Error("redisswitch forget old node failed %d:[%+v];with err:%+v", idx, storagePair, err) + 
return err + } + // 验证切换成功 ? + } else { + job.runtime.Logger.Error("unsupported cluster type :%+v", job.params.ClusterMeta) + } + job.runtime.Logger.Info("switch from:%s:%d to:%s:%d success", + storagePair.MasterInfo.IP, storagePair.MasterInfo.Port, + storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port) + } + job.runtime.Logger.Info("redisswitch switch all success; domain:%s", job.params.ClusterMeta.ImmuteDomain) + return nil +} + +// clusterForgetNode 为了将节点从群集中彻底删除,必须将 CLUSTER FORGET 命令发送到所有其余节点,无论他们是Master/Slave。 +// 不允许命令执行的特殊条件, 并在以下情况下返回错误: +// 1. 节点表中找不到指定的节点标识。 +// 2. 接收命令的节点是从属节点,并且指定的节点ID标识其当前主节点。 +// 3. 节点 ID 标识了我们发送命令的同一个节点。 + +// clusterForgetNode 返回值 +// 简单的字符串回复:OK如果命令执行成功,否则返回错误。 +// 我们有一个60秒的窗口来,所以这个函数必须在60s内执行完成 +func (job *RedisSwitch) clusterForgetNode(storagePair InstanceSwitchParam) error { + newMasterAddr := fmt.Sprintf("%s:%d", storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port) + newMasterConn, err := myredis.NewRedisClientWithTimeout(newMasterAddr, + job.params.ClusterMeta.StoragePassword, 1, job.params.ClusterMeta.ClusterType, time.Second) + if err != nil { + return err + } + defer newMasterConn.Close() + addrNodes, err := newMasterConn.GetAddrMapToNodes() + if err != nil { + return err + } + // 遍历集群节点信息, 拿到老的Master/Slave 节点ID + var ok bool + var willForgetNode *myredis.ClusterNodeData + var forgetNodeMasterID, forgetNodeSlaveID string + if willForgetNode, ok = addrNodes[fmt.Sprintf("%s:%d", + storagePair.MasterInfo.IP, storagePair.SlaveInfo.Port)]; !ok { + job.runtime.Logger.Warn("cluster old master not found by cluster %s:%d :%+v", + storagePair.MasterInfo.IP, storagePair.SlaveInfo.Port, addrNodes) + return nil + } + forgetNodeMasterID = willForgetNode.NodeID + for addr, nodeInfo := range addrNodes { + if nodeInfo.MasterID == forgetNodeMasterID { + forgetNodeSlaveID = nodeInfo.NodeID + job.runtime.Logger.Info("get myslave[%s] nodeID[%s]", addr, forgetNodeSlaveID) + break + } + job.runtime.Logger.Warn("got no slave for me [%s:%d]", storagePair.MasterInfo.IP, storagePair.SlaveInfo.Port) + } + + // 遍历集群中所有节点 + for addr := range addrNodes { + nodeConn, err := myredis.NewRedisClientWithTimeout(addr, + job.params.ClusterMeta.StoragePassword, 1, job.params.ClusterMeta.ClusterType, time.Second) + if err != nil { + return err + } + defer nodeConn.Close() + // var fgRst *redis.StatusCmd + // // (error) ERR:18,msg:forget node unkown 传了不存在的NodeID 1. 节点表中找不到指定的节点标识。 !!! 这里和官方版本返回错误不一致 !!! + // // (error) ERR:18,msg:I tried hard but I can't forget myself... 传了我自己进去 + // // (error) ERR:18,msg:Can't forget my master! 2. 
接收命令的节点是从属节点,并且指定的节点ID标识其当前主节点。 + // if fgRst = nodeConn.InstanceClient.ClusterForget(context.TODO(), forgetNodeMasterID); fgRst.Err != nil { + + // } + + // if fgRst = nodeConn.InstanceClient.ClusterForget(context.TODO(), forgetNodeSlaveID); fgRst.Err != nil { + + // } + } + // GetClusterNodes + return nil +} + +func (job *RedisSwitch) enableWrite4Slave(ip string, port int, pass string) error { + newMasterAddr := fmt.Sprintf("%s:%d", ip, port) + newMasterConn, err := myredis.NewRedisClientWithTimeout(newMasterAddr, + pass, 1, job.params.ClusterMeta.ClusterType, time.Second) + if err != nil { + return err + } + defer newMasterConn.Close() + rst, err := newMasterConn.ConfigSet("slave-read-only", "no") + if err != nil { + job.runtime.Logger.Error("[%s] config set slave-read-only no for failed:%+v", newMasterAddr, err) + return err + } + if rst != "OK" { + job.runtime.Logger.Error("[%s] config set slave-read-only no failed:%s", newMasterAddr, rst) + return fmt.Errorf("slaveofNooNE:%s", rst) + } + job.runtime.Logger.Info("[%s] config set slave-read-only no result:%s", newMasterAddr, rst) + return nil +} + +func (job *RedisSwitch) doSlaveOfNoOne4NewMaster(ip string, port int, pass string) error { + newMasterAddr := fmt.Sprintf("%s:%d", ip, port) + newMasterConn, err := myredis.NewRedisClientWithTimeout(newMasterAddr, + pass, 1, job.params.ClusterMeta.ClusterType, time.Second) + if err != nil { + return fmt.Errorf("[%s] conn new master failed :%+v", newMasterAddr, err) + } + defer newMasterConn.Close() + rst, err := newMasterConn.SlaveOf("No", "oNE") + if err != nil { + job.runtime.Logger.Error("[%s] exec slaveof No oNE for failed:%+v", newMasterAddr, err) + return fmt.Errorf("[%s] exec slaveof No oNE for failed:%+v", newMasterAddr, err) + } + if rst != "OK" { + job.runtime.Logger.Error("[%s] exec slaveof No oNE for failed:%s", newMasterAddr, rst) + return fmt.Errorf("[%s] slaveofNooNE:%s", newMasterAddr, rst) + } + job.runtime.Logger.Info("[%s] exec slaveof No oNE for result:%s", newMasterAddr, rst) + return nil +} + +// doTendisStorageSwitch4Cluster rediscluster 类型架构切换姿势 http://redis.cn/commands/cluster-failover.html +func (job *RedisSwitch) doTendisStorageSwitch4Cluster(storagePair InstanceSwitchParam) error { + newMasterAddr := fmt.Sprintf("%s:%d", storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port) + newMasterConn, err := myredis.NewRedisClientWithTimeout(newMasterAddr, + job.params.ClusterMeta.StoragePassword, 0, job.params.ClusterMeta.ClusterType, time.Second) + if err != nil { + return err + } + defer newMasterConn.Close() + // 该命令只能在群集slave节点执行,让slave节点进行一次人工故障切换。 + if job.params.SyncCondition.SwitchOpt == "" { + if rst := newMasterConn.InstanceClient.ClusterFailover(context.TODO()); rst.Err() != nil { + job.runtime.Logger.Error("exec cluster FAILOVER for %s failed:%+v", newMasterAddr, err) + return err + } else if rst.String() != "OK" { + // 该命令已被接受并进行人工故障转移回复OK,切换操作无法执行(如发送命令的已经时master节点)时回复错误 + job.runtime.Logger.Error("exec cluster FAILOVER for %s failed:%s", newMasterAddr, rst.String()) + return fmt.Errorf("clusterfailover:%s", rst.String()) + } + } else { + if rst, err := newMasterConn.DoCommand([]string{"CLUSTER", "FAILOVER", + job.params.SyncCondition.SwitchOpt}, 0); err != nil { + job.runtime.Logger.Error("exec cluster FAILOVER %s for %s failed:%+v", + newMasterAddr, job.params.SyncCondition.SwitchOpt, err) + return err + } else if result, ok := rst.(string); ok { + if result != "OK" { + job.runtime.Logger.Error("exec cluster FAILOVER %s for %s failed:%s", + newMasterAddr, 
job.params.SyncCondition.SwitchOpt, result) + return fmt.Errorf("clusterfailover:%s", result) + } + } + } + + job.runtime.Logger.Info("switch succ from:%s:%d to:%s:%d ^_^", + storagePair.MasterInfo.IP, storagePair.MasterInfo.Port, + storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port) + return nil +} + +// doTendisStorageSwitch4Twemproxy 刷新twemproxy 后端 +func (job *RedisSwitch) doTendisStorageSwitch4Twemproxy(storagePair InstanceSwitchParam) error { + wg := &sync.WaitGroup{} + errCh := make(chan error, len(job.params.ClusterMeta.ProxySet)) + for _, proxy := range job.params.ClusterMeta.ProxySet { + wg.Add(1) + go func(wg *sync.WaitGroup, proxy string) { + defer wg.Done() + addrx := strings.Split(proxy, ":") + port, _ := strconv.Atoi(addrx[1]) + rst, err := util.DoSwitchTwemproxyBackends(addrx[0], port, + fmt.Sprintf("%s:%d", storagePair.MasterInfo.IP, storagePair.MasterInfo.Port), + fmt.Sprintf("%s:%d", storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port)) + if err != nil || !strings.Contains(rst, "success") { + errCh <- fmt.Errorf("[%s:%d]switch proxy [%s] to:%s:%d result:%s,err:%+v", + storagePair.MasterInfo.IP, storagePair.MasterInfo.Port, proxy, + storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port, rst, err) + } + job.runtime.Logger.Info("[%s:%d]switch proxy [%s] to:%s:%d result:%s", + storagePair.MasterInfo.IP, storagePair.MasterInfo.Port, proxy, + storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port, rst) + }(wg, proxy) + } + wg.Wait() + close(errCh) + + for someErr := range errCh { + return someErr + } + job.runtime.Logger.Info("[%s:%d]all proxy switch succ to:%s:%d ^_^", + storagePair.MasterInfo.IP, storagePair.MasterInfo.Port, + storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port) + return nil +} + +// precheckForSwitch 切换前的检查 +func (job *RedisSwitch) precheckForSwitch() error { + // // 1. 检查密码相同 job.params.ClusterMeta.StoragePassword + // for _, pair := range job.params.SwitchRelation { + // if pair.MasterInfo.Passwrod != job.params.ClusterMeta.StoragePassword { + // return fmt.Errorf("err password not equal %s VS %s", + // pair.MasterInfo.Passwrod, job.params.ClusterMeta.StoragePassword) + // } + // if pair.SlaveInfo.Passwrod != job.params.ClusterMeta.StoragePassword { + // return fmt.Errorf("err password not equal %s VS %s", + // pair.SlaveInfo.Passwrod, job.params.ClusterMeta.StoragePassword) + // } + // } + + // 2. 检查old master 是集群的master节点 + oldmasters := map[string]struct{}{} + for _, oldmaster := range job.params.ClusterMeta.RedisMasterSet { + if strings.Contains(oldmaster, "-") { // "2.2.x.4:30000 0-1" + oldmaster = strings.Split(oldmaster, " ")[0] + } + oldmasters[oldmaster] = struct{}{} + } + for _, pair := range job.params.SwitchRelation { + switchFrom := fmt.Sprintf("%s:%d", pair.MasterInfo.IP, pair.MasterInfo.Port) + if _, ok := oldmasters[switchFrom]; !ok { + return fmt.Errorf("err switch from storage {%s} not in cluster {%s}", + switchFrom, job.params.ClusterMeta.ImmuteDomain) + } + } + + // 3. 检查 proxy 可登陆 & proxy 状态一致 + job.runtime.Logger.Info("precheck for all proxies; domain:%s, proxies:%+v", + job.params.ClusterMeta.ImmuteDomain, job.params.ClusterMeta.ProxySet) + if err := job.precheckForProxy(); err != nil { + return err + } + job.runtime.Logger.Info("precheck for all proxies succ !") + + // 4. 
检查redis 可登陆 + job.runtime.Logger.Info("precheck for [switchlink storages sync] storages:%+v", job.params.SwitchRelation) + if err := job.precheckStorageLogin(); err != nil { + return err + } + job.runtime.Logger.Info("precheck for [switchlink storages sync] login succ !") + + // 5. 检查同步状态 + job.runtime.Logger.Info("precheck for [switchlink storages sync] status .") + if err := job.precheckStorageSync(); err != nil { + return err + } + return nil +} + +func (job *RedisSwitch) precheckForProxy() error { + if consts.IsTwemproxyClusterType(job.params.ClusterMeta.ClusterType) { + // 3.1 检查 proxy 可登陆 + if err := job.precheckProxyLogin(); err != nil { + job.runtime.Logger.Error("some [proxy login] failed :%+v", err) + return err + } + job.runtime.Logger.Info("all [proxy login] succ !") + + // 3.2 检查proxy 状态一致 + if err := job.checkProxyConsistency(); err != nil { + return err + } + } else { + job.runtime.Logger.Warn("[%s] proxy check skiped !", job.params.ClusterMeta.ClusterType) + } + return nil +} + +func (job *RedisSwitch) checkProxyConsistency() error { + wg := &sync.WaitGroup{} + md5Ch := make(chan string, len(job.params.ClusterMeta.ProxySet)) + + for _, proxy := range job.params.ClusterMeta.ProxySet { + wg.Add(1) + go func(proxy string, wg *sync.WaitGroup, md5Ch chan string) { + defer wg.Done() + pmd5, err := util.GetTwemProxyBackendsMd5Sum(proxy) + if err != nil { + md5Ch <- fmt.Sprintf("%v", err) + } else { + md5Ch <- pmd5 + } + }(proxy, wg, md5Ch) + } + wg.Wait() + close(md5Ch) + + proxyMd5s := map[string]struct{}{} + for pmd5 := range md5Ch { + proxyMd5s[pmd5] = struct{}{} + } + if len(proxyMd5s) != 1 { + return fmt.Errorf("err mutil [proxy backends md5] got [%+v]", proxyMd5s) + } + job.runtime.Logger.Info("all [proxy backends md5] consistency [%+v]", proxyMd5s) + return nil +} + +// precheckStorageSync 检查节点间同步状态 +func (job *RedisSwitch) precheckStorageSync() error { + wg := &sync.WaitGroup{} + job.errChan = make(chan error, len(job.params.SwitchRelation)*3) + + for _, storagePair := range job.params.SwitchRelation { + wg.Add(1) + go func(storagePair InstanceSwitchParam, wg *sync.WaitGroup) { + defer wg.Done() + // oldMasterAddr := fmt.Sprintf("%s:%d", storagePair.MasterInfo.IP, storagePair.MasterInfo.Port) + newMasterAddr := fmt.Sprintf("%s:%d", storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port) + newMasterConn, err := myredis.NewRedisClientWithTimeout(newMasterAddr, + job.params.ClusterMeta.StoragePassword, 1, job.params.ClusterMeta.ClusterType, time.Second) + if err != nil { + job.errChan <- fmt.Errorf("[%s]new master node, err:%+v", newMasterAddr, err) + return + } + defer newMasterConn.Close() + + replic, err := newMasterConn.Info("replication") + if err != nil { + job.errChan <- fmt.Errorf("[%s]new master node,with info,err:%+v", newMasterAddr, err) + return + } + job.runtime.Logger.Info("[%s]new master node replication info :%+v", newMasterAddr, replic) + + if _, ok := replic["slave0"]; !ok { + job.runtime.Logger.Warn("[%s]new master node got no slave connected or replication , please note.", newMasterAddr) + } + + if replic["role"] != "slave" || replic["master_link_status"] != "up" { + job.runtime.Logger.Error("[%s]new master node role is %s(SLAVE), master_link_status is %s(UP)", + newMasterAddr, replic["role"], replic["master_link_status"]) + job.errChan <- fmt.Errorf("[%s]new master node, bad role or link status", newMasterAddr) + } + + // 3. 检查监控写入心跳 master:PORT:time 时间差。 【重要!!!】 + job.errChan <- job.checkReplicationSync(newMasterConn, storagePair, replic) + + // 4. 
检查信息对等: slave 上记录的 master 必须就是传入的真实 master
+			realMasterIP := replic["master_host"]
+			realMasterPort := replic["master_port"]
+			job.errChan <- job.checkReplicationDetail(storagePair, realMasterIP, realMasterPort)
+		}(storagePair, wg)
+	}
+	wg.Wait()
+	close(job.errChan)
+
+	var err error
+	for err = range job.errChan {
+		if err != nil {
+			job.runtime.Logger.Error("got err :%+v", err)
+		}
+	}
+	return err
+}
+
+// checkReplicationDetail verifies the real master/slave relationship as reported by the running instances
+func (job *RedisSwitch) checkReplicationDetail(
+	storagePair InstanceSwitchParam, realIP, realPort string) error {
+
+	if job.params.SyncCondition.InstanceSyncType == "mms" {
+		// check slave's master; a mismatch on either IP or port is an error
+		if storagePair.MasterInfo.IP != realIP || strconv.Itoa(storagePair.MasterInfo.Port) != realPort {
+			return fmt.Errorf("err switch type [%s] new master's [%s:%d] real master [%s:%s] not equal to input [%s:%d]",
+				job.params.SyncCondition.InstanceSyncType, storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port,
+				realIP, realPort, storagePair.MasterInfo.IP, storagePair.MasterInfo.Port)
+		}
+		// TODO: check that master and slave versions are compatible.
+		job.runtime.Logger.Info("[%s:%d] storage is replicating from the configured master %s:%s in [mms] mode !",
+			storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port, realIP, realPort)
+	} else if job.params.SyncCondition.InstanceSyncType == "msms" {
+		oldSlaveConn, err := myredis.NewRedisClientWithTimeout(fmt.Sprintf("%s:%s", realIP, realPort),
+			job.params.ClusterMeta.StoragePassword, 1, job.params.ClusterMeta.ClusterType, time.Second)
+		if err != nil {
+			return fmt.Errorf("[%s:%d] conn addr:%s:%s,err:%+v",
+				storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port, realIP, realPort, err)
+		}
+		defer oldSlaveConn.Close()
+		slaveReplic, err := oldSlaveConn.Info("replication")
+		if err != nil {
+			return fmt.Errorf("[%s:%d] conn new master's master failed %s:%s,err:%+v",
+				storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port, realIP, realPort, err)
+		}
+		job.runtime.Logger.Info("[%s:%d] cluster old slave: %s:%s replication info msms:%+v",
+			storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port, realIP, realPort, slaveReplic)
+
+		masterMasterHost := slaveReplic["master_host"]
+		masterMasterPort := slaveReplic["master_port"]
+		masterLastIOSeconds, _ := strconv.Atoi(slaveReplic["master_last_io_seconds_ago"])
+
+		if slaveReplic["role"] != "slave" || slaveReplic["master_link_status"] != "up" {
+			job.runtime.Logger.Error("[%s:%d] cluster old slave: %s:%s role:%s(SLAVE), master_link_status:%s(UP)",
+				storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port,
+				realIP, realPort, slaveReplic["role"], slaveReplic["master_link_status"])
+			return fmt.Errorf("addr:%s:%s,new master bad role or link status", realIP, realPort)
+		}
+		job.runtime.Logger.Info("[%s:%d] cluster old slave: %s:%s role:%s, master_link_status:%s succ !",
+			storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port,
+			realIP, realPort, slaveReplic["role"], slaveReplic["master_link_status"])
+
+		if masterMasterHost != storagePair.MasterInfo.IP || strconv.Itoa(storagePair.MasterInfo.Port) != masterMasterPort {
+			return fmt.Errorf("addr:%s:%s,runtime master does not match input old master :%s:%d",
+				realIP, realPort, storagePair.MasterInfo.IP, storagePair.MasterInfo.Port)
+		}
+		job.runtime.Logger.Info("[%s:%d] cluster old slave: %s:%s has configured master %s:%s, check passed !",
+			storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port, realIP, realPort, masterMasterHost, masterMasterPort)
+
+		if masterLastIOSeconds > job.params.SyncCondition.MaxSlaveLastIOSecondsAgo {
+			return fmt.Errorf("err old slave's (%s:%s)[%s:%d] master_last_io_seconds_ago %d > %d ",
+				realIP, realPort, storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port,
+				masterLastIOSeconds, job.params.SyncCondition.MaxSlaveLastIOSecondsAgo)
+		}
+		job.runtime.Logger.Info("[%s:%d] cluster old slave: %s:%s master_last_io_seconds_ago:%d<(%d) succ !",
+			storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port, realIP, realPort,
+			masterLastIOSeconds, job.params.SyncCondition.MaxSlaveLastIOSecondsAgo)
+	} else {
+		return fmt.Errorf("[%s:%d] err unknown switch type : %s",
+			storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port, job.params.SyncCondition.InstanceSyncType)
+	}
+	return nil
+}
+
+// checkReplicationSync checks the monitoring heartbeat (master:PORT:time) the master writes, as read from the slave
+func (job *RedisSwitch) checkReplicationSync(newMasterConn *myredis.RedisClient,
+	storagePair InstanceSwitchParam, replic map[string]string) error {
+	var err error
+	var masterTime, masterDbsize, slaveTime int64
+	oldMasterAddr := fmt.Sprintf("%s:%d", storagePair.MasterInfo.IP, storagePair.MasterInfo.Port)
+	newMasterAddr := fmt.Sprintf("%s:%d", storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port)
+
+	rst := newMasterConn.InstanceClient.Get(context.TODO(), fmt.Sprintf("%s:time", oldMasterAddr))
+	if rst.Err() != nil {
+		return fmt.Errorf("[%s]new master node, exec cmd err:%+v", newMasterAddr, rst.Err())
+	}
+	if masterTime, err = rst.Int64(); err != nil {
+		return fmt.Errorf("[%s]new master node, time2Int64 err:%+v", newMasterAddr, err)
+	}
+
+	if rst = newMasterConn.InstanceClient.Get(context.TODO(),
+		fmt.Sprintf("%s:0:dbsize", oldMasterAddr)); rst.Err() != nil {
+		return fmt.Errorf("[%s]new master node, exec cmd err:%+v", newMasterAddr, rst.Err())
+	}
+	if masterDbsize, err = rst.Int64(); err != nil {
+		job.runtime.Logger.Warn("[%s]new master node, get db0,dbsize2Int64 err:%+v", newMasterAddr, err)
+	}
+
+	slaveTime = time.Now().Unix() // gcs.perl used `redis-cli time` here; the local clock is used instead
+	lastIOseconds, _ := strconv.Atoi(replic["master_last_io_seconds_ago"])
+
+	slaveMasterDiffTime := math.Abs(float64(slaveTime) - float64(masterTime))
+	if slaveMasterDiffTime > float64(job.params.SyncCondition.MaxSlaveMasterDiffTime) {
+		if job.params.SyncCondition.IsCheckSync {
+			return fmt.Errorf("err master slave sync too long %s => %s diff: %.0f(%d)",
+				oldMasterAddr, newMasterAddr, slaveMasterDiffTime, job.params.SyncCondition.MaxSlaveMasterDiffTime)
+		}
+		job.runtime.Logger.Warn("master slave sync too long %s => %s diff: %.0f(%d)",
+			oldMasterAddr, newMasterAddr, slaveMasterDiffTime, job.params.SyncCondition.MaxSlaveMasterDiffTime)
+	}
+	if lastIOseconds > job.params.SyncCondition.MaxSlaveLastIOSecondsAgo {
+		if job.params.SyncCondition.IsCheckSync {
+			return fmt.Errorf("err slave's (%s) master_last_io_seconds_ago %d > %d ",
+				newMasterAddr, lastIOseconds, job.params.SyncCondition.MaxSlaveLastIOSecondsAgo)
+		}
+		job.runtime.Logger.Warn("slave's (%s) master_last_io_seconds_ago %d > %d ",
+			newMasterAddr, lastIOseconds, job.params.SyncCondition.MaxSlaveLastIOSecondsAgo)
+	}
+
+	job.runtime.Logger.Info(
+		"[%s]new master node, master on slave time:%d, diff:%.0f dbsize:%d; slave time:%d, master_last_io_seconds_ago:%d",
+		newMasterAddr, masterTime, slaveMasterDiffTime, masterDbsize, slaveTime, lastIOseconds)
+	return nil
+}
+
+// precheckStorageLogin ensures every redis instance involved in the switch can be logged into
+func (job *RedisSwitch) precheckStorageLogin() error {
+	wg := &sync.WaitGroup{}
+	job.errChan = make(chan error, len(job.params.SwitchRelation))
+	for _, storagePair := range job.params.SwitchRelation {
+		wg.Add(1)
+		go 
func(storagePair InstanceSwitchParam, clusterType string, wg *sync.WaitGroup) { + defer wg.Done() + addr := fmt.Sprintf("%s:%d", storagePair.MasterInfo.IP, storagePair.MasterInfo.Port) + if err := job.precheckLogin(addr, job.params.ClusterMeta.StoragePassword, clusterType); err != nil { + // job.errChan <- fmt.Errorf("addr:%s,err:%+v", addr, err) + job.runtime.Logger.Warn("old master login failed :%s:%+v", addr, err) + } + if err := job.precheckLogin(fmt.Sprintf("%s:%d", storagePair.SlaveInfo.IP, storagePair.SlaveInfo.Port), + job.params.ClusterMeta.StoragePassword, clusterType); err != nil { + job.errChan <- fmt.Errorf("addr:%s,err:%+v", addr, err) + } + }(storagePair, job.params.ClusterMeta.ClusterType, wg) + } + wg.Wait() + close(job.errChan) + + var err error + for err = range job.errChan { + if err != nil { + job.runtime.Logger.Error("got err :%+v", err) + } + } + return err +} + +// precheckProxyLogin proxy 链接性检查 +func (job *RedisSwitch) precheckProxyLogin() error { + wg := &sync.WaitGroup{} + job.errChan = make(chan error, len(job.params.ClusterMeta.ProxySet)) + for _, proxy := range job.params.ClusterMeta.ProxySet { + wg.Add(1) + go func(proxy string, clusterType string, wg *sync.WaitGroup) { + defer wg.Done() + if err := job.precheckLogin(proxy, job.params.ClusterMeta.ProxyPassword, clusterType); err != nil { + job.errChan <- fmt.Errorf("addr:%s,err:%+v", proxy, err) + } + }(proxy, job.params.ClusterMeta.ClusterType, wg) + } + wg.Wait() + close(job.errChan) + + var err error + for err = range job.errChan { + if err != nil { + job.runtime.Logger.Error("precheck for [proxy login] got err :%+v", err) + } + } + return err + +} + +// precheckLogin 检查 proxy/redis 可以登录 +func (job *RedisSwitch) precheckLogin(addr, pass, clusterType string) error { + rconn, err := myredis.NewRedisClientWithTimeout(addr, pass, 0, clusterType, time.Second) + if err != nil { + return fmt.Errorf("conn redis %s failed:%+v", addr, err) + } + defer rconn.Close() + + if _, err := rconn.DoCommand([]string{"TYPe", "key|for|dba|login|test"}, 0); err != nil { + return fmt.Errorf("do cmd failed %s:%+v", addr, err) + } + return nil +} + +// Name 原子任务名 +func (job *RedisSwitch) Name() string { + return "redis_switch" +} + +// Retry times +func (job *RedisSwitch) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *RedisSwitch) Rollback() error { + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/tendisssd_dr_restore.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/tendisssd_dr_restore.go new file mode 100644 index 0000000000..8baf4f354e --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/tendisssd_dr_restore.go @@ -0,0 +1,598 @@ +package atomredis + +import ( + "dbm-services/redis/db-tools/dbactuator/models/myredis" + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-playground/validator/v10" +) + +// TendisssdDrRestoreParams tendisSSD建立dr关系参数 +type TendisssdDrRestoreParams struct { + // 备份信息 + BackupTasks []BackupTask `json:"backup_tasks" validate:"required"` + // master信息 + MasterIP string `json:"master_ip" validate:"required"` + MasterStartPort int `json:"master_start_port"` + MasterInstNum int `json:"master_inst_num"` + MasterPorts []int 
`json:"master_ports"` + MasterAuth string `json:"master_auth" validate:"required"` + // slave信息 + SlaveIP string `json:"slave_ip" validate:"required"` + SlaveStartPort int `json:"slave_start_port"` + SlaveInstNum int `json:"slave_inst_num"` + SlavePorts []int `json:"slave_ports"` + SlavePassword string `json:"slave_password" validate:"required"` + // 全备所在目录 + BackupDir string `json:"backup_dir"` +} + +// TendisssdDrRestore tendisssd dr restore atomjob +type TendisssdDrRestore struct { + runtime *jobruntime.JobGenericRuntime + params TendisssdDrRestoreParams +} + +// 无实际作用,仅确保实现了 jobruntime.JobRunner 接口 +var _ jobruntime.JobRunner = (*TendisssdDrRestore)(nil) + +// NewTendisssdDrRestore new +func NewTendisssdDrRestore() jobruntime.JobRunner { + return &TendisssdDrRestore{} +} + +// Init 初始化 +func (job *TendisssdDrRestore) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("TendisssdDrRestore Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("TendisssdDrRestore Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + } + // MasterPorts 和 MasterInstNum 不能同时为空 + if len(job.params.MasterPorts) == 0 && job.params.MasterInstNum == 0 { + err = fmt.Errorf("TendisssdDrRestore MasterPorts(%+v) and MasterInstNum(%d) is invalid", job.params.MasterPorts, + job.params.MasterInstNum) + job.runtime.Logger.Error(err.Error()) + return err + } + if job.params.MasterInstNum > 0 { + ports := make([]int, 0, job.params.MasterInstNum) + for idx := 0; idx < job.params.MasterInstNum; idx++ { + ports = append(ports, job.params.MasterStartPort+idx) + } + job.params.MasterPorts = ports + } else { + // 保持元素顺序,做一些去重 + job.params.MasterPorts = common.UniqueSlice(job.params.MasterPorts) + } + // SlavePorts 和 SlaveInstNum 不能同时为空 + if len(job.params.SlavePorts) == 0 && job.params.SlaveInstNum == 0 { + err = fmt.Errorf("TendisssdDrRestore SlavePorts(%+v) and SlaveInstNum(%d) is invalid", job.params.SlavePorts, + job.params.SlaveInstNum) + job.runtime.Logger.Error(err.Error()) + return err + } + if job.params.SlaveInstNum > 0 { + ports := make([]int, 0, job.params.SlaveInstNum) + for idx := 0; idx < job.params.SlaveInstNum; idx++ { + ports = append(ports, job.params.SlaveStartPort+idx) + } + job.params.SlavePorts = ports + } else { + // 保持元素顺序,做一些去重 + job.params.SlavePorts = common.UniqueSlice(job.params.SlavePorts) + } + return nil +} + +// Name 原子任务名 +func (job *TendisssdDrRestore) Name() string { + return "tendisssd_dr_restore" +} + +// Run 执行 +func (job *TendisssdDrRestore) Run() error { + var err error + backupMap := make(map[string]BackupTask, len(job.params.BackupTasks)) + for _, task01 := range job.params.BackupTasks { + backupMap[task01.Addr()] = task01 + } + restoreTasks := make([]*TendisssdDrRestoreTask, 0, len(job.params.SlavePorts)) + for idx, slavePort := range job.params.SlavePorts { + masterAddr := job.params.MasterIP + ":" + strconv.Itoa(job.params.MasterPorts[idx]) + backTask, ok := backupMap[masterAddr] + if !ok { + err = fmt.Errorf("master(%s) not found backupFile in backup_tasks[%+v]", 
masterAddr, job.params.BackupTasks) + job.runtime.Logger.Error(err.Error()) + return err + } + restoreItem := NewSSDDrRestoreTask(backTask, + job.params.MasterIP, job.params.MasterPorts[idx], job.params.MasterAuth, + job.params.SlaveIP, slavePort, job.params.SlavePassword, + job.params.BackupDir, job.runtime) + restoreTasks = append(restoreTasks, restoreItem) + } + util.StopBkDbmon() + defer util.StartBkDbmon() + + wg := sync.WaitGroup{} + genChan := make(chan *TendisssdDrRestoreTask) + limit := 3 // 并发度3 + for worker := 0; worker < limit; worker++ { + wg.Add(1) + go func() { + defer wg.Done() + for taskItem := range genChan { + taskItem.Run() + } + }() + } + go func() { + // 关闭genChan,以便让所有goroutine退出 + defer close(genChan) + for _, task := range restoreTasks { + restoreItem := task + genChan <- restoreItem + } + }() + wg.Wait() + for _, task := range restoreTasks { + restoreItem := task + if restoreItem.Err != nil { + return restoreItem.Err + } + } + return nil +} + +// Retry times +func (job *TendisssdDrRestore) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *TendisssdDrRestore) Rollback() error { + return nil +} + +// TendisssdDrRestoreTask tendis-ssd dr恢复task +type TendisssdDrRestoreTask struct { + ReplicaItem + BakTask BackupTask `json:"bak_task"` + TaskDir string `json:"task_dir"` + MasterCli *myredis.RedisClient `json:"-"` + SlaveCli *myredis.RedisClient `json:"-"` + MasterVersion string `json:"master_version"` + DbType string `json:"db_type"` + RestoreTool string `json:"restore_tool"` + DepsDir string `json:"deps_dir"` + LocalFullBackupDir string `json:"local_full_backup_dir"` + SlaveDataDir string `json:"slave_data_dir"` + runtime *jobruntime.JobGenericRuntime + Err error `json:"-"` +} + +// NewSSDDrRestoreTask new tendis-ssd dr restore task +func NewSSDDrRestoreTask(bakTask BackupTask, + masterIP string, masterPort int, masterAuth string, + slaveIP string, slavePort int, slavePassword, taskDir string, + runtime *jobruntime.JobGenericRuntime) *TendisssdDrRestoreTask { + return &TendisssdDrRestoreTask{ + ReplicaItem: ReplicaItem{ + MasterIP: masterIP, + MasterPort: masterPort, + MasterAuth: masterAuth, + SlaveIP: slaveIP, + SlavePort: slavePort, + SlavePassword: slavePassword, + }, + BakTask: bakTask, + TaskDir: taskDir, + DepsDir: "/usr/local/redis/bin/deps", + runtime: runtime, + } +} + +// Run 执行恢复任务 +func (task *TendisssdDrRestoreTask) Run() { + var ok bool + var masterType, slaveType string + task.newConnect() + if task.Err != nil { + return + } + // master 和 slave都必须是 tendisSSD类型 + masterType, task.Err = task.MasterCli.GetTendisType() + if task.Err != nil { + return + } + slaveType, task.Err = task.MasterCli.GetTendisType() + if task.Err != nil { + return + } + if masterType != consts.TendisTypeTendisSSDInsance || + slaveType != consts.TendisTypeTendisSSDInsance { + task.Err = fmt.Errorf("master(%s) dbType:%s,slave(%s) dbType:%s,dbType must be %s", + task.MasterAddr(), masterType, task.SlaveAddr(), slaveType, consts.TendisTypeTendisSSDInsance) + task.runtime.Logger.Error(task.Err.Error()) + return + } + + ok, _ = task.SlaveCli.IsTendisSSDReplicaStatusOk(task.MasterIP, strconv.Itoa(task.MasterPort)) + if ok { + // 如果主从关系已经ok,则避免重复执行 + task.runtime.Logger.Info("tendisSSD slave(%s) master(%s) link_status:up", task.SlaveAddr(), task.MasterAddr()) + return + } + + task.Precheck() + if task.Err != nil { + return + } + + defer task.Clean() + + task.UnTarFullBackup() + if task.Err != nil { + return + } + task.RestoreLocalSlave() + if task.Err != nil { + return + } + 
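// Final step: relax the master's binlog retention (log-count /
+	// slave-log-keep-count) now that the new slave has caught up; see
+	// TendisSSDSetLougCount below. Errors from these two CONFIG SETs are ignored.
+	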
task.TendisSSDSetLougCount() +} +func (task *TendisssdDrRestoreTask) newConnect() { + task.runtime.Logger.Info("start connect master(%s)", task.MasterAddr()) + task.MasterCli, task.Err = myredis.NewRedisClient(task.MasterAddr(), task.MasterAuth, 0, + consts.TendisTypeRedisInstance) + if task.Err != nil { + return + } + task.runtime.Logger.Info("start connect slave(%s)", task.SlaveAddr()) + task.SlaveCli, task.Err = myredis.NewRedisClient(task.SlaveAddr(), task.SlavePassword, 0, + consts.TendisTypeRedisInstance) + if task.Err != nil { + return + } + var infoRet map[string]string + infoRet, task.Err = task.MasterCli.Info("server") + if task.Err != nil { + return + } + task.MasterVersion = infoRet["redis_version"] + task.DbType, task.Err = task.MasterCli.GetTendisType() + if task.Err != nil { + return + } + if !consts.IsTendisSSDInstanceDbType(task.DbType) { + task.Err = fmt.Errorf("redisMaster(%s) dbtype:%s not a tendis-ssd instance", task.MasterAddr(), task.DbType) + task.runtime.Logger.Error(task.Err.Error()) + return + } + task.SlaveDataDir, task.Err = task.SlaveCli.GetDir() + if task.Err != nil { + return + } +} +func (task *TendisssdDrRestoreTask) getRestoreTool() { + if strings.Contains(task.MasterVersion, "v1.2.") { + task.RestoreTool = "/usr/local/redis/bin/rr_restore_backup" + } else if strings.Contains(task.MasterVersion, "v1.3.") { + task.RestoreTool = "/usr/local/redis/bin/tredisrestore" + } else { + task.Err = fmt.Errorf("redisMaster(%s) version:%s cannot find restore-tool", task.MasterAddr(), task.MasterVersion) + task.runtime.Logger.Error(task.Err.Error()) + return + } + if !util.FileExists(task.RestoreTool) { + task.Err = fmt.Errorf("redis(%s) restore_tool:%s not exists", task.SlaveIP, task.RestoreTool) + task.runtime.Logger.Error(task.Err.Error()) + return + } +} + +func (task *TendisssdDrRestoreTask) isSlaveInUsing() { + var tmpKey string + tmpKey, task.Err = task.SlaveCli.Randomkey() + if task.Err != nil { + return + } + if tmpKey != "" { + task.Err = fmt.Errorf("redis(%s) RandomKey result=>%s, instance is using? cannot shutdown", task.SlaveAddr(), tmpKey) + task.runtime.Logger.Error(task.Err.Error()) + return + } +} + +// Precheck 前置检查 +func (task *TendisssdDrRestoreTask) Precheck() { + if !util.FileExists(task.DepsDir) { + task.Err = fmt.Errorf("redis(%s) deps:%s not exists", task.SlaveIP, task.DepsDir) + task.runtime.Logger.Error(task.Err.Error()) + return + } + if !util.FileExists("/usr/local/redis") { + task.Err = fmt.Errorf("redis(%s) /usr/local/redis not exists", task.SlaveIP) + task.runtime.Logger.Error(task.Err.Error()) + return + } + task.isSlaveInUsing() + if task.Err != nil { + return + } + task.getRestoreTool() + if task.Err != nil { + return + } + return +} + +// UnTarFullBackup 解压全备并检查 +func (task *TendisssdDrRestoreTask) UnTarFullBackup() { + if len(task.BakTask.BackupFiles) != 1 { + task.Err = fmt.Errorf("master(%s) has %d backupFiles?? 
[%+v]", task.MasterAddr(), len(task.BakTask.BackupFiles), + task.BakTask.BackupFiles) + task.runtime.Logger.Error(task.Err.Error()) + return + } + var localTarFile string + var ret string + localTarFile, task.Err = util.UnionSplitFiles(task.TaskDir, task.BakTask.BackupFiles) + if task.Err != nil { + task.runtime.Logger.Error(task.Err.Error()) + return + } + task.LocalFullBackupDir = strings.TrimSuffix(localTarFile, ".tar") + if !util.FileExists(task.LocalFullBackupDir) { + unTarCmd := fmt.Sprintf("tar -xf %s -C %s", localTarFile, task.TaskDir) + task.runtime.Logger.Info(unTarCmd) + _, task.Err = util.RunBashCmd(unTarCmd, "", nil, 24*time.Hour) + if task.Err != nil { + return + } + } + util.LocalDirChownMysql(task.LocalFullBackupDir) + versionFile := filepath.Join(task.LocalFullBackupDir, "meta/2") + if util.FileExists(versionFile) { + task.Err = fmt.Errorf("error: backup version bigger than 1, please check the backup,exit. file exists[%s]", + versionFile) + task.runtime.Logger.Error(task.Err.Error()) + return + } + duCmd := fmt.Sprintf("du -s %s|awk '{print $1}'", task.LocalFullBackupDir) + ret, task.Err = util.RunBashCmd(duCmd, "", nil, 1*time.Hour) + if task.Err != nil { + return + } + backupSize, _ := strconv.ParseUint(ret, 10, 64) + if backupSize < 10000 { + task.runtime.Logger.Info(fmt.Sprintf("master(%s) backupDir:%s dataSize:%d bytes, too small ?", task.MasterAddr(), + task.LocalFullBackupDir, backupSize)) + } else { + task.runtime.Logger.Info(fmt.Sprintf("master(%s) backupDir:%s dataSize:%dM", task.MasterAddr(), + task.LocalFullBackupDir, backupSize/1024)) + } + util.LocalDirChownMysql(task.LocalFullBackupDir) + task.runtime.Logger.Info("UnTarFullBakcup success") +} + +// Clean 最后清理 +func (task *TendisssdDrRestoreTask) Clean() { + if task.SlaveCli != nil { + task.SlaveCli.Close() + task.SlaveCli = nil + } + if task.MasterCli != nil { + task.MasterCli.Close() + task.MasterCli = nil + } + if task.LocalFullBackupDir == "" { + return + } + if task.Err != nil { + return + } + localTarFile := task.LocalFullBackupDir + ".tar" + tarDir := filepath.Dir(localTarFile) + if strings.Contains(localTarFile, task.MasterIP) && util.FileExists(localTarFile) { + rmCmd := fmt.Sprintf("cd %s && rm -rf %s", tarDir, filepath.Base(localTarFile)) + util.RunBashCmd(rmCmd, "", nil, 1*time.Hour) + task.runtime.Logger.Info(rmCmd) + } + if strings.Contains(task.LocalFullBackupDir, task.MasterIP) && util.FileExists(task.LocalFullBackupDir) { + rmCmd := fmt.Sprintf("cd %s && rm -rf %s", tarDir, filepath.Base(task.LocalFullBackupDir)) + util.RunBashCmd(rmCmd, "", nil, 1*time.Hour) + task.runtime.Logger.Info(rmCmd) + } +} + +// RestoreLocalSlave 恢复本地slave +// 1. 关闭slave 并 mv 本地rocksdb目录 +// 2. 
利用备份恢复数据,拉起slave; +func (task *TendisssdDrRestoreTask) RestoreLocalSlave() { + task.Err = task.SlaveCli.Shutdown() + if task.Err != nil { + return + } + task.runtime.Logger.Info("slave(%s) shutdown success", task.SlaveAddr()) + + nowtime := time.Now().Local().Format(consts.FilenameTimeLayout) + rockdbDir := filepath.Join(task.SlaveDataDir, "rocksdb") + bakDir := filepath.Join(task.SlaveDataDir, "backup_rocksdb."+nowtime) + var ret, slaveConfFile, msg string + var infoRet map[string]string + var slaveBinlogRange, masterBinlogRange myredis.TendisSSDBinlogSize + + mvCmd := fmt.Sprintf("mv %s %s", rockdbDir, bakDir) + task.runtime.Logger.Info(mvCmd) + util.RunBashCmd(mvCmd, "", nil, 2*time.Hour) + util.LocalDirChownMysql(bakDir) + + var extraOpt string + if strings.Contains(task.MasterVersion, "v1.2") { + extraOpt = " 1" + } else if strings.Contains(task.MasterVersion, "v1.3") { + extraOpt = "" + } else { + task.Err = fmt.Errorf("unsupported tendis version:%s,exit.", task.MasterVersion) + task.runtime.Logger.Error(task.Err.Error()) + return + } + restoreCmd := fmt.Sprintf(` +export LD_PRELOAD=%s/libjemalloc.so +export LD_LIBRARY_PATH=LD_LIBRARY_PATH:%s +%s %s %s %s +`, task.DepsDir, task.DepsDir, task.RestoreTool, task.LocalFullBackupDir, rockdbDir, extraOpt) + task.runtime.Logger.Info(restoreCmd) + ret, task.Err = util.RunBashCmd(restoreCmd, "", nil, 6*time.Hour) + if task.Err != nil { + return + } + task.runtime.Logger.Info("restore command result:" + ret) + if util.FileExists(rockdbDir) { + task.runtime.Logger.Info("restore ok, %s generated", rockdbDir) + } else { + task.Err = fmt.Errorf("restore command failed, %s not generated", rockdbDir) + task.runtime.Logger.Error(task.Err.Error()) + return + } + util.LocalDirChownMysql(rockdbDir) + slaveConfFile, task.Err = myredis.GetRedisLoccalConfFile(task.SlavePort) + if task.Err != nil { + return + } + // 先注释掉slaveof命令,拉起后不要立刻同步master + sedCmd := fmt.Sprintf("sed -i -e 's/^slaveof/#slaveof/g' %s", slaveConfFile) + task.runtime.Logger.Info(sedCmd) + util.RunBashCmd(sedCmd, "", nil, 1*time.Minute) + + startScript := filepath.Join("/usr/local/redis/bin", "start-redis.sh") + _, task.Err = util.RunLocalCmd("su", []string{consts.MysqlAaccount, "-c", startScript + " " + strconv.Itoa( + task.SlavePort)}, "", nil, 10*time.Second) + if task.Err != nil { + return + } + task.runtime.Logger.Info(fmt.Sprintf("su %s -c \"%s\"", consts.MysqlAaccount, + startScript+" "+strconv.Itoa(task.SlavePort))) + time.Sleep(2 * time.Second) + + task.SlaveCli, task.Err = myredis.NewRedisClient(task.SlaveAddr(), task.SlavePassword, 0, + consts.TendisTypeRedisInstance) + if task.Err != nil { + return + } + + // 一些必要设置 + _, task.Err = task.SlaveCli.ConfigSet("masterauth", task.MasterAuth) + if task.Err != nil { + return + } + _, task.Err = task.SlaveCli.ConfigSet("is-master-snapshot", "1") + if task.Err != nil { + return + } + infoRet, task.Err = task.MasterCli.Info("server") + if task.Err != nil { + return + } + masterRunID := infoRet["run_id"] + task.runtime.Logger.Info("slave(%s) confxx set server-runid %s", task.SlaveAddr(), masterRunID) + _, task.Err = task.SlaveCli.ConfigSet("server-runid", masterRunID) + if task.Err != nil { + return + } + + // 检查binlog范围ok + slaveBinlogRange, task.Err = task.SlaveCli.TendisSSDBinlogSize() + if task.Err != nil { + return + } + masterBinlogRange, task.Err = task.MasterCli.TendisSSDBinlogSize() + if task.Err != nil { + return + } + if slaveBinlogRange.FirstSeq < masterBinlogRange.FirstSeq { + task.Err = fmt.Errorf("slave(%s) 
binlog_first_seq:%d < master(%s) binlog_first_seq:%d", + task.SlaveAddr(), slaveBinlogRange.FirstSeq, task.MasterAddr(), masterBinlogRange.FirstSeq) + task.runtime.Logger.Error(task.Err.Error()) + return + } + if slaveBinlogRange.EndSeq > masterBinlogRange.EndSeq { + task.Err = fmt.Errorf("slave(%s) binlog_end_seq:%d > master(%s) binlog_end_seq:%d", + task.SlaveAddr(), slaveBinlogRange.EndSeq, task.MasterAddr(), masterBinlogRange.EndSeq) + task.runtime.Logger.Error(task.Err.Error()) + return + } + msg = fmt.Sprintf("master(%s) binlog_range:%s,slave(%s) binlog_range:%s,is ok", + task.MasterAddr(), masterBinlogRange.String(), task.SlaveAddr(), slaveBinlogRange.String()) + task.runtime.Logger.Info(msg) + + // slaveof + _, task.Err = task.SlaveCli.SlaveOf(task.MasterIP, strconv.Itoa(task.MasterPort)) + if task.Err != nil { + return + } + task.runtime.Logger.Info("slave(%s) 'slaveof %s %d'", task.SlaveAddr(), task.MasterIP, task.MasterPort) + + // slave 'confxx set disk-delete-count 50' + _, task.Err = task.SlaveCli.ConfigSet("disk-delete-count", "50") + if task.Err != nil { + return + } + + // 最多等待10分钟 + maxRetryTimes := 120 + var i int = 0 + for { + i++ + if i >= maxRetryTimes { + break + } + task.Err = nil + _, task.Err = task.SlaveCli.IsTendisSSDReplicaStatusOk(task.MasterIP, strconv.Itoa(task.MasterPort)) + if task.Err != nil { + task.runtime.Logger.Info(task.Err.Error() + ",sleep 5 seconds and retry...") + time.Sleep(5 * time.Second) + continue + } + break + } + if task.Err != nil { + return + } + task.runtime.Logger.Info("tendisSSD slave(%s) master(%s) create replicate link success", task.SlaveAddr(), + task.MasterAddr()) +} + +// TendisSSDSetLougCount tendisSSD恢复log-count参数 +func (task *TendisssdDrRestoreTask) TendisSSDSetLougCount() { + task.MasterCli.ConfigSet("log-count", "200000") + task.MasterCli.ConfigSet("slave-log-keep-count", "0") +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/atomsys.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/atomsys.go new file mode 100644 index 0000000000..870be9321f --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/atomsys.go @@ -0,0 +1,2 @@ +// Package atomsys 系统相关原子任务 +package atomsys diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/os_mongo_init.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/os_mongo_init.go new file mode 100644 index 0000000000..99a57e47fc --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/os_mongo_init.go @@ -0,0 +1,122 @@ +package atomsys + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "io/ioutil" + "strings" + "time" + + "github.com/go-playground/validator/v10" +) + +// OsMongoInitConfParams 系统初始化参数 +type OsMongoInitConfParams struct { + User string `json:"user" validate:"required"` + Password string `json:"password" validate:"required"` +} + +// OsMongoInit 系统初始化原子任务 +type OsMongoInit struct { + runtime *jobruntime.JobGenericRuntime + ConfParams *OsMongoInitConfParams + OsUser string + OsGroup string +} + +// NewOsMongoInit new +func NewOsMongoInit() jobruntime.JobRunner { + return &OsMongoInit{} +} + +// Init 初始化 +func (o *OsMongoInit) Init(runtime *jobruntime.JobGenericRuntime) error { + // 获取安装参数 + o.runtime = runtime + o.runtime.Logger.Info("start to init") + o.OsUser = 
consts.GetProcessUser() + o.OsGroup = consts.GetProcessUserGroup() + // 获取MongoDB配置文件参数 + if err := json.Unmarshal([]byte(o.runtime.PayloadDecoded), &o.ConfParams); err != nil { + o.runtime.Logger.Error( + "get parameters of mongoOsInit fail by json.Unmarshal, error:%s", err) + return fmt.Errorf("get parameters of mongoOsInit fail by json.Unmarshal, error:%s", err) + } + o.runtime.Logger.Info("init successfully") + + // 进行校验 + if err := o.checkParams(); err != nil { + return err + } + + return nil +} + +// checkParams 校验参数 +func (o *OsMongoInit) checkParams() error { + // 校验配置参数 + o.runtime.Logger.Info("start to validate parameters") + validate := validator.New() + o.runtime.Logger.Info("start to validate parameters of deInstall") + if err := validate.Struct(o.ConfParams); err != nil { + o.runtime.Logger.Error("validate parameters of mongoOsInit fail, error:%s", err) + return fmt.Errorf("validate parameters of mongoOsInit fail, error:%s", err) + } + o.runtime.Logger.Info("validate parameters successfully") + return nil +} + +// Name 名字 +func (o *OsMongoInit) Name() string { + return "os_mongo_init" +} + +// Run 执行函数 +func (o *OsMongoInit) Run() error { + // 获取初始化脚本 + o.runtime.Logger.Info("start to make init script content") + data := common.MongoShellInit + data = strings.Replace(data, "{{user}}", o.OsUser, -1) + data = strings.Replace(data, "{{group}}", o.OsGroup, -1) + o.runtime.Logger.Info("make init script content successfully") + + // 创建脚本文件 + o.runtime.Logger.Info("start to create init script file") + tmpScriptName := "/tmp/sysinit.sh" + if err := ioutil.WriteFile(tmpScriptName, []byte(data), 07555); err != nil { + o.runtime.Logger.Error("write tmp script failed %s", err.Error()) + return err + } + o.runtime.Logger.Info("create init script file successfully") + + // 执行脚本 + o.runtime.Logger.Info("start to execute init script") + _, err := util.RunBashCmd(tmpScriptName, "", nil, 30*time.Second) + if err != nil { + o.runtime.Logger.Error("execute init script fail, error:%s", err) + return fmt.Errorf("execute init script fail, error:%s", err) + } + o.runtime.Logger.Info("execute init script successfully") + // 设置用户名密码 + o.runtime.Logger.Info("start to set user:%s password", o.OsUser) + err = util.SetOSUserPassword(o.ConfParams.User, o.ConfParams.Password) + o.runtime.Logger.Info("set user:%s password successfully", o.OsUser) + if err != nil { + return err + } + return nil +} + +// Retry times +func (o *OsMongoInit) Retry() uint { + return 2 +} + +// Rollback rollback +func (o *OsMongoInit) Rollback() error { + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/redis_capturer.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/redis_capturer.go new file mode 100644 index 0000000000..8b1361e134 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/redis_capturer.go @@ -0,0 +1,209 @@ +package atomsys + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/common" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/go-playground/validator/v10" +) + +// GetRequestParams get request参数 +type GetRequestParams struct { + DbToolsPkg common.DbToolsMediaPkg `json:"dbtoolspkg"` + IP string `json:"ip" validate:"required"` + Ports []int `json:"ports" validate:"required"` + MonitorTimeMs int `json:"monitor_time_ms" 
validate:"required" ` + IgnoreKeys []string `json:"ignore_keys"` + Ignore bool `json:"ignore"` // 是否忽略错误 +} + +// RedisCapturer get request 结构体 +type RedisCapturer struct { + runtime *jobruntime.JobGenericRuntime + params *GetRequestParams + Device string + monitorTool string + errChan chan error +} + +// NewRedisCapturer 创建一个get request对象 +func NewRedisCapturer() jobruntime.JobRunner { + return &RedisCapturer{} +} + +// Init 初始化 +func (job *RedisCapturer) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v", err)) + return err + } + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("RedisCapturer Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("RedisCapturer Init params validate failed,err:%v,params:%+v", + err, job.params) + return err + } + } + // 6379<= start_port <= 55535 + ports := job.params.Ports + for _, p := range ports { + if p > 55535 || p < 6379 { + err = fmt.Errorf("RedisCapturer port[%d] must range [6379,5535]", p) + job.runtime.Logger.Error(err.Error()) + return err + } + + } + job.errChan = make(chan error, len(ports)) + job.monitorTool = consts.MyRedisCaptureBin + job.Device, err = util.GetIpv4InterfaceName(job.params.IP) + if err != nil { + return err + } + + err = job.params.DbToolsPkg.Install() + if err != nil { + return err + } + return nil +} + +// Run 运行监听请求任务 +func (job *RedisCapturer) Run() (err error) { + ports := job.params.Ports + + _, err = os.Stat(job.monitorTool) + if err != nil && os.IsNotExist(err) { + return fmt.Errorf("获取redis-capturer失败,请检查是否下发成功:err:%v", err) + } + + wg := sync.WaitGroup{} + for _, port := range ports { + wg.Add(1) + go func(port int) { + defer wg.Done() + job.Monitor(port) + }(port) + } + wg.Wait() + close(job.errChan) + + errMsg := "" + for err := range job.errChan { + errMsg = fmt.Sprintf("%s\n%s", errMsg, err.Error()) + } + if errMsg != "" { + // 如果忽略错误,则这里只报warning + if job.params.Ignore { + job.runtime.Logger.Warn(errMsg) + return nil + } + return fmt.Errorf(errMsg) + } + + return nil +} + +// Monitor 监听请求 +func (job *RedisCapturer) Monitor(port int) { + job.runtime.Logger.Info("monitor port[%d] begin..", port) + defer job.runtime.Logger.Info("monitor port[%d] end..", port) + var err error + running, err := job.IsRedisRunning(port) + if err != nil || !running { + job.errChan <- fmt.Errorf("port[%d] is not running", port) + return + } + + nowstr := time.Now().Local().Format("150405") + capturelog := fmt.Sprintf("capture_%s_%d_%s.log", job.params.IP, port, nowstr) + monitorCmd := fmt.Sprintf("%s --device=%s --ip=%s --port=%d --timeout=%d --log-file=%s", job.monitorTool, + job.Device, job.params.IP, port, job.params.MonitorTimeMs/1000, capturelog) + if len(job.params.IgnoreKeys) != 0 { + ignoreStr := strings.Join(job.params.IgnoreKeys, "|") + monitorCmd = fmt.Sprintf("%s | grep -i -v -E '%s' || true", monitorCmd, ignoreStr) + } + job.runtime.Logger.Info("monitor cmd is [%s]", monitorCmd) + // password, err := myredis.GetPasswordFromLocalConfFile(port) + // if err != nil { + // job.errChan <- err + // return + // } + // monitorCmd := fmt.Sprintf("timeout %d %s --no-auth-warning -a %s -h %s -p %d monitor", + // 
job.params.MonitorTimeMs/1000, consts.RedisCliBin, password, job.params.IP, port) + // logCmd := fmt.Sprintf("timeout %d %s --no-auth-warning -a %s -h %s -p %d monitor", + // job.params.MonitorTimeMs/1000, consts.RedisCliBin, "xxxxxx", job.params.IP, port) + // if len(job.params.IgnoreKeys) != 0 { + // ignoreStr := strings.Join(job.params.IgnoreKeys, "|") + // monitorCmd = fmt.Sprintf("%s | grep -i -v -E '%s' ", monitorCmd, ignoreStr) + // logCmd = fmt.Sprintf("%s | grep -i -v -E '%s' ", logCmd, ignoreStr) + // } + // monitorCmd += " || true" + // logCmd += " || true" + // job.runtime.Logger.Info("monitor cmd is [%s]", logCmd) + + cmdRet, err := util.RunLocalCmd("bash", []string{"-c", monitorCmd}, "", nil, 10*time.Minute) + if err != nil { + if err.Error() == "RunLocalCmd cmd wait fail,err:exit status 1" { + return + } + job.errChan <- err + return + } + if cmdRet != "" { + // 只取前30条 + cmdText := "" + num := 0 + for _, cmdLine := range strings.Split(cmdRet, "\n") { + cmdText = fmt.Sprintf("%s\n%s", cmdText, cmdLine) + num++ + if num >= 30 { + break + } + } + err = fmt.Errorf("check request failed. because have qps: %s", cmdText) + job.errChan <- err + return + } +} + +// IsRedisRunning 检查实例是否在运行。 下架流程中,实例没有运行到底算不算异常呢? +func (job *RedisCapturer) IsRedisRunning(port int) (installed bool, err error) { + time.Sleep(10 * time.Second) + portIsUse, err := util.CheckPortIsInUse(job.params.IP, strconv.Itoa(port)) + return portIsUse, err +} + +// Name 原子任务名 +func (job *RedisCapturer) Name() string { + return "redis_capturer" +} + +// Retry times +func (job *RedisCapturer) Retry() uint { + return 2 +} + +// Rollback rollback +func (job *RedisCapturer) Rollback() error { + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/sysinit.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/sysinit.go new file mode 100644 index 0000000000..84cef35af2 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/sysinit.go @@ -0,0 +1,101 @@ +package atomsys + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "embed" + "encoding/json" + "fmt" + "io/ioutil" + "time" + + "github.com/go-playground/validator/v10" +) + +// SysInitMySQLScriptFileName 系统初始化脚本文件名 +var SysInitMySQLScriptFileName = "sysinit_mysql.sh" + +// SysInitMySQLScript embed调用 +// +//go:embed sysinit_mysql.sh +var SysInitMySQLScript embed.FS + +// SysInitParams 系统初始化参数 +type SysInitParams struct { + User string `json:"user" validate:"required"` + Password string `json:"password" validate:"required"` +} + +// SysInit 系统初始化原子任务 +type SysInit struct { + runtime *jobruntime.JobGenericRuntime + params SysInitParams +} + +// NewSysInit new +func NewSysInit() jobruntime.JobRunner { + return &SysInit{} +} + +// Init 初始化 +func (job *SysInit) Init(m *jobruntime.JobGenericRuntime) error { + job.runtime = m + + err := json.Unmarshal([]byte(job.runtime.PayloadDecoded), &job.params) + if err != nil { + job.runtime.Logger.Error(fmt.Sprintf("json.Unmarshal failed,err:%+v\n", err)) + return err + } + // 参数有效性检查 + validate := validator.New() + err = validate.Struct(job.params) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + job.runtime.Logger.Error("Sys Init params validate failed,err:%v,params:%+v", err, job.params) + return err + } + for _, err := range err.(validator.ValidationErrors) { + job.runtime.Logger.Error("Sys Init params validate failed,err:%v,params:%+v", err, job.params) + return err + 
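// Note: only the first validation error is returned; any remaining
+			// entries in validator.ValidationErrors are dropped.
+		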
}
+	}
+	return nil
+}
+
+// Name 名字
+func (job *SysInit) Name() string {
+	return "sysinit"
+}
+
+// Run 执行函数
+func (job *SysInit) Run() (err error) {
+	data, err := SysInitMySQLScript.ReadFile(SysInitMySQLScriptFileName)
+	if err != nil {
+		job.runtime.Logger.Error("read sysinit script failed %s", err.Error())
+		return err
+	}
+	tmpScriptName := "/tmp/sysinit.sh"
+	if err = ioutil.WriteFile(tmpScriptName, data, 0755); err != nil {
+		job.runtime.Logger.Error("write tmp script failed %s", err.Error())
+		return err
+	}
+	_, err = util.RunBashCmd(tmpScriptName, "", nil, 30*time.Second)
+	if err != nil {
+		return
+	}
+	err = util.SetOSUserPassword(job.params.User, job.params.Password)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Retry retry times
+func (job *SysInit) Retry() uint {
+	return 2
+}
+
+// Rollback rollback
+func (job *SysInit) Rollback() error {
+	return nil
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/sysinit_mysql.sh b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/sysinit_mysql.sh
new file mode 100644
index 0000000000..4d33e4fb1d
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys/sysinit_mysql.sh
@@ -0,0 +1,122 @@
+#!/bin/bash
+# 新建mysql.mysql用户
+##
+# mysql scripts
+##
+function _exit() {
+	rm $0
+	exit
+}
+#handler nscd restart
+#如果不存在mysql用户组,则 groupadd mysql -g 202
+egrep "^mysql" /etc/group >&/dev/null
+if [ $? -ne 0 ]; then
+	groupadd mysql -g 202
+fi
+#考虑到可能上架已运行的机器,userdel再重建有风险,不采用删除重建的方法
+#如果不存在mysql用户则创建(uid固定30019,并确保home目录存在)
+id mysql >&/dev/null
+if [ $? -ne 0 ]; then
+	useradd -m -d /home/mysql -g 202 -G users -u 30019 mysql
+	chage -M 99999 mysql
+	if [ ! -d /home/mysql ]; then
+		mkdir -p /home/mysql
+	fi
+	chmod 755 /home/mysql
+	usermod -d /home/mysql mysql 2>/dev/null
+fi
+
+if [ -L "/data1" ] && [ ! -e "/data1" ]
+then
+	echo "/data1 is an invalid soft link. Removing it..."
+	rm "/data1"
+fi
+
+if [ -L "/data" ] && [ ! -e "/data" ]
+then
+	echo "/data is an invalid soft link. Removing it..."
+	rm "/data"
+fi
+
+if [[ -z "$REDIS_DATA_DIR" ]]; then
+	echo "env REDIS_DATA_DIR cannot be empty" >&2
+	exit 1
+fi
+if [[ -z "$REDIS_BACKUP_DIR" ]]; then
+	echo "env REDIS_BACKUP_DIR cannot be empty" >&2
+	exit 1
+fi
+
+if [ ! -d $REDIS_DATA_DIR ]; then
+	mkdir -p $REDIS_DATA_DIR
+fi
+
+if [ ! -d $REDIS_BACKUP_DIR ]; then
+	mkdir -p $REDIS_BACKUP_DIR
+fi
+
+#建立 /data、/data1 软链接及 dbha、install 等目录(已存在则跳过)
+if [ ! -d /data ]; then
+	ln -s $REDIS_BACKUP_DIR /data
+fi
+if [ ! -d /data1 ]; then
+	ln -s $REDIS_DATA_DIR /data1
+fi
+if [[ ! -d /data1/dbha ]]; then
+	mkdir -p /data1/dbha
+fi
+chown -R mysql /data1/dbha
+if [[ ! -d /data/dbha ]]; then
+	mkdir -p /data/dbha
+fi
+chown -R mysql /data/dbha
+if [[ ! -d /data/install ]]; then
+	mkdir -p /data/install
+	chown -R mysql /data/install
+fi
+if [[ ! -d $REDIS_BACKUP_DIR/dbbak ]]; then
+	mkdir -p $REDIS_BACKUP_DIR/dbbak
+	chown -R mysql $REDIS_BACKUP_DIR/dbbak
+fi
+chown -R mysql /home/mysql
+chmod -R a+rwx /data/install
+rm -rf /home/mysql/install
+ln -s /data/install /home/mysql/install
+chown -R mysql /home/mysql/install
+password="$2"
+#password=$(echo "$2" | /home/mysql/install/lib/tools/base64 -d)
+echo "mysql:$password" | chpasswd
+FOUND=$(grep 'ulimit -n 204800' /etc/profile)
+if [ -z "$FOUND" ]; then
+	echo 'ulimit -n 204800' >>/etc/profile
+fi
+FOUND=$(grep 'export LC_ALL=en_US' /etc/profile)
+if [ -z "$FOUND" ]; then
+	echo 'export LC_ALL=en_US' >>/etc/profile
+fi
+FOUND=$(grep 'export PATH=/usr/local/mysql/bin/:$PATH' /etc/profile)
+if [ -z "$FOUND" ]; then
+	echo 'export PATH=/usr/local/mysql/bin/:$PATH' >>/etc/profile
+fi
+FOUND_umask=$(grep '^umask 022' /etc/profile)
+if [ -z "$FOUND_umask" ]; then
+	echo 'umask 022' >>/etc/profile
+fi
+FOUND=$(grep 'fs.aio-max-nr' /etc/sysctl.conf)
+if [ -z "$FOUND" ]; then
+	echo "fs.aio-max-nr=1024000" >>/etc/sysctl.conf
+fi
+FOUND=$(grep 'vm.overcommit_memory = 1' /etc/sysctl.conf)
+if [ -z "$FOUND" ]; then
+	echo "vm.overcommit_memory = 1" >>/etc/sysctl.conf
+fi
+FOUND=$(grep 'vm.swappiness = 0' /etc/sysctl.conf)
+if [ -z "$FOUND" ]; then
+	echo "vm.swappiness = 0" >>/etc/sysctl.conf
+fi
+FOUND=$(grep -i 'net.ipv4.ip_local_reserved_ports=30000-31000,40000-41000,50000-52000' /etc/sysctl.conf)
+if [ -z "$FOUND" ]; then
+	echo "net.ipv4.ip_local_reserved_ports=30000-31000,40000-41000,50000-52000" >>/etc/sysctl.conf
+fi
+/sbin/sysctl -p
+_exit
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/backupsys/backupsys.go b/dbm-services/redis/db-tools/dbactuator/pkg/backupsys/backupsys.go
new file mode 100644
index 0000000000..04f3ce51bc
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/backupsys/backupsys.go
@@ -0,0 +1,230 @@
+// Package backupsys 备份系统
+package backupsys
+
+import (
+	"bufio"
+	"dbm-services/redis/db-tools/dbactuator/mylog"
+	"dbm-services/redis/db-tools/dbactuator/pkg/consts"
+	"dbm-services/redis/db-tools/dbactuator/pkg/util"
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// UploadTask 操作备份系统
+type UploadTask struct {
+	Files   []string `json:"files"` // 全路径
+	TaskIDs []uint64 `json:"taskids"`
+	Tag     string   `json:"tag"`
+}
+
+// UploadFiles 上传文件
+func (task *UploadTask) UploadFiles() (err error) {
+	var taskIDStr string
+	var taskIDNum uint64
+	if len(task.Files) == 0 {
+		return
+	}
+	if task.Tag == "" {
+		err = fmt.Errorf("BackupSystem uploadFiles tag(%s) cannot be empty", task.Tag)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	for _, file := range task.Files {
+		if !util.FileExists(file) {
+			err = fmt.Errorf("BackupSystem uploadFiles %s not exists", file)
+			mylog.Logger.Error(err.Error())
+			return
+		}
+	}
+	for _, bkfile := range task.Files {
+		bkCmd := fmt.Sprintf("%s -n -f %s --with-md5 -t %s|grep 'taskid'|awk -F: '{print $2}'",
+			consts.BackupClient, bkfile, task.Tag)
+		mylog.Logger.Info(bkCmd)
+		taskIDStr, err = util.RunBashCmd(bkCmd, "", nil, 10*time.Minute)
+		if err != nil {
+			return
+		}
+		// 命令输出可能带换行/空白,先去除再解析
+		taskIDNum, err = strconv.ParseUint(strings.TrimSpace(taskIDStr), 10, 64)
+		if err != nil {
+			err = fmt.Errorf("%s ParseUint failed,err:%v", taskIDStr, err)
+			mylog.Logger.Error(err.Error())
+			return
+		}
+		task.TaskIDs = append(task.TaskIDs, taskIDNum)
+	}
+	return
+}
+
+// CheckTasksStatus 检查tasks状态
+func (task *UploadTask) CheckTasksStatus() (runningTaskIDs, failTaskIDs, succTaskIDs []uint64,
+	runningFiles, failedFiles, succFiles []string, failMsgs []string,
+	err error) {
+	var status TaskStatus
+	for idx, taskID := range task.TaskIDs {
+		status, err = GetTaskStatus(taskID)
+		if err != nil {
+			return
+		}
+		if status.Status > 4 {
+			// err = fmt.Errorf("ToBackupSystem %s failed,err:%s,taskid:%d",
+			// 	status.File, status.StatusInfo, taskID)
+			// mylog.Logger.Error(err.Error())
+			failMsgs = append(failMsgs, fmt.Sprintf("taskid:%d,failMsg:%s", taskID, status.StatusInfo))
+			failedFiles = append(failedFiles, task.Files[idx])
+			failTaskIDs = append(failTaskIDs, task.TaskIDs[idx])
+		} else if status.Status == 4 {
+			succFiles = append(succFiles, task.Files[idx])
+			succTaskIDs = append(succTaskIDs, task.TaskIDs[idx])
+		} else if status.Status < 4 {
+			runningFiles = append(runningFiles, task.Files[idx])
+			runningTaskIDs = append(runningTaskIDs, task.TaskIDs[idx])
+		}
+	}
+	return
+}
+
+// WaitForUploadFinish 等待所有files上传成功
+func (task *UploadTask) WaitForUploadFinish() (err error) {
+	var times int64
+	var msg string
+	var runningFiles, failFiles, succFiles, failMsgs []string
+	for {
+		times++
+		_, _, _, runningFiles, failFiles, succFiles, failMsgs, err = task.CheckTasksStatus()
+		if err != nil {
+			return
+		}
+		// 只要有running的task,则继续等待
+		if len(runningFiles) > 0 {
+			if times%6 == 0 {
+				// 每分钟打印一次日志
+				msg = fmt.Sprintf("files[%+v] cnt:%d upload to backupSystem still running", runningFiles, len(runningFiles))
+				mylog.Logger.Info(msg)
+			}
+			time.Sleep(10 * time.Second)
+			continue
+		}
+		if len(failMsgs) > 0 {
+			err = fmt.Errorf("failCnt:%d,failFiles:[%+v],err:%s", len(failFiles), failFiles, strings.Join(failMsgs, ","))
+			mylog.Logger.Error(err.Error())
+			return
+		}
+		if len(succFiles) == len(task.Files) {
+			return nil
+		}
+		break
+	}
+	return
+}
+
+// TaskStatus backup_client -q --taskid=xxxx 命令的结果
+type TaskStatus struct {
+	File           string    `json:"file"`
+	Host           string    `json:"host"`
+	SednupDateTime time.Time `json:"sendup_datetime"`
+	Status         int       `json:"status"`
+	StatusInfo     string    `json:"status_info"`
+	StartTime      time.Time `json:"start_time"`
+	CompleteTime   time.Time `json:"complete_time"`
+	ExpireTime     time.Time `json:"expire_time"`
+}
+
+// String 用于打印
+func (status *TaskStatus) String() string {
+	statusBytes, _ := json.Marshal(status)
+	return string(statusBytes)
+}
+
+// GetTaskStatus 执行backup_client -q --taskid=xxxx 命令并解析结果
+func GetTaskStatus(taskid uint64) (status TaskStatus, err error) {
+	var cmdRet string
+	bkCmd := fmt.Sprintf("%s -q --taskid=%d", consts.BackupClient, taskid)
+	cmdRet, err = util.RunBashCmd(bkCmd, "", nil, 30*time.Second)
+	if err != nil {
+		return
+	}
+	scanner := bufio.NewScanner(strings.NewReader(cmdRet))
+	scanner.Split(bufio.ScanLines)
+	for scanner.Scan() {
+		line := scanner.Text()
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+		l01 := strings.SplitN(line, ":", 2)
+		if len(l01) != 2 {
+			err = fmt.Errorf("len()!=2,cmd:%s,result format not correct:%s", bkCmd, cmdRet)
+			mylog.Logger.Error(err.Error())
+			return
+		}
+		first := strings.TrimSpace(l01[0])
+		second := strings.TrimSpace(l01[1])
+		switch first {
+		case "file":
+			status.File = second
+		case "host":
+			status.Host = second
+		case "sendup datetime":
+			if second == "0000-00-00 00:00:00" {
+				status.SednupDateTime = time.Time{}
+				break
+			}
+			status.SednupDateTime, err = time.ParseInLocation(consts.UnixtimeLayout, second, time.Local)
+			if err != nil {
+				err = fmt.Errorf("time.Parse 'sendup datetime' failed,err:%v,value:%s,cmd:%s", err, second, bkCmd)
+				mylog.Logger.Error(err.Error())
+				return
+			}
+		case "status":
+			status.Status, err =
strconv.Atoi(second) + if err != nil { + err = fmt.Errorf("strconv.Atoi failed,err:%v,value:%s,cmd:%s", err, second, bkCmd) + mylog.Logger.Error(err.Error()) + return + } + case "status info": + status.StatusInfo = second + case "start_time": + if second == "0000-00-00 00:00:00" { + status.StartTime = time.Time{} // "0000-01-00 00:00:00" + break + } + status.StartTime, err = time.ParseInLocation(consts.UnixtimeLayout, second, time.Local) + if err != nil { + err = fmt.Errorf("time.Parse start_time failed,err:%v,value:%s,cmd:%s", err, second, bkCmd) + mylog.Logger.Error(err.Error()) + return + } + case "complete_time": + if second == "0000-00-00 00:00:00" { + status.CompleteTime = time.Time{} // "0000-01-00 00:00:00" + break + } + status.CompleteTime, err = time.ParseInLocation(consts.UnixtimeLayout, second, time.Local) + if err != nil { + err = fmt.Errorf("time.Parse complete_time failed,err:%v,value:%s,cmd:%s", err, second, bkCmd) + mylog.Logger.Error(err.Error()) + return + } + case "expire_time": + if second == "0000-00-00 00:00:00" { + status.ExpireTime = time.Time{} // "0000-01-00 00:00:00" + break + } + status.ExpireTime, err = time.ParseInLocation(consts.UnixtimeLayout, second, time.Local) + if err != nil { + err = fmt.Errorf("time.Parse expire_time failed,err:%v,value:%s,cmd:%s", err, second, bkCmd) + mylog.Logger.Error(err.Error()) + return + } + } + } + if err = scanner.Err(); err != nil { + err = fmt.Errorf("scanner.Scan failed,err:%v,cmd:%s", err, cmdRet) + mylog.Logger.Error(err.Error()) + return + } + return +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/common/common.go b/dbm-services/redis/db-tools/dbactuator/pkg/common/common.go new file mode 100644 index 0000000000..87fb268f39 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/common/common.go @@ -0,0 +1,42 @@ +// Package common 一些公共类定义 +package common + +// MySet 通过泛型自定义集合类型 +type MySet[T int | string] map[T]struct{} + +// Has 判断集合中是否有该元素 +func (s MySet[T]) Has(key T) bool { + _, ok := s[key] + return ok +} + +// Add 向集合中添加元素 +func (s MySet[T]) Add(key T) { + s[key] = struct{}{} +} + +// Delete 从集合中删除元素 +func (s MySet[T]) Delete(key T) { + delete(s, key) +} + +// ToStringSlice 返回对应的数组类型(不保证原始顺序) +func (s MySet[T]) ToStringSlice() (ret []T) { + ret = make([]T, 0, len(s)) + for k := range s { + ret = append(ret, k) + } + return ret +} + +// UniqueSlice 去重,保证原始顺序 +func UniqueSlice[T int | string](l01 []T) (ret []T) { + var tmp MySet[T] = make(MySet[T]) + for _, ele := range l01 { + if !tmp.Has(ele) { + ret = append(ret, ele) + tmp.Add(ele) + } + } + return ret +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/common/exporter_conf.go b/dbm-services/redis/db-tools/dbactuator/pkg/common/exporter_conf.go new file mode 100644 index 0000000000..1f6dc8cca4 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/common/exporter_conf.go @@ -0,0 +1,46 @@ +package common + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +func getConfFileName(port int) string { + return filepath.Join(consts.ExporterConfDir, fmt.Sprintf("%d.conf", port)) +} + +// setExporterConfig 写入ExporterConfig文件 +// 目录固定:. consts.ExporterConfDir +// 文件名称:. $port.conf +// 文件已经存在, 覆盖. +// 文件写入失败,报错. 
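// Editor's note: an illustrative sketch (not part of this patch) of the contract
// described above — one JSON conf per exporter port under consts.ExporterConfDir,
// overwritten on each call. The port and map values here are hypothetical.
package main

import "dbm-services/redis/db-tools/dbactuator/pkg/common"

func main() {
	// Writes /home/mysql/.exporter/30000.conf with the marshaled JSON.
	_ = common.WriteExporterConfigFile(30000, map[string]string{
		"redis_addr":     "127.0.0.1:30000",
		"redis_password": "xxxx", // placeholder
	})
}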
+ +// WriteExporterConfigFile TODO +func WriteExporterConfigFile(port int, data interface{}) (err error) { + var fileData []byte + var confFile string + err = util.MkDirsIfNotExists([]string{consts.ExporterConfDir}) + if err != nil { + return err + } + confFile = getConfFileName(port) + fileData, _ = json.Marshal(data) + err = ioutil.WriteFile(confFile, fileData, 0755) + if err != nil { + return err + } + util.LocalDirChownMysql(consts.ExporterConfDir) + return nil +} + +// DeleteExporterConfigFile 删除Exporter配置文件. +func DeleteExporterConfigFile(port int) (err error) { + var confFile string + confFile = getConfFileName(port) + return os.Remove(confFile) +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/common/filelock.go b/dbm-services/redis/db-tools/dbactuator/pkg/common/filelock.go new file mode 100644 index 0000000000..7c7d52a864 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/common/filelock.go @@ -0,0 +1,34 @@ +package common + +import ( + "os" + "syscall" +) + +// FileLock 结构体 +type FileLock struct { + Path string + FD *os.File +} + +// NewFileLock 生成结构体 +func NewFileLock(path string) *FileLock { + fd, _ := os.Open(path) + return &FileLock{ + Path: path, + FD: fd, + } +} + +// Lock 加锁 +func (f *FileLock) Lock() error { + err := syscall.Flock(int(f.FD.Fd()), syscall.LOCK_EX|syscall.LOCK_NB) + return err +} + +// UnLock 解锁 +func (f *FileLock) UnLock() error { + defer f.FD.Close() + err := syscall.Flock(int(f.FD.Fd()), syscall.LOCK_UN) + return err +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/common/initiate_replicaset_conf.go b/dbm-services/redis/db-tools/dbactuator/pkg/common/initiate_replicaset_conf.go new file mode 100644 index 0000000000..f4b9cd7902 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/common/initiate_replicaset_conf.go @@ -0,0 +1,37 @@ +package common + +import "encoding/json" + +// JsonConfReplicaset 复制集配置 +type JsonConfReplicaset struct { + Id string `json:"_id"` + ConfigSvr bool `json:"configsvr"` + Members []*Member `json:"members"` +} + +// Member 成员 +type Member struct { + Id int `json:"_id"` + Host string `json:"host"` + Priority int `json:"priority"` + Hidden bool `json:"hidden"` +} + +// NewJsonConfReplicaset 获取结构体 +func NewJsonConfReplicaset() *JsonConfReplicaset { + return &JsonConfReplicaset{} +} + +// GetConfContent 获取配置内容 +func (j *JsonConfReplicaset) GetConfContent() ([]byte, error) { + confContent, err := json.Marshal(j) + if err != nil { + return nil, err + } + return confContent, nil +} + +// NewMember 获取结构体 +func NewMember() *Member { + return &Member{} +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/common/media_pkg.go b/dbm-services/redis/db-tools/dbactuator/pkg/common/media_pkg.go new file mode 100644 index 0000000000..fc8d7460ec --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/common/media_pkg.go @@ -0,0 +1,129 @@ +package common + +import ( + "dbm-services/redis/db-tools/dbactuator/mylog" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "fmt" + "os" + "path/filepath" + "regexp" + "time" +) + +// MediaPkg 通用介质包处理 +type MediaPkg struct { + Pkg string `json:"pkg" validate:"required"` // 安装包名 + PkgMd5 string `json:"pkg_md5" validate:"required,md5"` // 安装包MD5 +} + +// GetAbsolutePath 返回介质存放的绝对路径 +func (m *MediaPkg) GetAbsolutePath() string { + return filepath.Join(consts.PackageSavePath, m.Pkg) +} + +// GePkgBaseName 例如将 mysql-5.7.20-linux-x86_64-tmysql-3.1.5-gcs.tar.gz +// 解析出 
mysql-5.7.20-linux-x86_64-tmysql-3.1.5-gcs +// 用于做软连接使用 +func (m *MediaPkg) GePkgBaseName() string { + pkgFullName := filepath.Base(m.GetAbsolutePath()) + return regexp.MustCompile("(.tar.gz|.tgz)$").ReplaceAllString(pkgFullName, "") +} + +// Check 检查介质包 +func (m *MediaPkg) Check() (err error) { + var fileMd5 string + // 判断安装包是否存在 + pkgAbPath := m.GetAbsolutePath() + if !util.FileExists(pkgAbPath) { + return fmt.Errorf("%s不存在", pkgAbPath) + } + if fileMd5, err = util.GetFileMd5(pkgAbPath); err != nil { + return fmt.Errorf("获取[%s]md5失败, %v", m.Pkg, err.Error()) + } + // 校验md5 + if fileMd5 != m.PkgMd5 { + return fmt.Errorf("安装包的md5不匹配,[%s]文件的md5[%s]不正确", fileMd5, m.PkgMd5) + } + return +} + +// DbToolsMediaPkg db工具包 +type DbToolsMediaPkg struct { + MediaPkg +} + +// Install 安装dbtools +// 1. 确保本地 /data/install/dbtool.tar.gz 存在,且md5校验ok; +// 2. 检查 {REDIS_BACKUP_DIR}/dbbak/dbatool.tar.gz 与 /data/install/dbtool.tar.gz 是否一致; +// - md5一致,则忽略更新; +// - /data/install/dbtool.tar.gz 不存在 or md5不一致 则用最新 /data/install/dbtool.tar.gz 工具覆盖 {REDIS_BACKUP_DIR}/dbbak/dbatool +// 3. 创建 /home/mysql/dbtools -> /data/dbbak/dbtools 软链接 +// 4. cp /data/install/dbtool.tar.gz {REDIS_BACKUP_DIR}/dbbak/dbatool.tar.gz +func (pkg *DbToolsMediaPkg) Install() (err error) { + var fileMd5 string + var overrideLocal bool = true + var newMysqlHomeLink bool = true + var realLink string + err = pkg.Check() + if err != nil { + return + } + toolsName := filepath.Base(consts.DbToolsPath) + backupDir := filepath.Join(consts.GetRedisBackupDir(), "dbbak") // 如 /data/dbbak + bakdirToolsTar := filepath.Join(backupDir, toolsName+".tar.gz") // 如 /data/dbbak/dbtools.tar.gz + installToolTar := pkg.GetAbsolutePath() + if util.FileExists(bakdirToolsTar) { + fileMd5, err = util.GetFileMd5(bakdirToolsTar) + if err != nil { + return + } + if fileMd5 == pkg.PkgMd5 { + overrideLocal = false + } + } + if overrideLocal { + // 最新介质覆盖本地 + untarCmd := fmt.Sprintf("tar -zxf %s -C %s", installToolTar, backupDir) + mylog.Logger.Info(untarCmd) + _, err = util.RunBashCmd(untarCmd, "", nil, 10*time.Minute) + if err != nil { + return + } + } + if !util.FileExists(filepath.Join(backupDir, toolsName)) { // 如 /data/dbbak/dbtools 目录不存在 + err = fmt.Errorf("dir:%s not exists", filepath.Join(backupDir, toolsName)) + mylog.Logger.Error(err.Error()) + return + } + if util.FileExists(consts.DbToolsPath) { + realLink, err = filepath.EvalSymlinks(consts.DbToolsPath) + if err != nil { + err = fmt.Errorf("filepath.EvalSymlinks %s fail,err:%v", consts.DbToolsPath, err) + mylog.Logger.Error(err.Error()) + return err + } + if realLink == filepath.Join(backupDir, toolsName) { // /home/mysql/dbtools 已经是指向 /data/dbbak/dbtools 的软连接 + newMysqlHomeLink = false + } + } + if newMysqlHomeLink { + // 需创建 /home/mysql/dbtools -> /data/dbbak/dbtools 软链接 + err = os.Symlink(filepath.Join(backupDir, toolsName), consts.DbToolsPath) + if err != nil { + err = fmt.Errorf("os.Symlink %s -> %s fail,err:%s", consts.DbToolsPath, filepath.Join(backupDir, toolsName), err) + mylog.Logger.Error(err.Error()) + return + } + mylog.Logger.Info("create softLink success,%s -> %s", consts.DbToolsPath, filepath.Join(backupDir, toolsName)) + } + cpCmd := fmt.Sprintf("cp %s %s", installToolTar, bakdirToolsTar) + mylog.Logger.Info(cpCmd) + _, err = util.RunBashCmd(cpCmd, "", nil, 10*time.Minute) + if err != nil { + return + } + util.LocalDirChownMysql(consts.DbToolsPath) + util.LocalDirChownMysql(backupDir) + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/common/mongo_common.go 
b/dbm-services/redis/db-tools/dbactuator/pkg/common/mongo_common.go new file mode 100644 index 0000000000..7d858234d6 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/common/mongo_common.go @@ -0,0 +1,592 @@ +package common + +import ( + "crypto/md5" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/hex" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" +) + +// UnTarAndCreateSoftLinkAndChown 解压目录,创建软链接并修改属主 +func UnTarAndCreateSoftLinkAndChown(runtime *jobruntime.JobGenericRuntime, binDir string, installPackagePath string, + unTarPath string, + installPath string, user string, group string) error { + // 解压安装包 + if !util.FileExists(unTarPath) { + // 解压到/usr/local目录下 + runtime.Logger.Info("start to unTar install package") + tarCmd := fmt.Sprintf("tar -zxf %s -C %s", installPackagePath, binDir) + if _, err := util.RunBashCmd(tarCmd, "", nil, 10*time.Second); err != nil { + runtime.Logger.Error(fmt.Sprintf("untar install file fail, error:%s", err)) + return fmt.Errorf("untar install file fail, error:%s", err) + } + runtime.Logger.Info("unTar install package successfully") + // 修改属主 + runtime.Logger.Info("start to execute chown command for unTar directory") + if _, err := util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s", user, group, unTarPath), + "", nil, + 10*time.Second); err != nil { + runtime.Logger.Error(fmt.Sprintf("chown untar directory fail, error:%s", err)) + return fmt.Errorf("chown untar directory fail, error:%s", err) + } + runtime.Logger.Info("execute chown command for unTar directory successfully") + } + + // 创建软链接 + if !util.FileExists(installPath) { + // 创建软链接 + runtime.Logger.Info("start to create soft link") + softLink := fmt.Sprintf("ln -s %s %s", unTarPath, installPath) + if _, err := util.RunBashCmd(softLink, "", nil, 10*time.Second); err != nil { + runtime.Logger.Error( + fmt.Sprintf("install directory create softLink fail, error:%s", err)) + return fmt.Errorf("install directory create softLink fail, error:%s", err) + } + runtime.Logger.Info("create soft link successfully") + + // 修改属主 + runtime.Logger.Info("start to execute chown command for softLink directory") + if _, err := util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s", user, group, installPath), + "", nil, + 10*time.Second); err != nil { + runtime.Logger.Error(fmt.Sprintf("chown softlink directory fail, error:%s", err)) + return fmt.Errorf("chown softlink directory fail, error:%s", err) + } + runtime.Logger.Info("execute chown command for softLink directory successfully") + + } + + return nil +} + +// GetMd5 获取md5值 +func GetMd5(str string) string { + h := md5.New() + h.Write([]byte(str)) + return hex.EncodeToString(h.Sum(nil)) +} + +// CheckMongoVersion 检查mongo版本 +func CheckMongoVersion(binDir string, mongoName string) (string, error) { + cmd := fmt.Sprintf("%s -version |grep -E 'db version|mongos version'| awk -F \" \" '{print $3}' |sed 's/v//g'", + filepath.Join(binDir, "mongodb", "bin", mongoName)) + getVersion, err := util.RunBashCmd(cmd, "", nil, 10*time.Second) + getVersion = strings.Replace(getVersion, "\n", "", -1) + if err != nil { + return "", err + } + return getVersion, nil +} + +// CheckMongoService 检查mongo服务是否存在 +func CheckMongoService(port int) (bool, string, error) { + cmd := fmt.Sprintf("netstat -ntpl |grep %d | awk '{print $7}' |head -1", port) + result, err := util.RunBashCmd(cmd, "", nil, 10*time.Second) + if err != nil { + return false, "", err + } + if 
strings.Contains(result, "mongos") { + return true, "mongos", nil + } + if strings.Contains(result, "mongod") { + return true, "mongod", nil + } + return false, "", nil +} + +// CreateConfFileAndKeyFileAndDbTypeFileAndChown 创建配置文件,key文件,dbType文件并授权 +func CreateConfFileAndKeyFileAndDbTypeFileAndChown(runtime *jobruntime.JobGenericRuntime, authConfFilePath string, + authConfFileContent []byte, user string, group string, noAuthConfFilePath string, noAuthConfFileContent []byte, + keyFilePath string, app string, areaId string, dbTypeFilePath string, instanceType string, + defaultPerm os.FileMode) error { + // 创建Auth配置文件 + runtime.Logger.Info("start to create auth config file") + authConfFile, err := os.OpenFile(authConfFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, defaultPerm) + defer authConfFile.Close() + if err != nil { + runtime.Logger.Error(fmt.Sprintf("create auth config file fail, error:%s", err)) + return fmt.Errorf("create auth config file fail, error:%s", err) + } + if _, err = authConfFile.WriteString(string(authConfFileContent)); err != nil { + runtime.Logger.Error(fmt.Sprintf("auth config file write content fail, error:%s", err)) + return fmt.Errorf("auth config file write content fail, error:%s", err) + } + runtime.Logger.Info("create auth config file successfully") + + // 修改配置文件属主 + runtime.Logger.Info("start to execute chown command for auth config file") + if _, err = util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s", user, group, authConfFilePath), + "", nil, + 10*time.Second); err != nil { + runtime.Logger.Error(fmt.Sprintf("chown auth config file fail, error:%s", err)) + return fmt.Errorf("chown auth config file fail, error:%s", err) + } + runtime.Logger.Info("start to execute chown command for auth config file successfully") + + // 创建NoAuth配置文件 + runtime.Logger.Info("start to create no auth config file") + noAuthConfFile, err := os.OpenFile(noAuthConfFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, defaultPerm) + defer noAuthConfFile.Close() + if err != nil { + runtime.Logger.Error(fmt.Sprintf("create no auth config file fail, error:%s", err)) + return fmt.Errorf("create no auth config file fail, error:%s", err) + } + if _, err = noAuthConfFile.WriteString(string(noAuthConfFileContent)); err != nil { + runtime.Logger.Error(fmt.Sprintf("auth no config file write content fail, error:%s", err)) + return fmt.Errorf("auth no config file write content fail, error:%s", err) + } + runtime.Logger.Info("create no auth config file successfully") + + // 修改配置文件属主 + runtime.Logger.Info("start to execute chown command for no auth config file") + if _, err = util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s", user, group, noAuthConfFilePath), + "", nil, + 10*time.Second); err != nil { + runtime.Logger.Error(fmt.Sprintf("chown no auth config file fail, error:%s", err)) + return fmt.Errorf("chown no auth config file fail, error:%s", err) + } + runtime.Logger.Info("execute chown command for no auth config file successfully") + + // 创建key文件 + runtime.Logger.Info("start to create key file") + keyFile, err := os.OpenFile(keyFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600) + defer keyFile.Close() + if err != nil { + runtime.Logger.Error(fmt.Sprintf("create key file fail, error:%s", err)) + return fmt.Errorf("create key file fail, error:%s", err) + } + key := GetMd5(strings.Join([]string{app, areaId}, "-")) + if _, err = keyFile.WriteString(key); err != nil { + runtime.Logger.Error(fmt.Sprintf("key file write content fail, error:%s", err)) + return fmt.Errorf("key file write content fail, 
error:%s", err) + } + runtime.Logger.Info("create key file successfully") + + // 修改key文件属主 + runtime.Logger.Info("start to execute chown command for key file") + if _, err = util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s", user, group, keyFilePath), + "", nil, + 10*time.Second); err != nil { + runtime.Logger.Error(fmt.Sprintf("chown key file fail, error:%s", err)) + return fmt.Errorf("chown key file fail, error:%s", err) + } + runtime.Logger.Info("execute chown command for key file successfully") + + // 创建dbType文件 + runtime.Logger.Info("start to create dbType file") + dbTypeFile, err := os.OpenFile(dbTypeFilePath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, defaultPerm) + defer dbTypeFile.Close() + if err != nil { + runtime.Logger.Error(fmt.Sprintf("create dbType file fail, error:%s", err)) + return fmt.Errorf("create dbType file fail, error:%s", err) + } + if _, err = dbTypeFile.WriteString(instanceType); err != nil { + runtime.Logger.Error(fmt.Sprintf("dbType file write content fail, error:%s", err)) + return fmt.Errorf("dbType file write content fail, error:%s", err) + } + runtime.Logger.Info("create dbType file successfully") + + // 修改dbType文件属主 + runtime.Logger.Info("start to execute chown command for dbType file") + if _, err = util.RunBashCmd( + fmt.Sprintf("chown -R %s.%s %s", user, group, dbTypeFilePath), + "", nil, + 10*time.Second); err != nil { + runtime.Logger.Error(fmt.Sprintf("chown dbType file fail, error:%s", err)) + return fmt.Errorf("chown dbType file fail, error:%s", err) + } + runtime.Logger.Info("execute chown command for dbType file successfully") + + return nil + +} + +// StartMongoProcess 启动进程 +func StartMongoProcess(binDir string, port int, user string, auth bool) error { + // 启动服务 + var cmd string + cmd = fmt.Sprintf("su %s -c \"%s %d %s\"", user, + filepath.Join(binDir, "mongodb", "bin", "start_mongo.sh"), + port, "noauth") + if auth == true { + cmd = fmt.Sprintf("su %s -c \"%s %d\"", user, + filepath.Join(binDir, "mongodb", "bin", "start_mongo.sh"), + port) + } + if _, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second); err != nil { + return err + } + return nil +} + +// ShutdownMongoProcess 关闭进程 +func ShutdownMongoProcess(user string, instanceType string, binDir string, dbpathDir string, port int) error { + var cmd string + cmd = fmt.Sprintf("su %s -c \"%s --shutdown --dbpath %s\"", + user, filepath.Join(binDir, "mongodb", "bin", "mongod"), dbpathDir) + if instanceType == "mongos" { + cmd = fmt.Sprintf("ps -ef|grep mongos |grep -v grep|grep %d|awk '{print $2}' | xargs kill -2", port) + } + if _, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second); err != nil { + return err + } + return nil +} + +// AddPathToProfile 把可执行文件路径写入/etc/profile +func AddPathToProfile(runtime *jobruntime.JobGenericRuntime, binDir string) error { + runtime.Logger.Info("start to add binary path in /etc/profile") + etcProfilePath := "/etc/profile" + addEtcProfile := fmt.Sprintf(` +if ! 
grep -i %s: %s; +then +echo "export PATH=%s:\$PATH" >> %s +fi`, filepath.Join(binDir, "mongodb", "bin"), etcProfilePath, filepath.Join(binDir, "mongodb", "bin"), etcProfilePath) + runtime.Logger.Info(addEtcProfile) + if _, err := util.RunBashCmd(addEtcProfile, "", nil, 10*time.Second); err != nil { + runtime.Logger.Error(fmt.Sprintf("binary path add in /etc/profile, error:%s", err)) + return fmt.Errorf("binary path add in /etc/profile, error:%s", err) + } + runtime.Logger.Info("add binary path in /etc/profile successfully") + return nil +} + +// AuthGetPrimaryInfo 获取primary节点信息 +func AuthGetPrimaryInfo(user string, mongoBin string, username string, password string, ip string, port int) (string, + error) { + // 超时时间 + timeout := time.After(20 * time.Second) + for { + select { + case <-timeout: + return "", fmt.Errorf("get primary info timeout") + default: + cmd := fmt.Sprintf( + "su %s -c \"%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \\\"rs.isMaster().primary\\\"\"", + user, mongoBin, username, password, ip, port) + result, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second) + if err != nil { + return "", err + } + if strings.Replace(result, "\n", "", -1) == "" { + time.Sleep(1 * time.Second) + continue + } + primaryInfo := strings.Replace(result, "\n", "", -1) + return primaryInfo, nil + } + } +} + +// NoAuthGetPrimaryInfo 获取primary节点信息 +func NoAuthGetPrimaryInfo(user string, mongoBin string, ip string, port int) (string, error) { + // 超时时间 + timeout := time.After(20 * time.Second) + for { + select { + case <-timeout: + return "", fmt.Errorf("get primary info timeout") + default: + cmd := fmt.Sprintf( + "su %s -c \"%s --host %s --port %d --quiet --eval \\\"rs.isMaster().primary\\\"\"", + user, mongoBin, ip, port) + result, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second) + if err != nil { + return "", err + } + if strings.Replace(result, "\n", "", -1) == "" { + time.Sleep(1 * time.Second) + continue + } + primaryInfo := strings.Replace(result, "\n", "", -1) + return primaryInfo, nil + } + + } +} + +// InitiateReplicasetGetPrimaryInfo 复制集初始化时判断 +func InitiateReplicasetGetPrimaryInfo(user string, mongoBin string, ip string, port int) (string, error) { + cmd := fmt.Sprintf( + "su %s -c \"%s --host %s --port %d --quiet --eval \\\"rs.isMaster().primary\\\"\"", + user, mongoBin, ip, port) + result, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second) + if err != nil { + return "", err + } + primaryInfo := strings.Replace(result, "\n", "", -1) + return primaryInfo, nil +} + +// RemoveFile 删除文件 +func RemoveFile(filePath string) error { + cmd := fmt.Sprintf("rm -rf %s", filePath) + if _, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second); err != nil { + return err + } + return nil +} + +// CreateFile 创建文件 +func CreateFile(path string) error { + installLockFile, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600) + if err != nil { + return err + } + defer installLockFile.Close() + return nil +} + +// AuthCheckUser 检查user是否存在 +func AuthCheckUser(user string, mongoBin string, username string, password string, ip string, port int, authDb string, + checkUsername string) (bool, error) { + cmd := fmt.Sprintf( + "su %s -c \"%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \\\"db.getMongo().getDB('%s').getUser('%s')\\\"\"", + user, mongoBin, username, password, ip, port, authDb, checkUsername) + result, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second) + if err != nil { + return false, 
fmt.Errorf("get user info fail, error:%s", err) + } + if strings.Contains(result, checkUsername) == true { + return true, nil + } + + return false, nil +} + +// NoAuthCheckUser 检查user是否存在 +// func NoAuthCheckUser(user string, mongoBin string, ip string, port int, authDb string, +// checkUsername string) (bool, error) { +// cmd := fmt.Sprintf( +// "su %s -c \"%s --host %s --port %d --authenticationDatabase=admin --quiet --eval \\\"db.getMongo().getDB('%s').getUser('%s')\\\"\"", +// user, mongoBin, ip, port, authDb, checkUsername) +// result, err := util.RunBashCmd( +// cmd, +// "", nil, +// 10*time.Second) +// if err != nil { +// return false, fmt.Errorf("get user info fail, error:%s", err) +// } +// if strings.Contains(result, checkUsername) == true { +// return true, nil +// } +// +// return false, nil +// } + +// GetNodeInfo 获取mongod节点信息 _id int state int hidden bool priority int +func GetNodeInfo(user string, mongoBin string, ip string, port int, username string, password string, + sourceIP string, sourcePort int) (bool, int, int, bool, int, []map[string]string, error) { + source := strings.Join([]string{sourceIP, strconv.Itoa(sourcePort)}, ":") + cmdStatus := fmt.Sprintf( + "su %s -c '%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \"rs.status().members\"'", + user, mongoBin, username, password, ip, port) + cmdConf := fmt.Sprintf( + "su %s -c '%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \"rs.conf().members\"'", + user, mongoBin, username, password, ip, port) + + // 获取状态 + result1, err := util.RunBashCmd( + cmdStatus, + "", nil, + 10*time.Second) + if err != nil { + return false, 0, 0, false, 0, nil, fmt.Errorf("get members status info fail, error:%s", err) + } + result1 = strings.Replace(result1, " ", "", -1) + result1 = strings.Replace(result1, "\n", "", -1) + result1 = strings.Replace(result1, "NumberLong(", "", -1) + result1 = strings.Replace(result1, "Timestamp(", "", -1) + result1 = strings.Replace(result1, "ISODate(", "", -1) + result1 = strings.Replace(result1, ",1)", "", -1) + result1 = strings.Replace(result1, ",3)", "", -1) + result1 = strings.Replace(result1, ",2)", "", -1) + result1 = strings.Replace(result1, ",6)", "", -1) + result1 = strings.Replace(result1, ",0)", "", -1) + result1 = strings.Replace(result1, ")", "", -1) + + // 获取配置 + result2, err := util.RunBashCmd( + cmdConf, + "", nil, + 10*time.Second) + if err != nil { + return false, 0, 0, false, 0, nil, fmt.Errorf("get members conf info fail, error:%s", err) + } + result2 = strings.Replace(result2, " ", "", -1) + result2 = strings.Replace(result2, "\n", "", -1) + result2 = strings.Replace(result2, "NumberLong(", "", -1) + result2 = strings.Replace(result2, "Timestamp(", "", -1) + result2 = strings.Replace(result2, "ISODate(", "", -1) + result2 = strings.Replace(result2, ",1)", "", -1) + result2 = strings.Replace(result2, ")", "", -1) + + var statusSlice []map[string]interface{} + var confSlice []map[string]interface{} + if err = json.Unmarshal([]byte(result1), &statusSlice); err != nil { + return false, 0, 0, false, 0, nil, fmt.Errorf("get members status info json.Unmarshal fail, error:%s", err) + } + if err = json.Unmarshal([]byte(result2), &confSlice); err != nil { + return false, 0, 0, false, 0, nil, fmt.Errorf("get members conf info json.Unmarshal fail, error:%s", err) + } + + // 格式化配置信息 + var memberInfo []map[string]string + for _, v := range statusSlice { + member := make(map[string]string) + member["name"] = v["name"].(string) + 
member["state"] = fmt.Sprintf("%1.0f", v["state"]) + for _, k := range confSlice { + if k["host"].(string) == member["name"] { + member["hidden"] = strconv.FormatBool(k["hidden"].(bool)) + break + } + } + memberInfo = append(memberInfo, member) + } + + var id int + var state int + var hidden bool + var priority int + flag := false + for _, key := range statusSlice { + if key["name"].(string) == source { + id, _ = strconv.Atoi(fmt.Sprintf("%1.0f", key["_id"])) + state, _ = strconv.Atoi(fmt.Sprintf("%1.0f", key["state"])) + flag = true + break + } + } + for _, key := range confSlice { + if key["host"].(string) == source { + hidden = key["hidden"].(bool) + priority, _ = strconv.Atoi(fmt.Sprintf("%1.0f", key["priority"])) + break + } + } + return flag, id, state, hidden, priority, memberInfo, nil + +} + +// AuthRsStepDown 主备切换 +func AuthRsStepDown(user string, mongoBin string, ip string, port int, username string, password string) (bool, error) { + cmd := fmt.Sprintf( + "su %s -c \"%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \\\"rs.stepDown()\\\"\"", + user, mongoBin, username, password, ip, port) + _, _ = util.RunBashCmd( + cmd, + "", nil, + 10*time.Second) + primaryInfo, err := AuthGetPrimaryInfo(user, mongoBin, username, password, ip, port) + if err != nil { + return false, err + } + if primaryInfo == strings.Join([]string{ip, strconv.Itoa(port)}, ":") { + return false, nil + } + + return true, nil +} + +// NoAuthRsStepDown 主备切换 +func NoAuthRsStepDown(user string, mongoBin string, ip string, port int) (bool, error) { + cmd := fmt.Sprintf( + "su %s -c \"%s --host %s --port %d --authenticationDatabase=admin --quiet --eval \\\"rs.stepDown()\\\"\"", + user, mongoBin, ip, port) + _, _ = util.RunBashCmd( + cmd, + "", nil, + 10*time.Second) + primaryInfo, err := NoAuthGetPrimaryInfo(user, mongoBin, ip, port) + if err != nil { + return false, err + } + if primaryInfo == strings.Join([]string{ip, strconv.Itoa(port)}, ":") { + return false, nil + } + return true, nil +} + +// CheckBalancer 检查balancer的值 +func CheckBalancer(user string, mongoBin string, ip string, port int, username string, password string) (string, + error) { + cmd := fmt.Sprintf( + "su %s -c \"%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \\\"sh.getBalancerState()\\\"\"", + user, mongoBin, username, password, ip, port) + result, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second) + if err != nil { + return "", err + } + result = strings.Replace(result, "\n", "", -1) + return result, nil +} + +// GetProfilingLevel 获取profile级别 +func GetProfilingLevel(user string, mongoBin string, ip string, port int, username string, password string, + dbName string) (int, error) { + cmd := fmt.Sprintf( + "su %s -c \"%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \\\"db.getMongo().getDB('%s').getProfilingLevel()\\\"\"", + user, mongoBin, username, password, ip, port, dbName) + result, err := util.RunBashCmd( + cmd, + "", nil, + 10*time.Second) + if err != nil { + return -1, err + } + intResult, _ := strconv.Atoi(result) + return intResult, nil +} + +// SetProfilingLevel 设置profile级别 +func SetProfilingLevel(user string, mongoBin string, ip string, port int, username string, password string, + dbName string, level int) error { + cmd := fmt.Sprintf( + "su %s -c \"%s -u %s -p %s --host %s --port %d --authenticationDatabase=admin --quiet --eval \\\"db.getMongo().getDB('%s').setProfilingLevel(%d)\\\"\"", + user, mongoBin, username, password, ip, 
port, dbName, level)
+	_, err := util.RunBashCmd(
+		cmd,
+		"", nil,
+		10*time.Second)
+	if err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/common/mongo_init_shell.go b/dbm-services/redis/db-tools/dbactuator/pkg/common/mongo_init_shell.go
new file mode 100644
index 0000000000..919bfffe9c
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/common/mongo_init_shell.go
@@ -0,0 +1,165 @@
+package common

+// MongoShellInit mongo机器初始化shell脚本模板,{{user}}/{{group}}等占位符由调用方渲染
+var MongoShellInit = `#!/bin/sh
+# 新建用户
+
+function _exit() {
+	rm $0
+	exit
+}
+#handler nscd restart
+#如果不存在{{group}}用户组,则 groupadd {{group}} -g 2000
+egrep "^{{group}}" /etc/group >& /dev/null
+if [ $? -ne 0 ]
+then
+groupadd {{group}} -g 2000
+fi
+#考虑到可能上架已运行的机器,userdel再重建有风险,不采用删除重建的方法
+#如果不存在{{user}}用户则创建(uid固定2000,并确保home目录存在)
+id {{user}} >& /dev/null
+if [ $? -ne 0 ]
+then
+	useradd -m -d /home/{{user}} -g 2000 -G users -u 2000 {{user}}
+	chage -M 99999 {{user}}
+	if [ ! -d /home/{{user}} ];
+	then
+		mkdir -p /home/{{user}}
+	fi
+	chmod 755 /home/{{user}}
+	usermod -d /home/{{user}} {{user}} 2>/dev/null
+fi
+if [[ -z "$MONGO_DATA_DIR" ]]
+then
+	echo "env MONGO_DATA_DIR cannot be empty" >&2
+	exit 1
+fi
+if [[ -z "$MONGO_BACKUP_DIR" ]]
+then
+	echo "env MONGO_BACKUP_DIR cannot be empty" >&2
+	exit 1
+fi
+
+if [ ! -d $MONGO_DATA_DIR ]
+then
+	mkdir -p $MONGO_DATA_DIR
+fi
+
+if [ ! -d $MONGO_BACKUP_DIR ]
+then
+	mkdir -p $MONGO_BACKUP_DIR
+fi
+
+#添加mongo安装锁文件
+if [ ! -f $MONGO_DATA_DIR/mongoinstall.lock ]
+then
+	touch $MONGO_DATA_DIR/mongoinstall.lock
+fi
+
+#建立 /data、/data1 软链接及 dbha、install 等目录(已存在则跳过)
+if [ ! -d /data ];
+then
+	ln -s $MONGO_BACKUP_DIR /data
+fi
+if [ ! -d /data1 ];
+then
+	ln -s $MONGO_DATA_DIR /data1
+fi
+if [[ ! -d /data1/dbha ]]
+then
+	mkdir -p /data1/dbha
+fi
+chown -R {{user}} /data1/dbha
+if [[ ! -d /data/dbha ]]
+then
+	mkdir -p /data/dbha
+fi
+chown -R {{user}} /data/dbha
+if [[ ! -d /data/install ]]
+then
+	mkdir -p /data/install
+	chown -R {{user}} /data/install
+fi
+if [[ ! -d $MONGO_BACKUP_DIR/dbbak ]]
+then
+	mkdir -p $MONGO_BACKUP_DIR/dbbak
+	chown -R {{user}} $MONGO_BACKUP_DIR/dbbak
+fi
+chown -R {{user}} /home/{{user}}
+chmod -R a+rwx /data/install
+rm -rf /home/{{user}}/install
+ln -s /data/install /home/{{user}}/install
+chown -R {{user}} /home/{{user}}/install
+password="$2"
+#password=$(echo "$2" | /home/mysql/install/lib/tools/base64 -d)
+echo "{{user}}:$password" | chpasswd
+FOUND=$(grep 'ulimit -n 204800' /etc/profile)
+if [ -z "$FOUND" ]; then
+	echo 'ulimit -n 204800' >> /etc/profile
+fi
+FOUND=$(grep 'export LC_ALL=en_US' /etc/profile)
+if [ -z "$FOUND" ]; then
+	echo 'export LC_ALL=en_US' >> /etc/profile
+fi
+FOUND=$(grep 'export PATH=/usr/local/mongodb/bin/:$PATH' /etc/profile)
+if [ -z "$FOUND" ]; then
+	echo 'export PATH=/usr/local/mongodb/bin/:$PATH' >> /etc/profile
+fi
+FOUND_umask=$(grep '^umask 022' /etc/profile)
+if [ -z "$FOUND_umask" ]; then
+	echo 'umask 022' >> /etc/profile
+fi
+FOUND=$(grep 'vm.swappiness = 0' /etc/sysctl.conf)
+if [ -z "$FOUND" ];then
+echo "vm.swappiness = 0" >> /etc/sysctl.conf
+fi
+FOUND=$(grep 'kernel.pid_max = 200000' /etc/sysctl.conf)
+if [ -z "$FOUND" ];then
+echo "kernel.pid_max = 200000" >> /etc/sysctl.conf
+fi
+
+FOUND=$(grep '{{user}} soft nproc 64000' /etc/security/limits.conf)
+if [ -z "$FOUND" ];then
+echo "{{user}} soft nproc 64000" >> /etc/security/limits.conf
+fi
+FOUND=$(grep '{{user}} hard nproc 64000' /etc/security/limits.conf)
+if [ -z "$FOUND" ];then
+echo "{{user}} hard nproc 64000" >> /etc/security/limits.conf
+fi
+FOUND=$(grep '{{user}} soft fsize unlimited' /etc/security/limits.conf)
+if [ -z "$FOUND" ];then
+echo "{{user}} soft fsize unlimited" >> /etc/security/limits.conf
+fi
+FOUND=$(grep '{{user}} hard fsize unlimited' /etc/security/limits.conf)
+if [ -z "$FOUND" ];then
+echo "{{user}} hard fsize unlimited" >> /etc/security/limits.conf
+fi
+FOUND=$(grep '{{user}} soft memlock unlimited' /etc/security/limits.conf)
+if [ -z "$FOUND" ];then
+echo "{{user}} soft memlock unlimited" >> /etc/security/limits.conf
+fi
+FOUND=$(grep '{{user}} hard memlock unlimited' /etc/security/limits.conf)
+if [ -z "$FOUND" ];then
+echo "{{user}} hard memlock unlimited" >> /etc/security/limits.conf
+fi
+FOUND=$(grep '{{user}} soft as unlimited' /etc/security/limits.conf)
+if [ -z "$FOUND" ];then
+echo "{{user}} soft as unlimited" >> /etc/security/limits.conf
+fi
+FOUND=$(grep '{{user}} hard as unlimited' /etc/security/limits.conf)
+if [ -z "$FOUND" ];then
+echo "{{user}} hard as unlimited" >> /etc/security/limits.conf
+fi
+
+FOUND=$(grep 'session required pam_limits.so' /etc/pam.d/login)
+if [ -z "$FOUND" ];then
+echo "session required pam_limits.so" >> /etc/pam.d/login
+fi
+
+FOUND=$(grep 'session required pam_limits.so' /etc/pam.d/su)
+if [ -z "$FOUND" ];then
+echo "session required pam_limits.so" >> /etc/pam.d/su
+fi
+
+/sbin/sysctl -p
+_exit`
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/common/mongo_user_conf.go b/dbm-services/redis/db-tools/dbactuator/pkg/common/mongo_user_conf.go
new file mode 100644
index 0000000000..add72f1a02
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/common/mongo_user_conf.go
@@ -0,0 +1,35 @@
+package common
+
+import "encoding/json"
+
+// MongoRole 角色
+type MongoRole struct {
+	Role string `json:"role"`
+	Db   string `json:"db"`
+}
+
+// MongoUser 用户
+type MongoUser struct {
+	User  string       `json:"user"`
+	Pwd   string       `json:"pwd"`
+	Roles []*MongoRole `json:"roles"`
+}
+
+// NewMongoUser 生成结构体
+func NewMongoUser() *MongoUser {
+	return &MongoUser{}
+}
+
+//
GetContent 转成json +func (m *MongoUser) GetContent() (string, error) { + content, err := json.Marshal(m) + if err != nil { + return "", err + } + return string(content), nil +} + +// NewMongoRole 生成结构体 +func NewMongoRole() *MongoRole { + return &MongoRole{} +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/common/mongod_conf.go b/dbm-services/redis/db-tools/dbactuator/pkg/common/mongod_conf.go new file mode 100644 index 0000000000..7cc3629df2 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/common/mongod_conf.go @@ -0,0 +1,87 @@ +package common + +import ( + "gopkg.in/yaml.v2" +) + +// YamlMongoDBConf 3.0及以上配置文件 +type YamlMongoDBConf struct { + Storage struct { + DbPath string `yaml:"dbPath"` + Engine string `yaml:"engine"` + WiredTiger struct { + EngineConfig struct { + CacheSizeGB int `yaml:"cacheSizeGB"` + } `yaml:"engineConfig"` + } `yaml:"wiredTiger"` + } `yaml:"storage"` + Replication struct { + OplogSizeMB int `yaml:"oplogSizeMB"` + ReplSetName string `yaml:"replSetName"` + } `yaml:"replication"` + SystemLog struct { + LogAppend bool `yaml:"logAppend"` + Path string `yaml:"path"` + Destination string `yaml:"destination"` + } `yaml:"systemLog"` + ProcessManagement struct { + Fork bool `yaml:"fork"` + PidFilePath string `yaml:"pidFilePath"` + } `yaml:"processManagement"` + Net struct { + Port int `yaml:"port"` + BindIp string `yaml:"bindIp"` + WireObjectCheck bool `yaml:"wireObjectCheck"` + } `yaml:"net"` + OperationProfiling struct { + SlowOpThresholdMs int `yaml:"slowOpThresholdMs"` + } `yaml:"operationProfiling"` + Sharding struct { + ClusterRole string `yaml:"clusterRole,omitempty"` + } `yaml:"sharding,omitempty"` + Security struct { + KeyFile string `yaml:"keyFile,omitempty"` + } `yaml:"security,omitempty"` +} + +// NewYamlMongoDBConf 生成结构体 +func NewYamlMongoDBConf() *YamlMongoDBConf { + return &YamlMongoDBConf{} +} + +// GetConfContent 获取配置文件内容 +func (y *YamlMongoDBConf) GetConfContent() ([]byte, error) { + out, err := yaml.Marshal(y) + if err != nil { + return nil, err + } + return out, nil +} + +// IniNoAuthMongoDBConf 3.0以下配置文件 +var IniNoAuthMongoDBConf = `replSet={{replSet}} +dbpath={{dbpath}} +logpath={{logpath}} +pidfilepath={{pidfilepath}} +logappend=true +port={{port}} +bind_ip={{bind_ip}} +fork=true +nssize=16 +oplogSize={{oplogSize}} +{{instanceRole}} = true` + +// IniAuthMongoDBConf 3.0以下配置文件 +var IniAuthMongoDBConf = `replSet={{replSet}} +dbpath={{dbpath}} +logpath={{logpath}} +pidfilepath={{pidfilepath}} +logappend=true +port={{port}} +bind_ip={{bind_ip}} +keyFile={{keyFile}} +fork=true +nssize=16 +oplogSize={{oplogSize}} +{{instanceRole}} = true +` diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/common/mongos_conf.go b/dbm-services/redis/db-tools/dbactuator/pkg/common/mongos_conf.go new file mode 100644 index 0000000000..66f791a1ca --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/common/mongos_conf.go @@ -0,0 +1,46 @@ +package common + +import ( + "gopkg.in/yaml.v2" +) + +// YamlMongoSConf 4.0及以上配置文件 +type YamlMongoSConf struct { + Sharding struct { + ConfigDB string `yaml:"configDB"` + } `yaml:"sharding"` + SystemLog struct { + LogAppend bool `yaml:"logAppend"` + Path string `yaml:"path"` + Destination string `yaml:"destination"` + } `yaml:"systemLog"` + ProcessManagement struct { + Fork bool `yaml:"fork"` + PidFilePath string `yaml:"pidFilePath"` + } `yaml:"processManagement"` + Net struct { + Port int `yaml:"port"` + BindIp string `yaml:"bindIp"` + WireObjectCheck bool `yaml:"wireObjectCheck"` + } `yaml:"net"` + 
OperationProfiling struct { + SlowOpThresholdMs int `yaml:"slowOpThresholdMs,omitempty"` + } `yaml:"operationProfiling,omitempty"` + Security struct { + KeyFile string `yaml:"keyFile,omitempty"` + } `yaml:"security,omitempty"` +} + +// NewYamlMongoSConf 生成结构体 +func NewYamlMongoSConf() *YamlMongoSConf { + return &YamlMongoSConf{} +} + +// GetConfContent 获取配置文件内容 +func (y *YamlMongoSConf) GetConfContent() ([]byte, error) { + out, err := yaml.Marshal(y) + if err != nil { + return nil, err + } + return out, nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/common/predixy_conf.go b/dbm-services/redis/db-tools/dbactuator/pkg/common/predixy_conf.go new file mode 100644 index 0000000000..196afefaf0 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/common/predixy_conf.go @@ -0,0 +1,38 @@ +package common + +// PredixConf TODO +var PredixConf = `Bind {{ip:port}} +WorkerThreads {{worker_threads}} +ClientTimeout {{client_timeout}} +Authority { + Auth "{{predixy_password}}" { + Mode write + } +} +Log {{log_path}} +LogRotate 1d +ClusterServerPool { + Password {{redis_password}} + RefreshInterval {{refresh_interval}} + ServerFailureLimit {{server_failure_limit}} + ServerRetryTimeout {{server_retry_timeout}} + ServerTimeout {{server_timeout}} + KeepAlive {{keep_alive}} + Servers { + {{server:port}} + + } +} +LatencyMonitor all { + Commands { + + all + } + TimeSpan { + + 100 + + 500 + + 1000 + + 5000 + + 10000 + } +} +` diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/common/repliccaset_member_conf.go b/dbm-services/redis/db-tools/dbactuator/pkg/common/repliccaset_member_conf.go new file mode 100644 index 0000000000..731c652038 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/common/repliccaset_member_conf.go @@ -0,0 +1,24 @@ +package common + +import "encoding/json" + +// ReplicasetMemberAdd 复制集状态 +type ReplicasetMemberAdd struct { + Host string `json:"host"` // ip:port + Hidden bool `json:"hidden"` + Priority int `json:"priority"` +} + +// NewReplicasetMemberAdd 生成结构体 +func NewReplicasetMemberAdd() *ReplicasetMemberAdd { + return &ReplicasetMemberAdd{} +} + +// GetJson 获取json格式 +func (t *ReplicasetMemberAdd) GetJson() (string, error) { + byteInfo, err := json.Marshal(t) + if err != nil { + return "", err + } + return string(byteInfo), nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/common/twemproxy_conf.go b/dbm-services/redis/db-tools/dbactuator/pkg/common/twemproxy_conf.go new file mode 100644 index 0000000000..2b49e82238 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/common/twemproxy_conf.go @@ -0,0 +1,145 @@ +package common + +import ( + "fmt" + "os" + "strconv" + "strings" + + "github.com/pkg/errors" + "gopkg.in/yaml.v2" +) + +const twemproxyBucketMax = 420000 + +type twemproxyConfServer struct { + Addr string + App string + Weight int + BucketStart int + BucketEnd int + RouterLine string + Status int +} + +// Output 生成Server配置中的行 +func (s twemproxyConfServer) Output() string { + // :1是权重,我们的架构里,权重都一样,都设置为相同的. 
+ return fmt.Sprintf("%s:1 %s %d-%d %d", s.Addr, s.App, s.BucketStart, s.BucketEnd, s.Status) +} + +// ReFormatTwemproxyConfServer 重新格式化, +func ReFormatTwemproxyConfServer(serverLines []string) (newServerLines []string, err error) { + bucketSum := 0 + confServers := make([]twemproxyConfServer, 0) + if len(serverLines) == 0 { + return nil, errors.Errorf("empty") + } + for _, line := range serverLines { + server, err := newTwemproxyConfServerFromLine(line) + if err != nil { + return nil, errors.Errorf("bad format, line:%s", line) + } + if server.Status != 1 { + return nil, errors.Errorf("bad status:%d, line:%s", server.Status, line) + } + confServers = append(confServers, *server) + bucketSum += 1 + server.BucketEnd - server.BucketStart + } + if bucketSum != twemproxyBucketMax { + return nil, errors.Errorf("bucket sum is Not %d", twemproxyBucketMax) + } + newServerLines = make([]string, 0, len(serverLines)) + for i := range confServers { + newServerLines = append(newServerLines, confServers[i].Output()) + } + return newServerLines, nil + +} + +// newTwemproxyConfServerFromLine 生成Server配置中的行 +func newTwemproxyConfServerFromLine(line string) (*twemproxyConfServer, error) { + var server twemproxyConfServer + + fs := strings.Fields(line) + if len(fs) != 4 { + return nil, errors.Errorf("bad line") + } + + fs0 := strings.Split(fs[0], ":") + if len(fs0) == 2 { + server.Addr = fs[0] + server.Weight = 1 + } else if len(fs0) == 3 { + server.Addr = strings.Join(fs0[0:2], ":") + server.Weight = 1 + } + + server.App = fs[1] + bucket := strings.Split(fs[2], "-") + if len(bucket) != 2 { + return nil, errors.Errorf("bad line") + } + server.BucketStart, _ = strconv.Atoi(bucket[0]) + server.BucketEnd, _ = strconv.Atoi(bucket[1]) + server.Status, _ = strconv.Atoi(fs[3]) + + if server.BucketStart < 0 || server.BucketEnd < 0 || + server.BucketStart > server.BucketEnd || + server.BucketEnd >= twemproxyBucketMax { + return nil, errors.Errorf("bad line") + } + return &server, nil +} + +// TwemproxyConf 负责处理生成Twemproxy的配置文件 +type TwemproxyConf struct { + NosqlProxy struct { + Listen string `yaml:"listen"` + Password string `yaml:"password"` + RedisPassword string `yaml:"redis_password"` + SlowMs int `yaml:"slowms"` + Redis bool `yaml:"redis"` + Distribution string `yaml:"distribution"` + Hash string `yaml:"hash"` + ServerFailureLimit int `yaml:"server_failure_limit"` + AutoEjectHosts bool `yaml:"auto_eject_hosts"` + PreConnect bool `yaml:"preconnect"` + ServerRetryTimeout int `yaml:"server_retry_timeout"` + ServerConnections int `yaml:"server_connections"` + HashTag string `yaml:"hash_tag,omitempty"` + Backlog int `yaml:"backlog"` + Servers []string `yaml:"servers"` // + } `yaml:"nosqlproxy"` +} + +// NewTwemproxyConf Do NewTwemproxyConf +func NewTwemproxyConf() *TwemproxyConf { + return &TwemproxyConf{} +} + +// Load do load from file +func (yc *TwemproxyConf) Load(filePath string) error { + out, err := os.ReadFile(filePath) + if err != nil { + return err + } + + err = yaml.Unmarshal(out, yc) + return err +} + +// Save do save to file +func (yc *TwemproxyConf) Save(filePath string, perm os.FileMode) error { + out, err := yaml.Marshal(yc) + if err != nil { + return err + } + return os.WriteFile(filePath, out, perm) +} + +// CheckServersValid 检查Servers 本身的合法性. 
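// Editor's note: an illustrative sketch (not part of this patch) of the server-line
// format handled above: "<ip>:<port>[:<weight>] <app> <bucketStart>-<bucketEnd> <status>".
// Buckets must cover exactly 0..419999 and status must be 1, otherwise reformatting
// fails; the addresses and app name below are hypothetical.
package main

import (
	"fmt"

	"dbm-services/redis/db-tools/dbactuator/pkg/common"
)

func main() {
	lines := []string{
		"1.1.1.1:30000:1 testapp 0-209999 1",
		"1.1.1.2:30000:1 testapp 210000-419999 1",
	}
	// Output lines normalize every weight to ":1", per twemproxyConfServer.Output.
	newLines, err := common.ReFormatTwemproxyConfServer(lines)
	fmt.Println(newLines, err)
}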
+func (yc *TwemproxyConf) CheckServersValid(serverLines []string) error {
+	_, err := ReFormatTwemproxyConfServer(serverLines)
+	return err
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/consts/consts.go b/dbm-services/redis/db-tools/dbactuator/pkg/consts/consts.go
new file mode 100644
index 0000000000..8f1e4f0b98
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/consts/consts.go
@@ -0,0 +1,278 @@
+// Package consts 常量
+package consts
+
+const (
+	// TendisTypePredixyRedisCluster predixy + RedisCluster架构
+	TendisTypePredixyRedisCluster = "PredixyRedisCluster"
+	// TendisTypePredixyTendisplusCluster predixy + TendisplusCluster架构
+	TendisTypePredixyTendisplusCluster = "PredixyTendisplusCluster"
+	// TendisTypeTwemproxyRedisInstance twemproxy + RedisInstance架构
+	TendisTypeTwemproxyRedisInstance = "TwemproxyRedisInstance"
+	// TendisTypeTwemproxyTendisplusInstance twemproxy + TendisplusInstance架构
+	TendisTypeTwemproxyTendisplusInstance = "TwemproxyTendisplusInstance"
+	// TendisTypeTwemproxyTendisSSDInstance twemproxy + TendisSSDInstance架构
+	TendisTypeTwemproxyTendisSSDInstance = "TwemproxyTendisSSDInstance"
+	// TendisTypeRedisInstance RedisCache 主从版
+	TendisTypeRedisInstance = "RedisInstance"
+	// TendisTypeTendisplusInsance Tendisplus 主从版
+	TendisTypeTendisplusInsance = "TendisplusInstance"
+	// TendisTypeTendisSSDInsance TendisSSD 主从版
+	TendisTypeTendisSSDInsance = "TendisSSDInstance"
+	// TendisTypeRedisCluster 原生RedisCluster 架构
+	TendisTypeRedisCluster = "RedisCluster"
+	// TendisTypeTendisplusCluster TendisplusCluster架构
+	TendisTypeTendisplusCluster = "TendisplusCluster"
+)
+
+// 字节单位(1024进制;基于iota递增,依次为 2^0、2^10、…、2^60)
+const (
+	Byte = 1 << (iota * 10)
+	KiByte
+	MiByte
+	GiByte
+	TiByte
+	PiByte
+	EiByte
+)
+
+const (
+	// RedisMasterRole redis role master
+	RedisMasterRole = "master"
+	// RedisSlaveRole redis role slave
+	RedisSlaveRole = "slave"
+
+	// RedisNoneRole none role
+	RedisNoneRole = "none"
+
+	// MasterLinkStatusUP up status
+	MasterLinkStatusUP = "up"
+	// MasterLinkStatusDown down status
+	MasterLinkStatusDown = "down"
+
+	// TendisSSDIncrSyncState IncrSync state
+	TendisSSDIncrSyncState = "IncrSync"
+	// TendisSSDReplFollowtate REPL_FOLLOW state
+	TendisSSDReplFollowtate = "REPL_FOLLOW"
+)
+
+const (
+	// RedisLinkStateConnected redis connection status connected
+	RedisLinkStateConnected = "connected"
+	// RedisLinkStateDisconnected redis connection status disconnected
+	RedisLinkStateDisconnected = "disconnected"
+)
+
+const (
+	// NodeStatusPFail Node is in PFAIL state. Not reachable for the node you are contacting, but still logically reachable
+	NodeStatusPFail = "fail?"
+	// NodeStatusFail Node is in FAIL state. It was not reachable for multiple nodes that promoted the PFAIL state to FAIL
+	NodeStatusFail = "fail"
+	// NodeStatusHandshake Untrusted node, we are handshaking.
+ NodeStatusHandshake = "handshake" + // NodeStatusNoAddr No address known for this node + NodeStatusNoAddr = "noaddr" + // NodeStatusNoFlags no flags at all + NodeStatusNoFlags = "noflags" +) + +const ( + // ClusterStateOK command 'cluster info',cluster_state + ClusterStateOK = "ok" + // ClusterStateFail command 'cluster info',cluster_state + ClusterStateFail = "fail" +) +const ( + // DefaultMinSlots 0 + DefaultMinSlots = 0 + // DefaultMaxSlots 16383 + DefaultMaxSlots = 16383 + + // TwemproxyMaxSegment twemproxy max segment + TwemproxyMaxSegment = 419999 + // TotalSlots 集群总槽数 + TotalSlots = 16384 +) + +// time layout +const ( + UnixtimeLayout = "2006-01-02 15:04:05" + FilenameTimeLayout = "20060102-150405" + FilenameDayLayout = "20060102" +) + +// account +const ( + MysqlAaccount = "mysql" + MysqlGroup = "mysql" + OSAccount = "mysql" + OSGroup = "mysql" +) + +// path dirs +const ( + UsrLocal = "/usr/local" + PackageSavePath = "/data/install" + Data1Path = "/data1" + DataPath = "/data" + DbaReportSaveDir = "/home/mysql/dbareport/" + RedisReportSaveDir = "/home/mysql/dbareport/redis/" + ExporterConfDir = "/home/mysql/.exporter" + RedisReportLeftDay = 15 +) + +// tool path +const ( + DbToolsPath = "/home/mysql/dbtools" + RedisShakeBin = "/home/mysql/dbtools/redis-shake" + RedisSafeDeleteToolBin = "/home/mysql/dbtools/redisSafeDeleteTool" + LdbTendisplusBin = "/home/mysql/dbtools/ldb_tendisplus" + TredisverifyBin = "/home/mysql/dbtools/tredisverify" + TredisBinlogBin = "/home/mysql/dbtools/tredisbinlog" + TredisDumpBin = "/home/mysql/dbtools/tredisdump" + NetCatBin = "/home/mysql/dbtools/netcat" + TendisKeyLifecycleBin = "/home/mysql/dbtools/tendis-key-lifecycle" + ZkWatchBin = "/home/mysql/dbtools/zkwatch" + ZstdBin = "/home/mysql/dbtools/zstd" + LzopBin = "/home/mysql/dbtools/lzop" + LdbWithV38Bin = "/home/mysql/dbtools/ldb_with_len.3.8" + LdbWithV513Bin = "/home/mysql/dbtools/ldb_with_len.5.13" + MyRedisCaptureBin = "/home/mysql/dbtools/myRedisCapture" + BinlogToolTendisplusBin = "/home/mysql/dbtools/binlogtool_tendisplus" + RedisCliBin = "/home/mysql/dbtools/redis-cli" + TendisDataCheckBin = "/home/mysql/dbtools/tendisDataCheck" + RedisDiffKeysRepairerBin = "/home/mysql/dbtools/redisDiffKeysRepairer" +) + +// bk-dbmon path +const ( + BkDbmonPath = "/home/mysql/bk-dbmon" + BkDbmonBin = "/home/mysql/bk-dbmon/bk-dbmon" + BkDbmonConfFile = "/home/mysql/bk-dbmon/dbmon-config.yaml" + BkDbmonPort = 6677 + BkDbmonHTTPAddress = "127.0.0.1:6677" +) + +// backup +const ( + NormalBackupType = "normal_backup" + ForeverBackupType = "forever_backup" + BackupClient = "/usr/local/bin/backup_client" + BackupTarSplitSize = "8G" + RedisFullBackupTAG = "REDIS_FULL" + RedisBinlogTAG = "REDIS_BINLOG" + RedisForeverBackupTAG = "DBFILE" + RedisFullBackupReportType = "redis_fullbackup" + RedisBinlogBackupReportType = "redis_binlogbackup" + DoingRedisFullBackFileList = "redis_backup_file_list_%d_doing" + DoneRedisFullBackFileList = "redis_backup_file_list_%d_done" + DoingRedisBinlogFileList = "redis_binlog_file_list_%d_doing" + DoneRedisBinlogFileList = "redis_binlog_file_list_%d_done" + RedisFullbackupRepoter = "redis_fullbackup_%s.log" + RedisBinlogRepoter = "redis_binlog_%s.log" + BackupStatusStart = "start" + BackupStatusRunning = "running" + BackupStatusToBakSystemStart = "to_backup_system_start" + BackupStatusToBakSystemFailed = "to_backup_system_failed" + BackupStatusToBakSysSuccess = "to_backup_system_success" + BackupStatusFailed = "failed" + BackupStatusLocalSuccess = "local_success" +) + +// meta role 
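// Editor's note: an illustrative sketch (not part of this patch). The file-list and
// reporter constants above are printf-style patterns; the port and date below are
// hypothetical, and treating %s as a date in FilenameDayLayout is an assumption.
package main

import (
	"fmt"
	"time"

	"dbm-services/redis/db-tools/dbactuator/pkg/consts"
)

func main() {
	fmt.Println(fmt.Sprintf(consts.DoingRedisFullBackFileList, 30000))
	// redis_backup_file_list_30000_doing
	fmt.Println(fmt.Sprintf(consts.RedisFullbackupRepoter, time.Now().Format(consts.FilenameDayLayout)))
	// e.g. redis_fullbackup_20230529.log
}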
+const ( + MetaRoleRedisMaster = "redis_master" + MetaRoleRedisSlave = "redis_slave" +) + +// proxy operations +const ( + ProxyStart = "proxy_open" + ProxyStop = "proxy_close" + ProxyRestart = "proxy_restart" + ProxyShutdown = "proxy_shutdown" +) + +const ( + // FlushDBRename .. + FlushDBRename = "cleandb" + // CacheFlushAllRename .. + CacheFlushAllRename = "cleanall" + // SSDFlushAllRename .. + SSDFlushAllRename = "flushalldisk" + // KeysRename .. + KeysRename = "mykeys" + // ConfigRename .. + ConfigRename = "confxx" +) + +// IsClusterDbType 存储端是否是cluster类型 +func IsClusterDbType(dbType string) bool { + if dbType == TendisTypePredixyRedisCluster || + dbType == TendisTypePredixyTendisplusCluster || + dbType == TendisTypeRedisCluster || + dbType == TendisTypeTendisplusCluster { + return true + } + return false +} + +// IsRedisInstanceDbType 存储端是否是cache类型 +func IsRedisInstanceDbType(dbType string) bool { + if dbType == TendisTypePredixyRedisCluster || + dbType == TendisTypeTwemproxyRedisInstance || + dbType == TendisTypeRedisInstance || + dbType == TendisTypeRedisCluster { + return true + } + return false +} + +// IsTwemproxyClusterType 检查proxy是否为Twemproxy +func IsTwemproxyClusterType(dbType string) bool { + if dbType == TendisTypeTwemproxyRedisInstance || + dbType == TendisTypeTwemproxyTendisSSDInstance || + dbType == TendisTypeTwemproxyTendisplusInstance { + return true + } + return false +} + +// IsTendisplusInstanceDbType 存储端是否是tendisplus类型 +func IsTendisplusInstanceDbType(dbType string) bool { + if dbType == TendisTypePredixyTendisplusCluster || + dbType == TendisTypeTwemproxyTendisplusInstance || + dbType == TendisTypeTendisplusInsance || + dbType == TendisTypeTendisplusCluster { + return true + } + return false +} + +// IsTendisSSDInstanceDbType 存储端是否是tendisSSD类型 +func IsTendisSSDInstanceDbType(dbType string) bool { + if dbType == TendisTypeTwemproxyTendisSSDInstance || + dbType == TendisTypeTendisSSDInsance { + return true + } + return false +} + +// IsAllowFlushMoreDB 是否支持flush 多DB +func IsAllowFlushMoreDB(dbType string) bool { + if dbType == TendisTypeRedisInstance || + dbType == TendisTypeTendisplusInsance { + return true + } + return false +} + +// IsAllowRandomkey 是否支持randomkey命令 +func IsAllowRandomkey(dbType string) bool { + if dbType == TendisTypePredixyTendisplusCluster || + dbType == TendisTypeTwemproxyTendisplusInstance || + dbType == TendisTypeTendisplusInsance || + dbType == TendisTypeTendisplusCluster { + return false + } + return true +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/consts/data_dir.go b/dbm-services/redis/db-tools/dbactuator/pkg/consts/data_dir.go new file mode 100644 index 0000000000..23fbfb3409 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/consts/data_dir.go @@ -0,0 +1,325 @@ +package consts + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" +) + +// fileExists 检查目录是否已经存在 +func fileExists(path string) bool { + _, err := os.Stat(path) + if err != nil { + return os.IsExist(err) + } + return true +} + +// IsMountPoint2 Determine if a directory is a mountpoint, by comparing the device for the directory +// with the device for it's parent. If they are the same, it's not a mountpoint, if they're +// different, it is. 
+// reference: https://github.com/cnaize/kubernetes/blob/master/pkg/util/mount/mountpoint_unix.go#L29 +// 该函数与util/util.go 中 IsMountPoint()相同,但package consts 不建议依赖其他模块故拷贝了实现 +func IsMountPoint2(file string) bool { + stat, err := os.Stat(file) + if err != nil { + return false + } + rootStat, err := os.Lstat(file + "/..") + if err != nil { + return false + } + // If the directory has the same device as parent, then it's not a mountpoint. + return stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev +} + +// SetRedisDataDir 设置环境变量 REDIS_DATA_DIR,并持久化到/etc/profile中 +// 如果函数参数 dataDir 不为空,则 REDIS_DATA_DIR = {dataDir} +// 否则,如果环境变量 REDIS_DATA_DIR 不为空,则直接读取; +// 否则,如果 /data1/redis 存在, 则 REDIS_DATA_DIR=/data1 +// 否则,如果 /data/redis, 则 REDIS_DATA_DIR=/data +// 否则,如果 /data1 是挂载点, 则 REDIS_DATA_DIR=/data1 +// 否则,如果 /data 是挂载点, 则 REDIS_DATA_DIR=/data +// 否则,REDIS_DATA_DIR=/data1 +func SetRedisDataDir(dataDir string) (err error) { + if dataDir == "" { + envDir := os.Getenv("REDIS_DATA_DIR") + if envDir != "" { // 环境变量 REDIS_DATA_DIR 不为空 + dataDir = envDir + } else { + if fileExists(filepath.Join(Data1Path, "redis")) { + // /data1/redis 存在 + dataDir = Data1Path + } else if fileExists(filepath.Join(DataPath, "redis")) { + // /data/redis 存在 + dataDir = DataPath + } else if IsMountPoint2(Data1Path) { + // /data1是挂载点 + dataDir = Data1Path + } else if IsMountPoint2(DataPath) { + // /data是挂载点 + dataDir = DataPath + } else { + // 函数参数 dataDir为空, 环境变量 REDIS_DATA_DIR 为空 + // /data1 和 /data 均不是挂载点 + // 强制指定 REDIS_DATA_DIR=/data1 + dataDir = Data1Path + } + } + } + dataDir = strings.TrimSpace(dataDir) + var ret []byte + shCmd := fmt.Sprintf(` +ret=$(grep '^export REDIS_DATA_DIR=' /etc/profile) +if [[ -z $ret ]] +then +echo "export REDIS_DATA_DIR=%s">>/etc/profile +fi + `, dataDir) + ret, err = exec.Command("bash", "-c", shCmd).Output() + if err != nil { + err = fmt.Errorf("SetRedisDataDir failed,err:%v,ret:%s,shCmd:%s", err, string(ret), shCmd) + return + } + os.Setenv("REDIS_DATA_DIR", dataDir) + return nil +} + +// GetRedisDataDir 获取环境变量 REDIS_DATA_DIR,不为空直接返回, +// 否则,如果目录 /data1/redis存在,返回 /data1; +// 否则,如果目录 /data/redis存在,返回 /data; +// 否则,返回 /data1 +func GetRedisDataDir() string { + dataDir := os.Getenv("REDIS_DATA_DIR") + if dataDir == "" { + if fileExists(filepath.Join(Data1Path, "redis")) { + // /data1/redis 存在 + dataDir = Data1Path + } else if fileExists(filepath.Join(DataPath, "redis")) { + // /data/redis 存在 + dataDir = DataPath + } else { + dataDir = Data1Path + } + } + return dataDir +} + +// SetRedisBakcupDir 设置环境变量 REDIS_BACKUP_DIR ,并持久化到/etc/profile中 +// 如果函数参数 backupDir 不为空,则 REDIS_BACKUP_DIR = {backupDir} +// 否则,如果环境变量 REDIS_BACKUP_DIR 不为空,则直接读取; +// 否则,如果 /data/dbbak 存在, 则 REDIS_BACKUP_DIR=/data +// 否则,如果 /data1/dbbak 存在, 则 REDIS_BACKUP_DIR=/data1 +// 否则,如果 /data 是挂载点, 则 REDIS_BACKUP_DIR=/data +// 否则,如果 /data1 是挂载点, 则 REDIS_BACKUP_DIR=/data1 +// 否则,REDIS_BACKUP_DIR=/data +func SetRedisBakcupDir(backupDir string) (err error) { + if backupDir == "" { + envDir := os.Getenv("REDIS_BACKUP_DIR") + if envDir != "" { + backupDir = envDir + } else { + if fileExists(filepath.Join(DataPath, "dbbak")) { + // /data/dbbak 存在 + backupDir = DataPath + } else if fileExists(filepath.Join(Data1Path, "dbbak")) { + // /data1/dbbak 存在 + backupDir = Data1Path + } else if IsMountPoint2(DataPath) { + // /data是挂载点 + backupDir = DataPath + } else if IsMountPoint2(Data1Path) { + // /data1是挂载点 + backupDir = Data1Path + } else { + // 函数参数 backupDir 为空, 环境变量 REDIS_BACKUP_DIR 为空 + // /data1 和 /data 均不是挂载点 + // 
强制指定 REDIS_BACKUP_DIR=/data + backupDir = DataPath + } + } + } + backupDir = strings.TrimSpace(backupDir) + var ret []byte + shCmd := fmt.Sprintf(` +ret=$(grep '^export REDIS_BACKUP_DIR=' /etc/profile) +if [[ -z $ret ]] +then +echo "export REDIS_BACKUP_DIR=%s">>/etc/profile +fi + `, backupDir) + ret, err = exec.Command("bash", "-c", shCmd).Output() + if err != nil { + err = fmt.Errorf("SetRedisBakcupDir failed,err:%v,ret:%s", err, string(ret)) + return + } + os.Setenv("REDIS_BACKUP_DIR", backupDir) + return nil +} + +// GetRedisBackupDir 获取环境变量 REDIS_BACKUP_DIR,默认值 /data +// 否则,如果目录 /data/dbbak 存在,返回 /data; +// 否则,如果目录 /data1/dbbak 存在,返回 /data1; +// 否则,返回 /data +func GetRedisBackupDir() string { + dataDir := os.Getenv("REDIS_BACKUP_DIR") + if dataDir == "" { + if fileExists(filepath.Join(DataPath, "dbbak")) { + // /data/dbbak 存在 + dataDir = DataPath + } else if fileExists(filepath.Join(Data1Path, "dbbak")) { + // /data1/dbbak 存在 + dataDir = Data1Path + } else { + dataDir = DataPath + } + } + return dataDir +} + +// SetMongoDataDir 设置环境变量 MONGO_DATA_DIR,并持久化到/etc/profile中 +// 如果函数参数 dataDir 不为空,则 MONGO_DATA_DIR = {dataDir} +// 否则,如果环境变量 MONGO_DATA_DIR 不为空,则直接读取; +// 否则,如果 /data1/redis 存在, 则 MONGO_DATA_DIR=/data1 +// 否则,如果 /data/redis, 则 MONGO_DATA_DIR=/data +// 否则,如果 /data1 是挂载点, 则 MONGO_DATA_DIR=/data1 +// 否则,如果 /data 是挂载点, 则 MONGO_DATA_DIR=/data +// 否则,MONGO_DATA_DIR=/data1 +func SetMongoDataDir(dataDir string) (err error) { + if dataDir == "" { + envDir := os.Getenv("MONGO_DATA_DIR") + if envDir != "" { // 环境变量 REDIS_DATA_DIR 不为空 + dataDir = envDir + } else { + if fileExists(filepath.Join(Data1Path, "mongodata")) { + // /data1/mongodata 存在 + dataDir = Data1Path + } else if fileExists(filepath.Join(DataPath, "mongodata")) { + // /data/mongodata 存在 + dataDir = DataPath + } else if IsMountPoint2(Data1Path) { + // /data1是挂载点 + dataDir = Data1Path + } else if IsMountPoint2(DataPath) { + // /data是挂载点 + dataDir = DataPath + } else { + // 函数参数 dataDir为空, 环境变量 MONGO_DATA_DIR 为空 + // /data1 和 /data 均不是挂载点 + // 强制指定 MONGO_DATA_DIR=/data1 + dataDir = Data1Path + } + } + } + dataDir = strings.TrimSpace(dataDir) + var ret []byte + shCmd := fmt.Sprintf(` +ret=$(grep '^export MONGO_DATA_DIR=' /etc/profile) +if [[ -z $ret ]] +then +echo "export MONGO_DATA_DIR=%s">>/etc/profile +fi + `, dataDir) + ret, err = exec.Command("bash", "-c", shCmd).Output() + if err != nil { + err = fmt.Errorf("SetMongoDataDir failed,err:%v,ret:%s,shCmd:%s", err, string(ret), shCmd) + return + } + os.Setenv("MONGO_DATA_DIR", dataDir) + return nil +} + +// GetMongoDataDir 获取环境变量 MONGO_DATA_DIR,不为空直接返回, +// 否则,如果目录 /data1/mongodata存在,返回 /data1; +// 否则,如果目录 /data/mongodata存在,返回 /data; +// 否则,返回 /data1 +func GetMongoDataDir() string { + dataDir := os.Getenv("MONGO_DATA_DIR") + if dataDir == "" { + if fileExists(filepath.Join(Data1Path, "mongodata")) { + // /data1/mongodata 存在 + dataDir = Data1Path + } else if fileExists(filepath.Join(DataPath, "mongodata")) { + // /data/mongodata 存在 + dataDir = DataPath + } else { + dataDir = Data1Path + } + } + return dataDir +} + +// SetMongoBackupDir 设置环境变量 MONGO_BACKUP_DIR ,并持久化到/etc/profile中 +// 如果函数参数 backupDir 不为空,则 MONGO_BACKUP_DIR = {backupDir} +// 否则,如果环境变量 MONGO_BACKUP_DIR 不为空,则直接读取; +// 否则,如果 /data/dbbak 存在, 则 MONGO_BACKUP_DIR=/data +// 否则,如果 /data1/dbbak 存在, 则 MONGO_BACKUP_DIR=/data1 +// 否则,如果 /data 是挂载点, 则 MONGO_BACKUP_DIR=/data +// 否则,如果 /data1 是挂载点, 则 MONGO_BACKUP_DIR=/data1 +// 否则,MONGO_BACKUP_DIR=/data +func SetMongoBackupDir(backupDir string) (err error) { + if backupDir == "" { + 
envDir := os.Getenv("MONGO_BACKUP_DIR") + if envDir != "" { + backupDir = envDir + } else { + if fileExists(filepath.Join(DataPath, "dbbak")) { + // /data/dbbak 存在 + backupDir = DataPath + } else if fileExists(filepath.Join(Data1Path, "dbbak")) { + // /data1/dbbak 存在 + backupDir = Data1Path + } else if IsMountPoint2(DataPath) { + // /data是挂载点 + backupDir = DataPath + } else if IsMountPoint2(Data1Path) { + // /data1是挂载点 + backupDir = Data1Path + } else { + // 函数参数 backupDir 为空, 环境变量 MONGO_BACKUP_DIR 为空 + // /data1 和 /data 均不是挂载点 + // 强制指定 MONGO_BACKUP_DIR=/data + backupDir = DataPath + } + } + } + backupDir = strings.TrimSpace(backupDir) + var ret []byte + shCmd := fmt.Sprintf(` +ret=$(grep '^export MONGO_BACKUP_DIR=' /etc/profile) +if [[ -z $ret ]] +then +echo "export MONGO_BACKUP_DIR=%s">>/etc/profile +fi + `, backupDir) + ret, err = exec.Command("bash", "-c", shCmd).Output() + if err != nil { + err = fmt.Errorf("SetMongoBakcupDir failed,err:%v,ret:%s", err, string(ret)) + return + } + os.Setenv("MONGO_BACKUP_DIR", backupDir) + return nil +} + +// GetMongoBackupDir 获取环境变量 MONGO_BACKUP_DIR,默认值 /data +// 否则,如果目录 /data/dbbak 存在,返回 /data; +// 否则,如果目录 /data1/dbbak 存在,返回 /data1; +// 否则,返回 /data +func GetMongoBackupDir() string { + dataDir := os.Getenv("MONGO_BACKUP_DIR") + if dataDir == "" { + if fileExists(filepath.Join(DataPath, "dbbak")) { + // /data/dbbak 存在 + dataDir = DataPath + } else if fileExists(filepath.Join(Data1Path, "dbbak")) { + // /data1/dbbak 存在 + dataDir = Data1Path + } else { + dataDir = DataPath + } + } + return dataDir +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/consts/dts.go b/dbm-services/redis/db-tools/dbactuator/pkg/consts/dts.go new file mode 100644 index 0000000000..342f035d3d --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/consts/dts.go @@ -0,0 +1,25 @@ +package consts + +// dts type +const ( + DtsTypeOneAppDiffCluster = "one_app_diff_cluster" // 一个业务下的不同集群 + DtsTypeDiffAppDiffCluster = "diff_app_diff_cluster" // 不同业务下的不同集群 + DtsTypeSyncToOtherSystem = "sync_to_other_system" // 同步到其他系统,如迁移到腾讯云 + DtsTypeUserBuiltToDbm = "user_built_to_dbm" // 用户自建redis到dbm系统 +) + +// IsDtsTypeSrcClusterBelongDbm (该dst类型中)源集群是否属于dbm系统 +func IsDtsTypeSrcClusterBelongDbm(dtsType string) bool { + if dtsType == DtsTypeOneAppDiffCluster || + dtsType == DtsTypeDiffAppDiffCluster || + dtsType == DtsTypeSyncToOtherSystem { + return true + } + return false +} + +// dts datacheck mode +const ( + DtsDataCheckByKeysFileMode = "bykeysfile" // 基于key提取结果,做数据校验 + DtsDataCheckByScanMode = "byscan" // 通过scan命令获取key名,做数据校验 +) diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/consts/test.go b/dbm-services/redis/db-tools/dbactuator/pkg/consts/test.go new file mode 100644 index 0000000000..8dd5594c3e --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/consts/test.go @@ -0,0 +1,94 @@ +package consts + +import "fmt" + +// test consts +const ( + // ----- tendisplus master指定的端口范围 [11000,11999] ------ + // ----- tendisplus slave指定的端口范围 [12000,12999] ------ + // TestTendisPlusMasterStartPort master start port + TestTendisPlusMasterStartPort = 11000 + // TestTendisPlusSlaveStartPort slave start port + TestTendisPlusSlaveStartPort = 12000 + + // ExpansionTestTendisPlusMasterStartPort master start port + ExpansionTestTendisPlusMasterStartPort = 11100 + // ExpansionTestTendisPlusSlaveStartPort slave start port + ExpansionTestTendisPlusSlaveStartPort = 12100 + + // SlotTestTendisPlusMasterPort master start port + SlotTestTendisPlusMasterPort = 11200 + // 
SLotTestTendisPlusSlaveStart slave start port + SLotTestTendisPlusSlaveStart = 12200 + // SlotsMigrateTest 指定迁移slot + SlotsMigrateTest = "0-100" + + // TestSyncTendisPlusMasterStartPort make sync /redo slave + TestSyncTendisPlusMasterStartPort = 11300 + // TestSyncTendisPlusSlaveStartPort make sync / + TestSyncTendisPlusSlaveStartPort = 12300 + + // ----- cache redis master指定的端口范围 [13000,13999] ------ + // ----- cache redis slave指定的端口范围 [14000,14999] ------ + + // TestRedisMasterStartPort master start port + TestRedisMasterStartPort = 13000 + // TestRedisSlaveStartPort slave start port + TestRedisSlaveStartPort = 14000 + + // TestSyncRedisMasterStartPort make sync /redo slave + TestSyncRedisMasterStartPort = 13300 + // TestSyncRedisSlaveStartPort make sync / + TestSyncRedisSlaveStartPort = 14300 + + // ----- tendisssd master指定的端口范围 [14000,14999] ------ + // ----- tendisssd slave指定的端口范围 [15000,15999] ------ + + // TestTendisSSDMasterStartPort master start port + TestTendisSSDMasterStartPort = 15000 + // TestTendisSSDSlaveStartPort slave start port + TestTendisSSDSlaveStartPort = 16000 + + // TestTwemproxyPort twemproxy port + TestTwemproxyPort = 50100 + // TestPredixyPort predixy port + TestPredixyPort = 50200 + // TestSSDClusterTwemproxyPort twemproxy port + TestSSDClusterTwemproxyPort = 50300 + + // TestRedisInstanceNum instance number + TestRedisInstanceNum = 4 + + // ExpansionTestRedisInstanceNum instance number + ExpansionTestRedisInstanceNum = 2 + // SLotTestRedisInstanceNum instance number + SLotTestRedisInstanceNum = 1 +) +const ( + // RedisTestPasswd redis test password + RedisTestPasswd = "redisPassTest" + // ProxyTestPasswd proxy test password + ProxyTestPasswd = "proxyPassTest" +) + +// test uid/rootid/nodeid +const ( + TestUID = 1111 + TestRootID = 2222 + TestNodeID = 3333 +) + +var ( + // ActuatorTestCmd actuator测试命令 + ActuatorTestCmd = fmt.Sprintf( + // NOCC:tosa/linelength(设计如此) + "cd %s && ./dbactuator_redis --uid=%d --root_id=%d --node_id=%d --version_id=v1 --atom-job-list=%%q --payload=%%q --payload-format=raw", + PackageSavePath, TestUID, TestRootID, TestNodeID) +) + +const ( + // PayloadFormatRaw raw + PayloadFormatRaw = "raw" + // PayloadFormatBase64 base64 + PayloadFormatBase64 = "base64" +) diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/consts/user.go b/dbm-services/redis/db-tools/dbactuator/pkg/consts/user.go new file mode 100644 index 0000000000..f19fa9bb06 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/consts/user.go @@ -0,0 +1,80 @@ +package consts + +import ( + "fmt" + "os" + "os/exec" +) + +// SetProcessUser 设置os用户 +func SetProcessUser(user string) error { + // 如果有user参数,设置环境变量 + if user != "" { + envUser := os.Getenv("PROCESS_EXEC_USER") + if envUser == user { + return nil + } + envUser = user + var ret []byte + shCmd := fmt.Sprintf(` +ret=$(grep '^export PROCESS_EXEC_USER=' /etc/profile) +if [[ -z $ret ]] +then +echo "export PROCESS_EXEC_USER=%s">>/etc/profile +fi + `, envUser) + ret, err := exec.Command("bash", "-c", shCmd).Output() + if err != nil { + err = fmt.Errorf("SetProcessUser failed,err:%v,ret:%s", err, string(ret)) + return err + } + os.Setenv("PROCESS_EXEC_USER", envUser) + } + return nil +} + +// GetProcessUser 获取os用户 +func GetProcessUser() string { + envUser := os.Getenv("PROCESS_EXEC_USER") + if envUser == "" { + return OSAccount + } + return envUser +} + +// SetProcessUserGroup 设置os用户Group +func SetProcessUserGroup(group string) error { + // 如果有user参数,设置环境变量 + if group != "" { + envGroup := 
os.Getenv("PROCESS_EXEC_USER_GROUP") + if envGroup == group { + return nil + } + envGroup = group + var ret []byte + shCmd := fmt.Sprintf(` +ret=$(grep '^export PROCESS_EXEC_USER_GROUP=' /etc/profile) +if [[ -z $ret ]] +then +echo "export PROCESS_EXEC_USER_GROUP=%s">>/etc/profile +fi + `, envGroup) + ret, err := exec.Command("bash", "-c", shCmd).Output() + if err != nil { + err = fmt.Errorf("SetProcessUserGroup failed,err:%v,ret:%s", err, string(ret)) + return err + } + os.Setenv("PROCESS_EXEC_USER_GROUP", envGroup) + + } + return nil +} + +// GetProcessUserGroup 获取os用户group +func GetProcessUserGroup() string { + envGroup := os.Getenv("PROCESS_EXEC_USER_GROUP") + if envGroup == "" { + return OSGroup + } + return envGroup +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/customtime/customtime.go b/dbm-services/redis/db-tools/dbactuator/pkg/customtime/customtime.go new file mode 100644 index 0000000000..3e9c9f8150 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/customtime/customtime.go @@ -0,0 +1,76 @@ +// Package customtime 自定义time +package customtime + +import ( + "database/sql/driver" + "fmt" + "strings" + "time" +) + +// CustomTime 自定义时间类型 +type CustomTime struct { + time.Time +} + +const ctLayout = "2006-01-02 15:04:05" + +var nilTime = (time.Time{}).UnixNano() + +// UnmarshalJSON .. +func (ct *CustomTime) UnmarshalJSON(b []byte) (err error) { + s := strings.Trim(string(b), "\"") + if s == "null" || s == "" { + ct.Time = time.Time{} + return + } + ct.Time, err = time.ParseInLocation(ctLayout, s, time.Local) + return +} + +// MarshalJSON .. +func (ct CustomTime) MarshalJSON() ([]byte, error) { + if ct.Time.UnixNano() == nilTime { + return []byte("null"), nil + } + return []byte(fmt.Sprintf("\"%s\"", ct.Time.Format(ctLayout))), nil +} + +// Scan scan +func (ct *CustomTime) Scan(value interface{}) error { + switch v := value.(type) { + case []byte: + return ct.UnmarshalText(string(v)) + case string: + return ct.UnmarshalText(v) + case time.Time: + ct.Time = v + case nil: + ct.Time = time.Time{} + default: + return fmt.Errorf("cannot sql.Scan() CustomTime from: %#v", v) + } + return nil +} + +// UnmarshalText unmarshal ... +func (ct *CustomTime) UnmarshalText(value string) error { + dd, err := time.ParseInLocation(ctLayout, value, time.Local) + if err != nil { + return err + } + ct.Time = dd + return nil +} + +// Value .. +// 注意这里ct不能是指针 +// 参考文章:https://www.codenong.com/44638610/ +func (ct CustomTime) Value() (driver.Value, error) { + return driver.Value(ct.Local().Format(ctLayout)), nil +} + +// IsSet .. 
+func (ct *CustomTime) IsSet() bool { + return ct.UnixNano() != nilTime +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/jobmanager/jobmanager.go b/dbm-services/redis/db-tools/dbactuator/pkg/jobmanager/jobmanager.go new file mode 100644 index 0000000000..8434997d03 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/jobmanager/jobmanager.go @@ -0,0 +1,195 @@ +// Package jobmanager 原子任务工厂类 与 管理类 +package jobmanager + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atommongodb" + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy" + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys" + "dbm-services/redis/db-tools/dbactuator/pkg/jobruntime" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "fmt" + "log" + "runtime/debug" + "strings" + "sync" + "time" +) + +// AtomJobCreatorFunc 原子任务创建接口 +type AtomJobCreatorFunc func() jobruntime.JobRunner + +// JobGenericManager 原子任务管理者 +type JobGenericManager struct { + Runners []jobruntime.JobRunner `json:"runners"` + atomJobMapper map[string]AtomJobCreatorFunc + once sync.Once + runtime *jobruntime.JobGenericRuntime +} + +// NewJobGenericManager new +func NewJobGenericManager(uid, rootID, nodeID, versionID, payload, payloadFormat, atomJobs, baseDir string) ( + ret *JobGenericManager, err error) { + runtime, err := jobruntime.NewJobGenericRuntime(uid, rootID, nodeID, versionID, + payload, payloadFormat, atomJobs, baseDir) + if err != nil { + log.Panicf(err.Error()) + } + ret = &JobGenericManager{ + runtime: runtime, + } + return +} + +// LoadAtomJobs 加载子任务 +func (m *JobGenericManager) LoadAtomJobs() (err error) { + defer func() { + // err最后输出到标准错误 + if err != nil { + m.runtime.PrintToStderr(err.Error()) + } + }() + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("%s", (debug.Stack())) + } + }() + m.runtime.AtomJobList = strings.TrimSpace(m.runtime.AtomJobList) + if m.runtime.AtomJobList == "" { + err = fmt.Errorf("atomJobList(%s) cannot be empty", m.runtime.AtomJobList) + m.runtime.Logger.Error(err.Error()) + return + } + jobList := strings.Split(m.runtime.AtomJobList, ",") + for _, atomName := range jobList { + atomName = strings.TrimSpace(atomName) + if atomName == "" { + continue + } + atom := m.GetAtomJobInstance(atomName) + if atom == nil { + err = fmt.Errorf("atomJob(%s) not found", atomName) + m.runtime.Logger.Error(err.Error()) + return + } + m.Runners = append(m.Runners, atom) + m.runtime.Logger.Info(fmt.Sprintf("atomJob:%s instance load success", atomName)) + } + return +} + +// RunAtomJobs 顺序执行原子任务 +func (m *JobGenericManager) RunAtomJobs() (err error) { + defer func() { + // err最后输出到标准错误 + if err != nil { + m.runtime.PrintToStderr(err.Error() + "\n") + } + }() + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("%s", string(debug.Stack())) + } + }() + + m.runtime.StartHeartbeat(10 * time.Second) + + defer m.runtime.StopHeartbeat() + + for _, runner := range m.Runners { + name := util.GetTypeName(runner) + m.runtime.Logger.Info(fmt.Sprintf("begin to run %s init", name)) + if err = runner.Init(m.runtime); err != nil { + return + } + m.runtime.Logger.Info(fmt.Sprintf("begin to run %s", name)) + err = runner.Run() + if err != nil { + m.runtime.Logger.Info(fmt.Sprintf("runner %s run failed,err:%s", name, err)) + // err = runner.Rollback() + // if err != nil { + // err = fmt.Errorf("runner %s rollback failed,err:%+v", name, err) + // m.runtime.Logger.Error(err.Error()) + // return + // } + 
// m.runtime.Logger.Info(fmt.Sprintf("runner %s rollback success!!!", name)) + return + } + m.runtime.Logger.Info(fmt.Sprintf("finished run %s", name)) + } + m.runtime.Logger.Info(fmt.Sprintf("run all atomJobList:%s success", m.runtime.AtomJobList)) + + m.runtime.OutputPipeContextData() + return +} + +func (m *JobGenericManager) atomjobsMapperLoading() { + m.once.Do(func() { + m.atomJobMapper = make(map[string]AtomJobCreatorFunc) + m.atomJobMapper[atomsys.NewSysInit().Name()] = atomsys.NewSysInit + + // redis atom jobs + m.atomJobMapper[atomredis.NewRedisInstall().Name()] = atomredis.NewRedisInstall + m.atomJobMapper[atomredis.NewRedisReplicaOf().Name()] = atomredis.NewRedisReplicaOf + m.atomJobMapper[atomredis.NewRedisReplicaBatch().Name()] = atomredis.NewRedisReplicaBatch + m.atomJobMapper[atomredis.NewClusterMeetSlotsAssign().Name()] = atomredis.NewClusterMeetSlotsAssign + m.atomJobMapper[atomproxy.NewTwemproxyInstall().Name()] = atomproxy.NewTwemproxyInstall + m.atomJobMapper[atomredis.NewRedisBackup().Name()] = atomredis.NewRedisBackup + m.atomJobMapper[atomredis.NewTendisKeysPattern().Name()] = atomredis.NewTendisKeysPattern + m.atomJobMapper[atomredis.NewTendisKeysPatternDelete().Name()] = atomredis.NewTendisKeysPatternDelete + m.atomJobMapper[atomredis.NewTendisKeysFilesDelete().Name()] = atomredis.NewTendisKeysFilesDelete + m.atomJobMapper[atomproxy.NewPredixyInstall().Name()] = atomproxy.NewPredixyInstall + m.atomJobMapper[atomredis.NewTendisssdDrRestore().Name()] = atomredis.NewTendisssdDrRestore + m.atomJobMapper[atomproxy.NewTwemproxyOperate().Name()] = atomproxy.NewTwemproxyOperate + m.atomJobMapper[atomproxy.NewPredixyOperate().Name()] = atomproxy.NewPredixyOperate + m.atomJobMapper[atomredis.NewRedisShutdown().Name()] = atomredis.NewRedisShutdown + m.atomJobMapper[atomredis.NewRedisFlushData().Name()] = atomredis.NewRedisFlushData + m.atomJobMapper[atomsys.NewRedisCapturer().Name()] = atomsys.NewRedisCapturer + m.atomJobMapper[atomredis.NewRedisSwitch().Name()] = atomredis.NewRedisSwitch + m.atomJobMapper[atomredis.NewBkDbmonInstall().Name()] = atomredis.NewBkDbmonInstall + m.atomJobMapper[atomredis.NewTendisPlusMigrateSlots().Name()] = atomredis.NewTendisPlusMigrateSlots + m.atomJobMapper[atomredis.NewRedisDtsDataCheck().Name()] = atomredis.NewRedisDtsDataCheck + m.atomJobMapper[atomredis.NewRedisDtsDataRepaire().Name()] = atomredis.NewRedisDtsDataRepaire + // scene needs. 
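+ // (usage sketch: a caller resolves a constructor via GetAtomJobInstance(name), whose
+ // lookup lowercases its argument, then chains Init(runtime) and Run(); see RunAtomJobs)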
+ m.atomJobMapper[atomproxy.NewTwemproxySceneCheckBackends().Name()] = atomproxy.NewTwemproxySceneCheckBackends + m.atomJobMapper[atomredis.NewRedisSceneSyncCheck().Name()] = atomredis.NewRedisSceneSyncCheck + m.atomJobMapper[atomredis.NewRedisSceneKillDeadConn().Name()] = atomredis.NewRedisSceneKillDeadConn + m.atomJobMapper[atomredis.NewRedisSceneSyncPrams().Name()] = atomredis.NewRedisSceneSyncPrams + + // mongo atom jobs + m.atomJobMapper[atommongodb.NewMongoDBInstall().Name()] = atommongodb.NewMongoDBInstall + m.atomJobMapper[atommongodb.NewMongoSInstall().Name()] = atommongodb.NewMongoSInstall + m.atomJobMapper[atommongodb.NewInitiateReplicaset().Name()] = atommongodb.NewInitiateReplicaset + m.atomJobMapper[atommongodb.NewAddShardToCluster().Name()] = atommongodb.NewAddShardToCluster + m.atomJobMapper[atommongodb.NewAddUser().Name()] = atommongodb.NewAddUser + m.atomJobMapper[atommongodb.NewDelUser().Name()] = atommongodb.NewDelUser + m.atomJobMapper[atommongodb.NewMongoDReplace().Name()] = atommongodb.NewMongoDReplace + m.atomJobMapper[atommongodb.NewMongoRestart().Name()] = atommongodb.NewMongoRestart + m.atomJobMapper[atommongodb.NewStepDown().Name()] = atommongodb.NewStepDown + m.atomJobMapper[atommongodb.NewBalancer().Name()] = atommongodb.NewBalancer + m.atomJobMapper[atommongodb.NewDeInstall().Name()] = atommongodb.NewDeInstall + m.atomJobMapper[atommongodb.NewExecScript().Name()] = atommongodb.NewExecScript + m.atomJobMapper[atommongodb.NewSetProfiler().Name()] = atommongodb.NewSetProfiler + m.atomJobMapper[atomsys.NewOsMongoInit().Name()] = atomsys.NewOsMongoInit + }) +} + +// SupportAtomJobs 返回支持的atomJob列表 +func (m *JobGenericManager) SupportAtomJobs() []string { + m.atomjobsMapperLoading() + atomJobs := make([]string, 0, len(m.atomJobMapper)) + for k := range m.atomJobMapper { + atomJobs = append(atomJobs, k) + } + return atomJobs +} + +// GetAtomJobInstance 根据atomJobName,从m.atomJobMapper中获取其creator函数,执行creator函数 +func (m *JobGenericManager) GetAtomJobInstance(atomJob string) jobruntime.JobRunner { + m.atomjobsMapperLoading() + creator, ok := m.atomJobMapper[strings.ToLower(atomJob)] + if ok { + return creator() + } + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/jobruntime/jobrunner.go b/dbm-services/redis/db-tools/dbactuator/pkg/jobruntime/jobrunner.go new file mode 100644 index 0000000000..26c2eb74cc --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/jobruntime/jobrunner.go @@ -0,0 +1,19 @@ +package jobruntime + +// JobRunner defines a behavior of a job +type JobRunner interface { + // Init doing some operation before run a job + // such as reading parametes + Init(*JobGenericRuntime) error + + // Name return the name of the job + Name() string + + // Run run a job + Run() error + + Retry() uint + + // Rollback you can define some rollback logic here when job fails + Rollback() error +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/jobruntime/jobruntime.go b/dbm-services/redis/db-tools/dbactuator/pkg/jobruntime/jobruntime.go new file mode 100644 index 0000000000..78b1119be9 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/jobruntime/jobruntime.go @@ -0,0 +1,158 @@ +// Package jobruntime 全局操作、全局变量 +package jobruntime + +import ( + "context" + "dbm-services/common/go-pubpkg/logger" + "dbm-services/redis/db-tools/dbactuator/mylog" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/base64" + "encoding/json" + "fmt" + "log" + "os" + "os/exec" + 
"path/filepath" + "time" +) + +const ( + logDir = "logs/" +) + +// JobGenericRuntime job manager +type JobGenericRuntime struct { + UID string `json:"uid"` // 单据ID + RootID string `json:"rootId"` // 流程ID + NodeID string `json:"nodeId"` // 节点ID + VersionID string `json:"versionId"` // 运行版本ID + PayloadEncoded string `json:"payloadEncoded"` // 参数encoded + PayloadDecoded string `json:"payloadDecoded"` // 参数decoded + PayLoadFormat string `json:"payloadFormat"` // payload的内容格式,raw/base64 + AtomJobList string `json:"atomJobList"` // 原子任务列表,逗号分割 + BaseDir string `json:"baseDir"` + // ShareData保存多个atomJob间的中间结果,前后atomJob可通过ShareData通信 + ShareData interface{} `json:"shareData"` + // PipeContextData保存流程调用,上下文结果 + // PipeContextData=>json.Marshal=>Base64=>标准输出打印{Result} + PipeContextData interface{} `json:"pipeContextData"` + Logger *logger.Logger `json:"-"` // 线程安全日志输出 + ctx context.Context `json:"-"` + cancelFunc context.CancelFunc `json:"-"` + Err error +} + +// NewJobGenericRuntime new +func NewJobGenericRuntime(uid, rootID string, + nodeID, versionID, payload, payloadFormat, atomJobs, baseDir string) (ret *JobGenericRuntime, err error) { + ret = &JobGenericRuntime{ + UID: uid, + RootID: rootID, + NodeID: nodeID, + VersionID: versionID, + PayloadEncoded: payload, + PayLoadFormat: payloadFormat, + AtomJobList: atomJobs, + BaseDir: baseDir, + ShareData: nil, + } + + if ret.PayLoadFormat == consts.PayloadFormatRaw { + ret.PayloadDecoded = ret.PayloadEncoded + } else { + var decodedStr []byte + decodedStr, err = base64.StdEncoding.DecodeString(ret.PayloadEncoded) + if err != nil { + log.Printf("Base64.DecodeString failed,err:%v,encodedString:%s", err, ret.PayloadEncoded) + os.Exit(0) + } + ret.PayloadDecoded = string(decodedStr) + // log.Printf("===========PayloadDecoded========") + // log.Printf(ret.PayloadDecoded) + } + ret.ctx, ret.cancelFunc = context.WithCancel(context.TODO()) + ret.SetLogger() + return +} + +// SetLogger set logger +func (r *JobGenericRuntime) SetLogger() { + var err error + logFile := fmt.Sprintf("redis_actuator_%s_%s.log", r.UID, r.NodeID) + err = util.MkDirsIfNotExists([]string{logDir}) + if err != nil { + panic(err) + } + + logFilePath := filepath.Join(logDir, logFile) + file, err := os.OpenFile(logFilePath, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm) + if err != nil { + panic(err) + } + extMap := map[string]string{ + "uid": r.UID, + "node_id": r.NodeID, + "root_id": r.RootID, + "version_id": r.VersionID, + } + r.Logger = logger.New(file, true, logger.InfoLevel, extMap) + r.Logger.Sync() + mylog.SetDefaultLogger(r.Logger) + + // 修改日志目录owner + chownCmd := fmt.Sprintf("chown -R %s.%s %s", consts.MysqlAaccount, consts.MysqlGroup, logDir) + cmd := exec.Command("bash", "-c", chownCmd) + cmd.Run() +} + +// PrintToStdout 打印到标准输出 +func (r *JobGenericRuntime) PrintToStdout(format string, args ...interface{}) { + fmt.Fprintf(os.Stdout, format, args...) +} + +// PrintToStderr 打印到标准错误 +func (r *JobGenericRuntime) PrintToStderr(format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, format, args...) 
+} + +// OutputPipeContextData PipeContextData=>json.Marshal=>Base64=>标准输出打印{Result} +func (r *JobGenericRuntime) OutputPipeContextData() { + if r.PipeContextData == nil { + r.Logger.Info("no PipeContextData to output") + return + } + tmpBytes, err := json.Marshal(r.PipeContextData) + if err != nil { + r.Err = fmt.Errorf("json.Marshal PipeContextData failed,err:%v", err) + r.Logger.Error(r.Err.Error()) + return + } + // decode函数: base64.StdEncoding.DecodeString + base64Ret := base64.StdEncoding.EncodeToString(tmpBytes) + r.PrintToStdout("" + base64Ret + "") +} + +// StartHeartbeat 开始心跳 +func (r *JobGenericRuntime) StartHeartbeat(period time.Duration) { + go func() { + ticker := time.NewTicker(period) + defer ticker.Stop() + var heartbeatTime string + for { + select { + case <-ticker.C: + heartbeatTime = time.Now().Local().Format(consts.UnixtimeLayout) + r.PrintToStdout("[" + heartbeatTime + "]heartbeat\n") + case <-r.ctx.Done(): + r.Logger.Info("stop heartbeat") + return + } + } + }() +} + +// StopHeartbeat 结束心跳 +func (r *JobGenericRuntime) StopHeartbeat() { + r.cancelFunc() +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/report/filereport.go b/dbm-services/redis/db-tools/dbactuator/pkg/report/filereport.go new file mode 100644 index 0000000000..d96c756523 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/report/filereport.go @@ -0,0 +1,100 @@ +// Package report (备份等)记录上报 +package report + +import ( + "bufio" + "dbm-services/redis/db-tools/dbactuator/mylog" + "fmt" + "os" + "sync" +) + +var _ Reporter = (*FileReport)(nil) + +// FileReport 文件上报 +type FileReport struct { + saveFile string + fileP *os.File + bufWriter *bufio.Writer + mux sync.Mutex // 并发安全写入 +} + +// NewFileReport new +func NewFileReport(savefile string) (ret *FileReport, err error) { + ret = &FileReport{} + err = ret.SetSaveFile(savefile) + return ret, err +} + +// AddRecord 新增记录 +func (f *FileReport) AddRecord(item string, flush bool) (err error) { + if f.saveFile == "" { + err = fmt.Errorf("saveFile(%s) can't be empty", f.saveFile) + mylog.Logger.Error(err.Error()) + return + } + _, err = f.bufWriter.WriteString(item) + if err != nil { + err = fmt.Errorf("bufio.Writer WriteString fail,err:%v,saveFile:%s", err, f.saveFile) + mylog.Logger.Error(err.Error()) + return + } + if flush == true { + f.bufWriter.Flush() + } + return nil +} + +// SaveFile .. 
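+// (usage sketch, path assumed writable: r, _ := NewFileReport("/data/dbbak/report.log"),
+// then r.AddRecord(line+"\n", true) per record, and r.Close() when done)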
+func (f *FileReport) SaveFile() string {
+ return f.saveFile
+}
+
+// SetSaveFile setter
+func (f *FileReport) SetSaveFile(savefile string) error {
+ var err error
+ err = f.Close()
+ if err != nil {
+ return err
+ }
+ if savefile == "" {
+ err = fmt.Errorf("saveFile(%s) cannot be empty", savefile)
+ mylog.Logger.Error(err.Error())
+ return err
+ }
+ f.saveFile = savefile
+ f.fileP, err = os.OpenFile(savefile, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644)
+ if err != nil {
+ err = fmt.Errorf("open file:%s fail,err:%v", savefile, err)
+ mylog.Logger.Error(err.Error())
+ return err
+ }
+ f.bufWriter = bufio.NewWriter(f.fileP)
+ return nil
+}
+
+// Close file
+func (f *FileReport) Close() error {
+ f.mux.Lock()
+ defer f.mux.Unlock()
+
+ var err error
+ if f.saveFile == "" {
+ return nil
+ }
+
+ err = f.bufWriter.Flush()
+ if err != nil {
+ err = fmt.Errorf("bufio flush fail.err:%v,file:%s", err, f.saveFile)
+ mylog.Logger.Error(err.Error())
+ return err
+ }
+ err = f.fileP.Close()
+ if err != nil {
+ err = fmt.Errorf("file close fail.err:%v,file:%s", err, f.saveFile)
+ mylog.Logger.Error(err.Error())
+ return err
+ }
+ f.saveFile = ""
+ return nil
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/report/reporter.go b/dbm-services/redis/db-tools/dbactuator/pkg/report/reporter.go
new file mode 100644
index 0000000000..5c242ff965
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/report/reporter.go
@@ -0,0 +1,57 @@
+package report
+
+import (
+ "dbm-services/redis/db-tools/dbactuator/mylog"
+ "dbm-services/redis/db-tools/dbactuator/pkg/consts"
+ "dbm-services/redis/db-tools/dbactuator/pkg/util"
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+)
+
+// Reporter reporting interface
+type Reporter interface {
+ AddRecord(item string, flush bool) error
+ Close() error
+}
+
+// CreateReportDir creates the report dir: /home/mysql/dbareport -> {REDIS_BACKUP_DIR}/dbbak/dbareport
+func CreateReportDir() (err error) {
+ mylog.Logger.Info("begin to create reportDir(%s)", consts.DbaReportSaveDir)
+ var realLink string
+ realReportDir := filepath.Join(consts.GetRedisBackupDir(), "dbbak", "dbareport") // e.g. /data/dbbak/dbareport
+ if !util.FileExists(realReportDir) {
+ err = util.MkDirsIfNotExists([]string{realReportDir})
+ if err != nil {
+ mylog.Logger.Error(err.Error())
+ return
+ }
+ }
+ util.LocalDirChownMysql(realReportDir)
+ if util.FileExists(consts.DbaReportSaveDir) {
+ realLink, err = filepath.EvalSymlinks(consts.DbaReportSaveDir)
+ if err != nil {
+ err = fmt.Errorf("filepath.EvalSymlinks %s fail,err:%v", consts.DbaReportSaveDir, err)
+ mylog.Logger.Error(err.Error())
+ return err
+ }
+ // /home/mysql/dbareport already points to /data/dbbak/dbareport, nothing to do
+ if realLink == realReportDir {
+ return nil
+ }
+ // /home/mysql/dbareport points somewhere else, remove it first
+ rmCmd := "rm -rf " + consts.DbaReportSaveDir
+ util.RunBashCmd(rmCmd, "", nil, 1*time.Minute)
+ }
+ err = os.Symlink(realReportDir, consts.DbaReportSaveDir)
+ if err != nil {
+ err = fmt.Errorf("os.Symlink %s -> %s fail,err:%v", consts.DbaReportSaveDir, realReportDir, err)
+ mylog.Logger.Error(err.Error())
+ return
+ }
+ mylog.Logger.Info("create softLink success,%s -> %s", consts.DbaReportSaveDir, realReportDir)
+ util.MkDirsIfNotExists([]string{consts.RedisReportSaveDir})
+ util.LocalDirChownMysql(consts.DbaReportSaveDir)
+ return
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/util/bkrepo.go b/dbm-services/redis/db-tools/dbactuator/pkg/util/bkrepo.go
new file mode 100644
index 0000000000..3ec86cc9ea
--- /dev/null
+++ 
b/dbm-services/redis/db-tools/dbactuator/pkg/util/bkrepo.go
@@ -0,0 +1,104 @@
+package util
+
+import (
+ "bytes"
+ "dbm-services/redis/db-tools/dbactuator/mylog"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "mime/multipart"
+ "net/http"
+ "os"
+)
+
+// FileServerInfo file server info
+type FileServerInfo struct {
+ URL string `json:"url"` // artifact repository address
+ Bucket string `json:"bucket"` // target bucket
+ Password string `json:"password"` // artifact repository password
+ Username string `json:"username"` // artifact repository username
+ Project string `json:"project"` // artifact repository project
+}
+
+// UploadFile uploads a file to the bk-repo artifact repository
+// filepath: local path of the file to upload
+// targetURL: full path of the file in the repository
+func UploadFile(filepath string, targetURL string, username string, password string) (*http.Response, error) {
+
+ userMsg := username + ":" + password
+ token := base64.StdEncoding.EncodeToString([]byte(userMsg))
+ msg := fmt.Sprintf("start upload files from %s to %s", filepath, targetURL)
+ mylog.Logger.Info(msg)
+ bodyBuf := bytes.NewBufferString("")
+ bodyWriter := multipart.NewWriter(bodyBuf)
+
+ fh, err := os.Open(filepath)
+ if err != nil {
+ mylog.Logger.Error("error opening file")
+ return nil, err
+ }
+ boundary := bodyWriter.Boundary()
+ closeBuf := bytes.NewBufferString("")
+
+ requestReader := io.MultiReader(bodyBuf, fh, closeBuf)
+ fi, err := fh.Stat()
+ if err != nil {
+ fmt.Printf("Error Stating file: %s", filepath)
+ return nil, err
+ }
+ req, err := http.NewRequest("PUT", targetURL, requestReader)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set headers for multipart, and Content Length
+ req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
+ // whether an existing file may be overwritten, default false
+ req.Header.Set("X-BKREPO-OVERWRITE", "True")
+ // keep the file for half a year (183 days) by default
+ req.Header.Set("X-BKREPO-EXPIRES", "183")
+ req.Header.Set("Authorization", "Basic "+token)
+ req.ContentLength = fi.Size() + int64(bodyBuf.Len()) + int64(closeBuf.Len())
+ return http.DefaultClient.Do(req)
+}
+
+// DownloadFile downloads a file from the bk-repo artifact repository
+// filepath: local path to save the downloaded file
+// targetURL: full path of the file in the repository
+func DownloadFile(filepath string, targetURL string, username string, password string) (err error) {
+ msg := fmt.Sprintf("start download files from %s to %s", targetURL, filepath)
+ mylog.Logger.Info(msg)
+ userMsg := username + ":" + password
+ token := base64.StdEncoding.EncodeToString([]byte(userMsg))
+ outFile, err := os.Create(filepath)
+ if err != nil {
+ return err
+ }
+ defer outFile.Close()
+
+ req, err := http.NewRequest("GET", targetURL, nil)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Authorization", "Basic "+token)
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+
+ // Check server response
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("bad status: %s", resp.Status)
+ }
+
+ // Write the body to file
+ _, err = io.Copy(outFile, resp.Body)
+ if err != nil {
+ return err
+ }
+ mylog.Logger.Info("finish download files")
+
+ return nil
+
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/util/compress.go b/dbm-services/redis/db-tools/dbactuator/pkg/util/compress.go
new file mode 100644
index 0000000000..974df428cc
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/util/compress.go
@@ -0,0 +1,207 @@
+package util
+
+import (
+ "dbm-services/redis/db-tools/dbactuator/mylog"
+ "dbm-services/redis/db-tools/dbactuator/pkg/consts"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/dustin/go-humanize"
+)
+
+// IsZstdExecutable checks via 'zstd -V' that the local zstd binary can actually run
+func IsZstdExecutable() (ok bool) {
+ var err error
+ if !FileExists(consts.ZstdBin) {
+ 
return false + } + cmd := exec.Command(consts.ZstdBin, "-V") + if err = cmd.Start(); err != nil { + // err = fmt.Errorf("'%s -V' cmd.Start fail,err:%v", zstdBin, err) + return false + } + if err = cmd.Wait(); err != nil { + // err = fmt.Errorf("'%s -V' cmd.Wait fail,err:%v", zstdBin, err) + return false + } + return true +} + +// CompressFile 压缩文件 +// 优先使用zstd 做压缩,zstd无法使用则使用gzip +func CompressFile(file, targetDir string, rmOrigin bool) (retFile string, err error) { + var compressCmd string + fileDir := filepath.Dir(file) + filename := filepath.Base(file) + if targetDir == "" { + targetDir = fileDir + } + if IsZstdExecutable() { + retFile = filepath.Join(targetDir, filename+".zst") + if rmOrigin { + compressCmd = fmt.Sprintf(`cd %s && %s --rm -T4 %s -o %s`, fileDir, consts.ZstdBin, filename, retFile) + } else { + compressCmd = fmt.Sprintf(`cd %s && %s -T4 %s -o %s`, fileDir, consts.ZstdBin, filename, retFile) + } + _, err = RunBashCmd(compressCmd, "", nil, 6*time.Hour) + if err != nil { + return + } + } else { + retFile = filepath.Join(fileDir, filename+".gz") + if rmOrigin { + compressCmd = fmt.Sprintf(`gzip < %s >%s && rm -f %s`, file, retFile, file) + } else { + compressCmd = fmt.Sprintf(`gzip < %s >%s`, file, retFile) + } + _, err = RunBashCmd(compressCmd, "", nil, 6*time.Hour) + if err != nil { + return + } + } + return +} + +// SplitLargeFile 切割大文件为小文件,并返回切割后的结果 +// 参数file须是全路径; +// 如果file大小 小于 splitTargetSize,则返回值splitTargetSize只包含 file 一个元素 +func SplitLargeFile(file, splitTargetSize string, rmOrigin bool) (splitedFiles []string, err error) { + var fileSize int64 + var splitLimit uint64 + var cmdRet string + if file == "" { + return + } + fileSize, err = GetFileSize(file) + if err != nil { + return + } + splitLimit, err = humanize.ParseBytes(splitTargetSize) + if err != nil { + err = fmt.Errorf("humanize.ParseBytes fail,err:%v,splitTargetSize:%s", err, splitTargetSize) + return + } + if fileSize < int64(splitLimit) { + splitedFiles = append(splitedFiles, file) + return + } + fileDir := filepath.Dir(file) + fileBase := filepath.Base(file) + fileBase = strings.TrimSuffix(fileBase, ".tar") + fileBase = strings.TrimSuffix(fileBase, ".tar.gz") + fileBase = fileBase + ".split." 
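+ // e.g. a 20GiB "x.tar" with splitTargetSize "8G" becomes x.split.000/001/002 in fileDir;
+ // the grep keeps only the created piece names from split's --verbose output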
+ splitCmd := fmt.Sprintf(`cd %s && split --verbose -a 3 -b %s -d %s %s|grep -i --only-match -E "%s[0-9]+"`, + fileDir, splitTargetSize, file, fileBase, fileBase) + mylog.Logger.Info(splitCmd) + cmdRet, err = RunBashCmd(splitCmd, "", nil, 6*time.Hour) + if err != nil { + return + } + l01 := strings.Split(cmdRet, "\n") + for _, item := range l01 { + item = strings.TrimSpace(item) + if item == "" { + continue + } + splitedFiles = append(splitedFiles, filepath.Join(fileDir, item)) + } + if rmOrigin { + err = os.Remove(file) + mylog.Logger.Info(fmt.Sprintf("rm %s", file)) + if err != nil { + err = fmt.Errorf("os.Remove fail,err:%v,file:%s", err, file) + return + } + } + return +} + +// TarADir 对一个目录进行tar打包, +// 如打包 /data/dbbak/REDIS-FULL-rocksdb-1.1.1.1-30000 为 /tmp/REDIS-FULL-rocksdb-1.1.1.1-30000.tar +// 参数: originDir 为 /data/dbbak/REDIS-FULL-rocksdb-1.1.1.1-30000 +// 参数: tarSaveDir 为 /tmp/ +// 返回值: tarFile 为 /tmp/REDIS-FULL-rocksdb-1.1.1.1-30000.tar +func TarADir(originDir, tarSaveDir string, rmOrigin bool) (tarFile string, err error) { + var tarCmd string + basename := filepath.Base(originDir) + baseDir := filepath.Dir(originDir) + if tarSaveDir == "" { + tarSaveDir = filepath.Dir(originDir) + } + tarFile = filepath.Join(tarSaveDir, basename+".tar") + + if rmOrigin { + tarCmd = fmt.Sprintf(`tar --remove-files -cf %s -C %s %s`, tarFile, baseDir, basename) + } else { + tarCmd = fmt.Sprintf(`tar -cf %s -C %s %s`, tarFile, baseDir, basename) + } + mylog.Logger.Info(tarCmd) + _, err = RunBashCmd(tarCmd, "", nil, 6*time.Hour) + if err != nil { + return + } + return +} + +// TarAndSplitADir 对目录tar打包并执行split +func TarAndSplitADir(originDir, targetSaveDir, splitTargetSize string, rmOrigin bool) ( + splitedFiles []string, err error) { + var tarFile string + tarFile, err = TarADir(originDir, targetSaveDir, rmOrigin) + if err != nil { + return + } + splitedFiles, err = SplitLargeFile(tarFile, splitTargetSize, rmOrigin) + if err != nil { + return + } + return +} + +// UnionSplitFiles 合并多个split文件为一个tar文件 +func UnionSplitFiles(dir string, splitFiles []string) (tarfile string, err error) { + if len(splitFiles) == 0 { + err = fmt.Errorf("splitFiles:%+v empty list", splitFiles) + return + } + if len(splitFiles) == 1 && strings.HasSuffix(splitFiles[0], ".tar") { + return splitFiles[0], nil + } + var name string + var fullpath string + var cmd01 string + reg01 := regexp.MustCompile(`.split.\d+$`) + baseNames := make([]string, 0, len(splitFiles)) + for _, file01 := range splitFiles { + name = filepath.Base(file01) + baseNames = append(baseNames, name) + if !reg01.MatchString(file01) { + err = fmt.Errorf("%+v not split files?", splitFiles) + return + } + fullpath = filepath.Join(dir, name) + if !FileExists(fullpath) { + err = fmt.Errorf("%s not exists", fullpath) + return + } + } + + prefix := reg01.ReplaceAllString(baseNames[0], "") + tarfile = prefix + ".tar" + if len(baseNames) == 1 { + cmd01 = fmt.Sprintf("cd %s && mv %s %s", dir, baseNames[0], tarfile) + } else { + cmd01 = fmt.Sprintf("cd %s && cat %s.split* > %s", dir, prefix, tarfile) + } + mylog.Logger.Info(cmd01) + _, err = RunBashCmd(cmd01, "", nil, 2*time.Hour) + tarfile = filepath.Join(dir, tarfile) + return +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/util/file.go b/dbm-services/redis/db-tools/dbactuator/pkg/util/file.go new file mode 100644 index 0000000000..3e708ee613 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/util/file.go @@ -0,0 +1,66 @@ +package util + +import ( + "bufio" + "bytes" + "crypto/md5" + "fmt" + 
"io" + "os" +) + +// FileExists 检查目录是否已经存在 +func FileExists(path string) bool { + _, err := os.Stat(path) + if err != nil { + return os.IsExist(err) + } + return true +} + +// GetFileMd5 求文件md5sum值 +func GetFileMd5(fileAbPath string) (md5sum string, err error) { + rFile, err := os.Open(fileAbPath) + if err != nil { + return "", fmt.Errorf("GetFileMd5 fail,err:%v,file:%s", err, fileAbPath) + } + defer func(rFile *os.File) { + _ = rFile.Close() + }(rFile) + h := md5.New() + if _, err := io.Copy(h, rFile); err != nil { + return "", fmt.Errorf("GetFileMd5 io.Copy fail,err:%v,file:%s", err, fileAbPath) + } + return fmt.Sprintf("%x", h.Sum(nil)), nil +} + +// FileLineCounter 计算文件行数 +// 参考: https://stackoverflow.com/questions/24562942/golang-how-do-i-determine-the-number-of-lines-in-a-file-efficiently +func FileLineCounter(filename string) (lineCnt uint64, err error) { + _, err = os.Stat(filename) + if err != nil && os.IsNotExist(err) == true { + return 0, fmt.Errorf("file:%s not exists", filename) + } + file, err := os.Open(filename) + if err != nil { + return 0, fmt.Errorf("file:%s open fail,err:%v", filename, err) + } + defer file.Close() + reader01 := bufio.NewReader(file) + buf := make([]byte, 32*1024) + lineCnt = 0 + lineSep := []byte{'\n'} + + for { + c, err := reader01.Read(buf) + lineCnt += uint64(bytes.Count(buf[:c], lineSep)) + + switch { + case err == io.EOF: + return lineCnt, nil + + case err != nil: + return lineCnt, fmt.Errorf("file:%s read fail,err:%v", filename, err) + } + } +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/util/net.go b/dbm-services/redis/db-tools/dbactuator/pkg/util/net.go new file mode 100644 index 0000000000..413be8aceb --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/util/net.go @@ -0,0 +1,67 @@ +package util + +import ( + "fmt" + "net" +) + +// GetIpv4InterfaceName 根据ipv4地址获取网络接口名 +// https://stackoverflow.com/questions/23529663/how-to-get-all-addresses-and-masks-from-local-interfaces-in-go +func GetIpv4InterfaceName(ipv4 string) (interName string, err error) { + var ifaces []net.Interface + var addrs []net.Addr + + ifaces, err = net.Interfaces() + if err != nil { + err = fmt.Errorf("net.Interfaces fail,err:%v", err) + return + } + for _, i := range ifaces { + addrs, err = i.Addrs() + if err != nil { + // err = fmt.Errorf("%s get addrs fail,err:%v", i.Name, err) + continue + } + for _, a := range addrs { + switch v := a.(type) { + case *net.IPAddr: + if v.IP.String() == ipv4 { + return i.Name, nil + } + + case *net.IPNet: + if v.IP.String() == ipv4 { + return i.Name, nil + } + } + } + } + err = fmt.Errorf("ipv4:%s not found interfacename", ipv4) + return +} + +// GetInterfaceIpv4Addr 获取网络接口对应的 ipv4地址 +// https://gist.github.com/schwarzeni/f25031a3123f895ff3785970921e962c +func GetInterfaceIpv4Addr(interfaceName string) (addr string, err error) { + var ( + ief *net.Interface + addrs []net.Addr + ipv4Addr net.IP + ) + if ief, err = net.InterfaceByName(interfaceName); err != nil { // get interface + err = fmt.Errorf("net.InterfaceByName %s fail,err:%v", interfaceName, err) + return + } + if addrs, err = ief.Addrs(); err != nil { // get addresses + return + } + for _, addr := range addrs { // get ipv4 address + if ipv4Addr = addr.(*net.IPNet).IP.To4(); ipv4Addr != nil { + break + } + } + if ipv4Addr == nil { + return "", fmt.Errorf("interface %s don't have an ipv4 address\n", interfaceName) + } + return ipv4Addr.String(), nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/util/osCmd.go 
b/dbm-services/redis/db-tools/dbactuator/pkg/util/osCmd.go new file mode 100644 index 0000000000..da15db470e --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/util/osCmd.go @@ -0,0 +1,106 @@ +package util + +import ( + "bytes" + "context" + "dbm-services/redis/db-tools/dbactuator/mylog" + "fmt" + "io" + "os" + "os/exec" + "strings" + "time" +) + +// DealLocalCmdPid 处理本地命令得到pid +type DealLocalCmdPid interface { + DealProcessPid(pid int) error +} + +// RunBashCmd bash -c "$cmd" 执行命令并得到命令结果 +func RunBashCmd(cmd, outFile string, dealPidMethod DealLocalCmdPid, + timeout time.Duration) (retStr string, err error) { + opts := []string{"-c", cmd} + return RunLocalCmd("bash", opts, outFile, dealPidMethod, timeout) +} + +// RunLocalCmd 运行本地命令并得到命令结果 +/* + *参数: + * outFile: 不为空,则将标准输出结果打印到outFile中; + * dealPidMethod: 不为空,则将命令pid传给dealPidMethod.DealProcessPid()函数; + * logger: 用于打印日志; + */ +func RunLocalCmd( + cmd string, opts []string, outFile string, + dealPidMethod DealLocalCmdPid, timeout time.Duration) (retStr string, err error) { + ctx, cancel := context.WithTimeout(context.TODO(), timeout) + defer cancel() + + cmdCtx := exec.CommandContext(ctx, cmd, opts...) + var retBuffer bytes.Buffer + var errBuffer bytes.Buffer + var outFileHandler *os.File + if len(strings.TrimSpace(outFile)) == 0 { + cmdCtx.Stdout = &retBuffer + } else { + outFileHandler, err = os.Create(outFile) + if err != nil { + mylog.Logger.Error("RunLocalCmd create outfile fail,err:%v,outFile:%s", err, outFile) + return "", fmt.Errorf("RunLocalCmd create outfile fail,err:%v,outFile:%s", err, outFile) + } + defer outFileHandler.Close() + mylog.Logger.Info("RunLocalCmd create outfile(%s) success ...", outFile) + cmdCtx.Stdout = outFileHandler + } + cmdCtx.Stderr = &errBuffer + mylog.Logger.Debug("Running a new local cmd:%s,opts:%+v", cmd, opts) + + if err = cmdCtx.Start(); err != nil { + mylog.Logger.Error("RunLocalCmd cmd Start fail,err:%v,cmd:%s,opts:%+v", err, cmd, opts) + return "", fmt.Errorf("RunLocalCmd cmd Start fail,err:%v", err) + } + if dealPidMethod != nil { + dealPidMethod.DealProcessPid(cmdCtx.Process.Pid) + } + if err = cmdCtx.Wait(); err != nil { + mylog.Logger.Error("RunLocalCmd cmd wait fail,err:%v,errBuffer:%s,retBuffer:%s,cmd:%s,opts:%+v", err, + errBuffer.String(), retBuffer.String(), cmd, opts) + return "", fmt.Errorf("RunLocalCmd cmd wait fail,err:%v,detail:%s", err, errBuffer.String()) + } + retStr = retBuffer.String() + + if strings.TrimSpace(errBuffer.String()) != "" { + mylog.Logger.Error("RunLocalCmd fail,err:%v,cmd:%s,opts:%+v", errBuffer.String(), cmd, opts) + err = fmt.Errorf("RunLocalCmd fail,err:%s", retBuffer.String()+"\n"+errBuffer.String()) + } else { + err = nil + } + retStr = strings.TrimSpace(retStr) + return +} + +// SetOSUserPassword run set user password by chpasswd +func SetOSUserPassword(user, password string) error { + exec.Command("/bin/bash", "-c", "") + cmd := exec.Command("chpasswd") + stdin, err := cmd.StdinPipe() + if err != nil { + return fmt.Errorf("new pipe failed, err:%w", err) + } + go func() { + _, err := io.WriteString(stdin, fmt.Sprintf("%s:%s", user, password)) + if err != nil { + mylog.Logger.Warn("write into pipe failed, err:%s", err.Error()) + } + if err := stdin.Close(); err != nil { + mylog.Logger.Warn("colse stdin failed, err:%s", err.Error()) + } + }() + if output, err := cmd.CombinedOutput(); err != nil { + err = fmt.Errorf("run chpasswd failed, output:%s, err:%w", string(output), err) + mylog.Logger.Error(err.Error()) + return err + } + return nil +} diff 
--git a/dbm-services/redis/db-tools/dbactuator/pkg/util/proxy_tools.go b/dbm-services/redis/db-tools/dbactuator/pkg/util/proxy_tools.go
new file mode 100644
index 0000000000..8f4d60b17a
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/util/proxy_tools.go
@@ -0,0 +1,102 @@
+// Package util proxy admin helpers
+package util
+
+import (
+ "bufio"
+ "crypto/md5"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math/rand"
+ "net"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// NCInstance global NetCat instance
+var NCInstance *NetCat
+
+// NetCat use tcp for nc
+type NetCat struct {
+ AdminAddr string
+ ReadTimeOut time.Duration
+ Nc net.Conn
+}
+
+func init() {
+ NCInstance = &NetCat{}
+ rand.Seed(time.Now().UnixNano())
+}
+
+// GetTwemProxyBackendsMd5Sum returns the md5sum of a twemproxy's backend list
+func GetTwemProxyBackendsMd5Sum(addr string) (string, error) {
+ pinfo := strings.Split(addr, ":")
+ port, _ := strconv.Atoi(pinfo[1])
+ segsMap, err := GetTwemproxyBackends(pinfo[0], port)
+ if err != nil {
+ return "errFailed", err
+ }
+ segList := []string{}
+ for addr, seg := range segsMap {
+ segList = append(segList, fmt.Sprintf("%s|%s", addr, seg))
+ }
+ sort.Slice(segList, func(i, j int) bool {
+ return segList[i] > segList[j]
+ })
+
+ x, _ := json.Marshal(segList)
+ return fmt.Sprintf("%x", md5.Sum(x)), nil
+}
+
+// DoSwitchTwemproxyBackends "change nosqlproxy $mt:$mp $st:$sp"
+func DoSwitchTwemproxyBackends(ip string, port int, from, to string) (rst string, err error) {
+ addr := fmt.Sprintf("%s:%d", ip, port+1000)
+ nc, err := net.DialTimeout("tcp", addr, time.Second)
+ if err != nil {
+ return "nil", err
+ }
+ _, err = nc.Write([]byte(fmt.Sprintf("change nosqlproxy %s %s", from, to)))
+ if err != nil {
+ return "nil", err
+ }
+ return bufio.NewReader(nc).ReadString('\n')
+}
+
+// GetTwemproxyBackends get nosqlproxy servers
+func GetTwemproxyBackends(ip string, port int) (segs map[string]string, err error) {
+ addr := fmt.Sprintf("%s:%d", ip, port+1000)
+ nc, err := net.DialTimeout("tcp", addr, time.Second)
+ if err != nil {
+ return nil, err
+ }
+ if segs, err = GetSegDetails(nc); err != nil {
+ return nil, err
+ }
+ return segs, nil
+}
+
+// GetSegDetails reads the output of `echo stats | nc <twemproxy ip> <admin port>`
+func GetSegDetails(nc net.Conn) (map[string]string, error) {
+ _, err := nc.Write([]byte("stats"))
+ if err != nil {
+ return nil, err
+ }
+ reader := bufio.NewReader(nc)
+ segs := make(map[string]string)
+ for {
+ line, _, err := reader.ReadLine()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return nil, err
+ }
+ strws := strings.Split(string(line), " ")
+ if len(strws) == 4 {
+ segs[strws[2]] = strws[0]
+ }
+ }
+ return segs, nil
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/util/redisutil.go b/dbm-services/redis/db-tools/dbactuator/pkg/util/redisutil.go
new file mode 100644
index 0000000000..8e6c25b193
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/pkg/util/redisutil.go
@@ -0,0 +1,102 @@
+package util
+
+import (
+ "dbm-services/redis/db-tools/dbactuator/mylog"
+ "dbm-services/redis/db-tools/dbactuator/pkg/consts"
+ "fmt"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/shirou/gopsutil/v3/mem"
+)
+
+// GetTendisplusBlockcache returns the per-instance blockcache size in MB.
+// If total system memory is below 4GB, instBlockcache = total memory * 0.3 / instance count,
+// otherwise instBlockcache = total memory * 0.5 / instance count.
+func GetTendisplusBlockcache(instCount uint64) (instBlockcache uint64, err error) {
+ if instCount <= 0 {
+ err = fmt.Errorf("instCount==%d <=0", instCount)
+ return
+ }
+ var vMem *mem.VirtualMemoryStat
+ vMem, err = mem.VirtualMemory()
+ if 
err != nil { + err = fmt.Errorf("mem.VirtualMemory fail,err:%v", err) + return + } + if vMem.Total < 4*consts.GiByte { + instBlockcache = vMem.Total * 3 / (10 * instCount) + } else { + instBlockcache = vMem.Total * 5 / (10 * instCount) + } + if instBlockcache < 128*consts.MiByte { + instBlockcache = 128 * consts.MiByte + } + instBlockcache = instBlockcache / consts.MiByte + return +} + +// GetTendisplusWriteBufferSize 返回单位是Byte +// 如果系统内存小于8GB,则 writeBufferSize = 8MB,否则 writeBufferSize = 32MB +func GetTendisplusWriteBufferSize(instCount uint64) (writeBufferSize uint64, err error) { + if instCount <= 0 { + err = fmt.Errorf("instCount==%d <=0", instCount) + return + } + var vMem *mem.VirtualMemoryStat + vMem, err = mem.VirtualMemory() + if err != nil { + err = fmt.Errorf("mem.VirtualMemory fail,err:%v", err) + return + } + if vMem.Total <= 8*consts.GiByte { + writeBufferSize = 8 * consts.MiByte + } else { + writeBufferSize = 32 * consts.MiByte + } + return +} + +// StopBkDbmon 停止bk-dbmon +func StopBkDbmon() (err error) { + if FileExists(consts.BkDbmonBin) { + stopScript := filepath.Join(consts.BkDbmonPath, "stop.sh") + stopCmd := fmt.Sprintf("su %s -c '%s'", consts.MysqlAaccount, "sh "+stopScript) + mylog.Logger.Info(stopCmd) + _, err = RunLocalCmd("su", []string{consts.MysqlAaccount, "-c", "sh " + stopScript}, + "", nil, 1*time.Minute) + return + } + mylog.Logger.Info(fmt.Sprintf("bk-dbmon not exists")) + killCmd := ` +pid=$(ps aux|grep 'bk-dbmon --config'|grep -v dbactuator|grep -v grep|awk '{print $2}') +if [[ -n $pid ]] +then +kill $pid +fi +` + mylog.Logger.Info(killCmd) + _, err = RunBashCmd(killCmd, "", nil, 1*time.Minute) + return +} + +// StartBkDbmon start local bk-dbmon +func StartBkDbmon() (err error) { + startScript := filepath.Join(consts.BkDbmonPath, "start.sh") + if !FileExists(startScript) { + err = fmt.Errorf("%s not exists", startScript) + mylog.Logger.Error(err.Error()) + return + } + startCmd := fmt.Sprintf("su %s -c 'nohup %s &'", consts.MysqlAaccount, "sh "+startScript) + mylog.Logger.Info(startCmd) + _, err = RunLocalCmd("su", []string{consts.MysqlAaccount, "-c", "nohup sh " + startScript + " &"}, + "", nil, 1*time.Minute) + + if err != nil && strings.Contains(err.Error(), "no crontab for") { + return nil + } + + return +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/util/reflect.go b/dbm-services/redis/db-tools/dbactuator/pkg/util/reflect.go new file mode 100644 index 0000000000..0698796aa2 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/util/reflect.go @@ -0,0 +1,20 @@ +package util + +import ( + "reflect" + "runtime" +) + +// GetTypeName 获取接口类型名 +func GetTypeName(object interface{}) string { + t := reflect.TypeOf(object) + if t.Kind() == reflect.Ptr { + return "*" + t.Elem().Name() + } + return t.Name() +} + +// GetFunctionName 获取函数名 +func GetFunctionName(i interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/util/util.go b/dbm-services/redis/db-tools/dbactuator/pkg/util/util.go new file mode 100644 index 0000000000..7a76e85e46 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/util/util.go @@ -0,0 +1,269 @@ +// Package util 公共函数 +package util + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "errors" + "fmt" + "net" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// NotFound error +const NotFound = "not found" + +// NewNotFound .. 
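+// Editor's note: NotFound is a sentinel error matched on its text via
+// IsNotFoundErr below rather than with errors.Is. A hypothetical usage sketch
+// (lookupConfig and defaultValue are invented for illustration):
+//
+//	val, err := lookupConfig(key) // returns NewNotFound() on a miss
+//	if err != nil && IsNotFoundErr(err) {
+//		val = defaultValue // treat a miss as a default, not a failure
+//	}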
+func NewNotFound() error { + return errors.New(NotFound) +} + +// IsNotFoundErr .. +func IsNotFoundErr(err error) bool { + if err.Error() == NotFound { + return true + } + return false +} + +// GetCurrentDirectory 获取当前二进制程序所在执行路径 +func GetCurrentDirectory() (string, error) { + dir, err := filepath.Abs(filepath.Dir(os.Args[0])) + if err != nil { + return dir, fmt.Errorf("convert absolute path failed, err: %+v", err) + } + dir = strings.Replace(dir, "\\", "/", -1) + return dir, nil +} + +// GetLocalIP 获得本地ip +func GetLocalIP() (string, error) { + var localIP string + var err error + addrs, err := net.InterfaceAddrs() + if err != nil { + return localIP, fmt.Errorf("GetLocalIP net.InterfaceAddrs fail,err:%v", err) + } + for _, addr := range addrs { + if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { + if ipnet.IP.To4() != nil { + localIP = ipnet.IP.String() + return localIP, nil + } + } + } + return localIP, fmt.Errorf("can't find local ip") +} + +// IsMountPoint Determine if a directory is a mountpoint, by comparing the device for the directory +// with the device for it's parent. If they are the same, it's not a mountpoint, if they're +// different, it is. +// reference: https://github.com/cnaize/kubernetes/blob/master/pkg/util/mount/mountpoint_unix.go#L29 +func IsMountPoint(file string) (bool, error) { + stat, err := os.Stat(file) + if err != nil { + return false, err + } + rootStat, err := os.Lstat(file + "/..") + if err != nil { + return false, err + } + // If the directory has the same device as parent, then it's not a mountpoint. + return stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev, nil +} + +// FindFirstMountPoint find first mountpoint in prefer order +func FindFirstMountPoint(paths ...string) (string, error) { + for _, path := range paths { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + continue + } + } + isMountPoint, err := IsMountPoint(path) + if err != nil { + return "", fmt.Errorf("check whether mountpoint failed, path: %s, err: %v", path, err) + } + if isMountPoint { + return path, nil + } + } + return "", fmt.Errorf("no available mountpoint found, choices: %#v", paths) +} + +// CheckPortIsInUse 检查端口是否被占用 +func CheckPortIsInUse(ip, port string) (inUse bool, err error) { + timeout := time.Second + conn, err := net.DialTimeout("tcp", net.JoinHostPort(ip, port), timeout) + if err != nil && strings.Contains(err.Error(), "connection refused") { + return false, nil + } else if err != nil { + return false, fmt.Errorf("net.DialTimeout fail,err:%v", err) + } + if conn != nil { + defer func(conn net.Conn) { + _ = conn.Close() + }(conn) + return true, nil + } + return false, nil +} + +// IsValidIP 判断字符串是否是一个有效IP +func IsValidIP(ipStr string) bool { + if net.ParseIP(ipStr) == nil { + return false + } + return true +} + +// MkDirsIfNotExists 如果目录不存在则创建 +func MkDirsIfNotExists(dirs []string) error { + return MkDirsIfNotExistsWithPerm(dirs, 0755) +} + +// MkDirsIfNotExistsWithPerm 如果目录不存在则创建,并指定文件Perm +func MkDirsIfNotExistsWithPerm(dirs []string, perm os.FileMode) error { + for _, dir := range dirs { + _, err := os.Stat(dir) + if err == nil { + continue + } + if os.IsNotExist(err) == true { + err = os.MkdirAll(dir, perm) + if err != nil { + return fmt.Errorf("MkdirAll fail,err:%v,dir:%s", err, dirs) + } + } + } + return nil +} + +// IsExecOwner owner是否可执行 +func IsExecOwner(mode os.FileMode) bool { + return mode&0100 != 0 +} + +// IsExecGroup grouper是否可执行 +func IsExecGroup(mode os.FileMode) bool { + return mode&0010 != 0 +} 
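+
+// Editor's note: a worked example for the exec-bit helpers (illustration
+// only): for mode 0744 (rwxr--r--) IsExecOwner is true (0744&0100 != 0),
+// IsExecGroup and IsExecOther are false, IsExecAny is true and IsExecAll is
+// false, since 0744&0111 == 0100. A typical check might look like:
+//
+//	fi, _ := os.Stat("/usr/local/redis/bin/redis-server") // hypothetical path
+//	if !IsExecAny(fi.Mode()) {
+//		// not executable for anyone; fix permissions first
+//	}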
+ +// IsExecOther other是否可执行 +func IsExecOther(mode os.FileMode) bool { + return mode&0001 != 0 +} + +// IsExecAny owner/grouper/other 任意一个可执行 +func IsExecAny(mode os.FileMode) bool { + return mode&0111 != 0 +} + +// IsExecAll owner/grouper/other 全部可执行 +func IsExecAll(mode os.FileMode) bool { + return mode&0111 == 0111 +} + +// LocalDirChownMysql 改变localDir的属主为mysql +func LocalDirChownMysql(localDir string) (err error) { + cmd := fmt.Sprintf("chown -R %s.%s %s", consts.MysqlAaccount, consts.MysqlGroup, localDir) + _, err = RunBashCmd(cmd, "", nil, 1*time.Hour) + return +} + +// HostDiskUsage 本地路径所在磁盘使用情况 +type HostDiskUsage struct { + TotalSize uint64 `json:"ToTalSize"` // bytes + UsedSize uint64 `json:"UsedSize"` // bytes + AvailSize uint64 `json:"AvailSize"` // bytes + UsageRatio int `json:"UsageRatio"` +} + +// String 用于打印 +func (disk *HostDiskUsage) String() string { + ret := fmt.Sprintf("total_size=%dMB,used_size=%d,avail_size=%d,Use=%d%%", + disk.TotalSize/1024/1024, + disk.UsedSize/1024/1024, + disk.AvailSize/1024/1024, + disk.UsageRatio) + return ret +} + +// GetLocalDirDiskUsg 获取本地路径所在磁盘使用情况 +// 参考: +// https://stackoverflow.com/questions/20108520/get-amount-of-free-disk-space-using-go +// http://evertrain.blogspot.com/2018/05/golang-disk-free.html +func GetLocalDirDiskUsg(localDir string) (diskUsg HostDiskUsage, err error) { + var stat unix.Statfs_t + if err = unix.Statfs(localDir, &stat); err != nil { + err = fmt.Errorf("unix.Statfs fail,err:%v,localDir:%s", err, localDir) + return + } + diskUsg.TotalSize = stat.Blocks * uint64(stat.Bsize) + diskUsg.AvailSize = stat.Bavail * uint64(stat.Bsize) + diskUsg.UsedSize = (stat.Blocks - stat.Bfree) * uint64(stat.Bsize) + diskUsg.UsageRatio = int(diskUsg.UsedSize * 100 / diskUsg.TotalSize) + return +} + +// GetFileSize 获取文件大小(单位byte) +func GetFileSize(filename string) (size int64, err error) { + fileInfo, err := os.Stat(filename) + if err != nil { + err = fmt.Errorf("file:%s os.Stat fail,err:%v", filename, err) + return + } + return fileInfo.Size(), nil +} + +// IntSliceInter 两个[]int 求交集 +func IntSliceInter(list01, list02 []int) []int { + m01 := make(map[int]bool) + m02 := make(map[int]bool) + for _, item01 := range list01 { + m01[item01] = true + } + for _, item02 := range list02 { + m02[item02] = true + } + ret := []int{} + for item01 := range m01 { + if _, ok := m02[item01]; ok == true { + ret = append(ret, item01) + } + } + sort.Ints(ret) + return ret +} + +// IntSliceToString 将[]int join,返回一个字符串 +func IntSliceToString(src []int, seq string) string { + strList := []string{} + for _, item := range src { + strList = append(strList, strconv.Itoa(item)) + } + return strings.Join(strList, seq) +} + +// IsDbmSysKeys 是否是dbm 系统相关的管理key,如监控、心跳key等的key +func IsDbmSysKeys(key string) bool { + if key == "master_ip" || key == "master_port" || key == "twemproxy_mon" { + return true + } + if strings.HasPrefix(key, "dbmon:") || + strings.HasPrefix(key, "dbha:agent:") || + strings.HasSuffix(key, ":timediff") || + strings.HasSuffix(key, ":time") || + strings.HasSuffix(key, ":0:dbsize") || + strings.HasSuffix(key, ":heartbeat") { + return true + } + return false +} diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/util/version.go b/dbm-services/redis/db-tools/dbactuator/pkg/util/version.go new file mode 100644 index 0000000000..630afc178e --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/pkg/util/version.go @@ -0,0 +1,118 @@ +package util + +import ( + "dbm-services/redis/db-tools/dbactuator/mylog" + "fmt" + "regexp" + "strconv" + 
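+	// Editor's note (sketch of the encoding implemented below): a dotted
+	// version is packed as major*1000000 + minor*1000 + patch, so "2.8.17"
+	// becomes 2*1000000 + 8*1000 + 17 = 2008017, and
+	// VersionParse("2.8.17-TRedis-v1.2.20") yields baseVersion=2008017,
+	// subVersion=1002020.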
"strings" + "time" +) + +func convertVersionToUint(version string) (total uint64, err error) { + version = strings.TrimSpace(version) + if version == "" { + return 0, nil + } + list01 := strings.Split(version, ".") + var billion string + var thousand string + var single string + if len(list01) == 0 { + err = fmt.Errorf("version:%s format not correct", version) + mylog.Logger.Error(err.Error()) + return 0, err + } + billion = list01[0] + if len(list01) >= 2 { + thousand = list01[1] + } + if len(list01) >= 3 { + single = list01[2] + } + + if billion != "" { + b, err := strconv.ParseUint(billion, 10, 64) + if err != nil { + err = fmt.Errorf("convertVersionToUint strconv.ParseUint fail,err:%v,billion:%s,version:%s", err, billion, version) + mylog.Logger.Error(err.Error()) + return 0, err + } + total += b * 1000000 + } + if thousand != "" { + t, err := strconv.ParseUint(thousand, 10, 64) + if err != nil { + err = fmt.Errorf("convertVersionToUint strconv.ParseUint fail,err:%v,thousand:%s,version:%s", err, thousand, version) + mylog.Logger.Error(err.Error()) + return 0, err + } + total += t * 1000 + } + if single != "" { + s, err := strconv.ParseUint(single, 10, 64) + if err != nil { + err = fmt.Errorf("convertVersionToUint strconv.ParseUint fail,err:%v,single:%s,version:%s", err, single, version) + mylog.Logger.Error(err.Error()) + return 0, err + } + total += s + } + return total, nil +} + +// VersionParse tendis版本解析 +/* + * VersionParse + * 2.8.17-TRedis-v1.2.20, baseVersion: 2008017,subVersion:1002020 + * 6.2.7,baseVersion: 6002007 + */ +func VersionParse(version string) (baseVersion, subVersion uint64, err error) { + reg01 := regexp.MustCompile(`[\d+.]+`) + rets := reg01.FindAllString(version, -1) + if len(rets) == 0 { + err = fmt.Errorf("TendisVersionParse version:%s format not correct", version) + mylog.Logger.Error(err.Error()) + return 0, 0, err + } + if len(rets) >= 1 { + baseVersion, err = convertVersionToUint(rets[0]) + if err != nil { + return 0, 0, err + } + } + if len(rets) >= 2 { + subVersion, err = convertVersionToUint(rets[1]) + if err != nil { + return 0, 0, err + } + } + + return baseVersion, subVersion, nil +} + +// RedisCliVersion redis-cli 的版本解析 +func RedisCliVersion(cliBin string) (baseVersion, subVersion uint64, err error) { + cmd := cliBin + " -v" + verRet, err := RunBashCmd(cmd, "", nil, 20*time.Second) + if err != nil { + return + } + baseVersion, subVersion, err = VersionParse(verRet) + if err != nil { + return + } + return +} + +// IsCliSupportedNoAuthWarning redis-cli 是否支持 --no-auth-warning参数 +func IsCliSupportedNoAuthWarning(cliBin string) bool { + bVer, _, err := RedisCliVersion(cliBin) + if err != nil { + return false + } + if bVer > 6000000 { + return true + } + return false +} diff --git a/dbm-services/redis/db-tools/dbactuator/scripts/upload.sh b/dbm-services/redis/db-tools/dbactuator/scripts/upload.sh new file mode 100644 index 0000000000..ff9e93e222 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/scripts/upload.sh @@ -0,0 +1,182 @@ +#!/usr/bin/env bash + +# 安全模式 +set -euo pipefail + +# 重置PATH +PATH=/usr/local/sbin:/usr/sbin:/usr/bin:/sbin:/bin +export PATH + +# 通用脚本框架变量 +PROGRAM=$(basename "$0") +EXITCODE=0 + +BKREPO_USER= +BKREPO_PASSWORD= +BKREPO_API=http://127.0.0.1:8080 #介质库https地址 +BKREPO_PROJECT=generic # 项目代号 +BKREPO_NAME=bk-dbm # 仓库名字,默认自定义仓库 +DOWNLOAD_DIR=/tmp # 下载文件的默认路径:/tmp +BKREPO_METHOD=GET # 默认为下载 +BKREPO_PUT_OVERWRITE=true # 上传时是否覆盖仓库 +REMOTE_PATH= +declare -a REMOTE_FILE=() # 下载的文件列表 +declare -a UPLOAD_FILE=() # 上传的文件列表 + +trap 
'rm -f /tmp/bkrepo_tool.*.log' EXIT
+usage () {
+    cat <<EOF
+    $PROGRAM -u <user> -p <password> [ -d <download_dir> ] -r /devops/path1 -r /devops/path2 ...
+    $PROGRAM -u <user> -p <password> -X PUT -T local_file_path1 -T local_file_path2 -R remote_path
+    [ -u, --user         [必填] "指定访问bkrepo的api用户名" ]
+    [ -p, --password     [必填] "指定访问bkrepo的api密码" ]
+    [ -i, --url          [必填] "指定访问bkrepo的url,默认是$BKREPO_API" ]
+    [ -r, --remote-file  [必填] "指定下载的远程文件路径" ]
+    [ -n, --repo         [选填] "指定项目的仓库名字,默认为$BKREPO_NAME" ]
+    [ -P, --project      [选填] "指定项目名字,默认为blueking" ]
+    [ -d, --dir          [选填] "指定下载制品库文件的存放文件夹,若不指定,则为/tmp" ]
+    [ -X, --method       [选填] "默认为下载(GET),可选PUT,为上传" ]
+
+    -X PUT时,以下参数生效:
+    [ -T, --upload-file  [必填] "指定需要上传的本机文件路径" ]
+    [ -R, --remote-path  [必填] "指定上传到的仓库目录的路径" ]
+    [ -O, --override     [选填] "指定上传同名文件是否覆盖" ]
+    [ -h --help -? 查看帮助 ]
+EOF
+}
+
+usage_and_exit () {
+    usage
+    exit "$1"
+}
+
+log () {
+    echo "$@"
+}
+
+error () {
+    echo "$@" 1>&2
+    usage_and_exit 1
+}
+
+warning () {
+    echo "$@" 1>&2
+    EXITCODE=$((EXITCODE + 1))
+}
+
+# 解析命令行参数,长短混合模式
+(( $# == 0 )) && usage_and_exit 1
+while (( $# > 0 )); do
+    case "$1" in
+        -u | --user )
+            shift
+            BKREPO_USER=$1
+            ;;
+        -p | --password)
+            shift
+            BKREPO_PASSWORD=$1
+            ;;
+        -i | --url)
+            shift
+            BKREPO_API=$1
+            ;;
+        -d | --dir )
+            shift
+            DOWNLOAD_DIR=$1
+            ;;
+        -n | --name )
+            shift
+            BKREPO_NAME=$1
+            ;;
+        -P | --project )
+            shift
+            BKREPO_PROJECT=$1
+            ;;
+        -r | --remote-file )
+            shift
+            REMOTE_FILE+=("$1")
+            ;;
+        -T | --upload-file )
+            shift
+            UPLOAD_FILE+=("$1")
+            ;;
+        -O | --override)
+            BKREPO_PUT_OVERWRITE=true
+            ;;
+        -R | --remote-path )
+            shift
+            REMOTE_PATH=$1
+            ;;
+        -X | --method )
+            shift
+            BKREPO_METHOD=$1
+            ;;
+        --help | -h | '-?' )
+            usage_and_exit 0
+            ;;
+        -*)
+            error "不可识别的参数: $1"
+            ;;
+        *)
+            break
+            ;;
+    esac
+    shift
+done
+
+if [[ -z "$BKREPO_USER" || -z "$BKREPO_PASSWORD" ]]; then
+    warning "-u, -p must not be empty"
+fi
+
+if (( EXITCODE > 0 )); then
+    usage_and_exit "$EXITCODE"
+fi
+
+case $BKREPO_METHOD in
+    GET )
+        if ! [[ -d "$DOWNLOAD_DIR" ]]; then
+            mkdir -p "$DOWNLOAD_DIR"
+        fi
+
+        cd "$DOWNLOAD_DIR" || { echo "can't change into $DOWNLOAD_DIR"; exit 1; }
+
+        for remote_file in "${REMOTE_FILE[@]}"; do
+            echo "start downloading $remote_file ..."
+            curl -X "$BKREPO_METHOD" -sLO -u "$BKREPO_USER:$BKREPO_PASSWORD" "${BKREPO_API}/${BKREPO_PROJECT}/$BKREPO_NAME/$remote_file"
+            rt=$?
+ if [[ $rt -eq 0 ]]; then + echo "download $remote_file finished in $DOWNLOAD_DIR/${remote_file##*/}" + else + echo "download $remote_file with error code: <$rt>" + fi + done + ;; + PUT ) + for local_file in "${UPLOAD_FILE[@]}"; do + if [[ -r "$local_file" ]]; then + local_file_md5=$(md5sum "$local_file" | awk '{print $1}') + local_file_name=$(basename "$local_file") + http_code=$(curl -s -o /tmp/bkrepo_tool.$$.log -w "%{http_code}" \ + -u "$BKREPO_USER:$BKREPO_PASSWORD" "${BKREPO_API}/${BKREPO_PROJECT}/${BKREPO_NAME}/$REMOTE_PATH/$local_file_name" \ + -T "$local_file" \ + -H "X-BKREPO-OVERWRITE: $BKREPO_PUT_OVERWRITE" \ + -H "X-BKREPO-MD5: $local_file_md5" + ) + if [[ $http_code -eq 200 ]]; then + echo "upload $local_file to $REMOTE_PATH succeed" + else + echo "upload $local_file to $REMOTE_PATH failed" + echo "http response is: $(>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") + fmt.Println("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<") + return nil + + redistest.TendisplusSyncMasterClear(serverIP, consts.TendisTypePredixyTendisplusCluster, true) + redistest.TendisplusSyncSlaveClear(serverIP, consts.TendisTypePredixyTendisplusCluster, true) + + err = redistest.TendisplusSyncMasterInstall(serverIP, + tendisplusPkgName, tendisplusPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + consts.TendisTypePredixyTendisplusCluster) + if err != nil { + return + } + + err = redistest.TendisplusSyncSlaveInstall(serverIP, + tendisplusPkgName, tendisplusPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + consts.TendisTypePredixyTendisplusCluster) + if err != nil { + return + } + + // 建立tendisplus cluster + replicaPairs := make([]atomredis.ClusterReplicaItem, 0, consts.TestRedisInstanceNum) + for i := 0; i < consts.TestRedisInstanceNum; i++ { + replicaPairs = append(replicaPairs, atomredis.ClusterReplicaItem{ + MasterIP: serverIP, + MasterPort: consts.TestSyncTendisPlusMasterStartPort + i, + SlaveIP: serverIP, + SlavePort: consts.TestSyncTendisPlusSlaveStartPort + i, + }) + } + plusClusterTest := redistest.ClusterMeetTest{} + plusClusterTest.SetPassword(consts.RedisTestPasswd). + SetSlotAutoAssign(true). 
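+		// Editor's note: with slot auto-assign enabled the meet step is
+		// expected to spread all 16384 cluster slots evenly across the new
+		// masters; this is inferred from the field name and from
+		// RunClusterMeetAndSlotsAssign below, not a verified API contract.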
+ SetClusterReplicaPairs(replicaPairs) + if plusClusterTest.Err != nil { + return plusClusterTest.Err + } + plusClusterTest.RunClusterMeetAndSlotsAssign() + if plusClusterTest.Err != nil { + return plusClusterTest.Err + } + // TODO (新实例加入集群中。。s) + + return DoSwitchActionTest(serverIP, + consts.TestPredixyPort, + consts.TestTendisPlusMasterStartPort, + consts.TestTendisPlusSlaveStartPort, + consts.TestSyncTendisPlusMasterStartPort, + consts.TestSyncTendisPlusSlaveStartPort, + consts.TendisTypePredixyTendisplusCluster) +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/clustertest/twemproxy_cluster.go b/dbm-services/redis/db-tools/dbactuator/tests/clustertest/twemproxy_cluster.go new file mode 100644 index 0000000000..1d972024ae --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/clustertest/twemproxy_cluster.go @@ -0,0 +1,240 @@ +package clustertest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/tests/proxytest" + "dbm-services/redis/db-tools/dbactuator/tests/redistest" + "encoding/base64" + "encoding/json" + "fmt" + "path/filepath" +) + +// TwemproxyRedisInstanceInstall twemproxy + redisinstance 集群安装 +func TwemproxyRedisInstanceInstall(serverIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + twemproxyPkgName, twemproxyPkgMd5 string) (err error) { + + // 先清理再安装 + err = redistest.RedisInstanceMasterClear(serverIP, consts.TendisTypeTwemproxyRedisInstance, true) + if err != nil { + return + } + err = redistest.RedisInstanceSlaveClear(serverIP, consts.TendisTypeTwemproxyRedisInstance, true) + if err != nil { + return + } + // 安装master + err = redistest.RedisInstanceMasterInstall(serverIP, redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, consts.TendisTypeTwemproxyRedisInstance) + if err != nil { + return + } + + err = redistest.RedisInstanceSlaveInstall(serverIP, redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, consts.TendisTypeTwemproxyRedisInstance) + if err != nil { + return + } + + // 建立主从关系 + err = redistest.CreateReplicaof(serverIP, consts.TestRedisMasterStartPort, consts.RedisTestPasswd, + serverIP, consts.TestRedisSlaveStartPort, consts.RedisTestPasswd) + if err != nil { + return + } + + // 安装twemproxy + err = proxytest.TwemproxyInstall(serverIP, twemproxyPkgName, twemproxyPkgMd5, + consts.TendisTypeTwemproxyRedisInstance, + consts.TestRedisMasterStartPort, consts.TestRedisInstanceNum, + consts.TestTwemproxyPort) + if err != nil { + return + } + + // 写入数据测试 + cmdTest, err := redistest.NewCommandTest(serverIP, consts.TestTwemproxyPort, consts.ProxyTestPasswd, + consts.TendisTypeRedisInstance, 0) + if err != nil { + return err + } + cmdTest.StringTest() + if cmdTest.Err != nil { + return cmdTest.Err + } + cmdTest.HashTest() + if cmdTest.Err != nil { + return cmdTest.Err + } + cmdTest.ZsetTest() + + if cmdTest.Err != nil { + return cmdTest.Err + } + cmdTest.ListTest() + if cmdTest.Err != nil { + return cmdTest.Err + } + return nil +} + +// TwemproxyRedisInstanceClear twemproxy+redis_instance 集群清理 +func TwemproxyRedisInstanceClear(serverIP string, clearDataDir bool) (err error) { + proxytest.TwemproxyClear(serverIP, consts.TestTwemproxyPort, clearDataDir) + // master清理时, /usr/local/redis 先保留 + redistest.RedisInstanceMasterClear(serverIP, consts.TendisTypeTwemproxyRedisInstance, clearDataDir) + redistest.RedisInstanceSlaveClear(serverIP, consts.TendisTypeTwemproxyRedisInstance, clearDataDir) + return nil +} 
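+
+// Editor's note: a minimal driver for the install/clear pair above
+// (hypothetical package names and placeholder md5s, for illustration only):
+//
+//	ip, _ := util.GetLocalIP()
+//	err := clustertest.TwemproxyRedisInstanceInstall(ip,
+//		"redis-6.2.7.tar.gz", "<md5>",
+//		"dbtools.tar.gz", "<md5>",
+//		"twemproxy-0.4.1-v23.tar.gz", "<md5>")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer clustertest.TwemproxyRedisInstanceClear(ip, true)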
+ +// TwemproxyTendisSSDInstall twemproxy + tendisSSD 集群安装 +func TwemproxyTendisSSDInstall(serverIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + twemproxyPkgName, twemproxyPkgMd5 string) (err error) { + var retBase64 string + + // 先清理再安装 + err = redistest.TendisSSDClear(serverIP, consts.TendisTypeTwemproxyTendisSSDInstance, + true, consts.TestTendisSSDMasterStartPort, consts.TestRedisInstanceNum) + if err != nil { + return + } + err = redistest.TendisSSDClear(serverIP, consts.TendisTypeTwemproxyTendisSSDInstance, + true, consts.TestTendisSSDSlaveStartPort, consts.TestRedisInstanceNum) + if err != nil { + return + } + // 安装ssd master + err = redistest.TendisSSDInstall(serverIP, redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, consts.TendisTypeTwemproxyTendisSSDInstance, + consts.TestTendisSSDMasterStartPort, consts.TestRedisInstanceNum) + if err != nil { + return + } + err = redistest.TendisSSDInstall(serverIP, redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, consts.TendisTypeTwemproxyTendisSSDInstance, + consts.TestTendisSSDSlaveStartPort, consts.TestRedisInstanceNum, + ) + if err != nil { + return + } + // 向master中写点数据 + for i := 0; i < consts.TestRedisInstanceNum; i++ { + cmdTest, err := redistest.NewCommandTest(serverIP, consts.TestTendisSSDMasterStartPort+i, + consts.RedisTestPasswd, consts.TendisTypeRedisInstance, 0) + if err != nil { + return err + } + cmdTest.StringTest() + if cmdTest.Err != nil { + return cmdTest.Err + } + } + // master执行备份 + retBase64, err = redistest.Backup(serverIP, []int{}, + consts.TestTendisSSDMasterStartPort, + consts.TestRedisInstanceNum, &atomredis.TendisSSDSetLogCount{ + LogCount: 10000, + SlaveLogKeepCount: 20000, + }) + if err != nil { + return + } + retDecoded, err := base64.StdEncoding.DecodeString(retBase64) + if err != nil { + err = fmt.Errorf("TwemproxyTendisSSDInstall base64 decode fail,err:%v,base64Len:%d,base64Data:%s", err, + len(retBase64), retBase64) + fmt.Println(err.Error()) + return + } + backupTasks := []atomredis.BackupTask{} + err = json.Unmarshal(retDecoded, &backupTasks) + if err != nil { + err = fmt.Errorf("TwemproxyTendisSSDInstall json.Unmarshal fail,err:%v,dataDecoded:%s", err, string(retDecoded)) + fmt.Println(err.Error()) + return + } + + // tendis_ssd restore slave + err = redistest.SsdRestore(serverIP, []int{}, consts.TestTendisSSDMasterStartPort, + consts.TestRedisInstanceNum, consts.RedisTestPasswd, + serverIP, []int{}, consts.TestTendisSSDSlaveStartPort, + consts.TestRedisInstanceNum, consts.RedisTestPasswd, + filepath.Join(consts.GetRedisBackupDir(), "dbbak/"), backupTasks) + if err != nil { + return + } + + // 建立主从关系 + // err = redistest.CreateReplicaof(serverIP, consts.TestTendisSSDMasterStartPort, consts.RedisTestPasswd, + // serverIP, consts.TestTendisSSDSlaveStartPort, consts.RedisTestPasswd) + // if err != nil { + // return + // } + + // 安装twemproxy + err = proxytest.TwemproxyInstall(serverIP, twemproxyPkgName, twemproxyPkgMd5, + consts.TendisTypeTwemproxyRedisInstance, + consts.TestTendisSSDMasterStartPort, consts.TestRedisInstanceNum, + consts.TestSSDClusterTwemproxyPort) + if err != nil { + return + } + + // 写入数据测试 + cmdTest, err := redistest.NewCommandTest(serverIP, consts.TestSSDClusterTwemproxyPort, consts.ProxyTestPasswd, + consts.TendisTypeRedisInstance, 0) + if err != nil { + return err + } + cmdTest.StringTest() + if cmdTest.Err != nil { + return cmdTest.Err + } + cmdTest.HashTest() + if cmdTest.Err != nil { + return cmdTest.Err + } + // 和cache,tendisplus 
写入类型保持一致,方便key提取删除,校验结果保持一致 + cmdTest.SetTest() + if cmdTest.Err != nil { + return cmdTest.Err + } + cmdTest.ListTest() + if cmdTest.Err != nil { + return cmdTest.Err + } + return nil +} + +// TwemproxyTendisSSDClear twemproxy+tendisssd 集群清理 +func TwemproxyTendisSSDClear(serverIP string, clearDataDir bool) (err error) { + proxytest.TwemproxyClear(serverIP, consts.TestSSDClusterTwemproxyPort, clearDataDir) + // 清理redis + redistest.TendisSSDClear(serverIP, consts.TendisTypeTwemproxyTendisSSDInstance, + true, consts.TestTendisSSDMasterStartPort, consts.TestRedisInstanceNum) + redistest.TendisSSDClear(serverIP, consts.TendisTypeTwemproxyTendisSSDInstance, + true, consts.TestTendisSSDSlaveStartPort, consts.TestRedisInstanceNum) + return nil +} + +// RedisSceneTest TODO +func RedisSceneTest(masterIp, SlaveIp, tp string, mport, sport, num int) error { + rscene := redistest.RedisSceneTest{} + rscene.SetClusterType(tp) + rscene.SetInatances(masterIp, SlaveIp, mport, sport, num) + if err := rscene.RunRedisCheckSyncStatus(); err != nil { + return err + } + if err := rscene.RunRedisSyncParams(); err != nil { + return err + } + if err := rscene.RunRedisKillConn(); err != nil { + return err + } + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/clustertest/twemproxy_swtich.go b/dbm-services/redis/db-tools/dbactuator/tests/clustertest/twemproxy_swtich.go new file mode 100644 index 0000000000..1f1cd1d4bd --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/clustertest/twemproxy_swtich.go @@ -0,0 +1,255 @@ +package clustertest + +import ( + "dbm-services/redis/db-tools/dbactuator/mylog" + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "dbm-services/redis/db-tools/dbactuator/tests/proxytest" + "dbm-services/redis/db-tools/dbactuator/tests/redistest" + "fmt" + "path/filepath" + "strconv" + "time" +) + +// TwemproxyCacheSwitch twemproxy+cache_redis切换测试 +// 必须先成功执行 TwemproxyRedisInstanceInstall +func TwemproxyCacheSwitch(serverIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + bkdbmonPkgName, bkdbmonPkgMd5 string) (err error) { + // 设置参数 + fmt.Println(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") + fmt.Println(">>>>>>>>>>>>>>>start tendisCacheSwitchTest=================") + + // 先清理,再安装 + redistest.RedisSyncMasterClear(serverIP, consts.TendisTypeTwemproxyRedisInstance, true) + redistest.RedisSyncSlaveClear(serverIP, consts.TendisTypeTwemproxyRedisInstance, true) + + // 安装sync redis 并创建主从关系 + err = redistest.RedisSyncMasterInstall(serverIP, redisPkgName, redisPkgMd5, dbtoolsPkgName, dbtoolsPkgMd5, + consts.TendisTypeTwemproxyRedisInstance) + if err != nil { + return + } + err = redistest.RedisSyncSlaveInstall(serverIP, redisPkgName, redisPkgMd5, dbtoolsPkgName, dbtoolsPkgMd5, + consts.TendisTypeTwemproxyRedisInstance) + if err != nil { + return + } + + err = redistest.CreateReplicaof(serverIP, consts.TestSyncRedisMasterStartPort, consts.RedisTestPasswd, + serverIP, consts.TestSyncRedisSlaveStartPort, consts.RedisTestPasswd) + if err != nil { + return + } + + // [oldslave ---> newmaster] cacheRedis 建立主从关系 + fmt.Println(">>>>>>oldmaster-->oldsalve--->newmaster--->newslave>>>>>>>>>>>>>>>>>>") + replicaOfTest := redistest.RedisReplicaofTest{} + replicaOfTest.SetMasterIP(serverIP). + SetMasterPorts(consts.TestRedisSlaveStartPort, consts.TestRedisInstanceNum). + SetMasterAuth(consts.RedisTestPasswd). + SetSlaveIP(serverIP). 
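+		// Editor's note: this wires the old slaves (TestRedisSlaveStartPort..)
+		// as masters of the new sync instances (TestSyncRedisMasterStartPort..),
+		// building the oldmaster->oldslave->newmaster->newslave chain that the
+		// banner above describes.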
+ SetSlavePorts(consts.TestSyncRedisMasterStartPort, consts.TestRedisInstanceNum). + SetSlaveAuth(consts.RedisTestPasswd) + if replicaOfTest.Err != nil { + return replicaOfTest.Err + } + replicaOfTest.RunReplicaOf() + if replicaOfTest.Err != nil { + return replicaOfTest.Err + } + + installTest := redistest.BkDBmonInstallTest{} + installTest. + SetBkDbmonPkg(bkdbmonPkgName, bkdbmonPkgMd5). + SetDbtoolsPkg(dbtoolsPkgName, dbtoolsPkgMd5). + SetBackupConf(). + AppendMasterServer(serverIP, consts.TestRedisMasterStartPort, consts.TestRedisInstanceNum). + AppendMasterServer(serverIP, consts.TestSyncRedisMasterStartPort, consts.TestRedisInstanceNum) + if installTest.Err != nil { + return installTest.Err + } + // 安装bk-dbmon,开始写心跳 + installTest.InstallBkDbmon() + if installTest.Err != nil { + return installTest.Err + } + // 开始切换 + err = DoSwitchActionTest(serverIP, consts.TestTwemproxyPort, consts.TestRedisMasterStartPort, + consts.TestRedisSlaveStartPort, consts.TestSyncRedisMasterStartPort, + consts.TestSyncRedisSlaveStartPort, + consts.TendisTypeTwemproxyRedisInstance) + if err != nil { + return err + } + + // 切换成功,再次检查 twemproxy的配置中包含了 syncMasterIP:syncMasterPort + twemport := strconv.Itoa(consts.TestTwemproxyPort) + twemConfFile := filepath.Join(consts.GetRedisDataDir(), "twemproxy-0.2.4", twemport, "nutcracker."+twemport+".yml") + if util.FileExists(twemConfFile) { + var grepRet string + grepCmd := fmt.Sprintf(`grep '%s:%d' %s`, serverIP, consts.TestSyncRedisMasterStartPort, twemConfFile) + mylog.Logger.Info(grepCmd) + grepRet, err = util.RunBashCmd(grepCmd, "", nil, 10*time.Second) + if err != nil { + return + } + mylog.Logger.Info("%s %s", twemConfFile, grepRet) + } + + // 检测后端Md5 ,这里只有1个ip,就校验程序的一般性问题吧 + twemproxyMd5 := proxytest.TwemproxyBackendsMd5Test{} + twemproxyMd5.SetProxiesList([]string{serverIP}, consts.TestTwemproxyPort) + if err = twemproxyMd5.RunCheckProxyBackends(); err != nil { + return + } + + // 卸载 bk-dbmon + installTest.StopBkDbmon() + + // 旧master 和 slave 环境清理 + redistest.RedisInstanceMasterClear(serverIP, consts.TendisTypeTwemproxyRedisInstance, true) + redistest.RedisInstanceSlaveClear(serverIP, consts.TendisTypeTwemproxyRedisInstance, true) + + return nil +} + +// TwemproxyCacheSwitchRestoreEnv twemproxy+cache_redis恢复环境 +// 必须先成功执行 TwemproxyCacheSwitch +func TwemproxyCacheSwitchRestoreEnv(serverIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + bkdbmonPkgName, bkdbmonPkgMd5 string) (err error) { + // 设置参数 + fmt.Println(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") + fmt.Println(">>>>>>>>>>>>>>>start TwemproxyCacheSwitchRestoreEnv=================") + + // 先清理,再安装 + redistest.RedisInstanceMasterClear(serverIP, consts.TendisTypeTwemproxyRedisInstance, true) + redistest.RedisInstanceSlaveClear(serverIP, consts.TendisTypeTwemproxyRedisInstance, true) + + // 安装redis master/slave 并创建主从关系 + err = redistest.RedisInstanceMasterInstall(serverIP, redisPkgName, redisPkgMd5, dbtoolsPkgName, dbtoolsPkgMd5, + consts.TendisTypeTwemproxyRedisInstance) + if err != nil { + return + } + err = redistest.RedisInstanceSlaveInstall(serverIP, redisPkgName, redisPkgMd5, dbtoolsPkgName, dbtoolsPkgMd5, + consts.TendisTypeTwemproxyRedisInstance) + if err != nil { + return + } + + err = redistest.CreateReplicaof(serverIP, consts.TestRedisMasterStartPort, consts.RedisTestPasswd, + serverIP, consts.TestRedisSlaveStartPort, consts.RedisTestPasswd) + if err != nil { + return + } + + // [syncslave ---> newmaster] cacheRedis 建立主从关系 + 
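+	// Editor's note: as in TwemproxyCacheSwitch above, after the switch the
+	// test both greps the regenerated nutcracker.<port>.yml for the new master
+	// address and compares backend md5sums across proxies (see
+	// TwemproxyBackendsMd5Test), so the on-disk config and the live routing
+	// are checked independently.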
fmt.Println(">>>>>>syncmaster-->syncsalve--->newmaster--->newslave>>>>>>>>>>>>>>>>>>") + replicaOfTest := redistest.RedisReplicaofTest{} + replicaOfTest.SetMasterIP(serverIP). + SetMasterPorts(consts.TestSyncRedisSlaveStartPort, consts.TestRedisInstanceNum). + SetMasterAuth(consts.RedisTestPasswd). + SetSlaveIP(serverIP). + SetSlavePorts(consts.TestRedisMasterStartPort, consts.TestRedisInstanceNum). + SetSlaveAuth(consts.RedisTestPasswd) + if replicaOfTest.Err != nil { + return replicaOfTest.Err + } + replicaOfTest.RunReplicaOf() + if replicaOfTest.Err != nil { + return replicaOfTest.Err + } + + installTest := redistest.BkDBmonInstallTest{} + installTest. + SetBkDbmonPkg(bkdbmonPkgName, bkdbmonPkgMd5). + SetDbtoolsPkg(dbtoolsPkgName, dbtoolsPkgMd5). + SetBackupConf(). + AppendMasterServer(serverIP, consts.TestRedisMasterStartPort, consts.TestRedisInstanceNum). + AppendMasterServer(serverIP, consts.TestSyncRedisMasterStartPort, consts.TestRedisInstanceNum) + if installTest.Err != nil { + return installTest.Err + } + // 安装bk-dbmon,开始写心跳 + installTest.InstallBkDbmon() + if installTest.Err != nil { + return installTest.Err + } + // 开始切换 + err = DoSwitchActionTest(serverIP, consts.TestTwemproxyPort, + consts.TestSyncRedisMasterStartPort, consts.TestSyncRedisSlaveStartPort, + consts.TestRedisMasterStartPort, consts.TestRedisSlaveStartPort, + consts.TendisTypeTwemproxyRedisInstance) + if err != nil { + return err + } + + // 切换成功,再次检查 twemproxy的配置中包含了 syncMasterIP:syncMasterPort + twemport := strconv.Itoa(consts.TestTwemproxyPort) + twemConfFile := filepath.Join(consts.GetRedisDataDir(), "twemproxy-0.2.4", twemport, "nutcracker."+twemport+".yml") + if util.FileExists(twemConfFile) { + var grepRet string + grepCmd := fmt.Sprintf(`grep '%s:%d' %s`, serverIP, consts.TestRedisMasterStartPort, twemConfFile) + mylog.Logger.Info(grepCmd) + grepRet, err = util.RunBashCmd(grepCmd, "", nil, 10*time.Second) + if err != nil { + return + } + mylog.Logger.Info("%s %s", twemConfFile, grepRet) + } + + // 卸载 bk-dbmon + installTest.StopBkDbmon() + + // sync master 和 slave 环境清理 + redistest.RedisSyncMasterClear(serverIP, consts.TendisTypeTwemproxyRedisInstance, true) + redistest.RedisSyncSlaveClear(serverIP, consts.TendisTypeTwemproxyRedisInstance, true) + + return nil +} + +// DoSwitchActionTest 执行切换 +func DoSwitchActionTest(serverIP string, proxyPort, cmaster, cslave, syncMaster, syncSlave int, ctp string) error { + tendisSwitchTest := redistest.RedisSwitchTest{} + var maddrs, saddrs []string + for i := 0; i < 4; i++ { + if ctp == consts.TendisTypeTwemproxyRedisInstance { + maddrs = append(maddrs, fmt.Sprintf("%s:%d %d-%d", serverIP, cmaster+i, i, i)) + saddrs = append(saddrs, fmt.Sprintf("%s:%d %d-%d", serverIP, cslave+i, i, i)) + } else { + maddrs = append(maddrs, fmt.Sprintf("%s:%d", serverIP, cmaster+i)) + saddrs = append(saddrs, fmt.Sprintf("%s:%d", serverIP, cslave+i)) + } + } + + sinf := []atomredis.InstanceSwitchParam{} + for i := 0; i < 4; i++ { + sinf = append(sinf, atomredis.InstanceSwitchParam{ + MasterInfo: atomredis.InstanceParam{IP: serverIP, Port: cmaster + i}, + SlaveInfo: atomredis.InstanceParam{IP: serverIP, Port: syncMaster + i}, + }) + } + + tendisSwitchTest.SetDefaultClusterMeta(consts.ProxyTestPasswd, consts.RedisTestPasswd). + SetProxySet(fmt.Sprintf("%s:%d", serverIP, proxyPort)).SetClusterType(ctp). + SetMasterSet(maddrs). + SetSlaveSet(saddrs).SetDefaultSwitchCondition("mms"). 
+ SetSwitchInfo(sinf) + + if ctp == consts.TendisTypeTwemproxyRedisInstance { + tendisSwitchTest.SetDefaultSwitchCondition("msms") + } + + if tendisSwitchTest.Err != nil { + return tendisSwitchTest.Err + } + + tendisSwitchTest.RunTendisSwitch() + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/proxytest/proxy_install.go b/dbm-services/redis/db-tools/dbactuator/tests/proxytest/proxy_install.go new file mode 100644 index 0000000000..dc318be935 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/proxytest/proxy_install.go @@ -0,0 +1,442 @@ +package proxytest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "path/filepath" + "strconv" + "strings" + "time" +) + +// PredixyInstallTest predixy安装测试 +type PredixyInstallTest struct { + atomproxy.PredixyConfParams + Err error `json:"-"` +} + +// SetIP set ip,传入为空则自动获取本地ip +func (test *PredixyInstallTest) SetIP(ip string) *PredixyInstallTest { + if test.Err != nil { + return test + } + if ip == "" || ip == "127.0.0.1" { + ip, test.Err = util.GetLocalIP() + if test.Err != nil { + return test + } + } + test.IP = ip + return test +} + +// SetPort set port +func (test *PredixyInstallTest) SetPort(port int) *PredixyInstallTest { + if test.Err != nil { + return test + } + test.Port = port + return test +} + +// SetPkg set pkg信息,传入为空则pkg=predixy-1.4.0.tar.gz,pkgMd5=9a863ce100bfe6138523d046c068f49c +func (test *PredixyInstallTest) SetPkg(pkg, pkgMd5 string) *PredixyInstallTest { + if test.Err != nil { + return test + } + if pkg == "" || pkgMd5 == "" { + pkg = "predixy-1.4.0.tar.gz" + pkgMd5 = "9a863ce100bfe6138523d046c068f49c" + } + test.Pkg = pkg + test.PkgMd5 = pkgMd5 + return test +} + +// SetProxyPassword set proxy password,传入为空则password=xxxx +func (test *PredixyInstallTest) SetProxyPassword(proxyPasswd string) *PredixyInstallTest { + if test.Err != nil { + return test + } + if proxyPasswd == "" { + proxyPasswd = "xxxx" + } + test.PredixyPasswd = proxyPasswd + return test +} + +// SetRedisPassword set password,传入为空则password=xxxx +func (test *PredixyInstallTest) SetRedisPassword(redisPasswd string) *PredixyInstallTest { + if test.Err != nil { + return test + } + if redisPasswd == "" { + redisPasswd = "xxxx" + } + test.RedisPasswd = redisPasswd + return test +} + +// SetServers 设置 servers +func (test *PredixyInstallTest) SetServers(servers []string) *PredixyInstallTest { + if test.Err != nil { + return test + } + if len(servers) == 0 { + test.Err = fmt.Errorf("PredixyInstallTest servers cannot be empty") + fmt.Println(test.Err.Error()) + return test + } + test.Servers = servers + return test +} + +// SetOtherParamsDefault 设置其他参数的默认值 +func (test *PredixyInstallTest) SetOtherParamsDefault() *PredixyInstallTest { + if test.Err != nil { + return test + } + test.DbConfig.WorkerThreads = "4" + test.DbConfig.ClientTimeout = "0" + test.DbConfig.RefreshInterval = "1" + test.DbConfig.ServerFailureLimit = "10" + test.DbConfig.ServerRetryTimeout = "1" + test.DbConfig.KeepAlive = "0" + test.DbConfig.ServerTimeout = "0" + return test +} + +// RunPredixyInstall 安装predixy +func (test *PredixyInstallTest) RunPredixyInstall() { + msg := fmt.Sprintf("=========PredixyIntall test start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========PredixyIntall test fail============") + } else { + msg = fmt.Sprintf("=========PredixyIntall 
test success============") + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test) + // fmt.Printf("-------payload(raw)--------\n%s\n\n", string(paramBytes)) + // encodeStr := base64.StdEncoding.EncodeToString(paramBytes) + instllCmd := fmt.Sprintf(consts.ActuatorTestCmd, atomproxy.NewPredixyInstall().Name(), string(paramBytes)) + fmt.Println(instllCmd) + _, test.Err = util.RunBashCmd(instllCmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + return +} + +func predixyStillRunning() bool { + grepCmd := `ps aux|grep "/usr/local/predixy/bin"|grep -v grep || true;` + ret, _ := util.RunBashCmd(grepCmd, "", nil, 10*time.Second) + ret = strings.TrimSpace(ret) + if ret != "" { + return true + } + return false +} + +// ClearPredixy 清理predixy环境 +// 关闭predixy进程,清理数据目录,清理 /usr/local/predixy +func (test *PredixyInstallTest) ClearPredixy(clearDataDir bool) { + var dir string + var isUsing bool + isUsing, _ = util.CheckPortIsInUse(test.IP, strconv.Itoa(test.Port)) + if isUsing { + killCmd := fmt.Sprintf("ps aux|grep predixy|grep -v grep|grep %d|awk '{print $2}'|xargs kill -9", test.Port) + util.RunBashCmd(killCmd, "", nil, 1*time.Minute) + } + + if clearDataDir { + dataDir := consts.GetRedisDataDir() + predixyDir := filepath.Join(dataDir, "predixy", strconv.Itoa(test.Port)) + if util.FileExists(predixyDir) { + util.RunBashCmd("rm -rf "+predixyDir, "", nil, 1*time.Minute) + } + } + + if !predixyStillRunning() { + dir = "/usr/local/predixy" + if util.FileExists(dir) { + fmt.Println("rm -rf " + dir) + util.RunBashCmd("rm -rf "+dir, "", nil, 1*time.Minute) + } + dir = filepath.Join("/usr/local", test.GePkgBaseName()) + if util.FileExists(dir) { + fmt.Println("rm -rf " + dir) + util.RunBashCmd("rm -rf "+dir, "", nil, 1*time.Minute) + } + } +} + +// TwemproxyInstallTest twemproxy 安装测试 +type TwemproxyInstallTest struct { + atomproxy.TwemproxyInstallParams + Err error `json:"-"` +} + +// SetIP set ip,传入为空则自动获取本地ip +func (test *TwemproxyInstallTest) SetIP(ip string) *TwemproxyInstallTest { + if test.Err != nil { + return test + } + if ip == "" || ip == "127.0.0.1" { + ip, test.Err = util.GetLocalIP() + if test.Err != nil { + return test + } + } + test.IP = ip + return test +} + +// SetPort set port +func (test *TwemproxyInstallTest) SetPort(port int) *TwemproxyInstallTest { + if test.Err != nil { + return test + } + test.Port = port + return test +} + +// SetDbType 设置DbType,默认为 TwemproxyRedisInstance +func (test *TwemproxyInstallTest) SetDbType(dbType string) *TwemproxyInstallTest { + if test.Err != nil { + return test + } + if dbType == "" { + dbType = "TwemproxyRedisInstance" + } + test.DbType = dbType + return test +} + +// SetPkg set pkg信息,传入为空则pkg=twemproxy-0.4.1-v23.tar.gz,pkgMd5=41850e44bebfce84ebd4d0cf4cce6833 +func (test *TwemproxyInstallTest) SetPkg(pkg, pkgMd5 string) *TwemproxyInstallTest { + if test.Err != nil { + return test + } + if pkg == "" || pkgMd5 == "" { + pkg = "twemproxy-0.4.1-v23.tar.gz" + pkgMd5 = "41850e44bebfce84ebd4d0cf4cce6833" + } + test.Pkg = pkg + test.PkgMd5 = pkgMd5 + return test +} + +// SetProxyPassword set proxy password,传入为空则password=xxxx +func (test *TwemproxyInstallTest) SetProxyPassword(proxyPasswd string) *TwemproxyInstallTest { + if test.Err != nil { + return test + } + if proxyPasswd == "" { + proxyPasswd = "xxxx" + } + test.Password = proxyPasswd + return test +} + +// SetRedisPassword set password,传入为空则password=xxxx +func (test *TwemproxyInstallTest) SetRedisPassword(redisPasswd string) *TwemproxyInstallTest { + if test.Err != nil { + return 
test + } + if redisPasswd == "" { + redisPasswd = "xxxx" + } + test.RedisPassword = redisPasswd + return test +} + +// SetServers 设置 servers +func (test *TwemproxyInstallTest) SetServers(servers []string) *TwemproxyInstallTest { + if test.Err != nil { + return test + } + if len(servers) == 0 { + test.Err = fmt.Errorf("TwemproxyInstallTest servers cannot be empty") + fmt.Println(test.Err.Error()) + return test + } + test.Servers = servers + return test +} + +// RunTwemproxyInstall 安装twemproxy +func (test *TwemproxyInstallTest) RunTwemproxyInstall() { + msg := fmt.Sprintf("=========TwemproxyInstall test start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========TwemproxyInstall test fail============") + } else { + msg = fmt.Sprintf("=========TwemproxyInstall test success============") + } + fmt.Println(msg) + }() + + test.ConfConfigs = map[string]interface{}{ + "hash_tag": "{}", + "server_failure_limit": "3", + "slowms": "1000000", + "backlog": "512", + "redis": "true", + "distribution": "modhash", + "hash": "fnv1a_64", + "auto_eject_hosts": "false", + "preconnect": "false", + "server_retry_timeout": "2000", + "server_connections": "1", + "mbuf-size": "1024", + } + + paramBytes, _ := json.Marshal(test) + // fmt.Printf("-------payload(raw)--------\n%s\n\n", string(paramBytes)) + // encodeStr := base64.StdEncoding.EncodeToString(paramBytes) + instllCmd := fmt.Sprintf(consts.ActuatorTestCmd, atomproxy.NewTwemproxyInstall().Name(), string(paramBytes)) + fmt.Println(instllCmd) + _, test.Err = util.RunBashCmd(instllCmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + return +} + +func twemproxyStillRunning() bool { + grepCmd := `ps aux|grep nutcracker|grep -v grep || true;` + ret, _ := util.RunBashCmd(grepCmd, "", nil, 10*time.Second) + ret = strings.TrimSpace(ret) + if ret != "" { + return true + } + return false +} + +// ClearTwemproxy 清理twemproxy环境 +// 关闭twemproxy进程,清理数据目录,清理 /usr/local/twemproxy +func (test *TwemproxyInstallTest) ClearTwemproxy(clearDataDir bool) { + var dir string + var isUsing bool + isUsing, _ = util.CheckPortIsInUse(test.IP, strconv.Itoa(test.Port)) + if isUsing { + killCmd := fmt.Sprintf("ps aux|grep nutcracker|grep -v grep|grep %d|awk '{print $2}'|xargs kill -9", test.Port) + util.RunBashCmd(killCmd, "", nil, 1*time.Minute) + } + + if clearDataDir { + dataDir := consts.GetRedisDataDir() + twemDir := filepath.Join(dataDir, "twemproxy-0.2.4", strconv.Itoa(test.Port)) + if util.FileExists(twemDir) { + fmt.Println("rm -rf " + twemDir) + util.RunBashCmd("rm -rf "+twemDir, "", nil, 1*time.Minute) + } + } + + if !twemproxyStillRunning() { + dir = "/usr/local/twemproxy" + if util.FileExists(dir) { + fmt.Println("rm -rf " + dir) + util.RunBashCmd("rm -rf "+dir, "", nil, 1*time.Minute) + } + dir = filepath.Join("/usr/local", test.GePkgBaseName()) + if util.FileExists(dir) { + fmt.Println("rm -rf " + dir) + util.RunBashCmd("rm -rf "+dir, "", nil, 1*time.Minute) + } + } +} + +// TwemproxyInstall twemproxy安装 并 绑定集群关系 +func TwemproxyInstall(serverIP, twemproxyPkgName, twemproxyPkgMd5, dbType string, + masterStartPort, instNum, twemPort int) (err error) { + // 建立twemproxy+cacheRedis集群关系 + twemTest := TwemproxyInstallTest{} + servers := make([]string, 0, instNum) + var segStart int + var segEnd int + segStep := (consts.TwemproxyMaxSegment + 1) / instNum + for i := 0; i < instNum; i++ { + segStart = i * segStep + segEnd = (i+1)*segStep - 1 + servers = append(servers, fmt.Sprintf("%s:%d:1 testapp %d-%d 1", serverIP, 
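+			// Editor's note: each entry is "ip:port:weight app segStart-segEnd 1";
+			// segStep = (TwemproxyMaxSegment+1)/instNum splits the hash space
+			// evenly. For example, assuming TwemproxyMaxSegment+1 == 420000 (the
+			// constant's value is not shown here) and 4 instances, instance 0
+			// serves 0-104999, instance 1 serves 105000-209999, and so on.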
masterStartPort+i, + segStart, + segEnd)) + } + + twemTest.SetIP(serverIP).SetPort(twemPort). + SetPkg(twemproxyPkgName, twemproxyPkgMd5). + SetProxyPassword(consts.ProxyTestPasswd). + SetRedisPassword(consts.RedisTestPasswd). + SetDbType(dbType). + SetServers(servers) + if twemTest.Err != nil { + return twemTest.Err + } + // 先清理,再安装 + twemTest.ClearTwemproxy(true) + + twemTest.RunTwemproxyInstall() + if twemTest.Err != nil { + return twemTest.Err + } + return nil +} + +// TwemproxyClear twemproxy 下架与清理 +func TwemproxyClear(serverIP string, port int, clearDataDir bool) { + twemTest := TwemproxyInstallTest{} + twemTest.SetIP(serverIP).SetPort(port).SetProxyPassword(consts.ProxyTestPasswd) + if twemTest.Err != nil { + return + } + twemTest.ClearTwemproxy(clearDataDir) +} + +// PredixyInstall predixy安装 +func PredixyInstall(serverIP, predixyPkgName, predixyPkgMd5, dbType string, + startPort, instNum, predixyPort int) (err error) { + // predixy 安装并建立redis关系 + servers := make([]string, 0, instNum) + for i := 0; i < instNum; i++ { + servers = append(servers, fmt.Sprintf("%s:%d", serverIP, startPort+i)) + } + predixyTest := PredixyInstallTest{} + predixyTest.SetIP(serverIP).SetPort(predixyPort). + SetPkg(predixyPkgName, predixyPkgMd5). + SetProxyPassword(consts.ProxyTestPasswd). + SetRedisPassword(consts.RedisTestPasswd). + SetServers(servers). + SetOtherParamsDefault() + if predixyTest.Err != nil { + return predixyTest.Err + } + // 先清理,再安装 + predixyTest.ClearPredixy(true) + + predixyTest.RunPredixyInstall() + if predixyTest.Err != nil { + return predixyTest.Err + } + return nil +} + +// PredixyClear predixy 下架与清理 +func PredixyClear(serverIP string, port int, clearDataDir bool) { + predixyTest := PredixyInstallTest{} + predixyTest.SetIP(serverIP).SetPort(port).SetProxyPassword(consts.ProxyTestPasswd) + if predixyTest.Err != nil { + return + } + predixyTest.ClearPredixy(clearDataDir) +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/proxytest/proxy_operate.go b/dbm-services/redis/db-tools/dbactuator/tests/proxytest/proxy_operate.go new file mode 100644 index 0000000000..0ecd04dfb7 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/proxytest/proxy_operate.go @@ -0,0 +1,226 @@ +package proxytest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "time" +) + +// TwemproxyOperateTest 启停测试 +type TwemproxyOperateTest struct { + atomproxy.TwemproxyOperateParams + Err error `json:"-"` +} + +// SetIP set ip,传入为空则自动获取本地ip +func (test *TwemproxyOperateTest) SetIP(ip string) *TwemproxyOperateTest { + if test.Err != nil { + return test + } + if ip == "" || ip == "127.0.0.1" { + ip, test.Err = util.GetLocalIP() + if test.Err != nil { + return test + } + } + test.IP = ip + return test +} + +// SetPort set port +func (test *TwemproxyOperateTest) SetPort(port int) *TwemproxyOperateTest { + if test.Err != nil { + return test + } + test.Port = port + return test +} + +// SetOp set op +func (test *TwemproxyOperateTest) SetOp(op string) *TwemproxyOperateTest { + if test.Err != nil { + return test + } + test.Operate = op + return test +} + +// SetTest set test +func (test *TwemproxyOperateTest) SetTest() *TwemproxyOperateTest { + if test.Err != nil { + return test + } + test.Debug = true + return test +} + +// RunTwemproxyOpenClose twemproxy启停 +func (test *TwemproxyOperateTest) RunTwemproxyOpenClose() { + msg := 
fmt.Sprintf("=========RunTwemproxyOpenClose %s test start============", test.Operate) + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========RunTwemproxyOpenClose %s test fail============", test.Operate) + } else { + msg = fmt.Sprintf("=========RunTwemproxyOpenClose %s test success============", test.Operate) + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test) + cmd := fmt.Sprintf(consts.ActuatorTestCmd, atomproxy.NewTwemproxyOperate().Name(), string(paramBytes)) + fmt.Println(cmd) + _, test.Err = util.RunBashCmd(cmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + return +} + +// PredixyOperateTest 启停测试 +type PredixyOperateTest struct { + atomproxy.PredixyOperateParams + Err error `json:"-"` +} + +// SetIP set ip,传入为空则自动获取本地ip +func (test *PredixyOperateTest) SetIP(ip string) *PredixyOperateTest { + if test.Err != nil { + return test + } + if ip == "" || ip == "127.0.0.1" { + ip, test.Err = util.GetLocalIP() + if test.Err != nil { + return test + } + } + test.IP = ip + return test +} + +// SetPort set port +func (test *PredixyOperateTest) SetPort(port int) *PredixyOperateTest { + if test.Err != nil { + return test + } + test.Port = port + return test +} + +// SetOp set op +func (test *PredixyOperateTest) SetOp(op string) *PredixyOperateTest { + if test.Err != nil { + return test + } + test.Operate = op + return test +} + +// SetTest set test +func (test *PredixyOperateTest) SetTest() *PredixyOperateTest { + if test.Err != nil { + return test + } + test.Debug = true + return test +} + +// RunPredixyOpenClose Predixy启停 +func (test *PredixyOperateTest) RunPredixyOpenClose() { + msg := fmt.Sprintf("=========RunPredixyOpenClose %s test start============", test.Operate) + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========RunPredixyOpenClose %s test fail============", test.Operate) + } else { + msg = fmt.Sprintf("=========RunPredixyOpenClose %s test success============", test.Operate) + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test) + cmd := fmt.Sprintf(consts.ActuatorTestCmd, atomproxy.NewPredixyOperate().Name(), string(paramBytes)) + fmt.Println(cmd) + _, test.Err = util.RunBashCmd(cmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + return +} + +// PredixyOpenClose predixy操作测试 +func PredixyOpenClose(serverIP string) (err error) { + predixyOpenCloseTest := PredixyOperateTest{} + predixyOpenCloseTest.SetIP(serverIP).SetPort(consts.TestPredixyPort).SetOp(consts.ProxyStop) + if predixyOpenCloseTest.Err != nil { + return + } + predixyOpenCloseTest.RunPredixyOpenClose() + if predixyOpenCloseTest.Err != nil { + return + } + predixyOpenCloseTest.SetIP(serverIP).SetPort(consts.TestPredixyPort).SetOp(consts.ProxyStart) + if predixyOpenCloseTest.Err != nil { + return + } + predixyOpenCloseTest.RunPredixyOpenClose() + if predixyOpenCloseTest.Err != nil { + return + } + return nil +} + +// PredixyShutdown predixy关闭测试 +func PredixyShutdown(serverIP string) (err error) { + predixyOpenCloseTest := PredixyOperateTest{} + predixyOpenCloseTest.SetIP(serverIP).SetPort(consts.TestPredixyPort).SetOp(consts.ProxyShutdown).SetTest() + if predixyOpenCloseTest.Err != nil { + return + } + predixyOpenCloseTest.RunPredixyOpenClose() + if predixyOpenCloseTest.Err != nil { + return + } + return nil +} + +// TwemproxyOpenClose twemproxy操作测试 +func TwemproxyOpenClose(serverIP string) (err error) { + twempOpenCloseTest := TwemproxyOperateTest{} + 
twempOpenCloseTest.SetIP(serverIP).SetPort(consts.TestTwemproxyPort).SetOp(consts.ProxyStop) + if twempOpenCloseTest.Err != nil { + return + } + twempOpenCloseTest.RunTwemproxyOpenClose() + if twempOpenCloseTest.Err != nil { + return + } + twempOpenCloseTest.SetIP(serverIP).SetPort(consts.TestTwemproxyPort).SetOp(consts.ProxyStart) + if twempOpenCloseTest.Err != nil { + return + } + twempOpenCloseTest.RunTwemproxyOpenClose() + if twempOpenCloseTest.Err != nil { + return + } + return nil +} + +// TwemproxyShutDown twemproxy关闭测试 +func TwemproxyShutDown(localIP string) (err error) { + twempOpenCloseTest := TwemproxyOperateTest{} + twempOpenCloseTest.SetIP(localIP).SetPort(consts.TestTwemproxyPort).SetOp(consts.ProxyShutdown).SetTest() + if twempOpenCloseTest.Err != nil { + return + } + twempOpenCloseTest.RunTwemproxyOpenClose() + if twempOpenCloseTest.Err != nil { + return + } + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/proxytest/proxytest.go b/dbm-services/redis/db-tools/dbactuator/tests/proxytest/proxytest.go new file mode 100644 index 0000000000..c2ef880821 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/proxytest/proxytest.go @@ -0,0 +1,55 @@ +// Package proxytest proxy test +package proxytest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "time" + // "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomproxy" + // "dbm-services/redis/db-tools/dbactuator/pkg/consts" + // "dbm-services/redis/db-tools/dbactuator/pkg/util" +) + +// TwemproxyBackendsMd5Test TODO +// RedisBackupTest 场景需求测试 +type TwemproxyBackendsMd5Test struct { + proxies atomproxy.ProxyCheckParam + // proxies []atomproxy.ProxyInstances + Err error `json:"-"` +} + +// SetProxiesList TODO +// SetInatanceList 可配置,干掉老链接、检查同步状态 场景 +func (test *TwemproxyBackendsMd5Test) SetProxiesList(ips []string, port int) { + if test.Err != nil { + return + } + + for _, ip := range ips { + test.proxies.Instances = append(test.proxies.Instances, atomproxy.ProxyInstances{IP: ip, Port: port}) + } +} + +// RunCheckProxyBackends 检查proxy backends 是否一致 +func (test *TwemproxyBackendsMd5Test) RunCheckProxyBackends() error { + msg := fmt.Sprintf("=========CheckProxyBackendsTest start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========CheckProxyBackendsTest fail============") + } else { + msg = fmt.Sprintf("=========CheckProxyBackendsTest success============") + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test.proxies) + cmd := fmt.Sprintf(consts.ActuatorTestCmd, atomproxy.NewTwemproxySceneCheckBackends().Name(), string(paramBytes)) + fmt.Println(cmd) + _, test.Err = util.RunBashCmd(cmd, "", nil, 1*time.Hour) + return test.Err +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/bkdbmon_install.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/bkdbmon_install.go new file mode 100644 index 0000000000..a3606a92fc --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/bkdbmon_install.go @@ -0,0 +1,265 @@ +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/mylog" + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "path/filepath" + "time" +) + +// BkDBmonInstallTest 
安装bk-dbmon测试 +type BkDBmonInstallTest struct { + atomredis.BkDbmonInstallParams + Err error `json:"-"` +} + +// SetBkDbmonPkg 设置 bk-dbmon pkg信息 +func (test *BkDBmonInstallTest) SetBkDbmonPkg(pkg, pkgMd5 string) *BkDBmonInstallTest { + if test.Err != nil { + return test + } + if pkg == "" || pkgMd5 == "" { + pkg = "bk-dbmon-v0.2.tar.gz" + pkgMd5 = "99081e28443d0615b151ae82e74b69e4" + } + test.BkDbmonPkg.Pkg = pkg + test.BkDbmonPkg.PkgMd5 = pkgMd5 + return test +} + +// SetDbtoolsPkg set dbtools pkg信息,传入为空则 pkg=dbtools.tar.gz, pkgMd5=334cf6e3b84d371325052d961584d5aa +func (test *BkDBmonInstallTest) SetDbtoolsPkg(pkg, pkgMd5 string) *BkDBmonInstallTest { + if test.Err != nil { + return test + } + if pkg == "" || pkgMd5 == "" { + pkg = "dbtools.tar.gz" + pkgMd5 = "334cf6e3b84d371325052d961584d5aa" + } + test.DbToolsPkg.Pkg = pkg + test.DbToolsPkg.PkgMd5 = pkgMd5 + return test +} + +// SetBackupConf 设置备份配置 +func (test *BkDBmonInstallTest) SetBackupConf() *BkDBmonInstallTest { + if test.Err != nil { + return test + } + test.GsePath = "/usr/local/gse_bkte" + test.RedisFullBackup = map[string]interface{}{ + "to_backup_system": "no", + "old_file_left_day": 2, + "cron": "0 5,13,21 * * *", + "tar_split": true, + "tar_split_part_size": "8G", + } + test.RedisBinlogBackup = map[string]interface{}{ + "to_backup_system": "no", + "old_file_left_day": 2, + "cron": "@every 10m", + } + test.RedisHeartbeat = map[string]interface{}{ + "cron": "@every 10s", + } + test.RedisMonitor = map[string]interface{}{ + "bkmonitor_event_data_id": 542898, + "bkmonitor_event_token": "xxxxxx", + "bkmonitor_metric_data_id": 11111, + "bkmonitor_metirc_token": "xxxx", + "cron": "@every 1m", + } + test.RedisKeyLifecyckle = map[string]interface{}{ + "stat_dir": "/data/dbbak/keylifecycle", + "cron": fmt.Sprintf("%d %d * * *", time.Now().Minute()+1, time.Now().Hour()), + "hotkey_conf": map[string]interface{}{ + "top_count": 10, + "duration_seconds": 30, + }, + "bigkey_conf": map[string]interface{}{ + "top_count": 10, + "duration_seconds": 60 * 60 * 5, + "on_master": false, + "use_rdb": true, + "disk_max_usage": 65, + "keymod_spec": "[]", + "keymod_engine": "default", + }, + } + return test +} + +// AppendMasterServer append master server +func (test *BkDBmonInstallTest) AppendMasterServer(masterIP string, startPort, instNum int) *BkDBmonInstallTest { + if test.Err != nil { + return test + } + ports := make([]int, 0, instNum) + if startPort == 0 { + startPort = consts.TestTendisPlusMasterStartPort + } + if instNum == 0 { + instNum = 4 + } + for i := 0; i < instNum; i++ { + ports = append(ports, startPort+i) + } + svrItem := atomredis.ConfServerItem{ + BkBizID: "200500194", + BkCloudID: 246, + ClusterDomain: "tendisx.aaaa.testapp.db", + MetaRole: consts.MetaRoleRedisMaster, + ServerIP: masterIP, + ServerPorts: ports, + } + test.Servers = append(test.Servers, svrItem) + return test +} + +// OnlyAEmptyServer 将servers中只保留一个实例,且实例ports=[]int{}为空 +func (test *BkDBmonInstallTest) OnlyAEmptyServer(ip string) *BkDBmonInstallTest { + if test.Err != nil { + return test + } + svrItem := atomredis.ConfServerItem{ + BkBizID: "", + BkCloudID: 0, + ClusterDomain: "", + MetaRole: consts.MetaRoleRedisMaster, + ServerIP: ip, + ServerPorts: []int{}, + } + test.Servers = []atomredis.ConfServerItem{svrItem} + return test +} + +// AppendSlaveServer append slave server +func (test *BkDBmonInstallTest) AppendSlaveServer(slaveIP string, startPort, instNum int) *BkDBmonInstallTest { + if test.Err != nil { + return test + } + ports := make([]int, 0, instNum) + 
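One detail worth noting in SetBackupConf above: the key-lifecycle cron is built as "Minute()+1 Hour() * * *", which yields an invalid minute field (60) when the test starts at minute 59, and an hour that no longer matches once the clock rolls over. A hedged sketch of an overflow-safe variant that derives both fields from the same shifted timestamp:

package main

import (
	"fmt"
	"time"
)

// cronOneMinuteFromNow returns a "run one minute from now" cron spec without
// the minute-overflow problem of incrementing Minute() in place.
func cronOneMinuteFromNow(now time.Time) string {
	next := now.Add(time.Minute)
	return fmt.Sprintf("%d %d * * *", next.Minute(), next.Hour())
}

func main() {
	fmt.Println(cronOneMinuteFromNow(time.Now()))
	// At 13:59 this prints "0 14 * * *" rather than the invalid "60 13 * * *".
}
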
if startPort == 0 { + startPort = consts.TestTendisPlusSlaveStartPort + } + if instNum == 0 { + instNum = 4 + } + for i := 0; i < instNum; i++ { + ports = append(ports, startPort+i) + } + svrItem := atomredis.ConfServerItem{ + BkBizID: "200500194", + BkCloudID: 246, + App: "testapp", + AppName: "测试app", + ClusterDomain: "tendisx.aaaa.testapp.db", + ClusterName: "aaaa", + ClusterType: consts.TendisTypePredixyTendisplusCluster, + MetaRole: consts.MetaRoleRedisSlave, + ServerIP: slaveIP, + ServerPorts: ports, + } + test.Servers = append(test.Servers, svrItem) + return test +} + +// InstallBkDbmon 安装bk-dbmon +func (test *BkDBmonInstallTest) InstallBkDbmon() { + msg := fmt.Sprintf("=========Install_bkDbmon test start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========Install_bkDbmon test fail============") + } else { + msg = fmt.Sprintf("=========Install_bkDbmon test success============") + } + fmt.Println(msg) + }() + paramBytes, _ := json.Marshal(test) + // fmt.Printf("-------payload(raw)--------\n%s\n\n", string(paramBytes)) + // encodeStr := base64.StdEncoding.EncodeToString(paramBytes) + instllCmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewBkDbmonInstall().Name(), string(paramBytes)) + fmt.Println(instllCmd) + _, test.Err = util.RunBashCmd(instllCmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + return +} + +// StopBkDbmon stop bk-dbmon +func (test *BkDBmonInstallTest) StopBkDbmon() (err error) { + if util.FileExists(consts.BkDbmonBin) { + stopScript := filepath.Join(consts.BkDbmonPath, "stop.sh") + stopCmd := fmt.Sprintf("su %s -c '%s'", consts.MysqlAaccount, "sh "+stopScript) + mylog.Logger.Info(stopCmd) + _, err = util.RunLocalCmd("su", []string{consts.MysqlAaccount, "-c", "sh " + stopScript}, + "", nil, 1*time.Minute) + return + } + killCmd := ` +pid=$(ps aux|grep 'bk-dbmon --config'|grep -v dbactuator|grep -v grep|awk '{print $2}') +if [[ -n $pid ]] +then +kill $pid +fi +` + mylog.Logger.Info(killCmd) + _, err = util.RunBashCmd(killCmd, "", nil, 1*time.Minute) + return +} + +var ( + bkdbmonTest BkDBmonInstallTest = BkDBmonInstallTest{} +) + +// BkDbmonInstall bk-dbmon安装测试 +func BkDbmonInstall(serverIP, dbtoolsPkgName, dbtoolsPkgMd5, bkdbmonPkgName, bkdbmonPkgMd5, dbType string) (err error) { + bkdbmonTest = BkDBmonInstallTest{} + masterStartPort := 0 + slaveStartPort := 0 + if consts.IsRedisInstanceDbType(dbType) { + masterStartPort = consts.TestRedisMasterStartPort + slaveStartPort = consts.TestRedisSlaveStartPort + } else if consts.IsTendisplusInstanceDbType(dbType) { + masterStartPort = consts.TestTendisPlusMasterStartPort + slaveStartPort = consts.TestTendisPlusSlaveStartPort + } + bkdbmonTest. + SetBkDbmonPkg(bkdbmonPkgName, bkdbmonPkgMd5). + SetDbtoolsPkg(dbtoolsPkgName, dbtoolsPkgMd5). + SetBackupConf(). + AppendMasterServer(serverIP, masterStartPort, consts.TestRedisInstanceNum). + AppendSlaveServer(serverIP, slaveStartPort, consts.TestRedisInstanceNum) + if bkdbmonTest.Err != nil { + return + } + bkdbmonTest.InstallBkDbmon() + return bkdbmonTest.Err +} + +// BkDbmonStop bk-dbmon stop +func BkDbmonStop() (err error) { + return bkdbmonTest.StopBkDbmon() +} + +// BkDbmonStopNew bk-dbmon stop +func BkDbmonStopNew(serverIP, dbtoolsPkgName, dbtoolsPkgMd5, bkdbmonPkgName, bkdbmonPkgMd5 string) (err error) { + bkdbmonTest = BkDBmonInstallTest{} + bkdbmonTest. + SetBkDbmonPkg(bkdbmonPkgName, bkdbmonPkgMd5). + SetDbtoolsPkg(dbtoolsPkgName, dbtoolsPkgMd5). + SetBackupConf(). 
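The chainable setters used throughout these test types (SetBkDbmonPkg, SetDbtoolsPkg, SetBackupConf, ...) all follow one convention: check Err first and no-op if it is set, so a long chain needs only a single error check at the end. A distilled sketch of that error-accumulating builder pattern:

package main

import (
	"errors"
	"fmt"
)

type builder struct {
	ip   string
	port int
	Err  error
}

// SetIP records the first failure and turns later setters into no-ops.
func (b *builder) SetIP(ip string) *builder {
	if b.Err != nil {
		return b
	}
	if ip == "" {
		b.Err = errors.New("ip cannot be empty")
		return b
	}
	b.ip = ip
	return b
}

func (b *builder) SetPort(port int) *builder {
	if b.Err != nil {
		return b
	}
	b.port = port
	return b
}

func main() {
	b := &builder{}
	b.SetIP("").SetPort(30000) // SetPort is skipped once SetIP has failed
	fmt.Println(b.Err)         // -> ip cannot be empty
}
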
+ OnlyAEmptyServer(serverIP) + if bkdbmonTest.Err != nil { + return + } + bkdbmonTest.InstallBkDbmon() + return bkdbmonTest.Err +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/commands.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/commands.go new file mode 100644 index 0000000000..8a184058bf --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/commands.go @@ -0,0 +1,263 @@ +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/models/myredis" + "fmt" + "strconv" + + "github.com/go-redis/redis/v8" +) + +// CommandTest 命令测试 +type CommandTest struct { + IP string `json:"ip"` + Port int `json:"port"` + Password string `json:"password"` + DbType string `json:"db_type"` + Database int `json:"database"` + Err error `json:"-"` + client *myredis.RedisClient `json:"-"` +} + +// NewCommandTest new +func NewCommandTest(ip string, port int, password, dbType string, database int) (ret *CommandTest, err error) { + ret = &CommandTest{ + IP: ip, + Port: port, + Password: password, + DbType: dbType, + Database: database, + } + addr := ip + ":" + strconv.Itoa(port) + ret.client, err = myredis.NewRedisClient(addr, password, database, dbType) + return +} + +// StringTest string命令测试 +func (test *CommandTest) StringTest() { + var vals []interface{} + var k01, v01 string + for i := 0; i < 100; i++ { + vals = make([]interface{}, 0, 2) + k01 = "test_string_" + strconv.Itoa(i) + v01 = "v" + strconv.Itoa(i) + vals = append(vals, k01, v01) + _, test.Err = test.client.Mset(vals) + if test.Err != nil { + return + } + } +} + +// KeyTypeCheck key 过期时间 +// 说明:集群写入 400 : *string* ,*hash*,*list*,*set*, 各100个key,提取&删除 *hash* 和 *set* 共200 +// 所以 *hash* *set* ttl的值为 none; *string* 的ttl 为string ;*list*, 的ttl 为list +func (test *CommandTest) KeyTypeCheck() (err error) { + var k01, ttl string + for i := 0; i < 100; i++ { + k01 = "test_string_" + strconv.Itoa(i) + ttl, test.Err = test.client.KeyType(k01) + if test.Err != nil { + return test.Err + } + if ttl != "string" { + test.Err = fmt.Errorf("%s ttl:%s,不为string ,key被意外删除", k01, ttl) + return test.Err + } + + k01 = "test_hash_" + strconv.Itoa(i) + ttl, test.Err = test.client.KeyType(k01) + if test.Err != nil { + return test.Err + } + if ttl != "none" { + test.Err = fmt.Errorf("%s ttl:%s,不为none ,key没有删除成功", k01, ttl) + return test.Err + } + + k01 = "test_set_" + strconv.Itoa(i) + ttl, test.Err = test.client.KeyType(k01) + if test.Err != nil { + return test.Err + } + + if ttl != "none" { + test.Err = fmt.Errorf("%s ttl:%s,不为none ,key没有删除成功", k01, ttl) + return test.Err + } + + k01 = "test_list_" + strconv.Itoa(i) + ttl, test.Err = test.client.KeyType(k01) + if test.Err != nil { + return test.Err + } + if ttl != "list" { + test.Err = fmt.Errorf("%s ttl:%s,不为list ,key被意外删除", k01, ttl) + return test.Err + } + + } + return nil +} + +// HashTest hash命令测试 +func (test *CommandTest) HashTest() { + var vals []interface{} + var kname string + for i := 0; i < 100; i++ { + kname = "test_hash_" + strconv.Itoa(i) + vals = make([]interface{}, 0, 4) + vals = append(vals, "k1", "v1", "k2", "v2") + _, test.Err = test.client.Hmset(kname, vals) + if test.Err != nil { + return + } + } +} + +// ListTest list命令测试 +func (test *CommandTest) ListTest() { + var vals []interface{} + var kname string + for i := 0; i < 100; i++ { + kname = "test_list_" + strconv.Itoa(i) + vals = make([]interface{}, 0, 2) + vals = append(vals, "v1", "v2") + _, test.Err = test.client.Rpush(kname, vals) + if test.Err != nil { + return + } + } +} + +// 
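The command tests in this file assert on redis TYPE semantics: TYPE reports the value type for live keys and the literal string "none" for missing ones, which is how KeyTypeCheck distinguishes "deleted as expected" from "unexpectedly deleted". A minimal go-redis v8 illustration, assuming a local unauthenticated instance on 6379:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})
	defer rdb.Close()

	rdb.Set(ctx, "test_string_0", "v0", 0)
	fmt.Println(rdb.Type(ctx, "test_string_0").Val()) // "string"

	rdb.Del(ctx, "test_string_0")
	fmt.Println(rdb.Type(ctx, "test_string_0").Val()) // "none"
}
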
SetTest set命令测试 +func (test *CommandTest) SetTest() { + var vals []interface{} + var kname string + for i := 0; i < 100; i++ { + kname = "test_set_" + strconv.Itoa(i) + vals = make([]interface{}, 0, 4) + vals = append(vals, "v1", "v1", "v2", "v2") + _, test.Err = test.client.Sadd(kname, vals) + if test.Err != nil { + return + } + } +} + +// ZsetTest Zset命令测试 +func (test *CommandTest) ZsetTest() { + var kname string + var members []*redis.Z + for i := 0; i < 100; i++ { + kname = "test_zset_" + strconv.Itoa(i) + members = []*redis.Z{ + { + Score: 10, + Member: "m01", + }, { + Score: 20, + Member: "m02", + }, + } + _, test.Err = test.client.Zadd(kname, members) + if test.Err != nil { + return + } + } +} + +// DelKeysCheck check key命令测试 +// 说明:集群写入 400 : *string* ,*hash*,*list*,*set*, 各100个key,提取&删除 *hash* 和 *set* 共200 +func (test *CommandTest) DelKeysCheck() (err error) { + var stringKeys []string + var hashKeys []string + var listKeys []string + var setKeys []string + stringKeys, _, test.Err = test.client.Scan("*string*", 0, 400) + msg := fmt.Sprintf("redis scan result stringKeys: %v", stringKeys) + fmt.Println(msg) + hashKeys, _, test.Err = test.client.Scan("*hash*", 0, 400) + msg = fmt.Sprintf("redis scan result hashKeys: %v", hashKeys) + fmt.Println(msg) + listKeys, _, test.Err = test.client.Scan("*list*", 0, 400) + msg = fmt.Sprintf("redis scan result listKeys: %v", listKeys) + fmt.Println(msg) + setKeys, _, test.Err = test.client.Scan("*set*", 0, 400) + msg = fmt.Sprintf("redis scan result setKeys:%v", setKeys) + fmt.Println(msg) + if test.Err != nil { + return test.Err + } + if stringKeys == nil || listKeys == nil || len(stringKeys) == 0 || len(listKeys) == 0 { + test.Err = fmt.Errorf("删除了不该删的数据,请检查写入数据是否有更改或者是否改动提取key部分代码:说明:集群共写入400个key(这里只校验一个节点数据) : " + + " *string* ,*hash*,*list*,*set*, 各100个key,提取&删除 *hash* 和 *set* 共200") + } + if len(hashKeys) != 0 || len(setKeys) != 0 { + test.Err = fmt.Errorf("该删除的key没有删成功,请检查写入数据是否有更改或者是否改动提取key部分代码:说明:集群共写入400个key(这里只校验一个节点数据) :" + + " *string* ,*hash*,*list*,*set*, 各100个key,提取&删除 *hash* 和 *set* 共200") + } + if test.Err != nil { + return test.Err + } + fmt.Println("提取&删除key正则符合预期:删除*hash* 和 *set*; 保留*string* 和*list*") + return nil +} + +// FileDelKeysCheck check file delete key命令测试 +// 说明:文件中包含 100个 *string* 匹配的key,实例中还剩下部分*list* 匹配的key +func (test *CommandTest) FileDelKeysCheck() (err error) { + var stringKeys []string + var listKeys []string + stringKeys, _, test.Err = test.client.Scan("*string*", 0, 400) + msg := fmt.Sprintf("FileDelKeysCheck redis scan result stringKeys: %v", stringKeys) + fmt.Println(msg) + + listKeys, _, test.Err = test.client.Scan("*list*", 0, 400) + msg = fmt.Sprintf("FileDelKeysCheck redis scan result listKeys: %v", listKeys) + fmt.Println(msg) + + if test.Err != nil { + return test.Err + } + if listKeys == nil || len(listKeys) == 0 { + test.Err = fmt.Errorf("删除了不该删的数据,请检查写入数据是否有更改或者是否改动提取key部分代码:文件中包含 100个 *string* 匹配的key,还剩下部分*list* 匹配的key") + } + if len(stringKeys) != 0 { + test.Err = fmt.Errorf("该删除的key没有删成功,请检查写入数据是否有更改或者是否改动提取key部分代码:文件中包含 100个 *string* 匹配的key,还剩下部分*list* 匹配的key") + } + if test.Err != nil { + return test.Err + } + fmt.Println("文件删除结果验证符合预期:删除*string*; 保留*list*") + return nil +} + +// FileDelKeyTypeCheck check file delete key命令测试 +// 说明:文件中包含 100个 *string* 匹配的key(会被删除),实例中还剩下部分*list* 匹配的key +func (test *CommandTest) FileDelKeyTypeCheck() (err error) { + var k01, ttl string + for i := 0; i < 100; i++ { + k01 = "test_string_" + strconv.Itoa(i) + ttl, test.Err = 
test.client.KeyType(k01) + if test.Err != nil { + return test.Err + } + if ttl != "none" { + test.Err = fmt.Errorf("%s ttl:%s,不为string ,key被意外删除", k01, ttl) + return test.Err + } + + k01 = "test_list_" + strconv.Itoa(i) + ttl, test.Err = test.client.KeyType(k01) + if test.Err != nil { + return test.Err + } + if ttl != "list" { + test.Err = fmt.Errorf("%s ttl:%s,不为list ,key被意外删除", k01, ttl) + return test.Err + } + + } + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_backup.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_backup.go new file mode 100644 index 0000000000..475316a46d --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_backup.go @@ -0,0 +1,165 @@ +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "regexp" + "time" +) + +// RedisBackupTest 安装测试 +type RedisBackupTest struct { + atomredis.RedisBackupParams + Err error `json:"-"` +} + +// SetBkBizID 设置 BkBizID +func (test *RedisBackupTest) SetBkBizID(bkBizID string) *RedisBackupTest { + if test.Err != nil { + return test + } + if bkBizID == "" { + bkBizID = "testapp" + } + test.BkBizID = bkBizID + return test +} + +// SetDomain set domain,传入为空则填充 cache.hello.testapp.db +func (test *RedisBackupTest) SetDomain(domain string) *RedisBackupTest { + if test.Err != nil { + return test + } + if domain == "" { + domain = "cache.hello.testapp.db" + } + test.Domain = domain + return test +} + +// SetIP set ip,传入为空则自动获取本地ip +func (test *RedisBackupTest) SetIP(ip string) *RedisBackupTest { + if test.Err != nil { + return test + } + if ip == "" || ip == "127.0.0.1" { + ip, test.Err = util.GetLocalIP() + if test.Err != nil { + return test + } + } + test.IP = ip + return test +} + +// SetPorts set ports +// 如果ports=[],startPort=0,instNum=0,则默认startPort=40000,instNum=4 +func (test *RedisBackupTest) SetPorts(ports []int, startPort, instNum int) *RedisBackupTest { + if test.Err != nil { + return test + } + if len(ports) == 0 { + if startPort == 0 { + startPort = consts.TestTendisPlusMasterStartPort + } + if instNum == 0 { + instNum = 4 + } + for i := 0; i < instNum; i++ { + ports = append(ports, startPort+i) + } + } + test.Ports = ports + test.StartPort = startPort + test.InstNum = instNum + return test +} + +// SetBackupType 设置备份类型 +func (test *RedisBackupTest) SetBackupType(backupType string) *RedisBackupTest { + if test.Err != nil { + return test + } + if backupType == "" { + backupType = consts.NormalBackupType + } + test.BackupType = backupType + return test +} + +// SetWithoutToBackupSys 是否上传备份系统 +func (test *RedisBackupTest) SetWithoutToBackupSys(backupType bool) *RedisBackupTest { + if test.Err != nil { + return test + } + test.WithoutToBackupSys = backupType + return test +} + +// SetSSDLogCount 设置ssd log-count参数 +func (test *RedisBackupTest) SetSSDLogCount(logParam *atomredis.TendisSSDSetLogCount) *RedisBackupTest { + if test.Err != nil { + return test + } + if logParam == nil { + return test + } + test.SSDLogCount = *logParam + return test +} + +// RunBackup 执行 backup 原子任务 +func (test *RedisBackupTest) RunBackup() (backupRetBase64 string) { + msg := fmt.Sprintf("=========Backup test start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========Backup test fail============") + } else { + msg = fmt.Sprintf("=========Backup test 
success============") + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test) + // fmt.Printf("-------payload(raw)--------\n%s\n\n", string(paramBytes)) + // encodeStr := base64.StdEncoding.EncodeToString(paramBytes) + instllCmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewRedisBackup().Name(), string(paramBytes)) + fmt.Println(instllCmd) + backupRetBase64, test.Err = util.RunBashCmd(instllCmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + fmt.Println(backupRetBase64) + return +} + +// Backup Test 备份 +func Backup(serverIP string, ports []int, startPort, instNum int, ssdLogCountParam *atomredis.TendisSSDSetLogCount) ( + backupRetBase64 string, err error) { + backupTest := RedisBackupTest{} + backupTest.SetBkBizID("testapp"). + SetIP(serverIP).SetPorts(ports, startPort, instNum). + SetDomain("cache.hello.testapp.db"). + SetBackupType(consts.NormalBackupType). + SetWithoutToBackupSys(true). + SetSSDLogCount(ssdLogCountParam) + if backupTest.Err != nil { + return + } + backupRet := backupTest.RunBackup() + if backupTest.Err != nil { + return + } + reg := regexp.MustCompile(`(?U)(.*)`) + slice01 := reg.FindStringSubmatch(backupRet) + if len(slice01) != 2 { + err = fmt.Errorf("backup result not ?, backup result:%s", backupRet) + return + } + return slice01[1], backupTest.Err +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_cluster.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_cluster.go new file mode 100644 index 0000000000..260a812114 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_cluster.go @@ -0,0 +1,161 @@ +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "time" +) + +// ClusterMeetTest 测试 +type ClusterMeetTest struct { + atomredis.ClusterMeetSlotsAssignParams + Err error `json:"-"` +} + +// SetPassword 设置密码 +func (test *ClusterMeetTest) SetPassword(password string) *ClusterMeetTest { + if test.Err != nil { + return test + } + if password == "" { + test.Err = fmt.Errorf("ClusterMeetTest password(%s) cannot be empty", password) + fmt.Println(test.Err.Error()) + return test + } + test.Password = password + return test +} + +// SetSlotAutoAssign set slotAutoAssgin +func (test *ClusterMeetTest) SetSlotAutoAssign(slotAutoAssgin bool) *ClusterMeetTest { + if test.Err != nil { + return test + } + test.SlotsAutoAssgin = slotAutoAssgin + return test +} + +// SetUseForExpansion set use for expansion +func (test *ClusterMeetTest) SetUseForExpansion(isexpansion bool) *ClusterMeetTest { + if test.Err != nil { + return test + } + test.UseForExpansion = isexpansion + return test +} + +// SetClusterReplicaPairs set replicaPairs +func (test *ClusterMeetTest) SetClusterReplicaPairs(replicaItems []atomredis.ClusterReplicaItem) *ClusterMeetTest { + if test.Err != nil { + return test + } + if len(replicaItems) == 0 { + test.Err = fmt.Errorf("ClusterMeetTest replicaPairs cannot be empty") + fmt.Println(test.Err.Error()) + return test + } + test.ReplicaPairs = replicaItems + return test +} + +// RunClusterMeetAndSlotsAssign 建立集群关系和slots分配 +func (test *ClusterMeetTest) RunClusterMeetAndSlotsAssign() { + msg := fmt.Sprintf("=========ClusterMeetAndSlotsAssign test start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========ClusterMeetAndSlotsAssign test fail============") + } 
else { + msg = fmt.Sprintf("=========ClusterMeetAndSlotsAssign test success============") + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test) + // fmt.Printf("-------payload(raw)--------\n%s\n\n", string(paramBytes)) + // encodeStr := base64.StdEncoding.EncodeToString(paramBytes) + instllCmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewClusterMeetSlotsAssign().Name(), string(paramBytes)) + fmt.Println(instllCmd) + _, test.Err = util.RunBashCmd(instllCmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + return +} + +// CreateClusterREPL 创建redis cluster +func CreateClusterREPL(serverIP string, masterStartPort, slaveStartPort, instNum int, + slotAutoAssign, useForExpansion bool) (err error) { + // 建立cluster 关系 + replicaPairs := make([]atomredis.ClusterReplicaItem, 0, instNum) + for i := 0; i < instNum; i++ { + replicaPairs = append(replicaPairs, atomredis.ClusterReplicaItem{ + MasterIP: serverIP, + MasterPort: masterStartPort + i, + SlaveIP: serverIP, + SlavePort: slaveStartPort + i, + }) + } + plusClusterTest := ClusterMeetTest{} + plusClusterTest.SetPassword(consts.RedisTestPasswd). + SetSlotAutoAssign(slotAutoAssign). + SetUseForExpansion(useForExpansion). + SetClusterReplicaPairs(replicaPairs) + if plusClusterTest.Err != nil { + return plusClusterTest.Err + } + plusClusterTest.RunClusterMeetAndSlotsAssign() + if plusClusterTest.Err != nil { + return plusClusterTest.Err + } + return nil +} + +// CreateRedisClusterREPL 创建redis cluster +func CreateRedisClusterREPL(serverIP string) (err error) { + return CreateClusterREPL(serverIP, + consts.TestRedisMasterStartPort, + consts.TestRedisSlaveStartPort, + consts.TestRedisInstanceNum, true, false) +} + +// CreateTendisplusClusterREPL 创建tendisplus cluster +func CreateTendisplusClusterREPL(serverIP string) (err error) { + return CreateClusterREPL(serverIP, + consts.TestTendisPlusMasterStartPort, + consts.TestTendisPlusSlaveStartPort, + consts.TestRedisInstanceNum, true, false) +} + +// CreateTendisplusREPL 创建主从 +func CreateTendisplusREPL(serverIP string, masterStartPort, slaveStartPort, numbers int) (err error) { + // 建立tendisplus 主从 + replicaPairs := make([]atomredis.ClusterReplicaItem, 0, numbers) + for i := 0; i < numbers; i++ { + replicaPairs = append(replicaPairs, atomredis.ClusterReplicaItem{ + MasterIP: serverIP, + MasterPort: masterStartPort + i, + SlaveIP: serverIP, + SlavePort: slaveStartPort + i, + }) + } + plusClusterTest := ClusterMeetTest{} + plusClusterTest.SetPassword(consts.RedisTestPasswd). + // SetSlotAutoAssign(true). + // 部署扩容所需节点不分配slot + SetSlotAutoAssign(false). + SetUseForExpansion(true). 
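When SlotsAutoAssgin is true, the meet-and-assign job also spreads the 16384 cluster slots across the masters in ReplicaPairs. The actual distribution logic lives in the atom job; the arithmetic is essentially an even split with the remainder spread over the first masters, sketched here:

package main

import "fmt"

// splitSlots returns contiguous [start, end] slot ranges for each master.
func splitSlots(masters int) [][2]int {
	const totalSlots = 16384
	ranges := make([][2]int, 0, masters)
	per := totalSlots / masters
	rem := totalSlots % masters
	start := 0
	for i := 0; i < masters; i++ {
		end := start + per - 1
		if i < rem { // spread the remainder over the first masters
			end++
		}
		ranges = append(ranges, [2]int{start, end})
		start = end + 1
	}
	return ranges
}

func main() {
	fmt.Println(splitSlots(4)) // [[0 4095] [4096 8191] [8192 12287] [12288 16383]]
}
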
+ SetClusterReplicaPairs(replicaPairs) + if plusClusterTest.Err != nil { + return plusClusterTest.Err + } + plusClusterTest.RunClusterMeetAndSlotsAssign() + if plusClusterTest.Err != nil { + return plusClusterTest.Err + } + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_dts_datacheck.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_dts_datacheck.go new file mode 100644 index 0000000000..a9d63261ea --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_dts_datacheck.go @@ -0,0 +1,247 @@ +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/models/myredis" + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/go-redis/redis/v8" +) + +// RedisDtsDataCheckJobTest dts数据校验测试 +type RedisDtsDataCheckJobTest struct { + atomredis.RedisDtsDataCheckAndRpaireParams + Err error `json:"-"` +} + +// SetBkBizID 设置 BkBizID +func (test *RedisDtsDataCheckJobTest) SetBkBizID(bkBizID string) *RedisDtsDataCheckJobTest { + if test.Err != nil { + return test + } + if bkBizID == "" { + bkBizID = "testapp" + } + test.BkBizID = bkBizID + return test +} + +// SetIP set ip,传入为空则自动获取本地ip +func (test *RedisDtsDataCheckJobTest) SetIP(ip string) *RedisDtsDataCheckJobTest { + if test.Err != nil { + return test + } + if ip == "" || ip == "127.0.0.1" { + ip, test.Err = util.GetLocalIP() + if test.Err != nil { + return test + } + } + test.SrcRedisIP = ip + return test +} + +// SetPkg set pkg信息,传入为空则pkg=dbtools.tar.gz,pkgMd5=334cf6e3b84d371325052d961584d5aa +func (test *RedisDtsDataCheckJobTest) SetPkg(pkg, pkgMd5 string) *RedisDtsDataCheckJobTest { + if test.Err != nil { + return test + } + if pkg == "" || pkgMd5 == "" { + pkg = "dbtools.tar.gz" + pkgMd5 = "334cf6e3b84d371325052d961584d5aa" + } + test.Pkg = pkg + test.PkgMd5 = pkgMd5 + return test +} + +// SetDtsType set dts type +func (test *RedisDtsDataCheckJobTest) SetDtsType(dtsType string) *RedisDtsDataCheckJobTest { + if test.Err != nil { + return test + } + test.DtsCopyType = dtsType + return test +} + +// SetPortSegmentList set ports +func (test *RedisDtsDataCheckJobTest) SetPortSegmentList(ports []atomredis.PortAndSegment) *RedisDtsDataCheckJobTest { + if test.Err != nil { + return test + } + test.SrcRedisPortSegmentList = ports + return test +} + +// SetSrcClusterAddr set src cluster addr +func (test *RedisDtsDataCheckJobTest) SetSrcClusterAddr(srcClusterAddr string) *RedisDtsDataCheckJobTest { + if test.Err != nil { + return test + } + test.SrcClusterAddr = srcClusterAddr + return test +} + +// SetSrcReddisPassword set src redis password +func (test *RedisDtsDataCheckJobTest) SetSrcReddisPassword(srcRedisPasswd string) *RedisDtsDataCheckJobTest { + if test.Err != nil { + return test + } + test.SrcRedisPassword = srcRedisPasswd + return test +} + +// SetDtsClusterAddr set dst cluster addr +func (test *RedisDtsDataCheckJobTest) SetDtsClusterAddr(dstClusterAddr string) *RedisDtsDataCheckJobTest { + if test.Err != nil { + return test + } + test.DstClusterAddr = dstClusterAddr + return test +} + +// SetDstClusterPassword set dst cluster password +func (test *RedisDtsDataCheckJobTest) SetDstClusterPassword(dstClusterPasswd string) *RedisDtsDataCheckJobTest { + if test.Err != nil { + return test + } + test.DstClusterPassword = dstClusterPasswd + return test +} + +// SetKeyWhiteRegex 
set key white regex +func (test *RedisDtsDataCheckJobTest) SetKeyWhiteRegex(whiteRegex string) *RedisDtsDataCheckJobTest { + if test.Err != nil { + return test + } + test.KeyWhiteRegex = whiteRegex + return test +} + +// SetKeyBlackRegex set key black regex +func (test *RedisDtsDataCheckJobTest) SetKeyBlackRegex(blackRegex string) *RedisDtsDataCheckJobTest { + if test.Err != nil { + return test + } + test.KeyBlackRegex = blackRegex + return test +} + +// RunRedisDtsDataCheck 执行 redis dts datacheck 原子任务 +func (test *RedisDtsDataCheckJobTest) RunRedisDtsDataCheck() (ret string) { + // 写入 400 : *string* ,*hash*,*list*,*set*, 各100个key,提取 *hash* 和 *set* 共200 + msg := fmt.Sprintf("=========dtsDataCheck test start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========dtsDataCheck test fail============") + fmt.Println(test.Err) + } else { + msg = fmt.Sprintf("=========dtsDataCheck test success============") + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test) + // fmt.Printf("-------payload(raw)--------\n%s\n\n", string(paramBytes)) + // encodeStr := base64.StdEncoding.EncodeToString(paramBytes) + runcmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewRedisDtsDataCheck().Name(), string(paramBytes)) + fmt.Println(runcmd) + ret, test.Err = util.RunBashCmd(runcmd, "", nil, 1*time.Hour) + if test.Err != nil && !strings.Contains(test.Err.Error(), "totalDiffKeysCnt:") { + return + } else if test.Err != nil { + ret = test.Err.Error() + test.Err = nil + } + + return +} + +// genSomeDiffKeys 伪造master=>slave之间两条不一致的数据 +func genSomeDiffKeys(srcAddr, srcPassword, dstAddr, dtsPassword string) { + var err error + var masterCli, slaveCli *myredis.RedisClient + var member *redis.Z + masterCli, err = myredis.NewRedisClientWithTimeout(srcAddr, srcPassword, 0, + consts.TendisTypeRedisInstance, 10*time.Second) + if err != nil { + return + } + defer masterCli.Close() + + slaveCli, err = myredis.NewRedisClientWithTimeout(dstAddr, dtsPassword, 0, + consts.TendisTypeRedisInstance, 10*time.Second) + if err != nil { + return + } + defer slaveCli.Close() + + // 伪造master=>slave之间两条不一致的数据 + _, err = slaveCli.ConfigSet("slave-read-only", "no") + if err != nil { + return + } + keyName := "dts_diff_string" + _, err = masterCli.Set(keyName, "val100", 0) + if err != nil { + return + } + _, err = slaveCli.DelForce(keyName) + if err != nil { + return + } + keyName = "dts_diff_zset" + todelMems := []interface{}{} + for idx := 0; idx < 100; idx++ { + member = &redis.Z{ + Score: float64(idx), + Member: "member:" + strconv.Itoa(idx), + } + masterCli.Zadd(keyName, []*redis.Z{member}) + if idx%10 == 0 { + todelMems = append(todelMems, member.Member) + } + } + slaveCli.Zrem(keyName, todelMems...) +} + +// RunReplicaPairDataCheck 利用一对主从 做数据校验 +func RunReplicaPairDataCheck(masterIP string, masterPort int, masterPasswd, + slaveIP string, slavePort int, slavePasswd string, dbtoolsPkgName, dbtoolsPkgMd5 string) (err error) { + masterAddr := masterIP + ":" + strconv.Itoa(masterPort) + slaveAddr := slaveIP + ":" + strconv.Itoa(slavePort) + + genSomeDiffKeys(masterAddr, masterPasswd, slaveAddr, slavePasswd) + + // 进行数据校验 + dataCheck := RedisDtsDataCheckJobTest{} + portSegmentList := []atomredis.PortAndSegment{} + portSegmentList = append(portSegmentList, atomredis.PortAndSegment{ + Port: masterPort, + SegmentStart: -1, + SegmentEnd: -1, + }) + dataCheck.SetBkBizID("testapp").SetIP(masterIP).SetPortSegmentList(portSegmentList). + SetPkg(dbtoolsPkgName, dbtoolsPkgMd5). 
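For reference, the fixture genSomeDiffKeys above leaves exactly two divergent keys: dts_diff_string exists only on the master, and dts_diff_zset holds 100 members on the master but 90 on the slave (every 10th removed). A hedged go-redis v8 spot-check of that state; addresses and password are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	master := redis.NewClient(&redis.Options{Addr: "127.0.0.1:30000", Password: "xxxx"})
	slave := redis.NewClient(&redis.Options{Addr: "127.0.0.1:31000", Password: "xxxx"})
	defer master.Close()
	defer slave.Close()

	// dts_diff_string: present on master, deleted on slave -> 1 0
	fmt.Println(master.Exists(ctx, "dts_diff_string").Val(), slave.Exists(ctx, "dts_diff_string").Val())
	// dts_diff_zset: 100 members on master, 90 on slave
	fmt.Println(master.ZCard(ctx, "dts_diff_zset").Val(), slave.ZCard(ctx, "dts_diff_zset").Val())
}
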
+ SetDtsType(consts.DtsTypeOneAppDiffCluster). + SetSrcClusterAddr(masterAddr).SetSrcReddisPassword(masterPasswd). + SetDtsClusterAddr(slaveAddr).SetDstClusterPassword(slavePasswd). + SetKeyWhiteRegex("*").SetKeyBlackRegex("") + + ret := dataCheck.RunRedisDtsDataCheck() + if dataCheck.Err != nil { + return dataCheck.Err + } + if !strings.Contains(ret, "totalDiffKeysCnt:1") { + fmt.Printf("====>%s\n", ret) + } + + return +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_dts_datarepaire.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_dts_datarepaire.go new file mode 100644 index 0000000000..8f808e065a --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_dts_datarepaire.go @@ -0,0 +1,78 @@ +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" +) + +// RedisDtsDataRepaireJobTest dts数据修复测试 +type RedisDtsDataRepaireJobTest struct { + RedisDtsDataCheckJobTest +} + +// RunRedisDtsDataRepaire 执行 redis dts datarepaire 原子任务 +func (test *RedisDtsDataCheckJobTest) RunRedisDtsDataRepaire() (ret string) { + // 写入 400 : *string* ,*hash*,*list*,*set*, 各100个key,提取"hash* 和 *set* 共200 + msg := fmt.Sprintf("=========dtsDataRepaire test start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========dtsDataRepaire test fail============") + fmt.Println(test.Err) + } else { + msg = fmt.Sprintf("=========dtsDataRepaire test success============") + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test) + // fmt.Printf("-------payload(raw)--------\n%s\n\n", string(paramBytes)) + // encodeStr := base64.StdEncoding.EncodeToString(paramBytes) + runcmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewRedisDtsDataRepaire().Name(), string(paramBytes)) + fmt.Println(runcmd) + ret, test.Err = util.RunBashCmd(runcmd, "", nil, 1*time.Hour) + if test.Err != nil && !strings.Contains(test.Err.Error(), "totalHotKeysCnt:") { + return + } else if test.Err != nil { + ret = test.Err.Error() + test.Err = nil + } + + return +} + +// RunReplicaPairDataRepaire 利用一对主从 做数据修复 +func RunReplicaPairDataRepaire(masterIP string, masterPort int, masterPasswd, + slaveIP string, slavePort int, slavePasswd string, dbtoolsPkgName, dbtoolsPkgMd5 string) (err error) { + masterAddr := masterIP + ":" + strconv.Itoa(masterPort) + slaveAddr := slaveIP + ":" + strconv.Itoa(slavePort) + + // 进行数据校验 + dataRepaire := RedisDtsDataRepaireJobTest{} + portSegmentList := []atomredis.PortAndSegment{} + portSegmentList = append(portSegmentList, atomredis.PortAndSegment{ + Port: masterPort, + SegmentStart: -1, + SegmentEnd: -1, + }) + dataRepaire.SetBkBizID("testapp").SetIP(masterIP).SetPortSegmentList(portSegmentList). + SetPkg(dbtoolsPkgName, dbtoolsPkgMd5). + SetDtsType(consts.DtsTypeOneAppDiffCluster). + SetSrcClusterAddr(masterAddr).SetSrcReddisPassword(masterPasswd). + SetDtsClusterAddr(slaveAddr).SetDstClusterPassword(slavePasswd). 
+ SetKeyWhiteRegex("*").SetKeyBlackRegex("") + + dataRepaire.RunRedisDtsDataRepaire() + if dataRepaire.Err != nil { + return dataRepaire.Err + } + + return +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_flushdata.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_flushdata.go new file mode 100644 index 0000000000..a9a197f1a4 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_flushdata.go @@ -0,0 +1,143 @@ +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "time" +) + +// RedisFlushDataTest redis清档 +type RedisFlushDataTest struct { + atomredis.RedisFlushDataParams + Err error `json:"-"` +} + +// SetIP set ip,传入为空则自动获取本地ip +func (test *RedisFlushDataTest) SetIP(ip string) *RedisFlushDataTest { + if test.Err != nil { + return test + } + if ip == "" || ip == "127.0.0.1" { + ip, test.Err = util.GetLocalIP() + if test.Err != nil { + return test + } + } + test.IP = ip + return test +} + +// SetDbType db type +func (test *RedisFlushDataTest) SetDbType(dbType string) *RedisFlushDataTest { + if test.Err != nil { + return test + } + test.DbType = dbType + return test +} + +// SetPorts set port +func (test *RedisFlushDataTest) SetPorts(ports []int, startPort, instNum int) *RedisFlushDataTest { + if test.Err != nil { + return test + } + if len(ports) == 0 { + if startPort == 0 { + startPort = consts.TestTendisPlusMasterStartPort + } + if instNum == 0 { + instNum = 4 + } + for i := 0; i < instNum; i++ { + ports = append(ports, startPort+i) + } + } + test.Ports = ports + return test +} + +// SetForce 是否强制 +func (test *RedisFlushDataTest) SetForce(force bool) *RedisFlushDataTest { + if test.Err != nil { + return test + } + test.IsForce = force + return test +} + +// SetPwd set pwd +func (test *RedisFlushDataTest) SetPwd(pwd string) *RedisFlushDataTest { + if test.Err != nil { + return test + } + test.Password = pwd + return test +} + +// SetDbList db list +func (test *RedisFlushDataTest) SetDbList(dbList []int) *RedisFlushDataTest { + if test.Err != nil { + return test + } + test.DBList = dbList + return test +} + +// SetFlushAll .. +func (test *RedisFlushDataTest) SetFlushAll(flushall bool) *RedisFlushDataTest { + if test.Err != nil { + return test + } + test.IsFlushAll = flushall + return test +} + +// SetTest set test +func (test *RedisFlushDataTest) SetTest() *RedisFlushDataTest { + if test.Err != nil { + return test + } + test.Debug = true + return test +} + +// RunRedisFlushData flush data +func (test *RedisFlushDataTest) RunRedisFlushData() { + msg := fmt.Sprintf("=========RunRedisFlushData start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========RunRedisFlushData fail============") + } else { + msg = fmt.Sprintf("=========RunRedisFlushData success============") + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test) + cmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewRedisFlushData().Name(), string(paramBytes)) + fmt.Println(cmd) + _, test.Err = util.RunBashCmd(cmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + return +} + +// FlushData 清理数据 +func FlushData(serverIP string, dbType, pwd string, ports []int, startPort, instNum int) (err error) { + flushTest := RedisFlushDataTest{} + flushTest.SetIP(serverIP).SetPorts(ports, startPort, instNum). 
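A natural follow-up assertion after FlushData (not part of this patch) is that every flushed instance reports DBSIZE 0. A hedged go-redis v8 sketch, with ports and password mirroring the test defaults used here:

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	// four consecutive test instances starting at 40000, per the defaults above
	for port := 40000; port < 40004; port++ {
		rdb := redis.NewClient(&redis.Options{Addr: fmt.Sprintf("127.0.0.1:%d", port), Password: "xxxx"})
		n, err := rdb.DBSize(ctx).Result()
		fmt.Printf("port %d: dbsize=%d err=%v\n", port, n, err)
		rdb.Close()
	}
}
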
+ SetDbType(dbType).SetForce(true).SetFlushAll(true). + SetPwd(pwd) + if flushTest.Err != nil { + return + } + + flushTest.RunRedisFlushData() + return flushTest.Err +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_install.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_install.go new file mode 100644 index 0000000000..8f655b91ac --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_install.go @@ -0,0 +1,790 @@ +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "path/filepath" + "strconv" + "strings" + "time" +) + +// RedisInstallTest 安装测试 +type RedisInstallTest struct { + atomredis.RedisInstallParams + Err error `json:"-"` +} + +// SetIP set ip,传入为空则自动获取本地ip +func (test *RedisInstallTest) SetIP(ip string) *RedisInstallTest { + if test.Err != nil { + return test + } + if ip == "" || ip == "127.0.0.1" { + ip, test.Err = util.GetLocalIP() + if test.Err != nil { + return test + } + } + test.IP = ip + return test +} + +// SetPorts set ports +// 如果ports=[],startPort=0,instNum=0,则默认startPort=40000,instNum=4 +func (test *RedisInstallTest) SetPorts(ports []int, startPort, instNum int) *RedisInstallTest { + if test.Err != nil { + return test + } + if len(ports) == 0 { + if startPort == 0 { + startPort = consts.TestTendisPlusMasterStartPort + } + if instNum == 0 { + instNum = 4 + } + for i := 0; i < instNum; i++ { + ports = append(ports, startPort+i) + } + } + test.Ports = ports + test.StartPort = startPort + test.InstNum = instNum + return test +} + +// SetPassword set password,传入为空则password=xxxxx +func (test *RedisInstallTest) SetPassword(password string) *RedisInstallTest { + if test.Err != nil { + return test + } + if password == "" { + password = "xxxx" + } + test.Password = password + return test +} + +// SetRedisMediaPkg set redis pkg信息,传入为空则pkg=redis-6.2.7.tar.gz,pkgMd5=ab596d27e8fa545ea5f374d0cc9b263e +func (test *RedisInstallTest) SetRedisMediaPkg(pkg, pkgMd5 string) *RedisInstallTest { + if test.Err != nil { + return test + } + if pkg == "" || pkgMd5 == "" { + pkg = "redis-6.2.7.tar.gz" + pkgMd5 = "ab596d27e8fa545ea5f374d0cc9b263e" + } + test.Pkg = pkg + test.PkgMd5 = pkgMd5 + return test +} + +// SetDbtoolsPkg set dbtools pkg信息,传入为空则 pkg=dbtools.tar.gz, pkgMd5=334cf6e3b84d371325052d961584d5aa +func (test *RedisInstallTest) SetDbtoolsPkg(pkg, pkgMd5 string) *RedisInstallTest { + if test.Err != nil { + return test + } + if pkg == "" || pkgMd5 == "" { + pkg = "dbtools.tar.gz" + pkgMd5 = "334cf6e3b84d371325052d961584d5aa" + } + test.DbToolsPkg.Pkg = pkg + test.DbToolsPkg.PkgMd5 = pkgMd5 + return test +} + +// SetDatabases 设置databases,默认为2 +func (test *RedisInstallTest) SetDatabases(databases int) *RedisInstallTest { + if test.Err != nil { + return test + } + if databases == 0 { + databases = 2 + } + test.Databases = databases + return test +} + +// SetDbType 设置DbType,默认为 PredixyTendisplusCluster +func (test *RedisInstallTest) SetDbType(dbType string) *RedisInstallTest { + if test.Err != nil { + return test + } + if dbType == "" { + dbType = "PredixyTendisplusCluster" + } + test.DbType = dbType + return test +} + +// SetMaxMemory 设置 +func (test *RedisInstallTest) SetMaxMemory(maxMemory uint64) *RedisInstallTest { + if test.Err != nil { + return test + } + test.MaxMemory = maxMemory + return test +} + +// InstallTendisplus 安装tendisplus +func 
(test *RedisInstallTest) InstallTendisplus() { + msg := fmt.Sprintf("=========install_tendisplus test start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========install_tendisplus test fail============") + } else { + msg = fmt.Sprintf("=========install_tendisplus test success============") + } + fmt.Println(msg) + }() + test.SetTendisplusRedisConf() + paramBytes, _ := json.Marshal(test) + // fmt.Printf("-------payload(raw)--------\n%s\n\n", string(paramBytes)) + // encodeStr := base64.StdEncoding.EncodeToString(paramBytes) + instllCmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewRedisInstall().Name(), string(paramBytes)) + fmt.Println(instllCmd) + _, test.Err = util.RunBashCmd(instllCmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + return +} + +// InstallCacheRedis 安装cache redis +func (test *RedisInstallTest) InstallCacheRedis() { + msg := fmt.Sprintf("=========install_cache_redis test start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========install_cache_redis test fail============") + } else { + msg = fmt.Sprintf("=========install_cache_redis test success============") + } + fmt.Println(msg) + }() + + test.SetCacheRedisConf() + paramBytes, _ := json.Marshal(test) + // fmt.Printf("-------payload(raw)--------\n%s\n\n", string(paramBytes)) + // encodeStr := base64.StdEncoding.EncodeToString(paramBytes) + instllCmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewRedisInstall().Name(), string(paramBytes)) + fmt.Println(instllCmd) + _, test.Err = util.RunBashCmd(instllCmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + return +} + +// InstallTendisSSD 安装 tendisSSD +func (test *RedisInstallTest) InstallTendisSSD() { + msg := fmt.Sprintf("=========install_tendisSSD test start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========install_tendisSSD test fail============") + } else { + msg = fmt.Sprintf("=========install_tendisSSD test success============") + } + fmt.Println(msg) + }() + + test.SetTendisSSDRedisConf() + paramBytes, _ := json.Marshal(test) + // fmt.Printf("-------payload(raw)--------\n%s\n\n", string(paramBytes)) + // encodeStr := base64.StdEncoding.EncodeToString(paramBytes) + instllCmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewRedisInstall().Name(), string(paramBytes)) + fmt.Println(instllCmd) + _, test.Err = util.RunBashCmd(instllCmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + return +} + +func tendisplusStillRunning() bool { + grepCmd := `ps aux|grep "/usr/local/redis/bin/tendisplus"|grep -v grep || true;` + ret, _ := util.RunBashCmd(grepCmd, "", nil, 10*time.Second) + ret = strings.TrimSpace(ret) + if ret != "" { + return true + } + return false +} + +// ClearTendisplus 清理tendisplus环境 +// 关闭tendisplus进程,清理数据目录,清理 /usr/local/redis +func (test *RedisInstallTest) ClearTendisplus(clearDataDir bool) { + dataDirs := []string{} + var dir string + for i := 0; i < test.InstNum; i++ { + dataDirs = append(dataDirs, filepath.Join(consts.GetRedisDataDir(), "redis", strconv.Itoa(test.Ports[i]))) + StopRedisProcess(test.IP, test.Ports[i], test.Password, "tendisplus") + } + + if clearDataDir { + for _, dir = range dataDirs { + if strings.Contains(dir, "redis") && util.FileExists(dir) { + fmt.Println("rm -rf " + dir) + util.RunBashCmd("rm -rf "+dir, "", nil, 1*time.Minute) + } + } + } + if !tendisplusStillRunning() { + dir = "/usr/local/redis" + if util.FileExists(dir) { + 
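tendisplusStillRunning above and redisserverStillRunning below differ only in the binary path they grep for; the same check can be expressed once, parameterized (a sketch, not part of this patch):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// processStillRunning reports whether any process matching binPath survives,
// using the same ps|grep pipeline as the helpers in this file.
func processStillRunning(binPath string) bool {
	grepCmd := fmt.Sprintf(`ps aux|grep %q|grep -v grep || true;`, binPath)
	out, err := exec.Command("bash", "-c", grepCmd).Output()
	if err != nil {
		return false
	}
	return strings.TrimSpace(string(out)) != ""
}

func main() {
	fmt.Println(processStillRunning("/usr/local/redis/bin/redis-server"))
}
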
fmt.Println("rm -rf " + dir) + util.RunBashCmd("rm -rf "+dir, "", nil, 1*time.Minute) + } + dir = filepath.Join("/usr/local", test.GePkgBaseName()) + if util.FileExists(dir) { + fmt.Println("rm -rf " + dir) + util.RunBashCmd("rm -rf "+dir, "", nil, 1*time.Minute) + } + } + time.Sleep(2 * time.Second) +} + +func redisserverStillRunning() bool { + grepCmd := `ps aux|grep "/usr/local/redis/bin/redis-server"|grep -v grep || true;` + ret, _ := util.RunBashCmd(grepCmd, "", nil, 10*time.Second) + ret = strings.TrimSpace(ret) + if ret != "" { + return true + } + return false +} + +// ClearCacheRedis 清理Redis环境 +// 关闭Redis进程,清理数据目录,清理 /usr/local/redis +func (test *RedisInstallTest) ClearCacheRedis(clearDataDir bool) { + dataDirs := []string{} + var dir string + for i := 0; i < test.InstNum; i++ { + dataDirs = append(dataDirs, filepath.Join(consts.GetRedisDataDir(), "redis", strconv.Itoa(test.Ports[i]))) + StopRedisProcess(test.IP, test.Ports[i], test.Password, "redis-server") + } + + if clearDataDir { + for _, dir = range dataDirs { + if strings.Contains(dir, "redis") && util.FileExists(dir) { + fmt.Println("rm -rf " + dir) + util.RunBashCmd("rm -rf "+dir, "", nil, 1*time.Minute) + } + } + } + if !redisserverStillRunning() { + dir = "/usr/local/redis" + if util.FileExists(dir) { + fmt.Println("rm -rf " + dir) + util.RunBashCmd("rm -rf "+dir, "", nil, 1*time.Minute) + } + dir = filepath.Join("/usr/local", test.GePkgBaseName()) + if util.FileExists(dir) { + fmt.Println("rm -rf " + dir) + util.RunBashCmd("rm -rf "+dir, "", nil, 1*time.Minute) + } + } + time.Sleep(2 * time.Second) +} + +// SetCacheRedisConf 设置cache redis dbConfig +func (test *RedisInstallTest) SetCacheRedisConf() { + test.RedisConfConfigs = map[string]string{ + "bind": "{{address}} 127.0.0.1", + "port": "{{port}}", + "requirepass": "{{password}}", + "maxmemory": "{{maxmemory}}", + "logfile": "{{redis_data_dir}}/redis.log", + "pidfile": "{{redis_data_dir}}/redis.pid", + "dir": "{{redis_data_dir}}/data", + "databases": "{{databases}}", + "cluster-enabled": "{{cluster_enabled}}", + "daemonize": "yes", + "tcp-keepalive": "300", + "protected-mode": "yes", + "maxmemory-policy": "noeviction", + "tcp-backlog": "511", + "timeout": "0", + "supervised": "no", + "hz": "10", + "maxclients": "180000", + "loglevel": "notice", + "always-show-logo": "yes", + "save": "", + "stop-writes-on-bgsave-error": "yes", + "rdbcompression": "yes", + "rdbchecksum": "yes", + "dbfilename": "dump.rdb", + "slave-serve-stale-data": "yes", + "slave-read-only": "yes", + "repl-diskless-sync": "no", + "slave-priority": "100", + "rename-command": `flushdb cleandb + rename-command flushall cleanall + rename-command debug nobug + rename-command keys mykeys`, + "lazyfree-lazy-eviction": "yes", + "lazyfree-lazy-expire": "yes", + "lazyfree-lazy-server-del": "yes", + "slave-lazy-flush": "yes", + "appendonly": "no", + "appendfilename": "appendonly.aof", + "appendfsync": "everysec", + "no-appendfsync-on-rewrite": "yes", + "auto-aof-rewrite-percentage": "100", + "auto-aof-rewrite-min-size": "64mb", + "aof-load-truncated": "yes", + "aof-use-rdb-preamble": "no", + "aof-rewrite-incremental-fsync": "yes", + "lua-time-limit": "5000", + "cluster-config-file": "nodes.conf", + "cluster-node-timeout": "15000", + "client-output-buffer-limit": `normal 256mb 512mb 300 + client-output-buffer-limit slave 2048mb 2048mb 300 + client-output-buffer-limit pubsub 32mb 8mb 60`, + "hash-max-ziplist-entries": "512", + "hash-max-ziplist-value": "64", + "list-max-ziplist-size": "-2", + "list-compress-depth": 
"0", + "zset-max-ziplist-entries": "128", + "zset-max-ziplist-value": "64", + "hll-sparse-max-bytes": "3000", + "activerehashing": "yes", + "slowlog-log-slower-than": "10000", + "slowlog-max-len": "256", + } +} + +// SetTendisplusRedisConf 设置tendisplus dbConfig +func (test *RedisInstallTest) SetTendisplusRedisConf() { + test.RedisConfConfigs = map[string]string{ + "bind": "{{address}}", + "port": "{{port}}", + "loglevel": "notice", + "logdir": "{{redis_data_dir}}/data/log", + "dir": "{{redis_data_dir}}/data/db", + "dumpdir": "{{redis_data_dir}}/data/dump", + "pidfile": "{{redis_data_dir}}data/tendisplus.pid", + "slowlog": "{{redis_data_dir}}/data/slowlog", + "databases": "{{databases}}", + "requirepass": "{{password}}", + "masterauth": "{{password}}", + "cluster-enabled": "{{cluster_enabled}}", + "executorWorkPoolSize": "2", + "executorThreadNum": "24", + "netIoThreadNum": "3", + "noexpire": "no", + "rocks.blockcachemb": "{{rocks_blockcachemb}}", + "kvstorecount": "10", + "rocks.compress_type": "lz4", + "rocks.max_background_compactions": "12", + "rocks.write_buffer_size": "{{rocks_write_buffer_size}}", + "binlog-using-defaultCF": "off", + "maxBinlogKeepNum": "1", + "netBatchSize": "1048576", + "netBatchTimeoutSec": "10", + "cluster-migration-rate-limit": "200", + "migrateReceiveThreadnum": "4", + "migrateSenderThreadnum": "4", + "migrate-snapshot-key-num": "30000", + "slave-migrate-enabled": "on", + "rocks.cache_index_and_filter_blocks": "0", + "truncateBinlogNum": "10000000", + "domain-enabled": "off", + "pauseTimeIndexMgr": "1", + "scanCntIndexMgr": "10000", + "truncateBinlogIntervalMs": "100", + "minbinlogkeepsec": "1800", + "binlogdelrange": "500000", + "migrate-gc-enabled": "false", + "deletefilesinrange-for-binlog": "1", + "incrpushthreadnum": "10", + "rename-command": `config confxx + rename-command flushdb cleandb + rename-command flushall cleanall + rename-command debug nobug + rename-command keys mykeys`, + } +} + +// SetTendisSSDRedisConf 设置tendisssd dbConfig +func (test *RedisInstallTest) SetTendisSSDRedisConf() { + test.RedisConfConfigs = map[string]string{ + "activerehashing": "yes", + "aof-rewrite-incremental-fsync": "yes", + "appendfilename": "appendonly.aof", + "appendfsync": "everysec", + "appendonly": "no", + "auto-aof-rewrite-min-size": "64mb", + "auto-aof-rewrite-percentage": "100", + "bind": "{{address}} 127.0.0.1", + "binlog-enabled": "1", + "binlog-filesize": "268435456", + "clean-time": "3", + "client-output-buffer-limit": `normal 256mb 512mb 300 + client-output-buffer-limit slave 2048mb 2048mb 300 + client-output-buffer-limit pubsub 32mb 8mb 60`, + "daemonize": "yes", + "databases": "{{databases}}", + "dbfilename": "dump.rdb", + "dir": "{{redis_data_dir}}/data", + "disk-delete-count": "50", + "disk-delete-time": "50", + "dumpdir": "{{redis_data_dir}}/rbinlog/", + "hash-max-ziplist-entries": "512", + "hash-max-ziplist-value": "64", + "hz": "10", + "list-max-ziplist-entries": "512", + "list-max-ziplist-value": "64", + "log-count": "200000", + "log-keep-count": "20000000", + "log-keep-time": "36000", + "logfile": "{{redis_data_dir}}/redis.log", + "loglevel": "notice", + "lua-time-limit": "5000", + "max_manifest_file_size": "200000000", + "max_open_files": "100000", + "maxclients": "50000", + "maxmemory": "{{maxmemory}}", + "maxmemory-policy": "noeviction", + "no-appendfsync-on-rewrite": "no", + "pause-clean-time": "5", + "pause-scan-expires-time": "100", + "pidfile": "{{redis_data_dir}}/redis.pid", + "port": "{{port}}", + "rdbchecksum": "yes", + "rdbcompression": 
"yes", + "rename-command": `config confxx + rename-command debug nobug + rename-command keys mykeys`, + "repl-disable-tcp-nodelay": "no", + "repl-mode": "tredis-binlog", + "repl-timeout": "600", + "requirepass": "{{password}}", + "rocksdb_block_cache": "500000000", + "rocksdb_block_size": "32000", + "rocksdb_write_buffer_size": "32000000", + "save": "", + "scan-expires-time": "1", + "set-max-intset-entries": "512", + "slave-priority": "100", + "slave-read-only": "yes", + "slave-serve-stale-data": "yes", + "slowlog-log-slower-than": "10000", + "slowlog-max-len": "256", + "stop-writes-on-bgsave-error": "yes", + "target_file_size_base": "8000000", + "tcp-keepalive": "300", + "timeout": "0", + "write_batch_size": "2", + "zset-max-ziplist-entries": "128", + "zset-max-ziplist-value": "64", + } +} + +// RedisInstanceInstall cache安装 +func RedisInstanceInstall(serverIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType string, startPort, numbers int) (err error) { + cacheMasterTest := RedisInstallTest{} + cacheMasterTest.SetIP(serverIP). + SetPorts([]int{}, startPort, numbers). + SetPassword(consts.RedisTestPasswd). + SetRedisMediaPkg(redisPkgName, redisPkgMd5). + SetDbtoolsPkg(dbtoolsPkgName, dbtoolsPkgMd5). + SetDbType(dbType). + SetDatabases(2).SetMaxMemory(8589934592) + if cacheMasterTest.Err != nil { + return cacheMasterTest.Err + } + cacheMasterTest.InstallCacheRedis() + if cacheMasterTest.Err != nil { + return + } + return +} + +// RedisInstanceClear cache清理 +func RedisInstanceClear(serverIP, dbType string, + clearDataDir bool, startPort, numbers int) (err error) { + cacheMasterTest := RedisInstallTest{} + cacheMasterTest.SetIP(serverIP). + SetPorts([]int{}, startPort, numbers). + SetPassword(consts.RedisTestPasswd). + SetDbType(dbType). 
+ SetDatabases(2).SetMaxMemory(8589934592) + if cacheMasterTest.Err != nil { + return cacheMasterTest.Err + } + cacheMasterTest.ClearCacheRedis(clearDataDir) + if cacheMasterTest.Err != nil { + return + } + return +} + +// 下面这些的 install 和 clear函数只是端口不同,其他的均一样 + +// RedisInstanceMasterInstall cache master安装 +func RedisInstanceMasterInstall(serverIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType string) (err error) { + return RedisInstanceInstall(serverIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType, + consts.TestRedisMasterStartPort, + consts.TestRedisInstanceNum) +} + +// RedisInstanceMasterClear cache master清理 +func RedisInstanceMasterClear(serverIP, dbType string, + clearDataDir bool) (err error) { + return RedisInstanceClear(serverIP, + dbType, clearDataDir, + consts.TestRedisMasterStartPort, + consts.TestRedisInstanceNum, + ) +} + +// RedisInstanceSlaveInstall cache slave安装 +func RedisInstanceSlaveInstall(serverIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType string) (err error) { + return RedisInstanceInstall(serverIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType, + consts.TestRedisSlaveStartPort, + consts.TestRedisInstanceNum) +} + +// RedisInstanceSlaveClear cache slave清理 +func RedisInstanceSlaveClear(serverIP, dbType string, clearDataDir bool) (err error) { + return RedisInstanceClear(serverIP, + dbType, clearDataDir, + consts.TestRedisSlaveStartPort, + consts.TestRedisInstanceNum, + ) +} + +// RedisSyncMasterInstall cache master安装 +func RedisSyncMasterInstall(serverIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType string) (err error) { + return RedisInstanceInstall(serverIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType, + consts.TestSyncRedisMasterStartPort, + consts.TestRedisInstanceNum) +} + +// RedisSyncMasterClear cache master清理 +func RedisSyncMasterClear(serverIP, dbType string, clearDataDir bool) (err error) { + return RedisInstanceClear(serverIP, + dbType, clearDataDir, + consts.TestSyncRedisMasterStartPort, + consts.TestRedisInstanceNum, + ) +} + +// RedisSyncSlaveInstall cache slave安装 +func RedisSyncSlaveInstall(serverIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType string) (err error) { + return RedisInstanceInstall(serverIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType, + consts.TestSyncRedisSlaveStartPort, + consts.TestRedisInstanceNum) +} + +// RedisSyncSlaveClear cache slave清理 +func RedisSyncSlaveClear(serverIP, dbType string, clearDataDir bool) (err error) { + return RedisInstanceClear(serverIP, + dbType, clearDataDir, + consts.TestSyncRedisSlaveStartPort, + consts.TestRedisInstanceNum, + ) +} + +// TendisplusInstall 安装tendisplus实例 +func TendisplusInstall(serverIP, + tendisplusPkgName, tendisplusPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType string, port, numbers int) (err error) { + plusMasterTest := RedisInstallTest{} + plusMasterTest.SetIP(serverIP). + SetPorts([]int{}, port, numbers). + SetPassword(consts.RedisTestPasswd). + SetRedisMediaPkg(tendisplusPkgName, tendisplusPkgMd5). + SetDbtoolsPkg(dbtoolsPkgName, dbtoolsPkgMd5). + SetDbType(dbType). 
+ SetDatabases(1).SetMaxMemory(8589934592) + if plusMasterTest.Err != nil { + return plusMasterTest.Err + } + plusMasterTest.InstallTendisplus() + if plusMasterTest.Err != nil { + return plusMasterTest.Err + } + return nil +} + +// TendisplusClear tendisplus实例清理 +func TendisplusClear(serverIP, dbType string, + clearDataDir bool, + port, numbers int) (err error) { + plusMasterTest := RedisInstallTest{} + plusMasterTest.SetIP(serverIP). + SetPorts([]int{}, port, numbers). + SetPassword(consts.RedisTestPasswd). + SetDbType(dbType). + SetDatabases(1).SetMaxMemory(8589934592) + if plusMasterTest.Err != nil { + return plusMasterTest.Err + } + plusMasterTest.ClearTendisplus(clearDataDir) + if plusMasterTest.Err != nil { + return plusMasterTest.Err + } + return nil +} + +// TendisplusMasterInstall 安装tendisplus master实例 +func TendisplusMasterInstall(serverIP, + tendisplusPkgName, tendisplusPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType string) (err error) { + + return TendisplusInstall(serverIP, + tendisplusPkgName, tendisplusPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType, consts.TestTendisPlusMasterStartPort, + consts.TestRedisInstanceNum) +} + +// TendisplusMasterClear tendisplus实例清理 +func TendisplusMasterClear(serverIP, dbType string, clearDataDir bool) (err error) { + return TendisplusClear(serverIP, dbType, clearDataDir, + consts.TestTendisPlusMasterStartPort, + consts.TestRedisInstanceNum, + ) +} + +// TendisplusSlaveInstall 安装tendisplus slave实例 +func TendisplusSlaveInstall(serverIP, + tendisplusPkgName, tendisplusPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType string) (err error) { + return TendisplusInstall(serverIP, + tendisplusPkgName, tendisplusPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType, consts.TestTendisPlusSlaveStartPort, + consts.TestRedisInstanceNum) +} + +// TendisplusSlaveClear tendisplus slave实例清理 +func TendisplusSlaveClear(serverIP, dbType string, clearDataDir bool) (err error) { + return TendisplusClear(serverIP, dbType, clearDataDir, + consts.TestTendisPlusSlaveStartPort, + consts.TestRedisInstanceNum, + ) +} + +// TendisplusSyncMasterInstall 安装tendisplus sync master实例 +func TendisplusSyncMasterInstall(serverIP, + tendisplusPkgName, tendisplusPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType string) (err error) { + return TendisplusInstall(serverIP, + tendisplusPkgName, tendisplusPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType, consts.TestSyncTendisPlusMasterStartPort, + consts.TestRedisInstanceNum) +} + +// TendisplusSyncMasterClear tendisplus sync master清理 +func TendisplusSyncMasterClear(serverIP, dbType string, clearDataDir bool) (err error) { + return TendisplusClear(serverIP, dbType, clearDataDir, + consts.TestSyncTendisPlusMasterStartPort, + consts.TestRedisInstanceNum, + ) +} + +// TendisplusSyncSlaveInstall 安装tendisplus sync slave实例 +func TendisplusSyncSlaveInstall(serverIP, + tendisplusPkgName, tendisplusPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType string) (err error) { + return TendisplusInstall(serverIP, + tendisplusPkgName, tendisplusPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + dbType, consts.TestSyncTendisPlusSlaveStartPort, + consts.TestRedisInstanceNum) +} + +// TendisplusSyncSlaveClear tendisplus sync slave清理 +func TendisplusSyncSlaveClear(serverIP, dbType string, clearDataDir bool) (err error) { + return TendisplusClear(serverIP, dbType, clearDataDir, + consts.TestSyncTendisPlusSlaveStartPort, + consts.TestRedisInstanceNum, + ) +} + +// TendisSSDInstall tendisSSD安装 +func TendisSSDInstall(serverIP, + redisPkgName, redisPkgMd5, + 
dbtoolsPkgName, dbtoolsPkgMd5, + dbType string, startPort, numbers int) (err error) { + ssdMasterTest := RedisInstallTest{} + ssdMasterTest.SetIP(serverIP). + SetPorts([]int{}, startPort, numbers). + SetPassword(consts.RedisTestPasswd). + SetRedisMediaPkg(redisPkgName, redisPkgMd5). + SetDbtoolsPkg(dbtoolsPkgName, dbtoolsPkgMd5). + SetDbType(dbType). + SetDatabases(2).SetMaxMemory(8589934592) + if ssdMasterTest.Err != nil { + return ssdMasterTest.Err + } + ssdMasterTest.InstallTendisSSD() + if ssdMasterTest.Err != nil { + return ssdMasterTest.Err + } + return +} + +// TendisSSDClear tendisSSD清理 +func TendisSSDClear(serverIP, dbType string, + clearDataDir bool, startPort, numbers int) (err error) { + ssdMasterTest := RedisInstallTest{} + ssdMasterTest.SetIP(serverIP). + SetPorts([]int{}, startPort, numbers). + SetPassword(consts.RedisTestPasswd). + SetDbType(dbType). + SetDatabases(2).SetMaxMemory(8589934592) + if ssdMasterTest.Err != nil { + return ssdMasterTest.Err + } + ssdMasterTest.ClearCacheRedis(clearDataDir) + if ssdMasterTest.Err != nil { + return ssdMasterTest.Err + } + return +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_keysdelete_files.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_keysdelete_files.go new file mode 100644 index 0000000000..073c8fea7d --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_keysdelete_files.go @@ -0,0 +1,227 @@ +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "time" +) + +// RedisKeysFilesDeleteJobTest key提取&删除测试 +type RedisKeysFilesDeleteJobTest struct { + atomredis.TendisKeysFilesDeleteParams + Err error `json:"-"` +} + +// SetBkBizID 设置 BkBizID +func (test *RedisKeysFilesDeleteJobTest) SetBkBizID(bkBizID string) *RedisKeysFilesDeleteJobTest { + if test.Err != nil { + return test + } + if bkBizID == "" { + bkBizID = "testapp" + } + test.BkBizID = bkBizID + return test +} + +// SetDomain domain 信息 +func (test *RedisKeysFilesDeleteJobTest) SetDomain(domain string) *RedisKeysFilesDeleteJobTest { + if test.Err != nil { + return test + } + // 不传domain 则测试本地创建集群,传线上域名,开通访问也是可以的 + if domain == "" { + domain, test.Err = util.GetLocalIP() + if test.Err != nil { + return test + } + } + test.Domain = domain + return test +} + +// SetDbType 设置DbType,默认为 TendisplusInstance +func (test *RedisKeysFilesDeleteJobTest) SetDbType(dbType string) *RedisKeysFilesDeleteJobTest { + if test.Err != nil { + return test + } + if dbType == "" { + dbType = "TendisplusInstance" + } + test.TendisType = dbType + return test +} + +// SetPorts 设置 proxyPort 传入0 则默认为 proxyPort =consts.TestPredixyPort +func (test *RedisKeysFilesDeleteJobTest) SetPorts(proxyPort int) *RedisKeysFilesDeleteJobTest { + if test.Err != nil { + return test + } + // 不传端口, 则测试本地创建集群,传线上端口,开通访问也是可以的 + if proxyPort == 0 { + proxyPort = consts.TestPredixyPort + } + test.ProxyPort = proxyPort + return test + +} + +// SetPkg 设置 pkg信息,传入为空则pkg=dbtools.tar.gz,pkgMd5=334cf6e3b84d371325052d961584d5aa +func (test *RedisKeysFilesDeleteJobTest) SetPkg(pkg, pkgMd5 string) *RedisKeysFilesDeleteJobTest { + if test.Err != nil { + return test + } + if pkg == "" || pkgMd5 == "" { + pkg = "dbtools.tar.gz" + pkgMd5 = "334cf6e3b84d371325052d961584d5aa" + } + test.Pkg = pkg + test.PkgMd5 = pkgMd5 + return test +} + +// SetPath 设置 bkrepo 路径 +func (test 
*RedisKeysFilesDeleteJobTest) SetPath(path string) *RedisKeysFilesDeleteJobTest {
+ if test.Err != nil {
+ return test
+ }
+ if path == "" {
+ // this key file contains keys matching *string*
+ path = "/redis/keyfiles/fileDeleteUnitTest.cache2006.moyecachetest.redistest.db"
+ }
+ test.Path = path
+ return test
+}
+
+// SetFileServer sets the fileserver (bkrepo) info
+func (test *RedisKeysFilesDeleteJobTest) SetFileServer(repoUser, repoPassword,
+ repoUrl string) *RedisKeysFilesDeleteJobTest {
+ if test.Err != nil {
+ return test
+ }
+ fileserver := util.FileServerInfo{}
+ fileserver.URL = repoUrl
+ fileserver.Bucket = "bk-dbm-redistest"
+ fileserver.Password = repoPassword
+ fileserver.Username = repoUser
+ fileserver.Project = "bk-dbm"
+
+ test.FileServer = fileserver
+ return test
+}
+
+// SetDeleteRate sets the key delete rate; 0 defaults to 2000
+func (test *RedisKeysFilesDeleteJobTest) SetDeleteRate(deleteRate int) *RedisKeysFilesDeleteJobTest {
+ if test.Err != nil {
+ return test
+ }
+ if deleteRate == 0 {
+ deleteRate = 2000
+ }
+ test.DeleteRate = deleteRate
+ test.TendisplusDeleteRate = deleteRate
+ return test
+}
+
+// SetProxyPassword sets the proxy password; empty input defaults to proxyPassTest
+func (test *RedisKeysFilesDeleteJobTest) SetProxyPassword(proxyPasswd string) *RedisKeysFilesDeleteJobTest {
+ if test.Err != nil {
+ return test
+ }
+ // must stay consistent with the definition in test.go
+ if proxyPasswd == "" {
+ proxyPasswd = "proxyPassTest"
+ }
+ test.ProxyPassword = proxyPasswd
+ return test
+}
+
+// RunRedisKeysFilesDelete runs the redis keysfiles delete atom job
+func (test *RedisKeysFilesDeleteJobTest) RunRedisKeysFilesDelete() {
+ msg := "=========redis keysfiles delete test start============"
+ fmt.Println(msg)
+
+ defer func() {
+ if test.Err != nil {
+ msg = "=========redis keysfiles delete test fail============"
+ } else {
+ msg = "=========redis keysfiles delete test success============"
+ }
+ fmt.Println(msg)
+ }()
+
+ paramBytes, _ := json.Marshal(test)
+ // fmt.Printf("-------payload(raw)--------\n%s\n\n", string(paramBytes))
+ // encodeStr := base64.StdEncoding.EncodeToString(paramBytes)
+ installCmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewTendisKeysFilesDelete().Name(), string(paramBytes))
+ fmt.Println(installCmd)
+ _, test.Err = util.RunBashCmd(installCmd, "", nil, 1*time.Hour)
+ if test.Err != nil {
+ return
+ }
+ return
+}
+
+// KeysFilesDelete tests the redis keysfiles delete job
+func KeysFilesDelete(serverIP string, proxyPort int,
+ repoUser, repoPassword, repoUrl, dbtoolsPkgName, dbtoolsPkgMd5 string) (err error) {
+ keysFilesDeleteTest := RedisKeysFilesDeleteJobTest{}
+ var clientPort int
+ if proxyPort == consts.TestPredixyPort {
+ clientPort = consts.TestTendisPlusMasterStartPort
+ } else if proxyPort == consts.TestTwemproxyPort {
+ clientPort = consts.TestRedisMasterStartPort
+ } else if proxyPort == consts.TestSSDClusterTwemproxyPort {
+ clientPort = consts.TestTendisSSDMasterStartPort
+ } else {
+ fmt.Printf("KeysFilesDelete failed: proxyPort must be consts.TestTwemproxyPort, consts.TestPredixyPort or consts.TestSSDClusterTwemproxyPort\n")
+ keysFilesDeleteTest.Err = fmt.Errorf("proxyPort must be TestTwemproxyPort, TestPredixyPort or TestSSDClusterTwemproxyPort")
+ return keysFilesDeleteTest.Err
+ }
+ keysFilesDeleteTest.SetBkBizID("testapp").
+ SetDomain("").SetDbType("").
+ SetPorts(proxyPort).
+ SetPkg(dbtoolsPkgName, dbtoolsPkgMd5).
+ SetPath("").SetFileServer(repoUser, repoPassword, repoUrl).
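+ // The empty-string arguments above deliberately select the setters' documented
+ // defaults: local IP as domain, TendisplusInstance as db type, and the bundled
+ // unit-test key file path.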
+ SetDeleteRate(0).SetProxyPassword("") + keysFilesDeleteTest.RunRedisKeysFilesDelete() + if keysFilesDeleteTest.Err != nil { + return keysFilesDeleteTest.Err + } + + // 在proxy上验证数据正则提取和删除结果;ssd 不支持scan ,和其他校验不同 + if proxyPort == consts.TestSSDClusterTwemproxyPort { + fmt.Printf("-----------SSD FileDelKeyTypeCheck -----------") + fmt.Println() + cmdTest, err := NewCommandTest(serverIP, consts.TestSSDClusterTwemproxyPort, consts.ProxyTestPasswd, + consts.TendisTypeTwemproxyTendisSSDInstance, 0) + + if err != nil { + return err + } + err = cmdTest.FileDelKeyTypeCheck() + + if err != nil { + fmt.Printf("SSDDelKeysCheck failed: %v", err) + return err + } + + } else { + cmdTest, err := NewCommandTest(serverIP, clientPort, consts.RedisTestPasswd, + consts.TendisTypeRedisInstance, 0) + if err != nil { + return err + } + // 选一个节点验证数据正则提取和删除结果 + err = cmdTest.FileDelKeysCheck() + if err != nil { + fmt.Printf("file delete fail:%v", err) + return err + } + + } + return keysFilesDeleteTest.Err +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_keyspattern.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_keyspattern.go new file mode 100644 index 0000000000..4bbf8e25e5 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_keyspattern.go @@ -0,0 +1,301 @@ +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "path/filepath" + "strconv" + "time" +) + +// RedisInsKeyPatternJobTest key提取&删除测试 +type RedisInsKeyPatternJobTest struct { + atomredis.RedisInsKeyPatternJobParam + Err error `json:"-"` +} + +// SetBkBizID 设置 BkBizID +func (test *RedisInsKeyPatternJobTest) SetBkBizID(bkBizID string) *RedisInsKeyPatternJobTest { + if test.Err != nil { + return test + } + if bkBizID == "" { + bkBizID = "testapp" + } + test.BkBizID = bkBizID + return test +} + +// SetIP set ip,传入为空则自动获取本地ip +func (test *RedisInsKeyPatternJobTest) SetIP(ip string) *RedisInsKeyPatternJobTest { + if test.Err != nil { + return test + } + if ip == "" || ip == "127.0.0.1" { + ip, test.Err = util.GetLocalIP() + if test.Err != nil { + return test + } + } + test.IP = ip + return test +} + +// SetPorts set ports +// 如果ports=[],startPort=0,instNum=0,则默认startPort=40000,instNum=4 +func (test *RedisInsKeyPatternJobTest) SetPorts(ports []int, startPort, instNum int) *RedisInsKeyPatternJobTest { + if test.Err != nil { + return test + } + if len(ports) == 0 { + if startPort == 0 { + startPort = consts.TestTendisPlusMasterStartPort + } + if instNum == 0 { + instNum = 4 + } + for i := 0; i < instNum; i++ { + ports = append(ports, startPort+i) + } + } + test.Ports = ports + test.StartPort = startPort + test.InstNum = instNum + return test +} + +// SetPkg set pkg信息,传入为空则pkg=dbtools.tar.gz,pkgMd5=334cf6e3b84d371325052d961584d5aa +func (test *RedisInsKeyPatternJobTest) SetPkg(pkg, pkgMd5 string) *RedisInsKeyPatternJobTest { + if test.Err != nil { + return test + } + if pkg == "" || pkgMd5 == "" { + pkg = "dbtools.tar.gz" + pkgMd5 = "334cf6e3b84d371325052d961584d5aa" + } + test.Pkg = pkg + test.PkgMd5 = pkgMd5 + return test +} + +// SetPath bkrepo 路径 +func (test *RedisInsKeyPatternJobTest) SetPath(path string) *RedisInsKeyPatternJobTest { + if test.Err != nil { + return test + } + if path == "" { + path = "/redis/keyfiles/unittest.cache2006.moyecachetest.redistest.db" + } + test.Path = path + return test +} + +// 
SetDomain sets the domain info
+func (test *RedisInsKeyPatternJobTest) SetDomain(domain string) *RedisInsKeyPatternJobTest {
+ if test.Err != nil {
+ return test
+ }
+ if domain == "" {
+ domain = "cache2006.moyecachetest.redistest.db"
+ }
+ test.Domain = domain
+ return test
+}
+
+// SetFileServer sets the fileserver (bkrepo) info
+func (test *RedisInsKeyPatternJobTest) SetFileServer(repoUser, repoPassword,
+ repoUrl string) *RedisInsKeyPatternJobTest {
+ if test.Err != nil {
+ return test
+ }
+ fileserver := util.FileServerInfo{}
+ fileserver.URL = repoUrl
+ fileserver.Bucket = "bk-dbm-redistest"
+ fileserver.Password = repoPassword
+ fileserver.Username = repoUser
+ fileserver.Project = "bk-dbm"
+
+ test.FileServer = fileserver
+ return test
+}
+
+// Setregex sets the key whitelist/blacklist regex info
+func (test *RedisInsKeyPatternJobTest) Setregex(whiteKey, blackKey string) *RedisInsKeyPatternJobTest {
+ if test.Err != nil {
+ return test
+ }
+ if whiteKey == "" || blackKey == "" {
+ whiteKey = "*hash*\n*set*"
+ blackKey = ""
+ }
+ test.KeyWhiteRegex = whiteKey
+ test.KeyBlackRegex = blackKey
+ return test
+}
+
+// SetDeleteRate enables key deletion and sets the delete rate
+func (test *RedisInsKeyPatternJobTest) SetDeleteRate(deleteRate int, keyDel bool) *RedisInsKeyPatternJobTest {
+ if test.Err != nil {
+ return test
+ }
+ // this test always deletes the extracted keys, whatever keyDel says
+ test.IsKeysToBeDel = true
+ if deleteRate == 0 {
+ deleteRate = 2000
+ }
+ test.DeleteRate = deleteRate
+ test.TendisplusDeleteRate = deleteRate
+ test.SsdDeleteRate = deleteRate
+
+ return test
+}
+
+// checkKeysPatternNums verifies the number of extracted keys.
+// The cluster is seeded with 400 keys (*string*, *hash*, *list*, *set*, 100 each);
+// extracting & deleting *hash* and *set* should yield 200.
+func (test *RedisInsKeyPatternJobTest) checkKeysPatternNums(startPort int) *RedisInsKeyPatternJobTest {
+ if test.Err != nil {
+ return test
+ }
+ var str string
+ var len01 int
+ mergeFile := filepath.Join("/data/dbbak/get_keys_pattern", "testapp.allPattern.keys.0")
+ var keyFiles string
+ if startPort == consts.TestTendisPlusSlaveStartPort {
+ str = strconv.Itoa(consts.TestTendisPlusSlaveStartPort)
+ len01 = len(str)
+ keyFiles = fmt.Sprintf("testapp.%s_%s*.keys", test.IP, str[:len01-1])
+ } else if startPort == consts.TestRedisSlaveStartPort {
+ str = strconv.Itoa(consts.TestRedisSlaveStartPort)
+ len01 = len(str)
+ keyFiles = fmt.Sprintf("testapp.%s_%s*.keys.0", test.IP, str[:len01-1])
+ } else if startPort == consts.TestTendisSSDSlaveStartPort {
+ str = strconv.Itoa(consts.TestTendisSSDSlaveStartPort)
+ len01 = len(str)
+ keyFiles = fmt.Sprintf("testapp.%s_%s*.keys", test.IP, str[:len01-1])
+ }
+ fmt.Println("startPort:", startPort)
+ mergeCmd := fmt.Sprintf(`cd /data/dbbak/get_keys_pattern
+ flock -x -w 600 ./lock -c 'cat %s > %s '`, keyFiles, mergeFile)
+ fmt.Println("mergeCmd:", mergeCmd)
+ _, err := util.RunLocalCmd("bash", []string{"-c", mergeCmd}, "", nil, 1*time.Hour)
+ if err != nil {
+ test.Err = fmt.Errorf("mergeCmd:%s err:%v", mergeCmd, err)
+ }
+
+ catCmd := `cd /data/dbbak/get_keys_pattern
+ flock -x -w 600 ./lock -c 'cat testapp.allPattern.keys.0 |wc -l'`
+ numKeys, err := util.RunLocalCmd("bash", []string{"-c", catCmd}, "", nil, 1*time.Hour)
+ if err != nil {
+ test.Err = fmt.Errorf("catCmd:%s err:%v", catCmd, err)
+ }
+ if numKeys == "200" {
+ msg := fmt.Sprintf("numKeys=%s matches the expected key count", numKeys)
+ fmt.Println(msg)
+ } else {
+ msg := fmt.Errorf("extracted %s keys, expected 200; check whether the seeded data or the key-extraction code changed: "+
+ "400 keys are written (*string*, *hash*, *list*, *set*, 100 each) and *hash* + *set* (200 in total) are extracted & deleted", numKeys)
+ fmt.Printf("checkKeysPatternNums failed: 
%v", msg) + test.Err = msg + + } + return test +} + +// RunRedisKeyspattern 执行 redis keyspattern 原子任务 +func (test *RedisInsKeyPatternJobTest) RunRedisKeyspattern(startPort int) { + + // 写入 400 : *string* ,*hash*,*list*,*set*, 各100个key,提取"hash* 和 *set* 共200 + msg := fmt.Sprintf("=========keyspattern test start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========keyspattern test fail============") + fmt.Println(test.Err) + } else { + msg = fmt.Sprintf("=========keyspattern test success============") + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test) + // fmt.Printf("-------payload(raw)--------\n%s\n\n", string(paramBytes)) + // encodeStr := base64.StdEncoding.EncodeToString(paramBytes) + instllCmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewTendisKeysPattern().Name(), string(paramBytes)) + fmt.Println(instllCmd) + _, test.Err = util.RunBashCmd(instllCmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + // 校验提取结果数量是否符合预期 + test.checkKeysPatternNums(startPort) + return +} + +// Keyspattern redis keyspattern 测试 +func Keyspattern(serverIP string, ports []int, startPort, instNum int, repoUser, + repoPassword, repoUrl, dbtoolsPkgName, dbtoolsPkgMd5 string) (err error) { + keyspatternTest := RedisInsKeyPatternJobTest{} + var clientPort int + if startPort == consts.TestTendisPlusSlaveStartPort { + clientPort = consts.TestTendisPlusMasterStartPort + } else if startPort == consts.TestRedisSlaveStartPort { + clientPort = consts.TestRedisMasterStartPort + } else if startPort == consts.TestTendisSSDSlaveStartPort { + clientPort = consts.TestTendisSSDMasterStartPort + } else { + fmt.Printf("redisKeyspatternTest failed :请确认输入的startPort是否是定义的:TendisPlusSlaveStartPort或者RedisSlaveStartPort") + keyspatternTest.Err = fmt.Errorf( + "请确认输入的是否是定义的:TendisPlusSlaveStartPort或者RedisSlaveStartPort或者TestTendisSSDSlaveStartPort") + return keyspatternTest.Err + } + keyspatternTest.SetBkBizID("testapp"). + SetIP(serverIP).SetPorts(ports, startPort, instNum). + SetPkg(dbtoolsPkgName, dbtoolsPkgMd5). + SetPath("").SetDomain("").SetFileServer(repoUser, repoPassword, repoUrl). 
+ Setregex("", "").SetDeleteRate(0, true) + keyspatternTest.RunRedisKeyspattern(startPort) + if keyspatternTest.Err != nil { + return keyspatternTest.Err + } + + // 在proxy上验证数据正则提取和删除结果;ssd 不支持scan ,和其他校验不同 + if startPort == consts.TestTendisSSDSlaveStartPort { + fmt.Printf("-----------SSDDelKeysCheck-----------") + fmt.Println() + cmdTest, err := NewCommandTest(serverIP, consts.TestSSDClusterTwemproxyPort, consts.ProxyTestPasswd, + consts.TendisTypeTwemproxyTendisSSDInstance, 0) + + if err != nil { + return err + } + // err = cmdTest.SSDDelKeysCheck() + err = cmdTest.KeyTypeCheck() + + if err != nil { + fmt.Printf("SSDDelKeysCheck failed: %v", err) + return err + } + } else { + fmt.Printf("-----------DelKeysCheck-----------") + cmdTest, err := NewCommandTest(serverIP, clientPort, consts.RedisTestPasswd, + consts.TendisTypeRedisInstance, 0) + + if err != nil { + return err + } + err = cmdTest.DelKeysCheck() + if err != nil { + fmt.Printf("DelKeysCheck failed: %v", err) + return err + } + + } + + return keyspatternTest.Err +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_migrate_slots.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_migrate_slots.go new file mode 100644 index 0000000000..13501c0bf0 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_migrate_slots.go @@ -0,0 +1,196 @@ +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/models/myredis" + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "strconv" + "time" +) + +// TendisPlusMigrateSlotsTest slots 迁移测试 +type TendisPlusMigrateSlotsTest struct { + atomredis.TendisPlusMigrateSlotsParams + Err error `json:"-"` +} + +// SetMigrateSpecifiedSlot set MigrateSpecifiedSlot +func (test *TendisPlusMigrateSlotsTest) SetMigrateSpecifiedSlot(migrateSpecifiedSlot bool) *TendisPlusMigrateSlotsTest { + if test.Err != nil { + return test + } + test.MigrateSpecifiedSlot = migrateSpecifiedSlot + return test +} + +// SetSlots 设置迁移slots +func (test *TendisPlusMigrateSlotsTest) SetSlots(slots string) *TendisPlusMigrateSlotsTest { + if test.Err != nil { + return test + } + if slots == "" { + test.Err = fmt.Errorf("TendisPlusMigrateSlotsTest Slots(%s) cannot be empty", slots) + fmt.Println(test.Err.Error()) + return test + } + test.Slots = slots + return test +} + +// SetIP set ip,传入为空则自动获取本地ip +func (test *TendisPlusMigrateSlotsTest) SetIP(ip string) *TendisPlusMigrateSlotsTest { + if test.Err != nil { + return test + } + if ip == "" || ip == "127.0.0.1" { + ip, test.Err = util.GetLocalIP() + if test.Err != nil { + return test + } + } + test.SrcNode.IP = ip + test.DstNode.IP = ip + return test +} + +// SetSrcNodeItem 设置迁移slots目标节点 +func (test *TendisPlusMigrateSlotsTest) SetSrcNodeItem(password string, port int) *TendisPlusMigrateSlotsTest { + if test.Err != nil { + return test + } + + if password == "" { + test.Err = fmt.Errorf("ClusterMeetTest password(%s) cannot be empty", password) + fmt.Println(test.Err.Error()) + return test + } + test.SrcNode.Password = password + if port == 0 { + test.Err = fmt.Errorf("ClusterMeetTest port(%d) cannot be empty", port) + fmt.Println(test.Err.Error()) + return test + } + test.SrcNode.Port = port + return test +} + +// SetDstNodeItem 设置迁移slots源节点 +func (test *TendisPlusMigrateSlotsTest) SetDstNodeItem(password string, port int) *TendisPlusMigrateSlotsTest { + if test.Err != 
nil {
+ return test
+ }
+
+ if password == "" {
+ test.Err = fmt.Errorf("ClusterMeetTest password(%s) cannot be empty", password)
+ fmt.Println(test.Err.Error())
+ return test
+ }
+ test.DstNode.Password = password
+ if port == 0 {
+ test.Err = fmt.Errorf("ClusterMeetTest port(%d) cannot be empty", port)
+ fmt.Println(test.Err.Error())
+ return test
+ }
+ test.DstNode.Port = port
+ return test
+}
+
+// MigrateSpecificSlots tests migrating a specific slot range
+func MigrateSpecificSlots(localIP, password string, srcPort, dstPort int) (err error) {
+ migrateSlotsTest := TendisPlusMigrateSlotsTest{}
+ // SlotsMigrateTest = "0-100"
+ migrateSlotsTest.SetSlots(consts.SlotsMigrateTest).
+ SetMigrateSpecifiedSlot(true).
+ SetIP(localIP).
+ SetSrcNodeItem(password, srcPort).
+ SetDstNodeItem(password, dstPort)
+
+ if migrateSlotsTest.Err != nil {
+ return migrateSlotsTest.Err
+ }
+ migrateSlotsTest.RunTendisPlusMigrateSlotsTest()
+ if migrateSlotsTest.Err != nil {
+ return migrateSlotsTest.Err
+ }
+
+ dstNodeAddr := localIP + ":" + strconv.Itoa(dstPort)
+
+ // connect to the destination node to verify the migrated slots landed there
+ dstNodeCli, err := myredis.NewRedisClient(dstNodeAddr,
+ password, 0, consts.TendisTypeRedisInstance)
+ if err != nil {
+ err = fmt.Errorf("get dst NewRedisClient Err:%v", err)
+ fmt.Println(err.Error())
+ return
+ }
+ slots, _, _, _, err := myredis.DecodeSlotsFromStr(consts.SlotsMigrateTest, " ")
+ if err != nil {
+ fmt.Println(err.Error())
+ return err
+ }
+ allBelong, notBelongList, err := dstNodeCli.IsSlotsBelongMaster(dstNodeAddr, slots)
+ if err != nil {
+ err = fmt.Errorf("check IsSlotsBelongMaster Err:%v", err)
+ fmt.Println(err.Error())
+ return err
+ }
+ if !allBelong {
+ err = fmt.Errorf("slots:%s do not belong to dstNode:%s",
+ util.IntSliceToString(notBelongList, ","), dstNodeAddr)
+ fmt.Println(err.Error())
+ return err
+ }
+ msg := fmt.Sprintf("slots:%s now belong to dst master:%s, MigrateSpecificSlots success", consts.SlotsMigrateTest, dstNodeAddr)
+ fmt.Println(msg)
+ return migrateSlotsTest.Err
+}
+
+// Rebalance tests slot rebalancing for cluster expansion
+func Rebalance(localIP, password string, srcPort, dstPort int) (err error) {
+ migrateSlotsTest := TendisPlusMigrateSlotsTest{}
+
+ migrateSlotsTest.SetSlots(consts.SlotsMigrateTest).
+ SetMigrateSpecifiedSlot(false).
+ SetIP(localIP).
+ SetSrcNodeItem(password, srcPort).
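+ // With SetMigrateSpecifiedSlot(false) the atom job presumably rebalances slots
+ // across the whole cluster; the fixed consts.SlotsMigrateTest range matters
+ // only in MigrateSpecificSlots above.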
+ SetDstNodeItem(password, dstPort)
+
+ if migrateSlotsTest.Err != nil {
+ return migrateSlotsTest.Err
+ }
+ migrateSlotsTest.RunTendisPlusMigrateSlotsTest()
+ if migrateSlotsTest.Err != nil {
+ return migrateSlotsTest.Err
+ }
+ return migrateSlotsTest.Err
+}
+
+// RunTendisPlusMigrateSlotsTest sets up cluster relations and slot assignment
+func (test *TendisPlusMigrateSlotsTest) RunTendisPlusMigrateSlotsTest() {
+ msg := "=========TendisPlusMigrateSlotsTest test start============"
+ fmt.Println(msg)
+
+ defer func() {
+ if test.Err != nil {
+ msg = "=========TendisPlusMigrateSlotsTest fail============"
+ } else {
+ msg = "=========TendisPlusMigrateSlotsTest success============"
+ }
+ fmt.Println(msg)
+ }()
+
+ paramBytes, _ := json.Marshal(test)
+ // fmt.Printf("-------payload(raw)--------\n%s\n\n", string(paramBytes))
+ // encodeStr := base64.StdEncoding.EncodeToString(paramBytes)
+ installCmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewTendisPlusMigrateSlots().Name(), string(paramBytes))
+ fmt.Println(installCmd)
+ _, test.Err = util.RunBashCmd(installCmd, "", nil, 1*time.Hour)
+ if test.Err != nil {
+ return
+ }
+ return
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_replicaof.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_replicaof.go
new file mode 100644
index 0000000000..845d2e86c1
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_replicaof.go
@@ -0,0 +1,173 @@
+package redistest
+
+import (
+ "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis"
+ "dbm-services/redis/db-tools/dbactuator/pkg/consts"
+ "dbm-services/redis/db-tools/dbactuator/pkg/util"
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+// RedisReplicaofTest master/slave replication setup test
+type RedisReplicaofTest struct {
+ BatchPairs []atomredis.ReplicaBatchItem `json:"bacth_pairs" validate:"required"`
+ Err error `json:"-"`
+}
+
+// SetMasterIP sets the master ip
+func (test *RedisReplicaofTest) SetMasterIP(masterIP string) *RedisReplicaofTest {
+ if test.Err != nil {
+ return test
+ }
+ if len(test.BatchPairs) == 0 {
+ test.BatchPairs = append(test.BatchPairs, atomredis.ReplicaBatchItem{})
+ }
+ if masterIP == "" {
+ test.Err = fmt.Errorf("RedisReplicaofTest masterIP cannot be empty")
+ fmt.Println(test.Err.Error())
+ return test
+ }
+ test.BatchPairs[0].MasterIP = masterIP
+ return test
+}
+
+// SetMasterPorts sets the master start port and instance count
+func (test *RedisReplicaofTest) SetMasterPorts(startPort, instNum int) *RedisReplicaofTest {
+ if test.Err != nil {
+ return test
+ }
+ if startPort == 0 {
+ test.Err = fmt.Errorf("RedisReplicaofTest masterStartPort cannot be 0")
+ fmt.Println(test.Err.Error())
+ return test
+ }
+ if instNum == 0 {
+ test.Err = fmt.Errorf("RedisReplicaofTest masterInstNum cannot be 0")
+ fmt.Println(test.Err.Error())
+ return test
+ }
+ if len(test.BatchPairs) == 0 {
+ test.BatchPairs = append(test.BatchPairs, atomredis.ReplicaBatchItem{})
+ }
+ test.BatchPairs[0].MasterStartPort = startPort
+ test.BatchPairs[0].MasterInstNum = instNum
+ return test
+}
+
+// SetMasterAuth sets the masterAuth
+func (test *RedisReplicaofTest) SetMasterAuth(masterAuth string) *RedisReplicaofTest {
+ if test.Err != nil {
+ return test
+ }
+ if masterAuth == "" {
+ test.Err = fmt.Errorf("RedisReplicaofTest masterAuth cannot be empty")
+ fmt.Println(test.Err.Error())
+ return test
+ }
+ if len(test.BatchPairs) == 0 {
+ test.BatchPairs = append(test.BatchPairs, atomredis.ReplicaBatchItem{})
+ }
+ test.BatchPairs[0].MasterAuth = masterAuth
+ return test
+}
+
+// SetSlaveIP sets the slave ip
+func (test *RedisReplicaofTest) SetSlaveIP(slaveIP string) *RedisReplicaofTest {
+ if test.Err != nil {
+ return test
+ }
+ if slaveIP == "" {
+ test.Err = fmt.Errorf("RedisReplicaofTest slaveIP cannot be empty")
+ fmt.Println(test.Err.Error())
+ return test
+ }
+ // guard against calling SetSlaveIP before any master setter initialised BatchPairs
+ if len(test.BatchPairs) == 0 {
+ test.BatchPairs = append(test.BatchPairs, atomredis.ReplicaBatchItem{})
+ }
+ test.BatchPairs[0].SlaveIP = slaveIP
+ return test
+}
+
+// SetSlavePorts sets the slave start port and instance count
+func (test *RedisReplicaofTest) SetSlavePorts(startPort, instNum int) *RedisReplicaofTest {
+ if test.Err != nil {
+ return test
+ }
+ if startPort == 0 {
+ test.Err = fmt.Errorf("RedisReplicaofTest slaveStartPort cannot be 0")
+ fmt.Println(test.Err.Error())
+ return test
+ }
+ if instNum == 0 {
+ test.Err = fmt.Errorf("RedisReplicaofTest slaveInstNum cannot be 0")
+ fmt.Println(test.Err.Error())
+ return test
+ }
+ if len(test.BatchPairs) == 0 {
+ test.BatchPairs = append(test.BatchPairs, atomredis.ReplicaBatchItem{})
+ }
+ test.BatchPairs[0].SlaveStartPort = startPort
+ test.BatchPairs[0].SlaveInstNum = instNum
+ return test
+}
+
+// SetSlaveAuth sets the slavePassword
+func (test *RedisReplicaofTest) SetSlaveAuth(slavePassword string) *RedisReplicaofTest {
+ if test.Err != nil {
+ return test
+ }
+ if slavePassword == "" {
+ test.Err = fmt.Errorf("RedisReplicaofTest slavePassword cannot be empty")
+ fmt.Println(test.Err.Error())
+ return test
+ }
+ if len(test.BatchPairs) == 0 {
+ test.BatchPairs = append(test.BatchPairs, atomredis.ReplicaBatchItem{})
+ }
+ test.BatchPairs[0].SlavePassword = slavePassword
+ return test
+}
+
+// RunReplicaOf runs the replicaof atom job
+func (test *RedisReplicaofTest) RunReplicaOf() {
+ msg := "=========ReplicaOf test start============"
+ fmt.Println(msg)
+
+ defer func() {
+ if test.Err != nil {
+ msg = "=========ReplicaOf test fail============"
+ } else {
+ msg = "=========ReplicaOf test success============"
+ }
+ fmt.Println(msg)
+ }()
+
+ paramBytes, _ := json.Marshal(test)
+ // fmt.Printf("-------payload(raw)--------\n%s\n\n", string(paramBytes))
+ // encodeStr := base64.StdEncoding.EncodeToString(paramBytes)
+ installCmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewRedisReplicaBatch().Name(), string(paramBytes))
+ fmt.Println(installCmd)
+ _, test.Err = util.RunBashCmd(installCmd, "", nil, 1*time.Hour)
+ if test.Err != nil {
+ return
+ }
+ return
+}
+
+// CreateReplicaof establishes master/slave replication
+func CreateReplicaof(masterIP string, masterPort int, masterAuth string,
+ slaveIP string, slavePort int, slaveAuth string) (err error) {
+ replicaOfTest := RedisReplicaofTest{}
+ replicaOfTest.SetMasterIP(masterIP).
+ SetMasterPorts(masterPort, consts.TestRedisInstanceNum).
+ SetMasterAuth(masterAuth).
+ SetSlaveIP(slaveIP).
+ SetSlavePorts(slavePort, consts.TestRedisInstanceNum).
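+ // Hedged example with hypothetical addresses: pair consts.TestRedisInstanceNum
+ // masters starting at 40000 with slaves starting at 41000, sharing one password:
+ //   err := CreateReplicaof("1.1.1.1", 40000, "passwd", "1.1.1.2", 41000, "passwd")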
+ SetSlaveAuth(slaveAuth) + if replicaOfTest.Err != nil { + return replicaOfTest.Err + } + replicaOfTest.RunReplicaOf() + if replicaOfTest.Err != nil { + return replicaOfTest.Err + } + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_scene.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_scene.go new file mode 100644 index 0000000000..dfd7383895 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_scene.go @@ -0,0 +1,130 @@ +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "time" +) + +// RedisSceneTest 场景需求测试 +type RedisSceneTest struct { + killParam atomredis.KillDeadParam + syncParam atomredis.DoSyncParam + checkParam atomredis.CheckSyncParam + Err error `json:"-"` +} + +// SetInatances 可配置 同步参数场景 +func (test *RedisSceneTest) SetInatances(srcip, dstip string, startPort, sStartPort, instNum int) { + if test.Err != nil { + return + } + if srcip == "" || dstip == "" { + test.Err = fmt.Errorf("bad input for ip[%s,%s]", srcip, dstip) + return + } + if startPort == 0 { + startPort = consts.TestTendisPlusMasterStartPort + } + + if instNum == 0 { + instNum = 4 + } + for i := 0; i < instNum; i++ { + port := startPort + i + sport := sStartPort + i + + test.killParam.Instances = append(test.killParam.Instances, atomredis.InstanceParam{IP: srcip, Port: port}) + test.checkParam.Instances = append(test.checkParam.Instances, atomredis.InstanceParam{IP: dstip, Port: sport}) + test.syncParam.Instances = append(test.syncParam.Instances, atomredis.InstanceSwitchParam{ + MasterInfo: atomredis.InstanceParam{IP: srcip, Port: port}, + SlaveInfo: atomredis.InstanceParam{IP: dstip, Port: sport}}) + } +} + +// SetClusterType TODO +func (test *RedisSceneTest) SetClusterType(t string) { + if test.Err != nil { + return + } + + test.checkParam.ClusterType = t + test.checkParam.MaxSlaveLastIOSecondsAgo = 60 + test.checkParam.WatchSeconds = 600 + + test.killParam.ClusterType = t + test.killParam.ConnIdleTime = 600 + + test.syncParam.ClusterType = t + test.syncParam.ParamList = []string{"disk-delete-count", "maxmemory", "log-count", "log-keep-count", + "slave-log-keep-count"} + +} + +// RunRedisKillConn redis实例停止 +func (test *RedisSceneTest) RunRedisKillConn() error { + msg := fmt.Sprintf("=========RedisKillConnTest start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========RedisKillConnTest fail============") + } else { + msg = fmt.Sprintf("=========RedisKillConnTest success============") + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test.killParam) + cmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewRedisSceneKillDeadConn().Name(), string(paramBytes)) + fmt.Println(cmd) + _, test.Err = util.RunBashCmd(cmd, "", nil, 1*time.Hour) + return test.Err +} + +// RunRedisSyncParams TODO +// RunRedisShutdown redis实例停止 +func (test *RedisSceneTest) RunRedisSyncParams() error { + msg := fmt.Sprintf("=========RedisSyncParamsTest start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========RedisSyncParamsTest fail============") + } else { + msg = fmt.Sprintf("=========RedisSyncParamsTest success============") + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test.syncParam) + cmd := fmt.Sprintf(consts.ActuatorTestCmd, 
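+ // consts.ActuatorTestCmd renders the dbactuator invocation for a single atom
+ // job; every Run* helper in this package marshals its params struct and shells
+ // out through util.RunBashCmd in the same way.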
atomredis.NewRedisSceneSyncPrams().Name(), string(paramBytes)) + fmt.Println(cmd) + _, test.Err = util.RunBashCmd(cmd, "", nil, 1*time.Hour) + return test.Err +} + +// RunRedisCheckSyncStatus TODO +// RunRedisShutdown redis实例停止 +func (test *RedisSceneTest) RunRedisCheckSyncStatus() error { + msg := fmt.Sprintf("=========RedisCheckSyncStatusTest start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========RedisCheckSyncStatusTest fail============") + } else { + msg = fmt.Sprintf("=========RedisCheckSyncStatusTest success============") + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test.checkParam) + cmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewRedisSceneSyncCheck().Name(), string(paramBytes)) + fmt.Println(cmd) + _, test.Err = util.RunBashCmd(cmd, "", nil, 1*time.Hour) + return test.Err +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_shutdown.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_shutdown.go new file mode 100644 index 0000000000..e704626d5b --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_shutdown.go @@ -0,0 +1,122 @@ +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "sync" + "time" +) + +// RedisShutDownTest redis实例下架 +type RedisShutDownTest struct { + atomredis.RedisShutdownParams + Err error `json:"-"` +} + +// SetIP set ip,传入为空则自动获取本地ip +func (test *RedisShutDownTest) SetIP(ip string) *RedisShutDownTest { + if test.Err != nil { + return test + } + if ip == "" || ip == "127.0.0.1" { + ip, test.Err = util.GetLocalIP() + if test.Err != nil { + return test + } + } + test.IP = ip + return test +} + +// SetPorts set port +func (test *RedisShutDownTest) SetPorts(ports []int, startPort, instNum int) *RedisShutDownTest { + if test.Err != nil { + return test + } + if len(ports) == 0 { + if startPort == 0 { + startPort = consts.TestTendisPlusMasterStartPort + } + if instNum == 0 { + instNum = 4 + } + for i := 0; i < instNum; i++ { + ports = append(ports, startPort+i) + } + } + test.Ports = ports + return test +} + +// SetTest set test +func (test *RedisShutDownTest) SetTest() *RedisShutDownTest { + if test.Err != nil { + return test + } + test.Debug = true + return test +} + +// RunRedisShutdown redis实例停止 +func (test *RedisShutDownTest) RunRedisShutdown() { + msg := fmt.Sprintf("=========RedisShutDownTest start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========RedisShutDownTest fail============") + } else { + msg = fmt.Sprintf("=========RedisShutDownTest success============") + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test) + cmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewRedisShutdown().Name(), string(paramBytes)) + fmt.Println(cmd) + _, test.Err = util.RunBashCmd(cmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + return +} + +// RedisShutdown redis关闭测试 +func RedisShutdown(serverIP string, masterStartPort, slaveStartPort, insNum int) (err error) { + slaveShutdownTest := RedisShutDownTest{} + slaveShutdownTest.SetIP(serverIP).SetPorts([]int{}, masterStartPort, insNum).SetTest() + if slaveShutdownTest.Err != nil { + return slaveShutdownTest.Err + } + + masterShutdownTest := RedisShutDownTest{} + masterShutdownTest.SetIP(serverIP).SetPorts([]int{}, 
slaveStartPort, insNum).SetTest() + if masterShutdownTest.Err != nil { + return masterShutdownTest.Err + } + + wg := sync.WaitGroup{} + wg.Add(2) + go func() { + defer wg.Done() + + slaveShutdownTest.RunRedisShutdown() + }() + go func() { + defer wg.Done() + masterShutdownTest.RunRedisShutdown() + }() + wg.Wait() + if slaveShutdownTest.Err != nil { + return slaveShutdownTest.Err + } + + if masterShutdownTest.Err != nil { + return masterShutdownTest.Err + } + + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_switch.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_switch.go new file mode 100644 index 0000000000..cd47bca015 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redis_switch.go @@ -0,0 +1,127 @@ +// Package redistest redis切换测试 +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "time" +) + +// RedisSwitchTest 集群切换 单元测试 +type RedisSwitchTest struct { + atomredis.SwitchParam + Err error `json:"-"` +} + +/*测试集群架构: + 1.1.a.1:40000 1.1b.1:41000 + 1.1.a.1:50000 1.1.a.1:40001 1.1.1.b:41001 + 1.1.1.1:40002 1.1.b.1:41002 +-------------------------------------------------------------- + 1.1.a.1:42000 1.1.b.1:43000 + 1.1.a.1:42001 1.1.b.1:43001 +*/ + +// SetDefaultClusterMeta 设置集群元数据 +func (test *RedisSwitchTest) SetDefaultClusterMeta(ppass, spass string) *RedisSwitchTest { + if test.Err != nil { + return test + } + test.ClusterMeta.BkBizID = 119 + test.ClusterMeta.ImmuteDomain = "cache1.hitest.testapp.db" + test.ClusterMeta.ProxyPassword = ppass + test.ClusterMeta.StoragePassword = spass + return test +} + +// SetClusterType 设置集群元数据ClusterType +func (test *RedisSwitchTest) SetClusterType(ctp string) *RedisSwitchTest { + if test.Err != nil { + return test + } + test.ClusterMeta.ClusterType = ctp + return test +} + +// SetProxySet 设置集群元数据ProxySet +func (test *RedisSwitchTest) SetProxySet(proxyAddr string) *RedisSwitchTest { + if test.Err != nil { + return test + } + test.ClusterMeta.ProxySet = []string{proxyAddr} + return test +} + +// SetMasterSet 设置集群元数据RedisMasterSet +func (test *RedisSwitchTest) SetMasterSet(addrs []string) *RedisSwitchTest { + if test.Err != nil { + return test + } + + test.ClusterMeta.RedisMasterSet = addrs + return test +} + +// SetSlaveSet 设置集群元数据RedisSlaveSet +func (test *RedisSwitchTest) SetSlaveSet(addrs []string) *RedisSwitchTest { + if test.Err != nil { + return test + } + test.ClusterMeta.RedisSlaveSet = addrs + return test +} + +// SetSwitchInfo 设置集群元数据SwitchRelation +func (test *RedisSwitchTest) SetSwitchInfo(sinfos []atomredis.InstanceSwitchParam) *RedisSwitchTest { + if test.Err != nil { + return test + } + test.SwitchRelation = sinfos + return test +} + +// SetDefaultSwitchCondition 配置默认切换行为 可选值 [msms|mms] +func (test *RedisSwitchTest) SetDefaultSwitchCondition(stp string) *RedisSwitchTest { + if test.Err != nil { + return test + } + test.SyncCondition.IsCheckSync = true + test.SyncCondition.MaxSlaveMasterDiffTime = 61 + test.SyncCondition.MaxSlaveLastIOSecondsAgo = 100 + test.SyncCondition.CanWriteBeforeSwitch = false + + if stp == "" { + test.SyncCondition.InstanceSyncType = "msms" + } else { + test.SyncCondition.InstanceSyncType = stp + } + return test +} + +// RunTendisSwitch 执行 tendis 切换测试 +func (test *RedisSwitchTest) RunTendisSwitch() { + fmt.Println("=========tendisSwitch test 
start============") + + defer func() { + var msg string + if test.Err != nil { + msg = "=========tendisSwitch test fail============" + fmt.Println(test.Err) + } else { + msg = "=========tendisSwitch test success============" + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test) + cmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewRedisSwitch().Name(), string(paramBytes)) + fmt.Println(cmd) + _, test.Err = util.RunBashCmd(cmd, "", nil, 1*time.Hour) + if test.Err != nil { + fmt.Printf("run bash cmd failed :+%+v", test.Err) + return + } +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/redistest.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redistest.go new file mode 100644 index 0000000000..74374a5549 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/redistest.go @@ -0,0 +1,45 @@ +// Package redistest redis test +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "fmt" + "strconv" + "time" +) + +// CheckPortUntilNotUse 检查端口直到其关闭 +func CheckPortUntilNotUse(ip string, port int) { + var isUse bool + for { + isUse, _ = util.CheckPortIsInUse(ip, strconv.Itoa(port)) + if isUse { + fmt.Printf("%s:%d is using\n", ip, port) + time.Sleep(time.Second) + continue + } + break + } +} + +// StopRedisProcess 停止redis进程 +func StopRedisProcess(ip string, port int, password, serverName string) (err error) { + var stopCmd string + var isUsing bool + isUsing, _ = util.CheckPortIsInUse(ip, strconv.Itoa(port)) + if !isUsing { + return nil + } + if util.FileExists("/usr/local/bin/stop-redis.sh") { + stopCmd = fmt.Sprintf("cd /usr/local/redis && ./bin/stop-redis.sh %d %s", port, password) + fmt.Println(stopCmd) + util.RunBashCmd(stopCmd, "", nil, 10*time.Second) + } else { + killCmd := fmt.Sprintf("ps -ef | grep -w %s | grep %d | grep -v grep | awk '{print $2}'|xargs kill -9", serverName, + port) + fmt.Println(killCmd) + util.RunBashCmd(killCmd, "", nil, 10*time.Second) + } + CheckPortUntilNotUse(ip, port) + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/redistest/tendisssd_dr_restore.go b/dbm-services/redis/db-tools/dbactuator/tests/redistest/tendisssd_dr_restore.go new file mode 100644 index 0000000000..c31c742e49 --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/redistest/tendisssd_dr_restore.go @@ -0,0 +1,185 @@ +package redistest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "path/filepath" + "time" +) + +// TendisSsdDrRestoreTest tendis-ssd 重建dr测试 +type TendisSsdDrRestoreTest struct { + atomredis.TendisssdDrRestoreParams + Err error `json:"-"` +} + +// SetMasterIP set master ip,传入为空则自动获取本地ip +func (test *TendisSsdDrRestoreTest) SetMasterIP(ip string) *TendisSsdDrRestoreTest { + if test.Err != nil { + return test + } + if ip == "" || ip == "127.0.0.1" { + ip, test.Err = util.GetLocalIP() + if test.Err != nil { + return test + } + } + test.MasterIP = ip + return test +} + +// SetMasterPorts set ports +// 如果ports=[],startPort=0,instNum=0,则默认startPort=40000,instNum=4 +func (test *TendisSsdDrRestoreTest) SetMasterPorts(ports []int, startPort, instNum int) *TendisSsdDrRestoreTest { + if test.Err != nil { + return test + } + if len(ports) == 0 { + if startPort == 0 { + startPort = consts.TestTendisSSDMasterStartPort + } + if instNum == 0 { + instNum = 4 + } + for i := 0; i < instNum; i++ { + ports = 
append(ports, startPort+i) + } + } + test.MasterPorts = ports + test.MasterStartPort = startPort + test.MasterInstNum = instNum + return test +} + +// SetMasterAuth set master auth,传入为空则password=xxxxx +func (test *TendisSsdDrRestoreTest) SetMasterAuth(password string) *TendisSsdDrRestoreTest { + if test.Err != nil { + return test + } + if password == "" { + password = "xxxx" + } + test.MasterAuth = password + return test +} + +// SetSlaveIP set slave ip,传入为空则自动获取本地ip +func (test *TendisSsdDrRestoreTest) SetSlaveIP(ip string) *TendisSsdDrRestoreTest { + if test.Err != nil { + return test + } + if ip == "" || ip == "127.0.0.1" { + ip, test.Err = util.GetLocalIP() + if test.Err != nil { + return test + } + } + test.SlaveIP = ip + return test +} + +// SetSlavePorts set ports +// 如果ports=[],startPort=0,instNum=0,则默认startPort=40000,instNum=4 +func (test *TendisSsdDrRestoreTest) SetSlavePorts(ports []int, startPort, instNum int) *TendisSsdDrRestoreTest { + if test.Err != nil { + return test + } + if len(ports) == 0 { + if startPort == 0 { + startPort = consts.TestTendisSSDSlaveStartPort + } + if instNum == 0 { + instNum = 4 + } + for i := 0; i < instNum; i++ { + ports = append(ports, startPort+i) + } + } + test.SlavePorts = ports + test.SlaveStartPort = startPort + test.SlaveInstNum = instNum + return test +} + +// SetSlavePasswd set slave password,传入为空则password=xxxxx +func (test *TendisSsdDrRestoreTest) SetSlavePasswd(password string) *TendisSsdDrRestoreTest { + if test.Err != nil { + return test + } + if password == "" { + password = "xxxx" + } + test.SlavePassword = password + return test +} + +// SetBackupDir set backup dir 设置backup dir +func (test *TendisSsdDrRestoreTest) SetBackupDir(dir string) *TendisSsdDrRestoreTest { + if test.Err != nil { + return test + } + if dir == "" { + dir = filepath.Join(consts.GetRedisBackupDir(), "dbbak") + } + test.BackupDir = dir + return test +} + +// SetBackupTasks 设置备份信息 +func (test *TendisSsdDrRestoreTest) SetBackupTasks(bakTasks []atomredis.BackupTask) *TendisSsdDrRestoreTest { + if test.Err != nil { + return test + } + test.BackupTasks = bakTasks + return test +} + +// RunRestore 执行ssd restore +func (test *TendisSsdDrRestoreTest) RunRestore() { + msg := fmt.Sprintf("=========TendisSSDDrRestore test start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========TendisSSDDrRestore test fail============") + } else { + msg = fmt.Sprintf("=========TendisSSDDrRestore test success============") + } + fmt.Println(msg) + }() + + paramBytes, _ := json.Marshal(test) + instllCmd := fmt.Sprintf(consts.ActuatorTestCmd, atomredis.NewTendisssdDrRestore().Name(), string(paramBytes)) + fmt.Println(instllCmd) + _, test.Err = util.RunBashCmd(instllCmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + return +} + +// SsdRestore ssd恢复slave +func SsdRestore(masterIP string, masterPorts []int, masterStartPort, masterInstNum int, masterAuth string, + slaveIP string, slavePorts []int, slaveStartPort, slaveInstNum int, slavePasswd string, + backupDir string, bakTasks []atomredis.BackupTask) (err error) { + restoreTask := TendisSsdDrRestoreTest{} + restoreTask.SetMasterIP(masterIP). + SetMasterPorts(masterPorts, masterStartPort, masterInstNum). + SetMasterAuth(masterAuth). + SetSlaveIP(slaveIP). + SetSlavePorts(slavePorts, slaveStartPort, slaveInstNum). + SetSlavePasswd(slavePasswd). + SetBackupDir(backupDir). 
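+ // BackupTasks carries the artifacts (atomredis.BackupTask) produced by an
+ // earlier backup run; the dr-restore job replays them onto the slave side.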
+ SetBackupTasks(bakTasks) + if restoreTask.Err != nil { + return restoreTask.Err + } + restoreTask.RunRestore() + if restoreTask.Err != nil { + return + } + return nil +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/systest/sysinit.go b/dbm-services/redis/db-tools/dbactuator/tests/systest/sysinit.go new file mode 100644 index 0000000000..79ef1a359c --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/systest/sysinit.go @@ -0,0 +1,49 @@ +// Package systest mysys test +package systest + +import ( + "dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomsys" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "encoding/json" + "fmt" + "time" +) + +// SysInitTest 系统初始化测试 +type SysInitTest struct { + atomsys.SysInitParams + Err error `json:"-"` +} + +// Run run +func (test *SysInitTest) Run() { + msg := fmt.Sprintf("=========sys_init test start============") + fmt.Println(msg) + + defer func() { + if test.Err != nil { + msg = fmt.Sprintf("=========sys_init test fail============") + } else { + msg = fmt.Sprintf("=========sys_init test success============") + } + fmt.Println(msg) + }() + test.User = consts.MysqlAaccount + test.Password = "xxxx" + paramBytes, _ := json.Marshal(test) + initCmd := fmt.Sprintf(consts.ActuatorTestCmd, atomsys.NewSysInit().Name(), string(paramBytes)) + fmt.Println(initCmd) + _, test.Err = util.RunBashCmd(initCmd, "", nil, 1*time.Hour) + if test.Err != nil { + return + } + return +} + +// RunSysInit run sysinit +func RunSysInit() error { + test := &SysInitTest{} + test.Run() + return test.Err +} diff --git a/dbm-services/redis/db-tools/dbactuator/tests/test.go b/dbm-services/redis/db-tools/dbactuator/tests/test.go new file mode 100644 index 0000000000..0b27af816a --- /dev/null +++ b/dbm-services/redis/db-tools/dbactuator/tests/test.go @@ -0,0 +1,318 @@ +package main + +import ( + "dbm-services/redis/db-tools/dbactuator/mylog" + "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbactuator/pkg/util" + "dbm-services/redis/db-tools/dbactuator/tests/clustertest" + "dbm-services/redis/db-tools/dbactuator/tests/proxytest" + "dbm-services/redis/db-tools/dbactuator/tests/redistest" + "dbm-services/redis/db-tools/dbactuator/tests/systest" + "flag" + "fmt" + "net/url" + "os" +) + +var ( + localIP string + repoUrl string + repoUser string + repoPassword string + + tendisplusPkgName string + tendisplusPkgMd5 string + tendisssdPkgName string + tendisssdPkgMd5 string + redisPkgName string + redisPkgMd5 string + predixyPkgName string + predixyPkgMd5 string + twemproxyPkgName string + twemproxyPkgMd5 string + keytoolsPkgName string + keytoolsPkgMd5 string + dbtoolsPkgName string + dbtoolsPkgMd5 string + bkdbmonPkgName string + bkdbmonPkgMd5 string +) + +func before() (err error) { + mylog.UnitTestInitLog() + localIP, err = util.GetLocalIP() + if err != nil { + fmt.Println(err.Error()) + } + return err +} +func after() { + clustertest.PredixyTendisplusClusterClear(localIP, true) + clustertest.TwemproxyRedisInstanceClear(localIP, true) +} + +func main() { + flag.StringVar(&tendisplusPkgName, "tendisplus-pkgname", "", "tendisplus pkg name") + flag.StringVar(&tendisplusPkgMd5, "tendisplus-pkgmd5", "", "tendisplus pkg md5sum") + flag.StringVar(&tendisssdPkgName, "tendisssd-pkgname", "", "tendisssd pkg name") + flag.StringVar(&tendisssdPkgMd5, "tendisssd-pkgmd5", "", "tendisssd pkg md5sum") + flag.StringVar(&redisPkgName, "redis-pkgname", "", "redis pkg name") + 
flag.StringVar(&redisPkgMd5, "redis-pkgmd5", "", "redis pkg md5sum")
+ flag.StringVar(&predixyPkgName, "predixy-pkgname", "", "predixy pkg name")
+ flag.StringVar(&predixyPkgMd5, "predixy-pkgmd5", "", "predixy pkg md5sum")
+ flag.StringVar(&twemproxyPkgName, "twemproxy-pkgname", "", "twemproxy pkg name")
+ flag.StringVar(&twemproxyPkgMd5, "twemproxy-pkgmd5", "", "twemproxy pkg md5sum")
+ flag.StringVar(&dbtoolsPkgName, "dbtools-pkgname", "", "dbtools pkg name")
+ flag.StringVar(&dbtoolsPkgMd5, "dbtools-pkgmd5", "", "dbtools pkg md5sum")
+ flag.StringVar(&bkdbmonPkgName, "bkdbmon-pkgname", "", "bk-dbmon pkg name")
+ flag.StringVar(&bkdbmonPkgMd5, "bkdbmon-pkgmd5", "", "bk-dbmon pkg md5sum")
+ flag.StringVar(&repoUrl, "repo-url", "xxxx", "artifact repo url")
+ flag.StringVar(&repoUser, "user", "xxxx", "artifact repo user name")
+ flag.StringVar(&repoPassword, "password", "xxxx", "artifact repo user password")
+ flag.Parse()
+
+ var err error
+ before()
+
+ defer func() {
+ if err != nil {
+ os.Exit(-1)
+ }
+ }()
+
+ // normalize the artifact repo address to scheme://host
+ u, err := url.Parse(repoUrl)
+ if err != nil {
+ fmt.Println("invalid repo url, please check:", repoUrl)
+ return
+ }
+ repoUrl = u.Scheme + "://" + u.Host
+
+ systest.RunSysInit() // system init may fail; ignore the error and continue
+
+ err = clustertest.PredixyTendisplusClusterInstallTest(localIP,
+ tendisplusPkgName, tendisplusPkgMd5,
+ dbtoolsPkgName, dbtoolsPkgMd5,
+ predixyPkgName, predixyPkgMd5)
+ if err != nil {
+ return
+ }
+
+ err = redistest.BkDbmonInstall(localIP, dbtoolsPkgName, dbtoolsPkgMd5,
+ bkdbmonPkgName, bkdbmonPkgMd5,
+ consts.TendisTypePredixyTendisplusCluster)
+ if err != nil {
+ return
+ }
+ redistest.BkDbmonStopNew(localIP, dbtoolsPkgName, dbtoolsPkgMd5,
+ bkdbmonPkgName, bkdbmonPkgMd5)
+
+ err = redistest.Keyspattern(localIP, []int{},
+ consts.TestTendisPlusSlaveStartPort, consts.TestRedisInstanceNum,
+ repoUser, repoPassword, repoUrl, dbtoolsPkgName, dbtoolsPkgMd5)
+ if err != nil {
+ return
+ }
+ err = redistest.KeysFilesDelete(localIP, consts.TestPredixyPort,
+ repoUser, repoPassword, repoUrl,
+ dbtoolsPkgName, dbtoolsPkgMd5)
+ if err != nil {
+ return
+ }
+
+ _, err = redistest.Backup(localIP, []int{}, consts.TestTendisPlusSlaveStartPort, consts.TestRedisInstanceNum, nil)
+ if err != nil {
+ return
+ }
+ // slot expansion test: install additional nodes
+ err = clustertest.TendisplusScaleNodesInstall(localIP,
+ tendisplusPkgName, tendisplusPkgMd5,
+ dbtoolsPkgName, dbtoolsPkgMd5,
+ predixyPkgName, predixyPkgMd5,
+ consts.ExpansionTestTendisPlusMasterStartPort,
+ consts.ExpansionTestTendisPlusSlaveStartPort, consts.ExpansionTestRedisInstanceNum)
+ if err != nil {
+ return
+ }
+
+ // slot rebalance, used for expansion
+ err = clustertest.TendisPlusRebalence(localIP)
+ if err != nil {
+ return
+ }
+ // install one more node group for migrating specific slots (hot-key scenario)
+ err = clustertest.TendisplusScaleNodesInstall(localIP,
+ tendisplusPkgName, tendisplusPkgMd5,
+ dbtoolsPkgName, dbtoolsPkgMd5,
+ predixyPkgName, predixyPkgMd5,
+ consts.SlotTestTendisPlusMasterPort,
+ consts.SLotTestTendisPlusSlaveStart, consts.SLotTestRedisInstanceNum)
+ if err != nil {
+ return
+ }
+
+ // migrate specific slots (hot-key scenario)
+ err = clustertest.TendisPlusMigrateSpecificSlots(localIP)
+ if err != nil {
+ return
+ }
+ err = redistest.FlushData(localIP,
+ consts.TendisTypePredixyTendisplusCluster, consts.RedisTestPasswd, []int{},
+ consts.TestTendisPlusMasterStartPort, consts.TestRedisInstanceNum)
+ if err != nil {
+ return
+ }
+
+ err = clustertest.PredixyTendisPlusSwitchTest(localIP,
+ tendisplusPkgName, tendisplusPkgMd5,
+ dbtoolsPkgName, dbtoolsPkgMd5)
+ if err != nil {
+ return
+ }
+
+ // test predixy start/stop
+ err = 
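+ // Each step in this flow runs one actuator atom job and bails out on the first
+ // error; the deferred os.Exit(-1) above turns that into a non-zero CI status.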
proxytest.PredixyOpenClose(localIP) + if err != nil { + return + } + err = proxytest.PredixyShutdown(localIP) + if err != nil { + return + } + err = redistest.RedisShutdown(localIP, + consts.TestTendisPlusMasterStartPort, + consts.TestTendisPlusSlaveStartPort, + consts.TestRedisInstanceNum) + if err != nil { + return + } + err = clustertest.PredixyTendisplusClusterClear(localIP, true) + if err != nil { + return + } + + fmt.Println(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") + fmt.Println(">>>>>>>>>>>>>>>>>>>>start twmproxy arch test>>>>>>>>>>>>>>>") + err = clustertest.TwemproxyRedisInstanceInstall(localIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + twemproxyPkgName, twemproxyPkgMd5) + if err != nil { + return + } + + err = redistest.Keyspattern(localIP, []int{}, + consts.TestRedisSlaveStartPort, consts.TestRedisInstanceNum, + repoUser, repoPassword, repoUrl, dbtoolsPkgName, dbtoolsPkgMd5) + if err != nil { + return + } + + err = redistest.KeysFilesDelete(localIP, consts.TestTwemproxyPort, repoUser, repoPassword, repoUrl, + dbtoolsPkgName, dbtoolsPkgMd5) + if err != nil { + return + } + + err = redistest.RunReplicaPairDataCheck(localIP, consts.TestRedisMasterStartPort, consts.RedisTestPasswd, + localIP, consts.TestRedisSlaveStartPort, consts.RedisTestPasswd, + dbtoolsPkgName, dbtoolsPkgMd5) + if err != nil { + return + } + err = redistest.RunReplicaPairDataRepaire(localIP, consts.TestRedisMasterStartPort, consts.RedisTestPasswd, + localIP, consts.TestRedisSlaveStartPort, consts.RedisTestPasswd, + dbtoolsPkgName, dbtoolsPkgMd5) + if err != nil { + return + } + + // 测试twemproxy启停 + err = proxytest.TwemproxyOpenClose(localIP) + if err != nil { + return + } + _, err = redistest.Backup(localIP, []int{}, consts.TestRedisSlaveStartPort, consts.TestRedisInstanceNum, nil) + if err != nil { + return + } + err = redistest.FlushData(localIP, + consts.TendisTypeTwemproxyRedisInstance, consts.RedisTestPasswd, + []int{}, + consts.TestRedisMasterStartPort, + consts.TestRedisInstanceNum) + if err != nil { + return + } + + if err := clustertest.RedisSceneTest(localIP, localIP, consts.TendisTypeTwemproxyRedisInstance, + consts.TestRedisMasterStartPort, + consts.TestRedisSlaveStartPort, consts.TestRedisInstanceNum); err != nil { + return + } + + err = clustertest.TwemproxyCacheSwitch(localIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + bkdbmonPkgName, bkdbmonPkgMd5) + if err != nil { + return + } + + err = clustertest.TwemproxyCacheSwitchRestoreEnv(localIP, + redisPkgName, redisPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + bkdbmonPkgName, bkdbmonPkgMd5) + if err != nil { + return + } + + err = proxytest.TwemproxyShutDown(localIP) + if err != nil { + return + } + err = redistest.RedisShutdown(localIP, consts.TestRedisMasterStartPort, + consts.TestRedisSlaveStartPort, consts.TestRedisInstanceNum) + if err != nil { + return + } + err = clustertest.TwemproxyRedisInstanceClear(localIP, true) + if err != nil { + return + } + + fmt.Println(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") + fmt.Println(">>>>>>>>>>>>>>>>>>>>start twmproxy tendisssd test>>>>>>>>>>>>>>>") + err = clustertest.TwemproxyTendisSSDInstall(localIP, + tendisssdPkgName, tendisssdPkgMd5, + dbtoolsPkgName, dbtoolsPkgMd5, + twemproxyPkgName, twemproxyPkgMd5) + if err != nil { + return + } + + if err := clustertest.RedisSceneTest(localIP, localIP, consts.TendisTypeTwemproxyTendisSSDInstance, + consts.TestTendisSSDMasterStartPort, + consts.TestTendisSSDSlaveStartPort, 
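+ // tendis-ssd reuses the generic scene test here; the key checks later switch to
+ // FileDelKeyTypeCheck because tendis-ssd does not support SCAN (see
+ // redis_keysdelete_files.go).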
+		consts.TestRedisInstanceNum); err != nil {
+		return
+	}
+
+	err = redistest.Keyspattern(localIP, []int{},
+		consts.TestTendisSSDSlaveStartPort, consts.TestRedisInstanceNum,
+		repoUser, repoPassword, repoUrl, dbtoolsPkgName, dbtoolsPkgMd5)
+	if err != nil {
+		return
+	}
+
+	err = redistest.KeysFilesDelete(localIP, consts.TestSSDClusterTwemproxyPort, repoUser, repoPassword, repoUrl,
+		dbtoolsPkgName, dbtoolsPkgMd5)
+	if err != nil {
+		return
+	}
+
+	err = clustertest.TwemproxyTendisSSDClear(localIP, true)
+	if err != nil {
+		return
+	}
+	return
+}
diff --git a/dbm-services/redis/db-tools/dbactuator/tests/test.sh b/dbm-services/redis/db-tools/dbactuator/tests/test.sh
new file mode 100644
index 0000000000..21ca002225
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/tests/test.sh
@@ -0,0 +1,244 @@
+#!/usr/bin/env bash
+
+repoUser=""
+repoPassword=""
+
+tendisplusPkgName="tendisplus-2.5.0-rocksdb-v6.23.3.tgz"
+tendisplusPkgMd5="573fac8917f3cb6d73d4913471a6eacc"
+
+redisPkgName="redis-6.2.7.tar.gz"
+redisPkgMd5="1fc9e5c3a044ce523844a6f2717e5ac3"
+
+#tendisssdPkgName="redis-2.8.17-rocksdb-v1.3.10.tar.gz"
+#tendisssdPkgMd5="26fb850222e9666595a48a6f2e9b0382"
+tendisssdPkgName="redis-2.8.17-rocksdb-v1.2.20.tar.gz"
+tendisssdPkgMd5="7bfe87efbe017c689c3f4a11bb2a8be9"
+
+predixyPkgName="predixy-1.4.0.tar.gz"
+predixyPkgMd5="24aba4a96dcf7f8581d2fde89d062455"
+
+twemproxyPkgName="twemproxy-0.4.1-v23.tar.gz"
+twemproxyPkgMd5="41850e44bebfce84ebd4d0cf4cce6833"
+
+dbtoolsPkgName="dbtools.tar.gz"
+dbtoolsPkgMd5="ced0fa280c63cb31536fefc1845f3ff0"
+
+bkdbmonPkgName="bk-dbmon-v0.9.tar.gz"
+bkdbmonPkgMd5="a579e2ffd74259f3dd66d23a10a170ba"
+
+repoUrl=""
+
+usage() {
+    echo -e "Usage: $0 [OPTIONS]"
+    echo -e "dbactuator test"
+    echo -e ""
+    echo -e "-H --help -h optional, display this help info"
+    echo -e "--repo-user required, bk repo user name"
+    echo -e "--repo-password required, bk repo user password"
+    echo -e "--repo-url required, bk repo https url"
+    echo -e ""
+    exit 1
+}
+
+# the three --repo-* options are required
+if [[ $# -lt 3 ]]; then
+    usage
+fi
+
+for i in "$@"; do
+    case $i in
+    --repo-user=*)
+        repoUser="${i#*=}"
+        shift
+        ;;
+    --repo-password=*)
+        repoPassword="${i#*=}"
+        shift
+        ;;
+    --repo-url=*)
+        repoUrl="${i#*=}"
+        shift
+        ;;
+    *)
+        echo -e "unknown option:$i"
+        usage
+        ;;
+    esac
+done
+
+if [[ -z $repoUser ]]; then
+    echo -e "error: --repo-user must be passed,repoUser=$repoUser"
+    usage
+fi
+
+if [[ -z $repoPassword ]]; then
+    echo -e "error: --repo-password must be passed,repoPassword=$repoPassword"
+    usage
+fi
+
+if [[ -z $repoUrl ]]; then
+    echo -e "error: --repo-url must be passed,repoUrl=$repoUrl"
+    usage
+fi
+
+# change dir to current
+SCRIPT=$(readlink -f "$0")
+DIR=$(dirname $SCRIPT)
+cd $DIR
+echo "DIR==$DIR"
+
+cd .. && make build
+
+cp ./bin/dbactuator_redis /data/install/
+
+localTendisplusPkgName="/data/install/$tendisplusPkgName"
+localTendisplusPkgMd5=""
+
+localRedisPkgName="/data/install/$redisPkgName"
+localRedisPkgMd5=""
+
+localTendisssdPkgName="/data/install/$tendisssdPkgName"
+localTendisssdPkgMd5=""
+
+localPredixyPkgName="/data/install/$predixyPkgName"
+localPredixyPkgMd5=""
+
+localTwemproxyPkgName="/data/install/$twemproxyPkgName"
+localTwemproxyPkgMd5=""
+
+localDbToolsPkgName="/data/install/$dbtoolsPkgName"
+localDbToolsPkgMd5=""
+
+localBkDbmonPkgName="/data/install/$bkdbmonPkgName"
+localBkDbmonPkgMd5=""
+
+if [[ -e $localTendisplusPkgName ]]; then
+    localTendisplusPkgMd5=$(md5sum $localTendisplusPkgName | awk '{print $1}')
+fi
+
+if [[ -e $localRedisPkgName ]]; then
+    localRedisPkgMd5=$(md5sum $localRedisPkgName | awk '{print $1}')
+fi
+
+if [[ -e $localTendisssdPkgName ]]; then
+    localTendisssdPkgMd5=$(md5sum $localTendisssdPkgName | awk '{print $1}')
+fi
+
+if [[ -e $localPredixyPkgName ]]; then
+    localPredixyPkgMd5=$(md5sum $localPredixyPkgName | awk '{print $1}')
+fi
+
+if [[ -e $localTwemproxyPkgName ]]; then
+    localTwemproxyPkgMd5=$(md5sum $localTwemproxyPkgName | awk '{print $1}')
+fi
+
+if [[ -e $localDbToolsPkgName ]]; then
+    localDbToolsPkgMd5=$(md5sum $localDbToolsPkgName | awk '{print $1}')
+fi
+
+if [[ -e $localBkDbmonPkgName ]]; then
+    localBkDbmonPkgMd5=$(md5sum $localBkDbmonPkgName | awk '{print $1}')
+fi
+
+wgetCmd="wget --user=$repoUser --password=$repoPassword $repoUrl/tendisplus/Tendisplus-2.5/$tendisplusPkgName -O $localTendisplusPkgName"
+if [[ ! -e $localTendisplusPkgName ]]; then
+    echo $wgetCmd
+    $wgetCmd
+elif [[ -n $localTendisplusPkgMd5 && $localTendisplusPkgMd5 != $tendisplusPkgMd5 ]]; then
+    echo "rm -f $localTendisplusPkgName"
+    rm -f $localTendisplusPkgName
+    echo $wgetCmd
+    $wgetCmd
+fi
+
+wgetCmd="wget --user=$repoUser --password=$repoPassword $repoUrl/predixy/Predixy-latest/$predixyPkgName -O $localPredixyPkgName"
+if [[ ! -e $localPredixyPkgName ]]; then
+    echo $wgetCmd
+    $wgetCmd
+elif [[ -n $localPredixyPkgMd5 && $localPredixyPkgMd5 != $predixyPkgMd5 ]]; then
+    echo "rm -f $localPredixyPkgName"
+    rm -f $localPredixyPkgName
+    echo $wgetCmd
+    $wgetCmd
+fi
+
+wgetCmd="wget --user=$repoUser --password=$repoPassword $repoUrl/redis/Redis-6/$redisPkgName -O $localRedisPkgName"
+if [[ ! -e $localRedisPkgName ]]; then
+    echo $wgetCmd
+    $wgetCmd
+elif [[ -n $localRedisPkgMd5 && $localRedisPkgMd5 != $redisPkgMd5 ]]; then
+    echo "rm -f $localRedisPkgName"
+    rm -f $localRedisPkgName
+    echo $wgetCmd
+    $wgetCmd
+fi
+
+wgetCmd="wget --user=$repoUser --password=$repoPassword $repoUrl/tendisssd/TendisSSD-1.2/$tendisssdPkgName -O $localTendisssdPkgName"
+#wgetCmd="wget --user=$repoUser --password=$repoPassword $repoUrl/tendisssd/TendisSSD-1.3/$tendisssdPkgName -O $localTendisssdPkgName"
+if [[ ! -e $localTendisssdPkgName ]]; then
+    echo $wgetCmd
+    $wgetCmd
+elif [[ -n $localTendisssdPkgMd5 && $localTendisssdPkgMd5 != $tendisssdPkgMd5 ]]; then
+    echo "rm -f $localTendisssdPkgName"
+    rm -f $localTendisssdPkgName
+    echo $wgetCmd
+    $wgetCmd
+fi
+
+wgetCmd="wget --user=$repoUser --password=$repoPassword $repoUrl/twemproxy/Twemproxy-latest/$twemproxyPkgName -O $localTwemproxyPkgName"
+if [[ ! -e $localTwemproxyPkgName ]]; then
+    echo $wgetCmd
+    $wgetCmd
+elif [[ -n $localTwemproxyPkgMd5 && $localTwemproxyPkgMd5 != $twemproxyPkgMd5 ]]; then
+    echo "rm -f $localTwemproxyPkgName"
+    rm -f $localTwemproxyPkgName
+    echo $wgetCmd
+    $wgetCmd
+fi
+
+wgetCmd="wget --user=$repoUser --password=$repoPassword $repoUrl/tools/latest/$dbtoolsPkgName -O $localDbToolsPkgName"
+if [[ ! -e $localDbToolsPkgName ]]; then
+    echo $wgetCmd
+    $wgetCmd
+elif [[ -n $localDbToolsPkgMd5 && $localDbToolsPkgMd5 != $dbtoolsPkgMd5 ]]; then
+    echo "rm -f $localDbToolsPkgName"
+    rm -f $localDbToolsPkgName
+    echo $wgetCmd
+    $wgetCmd
+fi
+
+wgetCmd="wget --user=$repoUser --password=$repoPassword $repoUrl/dbmon/latest/$bkdbmonPkgName -O $localBkDbmonPkgName"
+if [[ ! -e $localBkDbmonPkgName ]]; then
+    echo $wgetCmd
+    $wgetCmd
+elif [[ -n $localBkDbmonPkgMd5 && $localBkDbmonPkgMd5 != $bkdbmonPkgMd5 ]]; then
+    echo "rm -f $localBkDbmonPkgName"
+    rm -f $localBkDbmonPkgName
+    echo $wgetCmd
+    $wgetCmd
+fi
+
+cd $DIR
+CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o dbactuator-test -v test.go
+
+echo "./dbactuator-test -tendisplus-pkgname=$tendisplusPkgName -tendisplus-pkgmd5=$tendisplusPkgMd5
+ -redis-pkgname=$redisPkgName -redis-pkgmd5=$redisPkgMd5
+ -tendisssd-pkgname=$tendisssdPkgName -tendisssd-pkgmd5=$tendisssdPkgMd5
+ -predixy-pkgname=$predixyPkgName -predixy-pkgmd5=$predixyPkgMd5
+ -twemproxy-pkgname=$twemproxyPkgName -twemproxy-pkgmd5=$twemproxyPkgMd5
+ -dbtools-pkgname=$dbtoolsPkgName -dbtools-pkgmd5=$dbtoolsPkgMd5
+ -bkdbmon-pkgname=$bkdbmonPkgName -bkdbmon-pkgmd5=$bkdbmonPkgMd5"
+
+./dbactuator-test \
+    -tendisplus-pkgname=$tendisplusPkgName -tendisplus-pkgmd5=$tendisplusPkgMd5 \
+    -redis-pkgname=$redisPkgName -redis-pkgmd5=$redisPkgMd5 \
+    -tendisssd-pkgname=$tendisssdPkgName -tendisssd-pkgmd5=$tendisssdPkgMd5 \
+    -predixy-pkgname=$predixyPkgName -predixy-pkgmd5=$predixyPkgMd5 \
+    -twemproxy-pkgname=$twemproxyPkgName -twemproxy-pkgmd5=$twemproxyPkgMd5 \
+    -dbtools-pkgname=$dbtoolsPkgName -dbtools-pkgmd5=$dbtoolsPkgMd5 \
+    -bkdbmon-pkgname=$bkdbmonPkgName -bkdbmon-pkgmd5=$bkdbmonPkgMd5 \
+    -user $repoUser -password $repoPassword -repo-url $repoUrl
diff --git a/dbm-services/redis/db-tools/dbactuator/tests/test_mongo.sh b/dbm-services/redis/db-tools/dbactuator/tests/test_mongo.sh
new file mode 100644
index 0000000000..e12e630c2c
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbactuator/tests/test_mongo.sh
@@ -0,0 +1,84 @@
+#!/usr/bin/env bash
+source /etc/profile
+repoUser=""
+repoPassword=""
+
+mongodbPkgName="mongodb-linux-x86_64-3.4.20.tar.gz"
+mongodbPkgMd5="e68d998d75df81b219e99795dec43ffb"
+
+localMongodbPkgName="/data/install/$mongodbPkgName"
+localMongodbPkgMd5=""
+
+repoUrl=""
+
+usage() {
+    echo -e "Usage: $0 [OPTIONS]"
+    echo -e ""
+    echo -e "-H --help -h optional, display this help info"
+    echo -e "--repo-user required, bk repo user name"
+    echo -e "--repo-password required, bk repo user password"
+    echo -e "--repo-url required, bk repo https url"
+    echo -e ""
+    exit 1
+}
+
+# the three --repo-* options are required
+if [[ $# -lt 3 ]]; then
+    usage
+fi
+
+for i in "$@"; do
+    case $i in
+    --repo-user=*)
+        repoUser="${i#*=}"
+        shift
+        ;;
+    --repo-password=*)
+        repoPassword="${i#*=}"
+        shift
+        ;;
+    --repo-url=*)
+        repoUrl="${i#*=}"
+        shift
+        ;;
+    *)
+        echo -e "unknown option:$i"
+        usage
+        ;;
+    esac
+done
+
+if [[ -z $repoUser ]]; then
+    echo -e "error: --repo-user must be passed,repoUser=$repoUser"
+    usage
+fi
+
+if [[ -z $repoPassword ]]; then
+    echo -e "error: --repo-password must be passed,repoPassword=$repoPassword"
+    usage
+fi
+
+if [[ -z $repoUrl 
]]; then + echo -e "error: --repo-url must be passed,repoUrl=$repoUrl" + usage +fi + +if [[ -e $localMongodbPkgName ]]; then + localMongodbPkgMd5=$(md5sum $localMongodbPkgName | awk '{print $1}') +fi + + +wgetCmd="wget --user=$repoUser --password=$repoPassword $repoUrl/install_package/$mongodbPkgName -O $localMongodbPkgName" +if [[ ! -e $localMongodbPkgName ]]; then + echo $wgetCmd + $wgetCmd +elif [[ -n $localMongodbPkgMd5 && $localMongodbPkgMd5 != $mongodbPkgMd5 ]]; then + echo "rm -f $localMongodbPkgName" + rm -f $localMongodbPkgName + echo $wgetCmd + $wgetCmd +fi + +cd $(dirname $0)/../pkg/atomjobs/atommongodb/ +go test -v +rm -rf logs + diff --git a/dbm-services/redis/db-tools/dbmon/.ci/codecc.yml b/dbm-services/redis/db-tools/dbmon/.ci/codecc.yml new file mode 100644 index 0000000000..9be59c2114 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/.ci/codecc.yml @@ -0,0 +1,29 @@ +version: v2.0 +resources: + repositories: + - repository: ci_templates/public/codecc + name: codecc +on: + mr: + target-branches: [ "*" ] +stages: + - name: "代码检查" + check-out: + gates: + - template: commonGate.yml@codecc + timeout-hours: 10 + jobs: + codecc: + name: "CodeCC代码检查" + runs-on: + pool-name: docker #docker-on-devcloud、docker、local、agentless + container: + image: mirrors.tencent.com/ci/tlinux3_ci:2.0.0 + steps: + - checkout: self + - uses: CodeccCheckAtomDebug@4.* + name: 腾讯代码分析 + with: + beAutoLang: true # 自动检测项目语言 + checkerSetType: "openScan" # 规则集类型,normal对应自主配置规则集,openScan对应按开源治理要求配置 + toolScanType: "2" # 扫描方式。快速全量扫描[1] | 全量扫描[0] | 差异扫描[6] | MR/PR扫描[2],默认为1 diff --git a/dbm-services/redis/db-tools/dbmon/.ci/open_source_check.yml b/dbm-services/redis/db-tools/dbmon/.ci/open_source_check.yml new file mode 100644 index 0000000000..f421f315f3 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/.ci/open_source_check.yml @@ -0,0 +1,84 @@ +version: "v2.0" +name: "开源检查" +label: [] +variables: {} +stages: +- name: "开源检查" + label: + - "Build" + jobs: + job_AfK: + name: "构建环境-LINUX" + runs-on: + pool-name: "docker" + container: + image: "mirrors.tencent.com/ci/tlinux3_ci:2.3.0" + needs: {} + steps: + - checkout: self + - name: "敏感信息检查-部门RTX" + uses: "SensitiveRtxChecker@3.*" + - name: "腾讯代码分析(官方-代码分析工作组)" + uses: "CodeccCheckAtomDebug@4.*" + with: + beAutoLang: true + languages: + - "GOLANG" + checkerSetType: "communityOpenScan" + tools: + - "WOODPECKER_COMMITSCAN" + - "SCC" + - "PECKER_SECURITY" + - "SENSITIVE" + - "DUPC" + - "IP_CHECK" + - "WOODPECKER_SENSITIVE" + - "HORUSPY" + - "XCHECK" + - "CCN" + asyncTask: false + asyncTaskId: "" + scriptType: "SHELL" + script: |- + # Coverity/Klocwork将通过调用编译脚本来编译您的代码,以追踪深层次的缺陷 + # 请使用依赖的构建工具如maven/cmake等写一个编译脚本build.sh + # 确保build.sh能够编译代码 + # cd path/to/build.sh + # sh build.sh + languageRuleSetMap: {} + checkerSetEnvType: "prod" + multiPipelineMark: "" + rtxReceiverType: "1" + botWebhookUrl: "" + botRemindRange: "2" + botRemindSeverity: "7" + botRemaindTools: [] + emailReceiverType: "1" + emailCCReceiverList: [] + instantReportStatus: "2" + reportDate: [] + reportTime: "" + reportTools: [] + toolScanType: "1" + diffBranch: "" + byFile: false + mrCommentEnable: true + prohibitIgnore: false + newDefectJudgeFromDate: "" + transferAuthorList: [] + path: [] + customPath: [] + scanTestSource: false + openScanPrj: false + openScanFilterEnable: false + issueSystem: "TAPD" + issueSubSystem: "" + issueResolvers: [] + issueReceivers: [] + issueFindByVersion: "" + maxIssue: 1000 + issueAutoCommit: false + check-out: + gates: + - template: open_source_gate.yml + 
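+      # pass/fail thresholds for this gate are defined in .ci/templates/open_source_gate.yml below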
+  timeout-hours: 10
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbmon/.ci/templates/open_source_gate.yml b/dbm-services/redis/db-tools/dbmon/.ci/templates/open_source_gate.yml
new file mode 100644
index 0000000000..34ff9b0cb8
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/.ci/templates/open_source_gate.yml
@@ -0,0 +1,26 @@
+parameters:
+- name: receivers
+  type: array
+  default: [ "${{ ci.actor }}" ]
+
+gates:
+- name: open-source-gate
+  rule:
+    - "CodeccCheckAtomDebug.all_risk <= 0"
+    - "CodeccCheckAtomDebug.high_med_new_issue <= 0"
+    - "CodeccCheckAtomDebug.ccn_new_max_value <= 40"
+    - "CodeccCheckAtomDebug.sensitive_defect <= 0"
+    - "CodeccCheckAtomDebug.dupc_average <= 15"
+    - "CodeccCheckAtomDebug.ccn_average <= 3"
+    - "CodeccCheckAtomDebug.ccn_new_defect <= 0"
+    - "CodeccCheckAtomDebug.ccn_funcmax <= 20"
+    - "CodeccCheckAtomDebug.woodpecker_all_defect <= 0"
+    - "CodeccCheckAtomDebug.horuspy_all_defect <= 0"
+    - "CodeccCheckAtomDebug.go_serious_defect <= 0"
+    - "CodeccCheckAtomDebug.go_all_defect <= 100"
+  notify-on-fail:
+    - type: wework-message
+      receivers: ${{ parameters.receivers }}
+  continue-on-fail:
+    gatekeepers:
+      - "${{ ci.actor }}"
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbmon/.gitignore b/dbm-services/redis/db-tools/dbmon/.gitignore
new file mode 100644
index 0000000000..3a34df87d2
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/.gitignore
@@ -0,0 +1,8 @@
+logs
+.vscode
+.codecc
+bin
+build.yml
+bk-dbmon/
+nohup.out
+.idea
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbmon/LICENSE b/dbm-services/redis/db-tools/dbmon/LICENSE
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/dbm-services/redis/db-tools/dbmon/Makefile b/dbm-services/redis/db-tools/dbmon/Makefile
new file mode 100644
index 0000000000..b4c971362d
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/Makefile
@@ -0,0 +1,24 @@
+SRV_NAME=bk-dbmon
+
+clean:
+	-rm ./bin/${SRV_NAME}
+
+build:clean
+	CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./bin/$(SRV_NAME) -v main.go
+
+
+# NOTE: $$1/$$2 so that awk, not make, expands the field variables
+LOCAL_PKG_DIR := $(shell eval "./bin/bk-dbmon -v|awk '{print $$2}'")
+version:=$(shell sh -c "./bin/bk-dbmon -v|awk '{print $$1}'")
+
+package:build
+	mkdir -p bkdbmon
+	cp ./bin/$(SRV_NAME) ./bkdbmon/
+	cp ./start.sh ./bkdbmon/
+	cp ./stop.sh ./bkdbmon/
+	cp ./dbmon-config.yaml ./bkdbmon/
+
+version:build
+	@echo LOCAL_PKG_DIR=$(LOCAL_PKG_DIR)
+	@echo version=$(version)
+
+.PHONY: clean build package version
diff --git a/dbm-services/redis/db-tools/dbmon/README.md b/dbm-services/redis/db-tools/dbmon/README.md
new file mode 100644
index 0000000000..cba0173ede
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/README.md
@@ -0,0 +1,47 @@
+### bk-dbmon
+A collection of local routine jobs, including scheduled full backup, binlog backup, heartbeat and other periodic tasks.
+
+#### Usage example
+- **Config example, file name: dbmon-config.yaml**
+```yaml
+report_save_dir: /home/mysql/dbareport/
+redis_backup:
+  cron: '0 5/13/21 * * *' # the five fields start at minutes
+  to_backup_system: yes
+  old_file_left_day: 2 # days to keep old files locally
+  tar_split: true
+  tar_split_part_size: '8G'
+redis_binlogbackup:
+  to_backup_system: 'no' # whether to upload to the backup system
+  old_file_left_day: '2' # days to keep old files locally
+  cron: '@every 10m' # every 10 minutes
+redis_heartbeat:
+  cron: '@every 10s' # refer https://pkg.go.dev/github.com/robfig/cron
+redis_monitor:
+  bkmonitor_event_data_id: 542898
+  bkmonitor_event_token: 'xxxx'
+  bkmonitor_metric_data_id: 11111
+  bkmonitor_metirc_token: 'xxxx'
+  cron: '@every 1m'
+servers:
+  - bk_biz_id: 200500194
+    domain: cache01.aaaa.testapp.db
+    server_ip: 127.0.0.1
+    server_ports:
+      - 44000
+      - 44001
+  - bk_biz_id: 200500195
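+    # a second cluster entry on the same machine: bk-dbmon iterates over every item in servers[]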
+    domain: cache02.bbbb.testapp.db
+    server_ip: 127.0.0.1
+    server_ports:
+      - 45000
+      - 45001
+```
+
+- **Run command**
+```sh
+./bin/bk-dbmon --config=dbmon-config.yaml
+```
+
+#### Architecture
+![bk-dbmon-structurer](./imgs/bk-dbmon-structurer.png)
\ No newline at end of file
diff --git a/dbm-services/redis/db-tools/dbmon/cmd/root.go b/dbm-services/redis/db-tools/dbmon/cmd/root.go
new file mode 100644
index 0000000000..88096b6156
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/cmd/root.go
@@ -0,0 +1,180 @@
+// Package cmd rootcmd
+package cmd
+
+import (
+	"dbm-services/redis/db-tools/dbmon/config"
+	"dbm-services/redis/db-tools/dbmon/mylog"
+	"dbm-services/redis/db-tools/dbmon/pkg/consts"
+	"dbm-services/redis/db-tools/dbmon/pkg/httpapi"
+	"dbm-services/redis/db-tools/dbmon/pkg/keylifecycle"
+	"dbm-services/redis/db-tools/dbmon/pkg/mongojob"
+	"dbm-services/redis/db-tools/dbmon/pkg/redisbinlogbackup"
+	"dbm-services/redis/db-tools/dbmon/pkg/redisfullbackup"
+	"dbm-services/redis/db-tools/dbmon/pkg/redisheartbeat"
+	"dbm-services/redis/db-tools/dbmon/pkg/redismonitor"
+	"dbm-services/redis/db-tools/dbmon/pkg/report"
+	"fmt"
+	"log"
+	"os"
+	"runtime/debug"
+	"time"
+
+	"github.com/robfig/cron/v3"
+	"github.com/spf13/cobra"
+)
+
+var cfgFile string
+var showversion = false
+
+// rootCmd represents the base command when called without any subcommands
+var rootCmd = &cobra.Command{
+	Use:   "bk-dbmon",
+	Short: "redis local crontab jobs, including routine_backup, heartbeat, etc.",
+	Long: `redis local crontab jobs, including routine_backup, heartbeat, etc.
+	Each job writes its result to a local file once it finishes, and another program reports the result.
+	`,
+	// Uncomment the following line if your bare application
+	// has an action associated with it:
+	Run: func(cmd *cobra.Command, args []string) {
+		defer func() {
+			if r := recover(); r != nil {
+				_, _ = fmt.Fprintf(os.Stderr, "%s", string(debug.Stack()))
+			}
+		}()
+
+		if showversion {
+			_, _ = fmt.Fprintf(os.Stdout, "bk-dbmon %s\n", consts.BkDbmonVersion)
+			return
+		}
+
+		config.InitConfig(cfgFile)
+		mylog.InitRotateLoger()
+		var entryID cron.EntryID
+		var err error
+
+		hasMongo, hasRedis, _ := getDbType(config.GlobalConf.Servers)
+
+		if hasMongo && hasRedis {
+			mylog.Logger.Fatal("dbmon not support mongo and redis at the same time")
+		}
+
+		c := cron.New(
+			cron.WithLogger(mylog.AdapterLog),
+		)
+		if hasRedis {
+			report.InitGlobalHistoryClearJob(config.GlobalConf)
+			redisfullbackup.InitGlobRedisFullBackupJob(config.GlobalConf)
+			redisbinlogbackup.InitGlobRedisBinlogBackupJob(config.GlobalConf)
+			redisheartbeat.InitGlobRedisHeartbeatJob(config.GlobalConf)
+			redismonitor.InitGlobRedisMonitorJob(config.GlobalConf)
+			keylifecycle.InitRedisKeyLifeCycleJob(config.GlobalConf)
+			// clear report history once a day at 01:00
+			entryID, err = c.AddJob("0 1 * * *",
+				cron.NewChain(cron.SkipIfStillRunning(mylog.AdapterLog)).Then(report.GlobHistoryClearJob))
+			if err != nil {
+				log.Panicf("reportHistoryClear addjob fail,entryID:%d,err:%v\n", entryID, err)
+				return
+			}
+			if config.GlobalConf.RedisFullBackup.Cron != "" {
+				entryID, err = c.AddJob(config.GlobalConf.RedisFullBackup.Cron,
+					cron.NewChain(cron.SkipIfStillRunning(mylog.AdapterLog)).Then(redisfullbackup.GlobRedisFullBakJob))
+				if err != nil {
+					log.Panicf("fullbackup addjob fail,entryID:%d,err:%v\n", entryID, err)
+					return
+				}
+			}
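+			// SkipIfStillRunning drops a tick when the previous run of the same job
+			// has not finished yet, so a slow backup never piles up concurrent runs.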
+			if config.GlobalConf.RedisBinlogBackup.Cron != "" {
+				entryID, err = c.AddJob(config.GlobalConf.RedisBinlogBackup.Cron,
+					cron.NewChain(cron.SkipIfStillRunning(mylog.AdapterLog)).Then(redisbinlogbackup.GlobRedisBinlogBakJob))
+				if err != nil {
+					log.Panicf("binlogbackup addjob fail,entryID:%d,err:%v\n", entryID, err)
+					return
+				}
+			}
+			if config.GlobalConf.RedisHeartbeat.Cron != "" {
+				entryID, err = c.AddJob(config.GlobalConf.RedisHeartbeat.Cron,
+					cron.NewChain(cron.SkipIfStillRunning(mylog.AdapterLog)).Then(redisheartbeat.GlobRedisHeartbeatJob))
+				if err != nil {
+					fmt.Printf("heartbeat addjob fail,entryID:%d,err:%v\n", entryID, err)
+					return
+				}
+			}
+			if config.GlobalConf.RedisMonitor.Cron != "" {
+				entryID, err = c.AddJob(config.GlobalConf.RedisMonitor.Cron,
+					cron.NewChain(cron.SkipIfStillRunning(mylog.AdapterLog)).Then(redismonitor.GlobRedisMonitorJob))
+				if err != nil {
+					fmt.Printf("monitor addjob fail,entryID:%d,err:%v\n", entryID, err)
+					return
+				}
+			}
+			if config.GlobalConf.KeyLifeCycle.Cron != "" {
+				entryID, err = c.AddJob(config.GlobalConf.KeyLifeCycle.Cron,
+					cron.NewChain(cron.SkipIfStillRunning(mylog.AdapterLog)).Then(keylifecycle.GlobRedisKeyLifeCycleJob))
+				if err != nil {
+					fmt.Printf("keylifecycle addjob fail,entryID:%d,err:%v\n", entryID, err)
+					return
+				}
+			}
+		} else if hasMongo {
+			// login check and pull-up: make sure mongo processes stay reachable
+			entryID, err = c.AddJob("@every 1m",
+				cron.NewChain(cron.SkipIfStillRunning(mylog.AdapterLog)).Then(
+					mongojob.GetBackupJob(config.GlobalConf)))
+			if err != nil {
+				log.Panicf("mongo backup addjob fail,entryID:%d,err:%v\n", entryID, err)
+				return
+			}
+
+			entryID, err = c.AddJob("@every 1m",
+				cron.NewChain(cron.SkipIfStillRunning(mylog.AdapterLog)).Then(
+					mongojob.GetCheckServiceJob(config.GlobalConf)))
+			if err != nil {
+				log.Panicf("mongo checkservice addjob fail,entryID:%d,err:%v\n", entryID, err)
+				return
+			}
+		}
+		mylog.Logger.Info(fmt.Sprintf("start cron job,entryID:%d Listen:%s\n", entryID, config.GlobalConf.HttpAddress))
+		c.Start()
+		httpapi.StartListen(config.GlobalConf)
+		for {
+			time.Sleep(10 * time.Second)
+		}
+	},
+}
+
+// Execute adds all child commands to the root command and sets flags appropriately.
+// This is called by main.main(). It only needs to happen once to the rootCmd.
+func Execute() {
+	err := rootCmd.Execute()
+	if err != nil {
+		os.Exit(1)
+	}
+}
+
+func init() {
+	// Here you will define your flags and configuration settings.
+	// Cobra supports persistent flags, which, if defined here,
+	// will be global for your application.
+
+	rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "dbmon-config.yaml",
+		"required, config file (default is ./dbmon-config.yaml)")
+	rootCmd.PersistentFlags().BoolVarP(&showversion, "version", "v", false, "show bk-dbmon version")
+
+	// Cobra also supports local flags, which will only run
+	// when this action is called directly.
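+	//
+	// Typical invocations (illustrative):
+	//   ./bk-dbmon --config=dbmon-config.yaml   # run all configured local jobs
+	//   ./bk-dbmon -v                           # print the bk-dbmon version and exit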
+	// rootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
+
+}
+
+// getDbType reports whether the configured servers contain mongo and/or redis nodes
+func getDbType(servers []config.ConfServerItem) (hasMongo, hasRedis bool, err error) {
+	for _, row := range servers {
+		if consts.IsMongo(row.ClusterType) {
+			hasMongo = true
+		} else {
+			hasRedis = true
+		}
+	}
+	return
+}
diff --git a/dbm-services/redis/db-tools/dbmon/config/config.go b/dbm-services/redis/db-tools/dbmon/config/config.go
new file mode 100644
index 0000000000..ba9853b52c
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/config/config.go
@@ -0,0 +1,145 @@
+// Package config configuration package
+package config
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"os"
+
+	"github.com/spf13/viper"
+)
+
+// ConfServerItem one item of the servers config
+type ConfServerItem struct {
+	BkBizID       string `json:"bk_biz_id" mapstructure:"bk_biz_id"`
+	BkCloudID     int64  `json:"bk_cloud_id" mapstructure:"bk_cloud_id"`
+	App           string `json:"app" mapstructure:"app"`
+	AppName       string `json:"app_name" mapstructure:"app_name"`
+	ClusterDomain string `json:"cluster_domain" mapstructure:"cluster_domain"`
+	ClusterName   string `json:"cluster_name" mapstructure:"cluster_name"`
+	ClusterType   string `json:"cluster_type" mapstructure:"cluster_type"`
+	MetaRole      string `json:"meta_role" mapstructure:"meta_role"`
+	ServerIP      string `json:"server_ip" mapstructure:"server_ip"`
+	ServerPorts   []int  `json:"server_ports" mapstructure:"server_ports"`
+	Shard         string `json:"shard" mapstructure:"shard"`
+}
+
+// ConfRedisFullBackup full backup config
+type ConfRedisFullBackup struct {
+	ToBackupSystem   string `json:"to_backup_system" mapstructure:"to_backup_system"`
+	Cron             string `json:"cron" mapstructure:"cron"`
+	OldFileLeftDay   int    `json:"old_file_left_day" mapstructure:"old_file_left_day"`
+	TarSplit         bool   `json:"tar_split" mapstructure:"tar_split"`
+	TarSplitPartSize string `json:"tar_split_part_size" mapstructure:"tar_split_part_size"`
+}
+
+// ConfRedisBinlogBackup binlog backup config
+type ConfRedisBinlogBackup struct {
+	ToBackupSystem string `json:"to_backup_system" mapstructure:"to_backup_system"`
+	Cron           string `json:"cron" mapstructure:"cron"`
+	OldFileLeftDay int    `json:"old_file_left_day" mapstructure:"old_file_left_day"`
+}
+
+// ConfRedisHeartbeat heartbeat config
+type ConfRedisHeartbeat struct {
+	Cron string `json:"cron" mapstructure:"cron"`
+}
+
+// ConfRedisKeyLifeCycle key statistics: big keys / hot keys, key patterns
+type ConfRedisKeyLifeCycle struct {
+	StatDir string `json:"stat_dir" mapstructure:"stat_dir"`
+	Cron    string `json:"cron" mapstructure:"cron"`
+
+	HotKeyConf ConfKeyStat    `json:"hotkey_conf" mapstructure:"hotkey_conf"`
+	BigKeyConf ConfBigKeyStat `json:"bigkey_conf" mapstructure:"bigkey_conf"`
+}
+
+// ConfRedisMonitor redis local monitor config
+type ConfRedisMonitor struct {
+	BkMonitorEventDataID  int64  `json:"bkmonitor_event_data_id" mapstructure:"bkmonitor_event_data_id"`
+	BkMonitorEventToken   string `json:"bkmonitor_event_token" mapstructure:"bkmonitor_event_token"`
+	BkMonitorMetricDataID int64  `json:"bkmonitor_metric_data_id" mapstructure:"bkmonitor_metric_data_id"`
+	BkMonitorMetircToken  string `json:"bkmonitor_metirc_token" mapstructure:"bkmonitor_metirc_token"`
+	Cron                  string `json:"cron" mapstructure:"cron"`
+}
+
+// Configuration the full bk-dbmon config
+type Configuration struct {
+	ReportSaveDir   string                `json:"report_save_dir" mapstructure:"report_save_dir"`
+	ReportLeftDay   int                   `json:"report_left_day" mapstructure:"report_left_day"`
+	HttpAddress     string                `json:"http_address" mapstructure:"http_address"`
+	GsePath         string                `json:"gsepath" mapstructure:"gsepath"`
+	RedisFullBackup ConfRedisFullBackup   `json:"redis_fullbackup" mapstructure:"redis_fullbackup"`
+	RedisBinlogBackup ConfRedisBinlogBackup `json:"redis_binlogbackup" mapstructure:"redis_binlogbackup"`
+	RedisHeartbeat    ConfRedisHeartbeat    `json:"redis_heartbeat" mapstructure:"redis_heartbeat"`
+	KeyLifeCycle      ConfRedisKeyLifeCycle `json:"redis_keylife" mapstructure:"redis_keylife"`
+	RedisMonitor      ConfRedisMonitor      `json:"redis_monitor" mapstructure:"redis_monitor"`
+	Servers           []ConfServerItem      `json:"servers" mapstructure:"servers"`
+	InstConfig        InstConfigList        `json:"inst_config,omitempty" mapstructure:"inst_config"`
+}
+
+// String string
+func (c *Configuration) String() string {
+	tmp, _ := json.Marshal(c)
+	return string(tmp)
+}
+
+// GlobalConf global configuration;
+// if the config file is modified, it can be reloaded to refresh this global config
+var GlobalConf *Configuration
+
+func loadConfigFile() {
+	conf := Configuration{}
+	err := viper.Unmarshal(&conf)
+	if err != nil {
+		log.Panicf("viper.Unmarshal fail,err:%v,configFile:%s", err, viper.ConfigFileUsed())
+		return
+	}
+	if conf.RedisFullBackup.OldFileLeftDay == 0 {
+		conf.RedisFullBackup.OldFileLeftDay = 3 // default days to keep old full backups
+	}
+	if conf.RedisBinlogBackup.OldFileLeftDay == 0 {
+		conf.RedisBinlogBackup.OldFileLeftDay = 3 // default days to keep old binlogs
+	}
+	if conf.ReportLeftDay == 0 {
+		conf.ReportLeftDay = 15
+	}
+	if conf.GsePath == "" {
+		conf.GsePath = "/usr/local/gse_bkte"
+	}
+	fmt.Println(conf.String())
+	GlobalConf = &conf
+}
+
+// InitConfig reads in config file and ENV variables if set.
+func InitConfig(cfgFile string) {
+	if cfgFile != "" {
+		_, err := os.Stat(cfgFile)
+		if err != nil {
+			log.Panicf("os.Stat %s fail,err:%v", cfgFile, err)
+		}
+
+		// Use config file from the flag.
+		viper.SetConfigFile(cfgFile)
+		viper.SetConfigType("yaml")
+		// If a config file is found, read it in.
+		err = viper.ReadInConfig()
+		if err == nil {
+			fmt.Println("Using config file:", viper.ConfigFileUsed())
+		} else {
+			log.Panicf("viper.ReadInConfig fail,err:%v,configFile:%s", err, viper.ConfigFileUsed())
+		}
+		// viper.WatchConfig()
+		// viper.OnConfigChange(func(e fsnotify.Event) {
+		//	fmt.Printf("Config file changed:%s event:%s\n", e.Name, e.String())
+		//	loadConfigFile()
+		// })
+		loadConfigFile()
+	} else {
+		log.Panicf("--config not passed?")
+	}
+
+	// read in environment variables that match
+	viper.AutomaticEnv()
+}
diff --git a/dbm-services/redis/db-tools/dbmon/config/instconfig.go b/dbm-services/redis/db-tools/dbmon/config/instconfig.go
new file mode 100644
index 0000000000..9e337923e7
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/config/instconfig.go
@@ -0,0 +1,40 @@
+package config
+
+// InstConfigIdTypeAddr the config id is an instance address (ip:port)
+const InstConfigIdTypeAddr = "addr"
+
+// InstConfigIdTypeCluster the config id is a cluster name
+const InstConfigIdTypeCluster = "cluster"
+
+// InstConfig instance config.
+// On MongoDB one machine may host nodes of different clusters,
+// and each cluster needs its own configuration.
+type InstConfig struct {
+	Id      string // ip:port or hostname:port
+	IdType  string // addr or cluster
+	Segment string
+	Prop    string
+	Value   string
+	Mtime   string
+}
+
+// InstConfigList a list of instance configs
+type InstConfigList []InstConfig
+
+// Len used for sorting
+func (list *InstConfigList) Len() int {
+	return len(*list)
+}
+
+// Get returns the matched config, or nil if absent.
+// An addr-level config takes precedence over a cluster-level one.
+func (list *InstConfigList) Get(cluster, addr, segment, key string) *InstConfig {
+	var clusterConfig *InstConfig
+	for _, c := range *list {
+		c := c // copy: &c must not alias the shared loop variable (go < 1.22)
+		if c.Id == addr && c.Segment == segment && c.Prop == key {
+			return &c
+		} else if c.Id == cluster && c.Segment == segment && c.Prop == key {
+			clusterConfig = &c
+		}
+	}
+	return clusterConfig
+}
diff --git 
a/dbm-services/redis/db-tools/dbmon/config/keystat.go b/dbm-services/redis/db-tools/dbmon/config/keystat.go new file mode 100644 index 0000000000..a9757dabc7 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/config/keystat.go @@ -0,0 +1,28 @@ +package config + +// ConfKeyStat TODO +type ConfKeyStat struct { + TopCnt int `json:"top_count" mapstructure:"top_count"` + Duration int `json:"duration_seconds" mapstructure:"duration_seconds"` +} + +// ConfBigKeyStat TODO +type ConfBigKeyStat struct { + TopCnt int `json:"top_count" mapstructure:"top_count"` + Duration int `json:"duration_seconds" mapstructure:"duration_seconds"` + // 是否在Master 上运行, 默认在slave 上运行 + RunOnMaster bool `json:"on_master" mapstructure:"on_master"` + // 是否使用RDB 来分析 + UseRdb bool `json:"use_rdb" mapstructure:"use_rdb"` + // 磁盘最大使用率,大于这个值将不执行分析 + DiskMaxUsage int `json:"disk_max_usage" mapstructure:"disk_max_usage"` + // 业务可以执行key模式, 如果有,会优先按照这里匹配 + KeyModSpec string `json:"keymod_spec" mapstructure:"keymod_spec"` + // 可以模式算法 + KeyModeEngine string `json:"keymod_engine" mapstructure:"keymod_engine"` +} + +// key模式分析,3个需求: +// 1,支持第3方的新增的Key模式算法 +// 2,内存版,支持估算valueSize (取部分member的value Size) +// 3,支持从rdb中分析 (好象和备份有冲突?) diff --git a/dbm-services/redis/db-tools/dbmon/dbmon-config.yaml b/dbm-services/redis/db-tools/dbmon/dbmon-config.yaml new file mode 100644 index 0000000000..06b612ca05 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/dbmon-config.yaml @@ -0,0 +1,39 @@ +report_save_dir: /home/mysql/dbareport/ +report_left_day: 15 # 上报的.log文件保存15天 +http_address: '127.0.0.1:6677' +gsepath: '/usr/local/gse_bkte' +redis_fullbackup: + cron: '42 * * * *' #从分开始 + to_backup_system: 'no' #是否上传备份系统 + old_file_left_day: '2' # 旧文件本地保存天数 + tar_split: 'true' + tar_split_part_size: '8G' +redis_binlogbackup: + to_backup_system: 'no' #是否上传备份系统 + old_file_left_day: '2' # 旧文件本地保存天数 + cron: '42 * * * *' #从分开始 +redis_heartbeat: + cron: '@every 1h' # refer https://pkg.go.dev/github.com/robfig/cron +redis_monitor: + bkmonitor_event_data_id: 542898 + bkmonitor_event_token: 'xxxx' + bkmonitor_metric_data_id: 11111 + bkmonitor_metirc_token: 'xxxx' + cron: '@every 1m' +servers: + - bk_biz_id: '200500194' + bk_cloud_id: '246' + app: testapp + app_id: '200500194' + app_name: 测试app + cluster_domain: tendisx.aaaa.testapp.db + cluster_name: aaaa + cluster_type: PredixyTendisplusCluster + meta_role: redis_master # predixy/twemproxy/redis_master/redis_slave + shard: "" + server_ip: 127.0.0.1 + server_ports: + - 42000 + - 42001 + - 42002 + - 42003 \ No newline at end of file diff --git a/dbm-services/redis/db-tools/dbmon/embedfiles/embedfiles.go b/dbm-services/redis/db-tools/dbmon/embedfiles/embedfiles.go new file mode 100644 index 0000000000..160c159c83 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/embedfiles/embedfiles.go @@ -0,0 +1,9 @@ +// Package embedfiles TODO +package embedfiles + +import _ "embed" // embed TODO + +// MongoLoginJs TODO +// +//go:embed js/login.js +var MongoLoginJs string diff --git a/dbm-services/redis/db-tools/dbmon/embedfiles/js/login.js b/dbm-services/redis/db-tools/dbmon/embedfiles/js/login.js new file mode 100644 index 0000000000..04b246934b --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/embedfiles/js/login.js @@ -0,0 +1,39 @@ + +const mi = db.isMaster(); +db = db.getSisterDB('admin'); +let mongo_type = ''; +if (typeof mi['setName'] != 'undefined' ) { + if (mi.arbiterOnly) { + mongo_type = 'arbiter'; + } + else if ( mi.secondary) { + db.auth(user,pwd); + mongo_type = 'secondary'; + } + else if ( mi.ismaster) { 
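+    // a replica-set primary reports ismaster:true together with a setName,
+    // so this branch authenticates and lets the heartbeat update below run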
+ db.auth(user,pwd); + mongo_type = 'primary'; + } else { + mongo_type = 'unknown'; + } +} else if (mi.ismaster) { + if (typeof mi['msg'] != 'undefined') { + mongo_type = 'mongos'; + } else { + mongo_type = 'configsvr'; + } +} else { + mongo_type = 'unknown2'; +} + +db.auth(user,pwd); +print ('mongo_type:' + mongo_type); + +if (mongo_type == 'primary') { + print ("me ", mi.me , "connect ok update test.dbmon_heartbeat"); + var testdb= db.getSisterDB('test'); + testdb.dbmon_heartbeat.update ({_id:'hb'}, {"$set":{mi:mi}}, true,true); + //printjson(testdb.dbmon_heartbeat.findOne()); +} else { + print ("me ", mongo_type , "connect ok"); +} diff --git a/dbm-services/redis/db-tools/dbmon/go.mod b/dbm-services/redis/db-tools/dbmon/go.mod new file mode 100644 index 0000000000..d0e125cffc --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/go.mod @@ -0,0 +1,87 @@ +module dbm-services/redis/db-tools/dbmon + +go 1.18 + +require ( + github.com/Shopify/sarama v1.37.2 + github.com/dustin/go-humanize v1.0.1 + github.com/gin-gonic/gin v1.9.0 + github.com/go-redis/redis/v8 v8.11.5 + github.com/gofrs/flock v0.8.1 + github.com/pkg/errors v0.9.1 + github.com/robfig/cron/v3 v3.0.1 + github.com/smartystreets/goconvey v1.7.2 + github.com/spf13/cobra v1.7.0 + github.com/spf13/viper v1.15.0 + github.com/xdg-go/scram v1.1.1 + go.mongodb.org/mongo-driver v1.11.4 + go.uber.org/zap v1.24.0 + golang.org/x/sys v0.7.0 + gopkg.in/natefinch/lumberjack.v2 v2.2.1 +) + +require ( + github.com/bytedance/sonic v1.8.8 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/eapache/go-resiliency v1.3.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect + github.com/eapache/queue v1.1.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.12.0 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jcmturner/aescts/v2 v2.0.0 // indirect + github.com/jcmturner/dnsutils/v2 v2.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect + github.com/jcmturner/gokrb5/v8 v8.4.3 // indirect + github.com/jcmturner/rpc/v2 v2.0.3 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/jtolds/gls v4.20.0+incompatible // indirect + github.com/klauspost/compress v1.15.11 // indirect + github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/leodido/go-urn v1.2.3 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-isatty v0.0.18 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 // 
indirect + github.com/pelletier/go-toml/v2 v2.0.7 // indirect + github.com/pierrec/lz4/v4 v4.1.17 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rogpeppe/go-internal v1.8.0 // indirect + github.com/smartystreets/assertions v1.2.0 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.2.11 // indirect + github.com/xdg-go/pbkdf2 v1.0.0 // indirect + github.com/xdg-go/stringprep v1.0.3 // indirect + github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/goleak v1.1.12 // indirect + go.uber.org/multierr v1.8.0 // indirect + golang.org/x/arch v0.3.0 // indirect + golang.org/x/crypto v0.8.0 // indirect + golang.org/x/net v0.9.0 // indirect + golang.org/x/sync v0.1.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/protobuf v1.30.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/dbm-services/redis/db-tools/dbmon/go.sum b/dbm-services/redis/db-tools/dbmon/go.sum new file mode 100644 index 0000000000..f6b9650179 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/go.sum @@ -0,0 +1,655 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= 
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Shopify/sarama v1.37.2 h1:LoBbU0yJPte0cE5TZCGdlzZRmMgMtZU/XgnUKZg9Cv4= +github.com/Shopify/sarama v1.37.2/go.mod h1:Nxye/E+YPru//Bpaorfhc3JsSGYwCaDDj+R4bK52U5o= +github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic v1.8.8 h1:Kj4AYbZSeENfyXicsYppYKO0K2YWab+i2UTSY7Ukz9Q= +github.com/bytedance/sonic v1.8.8/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= 
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= +github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.9.0 h1:OjyFBKICoexlu99ctXNR2gg+c5pKrKMuyjgARg9qeY8= +github.com/gin-gonic/gin v1.9.0/go.mod h1:W1Me9+hsUSyj3CePGrd1/QrKJMSJ1Tu/0hFEH89961k= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod 
h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI= +github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree 
v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod 
h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= +github.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.3 h1:iTonLeSJOn7MVUtyMT+arAn5AKAPrkilzhGw8wE/Tq8= +github.com/jcmturner/gokrb5/v8 v8.4.3/go.mod h1:dqRwJGXznQrzw6cWmyo6kH+E7jksEQG/CyVWsJEsJO0= +github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= 
+github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= +github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/leodido/go-urn v1.2.3 h1:6BE2vPT0lqoz3fmOesHZiaiFh7889ssCo2GMvLCfiuA= +github.com/leodido/go-urn v1.2.3/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808 h1:pmpDGKLw4n82EtrNiLqB+xSz/JQwFOaZuMALYUHwX5s= +github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= +github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc= +github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify 
v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= +github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= +github.com/xdg-go/scram v1.1.1 h1:VOMT+81stJgXW3CpHyqHN3AXDYIMsx56mEFrB37Mb/E= +github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= +github.com/xdg-go/stringprep v1.0.3 h1:kdwGpVNwPFtjs98xCGkHjQtGKh86rDcRZN17QEMCOIs= +github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= +github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.mongodb.org/mongo-driver v1.11.4 h1:4ayjakA013OdpGyL2K3ZqylTac/rMjrJOMZ1EHizXas= +go.mongodb.org/mongo-driver v1.11.4/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= +golang.org/x/arch v0.3.0/go.mod 
h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod 
h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod 
h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/dbm-services/redis/db-tools/dbmon/imgs/bk-dbmon-structurer.png b/dbm-services/redis/db-tools/dbmon/imgs/bk-dbmon-structurer.png
new file mode 100644
index 0000000000000000000000000000000000000000..5cc40c05cb037e4da054122a597ef069a490bac4
GIT binary patch
literal 81910
[81910 bytes of base85-encoded PNG image data omitted]
z{l02yxUcOiV$A$I3^;t+SS+XQHkeTR;0}z?FTbw~mJg4!{j{&b6lczq<5jc%GbK|_ znzvBP`xJ9D-cbSrlzX9)D%kY?tkGz_^^wmYb05kQn#ZFiaAs&v?}duyUmrS^du!y4 z{R=b`hFwpO*BxvWNThte)q^vqZ}HGEjS9ihz5;+<1>eb}s@a(9;oP)l_ziH^V~bba zs#>GAzCWdQ1I}|PnP0B?Z}GUAbI&QIytC7F;?gVZ0#Vk#Z&j$bgV>$F>ZY-BjWfJTjSpSi83eyj>RiedzAyfEaH z+c!hC2@w!tQRk~g*m4oa?%EmJ8n1eMw4EDE!xt2_A6x9AcRcQtcz(*{hUciYh7NQ8 ztQT8zKqsuzlanIKGoTVP<5tc1KHr<0xD@lA+lKMGdHOpwmhJ^qb?th&m$hb!+^W0j zD7%_9jva&R3jeTZ9)$z)t4*-cIc<1UdZBn zxf1m+!f(my@xy~@(RDsLZKz|~##C=yGH8-G67o1TCz09I}S9MPLzkIr3UoO;l! z-ky%7C|{yWHqTbOI#3{Y#%+@2Viwi5b2)*3t1AQ&*OYDB8pq$-F;5&R4w+^@+I|Ce zG6bdSUXg&(Y>3#BF2_gDEBg1kIqkG`Q?9&D-!}y3YRw6^&52s;JCoCYX~}XdA-xH( z0BeZTm*>#aJVFoH>i9XY;<8lUnqD(V{V!tahPfY#TWsmn^HnGndPj4j~X-&$# zhG1@#f1g~D(zd+^C|1?tYJq60w;By$J(7x_bm@uFymxY~5-Z9*J=~Xc;5~0rJ$%!v zqPesNoYW7ZWSAY9%H3==#QViQCts`usbI_iz$iC|w-SNS) zrFMLMB;vB$o_9;P6LMrW|!7exV-jgYa83V zE~pZ)^pGA=`c9+AqXyounB6{a>pm0J_Hp74`kmtZZSnWL#TnbYwa%>5=JVEiy{+c2 zL}PI!-AltP4Xu@s+4@i{_v}fWjR1;>@U+rZp_3BH{4+Z@vo1`_n*`@D>~tiuh}lc9 zlv=XkD+7c2>Nj_ufEXC~;VL@l{kh62-T3=+#o+!x?H0!_%S8?bt}Z4&T+BGLRZsp@ z5b&JXKK%LX$9tz2cJNTT-Yn`^)T7l?DQ(OdZ%%7E@j%YnI3Y2uLa-$_WmxyP8K-># z71{C7PdyiL;H}0|y1uuX!22QfvHk@P zwc%v($%Zkl&H?4dd4JYdY&hM3hi{`#*a($^E|7X4mZvbyx0BRCv_mJqPHjaLV*i4 z^l9?xRII0xE_xN@OB?b2GoJmkf6imGP_@orT2!X$$Zd4oSNPgkCyZY`?_sG=uu>qJ zV!%LH_s2*yRyT1EyCdXXiX1)OKD`}?JPOG3t1w?DxM9^LmB#kU_3DTHD>xg6FBYh% z&5(_Lsv}n4a3N=Vsk%pY#X*&ko&B|Y|5SLSsC2|%cdHF^IpN(okvYqF_P0Zd-{l8H zPEMXUw!BhNy^;G=gYb&=`8hs6s>gE~ShmdUyjR`7%vszm;+~74*qQulUfZ}6cO5z7 zRZ~6cLn)?JL_&nInPzvjzv-^9u19x!Fk2pi8|Ah9m)I{QXGA=^xLHHo~3^TJgsM&M6vRx6K}bIniUr_$6Yul4Uo%h{8 z(eFgM-8uB^PVJs>i%+Eyk-P6+;}@0oh}O}1+bX5PNhzQ7wzH|U%lfU{NRp0A$mcHd?d?qn;Uld1FQ9>sct>oO)eGun(D$CGURjuMjmLewA)(OCY zs_&wh_tYwaitH1k7p!%b|Al~r<6jQYN?ASfXPo-gV|jj^g!AQN1jkAJvI~<6Zf7M= z`p|qNqD*&f%w9XZRZHT@=%oI5f2gL{N!Nt+Lb7dg#;eT+*wtP#{wPw4qUqha#WO~&#&Cq@{FO1^l zG5q^f0C#VQx}$OAcjNY1IV15Ex$hQ#dUFooQHVOl`IReX*xvb@xV|a}dpxo7vCKQ= z>a#_~Gc`WY@#5C6De-{@xpM0Q>(!;s*Z=xiBeJ)}OWaYBZ-M;6rv|k=A_|ysOBNs3 z(=bGYMVL=kpme8U&I7d~MP}=(o;fR15B%u=n#?7QtzLfn$?`UD-@w5{gM6}&~^dL|$S#K)HYno81@6aoKP)J#x& zL7oK<%o2mJhppK)q33tkIM760vB}Kc!I2QRcRBR|YnJ0VrL6%U?p76#Ebg{u#X=B< zTZYt6mwBzSz5MX;=&Qk6j|6I3t6INX9p=Qwar?!#4#6&aUv+DTpLl5{f1Xp_Vd*17 zaex1{_SqLa4sG05vb818TWbfa`$$#k#dN!DBJ^6fop?+Ku(@;4u0;o=pI}+&d5oF2 z2$T?&xxWPz?+qx*-(z+&dTgR0W7ir75_Ww}=*cyvB7AHmqo5LXo4`CJXD+Ud9E?+f zg@`+-N8-;~WcZm`*qU1I1v^9b^toLYi&j~lwL8^#BJ@)w{r7f{_W|@cT1H}C-Kk&6 zn?rvc%dhx-N@{XJbQKQS3_>K zhe6^D1IOc4Dh8c$veDXrZ0)s%F`5rY$5t%){K&lenPRr^*Mu`$x}$Wo@s$tuxEz`E zRtvllXq@>uAK7OeNr~GM&tsriGkA)91LoL$d3>*J^K)M{$I~_K{@9zSNV2Lf+9k${ zIc2@r@X~!xKscYNARC-`K(v&inyR3KP675<74^pFFm|{n@Tp%t&&|0>W@2f#yu4_K z;5e}GD@@2~owUtLmY&{!d?LNhGF?i|toP3!(U_GMSuQRrq~WJ}cT;{k=bQ z?3qTouU5wQv&L`Yuli%#N?5pqBf#aP8Pe!HUPt+*2*m^g`i9P&m?u5)^gnLapOr;n zz!3D-VpL9$Ow|B9Zi_C@nIurgH+WcUKWjg|g_IWb%SLIE4^-%A1Gl<8u@R3*9VH*) zX-ZEH2Yj`A(`4a-Ze0@G8NkuA=+D=)?%hSm{K$+IbhFNK=cxa4?@s;?E2t$!&I_)% zn``15oMqU`HS5+{{aL5_<98}LH`*O@e3cnlxjs9Nf7x(nwMlngzj6NvLZ7aExFZ+r z>02`59nd>!FnQ1gtMnq74^#HX@P~99Xid%!%@fg|6EK4TwO-*z)h2mJ=GMf9l0Pa5 z-S%kRcUXi&5T&@wE#f6fr-0&T!R){8UtyP>IfM+O9CE_@d`WKm-@8_MLe!$`HHeG3 zd*4j(4@~ht2O$&u=(h3F-V+Yz>2LW?c~v>S`YY?aSLUrzh?`z5&=Vdwd95Vf3zE>` z!(puulhyi&(#zTlK1HCsJDQY-ZgMUdP4E%VyXSZbI`q2ksz$FyH{f8kx5QMAKCWvN z5K!?(=w+kjnrutNo0%&_8?UM#PYP$R-W&Z04UOYsb#XCLoi?8<)vwi(P?x#IEih=S ze|O!&X3P3|me5-=wWyLDZv(hajQpRcNX1r!bu>wn_=+}S}J}|*x5ZU zkA3y+(#uA&*-AU!2A! 
zs^cQC(ONL~{;BpjVf#A|Q)I!%?`Mu+f17ZmGtlpX?@(>>ss)zTe($~@MC<(_hGjxt zg&QF!4BaW=Wyp1Y>mJ)k}y-d{jC{Evy% zzeU$?++n6NtYz%4!wk3j=#^7*QqN8^SK#A1bUY`j}n?Oz$UX7e(Nm&M$@1iwl7F7t~_=-(^=zeq!P|3brk3Xk6?GQY{P|>fFCvuFn z%dn1jct?S)rRoo`a!<$4Ud>4Kh4=?hOQP|;gzD-!e*x2;7hT1S%y^gPA-bsNNpZo) z)`L-dY1I?!oBO|RiTmUIXT(*>cLYif=QE}44_5x1V^VW<5zp&fKZO;^cRhTt?xk#( zzCKU3BVSv@Qnql2K@%>taGmWy;_qKyAn|0>;0=8S&1d-)dI!teF;+77_O4XngY7Ng zzMs-=Z|2^kb;F}BlhRxJPB$~5GcCEkp;1;#^G< zH4$;QhuK~fIp`ZL>TcEqq`$gT@ zkLva%!rl)(eSUpe)0F+9qVt@3QFyhs3-WTuOZpqlI-84cNp$SYEp5*hbJ3%m%e!D4 z*m6al=Tu@#xzD|@ihk9ib@g;VW3HA}x2#Gd^W4kqFT9kAPI>DU?V?9=Hk`*Kde-N3 zT^Zv+=*80dV_zhsnXUe%`eng75mrO(74ObC-9@!kj^26%g~^wQ`iaGK&`)Va;iN(t zD=^wLlp?U8qiOck(|$b{JEU5D)!7_bC^g6KmhtzP+0iFAn-LLlhPL{k33P62`-UvU z80Q3CCjZQ}wArn#k+c1QmSb&}YpmJ}<#cfeheaovS*-FTKjr6IZew>L_6mP90DFd% z$eKk~iFvMitlQ)5JL&4{%E*ztSY5^#C|uqf{@gCwQXL#hfnE;TgX&lj#N%b$OR+sh zQ(6_O1&%B{q1%4F_Q>hPzI}zwo|>e z=5};l34hgyOb%MF2+%KW92ROt&~>8B-@yDdf(Hda<%=e z*$(B6=woh>^b;R-fnejv&6^}cxkkLD`*Ot^a(wX2684+;z9j}RQ5FiOSi>e)P*DLQ zlnL>#LGxUyxYK?-L|C%`-#qy}Wz#x6$ss8JUpIPfx`a;8n1%-JpchL*%lpgqeTi$} zT@U2tkXDU0jM^D`V5#6bMt4K*2L!47nuvbm4mf_r5i9R#mBPJOsuovVB}Tz0E{4ze zDF*#M_z;6i50$Y&%mvZBX-;UsGTx=CjGY$^oj(z_gNnZ^fr$|6;+~6$BrihKanv-U z|ANL2y1wpIJpTcNPScO(OI!z^dBB1$oL#Bwn>qa4L*aOI-@C4fQi=RElgoDGWeJv@ zKbiUN`I+)NEqLNsndo|6al1J!<{vU~Ib1Or;&%Cpj?FJ*5si|9SZNg{xzun|zf%1p-qliOtt7QtFAs90 zfS|%pp%lUh6@Sdeis{$!?3C8!Y@dcYk&I16aZjtt8HHfw0`k^-VAN5YGXjYH*!Sl{ zmrt0Xs2uHKK@goqgrkg^3(5x7GF-Qa#gF_tyjtZ}um%dd>+sDtx(xT6S7ei48f>(C z0g4I=|40Y{q-V(>bZID+#3rB5Zo40NbbAbP^8R^cLy5;s#I_q|H&nenl|2aDno>{#?VFIRPdN(zap|wLPCUJax!; zxAoq2j#P-9>||?yYGZjc0u(YfGLX}#7k{*V9==t`cOjy$(=hhIw;9JI`ITO*&P|?9 zpZ52Pa=A> z$C}JW)=@Ezm4C&}i3)Vd=1PHVEM9vZ&VdzQ}%kNou4SF*!Jjw~QZ zGL5|;_Y(H|7R)ulM_jYW?#5=J+W@WYqsDO}#R1GupeUkYE7*`nq)Enl|EQdweIzPp zZYVdmV_oOe{o|n8LxCKkw>k~)f;aD<9$!{^{*jlgjf+%P1aMqJ3{SIA=Cx+sk2!%X5iTKDuLH&>oxmL<%7 zKZk?iLU&%qPIsku5PfRnkuZ{TJQM*gS6+Sa>P5zDyXW-7#iD*aC2 zJ5Av^7TYWnyBz;-KWiHptrFH4`|fUk@ZO`*Q-8(I2hF9gF~-m(#c3J-=O%;7i8Q)v zJ`{=0P~}+$PeE;Yc(q!5)Q~XYDjj2OAiM3h&0_4Yd@SQ!of6H<(@*os@5?CslT!XX z(rv{O)z2xGb%^FMFJeDO!CsV+Uaww0_IT_yfu#ldl&mDYHBX0GNWbgs{z(*30frRI z*A`xxT@AUrNt6TH^s7b3cPuB@cAbvixdk~%YcCSNm(U`f8^J#0V>r#%Hmohc(`NAh zE29o+$L&5eCPCp43^1WY_xK_p172|3(#7VN*wIULco2SRk%~*SBcU8)x=-PID~LIg zd@GX8k*Cl>82vaeY_(eP-4zijveMEkfL#GLbDd#LcxY^~!Ra+*MbdvMl&TBq#N9?; z27gz`r6%u(LG%rLcFsy&6WWM0H22DVLFqQEUvgs&5m16lP1?^tK7i^RbL;mO+ z^ZCma*$?{^W_evSVb(c0rS_C2 z@7Outz$8iP@0HEQ6G|kBi+5u4BVZaoL!sB2I#nbW6GNC>&T1DVjJBY=y7F$tcC@>6 z$xD9RD!Fd$2ZlPEjzM{95_VCXJYs1s;!1*}?3q*Mv#RnR>bkL73I!tvrvGtJ%AmJYZe8m30AQAOpnPgdlk=S}9V@#dY3?0(y zE$@*Yq)P^IO(Jf&VkGqhOVKLtOH~~3CKZleL|wm|YJ37b7=-1MpP4*ZHtHDuO}??o z^gWVh?$Jn0w&-VQIi9mzp9MmsoQP+D9!0teD(wbUhV$j+}YzD4S82Ix{1adN2yIsnj$ zHB@>+UIYU{270{kx60z9aTQK>{^r09=f<7fiFz~TPTfcYx%CrMH?w~iRt z6{j6Jzllg{anri@SLloQ?G*XZ-Oi2{t}{Z2;~EM_w22D52UAs_aZaeL>4duPNqll7 zvYB_c_6ky)X#9U5&_5CpW1=;x1E|?fv~D5q{N&e;ctPh=qVHG1VE~bec{0f|Fr`B9 z@oY-|{Y@9GT3(*nbs>hDFme4(#Zy-%U@6GcVP62i-|{ZZ1MLs*UNceRf!MYzQTskK zU4bL;Q~XiAS@<`L7*MH2yv;-}bGsEvN-ftF`bKycmvBwVnu!hGU4BHL&Aj z*5JgHeA^^>`67J-Kk*qO3%hILIfzg5`CXdj5TBM?Jt6a%WIE5$#=IKXgg2L3gHFo+ zanpS5*3U_V)U@G(S!0$bCd-eG0zp8&8x~KM-@{}N7Cf^U-H)11g#}IfneYq`7~ztW z^QErOV9?k;>aGb^wY6`!C`8+ZLzB-zDhb8h+)3ow29b2M0`R{RtOWm!<}ul8Boo6x zk^vR*2ke76>@d*8n%s>IUN2}5#s9)cQ!M$|t$jy0I9cAAe-MWC^gUb>XppeWrF8lQ z<^n4Jq*q}(H*GE*cmZd^Z)<&ywgWCs*l=`-mjg6?W5XNbU|K?sF(9^7uYzHjVAiVP_(vc>kpqo(ujq5FMWux@4g z*KeZ2*cF|yrp?u{Y0UE^5s=n7?Zn@a&r-~8nLFJfCCn>px<=fU?nnzK8ecUt_3ifL ziR`g~@a=wVoN|L(g;5_Cw1*#?B&+?tnbP%NH7Hmz(PV=?IDTpQxWmnp(KUxen6$ 
zogphTBDvF$A8d*7%3icQ&9=0A`s6O5kxXQND(utG;!9yh65t~%xu-As94^{m`H9)M zX!^ws4jj!GDxR(*xOdlt@7MFfwbKU70J2(wD2=vYLOhIlBZTeCJjV_iFnaX`l}B;6 zrTY#04Fo|Ci26maxNjyl&_TfH2ceERgLE<6T;J7?kvZU*lH{h_k6hCZS=>jt?(HX_q>2RvU19K5t4#-{L_tVflETvNF%$J z!qHQILA}Wr8P^EN28VRXY_9`}#E460qU}KxCR*E2vabYCc!^s9=PlA1&o!}u^CH0` zIVcAGQi#1g@~n1!`|G+WIZlI^cxK39(AUXM0(X&(EY8ps30V zj;<@j?^I!M5+RGT%qdcGVzE9W~{c&UWcX@h3$~-tu!++#I+gCP9aJ-SXyr62oWhgBqjiV#m zj76+4Q{5q5(wz|5>)<2ph9#IMe~($Ko&vi=WBSk5T8rL&wRd1=I9Gth5;Ds;mCh19ue>dQJ<@4cr)D=Bf}T zqRfK+=E&cFJMv_0WT35`KECeZ0iWBkUjn|u$FAS?>c&~X6O=P6*6(ge@&yre&1PMK zI%z^$xXAXx1>7n7G9QBWUE9ai97O*4S-!SPIut*DCpBQZ(K5i#$In(>cOWm896x$5 zL!@Pz@f^c3aA$kMHW7r){!07sYih$6#oSR&|` znXrXiDmo!D3%v>6Pd*Q*cPy>OTY_uU0Ii_NuuOW zIL&~b7%_W<>F_!L|IM-OlFp5J>_#DeU>=ztXUQB;mVAcQVwh+VRx~o>VERGuzzzlakd>@@N zzWWt_Il48md4=#_50g!WA-v>A&#H#6SeD|aFgK2$MG`lm);X}^;hZzkWBU3WtJmp% zD;TcCDw_kFZC(e=fiV?N<~`7UvfA#6>FO?4Jq9$BC5u8&uR;fV(pDwKujwvPzxIro_a_K<&8E_AAyF{1@_aSfME`A}Q_Z1T>1Go#yzy6E#E zTJYVj`_P}A9E%}Q9poIO|7#DIeas{uR1XWRR9)o&jCz^*tPj`at}!zcfv{jLFn<=x zm>gkhai1@DQO(%bDa1nxNFPbWjX-p^H5Ol8+P2;CA@Rw^rII{-#0p}I)zCN6if&|( zwv_$LWW7Q_>TuwvWsjL?u@LoukhqsssNM<7IGFVnH_wqpbCM2J67#_}3I}~oymR`! zin{J2dOdp=1e1?QWIx5~&+Z>1$yBgi)*i%Fk)Jjq79pHYrA{++^Ea^*gZu`0D~@b& zqTMG5Z*339aOnU$&KsD*oeeenEL>i2p%EslEdB)*QHf{Zn8F&1rPzeG-;MuO0~?VE zBgv=@%GQB9-laXKlIrjULzVSceGEZqeqADh0t%&zX|M^qEwQXcaJ*-Z%r-g$ zr(EOEB$+Lb3Z!EB3c+wMJ+6zM`r8j>(b0t-hRVLE5!t$H@I~P^ez+hY)&evBHWlp| z?7!i@RLt{Ie9kM$o=JcUddds8$IuyV^Dg8PdoN7Q0F77xm-IX|d)j`TUi)f* z5Z)ji*BsKCO164^!Eds$N-v4{D4UwbhKF^9Ew4X(+0NqsI;m~&nX8ZtFNdh4!KqD= zC8vk^_pJ)qNEZdSIAww^CwGYW+@!OZ@Hb)C7_Kfo^RwbIDD`i^Y*phkb%S)Qm;yAr zSd%PM3K^XEIh)YRK@pNwC>*B2wl>G@rF*4xG}V)6DYCec`)|DW@u#B6+`@FeEK)?? z5lI^wCe*_q0z7)2lJJZtFbiw#TYv}~Vo!che&V~1Fj=MAKA}zKxzG4ikaLpldPH}VkfRLtUFRH!<&qlkW7yX)IS-7`>(W9CB1F%~B~16%SKyQkgA0EAI{&HbT@$CzY-Bf&^uSfEno{P=m~k^<=` z>&BbJxOxDu0*V4!Th9A{>~s+Hpf=L!y$}3?wZ;ln7NW$MbX2EGcnroe+2!Ftf?MS3 z{b%ppR&%`%2zb48pz-{~7aT_F;)~cnPtH!CVM_!%V6yvl9;^%Q4ap`ZSIf9EzcAv- zt=0~n*LVEtKFDfrP2dk{pQlaK@-r82*n?%EPmk^;%d#;0n>2d&8!h1px%16py50e* ztP5;s&2i!^g9SAXj*p*Mx2&lr$Xqc69o}WaRu>QRTS7C){ZO3HT5qQgcjDJuX83}?f$hmB0nJ}dlWg~VfiIVi`-aGwQo4OC?G$C z8Q`bmDVwDKkSpU>aDod~?+tB2E|7{D%P#mn`3$2H`Y(0lvBuyjwsKI^WoKzImh-js zI5{8J=c>ot1B?HJ??G6Q8|9C(8>rVwOd*pEOK_JT$mMf;HY9y@yQs1tyJ`Nwx(d!D zDjPkqW^c|6n!D%e(eV6J_>|)zFg+4SBFnob%^J#@y2f%Rch>nCXD*5grpuG;vhyB{ zwL06F;vHTqd@CJ$&WP{Y;L&Yi-c91dIp~P(_=oE_U@fU87$b8AwZ8a43d%ZmRe*mK z8lQjf9Q#2h&0PpTZ@p)0^s=7mb4jJ{1%soZXn=m0E>1V7p|WmIICEhtOUtjlz?O{qX$ zkdpiPI{QMe-x+6|d+&T;Z-IS}nO3dk<#94~Hsg(0l z(tN-6xS=jHrD5HpLng2fTGwU#4JlQsBNeA7vEZcZZ%C;67^ zEX$W?x5b2ZEa1DJkH=XT#}{=kn%bHA`YG9V6Ax(TI9%`c5NbKUPJPyLg|F9vokxEt z&(HWn5~Yq%VPxry^8-p9yQUHpW;ik9tH3}Jpsyhq)t2}bV=bYvEvyHUgmt;U1eY^HBWc2Lmu?V4~Dgzd6N6e3kdywFBb9N>#ahj9d1L3u0C7kVqn2& zZrmvM84*miN;~gP`FDTIHFLY7+5Nq9H*;b#q(i8Xoxc&XpGtPn#nQmrALs!O*ZRej z+W3~`60OrvqB%r8DjCvDUfe$dCCQ(+!j!!Xt=UbIz=gwRl_}xlIGC0OZxdqa=6-<+IfZE0lo==g zKla}HtI4lx_mwKBfPNKGdJz#3P-)UZDbkfHHAwHhhmKw8O?n6Ey*Ck+PUxWr6$mw< zCO`-|D?ZQj?)TmM`~hc-vwz8OFcJxOR@S}doY(ca&Rs}6!n3Nr4th^iI6p_ssGC#* zPO%vy3uW2IvyD8^##wywuU+F-lbpj@C;PdWC0TM^I`B(ghn{tS-pEPd-S^K)igJ6J ziKnce7t2grrzk0amFlJIixOSl(NOs&h5PL3mvC2!-K^a9yonwKf1MMqP9R!*2%vR9 zrNU+L_iLY`D@UlLf(t!WIeHwU5^^7Cv#D;Ch2mI#Sw)bZcE(i6GU|fw!9UN4)HVPL z0u8Baj=kukYwvqw&XL}`o_@}iQ}4)tz1~wApq@;DYGDuk-s$gZu#zBYl)uyT`-EE# z8r0(X^gJ8#?*STU3D)ii2HE|IIm?9G`a%My-hr7RxL}}riOB(|>CfhJO;U?^##^iC zkV>k#_r(6^6~v^vQ4z4fny})zvrM#}iLntUb)X1&^3|F7_eHr?zs_QAu-=!>7U^g& zy^0@2|69OubY-M-b9@Z-T)5?Z#QRBOr9U6*PRx{%-28q>lgzh 
zBA|p4rND@iuZ#Zc9_x9biNMyBM=^he%#0@dhZ#)+I8B931A;A_=jvDP8<$(ryxfxB zVO5Ug^RgCa#gGiXzbDEUVYaAp*T$>?Dz{s!;u%_IOs_BiCwh68|MGTPwa^AiV^>)H6588zisV{l1U5CU! z=GFDJ@ggmg3Bh{l7k_deUi*jmPQ&GllwW{fe!=yh#{Y#oChpn){VlMAsXr+v_fXJ( z|Kb09&bx4MIJ%SVsrEl!_&=V*35a>@+GF#?uGRnk8~*jgusg(C9;w9teJlU{`uu9*G-=@y%*rHXc*=?e4_ zZ~j1RaA${jXm0nN7gW1`FR?_VM0KTvzQBE7$>WRyX=~?vec1Tc{_0>dlj}BTazgWW z-^z?R-+u7d@iUm3j5R_acK)*IBFdr_h+A~hi-LXbKE%!qHITj%sXd&4B$Ny$WlHsK zfWt#f9GQMdM2Tz4;=PDs{p}7Si>3i#cH{%6U7s(EZJvOqiG7d(su?AAc4@a}BApkAfVwH^lEu+M zR;ul8sK=u{B9WWce*Bq1C;rDa`(h7+`ddA`@e&W64lauep=C+%MdxV~755p^zA7No zBvd1AA98Qn8?Fopb|jGCDaGYOBMSrhXN8jo`jh&0YCOouFFz6w1I&FQKnbAVxE_Mi z+pyWAzy!2wd3+zh3_+_u5men!%&mn%fSMxan)niBYw-YsJB#82BPf6X9%!rt?F?Q@ zQX=!1h&gZ9RB{3AC0-31B8Zp9h6sxQoDBiM@k$_aj6 zk9)X!Tx5L|!ls5gu!=rxL0?(N%gw@MeYV8bePQK$T~NX&@|{I1G#x@^v(TV#8eXV6 zGz<5m%NWCSWzju_`)Dk$#X^GMqoe=$`l8q7lIK6-SQ(wR;s&}O5N{J<)7L*A%%<(l zeyrr-UEY79%ss>4@>cXaVrqZN?d=m31V6)gmE4Uk7{4M0ggFrO;&C}O=^Ca)%OIua z4NI2Vu#iW8Dr{<&J(M-Xd(iQr#gn)Vpu;WYvR1crh*cDM&ae9A@-$kB_?WqRB0X z81MP7*(>pGWc3i-S*VZRWP$kJ6zr}O^&{!Q1uxAU&w~C$gJwbNm7RsyW zCeIR>W`Y>5SAmbc^X>4p_Pe+P32 zWGW3hZ^={>;`xR;OZ8+ECOaN@5{AbuV_clA7G@J)!X|&7R5+*g#&`dGiW9JRw4(NY zP9rrxRgg&iikuD7@aR}jr|a0Yb+cp1v`q;RTdAEE4-&U#I|h6kODihup*ovk>#jpF zXv`^2QWWqx3#;{^Pn9Irg#{tbC$qkD8}$_lrxbfQOaVqQf754eiDT~mc2c!h%9!@a zLxPtms@lZcE%<~#ur>%HjF%0=FW`{v9hd)7LC9cBI*{|Ax@NQ0ZEVv;(T3>%C_ z&^`;Ix}woXRQ}w;OunL}zr)wgc7NH=$2s%kSQjK=|6(ALIJyI40f0lWabRqGc>+{_ z@t|w?+-C4I$Bczj>hSbs$$D*~lCs8bl+zvz#cy6&GAnmBMz+FJMR=pU-1h$qjrN{+ zsH|Sp(n-q-nH6kz*<3138U7(_KilJzMM)?8P5Y}n@OzA4s z&bIpMk0@v3psP)y`<-S-}o zd}w`Y+>xf0DWY;$+_SaSes+0%?|1{Yki6O)FZ4rIn3ROAQ&tP?M&b}dvk;tK@$=Re zSBL{dp~Ypc5fu~cE4$2R7Twy~^%9GYJ~#{bAzb_HLykSre}T%4Do$+k17T z8WF{PJmWIAPAOKJu5-jJ6d9w*v|LDCsh*&mwT13j20ed^r`cfmk~>Z&0+eb=4xo8O zq~Q4frY89Qlg1IrBr!Qc#4--g*4jpcp6IPe1F8%>(2KqAU0FE{3l_q z*4XPML~Nb3NhJN;kM7~qPDBW{TuOQk@sr-P3DY8~QdGq2HZ_A~oiCJ^@pH(0OL-~Q7`i z+9+3~%lWTXNAg$D8c-<0-xNDZyhTwtS5ABH_iEY*wCE>Jt(Yh3qhcYXCn)77g-S~`D8e7C)+s4}iijy;S> zKj!)?N@EL}nA!kfltf~;9>j_`Vl%dsKqCXRJwwjkoUP?W2Zn_rLI?}woJ4IZpf}5x zxaK&1w9kIbGhC*iTd|JUe1Z=2N=R~bEt{*jK@Rx zrkm%shj$209?kq9fktaqO|R^Z%Fa>qDA9d3lhTn6c1KIgpDgoUW}1H zcX3nF`(o*#!oZoivvKj^(P(}l%3nDcugDq_VAZP_pDcL%o!V%A%|-;qt(p`kRs_@V zy#LMjirL@8t%8X;3`J1MnnUjoRipV|4fi0rQ#LyvCtg@wu)+0KDi0<|Eh+^_XG`za zzm%z82gcGwF*8>5Kt6I<|#cbcy1T2GIMqtswD8BuF-Y68_d%V2^;6l}+o7!Lvq z8Kw|^6=?2$>t^^)q`2rtFuRN`$r+K6pqh(L$}Vzr0?v#F+|+kQO?u}m+3lk^;Ban@ zCXTy^#qra?u(J0MMTZ-z?4VJYJO0~`P26MkQlf|{h6T6%LYp(1l@S6H>+D_(iM2;%E*2ZpHLqY6_i*T*akqM%fqv4L`#oI5al(;wbL*N=8B_m)G?51x?AvK);r@GE{ybPGSXpT9Ko`maEgU6f*P6}EX= z0Y@AYWVA1^{!g|e(O zw6H!QLQI?+o#&FadMVRl|Ldc>6SmxXg-2Jf_>+MgbnfkjJ9;QSbFT7JkY*cA-=xDt z?8OfdJJ+0cOuIdrj0 zxFI-D!ieRG&igj!R8P3ho3I8ry}Q=A!-;+=9=MZTcH)%cnnlT)$s!~#yflXYsqeGF zVOrJne4B3>V-~cM`Uc+r1iOH`cxg@Xu3kAuo>}I-*B7#ot+#;!27JP1%!J2I*YB~Y zCiZ%(ozIL(eu1A!y@g{EbG;SuhW1)U39^}kAUZ;T-xS9Vr;VpCo<%<*@Kh$ob}R9S z`|ecr6gVT9;43ok$k+&mn-ID8;ytA-PB-X;o?+4o3ir?(0Lsj4~6W6IFB5A?kQ+xGx3+%;g3 z2(?`$ep?@Y@kE9wU0;9vqeVtub$!wRLhl-JM$D5fJXTqklI{SiL`Vaui$1UW1xm*I zKL+xOe(7KF-9Hsm-9`1t-LH@eai0)VAY<@2+mF#|ocX zO3lj3w>XBPk_SdiPGT-E_#kD*{1VvUAN`ZWW2j2~Mt$78&;)He!5~fah<5Uesi4yz z)GDZQ7JF)~-wKsS3h3kJM!B??Pc1w$#~)Q%yZV?KkN(_vwoa^3JvE<%iLKW?TwFGl zl*7__GO~p0Y`4&G7x-^Qz>0M$t6iVZLs22>s1CvvYI4#-dD94TK3P*IR%gk@TlR*5 zkB-bxj`$~%w!QpaEQaaM%nc=G_3lmbYo7_DJh|i9O~p?~Ifv0R{T`>&?xV+}Vu5F`YZrgA zHe+)ZwaW`Ti?T4eW-WFp`{H<|jSp6LcP)r5Nj2D@L%z zLLdjBoxc9Av(UXE17j;>L&jdYPG{ipzDjH^vhSE7T`;o3`OeDSgnGDYB@I5KOsQ1X zn4d4XG7gMm)Moaqhw21dcB(N{bcxzJjr3}=@e#6Ybw!RrUhWpApL~8!q~x~GdzUb? 
zm_BP$cwj4$zBiWb9yGeRT9oQ#)UfhVR-}FI(Dvn@6OFx6A#Tm}hsnL%!+E;vAJc0l zs-%DEv>>Kj8~PZAVG9hKV*N>#4H9UTE6vMe9}9jQ4h{Wo?qZcd1ml0gj|6bpLLwPw z>Km}c+ZkiZz?4a_M6Ct2nyj7%Vo>gt8CfB7N!uQOXvB^yas)x8ewPd4ui5`dcEDam z7KML~YAtn*(-Cm1C>p0YY+G?bRD(kK!OH&jrGN)VDe_}=K0?Me(o)%IIopX@LzN9S zue*Z4CZtNrZ&VjU9GnRl83UV+IN@~P*}lPL!C|#Q5X>R}=2U38SOUN@&n$i(iDwjj$kw z8xC#ND$%ce7JFTK6=(jnn_ z>G=|84PibsKFn2&nf2;mjAftBCSw1Je!3S`%rKWow0sFm&B3g*l+nr`MIULU2&s6i z4!=Z$37&t$l?bH(nU5y^9{tnBmih9LO2G{Qzf*L+3y(#o-&fyz5xG^(kAR$*1^5Q5 z)L~m){e61`Mw748czn7ctA2pwdoe2aUUNfoW3o4J+z5VV!V{I@gqM^tDb)OoavI04 z3mu8Qa+F!y9q&=8j1qj>!wO3a{Ww+s5mONo^wZDY41sHnYDWZT2|I=(mfJBM=4iqx zKKfv7gfAN>J0;X%=f4h1Yi)ZN5FX!c;yj_1csEfB2-md%`gxyqK@Yh9c)^tOszzhp)y+k_0^WA3NUKTV$3P?e z;H_gY4#bzfns~0tbR40v3@sPQ(rRJeh{mWx6wYsgDN$C zU1iy}W2Z+$jQY_vv6C`>NQ{Q|LjQ4x%bc=)G!D8ov}&reDK*|yXgl^oi^5TH{QE?3 zSrdBoMLEGvmP-)VgYS-K*wDGl~TawhE=4*6vNL-S}tY7%K z4p^(t4+$aaQZz_YR7<6-^3>n~~u9>mao$kpB~YA1*u>rcLUxxFpDS$;Fu-u$;whq$MHrsKq2bIZCQ z7n)fwzlz$j={j{6b?>}(@nv6{^t#{{(*#*QOrJ1Y=d76Puyn0R!xUmL+>p8X2#cO$3}!jq({B_ zvoG3*s@jE*?VANd7VVfe3bj%>T75gvz5)yPJ;&5%nq1;-A!?4_em_%YUb_=L+%DNS zo%2Tu`T3DejXLE1WNjpI-9kt*KKTL>&1ybTSKYjJDBTjr@q}`mPa2#+G$-rq+vOd$ z>nqwfMVcR(Softg6hF!1bFDN* z6+aA|7v{rjsMK!1XDldxlkiT(!4>B9w#e)b-)wfSz1vnU*WCcGJHN>3o5xDzo7}}%9kHI zrW5`V;VON3^tL&ZO2Zm3e<^U4s=Cc8(=q*AUWO3j5~a zx=5-In>me}t5HuAh`?P=y3%ApE;D^7p8(ri_<>fBa85`J^wDG?ypcM3#sO3KV%sUs z;C%TjQrO*SvQB()nkKgkEZrb3BZ(GZvG4#6Mhb5X{(XpJSL!5X*$$CBOIknPrjo| z>#nFH$08druPS)ZoOpZZ-B#V$3;PS%=G@b;Fv`~l1(9fplkAahx)&s2xLaC_^Kk>J zrUk`GTZ^YSSqbxCn{d-V8P4dF?J;ys-ye&qcjyj-_96o%ngdBeCQwBLQ-Dn3W$~B4 zzU4%Jb3aeA6%Vx3{p-z@bF0G{Ov0CUfRO$vaHVfQu?Ff1O|C#dLRs!Q&nC=8Qx~pj zG!G0ADeKc7TW$wx6izr z+6{q@Z+DN1LS z5aw7`maKXYdi3E0)D@|Jm1-m~si#bwBKNL@y7$k7-bH9?VWCY1$PNq@d(qr1C<#jc zx1s?(1$L=_4QF5x6 zZ-@o9LC9)FRWX6hZ&e)gr005Qn9#`q%1mhkviiduW7qD7eOOh6v+jl%N`Lo`3V+5} zOP0~&o3{D^D{au+zhZUd_@peP)kVaU^PXXqE~Oz?>AWKH*@&*-Zmb#vR>~X7%CGZy zI9=vX%F$#;AthTMIuPQl+?T|aHW83pXf+&X*zw3i*G4^a-FKEOi}_&g`T`}( z(66*brRdRP`jW|#yJGWh?J8^{zv&5~J3C=q1s0PR^fQWZ#m{}NtVHq zfd`11Z;6QtXvmxJ!xi>#wywzpASGgVfVpfLKoEDKF|+lK(1vv>5}#-S(9Rxn}_?u2aL%ucNtRrlGdHi>#kX-bnKY3iuS|{QngHqJUl-x(n@YAmIS6G;dK27Zh z>ryn={=KrBYKRWZKgd$)j{09`70{ZGnmhU2&tT}gA{)JqI$TUMYKi5y3~N_*(qzq# z$Bs4G%}q;^($J*A73NiruWnjE!d= z2-4i(pm9(XHMd__K)VRhY!HdGc3+<$FH`CJI4ziqd`@@sftSF0h5I)tGcL`T?kx|Y z%_2u@3dtO%s#1|g0pS5Osr`65DXZavus2~(dX%AN(#eub5A9woDKX*{HkOS~XfUh( zK4|pZroX#bn^yX6616sfcN!}X_b<=4fS(RS$*fjuj=neH_B9Ma%#vgsnXvu$*&Uex9946xl#r*7m;x2&MQna zJX+=KR5mkT;eo5XtY1^;rf207)Ki<>Ik*lt*$rqDeX*fX!gyncKQdLM?zRy6k8n%l z=9tokUNKu$*9>-gZOWU}7_sLz`d0n2T%)5ydXbUv1L^SvdsAV*ogcQ4M=&Hb>T?-* zjlRU;))I3r>*T=1!Nq6l)r@ccaDjlrs6lnWatoc#ehorcUkR!G%f~(atEFKrYSHMwASIz^cC%nINwK|X3INWTBj9L%mivlY=#D2+U;};zYyZ*Dz^3}A(lh7WMT7H4 z)&}Tu_lytPja}n=Pph@#aEpQ3O;6otS|lwV3gU#t@C@XmhadD<#lp_f#C^Ic^R>ca z(VI3rhYSjBu-}od%#(i`G(~mZx6jfPy$@@961{+n;14-Fc>XRIJ)DB8Wx5h-^5L&-kW9#t=eotu<@IAkr!dM zf(;s64bh7$_k7n*a%%@kNexm%F<&#Kpt<>~8U6cqJ)JvRrMV737AdS z>i#)&ha4=q6&il7rBeO%!(ecMo_@~{3|8puZYxeW*mqnQmGn@D700v5&S7r)JHHb9 ze7Y#vnjt5b&90G{8tor`E}(*x?pjQRpce+$JQn0Ln#D$=BXy(XM&^$~hVA=+aq)dO(LWjVe zST%K6i)ojRMT&W=X4IuoNBIi*yQB{nK;Q^KjVw~Hdxs;MhykGfq(nc{g&R~O)PdXc zSda@&yIw;itN_jaYh7Ou0F}$d3TCV?ha$a=>w4^lgAlnSubZyXgnn;`Jp;z>M8FX^ zT`)mPj!GoeU6dkPNH#LDss`7V_BGrZiKhw<@dklK_J_Q>VK?n&|0qSGk< zHZu*rMlXdDHbjQV#>CD=^1xq=uXk4Z1cT@vT-`r$t`oJR^FthHbR3pU_zmV```n392q_DV^|xMI3M6>^&y6pC4eI1)wIkTz?*t{Bdqi6WI*LXp1~c{mOAQ< zABPSS%@vw8j@Ws4pNm$e6fZD-?5EFyk5~y2C7zm#1OTkUj$!WXu*cgO1VGz@4(IS; zo|1I3OuYflt~BJBrO3`@ygt{uS z*OR@>$p4+INM+JYTw%Xz`466rhT1j*p2b(QHBca5uL{nNls$FpTkgj?$H1iy#1I~i 
z*-z9{b3!F8ns$!`1dj^%JhC36RU#Nq#=I0#Dc7v|Rw5;_K_wGnDxC&BskUxxIGk%| zKNa7QVs>tExPzX+>dfK0L^4@XMdX90Yrk>I%DQl!T1fCN=K5hdp%ixJX$w;jpEwTS z%lPKp2rhDDty!}XKj`CR2z_Ota=ZJ(T|Ppu!~XqmKUG>~cEbzHd@+JcK^YP##JMuw z7d9HwH>*0(KR+^b2&HL;tg}&#TvO=!pxMs|my=uBNVNKtsAcNecAifnwya}SJ|~Df&c8}Ta@L_RzxE@8!<1I2nx2UTNPoUW#Tn0g4Riu?E-j=q)a3x6yct^V^pSR+b_GJ5cE;^ij0Ze93G`}H#Yw^o z*84s`$TMm2;$0I5z!Hbs_cCQeIsjOj!>+v@+U{9IQGO$W*{2re3{;KyUYXXJtDK^_ujewnzDsOJ8VE9&WGd zGD%tFwoP=Wg@2n#p}H7v#v~ALwvXmuo-EkZ`*@bT3)3Q4i0phn4M9F!DScWv=<6py zSnrBn8OO7U&t*DI%vZimEOeGQ@!!%Bd&FZ)5_bRUNZ@f;e~-<9tiu%>c`@YwL5{*2f2O2kUTQ^N|7#u}#E#tpa7lS9_blERK@HL0=o% z7GtJ}ATp8={IS8vNe=2=3DWipl1djwyViuYcfo_o_$i4-R|1D+0Enh;L*&?xQ=Vl! z;oDG1t#+POcf#XgP+y#-~5%6<%!C-okUa3OI^x zP>{~UqXc?_L9`m4ltkh^z|1*kJg1&Duh_q?KuElZ^nN_`Osy)DkQ{QvvBn#G^b41bv#5E(lqAv^5^v;1_z%K(GDvfKcd^t{Ev3^b59;}Kh1L}JSuV5?pJFpqolQgQoN63@Id}^Z zUl_PDU4ZRNl`cw|c)>%L_6X0fVyPjMln@pgdU?0%dB{xX?d!;!!hzw5f9BF?E&E?x z$&~v#@<0baEH&6~pvnF<`ZbL~Q>bVHC7&VbazFf-1l58ZHh=5afPf;}ZA_U~f(8U- z3BOzW3<h(J&shsZQ&$pZARGOnhACZf-vxzdamNlh~iC5|dZXtoZs% z=C5d=w2^#R@-+;yqOf@R3pOPCXCSUYfZ#S`cFh>KNtqEzK*I}oeJD1%QIF?3e64GyCi5L4T< zIkWfx6wclGHrYS|kUnQHb|TM_E-N{f^3#DehZ;ML@Y_0vWqC*KRTEEbtHLpVMD zI1fm4^W`wiGn@uz#$$aUBC>(%stxCO9c3gB{6}<{Prnlgu8YCGZLxGg-5< z=a`T>NZl1o2ESxP)V1k7%+#avq+Y}18r*-3g{7k;Asy|qGPmzEz~~X^8Bc4hTNnNB_s&zJ`YSJmEZFaiY;Wuw+Pi8l&uR&~v;FgD#$?=vh!1FG$Supz0)lm~{Po-INxy`QcHMO_W_LlRc*?u(dVrwRs5V>KEZ+&g} zT-#S;L+S?eRwoO5ZN)7M?rqGqXCa=3BrE3k?zoVtZ)y4upL0 z*bJX@KJYzH@__+Mr_>q^fZ?S?d6vV}0VVbjeVvB(y8#ietIjT`Q`@5Ug^T^*GcbL= z2f1L$fZCx3BDLq!qy{7&>;vl4%3x8pq~l}>H<4Wkl#+fMHN>^^sx|a%Zd-CsmFF2e z+4SrT=Q;|8U8mNlE0CNCcQ)8V(moX=Gzu{g3r2Dan)b9qh*!uEi;<8Qk|iV5@WII^P#n&Lo9=KlI^@ zYb`C;_AJd6+>L@d9Lqzf4$r(2B3%2k0Ss|&GB?nd@T7zu!gTzBnsfTRH7v!rWa`~o zpQYQbpX&H{J0|`LA!_sN zB2rLw;I{K}sSd=ygpPR>K~734;^7dzS+U(Ru%?pteVDhh>lG*{pu<YzNKYt=zyR+8*HmrIlq{-mO$9q3)`<{Ty427On^qywp;a1cUfRpr;g8`U|H8n z=&N581BN=&q;PVrlB4NpFE1kB084S*F81v-i9s&R~k09wA%%o zMc)t}GlWy%{p}8QnfyD9C3z2?aR~M8J9hUBv`YH;Z>{Y$I|oh_7H2b2WHC5!zCtZ(0+OrVNN~)PMQ}1~?Rwq|{x0|Uby_YK1ebS9rMyjS7GK18B-vvST*SYD z-tHR8;K{nggnJ{7jhuSb&3G^8yV(+|Y=%1`I){=IaArv(zwDG>#YrwbC9k+dk}+Lx ziWGh5zGVd?JKQ)5Z+q}EH9-I}yX=E#>0-Lk02DF0!}n|k6P_FSI&tfmr8%Gw*=1jN7QFt4ipTE?a&(BeFn)Pv^jpPrp$mQC`VPwLjZ=eeC{>rj)S1H}Fh zFz25veAN7!4iacXdz!=?VxH)bH#0Q@R?!{6-+I8CXd(2n8h3h+XwvOd{MuO5xtNS@ zAoc>#KJ{dkt*^9uOtY-X<355}C;8be7ci0p(iKCA28@<#t&g5UX1`oxHccH%Yy0w6 zf6RmU1HX0pn-7|zYlqC-u&j-ZKMvd7Vt?QE?UXd=%+yh4QV{!{t+=VFd5&uW_bB+l zbA6LjDV2l9Ef1Egy1F6nPiKrRBF%RBB@C-v zM}9WaZc^n7hc%eaz7sWLJ?z6(Yz}{5e>(Qtq|I%#Qn@2Y z-DGjj*BQw`HDZPwEl3>|mZra(96eAV-J^sJ^k$lfm5bjE(2TpJFI<%?e%k0fi)v;P zF>Ul{w@pK57VS>y45T}(eEy8`8H}ZQ>5N&db()g z-rj~ha)kx+wM(|I&FB}bTG~u}vdydiEqQZ)sBwPF__a6awVWeyzC219u&r33=(!7l zI`7Ex*T+JltC?DZT-QOq%AV8M`{3aeHk}5YjW$7iS+N>ZkQWvUF>Q0Tx_nFqPJ}J*jrYC#m+uu(;Hn< zDALS0EwQVFuZ1(CB0&C?in!0#XWPfiL`I8ws9ATg3--a^l>p8{a(>hbUXoOmP}=w#&K@v^CSN<6yvx602F7jE&EgoY=w+p|6k0yMI|`;*Ubn{>;3?0f;kdSA^da<(`cyg%VXkT-pLuU?eKtFfTlj5oQ*veJi4h|_ zc=qbiNVX2*91F*RA-pnpZofC>q3vE-Ex7+vbHzw_JEVxYX5KSvdr_Cu0{X7O3}Z?@ zt#*P~0n?VxDxvUlV;)p;W*#_M_~`AUP(Lbo-x6svx3?niQauCx5ZD(|7a0PiQPAL{Ptz$Q?wP7G-9MV^AKbC5yIFLNJulunQGF&fz=FG>Uy^-e95O8t9f=0e$ zX5(-_5`ir8SI%o$ZJ8X%w=F_SVQ<7ID%JD7GBMTR4szS^>OEAGP3N0!5f_56IkVhl zcjLZrbk=&aUud;MEV*p4;qfZWguN2O#CULdh)eeO;jcI$->6V;)_Z%|lSi!z5{;y&_iBugqd-gG6+wW_}qLbD$c)Llzi|5w6&D9FmrUQVvLD88VvufG0+i9?vXtI1Y#` zLsHbkwAYE=!uwaL4q98$=%cVR+5>5U1c{eQ-P?tiK{5K^G~02~(Q2jF^;&lxX9=<| z4YDfWZw!nLN)l^lMav(zj}icAc*U)cq3cLT>q9+mjI55Qi0EnWO5%^T1g; z(Elfecb{cyQ`0dsLXta6q)6@#76ok=vsFJ>xYZ6}y}NRkGRw&czd=yR{XpNp=f@!FYK7Ku&zgpDr8lu# 
z*se92x}hvm(M486B%cP4@f|5Z3mmhQz^RLBtj@GM*+Yk8pSAlaX#H`lY}F4Rs(mSI zTOGHXR@nQuFMQcVPWe3f-PAE#&_4+CQEV;;K+@5|oY7IrRD$e_VuQ*|BAZUT!bJRP zRR%~T=i!cFg#$vjI}#zNKZrxW6@Asp)O~m(qwSH*2TToJLG>9irpHelpE=>r82C}s z#t^in5kn7*;Gw->{!svU*}3sTtpgyM2V^|ae0vBg!HiJK*N60a{vh;H5%N6o*@l9* z6z=lVyI@tSu!p=Ak+14dO)S5*S-QD)F5}h<$ouDY25qZbaDzag=DU>e)BCPO(L{DP z(8Wq@hFw2k4W}UWFeFJ-PpKuBxc76e207@geXp4(03-b#%}m*WGX#-wA+wI6R!Nv>rqiV*e-z==;yk*@RCv zA2W0KI0#Nz{hNEhh?h^FLP|lIzim3i?`se$@`qYdt&;d9cURRaYpBB0FASfBEvqLV z(eEFy3Kf2ew|0F!HSlNxn!`%$CWRU-jr~Ad4USYVMo!K0PrYiIbkGyYBRM%@fy{IH{Q2oRtMijBbS(gPmLqH_&U+zpK5^w zY^L0Gr*L{a!B~gdy6>NWx2)TTL^`bsXoVC2^rOvhggFxKB@WaVu|%DNm%MBHVUo!2 zp^#e#Jz$X6b zmwi61g#}{?+CID(yPGPB{e)x475N)mmduuVh{b_X|Bxzg#z%ZZM%EvhhC)iILxkwZb|1$|5~ro;XYT>}`B zVZUcy(m`fW2^+dsSuV_hCVUV$wY|GWFYbvH&SRU$k?|8%>*CPWvY7(jsJr^bOS-t9 zvo1xQ9-@UKul1m}(|FMOo+ckIkLcbq0$GpooSDgQ)dPvRP&FVc$(IZJqI`q0+Fi*U9XEXWJe{H0%x&HrY@2lgY%-;54L^_DfKtQ-U8%&lAwC5st->k`JHQk~q3{C6zlV#UBy3*Mtb$?Z_1>*iZBhjv zk`7;7w@RB7kr-hgXk<0=pa8ROHFG0m|h{X4ihFKi?of{pvgL@{dGMDnQ3~^^ST!aBFau z8X~)@72-_w^a{5RXi-r}x+?feq$*0_GhQ z-hAx6E!3^=o~y&jzxKR2!UjHn*+#UruVBthf`V8Ap~q**0BNZyC+B}5vGN9E?>$y< z^2IL)76pke7&f1@wEhcG+b+b#jE#}54C zu9V>NA-VHUC-WbU$M+?G4?npfEBg!Y2tInR228i*Wj@0H>zvL4GIx^bfPi1LKd{eX zu*WxiTbaqPbGiy-izozWX#PtxN(0k3^4)fqOULj6Bs~VivbN*G$qy6KsUg5H z>+D;!Q9-i+=~*ygT+MJ){ozRz?)$#mFHzFK5TWH+I6)jHn%Jw-QjO7e1dXs=MBh6& zbu6nAkbL8BjvaayjhFC5EK!ZLM%M{~5NOhB)BxqY{odi!4+nSN+SfXKW7NTkgovVx zNo=PF?qleQ)J35L>Fa)&zz2K6oA|IA_gFz`Y(8HBf}KCB#R>}>W`Dm&L`~A)#hky2 zJR^Lw&eA92$pBD5B{A5Bc_IzA6ZqAjS>*f^P&3k`7SSI*kA>rCu4+r5lP%Fn4WDiT z(#+O!@Whu(2b7QtS2W!W^6-5^J&tER>mmv%5kj+2kZ=`IX(2u$u@BK94Ik4tGPbm?la+}b7@_aV0 z1vJ&?Y7y2*=YCduH4E)9PiliQJlU9D{GKUbM+e_Dj=mH;9|q5;c05J?Qh}Ky3gTgX z4|N>A?Zh$_kUUE?>9@VmHYp0bpX8+z=K+cMl@*5vj?{@Fs*bx_W$e&<+hpLqU1`>3 z3SddFyYfVyZ&G{e@ndsKXbf`O3doPoKAOA`*G^TVEX7wY^8=!gRWF2(-Mw}ao@fCej^g#%h-9CyhDO3x%uYJp+C9GqSy`d+V8 zYLHQCgxbz=_l`SIwH_Sx`Xm9NYXPS(C=&r^I<$z;@ByE3j9;6f{GO8nAuNbQfsolz z$?dxPB=|mN7J6+0YPhY9DI2JyN|d18-g@>(Knh$?n>-_6h2;iy@_&$gYM;j-cOpqCw`!yA;rl^#K#}O>_uT5hrtamndLZ`szosY96o)p6uPf=6)Tm@LOMds> z8`O-z_aYHn$1u}}F)oBo`#Ia(5x>d9QE;8&c8P`KIfDz^h7d%l8Jkwd3RxnEsKR;v zq#U9(=Wxjg1VC6}CzWFsew8C=SR&|riy%(<*K5p|x4|p_rt-gS|z|iwwM6<{V-GWt%><^*_Z(wZnKpr5(U`b-cH? 
z*~{l=^e2{xd5rgs?8?5#tp%XMpFK?0wGWC~6a}#F)mv93-s{^o4AD5Qyhrt9SSe#) zP@pH=J^iDSfJ#^D9bXa5Yt}(W^YET&W!cPcotT49*&^nx0LBk)%7z7IgInx{aTac zA-?es9gbUF`x#jZ2RcoPW1cv9UG{+vZIy*jGvwA&B^+z1?m6s7w0n)UX4~GJ7cZ^i zW?mfQkUm&Xam#&Ez8q=ZDak8rjgGR;4IT_xT1KFn!=4tIVUJY;{2+Uq3$`Q1kcs=2 zQAO=~K&nct)Ohv4nG>F?Nf2X)RU>A7Iel~HRPBkc!zy3XK-)QWoOP!@&a#E5H6pWU{&nx8tU^8zu z9v4*j0^4yD+l0H~AvqF)N_Y%Hp5QF}L>=*xTn&CFqJUboSQG$ymH^3HO-O`x5J0d% zKy&Y6tf*6(1Yji6sCwv-&O_EsmSF-eO@MHkcPP*mRRTxykC{^Y$!IU}`JT>khU7RQ z9Y4S_6b`_lM3OHc30`O*J(xb;m@T`};-b*Qu3x>&J^y)zqt>JA<6d{ePOIm;0%Uvt zi3=(*&Z=?J2V2*Mq%@78m`R&nIVbXqa|_jR!=|g*9~}2Kl5m9vMUKzs9hTql)x8Jg zASC(1J1WKr`#nU2#c7)gq*~7SL@C4R_S007l^f-o$SR!*NsK{%juB_L1rL7;vqa>h zY#Y5=B|VY6S(MHZ7@3+%RB-=(x$@X60D8vv6^K*`Bhz$u`*xm+rD8BmJ_Fa~Sx1y11wC2*IPA~6lZ@bnT^hBb;*8&~wU2k7LpC!>^Noad+JEr!_U zR@CF)!~qldT;MZVy|KL(BK_8V>Hf!S@)=8B2iJ4~!b(CrrBFhn%0Hp^6}f(UX;z31 z|3N&`bxzPJ$XFKr%sP*3N$1tTNXUXffqVRva~0m`Hgq&~@)}%xHCwMl34;0a-~d&o z(%#O73Kq#oI?jXf!a>ST*8xWZ7trE$!OJ<5cYPPTG3* z6UgLsaGSL?io}^D>uqgBCB(7RIoVde{(Rl{E5;4c508Cpho20=n4xGQPBC2NRb48v zKSt=RbuCbv@q*JUah01U-Bt7EpC4ad%)4Cyzc1Z=WlngB(eHQ9MUY+fO^i&+)c!H1 zlhx_6PU)>z7v5&N?yTGJl116CD8F;wd8Y}ebS_0pdt`aFoT^>0BqtXO@+`>{b=?lz zPUhtn8F|e)1ox53i#M71jBexKNoaX{JjRSMn$nPkCAUeO&J2>qB{|23?7zBzDbF)q ze_4?BvE}W%>&ANEGUS<$uTKeAfAk(rR)h%FIBvJ4$~x~rfChF<0o&mol}R;XU)ey~=x1h2dzeg3_Dleq>S|3Z5-Wrh3n7|R0A-|<)b_}Xx~P6` zj(?G28%S%lTLzrHmdqrXuAShy`kacWEB*j5i1Eop0}&oA5Guw#+nFGe(7Qh0JDnD5 z_vmo@4|?$s zO(SMAFs$8g8zt0qDIU>kR3cvw?|bYGm^QStlAiV~J(Y3PAqmC-xLS`WvQsMWqo`l% z%mlJZL!F%_#A)>+?bv9+;9{ReO!=M2@TOXL{dLRC^+VY#Y&?|1y}x7oBY6%ow&r_A16i_#9escv}y zt}W49QLg)933<<5x&}>(oAgTX0|-V)-??UYRhgZH&*bB8?9)N}@zv(-^lm`catfk1 z2lAr|+pE)NKv{JafYg+MXr|e>@p`t4e1LRVx5l%aMxutlMSrh>o-)TN87NY@sP_l9 zsxtPa%_EX4=`hy2AfD7QC$dH;t!X>H39&aBBK-$A!KE#1&U$2#V@=)GF3bPuKeev~(=R zP7QQ-F1D-JAj>SG2J`AE5fpV&bIo$?P>RyhtC>_JTbm;yFY*jUO0j4?WX6bPdtV~0 z7^jRhsv*I90;enQvM;_|)!LGZ-s z4rUX)U$JUuC4diDSvp>k54Ua7m%P(Icp-a(+HH4^w1`{(h0Tm4&%yKgy)7fIkr130 zQ$N&6o}BFZxPQ0dS#+;8FH#)4n^(Kf3Ahm)XO4VIkgm;4mw60C%c7a)W9Svmq;cKK zIuEH5gZup$7mP&43Ltt!!hc`zZg)BO#2WIt_^N9IUL0G+O7Gv5ocW9iXeJgd z?uT@!^Lg(+EXo9`17WID&Rev;QekqRI}0gp86S@WK|G(5uKBfX zxU5cs|Ht6-^ijExfPv8Kn}w!|ZXo`2G)knEj>mwax|B92Z+kz5lm3psA_e@+w++z} zpbgvrS{R*iyC9jr%gGK%$pQes2@ptSeW#P>+H=B+FX>MGBXg-^UfMrnke{IH`o4JK zzaVj_+u;N{83u+-dA)q&4VGV7xQ7U`WeB*J)rkNWM0j}QpqyR*_imwUdYe_uq7!w7 zO^EH*@TN94ah;m|d&vFaqGa-&0>C?G_R^3!!PzKK$2MeDXb5hU@qp4qz0g^&04tN07+Q!6Pm5>JoQZHOn>EK98_yIGms}}}W#)S2dg4&nR z*w=399KCQ_Tl9HDcf@5GUquskytd$iwT@is(X87gk95(*r{Y-Q%KeL!^Rr-SOjo|! 
zU{=I^`QUl;9+4QbjG1{ZdH@nqYD7U4**6->FUO#>ZncV5&m^Fq-v_WOE1|nzCAr8?$~P?s{p$o^tkl|9c>w-0trSSo#5(&}g39vT%+Hkfgpo z%g~i9b5zo=Mkt>mBip!kZ1Dhy(zOwN)RTWq>xJJGmYm(Fa_z2rqWW<-TUSujb1T1k zuqr{!rRB|R!wG4j`QYMQ+NB#V0D2YcwKFo5GKR9`^~6g#&2BOa&+6CSF1S}{Gbk|I z2UI*UobfIy2PSlqmv0vI^37Y4SMCDjQV+l|x&buN8y27CUd87K>=?J2vU%&Niw<8hR<3!Ef)X(Iu7vuVd}Y`!3=4)9)ntwWZc$ z;`crxbm}&E)tE$vOIE5JMwQH|9~r9m&J|- z*KyLnJOz;Sf_^f6F-gL0d1F;N`-Qo&QYIgLX8ROlmK?EOM;B$OoV)P_fgXcnwBfkt zy|m4K9++_XfXJ}-CuqVkKrKB6HM`D9dqhR^_&7AN$JA&70ehU#e*r`&z zSpZg`d1yOyd)u;8UL$p&p?@yt>I|mboSXGgqFrRKal9-29y7;^|DenMh+BF~RrHz} zeuk0lW#XA$3kBZDtXmNs)nZCR&Q@{m-P}*yrRzJWmYa@w@RULhcZDO-mw@}NQlz6C zHQ=qjuAS{aSC*UtTO_qAvgqUiV$>+ljXoU+ls9{Mxs3JR`s=hT2RibT*2m9OhJuCp zvN;o>?d!1;>nMOc4jvowKiFBwQr7p4~RRxLn~<7W@^9cfo9s)6KYqvIK5TSsZz4XjNN#bJFF-2&U$*@ zp0lAzWoITt#F@4;t@qQm$KGYT)qk9Rt_&07$C9K} zgC~1lQ*3#yrsD)fc(PUKNRyClHxHX%Y_XDqsFWOC3P7HuD)@RHU8eG~>i#SyCJj$4 zwC!~*DhI1{^K*9%Mfduw(!YXWiZ^D4We>k3U-EPc&DJZ`o4FKsVVi6Qw|JuT+W^Q7>$pHJVsTP+Q2m|+mw2&4-+2o@Vorl`ld`^+C;bL z(Z%Ex(&7Sd5YlPx?6cX0_Q(M0grr24lckqGR)!1$Ndm7jiOm5MbKS=$)@Ecqu|ifo zjY3<)ku9&dt$V%>%zKsE4%6}lhoqgo-<9a=^}76ohwPD)%U~sY0Fdyet^iHgo=cmD z70b*w^gKehxv6g-yr_^i?;}KU2NM;S+noC@A&K7dro6QMQkUs5^+mCt=ppzPi2K_sFhsh-;iaOT@kO(e8?v8aZ~J zu>xF1P^^TG0EUvk{&SD4eZb{Jc5P9|IUjbCM0DRQgxp=(4oh@I-V+NS#6F zQL7lIP)%ubXeziO9l-^ebJ2)tbun<$7#Zw7h=Ae4FUsvE(i{w{*RBXUZm;52ZI2 zo4tu1Kx`(L@K-z|K4E@?i7Ul#1M#A_*-u{0Q!|-fbX?0E+WNp*RAF=-#u3$6q-Dp7 zW%S98a%YV%GdVH3Yo!M4od{bC6$#Ij!KmrnO9Br+vsi0sibad(_nmehULWcK zK_RU`CgraE?_1s=K(WYBb5JYKXf$=D8~&&RAU$1*3x@4JA=Ek+R8vycD2O=4{CsE` zUB>mHs4^YnB}`r}I7ni9nS>s#Mb=slv2eTUu`qJqRm)!V=qSte>$zz!9d5J$wo3w0 zpA}=5J}W|}A|P1$HNg#_4cYDixEdzitq<{j(0N8P9AGL0wkSqWFNHl+tqU9d(sY4OHh+3>Wj_%mD zonybCE-NeswaAhe;?}p`b+A0)7H;=3^Cg^Yn@n!ECp#TksP5FQh*+xgCnf>PF?2nV z(xV5V#rA_;r>&Aw2zGvB;7}8?q5T$xd#S2)0ou0vo+7neMsk~Ry$aR0tNtLm89E_* z-MC(9STzT%xN*!<51+u{d>0QVsOOjN%qF7C!GvU4(o}uf+Zu}lbkQ+#$oBbK$7v-e zglk0%(8y2(K@IJtfVaTiJ;s_B1iBJPvUdK5s-ac>6JMc@-NTu&wL6UMH_M0MLCWG26}9y&RMPL!U6_#D_1e{sUz zMW#a_*PJt~?xoV&DKEg0+88#vxB$pY=(*as4HLlxOsht-nWgDN(U|P5T|nI9WZt?I zeT|H0i(wfRBk93oF^r(&ZS-InAK&+K{1)z|9n!VXZa~!WVPC)OYu3B#PLsn_yEQ1M zSgjm&G%5zO6GIdD)?|ULW3#^#_p+4HLofKIx?1rxL*3!c7`%$a=5|s2n{)uV{Nk6mhf)-drQ-340T<*%<0irc=pye?ula(f-O0XF{8a*z(Kfe^N4^E{gP8;~W7 z@!SQouweMy&>ESgQhd)_We>Za(N?3p?)$;MVDQ}Eb?c6h8Mo>Ri1Z}1IYb(`RxU4ZQJNf^O;bcwTY+p zIf}Gu&4vhh)vmWR9Y_!DV!A3?Ms+~MU1L@qnR_>C{`s0Gx~`TeLGp|!x-L!%9k{%k zw1`0=P`^DaD9X&5e{NTE!SN&){-H~L;C>RN;r2w)HC2jnJL`#YbXuDr)e(6F1OD)0 zcS2{psZ}~aQvE^n@lbN9n8yz%UMtSro92w9ies%q?Sivp^PW4S@vJ(on9=V$!2YXb1H210rJK=*zcOjvYF4tb($OQbF?SI5hpW@8jBN}{UzQET&+%reMM zG}GMBRyJ4xMa+u-Gb(sZW-jF8 zjw+*5g6G{&>@e_i#ON3wAsi~&?^Tm_c|=K1l)tvpH07Dp@6uV4h$g2d$)DxSpVb2M zFEXy2ak&O>7oMi5DQxvXUS1z4+UN_pgh>Uea@wesGJKU(2b#KvCwO5YK1hv*DJOfK z-M9e`pLH3O{qs4Fd3+bMF8LHaQ5T5oy1QmnVlKIsdl0>|qhK*;bQ|R!U44qLY6=Fm zByl6Gt~~hQGpsd8=Lm4r;}8W1w#0&G$mn>eaZ~l~l8l270Laxz;l|p|0xnjPzaY-S z7+Yf5oxH5?k29;bE4km-=2lW~%&u3uq?#bb4R%(S@#{v5a357Z8@2u_)8>=q557E_ zt#QSUEoCn#kpHFGGJV6giWd52Zw=3w`8px-O3gBdlAj4yIeI~ zKCDh<-a^%<94!_%ByoXPPv+?v`x|C#Yb9Ld3}RzOL92W5HlnFfeO&@JU4f1h19TcS zTV7$37Ix*+uZB$znmpo6T$1l0jJ#Fm6T@k(IkdJY(~WFsog?a%kH{&YH{07t{E16i zq9YVUokqEuUU<(01#~%u&`WFPI;eQMZ1wJa{DM~wmDDP}Yn&k48WXA9)|ySU-TBR- z7uKOdkt!B@=o7C}Z-I@US^H{9TB$1aqJBk9m+aVRY3mMRH#^APCCH72N8>gi_%$gV z+`yB24ep>0=shB#Dr zIV_duQXM_U>23g`luW@Tk$EA;edBBN$NPKuYno#NW?hLg{TR5&(7TSxG(tx-*=H(a z`L{0EF~bVirTfcjvs_&Sk1hu-%n>uHC(5!*mY!*eBYTpcNJX`xuxS#7f^zsFQ4vR~`hf^Ry+NM+tky1L2n z@DK$Js=`MN?hPB6^-3AaO&i4VZv*=X>}EF*A~n`_c$o$^e@Xhfno;q>kqc{4wqX_t 
+
+*/
+package main
+
+import "dbm-services/redis/db-tools/dbmon/cmd"
+
+func main() {
+	cmd.Execute()
+}
diff --git a/dbm-services/redis/db-tools/dbmon/models/mymongo/mymongo.go b/dbm-services/redis/db-tools/dbmon/models/mymongo/mymongo.go
new file mode 100644
index 0000000000..a39c05a151
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/models/mymongo/mymongo.go
@@ -0,0 +1,46 @@
+// Package mymongo is a thin wrapper around the official MongoDB Go driver
+package mymongo
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/options"
+)
+
+// MongoHost describes a MongoDB instance and its auth info
+type MongoHost struct {
+	Host   string
+	Port   int
+	User   string
+	Pass   string
+	AuthDb string
+}
+
+// LoginCheck verifies connectivity; currently a stub that always succeeds
+func (m *MongoHost) LoginCheck(timeout int) (bool, error) {
+	return true, nil
+}
+
+// ConnMongo builds a mongodb:// URI from the given parts (port is a string) and returns a connected client
+func ConnMongo(host, port, user, pass, authdb string) (*mongo.Client, error) {
+	mongoURI := fmt.Sprintf("mongodb://%s:%s@%s:%s/%s", user, pass, host, port, authdb)
+	// log.Printf("conn to %s", mongoURI)
+	// opts := options.Client().ApplyURI(mongoURI).SetWriteConcern(writeconcern.New(writeconcern.WMajority()))
+	opts := options.Client().ApplyURI(mongoURI)
+	client, err := mongo.NewClient(opts)
+	if err != nil {
+		return nil, err
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	defer cancel()
+	err = client.Connect(ctx)
+	if err != nil {
+		return nil, err
+	}
+	// defer client.Disconnect(ctx)
+	return client, err
+
+}
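A minimal usage sketch for ConnMongo, assuming a reachable instance; the host, port (passed as a string) and credentials below are placeholders, and Ping/readpref come from the mongo-driver API:

package main

import (
	"context"
	"fmt"
	"time"

	"dbm-services/redis/db-tools/dbmon/models/mymongo"

	"go.mongodb.org/mongo-driver/mongo/readpref"
)

func main() {
	// placeholders: host, port (a string here), user, password, auth db
	client, err := mymongo.ConnMongo("127.0.0.1", "27017", "dba", "xxxx", "admin")
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	defer client.Disconnect(ctx)
	// round-trip to the primary to confirm the connection actually works
	if err = client.Ping(ctx, readpref.Primary()); err != nil {
		panic(err)
	}
	fmt.Println("mongodb reachable")
}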
diff --git a/dbm-services/redis/db-tools/dbmon/models/myredis/client.go b/dbm-services/redis/db-tools/dbmon/models/myredis/client.go
new file mode 100644
index 0000000000..8d5a867501
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/models/myredis/client.go
@@ -0,0 +1,1577 @@
+package myredis
+
+import (
+	"context"
+	"dbm-services/redis/db-tools/dbmon/mylog"
+	"dbm-services/redis/db-tools/dbmon/pkg/consts"
+	"dbm-services/redis/db-tools/dbmon/util"
+	"fmt"
+	"path/filepath"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis/v8"
+)
+
+// RedisClient redis connection info
+type RedisClient struct {
+	Addr             string                      `json:"addr"`
+	Password         string                      `json:"password"`
+	DB               int                         `json:"db"`
+	MaxRetryTime     int                         `json:"maxRetryTimes"`
+	DbType           string                      `json:"dbType"` // db type
+	InstanceClient   *redis.Client               `json:"-"`
+	ClusterClient    *redis.ClusterClient        `json:"-"`
+	addrMapToNodes   map[string]*ClusterNodeData `json:"-"`
+	nodeIDMapToNodes map[string]*ClusterNodeData `json:"-"`
+	nodesMu          *sync.Mutex // guards reads/writes of addrMapToNodes and nodeIDMapToNodes
+}
+
+// NewRedisClient creates a redis client
+func NewRedisClient(addr, passwd string, db int, dbType string) (conn *RedisClient, err error) {
+	conn = &RedisClient{
+		Addr:         addr,
+		Password:     passwd,
+		DB:           db,
+		MaxRetryTime: 60, // 60 retries by default
+		DbType:       dbType,
+		nodesMu:      &sync.Mutex{},
+	}
+	err = conn.newConn()
+	if err != nil {
+		return nil, err
+	}
+	return
+}
+
+// NewRedisClientWithTimeout creates a redis client with a caller-specified timeout
+func NewRedisClientWithTimeout(addr, passwd string, db int, dbType string, timeout time.Duration) (
+	conn *RedisClient, err error) {
+	conn = &RedisClient{
+		Addr:         addr,
+		Password:     passwd,
+		DB:           db,
+		MaxRetryTime: int(timeout.Seconds()),
+		DbType:       dbType,
+		nodesMu:      &sync.Mutex{},
+	}
+	err = conn.newConn()
+	if err != nil {
+		return nil, err
+	}
+	return
+}
+
+func (db *RedisClient) newConn() (err error) {
+	// reconnect hook: when a failed command triggers a reconnect, make sure the right database is selected again
+	var redisConnHook = func(ctx context.Context, cn *redis.Conn) error {
+		pipe01 := cn.Pipeline()
+		_, err := pipe01.Select(context.TODO(), db.DB).Result()
+		if err != nil {
+			err = fmt.Errorf("newConn pipeline change db fail,err:%v", err)
+			mylog.Logger.Error(err.Error())
+			return err
+		}
+		_, err = pipe01.Exec(context.TODO())
+		if err != nil {
+			err = fmt.Errorf("newConn pipeline.exec db fail,err:%v", err)
+			mylog.Logger.Error(err.Error())
+			return err
+		}
+		return nil
+	}
+	redisOpt := &redis.Options{
+		Addr:            db.Addr,
+		DB:              db.DB,
+		DialTimeout:     1 * time.Minute,
+		ReadTimeout:     1 * time.Minute,
+		MaxConnAge:      24 * time.Hour,
+		MaxRetries:      db.MaxRetryTime, // automatic retries on failure
+		MinRetryBackoff: 1 * time.Second, // retry interval
+		MaxRetryBackoff: 1 * time.Second,
+		PoolSize:        10,
+		OnConnect:       redisConnHook,
+	}
+	clusterOpt := &redis.ClusterOptions{
+		Addrs:           []string{db.Addr},
+		DialTimeout:     1 * time.Minute,
+		ReadTimeout:     1 * time.Minute,
+		MaxConnAge:      24 * time.Hour,
+		MaxRetries:      db.MaxRetryTime, // automatic retries on failure
+		MinRetryBackoff: 1 * time.Second, // retry interval
+		MaxRetryBackoff: 1 * time.Second,
+		PoolSize:        10,
+		OnConnect:       redisConnHook,
+	}
+	if db.Password != "" {
+		redisOpt.Password = db.Password
+		clusterOpt.Password = db.Password
+	}
+	if db.DbType == consts.TendisTypeRedisCluster {
+		db.ClusterClient = redis.NewClusterClient(clusterOpt)
+		_, err = db.ClusterClient.Ping(context.TODO()).Result()
+	} else {
+		db.InstanceClient = redis.NewClient(redisOpt)
+		_, err = db.InstanceClient.Ping(context.TODO()).Result()
+	}
+	if err != nil {
+		errStr := fmt.Sprintf("redis new conn fail,sleep 10s then retry.err:%v,addr:%s", err, db.Addr)
+		mylog.Logger.Error(errStr)
+		return fmt.Errorf("redis new conn fail,err:%v addr:%s", err, db.Addr)
+	}
+	return
+}
+
+// RedisClusterConfigSetOnlyMasters run 'config set' on all running masters of a redis cluster
+func (db *RedisClient) RedisClusterConfigSetOnlyMasters(confName string, val string) (rets []string, err error) {
+	nodes, err := db.GetClusterNodes()
+	if err != nil {
+		return
+	}
+	confSetFunc := func(node001 *ClusterNodeData, confName, val string) (ret string, err error) {
+		cli01, err := NewRedisClient(node001.Addr, db.Password, 0, consts.TendisTypeRedisInstance)
+		if err != nil {
+			return
+		}
+		defer cli01.Close()
+		return cli01.ConfigSet(confName, val)
+	}
+	for _, nodeItem := range nodes {
+		node01 := nodeItem
+		if IsRunningMaster(node01) {
+			ret, err := confSetFunc(node01, confName, val)
+			if err != nil {
+				return rets, err
+			}
+			rets = append(rets, ret)
+		}
+	}
+	return
+}
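A minimal sketch of driving the client against a cluster; the seed address, password and maxmemory value are placeholders, mylog is assumed to be initialized by the caller (the client logs through mylog.Logger), and ConfigSet/Close are defined later in this file:

package main

import (
	"fmt"
	"time"

	"dbm-services/redis/db-tools/dbmon/models/myredis"
	"dbm-services/redis/db-tools/dbmon/pkg/consts"
)

func main() {
	// placeholders: cluster seed address and password
	cli, err := myredis.NewRedisClientWithTimeout("127.0.0.1:30000", "xxxx", 0,
		consts.TendisTypeRedisCluster, 10*time.Second)
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	// push one config item to every running master in the cluster
	rets, err := cli.RedisClusterConfigSetOnlyMasters("maxmemory", "2147483648")
	if err != nil {
		panic(err)
	}
	fmt.Println(rets)
}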
+
+// DoCommand runs an arbitrary command (switches db first)
+func (db *RedisClient) DoCommand(cmdArgv []string, dbnum int) (interface{}, error) {
+	err := db.SelectDB(dbnum)
+	if err != nil {
+		return nil, err
+	}
+	var ret interface{}
+	dstCmds := []interface{}{}
+	for _, cmd01 := range cmdArgv {
+		dstCmds = append(dstCmds, cmd01)
+	}
+	if db.DbType == consts.TendisTypeRedisCluster {
+		ret, err = db.ClusterClient.Do(context.TODO(), dstCmds...).Result()
+	} else {
+		ret, err = db.InstanceClient.Do(context.TODO(), dstCmds...).Result()
+	}
+	if err != nil && err != redis.Nil {
+		mylog.Logger.Error(fmt.Sprintf("Redis DoCommand fail,err:%v,command:%+v,addr:%s", err, cmdArgv, db.Addr))
+		return nil, err
+	} else if err != nil && err == redis.Nil {
+		return nil, err
+	}
+	return ret, nil
+}
+
+// SelectDB selects the given db
+func (db *RedisClient) SelectDB(dbNum int) (err error) {
+	if db.DB == dbNum {
+		return nil
+	}
+	if db.DbType != consts.TendisTypeRedisInstance {
+		err = fmt.Errorf("redis:%s dbtype:%s cannot change db", db.Addr, db.DbType)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("redis:%s not connect", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	pipe01 := db.InstanceClient.Pipeline()
+	_, err = pipe01.Select(context.TODO(), dbNum).Result()
+	if err != nil && err != redis.Nil {
+		err = fmt.Errorf("redis:%s selectdb fail,err:%v", db.Addr, err)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	_, err = pipe01.Exec(context.TODO())
+	if err != nil && err != redis.Nil {
+		err = fmt.Errorf("redis:%s selectdb fail,err:%v", db.Addr, err)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	db.DB = dbNum
+	return nil
+}
+
+// SelectDB1WhenClusterDisabled runs 'select 1' when cluster-enabled=no; otherwise stays on db 0
+func (db *RedisClient) SelectDB1WhenClusterDisabled() (err error) {
+	var clusterEnabled bool
+	clusterEnabled, err = db.IsClusterEnabled()
+	if err != nil {
+		return
+	}
+	if !clusterEnabled {
+		return db.SelectDB(1)
+	}
+	return nil
+}
+
+// ReadOnlyOnClusterSlave runs the 'readonly' command if this node is a cluster slave
+func (db *RedisClient) ReadOnlyOnClusterSlave() (err error) {
+	// the 'readonly' command can only be sent through a standalone redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("'readonly' command redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return err
+	}
+	var clusterEnabled bool
+	var role string
+	clusterEnabled, err = db.IsClusterEnabled()
+	if err != nil {
+		return
+	}
+	role, err = db.GetRole()
+	if err != nil {
+		return
+	}
+	if clusterEnabled && role == consts.RedisSlaveRole {
+		err = db.InstanceClient.ReadOnly(context.TODO()).Err()
+		if err != nil {
+			err = fmt.Errorf("'readonly' fail,err:%v,addr:%s", err, db.Addr)
+			mylog.Logger.Error(err.Error())
+			return
+		}
+	}
+	return nil
+}
+
+// DelForce deletes a key
+func (db *RedisClient) DelForce(keyname string) (ret int64, err error) {
+	if db.DbType == consts.TendisTypeRedisCluster {
+		ret, err = db.ClusterClient.Del(context.TODO(), keyname).Result()
+	} else {
+		ret, err = db.InstanceClient.Del(context.TODO(), keyname).Result()
+	}
+	if err != nil && err != redis.Nil {
+		mylog.Logger.Error(fmt.Sprintf("Redis 'del %s' command fail,err:%v,addr:%s", keyname, err, db.Addr))
+		return 0, err
+	}
+	return
+}
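A sketch of DoCommand against a standalone instance (only TendisTypeRedisInstance clients may switch db); the address and password are placeholders:

package main

import (
	"fmt"

	"dbm-services/redis/db-tools/dbmon/models/myredis"
	"dbm-services/redis/db-tools/dbmon/pkg/consts"
)

func main() {
	// placeholders: standalone instance address and password
	cli, err := myredis.NewRedisClient("127.0.0.1:30000", "xxxx", 0, consts.TendisTypeRedisInstance)
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	// DoCommand selects db 1 first, then sends the raw command
	ret, err := cli.DoCommand([]string{"SET", "k1", "v1"}, 1)
	if err != nil {
		panic(err)
	}
	fmt.Println(ret) // "OK" on success
}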
+
+// KeyType returns the type of a key
+func (db *RedisClient) KeyType(keyname string) (keyType string, err error) {
+	if db.DbType == consts.TendisTypeRedisCluster {
+		keyType, err = db.ClusterClient.Type(context.TODO(), keyname).Result()
+	} else {
+		keyType, err = db.InstanceClient.Type(context.TODO(), keyname).Result()
+	}
+	if err != nil && err != redis.Nil {
+		mylog.Logger.Error(fmt.Sprintf("Redis 'type %s' command fail,err:%v,addr:%s", keyname, err, db.Addr))
+		return
+	}
+	return
+}
+
+// DbSize runs 'dbsize'
+func (db *RedisClient) DbSize() (ret int64, err error) {
+	if db.DbType == consts.TendisTypeRedisCluster {
+		ret, err = db.ClusterClient.DBSize(context.TODO()).Result()
+	} else {
+		ret, err = db.InstanceClient.DBSize(context.TODO()).Result()
+	}
+	if err != nil && err != redis.Nil {
+		mylog.Logger.Error(fmt.Sprintf("Redis 'dbsize' command fail,err:%v,addr:%s", err, db.Addr))
+		return
+	}
+	return ret, nil
+}
+
+// Info runs 'info [section]' and returns the result as a map
+func (db *RedisClient) Info(section string) (infoRet map[string]string, err error) {
+	infoRet = make(map[string]string)
+	var str01 string
+	ctx := context.TODO()
+	if section == "" && db.DbType == consts.TendisTypeRedisCluster {
+		str01, err = db.ClusterClient.Info(ctx).Result()
+	} else if section != "" && db.DbType == consts.TendisTypeRedisCluster {
+		str01, err = db.ClusterClient.Info(ctx, section).Result()
+	} else if section == "" && db.DbType != consts.TendisTypeRedisCluster {
+		str01, err = db.InstanceClient.Info(ctx).Result()
+	} else if section != "" && db.DbType != consts.TendisTypeRedisCluster {
+		str01, err = db.InstanceClient.Info(ctx, section).Result()
+	}
+	if err != nil {
+		err = fmt.Errorf("redis:%s 'info %s' fail,err:%v", db.Addr, section, err)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	infoList := strings.Split(str01, "\n")
+	for _, infoItem := range infoList {
+		infoItem = strings.TrimSpace(infoItem)
+		if strings.HasPrefix(infoItem, "#") {
+			continue
+		}
+		if len(infoItem) == 0 {
+			continue
+		}
+		list01 := strings.SplitN(infoItem, ":", 2)
+		if len(list01) < 2 {
+			continue
+		}
+		list01[0] = strings.TrimSpace(list01[0])
+		list01[1] = strings.TrimSpace(list01[1])
+		infoRet[list01[0]] = list01[1]
+	}
+	return
+}
+
+// GetTendisType returns the redis flavor: RedisInstance, TendisplusInstance or TendisSSDInstance
+func (db *RedisClient) GetTendisType() (dbType string, err error) {
+	var infoRet map[string]string
+	infoRet, err = db.Info("server")
+	if err != nil {
+		return
+	}
+	version := infoRet["redis_version"]
+	if strings.Contains(version, "-rocksdb-") {
+		dbType = consts.TendisTypeTendisplusInsance
+	} else if strings.Contains(version, "-TRedis-") {
+		dbType = consts.TendisTypeTendisSSDInsance
+	} else {
+		dbType = consts.TendisTypeRedisInstance
+	}
+	return
+}
+
+// GetDir runs 'config get dir' to fetch the data directory
+func (db *RedisClient) GetDir() (dir string, err error) {
+	var ok bool
+	confRet, err := db.ConfigGet("dir")
+	if err != nil {
+		return
+	}
+	dir, ok = confRet["dir"]
+	if !ok {
+		err = fmt.Errorf("config get dir result not include dir?,result:%+v,addr:%s", confRet, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	dir = strings.TrimPrefix(dir, `"`)
+	dir = strings.TrimSuffix(dir, `"`)
+	return
+}
+
+// GetDumpDir runs 'config get dumpdir' to fetch the dumpdir (where binlogs are kept)
+func (db *RedisClient) GetDumpDir() (dir string, err error) {
+	var ok bool
+	confRet, err := db.ConfigGet("dumpdir")
+	if err != nil {
+		return
+	}
+	dir, ok = confRet["dumpdir"]
+	if !ok {
+		err = fmt.Errorf("config get dumpdir result not include dumpdir?,result:%+v,addr:%s", confRet, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	dir = strings.TrimPrefix(dir, `"`)
+	dir = strings.TrimSuffix(dir, `"`)
+	return
+}
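A sketch that combines the info/config helpers above to detect the instance flavor and its data directory; the address and password are placeholders, and dumpdir only exists on tendisplus:

package main

import (
	"fmt"

	"dbm-services/redis/db-tools/dbmon/models/myredis"
	"dbm-services/redis/db-tools/dbmon/pkg/consts"
)

func main() {
	// placeholder address/password
	cli, err := myredis.NewRedisClient("127.0.0.1:30000", "xxxx", 0, consts.TendisTypeRedisInstance)
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	dbType, err := cli.GetTendisType()
	if err != nil {
		panic(err)
	}
	dir, err := cli.GetDir()
	if err != nil {
		panic(err)
	}
	fmt.Printf("flavor=%s datadir=%s\n", dbType, dir)
	if dbType == consts.TendisTypeTendisplusInsance {
		// dumpdir (the binlog dir) is tendisplus-specific
		dumpDir, _ := cli.GetDumpDir()
		fmt.Println("dumpdir:", dumpDir)
	}
}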
+
+// GetKvstoreCount runs 'config get kvstorecount' to fetch the kvstore count
+func (db *RedisClient) GetKvstoreCount() (kvstorecount int, err error) {
+	var ok bool
+	var kvstorecountStr string
+	confRet, err := db.ConfigGet("kvstorecount")
+	if err != nil {
+		return 0, err
+	}
+	kvstorecountStr, ok = confRet["kvstorecount"]
+	if !ok {
+		err = fmt.Errorf("config get kvstorecount result not include kvstorecount?,result:%+v,addr:%s", confRet, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return 0, err
+	}
+
+	kvstorecountStr = strings.TrimPrefix(kvstorecountStr, `"`)
+	kvstorecountStr = strings.TrimSuffix(kvstorecountStr, `"`)
+	kvstorecount, err = strconv.Atoi(kvstorecountStr)
+	if err != nil {
+		err = fmt.Errorf("config get kvstorecount result '%s' to int fail,err:%v,addr:%s", kvstorecountStr, err, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	return
+}
+
+// GetRole reads the role from 'info replication'
+func (db *RedisClient) GetRole() (role string, err error) {
+	var infoRet map[string]string
+	infoRet, err = db.Info("replication")
+	if err != nil {
+		return
+	}
+	role = infoRet["role"]
+	return
+}
+
+// GetMasterData reads the master info from 'info replication'
+func (db *RedisClient) GetMasterData() (masterHost, masterPort, linkStatus, selfRole string,
+	masterLastIOSec int64, err error) {
+	var infoRet map[string]string
+	infoRet, err = db.Info("replication")
+	if err != nil {
+		return
+	}
+	selfRole = infoRet["role"]
+	if selfRole != consts.RedisSlaveRole {
+		return
+	}
+	masterHost = infoRet["master_host"]
+	masterPort = infoRet["master_port"]
+	linkStatus = infoRet["master_link_status"]
+	masterLastIOSec, err = strconv.ParseInt(infoRet["master_last_io_seconds_ago"], 10, 64)
+	if err != nil {
+		err = fmt.Errorf("redis:%s 'info replication' master_last_io_seconds_ago(%s) to int64 fail,err:%v",
+			db.Addr, infoRet["master_last_io_seconds_ago"], err)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	return
+}
+
+// Bgsave runs the 'bgsave' command
+func (db *RedisClient) Bgsave() (ret string, err error) {
+	// 'bgsave' can only be sent through a standalone redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("bgsave command redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return "", err
+	}
+	str01, err := db.InstanceClient.BgSave(context.TODO()).Result()
+	if err != nil {
+		err = fmt.Errorf("redis:%s 'bgsave' fail,err:%v", db.Addr, err)
+		mylog.Logger.Error(err.Error())
+		return str01, err
+	}
+	return str01, nil
+}
+
+// IsBgsaveInProgress reports whether an rdb bgsave is in progress
+func (db *RedisClient) IsBgsaveInProgress() (ret bool, err error) {
+	persisInfo, err := db.Info("Persistence")
+	if err != nil {
+		return false, err
+	}
+	inProgress := persisInfo["rdb_bgsave_in_progress"]
+	if inProgress == "1" {
+		return true, nil
+	}
+	return false, nil
+}
+
+// BgRewriteAOF runs the 'bgrewriteaof' command
+func (db *RedisClient) BgRewriteAOF() (ret string, err error) {
+	// 'bgrewriteaof' can only be sent through a standalone redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("bgrewriteaof command redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return "", err
+	}
+	str01, err := db.InstanceClient.BgRewriteAOF(context.TODO()).Result()
+	if err != nil {
+		err = fmt.Errorf("redis:%s 'bgrewriteaof' fail,err:%v", db.Addr, err)
+		mylog.Logger.Error(err.Error())
+		return str01, err
+	}
+	return str01, nil
+}
+
+// IsAofRewriteInProgress reports whether an AOF rewrite is in progress
+func (db *RedisClient) IsAofRewriteInProgress() (ret bool, err error) {
+	persisInfo, err := db.Info("Persistence")
+	if err != nil {
+		return false, err
+	}
+	inProgress := persisInfo["aof_rewrite_in_progress"]
+	if inProgress == "1" {
+		return true, nil
+	}
+	return false, nil
+}
+
+// BgRewriteAOFAndWaitForDone runs 'bgrewriteaof' and waits until it finishes
+func (db *RedisClient) BgRewriteAOFAndWaitForDone() (err error) {
+	_, err = db.BgRewriteAOF()
+	if err != nil {
+		return err
+	}
+	count := 0 // log once per minute
+	var msg string
+	var inProgress bool
+	for {
+		time.Sleep(5 * time.Second)
+		inProgress, err = db.IsAofRewriteInProgress()
+		if err != nil {
+			return err
+		}
+		if !inProgress {
+			msg = fmt.Sprintf("redis:%s bgrewriteaof success", db.Addr)
+			mylog.Logger.Info(msg)
+			return nil
+		}
+		count++
+		if (count % 12) == 0 {
+			msg = fmt.Sprintf("redis:%s bgrewriteaof is still running ...", db.Addr)
+			mylog.Logger.Info(msg)
+		}
+	}
+}
+
+// BgSaveAndWaitForFinish runs 'bgsave' and waits until it finishes
+func (db *RedisClient) BgSaveAndWaitForFinish() (err error) {
+	var inProgress bool
+	if inProgress, err = db.IsBgsaveInProgress(); err != nil {
+		return err
+	}
+
+	if !inProgress {
+		if _, err = db.Bgsave(); err != nil {
+			return err
+		}
+	}
+	count := 0 // log once per minute
+	var msg string
+	for {
+		time.Sleep(5 * time.Second)
+		inProgress, err = db.IsBgsaveInProgress()
+		if err != nil {
+			return err
+		}
+		if !inProgress {
+			msg = fmt.Sprintf("redis:%s bgsave success", db.Addr)
+			mylog.Logger.Info(msg)
+			return nil
+		}
+		count++
+		if (count % 12) == 0 {
+			msg = fmt.Sprintf("redis:%s bgsave is still running ...", db.Addr)
+			mylog.Logger.Info(msg)
+		}
+	}
+}
+
+// TendisplusBackup runs 'backup <targetDir>'
+func (db *RedisClient) TendisplusBackup(targetDir string) (ret string, err error) {
+	// 'backup' can only be sent through a standalone redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("backup command redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return "", err
+	}
+	cmd := []interface{}{"backup", targetDir}
+	res, err := db.InstanceClient.Do(context.TODO(), cmd...).Result()
+	if err != nil {
+		err = fmt.Errorf("%+v fail,err:%v,addr:%s", cmd, err, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return "", err
+	}
+	ret = res.(string)
+	return ret, nil
+}
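A sketch of the blocking backup helpers above, assuming a standalone redis instance (address and password are placeholders); BgSaveAndWaitForFinish triggers bgsave only when none is already running and then polls every 5 seconds:

package main

import (
	"dbm-services/redis/db-tools/dbmon/models/myredis"
	"dbm-services/redis/db-tools/dbmon/pkg/consts"
)

func main() {
	// placeholder address/password; bgsave needs a standalone (non-cluster) client
	cli, err := myredis.NewRedisClient("127.0.0.1:30000", "xxxx", 0, consts.TendisTypeRedisInstance)
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	// blocks until the rdb snapshot has been written
	if err = cli.BgSaveAndWaitForFinish(); err != nil {
		panic(err)
	}
}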
+
+// IsTendisplusBackupInProgress reports whether a tendisplus backup is running
+func (db *RedisClient) IsTendisplusBackupInProgress() (ret bool, err error) {
+	bakInfo, err := db.Info("backup")
+	if err != nil {
+		return false, err
+	}
+	inProgress := bakInfo["current-backup-running"]
+	if inProgress == "yes" {
+		return true, nil
+	}
+	return false, nil
+}
+
+// TendisplusBackupAndWaitForDone runs the backup command and waits until it finishes
+func (db *RedisClient) TendisplusBackupAndWaitForDone(targetDir string) (err error) {
+	_, err = db.TendisplusBackup(targetDir)
+	if err != nil {
+		return err
+	}
+	count := 0 // log once per minute
+	var msg string
+	var inProgress bool
+	for {
+		time.Sleep(5 * time.Second)
+		inProgress, err = db.IsTendisplusBackupInProgress()
+		if err != nil {
+			return err
+		}
+		if !inProgress {
+			msg = fmt.Sprintf("tendisplus:%s backup success", db.Addr)
+			mylog.Logger.Info(msg)
+			return nil
+		}
+		count++
+		if (count % 12) == 0 {
+			msg = fmt.Sprintf("tendisplus:%s backup is still running ...", db.Addr)
+			mylog.Logger.Info(msg)
+		}
+	}
+}
+
+// IsTendisSSDBackupInProgress reports whether a tendisSSD backup is running
+func (db *RedisClient) IsTendisSSDBackupInProgress() (ret bool, err error) {
+	bakInfo, err := db.Info("Backups")
+	if err != nil {
+		return false, err
+	}
+	inProgress := bakInfo["current-backup-running"]
+	if inProgress == "yes" {
+		return true, nil
+	}
+	return false, nil
+}
+
+// WaitForBackupFinish waits for any running backup (redis, tendisplus or tendisSSD) to finish
+func (db *RedisClient) WaitForBackupFinish() (err error) {
+	count := 0 // log once per minute
+	var msg string
+	var aofRewriteRunning bool
+	var bgsaveRunning bool
+	var plusBakRunning bool
+	var ssdBakRunning bool
+	var tendisType string
+	tendisType, err = db.GetTendisType()
+	if err != nil {
+		return
+	}
+	for {
+		switch tendisType {
+		case consts.TendisTypeRedisInstance:
+			aofRewriteRunning, _ = db.IsAofRewriteInProgress()
+			bgsaveRunning, err = db.IsBgsaveInProgress()
+			msg = fmt.Sprintf("redis:%s bgrewriteaof or bgsave is still running ...", db.Addr)
+		case consts.TendisTypeTendisplusInsance:
+			plusBakRunning, err = db.IsTendisplusBackupInProgress()
+			msg = fmt.Sprintf("tendisplus:%s backup is still running ...", db.Addr)
+		case consts.TendisTypeTendisSSDInsance:
+			ssdBakRunning, err = db.IsTendisSSDBackupInProgress()
+			msg = fmt.Sprintf("tendisSSD:%s backup is still running ...", db.Addr)
+		}
+		if err != nil {
+			return
+		}
+		if aofRewriteRunning || bgsaveRunning || plusBakRunning || ssdBakRunning {
+			count++
+			if (count % 12) == 0 {
+				mylog.Logger.Info(msg)
+			}
+			time.Sleep(5 * time.Second)
+			continue
+		}
+		msg = fmt.Sprintf("redis:%s rdb_bgsave_in_progress=0,aof_rewrite_in_progress=0,current-backup-running=no", db.Addr)
+		mylog.Logger.Info(msg)
+		break
+	}
+	return nil
+}
+
+// TendisSSDBackup pipelines 'binlogsize' and 'backup <targetDir>' and returns both results
+func (db *RedisClient) TendisSSDBackup(targetDir string) (
+	binlogsizeRet TendisSSDBinlogSize, backupCmdRet string, err error,
+) {
+	// 'backup' can only be sent through a standalone redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("backup command redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	pipe01 := db.InstanceClient.Pipeline()
+	cmd := []interface{}{"binlogsize"}
+	binlogRetInter := pipe01.Do(context.TODO(), cmd...)
+	cmd = []interface{}{"backup", targetDir}
+	backupRetInter := pipe01.Do(context.TODO(), cmd...)
+
+	_, err = pipe01.Exec(context.TODO())
+	if err != nil && err != redis.Nil {
+		err = fmt.Errorf("redis(%s) pipeline.Exec fail,err:%v,cmds:['binlogsize','backup %s']", db.Addr, err, targetDir)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	binlogsizeRet, err = db.parseBinlogSizeCmdRet(binlogRetInter.Val())
+	if err != nil {
+		return
+	}
+
+	backupCmdRet = backupRetInter.Val().(string)
+	return
+}
+
+	_, err = pipe01.Exec(context.TODO())
+	if err != nil && err != redis.Nil {
+		err = fmt.Errorf("redis(%s) pipeline.Exec fail,err:%v,cmds:['binlogsize','backup %s']", db.Addr, err, targetDir)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	binlogsizeRet, err = db.parseBinlogSizeCmdRet(binlogRetInter.Val())
+	if err != nil {
+		return
+	}
+
+	backupCmdRet = backupRetInter.Val().(string)
+	return
+}
+
+// TendisSSDBackupAndWaitForDone runs the 'backup' command and waits for it to finish
+func (db *RedisClient) TendisSSDBackupAndWaitForDone(targetDir string) (
+	binlogsizeRet TendisSSDBinlogSize, backupCmdRet string, err error,
+) {
+	binlogsizeRet, backupCmdRet, err = db.TendisSSDBackup(targetDir)
+	if err != nil {
+		return
+	}
+	count := 0 // log progress once per minute
+	var msg string
+	var inProgress bool
+	for {
+		time.Sleep(5 * time.Second)
+		inProgress, err = db.IsTendisSSDBackupInProgress()
+		if err != nil {
+			return
+		}
+		if !inProgress {
+			msg = fmt.Sprintf("tendisSSD:%s backup success", db.Addr)
+			mylog.Logger.Info(msg)
+			return
+		}
+		count++
+		if (count % 12) == 0 {
+			msg = fmt.Sprintf("tendisSSD:%s backup is still running ...", db.Addr)
+			mylog.Logger.Info(msg)
+		}
+	}
+}
+
+// Scan runs the 'scan' command
+func (db *RedisClient) Scan(match string, cursor uint64, count int64) (keys []string, retcursor uint64, err error) {
+	// the 'scan' command must be sent over a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("Scan redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	keys, retcursor, err = db.InstanceClient.Scan(context.TODO(), cursor, match, count).Result()
+	if err != nil && err != redis.Nil {
+		err = fmt.Errorf("redis scan fail,err:%v,match:%s,cursor:%d,count:%d,addr:%s", err, match, cursor, count, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	return keys, retcursor, nil
+}
+
+// Sscan runs the 'sscan' command
+func (db *RedisClient) Sscan(keyname string, cursor uint64, match string, count int64) (fields []string,
+	retCursor uint64, err error) {
+	if db.DbType == consts.TendisTypeRedisCluster {
+		fields, retCursor, err = db.ClusterClient.SScan(context.TODO(), keyname, cursor, match, count).Result()
+	} else {
+		fields, retCursor, err = db.InstanceClient.SScan(context.TODO(), keyname, cursor, match, count).Result()
+	}
+	if err != nil && err != redis.Nil {
+		mylog.Logger.Error(fmt.Sprintf("Redis 'sscan %s %d match %s count %d' command fail,err:%v,addr:%s",
+			keyname, cursor, match, count, err, db.Addr))
+		return fields, 0, err
+	}
+	return fields, retCursor, nil
+}
+
+// GetClusterNodes runs 'cluster nodes' and parses the result
+func (db *RedisClient) GetClusterNodes() (clusterNodes []*ClusterNodeData, err error) {
+	db.nodesMu.Lock()
+	defer db.nodesMu.Unlock()
+	var nodesStr01 string
+	if db.DbType == consts.TendisTypeRedisCluster {
+		nodesStr01, err = db.ClusterClient.ClusterNodes(context.TODO()).Result()
+	} else {
+		nodesStr01, err = db.InstanceClient.ClusterNodes(context.TODO()).Result()
+	}
+	if err != nil {
+		err = fmt.Errorf("cluster nodes fail,err:%v,addr:%s", err, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	clusterNodes, err = DecodeClusterNodes(nodesStr01)
+	if err != nil {
+		return
+	}
+	db.addrMapToNodes = make(map[string]*ClusterNodeData)
+	db.nodeIDMapToNodes = make(map[string]*ClusterNodeData)
+	for _, tmpItem := range clusterNodes {
+		infoItem := tmpItem
+		db.addrMapToNodes[infoItem.Addr] = infoItem
+		db.nodeIDMapToNodes[infoItem.NodeID] = infoItem
+	}
+	return
+}
+
+// GetAddrMapToNodes returns the addr => clusterNode map
+func (db *RedisClient) GetAddrMapToNodes() (ret map[string]*ClusterNodeData, err error) {
+	_, err = db.GetClusterNodes()
+	if err != nil {
+		return
+	}
+	ret = db.addrMapToNodes
+	return
+}
+
+// GetNodeIDMapToNodes returns the nodeID => clusterNode map
+func (db *RedisClient) GetNodeIDMapToNodes() (ret map[string]*ClusterNodeData, err error) {
+	_, err = db.GetClusterNodes()
+	if err != nil {
+		return
+	}
+	ret = db.nodeIDMapToNodes
+	return
+}
+
+// ConfigSet runs 'config set', falling back to 'confxx set' on tendis
+func (db *RedisClient) ConfigSet(confName string, val string) (string, error) {
+	var err error
+	var ok bool
+	// the 'config set' command must be sent over a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("ConfigSet redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return "", err
+	}
+	// try 'config set' first; fall back to 'confxx set' if the command is unknown
+	data, err := db.InstanceClient.ConfigSet(context.TODO(), confName, val).Result()
+	if err != nil && strings.Contains(err.Error(), "ERR unknown command") {
+		cmd := []interface{}{"confxx", "set", confName, val}
+		confRet, err := db.InstanceClient.Do(context.TODO(), cmd...).Result()
+		if err != nil {
+			err = fmt.Errorf("%+v fail,err:%v,addr:%s", cmd, err, db.Addr)
+			mylog.Logger.Error(err.Error())
+			return "", err
+		}
+		data, ok = confRet.(string)
+		if !ok {
+			err = fmt.Errorf(`confxx set result not string,cmd:%v,cmdRet:%v,nodeAddr:%s`,
+				cmd, confRet, db.Addr)
+			mylog.Logger.Error(err.Error())
+			return "", err
+		}
+	} else if err != nil {
+		err = fmt.Errorf("redis config set %s %s fail,err:%v,addr:%s", confName, val, err, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return data, err
+	}
+	return data, nil
+}
+
+// ConfigGet runs 'config get', falling back to 'confxx get' on tendis
+func (db *RedisClient) ConfigGet(confName string) (ret map[string]string, err error) {
+	var confInfos []interface{}
+	var ok bool
+	ret = map[string]string{}
+
+	// try 'config get' first; fall back to 'confxx get' if the command is unknown
+	if db.DbType == consts.TendisTypeRedisCluster {
+		confInfos, err = db.ClusterClient.ConfigGet(context.TODO(), confName).Result()
+	} else {
+		confInfos, err = db.InstanceClient.ConfigGet(context.TODO(), confName).Result()
+	}
+	if err != nil && strings.Contains(err.Error(), "ERR unknown command") {
+		cmd := []interface{}{"confxx", "get", confName}
+		var confRet interface{}
+		if db.DbType == consts.TendisTypeRedisCluster {
+			confRet, err = db.ClusterClient.Do(context.TODO(), cmd...).Result()
+		} else {
+			confRet, err = db.InstanceClient.Do(context.TODO(), cmd...).Result()
+		}
+		if err != nil {
+			err = fmt.Errorf("cmd:%+v fail,err:%v,addr:%s", cmd, err, db.Addr)
+			mylog.Logger.Error(err.Error())
+			return ret, err
+		}
+		confInfos, ok = confRet.([]interface{})
+		if !ok {
+			err = fmt.Errorf("cmd:%v result not []interface{},cmdRet:%v,nodeAddr:%s", cmd, confRet, db.Addr)
+			mylog.Logger.Error(err.Error())
+			return ret, err
+		}
+	} else if err != nil {
+		err = fmt.Errorf("cmd:config get %q failed,err:%v", confName, err)
+		mylog.Logger.Error(err.Error())
+		return ret, err
+	}
+
+	var k01, v01 string
+	for idx, confItem := range confInfos {
+		conf01 := confItem.(string)
+		if idx%2 == 0 {
+			k01 = conf01
+			continue
+		}
+		v01 = conf01
+		ret[k01] = v01
+	}
+	return ret, nil
+}
+
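+// Illustrative fallback usage (a sketch only): tendis forks rename the config
+// family to confxx, and both helpers above hide that difference, so a caller
+// can stay version-agnostic (cli being any *RedisClient):
+//
+//	vals, err := cli.ConfigGet("maxmemory") // transparently retried as 'confxx get'
+//	if err == nil {
+//		_, err = cli.ConfigSet("maxmemory", vals["maxmemory"])
+//	}
+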
+// ConfigRewrite runs 'config rewrite', falling back to 'confxx rewrite' on tendis
+func (db *RedisClient) ConfigRewrite() (string, error) {
+	var err error
+	var data string
+	var ok bool
+	// the 'config rewrite' command must be sent over a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("ConfigRewrite redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return "", err
+	}
+	data, err = db.InstanceClient.ConfigRewrite(context.TODO()).Result()
+	if err != nil && strings.Contains(err.Error(), "ERR unknown command") {
+		cmd := []interface{}{"confxx", "rewrite"}
+		confRet, err := db.InstanceClient.Do(context.TODO(), cmd...).Result()
+		if err != nil {
+			err = fmt.Errorf("%+v fail,err:%v,addr:%s", cmd, err, db.Addr)
+			mylog.Logger.Error(err.Error())
+			return "", err
+		}
+		data, ok = confRet.(string)
+		if !ok {
+			err = fmt.Errorf(
+				`confxx rewrite result not string,cmd:%v,cmdRet:%v,addr:%s`,
+				cmd, confRet, db.Addr)
+			mylog.Logger.Error(err.Error())
+			return "", err
+		}
+	} else if err != nil {
+		err = fmt.Errorf("redis config rewrite fail,err:%v,addr:%s", err, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return "", err
+	}
+	return data, nil
+}
+
+// SlaveOf runs the 'slaveof' command
+func (db *RedisClient) SlaveOf(masterIP, masterPort string) (ret string, err error) {
+	// the 'slaveof' command must be sent over a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("SlaveOf redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	ret, err = db.InstanceClient.SlaveOf(context.TODO(), masterIP, masterPort).Result()
+	if err != nil {
+		err = fmt.Errorf("'slaveof %s %s' failed,err:%v,addr:%s", masterIP, masterPort, err, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	return
+}
+
+// IsClusterEnabled reports whether 'cluster-enabled' is turned on
+func (db *RedisClient) IsClusterEnabled() (clusterEnabled bool, err error) {
+	confData, err := db.ConfigGet("cluster-enabled")
+	if err != nil {
+		return
+	}
+	val, ok := confData["cluster-enabled"]
+	if ok && (strings.ToLower(val) == "yes" ||
+		strings.ToLower(val) == "on" ||
+		strings.ToLower(val) == "1") {
+		clusterEnabled = true
+	}
+	return
+}
+
+// ClusterMeet runs the 'cluster meet' command
+func (db *RedisClient) ClusterMeet(ip, port string) (ret string, err error) {
+	if db.DbType == consts.TendisTypeRedisCluster {
+		ret, err = db.ClusterClient.ClusterMeet(context.TODO(), ip, port).Result()
+	} else {
+		ret, err = db.InstanceClient.ClusterMeet(context.TODO(), ip, port).Result()
+	}
+	if err != nil {
+		err = fmt.Errorf("redis(%s) 'cluster meet %s %s' failed,err:%v", db.Addr, ip, port, err)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	return
+}
+
+// ClusterAddSlots runs the 'cluster addslots' command
+func (db *RedisClient) ClusterAddSlots(slots []int) (ret string, err error) {
+	// the 'cluster addslots' command must be sent over a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("ClusterAddSlots redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	ret, err = db.InstanceClient.ClusterAddSlots(context.TODO(), slots...).Result()
+	if err != nil {
+		slotStr := ConvertSlotToStr(slots)
+		err = fmt.Errorf("redis(%s) 'cluster addslots %s' failed,err:%v", db.Addr, slotStr, err)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	return
+}
+
+// ClusterReplicate runs the 'cluster replicate' command
+func (db *RedisClient) ClusterReplicate(masterID string) (ret string, err error) {
+	// the 'cluster replicate' command must be sent over a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("ClusterReplicate redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	ret, err = db.InstanceClient.ClusterReplicate(context.TODO(), masterID).Result()
+	if err != nil {
+		err = fmt.Errorf("'cluster replicate %s' failed,err:%v,addr:%s", masterID, err, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	return
+}
+
+// GetMyself returns the 'myself' node info from 'cluster nodes'
+func (db *RedisClient) GetMyself() (ret *ClusterNodeData, err error) {
+	// finding 'myself' in cluster nodes requires a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("GetMyself redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return nil, err
+	}
+	addrMap, err := db.GetAddrMapToNodes()
+	if err != nil {
+		return ret, err
+	}
+	for _, info01 := range addrMap {
+		infoItem := info01
+		if infoItem.IsMyself {
+			ret = infoItem
+			break
+		}
+	}
+	return ret, nil
+}
+
+// TendisplusDataSize returns the tendisplus data size in bytes
+// ('info Dataset' rocksdb.total-sst-files-size)
+func (db *RedisClient) TendisplusDataSize() (dataSize uint64, err error) {
+	// 'info Dataset' must be sent over a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("TendisplusDataSize redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	var infoRet map[string]string
+	infoRet, err = db.Info("Dataset")
+	if err != nil {
+		return
+	}
+	sizeStr := infoRet["rocksdb.total-sst-files-size"]
+	dataSize, err = strconv.ParseUint(sizeStr, 10, 64)
+	if err != nil {
+		err = fmt.Errorf("strconv.ParseUint fail,err:%v,value:%s", err, sizeStr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	return
+}
+
+// RedisInstanceDataSize returns the redis data size in bytes ('info memory' used_memory)
+func (db *RedisClient) RedisInstanceDataSize() (dataSize uint64, err error) {
+	// 'info memory' must be sent over a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("RedisInstanceDataSize redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	var infoRet map[string]string
+	infoRet, err = db.Info("memory")
+	if err != nil {
+		return
+	}
+	sizeStr := infoRet["used_memory"]
+	dataSize, err = strconv.ParseUint(sizeStr, 10, 64)
+	if err != nil {
+		err = fmt.Errorf("strconv.ParseUint fail,err:%v,value:%s", err, sizeStr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	return
+}
+
+// TendisSSDDataSize returns the tendisSSD data size in bytes,
+// summed from the RocksDB level stats in 'info'
+func (db *RedisClient) TendisSSDDataSize() (rockdbSize uint64, err error) {
+	// 'info' must be sent over a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("TendisSSDDataSize redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	infoRet, err := db.Info("")
+	if err != nil {
+		return 0, err
+	}
+
+	rockdbSize = 0
+	levelHeadRegexp := regexp.MustCompile(`^level-\d+$`)
+	levelDataRegexp := regexp.MustCompile(`^bytes=(\d+),num_entries=(\d+),num_deletions=(\d+)`)
+	for k01, v01 := range infoRet {
+		if levelHeadRegexp.MatchString(k01) {
+			list01 := levelDataRegexp.FindStringSubmatch(v01)
+			if len(list01) != 4 {
+				err = fmt.Errorf("redis:%s info 'RocksDB Level stats' format not correct,%s:%s", db.Addr, k01, v01)
+				mylog.Logger.Error(err.Error())
+				return
+			}
+			size01, _ := strconv.ParseUint(list01[1], 10, 64)
+			rockdbSize = rockdbSize + size01
+		}
+	}
+	return
+}
+
+// TendisSSDBinlogSize tendis ssd binlog size
+type TendisSSDBinlogSize struct {
+	FirstSeq uint64 `json:"firstSeq"`
+	EndSeq   uint64 `json:"endSeq"`
+}
+
+// String implements fmt.Stringer
+func (t *TendisSSDBinlogSize) String() string {
+	return fmt.Sprintf("[%d,%d]", t.FirstSeq, t.EndSeq)
+}
+
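+// A raw 'binlogsize' reply is a 4-element array of alternating labels and
+// values; the layout below is inferred from parseBinlogSizeCmdRet and is a
+// hedged illustration, not taken from upstream docs:
+//
+//	1) "firstseq"
+//	2) "164000"
+//	3) "endseq"
+//	4) "168279"
+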
+// parseBinlogSizeCmdRet parses the result of the tendisSSD 'binlogsize' command
+func (db *RedisClient) parseBinlogSizeCmdRet(cmdRet interface{}) (ret TendisSSDBinlogSize, err error) {
+	sizeInfos, ok := cmdRet.([]interface{})
+	if !ok {
+		err = fmt.Errorf("parseBinlogSizeCmdRet 'binlogsize' result not []interface{},cmdRet:%v,nodeAddr:%s",
+			cmdRet, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return ret, err
+	}
+	if len(sizeInfos) != 4 {
+		err = fmt.Errorf("'binlogsize' result not correct,length:%d != 4,data:%+v,addr:%s",
+			len(sizeInfos), sizeInfos, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return ret, err
+	}
+	firstSeqStr := sizeInfos[1].(string)
+	endSeqStr := sizeInfos[3].(string)
+
+	ret.FirstSeq, err = strconv.ParseUint(firstSeqStr, 10, 64)
+	if err != nil {
+		err = fmt.Errorf("'binlogsize' firstSeq:%s to uint64 fail,err:%v,data:%+v,addr:%s",
+			firstSeqStr, err, sizeInfos, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return ret, err
+	}
+	ret.EndSeq, err = strconv.ParseUint(endSeqStr, 10, 64)
+	if err != nil {
+		err = fmt.Errorf("'binlogsize' endSeq:%s to uint64 fail,err:%v,data:%+v,addr:%s",
+			endSeqStr, err, sizeInfos, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return ret, err
+	}
+	return ret, nil
+}
+
+// TendisSSDBinlogSize runs 'binlogsize' and parses the result
+func (db *RedisClient) TendisSSDBinlogSize() (ret TendisSSDBinlogSize, err error) {
+	// the 'binlogsize' command must be sent over a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("TendisSSDBinlogSize redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	cmd := []interface{}{"binlogsize"}
+	ret = TendisSSDBinlogSize{}
+	sizeRet, err := db.InstanceClient.Do(context.TODO(), cmd...).Result()
+	if err != nil {
+		err = fmt.Errorf("TendisSSDBinlogSize fail,cmd:%v fail,err:%v,addr:%s", cmd, err, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return ret, err
+	}
+	return db.parseBinlogSizeCmdRet(sizeRet)
+}
+
+// Randomkey runs the 'randomkey' command
+func (db *RedisClient) Randomkey() (key string, err error) {
+	// the 'randomkey' command must be sent over a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("Randomkey redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	key, err = db.InstanceClient.RandomKey(context.TODO()).Result()
+	if err != nil && err != redis.Nil {
+		err = fmt.Errorf("redis:%s 'randomkey' failed,err:%v", db.Addr, err)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	return key, nil
+}
+
+// Shutdown runs the 'shutdown' command
+func (db *RedisClient) Shutdown() (err error) {
+	// the 'shutdown' command must be sent over a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("Shutdown redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	redisCliBin := filepath.Join(consts.UsrLocal, "redis/bin/redis-cli")
+	if util.FileExists(redisCliBin) {
+		// prefer redis-cli for shutdown when it exists;
+		// db.InstanceClient.Shutdown() can surface spurious errors
+		var opt string
+		if util.IsCliSupportedNoAuthWarning(redisCliBin) {
+			opt = "--no-auth-warning"
+		}
+		l01 := strings.Split(db.Addr, ":")
+		cmd := fmt.Sprintf("%s -h %s -p %s -a %s %s shutdown",
+			redisCliBin, l01[0], l01[1], db.Password, opt)
+		logcmd := fmt.Sprintf("%s -h %s -p %s -a xxxx %s shutdown",
+			redisCliBin, l01[0], l01[1], opt)
+		mylog.Logger.Info(logcmd)
+		_, err = util.RunBashCmd(cmd, "", nil, 1*time.Minute)
+		if err != nil {
+			return
+		}
+		return
+	}
+
+	// the connection drops on a successful shutdown, so the error is ignored
+	_, _ = db.InstanceClient.Shutdown(context.TODO()).Result()
+	return nil
+}
+
+// IsReplicaStatusOk checks, from the slave side, whether replication
+// from the given master is healthy
+func (db *RedisClient) IsReplicaStatusOk(masterIP, masterPort string) (ok bool, err error) {
+	var infoRet map[string]string
+	ok = false
+	infoRet, err = db.Info("replication")
+	if err != nil {
+		return
+	}
+	replRole := infoRet["role"]
+	if replRole != consts.RedisSlaveRole {
+		return false, nil
+	}
+	replMasterHost := infoRet["master_host"]
+	replMasterPort := infoRet["master_port"]
+	replLinkStatus := infoRet["master_link_status"]
+	if replMasterHost != masterIP || replMasterPort != masterPort {
+		err = fmt.Errorf("slave(%s) 'info replication' master(%s:%s) != (%s:%s)",
+			db.Addr, replMasterHost, replMasterPort, masterIP, masterPort)
+		return
+	}
+	if replLinkStatus != consts.MasterLinkStatusUP {
+		err = fmt.Errorf("slave(%s) 'info replication' master(%s:%s) master_link_status:%s",
+			db.Addr, replMasterHost, replMasterPort, replLinkStatus)
+		return
+	}
+	return true, nil
+}
+
+// IsTendisSSDReplicaStatusOk checks, from a tendisSSD slave, whether
+// replication from the given master is healthy
+func (db *RedisClient) IsTendisSSDReplicaStatusOk(masterIP, masterPort string) (ok bool, err error) {
+	ok, err = db.IsReplicaStatusOk(masterIP, masterPort)
+	if err != nil {
+		return
+	}
+	if !ok {
+		return
+	}
+	ok = false
+	// on the master, 'info slaves' must report this slave in IncrSync/REPL_FOLLOW state
+	var confRet map[string]string
+	var masterCli *RedisClient
+	var slavesState TendisSSDInfoSlavesData
+	masterAddr := masterIP + ":" + masterPort
+	confRet, err = db.ConfigGet("masterauth")
+	if err != nil {
+		return
+	}
+	masterAuth := confRet["masterauth"]
+	masterCli, err = NewRedisClient(masterAddr, masterAuth, 0, consts.TendisTypeRedisInstance)
+	if err != nil {
+		return
+	}
+	defer masterCli.Close()
+
+	slavesState, err = masterCli.TendisSSDInfoSlaves()
+	if err != nil {
+		return
+	}
+	if len(slavesState.SlaveList) == 0 {
+		err = fmt.Errorf("slave(%s) master_link_status:up but master(%s) 'info slaves' not found slaves", db.Addr, masterAddr)
+		return
+	}
+	for _, slave01 := range slavesState.SlaveList {
+		slaveItem := slave01
+		if slaveItem.Addr() == db.Addr {
+			if slaveItem.State == consts.TendisSSDIncrSyncState ||
+				slaveItem.State == consts.TendisSSDReplFollowtate {
+				return true, nil
+			}
+			mylog.Logger.Info(fmt.Sprintf("master(%s) 'info slaves' ret:%s", masterAddr, slavesState.String()))
+			err = fmt.Errorf(
+				"slave(%s) master_link_status:up but master(%s) 'info slaves' slave.state:%s != IncrSync|REPL_FOLLOW",
+				db.Addr, masterAddr, slaveItem.State)
+			return
+		}
+	}
+	mylog.Logger.Info(fmt.Sprintf("master(%s) 'info slaves' ret:%s", masterAddr, slavesState.String()))
+	err = fmt.Errorf("slave(%s) master_link_status:up but master(%s) 'info slaves' not found record", db.Addr, masterAddr)
+	return
+}
+
+// MaxMemory returns the instance's maxmemory ('config get maxmemory')
+func (db *RedisClient) MaxMemory() (maxmemory uint64, err error) {
+	var confRet map[string]string
+	confRet, err = db.ConfigGet("maxmemory")
+	if err != nil {
+		return
+	}
+	str01, ok := confRet["maxmemory"]
+	if !ok {
+		err = fmt.Errorf("redis(%s) get maxmemory fail,'confxx get maxmemory' ret:%+v", db.Addr, confRet)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	maxmemory, _ = strconv.ParseUint(str01, 10, 64)
+	return
+}
+
+// GetMemUsed returns used_memory and used_memory_rss from 'info memory'
+func (db *RedisClient) GetMemUsed() (memoryUsed, memoryUsedRss uint64, err error) {
+	var infoRet map[string]string
+	var memoryUsedStr, memoryRssStr string
+	var ok bool
+	infoRet, err = db.Info("memory")
+	if err != nil {
+		return
+	}
+	memoryUsedStr, ok = infoRet["used_memory"]
+	if !ok {
+		err = fmt.Errorf("redis(%s) used_memory not found,'info memory' ret:%+v", db.Addr, infoRet)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	memoryRssStr, ok = infoRet["used_memory_rss"]
+	if !ok {
+		err = fmt.Errorf("redis(%s) used_memory_rss not found,'info memory' ret:%+v", db.Addr, infoRet)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	memoryUsed, _ = strconv.ParseUint(memoryUsedStr, 10, 64)
+	memoryUsedRss, _ = strconv.ParseUint(memoryRssStr, 10, 64)
+	return
+}
+
+// Set runs 'set $k $v ex/px $expiration'
+func (db *RedisClient) Set(k string, val interface{}, expiration time.Duration) (ret string, err error) {
+	if db.DbType == consts.TendisTypeRedisCluster {
+		ret, err = db.ClusterClient.Set(context.TODO(), k, val, expiration).Result()
+	} else {
+		ret, err = db.InstanceClient.Set(context.TODO(), k, val, expiration).Result()
+	}
+	if err != nil {
+		err = fmt.Errorf("'set %s %v ex %d' fail,err:%v,addr:%s", k, val, int(expiration.Seconds()), err, db.Addr)
+		mylog.Logger.Info(err.Error())
+		return
+	}
+	return
+}
+
+// Mset runs 'mset ...'
+func (db *RedisClient) Mset(vals []interface{}) (ret string, err error) {
+	if db.DbType == consts.TendisTypeRedisCluster {
+		ret, err = db.ClusterClient.MSet(context.TODO(), vals...).Result()
+	} else {
+		ret, err = db.InstanceClient.MSet(context.TODO(), vals...).Result()
+	}
+	if err != nil {
+		err = fmt.Errorf("mset %+v fail,err:%v,addr:%s", vals, err, db.Addr)
+		mylog.Logger.Info(err.Error())
+		return
+	}
+	return
+}
+
+// Get runs 'get key'
+func (db *RedisClient) Get(k string) (ret string, err error) {
+	if db.DbType == consts.TendisTypeRedisCluster {
+		ret, err = db.ClusterClient.Get(context.TODO(), k).Result()
+	} else {
+		ret, err = db.InstanceClient.Get(context.TODO(), k).Result()
+	}
+	if err != nil {
+		err = fmt.Errorf("'get %s' fail,err:%v,addr:%s", k, err, db.Addr)
+		mylog.Logger.Info(err.Error())
+		return
+	}
+	return
+}
+
+// Hmset runs 'hmset ...'
+func (db *RedisClient) Hmset(k string, vals []interface{}) (ret bool, err error) {
+	if db.DbType == consts.TendisTypeRedisCluster {
+		ret, err = db.ClusterClient.HMSet(context.TODO(), k, vals...).Result()
+	} else {
+		ret, err = db.InstanceClient.HMSet(context.TODO(), k, vals...).Result()
+	}
+	if err != nil {
+		err = fmt.Errorf("hmset %s %+v fail,err:%v,addr:%s", k, vals, err, db.Addr)
+		mylog.Logger.Info(err.Error())
+		return
+	}
+	return
+}
+
+// Rpush runs 'rpush ...'
+func (db *RedisClient) Rpush(k string, vals []interface{}) (ret int64, err error) {
+	if db.DbType == consts.TendisTypeRedisCluster {
+		ret, err = db.ClusterClient.RPush(context.TODO(), k, vals...).Result()
+	} else {
+		ret, err = db.InstanceClient.RPush(context.TODO(), k, vals...).Result()
+	}
+	if err != nil {
+		err = fmt.Errorf("rpush %s %+v fail,err:%v,addr:%s", k, vals, err, db.Addr)
+		mylog.Logger.Info(err.Error())
+		return
+	}
+	return
+}
+
+// Sadd runs 'sadd ...'
+func (db *RedisClient) Sadd(k string, vals []interface{}) (ret int64, err error) {
+	if db.DbType == consts.TendisTypeRedisCluster {
+		ret, err = db.ClusterClient.SAdd(context.TODO(), k, vals...).Result()
+	} else {
+		ret, err = db.InstanceClient.SAdd(context.TODO(), k, vals...).Result()
+	}
+	if err != nil {
+		err = fmt.Errorf("Sadd %s %+v fail,err:%v,addr:%s", k, vals, err, db.Addr)
+		mylog.Logger.Info(err.Error())
+		return
+	}
+	return
+}
+
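+// Design note: each write helper in this family dispatches on db.DbType —
+// ClusterClient for redis-cluster deployments, InstanceClient otherwise — so
+// callers never branch on topology themselves. Illustrative call (cli being
+// any *RedisClient):
+//
+//	_, err := cli.Set("k1", "v1", 10*time.Second) // works on either topology
+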
+// Zadd runs 'zadd ...'
+func (db *RedisClient) Zadd(k string, members []*redis.Z) (ret int64, err error) {
+	if db.DbType == consts.TendisTypeRedisCluster {
+		ret, err = db.ClusterClient.ZAdd(context.TODO(), k, members...).Result()
+	} else {
+		ret, err = db.InstanceClient.ZAdd(context.TODO(), k, members...).Result()
+	}
+	if err != nil {
+		err = fmt.Errorf("Zadd %s %+v fail,err:%v,addr:%s", k, members, err, db.Addr)
+		mylog.Logger.Info(err.Error())
+		return
+	}
+	return
+}
+
+// AdminSet runs the tendisplus 'adminset' command
+func (db *RedisClient) AdminSet(key, val string) (ret string, err error) {
+	var ok bool
+	// the 'adminset' command must be sent over a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("'adminset' redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	cmd := []interface{}{"adminset", key, val}
+	adminsetRet, err := db.InstanceClient.Do(context.TODO(), cmd...).Result()
+	if err != nil {
+		err = fmt.Errorf("redis:%s 'adminset %s' fail,err:%v", db.Addr, key, err)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	ret, ok = adminsetRet.(string)
+	if !ok {
+		err = fmt.Errorf("'adminset %s %s' result not string,ret:%+v,nodeAddr:%s", key, val, adminsetRet, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	return
+}
+
+// GetTendisplusHeartbeat fetches the tendisplus per-store heartbeat data
+/* for example:
+> adminget 1.1.1.1:heartbeat
+ 1) 1) "0"
+    2) "2021-06-01 16:47:00"
+ 2) 1) "1"
+    2) "2021-06-01 16:47:00"
+ 3) 1) "2"
+    2) "2021-06-01 16:47:00"
+ 4) 1) "3"
+    2) "2021-06-01 16:47:00"
+ 5) 1) "4"
+    2) "2021-06-01 16:47:00"
+ 6) 1) "5"
+    2) "2021-06-01 16:47:00"
+ 7) 1) "6"
+    2) "2021-06-01 16:47:00"
+ 8) 1) "7"
+    2) "2021-06-01 16:47:00"
+ 9) 1) "8"
+    2) "2021-06-01 16:47:00"
+10) 1) "9"
+    2) "2021-06-01 16:47:00"
+*/
+func (db *RedisClient) GetTendisplusHeartbeat(key string) (heartbeat map[int]time.Time, err error) {
+	// the 'adminget' command must be sent over a standalone (non-cluster) redis client
+	if db.InstanceClient == nil {
+		err = fmt.Errorf("'adminget' redis:%s must create a standalone client", db.Addr)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	heartbeat = make(map[int]time.Time)
+	cmd := []interface{}{"adminget", key}
+	adminGetRet, err := db.InstanceClient.Do(context.TODO(), cmd...).Result()
+	if err != nil {
+		err = fmt.Errorf("redis:%s 'adminget %s' fail,err:%v", db.Addr, key, err)
+		mylog.Logger.Error(err.Error())
+		return heartbeat, err
+	}
+	adminGetRets, ok := adminGetRet.([]interface{})
+	if !ok {
+		err = fmt.Errorf("GetTendisplusHeartbeat 'adminget %s' result not []interface{},nodeAddr:%s", key, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return heartbeat, err
+	}
+	var storeID int
+	var value, storeIDStr string
+	for _, confItem := range adminGetRets {
+		conf01 := confItem.([]interface{})
+		if conf01[1] == nil {
+			continue
+		}
+		storeIDStr = conf01[0].(string)
+		value = conf01[1].(string)
+		storeID, _ = strconv.Atoi(storeIDStr)
+		heartbeat[storeID], _ = time.ParseInLocation(consts.UnixtimeLayout, value, time.Local)
+	}
+	return heartbeat, nil
+}
+
+// Close closes the underlying connection
+func (db *RedisClient) Close() {
+	if db.InstanceClient == nil && db.ClusterClient == nil {
+		return
+	}
+
+	if db.DbType == consts.TendisTypeRedisCluster {
+		db.ClusterClient.Close()
+		db.ClusterClient = nil
+		return
+	}
+	db.InstanceClient.Close()
+	db.InstanceClient = nil
+}
diff --git a/dbm-services/redis/db-tools/dbmon/models/myredis/cluster_info.go b/dbm-services/redis/db-tools/dbmon/models/myredis/cluster_info.go
new file mode 100644
index 0000000000..5427c6c616
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/models/myredis/cluster_info.go
@@ -0,0 +1,103 @@
+package myredis
+
+import (
+	"context"
+	"dbm-services/redis/db-tools/dbmon/mylog"
+	"dbm-services/redis/db-tools/dbmon/pkg/consts"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// CmdClusterInfo holds the parsed result of the 'cluster info' command
+type CmdClusterInfo struct {
+	ClusterState                       string `json:"cluster_state"`
+	ClusterSlotsAssigned               int    `json:"cluster_slots_assigned"`
+	ClusterSlotsOk                     int    `json:"cluster_slots_ok"`
+	ClusterSlotsPfail                  int    `json:"cluster_slots_pfail"`
+	ClusterSlotsFail                   int    `json:"cluster_slots_fail"`
+	ClusterKnownNodes                  int    `json:"cluster_known_nodes"`
+	ClusterSize                        int    `json:"cluster_size"`
+	ClusterCurrentEpoch                int    `json:"cluster_current_epoch"`
+	ClusterMyEpoch                     int    `json:"cluster_my_epoch"`
+	ClusterStatsMessagesPingSent       uint64 `json:"cluster_stats_messages_ping_sent"`
+	ClusterStatsMessagesPongSent       uint64 `json:"cluster_stats_messages_pong_sent"`
+	ClusterStatsMessagesMeetSent       uint64 `json:"cluster_stats_messages_meet_sent"`
+	ClusterStatsMessagesPublishSent    uint64 `json:"cluster_stats_messages_publish_sent"`
+	ClusterStatsMessagesUpdateSent     uint64 `json:"cluster_stats_messages_update_sent"`
+	ClusterStatsMessagesSent           uint64 `json:"cluster_stats_messages_sent"`
+	ClusterStatsMessagesPingReceived   uint64 `json:"cluster_stats_messages_ping_received"`
+	ClusterStatsMessagesPongReceived   uint64 `json:"cluster_stats_messages_pong_received"`
+	ClusterStatsMessagesMeetReceived   uint64 `json:"cluster_stats_messages_meet_received"`
+	ClusterStatsMessagesUpdateReceived uint64 `json:"cluster_stats_messages_update_received"`
+	ClusterStatsMessagesReceived       uint64 `json:"cluster_stats_messages_received"`
+}
+
+// DecodeClusterInfo parses the output of the 'cluster info' command
+func DecodeClusterInfo(cmdRet string) (clusterInfo *CmdClusterInfo) {
+	clusterInfo = &CmdClusterInfo{}
+	list01 := strings.Split(cmdRet, "\n")
+	for _, item01 := range list01 {
+		item01 = strings.TrimSpace(item01)
+		if len(item01) == 0 {
+			continue
+		}
+		list02 := strings.SplitN(item01, ":", 2)
+		if len(list02) < 2 {
+			continue
+		}
+		switch list02[0] {
+		case "cluster_state":
+			clusterInfo.ClusterState = list02[1]
+		case "cluster_slots_assigned":
+			clusterInfo.ClusterSlotsAssigned, _ = strconv.Atoi(list02[1])
+		case "cluster_slots_ok":
+			clusterInfo.ClusterSlotsOk, _ = strconv.Atoi(list02[1])
+		case "cluster_slots_pfail":
+			clusterInfo.ClusterSlotsPfail, _ = strconv.Atoi(list02[1])
+		case "cluster_slots_fail":
+			clusterInfo.ClusterSlotsFail, _ = strconv.Atoi(list02[1])
+		case "cluster_known_nodes":
+			clusterInfo.ClusterKnownNodes, _ = strconv.Atoi(list02[1])
+		case "cluster_size":
+			clusterInfo.ClusterSize, _ = strconv.Atoi(list02[1])
+		case "cluster_current_epoch":
+			clusterInfo.ClusterCurrentEpoch, _ = strconv.Atoi(list02[1])
+		case "cluster_my_epoch":
+			clusterInfo.ClusterMyEpoch, _ = strconv.Atoi(list02[1])
+		case "cluster_stats_messages_ping_sent":
+			clusterInfo.ClusterStatsMessagesPingSent, _ = strconv.ParseUint(list02[1], 10, 64)
+		case "cluster_stats_messages_pong_sent":
+			clusterInfo.ClusterStatsMessagesPongSent, _ = strconv.ParseUint(list02[1], 10, 64)
+		case "cluster_stats_messages_meet_sent":
+			clusterInfo.ClusterStatsMessagesMeetSent, _ = strconv.ParseUint(list02[1], 10, 64)
+		case "cluster_stats_messages_publish_sent":
+			clusterInfo.ClusterStatsMessagesPublishSent, _ = strconv.ParseUint(list02[1], 10, 64)
+		case "cluster_stats_messages_update_sent":
+			clusterInfo.ClusterStatsMessagesUpdateSent, _ = strconv.ParseUint(list02[1], 10, 64)
+		case "cluster_stats_messages_sent":
+			clusterInfo.ClusterStatsMessagesSent, _ = strconv.ParseUint(list02[1], 10, 64)
+		case "cluster_stats_messages_ping_received":
+			clusterInfo.ClusterStatsMessagesPingReceived, _ = strconv.ParseUint(list02[1], 10, 64)
+		case "cluster_stats_messages_pong_received":
+			clusterInfo.ClusterStatsMessagesPongReceived, _ = strconv.ParseUint(list02[1], 10, 64)
+		case "cluster_stats_messages_meet_received":
+			clusterInfo.ClusterStatsMessagesMeetReceived, _ = strconv.ParseUint(list02[1], 10, 64)
+		case "cluster_stats_messages_update_received":
+			clusterInfo.ClusterStatsMessagesUpdateReceived, _ = strconv.ParseUint(list02[1], 10, 64)
+		case "cluster_stats_messages_received":
+			clusterInfo.ClusterStatsMessagesReceived, _ = strconv.ParseUint(list02[1], 10, 64)
+		}
+	}
+	return
+}
+
+// ClusterInfo runs 'cluster info' and parses the result
+func (db *RedisClient) ClusterInfo() (clusterInfo *CmdClusterInfo, err error) {
+	var ret01 string
+	if db.DbType == consts.TendisTypeRedisCluster {
+		ret01, err = db.ClusterClient.ClusterInfo(context.TODO()).Result()
+	} else {
+		ret01, err = db.InstanceClient.ClusterInfo(context.TODO()).Result()
+	}
+	if err != nil {
+		err = fmt.Errorf("ClusterInfo execute cluster info fail,err:%v,clusterAddr:%s", err, db.Addr)
+		mylog.Logger.Error(err.Error())
+		return nil, err
+	}
+	clusterInfo = DecodeClusterInfo(ret01)
+	return clusterInfo, nil
+}
diff --git a/dbm-services/redis/db-tools/dbmon/models/myredis/cluster_nodes.go b/dbm-services/redis/db-tools/dbmon/models/myredis/cluster_nodes.go
new file mode 100644
index 0000000000..72b8db9db1
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/models/myredis/cluster_nodes.go
@@ -0,0 +1,417 @@
+package myredis
+
+import (
+	"dbm-services/redis/db-tools/dbmon/mylog"
+	"dbm-services/redis/db-tools/dbmon/pkg/consts"
+	"dbm-services/redis/db-tools/dbmon/util"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+const (
+	slotSeparator      = "-"
+	importingSeparator = "-<-"
+	migratingSeparator = "->-"
+)
+
+// ClusterNodeData is the parsed form of one 'cluster nodes' entry
+type ClusterNodeData struct {
+	NodeID     string   `json:"ID"`
+	Addr       string   `json:"addr"`
+	IP         string   `json:"ip"` // for a redis cluster inside k8s, IP holds the pod name; otherwise the redis IP
+	Port       int      `json:"port"`
+	CPort      int      `json:"cport"`
+	Role       string   `json:"role"` // master or slave
+	IsMyself   bool     `json:"is_myself"`
+	LinkState  string   `json:"link_state"` // connected or disconnected
+	MasterID   string   `json:"master_iD"`
+	FailStatus []string `json:"fail_status"`
+	PingSent   int64    `json:"ping_sent"`
+	PongRecv   int64    `json:"pong_recv"`
+	ConfigEpoch int64   `json:"config_epoch"`
+	SlotSrcStr  string  `json:"slot_src_str"`
+	Slots       []int   `json:"slots"`
+	SlotsMap       map[int]bool   `json:"slots_map"`       // convenient to know whether certain slots belong to the node
+	MigratingSlots map[int]string `json:"migrating_slots"` // key:slot,value:dst redis ID
+	ImportingSlots map[int]string `json:"importing_slots"` // key:slot,value:src redis ID
+
+	balance    int `json:"-"` // used when migrating slots during scale in/out
+	endSlotIdx int `json:"-"`
+}
+
+// NewDefaultNode builds and returns a new, empty ClusterNodeData instance
+func NewDefaultNode() *ClusterNodeData {
+	return &ClusterNodeData{
+		Slots:          []int{},
+		SlotsMap:       map[int]bool{},
+		MigratingSlots: map[int]string{},
+		ImportingSlots: map[int]string{},
+	}
+}
+
+// String implements fmt.Stringer
+func (n *ClusterNodeData) String() string {
+	return fmt.Sprintf(
+		`{Redis ID:%s,Addr:%s,role:%s,master:%s,link:%s,status:%s,slots:%s,len(migratingSlots):%d,len(importingSlots):%d}`,
+		n.NodeID, n.Addr, n.GetRole(), n.MasterID, n.LinkState, n.FailStatus,
+		ConvertSlotToStr(n.Slots), len(n.MigratingSlots), len(n.ImportingSlots))
+}
+
+// SetRole sets the node's role from the comma-separated flags string
+func (n *ClusterNodeData) SetRole(flags string) error {
+	n.Role = "" // reset value before setting the new one
+	vals := strings.Split(flags, ",")
+	for _, val := range vals {
+		switch val {
+		case consts.RedisMasterRole:
+			n.Role = consts.RedisMasterRole
+		case consts.RedisSlaveRole:
+			n.Role = consts.RedisSlaveRole
+		}
+	}
+
+	if n.Role == "" {
+		err := fmt.Errorf("node setRole failed,addr:%s,flags:%s", n.Addr, flags)
+		return err
+	}
+
+	return nil
+}
+
+// GetRole returns the node's role
+func (n *ClusterNodeData) GetRole() string {
+	switch n.Role {
+	case consts.RedisMasterRole:
+		return consts.RedisMasterRole
+	case consts.RedisSlaveRole:
+		return consts.RedisSlaveRole
+	default:
+		if n.MasterID != "" {
+			return consts.RedisSlaveRole
+		}
+		if len(n.Slots) > 0 {
+			return consts.RedisMasterRole
+		}
+	}
+
+	return consts.RedisNoneRole
+}
+
+// SlotCnt returns the number of slots owned by the node
+func (n *ClusterNodeData) SlotCnt() int {
+	return len(n.Slots)
+}
+
+// SetLinkStatus sets the node's link status
+func (n *ClusterNodeData) SetLinkStatus(status string) error {
+	n.LinkState = "" // reset value before setting the new one
+	switch status {
+	case consts.RedisLinkStateConnected:
+		n.LinkState = consts.RedisLinkStateConnected
+	case consts.RedisLinkStateDisconnected:
+		n.LinkState = consts.RedisLinkStateDisconnected
+	}
+
+	if n.LinkState == "" {
+		err := fmt.Errorf("Node SetLinkStatus failed,addr:%s,status:%s", n.Addr, status)
+		return err
+	}
+
+	return nil
+}
+
+// SetFailureStatus records any failure flags present in the input flags
+func (n *ClusterNodeData) SetFailureStatus(flags string) {
+	n.FailStatus = []string{} // reset value before setting the new one
+	vals := strings.Split(flags, ",")
+	for _, val := range vals {
+		switch val {
+		case consts.NodeStatusFail:
+			n.FailStatus = append(n.FailStatus, consts.NodeStatusFail)
+		case consts.NodeStatusPFail:
+			n.FailStatus = append(n.FailStatus, consts.NodeStatusPFail)
+		case consts.NodeStatusHandshake:
+			n.FailStatus = append(n.FailStatus, consts.NodeStatusHandshake)
+		case consts.NodeStatusNoAddr:
+			n.FailStatus = append(n.FailStatus, consts.NodeStatusNoAddr)
+		case consts.NodeStatusNoFlags:
+			n.FailStatus = append(n.FailStatus, consts.NodeStatusNoFlags)
+		}
+	}
+}
+
+// SetReferentMaster sets the node's master reference
+func (n *ClusterNodeData) SetReferentMaster(ref string) {
+	n.MasterID = ""
+	if ref == "-" {
+		return
+	}
+	n.MasterID = ref
+}
+
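+// For reference, a raw 'cluster nodes' line has the shape
+// (id addr@cport flags master ping-sent pong-recv config-epoch link-state slots...),
+// e.g. the sample used by the unit tests:
+//
+//	17922e98b0b8f7a9d233422cf8ae55f2d22fdab7 4.4.4.4:30003@40003 master - 0 1655005291000 20 connected 7509-8191
+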
+// DecodeClusterNodes parses the raw output of the 'cluster nodes' command
+func DecodeClusterNodes(input string) ([]*ClusterNodeData, error) {
+	infos := []*ClusterNodeData{}
+	lines := strings.Split(input, "\n")
+	for _, line := range lines {
+		values := strings.Fields(line)
+		if len(values) < 8 {
+			// last line is always empty
+			// not enough values in line split, skip line
+			mylog.Logger.Info(fmt.Sprintf("not enough values in line split, ignoring line: '%s'", line))
+			continue
+		}
+		node := NewDefaultNode()
+
+		node.NodeID = values[0]
+		// remove trailing port for cluster internal protocol
+		ipPort := strings.Split(values[1], "@")
+		node.Addr = ipPort[0]
+		if node.Addr != "" {
+			list02 := strings.Split(node.Addr, ":")
+			if util.IsValidIP(list02[0]) {
+				node.IP = list02[0]
+			} else {
+				l01 := strings.Split(node.Addr, ".")
+				if len(l01) > 0 {
+					node.IP = l01[0]
+				}
+			}
+			node.Port, _ = strconv.Atoi(list02[1])
+		}
+		node.CPort, _ = strconv.Atoi(ipPort[1])
+		_ = node.SetRole(values[2])
+		node.SetFailureStatus(values[2])
+		node.SetReferentMaster(values[3])
+		if i, err := strconv.ParseInt(values[4], 10, 64); err == nil {
+			node.PingSent = i
+		}
+		if i, err := strconv.ParseInt(values[5], 10, 64); err == nil {
+			node.PongRecv = i
+		}
+		if i, err := strconv.ParseInt(values[6], 10, 64); err == nil {
+			node.ConfigEpoch = i
+		}
+		_ = node.SetLinkStatus(values[7])
+
+		for _, slot := range values[8:] {
+			if node.SlotSrcStr == "" {
+				node.SlotSrcStr = slot
+			} else {
+				node.SlotSrcStr = fmt.Sprintf("%s %s", node.SlotSrcStr, slot)
+			}
+			slots01, _, migratingSlots, importingSlots, err := DecodeSlotsFromStr(slot, " ")
+			if err != nil {
+				return infos, err
+			}
+			node.Slots = append(node.Slots, slots01...)
+			for _, s01 := range slots01 {
+				node.SlotsMap[s01] = true
+			}
+			for s01, nodeid := range importingSlots {
+				node.ImportingSlots[s01] = nodeid
+			}
+			for s01, nodeid := range migratingSlots {
+				node.MigratingSlots[s01] = nodeid
+			}
+		}
+
+		if strings.HasPrefix(values[2], "myself") {
+			node.IsMyself = true
+		}
+		infos = append(infos, node)
+	}
+
+	return infos, nil
+}
+
+// IsRunningMaster is a predicate that finds running master nodes
+var IsRunningMaster = func(n *ClusterNodeData) bool {
+	return n.GetRole() == consts.RedisMasterRole &&
+		len(n.FailStatus) == 0 && n.LinkState == consts.RedisLinkStateConnected
+}
+
+// IsMasterWithSlot is a predicate that finds running master nodes owning slots
+var IsMasterWithSlot = func(n *ClusterNodeData) bool {
+	return n.GetRole() == consts.RedisMasterRole && len(n.FailStatus) == 0 &&
+		n.LinkState == consts.RedisLinkStateConnected && n.SlotCnt() > 0
+}
+
+// IsRunningNode is a predicate that finds running nodes
+var IsRunningNode = func(n *ClusterNodeData) bool {
+	return len(n.FailStatus) == 0 && n.LinkState == consts.RedisLinkStateConnected
+}
+
+// DecodeSlotsFromStr parses a slot string such as "0-10,12,100-200" with seq ","
+// It also understands:
+// migrating slot: ex: [42->-67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1]
+// importing slot: ex: [42-<-67ed2db8d677e59ec4a4cefb06858cf2a1a89fa1]
+func DecodeSlotsFromStr(slotStr string, seq string) (slots []int, slotMap map[int]bool, migratingSlots,
+	importingSlots map[int]string, err error) {
+	slotMap = make(map[int]bool)
+	migratingSlots = make(map[int]string)
+	importingSlots = make(map[int]string)
+	var items []string
+	var slot int
+	if seq == "" || seq == " " || seq == "\t" || seq == "\n" {
+		items = strings.Fields(slotStr)
+	} else {
+		items = strings.Split(slotStr, seq)
+	}
+	for _, slotItem := range items {
+		slotItem = strings.TrimSpace(slotItem)
+		list02 := strings.Split(slotItem, slotSeparator)
+		if len(list02) == 3 {
+			separator := slotSeparator + list02[1] + slotSeparator
+			slot, err = strconv.Atoi(strings.TrimPrefix(list02[0], "["))
+			if err != nil {
+				err = fmt.Errorf("DecodeSlotsFromStr fail,strconv.Atoi err:%v,str:%s", err, slotItem)
+				mylog.Logger.Error(err.Error())
+				return
+			}
+			nodeID := strings.TrimSuffix(list02[2], "]")
+			if separator == importingSeparator {
+				importingSlots[slot] = nodeID
+			} else if separator == migratingSeparator {
+				migratingSlots[slot] = nodeID
+			} else {
+				err = fmt.Errorf("impossible to decode slotStr:%s", slotItem)
+				mylog.Logger.Error(err.Error())
+				return
+			}
+		} else if len(list02) == 1 {
+			num01, _ := strconv.Atoi(list02[0])
+			if num01 < consts.DefaultMinSlots || num01 > consts.DefaultMaxSlots {
+				err = fmt.Errorf("slot:%d in param:%s not correct,valid range [%d,%d]", num01, slotStr,
+					consts.DefaultMinSlots, consts.DefaultMaxSlots)
+				mylog.Logger.Error(err.Error())
+				return
+			}
+			slots = append(slots, num01)
+			slotMap[num01] = true
+		} else if len(list02) == 2 {
+			start, _ := strconv.Atoi(list02[0])
+			end, _ := strconv.Atoi(list02[1])
+			if start < consts.DefaultMinSlots || start > consts.DefaultMaxSlots {
+				err = fmt.Errorf("slot:%d in param:%s not correct,valid range [%d,%d]", start, slotStr,
+					consts.DefaultMinSlots, consts.DefaultMaxSlots)
+				mylog.Logger.Error(err.Error())
+				return
+			}
+			if end < consts.DefaultMinSlots || end > consts.DefaultMaxSlots {
+				err = fmt.Errorf("slot:%d in param:%s not correct,valid range [%d,%d]", end, slotStr,
+					consts.DefaultMinSlots, consts.DefaultMaxSlots)
+				mylog.Logger.Error(err.Error())
+				return
+			}
+			for num01 := start; num01 <= end; num01++ {
+				slots = append(slots, num01)
+				slotMap[num01] = true
+			}
+		}
+	}
+	return
+}
+
+// ConvertSlotToStr renders slots such as [0,1,2,3,4,10,11,12,13,17] as "0-4,10-13,17"
+func ConvertSlotToStr(slots []int) string {
+	if len(slots) == 0 {
+		return ""
+	}
+	str01 := ""
+	start := slots[0]
+	curr := slots[0]
+	for _, item := range slots {
+		next := item
+		if next == curr {
+			continue
+		}
+		if curr == next-1 {
+			// contiguous, keep extending the current range
+			curr = next
+			continue
+		}
+		// discontinuity: flush the current range
+		if start == curr {
+			str01 = fmt.Sprintf("%s,%d", str01, start)
+		} else {
+			str01 = fmt.Sprintf("%s,%d-%d", str01, start, curr)
+		}
+		start = next
+		curr = next
+	}
+	// flush the final range
+	if start == curr {
+		str01 = fmt.Sprintf("%s,%d", str01, start)
+	} else {
+		str01 = fmt.Sprintf("%s,%d-%d", str01, start, curr)
+	}
+	str01 = strings.Trim(str01, ",")
+	return str01
+}
+
+// ConvertSlotToShellFormat renders slots such as [0,1,2,3,4,10,11,12,13,17] as "{0..4} {10..13} 17"
+func ConvertSlotToShellFormat(slots []int) string {
+	if len(slots) == 0 {
+		return ""
+	}
+	str01 := ""
+	start := slots[0]
+	curr := slots[0]
+	for _, item := range slots {
+		next := item
+		if next == curr {
+			continue
+		}
+		if curr == next-1 {
+			// contiguous, keep extending the current range
+			curr = next
+			continue
+		}
+		// discontinuity: flush the current range
+		if start == curr {
+			str01 = fmt.Sprintf("%s %d", str01, start)
+		} else {
+			str01 = fmt.Sprintf("%s {%d..%d}", str01, start, curr)
+		}
+		start = next
+		curr = next
+	}
+	// flush the final range
+	if start == curr {
+		str01 = fmt.Sprintf("%s %d", str01, start)
+	} else {
+		str01 = fmt.Sprintf("%s {%d..%d}", str01, start, curr)
+	}
+	str01 = strings.Trim(str01, " ")
+	return str01
+}
+
+// SlotSliceDiff returns the slots that are present in slotsB but missing from slotsA
+func SlotSliceDiff(slotsA []int, slotsB []int) (diffSlots []int) {
+	if len(slotsA) == 0 {
+		return slotsB
+	}
+	aMap := make(map[int]struct{})
+	for _, slot := range slotsA {
+		aMap[slot] = struct{}{}
+	}
+	for _, slot := range slotsB {
+		if _, ok := aMap[slot]; !ok {
+			diffSlots = append(diffSlots, slot)
+		}
+	}
+	return
+}
diff --git a/dbm-services/redis/db-tools/dbmon/models/myredis/cluster_nodes_test.go b/dbm-services/redis/db-tools/dbmon/models/myredis/cluster_nodes_test.go
new file mode 100644
index 0000000000..967426552d
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/models/myredis/cluster_nodes_test.go
@@ -0,0 +1,87 @@
+package myredis
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/smartystreets/goconvey/convey"
+)
+
+// unit tests for cluster nodes decoding
+func TestDecodeClusterNodes(t *testing.T) {
+	convey.Convey("cluster nodes decode", t, func() {
+		clusterNodesStr := `
+	17922e98b0b8f7a9d233422cf8ae55f2d22fdab7 4.4.4.4:30003@40003 master - 0 1655005291000 20 connected 7509-8191
+	e81c4276dce41ae3ed4a5fe18e460ed5b9f77e8b 3.3.3.3:30003@40003 slave 17922e98b0b8f7a9d233422cf8ae55f2d22fdab7 0 1655005291000 20 connected
+	56e53ca70ef13f3ca1817b0746d64319a4b66fed synctest-redis-rdsplus1-0.synctest-svc.vip:30000@40000 myself,slave 72ffcd1f8d39d1b6011ab38f5e1a42dd6f66f765 0 1655006313000 3 connected
+	72ffcd1f8d39d1b6011ab38f5e1a42dd6f66f765 synctest-redis-rdsplus1-1.synctest-svc.vip:30000@40000 master - 0 1655006315419 7 connected 5461-10921
+	`
+		nodes, err := DecodeClusterNodes(clusterNodesStr)
+		if err != nil {
+			t.Fatal(err)
+		}
+		convey.So(len(nodes), convey.ShouldEqual, 4)
+		convey.So(nodes[0].NodeID, convey.ShouldEqual, "17922e98b0b8f7a9d233422cf8ae55f2d22fdab7")
+		convey.So(nodes[0].IP, convey.ShouldEqual, "4.4.4.4")
+		convey.So(nodes[0].Port, convey.ShouldEqual, 30003)
+		convey.So(nodes[0].SlotsMap, convey.ShouldContainKey, 7560)
+		convey.So(nodes[1].MasterID, convey.ShouldEqual, "17922e98b0b8f7a9d233422cf8ae55f2d22fdab7")
+		convey.So(IsMasterWithSlot(nodes[0]), convey.ShouldBeTrue)
+		convey.So(nodes[2].IP, convey.ShouldEqual, "synctest-redis-rdsplus1-0")
+		convey.So(IsMasterWithSlot(nodes[3]), convey.ShouldBeTrue)
+		convey.So(nodes[3].SlotsMap, convey.ShouldContainKey, 5470)
+	})
+
+	convey.Convey("cluster nodes decode2", t, func() {
+		clusterNodesStr := `36b96240e16051711d2391472cfd5900d33dc8bd 5.5.5.5:46000@56000 master - 0 1660014754278 5 connected
+a32f9cb266d85ea96a1a87ce56872f339e2a257f 5.5.5.5:45001@55001 master - 0 1660014755280 4 connected 5462-10923
+5d555b4ab569de196f71afd275c1edf8c046959a 5.5.5.5:45000@55000 myself,master - 0 1660014753000 1 connected 0-5461
+90ed7be9db5e4b78e959ad3b40253c2ffb3d5845 5.5.5.5:46002@56002 master - 0 1660014752269 3 connected
+dcff36cc5e915024d12173b1c5a3235e9186f193 5.5.5.5:46001@56001 master - 0 1660014753273 2 connected
+ff29e2e2782916a0451d5f4064cb55483f4b2a97 5.5.5.5:45002@55002 master - 0 1660014753000 0 connected 10924-16383
+`
+		nodes, err := DecodeClusterNodes(clusterNodesStr)
+		if err != nil {
+			t.Fatal(err)
+		}
+		var selfNode *ClusterNodeData
+		for _, node01 := range nodes {
+			nodeItem := node01
+			if nodeItem.IsMyself {
+				selfNode = nodeItem
+				break
+			}
+		}
+		fmt.Printf("%s\n", selfNode.String())
+		convey.So(IsMasterWithSlot(selfNode), convey.ShouldBeTrue)
+	})
+
+	convey.Convey("decode slots from string", t, func() {
+		slotStr := "0-10,12,100-200"
+		slots, slotMap, _, _, err := DecodeSlotsFromStr(slotStr, ",")
+		if err != nil {
+			t.Fatal(err)
+		}
+		convey.So(len(slots), convey.ShouldEqual, 11+1+101)
+		convey.So(slotMap, convey.ShouldContainKey, 12)
+		convey.So(slotMap, convey.ShouldNotContainKey, 11)
+		// convey.So(len(migratingSlots), convey.ShouldEqual, 0)
+		// convey.So(len(importingSlots), convey.ShouldEqual, 0)
+
+		slotStr = "[93-<-292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f] [77->-e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca]"
+		_, _, migratingSlots, importingSlots, err := DecodeSlotsFromStr(slotStr, " ")
+		if err != nil {
+			t.Fatal(err)
+		}
+		// convey.So(len(slots), convey.ShouldEqual, 0)
+		// convey.So(len(slotMap), convey.ShouldEqual, 0)
+		convey.So(migratingSlots, convey.ShouldContainKey, 77)
+		convey.So(importingSlots, convey.ShouldContainKey, 93)
+		convey.So(importingSlots[93], convey.ShouldEqual, "292f8b365bb7edb5e285caf0b7e6ddc7265d2f4f")
+	})
+	convey.Convey("convert slot slice to string", t, func() {
+		slots := []int{0, 1, 2, 3, 4, 10, 11, 12, 13, 17}
+		str01 := ConvertSlotToStr(slots)
+		convey.So(str01, convey.ShouldEqual, "0-4,10-13,17")
+	})
+}
diff --git a/dbm-services/redis/db-tools/dbmon/models/myredis/myredis.go b/dbm-services/redis/db-tools/dbmon/models/myredis/myredis.go
new file mode 100644
index 0000000000..b7868da915
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/models/myredis/myredis.go
@@ -0,0 +1,124 @@
+// Package myredis holds shared helpers for talking to local redis instances
+package myredis
+
+import (
+	"dbm-services/redis/db-tools/dbmon/mylog"
+	"dbm-services/redis/db-tools/dbmon/pkg/consts"
+	"dbm-services/redis/db-tools/dbmon/util"
+	"fmt"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+)
+
+// GetRedisLoccalConfFile returns the local config file of a redis instance
+func GetRedisLoccalConfFile(port int) (confFile string, err error) {
+	dataDir := consts.GetRedisDataDir()
+	instConf := filepath.Join(dataDir, "redis", strconv.Itoa(port), "instance.conf")
+	redisConf := filepath.Join(dataDir, "redis", strconv.Itoa(port), "redis.conf")
+	if util.FileExists(instConf) {
+		return instConf, nil
+	}
+	if util.FileExists(redisConf) {
+		return redisConf, nil
+	}
+	err = fmt.Errorf("[%s,%s] not exists", instConf, redisConf)
+	mylog.Logger.Error(err.Error())
+	return
+}
+
+// GetRedisPasswdFromConfFile reads the local redis instance's password from its config file
+func GetRedisPasswdFromConfFile(port int) (password string, err error) {
+	confFile, err := GetRedisLoccalConfFile(port)
+	if err != nil {
+		err = fmt.Errorf("get redis local config file failed,err:%v,port:%d", err, port)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	cmd01 := fmt.Sprintf(`grep -E '^requirepass' %s|awk '{print $2}'|head -1`, confFile)
+	password, err = util.RunBashCmd(cmd01, "", nil, 10*time.Second)
+	if err != nil {
+		return
+	}
+	password = strings.TrimPrefix(password, "\"")
+	password = strings.TrimSuffix(password, "\"")
+	return
+}
+
+// GetProxyPasswdFromConfFlie reads the local proxy instance's password from its config file
+func GetProxyPasswdFromConfFlie(port int, role string) (password string, err error) {
+	dataDir := consts.GetRedisDataDir()
+	var grepCmd string
+	if role == consts.MetaRoleTwemproxy {
+		grepCmd = fmt.Sprintf(`grep -w "password" %s/twemproxy*/%d/nutcracker.%d.yml|grep -vE "#"|awk '{print $2}'`,
+			dataDir, port, port)
+	} else if role == consts.MetaRolePredixy {
+		grepCmd = fmt.Sprintf(`grep -iw "auth" %s/predixy/%d/predixy.conf|awk '{print $2}'`, dataDir, port)
+	}
+	password, err = util.RunBashCmd(grepCmd, "", nil, 10*time.Second)
+	if err != nil {
+		return
+	}
+	password = strings.TrimPrefix(password, "\"")
+	password = strings.TrimSuffix(password, "\"")
+	return
+}
+
+type connTestItem struct {
+	IP       string
+	Port     int
+	Password string
+	Err      error
+}
+
+func (c *connTestItem) addr() string {
+	return c.IP + ":" + strconv.Itoa(c.Port)
+}
+
+// LocalRedisConnectTest checks connectivity to the local redis instances on the
+// given ports: it reads each instance's password from its local config file and
+// verifies that the instance accepts connections
+func LocalRedisConnectTest(ip string, ports []int) (err error) {
+	if len(ports) == 0 {
+		err = fmt.Errorf("LocalRedisConnectTest ports(%+v) cannot be empty", ports)
+		return
+	}
+	l01 := make([]*connTestItem, 0, len(ports))
+	var password string
+	for _, port := range ports {
+		password, err = GetRedisPasswdFromConfFile(port)
+		if err != nil {
+			return
+		}
+		l01 = append(l01, &connTestItem{
+			IP:       ip,
+			Port:     port,
+			Password: password,
+		})
+	}
+	// run the connection tests concurrently
+	wg := sync.WaitGroup{}
+	for _, item := range l01 {
+		test01 := item
+		wg.Add(1)
+		go func(test01 *connTestItem) {
+			defer wg.Done()
+			cli01, err := NewRedisClient(test01.addr(), test01.Password, 0, consts.TendisTypeRedisInstance)
+			if err != nil {
+				test01.Err = err
+				return
+			}
+			cli01.Close()
+		}(test01)
+	}
+	wg.Wait()
+
+	for _, item := range l01 {
+		test01 := item
+		if test01.Err != nil {
+			return test01.Err
+		}
+	}
+	return
+}
diff --git a/dbm-services/redis/db-tools/dbmon/models/myredis/tendisplus_cluster_setslotinfo.go b/dbm-services/redis/db-tools/dbmon/models/myredis/tendisplus_cluster_setslotinfo.go
new file mode 100644
index 0000000000..c19a2abc62
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/models/myredis/tendisplus_cluster_setslotinfo.go
@@ -0,0 +1,202 @@
+package myredis
+
+import (
+	"context"
+	"dbm-services/redis/db-tools/dbmon/mylog"
+	"dbm-services/redis/db-tools/dbmon/pkg/consts"
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// ClusterSetSlotInfo holds the parsed result of 'cluster setslot info'
+type ClusterSetSlotInfo struct {
+	// fields reported by the dst node of a migration
+	ImportingTaskIDs      []string
+	ImportingSlotList     []int
+	ImportingSlotMap      map[int]bool `json:"-"`
+	SuccessImportSlotList []int
+	SuccessImportSlotMap  map[int]bool `json:"-"`
+	FailImportSlotList    []int
+	FailImportSlotMap     map[int]bool `json:"-"`
+	RunningRcvTaskNum     int
+	SuccessRcvTaskNum     int
+	FailRcvTaskNum        int
+
+	// fields reported by the src node of a migration
+	MigratingTaskIDs       []string
+	MigratingSlotList      []int
+	MigratingSlotMap       map[int]bool `json:"-"`
+	SuccessMigrateSlotList []int
+	SuccessMigrateSlotMap  map[int]bool `json:"-"`
+	FailMigrateSlotList    []int
+	FailMigrateSlotMap     map[int]bool `json:"-"`
+	RunningSendTaskNum     int
+	SuccessSendTaskNum     int
+	FailSendTaskNum        int
+}
+
+// ToString returns the JSON representation
+func (info *ClusterSetSlotInfo) ToString() string {
+	ret, _ := json.Marshal(info)
+	return string(ret)
+}
+
+// IsImportingSlot reports whether the slot is currently being imported
+func (info *ClusterSetSlotInfo) IsImportingSlot(slotid int) bool {
+	_, ok := info.ImportingSlotMap[slotid]
+	return ok
+}
+
+// IsSuccessImportSlot reports whether the slot was imported successfully
+func (info *ClusterSetSlotInfo) IsSuccessImportSlot(slotid int) bool {
+	_, ok := info.SuccessImportSlotMap[slotid]
+	return ok
+}
+
+// IsFailImportSlot reports whether the slot failed to import
+func (info *ClusterSetSlotInfo) IsFailImportSlot(slotid int) bool {
+	_, ok := info.FailImportSlotMap[slotid]
+	return ok
+}
+
+// GetDstRedisSlotsStatus classifies the given slots from the dst node's point of view
+func (info *ClusterSetSlotInfo) GetDstRedisSlotsStatus(slotList []int) (
+	importing, successImport, failImport, unknown []int,
+) {
+	for _, slotItem := range slotList {
+		if info.IsImportingSlot(slotItem) {
+			importing = append(importing, slotItem)
+		} else if info.IsSuccessImportSlot(slotItem) {
+			successImport = append(successImport, slotItem)
+		} else if info.IsFailImportSlot(slotItem) {
+			failImport = append(failImport, slotItem)
+		} else {
+			unknown = append(unknown, slotItem)
+		}
+	}
+	return
+}
+
+// IsMigratingSlot reports whether the slot is currently being migrated out
+func (info *ClusterSetSlotInfo) IsMigratingSlot(slotid int) bool {
+	_, ok := info.MigratingSlotMap[slotid]
+	return ok
+}
+
+// IsSuccessMigrateSlot reports whether the slot was migrated out successfully
+func (info *ClusterSetSlotInfo) IsSuccessMigrateSlot(slotid int) bool {
+	_, ok := info.SuccessMigrateSlotMap[slotid]
+	return ok
+}
+
+// IsFailMigrateSlot reports whether the slot failed to migrate out
+func (info *ClusterSetSlotInfo) IsFailMigrateSlot(slotid int) bool {
+	_, ok := info.FailMigrateSlotMap[slotid]
+	return ok
+}
+
+// GetSrcSlotsStatus classifies the given slots from the src node's point of view
+func (info *ClusterSetSlotInfo) GetSrcSlotsStatus(slotList []int) (
+	migrating, successMigrate, failMigrate, unknown []int) {
+	for _, slotItem := range slotList {
+		if info.IsMigratingSlot(slotItem) {
+			migrating = append(migrating, slotItem)
+		} else if info.IsSuccessMigrateSlot(slotItem) {
+			successMigrate = append(successMigrate, slotItem)
+		} else if info.IsFailMigrateSlot(slotItem) {
+			failMigrate = append(failMigrate, slotItem)
+		} else {
+			unknown = append(unknown, slotItem)
+		}
+	}
+	return
+}
+
+// GetClusterSetSlotInfo runs 'cluster setslot info' on the node and parses the result
+func GetClusterSetSlotInfo(nodeAddr, nodePassword string) (
+	setSlotInfo *ClusterSetSlotInfo, err error) {
+	// connect to nodeAddr (this also validates connectivity)
+	cli01, err := NewRedisClient(nodeAddr, nodePassword, 0, consts.TendisTypeRedisInstance)
+	if err != nil {
+		return nil, err
+	}
+	defer cli01.Close()
+
+	cmd := []interface{}{"cluster", "setslot", "info"}
+	ret, err := cli01.InstanceClient.Do(context.TODO(), cmd...).Result()
+	if err != nil {
+		err = fmt.Errorf("cmd:%+v fail,err:%v,addr:%s", cmd, err, nodeAddr)
+		mylog.Logger.Error(err.Error())
+		return nil, err
+	}
+	setSlotsInfo := &ClusterSetSlotInfo{}
+	setSlotsInfo.ImportingSlotMap = make(map[int]bool)
+	setSlotsInfo.SuccessImportSlotMap = make(map[int]bool)
+	setSlotsInfo.FailImportSlotMap = make(map[int]bool)
+	setSlotsInfo.MigratingSlotMap = make(map[int]bool)
+	setSlotsInfo.SuccessMigrateSlotMap = make(map[int]bool)
+	setSlotsInfo.FailMigrateSlotMap = make(map[int]bool)
+
+	taskTimePattern := regexp.MustCompile(`\[.*?\]`)
+
+	importInfos, ok := ret.([]interface{})
+	if !ok {
+		err = fmt.Errorf(
+			`GetClusterSetSlotInfo cmd:'cluster setslot info' result not []interface{},nodeAddr:%s.cmd:%v`,
+			nodeAddr, cmd)
+		mylog.Logger.Error(err.Error())
+		return nil, err
+	}
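+	// The reply is a flat array of "label: value" strings; the sketch below is
+	// inferred from the labels parsed in the loop that follows (hedged, not
+	// taken from upstream docs):
+	//
+	//	importing taskid: [2021-06-01 16:47:00]task1 ...
+	//	importing slots: 0-100
+	//	success import slots: 0-50
+	//	running receiver task num: 2
+	//	...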
":", 2) + if len(list01) < 2 { + continue + } + list01[1] = strings.TrimSpace(list01[1]) + if list01[0] == "importing taskid" { + list01[1] = strings.TrimSpace(list01[1]) + if list01[1] == "" { + continue + } + list01[1] = taskTimePattern.ReplaceAllString(list01[1], "") // 将task 时间替换掉 + setSlotsInfo.ImportingTaskIDs = strings.Fields(list01[1]) + } else if list01[0] == "importing slots" { + setSlotsInfo.ImportingSlotList, setSlotsInfo.ImportingSlotMap, _, _, _ = DecodeSlotsFromStr(list01[1], " ") + } else if list01[0] == "success import slots" { + setSlotsInfo.SuccessImportSlotList, setSlotsInfo.SuccessImportSlotMap, _, _, _ = DecodeSlotsFromStr(list01[1], " ") + } else if list01[0] == "fail import slots" { + setSlotsInfo.FailImportSlotList, setSlotsInfo.FailImportSlotMap, _, _, _ = DecodeSlotsFromStr(list01[1], " ") + } else if list01[0] == "running receiver task num" { + setSlotsInfo.RunningRcvTaskNum, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "success receiver task num" { + setSlotsInfo.SuccessRcvTaskNum, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "fail receiver task num" { + setSlotsInfo.FailRcvTaskNum, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "migrating taskid" { + if infoItem == "" { + continue + } + list01[1] = taskTimePattern.ReplaceAllString(list01[1], "") // 将task 时间替换掉 + setSlotsInfo.MigratingTaskIDs = strings.Fields(list01[1]) + } else if list01[0] == "migrating slots" { + setSlotsInfo.MigratingSlotList, setSlotsInfo.MigratingSlotMap, _, _, _ = DecodeSlotsFromStr(list01[1], " ") + } else if list01[0] == "success migrate slots" { + setSlotsInfo.SuccessMigrateSlotList, setSlotsInfo.SuccessMigrateSlotMap, _, _, _ = DecodeSlotsFromStr(list01[1], " ") + } else if list01[0] == "fail migrate slots" { + setSlotsInfo.FailMigrateSlotList, setSlotsInfo.FailMigrateSlotMap, _, _, _ = DecodeSlotsFromStr(list01[1], " ") + } else if list01[0] == "running sender task num" { + list01[1] = strings.TrimSpace(list01[1]) + setSlotsInfo.RunningSendTaskNum, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "success sender task num" { + list01[1] = strings.TrimSpace(list01[1]) + setSlotsInfo.SuccessSendTaskNum, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "fail sender task num" { + list01[1] = strings.TrimSpace(list01[1]) + setSlotsInfo.FailSendTaskNum, _ = strconv.Atoi(list01[1]) + } + } + return setSlotsInfo, nil +} diff --git a/dbm-services/redis/db-tools/dbmon/models/myredis/tendisplus_infoRepl.go b/dbm-services/redis/db-tools/dbmon/models/myredis/tendisplus_infoRepl.go new file mode 100644 index 0000000000..b09684fb92 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/models/myredis/tendisplus_infoRepl.go @@ -0,0 +1,445 @@ +package myredis + +import ( + "context" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "encoding/json" + "fmt" + "regexp" + "strconv" + "strings" +) + +// InfoReplSlave Tendisplus master中执行info replication结果中slave状态 +// 如: slave0:ip=luketest03-redis-rdsplus4-1.luketest03-svc.dmc,port=30000,state=online,offset=930327677,lag=0 +type InfoReplSlave struct { + Name string `json:"name"` + IP string `json:"ip"` + Port int `json:"port"` + State string `json:"state"` + Offset int64 `json:"offset"` + Lag int64 `json:"lag"` +} + +func (slave *InfoReplSlave) decode(line string) error { + line = strings.TrimSpace(line) + list01 := strings.Split(line, ":") + if len(list01) < 2 { + return fmt.Errorf(`%s format not correct, + the correct format is as 
follows:slave0:ip=xx,port=48000,state=online,offset=2510,lag=0`, line) + } + slave.Name = list01[0] + list02 := strings.Split(list01[1], ",") + for _, item01 := range list02 { + list02 := strings.Split(item01, "=") + if list02[0] == "ip" { + slave.IP = list02[1] + } else if list02[0] == "port" { + slave.Port, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "state" { + slave.State = list02[1] + } else if list02[0] == "offset" { + slave.Offset, _ = strconv.ParseInt(list02[1], 10, 64) + } else if list02[0] == "lag" { + slave.Lag, _ = strconv.ParseInt(list02[1], 10, 64) + } + } + return nil +} + +// InfoReplRocksdb .. +type InfoReplRocksdb struct { + Name string `json:"name"` + IP string `json:"ip"` + Port int `json:"port"` + State string `json:"state"` + BinlogPos int64 `json:"binlog_pos"` + Lag int64 `json:"lag"` +} + +// InfoReplRocksdbSlave 在tendisplus master上执行info replication结果中rocksdb_slave0解析 +// 如: rocksdb0_slave0:ip=127.0.0.1,port=48000,dest_store_id=0,state=online,binlog_pos=249,lag=0,binlog_lag=0 +type InfoReplRocksdbSlave struct { + InfoReplRocksdb + DestStoreID int `json:"dest_store_id"` + BinlogLag int64 `json:"binlog_lag"` +} + +func (slave *InfoReplRocksdbSlave) decode(line string) error { + line = strings.TrimSpace(line) + var err error + list01 := strings.Split(line, ":") + if len(list01) < 2 { + err = fmt.Errorf(`%s format not correct, + the correct format is as follows: + rocksdb0_slave0:ip=xx,port=xx,dest_store_id=0,state=online,binlog_pos=249,lag=0,binlog_lag=0`, line) + mylog.Logger.Error(err.Error()) + return err + } + slave.Name = list01[0] + + list02 := strings.Split(list01[1], ",") + for _, item01 := range list02 { + list02 := strings.Split(item01, "=") + if list02[0] == "ip" { + slave.IP = list02[1] + } else if list02[0] == "port" { + slave.Port, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "dest_store_id" { + slave.DestStoreID, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "state" { + slave.State = list02[1] + } else if list02[0] == "binlog_pos" { + slave.BinlogPos, _ = strconv.ParseInt(list02[1], 10, 64) + } else if list02[0] == "lag" { + slave.Lag, _ = strconv.ParseInt(list02[1], 10, 64) + } else if list02[0] == "binlog_lag" { + slave.BinlogLag, _ = strconv.ParseInt(list02[1], 10, 64) + } + } + return nil +} + +// InfoReplRocksdbMaster 在tendisplus slave上执行info replication结果中rocksdb_master解析 +// 如: rocksdb0_master:ip=127.0.0.1,port=47000,src_store_id=0,state=online,binlog_pos=249,lag=0 +type InfoReplRocksdbMaster struct { + InfoReplRocksdb + SrcStoreID int `json:"src_store_id"` +} + +func (master *InfoReplRocksdbMaster) decode(line string) error { + line = strings.TrimSpace(line) + list01 := strings.Split(line, ":") + var err error + if len(list01) < 2 { + err = fmt.Errorf(`%s format not correct, + the correct format is as follows: + rocksdb0_master:ip=xxxx,port=47000,src_store_id=0,state=online,binlog_pos=249,lag=0`, line) + mylog.Logger.Error(err.Error()) + return err + } + master.Name = list01[0] + + list02 := strings.Split(list01[1], ",") + for _, item01 := range list02 { + list02 := strings.Split(item01, "=") + if list02[0] == "ip" { + master.IP = list02[1] + } else if list02[0] == "port" { + master.Port, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "src_store_id" { + master.SrcStoreID, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "state" { + master.State = list02[1] + } else if list02[0] == "binlog_pos" { + master.BinlogPos, _ = strconv.ParseInt(list02[1], 10, 64) + } else if list02[0] == "lag" { + master.Lag, _ = 
strconv.ParseInt(list02[1], 10, 64) + } + } + return nil +} + +// TendisplusInfoReplData tendisplus info replication命令结果解析 +type TendisplusInfoReplData struct { + Addr string `json:"addr"` + Role string `json:"role"` + MasterHost string `json:"master_host"` + MasterPort int `json:"master_port"` + MasterLinkStatus string `json:"master_link_status"` + MasterLastIoSecondsAgo int64 `json:"master_last_io_seconds_ago"` + MasterSyncInPogress int64 `json:"master_sync_in_progress"` + SlaveReplOffset int64 `json:"slave_repl_offset"` + SlavePriority int64 `json:"slave_priority"` + SlaveReadOnly int `json:"slave_read_only"` + ConnectedSlaves int `json:"connected_slaves"` + MasterReplOffset int64 `json:"master_repl_offset"` + SlaveList []InfoReplSlave `json:"slave_list"` + RocksdbMasterList []InfoReplRocksdbMaster `json:"rocksdb_master_list"` + RocksdbSlaveList []InfoReplRocksdbSlave `json:"rocksdb_slave_list"` +} + +// String 用于打印 +func (rpl *TendisplusInfoReplData) String() string { + tmp, _ := json.Marshal(rpl) + return string(tmp) +} + +// GetRole master/slave +func (rpl *TendisplusInfoReplData) GetRole() string { + return rpl.Role +} + +// GetMasterLinkStatus up/down +func (rpl *TendisplusInfoReplData) GetMasterLinkStatus() string { + return rpl.MasterLinkStatus +} + +// SlaveMaxLag .. +// - 如果我的角色是slave,则从 RocksdbMasterList 中获取maxLag; +// - 如果我的角色是master,则先根据slaveAddr找到slave,然后从 SlaveList 中获取获取maxLag; +// - 如果slaveAddr为空,则获取master第一个slave的lag作为 maxLag; +func (rpl *TendisplusInfoReplData) SlaveMaxLag(slaveAddr string) (int64, error) { + var maxLag int64 = 0 + var err error = nil + slaveAddr = strings.TrimSpace(slaveAddr) + if rpl.GetRole() == "slave" { + if rpl.GetMasterLinkStatus() == "down" { + err = fmt.Errorf("slave:%s master_link_status is %s", rpl.Addr, rpl.GetMasterLinkStatus()) + mylog.Logger.Error(err.Error()) + return maxLag, err + } + for _, rdbMaster01 := range rpl.RocksdbMasterList { + if rdbMaster01.Lag > 18000000000000000 { + // 以前tendisplus的一个bug, 新版本已修复 + continue + } + if rdbMaster01.Lag > maxLag { + maxLag = rdbMaster01.Lag + } + } + return maxLag, nil + } + // role==master + if len(rpl.SlaveList) == 0 { + err = fmt.Errorf("master:%s have no slave", rpl.Addr) + mylog.Logger.Error(err.Error()) + return maxLag, err + } + if slaveAddr == "" { + // default first slave lag + maxLag = rpl.SlaveList[0].Lag + return maxLag, nil + } + var destSlave *InfoReplSlave = nil + for _, slave01 := range rpl.SlaveList { + slaveItem := slave01 + addr01 := fmt.Sprintf("%s:%d", slaveItem.IP, slaveItem.Port) + if slaveAddr == addr01 { + destSlave = &slaveItem + break + } + } + if destSlave == nil { + err = fmt.Errorf("master:%s not find slave:%s", rpl.Addr, slaveAddr) + mylog.Logger.Error(err.Error()) + return maxLag, err + } + maxLag = destSlave.Lag + return maxLag, nil +} + +// TendisplusInfoRepl tendisplus info replication结果解析 +// 参考内容: http://tendis.cn/#/Tendisplus/%E5%91%BD%E4%BB%A4/info?id=replication +func (db *RedisClient) TendisplusInfoRepl() (replData TendisplusInfoReplData, err error) { + var replRet string + if db.DbType == consts.TendisTypeRedisCluster { + replRet, err = db.ClusterClient.Info(context.TODO(), "replication").Result() + } else { + replRet, err = db.InstanceClient.Info(context.TODO(), "replication").Result() + } + if err != nil { + err = fmt.Errorf("info replication fail,err:%v,aadr:%s", err, db.Addr) + mylog.Logger.Error(err.Error()) + return + } + infoList := strings.Split(replRet, "\n") + replData = TendisplusInfoReplData{} + replData.Addr = db.Addr + + slaveReg := 
regexp.MustCompile(`^slave\d+$`) + rocksdbSlaveReg := regexp.MustCompile(`^rocksdb\d+_slave\d+$`) + rocksdbMasterReg := regexp.MustCompile(`^rocksdb\d+_master$`) + for _, infoItem := range infoList { + infoItem = strings.TrimSpace(infoItem) + if strings.HasPrefix(infoItem, "#") { + continue + } + if len(infoItem) == 0 { + continue + } + list01 := strings.SplitN(infoItem, ":", 2) + if len(list01) < 2 { + continue + } + list01[0] = strings.TrimSpace(list01[0]) + list01[1] = strings.TrimSpace(list01[1]) + if list01[0] == "role" { + replData.Role = list01[1] + } else if list01[0] == "master_host" { + replData.MasterHost = list01[1] + } else if list01[0] == "master_port" { + replData.MasterPort, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "master_link_status" { + replData.MasterLinkStatus = list01[1] + } else if list01[0] == "master_last_io_seconds_ago" { + replData.MasterLastIoSecondsAgo, _ = strconv.ParseInt(list01[1], 10, 64) + } else if list01[0] == "master_sync_in_progress" { + replData.MasterSyncInPogress, _ = strconv.ParseInt(list01[1], 10, 64) + } else if list01[0] == "slave_repl_offset" { + replData.SlaveReplOffset, _ = strconv.ParseInt(list01[1], 10, 64) + } else if list01[0] == "slave_priority" { + replData.SlavePriority, _ = strconv.ParseInt(list01[1], 10, 64) + } else if list01[0] == "slave_read_only" { + replData.SlaveReadOnly, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "connected_slaves" { + replData.ConnectedSlaves, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "master_repl_offset" { + replData.MasterReplOffset, _ = strconv.ParseInt(list01[1], 10, 64) + } else if slaveReg.MatchString(list01[0]) { + slave01 := InfoReplSlave{} + err = slave01.decode(infoItem) + if err != nil { + return + } + replData.SlaveList = append(replData.SlaveList, slave01) + } else if rocksdbSlaveReg.MatchString(list01[0]) { + rdbSlave01 := InfoReplRocksdbSlave{} + err = rdbSlave01.decode(infoItem) + if err != nil { + return + } + replData.RocksdbSlaveList = append(replData.RocksdbSlaveList, rdbSlave01) + } else if rocksdbMasterReg.MatchString(list01[0]) { + rdbMaster01 := InfoReplRocksdbMaster{} + err = rdbMaster01.decode(infoItem) + if err != nil { + return + } + replData.RocksdbMasterList = append(replData.RocksdbMasterList, rdbMaster01) + } + } + return +} + +// TendisSSDInfoSlaveItem TendisSSD master中执行info slaves结果中slave状态 +// 如: slave0: ip=127.0.0.1,port=30000,state=IncrSync,seq=1111,lag=1 +type TendisSSDInfoSlaveItem struct { + Name string `json:"name"` + IP string `json:"ip"` + Port int `json:"port"` + State string `json:"state"` + Seq int64 `json:"seq"` + Lag int64 `json:"lag"` +} + +// Addr addr字符串 +func (item *TendisSSDInfoSlaveItem) Addr() string { + return item.IP + ":" + strconv.Itoa(item.Port) +} + +func (item *TendisSSDInfoSlaveItem) decode(line string) error { + line = strings.TrimSpace(line) + list01 := strings.Split(line, ":") + if len(list01) < 2 { + return fmt.Errorf(`%s format not correct, + the correct format is as follows:slave0:ip=xx,port=48000,state=IncrSync,seq=1111,lag=1`, line) + } + item.Name = list01[0] + list02 := strings.Split(list01[1], ",") + for _, item01 := range list02 { + list02 := strings.Split(item01, "=") + list02[0] = strings.TrimSpace(list02[0]) + list02[1] = strings.TrimSpace(list02[1]) + if list02[0] == "ip" { + item.IP = list02[1] + } else if list02[0] == "port" { + item.Port, _ = strconv.Atoi(list02[1]) + } else if list02[0] == "state" { + item.State = list02[1] + } else if list02[0] == "seq" { + item.Seq, _ = 
strconv.ParseInt(list02[1], 10, 64) + } else if list02[0] == "lag" { + item.Lag, _ = strconv.ParseInt(list02[1], 10, 64) + } + } + return nil +} + +// TendisSSDInfoSlavesData tendisSSD 'info slaves'结果 +type TendisSSDInfoSlavesData struct { + ConnectedSlaves int `json:"connected-slaves"` + DisConnectedSlaves int `json:"disconnected-slaves"` + SlaveList []TendisSSDInfoSlaveItem `json:"slave_list"` +} + +// String 用于打印 +func (data *TendisSSDInfoSlavesData) String() string { + tmp, _ := json.Marshal(data) + return string(tmp) +} + +// TendisSSDInfoSlaves tendisSSD 'info slaves'解析 +func (db *RedisClient) TendisSSDInfoSlaves() (ret TendisSSDInfoSlavesData, err error) { + var replRet string + replRet, err = db.InstanceClient.Info(context.TODO(), "slaves").Result() + if err != nil { + err = fmt.Errorf("info slaves fail,err:%v,aadr:%s", err, db.Addr) + mylog.Logger.Error(err.Error()) + return + } + infoList := strings.Split(replRet, "\n") + slaveReg := regexp.MustCompile(`^slave\d+$`) + for _, infoItem := range infoList { + infoItem = strings.TrimSpace(infoItem) + if strings.HasPrefix(infoItem, "#") { + continue + } + if len(infoItem) == 0 { + continue + } + list01 := strings.SplitN(infoItem, ":", 2) + if len(list01) < 2 { + continue + } + list01[0] = strings.TrimSpace(list01[0]) + list01[1] = strings.TrimSpace(list01[1]) + if list01[0] == "connected-slaves" { + ret.ConnectedSlaves, _ = strconv.Atoi(list01[1]) + } else if list01[0] == "disconnected-slaves" { + ret.DisConnectedSlaves, _ = strconv.Atoi(list01[1]) + } else if slaveReg.MatchString(list01[0]) { + slave01 := TendisSSDInfoSlaveItem{} + err = slave01.decode(infoItem) + if err != nil { + return + } + ret.SlaveList = append(ret.SlaveList, slave01) + } + } + return +} + +// ConnectedSlaves 'info replication'中获得 connected_slaves 数 +func (db *RedisClient) ConnectedSlaves() (ret int, err error) { + var dbType string + var plusInfoRepl TendisplusInfoReplData + var ssdInfoSlaves TendisSSDInfoSlavesData + var infoReplRet map[string]string + dbType, err = db.GetTendisType() + if err != nil { + return + } + if dbType == consts.TendisTypeTendisplusInsance { + plusInfoRepl, err = db.TendisplusInfoRepl() + if err != nil { + return + } + ret = len(plusInfoRepl.SlaveList) + } else if dbType == consts.TendisTypeTendisSSDInsance { + ssdInfoSlaves, err = db.TendisSSDInfoSlaves() + if err != nil { + return + } + ret = len(ssdInfoSlaves.SlaveList) + } else { + infoReplRet, err = db.Info("replication") + if err != nil { + return + } + connSlaves, _ := infoReplRet["connected_slaves"] + ret, _ = strconv.Atoi(connSlaves) + } + return +} diff --git a/dbm-services/redis/db-tools/dbmon/mylog/gin.go b/dbm-services/redis/db-tools/dbmon/mylog/gin.go new file mode 100644 index 0000000000..0427209d3f --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/mylog/gin.go @@ -0,0 +1,86 @@ +package mylog + +import ( + "net" + "net/http" + "net/http/httputil" + "os" + "runtime/debug" + "strings" + "time" + + "github.com/gin-gonic/gin" + "go.uber.org/zap" +) + +// 参考: https://www.cnblogs.com/you-men/p/14694928.html + +// GinLogger 接收gin框架默认的日志 +func GinLogger() gin.HandlerFunc { + return func(c *gin.Context) { + start := time.Now() + path := c.Request.URL.Path + query := c.Request.URL.RawQuery + c.Next() + + cost := time.Since(start) + Logger.Info(path, + zap.Int("status", c.Writer.Status()), + zap.String("method", c.Request.Method), + zap.String("path", path), + zap.String("query", query), + zap.String("ip", c.ClientIP()), + zap.String("user-agent", c.Request.UserAgent()), + 
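+			// errors attached by handlers via c.Error(...) default to gin.ErrorTypePrivate
+			// and therefore surface in this access-log line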
zap.String("errors", c.Errors.ByType(gin.ErrorTypePrivate).String()), + zap.Duration("cost", cost), + ) + } +} + +// GinRecovery recover掉项目可能出现的panic,并使用zap记录相关日志 +func GinRecovery(stack bool) gin.HandlerFunc { + return func(c *gin.Context) { + defer func() { + if err := recover(); err != nil { + // Check for a broken connection, as it is not really a + // condition that warrants a panic stack trace. + var brokenPipe bool + if ne, ok := err.(*net.OpError); ok { + if se, ok := ne.Err.(*os.SyscallError); ok { + if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), + "connection reset by peer") { + brokenPipe = true + } + } + } + + httpRequest, _ := httputil.DumpRequest(c.Request, false) + if brokenPipe { + Logger.Error(c.Request.URL.Path, + zap.Any("error", err), + zap.String("request", string(httpRequest)), + ) + // If the connection is dead, we can't write a status to it. + c.Error(err.(error)) // nolint: errcheck + c.Abort() + return + } + + if stack { + Logger.Error("[Recovery from panic]", + zap.Any("error", err), + zap.String("request", string(httpRequest)), + zap.String("stack", string(debug.Stack())), + ) + } else { + Logger.Error("[Recovery from panic]", + zap.Any("error", err), + zap.String("request", string(httpRequest)), + ) + } + c.AbortWithStatus(http.StatusInternalServerError) + } + }() + c.Next() + } +} diff --git a/dbm-services/redis/db-tools/dbmon/mylog/mylog.go b/dbm-services/redis/db-tools/dbmon/mylog/mylog.go new file mode 100644 index 0000000000..9cbd4bfc22 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/mylog/mylog.go @@ -0,0 +1,150 @@ +// Package mylog 日志 +package mylog + +import ( + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/robfig/cron/v3" + "github.com/spf13/viper" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "gopkg.in/natefinch/lumberjack.v2" +) + +// Logger 全局logger +var Logger *zap.Logger + +// AdapterLog 适配器logger +var AdapterLog *LogAdapter + +// getCurrentDirectory 获取当前二进制程序所在执行路径 +func getCurrentDirectory() string { + dir, err := filepath.Abs(filepath.Dir(os.Args[0])) + if err != nil { + log.Panicf(fmt.Sprintf("GetCurrentDirectory failed,os.Args[0]=%s, err: %+v", os.Args[0], err)) + return dir + } + dir = strings.Replace(dir, "\\", "/", -1) + return dir +} + +// mkdirIfNotExistsWithPerm 如果目录不存在则创建,并指定文件Perm +func mkdirIfNotExistsWithPerm(dir string, perm os.FileMode) { + _, err := os.Stat(dir) + if err == nil { + return + } + if os.IsNotExist(err) == true { + err = os.MkdirAll(dir, perm) + if err != nil { + log.Panicf("MkdirAll fail,err:%v,dir:%s", err, dir) + } + } +} + +// InitRotateLoger 初始化日志logger +func InitRotateLoger() { + debug := viper.GetBool("BK_DBMON_DEBUG") + var level zap.AtomicLevel + if debug == true { + level = zap.NewAtomicLevelAt(zapcore.DebugLevel) + } else { + level = zap.NewAtomicLevelAt(zapcore.InfoLevel) + } + currDir := getCurrentDirectory() + logDir := filepath.Join(currDir, "logs") + mkdirIfNotExistsWithPerm(logDir, 0750) + + chownCmd := fmt.Sprintf("chown -R %s.%s %s", consts.MysqlAaccount, consts.MysqlGroup, logDir) + cmd := exec.Command("bash", "-c", chownCmd) + cmd.Run() + + cfg := zap.NewProductionConfig() + cfg.EncoderConfig = zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + TimeKey: "time", + NameKey: "name", + CallerKey: "caller", + FunctionKey: "func", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: 
zapcore.LowercaseLevelEncoder,
+		EncodeTime:     zapcore.ISO8601TimeEncoder,
+		EncodeDuration: zapcore.SecondsDurationEncoder,
+		EncodeCaller:   zapcore.ShortCallerEncoder,
+		EncodeName:     zapcore.FullNameEncoder,
+	}
+
+	lj := zapcore.AddSync(&lumberjack.Logger{
+		Filename:   filepath.Join(logDir, "bk-dbmon.log"),
+		MaxSize:    256, // max size of a single log file, in MB
+		MaxBackups: 10,  // keep at most 10 backup files
+		MaxAge:     15,  // keep at most 15 days of logs
+		LocalTime:  true,
+		Compress:   true,
+	})
+
+	core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg.EncoderConfig), zapcore.NewMultiWriteSyncer(lj), level)
+	Logger = zap.New(core, zap.AddCaller())
+
+	AdapterLog = &LogAdapter{}
+	AdapterLog.Logger = Logger
+}
+
+// no runtime effect; only asserts that LogAdapter implements the cron.Logger interface
+var _ cron.Logger = (*LogAdapter)(nil)
+
+// LogAdapter adapter compatible with both the go.uber.org/zap.Logger and robfig/cron.Logger interfaces
+type LogAdapter struct {
+	*zap.Logger
+}
+
+// Error implements cron.Logger. It must delegate to the embedded zap logger
+// (l.Logger.Error); calling l.Error here would recurse into this method forever.
+func (l *LogAdapter) Error(err error, msg string, keysAndValues ...interface{}) {
+	keysAndValues = formatTimes(keysAndValues)
+	l.Logger.Error(fmt.Sprintf(formatString(len(keysAndValues)+2), append([]interface{}{msg, "error", err},
+		keysAndValues...)...))
+}
+
+// Info implements cron.Logger
+func (l *LogAdapter) Info(msg string, keysAndValues ...interface{}) {
+	keysAndValues = formatTimes(keysAndValues)
+	l.Logger.Info(fmt.Sprintf(formatString(len(keysAndValues)), append([]interface{}{msg}, keysAndValues...)...))
+}
+
+// formatString returns a logfmt-like format string for the number of
+// key/values.
+func formatString(numKeysAndValues int) string {
+	var sb strings.Builder
+	sb.WriteString("%s")
+	if numKeysAndValues > 0 {
+		sb.WriteString(", ")
+	}
+	for i := 0; i < numKeysAndValues/2; i++ {
+		if i > 0 {
+			sb.WriteString(", ")
+		}
+		sb.WriteString("%v=%v")
+	}
+	return sb.String()
+}
+
+// formatTimes formats any time.Time values as RFC3339.
+func formatTimes(keysAndValues []interface{}) []interface{} {
+	var formattedArgs []interface{}
+	for _, arg := range keysAndValues {
+		if t, ok := arg.(time.Time); ok {
+			arg = t.Format(time.RFC3339)
+		}
+		formattedArgs = append(formattedArgs, arg)
+	}
+	return formattedArgs
+}
diff --git a/dbm-services/redis/db-tools/dbmon/package.sh b/dbm-services/redis/db-tools/dbmon/package.sh
new file mode 100644
index 0000000000..ed28d95fc0
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/package.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env sh
+
+DIR=$(dirname $0)
+cd $DIR
+
+make build
+
+version=$(./bin/bk-dbmon -v|awk '{print $2}')
+targetDir="bk-dbmon-$version"
+tarName="$targetDir.tar.gz"
+
+if [[ ! 
-d $targetDir ]] +then + mkdir -p $targetDir +fi + +cp ./bin/bk-dbmon $targetDir/ +cp ./start.sh $targetDir/ +cp ./stop.sh $targetDir/ +cp ./dbmon-config.yaml $targetDir/ + +if [[ -e $tarName ]] +then +rm -rf $tarName +fi + +tar -zcf $tarName $targetDir + +echo "$tarName success" \ No newline at end of file diff --git a/dbm-services/redis/db-tools/dbmon/pkg/backupsys/backupsys.go b/dbm-services/redis/db-tools/dbmon/pkg/backupsys/backupsys.go new file mode 100644 index 0000000000..7561a97c43 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/backupsys/backupsys.go @@ -0,0 +1,230 @@ +// Package backupsys 备份系统 +package backupsys + +import ( + "bufio" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "dbm-services/redis/db-tools/dbmon/util" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" +) + +// UploadTask 操作备份系统 +type UploadTask struct { + Files []string `json:"files"` // 全路径 + TaskIDs []uint64 `json:"taskids"` + Tag string `json:"tag"` +} + +// UploadFiles 上传文件 +func (task *UploadTask) UploadFiles() (err error) { + var taskIDStr string + var taskIDNum uint64 + if len(task.Files) == 0 { + return + } + if task.Tag == "" { + err = fmt.Errorf("BackupSystem uploadFiles tag(%s) cannot be empty", task.Tag) + mylog.Logger.Error(err.Error()) + return + } + for _, file := range task.Files { + if !util.FileExists(file) { + err = fmt.Errorf("BackupSystem uploadFiles %s not exists", file) + mylog.Logger.Error(err.Error()) + return + } + } + for _, bkfile := range task.Files { + bkCmd := fmt.Sprintf("%s -n -f %s --with-md5 -t %s|grep 'taskid'|awk -F: '{print $2}'", + consts.BackupClient, bkfile, task.Tag) + mylog.Logger.Info(bkCmd) + taskIDStr, err = util.RunBashCmd(bkCmd, "", nil, 10*time.Minute) + if err != nil { + return + } + taskIDNum, err = strconv.ParseUint(taskIDStr, 10, 64) + if err != nil { + err = fmt.Errorf("%s ParseUint failed,err:%v", taskIDStr, err) + mylog.Logger.Error(err.Error()) + return + } + task.TaskIDs = append(task.TaskIDs, taskIDNum) + } + return +} + +// CheckTasksStatus 检查tasks状态 +func (task *UploadTask) CheckTasksStatus() (runningTaskIDs, failTaskIDs, succTaskIDs []uint64, + runningFiles, failedFiles, succFiles []string, failMsgs []string, err error) { + var status TaskStatus + for idx, taskID := range task.TaskIDs { + status, err = GetTaskStatus(taskID) + if err != nil { + return + } + if status.Status > 4 { + // err = fmt.Errorf("ToBackupSystem %s failed,err:%s,taskid:%d", + // status.File, status.StatusInfo, taskID) + // mylog.Logger.Error(err.Error()) + failMsgs = append(failMsgs, fmt.Sprintf("taskid:%d,failMsg:%s", taskID, status.StatusInfo)) + failedFiles = append(failedFiles, task.Files[idx]) + failTaskIDs = append(failTaskIDs, task.TaskIDs[idx]) + } else if status.Status == 4 { + succFiles = append(succFiles, task.Files[idx]) + succTaskIDs = append(succTaskIDs, task.TaskIDs[idx]) + } else if status.Status < 4 { + runningFiles = append(runningFiles, task.Files[idx]) + runningTaskIDs = append(runningTaskIDs, task.TaskIDs[idx]) + } + } + return +} + +// WaitForUploadFinish 等待所有files上传成功 +func (task *UploadTask) WaitForUploadFinish() (err error) { + var times int64 + var msg string + var runningFiles, failFiles, succFiles, failMsgs []string + for { + times++ + _, _, _, runningFiles, failFiles, succFiles, failMsgs, err = task.CheckTasksStatus() + if err != nil { + return + } + // 只要有running的task,则继续等待 + if len(runningFiles) > 0 { + if times%6 == 0 { + // 每分钟打印一次日志 + msg = fmt.Sprintf("files[%+v] cnt:%d upload to 
backupSystem still running", runningFiles, len(runningFiles))
+				mylog.Logger.Info(msg)
+			}
+			time.Sleep(10 * time.Second)
+			continue
+		}
+		if len(failMsgs) > 0 {
+			err = fmt.Errorf("failCnt:%d,failFiles:[%+v],err:%s", len(failFiles), failFiles, strings.Join(failMsgs, ","))
+			mylog.Logger.Error(err.Error())
+			return
+		}
+		if len(succFiles) == len(task.Files) {
+			return nil
+		}
+		break
+	}
+	return
+}
+
+// TaskStatus result of the `backup_client -q --taskid=xxxx` command
+type TaskStatus struct {
+	File           string    `json:"file"`
+	Host           string    `json:"host"`
+	SendupDateTime time.Time `json:"sendup_datetime"`
+	Status         int       `json:"status"`
+	StatusInfo     string    `json:"status_info"`
+	StartTime      time.Time `json:"start_time"`
+	CompleteTime   time.Time `json:"complete_time"`
+	ExpireTime     time.Time `json:"expire_time"`
+}
+
+// String for printing
+func (status *TaskStatus) String() string {
+	statusBytes, _ := json.Marshal(status)
+	return string(statusBytes)
+}
+
+// GetTaskStatus runs `backup_client -q --taskid=xxxx` and parses the result
+func GetTaskStatus(taskid uint64) (status TaskStatus, err error) {
+	var cmdRet string
+	bkCmd := fmt.Sprintf("%s -q --taskid=%d", consts.BackupClient, taskid)
+	cmdRet, err = util.RunBashCmd(bkCmd, "", nil, 30*time.Second)
+	if err != nil {
+		return
+	}
+	scanner := bufio.NewScanner(strings.NewReader(cmdRet))
+	scanner.Split(bufio.ScanLines)
+	for scanner.Scan() {
+		line := scanner.Text()
+		line = strings.TrimSpace(line)
+		if line == "" {
+			continue
+		}
+		l01 := strings.SplitN(line, ":", 2)
+		if len(l01) != 2 {
+			err = fmt.Errorf("len()!=2,cmd:%s,result format not correct:%s", bkCmd, cmdRet)
+			mylog.Logger.Error(err.Error())
+			return
+		}
+		first := strings.TrimSpace(l01[0])
+		second := strings.TrimSpace(l01[1])
+		switch first {
+		case "file":
+			status.File = second
+		case "host":
+			status.Host = second
+		case "sendup datetime":
+			if second == "0000-00-00 00:00:00" {
+				status.SendupDateTime = time.Time{} // zero value for unset time
+				break
+			}
+			status.SendupDateTime, err = time.ParseInLocation(consts.UnixtimeLayout, second, time.Local)
+			if err != nil {
+				err = fmt.Errorf("time.Parse 'sendup datetime' failed,err:%v,value:%s,cmd:%s", err, second, bkCmd)
+				mylog.Logger.Error(err.Error())
+				return
+			}
+		case "status":
+			status.Status, err = strconv.Atoi(second)
+			if err != nil {
+				err = fmt.Errorf("strconv.Atoi failed,err:%v,value:%s,cmd:%s", err, second, bkCmd)
+				mylog.Logger.Error(err.Error())
+				return
+			}
+		case "status info":
+			status.StatusInfo = second
+		case "start_time":
+			if second == "0000-00-00 00:00:00" {
+				status.StartTime = time.Time{} // zero value for unset time
+				break
+			}
+			status.StartTime, err = time.ParseInLocation(consts.UnixtimeLayout, second, time.Local)
+			if err != nil {
+				err = fmt.Errorf("time.Parse start_time failed,err:%v,value:%s,cmd:%s", err, second, bkCmd)
+				mylog.Logger.Error(err.Error())
+				return
+			}
+		case "complete_time":
+			if second == "0000-00-00 00:00:00" {
+				status.CompleteTime = time.Time{} // zero value for unset time
+				break
+			}
+			status.CompleteTime, err = time.ParseInLocation(consts.UnixtimeLayout, second, time.Local)
+			if err != nil {
+				err = fmt.Errorf("time.Parse complete_time failed,err:%v,value:%s,cmd:%s", err, second, bkCmd)
+				mylog.Logger.Error(err.Error())
+				return
+			}
+		case "expire_time":
+			if second == "0000-00-00 00:00:00" {
+				status.ExpireTime = time.Time{} // zero value for unset time
+				break
+			}
+			status.ExpireTime, err = time.ParseInLocation(consts.UnixtimeLayout, second, time.Local)
+			if err != nil {
+				err = fmt.Errorf("time.Parse expire_time 
failed,err:%v,value:%s,cmd:%s", err, second, bkCmd) + mylog.Logger.Error(err.Error()) + return + } + } + } + if err = scanner.Err(); err != nil { + err = fmt.Errorf("scanner.Scan failed,err:%v,cmd:%s", err, cmdRet) + mylog.Logger.Error(err.Error()) + return + } + return +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/consts/consts.go b/dbm-services/redis/db-tools/dbmon/pkg/consts/consts.go new file mode 100644 index 0000000000..ae315cc833 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/consts/consts.go @@ -0,0 +1,249 @@ +// Package consts 常量 +package consts + +// version +const ( + BkDbmonVersion = "v0.9" +) + +const ( + // TendisTypePredixyRedisCluster predixy + RedisCluster架构 + TendisTypePredixyRedisCluster = "PredixyRedisCluster" + // TendisTypePredixyTendisplusCluster predixy + TendisplusCluster架构 + TendisTypePredixyTendisplusCluster = "PredixyTendisplusCluster" + // TendisTypeTwemproxyRedisInstance twemproxy + RedisInstance架构 + TendisTypeTwemproxyRedisInstance = "TwemproxyRedisInstance" + // TendisTypeTwemproxyTendisplusInstance twemproxy+ TendisplusInstance架构 + TendisTypeTwemproxyTendisplusInstance = "TwemproxyTendisplusInstance" + // TendisTypeTwemproxyTendisSSDInstance twemproxy+ TendisSSDInstance架构 + TendisTypeTwemproxyTendisSSDInstance = "TwemproxyTendisSSDInstance" + // TendisTypeRedisInstance RedisCache 主从版 + TendisTypeRedisInstance = "RedisInstance" + // TendisTypeTendisplusInsance Tendisplus 主从版 + TendisTypeTendisplusInsance = "TendisplusInstance" + // TendisTypeTendisSSDInsance TendisSSD 主从版 + TendisTypeTendisSSDInsance = "TendisSSDInstance" + // TendisTypeRedisCluster 原生RedisCluster 架构 + TendisTypeRedisCluster = "RedisCluster" + // TendisTypeTendisplusCluster TendisplusCluster架构 + TendisTypeTendisplusCluster = "TendisplusCluster" + + // MongoTypeShardedCluster TODO + MongoTypeShardedCluster = "ShardedCluster" + // MongoTypeReplicaSet TODO + MongoTypeReplicaSet = "ReplicaSet" + // MongoTypeStandalone TODO + MongoTypeStandalone = "Standalone" +) + +const ( + // RedisMasterRole redis role master + RedisMasterRole = "master" + // RedisSlaveRole redis role slave + RedisSlaveRole = "slave" + + // RedisNoneRole none role + RedisNoneRole = "none" + + // MasterLinkStatusUP up status + MasterLinkStatusUP = "up" + // MasterLinkStatusDown down status + MasterLinkStatusDown = "down" + + // TendisSSDIncrSyncState IncrSync state + TendisSSDIncrSyncState = "IncrSync" + // TendisSSDReplFollowtate REPL_FOLLOW state + TendisSSDReplFollowtate = "REPL_FOLLOW" +) + +const ( + // RedisLinkStateConnected redis connection status connected + RedisLinkStateConnected = "connected" + // RedisLinkStateDisconnected redis connection status disconnected + RedisLinkStateDisconnected = "disconnected" +) + +const ( + // NodeStatusPFail Node is in PFAIL state. Not reachable for the node you are contacting, but still logically reachable + NodeStatusPFail = "fail?" + // NodeStatusFail Node is in FAIL state. It was not reachable for multiple nodes that promoted the PFAIL state to FAIL + NodeStatusFail = "fail" + // NodeStatusHandshake Untrusted node, we are handshaking. 
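+	// These values mirror the flags column of 'cluster nodes' output, e.g. (illustrative):
+	//   07c37d... 127.0.0.1:30004@31004 slave,fail? e7d1ee... 0 1426238317239 4 connected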
+ NodeStatusHandshake = "handshake" + // NodeStatusNoAddr No address known for this node + NodeStatusNoAddr = "noaddr" + // NodeStatusNoFlags no flags at all + NodeStatusNoFlags = "noflags" +) + +const ( + // ClusterStateOK command 'cluster info',cluster_state + ClusterStateOK = "ok" +) +const ( + // DefaultMinSlots 0 + DefaultMinSlots = 0 + // DefaultMaxSlots 16383 + DefaultMaxSlots = 16383 +) + +// time layout +const ( + UnixtimeLayout = "2006-01-02 15:04:05" + FilenameTimeLayout = "20060102-150405" + FilenameDayLayout = "20060102" +) + +// account +const ( + MysqlAaccount = "mysql" + MysqlGroup = "mysql" +) + +// path dirs +const ( + UsrLocal = "/usr/local" + PackageSavePath = "/data/install" + Data1Path = "/data1" + DataPath = "/data" + DbaReportSaveDir = "/home/mysql/dbareport/" + BackupReportSaveDir = "/home/mysql/dbareport/" + RedisReportSaveDir = "/home/mysql/dbareport/redis/" +) + +// tool path +const ( + DbToolsPath = "/home/mysql/dbtools" + RedisShakeBin = "/home/mysql/dbtools/redis-shake" + RedisSafeDeleteToolBin = "/home/mysql/dbtools/redisSafeDeleteTool" + LdbTendisplusBin = "/home/mysql/dbtools/ldb_tendisplus" + TredisverifyBin = "/home/mysql/dbtools/tredisverify" + TredisBinlogBin = "/home/mysql/dbtools/tredisbinlog" + TredisDumpBin = "/home/mysql/dbtools/tredisdump" + NetCatBin = "/home/mysql/dbtools/netcat" + TendisKeyLifecycleBin = "/home/mysql/dbtools/tendis-key-lifecycle" + ZkWatchBin = "/home/mysql/dbtools/zkwatch" + ZstdBin = "/home/mysql/dbtools/zstd" + LzopBin = "/home/mysql/dbtools/lzop" + LdbWithV38Bin = "/home/mysql/dbtools/ldb_with_len.3.8" + LdbWithV513Bin = "/home/mysql/dbtools/ldb_with_len.5.13" + MyRedisCaptureBin = "/home/mysql/dbtools/myRedisCapture" + BinlogToolTendisplusBin = "/home/mysql/dbtools/binlogtool_tendisplus" + RedisCliBin = "/home/mysql/dbtools/redis-cli" +) + +// backup +const ( + NormalBackupType = "normal_backup" + ForeverBackupType = "forever_backup" + BackupClient = "/usr/local/bin/backup_client" + + RedisFullBackupTAG = "REDIS_FULL" + RedisBinlogTAG = "REDIS_BINLOG" + RedisForeverBackupTAG = "DBFILE" + + RedisFullBackupReportType = "redis_fullbackup" + RedisBinlogBackupReportType = "redis_binlogbackup" + + DoingRedisFullBackFileList = "redis_backup_file_list_%d_doing" + DoneRedisFullBackFileList = "redis_backup_file_list_%d_done" + + DoingRedisBinlogFileList = "redis_binlog_file_list_%d_doing" + DoneRedisBinlogFileList = "redis_binlog_file_list_%d_done" + + RedisFullbackupRepoter = "redis_fullbackup_%s.log" + RedisBinlogRepoter = "redis_binlog_%s.log" + + BackupStatusStart = "start" + BackupStatusRunning = "running" + BackupStatusToBakSystemStart = "to_backup_system_start" + BackupStatusToBakSystemFailed = "to_backup_system_failed" + BackupStatusToBakSysSuccess = "to_backup_system_success" + BackupStatusFailed = "failed" + BackupStatusLocalSuccess = "local_success" +) + +const ( + // RedisHotKeyReporter TODO + RedisHotKeyReporter = "redis_hotkey_%s.log" + // RedisBigKeyReporter TODO + RedisBigKeyReporter = "redis_bigkey_%s.log" + // RedisKeyModeReporter TODO + RedisKeyModeReporter = "redis_keymod_%s.log" + // RedisKeyLifeReporter TODO + RedisKeyLifeReporter = "redis_keylife_%s.log" +) + +// meta role +const ( + MetaRoleRedisMaster = "redis_master" + MetaRoleRedisSlave = "redis_slave" + MetaRolePredixy = "predixy" + MetaRoleTwemproxy = "twemproxy" +) + +const ( + // PayloadFormatRaw raw + PayloadFormatRaw = "raw" + // PayloadFormatBase64 base64 + PayloadFormatBase64 = "base64" +) + +// IsClusterDbType 存储端是否是cluster类型 +func 
IsClusterDbType(dbType string) bool { + if dbType == TendisTypePredixyRedisCluster || + dbType == TendisTypePredixyTendisplusCluster || + dbType == TendisTypeRedisCluster || + dbType == TendisTypeTendisplusCluster { + return true + } + return false +} + +// IsRedisInstanceDbType 存储端是否是cache类型 +func IsRedisInstanceDbType(dbType string) bool { + if dbType == TendisTypePredixyRedisCluster || + dbType == TendisTypeTwemproxyRedisInstance || + dbType == TendisTypeRedisInstance || + dbType == TendisTypeRedisCluster { + return true + } + return false +} + +// IsTendisplusInstanceDbType 存储端是否是tendisplus类型 +func IsTendisplusInstanceDbType(dbType string) bool { + if dbType == TendisTypePredixyTendisplusCluster || + dbType == TendisTypeTwemproxyTendisplusInstance || + dbType == TendisTypeTendisplusInsance || + dbType == TendisTypeTendisplusCluster { + return true + } + return false +} + +// IsTendisSSDInstanceDbType 存储端是否是tendisSSD类型 +func IsTendisSSDInstanceDbType(dbType string) bool { + if dbType == TendisTypeTwemproxyTendisSSDInstance || + dbType == TendisTypeTendisSSDInsance { + return true + } + return false +} + +// IsRedisMetaRole TODO +func IsRedisMetaRole(metaRole string) bool { + if metaRole == MetaRoleRedisMaster || + metaRole == MetaRoleRedisSlave { + return true + } + return false +} + +// IsMongo TODO +func IsMongo(clusterType string) bool { + if clusterType == MongoTypeShardedCluster || clusterType == MongoTypeReplicaSet || clusterType == MongoTypeStandalone { + return true + } + return false +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/consts/data_dir.go b/dbm-services/redis/db-tools/dbmon/pkg/consts/data_dir.go new file mode 100644 index 0000000000..04bc427ffb --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/consts/data_dir.go @@ -0,0 +1,181 @@ +package consts + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "syscall" +) + +// IsMountPoint2 Determine if a directory is a mountpoint, by comparing the device for the directory +// with the device for it's parent. If they are the same, it's not a mountpoint, if they're +// different, it is. +// reference: https://github.com/cnaize/kubernetes/blob/master/pkg/util/mount/mountpoint_unix.go#L29 +// 该函数与util/util.go 中 IsMountPoint()相同,但package consts 不建议依赖其他模块故拷贝了实现 +func IsMountPoint2(file string) bool { + stat, err := os.Stat(file) + if err != nil { + return false + } + rootStat, err := os.Lstat(file + "/..") + if err != nil { + return false + } + // If the directory has the same device as parent, then it's not a mountpoint. 
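+	// Note: the *syscall.Stat_t type assertions below are Unix-specific; this is
+	// acceptable given the tool's Linux-oriented deployment (bash, /etc/profile, chown).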
+ return stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev +} + +// fileExists 检查目录是否已经存在 +func fileExists(path string) bool { + _, err := os.Stat(path) + if err != nil { + return os.IsExist(err) + } + return true +} + +// SetRedisDataDir 设置环境变量 REDIS_DATA_DIR,并持久化到/etc/profile中 +// 如果函数参数 dataDir 不为空,则 REDIS_DATA_DIR = {dataDir} +// 否则,如果环境变量 REDIS_DATA_DIR 不为空,则直接读取; +// 否则,如果 /data1/redis 存在, 则 REDIS_DATA_DIR=/data1 +// 否则,如果 /data/redis, 则 REDIS_DATA_DIR=/data +// 否则,如果 /data1 是挂载点, 则 REDIS_DATA_DIR=/data1 +// 否则,如果 /data 是挂载点, 则 REDIS_DATA_DIR=/data +// 否则,REDIS_DATA_DIR=/data1 +func SetRedisDataDir(dataDir string) (err error) { + if dataDir == "" { + envDir := os.Getenv("REDIS_DATA_DIR") + if envDir != "" { // 环境变量 REDIS_DATA_DIR 不为空 + dataDir = envDir + } else { + if fileExists(filepath.Join(Data1Path, "redis")) { + // /data1/redis 存在 + dataDir = Data1Path + } else if fileExists(filepath.Join(DataPath, "redis")) { + // /data/redis 存在 + dataDir = DataPath + } else if IsMountPoint2(Data1Path) { + // /data1是挂载点 + dataDir = Data1Path + } else if IsMountPoint2(DataPath) { + // /data是挂载点 + dataDir = DataPath + } else { + // 函数参数 dataDir为空, 环境变量 REDIS_DATA_DIR 为空 + // /data1 和 /data 均不是挂载点 + // 强制指定 REDIS_DATA_DIR=/data1 + dataDir = Data1Path + } + } + } + dataDir = strings.TrimSpace(dataDir) + var ret []byte + shCmd := fmt.Sprintf(` +ret=$(grep '^export REDIS_DATA_DIR=' /etc/profile) +if [[ -z $ret ]] +then +echo "export REDIS_DATA_DIR=%s">>/etc/profile +fi + `, dataDir) + ret, err = exec.Command("bash", "-c", shCmd).Output() + if err != nil { + err = fmt.Errorf("SetRedisDataDir failed,err:%v,ret:%s,shCmd:%s", err, string(ret), shCmd) + return + } + os.Setenv("REDIS_DATA_DIR", dataDir) + return nil +} + +// GetRedisDataDir 获取环境变量 REDIS_DATA_DIR,不为空直接返回, +// 否则,如果目录 /data1/redis存在,返回 /data1; +// 否则,如果目录 /data/redis存在,返回 /data; +// 否则,返回 /data1 +func GetRedisDataDir() string { + dataDir := os.Getenv("REDIS_DATA_DIR") + if dataDir == "" { + if fileExists(filepath.Join(Data1Path, "redis")) { + // /data1/redis 存在 + dataDir = Data1Path + } else if fileExists(filepath.Join(DataPath, "redis")) { + // /data/redis 存在 + dataDir = DataPath + } else { + dataDir = Data1Path + } + } + return dataDir +} + +// SetRedisBakcupDir 设置环境变量 REDIS_BACKUP_DIR ,并持久化到/etc/profile中 +// 如果函数参数 backupDir 不为空,则 REDIS_BACKUP_DIR = {backupDir} +// 否则,如果环境变量 REDIS_BACKUP_DIR 不为空,则直接读取; +// 否则,如果 /data/dbbak 存在, 则 REDIS_BACKUP_DIR=/data +// 否则,如果 /data1/dbbak 存在, 则 REDIS_BACKUP_DIR=/data1 +// 否则,如果 /data 是挂载点, 则 REDIS_BACKUP_DIR=/data +// 否则,如果 /data1 是挂载点, 则 REDIS_BACKUP_DIR=/data1 +// 否则,REDIS_BACKUP_DIR=/data +func SetRedisBakcupDir(backupDir string) (err error) { + if backupDir == "" { + envDir := os.Getenv("REDIS_BACKUP_DIR") + if envDir != "" { + backupDir = envDir + } else { + if fileExists(filepath.Join(DataPath, "dbbak")) { + // /data/dbbak 存在 + backupDir = DataPath + } else if fileExists(filepath.Join(Data1Path, "dbbak")) { + // /data1/dbbak 存在 + backupDir = Data1Path + } else if IsMountPoint2(DataPath) { + // /data是挂载点 + backupDir = DataPath + } else if IsMountPoint2(Data1Path) { + // /data1是挂载点 + backupDir = Data1Path + } else { + // 函数参数 backupDir 为空, 环境变量 REDIS_BACKUP_DIR 为空 + // /data1 和 /data 均不是挂载点 + // 强制指定 REDIS_BACKUP_DIR=/data + backupDir = DataPath + } + } + } + backupDir = strings.TrimSpace(backupDir) + var ret []byte + shCmd := fmt.Sprintf(` +ret=$(grep '^export REDIS_BACKUP_DIR=' /etc/profile) +if [[ -z $ret ]] +then +echo "export REDIS_BACKUP_DIR=%s">>/etc/profile +fi + `, 
backupDir) + ret, err = exec.Command("bash", "-c", shCmd).Output() + if err != nil { + err = fmt.Errorf("SetRedisBakcupDir failed,err:%v,ret:%s", err, string(ret)) + return + } + os.Setenv("REDIS_BACKUP_DIR", backupDir) + return nil +} + +// GetRedisBackupDir 获取环境变量 REDIS_BACKUP_DIR,默认值 /data +// 否则,如果目录 /data/dbbak 存在,返回 /data; +// 否则,如果目录 /data1/dbbak 存在,返回 /data1; +// 否则,返回 /data +func GetRedisBackupDir() string { + dataDir := os.Getenv("REDIS_BACKUP_DIR") + if dataDir == "" { + if fileExists(filepath.Join(DataPath, "dbbak")) { + // /data/dbbak 存在 + dataDir = DataPath + } else if fileExists(filepath.Join(Data1Path, "dbbak")) { + // /data1/dbbak 存在 + dataDir = Data1Path + } else { + dataDir = DataPath + } + } + return dataDir +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/consts/event_categories.go b/dbm-services/redis/db-tools/dbmon/pkg/consts/event_categories.go new file mode 100644 index 0000000000..7c6abdbfa1 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/consts/event_categories.go @@ -0,0 +1,43 @@ +package consts + +// twemproxy monitor event categories +const ( + EventTwemproxyRestart = "twemproxy_restart" + EventTwemproxyLogin = "twemproxy_login" +) + +// predixy monitor event categories +const ( + EventPredixyRestart = "predixy_restart" + EventPredixyLogin = "predixy_login" +) + +// redis monitor event categories +const ( + EventRedisLogin = "redis_login" + EventRedisSync = "redis_sync" + EventRedisPersist = "redis_persist" + EventRedisMaxmemory = "redis_maxmemory" + EventTendisBinlogLen = "tendis_binlog_len" + EventRedisClusterState = "redis_cluster_state" + + EventTimeDiffWarning = 120 + EventTimeDiffError = 300 + + EventMasterLastIOSecWarning = 600 + EventMasterLastIOSecError = 1200 + + EventSSDBinlogLenWarnning = 20000000 + EventSSDBinlogLenError = 50000000 + + EventMemoryUsedPercentWarnning = 80 // 80% + EventMemoryUsedPercentError = 90 // 90% +) + +// warn level +const ( + WarnLevelError = "error" + WarnLevelWarning = "warning" + WarnLevelSuccess = "success" + WarnLevelMessage = "message" +) diff --git a/dbm-services/redis/db-tools/dbmon/pkg/consts/mongo.go b/dbm-services/redis/db-tools/dbmon/pkg/consts/mongo.go new file mode 100644 index 0000000000..a077ba47b8 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/consts/mongo.go @@ -0,0 +1,47 @@ +package consts + +import ( + "os/user" + "path" +) + +// meta role +const ( + MetaRoleShardsvrBackup = "shardsvr-backup" + MetaRoleMongos = "mongos" +) + +// twemproxy monitor event categories +const ( + EventMongoRestart = "mongo_restart" + EventMongoLogin = "mongo_login" +) + +// MongoBin 相关 +const ( + MongoBin = "/usr/local/mongodb/bin/mongo" + MongoToolKit = "mongo-toolkit-go_Linux" +) + +// GetDbToolDir 获取dbtool目录,在用户目录 dbtools/mg 下 +func GetDbToolDir() string { + currentUser, err := user.Current() + if err != nil { + return "" + } + username := currentUser.Username + return path.Join("/home/", username, "dbtools", "mg") +} + +// GetDbTool 获取dbtool目录,在用户目录 dbtools/mg 下 +func GetDbTool(dbType string, bin string) string { + currentUser, err := user.Current() + if err != nil { + return "" + } + username := currentUser.Username + if dbType != "" { + return path.Join("/home/", username, "dbtools", dbType, bin) + } + return path.Join("/home/", username, "dbtools", bin) +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/customtime/customtime.go b/dbm-services/redis/db-tools/dbmon/pkg/customtime/customtime.go new file mode 100644 index 0000000000..3e9c9f8150 --- /dev/null +++ 
b/dbm-services/redis/db-tools/dbmon/pkg/customtime/customtime.go @@ -0,0 +1,76 @@ +// Package customtime 自定义time +package customtime + +import ( + "database/sql/driver" + "fmt" + "strings" + "time" +) + +// CustomTime 自定义时间类型 +type CustomTime struct { + time.Time +} + +const ctLayout = "2006-01-02 15:04:05" + +var nilTime = (time.Time{}).UnixNano() + +// UnmarshalJSON .. +func (ct *CustomTime) UnmarshalJSON(b []byte) (err error) { + s := strings.Trim(string(b), "\"") + if s == "null" || s == "" { + ct.Time = time.Time{} + return + } + ct.Time, err = time.ParseInLocation(ctLayout, s, time.Local) + return +} + +// MarshalJSON .. +func (ct CustomTime) MarshalJSON() ([]byte, error) { + if ct.Time.UnixNano() == nilTime { + return []byte("null"), nil + } + return []byte(fmt.Sprintf("\"%s\"", ct.Time.Format(ctLayout))), nil +} + +// Scan scan +func (ct *CustomTime) Scan(value interface{}) error { + switch v := value.(type) { + case []byte: + return ct.UnmarshalText(string(v)) + case string: + return ct.UnmarshalText(v) + case time.Time: + ct.Time = v + case nil: + ct.Time = time.Time{} + default: + return fmt.Errorf("cannot sql.Scan() CustomTime from: %#v", v) + } + return nil +} + +// UnmarshalText unmarshal ... +func (ct *CustomTime) UnmarshalText(value string) error { + dd, err := time.ParseInLocation(ctLayout, value, time.Local) + if err != nil { + return err + } + ct.Time = dd + return nil +} + +// Value .. +// 注意这里ct不能是指针 +// 参考文章:https://www.codenong.com/44638610/ +func (ct CustomTime) Value() (driver.Value, error) { + return driver.Value(ct.Local().Format(ctLayout)), nil +} + +// IsSet .. +func (ct *CustomTime) IsSet() bool { + return ct.UnixNano() != nilTime +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/httpapi/httpapi.go b/dbm-services/redis/db-tools/dbmon/pkg/httpapi/httpapi.go new file mode 100644 index 0000000000..ae1814a0b8 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/httpapi/httpapi.go @@ -0,0 +1,40 @@ +// Package httpapi TODO +package httpapi + +import ( + "dbm-services/redis/db-tools/dbmon/config" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "fmt" + "io/ioutil" + "net/http" + + "github.com/gin-gonic/gin" +) + +func health(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{ + "message": "ok", + }) +} + +func version(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{ + "verion": consts.BkDbmonVersion, + }) +} + +// StartListen http开始监听 +func StartListen(conf *config.Configuration) { + if conf.HttpAddress == "" { + return + } + gin.SetMode(gin.ReleaseMode) + gin.DefaultWriter = ioutil.Discard + r := gin.Default() + r.Use(mylog.GinLogger(), mylog.GinRecovery(true)) + r.GET("/health", health) + r.GET("/version", version) + mylog.Logger.Info(fmt.Sprintf("start listen %s", conf.HttpAddress)) + r.Run(conf.HttpAddress) +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/kafka/crypto_base.go b/dbm-services/redis/db-tools/dbmon/pkg/kafka/crypto_base.go new file mode 100644 index 0000000000..e3f84b69fc --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/kafka/crypto_base.go @@ -0,0 +1,43 @@ +package kafka + +import ( + "crypto/sha256" + "crypto/sha512" + + "github.com/xdg-go/scram" +) + +var ( + // SHA256 256 + SHA256 scram.HashGeneratorFcn = sha256.New + // SHA512 512 + SHA512 scram.HashGeneratorFcn = sha512.New +) + +// XDGSCRAMClient struct +type XDGSCRAMClient struct { + *scram.Client + *scram.ClientConversation + scram.HashGeneratorFcn +} + +// Begin implement interface +func (x *XDGSCRAMClient) Begin(userName, 
password, authzID string) (err error) { + x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) + if err != nil { + return err + } + x.ClientConversation = x.Client.NewConversation() + return nil +} + +// Step implement interface +func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { + response, err = x.ClientConversation.Step(challenge) + return +} + +// Done implement interface +func (x *XDGSCRAMClient) Done() bool { + return x.ClientConversation.Done() +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/kafka/kafka.go b/dbm-services/redis/db-tools/dbmon/pkg/kafka/kafka.go new file mode 100644 index 0000000000..74cdb52afe --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/kafka/kafka.go @@ -0,0 +1,2 @@ +// Package kafka TODO +package kafka diff --git a/dbm-services/redis/db-tools/dbmon/pkg/kafka/kafka_client.go b/dbm-services/redis/db-tools/dbmon/pkg/kafka/kafka_client.go new file mode 100644 index 0000000000..e86d15d336 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/kafka/kafka_client.go @@ -0,0 +1,67 @@ +package kafka + +import ( + "dbm-services/redis/db-tools/dbmon/mylog" + "fmt" + "io/ioutil" + "log" + "strings" + "time" + + "github.com/Shopify/sarama" +) + +// KafkaClient KafkaClient +// https://git..com/scc-gamedba2/dbmonitor/xml_server_kafka/blob/master/client/main.go +func KafkaClient(hosts, topic, user, password, fname string) error { + inputBytes, err := ioutil.ReadFile(fname) + if err != nil { + log.Print(err) + return err + } + + config := sarama.NewConfig() + config.Producer.Retry.Max = 1 + config.Producer.RequiredAcks = sarama.WaitForAll + + config.Metadata.Full = true + config.Version = sarama.V3_2_3_0 + config.ClientID = "bk-dbmon" + config.Metadata.Full = true + config.Net.SASL.Enable = true + config.Net.SASL.User = user + config.Net.SASL.Password = password + config.Net.SASL.Handshake = true + // 最大100MB + config.Producer.MaxMessageBytes = 100000000 + config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { + return &XDGSCRAMClient{ + HashGeneratorFcn: SHA512} + } + config.Net.SASL.Mechanism = sarama.SASLTypeSCRAMSHA512 + config.Producer.Return.Successes = true + config.Producer.Timeout = 6 * time.Second + + var producer sarama.SyncProducer + for i := 0; i < 10; i++ { + producer, err = sarama.NewSyncProducer(strings.Split(hosts, ","), config) + if err != nil { + mylog.Logger.Error(fmt.Sprintf("connection producer failed %s:%+v", hosts, err)) + time.Sleep(time.Second * 2) + continue + } + break + } + defer producer.Close() + srcValue := inputBytes + msg := &sarama.ProducerMessage{ + Topic: topic, + Value: sarama.ByteEncoder(srcValue), + } + if part, offset, err := producer.SendMessage(msg); err != nil { + mylog.Logger.Error(fmt.Sprintf("send file(%s) err=%s", fname, err)) + } else { + mylog.Logger.Info(fmt.Sprintf("send succ,partition=%d, offset=%d", part, offset)) + } + return err +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/keylifecycle/ctl.go b/dbm-services/redis/db-tools/dbmon/pkg/keylifecycle/ctl.go new file mode 100644 index 0000000000..94a84e2ae6 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/keylifecycle/ctl.go @@ -0,0 +1,122 @@ +// Package keylifecycle TODO +package keylifecycle + +import ( + "dbm-services/redis/db-tools/dbmon/models/myredis" + "dbm-services/redis/db-tools/dbmon/mylog" + "fmt" + "strconv" + "strings" +) + +// Instance TODO +type Instance struct { + IP string `json:"ip"` + Port int `json:"port"` + Addr string `json:"addr"` + Password string 
`json:"passwords"` + App string `json:"app"` + Domain string `json:"domain"` + Role string `json:"role"` + Version string `json:"version"` + + Cli *myredis.RedisClient `json:"-"` +} + +func getStatToolParams(keys int64) (int64, int, int, int, int) { + step, slptime, sample, confidence, adjfactor := 1, 50, 3000, 400, 100 + + if keys >= 100000000 { + step, slptime, sample, confidence, adjfactor = 50, 10, 12000, 8000, 3000 + } else if keys >= 50000000 { + step, slptime, sample, confidence, adjfactor = 50, 10, 10000, 5000, 2200 + } else if keys >= 10000000 { + step, slptime, sample, confidence, adjfactor = 50, 10, 10000, 5000, 2000 + } else if keys >= 100000 { + step, slptime, sample, confidence, adjfactor = 50, 0, 10000, 4000, 1200 + } else if keys >= 10000 { + step, slptime, sample, confidence, adjfactor = 50, 0, 10000, 3000, 1000 + } + mylog.Logger.Info(fmt.Sprintf("get tools params for %d:%d,%d,%d,%d,%d", + keys, step, slptime, sample, confidence, adjfactor)) + return int64(step), slptime, sample, confidence, adjfactor +} + +const ( + // DEFATUL_VERSION_FACTOR TODO + DEFATUL_VERSION_FACTOR = 1000000 + // DEFAULT_VERSION_NUM TODO + DEFAULT_VERSION_NUM = 9999999 + // VERSION_TENDIS_SSD_TAG TODO + VERSION_TENDIS_SSD_TAG = "TRedis" + // VERSION_TENDIS_DELIMITER TODO + VERSION_TENDIS_DELIMITER = "-" +) + +// tendisSSDVersion2Int 转化ssd版本成数字 +func tendisSSDVersion2Int(v string) (int, int) { + if strings.Contains(v, VERSION_TENDIS_SSD_TAG) { + vps := strings.Split(v, VERSION_TENDIS_DELIMITER) + if len(vps) == 3 { + return dotString2Int(vps[0]), dotString2Int(vps[2]) + } + } + return DEFAULT_VERSION_NUM, DEFAULT_VERSION_NUM +} + +func dotString2Int(dt string) int { + dt = strings.TrimSpace(dt) + var vnum, step, factor int + if strings.HasPrefix(dt, "v") { + dt = dt[1:] + } + factor = DEFATUL_VERSION_FACTOR + step = 100 + parts := strings.Split(dt, ".") + for _, v := range parts { + vint, err := strconv.Atoi(v) + if err != nil { + return DEFAULT_VERSION_NUM + } + vnum += vint * factor + factor = factor / step + } + return vnum +} + +// KafkaMsg old msg struct. 
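+// Kept commented out for reference to the legacy Kafka message envelope format;
+// the snippet below is not compiled and its field layout is historical.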
+// type KafkaMsg struct { +// Name string `json:"name"` +// From string `json:"form"` +// XMLRow struct { +// XMLField map[string]interface{} `json:"xml_field"` +// } `json:"xml_row"` +// } + +// func (t *Task) sendFileToKafka(file string, dict map[string]interface{}) error { +// fh, err := os.Open(file) +// if err != nil { +// return err +// } + +// dict["content"], err = ioutil.ReadAll(fh) +// if err != nil { +// return err +// } + +// kafkaMsg := KafkaMsg{Name: t.conf.KafaTopic, From: "K8S", +// XMLRow: struct { +// XMLField map[string]interface{} "json:\"xml_field\"" +// }{}, +// } +// kafkaMsg.XMLRow.XMLField = dict +// msgJSON, _ := json.Marshal(kafkaMsg) + +// fileName := "/tmp/kafka.txt" +// if err = ioutil.WriteFile(fileName, msgJSON, os.FileMode(0660)); err != nil { +// return err +// } + +// return kafka.KafkaClient(t.kafkaConf.KafkaHosts, t.conf.KafaTopic, +// t.kafkaConf.KafkaUser, t.kafkaConf.KafaPass, fileName) +// } diff --git a/dbm-services/redis/db-tools/dbmon/pkg/keylifecycle/job.go b/dbm-services/redis/db-tools/dbmon/pkg/keylifecycle/job.go new file mode 100644 index 0000000000..225479db29 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/keylifecycle/job.go @@ -0,0 +1,147 @@ +package keylifecycle + +import ( + "dbm-services/redis/db-tools/dbmon/config" + "dbm-services/redis/db-tools/dbmon/models/myredis" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "dbm-services/redis/db-tools/dbmon/pkg/report" + "dbm-services/redis/db-tools/dbmon/util" + "fmt" + "io/fs" + "os" + "path/filepath" + "time" +) + +// GlobRedisKeyLifeCycleJob global var +var GlobRedisKeyLifeCycleJob *Job + +// Job tendis key 生命周期Job 入口 +type Job struct { // NOCC:golint/naming(其他:设计如此) + Conf *config.Configuration `json:"conf"` + StatTask *Task `json:"stat_task"` + RealBackupDir string `json:"real_backup_dir"` + HotKeyRp report.Reporter `json:"-"` + BigKeyRp report.Reporter `json:"-"` + KeyModeRp report.Reporter `json:"-"` + KeyLifeRp report.Reporter `json:"-"` + Err error `json:"-"` +} + +// InitRedisKeyLifeCycleJob tendis key 生命周期Job +func InitRedisKeyLifeCycleJob(conf *config.Configuration) { + GlobRedisKeyLifeCycleJob = &Job{ + Conf: conf, + } +} + +// Run 执行例行任务 +func (job *Job) Run() { + mylog.Logger.Info("keylifecycle wakeup,start running...") + defer func() { + if job.Err != nil { + mylog.Logger.Info(fmt.Sprintf("keylifecycle end fail,err:%v", job.Err)) + } else { + mylog.Logger.Info("keylifecycle end succ") + } + }() + job.Err = nil + if job.precheck(); job.Err != nil { + return + } + + if job.GetReporter(); job.Err != nil { + return + } + defer job.HotKeyRp.Close() + defer job.BigKeyRp.Close() + defer job.KeyModeRp.Close() + defer job.KeyLifeRp.Close() + + if job.createTasks(); job.Err != nil { + return + } + + if job.Err = job.StatTask.RunStat(); job.Err != nil { + return + } +} + +func (job *Job) precheck() { + if _, job.Err = os.Stat(job.Conf.KeyLifeCycle.StatDir); os.IsNotExist(job.Err) { + if job.Err = os.Mkdir(job.Conf.KeyLifeCycle.StatDir, fs.ModePerm); job.Err != nil { + return + } + } + + baseBins := []string{ + consts.TendisKeyLifecycleBin, + consts.LdbTendisplusBin, + consts.LdbWithV38Bin, + consts.LdbWithV513Bin, + } + for _, binfile := range baseBins { + if !util.FileExists(binfile) { + job.Err = fmt.Errorf("file :%s does not exist|%+v", binfile, job.Err) + } + } +} + +// GetReporter 上报者 +func (job *Job) GetReporter() { + reportDir := filepath.Join(job.Conf.ReportSaveDir, "keylifecycle") + 
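+	// One report file per category per day, e.g. redis_hotkey_20230529.log
+	// (FilenameDayLayout) under <ReportSaveDir>/keylifecycle.
+	// NOTE: job.Err is overwritten by each NewFileReport call below, so only the
+	// last failure would be observed.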
util.MkDirsIfNotExists([]string{reportDir}) + util.LocalDirChownMysql(reportDir) + job.HotKeyRp, job.Err = report.NewFileReport(filepath.Join(reportDir, + fmt.Sprintf(consts.RedisHotKeyReporter, time.Now().Local().Format(consts.FilenameDayLayout)))) + job.BigKeyRp, job.Err = report.NewFileReport(filepath.Join(reportDir, + fmt.Sprintf(consts.RedisBigKeyReporter, time.Now().Local().Format(consts.FilenameDayLayout)))) + job.KeyModeRp, job.Err = report.NewFileReport(filepath.Join(reportDir, + fmt.Sprintf(consts.RedisKeyModeReporter, time.Now().Local().Format(consts.FilenameDayLayout)))) + job.KeyLifeRp, job.Err = report.NewFileReport(filepath.Join(reportDir, + fmt.Sprintf(consts.RedisKeyLifeReporter, time.Now().Local().Format(consts.FilenameDayLayout)))) +} + +func (job *Job) createTasks() { + var password string + localInstances := []Instance{} + + mylog.Logger.Info(fmt.Sprintf("keylifecycle start servers : %+v", job.Conf.Servers)) + for _, svrItem := range job.Conf.Servers { + if !consts.IsRedisMetaRole(svrItem.MetaRole) { + mylog.Logger.Info(fmt.Sprintf("keylifecycle start but unkonwn role : %s", svrItem.MetaRole)) + continue + } + for _, port := range svrItem.ServerPorts { + if password, job.Err = myredis.GetRedisPasswdFromConfFile(port); job.Err != nil { + return + } + + server := Instance{ + App: svrItem.BkBizID, + IP: svrItem.ServerIP, + Port: port, + Addr: fmt.Sprintf("%s:%d", svrItem.ServerIP, port), + Domain: svrItem.ClusterDomain, + Password: password, + } + + if server.Cli, job.Err = myredis.NewRedisClientWithTimeout(server.Addr, + server.Password, 0, consts.TendisTypeRedisInstance, time.Second); job.Err != nil { + return + } + + var info map[string]string + if info, job.Err = server.Cli.Info("all"); job.Err != nil { + return + } + + server.Role = info["role"] + server.Version = info["redis_version"] + localInstances = append(localInstances, server) + } + } + job.StatTask = NewKeyStatTask(localInstances, &job.Conf.KeyLifeCycle, + job.HotKeyRp, job.BigKeyRp, job.KeyModeRp, job.KeyLifeRp) +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/keylifecycle/task.go b/dbm-services/redis/db-tools/dbmon/pkg/keylifecycle/task.go new file mode 100644 index 0000000000..1ad651b0c1 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/keylifecycle/task.go @@ -0,0 +1,380 @@ +package keylifecycle + +import ( + "bufio" + "dbm-services/redis/db-tools/dbmon/config" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "dbm-services/redis/db-tools/dbmon/pkg/report" + "dbm-services/redis/db-tools/dbmon/util" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "strconv" + "strings" + "time" +) + +// Task 任务内容 +type Task struct { + statServers []Instance + + lockFile string + logFile string + errFile string + magicFile string + basicDir string + + conf *config.ConfRedisKeyLifeCycle + HotKeyRp report.Reporter + BigKeyRp report.Reporter + KeyModeRp report.Reporter + KeyLifeRp report.Reporter +} + +// NewKeyStatTask new a task +func NewKeyStatTask(servers []Instance, conf *config.ConfRedisKeyLifeCycle, + hkRp report.Reporter, bkRp report.Reporter, kmRp report.Reporter, klRp report.Reporter) *Task { + + return &Task{ + statServers: servers, + conf: conf, + HotKeyRp: hkRp, + BigKeyRp: bkRp, + KeyModeRp: kmRp, + KeyLifeRp: klRp, + logFile: "tendis.keystat.log", + errFile: "tendis.keystat.err", + lockFile: "tendis.keystat.lock", + magicFile: "tendis.lifecycle.magic.done", + basicDir: fmt.Sprintf("%s/redis", consts.GetRedisDataDir()), + } +} + +// RunStat Main 
Entry +func (t *Task) RunStat() error { + if err := os.Chdir(t.conf.StatDir); err != nil { + mylog.Logger.Error(fmt.Sprintf("chdir failed :%+v", err)) + return err + } + path, _ := os.Getwd() + mylog.Logger.Info(fmt.Sprintf("current work dir is %s instances:%+v", path, t.statServers)) + + t.rotateFile(t.logFile) + t.rotateFile(t.errFile) + doneChan := make(chan struct{}, 1) + util.LockFileOnStart(t.lockFile, doneChan) + defer func() { doneChan <- struct{}{} }() + + rstHash := map[string]interface{}{} + cmdVer := fmt.Sprintf("%s version| grep build_date | awk '{print $3}'", consts.TendisKeyLifecycleBin) + r1, _ := util.RunBashCmd(cmdVer, "", nil, time.Second) + rstHash["tool_version"] = strings.TrimSuffix(r1, "\n") + + gStartTime := time.Now().Unix() + for _, server := range t.statServers { + if t.waitOrIgnore(server) { + continue + } + + t.setBasicVals(rstHash, server) + if server.Role == "master" { // 热key统计入口 + st := time.Now().Unix() + rstHash["data_type"] = "tendis_hotkeys" + rstHash["stime"] = time.Now().Format("2006-01-02 15:04:05") + f, err := t.hotKeyWithMonitor(server) + if err != nil { + mylog.Logger.Warn(fmt.Sprintf("get hot keys failed %s:%+v", server.Addr, err)) + continue + } + rstHash["etime"] = time.Now().Format("2006-01-02 15:04:05") + rstHash["check_cost"] = time.Now().Unix() - st + err = t.sendAndReport(t.HotKeyRp, f) + mylog.Logger.Warn(fmt.Sprintf("role master , do hot key analyse done.. :%s:%+v", server.Addr, err)) + } else if server.Role == "slave" { // 大key统计入口 + st := time.Now().Unix() + rstHash["stime"] = time.Now().Format("2006-01-02 15:04:05") + fbig, fmod, dbsize, _, err := t.bigKeySmartStat(server) + if err != nil { + mylog.Logger.Warn(fmt.Sprintf("get big keys failed %s:%+v", server.Addr, err)) + continue + } + rstHash["etime"] = time.Now().Format("2006-01-02 15:04:05") + rstHash["check_cost"] = time.Now().Unix() - st + rstHash["keys_total"] = dbsize + + rstHash["data_type"] = "tendis_bigkeys" + err = t.sendAndReport(t.BigKeyRp, fbig) + mylog.Logger.Warn(fmt.Sprintf("role slave , do big key analyse done.. :%s:%+v", server.Addr, err)) + + rstHash["data_type"] = "tendis_keymod" + err = t.sendAndReport(t.KeyModeRp, fmod) + mylog.Logger.Warn(fmt.Sprintf("role slave , do big key analyse done.. 
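+ +// For illustration (field values are made-up): each loop iteration above emits one JSON record to KeyLifeRp, e.g. +// {"tool_version":"20230413","ip":"1.1.1.1","port":30000,"domain":"cache.test.db","app":"testapp","role":"slave", +// "redis_type":"tendis_cache","data_type":"tendis_bigkeys","stime":"2023-05-29 15:00:00","etime":"2023-05-29 15:03:20", +// "check_cost":200,"keys_total":1234567}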
+ +func (t *Task) setBasicVals(rstHash map[string]interface{}, server Instance) { + rstHash["ip"] = server.IP + rstHash["port"] = server.Port + rstHash["domain"] = server.Domain + rstHash["app"] = server.App + rstHash["role"] = server.Role + + if strings.Contains(server.Version, "tendisplus") { + rstHash["redis_type"] = "tendis_plus" + } else if strings.Contains(server.Version, "TRedis") { + rstHash["redis_type"] = "tendis_ssd" + } else { + rstHash["redis_type"] = "tendis_cache" + } +} + +// rotateFile 保留一代旧文件: f -> 1.f +func (t *Task) rotateFile(f string) { + oldFile := fmt.Sprintf("1.%s", f) + os.Rename(f, oldFile) +} + +// hotKeyWithMonitor 热key 分析 +func (t *Task) hotKeyWithMonitor(server Instance) (string, error) { + hkfile := fmt.Sprintf("tendis.keystat.hotkeys.%d.info", server.Port) + t.rotateFile(hkfile) + + mylog.Logger.Info(fmt.Sprintf("do hot key analyse : %s", server.Addr)) + hkCmd := fmt.Sprintf("%s hotkeys -A %s -S %s -a %s -L %s -D %s --raw -o %s > %s 2>&1", + consts.TendisKeyLifecycleBin, server.App, server.Addr, server.Password, + t.lockFile, server.Domain, t.logFile, hkfile) + + mylog.Logger.Info(fmt.Sprintf("exec cmd : %s", hkCmd)) + r1, r2 := util.RunBashCmd(hkCmd, "", nil, time.Second*(time.Duration(t.conf.HotKeyConf.Duration+10))) + mylog.Logger.Info(fmt.Sprintf("tools executed with result %s:%s:%s", server.Addr, r1, r2)) + + return hkfile, nil +} + +// bigKeySmartStat big / mode 入口 +func (t *Task) bigKeySmartStat(server Instance) (string, string, int64, int64, error) { + bkfile := fmt.Sprintf("tendis.keystat.bigkeys.%d.info", server.Port) + kmfile := fmt.Sprintf("tendis.keystat.keymode.%d.info", server.Port) + t.rotateFile(bkfile) + t.rotateFile(kmfile) + var dbsize, step int64 + var err error + + if strings.Contains(server.Version, "TRedis") { + dbsize, step, err = t.bigAndMode4TendisSSD(server, bkfile, kmfile) + } else if strings.Contains(server.Version, "tendisplus") { + dbsize, step, err = t.bigAndMode4TendisPlus(server, bkfile, kmfile) + } else { + if !util.FileExists(fmt.Sprintf("%s/%d/data/appendonly.aof", t.basicDir, server.Port)) && + t.conf.BigKeyConf.UseRdb { + // AOF 不存在且配置了 UseRdb, 用 RDB 来统计 + dbsize, step, err = t.bigKeyWithRdb4Cache(server, bkfile, kmfile) + } else { + // 否则用 AOF 来统计 + dbsize, step, err = t.bigKeyWithAof4Cache(server, bkfile, kmfile) + } + } + return bkfile, kmfile, dbsize, step, err +}
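+ +// Dispatch summary for bigKeySmartStat (restating the branches above): version containing "TRedis" -> bigAndMode4TendisSSD +// (ldb scan of rocksdb); version containing "tendisplus" -> bigAndMode4TendisPlus (ldb scan of every kvstore); otherwise +// cache redis: no local AOF && UseRdb -> bigKeyWithRdb4Cache (bgsave + rdbstat), else -> bigKeyWithAof4Cache (bgrewriteaof + parseaof).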
+ +// bigKeyWithRdb4Cache -- 大key & key 模式分析 +func (t *Task) bigKeyWithRdb4Cache(server Instance, bkfile, kmfile string) (int64, int64, error) { + if err := server.Cli.BgSaveAndWaitForFinish(); err != nil { + return 0, 0, err + } + + allkeys := fmt.Sprintf("v.%d.keys", server.Port) + cmdKeys := fmt.Sprintf("%s rdbstat -f %s/%d/data/dump.rdb > %s 2>&1", + consts.TendisKeyLifecycleBin, t.basicDir, server.Port, allkeys) + if _, err := util.RunBashCmd(cmdKeys, "", nil, time.Hour); err != nil { + return 0, 0, err + } + return t.statRawKeysFileDetail(allkeys, bkfile, kmfile, server) +} + +// bigKeyWithAof4Cache 通过AOF统计大key与key模式 +func (t *Task) bigKeyWithAof4Cache(server Instance, bkfile, kmfile string) (int64, int64, error) { + if err := server.Cli.BgRewriteAOFAndWaitForDone(); err != nil { + return 0, 0, err + } + + allkeys := fmt.Sprintf("v.%d.keys", server.Port) + cmdKeys := fmt.Sprintf("%s parseaof -f %s/%d/data/appendonly.aof > %s 2>&1", + consts.TendisKeyLifecycleBin, t.basicDir, server.Port, allkeys) + if _, err := util.RunBashCmd(cmdKeys, "", nil, time.Hour); err != nil { + return 0, 0, err + } + + dbsize, err := server.Cli.DbSize() + if err != nil { + return 0, 0, err + } + + step, slptime, sample, confidence, adjfactor := getStatToolParams(dbsize) + cmdExec := fmt.Sprintf( + "cat %s | %s keystat --stdin --raw -B %s -M %s -o %s -S %s -a %s -A %s -D %s "+ + "--step %d --keymodetop 100 --samples %d --confidence %d --adjfactor %d --duration %d > %s 2>&1", + allkeys, consts.TendisKeyLifecycleBin, bkfile, kmfile, t.logFile, + server.Addr, server.Password, server.App, server.Domain, + step, sample, confidence, adjfactor, slptime, t.errFile) + mylog.Logger.Info(fmt.Sprintf("do stats keys %s:%s", server.Addr, cmdExec)) + _, err = util.RunBashCmd(cmdExec, "", nil, time.Second*time.Duration(t.conf.BigKeyConf.Duration)) + if er1 := os.Remove(allkeys); er1 != nil { + mylog.Logger.Warn(fmt.Sprintf("remove keys file err %s:%+v", allkeys, er1)) + } + return dbsize, step, err +} + +// bigAndMode4TendisSSD for tendis ssd +func (t *Task) bigAndMode4TendisSSD(server Instance, bkfile, kmfile string) (int64, int64, error) { + ldbTool := consts.LdbWithV38Bin + if _, smallVer := tendisSSDVersion2Int(server.Version); smallVer >= 1021700 { + ldbTool = consts.LdbWithV513Bin + } + + rockkeys := fmt.Sprintf("v.%d.keys", server.Port) + exportStr := "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/redis/bin/deps &&" + cmdScan := fmt.Sprintf("%s %s --db=%s/%d/data/rocksdb/ scan > %s 2>&1", + exportStr, ldbTool, t.basicDir, server.Port, rockkeys) + if _, err := util.RunBashCmd(cmdScan, "", nil, time.Hour); err != nil { + mylog.Logger.Warn(fmt.Sprintf("exec cmd: %s failed: %+v", cmdScan, err)) + } + return t.statRawKeysFileDetail(rockkeys, bkfile, kmfile, server) +} + +// bigAndMode4TendisPlus for tendisplus: 逐个kvstore扫描rocksdb再统计 +func (t *Task) bigAndMode4TendisPlus(server Instance, bkfile, kmfile string) (int64, int64, error) { + kvstore, err := server.Cli.GetKvstoreCount() + if err != nil { + return 0, 0, err + } + + rockkeys := fmt.Sprintf("v.%d.keys", server.Port) + for db := 0; db < kvstore; db++ { + rocksdir := fmt.Sprintf("%s/%d/data/rocksdb/%d/", t.basicDir, server.Port, db) + cmdScan := fmt.Sprintf("%s --db=%s scan >> %s 2>&1", consts.LdbTendisplusBin, rocksdir, rockkeys) + mylog.Logger.Info(fmt.Sprintf("do scan sst keys %s :%d: %s", server.Addr, db, cmdScan)) + if _, err := util.RunBashCmd(cmdScan, "", nil, time.Hour); err != nil { + mylog.Logger.Warn(fmt.Sprintf("exec cmd: %s failed: %+v", cmdScan, err)) + } + } + return t.statRawKeysFileDetail(rockkeys, bkfile, kmfile, server) +} + +// statRawKeysFileDetail 对原始keys文件做大key/key模式统计 +func (t *Task) statRawKeysFileDetail(keysFile string, bkFile string, kmFile string, server Instance) (int64, int64, + error) { + var err error + keyLines, _ := util.GetFileLines(keysFile) + step, slptime, sample, confidence, adjfactor := getStatToolParams(keyLines) + + cmdExec := fmt.Sprintf( + "cat %s | %s keystat --ssd --stdin --raw -B %s -M %s -o %s -S %s -a %s -A %s -D %s "+ + "--step %d --keymodetop 100 --samples %d --confidence %d --adjfactor %d --duration %d > %s 2>&1", + keysFile, consts.TendisKeyLifecycleBin, bkFile, kmFile, t.logFile, + server.Addr, server.Password, server.App, server.Domain, + step, sample, confidence, adjfactor, slptime, t.errFile) + mylog.Logger.Info(fmt.Sprintf("do stats keys %s:%s", server.Addr, cmdExec)) + _, err = util.RunBashCmd(cmdExec, "", nil, time.Second*time.Duration(t.conf.BigKeyConf.Duration)) + if er1 := os.Remove(keysFile); er1 != nil { + mylog.Logger.Warn(fmt.Sprintf("remove keys file err %s:%+v", keysFile, er1)) + } + return keyLines, step, err +}
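+ +// Worked example (for illustration): an instance holding 20,000,000 keys falls into the ">= 10000000" tier of +// getStatToolParams, which yields step=50, slptime=10, samples=10000, confidence=5000, adjfactor=2000, so the keystat +// command above runs with --step 50 --samples 10000 --confidence 5000 --adjfactor 2000 --duration 10.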
+ +func (t *Task) waitOrIgnore(server Instance) bool { + // 1. waitDisk 等待磁盘空间充足 + var diskOk bool + for i := 0; i < 100; i++ { + usage, _ := util.GetLocalDirDiskUsg(t.conf.StatDir) + // {TotalSize:105620869120 UsedSize:8513855488 AvailSize:92688084992 UsageRatio:8} + mylog.Logger.Info(fmt.Sprintf("current dir %s usage :%+v (max:%d%%)", + t.conf.StatDir, usage, t.conf.BigKeyConf.DiskMaxUsage)) + if usage.UsageRatio < t.conf.BigKeyConf.DiskMaxUsage { + diskOk = true + break + } + time.Sleep(time.Minute) + } + if !diskOk { + mylog.Logger.Warn(fmt.Sprintf("current disk space not enough, skip (usage greater than:%d%%)", t.conf.BigKeyConf.DiskMaxUsage)) + return true + } + // 2. 检查当天是否已统计过 (magic file) + fh, err := os.Open(t.magicFile) + if os.IsNotExist(err) { + ioutil.WriteFile(t.magicFile, []byte(fmt.Sprintf("MAGIC_%s", time.Now().Format("20060102"))), 0644) + return false + } + defer fh.Close() + ct, err := ioutil.ReadAll(fh) + if err != nil { + mylog.Logger.Warn(fmt.Sprintf("read magic file %s err :%+v", t.magicFile, err)) + return false + } + + lines := strings.Split(string(ct), "\n") + for i := 0; i < len(lines); i++ { + if i == 0 { + if !strings.Contains(lines[i], "MAGIC_") { + ioutil.WriteFile(t.magicFile, []byte(fmt.Sprintf("MAGIC_%s", time.Now().Format("20060102"))), 0644) + mylog.Logger.Warn(fmt.Sprintf("bad magic file format first line not magic :%s", lines[i])) + return false + } + if !strings.Contains(lines[i], fmt.Sprintf("MAGIC_%s", time.Now().Format("20060102"))) { + ioutil.WriteFile(t.magicFile, []byte(fmt.Sprintf("MAGIC_%s", time.Now().Format("20060102"))), 0644) + mylog.Logger.Warn(fmt.Sprintf("stats not today :%s", lines[i])) + return false + } + } + // 其余行格式: "<data_type> <port>" + words := strings.Split(lines[i], " ") + if len(words) >= 2 && words[1] == strconv.Itoa(server.Port) { + return true + } + } + return false +} + +func (t *Task) sendAndReport(ctp report.Reporter, fname string) error { + if ctp == nil { + return fmt.Errorf("report nil, ignore report :%s", fname) + } + + fh, err := os.Open(fname) + if err != nil { + return err + } + defer fh.Close() + + reader := bufio.NewReader(fh) + for { + line, err := reader.ReadString('\n') + if err != nil { + if err == io.EOF { + break + } + return err + } + if err := ctp.AddRecord(line, true); err != nil { + mylog.Logger.Warn(fmt.Sprintf("add to reporter failed:%+v", err)) + } + } + return nil +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/mongojob/backup_job.go b/dbm-services/redis/db-tools/dbmon/pkg/mongojob/backup_job.go new file mode 100644 index 0000000000..15b34c7068 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/mongojob/backup_job.go @@ -0,0 +1,126 @@ +package mongojob + +import ( + actuator_consts "dbm-services/redis/db-tools/dbactuator/pkg/consts" + "dbm-services/redis/db-tools/dbmon/config" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "dbm-services/redis/db-tools/dbmon/pkg/report" + "dbm-services/redis/db-tools/dbmon/util" + "fmt" + "path" + "path/filepath" + "strconv" + "sync" + "time" +) + +// backupJobHandle 全局任务句柄 +var backupJobHandle *BackupJob +var lock = &sync.Mutex{} + +// GetBackupJob 获取任务句柄 singleInstance +func GetBackupJob(conf *config.Configuration) *BackupJob { + if backupJobHandle == nil { + lock.Lock() + defer lock.Unlock() + if backupJobHandle == nil { + backupJobHandle = &BackupJob{ + Conf: conf, + Name: "mongobackup", + } + } + } + return backupJobHandle +} + +// BackupJob MongoDB 例行备份任务: 每10分钟被调度一次, 每小时执行一次备份, 可能是全备, 也可能是增量备份 +type BackupJob struct { // NOCC:golint/naming(其他:设计如此) + Name string `json:"name"` + Conf *config.Configuration `json:"conf"` + // Tasks []*BackupTask `json:"tasks"` + RealBackupDir string `json:"real_backup_dir"` // 如 /data/dbbak + Reporter report.Reporter `json:"-"` + Err error `json:"-"` +} + +// Run 执行例行备份. 被cron对象调用 +func (job *BackupJob) Run() { + mylog.Logger.Info(fmt.Sprintf("%s Run start", job.Name)) + defer func() { + mylog.Logger.Info(fmt.Sprintf("%s Run End, Err: %+v", job.Name, job.Err)) + }() + job.Err = nil + job.getRealBackupDir() + if job.Err != nil { + return + } + // job.getReporter() + // if job.Err != nil { + // return + // } + // defer job.Reporter.Close() + + // 调用mongodb-toolkit-go backup 来完成 + for _, svrItem := range job.Conf.Servers { + // 只在Backup节点上备份 + if svrItem.MetaRole != consts.MetaRoleShardsvrBackup { + continue + } + job.runOneServer(&svrItem) + } + + if job.Err != nil { + return + } + +} + +// runOneServer 执行单个实例的备份 +func (job *BackupJob) runOneServer(svrItem *config.ConfServerItem) { + // 1,检查实例是否可用 + // 2,检查实例是否需要备份 + // 3,执行备份 + // 4,上报备份结果 + // 备份操作稍微有点复杂,再封装一层 + // backupTask := NewBackupTask(job.Conf, svrItem, job.RealBackupDir, job.Reporter) + + dumpDir := path.Join(actuator_consts.GetMongoBackupDir(), "dbbak", "mg") + option := &BackupTaskOption{ + TaskName: "", + BackupDir: dumpDir, + BackupType: "AUTO", + Host: svrItem.ServerIP, + Port: strconv.Itoa(svrItem.ServerPorts[0]), + User: "root", + Password: "root", + SendToBs: true, + RemoveOldFileFirst: true, + FullFreq: 3600 * 24, + IncrFreq: 3600, + } + backupTask := NewBackupTask() + backupTask.Do(option) + +} + +// getRealBackupDir 获取本地备份保存路径 +func (job *BackupJob) getRealBackupDir() { + job.RealBackupDir = path.Join(actuator_consts.GetMongoBackupDir(), "mg") + util.MkDirsIfNotExists([]string{job.RealBackupDir}) +} + +// getReporter 上报者 +func (job *BackupJob) getReporter() { + reportDir := filepath.Join(job.Conf.ReportSaveDir, "mongo") + reportFile := fmt.Sprintf(consts.RedisBinlogRepoter, time.Now().Local().Format(consts.FilenameDayLayout)) + util.MkDirsIfNotExists([]string{reportDir}) + util.LocalDirChownMysql(reportDir) + job.Reporter, job.Err = report.NewFileReport(filepath.Join(reportDir, reportFile)) +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/mongojob/backup_task.go b/dbm-services/redis/db-tools/dbmon/pkg/mongojob/backup_task.go new file mode 100644 index 0000000000..29f7d93393 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/mongojob/backup_task.go @@ -0,0 +1,74 @@ +package mongojob + +import ( + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "dbm-services/redis/db-tools/dbmon/util" + "fmt" + "strconv" +) + +// BackupTaskOption 备份任务参数 +type BackupTaskOption struct { + // TaskName 任务名称 + TaskName string `json:"task_name"` + // BackupDir 备份目录 + BackupDir string `json:"backup_dir"` + // BackupType 备份类型 + BackupType string `json:"backup_type"` + Host string `json:"host"` + Port string `json:"port"` + User string `json:"user"` + Password string `json:"password"` + SendToBs bool `json:"send_to_bs"` + RemoveOldFileFirst bool `json:"remove_old_file_first"` + FullFreq int `json:"full_freq"` + IncrFreq int `json:"incr_freq"` +} + +// BackupTask mongo备份任务 +type BackupTask struct { +} + +// NewBackupTask 创建任务 +func NewBackupTask() *BackupTask { + return &BackupTask{} +} + +/* + my $cmd = "$RealBin/tools/mongo-toolkit-go_Linux backup --host $host --port $port --type $dumptype --user $user --pass '$pass' +--dir $dumpdir --send-to-bs --remove-old-file-first --fullFreq 3600 --incrFreq 3500"; + +*/ + +// Do 构造并执行 mongo-toolkit-go backup 命令 +func (task *BackupTask) Do(option *BackupTaskOption) error { + cb := util.NewCmdBuilder() + backupType := option.BackupType + cb.Append(consts.GetDbTool("mg", consts.MongoToolKit)).Append("backup", "--type", backupType). + Append("--host", option.Host).Append("--port", option.Port). + Append("--user", option.User).Append("--dir", option.BackupDir). + Append("--pass").AppendPassword(option.Password). + Append("--fullFreq", strconv.Itoa(option.FullFreq), "--incrFreq", strconv.Itoa(option.IncrFreq)) + + if option.SendToBs { + cb.Append("--send-to-bs") + } + if option.RemoveOldFileFirst { + cb.Append("--remove-old-file-first") + } + + cmdLine := cb.GetCmdLine("", false) + mylog.Logger.Info(fmt.Sprintf("cmdLine: %s", cmdLine)) + + cmd := cb.GetCmd() + + o, err := DoCommandWithTimeout(3600*24, cmd[0], cmd[1:]...) + mylog.Logger.Info(fmt.Sprintf("Exec %s cost %0.1f Seconds, stdout: %s, stderr %s", + cmdLine, + o.End.Sub(o.Start).Seconds(), + o.Stdout.String(), + o.Stderr.String())) + + return err +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/mongojob/check_service_job.go b/dbm-services/redis/db-tools/dbmon/pkg/mongojob/check_service_job.go new file mode 100644 index 0000000000..a20dea81f0 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/mongojob/check_service_job.go @@ -0,0 +1,182 @@ +package mongojob + +import ( + "dbm-services/redis/db-tools/dbmon/config" + "dbm-services/redis/db-tools/dbmon/embedfiles" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "dbm-services/redis/db-tools/dbmon/pkg/report" + "fmt" + "strings" + "time" + + "github.com/pkg/errors" +) + +// checkServiceJobHandle 全局任务句柄 +var checkServiceJobHandle *CheckServiceJob + +// GetCheckServiceJob 获取任务句柄 +func GetCheckServiceJob(conf *config.Configuration) *CheckServiceJob { + if checkServiceJobHandle == nil { + lock.Lock() + defer lock.Unlock() + if checkServiceJobHandle == nil { + checkServiceJobHandle = &CheckServiceJob{ + Conf: conf, + Name: "login", + } + } + } + return checkServiceJobHandle +} + +// CheckServiceJob 登录检查. +type CheckServiceJob struct { // NOCC:golint/naming(其他:设计如此) + Name string `json:"name"` + Conf *config.Configuration `json:"conf"` + Tasks []*BackupTask `json:"tasks"` + RealBackupDir string `json:"real_backup_dir"` // 如 /data/dbbak + Reporter report.Reporter `json:"-"` + Err error `json:"-"` +} + +/* + sub conn_mongodb () { + my ( $self, $host, $port, $user, $pass, $timeout ) = @_; + my $cmd = "/usr/local/mongodb/bin/mongo --quiet --eval \"{var user='$user';var pwd='$pass';}\" $host:$port/admin $RealBin/tools/login.js"; + my $cmd_out = $self->do_command_timeout_v2 ( $cmd, $timeout, 1 ); + my $o = $cmd_out->[2]; + my $ok = ( $o =~ /connect ok/ ) ? 1 : 0; + my $mongo_type = ''; + + if ( $o =~ /mongo_type:(\w+)/ ) { + $mongo_type = $1; + } + return ( $ok, $mongo_type, $o ); + } +*/ +const mongoBin = "/usr/local/mongodb/bin/mongo" + +// Run 执行登录检查. 
被cron对象调用 +func (job *CheckServiceJob) Run() { + mylog.Logger.Info(fmt.Sprintf("%s Run start", job.Name)) + defer func() { + mylog.Logger.Info(fmt.Sprintf("%s Run End, Err: %+v", job.Name, job.Err)) + }() + + for _, svrItem := range job.Conf.Servers { + mylog.Logger.Info(fmt.Sprintf("job %s server: %s:%v start", job.Name, svrItem.ServerIP, svrItem.ServerPorts)) + job.runOneServer(&svrItem) + mylog.Logger.Info(fmt.Sprintf("job %s server: %s:%v end", job.Name, svrItem.ServerIP, svrItem.ServerPorts)) + } + +} + +func (job *CheckServiceJob) runOneServer(svrItem *config.ConfServerItem) { + if !consts.IsMongo(svrItem.ClusterType) { + mylog.Logger.Warn(fmt.Sprintf("server %+v is not a mongo instance", svrItem.ServerIP)) + return + } + + if len(svrItem.ServerPorts) == 0 { + mylog.Logger.Error(fmt.Sprintf("server %+v has no port", svrItem.ServerIP)) + return + } + + // loginTimeout := job.Conf.InstConfig.Get(svrItem.ClusterDomain, svrItem.ServerIP, "login", "timeout") + loginTimeout := 10 + t := time.Now() + err := checkService(loginTimeout, svrItem) + mylog.Logger.Info(fmt.Sprintf("checkService %s:%d cost %0.1f seconds, err: %v", + svrItem.ServerIP, svrItem.ServerPorts[0], time.Now().Sub(t).Seconds(), err)) + if err == nil { + // ok + return + } + + // 检查 进程是否存在,存在: 发送消息LoginTimeout + // Port被别的进程占用,此处算是误告,但问题不大,反正都需要人工处理. + if checkPortInUse(svrItem.ServerPorts[0]) { + // 进程存在 + // 发送消息LoginTimeout + SendEvent(job.Conf, + svrItem, + consts.EventMongoRestart, + consts.WarnLevelError, + fmt.Sprintf("mongo %s:%d login failed:timeout", svrItem.ServerIP, svrItem.ServerPorts[0]), + ) + return + } + + // 不存在,尝试启动 + // 启动成功: 发送消息LoginSuccess + // 启动失败: 发送消息LoginFailed + startMongo(svrItem.ServerPorts[0]) + err = checkService(loginTimeout, svrItem) + if err == nil { + // 发送消息LoginSuccess + SendEvent(job.Conf, + svrItem, + consts.EventMongoRestart, + consts.WarnLevelWarning, + fmt.Sprintf("mongo %s:%d restart", svrItem.ServerIP, svrItem.ServerPorts[0]), + ) + } else { + // 发送消息LoginFailed + SendEvent(job.Conf, + svrItem, + consts.EventMongoRestart, + consts.WarnLevelError, + fmt.Sprintf("mongo %s:%d login failed", svrItem.ServerIP, svrItem.ServerPorts[0]), + ) + } + +} + +// checkPortInUse TODO +// todo checkPortInUse +// todo 分析/proc/tcp/netstat,判断端口是否被占用 +func checkPortInUse(port int) bool { + + return false +} + +// checkService TODO +func checkService(loginTimeout int, svrItem *config.ConfServerItem) error { + user := "root" + pass := "root" + authDb := "admin" + port := fmt.Sprintf("%d", svrItem.ServerPorts[0]) + outBuf, errBuf, err := ExecLoginJs(mongoBin, loginTimeout, svrItem.ServerIP, port, user, pass, authDb, + embedfiles.MongoLoginJs) + mylog.Logger.Info(fmt.Sprintf("outBuf: %s", outBuf)) + mylog.Logger.Info(fmt.Sprintf("errBuf: %s", errBuf)) + if err == nil { + return nil + } + if len(outBuf) == 0 { + return errors.New("login failed") + } + + // ExecLoginJs + if strings.Contains(string(outBuf), "connect ok") { + return nil + } + + return errors.New("login failed") +} + +func startMongo(port int) error { + cmd := "/usr/local/mongodb/bin/start.sh" + _, err := DoCommandWithTimeout(10, cmd, fmt.Sprintf("%d", port)) + if err != nil { + return err + } + return nil +} + +// SendWarnMessage TODO +func SendWarnMessage() { + +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/mongojob/cmd.go b/dbm-services/redis/db-tools/dbmon/pkg/mongojob/cmd.go new file mode 100644 index 0000000000..5c908d3ab5 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/mongojob/cmd.go @@ -0,0 +1,65 @@ +package mongojob 
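+ +// For illustration (host/port/password below are made-up): the helpers in this file back the two jobs above. +// backup_task.Do builds and runs roughly: mongo-toolkit-go backup --type AUTO --host 1.1.1.1 --port 27001 --user root +// --dir <backup_dir>/dbbak/mg --pass xxxx --fullFreq 86400 --incrFreq 3600 --send-to-bs --remove-old-file-first, +// mirroring the legacy Perl invocation quoted in backup_task.go. ExecLoginJs, used by checkService, ends up running +// roughly: /usr/local/mongodb/bin/mongo --quiet --host 1.1.1.1 --port 27017 --eval "var user='root';var pwd='xxx';<login.js>" +// and the caller treats stdout containing "connect ok" as a successful login, mirroring the Perl conn_mongodb.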
+ +import ( + "bytes" + "context" + log "dbm-services/redis/db-tools/dbmon/mylog" + "fmt" + "os/exec" + "strings" + "time" +) + +// ExecResult TODO +type ExecResult struct { + Start time.Time + End time.Time + Cmdline string + Stdout bytes.Buffer + Stderr bytes.Buffer +} + +// DoCommandWithTimeout TODO +func DoCommandWithTimeout(timeout int, bin string, args ...string) (*ExecResult, error) { + ctx := context.Background() + if timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second) + defer cancel() + } + var ret = ExecResult{} + ret.Start = time.Now() + cmd := exec.CommandContext(ctx, bin, args...) + cmd.Stdout = &ret.Stdout + cmd.Stderr = &ret.Stderr + err := cmd.Run() + ret.End = time.Now() + ret.Cmdline = fmt.Sprintf("%s %s", bin, strings.Join(args, " ")) + return &ret, err +} + +// ExecJs 执行脚本 +func ExecJs(bin string, timeout int, host, port, user, pass, authDB, scriptContent string) ([]byte, []byte, error) { + args := []string{"--quiet", "--host", host, "--port", port} + if user != "" { + args = append(args, "--username", user, "--password", pass, "--authenticationDatabase", authDB) + } + args = append(args, "--eval", scriptContent) + out, err := DoCommandWithTimeout(timeout, bin, args...) + argLen := len(args) + log.Logger.Debug(fmt.Sprintf("exec %s %s return %s\n", bin, args[:argLen-2], out.Stdout.Bytes())) + log.Logger.Debug(fmt.Sprintf("scriptContent %s\n", scriptContent)) + return out.Stdout.Bytes(), out.Stderr.Bytes(), err +} + +// ExecLoginJs 执行脚本, 用户密码在eval传入 +func ExecLoginJs(bin string, timeout int, host, port, user, pass, authDB, scriptContent string) ([]byte, []byte, + error) { + args := []string{"--quiet", "--host", host, "--port", port} + args = append(args, "--eval", fmt.Sprintf("var user='%s';var pwd='%s';%s", user, pass, scriptContent)) + out, err := DoCommandWithTimeout(timeout, bin, args...) + argLen := len(args) + log.Logger.Debug(fmt.Sprintf("exec %s %s return %s\n", bin, args[:argLen-2], out.Stdout.Bytes())) + // log.Logger.Debug(fmt.Sprintf("scriptContent %s\n", scriptContent)) + return out.Stdout.Bytes(), out.Stderr.Bytes(), err +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/mongojob/mongojob.go b/dbm-services/redis/db-tools/dbmon/pkg/mongojob/mongojob.go new file mode 100644 index 0000000000..799d11daee --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/mongojob/mongojob.go @@ -0,0 +1,2 @@ +// Package mongojob TODO +package mongojob diff --git a/dbm-services/redis/db-tools/dbmon/pkg/mongojob/msg.go b/dbm-services/redis/db-tools/dbmon/pkg/mongojob/msg.go new file mode 100644 index 0000000000..ae2dad74dc --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/mongojob/msg.go @@ -0,0 +1,39 @@ +package mongojob + +import ( + "dbm-services/redis/db-tools/dbmon/config" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/sendwarning" + "fmt" +) + +// SendEvent TODO +func SendEvent(conf *config.Configuration, serverConf *config.ConfServerItem, + eventName, warnLevel, warnMsg string) error { + + msgH, err := sendwarning.NewBkMonitorEventSender( + conf.RedisMonitor.BkMonitorEventDataID, + conf.RedisMonitor.BkMonitorEventToken, + conf.GsePath, + ) + + if msgH != nil && err == nil { + err = msgH.SetBkBizID(serverConf.BkBizID). + SetBkCloudID(serverConf.BkCloudID). + SetApp(serverConf.App). + SetAppName(serverConf.AppName). + SetClusterDomain(serverConf.ClusterDomain). + SetClusterName(serverConf.ClusterName). 
+ SetClusterType(serverConf.ClusterType). + SetInstanceRole(serverConf.MetaRole).SendWarning(eventName, warnMsg, warnLevel, serverConf.ServerIP) + } + + if err != nil { + mylog.Logger.Warn(fmt.Sprintf("SendEvent failed,name:%s level:%s warnMsg:%q err: %+v", eventName, warnLevel, warnMsg, + err)) + } else { + mylog.Logger.Info(fmt.Sprintf("SendEvent success,name:%s level:%s warnMsg:%q", eventName, warnLevel, warnMsg)) + } + + return err +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/redisbinlogbackup/job.go b/dbm-services/redis/db-tools/dbmon/pkg/redisbinlogbackup/job.go new file mode 100644 index 0000000000..ab969485c1 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/redisbinlogbackup/job.go @@ -0,0 +1,342 @@ +package redisbinlogbackup + +import ( + "bufio" + "dbm-services/redis/db-tools/dbmon/config" + "dbm-services/redis/db-tools/dbmon/models/myredis" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/backupsys" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "dbm-services/redis/db-tools/dbmon/pkg/report" + "dbm-services/redis/db-tools/dbmon/util" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" +) + +// GlobRedisBinlogBakJob global var +var GlobRedisBinlogBakJob *Job + +// Job 例行备份任务 +type Job struct { // NOCC:golint/naming(其他:设计如此) + Conf *config.Configuration `json:"conf"` + Tasks []*Task `json:"tasks"` + RealBackupDir string `json:"real_backup_dir"` // 如 /data/dbbak + Reporter report.Reporter `json:"-"` + Err error `json:"-"` +} + +// InitGlobRedisBinlogBackupJob 新建例行binlog备份任务 +func InitGlobRedisBinlogBackupJob(conf *config.Configuration) { + GlobRedisBinlogBakJob = &Job{ + Conf: conf, + } +} + +// Run 执行例行备份 +func (job *Job) Run() { + mylog.Logger.Info("redisbinlogbackup wakeup,start running...") + defer func() { + if job.Err != nil { + mylog.Logger.Info(fmt.Sprintf("redisbinlogbackup end fail,err:%v", job.Err)) + } else { + mylog.Logger.Info("redisbinlogbackup end succ") + } + }() + job.Err = nil + job.GetRealBackupDir() + if job.Err != nil { + return + } + job.GetReporter() + if job.Err != nil { + return + } + defer job.Reporter.Close() + + // 检查历史备份任务状态 并 删除过旧的本地文件 + for _, svrItem := range job.Conf.Servers { + if !consts.IsRedisMetaRole(svrItem.MetaRole) { + continue + } + for _, port := range svrItem.ServerPorts { + job.CheckOldBinlogBackupStatus(port) + job.DeleteTooOldBinlogbackup(port) + } + } + job.createTasks() + if job.Err != nil { + return + } + // 本地串行备份 + for _, task := range job.Tasks { + bakTask := task + bakTask.BackupLocalBinlogs() + if bakTask.Err != nil { + job.Err = bakTask.Err + continue + } + } +} + +// GetRealBackupDir 获取本地binlog保存路径 +func (job *Job) GetRealBackupDir() { + job.RealBackupDir = consts.GetRedisBackupDir() + job.RealBackupDir = filepath.Join(job.RealBackupDir, "dbbak") + util.LocalDirChownMysql(job.RealBackupDir) +} + +// GetReporter 上报者 +func (job *Job) GetReporter() { + reportDir := filepath.Join(job.Conf.ReportSaveDir, "redis") + util.MkDirsIfNotExists([]string{reportDir}) + util.LocalDirChownMysql(reportDir) + reportFile := fmt.Sprintf(consts.RedisBinlogRepoter, time.Now().Local().Format(consts.FilenameDayLayout)) + job.Reporter, job.Err = report.NewFileReport(filepath.Join(reportDir, reportFile)) +} + +func (job *Job) createTasks() { + var task *Task + var password string + var taskBackupDir string + + for _, svrItem := range job.Conf.Servers { + if !consts.IsRedisMetaRole(svrItem.MetaRole) { + continue + } + for _, port := range svrItem.ServerPorts { + 
password, job.Err = myredis.GetRedisPasswdFromConfFile(port) + if job.Err != nil { + return + } + taskBackupDir = filepath.Join(job.RealBackupDir, "binlog", strconv.Itoa(port)) + util.MkDirsIfNotExists([]string{taskBackupDir}) + util.LocalDirChownMysql(taskBackupDir) + task = NewBinlogBackupTask(svrItem.BkBizID, svrItem.BkCloudID, + svrItem.ClusterDomain, svrItem.ServerIP, port, password, + job.Conf.RedisBinlogBackup.ToBackupSystem, + taskBackupDir, job.Conf.RedisBinlogBackup.OldFileLeftDay, job.Reporter) + job.Tasks = append(job.Tasks, task) + } + } +} + +// CheckOldBinlogBackupStatus 检查历史binlog备份任务状态 +// 1. 遍历 redis_binlog_file_list_${port}_doing 文件 +// 2. 已超过时间的任务,删除本地文件,从 redis_binlog_file_list_${port}_doing 中剔除 +// 3. 上传备份系统 运行中 or 失败的任务 记录到 redis_binlog_file_list_${port}_doing_temp +// 4. 已成功的任务,记录到 redis_binlog_file_list_${port}_done +// 5. rename redis_binlog_file_list_${port}_doing_temp to redis_binlog_file_list_${port}_doing +func (job *Job) CheckOldBinlogBackupStatus(port int) { + var doingHandler, tempHandler, doneHandler *os.File + var line string + var err error + var failMsgs []string + var runningTaskIDs, failedTaskIDs []uint64 + task := Task{} + oldFileLeftSec := job.Conf.RedisBinlogBackup.OldFileLeftDay * 24 * 3600 + nowTime := time.Now().Local() + // 示例: /data/dbbak/binlog/30000/redis_binlog_file_list_30000_doing + doingFile := filepath.Join(job.RealBackupDir, "binlog", strconv.Itoa(port), + fmt.Sprintf(consts.DoingRedisBinlogFileList, port)) + if !util.FileExists(doingFile) { + return + } + // 示例: /data/dbbak/binlog/30000/redis_binlog_file_list_30000_doing_temp + tempDoingFile := doingFile + "_temp" + // 示例: /data/dbbak/binlog/30000/redis_binlog_file_list_30000_done + doneFile := filepath.Join(job.RealBackupDir, "binlog", strconv.Itoa(port), + fmt.Sprintf(consts.DoneRedisBinlogFileList, port)) + + defer func() { + if job.Err == nil { + mylog.Logger.Info(fmt.Sprintf("rename %s to %s", tempDoingFile, doingFile)) + os.Rename(tempDoingFile, doingFile) // rename + } + }() + + doingHandler, job.Err = os.Open(doingFile) + if job.Err != nil { + job.Err = fmt.Errorf("os.Open file:%s fail,err:%v", doingFile, job.Err) + mylog.Logger.Error(job.Err.Error()) + return + } + defer doingHandler.Close() + + doneHandler, job.Err = os.OpenFile(doneFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0744) + if job.Err != nil { + job.Err = fmt.Errorf("os.OpenFile %s failed,err:%v", doneFile, job.Err) + mylog.Logger.Error(job.Err.Error()) + return + } + defer doneHandler.Close() + + tempHandler, job.Err = os.OpenFile(tempDoingFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0744) + if job.Err != nil { + job.Err = fmt.Errorf("os.OpenFile %s failed,err:%v", tempDoingFile, job.Err) + mylog.Logger.Error(job.Err.Error()) + return + } + defer tempHandler.Close() + + scanner := bufio.NewScanner(doingHandler) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + line = scanner.Text() + err = json.Unmarshal([]byte(line), &task) + if err != nil { + // json.Unmarshal failed,skip ... 
+ err = fmt.Errorf("json.Unmarshal fail,err:%s,data:%s,skip it", err, line) + mylog.Logger.Error(err.Error()) + continue + } + task.reporter = job.Reporter + // 删除旧文件 + if nowTime.Sub(task.BackupFileMTime.Time).Seconds() > float64(oldFileLeftSec) { + mylog.Logger.Info(fmt.Sprintf("%s start removing...", task.BackupFile)) + if util.FileExists(task.BackupFile) { + err = os.Remove(task.BackupFile) + if err != nil { + err = fmt.Errorf("os.Remove fail,err:%s,file:%s", err, task.BackupFile) + mylog.Logger.Error(err.Error()) + tempHandler.WriteString(line + "\n") // 删除失败的,记录到temp文件,下次继续重试 + } + fmt.Printf("remove %s\n", task.BackupFile) + } + continue + } + // 无需上传备份系统,本地已备份成功的情况 + if task.Status == consts.BackupStatusLocalSuccess { + doneHandler.WriteString(line + "\n") + continue + } + // 上传备份系统失败的情况,重试上传并写入temp文件中 + if task.Status == consts.BackupStatusToBakSystemFailed { + task.TransferToBackupSystem() + if task.Err != nil { + task.Message = task.Err.Error() + } else { + task.Status = consts.BackupStatusToBakSystemStart + task.Message = "上传备份系统中" + } + tempHandler.WriteString(task.ToString() + "\n") + continue + } + + // 判断是否上传成功 + if task.BackupTaskID > 0 { + uploadTask := backupsys.UploadTask{ + Files: []string{task.BackupFile}, + TaskIDs: []uint64{task.BackupTaskID}, + } + runningTaskIDs, failedTaskIDs, _, _, _, _, failMsgs, job.Err = uploadTask.CheckTasksStatus() + if job.Err != nil { + tempHandler.WriteString(line + "\n") // 获取tasks状态失败,下次重试 + continue + } + if len(failedTaskIDs) > 0 { + if task.Status != consts.BackupStatusFailed { // 失败状态不重复上报 + task.Status = consts.BackupStatusFailed + task.Message = fmt.Sprintf("上传失败,err:%s", strings.Join(failMsgs, ",")) + task.BackupRecordReport() + line = task.ToString() + } + tempHandler.WriteString(line + "\n") // 上传失败,下次继续重试 + } else if len(runningTaskIDs) > 0 { + tempHandler.WriteString(line + "\n") // 上传中,下次继续探测 + } else { + // 上传成功 + task.Status = consts.BackupStatusToBakSysSuccess + task.Message = "上传备份系统成功" + task.BackupRecordReport() + doneHandler.WriteString(task.ToString() + "\n") + } + } + // 其他失败的情况,写到done文件中 + if task.Status == consts.BackupStatusFailed { + doneHandler.WriteString(line + "\n") + continue + } + } + if job.Err = scanner.Err(); job.Err != nil { + job.Err = fmt.Errorf("scanner.Scan fail,err:%v,file:%v", job.Err, doingFile) + mylog.Logger.Error(job.Err.Error()) + return + } +} + +// DeleteTooOldBinlogbackup 根据 redis_binlog_file_list_{port}_done 删除太旧的本地文件 +// 将删除失败 or 不到OldFileLeftDay天数的task继续回写到 redis_binlog_file_list_{port}_done 文件中 +func (job *Job) DeleteTooOldBinlogbackup(port int) { + var doneHandler *os.File + task := Task{} + var line string + var err error + keepTasks := []string{} + oldFileLeftSec := job.Conf.RedisBinlogBackup.OldFileLeftDay * 24 * 3600 + nowTime := time.Now().Local() + + // 示例: /data/dbbak/binlog/30000/redis_binlog_file_list_30000_done + doneFile := filepath.Join(job.RealBackupDir, "binlog", + strconv.Itoa(port), fmt.Sprintf(consts.DoneRedisBinlogFileList, port)) + if !util.FileExists(doneFile) { + return + } + + defer func() { + if len(keepTasks) > 0 { + // 回写到 doneFile中 + done02, err01 := os.OpenFile(doneFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) + if err01 != nil { + job.Err = fmt.Errorf("os.Openfile fail,err:%v,file:%s", err01, doneFile) + mylog.Logger.Error(job.Err.Error()) + return + } + defer done02.Close() + for _, line := range keepTasks { + done02.WriteString(line + "\n") + } + } + }() + doneHandler, job.Err = os.Open(doneFile) + if job.Err != nil { + job.Err = fmt.Errorf("os.OpenFile 
%s failed,err:%v", doneFile, job.Err) + mylog.Logger.Error(job.Err.Error()) + return + } + defer doneHandler.Close() + + scanner := bufio.NewScanner(doneHandler) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + line = scanner.Text() + err = json.Unmarshal([]byte(line), &task) + if err != nil { + // json.Unmarshal failed,skip ... + err = fmt.Errorf("json.Unmarshal fail,err:%v,data:%s,file:%s", err, line, doneFile) + mylog.Logger.Warn(err.Error()) + continue + } + if nowTime.Sub(task.BackupFileMTime.Time).Seconds() > float64(oldFileLeftSec) { + if util.FileExists(task.BackupFile) { + err = os.Remove(task.BackupFile) + if err != nil { + err = fmt.Errorf("os.Remove fail,err:%v,file:%s", err, task.BackupFile) + mylog.Logger.Warn(err.Error()) + keepTasks = append(keepTasks, line) // 删除失败的,下次继续重试 + } + } + } else { + keepTasks = append(keepTasks, line) + } + } + if err = scanner.Err(); err != nil { + job.Err = fmt.Errorf("scanner.Scan fail,err:%v,file:%v", err, doneFile) + mylog.Logger.Error(job.Err.Error()) + return + } +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/redisbinlogbackup/redisbinlogbackup.go b/dbm-services/redis/db-tools/dbmon/pkg/redisbinlogbackup/redisbinlogbackup.go new file mode 100644 index 0000000000..aa4bfd59c5 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/redisbinlogbackup/redisbinlogbackup.go @@ -0,0 +1,2 @@ +// Package redisbinlogbackup redis binlog例行备份 +package redisbinlogbackup diff --git a/dbm-services/redis/db-tools/dbmon/pkg/redisbinlogbackup/task.go b/dbm-services/redis/db-tools/dbmon/pkg/redisbinlogbackup/task.go new file mode 100644 index 0000000000..dc89cbce84 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/redisbinlogbackup/task.go @@ -0,0 +1,418 @@ +package redisbinlogbackup + +import ( + "context" + "dbm-services/redis/db-tools/dbmon/models/myredis" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/backupsys" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "dbm-services/redis/db-tools/dbmon/pkg/customtime" + "dbm-services/redis/db-tools/dbmon/pkg/report" + "dbm-services/redis/db-tools/dbmon/util" + "encoding/json" + "fmt" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + "github.com/gofrs/flock" +) + +// tendisssd binlog文件正则 +// 例如: binlog-30000-0007426-20221109221541.log +// tendisplus binlog文件正则 +// 例如: binlog-0-0000887-20221107123830.log +var tendisBinlogReg = regexp.MustCompile(`binlog-(\d+)-(\d+)-(\d+)\.log`) + +// Task redis binlog备份task +type Task struct { + ReportType string `json:"report_type"` + BkBizID string `json:"bk_biz_id"` + BkCloudID int64 `json:"bk_cloud_id"` + ServerIP string `json:"server_ip"` + ServerPort int `json:"server_port"` + Domain string `json:"domain"` + Password string `json:"-"` + ToBackupSystem string `json:"-"` + OldFileLeftDay int `json:"-"` + DbType string `json:"db_type"` // TendisplusInstance or TendisSSDInstance + RealRole string `json:"role"` + DumpDir string `json:"-"` + KvStoreCount int `json:"-"` + BackupDir string `json:"backup_dir"` // 备份路径,如 /data/dbbak/binlog/30000 + BackupFile string `json:"backup_file"` // 备份的目标文件(已压缩) + KvstoreIdx int `json:"kvstoreidx"` // binlog对应的 kvstoreidx + BackupFileSize int64 `json:"backup_file_size"` // 备份文件大小(已压缩) + BackupFileStartTime customtime.CustomTime `json:"backup_file_start_time"` // binlog文件生成时间(非压缩) + BackupFileMTime customtime.CustomTime `json:"backup_file_mtime"` // binlog文件最后修改时间(非压缩) + BackupTaskID uint64 `json:"backup_taskid"` + BackupMD5 string `json:"backup_md5"` // 目前为空 + 
BackupTag string `json:"backup_tag"` // REDIS_BINLOG + Status string `json:"status"` + Message string `json:"message"` + Cli *myredis.RedisClient `json:"-"` + reporter report.Reporter + lockFile string `json:"-"` + Err error `json:"-"` +} + +// NewBinlogBackupTask new binlog backup task +func NewBinlogBackupTask(bkBizID string, bkCloudID int64, domain, ip string, port int, + password, toBackupSys, backupDir string, oldFileLeftDay int, + reporter report.Reporter) *Task { + + return &Task{ + ReportType: consts.RedisBinlogBackupReportType, + BkBizID: bkBizID, + BkCloudID: bkCloudID, + Domain: domain, + ServerIP: ip, + ServerPort: port, + Password: password, + ToBackupSystem: toBackupSys, + OldFileLeftDay: oldFileLeftDay, + BackupDir: backupDir, + BackupTag: consts.RedisBinlogTAG, + reporter: reporter, + } +} + +// Addr string +func (task *Task) Addr() string { + return task.ServerIP + ":" + strconv.Itoa(task.ServerPort) +} + +// ToString .. +func (task *Task) ToString() string { + tmpBytes, _ := json.Marshal(task) + return string(tmpBytes) +} + +// BackupLocalBinlogs 备份本地binlog文件 +func (task *Task) BackupLocalBinlogs() { + var err error + var locked bool + task.newConnect() + if task.Err != nil { + return + } + defer task.Cli.Close() + + defer util.LocalDirChownMysql(task.BackupDir) + + if task.DbType == consts.TendisTypeRedisInstance { + return + } + + // 获取文件锁 + lockFile := fmt.Sprintf("lock.%s.%d", task.ServerIP, task.ServerPort) + lockFile = filepath.Join(task.BackupDir, lockFile) + mylog.Logger.Info(fmt.Sprintf("redis(%s) try to get filelock:%s", task.Addr(), lockFile)) + + // 每10秒检测一次是否上锁成功,最多等待3小时 + flock := flock.New(lockFile) + lockctx, lockcancel := context.WithTimeout(context.Background(), 3*time.Hour) + defer lockcancel() + locked, task.Err = flock.TryLockContext(lockctx, 10*time.Second) + if task.Err != nil { + task.Err = fmt.Errorf("try to get filelock(%s) fail,err:%v,redis(%s)", lockFile, task.Err, task.Addr()) + mylog.Logger.Error(task.Err.Error()) + return + } + if !locked { + return + } + defer flock.Unlock() + + binlogs := task.GetTendisBinlogs() + if task.Err != nil { + return + } + oldFileLeftSec := task.OldFileLeftDay * 24 * 3600 + for _, item := range binlogs { + // 如果文件太旧则删除 + if time.Now().Local().Sub(item.FileMtime).Seconds() > float64(oldFileLeftSec) { + if util.FileExists(item.File) { + err = os.Remove(item.File) + if err != nil { + err = fmt.Errorf("os.Remove %s fail,err:%v", item.File, err) + mylog.Logger.Error(err.Error()) + } else { + mylog.Logger.Info(fmt.Sprintf("old binlog %s removed,nowTime=%s,fmtime=%s,subSecs=%d,", item.File, + time.Now().Local().Format(consts.UnixtimeLayout), + item.FileMtime.Format(consts.UnixtimeLayout), + int(time.Now().Local().Sub(item.FileMtime).Seconds()))) + } + continue + } + } + task.BackupFile = item.File + task.KvstoreIdx = item.KvStoreIdx + task.BackupFileStartTime.Time = item.StartTime + task.BackupFileMTime.Time = item.FileMtime + task.compressAndUpload() // 无论成功还是失败,都继续下一个binlog file + } +} +func (task *Task) newConnect() { + task.Cli, task.Err = myredis.NewRedisClient(task.Addr(), task.Password, 0, consts.TendisTypeRedisInstance) + if task.Err != nil { + return + } + task.RealRole, task.Err = task.Cli.GetRole() + if task.Err != nil { + return + } + task.DumpDir, task.Err = task.Cli.GetDumpDir() + if task.Err != nil { + return + } + task.DbType, task.Err = task.Cli.GetTendisType() + if task.Err != nil { + return + } + // 除tendisplus外,其余db类型, kvstorecount=1 + if task.DbType != consts.TendisTypeTendisplusInsance { + 
task.KvStoreCount = 1 + return + } + // tendisplus的kvstorecount实际获取 + task.KvStoreCount, task.Err = task.Cli.GetKvstoreCount() + if task.Err != nil { + return + } + return +} + +type tendisBinlogItem struct { + File string `json:"file"` // full path + KvStoreIdx int `json:"kvstoreidx"` + BinlogId int64 `json:"binlogId"` + StartTime time.Time `json:"start_time"` + FileMtime time.Time `json:"file_mtime"` + FileSize int64 `json:"file_size"` +} + +// GetTendisBinlogs 获取需备份的binlogs +func (task *Task) GetTendisBinlogs() (rets []tendisBinlogItem) { + var maxBinlogID int64 = 0 + var binlogID int64 + var startTime, fmTime time.Time + var fnameLayout string = "20060102150405" + var tempDumpDir string + + for storeIdx := 0; storeIdx < task.KvStoreCount; storeIdx++ { + if task.DbType != consts.TendisTypeTendisplusInsance { + tempDumpDir = task.DumpDir + } else { + tempDumpDir = filepath.Join(task.DumpDir, strconv.Itoa(storeIdx)) + } + maxBinlogID = 0 // 重置maxBinlogID + // 获取maxBinlogID + task.Err = filepath.Walk(tempDumpDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + err = fmt.Errorf("filepath.Walk %s fail,err:%v", tempDumpDir, err) + return err + } + if info.IsDir() { + return nil + } + if tendisBinlogReg.MatchString(info.Name()) { + l01 := tendisBinlogReg.FindStringSubmatch(info.Name()) + if len(l01) != 4 { + return nil + } + binlogID, err = strconv.ParseInt(l01[2], 10, 64) + if err != nil { + err = fmt.Errorf("binlogfile:%s %s to int64 fail,err:%v", info.Name(), l01[2], err) + return err + } + if binlogID > maxBinlogID { + maxBinlogID = binlogID + } + } + return nil + }) + if task.Err != nil { + mylog.Logger.Error(task.Err.Error()) + return + } + mylog.Logger.Info(fmt.Sprintf("GetTendisBinlogs redis(%s) kvstore:%d maxBinlogID:%d", + task.Addr(), storeIdx, maxBinlogID)) + + task.Err = filepath.Walk(tempDumpDir, func(path string, info os.FileInfo, err error) error { + if err != nil { + err = fmt.Errorf("filepath.Walk %s fail,err:%v", tempDumpDir, err) + return err + } + if info.IsDir() { + return nil + } + + if tendisBinlogReg.MatchString(info.Name()) { + l01 := tendisBinlogReg.FindStringSubmatch(info.Name()) + if len(l01) != 4 { + return nil + } + binlogID, err = strconv.ParseInt(l01[2], 10, 64) + if err != nil { + err = fmt.Errorf("binlogfile:%s %s to int64 fail,err:%v", info.Name(), l01[2], err) + return err + } + // 只处理 binlogID < maxBinlogID 的文件 + if binlogID >= maxBinlogID { + return nil + } + + startTime, err = time.ParseInLocation(fnameLayout, l01[3], time.Local) + if err != nil { + err = fmt.Errorf("time.Parse '%s' fail,err:%v,binlogfile:%s", l01[3], err, info.Name()) + return err + } + fmTime = info.ModTime().Local() + // 如果binlog文件最近两分钟内修改过,则跳过暂不处理 + if time.Now().Local().Sub(fmTime).Seconds() < 120 { + return nil + } + + rets = append(rets, tendisBinlogItem{ + File: path, + BinlogId: binlogID, + KvStoreIdx: storeIdx, + StartTime: startTime, + FileMtime: fmTime, + FileSize: info.Size(), + }) + } + return nil + }) + if task.Err != nil { + mylog.Logger.Error(task.Err.Error()) + return + } + } + mylog.Logger.Info(fmt.Sprintf("redis(%s) dbType:%s get %d binlog files", task.Addr(), task.DbType, len(rets))) + return +} + +// mvBinlogToBackupDir move binlogfile to backupDir +// When the binlog file name does not contain port information, the new file name add port. 
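+// e.g. binlog-30012-0007515-20221110084710.log -> binlog-<ip>-30012-0007515-20221110084710.log, and +// binlog-1-0002151-20230306160416.log -> binlog-<ip>-<port>-1-0002151-20230306160416.log, so the origin +// instance can always be identified from the backup file name alone.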
+func (task *Task) mvBinlogToBackupDir() { + filename := filepath.Base(task.BackupFile) + var mvCmd string + var targetName, targetFullPath string + if strings.Contains(filename, strconv.Itoa(task.ServerPort)) { + // binlog-30012-0007515-20221110084710.log => binlog-1.1.1.1-30012-0007515-20221110084710.log + targetName = strings.Replace(filename, "binlog-", "binlog-"+task.ServerIP+"-", -1) + targetFullPath = filepath.Join(task.BackupDir, targetName) + } else { + // binlog-1-0002151-20230306160416.log => binlog-1.1.1.1-30000-1-0002151-20230306160416.log + targetName = strings.Replace(filename, "binlog-", "binlog-"+task.ServerIP+"-"+strconv.Itoa(task.ServerPort)+"-", -1) + targetFullPath = filepath.Join(task.BackupDir, targetName) + } + mvCmd = fmt.Sprintf("mv %s %s", task.BackupFile, targetFullPath) + mylog.Logger.Info("mvBinlogToBackupDir mvCommand:" + mvCmd) + _, task.Err = util.RunBashCmd(mvCmd, "", nil, 1*time.Minute) + if task.Err != nil { + return + } + task.BackupFile = targetFullPath +} +func (task *Task) compressAndUpload() { + defer func() { + task.BackupRecordReport() + task.BackupRecordSaveToDoingFile() + }() + if strings.HasSuffix(task.BackupFile, ".log") { + task.mvBinlogToBackupDir() + if task.Err != nil { + task.Status = consts.BackupStatusFailed + task.Message = task.Err.Error() + return + } + task.BackupFile, task.Err = util.CompressFile(task.BackupFile, task.BackupDir, true) + if task.Err != nil { + mylog.Logger.Error(task.Err.Error()) + task.Status = consts.BackupStatusFailed + task.Message = task.Err.Error() + return + } + fileInfo, _ := os.Stat(task.BackupFile) + task.BackupFileSize = fileInfo.Size() + } + if strings.ToLower(task.ToBackupSystem) == "yes" { + task.TransferToBackupSystem() + if task.Err != nil { + task.Status = consts.BackupStatusToBakSystemFailed + task.Message = fmt.Sprintf("上传备份系统失败,err:%v", task.Err) + return + } + task.Status = consts.BackupStatusToBakSystemStart + task.Message = "上传备份系统中" + } else { + task.Status = consts.BackupStatusLocalSuccess + task.Message = "本地备份成功,无需上传备份系统" + } +} + +// TransferToBackupSystem 备份文件上传到备份系统 +func (task *Task) TransferToBackupSystem() { + var msg string + cliFileInfo, err := os.Stat(consts.BackupClient) + if err != nil { + err = fmt.Errorf("os.stat(%s) failed,err:%v", consts.BackupClient, err) + mylog.Logger.Error(err.Error()) + return + } + if !util.IsExecOther(cliFileInfo.Mode().Perm()) { + err = fmt.Errorf("%s is unable to execute by other", consts.BackupClient) + mylog.Logger.Error(err.Error()) + return + } + uploader := backupsys.UploadTask{ + Files: []string{task.BackupFile}, + Tag: task.BackupTag, + } + task.Err = uploader.UploadFiles() + if task.Err != nil { + return + } + task.BackupTaskID = uploader.TaskIDs[0] + msg = fmt.Sprintf("redis(%s) backupFile:%s taskid(%+v) uploading to backupSystem", + task.Addr(), task.BackupFile, task.BackupTaskID) + mylog.Logger.Info(msg) + return +} + +// BackupRecordReport 备份记录上报 +func (task *Task) BackupRecordReport() { + if task.reporter == nil { + return + } + tmpBytes, _ := json.Marshal(task) + // task.Err=task.reporter.AddRecord(string(tmpBytes),true) + task.reporter.AddRecord(string(tmpBytes)+"\n", true) +} + +// BackupRecordSaveToDoingFile 备份记录保存到本地 redis_binlog_file_list_${port}_doing 文件中 +func (task *Task) BackupRecordSaveToDoingFile() { + backupDir := filepath.Dir(task.BackupFile) + // 例如: /data/dbbak/binlog/30000/redis_binlog_file_list_30000_doing + doingFile := filepath.Join(backupDir, fmt.Sprintf(consts.DoingRedisBinlogFileList, task.ServerPort)) + f, err := 
os.OpenFile(doingFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0744) + if err != nil { + task.Err = fmt.Errorf("os.OpenFile %s failed,err:%v", doingFile, err) + mylog.Logger.Error(task.Err.Error()) + return + } + defer f.Close() + tmpBytes, _ := json.Marshal(task) + + if _, err = f.WriteString(string(tmpBytes) + "\n"); err != nil { + task.Err = fmt.Errorf("f.WriteString failed,err:%v,file:%s,line:%s", err, doingFile, string(tmpBytes)) + mylog.Logger.Error(task.Err.Error()) + return + } +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/redisfullbackup/job.go b/dbm-services/redis/db-tools/dbmon/pkg/redisfullbackup/job.go new file mode 100644 index 0000000000..40a4dc9c08 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/redisfullbackup/job.go @@ -0,0 +1,385 @@ +// Package redisfullbackup redis备份任务 +package redisfullbackup + +import ( + "bufio" + "dbm-services/redis/db-tools/dbmon/config" + "dbm-services/redis/db-tools/dbmon/models/myredis" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/backupsys" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "dbm-services/redis/db-tools/dbmon/pkg/report" + "dbm-services/redis/db-tools/dbmon/util" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "go.uber.org/zap" +) + +// GlobRedisFullBakJob global var +var GlobRedisFullBakJob *Job + +// Job 例行备份任务 +type Job struct { // NOCC:golint/naming(其他:设计如此) + Conf *config.Configuration `json:"conf"` + Tasks []*BackupTask `json:"tasks"` + RealBackupDir string `json:"real_backup_dir"` + Reporter report.Reporter `json:"-"` + Err error `json:"-"` +} + +// InitGlobRedisFullBackupJob 新建例行备份任务 +func InitGlobRedisFullBackupJob(conf *config.Configuration) { + GlobRedisFullBakJob = &Job{ + Conf: conf, + } +} + +// Run 执行例行备份 +func (job *Job) Run() { + mylog.Logger.Info("redisfullbackup wakeup,start running...", zap.String("conf", util.ToString(job.Conf))) + defer func() { + if job.Err != nil { + mylog.Logger.Info(fmt.Sprintf("redisfullbackup end fail,err:%v", job.Err)) + } else { + mylog.Logger.Info("redisfullbackup end succ") + } + }() + job.Err = nil + job.GetRealBackupDir() + if job.Err != nil { + return + } + job.GetReporter() + if job.Err != nil { + return + } + defer job.Reporter.Close() + + // 检查历史备份任务状态 并 删除过旧的本地文件 + for _, svrItem := range job.Conf.Servers { + if !consts.IsRedisMetaRole(svrItem.MetaRole) { + continue + } + for _, port := range svrItem.ServerPorts { + job.CheckOldFullbackupStatus(port) + job.DeleteTooOldFullbackup(port) + } + } + job.createTasks() + if job.Err != nil { + return + } + // 本地串行备份 + for _, task := range job.Tasks { + bakTask := task + bakTask.BakcupToLocal() + if bakTask.Err != nil { + job.Err = bakTask.Err + continue + } + } +} + +// GetRealBackupDir 获取本地全备保存路径 +func (job *Job) GetRealBackupDir() { + job.RealBackupDir = consts.GetRedisBackupDir() + job.RealBackupDir = filepath.Join(job.RealBackupDir, "dbbak") + // /data/dbbak/backup 目录需要 + util.MkDirsIfNotExists([]string{ + filepath.Join(job.RealBackupDir, "backup"), + }) + util.LocalDirChownMysql(job.RealBackupDir) +} + +// GetReporter 上报者 +func (job *Job) GetReporter() { + reportDir := filepath.Join(job.Conf.ReportSaveDir, "redis") + util.MkDirsIfNotExists([]string{reportDir}) + util.LocalDirChownMysql(reportDir) + reportFile := fmt.Sprintf(consts.RedisFullbackupRepoter, time.Now().Local().Format(consts.FilenameDayLayout)) + job.Reporter, job.Err = report.NewFileReport(filepath.Join(reportDir, reportFile)) +} + +func (job *Job) createTasks() { + var task 
*BackupTask + var password string + + for _, svrItem := range job.Conf.Servers { + if !consts.IsRedisMetaRole(svrItem.MetaRole) { + continue + } + for _, port := range svrItem.ServerPorts { + password, job.Err = myredis.GetRedisPasswdFromConfFile(port) + if job.Err != nil { + return + } + task = NewFullBackupTask(svrItem.BkBizID, svrItem.BkCloudID, + svrItem.ClusterDomain, svrItem.ServerIP, port, password, + job.Conf.RedisFullBackup.ToBackupSystem, consts.NormalBackupType, job.RealBackupDir, + job.Conf.RedisFullBackup.TarSplit, job.Conf.RedisFullBackup.TarSplitPartSize, + job.Reporter) + job.Tasks = append(job.Tasks, task) + } + } + mylog.Logger.Info(fmt.Sprintf("redisfullbackup createTasks tasks:%s", util.ToString(job.Tasks))) +} + +// CheckOldFullbackupStatus 检查历史全备任务状态 +// 1. 遍历 redis_backup_file_list_${port}_doing 文件 +// 2. 已超过时间的任务,删除本地文件,从 redis_backup_file_list_${port}_doing 中剔除 +// 3. 上传备份系统 运行中 or 失败的任务 记录到 redis_backup_file_list_${port}_doing_temp +// 4. 已成功的任务,记录到 redis_backup_file_list_${port}_done +// 5. rename redis_backup_file_list_${port}_doing_temp to redis_backup_file_list_${port}_doing +func (job *Job) CheckOldFullbackupStatus(port int) { + mylog.Logger.Info(fmt.Sprintf("port:%d start CheckOldFullbackupStatus", port)) + var doingHandler, tempHandler, doneHandler *os.File + var line string + task := BackupTask{} + var err error + var failMsgs []string + var runningTaskIDs, failedTaskIDs []uint64 + oldFileLeftSec := job.Conf.RedisFullBackup.OldFileLeftDay * 24 * 3600 + nowTime := time.Now().Local() + // 示例: /data/dbbak/backup/redis_backup_file_list_30000_doing + doingFile := filepath.Join(job.RealBackupDir, "backup", fmt.Sprintf(consts.DoingRedisFullBackFileList, port)) + if !util.FileExists(doingFile) { + return + } + // 示例: /data/dbbak/backup/redis_backup_file_list_30000_doing_temp + tempDoingFile := doingFile + "_temp" + // 示例: /data/dbbak/backup/redis_backup_file_list_30000_done + doneFile := filepath.Join(job.RealBackupDir, "backup", fmt.Sprintf(consts.DoneRedisFullBackFileList, port)) + + defer func() { + if job.Err == nil { + mylog.Logger.Info(fmt.Sprintf("rename %s to %s", tempDoingFile, doingFile)) + os.Rename(tempDoingFile, doingFile) // rename + } + }() + + doingHandler, job.Err = os.Open(doingFile) + if job.Err != nil { + job.Err = fmt.Errorf("os.Open file:%s fail,err:%v", doingFile, job.Err) + mylog.Logger.Error(job.Err.Error()) + return + } + defer doingHandler.Close() + + tempHandler, job.Err = os.OpenFile(tempDoingFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0744) + if job.Err != nil { + job.Err = fmt.Errorf("os.OpenFile %s failed,err:%v", tempDoingFile, job.Err) + mylog.Logger.Error(job.Err.Error()) + return + } + defer tempHandler.Close() + + doneHandler, job.Err = os.OpenFile(doneFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0744) + if job.Err != nil { + job.Err = fmt.Errorf("os.OpenFile %s failed,err:%v", doneFile, job.Err) + mylog.Logger.Error(job.Err.Error()) + return + } + defer doneHandler.Close() + + scanner := bufio.NewScanner(doingHandler) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + line = scanner.Text() + err = json.Unmarshal([]byte(line), &task) + if err != nil { + // json.Unmarshal failed,skip ... 
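+			// a corrupt record costs only this line: log it and keep scanning, so
+			// one bad entry cannot block status checks for the remaining backups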
+ err = fmt.Errorf("json.Unmarshal fail,err:%s,data:%s,skip it", err, line) + mylog.Logger.Error(err.Error()) + continue + } + task.reporter = job.Reporter + // 删除旧文件 + if nowTime.Sub(task.EndTime.Time).Seconds() > float64(oldFileLeftSec) { + mylog.Logger.Info(fmt.Sprintf("%+v start removing...", task.BackupFiles)) + removeOK := true + for _, bakFile := range task.BackupFiles { + if util.FileExists(bakFile) { + err = os.Remove(bakFile) + if err != nil { + err = fmt.Errorf("os.Remove fail,err:%s,file:%s", err, bakFile) + mylog.Logger.Error(err.Error()) + removeOK = false + } + } + } + if !removeOK { + mylog.Logger.Info(fmt.Sprintf("%+v remove fail,continue add tempFile", task.BackupFiles)) + _, job.Err = tempHandler.WriteString(line + "\n") // 删除失败的,记录到temp文件,下次继续重试 + if job.Err != nil { + job.Err = fmt.Errorf("%s WriteString fail,err:%v", tempDoingFile, job.Err) + mylog.Logger.Error(job.Err.Error()) + return + } + } + continue + } + // 无需上传备份系统,本地已备份成功的情况 + if task.Status == consts.BackupStatusLocalSuccess { + _, job.Err = doneHandler.WriteString(line + "\n") + if job.Err != nil { + job.Err = fmt.Errorf("%s WriteString fail,err:%v", doneFile, job.Err) + mylog.Logger.Error(job.Err.Error()) + return + } + continue + } + // 上传备份系统失败的情况,重试上传并写入temp文件中 + if task.Status == consts.BackupStatusToBakSystemFailed { + task.TransferToBackupSystem() + if task.Err != nil { + task.Message = task.Err.Error() + } else { + task.Status = consts.BackupStatusToBakSystemStart + task.Message = "上传备份系统中" + } + tempHandler.WriteString(task.ToString() + "\n") + continue + } + // 判断是否上传成功 + if len(task.BackupTaskIDs) > 0 { + uploadTask := backupsys.UploadTask{ + Files: task.BackupFiles, + TaskIDs: task.BackupTaskIDs, + } + runningTaskIDs, failedTaskIDs, _, _, _, _, failMsgs, job.Err = uploadTask.CheckTasksStatus() + if job.Err != nil { + _, job.Err = tempHandler.WriteString(line + "\n") // 获取tasks状态失败,下次重试 + if job.Err != nil { + job.Err = fmt.Errorf("%s WriteString fail,err:%v", tempDoingFile, job.Err) + mylog.Logger.Error(job.Err.Error()) + return + } + continue + } + if len(failedTaskIDs) > 0 { + if task.Status != consts.BackupStatusFailed { // 失败状态不重复上报 + task.Status = consts.BackupStatusFailed + task.Message = fmt.Sprintf("上传失败,err:%s", strings.Join(failMsgs, ",")) + task.BackupRecordReport() + line = task.ToString() + } + _, job.Err = tempHandler.WriteString(line + "\n") // 上传失败,下次继续重试 + if job.Err != nil { + job.Err = fmt.Errorf("%s WriteString fail,err:%v", tempDoingFile, job.Err) + mylog.Logger.Error(job.Err.Error()) + return + } + } else if len(runningTaskIDs) > 0 { + _, job.Err = tempHandler.WriteString(line + "\n") // 上传中,下次继续探测 + if job.Err != nil { + job.Err = fmt.Errorf("%s WriteString fail,err:%v", tempDoingFile, job.Err) + mylog.Logger.Error(job.Err.Error()) + return + } + } else { + // 上传成功 + task.Status = consts.BackupStatusToBakSysSuccess + task.Message = "上传备份系统成功" + task.BackupRecordReport() + _, job.Err = doneHandler.WriteString(task.ToString() + "\n") + if job.Err != nil { + job.Err = fmt.Errorf("%s WriteString fail,err:%v", doneFile, job.Err) + mylog.Logger.Error(job.Err.Error()) + return + } + } + } + // 其他失败的情况,写到done文件中 + if task.Status == consts.BackupStatusFailed { + doneHandler.WriteString(line + "\n") + continue + } + } + if job.Err = scanner.Err(); job.Err != nil { + job.Err = fmt.Errorf("scanner.Scan fail,err:%v,file:%v", job.Err, doingFile) + mylog.Logger.Error(job.Err.Error()) + return + } +} + +// DeleteTooOldFullbackup 根据 redis_backup_file_list_{port}_done 删除太旧的本地文件 +// 将删除失败 or 
不到OldFileLeftDay天数的task继续回写到 redis_backup_file_list_{port}_done 文件中 +func (job *Job) DeleteTooOldFullbackup(port int) { + var doneHandler *os.File + task := BackupTask{} + var line string + var err error + keepTasks := []string{} + oldFileLeftSec := job.Conf.RedisFullBackup.OldFileLeftDay * 24 * 3600 + nowTime := time.Now().Local() + + // 示例: /data/dbbak/backup/redis_backup_file_list_30000_done + doneFile := filepath.Join(job.RealBackupDir, "backup", fmt.Sprintf(consts.DoneRedisFullBackFileList, port)) + if !util.FileExists(doneFile) { + return + } + + defer func() { + if len(keepTasks) > 0 { + // 回写到 doneFile中 + done02, err01 := os.OpenFile(doneFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0755) + if err01 != nil { + job.Err = fmt.Errorf("os.Openfile fail,err:%v,file:%s", err01, doneFile) + mylog.Logger.Error(job.Err.Error()) + return + } + defer done02.Close() + for _, line := range keepTasks { + done02.WriteString(line + "\n") + } + } + }() + doneHandler, job.Err = os.Open(doneFile) + if job.Err != nil { + job.Err = fmt.Errorf("os.OpenFile %s failed,err:%v", doneFile, job.Err) + mylog.Logger.Error(job.Err.Error()) + return + } + defer doneHandler.Close() + + scanner := bufio.NewScanner(doneHandler) + scanner.Split(bufio.ScanLines) + for scanner.Scan() { + line = scanner.Text() + err = json.Unmarshal([]byte(line), &task) + if err != nil { + // json.Unmarshal failed,skip ... + err = fmt.Errorf("json.Unmarshal fail,err:%v,data:%s,file:%s", err, line, doneFile) + mylog.Logger.Warn(err.Error()) + continue + } + if nowTime.Sub(task.EndTime.Time).Seconds() > float64(oldFileLeftSec) { + removeOK := true + for _, bakFile := range task.BackupFiles { + if util.FileExists(bakFile) { + err = os.Remove(bakFile) + if err != nil { + err = fmt.Errorf("os.Remove fail,err:%v,file:%s", err, bakFile) + mylog.Logger.Warn(err.Error()) + removeOK = false + } + } + } + if !removeOK { + keepTasks = append(keepTasks, line) // 删除失败的,下次继续重试 + } + } else { + keepTasks = append(keepTasks, line) + } + } + if err = scanner.Err(); err != nil { + job.Err = fmt.Errorf("scanner.Scan fail,err:%v,file:%v", err, doneFile) + mylog.Logger.Error(job.Err.Error()) + return + } +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/redisfullbackup/task.go b/dbm-services/redis/db-tools/dbmon/pkg/redisfullbackup/task.go new file mode 100644 index 0000000000..ba384c4d7e --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/redisfullbackup/task.go @@ -0,0 +1,543 @@ +package redisfullbackup + +import ( + "context" + "dbm-services/redis/db-tools/dbmon/models/myredis" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/backupsys" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "dbm-services/redis/db-tools/dbmon/pkg/customtime" + "dbm-services/redis/db-tools/dbmon/pkg/report" + "dbm-services/redis/db-tools/dbmon/util" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "time" + + "github.com/gofrs/flock" +) + +// TendisSSDSetLogCount tendisSSD设置log参数 +type TendisSSDSetLogCount struct { + LogCount int64 `json:"log-count"` + SlaveLogKeepCount int64 `json:"slave-log-keep-count"` +} + +// BackupTask redis备份task +type BackupTask struct { + ReportType string `json:"report_type"` + BkBizID string `json:"bk_biz_id"` + BkCloudID int64 `json:"bk_cloud_id"` + ServerIP string `json:"server_ip"` + ServerPort int `json:"server_port"` + Domain string `json:"domain"` + Password string `json:"-"` + ToBackupSystem string `json:"-"` + DbType string `json:"db_type"` // RedisInstance or 
TendisplusInstance or TendisSSDInstance + BackupType string `json:"-"` // 常规备份、下线备份 + RealRole string `json:"role"` + DataSize uint64 `json:"-"` // redis实例数据大小 + DataDir string `json:"-"` + BackupDir string `json:"backup_dir"` + TarSplit bool `json:"-"` // 是否对tar文件做split + TarSplitPartSize string `json:"-"` + BackupFiles []string `json:"backup_files"` // 备份的目标文件,如果文件过大会切割成多个 + BackupFilesSize []int64 `json:"backup_files_size"` // 备份文件大小(已切割 or 已压缩 or 已打包) + BackupTaskIDs []uint64 `json:"backup_taskids"` + BackupMD5s []string `json:"backup_md5s"` // 目前为空 + BackupTag string `json:"backup_tag"` // REDIS_FULL or REDIS_BINLOG + // 全备尽管会切成多个文件,但其生成的起始时间、结束时间一样 + StartTime customtime.CustomTime `json:"start_time"` // 生成全备的起始时间 + EndTime customtime.CustomTime `json:"end_time"` // //生成全备的结束时间 + Status string `json:"status"` + Message string `json:"message"` + Cli *myredis.RedisClient `json:"-"` + SSDLogCount TendisSSDSetLogCount `json:"-"` + reporter report.Reporter + lockFile string `json:"-"` + Err error `json:"-"` +} + +// NewFullBackupTask new backup task +func NewFullBackupTask(bkBizID string, bkCloudID int64, domain, ip string, port int, password, + toBackupSys, backupType, backupDir string, tarSplit bool, tarSplitSize string, + reporter report.Reporter) *BackupTask { + return &BackupTask{ + ReportType: consts.RedisFullBackupReportType, + BkBizID: bkBizID, + BkCloudID: bkCloudID, + Domain: domain, + ServerIP: ip, + ServerPort: port, + Password: password, + ToBackupSystem: toBackupSys, + BackupType: backupType, + BackupDir: backupDir, + TarSplit: tarSplit, + TarSplitPartSize: tarSplitSize, + BackupTaskIDs: []uint64{}, + BackupMD5s: []string{}, + BackupTag: consts.RedisFullBackupTAG, + reporter: reporter, + } +} + +// Addr string +func (task *BackupTask) Addr() string { + return task.ServerIP + ":" + strconv.Itoa(task.ServerPort) +} + +// ToString .. +func (task *BackupTask) ToString() string { + tmpBytes, _ := json.Marshal(task) + return string(tmpBytes) +} + +// BakcupToLocal 执行备份task,备份到本地 +func (task *BackupTask) BakcupToLocal() { + var infoRet map[string]string + var connSlaves int + var locked bool + task.newConnect() + if task.Err != nil { + return + } + defer task.Cli.Close() + + infoRet, task.Err = task.Cli.Info("replication") + if task.Err != nil { + return + } + connSlaves, _ = strconv.Atoi(infoRet["connectedSlaves"]) + // 如果是redis_master且对应的slave大于0,则跳过备份 + if task.RealRole == consts.RedisMasterRole && connSlaves > 0 { + return + } + + // 获取文件锁 + lockFile := fmt.Sprintf("lock.%s.%d", task.ServerIP, task.ServerPort) + lockFile = filepath.Join(task.BackupDir, "backup", lockFile) + mylog.Logger.Info(fmt.Sprintf("redis(%s) try to get filelock:%s", task.Addr(), lockFile)) + + // 每10秒检测一次是否上锁成功,最多等待3小时 + flock := flock.New(lockFile) + lockctx, lockcancel := context.WithTimeout(context.Background(), 3*time.Hour) + defer lockcancel() + locked, task.Err = flock.TryLockContext(lockctx, 10*time.Second) + if task.Err != nil { + task.Err = fmt.Errorf("try to get filelock(%s) fail,err:%v,redis(%s)", lockFile, task.Err, task.Addr()) + mylog.Logger.Error(task.Err.Error()) + return + } + if !locked { + return + } + defer flock.Unlock() + + defer func() { + if task.Err != nil && task.Status == "" { + task.Message = task.Err.Error() + task.Status = consts.BackupStatusFailed + } + task.BackupRecordReport() + }() + + task.Status = consts.BackupStatusRunning + task.Message = "start backup..." 
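+	// report the "running" status immediately so an in-flight backup is visible to the platform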
+ task.BackupRecordReport() + + mylog.Logger.Info(fmt.Sprintf("redis(%s) dbType:%s start backup...", task.Addr(), task.DbType)) + + task.PrecheckDisk() + if task.Err != nil { + return + } + + // 如果有备份正在执行,则先等待其完成 + task.Err = task.Cli.WaitForBackupFinish() + if task.Err != nil { + return + } + if task.DbType == consts.TendisTypeRedisInstance { + task.RedisInstanceBackup() + } else if task.DbType == consts.TendisTypeTendisplusInsance { + task.TendisplusInstanceBackup() + } else if task.DbType == consts.TendisTypeTendisSSDInsance { + task.TendisSSDInstanceBackup() + if task.Err != nil { + return + } + task.TendisSSDSetLougCount() + } + if task.Err != nil { + return + } + defer task.BackupRecordSaveToDoingFile() + // 备份上传备份系统 + if strings.ToLower(task.ToBackupSystem) != "yes" { + task.Status = consts.BackupStatusLocalSuccess + task.Message = "本地备份成功,无需上传备份系统" + return + } + task.TransferToBackupSystem() + if task.Err != nil { + task.Status = consts.BackupStatusToBakSystemFailed + task.Message = task.Err.Error() + return + } + task.Status = consts.BackupStatusToBakSystemStart + task.Message = "上传备份系统中" +} + +func (task *BackupTask) newConnect() { + task.Cli, task.Err = myredis.NewRedisClient(task.Addr(), task.Password, 0, consts.TendisTypeRedisInstance) + if task.Err != nil { + return + } + task.RealRole, task.Err = task.Cli.GetRole() + if task.Err != nil { + return + } + task.DataDir, task.Err = task.Cli.GetDir() + if task.Err != nil { + return + } + task.DbType, task.Err = task.Cli.GetTendisType() + if task.Err != nil { + return + } + // 获取数据量大小 + if task.DbType == consts.TendisTypeRedisInstance { + task.DataSize, task.Err = task.Cli.RedisInstanceDataSize() + } else if task.DbType == consts.TendisTypeTendisplusInsance { + task.DataSize, task.Err = task.Cli.TendisplusDataSize() + } else if task.DbType == consts.TendisTypeTendisSSDInsance { + task.DataSize, task.Err = task.Cli.TendisSSDDataSize() + } + if task.Err != nil { + return + } + return +} + +// PrecheckDisk 磁盘检查 +func (task *BackupTask) PrecheckDisk() { + // 检查磁盘空间是否足够 + bakDiskUsg, err := util.GetLocalDirDiskUsg(task.BackupDir) + task.Err = err + if task.Err != nil { + return + } + dataDiskUsg, err := util.GetLocalDirDiskUsg(task.DataDir) + task.Err = err + if task.Err != nil { + return + } + // 磁盘空间使用已有85%,则报错 + if bakDiskUsg.UsageRatio > 85 || dataDiskUsg.UsageRatio > 85 { + task.Err = fmt.Errorf("%s disk Used%d%% > 85%% or %s disk Used(%d%%) >85%%", + task.BackupDir, bakDiskUsg.UsageRatio, + task.DataDir, dataDiskUsg.UsageRatio) + mylog.Logger.Error(task.Err.Error()) + return + } + if task.DbType == consts.TendisTypeRedisInstance { + // redisInstance rdb or aof 都会使用data磁盘空间,如备份会导致磁盘空间超95%则报错 + if int((task.DataSize+dataDiskUsg.UsedSize)*100/dataDiskUsg.TotalSize) > 95 { + task.Err = fmt.Errorf("redis(%s) data_size(%dMB) bgsave/bgrewriteaof,disk(%s) space will occupy more than 95%%", + task.Addr(), task.DataSize/1024/1024, task.DataDir) + mylog.Logger.Error(task.Err.Error()) + return + } + } + if int((task.DataSize+bakDiskUsg.UsedSize)*100/bakDiskUsg.TotalSize) > 95 { + // 如果备份会导致磁盘空间超95% + task.Err = fmt.Errorf("redis(%s) data_size(%dMB) backup disk(%s) space will occupy more than 95%%", + task.Addr(), task.DataSize/1024/1024, task.BackupDir) + mylog.Logger.Error(task.Err.Error()) + return + } + mylog.Logger.Info(fmt.Sprintf( + "check disk space ok,redis(%s) data_size(%dMB),backupDir disk(%s) available space %dMB", + task.Addr(), task.DataSize/1024/1024, task.BackupDir, bakDiskUsg.AvailSize/1024/1024)) +} + +// RedisInstanceBackup 
redis(cache)实例备份 +func (task *BackupTask) RedisInstanceBackup() { + var srcFile string + var targetFile string + var confMap map[string]string + var fileSize int64 + nowtime := time.Now().Local().Format(consts.FilenameTimeLayout) + task.StartTime.Time = time.Now().Local() + if task.RealRole == consts.RedisMasterRole { + // redis master backup rdb + confMap, task.Err = task.Cli.ConfigGet("dbfilename") + if task.Err != nil { + return + } + rdbFile := confMap["dbfilename"] + srcFile = filepath.Join(task.DataDir, rdbFile) + targetFile = filepath.Join(task.BackupDir, + fmt.Sprintf("%s-redis-%s-%s-%d-%s.rdb", + task.BkBizID, task.RealRole, task.ServerIP, task.ServerPort, nowtime)) + task.Err = task.Cli.BgSaveAndWaitForFinish() + } else { + srcFile = filepath.Join(task.DataDir, "appendonly.aof") + targetFile = filepath.Join(task.BackupDir, + fmt.Sprintf("%s-redis-%s-%s-%d-%s.aof", + task.BkBizID, task.RealRole, task.ServerIP, task.ServerPort, nowtime)) + task.Err = task.Cli.BgRewriteAOFAndWaitForDone() + } + if task.Err != nil { + return + } + task.EndTime.Time = time.Now().Local() + cpCmd := fmt.Sprintf("cp %s %s", srcFile, targetFile) + mylog.Logger.Info(cpCmd) + _, task.Err = util.RunBashCmd(cpCmd, "", nil, 10*time.Minute) + if task.Err != nil { + return + } + // aof文件,压缩; redis-server默认会对rdb做压缩,所以rdb文件不做压缩 + if strings.HasSuffix(srcFile, ".aof") { + targetFile, task.Err = util.CompressFile(targetFile, filepath.Dir(targetFile), true) + if task.Err != nil { + return + } + } + // task.BackupFiles = append(task.BackupFiles, filepath.Base(targetFile)) + task.BackupFiles = append(task.BackupFiles, targetFile) + fileSize, task.Err = util.GetFileSize(targetFile) + if task.Err != nil { + mylog.Logger.Error(task.Err.Error()) + return + } + task.BackupFilesSize = append(task.BackupFilesSize, fileSize) + util.LocalDirChownMysql(task.BackupDir) + mylog.Logger.Info(fmt.Sprintf("redis(%s) local backup success", task.Addr())) + return +} + +// TendisplusInstanceBackup tendisplus实例备份 +func (task *BackupTask) TendisplusInstanceBackup() { + var tarFile string + nowtime := time.Now().Local().Format(consts.FilenameTimeLayout) + backName := fmt.Sprintf("%s-TENDISPLUS-FULL-%s-%s-%d-%s", task.BkBizID, task.RealRole, task.ServerIP, task.ServerPort, + nowtime) + backupFullDir := filepath.Join(task.BackupDir, backName) + task.Err = util.MkDirsIfNotExists([]string{backupFullDir}) + if task.Err != nil { + return + } + util.LocalDirChownMysql(task.BackupDir) + task.StartTime.Time = time.Now().Local() + task.Err = task.Cli.TendisplusBackupAndWaitForDone(backupFullDir) + if task.Err != nil { + return + } + task.EndTime.Time = time.Now().Local() + if task.TarSplit && task.TarSplitPartSize != "" { + task.BackupFiles, task.Err = util.TarAndSplitADir(backupFullDir, task.BackupDir, task.TarSplitPartSize, true) + } else { + tarFile, task.Err = util.TarADir(backupFullDir, task.BackupDir, true) + task.BackupFiles = append(task.BackupFiles, tarFile) + } + if task.Err != nil { + mylog.Logger.Error(task.Err.Error()) + return + } + task.GetBakFilesSize() + if task.Err != nil { + return + } + util.LocalDirChownMysql(task.BackupDir) + mylog.Logger.Info(fmt.Sprintf("tendisplus(%s) local backup success", task.Addr())) + return +} + +// tendisSSDBackupVerify 确定tendissd备份是否是有效的 +func (task *BackupTask) tendisSSDBackupVerify(backupFullDir string) { + var err error + verifyBin := consts.TredisverifyBin + if !util.FileExists(verifyBin) { + task.Err = fmt.Errorf("%s not exists", verifyBin) + mylog.Logger.Error(task.Err.Error()) + return + } + 
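+	// run the tredisverify binary against the backup directory; a non-zero exit
+	// marks this backup invalid so it is never reported as a success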
+	cmd := fmt.Sprintf(`
+export LD_PRELOAD=/usr/local/redis/bin/deps/libjemalloc.so;
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/redis/bin/deps;
+%s %s 1 2>/dev/null
+	`, verifyBin, backupFullDir)
+	mylog.Logger.Info(cmd)
+	_, err = util.RunBashCmd(cmd, "", nil, 1*time.Hour)
+	if err != nil {
+		task.Err = fmt.Errorf("backupData(%s) verify failed", backupFullDir)
+		mylog.Logger.Error(task.Err.Error())
+		return
+	}
+}
+
+// TendisSSDInstanceBackup backs up a tendisSSD instance
+func (task *BackupTask) TendisSSDInstanceBackup() {
+	var tarFile string
+	var binlogsizeRet myredis.TendisSSDBinlogSize
+	nowtime := time.Now().Local().Format(consts.FilenameTimeLayout)
+	backName := fmt.Sprintf("%s-TENDISSSD-FULL-%s-%s-%d-%s",
+		task.BkBizID, task.RealRole, task.ServerIP, task.ServerPort, nowtime)
+	backupFullDir := filepath.Join(task.BackupDir, backName)
+	task.Err = util.MkDirsIfNotExists([]string{backupFullDir})
+	if task.Err != nil {
+		return
+	}
+	util.LocalDirChownMysql(task.BackupDir)
+	task.StartTime.Time = time.Now().Local()
+	binlogsizeRet, _, task.Err = task.Cli.TendisSSDBackupAndWaitForDone(backupFullDir)
+	if task.Err != nil {
+		return
+	}
+	task.EndTime.Time = time.Now().Local()
+
+	task.tendisSSDBackupVerify(backupFullDir)
+	if task.Err != nil {
+		return
+	}
+
+	// append the binlog position to the backup file name
+	fileWithBinlogPos := fmt.Sprintf("%s-%d", backupFullDir, binlogsizeRet.EndSeq)
+	task.Err = os.Rename(backupFullDir, fileWithBinlogPos)
+	if task.Err != nil {
+		task.Err = fmt.Errorf("rename %s to %s fail,err:%v", backupFullDir, fileWithBinlogPos, task.Err)
+		mylog.Logger.Error(task.Err.Error())
+		return
+	}
+	backupFullDir = fileWithBinlogPos
+
+	// tar only, no extra compression: rocksdb already compresses its data
+	if task.TarSplit && task.TarSplitPartSize != "" {
+		task.BackupFiles, task.Err = util.TarAndSplitADir(backupFullDir, task.BackupDir, task.TarSplitPartSize, true)
+	} else {
+		tarFile, task.Err = util.TarADir(backupFullDir, task.BackupDir, true)
+		task.BackupFiles = append(task.BackupFiles, filepath.Join(task.BackupDir, tarFile))
+	}
+	if task.Err != nil {
+		mylog.Logger.Error(task.Err.Error())
+		return
+	}
+	task.GetBakFilesSize()
+	if task.Err != nil {
+		return
+	}
+	util.LocalDirChownMysql(task.BackupDir)
+	mylog.Logger.Info(fmt.Sprintf("tendisSSD(%s) local backup success", task.Addr()))
+	return
+}
+
+// GetBakFilesSize collects the size of every backup file
+func (task *BackupTask) GetBakFilesSize() {
+	var fileSize int64
+	task.BackupFilesSize = make([]int64, 0, len(task.BackupFiles))
+	for _, bakFile := range task.BackupFiles {
+		fileSize, task.Err = util.GetFileSize(bakFile)
+		if task.Err != nil {
+			mylog.Logger.Error(task.Err.Error())
+			return
+		}
+		task.BackupFilesSize = append(task.BackupFilesSize, fileSize)
+	}
+}
+
+// TendisSSDSetLougCount sets the tendisSSD log-count parameters
+func (task *BackupTask) TendisSSDSetLougCount() {
+	if task.SSDLogCount.LogCount > 0 {
+		_, task.Err = task.Cli.ConfigSet("log-count", strconv.FormatInt(task.SSDLogCount.LogCount, 10))
+		if task.Err != nil {
+			return
+		}
+	}
+	if task.SSDLogCount.SlaveLogKeepCount > 0 {
+		_, task.Err = task.Cli.ConfigSet("slave-log-keep-count", strconv.FormatInt(task.SSDLogCount.SlaveLogKeepCount, 10))
+		if task.Err != nil {
+			return
+		}
+	}
+}
+
+// TransferToBackupSystem uploads the backup files to the backup system
+func (task *BackupTask) TransferToBackupSystem() {
+	var msg string
+	cliFileInfo, err := os.Stat(consts.BackupClient)
+	if err != nil {
+		task.Err = fmt.Errorf("os.stat(%s) failed,err:%v", consts.BackupClient, err)
+		mylog.Logger.Error(task.Err.Error())
+		return
+	}
+	if !util.IsExecOther(cliFileInfo.Mode().Perm()) {
+		task.Err = fmt.Errorf("%s is unable to execute by other", consts.BackupClient)
+		mylog.Logger.Error(task.Err.Error())
+		return
+	}
+	mylog.Logger.Info(fmt.Sprintf("redis(%s) backupFiles:%+v start upload backupSystem", task.Addr(), task.BackupFiles))
+	bkTag := consts.RedisFullBackupTAG
+	if task.BackupType == consts.ForeverBackupType {
+		bkTag = consts.RedisForeverBackupTAG
+	}
+	uploader := backupsys.UploadTask{
+		Files: task.BackupFiles,
+		Tag:   bkTag,
+	}
+	task.Err = uploader.UploadFiles()
+	if task.Err != nil {
+		return
+	}
+	task.BackupTaskIDs = uploader.TaskIDs
+	// task.Err = uploader.WaitForUploadFinish()
+	// if task.Err != nil {
+	// 	return
+	// }
+	msg = fmt.Sprintf("redis(%s) backupFiles%+v taskid(%+v) uploading to backupSystem",
+		task.Addr(), task.BackupFiles, task.BackupTaskIDs)
+	mylog.Logger.Info(msg)
+	return
+}
+
+// BackupRecordReport reports the backup record
+func (task *BackupTask) BackupRecordReport() {
+	if task.reporter == nil {
+		return
+	}
+	tmpBytes, _ := json.Marshal(task)
+	// task.Err=task.reporter.AddRecord(string(tmpBytes),true)
+	task.reporter.AddRecord(string(tmpBytes)+"\n", true)
}
+
+// BackupRecordSaveToDoingFile saves the backup record into the local redis_backup_file_list_${port}_doing file
+func (task *BackupTask) BackupRecordSaveToDoingFile() {
+	var backupDir string
+	if len(task.BackupFiles) == 0 {
+		mylog.Logger.Warn(fmt.Sprintf("redis(%s) backupFiles:%+v empty", task.Addr(), task.BackupFiles))
+		backupDir = task.BackupDir
+	} else {
+		backupDir = filepath.Dir(task.BackupFiles[0])
+	}
+	// e.g. /data/dbbak/backup/redis_backup_file_list_30000_doing
+	doingFile := filepath.Join(backupDir, "backup", fmt.Sprintf(consts.DoingRedisFullBackFileList, task.ServerPort))
+	f, err := os.OpenFile(doingFile, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0744)
+	if err != nil {
+		task.Err = fmt.Errorf("os.OpenFile %s failed,err:%v", doingFile, err)
+		mylog.Logger.Error(task.Err.Error())
+		return
+	}
+	defer f.Close()
+	tmpBytes, _ := json.Marshal(task)
+
+	if _, err = f.WriteString(string(tmpBytes) + "\n"); err != nil {
+		task.Err = fmt.Errorf("f.WriteString failed,err:%v,file:%s,line:%s", err, doingFile, string(tmpBytes))
+		mylog.Logger.Error(task.Err.Error())
+		return
+	}
+}
diff --git a/dbm-services/redis/db-tools/dbmon/pkg/redisheartbeat/job.go b/dbm-services/redis/db-tools/dbmon/pkg/redisheartbeat/job.go
new file mode 100644
index 0000000000..65a7f24eb9
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/pkg/redisheartbeat/job.go
@@ -0,0 +1,84 @@
+// Package redisheartbeat writes heartbeat keys into redis
+package redisheartbeat
+
+import (
+	"dbm-services/redis/db-tools/dbmon/config"
+	"dbm-services/redis/db-tools/dbmon/models/myredis"
+	"dbm-services/redis/db-tools/dbmon/pkg/consts"
+	"sync"
+)
+
+// GlobRedisHeartbeatJob global var
+var GlobRedisHeartbeatJob *Job
+
+// Job heartbeat job
+type Job struct {
+	Conf  *config.Configuration `json:"conf"`
+	Tasks []*HeartbeatTask      `json:"tasks"`
+	Err   error                 `json:"-"`
+}
+
+// InitGlobRedisHeartbeatJob creates the global heartbeat job
+func InitGlobRedisHeartbeatJob(conf *config.Configuration) {
+	GlobRedisHeartbeatJob = &Job{
+		Conf: conf,
+	}
+}
+
+func (job *Job) createTasks() {
+	var task *HeartbeatTask
+	var password string
+	job.Tasks = []*HeartbeatTask{}
+	for _, svrItem := range job.Conf.Servers {
+		if !consts.IsRedisMetaRole(svrItem.MetaRole) {
+			continue
+		}
+		for _, port := range svrItem.ServerPorts {
+			password, job.Err = myredis.GetRedisPasswdFromConfFile(port)
+			if job.Err != nil {
+				return
+			}
+			task = NewHeartbeatTask(svrItem.BkBizID, svrItem.BkCloudID,
+				svrItem.ServerIP, port, svrItem.ClusterDomain, password)
+			job.Tasks = append(job.Tasks, task)
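+			// one heartbeat task per local redis port; Run() drains these through
+			// a 10-worker pool so one slow instance does not serialize the job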
} + } +} + +// Run 执行例行备份 +func (job *Job) Run() { + job.Err = nil + job.createTasks() + if job.Err != nil { + return + } + // 并发更新心跳 + wg := sync.WaitGroup{} + genChan := make(chan *HeartbeatTask) + var limit int = 10 // 并发度10 + for worker := 0; worker < limit; worker++ { + wg.Add(1) + go func() { + defer wg.Done() + for taskItem := range genChan { + taskItem.UpdateHeartbeat() + } + }() + } + go func() { + // 关闭genChan,以便让所有goroutine退出 + defer close(genChan) + for _, task := range job.Tasks { + bakTask := task + genChan <- bakTask + } + }() + wg.Wait() + for _, task := range job.Tasks { + beatTask := task + if beatTask.Err != nil { + job.Err = beatTask.Err + return + } + } +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/redisheartbeat/task.go b/dbm-services/redis/db-tools/dbmon/pkg/redisheartbeat/task.go new file mode 100644 index 0000000000..edd33e8d52 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/redisheartbeat/task.go @@ -0,0 +1,207 @@ +package redisheartbeat + +import ( + "context" + "dbm-services/redis/db-tools/dbmon/models/myredis" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "fmt" + "strconv" + "time" +) + +// HeartbeatTask 心跳task +type HeartbeatTask struct { // NOCC:golint/naming(其他:) + BkBizID string `json:"bk_biz_id"` + BkCloudID int64 `json:"bk_cloud_id"` + ServerIP string `json:"server_ip"` + ServerPort int `json:"server_port"` + Domain string `json:"domain"` + Password string `json:"-"` + DbType string `json:"db_type"` // RedisInstance or TendisplusInstance or TendisSSDInstance + RealRole string `json:"role"` + ClusterEnabled bool `json:"cluster_enabled"` + DbSize int64 `json:"dbsize"` + MasterCli *myredis.RedisClient `json:"-"` + SlaveCli *myredis.RedisClient `json:"-"` + Err error `json:"-"` +} + +// NewHeartbeatTask 新建心跳task +func NewHeartbeatTask(bkBizID string, bkCloudID int64, + ip string, port int, domain, password string) *HeartbeatTask { + return &HeartbeatTask{ + BkBizID: bkBizID, + BkCloudID: bkCloudID, + ServerIP: ip, + ServerPort: port, + Domain: domain, + Password: password, + } +} + +// Addr string +func (task *HeartbeatTask) Addr() string { + return task.ServerIP + ":" + strconv.Itoa(task.ServerPort) +} + +// UpdateHeartbeat 更新心跳信息 +func (task *HeartbeatTask) UpdateHeartbeat() { + task.newConnect() + if task.Err != nil { + return + } + defer func() { + if task.MasterCli != nil { + task.MasterCli.Close() + task.MasterCli = nil + } + if task.SlaveCli != nil { + task.SlaveCli.Close() + task.SlaveCli = nil + } + }() + task.UpdateInstanceHeartbeat() + if task.Err != nil { + return + } + task.UpdateTendisplusHeartbeat() + if task.Err != nil { + return + } +} + +func (task *HeartbeatTask) newConnect() { + var masterIP, masterPort, masterAddr string + var cli *myredis.RedisClient + if task.Password == "" { + task.Password, task.Err = myredis.GetRedisPasswdFromConfFile(task.ServerPort) + if task.Err != nil { + return + } + } + cli, task.Err = myredis.NewRedisClientWithTimeout(task.Addr(), task.Password, 0, + consts.TendisTypeRedisInstance, 5*time.Second) + if task.Err != nil { + return + } + masterIP, masterPort, _, task.RealRole, _, task.Err = cli.GetMasterData() + if task.Err != nil { + return + } + mylog.Logger.Debug(fmt.Sprintf("redis(%s) role:%s found master(%s:%s)", task.Addr(), task.RealRole, masterIP, + masterPort)) + task.DbType, task.Err = cli.GetTendisType() + if task.Err != nil { + return + } + if !consts.IsTendisplusInstanceDbType(task.DbType) { + // tendisplus执行dbsize很慢,所以不执行 + 
task.DbSize, task.Err = cli.DbSize() + if task.Err != nil { + return + } + } + task.ClusterEnabled, task.Err = cli.IsClusterEnabled() + if task.Err != nil { + return + } + if task.RealRole == consts.RedisMasterRole { + task.MasterCli = cli + } else { + task.SlaveCli = cli + masterAddr = masterIP + ":" + masterPort + task.MasterCli, task.Err = myredis.NewRedisClientWithTimeout(masterAddr, task.Password, 0, + consts.TendisTypeRedisInstance, 5*time.Second) + if task.Err != nil { + return + } + } +} + +// UpdateTendisplusHeartbeat 更新tendisplus心跳(调用adminset命令,确保每个kvstore都有心跳写入) +// 对回档很重要 +func (task *HeartbeatTask) UpdateTendisplusHeartbeat() { + if task.DbType != consts.TendisTypeTendisplusInsance { + return + } + // tendisplus master会写入心跳,tendisplus slave也会在master中写入心跳 + beatKey := fmt.Sprintf("%s_%d:heartbeat", task.ServerIP, task.ServerPort) + nowVal := time.Now().Local().Unix() + _, task.Err = task.MasterCli.AdminSet(beatKey, strconv.FormatInt(nowVal, 10)) + if task.Err != nil { + return + } +} + +// UpdateInstanceHeartbeat 更新 非cluster的 心跳信息 +func (task *HeartbeatTask) UpdateInstanceHeartbeat() { + if task.ClusterEnabled { + mylog.Logger.Debug(fmt.Sprintf("redis:%s cluster-enabled=%v skip...", task.Addr(), task.ClusterEnabled)) + return + } + var kvlist []interface{} + var slaveBeatTime time.Time + // 如果允许切换为 db1 + task.Err = task.MasterCli.SelectDB1WhenClusterDisabled() + if task.Err != nil { + return + } + // 只有'我'是master,才设置 + if task.RealRole == consts.RedisMasterRole { + kvlist = []interface{}{ + "master_ip", task.ServerIP, + "master_port", strconv.Itoa(task.ServerPort), + } + _, task.Err = task.MasterCli.Mset(kvlist) + if task.Err != nil { + return + } + } + // 无论'我'是master还是slave,都需要在master上写入这两个key + timeKey := fmt.Sprintf("%s:%d:time", task.ServerIP, task.ServerPort) + timeVal := time.Now().Local().Unix() + var timeStr string + + kvlist = []interface{}{ + timeKey, strconv.FormatInt(timeVal, 10), + } + dbsizeKey := fmt.Sprintf("%s:%d:0:dbsize", task.ServerIP, task.ServerPort) + if !consts.IsTendisplusInstanceDbType(task.DbType) { + // 非tendisplus写入dbsize key + kvlist = append(kvlist, dbsizeKey, strconv.FormatInt(task.DbSize, 10)) + } + _, task.Err = task.MasterCli.Mset(kvlist) + if task.Err != nil { + return + } + // 只有'我'是slave,才在master上写入 diff key + if task.RealRole == consts.RedisSlaveRole { + task.Err = task.SlaveCli.SelectDB1WhenClusterDisabled() + if task.Err != nil { + return + } + task.Err = task.SlaveCli.ReadOnlyOnClusterSlave() + if task.Err != nil { + return + } + timeStr, task.Err = task.SlaveCli.InstanceClient.Get(context.TODO(), timeKey).Result() + if task.Err != nil { + task.Err = fmt.Errorf("redis:%s db:1 'get %s' fail,err:%v", task.SlaveCli.Addr, timeKey, task.Err) + mylog.Logger.Error(task.Err.Error()) + return + } + if timeStr == "" { + return + } + timeVal, _ := strconv.ParseInt(timeStr, 10, 64) + slaveBeatTime = time.Unix(timeVal, 0) + diffSec := int(time.Now().Local().Sub(slaveBeatTime).Seconds()) + diffKey := fmt.Sprintf("%s:%d:timediff", task.ServerIP, task.ServerPort) + kvlist = []interface{}{diffKey, strconv.Itoa(diffSec)} + _, task.Err = task.MasterCli.Mset(kvlist) + if task.Err != nil { + return + } + } +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/base_task.go b/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/base_task.go new file mode 100644 index 0000000000..1a53ed4412 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/base_task.go @@ -0,0 +1,39 @@ +package redismonitor + +import ( + 
"dbm-services/redis/db-tools/dbmon/config" + "dbm-services/redis/db-tools/dbmon/pkg/sendwarning" +) + +type baseTask struct { + ServerConf config.ConfServerItem `json:"server_conf"` + Password string `json:"password"` + eventSender *sendwarning.BkMonitorEventSender `json:"-"` + Err error `json:"-"` +} + +func newBaseTask(conf *config.Configuration, serverConf config.ConfServerItem, passwd string) (task baseTask, + err error) { + task = baseTask{ + ServerConf: serverConf, + Password: passwd, + } + task.eventSender, err = sendwarning.NewBkMonitorEventSender( + conf.RedisMonitor.BkMonitorEventDataID, + conf.RedisMonitor.BkMonitorEventToken, + conf.GsePath, + ) + if err != nil { + return + } + task.eventSender. + SetBkBizID(serverConf.BkBizID). + SetBkCloudID(serverConf.BkCloudID). + SetApp(serverConf.App). + SetAppName(serverConf.AppName). + SetClusterDomain(serverConf.ClusterDomain). + SetClusterName(serverConf.ClusterName). + SetClusterType(serverConf.ClusterType). + SetInstanceRole(serverConf.MetaRole) + return +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/job.go b/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/job.go new file mode 100644 index 0000000000..0b0f2fbecc --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/job.go @@ -0,0 +1,99 @@ +package redismonitor + +import ( + "dbm-services/redis/db-tools/dbmon/config" + "dbm-services/redis/db-tools/dbmon/models/myredis" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "fmt" +) + +// GlobRedisMonitorJob global var +var GlobRedisMonitorJob *Job + +// Job 监控任务 +type Job struct { + Conf *config.Configuration `json:"conf"` + Err error `json:"-"` +} + +// InitGlobRedisMonitorJob 新建监控任务 +func InitGlobRedisMonitorJob(conf *config.Configuration) { + GlobRedisMonitorJob = &Job{ + Conf: conf, + } +} + +// Run new monitor tasks and run +func (job *Job) Run() { + mylog.Logger.Info("redismonitor wakeup,start running...") + defer func() { + if job.Err != nil { + mylog.Logger.Info(fmt.Sprintf("redismonitor end fail,err:%v", job.Err)) + } else { + mylog.Logger.Info("redismonitor end succ") + } + }() + job.Err = nil + var password string + var predixyItem *PredixyMonitorTask + var twemItem *TwemproxyMonitorTask + var redisItem *RedisMonitorTask + predixyTasks := []*PredixyMonitorTask{} + twemproxyTasks := []*TwemproxyMonitorTask{} + redisTasks := []*RedisMonitorTask{} + for _, svrItem := range job.Conf.Servers { + if svrItem.MetaRole == consts.MetaRolePredixy && len(svrItem.ServerPorts) > 0 { + password, job.Err = myredis.GetProxyPasswdFromConfFlie(svrItem.ServerPorts[0], svrItem.MetaRole) + if job.Err != nil { + continue + } + predixyItem, job.Err = NewPredixyMonitorTask(job.Conf, svrItem, password) + if job.Err != nil { + continue + } + predixyTasks = append(predixyTasks, predixyItem) + } else if svrItem.MetaRole == consts.MetaRoleTwemproxy && len(svrItem.ServerPorts) > 0 { + password, job.Err = myredis.GetProxyPasswdFromConfFlie(svrItem.ServerPorts[0], svrItem.MetaRole) + if job.Err != nil { + continue + } + twemItem, job.Err = NewTwemproxyMonitorTask(job.Conf, svrItem, password) + if job.Err != nil { + continue + } + twemproxyTasks = append(twemproxyTasks, twemItem) + } else if consts.IsRedisMetaRole(svrItem.MetaRole) && len(svrItem.ServerPorts) > 0 { + password, job.Err = myredis.GetRedisPasswdFromConfFile(svrItem.ServerPorts[0]) + if job.Err != nil { + continue + } + redisItem, job.Err = NewRedisMonitorTask(job.Conf, svrItem, password) + if job.Err != nil { + 
+				continue
+			}
+			redisTasks = append(redisTasks, redisItem)
+		}
+	}
+	for _, predixy01 := range predixyTasks {
+		predixyItem := predixy01
+		predixyItem.RunMonitor()
+		if predixyItem.Err != nil {
+			return
+		}
+	}
+	for _, twem01 := range twemproxyTasks {
+		twemItem := twem01
+		twemItem.RunMonitor()
+		if twemItem.Err != nil {
+			return
+		}
+	}
+	for _, redis01 := range redisTasks {
+		redisItem := redis01
+		redisItem.RunMonitor()
+		if redisItem.Err != nil {
+			return
+		}
+	}
+}
diff --git a/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/predixy_task.go b/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/predixy_task.go
new file mode 100644
index 0000000000..3d130270b6
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/predixy_task.go
@@ -0,0 +1,97 @@
+package redismonitor
+
+import (
+	"dbm-services/redis/db-tools/dbmon/config"
+	"dbm-services/redis/db-tools/dbmon/models/myredis"
+	"dbm-services/redis/db-tools/dbmon/mylog"
+	"dbm-services/redis/db-tools/dbmon/pkg/consts"
+	"dbm-services/redis/db-tools/dbmon/util"
+	"fmt"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// PredixyMonitorTask Predixy monitor task
+type PredixyMonitorTask struct {
+	baseTask
+	proxyCli *myredis.RedisClient `json:"-"`
+	Err      error                `json:"-"`
+}
+
+// NewPredixyMonitorTask new
+func NewPredixyMonitorTask(conf *config.Configuration, serverConf config.ConfServerItem,
+	password string) (task *PredixyMonitorTask, err error) {
+	task = &PredixyMonitorTask{}
+	task.baseTask, err = newBaseTask(conf, serverConf, password)
+	if err != nil {
+		return
+	}
+	return
+}
+
+// RunMonitor run
+func (task *PredixyMonitorTask) RunMonitor() {
+	defer func() {
+		if task.proxyCli != nil {
+			task.proxyCli.Close()
+		}
+	}()
+
+	task.RestartWhenConnFail()
+	if task.Err != nil {
+		return
+	}
+	return
+}
+
+// RestartWhenConnFail restarts Predixy when it cannot be connected
+func (task *PredixyMonitorTask) RestartWhenConnFail() {
+	var isPortInUse bool
+	var msg string
+	var proxyAddr string
+	for _, proxyPort := range task.ServerConf.ServerPorts {
+		task.eventSender.SetInstance(task.ServerConf.ServerIP + ":" + strconv.Itoa(proxyPort))
+		proxyAddr = fmt.Sprintf("%s:%d", task.ServerConf.ServerIP, proxyPort)
+		isPortInUse, _ = util.CheckPortIsInUse(task.ServerConf.ServerIP, strconv.Itoa(proxyPort))
+		if isPortInUse {
+			task.proxyCli, task.Err = myredis.NewRedisClientWithTimeout(proxyAddr, task.Password, 0,
+				consts.TendisTypeRedisInstance, 5*time.Second)
+			if task.Err == nil {
+				// predixy is alive and answering; check the next port
+				mylog.Logger.Info(fmt.Sprintf("predixy(%s) check alive ok", proxyAddr))
+				continue
+			}
+		}
+		startScript := filepath.Join(consts.UsrLocal, "predixy", "bin", "start_predixy.sh")
+		if !util.FileExists(startScript) {
+			task.Err = fmt.Errorf("predixy(%s) connect fail,%s not exists??", proxyAddr, startScript)
+			mylog.Logger.Error(task.Err.Error())
+			task.eventSender.SendWarning(consts.EventPredixyLogin, task.Err.Error(),
+				consts.WarnLevelError, task.ServerConf.ServerIP)
+			return
+		}
+		startCmd := []string{startScript + " " + strconv.Itoa(proxyPort)}
+		mylog.Logger.Info(strings.Join(startCmd, " "))
+		_, task.Err = util.RunLocalCmd(startCmd[0], startCmd[1:], "", nil, 10*time.Second)
+		if task.Err != nil {
+			msg = fmt.Sprintf("predixy(%s) connect fail,restart fail", proxyAddr)
+			mylog.Logger.Error(msg)
+			task.eventSender.SendWarning(consts.EventPredixyLogin, msg, consts.WarnLevelError, task.ServerConf.ServerIP)
+			return
+		}
+		task.proxyCli, task.Err = myredis.NewRedisClientWithTimeout(proxyAddr, task.Password, 0,
+			consts.TendisTypeRedisInstance,
5*time.Second) + if task.Err != nil { + // Predixy 重启失败 + msg = fmt.Sprintf("predixy(%s) restart but still connect fail", proxyAddr) + mylog.Logger.Info(msg) + task.eventSender.SendWarning(consts.EventPredixyRestart, msg, consts.WarnLevelError, task.ServerConf.ServerIP) + return + } + msg = fmt.Sprintf("predixy(%s) restart and connect success", proxyAddr) + mylog.Logger.Info(msg) + task.eventSender.SendWarning(consts.EventPredixyRestart, msg, consts.WarnLevelWarning, task.ServerConf.ServerIP) + } +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/redis_task.go b/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/redis_task.go new file mode 100644 index 0000000000..5cd94987f8 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/redis_task.go @@ -0,0 +1,429 @@ +package redismonitor + +import ( + "dbm-services/redis/db-tools/dbmon/config" + "dbm-services/redis/db-tools/dbmon/models/myredis" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "fmt" + "strconv" + "strings" + "time" + + "github.com/dustin/go-humanize" +) + +// RedisMonitorTask redis monitor task +type RedisMonitorTask struct { + baseTask + redisClis []*myredis.RedisClient `json:"-"` + Err error `json:"-"` +} + +// NewRedisMonitorTask new +func NewRedisMonitorTask(conf *config.Configuration, serverConf config.ConfServerItem, + password string) (task *RedisMonitorTask, err error) { + task = &RedisMonitorTask{} + task.baseTask, err = newBaseTask(conf, serverConf, password) + if err != nil { + return + } + return +} + +func (task *RedisMonitorTask) getRedisAddr(ip string, port int) string { + return ip + ":" + strconv.Itoa(port) +} + +// RunMonitor 每次执行只会产生一种告警,否则告警可能太多了 +func (task *RedisMonitorTask) RunMonitor() { + defer func() { + for _, cliItem := range task.redisClis { + cli01 := cliItem + cli01.Close() + } + task.redisClis = []*myredis.RedisClient{} + }() + task.CheckRedisConn() + if task.Err != nil { + return + } + task.SetDbmonKeyOnMaster() + if task.Err != nil { + return + } + task.CheckSyncOnSlave() + if task.Err != nil { + return + } + task.CheckPersist() + if task.Err != nil { + return + } + task.TendisSSDCheck() + if task.Err != nil { + return + } + task.CheckCacheMaxmemory() + if task.Err != nil { + return + } + task.CheckClusterState() + if task.Err != nil { + return + } + return +} + +// CheckRedisConn check redis whether can connect +func (task *RedisMonitorTask) CheckRedisConn() { + var cli01 *myredis.RedisClient + var addr01 string + for _, port := range task.ServerConf.ServerPorts { + addr01 = task.getRedisAddr(task.ServerConf.ServerIP, port) + cli01, task.Err = myredis.NewRedisClientWithTimeout(addr01, task.Password, 0, + consts.TendisTypeRedisInstance, 5*time.Second) + if task.Err != nil { + task.eventSender.SetInstance(addr01) + task.eventSender.SendWarning(consts.EventRedisLogin, task.Err.Error(), consts.WarnLevelError, + task.ServerConf.ServerIP) + return + } + task.redisClis = append(task.redisClis, cli01) + } +} + +// SetDbmonKeyOnMaster 如果'我'是master,则写入 dbmon:$master_ip:$master_port key +func (task *RedisMonitorTask) SetDbmonKeyOnMaster() { + var role, dbmonKey string + var clusterEnabled bool + timeVal := time.Now().Local().Unix() + timeStr := strconv.FormatInt(timeVal, 10) + for idx, cli01 := range task.redisClis { + cliItem := cli01 + task.eventSender.SetInstance(cliItem.Addr) + role, task.Err = cliItem.GetRole() + if task.Err != nil { + task.eventSender.SendWarning(consts.EventRedisLogin, task.Err.Error(), + 
consts.WarnLevelError, task.ServerConf.ServerIP)
+			return
+		}
+		if role != consts.RedisMasterRole {
+			continue
+		}
+		clusterEnabled, task.Err = cliItem.IsClusterEnabled()
+		if task.Err != nil {
+			task.eventSender.SendWarning(consts.EventRedisLogin, task.Err.Error(),
+				consts.WarnLevelError, task.ServerConf.ServerIP)
+			return
+		}
+		// cluster-enabled instances do not get dbmon:* keys
+		if clusterEnabled {
+			continue
+		}
+		cliItem.SelectDB(1)
+
+		dbmonKey = fmt.Sprintf("dbmon:%s:%d", task.ServerConf.ServerIP, task.ServerConf.ServerPorts[idx])
+		_, task.Err = cliItem.Set(dbmonKey, timeStr, 0)
+		if task.Err != nil {
+			task.eventSender.SendWarning(consts.EventRedisLogin, task.Err.Error(),
+				consts.WarnLevelError, task.ServerConf.ServerIP)
+			return
+		}
+	}
+}
+
+// setSlaveDbmonKey connects to the master from the slave and runs: set dbmon:$slaveIP:$slaveport $time
+func (task *RedisMonitorTask) setSlaveDbmonKey(selfAddr, masterIP, masterPort string) {
+	var masterCli *myredis.RedisClient = nil
+	var msg, dbmonKey string
+	var clusterEnabled bool
+	masterAddr := masterIP + ":" + masterPort
+	masterCli, task.Err = myredis.NewRedisClientWithTimeout(masterAddr, task.Password, 0,
+		consts.TendisTypeRedisInstance, 5*time.Second)
+	if task.Err != nil {
+		msg = fmt.Sprintf("redis(%s) conn_master fail,master is %s:%s", selfAddr, masterIP, masterPort)
+		mylog.Logger.Error(msg)
+		task.eventSender.SendWarning(consts.EventRedisSync, msg, consts.WarnLevelError, task.ServerConf.ServerIP)
+		return
+	}
+	defer masterCli.Close()
+
+	clusterEnabled, task.Err = masterCli.IsClusterEnabled()
+	if task.Err != nil {
+		return
+	}
+	// cluster-enabled instances do not get dbmon:* keys
+	if clusterEnabled {
+		return
+	}
+
+	task.Err = masterCli.SelectDB1WhenClusterDisabled()
+	if task.Err != nil {
+		return
+	}
+	dbmonKey = fmt.Sprintf("dbmon:%s", selfAddr)
+	timeVal := time.Now().Local().Unix()
+	timeStr := strconv.FormatInt(timeVal, 10)
+	_, task.Err = masterCli.Set(dbmonKey, timeStr, 0)
+	if task.Err != nil {
+		msg = fmt.Sprintf("redis_master(%s) 'set %s %s' fail", masterAddr, dbmonKey, timeStr)
+		mylog.Logger.Error(msg)
+		task.eventSender.SendWarning(consts.EventRedisSync, msg, consts.WarnLevelError, task.ServerConf.ServerIP)
+		return
+	}
+}
+
+func (task *RedisMonitorTask) checkSlaveTimediff(idx int) {
+	var clusterEnabled bool
+	var timeDiffKey, timeDiffStr, msg string
+	var timeDiffVal int
+	var warnLevel string
+	cliItem := task.redisClis[idx]
+	clusterEnabled, task.Err = cliItem.IsClusterEnabled()
+	if task.Err != nil {
+		return
+	}
+	// cluster-enabled instances do not get dbmon:* keys
+	if clusterEnabled {
+		return
+	}
+	task.Err = cliItem.SelectDB1WhenClusterDisabled()
+	if task.Err != nil {
+		return
+	}
+	timeDiffKey = fmt.Sprintf("%s:timediff", cliItem.Addr)
+	timeDiffStr, task.Err = cliItem.Get(timeDiffKey)
+	if task.Err != nil {
+		return
+	}
+	timeDiffVal, _ = strconv.Atoi(timeDiffStr)
+	if timeDiffVal > consts.EventTimeDiffWarning {
+		warnLevel = consts.WarnLevelWarning
+		if timeDiffVal > consts.EventTimeDiffError {
+			warnLevel = consts.WarnLevelError
+		}
+		task.eventSender.AppendMetrcs(map[string]float64{
+			"timediff": float64(timeDiffVal),
+		})
+
+		msg = fmt.Sprintf("redis_slave(%s) SYNC timediff(%d) > %ds", cliItem.Addr, timeDiffVal, consts.EventTimeDiffWarning)
+		mylog.Logger.Warn(msg)
+		task.eventSender.SendWarning(consts.EventRedisSync, msg, warnLevel, task.ServerConf.ServerIP)
+		return
+	}
+}
+
+// CheckSyncOnSlave checks the sync status on the slave
+func (task *RedisMonitorTask) CheckSyncOnSlave() {
+	var msg, warnLevel string
+	var masterHost, masterPort, linkStatus, selfRole string
+	var masterLastIOSec int64
+	for idx, cli01 := range
task.redisClis { + cliItem := cli01 + task.eventSender.SetInstance(cliItem.Addr) + masterHost, masterPort, linkStatus, selfRole, masterLastIOSec, task.Err = cliItem.GetMasterData() + if task.Err != nil { + // task.trigger.SendWarning(consts.WarnRedisSync, task.Err.Error(), task.ServerConf.ServerIP) + // return + continue + } + if selfRole != consts.RedisSlaveRole { + continue + } + task.setSlaveDbmonKey(cliItem.Addr, masterHost, masterPort) + if task.Err != nil { + return + } + if linkStatus != consts.MasterLinkStatusUP { + msg = fmt.Sprintf("redis_slave(%s) master_link_status=%s", cliItem.Addr, linkStatus) + mylog.Logger.Error(msg) + task.eventSender.SendWarning(consts.EventRedisSync, msg, consts.WarnLevelError, task.ServerConf.ServerIP) + return + } + if masterLastIOSec > consts.EventMasterLastIOSecWarning { + warnLevel = consts.WarnLevelWarning + if masterLastIOSec > consts.EventMasterLastIOSecError { + warnLevel = consts.WarnLevelError + } + task.eventSender.AppendMetrcs(map[string]float64{ + "master_last_io_seconds_ago": float64(masterLastIOSec), + }) + msg = fmt.Sprintf("redis_slave(%s) master_last_io_seconds_ago:%d > %d", + cliItem.Addr, masterLastIOSec, consts.EventMasterLastIOSecWarning) + mylog.Logger.Warn(msg) + task.eventSender.SendWarning(consts.EventRedisSync, msg, warnLevel, task.ServerConf.ServerIP) + return + } + task.checkSlaveTimediff(idx) + if task.Err != nil { + return + } + } +} + +// CheckPersist 检查master是否有slave,检查cache redis slave是否开启aof +func (task *RedisMonitorTask) CheckPersist() { + var role, dbtype, appendonly string + var msg string + var connectedSlaves int + var confmap map[string]string + for _, cli01 := range task.redisClis { + cliItem := cli01 + task.eventSender.SetInstance(cliItem.Addr) + role, task.Err = cliItem.GetRole() + if task.Err != nil { + // task.trigger.SendWarning(consts.WarnRedisLogin, task.Err.Error(), task.ServerConf.ServerIP) + // return + continue + } + connectedSlaves, task.Err = cliItem.ConnectedSlaves() + if task.Err != nil { + // task.trigger.SendWarning(consts.WarnRedisLogin, task.Err.Error(), task.ServerConf.ServerIP) + // return + continue + } + if role == consts.RedisMasterRole && connectedSlaves == 0 { + msg = fmt.Sprintf("redis_master(%s) no slave", cliItem.Addr) + mylog.Logger.Error(msg) + task.eventSender.SendWarning(consts.EventRedisPersist, msg, consts.WarnLevelError, task.ServerConf.ServerIP) + return + } + dbtype, task.Err = cliItem.GetTendisType() + if task.Err != nil { + continue + } + if dbtype != consts.TendisTypeRedisInstance || role != consts.RedisSlaveRole { + continue + } + // 检查 cache redis slave, aof 是否开启 + // TODO 无法知道远程配置的情况下,如果是人为关闭的aof,如何不告警 + confmap, task.Err = cliItem.ConfigGet("appendonly") + if task.Err != nil { + continue + } + appendonly, _ = confmap["appendonly"] + if strings.ToLower(appendonly) == "no" { + msg = fmt.Sprintf("redis_slave(%s) appendonly==%s", cliItem.Addr, appendonly) + mylog.Logger.Warn(msg) + task.eventSender.SendWarning(consts.EventRedisPersist, msg, consts.WarnLevelWarning, task.ServerConf.ServerIP) + return + } + } +} + +// TendisSSDCheck tendisssd check +// - check binloglen +func (task *RedisMonitorTask) TendisSSDCheck() { + var dbtype, msg, warnLevel string + var binlogRange myredis.TendisSSDBinlogSize + var binlogLen uint64 + for _, cli01 := range task.redisClis { + cliItem := cli01 + task.eventSender.SetInstance(cliItem.Addr) + dbtype, task.Err = cliItem.GetTendisType() + if task.Err != nil { + continue + } + if dbtype != consts.TendisTypeTendisSSDInsance { + continue + } + 
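+		// binloglen = end_seq - first_seq: how much binlog is still kept on
+		// disk; an abnormally large backlog triggers the warning below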
binlogRange, task.Err = cliItem.TendisSSDBinlogSize() + if task.Err != nil { + continue + } + binlogLen = binlogRange.EndSeq - binlogRange.FirstSeq + if binlogLen > consts.EventSSDBinlogLenWarnning { + warnLevel = consts.WarnLevelWarning + if binlogLen > consts.EventSSDBinlogLenError { + warnLevel = consts.WarnLevelError + } + task.eventSender.AppendMetrcs(map[string]float64{ + "binloglen": float64(binlogLen), + }) + msg = fmt.Sprintf("tendisSSD(%s) binlogrange[%d,%d] binloglen %d > %d ", + cliItem.Addr, + binlogRange.FirstSeq, binlogRange.EndSeq, binlogLen, + consts.EventSSDBinlogLenWarnning) + mylog.Logger.Warn(msg) + task.eventSender.SendWarning(consts.EventTendisBinlogLen, msg, warnLevel, task.ServerConf.ServerIP) + return + } + } +} + +// CheckCacheMaxmemory 检查cache redis的memused 与 maxmemory比值 +func (task *RedisMonitorTask) CheckCacheMaxmemory() { + var dbtype, msg, warnLevel string + var maxmemory uint64 + var memoryUsed uint64 + var usedPercent float64 + for _, cli01 := range task.redisClis { + cliItem := cli01 + task.eventSender.SetInstance(cliItem.Addr) + dbtype, task.Err = cliItem.GetTendisType() + if task.Err != nil { + continue + } + if dbtype != consts.TendisTypeRedisInstance { + continue + } + maxmemory, task.Err = cliItem.MaxMemory() + if task.Err != nil { + continue + } + if maxmemory == 0 { + continue + } + memoryUsed, _, task.Err = cliItem.GetMemUsed() + if task.Err != nil { + continue + } + usedPercent = float64(memoryUsed*1.0) / float64(maxmemory) + if usedPercent > consts.EventMemoryUsedPercentWarnning { + warnLevel = consts.WarnLevelWarning + if usedPercent > consts.EventMemoryUsedPercentError { + warnLevel = consts.WarnLevelError + } + task.eventSender.AppendMetrcs(map[string]float64{ + "used_memory": float64(memoryUsed), + "maxmemory": float64(maxmemory), + "used_percent": usedPercent, + }) + msg = fmt.Sprintf("redis(%s) used_memory:%s maxmemory:%s used_percent:%.2f%%", + cliItem.Addr, + humanize.IBytes(memoryUsed), humanize.IBytes(maxmemory), usedPercent, + ) + mylog.Logger.Warn(msg) + task.eventSender.SendWarning(consts.EventRedisMaxmemory, msg, warnLevel, task.ServerConf.ServerIP) + } + } +} + +// CheckClusterState 检查集群状态 +func (task *RedisMonitorTask) CheckClusterState() { + var clusterEnabled bool + var clusterInfo *myredis.CmdClusterInfo + var msg string + for _, cli01 := range task.redisClis { + cliItem := cli01 + task.eventSender.SetInstance(cliItem.Addr) + clusterEnabled, task.Err = cliItem.IsClusterEnabled() + if task.Err != nil { + return + } + if !clusterEnabled { + continue + } + clusterInfo, task.Err = cliItem.ClusterInfo() + if task.Err != nil { + return + } + if clusterInfo.ClusterState != consts.ClusterStateOK { + msg = fmt.Sprintf("redis(%s) cluster_state:%s != %s", cliItem.Addr, clusterInfo.ClusterState, consts.ClusterStateOK) + mylog.Logger.Warn(msg) + task.eventSender.SendWarning(consts.EventRedisClusterState, msg, consts.WarnLevelError, task.ServerConf.ServerIP) + return + } + } +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/redismonitor.go b/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/redismonitor.go new file mode 100644 index 0000000000..c19c1c7427 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/redismonitor.go @@ -0,0 +1,2 @@ +// Package redismonitor TODO +package redismonitor diff --git a/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/twemproxy_task.go b/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/twemproxy_task.go new file mode 100644 index 0000000000..346779dc0b --- /dev/null 
+++ b/dbm-services/redis/db-tools/dbmon/pkg/redismonitor/twemproxy_task.go
@@ -0,0 +1,98 @@
+package redismonitor
+
+import (
+	"dbm-services/redis/db-tools/dbmon/config"
+	"dbm-services/redis/db-tools/dbmon/models/myredis"
+	"dbm-services/redis/db-tools/dbmon/mylog"
+	"dbm-services/redis/db-tools/dbmon/pkg/consts"
+	"dbm-services/redis/db-tools/dbmon/util"
+	"fmt"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// TwemproxyMonitorTask twemproxy monitor task
+type TwemproxyMonitorTask struct {
+	baseTask
+	proxyCli *myredis.RedisClient `json:"-"`
+	Err      error                `json:"-"`
+}
+
+// NewTwemproxyMonitorTask new
+func NewTwemproxyMonitorTask(conf *config.Configuration, serverConf config.ConfServerItem,
+	password string) (task *TwemproxyMonitorTask, err error) {
+	task = &TwemproxyMonitorTask{}
+	task.baseTask, err = newBaseTask(conf, serverConf, password)
+	if err != nil {
+		return
+	}
+	return
+}
+
+// RunMonitor run
+func (task *TwemproxyMonitorTask) RunMonitor() {
+	defer func() {
+		if task.proxyCli != nil {
+			task.proxyCli.Close()
+		}
+	}()
+
+	task.RestartWhenConnFail()
+	if task.Err != nil {
+		return
+	}
+	return
+}
+
+// RestartWhenConnFail restarts twemproxy when it cannot be connected
+func (task *TwemproxyMonitorTask) RestartWhenConnFail() {
+	var isPortInUse bool
+	var msg string
+	var proxyAddr string
+	for _, proxyPort := range task.ServerConf.ServerPorts {
+		proxyAddr = fmt.Sprintf("%s:%d", task.ServerConf.ServerIP, proxyPort)
+		task.eventSender.SetInstance(proxyAddr)
+		isPortInUse, _ = util.CheckPortIsInUse(task.ServerConf.ServerIP, strconv.Itoa(proxyPort))
+		if isPortInUse {
+			task.proxyCli, task.Err = myredis.NewRedisClientWithTimeout(proxyAddr, task.Password, 0,
+				consts.TendisTypeRedisInstance, 5*time.Second)
+			if task.Err == nil {
+				// twemproxy is alive and answering; check the next port
+				mylog.Logger.Info(fmt.Sprintf("twemproxy(%s) check alive ok", proxyAddr))
+				continue
+			}
+		}
+		startScript := filepath.Join(consts.UsrLocal, "twemproxy", "bin", "start_nutcracker.sh")
+		if !util.FileExists(startScript) {
+			task.Err = fmt.Errorf("twemproxy(%s) connect fail,%s not exists??", proxyAddr, startScript)
+			mylog.Logger.Error(task.Err.Error())
+			task.eventSender.SendWarning(consts.EventTwemproxyLogin, task.Err.Error(), consts.WarnLevelError,
+				task.ServerConf.ServerIP)
+			return
+		}
+		startCmd := []string{startScript + " " + strconv.Itoa(proxyPort)}
+		mylog.Logger.Info(strings.Join(startCmd, " "))
+		_, task.Err = util.RunLocalCmd(startCmd[0], startCmd[1:], "", nil, 10*time.Second)
+		if task.Err != nil {
+			msg = fmt.Sprintf("twemproxy(%s) connect fail,restart fail", proxyAddr)
+			task.eventSender.SendWarning(consts.EventTwemproxyLogin, msg, consts.WarnLevelError, task.ServerConf.ServerIP)
+			return
+		}
+		task.proxyCli, task.Err = myredis.NewRedisClientWithTimeout(proxyAddr, task.Password, 0,
+			consts.TendisTypeRedisInstance, 5*time.Second)
+		if task.Err != nil {
+			// twemproxy restarted but still cannot be connected
+			msg = fmt.Sprintf("twemproxy(%s) restart but still connect fail", proxyAddr)
+			mylog.Logger.Info(msg)
+			task.eventSender.SendWarning(consts.EventTwemproxyRestart, msg, consts.WarnLevelError, task.ServerConf.ServerIP)
+			return
+		}
+		msg = fmt.Sprintf("twemproxy(%s) restart and connect success", proxyAddr)
+		mylog.Logger.Info(msg)
+		task.eventSender.SendWarning(consts.EventTwemproxyRestart, msg, consts.WarnLevelWarning, task.ServerConf.ServerIP)
+	}
+}
diff --git a/dbm-services/redis/db-tools/dbmon/pkg/report/clear_history.go b/dbm-services/redis/db-tools/dbmon/pkg/report/clear_history.go
b/dbm-services/redis/db-tools/dbmon/pkg/report/clear_history.go
new file mode 100644
index 0000000000..1aedf38821
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/pkg/report/clear_history.go
@@ -0,0 +1,49 @@
+package report
+
+import (
+	"dbm-services/redis/db-tools/dbmon/config"
+	"dbm-services/redis/db-tools/dbmon/mylog"
+	"dbm-services/redis/db-tools/dbmon/util"
+	"fmt"
+	"path/filepath"
+	"time"
+
+	"go.uber.org/zap"
+)
+
+// GlobHistoryClearJob global var
+var GlobHistoryClearJob *HistoryClearJob
+
+// HistoryClearJob clean up historical report records
+type HistoryClearJob struct {
+	Conf *config.Configuration `json:"conf"`
+}
+
+// InitGlobalHistoryClearJob init the global history clear job
+func InitGlobalHistoryClearJob(conf *config.Configuration) {
+	GlobHistoryClearJob = &HistoryClearJob{
+		Conf: conf,
+	}
+}
+
+// Run execute one round of history cleanup
+func (job *HistoryClearJob) Run() {
+	mylog.Logger.Info("historyClear wakeup,start running...", zap.String("conf", util.ToString(job.Conf)))
+	defer mylog.Logger.Info("historyClear end running")
+	job.ClearRedisHistoryReport()
+}
+
+// ClearRedisHistoryReport remove redis report records older than ReportLeftDay
+func (job *HistoryClearJob) ClearRedisHistoryReport() (err error) {
+	var clearCmd string
+	redisReportPath := filepath.Join(job.Conf.GsePath, "redis")
+	if !util.FileExists(redisReportPath) {
+		return
+	}
+	clearCmd = fmt.Sprintf(
+		`cd %s && find ./ -type f -regex '.*\.log$' -mtime +%d -exec rm -f {} \;`,
+		redisReportPath, job.Conf.ReportLeftDay)
+	mylog.Logger.Info(clearCmd)
+	_, err = util.RunBashCmd(clearCmd, "", nil, 1*time.Hour)
+	return
+}
diff --git a/dbm-services/redis/db-tools/dbmon/pkg/report/filereport.go b/dbm-services/redis/db-tools/dbmon/pkg/report/filereport.go
new file mode 100644
index 0000000000..1ab0ecd376
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/pkg/report/filereport.go
@@ -0,0 +1,99 @@
+package report
+
+import (
+	"bufio"
+	"dbm-services/redis/db-tools/dbmon/mylog"
+	"fmt"
+	"os"
+	"sync"
+)
+
+var _ Reporter = (*FileReport)(nil)
+
+// FileReport file-based reporter
+type FileReport struct {
+	saveFile  string
+	fileP     *os.File
+	bufWriter *bufio.Writer
+	mux       sync.Mutex // guards concurrent writes
+}
+
+// NewFileReport create a FileReport appending to savefile
+func NewFileReport(savefile string) (ret *FileReport, err error) {
+	ret = &FileReport{}
+	err = ret.SetSaveFile(savefile)
+	return ret, err
+}
+
+// AddRecord append one record; writes are serialized by f.mux
+func (f *FileReport) AddRecord(item string, flush bool) (err error) {
+	f.mux.Lock()
+	defer f.mux.Unlock()
+	if f.saveFile == "" {
+		err = fmt.Errorf("saveFile can't be empty")
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	_, err = f.bufWriter.WriteString(item)
+	if err != nil {
+		err = fmt.Errorf("bufio.Writer WriteString fail,err:%v,saveFile:%s", err, f.saveFile)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	if flush {
+		f.bufWriter.Flush()
+	}
+	return nil
+}
+
+// SaveFile returns the current save file path
+func (f *FileReport) SaveFile() string {
+	return f.saveFile
+}
+
+// SetSaveFile close the old file (if any) and open savefile for appending
+func (f *FileReport) SetSaveFile(savefile string) error {
+	var err error
+	err = f.Close()
+	if err != nil {
+		return err
+	}
+	if savefile == "" {
+		err = fmt.Errorf("saveFile(%s) cannot be empty", savefile)
+		mylog.Logger.Error(err.Error())
+		return err
+	}
+	f.saveFile = savefile
+	f.fileP, err = os.OpenFile(savefile, os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644)
+	if err != nil {
+		err = fmt.Errorf("open file:%s fail,err:%v", savefile, err)
+		mylog.Logger.Error(err.Error())
+		return err
+	}
+	f.bufWriter = bufio.NewWriter(f.fileP)
+	return nil
+}
+
+// Close flush and close the underlying file
+func (f *FileReport) Close() error {
+	f.mux.Lock()
+	defer f.mux.Unlock()
+
+	var err error
+	if f.saveFile == "" {
+		return nil
+	}
+	savedFile := f.saveFile
+	f.saveFile = ""
+
+	err = f.bufWriter.Flush()
+	if err != nil {
+		err = fmt.Errorf("bufio flush fail.err:%v,file:%s", err, savedFile)
+		mylog.Logger.Error(err.Error())
+		return err
+	}
+	err = f.fileP.Close()
+	if err != nil {
+		err = fmt.Errorf("file close fail.err:%v,file:%s", err, savedFile)
+		mylog.Logger.Error(err.Error())
+		return err
+	}
+	return nil
+}
diff --git a/dbm-services/redis/db-tools/dbmon/pkg/report/report.go b/dbm-services/redis/db-tools/dbmon/pkg/report/report.go
new file mode 100644
index 0000000000..71a1ceaa5d
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/pkg/report/report.go
@@ -0,0 +1,2 @@
+// Package report local file based reporting
+package report
diff --git a/dbm-services/redis/db-tools/dbmon/pkg/report/reporter.go b/dbm-services/redis/db-tools/dbmon/pkg/report/reporter.go
new file mode 100644
index 0000000000..44c4c80caf
--- /dev/null
+++ b/dbm-services/redis/db-tools/dbmon/pkg/report/reporter.go
@@ -0,0 +1,56 @@
+package report // proactively reported records (backups etc.)
+
+import (
+	"dbm-services/redis/db-tools/dbmon/mylog"
+	"dbm-services/redis/db-tools/dbmon/pkg/consts"
+	"dbm-services/redis/db-tools/dbmon/util"
+	"fmt"
+	"os"
+	"path/filepath"
+	"time"
+)
+
+// Reporter report interface
+type Reporter interface {
+	AddRecord(item string, flush bool) error
+	Close() error
+}
+
+// CreateReportDir create the report dir, /home/mysql/dbareport -> {REDIS_BACKUP_DIR}/dbbak/dbareport
+func CreateReportDir() (err error) {
+	var realLink string
+	realReportDir := filepath.Join(consts.GetRedisBackupDir(), "dbbak", "dbareport") // e.g. /data/dbbak/dbareport
+	if !util.FileExists(realReportDir) {
+		err = util.MkDirsIfNotExists([]string{realReportDir})
+		if err != nil {
+			mylog.Logger.Error(err.Error())
+			return
+		}
+	}
+	util.LocalDirChownMysql(realReportDir)
+	if util.FileExists(consts.DbaReportSaveDir) {
+		realLink, err = filepath.EvalSymlinks(consts.DbaReportSaveDir)
+		if err != nil {
+			err = fmt.Errorf("filepath.EvalSymlinks %s fail,err:%v", consts.DbaReportSaveDir, err)
+			mylog.Logger.Error(err.Error())
+			return err
+		}
+		// /home/mysql/dbareport -> /data/dbbak/dbareport already correct, return directly
+		if realLink == realReportDir {
+			return nil
+		}
+		// /home/mysql/dbareport does not point to /data/dbbak/dbareport, remove it first
+		rmCmd := "rm -rf " + consts.DbaReportSaveDir
+		util.RunBashCmd(rmCmd, "", nil, 1*time.Minute)
+	}
+	err = os.Symlink(realReportDir, consts.DbaReportSaveDir)
+	if err != nil {
+		err = fmt.Errorf("os.Symlink %s -> %s fail,err:%v", consts.DbaReportSaveDir, realReportDir, err)
+		mylog.Logger.Error(err.Error())
+		return
+	}
+	mylog.Logger.Info(fmt.Sprintf("create softLink success,%s -> %s", consts.DbaReportSaveDir, realReportDir))
+	util.MkDirsIfNotExists([]string{consts.RedisReportSaveDir})
+	util.LocalDirChownMysql(consts.DbaReportSaveDir)
+	return
+}
diff --git
a/dbm-services/redis/db-tools/dbmon/pkg/sendwarning/bkmonitorbeat.go b/dbm-services/redis/db-tools/dbmon/pkg/sendwarning/bkmonitorbeat.go new file mode 100644 index 0000000000..3013ac08fd --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/sendwarning/bkmonitorbeat.go @@ -0,0 +1,281 @@ +package sendwarning + +import ( + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/util" + "encoding/json" + "fmt" + "path/filepath" + "strings" + "time" +) + +// eventBodyItem 告警项 +type eventBodyItem struct { + EventName string `json:"event_name"` + Target string `json:"target"` + Event struct { + Content string `json:"content"` + } `json:"event"` + Dimension map[string]interface{} `json:"dimension,omitempty"` + Metrics map[string]float64 `json:"metrics,omitempty"` +} + +// BkMonitorEventSender 蓝鲸监控自定义事件 +type BkMonitorEventSender struct { + DataID int64 `json:"data_id"` + AccessToken string `json:"access_token"` + GsePath string `json:"-"` + ToolBkMonitorBeat string `json:"-"` + AgentAddress string `json:"-"` + Data []eventBodyItem `json:"data"` +} + +// NewBkMonitorEventSender new +func NewBkMonitorEventSender(dataID int64, token, gsePath string) (ret *BkMonitorEventSender, err error) { + if !util.FileExists(gsePath) { + err = fmt.Errorf("GSE_PATH:%s not exists", gsePath) + mylog.Logger.Error(err.Error()) + return + } + ret = &BkMonitorEventSender{ + DataID: dataID, + AccessToken: token, + GsePath: gsePath, + } + ret.ToolBkMonitorBeat = filepath.Join(gsePath, "plugins/bin/bkmonitorbeat") + if !util.FileExists(ret.ToolBkMonitorBeat) { + err = fmt.Errorf("%s not exists", ret.ToolBkMonitorBeat) + mylog.Logger.Error(err.Error()) + return + } + beatConf := filepath.Join(gsePath, "plugins/etc/bkmonitorbeat.conf") + if !util.FileExists(beatConf) { + err = fmt.Errorf("%s not exists", beatConf) + mylog.Logger.Error(err.Error()) + return + } + grepCmd := fmt.Sprintf(`grep ipc %s|awk '{print $2}'`, beatConf) + ret.AgentAddress, err = util.RunBashCmd(grepCmd, "", nil, 10*time.Second) + if err != nil { + return + } + ret.Data = append(ret.Data, eventBodyItem{}) + return +} + +// SendWarning 发送告警,示例: +// 可以不传入 dimension 和 metrics,如直接调用 SendWarning("xxx","xxx","1.1.1.1",nil,nil) +/* + /usr/local/gse_bkte/plugins/bin/bkmonitorbeat -report -report.bk_data_id 5428xx \ + -report.type agent \ + -report.message.kind event \ + -report.agent.address /usr/local/gse_bkte/agent/data/ipc.state.report \ + + -report.message.body '{ + "data_id":5428xx, + "access_token":"xxxx", + "data":[{ + "event_name":"REDIS_MEM", + "target":"1.1.1.1", + "event":{ + "content":" tendisx.aaaa.testapp.db 1.1.1.1:30000 memory_used 7.2GB >= 90% maxmemory:8GB" + }, + "dimension":{ + "bk_biz_id":"200500194", + "bk_cloud_id":"0", + "app_id":"200500194", + "app_name":"测试app", + "app":"testapp", + "cluster_domain":"tendisx.aaaa.testapp.db", + "cluster_name":"aaaa", + "cluster_type":"PredixyTendisplusCluster", + "instance":"1.1.1.1:30000", + "instance_role":"redis_slave", + "warn_level":"warning" or "error" + }, + "metrics":{ + "memory_used":7730941133, + "maxmemory":8589934592 + } + } + ]}' +*/ +func (bm *BkMonitorEventSender) SendWarning(eventName, warnmsg, warnLevel, targetIP string) (err error) { + bm.newDimenSion() + bm.Data[0].EventName = eventName + bm.Data[0].Target = targetIP + // bm.Data[0].Event.Content = bm.addDbMetaInfo(warnmsg) + bm.Data[0].Event.Content = warnmsg + bm.Data[0].Dimension["warn_level"] = warnLevel + + tempBytes, _ := json.Marshal(bm) + sendCmd := fmt.Sprintf( + `%s -report -report.bk_data_id %d 
-report.type agent -report.message.kind event -report.agent.address %s -report.message.body '%s'`, + bm.ToolBkMonitorBeat, bm.DataID, bm.AgentAddress, string(tempBytes)) + mylog.Logger.Info(sendCmd) + _, err = util.RunBashCmd(sendCmd, "", nil, 20*time.Second) + if err != nil { + return + } + return nil +} + +// addDbMetaInfo 生成content中前面db元信息 +func (bm *BkMonitorEventSender) addDbMetaInfo(warnmsg string) string { + var ret strings.Builder + var ok bool + if len(bm.Data[0].Dimension) > 0 { + firstDimen := bm.Data[0].Dimension + if _, ok = firstDimen["bk_biz_id"]; ok { + ret.WriteString(fmt.Sprintf("bk_biz_id:%v\n", firstDimen["bk_biz_id"])) + } + if _, ok = firstDimen["bk_cloud_id"]; ok { + ret.WriteString(fmt.Sprintf("bk_cloud_id:%v\n", firstDimen["bk_cloud_id"])) + } + // if _, ok = firstDimen["app_id"]; ok { + // ret.WriteString(fmt.Sprintf("app_id:%v\n", firstDimen["app_id"])) + // } + if _, ok = firstDimen["app"]; ok { + ret.WriteString(fmt.Sprintf("app:%v\n", firstDimen["app"])) + } + if _, ok = firstDimen["app_name"]; ok { + ret.WriteString(fmt.Sprintf("app_name:%v\n", firstDimen["app_name"])) + } + if _, ok = firstDimen["cluster_domain"]; ok { + ret.WriteString(fmt.Sprintf("cluster_domain:%v\n", firstDimen["cluster_domain"])) + } + if _, ok = firstDimen["cluster_type"]; ok { + ret.WriteString(fmt.Sprintf("cluster_type:%v\n", firstDimen["cluster_type"])) + } + if _, ok = firstDimen["instance"]; ok { + ret.WriteString(fmt.Sprintf("instance:%v\n", firstDimen["instance"])) + } + if _, ok = firstDimen["instance_role"]; ok { + ret.WriteString(fmt.Sprintf("instance_role:%v\n", firstDimen["instance_role"])) + } + } + ret.WriteString("message:" + warnmsg) + return ret.String() +} +func (bm *BkMonitorEventSender) newDimenSion() { + if len(bm.Data) == 0 { + bm.Data = append(bm.Data, eventBodyItem{}) + } + if len(bm.Data[0].Dimension) == 0 { + bm.Data[0].Dimension = map[string]interface{}{} + } +} + +// SetBkBizID set bk_biz_id +func (bm *BkMonitorEventSender) SetBkBizID(bkBizID string) *BkMonitorEventSender { + bm.newDimenSion() + bm.Data[0].Dimension["bk_biz_id"] = bkBizID + bm.Data[0].Dimension["app_id"] = bkBizID + return bm +} + +// SetBkCloudID set bk_cloud_id +func (bm *BkMonitorEventSender) SetBkCloudID(bkCloudID int64) *BkMonitorEventSender { + bm.newDimenSion() + bm.Data[0].Dimension["bk_cloud_id"] = bkCloudID + return bm +} + +// SetApp set app +func (bm *BkMonitorEventSender) SetApp(app string) *BkMonitorEventSender { + bm.newDimenSion() + bm.Data[0].Dimension["app"] = app + return bm +} + +// SetAppName TODO +// SetApp set app +func (bm *BkMonitorEventSender) SetAppName(appname string) *BkMonitorEventSender { + bm.newDimenSion() + bm.Data[0].Dimension["app_name"] = appname + return bm +} + +// SetClusterDomain set domain +func (bm *BkMonitorEventSender) SetClusterDomain(domain string) *BkMonitorEventSender { + bm.newDimenSion() + bm.Data[0].Dimension["cluster_domain"] = domain + return bm +} + +// SetClusterName set cluster name +func (bm *BkMonitorEventSender) SetClusterName(clusterName string) *BkMonitorEventSender { + bm.newDimenSion() + bm.Data[0].Dimension["cluster_name"] = clusterName + return bm +} + +// SetClusterType set cluster name +func (bm *BkMonitorEventSender) SetClusterType(clusterType string) *BkMonitorEventSender { + bm.newDimenSion() + bm.Data[0].Dimension["cluster_type"] = clusterType + return bm +} + +// SetInstanceRole set role +func (bm *BkMonitorEventSender) SetInstanceRole(role string) *BkMonitorEventSender { + bm.newDimenSion() + 
bm.Data[0].Dimension["instance_role"] = role + return bm +} + +// SetInstanceHost set server host +func (bm *BkMonitorEventSender) SetInstanceHost(host string) *BkMonitorEventSender { + bm.newDimenSion() + bm.Data[0].Dimension["instance_host"] = host + return bm +} + +// SetInstance set instance +func (bm *BkMonitorEventSender) SetInstance(instance string) *BkMonitorEventSender { + bm.newDimenSion() + bm.Data[0].Dimension["instance"] = instance + return bm +} + +// ReplaceAllDimensions 用参数中dimensions替代 bm.Data[0].Dimension +func (bm *BkMonitorEventSender) ReplaceAllDimensions(dimensions map[string]interface{}) *BkMonitorEventSender { + bm.newDimenSion() + bm.Data[0].Dimension = dimensions + return bm +} + +// AppendDimensions 将参数中 dimensions 内容 replace 到 bm.Data[0].Dimension +func (bm *BkMonitorEventSender) AppendDimensions(dimensions map[string]interface{}) *BkMonitorEventSender { + bm.newDimenSion() + for key, val := range dimensions { + bm.Data[0].Dimension[key] = val + } + return bm +} + +func (bm *BkMonitorEventSender) newMetrics() { + if len(bm.Data) == 0 { + bm.Data = append(bm.Data, eventBodyItem{}) + } + if len(bm.Data[0].Metrics) == 0 { + bm.Data[0].Metrics = map[string]float64{} + } +} + +// ReplaceAllMetrcs 用参数中 metics 替代 bm.Data[0].Metrics +func (bm *BkMonitorEventSender) ReplaceAllMetrcs(metrcs map[string]float64) *BkMonitorEventSender { + bm.newMetrics() + bm.Data[0].Metrics = metrcs + return bm +} + +// AppendMetrcs 将参数中 metics 内容 replace 到 bm.Data[0].Metrcs +func (bm *BkMonitorEventSender) AppendMetrcs(metrcs map[string]float64) *BkMonitorEventSender { + bm.newMetrics() + for key, val := range metrcs { + bm.Data[0].Metrics[key] = val + } + return bm +} diff --git a/dbm-services/redis/db-tools/dbmon/pkg/sendwarning/sendwarning.go b/dbm-services/redis/db-tools/dbmon/pkg/sendwarning/sendwarning.go new file mode 100644 index 0000000000..11283f1bde --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/pkg/sendwarning/sendwarning.go @@ -0,0 +1,2 @@ +// Package sendwarning TODO +package sendwarning diff --git a/dbm-services/redis/db-tools/dbmon/start.sh b/dbm-services/redis/db-tools/dbmon/start.sh new file mode 100755 index 0000000000..1cbc01a9e7 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/start.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env sh + +DIR=$(dirname $0) +cd $DIR + +nowtime=$(date "+%Y-%m-%d %H:%M:%S") +confFile="dbmon-config.yaml" + +httpAddr=$(grep 'http_address' dbmon-config.yaml |awk '{print $2}'|sed -e "s/^'//" -e "s/'$//" -e 's/^"//' -e 's/$"//') +httpAddr="http://$httpAddr/health" + +if curl $httpAddr >/dev/null 2>&1 +then + echo "[$nowtime] bk-dbmon is running" + exit 0 +fi + +if [[ ! -e $confFile ]] +then + echo "[$nowtime] $confFile not exists" + exit -1 +fi + +if [[ ! 
-d "./logs" ]] +then + mkdir -p ./logs +fi + +nohup ./bk-dbmon --config=$confFile >>./logs/start.log 2>&1 & + +sleep 1 + +if curl $httpAddr >/dev/null 2>&1 +then + echo "[$nowtime] bk-dbmon start success" +else + echo "[$nowtime] bk-dbmon start fail,bk-dbmon not running" + exit -1 +fi + +add_to_cron () { + P=`pwd` + CMD="cd $P && sh start.sh >> start.log 2>&1" + TMPF=./crontab.old + + # Maybe 'crontab -l' will output 'no crontab for xxx',so we output to 2>/dev/null + if crontab -l 2>/dev/null | grep "$CMD" 1>/dev/null ;then + : + else + crontab -l 2>/dev/null > $TMPF + cat >> $TMPF </dev/null 2>&1 +then + ps aux|grep 'bk-dbmon --config'|grep -v grep|awk '{print $2}'|xargs kill +else + echo "[$nowtime] bk-dbmon not running" + exit 0 +fi + +if curl $httpAddr >/dev/null 2>&1 +then + echo "[$nowtime] bk-dbmon kill fail,still running" + exit 0 +else + echo "[$nowtime] bk-dbmon stop success" +fi + +delete_cron () { + P=`pwd` + CMD="cd $P && sh start.sh >> start.log 2>&1" + TMPF=./crontab.old + + if crontab -l 2>/dev/null | grep -P "bk-dbmon.*start.sh" 1>/dev/null ;then + echo "[$nowtime] delete_from_cron"; + crontab -l 2>/dev/null | grep -v "bk-dbmon.*start.sh" | grep -v "^#.*bk-dbmon start.sh" > $TMPF + crontab $TMPF + fi +} + +delete_cron \ No newline at end of file diff --git a/dbm-services/redis/db-tools/dbmon/util/cmd_builder.go b/dbm-services/redis/db-tools/dbmon/util/cmd_builder.go new file mode 100644 index 0000000000..bb75814378 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/util/cmd_builder.go @@ -0,0 +1,66 @@ +package util + +import ( + "fmt" + "strings" +) + +type arg struct { + v string + isPwd bool +} + +// CmdBuilder 用于生成给sh执行的命令行 +// 支持标记密码参数,用于生成不带密码的命令行 + +// CmdBuilder TODO +type CmdBuilder struct { + Args []arg +} + +// NewCmdBuilder TODO +func NewCmdBuilder() *CmdBuilder { + c := CmdBuilder{} + return &c +} + +// Append TODO +func (c *CmdBuilder) Append(v ...string) *CmdBuilder { + for _, vv := range v { + c.Args = append(c.Args, arg{vv, false}) + } + return c +} + +// AppendPassword TODO +func (c *CmdBuilder) AppendPassword(v string) *CmdBuilder { + c.Args = append(c.Args, arg{v, true}) + return c +} + +// GetCmd TODO +func (c *CmdBuilder) GetCmd() []string { + tmpSlice := make([]string, 0, len(c.Args)) + for i := range c.Args { + tmpSlice = append(tmpSlice, c.Args[i].v) + } + + return tmpSlice +} + +// GetCmdLine TODO +func (c *CmdBuilder) GetCmdLine(suUser string, replacePassword bool) string { + tmpSlice := make([]string, 0, len(c.Args)) + for i := range c.Args { + if replacePassword && c.Args[i].isPwd { + tmpSlice = append(tmpSlice, "xxx") + } else { + tmpSlice = append(tmpSlice, c.Args[i].v) + } + } + cmdLine := strings.Join(tmpSlice, " ") + if suUser != "" { + return fmt.Sprintf(`su %s -c "%s"`, suUser, cmdLine) + } + return cmdLine +} diff --git a/dbm-services/redis/db-tools/dbmon/util/compress.go b/dbm-services/redis/db-tools/dbmon/util/compress.go new file mode 100644 index 0000000000..a8b2ed96c3 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/util/compress.go @@ -0,0 +1,163 @@ +package util + +import ( + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/dustin/go-humanize" +) + +// IsZstdExecutable 通过'zstd -V'命令确定本地zstd工具是能正常运行的 +func IsZstdExecutable() (ok bool) { + var err error + if !FileExists(consts.ZstdBin) { + return false + } + cmd := exec.Command(consts.ZstdBin, "-V") + if err = cmd.Start(); err != nil { + // err = 
fmt.Errorf("'%s -V' cmd.Start fail,err:%v", zstdBin, err) + return false + } + if err = cmd.Wait(); err != nil { + // err = fmt.Errorf("'%s -V' cmd.Wait fail,err:%v", zstdBin, err) + return false + } + return true +} + +// CompressFile 压缩文件 +// 优先使用zstd 做压缩,zstd无法使用则使用gzip +func CompressFile(file, targetDir string, rmOrigin bool) (retFile string, err error) { + var compressCmd string + fileDir := filepath.Dir(file) + filename := filepath.Base(file) + if targetDir == "" { + targetDir = fileDir + } + if IsZstdExecutable() { + retFile = filepath.Join(targetDir, filename+".zst") + if rmOrigin { + compressCmd = fmt.Sprintf(`cd %s && %s --rm -T4 %s -o %s`, fileDir, consts.ZstdBin, filename, retFile) + } else { + compressCmd = fmt.Sprintf(`cd %s && %s -T4 %s -o %s`, fileDir, consts.ZstdBin, filename, retFile) + } + _, err = RunBashCmd(compressCmd, "", nil, 6*time.Hour) + if err != nil { + return + } + } else { + retFile = filepath.Join(targetDir, filename+".gz") + if rmOrigin { + compressCmd = fmt.Sprintf(`gzip < %s >%s && rm -f %s`, file, retFile, file) + } else { + compressCmd = fmt.Sprintf(`gzip < %s >%s`, file, retFile) + } + _, err = RunBashCmd(compressCmd, "", nil, 6*time.Hour) + if err != nil { + return + } + } + return +} + +// SplitLargeFile 切割大文件为小文件,并返回切割后的结果 +// 参数file须是全路径; +// 如果file大小 小于 splitTargetSize,则返回值splitTargetSize只包含 file 一个元素 +func SplitLargeFile(file, splitTargetSize string, rmOrigin bool) (splitedFiles []string, err error) { + var fileSize int64 + var splitLimit uint64 + var cmdRet string + if file == "" { + return + } + fileSize, err = GetFileSize(file) + if err != nil { + return + } + splitLimit, err = humanize.ParseBytes(splitTargetSize) + if err != nil { + err = fmt.Errorf("humanize.ParseBytes fail,err:%v,splitTargetSize:%s", err, splitTargetSize) + return + } + if fileSize < int64(splitLimit) { + splitedFiles = append(splitedFiles, file) + return + } + fileDir := filepath.Dir(file) + fileBase := filepath.Base(file) + fileBase = strings.TrimSuffix(fileBase, ".tar") + fileBase = strings.TrimSuffix(fileBase, ".tar.gz") + fileBase = fileBase + ".split." 
+ splitCmd := fmt.Sprintf(`cd %s && split --verbose -a 3 -b %s -d %s %s|grep -i --only-match -E "%s[0-9]+"`, + fileDir, splitTargetSize, file, fileBase, fileBase) + mylog.Logger.Info(splitCmd) + cmdRet, err = RunBashCmd(splitCmd, "", nil, 6*time.Hour) + if err != nil { + return + } + l01 := strings.Split(cmdRet, "\n") + for _, item := range l01 { + item = strings.TrimSpace(item) + if item == "" { + continue + } + splitedFiles = append(splitedFiles, filepath.Join(fileDir, item)) + } + if rmOrigin { + err = os.Remove(file) + mylog.Logger.Info(fmt.Sprintf("rm %s", file)) + if err != nil { + err = fmt.Errorf("os.Remove fail,err:%v,file:%s", err, file) + return + } + } + return +} + +// TarADir 对一个目录进行tar打包, +// 如打包 /data/dbbak/REDIS-FULL-rocksdb-1.1.1.1-30000 为 /tmp/REDIS-FULL-rocksdb-1.1.1.1-30000.tar +// 参数: originDir 为 /data/dbbak/REDIS-FULL-rocksdb-1.1.1.1-30000 +// 参数: tarSaveDir 为 /tmp/ +// 返回值: tarFile 为 /tmp/REDIS-FULL-rocksdb-1.1.1.1-30000.tar +func TarADir(originDir, tarSaveDir string, rmOrigin bool) (tarFile string, err error) { + var tarCmd string + basename := filepath.Base(originDir) + baseDir := filepath.Dir(originDir) + if tarSaveDir == "" { + tarSaveDir = filepath.Dir(originDir) + } + tarFile = filepath.Join(tarSaveDir, basename+".tar") + + if rmOrigin { + tarCmd = fmt.Sprintf(`tar --remove-files -cf %s -C %s %s`, tarFile, baseDir, basename) + } else { + tarCmd = fmt.Sprintf(`tar -cf %s -C %s %s`, tarFile, baseDir, basename) + } + mylog.Logger.Info(tarCmd) + _, err = RunBashCmd(tarCmd, "", nil, 6*time.Hour) + if err != nil { + return + } + return +} + +// TarAndSplitADir 对目录tar打包并执行split +func TarAndSplitADir(originDir, targetSaveDir, splitTargetSize string, rmOrigin bool) ( + splitedFiles []string, err error) { + var tarFile string + tarFile, err = TarADir(originDir, targetSaveDir, rmOrigin) + if err != nil { + return + } + splitedFiles, err = SplitLargeFile(tarFile, splitTargetSize, rmOrigin) + if err != nil { + return + } + return +} diff --git a/dbm-services/redis/db-tools/dbmon/util/osCmd.go b/dbm-services/redis/db-tools/dbmon/util/osCmd.go new file mode 100644 index 0000000000..d4172b323a --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/util/osCmd.go @@ -0,0 +1,160 @@ +package util + +import ( + "bytes" + "context" + "dbm-services/redis/db-tools/dbmon/mylog" + "fmt" + "io" + "os" + "os/exec" + "strings" + "time" +) + +// DealLocalCmdPid 处理本地命令得到pid +type DealLocalCmdPid interface { + DealProcessPid(pid int) error +} + +// RunBashCmd bash -c "$cmd" 执行命令并得到命令结果 +func RunBashCmd(cmd, outFile string, dealPidMethod DealLocalCmdPid, + timeout time.Duration) (retStr string, err error) { + opts := []string{"-c", cmd} + return RunLocalCmd("bash", opts, outFile, dealPidMethod, timeout) +} + +// RunLocalCmd 运行本地命令并得到命令结果 +/* + *参数: + * outFile: 不为空,则将标准输出结果打印到outFile中; + * dealPidMethod: 不为空,则将命令pid传给dealPidMethod.DealProcessPid()函数; + * logger: 用于打印日志; + */ +func RunLocalCmd( + cmd string, opts []string, outFile string, + dealPidMethod DealLocalCmdPid, timeout time.Duration) (retStr string, err error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + cmdCtx := exec.CommandContext(ctx, cmd, opts...) 
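+	// Note: exec.CommandContext kills the child process once ctx expires, so a hung
+	// command cannot block the caller for longer than `timeout`.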
+	var retBuffer bytes.Buffer
+	var errBuffer bytes.Buffer
+	var outFileHandler *os.File
+	if len(strings.TrimSpace(outFile)) == 0 {
+		cmdCtx.Stdout = &retBuffer
+	} else {
+		outFileHandler, err = os.Create(outFile)
+		if err != nil {
+			mylog.Logger.Error(fmt.Sprintf("RunLocalCmd create outfile fail,err:%v,outFile:%s", err, outFile))
+			return "", fmt.Errorf("RunLocalCmd create outfile fail,err:%v,outFile:%s", err, outFile)
+		}
+		defer outFileHandler.Close()
+		mylog.Logger.Info(fmt.Sprintf("RunLocalCmd create outfile(%s) success ...", outFile))
+		cmdCtx.Stdout = outFileHandler
+	}
+	cmdCtx.Stderr = &errBuffer
+	mylog.Logger.Debug(fmt.Sprintf("Running a new local cmd:%s,opts:%+v", cmd, opts))
+
+	if err = cmdCtx.Start(); err != nil {
+		mylog.Logger.Error(fmt.Sprintf("RunLocalCmd cmd Start fail,err:%v,cmd:%s,opts:%+v", err, cmd, opts))
+		return "", fmt.Errorf("RunLocalCmd cmd Start fail,err:%v", err)
+	}
+	if dealPidMethod != nil {
+		dealPidMethod.DealProcessPid(cmdCtx.Process.Pid)
+	}
+	if err = cmdCtx.Wait(); err != nil {
+		mylog.Logger.Error(fmt.Sprintf("RunLocalCmd cmd wait fail,err:%v,errBuffer:%s,retBuffer:%s,cmd:%s,opts:%+v",
+			err, errBuffer.String(), retBuffer.String(), cmd, opts))
+		return "", fmt.Errorf("RunLocalCmd cmd wait fail,err:%v", err)
+	}
+	retStr = retBuffer.String()
+	if len(errBuffer.String()) > 0 {
+		mylog.Logger.Error(fmt.Sprintf("RunLocalCmd fail,err:%v,cmd:%s,opts:%+v", errBuffer.String(), cmd, opts))
+		err = fmt.Errorf("RunLocalCmd fail,err:%s", retBuffer.String()+"\n"+errBuffer.String())
+	} else {
+		err = nil
+	}
+	retStr = strings.TrimSpace(retStr)
+	return
+}
+
+// SetOSUserPassword set an OS user's password via chpasswd
+func SetOSUserPassword(user, password string) error {
+	cmd := exec.Command("chpasswd")
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		return fmt.Errorf("new pipe failed, err:%w", err)
+	}
+	go func() {
+		_, err := io.WriteString(stdin, fmt.Sprintf("%s:%s", user, password))
+		if err != nil {
+			mylog.Logger.Warn(fmt.Sprintf("write into pipe failed, err:%v", err))
+		}
+		if err := stdin.Close(); err != nil {
+			mylog.Logger.Warn(fmt.Sprintf("close stdin failed, err:%v", err))
+		}
+	}()
+	if output, err := cmd.CombinedOutput(); err != nil {
+		err = fmt.Errorf("run chpasswd failed, output:%s, err:%v", string(output), err)
+		mylog.Logger.Error(err.Error())
+		return err
+	}
+	return nil
+}
+
+// RunBashCmdNoLog bash -c "$cmd", like RunBashCmd but without logging
+func RunBashCmdNoLog(cmd, outFile string, dealPidMethod DealLocalCmdPid,
+	timeout time.Duration) (retStr string, err error) {
+	opts := []string{"-c", cmd}
+	return RunLocalCmdNoLog("bash", opts, outFile, dealPidMethod, timeout)
+}
+
+// RunLocalCmdNoLog RunLocalCmd without logging
+/*
+ *params:
+ * outFile: if not empty, stdout is written to outFile;
+ * dealPidMethod: if not nil, the command pid is passed to dealPidMethod.DealProcessPid();
+ */
+func RunLocalCmdNoLog(
+	cmd string, opts []string, outFile string,
+	dealPidMethod DealLocalCmdPid, timeout time.Duration) (retStr string, err error) {
+	ctx, cancel := context.WithTimeout(context.TODO(), timeout)
+	defer cancel()
+
+	cmdCtx := exec.CommandContext(ctx, cmd, opts...)
+ var retBuffer bytes.Buffer + var errBuffer bytes.Buffer + var outFileHandler *os.File + if len(strings.TrimSpace(outFile)) == 0 { + cmdCtx.Stdout = &retBuffer + } else { + outFileHandler, err = os.Create(outFile) + if err != nil { + return "", fmt.Errorf("RunLocalCmd create outfile fail,err:%v,outFile:%s", err, outFile) + } + defer outFileHandler.Close() + cmdCtx.Stdout = outFileHandler + } + cmdCtx.Stderr = &errBuffer + + if err = cmdCtx.Start(); err != nil { + return "", fmt.Errorf("RunLocalCmd cmd Start fail,err:%v", err) + } + if dealPidMethod != nil { + dealPidMethod.DealProcessPid(cmdCtx.Process.Pid) + } + if err = cmdCtx.Wait(); err != nil { + return "", fmt.Errorf("RunLocalCmd cmd wait fail,err:%v", err) + } + retStr = retBuffer.String() + if len(errBuffer.String()) > 0 { + err = fmt.Errorf("RunLocalCmd fail,err:%s", retBuffer.String()+"\n"+errBuffer.String()) + } else { + err = nil + } + retStr = strings.TrimSpace(retStr) + return +} diff --git a/dbm-services/redis/db-tools/dbmon/util/reflect.go b/dbm-services/redis/db-tools/dbmon/util/reflect.go new file mode 100644 index 0000000000..0698796aa2 --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/util/reflect.go @@ -0,0 +1,20 @@ +package util + +import ( + "reflect" + "runtime" +) + +// GetTypeName 获取接口类型名 +func GetTypeName(object interface{}) string { + t := reflect.TypeOf(object) + if t.Kind() == reflect.Ptr { + return "*" + t.Elem().Name() + } + return t.Name() +} + +// GetFunctionName 获取函数名 +func GetFunctionName(i interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() +} diff --git a/dbm-services/redis/db-tools/dbmon/util/util.go b/dbm-services/redis/db-tools/dbmon/util/util.go new file mode 100644 index 0000000000..fe790381ba --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/util/util.go @@ -0,0 +1,316 @@ +// Package util 公共函数 +package util + +import ( + "bufio" + "crypto/md5" + "dbm-services/redis/db-tools/dbmon/mylog" + "dbm-services/redis/db-tools/dbmon/pkg/consts" + "encoding/json" + "errors" + "fmt" + "io" + "net" + "os" + "path/filepath" + "strings" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// NotFound error +const NotFound = "not found" + +// NewNotFound .. +func NewNotFound() error { + return errors.New(NotFound) +} + +// IsNotFoundErr .. 
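+// Note: the check below compares err.Error() against the NotFound constant, so
+// wrapped errors are not matched and a nil err would panic here.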
+func IsNotFoundErr(err error) bool { + if err.Error() == NotFound { + return true + } + return false +} + +// GetCurrentDirectory 获取当前二进制程序所在执行路径 +func GetCurrentDirectory() (string, error) { + dir, err := filepath.Abs(filepath.Dir(os.Args[0])) + if err != nil { + return dir, fmt.Errorf("convert absolute path failed, err: %+v", err) + } + dir = strings.Replace(dir, "\\", "/", -1) + return dir, nil +} + +// GetLocalIP 获得本地ip +func GetLocalIP() (string, error) { + var localIP string + var err error + addrs, err := net.InterfaceAddrs() + if err != nil { + return localIP, fmt.Errorf("GetLocalIP net.InterfaceAddrs fail,err:%v", err) + } + for _, addr := range addrs { + if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { + if ipnet.IP.To4() != nil { + localIP = ipnet.IP.String() + return localIP, nil + } + } + } + return localIP, fmt.Errorf("can't find local ip") +} + +// FileExists 检查目录是否已经存在 +func FileExists(path string) bool { + _, err := os.Stat(path) + if err != nil { + return os.IsExist(err) + } + return true +} + +// GetFileMd5 求文件md5sum值 +func GetFileMd5(fileAbPath string) (md5sum string, err error) { + rFile, err := os.Open(fileAbPath) + if err != nil { + return "", fmt.Errorf("GetFileMd5 fail,err:%v,file:%s", err, fileAbPath) + } + defer func(rFile *os.File) { + _ = rFile.Close() + }(rFile) + h := md5.New() + if _, err := io.Copy(h, rFile); err != nil { + return "", fmt.Errorf("GetFileMd5 io.Copy fail,err:%v,file:%s", err, fileAbPath) + } + return fmt.Sprintf("%x", h.Sum(nil)), nil +} + +// IsMountPoint Determine if a directory is a mountpoint, by comparing the device for the directory +// with the device for it's parent. If they are the same, it's not a mountpoint, if they're +// different, it is. +// reference: https://github.com/cnaize/kubernetes/blob/master/pkg/util/mount/mountpoint_unix.go#L29 +func IsMountPoint(file string) (bool, error) { + stat, err := os.Stat(file) + if err != nil { + return false, err + } + rootStat, err := os.Lstat(file + "/..") + if err != nil { + return false, err + } + // If the directory has the same device as parent, then it's not a mountpoint. 
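+	// e.g. if /data is a mount point, stat("/data").Dev differs from stat("/data/..").Dev,
+	// because ".." resolves into the parent filesystem.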
+	return stat.Sys().(*syscall.Stat_t).Dev != rootStat.Sys().(*syscall.Stat_t).Dev, nil
+}
+
+// FindFirstMountPoint find first mountpoint in prefer order
+func FindFirstMountPoint(paths ...string) (string, error) {
+	for _, path := range paths {
+		if _, err := os.Stat(path); err != nil {
+			if os.IsNotExist(err) {
+				continue
+			}
+		}
+		isMountPoint, err := IsMountPoint(path)
+		if err != nil {
+			return "", fmt.Errorf("check whether mountpoint failed, path: %s, err: %v", path, err)
+		}
+		if isMountPoint {
+			return path, nil
+		}
+	}
+	return "", fmt.Errorf("no available mountpoint found, choices: %#v", paths)
+}
+
+// CheckPortIsInUse check whether the port is in use
+func CheckPortIsInUse(ip, port string) (inUse bool, err error) {
+	timeout := time.Second
+	conn, err := net.DialTimeout("tcp", net.JoinHostPort(ip, port), timeout)
+	if err != nil && strings.Contains(err.Error(), "connection refused") {
+		return false, nil
+	} else if err != nil {
+		return false, fmt.Errorf("net.DialTimeout fail,err:%v", err)
+	}
+	if conn != nil {
+		defer func(conn net.Conn) {
+			_ = conn.Close()
+		}(conn)
+		return true, nil
+	}
+	return false, nil
+}
+
+// IsValidIP report whether the string is a valid IP
+func IsValidIP(ipStr string) bool {
+	if net.ParseIP(ipStr) == nil {
+		return false
+	}
+	return true
+}
+
+// MkDirsIfNotExists create the dirs if they do not exist
+func MkDirsIfNotExists(dirs []string) error {
+	return MkDirsIfNotExistsWithPerm(dirs, 0755)
+}
+
+// MkDirsIfNotExistsWithPerm create the dirs with the given perm if they do not exist
+func MkDirsIfNotExistsWithPerm(dirs []string, perm os.FileMode) error {
+	for _, dir := range dirs {
+		_, err := os.Stat(dir)
+		if err == nil {
+			continue
+		}
+		if os.IsNotExist(err) {
+			err = os.MkdirAll(dir, perm)
+			if err != nil {
+				return fmt.Errorf("MkdirAll fail,err:%v,dir:%s", err, dir)
+			}
+		}
+	}
+	return nil
+}
+
+// IsExecOwner whether owner can execute
+func IsExecOwner(mode os.FileMode) bool {
+	return mode&0100 != 0
+}
+
+// IsExecGroup whether group can execute
+func IsExecGroup(mode os.FileMode) bool {
+	return mode&0010 != 0
+}
+
+// IsExecOther whether other can execute
+func IsExecOther(mode os.FileMode) bool {
+	return mode&0001 != 0
+}
+
+// IsExecAny whether any of owner/group/other can execute
+func IsExecAny(mode os.FileMode) bool {
+	return mode&0111 != 0
+}
+
+// IsExecAll whether owner/group/other can all execute
+func IsExecAll(mode os.FileMode) bool {
+	return mode&0111 == 0111
+}
+
+// LocalDirChownMysql chown localDir to the mysql account
+func LocalDirChownMysql(localDir string) (err error) {
+	cmd := fmt.Sprintf("chown -R %s.%s %s", consts.MysqlAaccount, consts.MysqlGroup, localDir)
+	_, err = RunBashCmd(cmd, "", nil, 1*time.Hour)
+	return
+}
+
+// HostDiskUsage disk usage of the filesystem holding a local path
+type HostDiskUsage struct {
+	TotalSize  uint64 `json:"ToTalSize"` // bytes
+	UsedSize   uint64 `json:"UsedSize"`  // bytes
+	AvailSize  uint64 `json:"AvailSize"` // bytes
+	UsageRatio int    `json:"UsageRatio"`
+}
+
+// String for printing
+func (disk *HostDiskUsage) String() string {
+	ret := fmt.Sprintf("total_size=%dMB,used_size=%dMB,avail_size=%dMB,Use=%d%%",
+		disk.TotalSize/1024/1024,
+		disk.UsedSize/1024/1024,
+		disk.AvailSize/1024/1024,
+		disk.UsageRatio)
+	return ret
+}
+
+// GetLocalDirDiskUsg get disk usage of the filesystem holding localDir
+// reference:
+// https://stackoverflow.com/questions/20108520/get-amount-of-free-disk-space-using-go
+// http://evertrain.blogspot.com/2018/05/golang-disk-free.html
+func GetLocalDirDiskUsg(localDir string) (diskUsg HostDiskUsage, err error) {
+	var stat unix.Statfs_t
+	if err = unix.Statfs(localDir, &stat); err != nil {
+		err = fmt.Errorf("unix.Statfs fail,err:%v,localDir:%s", err, localDir)
+		return
+	}
+	diskUsg.TotalSize = stat.Blocks * uint64(stat.Bsize)
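+	// Bavail counts blocks available to unprivileged users, while Bfree also includes
+	// the root-reserved blocks; hence AvailSize uses Bavail and UsedSize uses Blocks-Bfree.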
+ diskUsg.AvailSize = stat.Bavail * uint64(stat.Bsize) + diskUsg.UsedSize = (stat.Blocks - stat.Bfree) * uint64(stat.Bsize) + diskUsg.UsageRatio = int(diskUsg.UsedSize * 100 / diskUsg.TotalSize) + return +} + +// GetFileSize 获取文件大小(单位byte) +func GetFileSize(filename string) (size int64, err error) { + fileInfo, err := os.Stat(filename) + if err != nil { + err = fmt.Errorf("file:%s os.Stat fail,err:%v", filename, err) + return + } + return fileInfo.Size(), nil +} + +// LockFileOnStart LockFileOnStart +func LockFileOnStart(lockfile string, doneCh chan struct{}) { + os.Create(lockfile) + fh, _ := os.Open(lockfile) + var c int + for { + c++ + err := syscall.Flock(int(fh.Fd()), syscall.LOCK_EX|syscall.LOCK_NB) + if err != nil { + mylog.Logger.Error(fmt.Sprintf("lock file %s failed :%v", lockfile, err)) + if c > 120 { + os.Exit(1) + } else { + mylog.Logger.Warn(fmt.Sprintf("waiting get lock file [%s] [%d]...", lockfile, c)) + time.Sleep(time.Second * time.Duration(60)) + } + } else { + break + } + } + mylog.Logger.Info(fmt.Sprintf("get lock file success [%s]..", lockfile)) + go func(fh *os.File) { + defer mylog.Logger.Debug(fmt.Sprintf("job done,close lock file handler :[%s]", lockfile)) + defer fh.Close() + for { + select { + case <-doneCh: + mylog.Logger.Info(fmt.Sprintf("job done ,realse lock file :[%s]", lockfile)) + goto TAG_END + default: + time.Sleep(time.Second) + } + } + TAG_END: + }(fh) +} + +// GetFileLines GetFileLines +func GetFileLines(f string) (int64, error) { + var cnt int64 + fp, err := os.Open(f) + if err != nil { + mylog.Logger.Warn(fmt.Sprintf("open failed %s:%+v", f, err)) // 打开文件错误 + return 0, err + } + defer fp.Close() + buf := bufio.NewScanner(fp) + for { + if !buf.Scan() { + break // 文件读完了,退出for + } + cnt++ + _ = buf.Text() // 获取每一行 + // fmt.Println(line) + } + return cnt, nil +} + +// ToString string +func ToString(param interface{}) string { + ret, _ := json.Marshal(param) + return string(ret) +} diff --git a/dbm-services/redis/db-tools/dbmon/util/version.go b/dbm-services/redis/db-tools/dbmon/util/version.go new file mode 100644 index 0000000000..b9d38501bc --- /dev/null +++ b/dbm-services/redis/db-tools/dbmon/util/version.go @@ -0,0 +1,118 @@ +package util + +import ( + "dbm-services/redis/db-tools/dbmon/mylog" + "fmt" + "regexp" + "strconv" + "strings" + "time" +) + +func convertVersionToUint(version string) (total uint64, err error) { + version = strings.TrimSpace(version) + if version == "" { + return 0, nil + } + list01 := strings.Split(version, ".") + var billion string + var thousand string + var single string + if len(list01) == 0 { + err = fmt.Errorf("version:%s format not correct", version) + mylog.Logger.Error(err.Error()) + return 0, err + } + billion = list01[0] + if len(list01) >= 2 { + thousand = list01[1] + } + if len(list01) >= 3 { + single = list01[2] + } + + if billion != "" { + b, err := strconv.ParseUint(billion, 10, 64) + if err != nil { + err = fmt.Errorf("convertVersionToUint strconv.ParseUint fail,err:%v,billion:%s,version:%s", err, billion, version) + mylog.Logger.Error(err.Error()) + return 0, err + } + total += b * 1000000 + } + if thousand != "" { + t, err := strconv.ParseUint(thousand, 10, 64) + if err != nil { + err = fmt.Errorf("convertVersionToUint strconv.ParseUint fail,err:%v,thousand:%s,version:%s", err, thousand, version) + mylog.Logger.Error(err.Error()) + return 0, err + } + total += t * 1000 + } + if single != "" { + s, err := strconv.ParseUint(single, 10, 64) + if err != nil { + err = fmt.Errorf("convertVersionToUint 
strconv.ParseUint fail,err:%v,single:%s,version:%s", err, single, version) + mylog.Logger.Error(err.Error()) + return 0, err + } + total += s + } + return total, nil +} + +// VersionParse tendis版本解析 +/* + * VersionParse + * 2.8.17-TRedis-v1.2.20, baseVersion: 2008017,subVersion:1002020 + * 6.2.7,baseVersion: 6002007 + */ +func VersionParse(version string) (baseVersion, subVersion uint64, err error) { + reg01 := regexp.MustCompile(`[\d+.]+`) + rets := reg01.FindAllString(version, -1) + if len(rets) == 0 { + err = fmt.Errorf("TendisVersionParse version:%s format not correct", version) + mylog.Logger.Error(err.Error()) + return 0, 0, err + } + if len(rets) >= 1 { + baseVersion, err = convertVersionToUint(rets[0]) + if err != nil { + return 0, 0, err + } + } + if len(rets) >= 2 { + subVersion, err = convertVersionToUint(rets[1]) + if err != nil { + return 0, 0, err + } + } + + return baseVersion, subVersion, nil +} + +// RedisCliVersion redis-cli 的版本解析 +func RedisCliVersion(cliBin string) (baseVersion, subVersion uint64, err error) { + cmd := cliBin + " -v" + verRet, err := RunBashCmd(cmd, "", nil, 20*time.Second) + if err != nil { + return + } + baseVersion, subVersion, err = VersionParse(verRet) + if err != nil { + return + } + return +} + +// IsCliSupportedNoAuthWarning redis-cli 是否支持 --no-auth-warning参数 +func IsCliSupportedNoAuthWarning(cliBin string) bool { + bVer, _, err := RedisCliVersion(cliBin) + if err != nil { + return false + } + if bVer > 6000000 { + return true + } + return false +} diff --git a/dbm-services/redis/redis-dts/.gitignore b/dbm-services/redis/redis-dts/.gitignore new file mode 100644 index 0000000000..7b07cbf245 --- /dev/null +++ b/dbm-services/redis/redis-dts/.gitignore @@ -0,0 +1,9 @@ +dbm-services/redis/redis-dts/config.yaml +bin/config.yaml +./bin/redis-cli-v6.x +dbm-services/redis/redis-dts.tar.gz +.vscode/ +build.yml +.codecc +.idea +.vscode \ No newline at end of file diff --git a/dbm-services/redis/redis-dts/Makefile b/dbm-services/redis/redis-dts/Makefile new file mode 100644 index 0000000000..2f08b1fa67 --- /dev/null +++ b/dbm-services/redis/redis-dts/Makefile @@ -0,0 +1,11 @@ +SRV_NAME= redis-dts +binDir=bin + +clean: + -rm ./${binDir}/${SRV_NAME} + +build:clean + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o ./${binDir}/$(SRV_NAME) -v main.go + + +.PHONY: init clean build gotool clean help api curl \ No newline at end of file diff --git a/dbm-services/redis/redis-dts/README.md b/dbm-services/redis/redis-dts/README.md new file mode 100644 index 0000000000..284c6cbcb6 --- /dev/null +++ b/dbm-services/redis/redis-dts/README.md @@ -0,0 +1,16 @@ +## redis-dts + +### 简介 +redis数据传输服务. 
+ +### 使用说明 +- 申请一台`Tlinux 2.2+`机器作为`DTS server`; +- `DTS server`配置DNS,配置地址; +- 程序本地执行`make build`; +- 将`redis-dts`目录打包传输到`DTS server`上,建议`/data1/dbbak`目录下; +- `DTS server`上执行`cd /data1/dbbak/redis-dts && sh start.sh`; +- 源`TendisSSD`集群必须支持参数`slave-log-keep-count`,建议版本:`2.8.17-TRedis-v1.2.20`、`2.8.17-TRedis-v1.3.10`; + +### 原理简介 +#### 架构图 +![redis-dts架构图](./images/redis-dts%E6%9E%B6%E6%9E%84%E5%9B%BE.png) \ No newline at end of file diff --git a/dbm-services/redis/redis-dts/bin/config-template.yaml b/dbm-services/redis/redis-dts/bin/config-template.yaml new file mode 100644 index 0000000000..7295c4fff6 --- /dev/null +++ b/dbm-services/redis/redis-dts/bin/config-template.yaml @@ -0,0 +1,38 @@ +TENDIS_DEBUG: false +ENV: prod #正式环境:prod,测试环境:test +bkCloudID: 0 #本dts_server所在云区域ID +tredisdumperTimeout: 604800 #tredisdump命令超时时间7天 +tendisBakcupParallelLimit: 5 #每个task的并发度控制,必须是${task_type}ParallelLimit +backupfileFetchParallelLimit: 5 +tendisdumpParallelLimit: 10 +cmdsImporterParallelLimit: 10 +perTaskImportClients: 40 #将output0切割为多少分,每份由一个redis-cli导入 +makeSyncParallelLimit: 5 +makeCacheSyncParallelLimit: 10 +maxCacheDataSizePerDtsServer: 256GiB #单台DTS最大迁移的cache数据量256GB +maxLocalDiskDataSizeRatioNTendisSSD: 8 # 单台DTS最大迁移的SSD数据量为本地磁盘的 1/8 +ssdSlaveLogKeepCount: 200000000 +tredisdumpOutputRespFileSize: 1MiB #tredisdump输出格式为RESP格式时,每个文件目标 1M +tredisdumpOutputCmdFileSize: 1GiB #tredisdump输出格式为普通set/hset命令时,每个文件目标 1GB +respFileImportParallelLimit: 4 # RESP格式文件导入时,并发度为4 +cmdFileImportParallelLimit: 40 # 普通命令文件导入时,并发度为40(也就是每个task同一时间最大导入40个普通命令格式文件) +respFileImportTimeout: 120 # RESP格式文件导入时,超时时间120s +cmdFileImportTimeout: 604800 # 普通命令文件导入时,超时时间7天 +importMaxRetryTimes: 5 #非list类型的key,导入可重试,最大重试次数 +tredisdumpTheadCnt: 10 #tredisdump --thread 参数 +tendisplusMakeSyncParallelLimit: 5 +memSizePerTendisplusKvStoreSync: 500MiB # 每个tendisplus kvstore启动一个redis-sync,默认其占用500MiB内存 +dtsRemoteTendisxk8s: + rootUrl: http://api.xxxx + secret_key: xxxx + secret_id: xxxx +gormlog: false +ABSUSER: mysql +ABSPASSWORD: xxxxxx +ABSPORT: 36000 +ABSTIMEOUT: 14400 +RsyncPullBwLimit: 400000 # rsync bandwidth limit, kbit/s +RsyncPullTimeout: 36000 # rsync timeout,10 hour +WarnMessageNotifier: xxxx #告警通知人 +DtsServerDiskMaxUsgRatio: 90 #dtsserver磁盘最大使用90%,超过则发送告警 +DtsServerMemMaxUsgRatio: 80 #dtsserver内存最大使用90%,超过则发送告警 \ No newline at end of file diff --git a/dbm-services/redis/redis-dts/bin/redis-shake-template.conf b/dbm-services/redis/redis-dts/bin/redis-shake-template.conf new file mode 100644 index 0000000000..daf490f331 --- /dev/null +++ b/dbm-services/redis/redis-dts/bin/redis-shake-template.conf @@ -0,0 +1,227 @@ +# this is the configuration of redis-shake. +# if you have any problem, please visit https://github.com/alibaba/RedisShake/wiki/FAQ + +# current configuration version, do not modify. +# 当前配置文件的版本号,请不要修改该值。 +conf.version = 1 + +# ------------------------------------------------------ +# id +id = redis-shake + +# log file,日志文件,不配置将打印到stdout (e.g. /var/log/redis-shake.log ) +log.file ={{LOG_FILE}} +# log level: "none", "error", "warn", "info", "debug". default is "info". +log.level = {{LOG_LEVEL}} +log.maxbackups = 1000 +# pid path,进程文件存储地址(e.g. /var/run/),不配置将默认输出到执行下面, +# 注意这个是目录,真正的pid是`{pid_path}/{id}.pid` +pid_path = {{PID_PATH}} + +# pprof port. +system_profile = {{SYSTEM_PROFILE}} +# restful port, set -1 means disable, in `restore` mode RedisShake will exit once finish restoring RDB only if this value +# is -1, otherwise, it'll wait forever. 
+# restful port,查看metric端口, -1表示不启用,如果是`restore`模式,只有设置为-1才会在完成RDB恢复后退出,否则会一直block。 +http_profile = {{HTTP_PROFILE}} + +# parallel routines number used in RDB file syncing. default is 64. +# 启动多少个并发线程同步一个RDB文件。 +parallel = 64 + +# source redis configuration. +# used in `dump`, `sync` and `rump`. +# source redis type, e.g. "standalone" (default), "sentinel" or "cluster". +# 1. "standalone": standalone db mode. +# 2. "sentinel": the redis address is read from sentinel. +# 3. "cluster": the source redis has several db. +# 4. "proxy": the proxy address, currently, only used in "rump" mode. +# 源端redis的类型,支持standalone,sentinel,cluster和proxy四种模式,注意:目前proxy只用于rump模式。 +source.type = standalone +# ip:port +# the source address can be the following: +# 1. single db address. for "standalone" type. +# 2. ${sentinel_master_name}:${master or slave}@sentinel single/cluster address, e.g., mymaster:master@127.0.0.1:26379;127.0.0.1:26380, or @127.0.0.1:26379;127.0.0.1:26380. for "sentinel" type. +# 3. cluster that has several db nodes split by semicolon(;). for "cluster" type. e.g., 127.0.0.1:20331;127.0.0.2:20441. +# 4. proxy address(used in "rump" mode only). for "proxy" type. +# 源redis地址。对于sentinel或者开源cluster模式,输入格式为"master名字:拉取角色为master或者slave@sentinel的地址",别的cluster +# 架构,比如codis, twemproxy, aliyun proxy等需要配置所有master或者slave的db地址。 +source.address = {{SRC_ADDR}} +# password of db/proxy. even if type is sentinel. +source.password_raw = {{SRC_PASSWORD}} +# auth type, don't modify it +source.auth_type = auth +# tls enable, true or false. Currently, only support standalone. +# open source redis does NOT support tls so far, but some cloud versions do. +source.tls_enable = false +# input RDB file. +# used in `decode` and `restore`. +# if the input is list split by semicolon(;), redis-shake will restore the list one by one. +# 如果是decode或者restore,这个参数表示读取的rdb文件。支持输入列表,例如:rdb.0;rdb.1;rdb.2 +# redis-shake将会挨个进行恢复。 +source.rdb.input = +# the concurrence of RDB syncing, default is len(source.address) or len(source.rdb.input). +# used in `dump`, `sync` and `restore`. 0 means default. +# This is useless when source.type isn't cluster or only input is only one RDB. +# 拉取的并发度,如果是`dump`或者`sync`,默认是source.address中db的个数,`restore`模式默认len(source.rdb.input)。 +# 假如db节点/输入的rdb有5个,但rdb.parallel=3,那么一次只会 +# 并发拉取3个db的全量数据,直到某个db的rdb拉取完毕并进入增量,才会拉取第4个db节点的rdb, +# 以此类推,最后会有len(source.address)或者len(rdb.input)个增量线程同时存在。 +source.rdb.parallel = 0 +# for special cloud vendor: ucloud +# used in `decode` and `restore`. +# ucloud集群版的rdb文件添加了slot前缀,进行特判剥离: ucloud_cluster。 +source.rdb.special_cloud = + +source.rdb.start_segment={{START_SEGMENT}} +source.rdb.end_segment={{END_SEGMENT}} + +# target redis configuration. used in `restore`, `sync` and `rump`. +# the type of target redis can be "standalone", "proxy" or "cluster". +# 1. "standalone": standalone db mode. +# 2. "sentinel": the redis address is read from sentinel. +# 3. "cluster": open source cluster (not supported currently). +# 4. "proxy": proxy layer ahead redis. Data will be inserted in a round-robin way if more than 1 proxy given. +# 目的redis的类型,支持standalone,sentinel,cluster和proxy四种模式。 +target.type = standalone +# ip:port +# the target address can be the following: +# 1. single db address. for "standalone" type. +# 2. ${sentinel_master_name}:${master or slave}@sentinel single/cluster address, e.g., mymaster:master@127.0.0.1:26379;127.0.0.1:26380, or @127.0.0.1:26379;127.0.0.1:26380. for "sentinel" type. +# 3. cluster that has several db nodes split by semicolon(;). for "cluster" type. 
+# 4. proxy address. for "proxy" type. +target.address = {{TARGET_ADDR}} +# password of db/proxy. even if type is sentinel. +target.password_raw ={{TARGET_PASSWORD}} +# auth type, don't modify it +target.auth_type = auth +# all the data will be written into this db. < 0 means disable. +target.db = 0 +# tls enable, true or false. Currently, only support standalone. +# open source redis does NOT support tls so far, but some cloud versions do. +target.tls_enable = false +# output RDB file prefix. +# used in `decode` and `dump`. +# 如果是decode或者dump,这个参数表示输出的rdb前缀,比如输入有3个db,那么dump分别是: +# ${output_rdb}.0, ${output_rdb}.1, ${output_rdb}.2 +target.rdb.output = rdb_decode_result +# some redis proxy like twemproxy doesn't support to fetch version, so please set it here. +# e.g., target.version = 4.0 +target.version = {{TARGET_VERSION}} + +# use for expire key, set the time gap when source and target timestamp are not the same. +# 用于处理过期的键值,当迁移两端不一致的时候,目的端需要加上这个值 +fake_time = + +# how to solve when destination restore has the same key. +# rewrite: overwrite. +# none: panic directly. +# ignore: skip this key. not used in rump mode. +# used in `restore`, `sync` and `rump`. +# 当源目的有重复key,是否进行覆写 +# rewrite表示源端覆盖目的端。 +# none表示一旦发生进程直接退出。 +# ignore表示保留目的端key,忽略源端的同步key。该值在rump模式下没有用。 +key_exists = rewrite + +# filter db, key, slot, lua. +# filter db. +# used in `restore`, `sync` and `rump`. +# e.g., "0;5;10" means match db0, db5 and db10. +# at most one of `filter.db.whitelist` and `filter.db.blacklist` parameters can be given. +# if the filter.db.whitelist is not empty, the given db list will be passed while others filtered. +# if the filter.db.blacklist is not empty, the given db list will be filtered while others passed. +# all dbs will be passed if no condition given. +# 指定的db被通过,比如0;5;10将会使db0, db5, db10通过, 其他的被过滤 +filter.db.whitelist = 0;1 +# 指定的db被过滤,比如0;5;10将会使db0, db5, db10过滤,其他的被通过 +filter.db.blacklist = +# filter key with prefix string. multiple keys are separated by ';'. +# e.g., "abc;bzz" match let "abc", "abc1", "abcxxx", "bzz" and "bzzwww". +# used in `restore`, `sync` and `rump`. +# at most one of `filter.key.whitelist` and `filter.key.blacklist` parameters can be given. +# if the filter.key.whitelist is not empty, the given keys will be passed while others filtered. +# if the filter.key.blacklist is not empty, the given keys will be filtered while others passed. +# all the namespace will be passed if no condition given. +# 支持按前缀过滤key,只让指定前缀的key通过,分号分隔。比如指定abc,将会通过abc, abc1, abcxxx +filter.key.whitelist ={{KEY_WHITE_REGEX}} +# 支持按前缀过滤key,不让指定前缀的key通过,分号分隔。比如指定abc,将会阻塞abc, abc1, abcxxx +filter.key.blacklist = ^master_ip$;^master_port$;^dbha:agent:{{KEY_BLACK_REGEX}} +# filter given slot, multiple slots are separated by ';'. +# e.g., 1;2;3 +# used in `sync`. +# 指定过滤slot,只让指定的slot通过 +filter.slot = +# filter lua script. true means not pass. However, in redis 5.0, the lua +# converts to transaction(multi+{commands}+exec) which will be passed. +# 控制不让lua脚本通过,true表示不通过 +filter.lua = false + +# big key threshold, the default is 500 * 1024 * 1024 bytes. If the value is bigger than +# this given value, all the field will be spilt and write into the target in order. If +# the target Redis type is Codis, this should be set to 1, please checkout FAQ to find +# the reason. +# 正常key如果不大,那么都是直接调用restore写入到目的端,如果key对应的value字节超过了给定 +# 的值,那么会分批依次一个一个写入。如果目的端是Codis,这个需要置为1,具体原因请查看FAQ。 +# 如果目的端大版本小于源端,也建议设置为1。 +big_key_threshold = 524288000 + +# enable metric +# used in `sync`. 
+# 是否启用metric +metric = true +# print in log +# 是否将metric打印到log中 +metric.print_log = false + +# sender information. +# sender flush buffer size of byte. +# used in `sync`. +# 发送缓存的字节长度,超过这个阈值将会强行刷缓存发送 +sender.size = 104857600 +# sender flush buffer size of oplog number. +# used in `sync`. flush sender buffer when bigger than this threshold. +# 发送缓存的报文个数,超过这个阈值将会强行刷缓存发送,对于目的端是cluster的情况,这个值 +# 的调大将会占用部分内存。 +sender.count = 4095 +# delay channel size. once one oplog is sent to target redis, the oplog id and timestamp will also +# stored in this delay queue. this timestamp will be used to calculate the time delay when receiving +# ack from target redis. +# used in `sync`. +# 用于metric统计时延的队列 +sender.delay_channel_size = 65535 + +# enable keep_alive option in TCP when connecting redis. +# the unit is second. +# 0 means disable. +# TCP keep-alive保活参数,单位秒,0表示不启用。 +keep_alive = 5 + +# used in `rump`. +# number of keys captured each time. default is 100. +# 每次scan的个数,不配置则默认100. +scan.key_number = 50 +# used in `rump`. +# we support some special redis types that don't use default `scan` command like alibaba cloud and tencent cloud. +# 有些版本具有特殊的格式,与普通的scan命令有所不同,我们进行了特殊的适配。目前支持腾讯云的集群版"tencent_cluster" +# 和阿里云的集群版"aliyun_cluster",注释主从版不需要配置,只针对集群版。 +scan.special_cloud = +# used in `rump`. +# we support to fetching data from given file which marks the key list. +# 有些云版本,既不支持sync/psync,也不支持scan,我们支持从文件中进行读取所有key列表并进行抓取:一行一个key。 +scan.key_file = + +# limit the rate of transmission. Only used in `rump` currently. +# e.g., qps = 1000 means pass 1000 keys per second. default is 500,000(0 means default) +qps = 200000 + +# enable resume from break point, please visit xxx to see more details. +# 断点续传开关 +resume_from_break_point = false + +# ----------------splitter---------------- +# below variables are useless for current open source version so don't set. + +# replace hash tag. +# used in `sync`. +replace_hash_tag = false diff --git a/dbm-services/redis/redis-dts/bin/start.sh b/dbm-services/redis/redis-dts/bin/start.sh new file mode 100755 index 0000000000..1da2b3a4b3 --- /dev/null +++ b/dbm-services/redis/redis-dts/bin/start.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env sh + +#检查必要文件是否存在 +confFile="./config.yaml" +dtsBinFile="./redis-dts" +tredisdumpBinFile="./tredisdump" +redisCliBinFile="./redis-cli" +syncTemplateConfFile="./tendisssd-sync-template.conf" +scpFile="./scp.exp.2" +if [[ ! -e "$confFile" ]] +then + echo "error:$confFile not exists" + exit -1 +fi + +if [[ ! -e "$dtsBinFile" ]] +then + echo "error:$dtsBinFile not exists" + exit -1 +fi + +if [[ ! -e "$tredisdumpBinFile" ]] +then + echo "error:$tredisdumpBinFile not exists" + exit -1 +fi + +if [[ ! -e "$redisCliBinFile" ]] +then + echo "error:$redisCliBinFile not exists" + exit -1 +fi + +if [[ ! -e "$syncTemplateConfFile" ]] +then + echo "error:$syncTemplateConfFile not exists" + exit -1 +fi + +if [[ ! 
-e "$scpFile" ]] +then + echo "error:$scpFile not exists" + exit -1 +fi + +# 如果已经有一个 redis-dts 在运行,不能直接启动 +processCnt=$(ps -ef|grep $dtsBinFile|grep -v grep|grep -v sync|wc -l) +if [[ $processCnt -ge 1 ]] +then + echo "error:there are a 'redis-dts' running" + ps -ef|grep $dtsBinFile|grep -v grep|grep -v sync + exit -1 +fi + +# 启动 redis-dts +chmod u+x $dtsBinFile +nohup $dtsBinFile & + +sleep 2 + +#再次检查是否启动成功 +processCnt=$(ps -ef|grep $dtsBinFile|grep -v grep|grep -v sync|wc -l) +if [[ $processCnt -ge 1 ]] +then + echo "success:start $dtsBinFile success" +else + echo "error: start $dtsBinFile fail" + exit -1 +fi \ No newline at end of file diff --git a/dbm-services/redis/redis-dts/bin/stop.sh b/dbm-services/redis/redis-dts/bin/stop.sh new file mode 100755 index 0000000000..84e75eb4f4 --- /dev/null +++ b/dbm-services/redis/redis-dts/bin/stop.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env sh + +processCnt=$(ps -ef|grep scp.exp.2|grep -v grep|wc -l) +if [[ $processCnt -gt 0 ]] +then + echo "error:fetching backup file,cannot stop ..." + exit -1 +fi + +processCnt=$(ps -ef|grep tredisdump|grep -v grep|wc -l) +if [[ $processCnt -gt 0 ]] +then + echo "error:tredisdump running,cannot stop ..." + ps -ef|grep tredisdump|grep -v grep + exit -1 +fi + +processCnt=$(ps -ef|grep redis-cli|grep '\-\-pipe'|grep -v grep|wc -l) +if [[ $processCnt -gt 0 ]] +then + echo "error:importing data running,cannot stop ..." + ps -ef|grep redis-cli|grep '\-\-pipe'|grep -v grep + exit -1 +fi +processCnt=$(ps -ef|grep redis-dts|grep -v grep|grep -v sync|wc -l) +if [[ $processCnt -eq 0 ]] +then + echo "success:redis-dts not running" + exit 0 +fi + +ps -ef|grep redis-dts|grep -v grep|grep -vi sync|awk '{print $2}'|xargs kill + +#再次检查是否stop成功 +processCnt=$(ps -ef|grep redis-dts|grep -v grep|grep -v sync|wc -l) +if [[ $processCnt -eq 0 ]] +then + echo "success:stop redis-dts success" + exit 0 +else + echo "error: stop redis-dts fail" + exit -1 +fi \ No newline at end of file diff --git a/dbm-services/redis/redis-dts/bin/tendisplus-sync-template.conf b/dbm-services/redis/redis-dts/bin/tendisplus-sync-template.conf new file mode 100644 index 0000000000..3039fcf264 --- /dev/null +++ b/dbm-services/redis/redis-dts/bin/tendisplus-sync-template.conf @@ -0,0 +1,22 @@ +[server] +logfile={{SYNC_LOG_FILE}} +port={{SYNC_PORT}} +skip-ha=no +mode=redis +threads=20 +loglevel=warning +skip-start=yes +kvstore={{KV_STORE_ID}} +test-mode=yes +proxy-enable={{PROXY_ENABLE}} +connection-per-node=50 +max-queue-size=100000 +filter-commands=adminset,adminget +hash-suffix-enable=yes +fullpsync-notice-enabled=yes + +[source] +{{SRC_ADDR}}|{{SRC_PASSWORD}} + +[remote] +{{DST_ADDR}}|{{DST_PASSWORD}} \ No newline at end of file diff --git a/dbm-services/redis/redis-dts/bin/tendisssd-sync-template.conf b/dbm-services/redis/redis-dts/bin/tendisssd-sync-template.conf new file mode 100755 index 0000000000..648ec75a0b --- /dev/null +++ b/dbm-services/redis/redis-dts/bin/tendisssd-sync-template.conf @@ -0,0 +1,36 @@ +[server] +mode=tendis-ssd +skip-start=yes +skip-ha=no +threads=20 +port={{SYNC_PORT}} +test-mode=no +loglevel={{LOG_LEVEL}} +logfile={{SYNC_LOG_FILE}} +key-white-regex={{KEY_WHITE_REGEX}} +key-black-regex={{KEY_BLACK_REGEX}} + +[source] +{{SRC_ADDR}}|{{SRC_PASSWORD}} + +[remote] +{{DST_ADDR}}|{{DST_PASSWORD}} +{{DST_ADDR}}|{{DST_PASSWORD}} +{{DST_ADDR}}|{{DST_PASSWORD}} +{{DST_ADDR}}|{{DST_PASSWORD}} +{{DST_ADDR}}|{{DST_PASSWORD}} +{{DST_ADDR}}|{{DST_PASSWORD}} +{{DST_ADDR}}|{{DST_PASSWORD}} +{{DST_ADDR}}|{{DST_PASSWORD}} +{{DST_ADDR}}|{{DST_PASSWORD}} 
diff --git a/dbm-services/redis/redis-dts/bin/tendisplus-sync-template.conf b/dbm-services/redis/redis-dts/bin/tendisplus-sync-template.conf
new file mode 100644
index 0000000000..3039fcf264
--- /dev/null
+++ b/dbm-services/redis/redis-dts/bin/tendisplus-sync-template.conf
@@ -0,0 +1,22 @@
+[server]
+logfile={{SYNC_LOG_FILE}}
+port={{SYNC_PORT}}
+skip-ha=no
+mode=redis
+threads=20
+loglevel=warning
+skip-start=yes
+kvstore={{KV_STORE_ID}}
+test-mode=yes
+proxy-enable={{PROXY_ENABLE}}
+connection-per-node=50
+max-queue-size=100000
+filter-commands=adminset,adminget
+hash-suffix-enable=yes
+fullpsync-notice-enabled=yes
+
+[source]
+{{SRC_ADDR}}|{{SRC_PASSWORD}}
+
+[remote]
+{{DST_ADDR}}|{{DST_PASSWORD}}
\ No newline at end of file
diff --git a/dbm-services/redis/redis-dts/bin/tendisssd-sync-template.conf b/dbm-services/redis/redis-dts/bin/tendisssd-sync-template.conf
new file mode 100755
index 0000000000..648ec75a0b
--- /dev/null
+++ b/dbm-services/redis/redis-dts/bin/tendisssd-sync-template.conf
@@ -0,0 +1,36 @@
+[server]
+mode=tendis-ssd
+skip-start=yes
+skip-ha=no
+threads=20
+port={{SYNC_PORT}}
+test-mode=no
+loglevel={{LOG_LEVEL}}
+logfile={{SYNC_LOG_FILE}}
+key-white-regex={{KEY_WHITE_REGEX}}
+key-black-regex={{KEY_BLACK_REGEX}}
+
+[source]
+{{SRC_ADDR}}|{{SRC_PASSWORD}}
+
+[remote]
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
+{{DST_ADDR}}|{{DST_PASSWORD}}
\ No newline at end of file
diff --git a/dbm-services/redis/redis-dts/config/config.go b/dbm-services/redis/redis-dts/config/config.go
new file mode 100644
index 0000000000..23baa62357
--- /dev/null
+++ b/dbm-services/redis/redis-dts/config/config.go
@@ -0,0 +1,28 @@
+// Package config TODO
+package config
+
+import (
+	"flag"
+
+	"github.com/spf13/viper"
+)
+
+var cfgFile = flag.String("config-file", "./config.yaml", "Input your config file")
+
+// InitConfig loads the config file
+func InitConfig() {
+	flag.Parse()
+
+	if *cfgFile != "" {
+		viper.SetConfigFile(*cfgFile)
+	} else {
+		viper.AddConfigPath("./")
+		viper.SetConfigType("yaml")
+		viper.SetConfigName("config")
+	}
+
+	if err := viper.ReadInConfig(); err != nil {
+		panic(err)
+	}
+	viper.WatchConfig() // auto reload the config file when it changes
+}
diff --git a/dbm-services/redis/redis-dts/go.mod b/dbm-services/redis/redis-dts/go.mod
new file mode 100644
index 0000000000..8caa38681f
--- /dev/null
+++ b/dbm-services/redis/redis-dts/go.mod
@@ -0,0 +1,54 @@
+module dbm-services/redis/redis-dts
+
+go 1.19
+
+require (
+	github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1
+	github.com/dustin/go-humanize v1.0.1
+	github.com/go-redis/redis/v8 v8.11.5
+	github.com/jinzhu/gorm v1.9.16
+	github.com/juju/ratelimit v1.0.2
+	github.com/pkg/sftp v1.13.5
+	github.com/shirou/gopsutil/v3 v3.23.2
+	github.com/spf13/viper v1.15.0
+	go.uber.org/zap v1.24.0
+	golang.org/x/crypto v0.8.0
+	golang.org/x/sys v0.7.0
+	gopkg.in/natefinch/lumberjack.v2 v2.2.1
+)
+
+require (
+	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/denisenkom/go-mssqldb v0.10.0 // indirect
+	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
+	github.com/fsnotify/fsnotify v1.6.0 // indirect
+	github.com/go-ole/go-ole v1.2.6 // indirect
+	github.com/go-sql-driver/mysql v1.7.1 // indirect
+	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/jinzhu/inflection v1.0.0 // indirect
+	github.com/jinzhu/now v1.1.5 // indirect
+	github.com/kr/fs v0.1.0 // indirect
+	github.com/lib/pq v1.10.0 // indirect
+	github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de // indirect
+	github.com/magiconair/properties v1.8.7 // indirect
+	github.com/mattn/go-sqlite3 v1.14.16 // indirect
+	github.com/mitchellh/mapstructure v1.5.0 // indirect
+	github.com/pelletier/go-toml/v2 v2.0.7 // indirect
+	github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
+	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	github.com/spf13/afero v1.9.5 // indirect
+	github.com/spf13/cast v1.5.0 // indirect
+	github.com/spf13/jwalterweatherman v1.1.0 // indirect
+	github.com/spf13/pflag v1.0.5 // indirect
+	github.com/subosito/gotenv v1.4.2 // indirect
+	github.com/tklauser/go-sysconf v0.3.11 // indirect
+	github.com/tklauser/numcpus v0.6.0 // indirect
+	github.com/yusufpapurcu/wmi v1.2.2 // indirect
+	go.uber.org/atomic v1.9.0 // indirect
+	go.uber.org/goleak v1.1.12 // indirect
+	go.uber.org/multierr v1.8.0 // indirect
+	golang.org/x/text v0.9.0 // indirect
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git 
a/dbm-services/redis/redis-dts/go.sum b/dbm-services/redis/redis-dts/go.sum new file mode 100644 index 0000000000..35421fbe43 --- /dev/null +++ b/dbm-services/redis/redis-dts/go.sum @@ -0,0 +1,585 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod 
h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= +github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denisenkom/go-mssqldb v0.10.0 h1:QykgLZBorFE95+gO3u9esLd0BmbvpWp0/waNNZfHBM8= +github.com/denisenkom/go-mssqldb v0.10.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1 h1:CaO/zOnF8VvUfEbhRatPcwKVWamvbYd8tQGRWacE9kU= +github.com/dgrijalva/jwt-go/v4 v4.0.0-preview1/go.mod h1:+hnT3ywWDTAFrW5aE+u2Sa/wT555ZqwoCS+pk3p6ry4= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 
h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= +github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod 
h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod 
h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jinzhu/gorm v1.9.16 h1:+IyIjPEABKRpsu/F8OvDPy9fyQlgsg2luMV2ZIH5i5o= +github.com/jinzhu/gorm v1.9.16/go.mod h1:G3LB3wezTOWM2ITLzPxEXgSkOXAntiLHS7UdBefADcs= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/juju/ratelimit v1.0.2 h1:sRxmtRiajbvrcLQT7S+JbqU0ntsb9W2yhSdNN8tWfaI= +github.com/juju/ratelimit v1.0.2/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.0 h1:Zx5DJFEYQXio93kgXnQ09fXNiUKsqv4OUEu2UtGcB1E= +github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de h1:V53FWzU6KAZVi1tPp5UIsMoUWJ2/PNwYIDXnu7QuBCE= +github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= 
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us= +github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go= +github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig= +github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= +github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/shirou/gopsutil/v3 v3.23.2 h1:PAWSuiAszn7IhPMBtXsbSCafej7PqUOvY6YywlQUExU= +github.com/shirou/gopsutil/v3 v3.23.2/go.mod h1:gv0aQw33GLo3pG8SiWKiQrbDzbRY1K80RyZJ7V4Th1M= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU= +github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= +github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= +github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= +go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= +golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod 
v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools 
v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= 
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git "a/dbm-services/redis/redis-dts/images/redis-dts\346\236\266\346\236\204\345\233\276.png" "b/dbm-services/redis/redis-dts/images/redis-dts\346\236\266\346\236\204\345\233\276.png"
new file mode 100644
index 0000000000000000000000000000000000000000..1df5b36a0595b5155a36c2f6a5c11eafd8d8d999
GIT binary patch
literal 153281
[Bin 0 -> 153281 bytes: base85-encoded PNG data omitted -- redis-dts\346\236\266\346\236\204\345\233\276.png ("redis-dts architecture diagram")]
zT=l)DS}vEWg5B@b&xPGt=RDJPN|_X!E=LqsDBSbL(&XNslob|Mz{#p)ohY^&fDfhzxV|xKCtEVriLh$E@-=|O)y|(LP7)%EmLRWgr3T$-ed$3z* zIn3m-PNGmUHxY>M(Leawe+FQPJgXNDt;<_xRlGvd$vKR^D+93e7>b7OKNLd@sD}C? z1zA2#2aP+KPaY}uDy`$Bhfi1zcH;8{9*AKslX~d|6@PCZuXHBQpgiW4cf2!;ZHF9> zx~q?j8S@EAN&T#I4>|_Ex2f4vCrfMeeyzc$B#8@n{A+;(fm^?#MXTarr1g$md&?O9 zI}DPv6rPj($$FO7GxbJ=nlS%Ut58qisXi^6S=k1jYEow0@ZB-rC#P=AO?~{)&Vdw8 z(?I&qyWlld>T=&lxS{S!fG|{p0iN;hLa*{*V5`-{@54gy0QlCyAT+Rlrt%EM3_A@YQTeZ)(giA6Yl|Fxh0FD7HW7QaZ!6z&o?256{K$lkk!(;*VwGc2@!yY zKJ*nr=$Dc_cJA5r;x8=m>$A3biuIXKpdH+?YOT*RI@+;JNHmc|S8bGSuA^Fr*%K^cmxj>eN$I|FcsJKD3SCip)2`S)ASR+~*92FiA@(lB)y;5ix~WSx za1RJGeAd+lGw^WW`Fn=y-d9zr!Hy4l8vFGDAOA{vljaZQ5fYby8N?w>A> z*UN5}gq24NGFYH~9@${LD3hC~kU>ty^!=(`!H>w>sXGH0V`O8x8cEJ@@|92NC#=cu zJ6MHaA)xfhX4m&?)_-A(;j6y0bsGS~ePLoJ=8|-sdQ7MxT z0qL;vNDwdpA))p=M;RX&yG3?9N#-s}b!w|%#h`XNL*K{RH!jm^Y{kAK-6wz&Ip0H1 zOiZj(KNaKDP}ql3Ptfne2#_bMj@JI}EEVClc4U<35jV)Pv1(fTv}F{whSjjyQ4-z- z!hRVedGd!&);*m~;b2wz6tQR%qWiXYRq^(wG-L47>8i_+VaB zj?9}uLLI;Gk7FV9op5#nUTw%+*LR1D-3hZqU|QEenb#p^UsayNU@PIpki3(IW34s^j+!V)tMDvEMo%r zzW`bL$_$Jo)Uu7U(WL^cqd_it!X2e1@&)4m`h4foeMkAANb9e71$#Or*1cdM06+F^ zjMfpa<+$K!cz5g_$$f6II(GbHZpLAD)7+mS?i0)f!nZ~lb);Rk0z7Y70-K? zb-k6SjHafMLTi)WYDkbg;xRDZ@(Qb z3W*GWQ;~jvy?g~9J16-hZT(9yc7!u!1Q>|JfACGm|!G2 zjfu?7j)?zp!aE-6%5&`Zg=&qfutccS*u6+@(jh{-oBcd}SQ@Z15VoI&6XxT&H8i;F zFxSDjyeYUAan!b<<3@h)TJiG!xJ~SxvKzjX5kOi!o7Pa(R^NzIWIJu1@ZPU{X5{{@ zngoA7i^(`h1s$Fy{xn@lPA_b|jrrv5q$<4uDsJV8x}X1aiEomIp;;S-NiGRl%#jKS z9toE}-k-JJu?2BKtW4G!j=f^Gpw(wbLuO|K@r;YY5aiw)Jk?K1O-^Tz*R3%sWwvZ0OFEZa zZ{Gj>l7&8O&?u96|1C`TdP`l~#3X-?A!fTVsMPDNM~5BPGATU|;VJ%wfW?mwpZ^Ru zl+rH_dBGJ3z7gWDFvlEHkyd4({O#(t6FYAg+1x?9i1X7A=yPq^Qm@TWhT4~(oQ_7* zDB9xdl+{58D2tiblRHpRUx(rhurT=y?{PrI#+|VrqIdx(&_q&HW3nHN^ z*<(hJb4tw9VTnR47Zl;{hZaOh+ewws(c!$-{Kb-Yn(aLWrqSEz#>4N# zyVr9w2$?q!%v3e$ddLg9zHB^mT)LsQrYY4hG zBbEm~QYPpW%T91DBX~eGP$NLE?dc9~E63v|{Feu*S*6JZXNfL3DV%2vZ;Zgd^|ZOn z#C}(wn##LUnIm0Z>v`LrA#fC!S@&E^i?vGDMupFne1F>Ip(3CYLD?iL#GW6{>(uqS zvpg3j&N`o4(a(eve{!SFtxxe~&HUQeeL+4`|AIL3Yw;KFw3EimxTI0Irne&Ya&TdC zAl=CWU{dV0!&GhVh(B{eHC6_sU*Gb~p zUC+bXWKOHXs?1c#SkF7WVuwy#;~;F#BV@1&n;wSVFjGM~EwJaV1{Q743z!+DxV&K; ztxz%UwFD=l&0XYZRp2Ag%oErx^|?g7*nl_NhhyQ!v}OaFodV_}hg?1tWGF9_C@gl1 z*6d}*GEIkJ=3y{IJHXSFU_CqHrfL_K(3VqRndYENd}_ZrlbER9=w1BzCwTj1H8%ZX zYK;B;v&Evhb6b>S?~};Zk64kZUU2GI_s>o=2#exK+@7M}jXAF<)#YB?oPp;L$7Qas zU4|iiJ$<2Boyvo7NMK_iUPs!inruVBw+6b3VNhPZ0#m){QrK8462eyJj)t-aG5-agX-In`@(r!-}nnWQEf2N4_Ji zM}niQRrcKo+H6tJqY)JJENHPlogFUxRwi#ge(ZB&iMZztX0iMf57xQ(=}}qc=1;B~ zdg->}-lw96qg61J)_(_#5a%T1yk|zwuJ8GvV|Z;lz;B{-fqn#o^5^4c@foj-SPU9Y0&^r>xd}1GYytT91Yjq&f#~ytlfY>=0juArLnj_h zhM-H8b-&XJfdJaa&_Kj4U{&$`0Am&l?i|G0#GmmvkMw;<0reXWIHS>!b7>CL)r2B|3wF`~U> zt;%Gz*g8P~k3F%FM3}?DUY!_#FrrY|BeIt(0eXVoMS0QWY53e`J^NS^I5~fmAfq|D zciG1Y&NW(kd+NMcTRNaM7iYJgJa3Q1X5=t>z-+O{f%LK>eYF$0URy>+h6}Lk7CBcf zqW@}?{i0@fn2WtSTwEljb!Ne`A1C7sc&|Q;qg$By%rsimj8MB5BSaiA+7lOYr=Gz+ zE2!bU0)kInp*;Kaoh$Xk^UdDJ?t3YpKK(wLleJa{)WN%tMpQVB!0OM$s?u)0E-|7* zkH;-bvn~&qw6et|!4#C&m>LB~@wB0~H?bjBZ=`s>pMfrPk>vVH4J9y|hrUwz|4WssGI@lkux2c$rhrrr7gsH2prnHn3xjsgN}3AhN3J41_S0 z2?-bdPOXV>5o7|SOW`a~8$GiyeD{=l zOqjq)6T~FJFSIPl4td(s2;(DHN!dIL5qprJMp-B7EbuxTU^I@H@aZk7SamzZNaGBX z?o|3-A3u=g`?lWjUbjB&tagS^_4)T5F_weWET0?w1zES=w)Y20iM~w8I>uiBg;UzI zi#ai{K+NJshmlk{D&mqSyr+OOQ_p1R^|rpKH6QaRVPoQPS;`23a4@xJ3nGnOj18?1Th~eT$;@M7pZ}uj5IGkMX(#@Hy zFPp)w>*Wz0u?vCP54aZKmdhlUkzr}IW68o_pVst=nYCD69#Jrox;OUm)sB-t&zeYfx*M=n9P(J-Gyf%1bWrjFHf=YmhF(X1-Jtb|bRc zxgWK7(<~~{VleI^8kW#m%afsU2rjXcW}=Xk|4s@4v6}$3IRlSg67sqH{BNaqUxeab zX~P=0?npW#SJ_@4zuVPdL0pTJO&VfvTZ)6Mt!U|skcvy>&D5nkPs+1-M)JHB%~3+n 
zP^ZHG19S+yJ=HEtMeSsUcWmT52F%#e4*>-BJe*F6B8BrE0}XRX8tt(*mI zI~H!%yi1@Zh$}fCf6RM@>;O0NpWm8tIYbvPk*WxwgZLL~9YLI;I(+>jt&f#eotryi z;d)krVTL6`Xjr61Vmh>b3Y4g1OFx(_R5=%Ocys9dzpK_NLKj9;4>)@Sw2^C}|2eq- z&hH0vQ-)~ zXDo<^zHGCH{S+GB_e6ns6+`b!OcXp0{_^1|Fczt=9 z>{)Yeonav_#zvgaANa|*s8947Kj@Q_?0lW9?}`#u@o}0__6a?1*uZuNRLsZw0lQTc zz{lWQc8)iy^#T6wi{5(0a|}us7BK`ON3*4A;(EV)LASxZ(}5q}9=+}6vmSJ$6#ld9 z2J?0$AEFi3h{3)MJrP2P#TJ0<7Zohb@3T?h5I=mWv7Q^nrvmJd?A6K4V*|N6d&QEx za1r+u-<=G%Oc;Ixpp-enMT!{j$N7B9fp<7_@^9>P1M6e_!7VKPx`C}slQbZ=2M#CO^HS~|tW zDs5>t_e=#C?_NkqG@!9O?=muB7VR*!K;S2Kzv5%?EYlr=Yx@JHL+0mwpC2FpT8Z8N zP;*cQO*@U61ztU$CHoD?xGg~%KhK&xru8>|+fBOy8ynkL-Sx^C83o#%EJ@~a`bF@( z<$HW5NG6Zo7p6tRVu~T=*onb7&%pqMm7_j)>$K;RuMsnZ&Jr^w%L{{=CmDMu+vb8= zA2lyo*IPytkY35Kr~Z(kTJOIEopi;sAEy9Ya`0AKP6mg}6j=VcYjsP-CX711@!B$R4tuXX-jFlHzyyr5DB+1*J;PplR2Ls^ zWx$@Bp?lw9g}!$R!A+_3s(!ffi{2_>Y>nxGD?Z4ahll5TKTAv+uNr;*TNGiowVP)k z#xHO49XR*QH~!V}J@ERWF{ z@O<1G=X1-A@V&MaxXN>|o5TLVRgqF+)EGdCA57z%3b@;~Z`=r)mK%Yd4C6J-w{7OJ zCxT=s6i;->R_{n9l}l!}MB6k>Bsq(R^IhdZm$y(GQjC2JVJz`FD43wcf^4`uE(3$- zA(MaY0jpu1fqlk*MkJteG(7x{W_Ho#q~}g)>lS*~d(1bMsGy8|2f45R^DQ;b|0%Ni z;do}}k+Dm;8Z&uSRmlhmE{uBRzLILy=@bqJ=`C0F=00RSKNLjbk?V+X>P|2-w4Dr} zKMu$L9H)QyY(G#D2){Vj3EL<1h`1eQ??DwLG%r@8JJ^uku#MlV-k%t?sYlpUmr|hCXP6GJ^ zL(?2F=T1HfZ>rvqT{?o|2_Xj0iN3YbW%m#wZ!YJEI zgOcjm=;$?>$A<1%Z$0zFcU&syC8ei)b1Tx@If$W}JQd<9=4k2luI&;Wje*@>?5bst z_10+RXQ1lQy}5D&G@86Bd3P5@xbBgkZ*(s9^i5uc69 z>K+71iegdCYv>SeGI*^;kRUI$uUl}(iTg-|n@)rmi^{p`4bnC=n)3ScTZ^L_R_Fj_ z-?J#c#BtJ8tP)(>uYkijn65Hv=sH`U(JMQ3h1xkqoF$YpL} zq~6;aF#WBi4vlu^Zd+uIHF)^ZXN5)k=21^El#?HQV7mm`;%>TZ@rffYTMQbv{z37_os8|!Wz>Q>m%o9`lh&tIxiH;r z(}cNaex{|v%>#2M_n#n$#O=d=D?ceEv0c3iG^O8pg@D^gG!f0AhIRI43>RyXyAheM z)(#6J`X#70cn~na56-bAz7w&nXj5aQP;Vo1Aw+21X_idW;IcLC(hAMvd>uW<4D(wN zpO;vP{h0NTM|0U1Np9JNs}B3;hLdyuY$fBOh;U=X^#F}}2DwJ^mARc#`)N{L(!@|c z-_5lhNFze$-qNl198Uw{wP~@tXQv|9)y3(+=RNk~yX_F#-;u$&;9ZQ$G4~@Hb?UBh z`f?+AOn0<_D7Y0vDqeUf%;N$%nrU~B-RBfj$N?D>BK%SOd;HE?2vuEEfAb##hV>IW za(U`~>dm{WLvk|*_y~@CNFwV>G6W5qac`DS*qYzl6G$HX0k7FfqUM`78La=#01H6h z2pH#*QGSs`am;AgLYEef=aE8rG(IC03}t-j9DlbL0TGi)2~?)~2q6f){DjjQ$R;Uy zfAcBY(u>(+0OW&n(`w%NCSE85&3K_bjxC4kVn6>XU{A8?boPD(C(Ue$E(=xPD5tWJ zZGTqkXTGtz<9L`eBf1~)p^$>a;#HMP4;Pbm^&n-}#8g9wSmJfLusW*C>7ytLQFzr< z_JB*7av1EL8^25l(GDR((F}0K{_mT1yks`a;Pz}JU0u7{F2q5;QKL1z5)>s&?SS}g zSqCQjg4)FIwfxJMFOlmZU_qIB;k^CFRnqlH#!Sb8%0#mIwCvGm9G{Y9Ipw$kfMuwm-cVd&etdZMAE z8`WS<_dm#R`#Z*^e#y-VG2>-rs((wGtoIbC_+1Y`icM3Dl6t0>!E?krD?i6EaJO-n za}d8`8>2U3-?mE=%Do5`XXsiT2s)Rz*!WG1_-5{+Al*^nlO`yW4s`90z*&IT&&x0- zo={@Y0#v0i*rty!Y5j`b(L3#p4Q1q#T2$fndCoW}S)f_Ua=`IrAn=k4!_{pstSMawhvmHHEtad3gA~802hj6sNq;UHV zy5j^P=3p~LlUwgpMRzE%kntZOtJB0m7LQtf%iX4#+$}^d`kROyTV~QF?xG{_rG;gP zG=SHEZB2DKT{tnxuvA(;L#HC><6XoRhsSrj)xjhQ#l%g#c287f%)D>{20p##WVEfk ztn{ro^(F(7T(Ge0`w6^?yDPtP-}{9NaKP1(#a(?jD%Nry(dcWzPgE`Ru}($^is&dT zqcpXtH(O8^qrZ{lRxBsGB$YS4uPNY;{Yd?eEN3YRbwJTpv0)@j40h!<+8O;6i}AH# z&_$9m6m7j-GQ8U2jLgn@k35V_(RZMc`iw#V+d&ofiV5NNsNc237ZVec^)|!+dXsy6 z9da(9N^CO)-l#<_D#fWIGkFN{^kigCzGWb8e3YL10}i8^!@YQw`)UZhLZ5+K_1&Hh z*FovZ8PRW~TNQc8g&_)(Wt)tnKD3AU6`~=|o?*nQ`Yv!mm?4&=ygBZH|Mt05<-B=X1~?9N}VUs;HRL44No5vy&rCRHvNQ0;%O(I zfS`^_MuYXYci~0V3u|?~4?-r(Gd=BMv&Wz_$q!uL18Sv#tLw6-^4b~iOwCvU3QN+} zZfrs2tf?2SdJ%X{MSH2Kbm{!WiOX#M$#F+nYk?&(F>w}!p{Z+CNcocUbQoJsqC!VQy_A-`9VU z@f*+v85Ss1SoK(Ao@Ua(^r%S6d{Cdm?tduip}Uz((Sj=Q>nRX%nVq9xfSZ@ZQNS(@ zP;F_E1PCLA%_jUJ>N)}^%*iH764s+ZB`JxKV6jM zRp1-R@O;xJ#&R3I^Ll{{XHQLcdBt<$55Il%H)evvlzrU~ft^%f^t+Ekr&r(SP*x;P za|2}rI?u<1nBwJA{n(*(ZiS;xR&FSWwkmJ|GrgXyw{GW=04d zIuXK^FAQin=yv`X?Z6j(jV|{fmjwL-jvcr0*aK!Q`2FZO*kc(w=jORC&~_DYnw{@X 
zf!OBm^Ha$H+HrUndih1>$$Pob7uj*-(aO4&osTUYOwR2-@Ud}tslH%T=-qa&`6>RI z{-5IFfUA~1=;G$k2fv%GzMlxM5-(`7`TL?uwJd3E&k?BhQWRxjCKDr|vrm>KxBbP3 zm`{8x-DtY zd7V?QK)u8?Lg=3Lyc?GCecu|ee5nI6cKOS}paQJGOs6)LjBdTY#N%vq0|OcE+5&ol5hm%>2=eiJ_fu6M?SnJTAtHbKPlO zcXy0E=IJCJ%X~|`GWEWDbYxAs>%!Mrf*_$I=$H9H*XOQpEdZ}#HaF=lRP3ZIqA&b^ z@%0rzZGT(0Eu}~)(o&$f6=;D#ixzh)TBJA>cY=FxDQ*QyvEs#@;1qXvOK=Mg!R7t> zzu))n_wIZ1W-`N&WQK6E&pCUqz1G@FpCMQh;uj*nGPg}~8aW}?PWHT;Lt<}VeoV}} z7hH(BZkz8mq_2 z*Q^#P8SqVuBX)S4N9BgA!jpQB zMP6m5C3|Sp_mSLu#*Ct#65YE@E7!#mvFMP4e?1{KvUU!`V+izvLbXD_U9d*jpKiUYIjSv2b1qBAU0>_*xI1I2_&$4$qu)M@CYBef?$s|;X=S%rPkx>3kUlcKI8hf6v5|GJn=#&AD^u&c7IHv!hDRz>h8cWbAplvCTQOg zj4diipG#fWE@>wcExQ*&;UP`M_Kj(XD{)gUkTw+yU#|R{YrC(YftDpc{mF7LDbuA< z6i;4lF($d^LH*Y4MlirfZ%yl!{M~Y_mvc=>8HX(acY})yrABo+CIDDaZFtjfeJ5 zFPDtlw~u=$!2*hPx6Xy&8_HFMlu<1th^3%@9f*TqcfDY z;p6L9*9!DD>^$g-Y|fyAi-1#8520=Eo-Bh`pbWl)>DXPzo+Vyo!KCy;uTl!A3o@h> z2q_!^iiV6ruME~js+3QV!=FQuW{HIRkYL=*t%E+NYjdHlA=lKPw0KpC=yU->){W-a_%JnXOsSleZ-4 zyixz`-@HMB601y2IFnAjofCA^eNGJA&na>HSA*blSj|ekI0lEKu;?73in6}zS1tN>uH4D(fnj@Sz z1K*4I98E9xI`j9we;n607eSdXhSBrqP`DcNw`>wijr*v?77so7KBl^v7y>=}81Fbj zvAdnLQ;Twzi|&4{;}C^n`y?!^LqO|iq5+0o#;2JZ{$%?HoWrdYGSbpy*p?ulPH=Y& z+Oe!oiMXjux8t2}vTgn5PpffASRg_@6+c)3i}BwzFTNNRLw+(DZ)HN$Y3zvF*;)NS zjylw3JU<`&hlTJXuk!)DCzAXv*4D@iG)e}!A9%)U6k6zsXn(0?eH!tn2fOD>zTW1< z(C8Z-W|!C2e!jwP&|tfs`4P;3YmGZ$-f}A7&Z!ZX_#K4TW9}#+@ib*TA1*jvp`zYc z^3d6kci+uVq<}*_rtDBT!TxJwcuZraU=-?B!DDnw*M0&|N>4O<-Y00*&EZ3!QE8~# z@875+RZR(WKSU~i-aRGh14`tW9p)A0$t9C??_oNJ#0ph-m7-A6_jArnqDyj@_CxlO z$WONz>m-XfA1>wPvV7Kx|8~5*9nxC-D`Msib9xq(=~E3p%;2x@n*tE#*- zpGEd#GNgXJ(FSYrFKn|MFQ<_SZn_b3I0Y_5dX`)havQ(E;y{ITkUn|CJ(54u%$rXB z%>-8OxJ5Ej-1wE=&#*a)#o%3(5{`f&o7_i{ts}R#bxs{zxXXeK7iqj>+nebp{Xl(&s#Saust16Y;|ElcI!7;$#_;kD$%~n$ByZMYJ?8*A(4(b4L)PAZkC8 zOYe>THFLVmil)J588W}4>*@JzZI31I4E_V6_LDZgvVjnMcnmIm3rh;O?U6p9KG}Yb zMM&S7fn*soWWHhQFUM}uTCQ(zbnB1^(`?AtOxCT>r^p}GickB zIl6xdwj0;X8v|ojrz^bxv1j7ffEc4pMW;9EOwqX)MP)6Pt~|r2B-47)tSn#~t0q3- z7NZ>kncwWXXq27ckWujlC&W7_?9?sCR9s#i68`M-A~73a@7;RqKWx1)rQcR8>R zWHpT{jh;kdi(L>&@s+1P@RjH@0RSLbshsD~>A^qwR*oyDK<&b}61#}{SsgE}{(RnQ z!w;PTPiI8xWs|JkeK<=)uR>I2X~Q^FTNO^Tlo+8fV%+>3eNodajgA@Pj+$`w^%&2} z!Xa=gLn*X%NXQ;RR>E0Z%;S$5{G-MX0fT~J1<}Uc!Xj&u!#aS7lb^ONA;f1o_`3e2 zpQ(f!OO$?$`{lEsz6GxghkF3tgr2maQ)0Y%Q1rQ;_=UM7XMS^<@xc6-XS^I5d|^(X znJZgODMwTp*$5*@>0ifM`?46SRm%5<&2zt5F$1K*Pz3!hyS2J)^r0ep)#My$h4_b@ z{tqPm4>IkC=J#g>aJ4sKV#=+4esR=|+}Lv5=o||$vc0iqc#unX+g3=)cpdw^B_zD> z)85qG1zadgTca4ay{p8?&S?3MUt;yv$0s)$&mcGZP?w`8Gkz<~B;I6Q_4zrpE^D|CQ>YJp)sNTbyeeagYQ3Vri(QWQDaty)N3D31%ZJ